diff --git a/src/hir2mpl/CMakeLists.txt b/src/hir2mpl/CMakeLists.txt index bbc804edce1e1b7045d67cfc65d9c57a157aa62a..c952ed0020d8d2cebf6badedc8df852d37f1b33b 100755 --- a/src/hir2mpl/CMakeLists.txt +++ b/src/hir2mpl/CMakeLists.txt @@ -19,6 +19,7 @@ endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMIR_FEATURE_FULL=1 -DHIR2MPL_FULL_INFO_DUMP=1 -DJAVA_OBJ_IN_MFILE=1") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMIR_FEATURE_FULL=1 -DHIR2MPL_FULL_INFO_DUMP=1 -DJAVA_OBJ_IN_MFILE=1") +set(CMAKE_SKIP_RPATH TRUE) if (${ONLY_C} STREQUAL "1") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -DONLY_C") diff --git a/src/hir2mpl/ast_input/clang/include/ast_expr.h b/src/hir2mpl/ast_input/clang/include/ast_expr.h index cde8201fa37cfbf5015bef0e012963b627385a3a..59f0df8235dcd87064b745f718a344a798bbf666 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_expr.h +++ b/src/hir2mpl/ast_input/clang/include/ast_expr.h @@ -24,6 +24,8 @@ class ASTFunc; class ASTStmt; struct ASTValue { union Value { + uint64 f128[2]; + Int128Arr i128; uint8 u8; uint16 u16; uint32 u32; @@ -35,7 +37,7 @@ struct ASTValue { int64 i64; double f64; UStrIdx strIdx; - } val = { 0 }; + } val = {{0, 0}}; PrimType pty = PTY_begin; PrimType GetPrimType() const { @@ -167,7 +169,7 @@ class ASTExpr { virtual MIRConst *GenerateMIRConstImpl() const; virtual UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const = 0; virtual ASTExpr *IgnoreParensImpl(); - + MIRIntrinsicID SetVectorSetLane(const MIRType &type) const; virtual ASTDecl *GetASTDeclImpl() const { return refedDecl; } @@ -197,7 +199,9 @@ class ASTCastExpr : public ASTExpr { explicit ASTCastExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpCast) { (void)allocatorIn; } - ~ASTCastExpr() = default; + ~ASTCastExpr() override { + child = nullptr; + } void SetASTExpr(ASTExpr *expr) { child = expr; @@ -285,6 +289,7 @@ class ASTCastExpr : public ASTExpr { private: MIRConst *GenerateMIRDoubleConst() const; + MIRConst *GenerateMIRFloat128Const() const; MIRConst 
*GenerateMIRFloatConst() const; MIRConst *GenerateMIRIntConst() const; UniqueFEIRExpr EmitExprVdupVector(PrimType primtype, UniqueFEIRExpr &subExpr) const; @@ -308,11 +313,23 @@ class ASTDeclRefExpr : public ASTExpr { explicit ASTDeclRefExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpRef) { (void)allocatorIn; } - ~ASTDeclRefExpr() = default; + ~ASTDeclRefExpr() override = default; + + void SetIsVectorType(bool flag) { + isVectorType = flag; + } + + void SetIsAddrOfType(bool flag) { + isAddrOfType = flag; + } protected: MIRConst *GenerateMIRConstImpl() const override; UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + + private: + bool isVectorType = false; + bool isAddrOfType = false; }; class ASTUnaryOperatorExpr : public ASTExpr { @@ -320,7 +337,9 @@ class ASTUnaryOperatorExpr : public ASTExpr { explicit ASTUnaryOperatorExpr(MapleAllocator &allocatorIn, ASTOp o) : ASTExpr(allocatorIn, o) { (void)allocatorIn; } - virtual ~ASTUnaryOperatorExpr() = default; + ~ASTUnaryOperatorExpr() override { + variableArrayExpr = nullptr; + } void SetUOExpr(ASTExpr *astExpr); const ASTExpr *GetUOExpr() const { @@ -357,8 +376,8 @@ class ASTUnaryOperatorExpr : public ASTExpr { return isGlobal; } - void SetVariableArrayExpr(ASTExpr *expr) { - variableArrayExpr = expr; + void SetVariableArrayExpr(ASTExpr *varArrayexpr) { + variableArrayExpr = varArrayexpr; } void SetisVariableArrayType(bool isVariableArrayTypeArg) { @@ -381,7 +400,7 @@ class ASTUnaryOperatorExpr : public ASTExpr { class ASTUOMinusExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOMinusExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpMinus) {} - ~ASTUOMinusExpr() = default; + ~ASTUOMinusExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -390,7 +409,7 @@ class ASTUOMinusExpr : public ASTUnaryOperatorExpr { class ASTUONotExpr : public ASTUnaryOperatorExpr { public: explicit ASTUONotExpr(MapleAllocator 
&allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpNot) {} - ~ASTUONotExpr() = default; + ~ASTUONotExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -399,7 +418,7 @@ class ASTUONotExpr : public ASTUnaryOperatorExpr { class ASTUOLNotExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOLNotExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpLNot) {} - ~ASTUOLNotExpr() = default; + ~ASTUOLNotExpr() override = default; void SetShortCircuitIdx(uint32 leftIdx, uint32 rightIdx) override { trueIdx = leftIdx; @@ -416,7 +435,7 @@ class ASTUOPostIncExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOPostIncExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPostInc), tempVarName(FEUtils::GetSequentialName("postinc_")) {} - ~ASTUOPostIncExpr() = default; + ~ASTUOPostIncExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -427,7 +446,7 @@ class ASTUOPostDecExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOPostDecExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPostDec), tempVarName(FEUtils::GetSequentialName("postdec_")) {} - ~ASTUOPostDecExpr() = default; + ~ASTUOPostDecExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -437,7 +456,7 @@ class ASTUOPostDecExpr : public ASTUnaryOperatorExpr { class ASTUOPreIncExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOPreIncExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPreInc) {} - ~ASTUOPreIncExpr() = default; + ~ASTUOPreIncExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -446,7 +465,7 @@ class ASTUOPreIncExpr : public ASTUnaryOperatorExpr { class ASTUOPreDecExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOPreDecExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, 
kASTOpPreDec) {} - ~ASTUOPreDecExpr() = default; + ~ASTUOPreDecExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -456,7 +475,7 @@ class ASTUOPreDecExpr : public ASTUnaryOperatorExpr { class ASTUOAddrOfExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOAddrOfExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpAddrOf) {} - ~ASTUOAddrOfExpr() = default; + ~ASTUOAddrOfExpr() override = default; protected: MIRConst *GenerateMIRConstImpl() const override; @@ -469,7 +488,7 @@ class ASTUOAddrOfLabelExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOAddrOfLabelExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpAddrOfLabel), labelName("", allocatorIn.GetMemPool()) {} - ~ASTUOAddrOfLabelExpr() = default; + ~ASTUOAddrOfLabelExpr() override = default; void SetLabelName(const std::string &name) { labelName = name; @@ -490,7 +509,7 @@ class ASTUOAddrOfLabelExpr : public ASTUnaryOperatorExpr { class ASTUODerefExpr : public ASTUnaryOperatorExpr { public: explicit ASTUODerefExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpDeref) {} - ~ASTUODerefExpr() = default; + ~ASTUODerefExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -501,7 +520,7 @@ class ASTUODerefExpr : public ASTUnaryOperatorExpr { class ASTUOPlusExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOPlusExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPlus) {} - ~ASTUOPlusExpr() = default; + ~ASTUOPlusExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -510,7 +529,9 @@ class ASTUOPlusExpr : public ASTUnaryOperatorExpr { class ASTUORealExpr : public ASTUnaryOperatorExpr { public: explicit ASTUORealExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpReal) {} - ~ASTUORealExpr() = default; + ~ASTUORealExpr() override { + 
elementType = nullptr; + } void SetElementType(MIRType *type) { elementType = type; @@ -524,7 +545,9 @@ class ASTUORealExpr : public ASTUnaryOperatorExpr { class ASTUOImagExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOImagExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpImag) {} - ~ASTUOImagExpr() = default; + ~ASTUOImagExpr() override { + elementType = nullptr; + } void SetElementType(MIRType *type) { elementType = type; @@ -538,7 +561,7 @@ class ASTUOImagExpr : public ASTUnaryOperatorExpr { class ASTUOExtensionExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOExtensionExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpExtension) {} - ~ASTUOExtensionExpr() = default; + ~ASTUOExtensionExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -547,7 +570,7 @@ class ASTUOExtensionExpr : public ASTUnaryOperatorExpr { class ASTUOCoawaitExpr : public ASTUnaryOperatorExpr { public: explicit ASTUOCoawaitExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpCoawait) {} - ~ASTUOCoawaitExpr() = default; + ~ASTUOCoawaitExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -558,7 +581,9 @@ class ASTPredefinedExpr : public ASTExpr { explicit ASTPredefinedExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpPredefined) { (void)allocatorIn; } - ~ASTPredefinedExpr() = default; + ~ASTPredefinedExpr() override { + child = nullptr; + } void SetASTExpr(ASTExpr *astExpr); private: @@ -571,7 +596,7 @@ class ASTOpaqueValueExpr : public ASTExpr { explicit ASTOpaqueValueExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpOpaqueValue) { (void)allocatorIn; } - ~ASTOpaqueValueExpr() = default; + ~ASTOpaqueValueExpr() override = default; void SetASTExpr(ASTExpr *astExpr); private: @@ -584,7 +609,9 @@ class ASTNoInitExpr : public ASTExpr { explicit ASTNoInitExpr(MapleAllocator 
&allocatorIn) : ASTExpr(allocatorIn, kASTOpNoInitExpr) { (void)allocatorIn; } - ~ASTNoInitExpr() = default; + ~ASTNoInitExpr() override { + noInitType = nullptr; + } void SetNoInitType(MIRType *type); private: @@ -598,7 +625,9 @@ class ASTCompoundLiteralExpr : public ASTExpr { explicit ASTCompoundLiteralExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpCompoundLiteralExpr) { (void)allocatorIn; } - ~ASTCompoundLiteralExpr() = default; + ~ASTCompoundLiteralExpr() override { + child = nullptr; + } void SetCompoundLiteralType(MIRType *clType); void SetASTExpr(ASTExpr *astExpr); @@ -630,7 +659,9 @@ class ASTOffsetOfExpr : public ASTExpr { explicit ASTOffsetOfExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpOffsetOfExpr) { (void)allocatorIn; } - ~ASTOffsetOfExpr() = default; + ~ASTOffsetOfExpr() override { + structType = nullptr; + } void SetStructType(MIRType *stype); void SetFieldName(const std::string &fName); @@ -650,7 +681,9 @@ class ASTInitListExpr : public ASTExpr { explicit ASTInitListExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpInitListExpr), initExprs(allocatorIn.Adapter()), varName("", allocatorIn.GetMemPool()) {} - ~ASTInitListExpr() = default; + ~ASTInitListExpr() override { + arrayFillerExpr = nullptr; + } void SetInitExprs(ASTExpr *astExpr); void SetInitListType(MIRType *type); @@ -758,7 +791,6 @@ class ASTInitListExpr : public ASTExpr { const UniqueFEIRExpr &addrOfArrayField) const; void ProcessVectorInitList(std::variant, UniqueFEIRExpr> &base, const ASTInitListExpr &initList, std::list &stmts) const; - MIRIntrinsicID SetVectorSetLane(const MIRType &type) const; void ProcessDesignatedInitUpdater(std::variant, UniqueFEIRExpr> &base, const UniqueFEIRExpr &addrOfCharArray, ASTExpr *expr, std::list &stmts) const; @@ -789,7 +821,9 @@ class ASTBinaryConditionalOperator : public ASTExpr { kASTOpBinaryConditionalOperator) { (void)allocatorIn; } - ~ASTBinaryConditionalOperator() = default; + 
~ASTBinaryConditionalOperator() override { + falseExpr = nullptr; + } void SetCondExpr(ASTExpr *expr); void SetFalseExpr(ASTExpr *expr); @@ -902,7 +936,7 @@ class ASTImplicitValueInitExpr : public ASTExpr { kASTImplicitValueInitExpr) { (void)allocatorIn; } - ~ASTImplicitValueInitExpr() = default; + ~ASTImplicitValueInitExpr() override = default; protected: MIRConst *GenerateMIRConstImpl() const override; @@ -915,7 +949,7 @@ class ASTStringLiteral : public ASTExpr { public: explicit ASTStringLiteral(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTStringLiteral), codeUnits(allocatorIn.Adapter()), str(allocatorIn.Adapter()) {} - ~ASTStringLiteral() = default; + ~ASTStringLiteral() override = default; void SetLength(size_t len) { length = len; @@ -969,7 +1003,9 @@ class ASTArraySubscriptExpr : public ASTExpr { explicit ASTArraySubscriptExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTSubscriptExpr) { (void)allocatorIn; } - ~ASTArraySubscriptExpr() = default; + ~ASTArraySubscriptExpr() override { + vlaSizeExpr = nullptr; + } void SetBaseExpr(ASTExpr *astExpr) { baseExpr = astExpr; @@ -991,7 +1027,7 @@ class ASTArraySubscriptExpr : public ASTExpr { arrayType = ty; } - const MIRType *GetArrayType() const { + MIRType *GetArrayType() const { return arrayType; } @@ -1005,6 +1041,10 @@ class ASTArraySubscriptExpr : public ASTExpr { vlaSizeExpr = expr; } + void SetIsVectorType(bool flag) { + isVectorType = flag; + } + private: ASTExpr *FindFinalBase() const; MIRConst *GenerateMIRConstImpl() const override; @@ -1018,12 +1058,15 @@ class ASTArraySubscriptExpr : public ASTExpr { const UniqueFEIRExpr &baseAddrExpr) const; bool InsertBoundaryChecking(std::list &stmts, UniqueFEIRExpr indexExpr, UniqueFEIRExpr baseAddrFEExpr) const; + MIRIntrinsicID SetVectorGetLane(const MIRType &type) const; + MIRIntrinsicID SetVectorGetQLane(const MIRType &type) const; ASTExpr *baseExpr = nullptr; MIRType *arrayType = nullptr; ASTExpr *idxExpr = nullptr; bool isVLA = false; 
ASTExpr *vlaSizeExpr = nullptr; + bool isVectorType = false; }; class ASTExprUnaryExprOrTypeTraitExpr : public ASTExpr { @@ -1032,7 +1075,9 @@ class ASTExprUnaryExprOrTypeTraitExpr : public ASTExpr { kASTExprUnaryExprOrTypeTraitExpr) { (void)allocatorIn; } - ~ASTExprUnaryExprOrTypeTraitExpr() = default; + ~ASTExprUnaryExprOrTypeTraitExpr() override { + idxExpr = nullptr; + } void SetIdxExpr(ASTExpr *astExpr) { idxExpr = astExpr; @@ -1051,7 +1096,9 @@ class ASTMemberExpr : public ASTExpr { public: explicit ASTMemberExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTMemberExpr), memberName("", allocatorIn.GetMemPool()) {} - ~ASTMemberExpr() = default; + ~ASTMemberExpr() override { + baseExpr = nullptr; + } void SetBaseExpr(ASTExpr *astExpr) { baseExpr = astExpr; @@ -1121,7 +1168,9 @@ class ASTDesignatedInitUpdateExpr : public ASTExpr { kASTASTDesignatedInitUpdateExpr) { (void)allocatorIn; } - ~ASTDesignatedInitUpdateExpr() = default; + ~ASTDesignatedInitUpdateExpr() override { + initListType = nullptr; + } void SetBaseExpr(ASTExpr *astExpr) { baseExpr = astExpr; @@ -1204,7 +1253,9 @@ class ASTCallExpr : public ASTExpr { explicit ASTCallExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpCall), args(allocatorIn.Adapter()), funcName("", allocatorIn.GetMemPool()), varName(FEUtils::GetSequentialName("retVar_"), allocatorIn.GetMemPool()) {} - ~ASTCallExpr() = default; + ~ASTCallExpr() override { + funcDecl = nullptr; + } void SetCalleeExpr(ASTExpr *astExpr) { calleeExpr = astExpr; } @@ -1257,11 +1308,6 @@ class ASTCallExpr : public ASTExpr { return mirType->GetPrimType() != PTY_void; } - bool IsFirstArgRet() const { - // If the return value exceeds 16 bytes, it is passed as the first parameter. 
- return mirType->GetPrimType() == PTY_agg && mirType->GetSize() > 16; - } - void SetFuncDecl(ASTFunc *decl) { funcDecl = decl; } @@ -1288,16 +1334,29 @@ class ASTCallExpr : public ASTExpr { private: using FuncPtrBuiltinFunc = UniqueFEIRExpr (ASTCallExpr::*)(std::list &stmts) const; static std::unordered_map InitBuiltinFuncPtrMap(); + static UniqueFEIRExpr EmitBuiltinVectorLoad(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage); + static UniqueFEIRExpr EmitBuiltinVectorStore(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage); + static UniqueFEIRExpr EmitBuiltinVectorShli(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage); + static UniqueFEIRExpr EmitBuiltinVectorShri(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage); + static UniqueFEIRExpr EmitBuiltinVectorShru(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage); + static UniqueFEIRExpr EmitBuiltinVectorStFunc(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage); UniqueFEIRExpr CreateIntrinsicopForC(std::list &stmts, MIRIntrinsicID argIntrinsicID, bool genTempVar = true) const; UniqueFEIRExpr CreateIntrinsicCallAssignedForC(std::list &stmts, MIRIntrinsicID argIntrinsicID) const; UniqueFEIRExpr CreateBinaryExpr(std::list &stmts, Opcode op) const; UniqueFEIRExpr EmitBuiltinFunc(std::list &stmts) const; - UniqueFEIRExpr EmitBuiltinVectorLoad(std::list &stmts, bool &isFinish) const; - UniqueFEIRExpr EmitBuiltinVectorStore(std::list &stmts, bool &isFinish) const; - UniqueFEIRExpr EmitBuiltinVectorShli(std::list &stmts, bool &isFinish) const; - UniqueFEIRExpr EmitBuiltinVectorShri(std::list &stmts, bool &isFinish) const; - UniqueFEIRExpr 
EmitBuiltinVectorShru(std::list &stmts, bool &isFinish) const; UniqueFEIRExpr EmitBuiltinRotate(std::list &stmts, PrimType rotType, bool isLeft) const; #define EMIT_BUILTIIN_FUNC(FUNC) EmitBuiltin##FUNC(std::list &stmts) const UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Ctz); @@ -1478,7 +1537,9 @@ class ASTParenExpr : public ASTExpr { explicit ASTParenExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTParen) { (void)allocatorIn; } - ~ASTParenExpr() = default; + ~ASTParenExpr() override { + child = nullptr; + } void SetASTExpr(ASTExpr *astExpr) { child = astExpr; @@ -1512,16 +1573,21 @@ class ASTParenExpr : public ASTExpr { class ASTIntegerLiteral : public ASTExpr { public: - explicit ASTIntegerLiteral(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTIntegerLiteral) { + explicit ASTIntegerLiteral(MapleAllocator &allocatorIn) + : ASTExpr(allocatorIn, kASTIntegerLiteral), val(static_cast(0), PTY_i64) { (void)allocatorIn; } - ~ASTIntegerLiteral() = default; + ~ASTIntegerLiteral() override = default; - int64 GetVal() const { + const IntVal &GetVal() const { return val; } - void SetVal(int64 valIn) { + void SetVal(const IntVal &valIn) { + val.Assign(valIn); + } + + void SetVal(uint64 valIn) { val = valIn; } @@ -1531,29 +1597,47 @@ class ASTIntegerLiteral : public ASTExpr { private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; - int64 val = 0; + IntVal val; }; enum class FloatKind { F32, - F64 + F64, + F128 }; class ASTFloatingLiteral : public ASTExpr { + static constexpr size_t kFloatArraySize = 2; public: explicit ASTFloatingLiteral(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTFloatingLiteral) { (void)allocatorIn; } - ~ASTFloatingLiteral() = default; - double GetVal() const { - return val; + ~ASTFloatingLiteral() override = default; + + std::array GetVal() const { + return std::get<1>(val); + } + + double GetDoubleVal() const { + return std::get<0>(val); } void SetVal(double valIn) { val = valIn; } + using floatArraySizes = 
std::array; + void SetVal(const floatArraySizes &valIn) { + floatArraySizes buf = {valIn[1], valIn[0]}; + val = buf; + } + + void SetVal(const uint64_t valIn[2]) { + std::array buf = {valIn[1], valIn[0]}; + val = buf; + } + void SetKind(FloatKind argKind) { kind = argKind; } @@ -1565,8 +1649,8 @@ class ASTFloatingLiteral : public ASTExpr { private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; MIRConst *GenerateMIRConstImpl() const override; - double val = 0; FloatKind kind = FloatKind::F32; + std::variant> val; }; class ASTCharacterLiteral : public ASTExpr { @@ -1574,7 +1658,7 @@ class ASTCharacterLiteral : public ASTExpr { explicit ASTCharacterLiteral(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTCharacterLiteral) { (void)allocatorIn; } - ~ASTCharacterLiteral() = default; + ~ASTCharacterLiteral() override = default; int64 GetVal() const { return val; @@ -1609,7 +1693,9 @@ class ASTVAArgExpr : public ASTExpr { explicit ASTVAArgExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTVAArgExpr) { (void)allocatorIn; } - ~ASTVAArgExpr() = default; + ~ASTVAArgExpr() override { + child = nullptr; + } void SetASTExpr(ASTExpr *astExpr) { child = astExpr; @@ -1634,7 +1720,9 @@ class ASTConstantExpr : public ASTExpr { explicit ASTConstantExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kConstantExpr) { (void)allocatorIn; } - ~ASTConstantExpr() = default; + ~ASTConstantExpr()override { + child = nullptr; + } void SetASTExpr(ASTExpr *astExpr) { child = astExpr; } @@ -1656,7 +1744,9 @@ class ASTImaginaryLiteral : public ASTExpr { explicit ASTImaginaryLiteral(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTImaginaryLiteral) { (void)allocatorIn; } - ~ASTImaginaryLiteral() = default; + ~ASTImaginaryLiteral() override { + child = nullptr; + } void SetASTExpr(ASTExpr *astExpr) { child = astExpr; } @@ -1681,7 +1771,9 @@ class ASTConditionalOperator : public ASTExpr { explicit ASTConditionalOperator(MapleAllocator &allocatorIn) : 
ASTExpr(allocatorIn, kASTConditionalOperator) { (void)allocatorIn; } - ~ASTConditionalOperator() = default; + ~ASTConditionalOperator() override { + falseExpr = nullptr; + } void SetCondExpr(ASTExpr *astExpr) { condExpr = astExpr; @@ -1718,7 +1810,9 @@ class ASTArrayInitLoopExpr : public ASTExpr { explicit ASTArrayInitLoopExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpArrayInitLoop) { (void)allocatorIn; } - ~ASTArrayInitLoopExpr() = default; + ~ASTArrayInitLoopExpr() override { + commonExpr = nullptr; + } void SetCommonExpr(ASTExpr *expr) { commonExpr = expr; @@ -1738,7 +1832,9 @@ class ASTArrayInitIndexExpr : public ASTExpr { explicit ASTArrayInitIndexExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpArrayInitLoop) { (void)allocatorIn; } - ~ASTArrayInitIndexExpr() = default; + ~ASTArrayInitIndexExpr() override { + primType = nullptr; + } void SetPrimType(MIRType *pType) { primType = pType; @@ -1767,7 +1863,9 @@ class ASTExprWithCleanups : public ASTExpr { explicit ASTExprWithCleanups(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpExprWithCleanups) { (void)allocatorIn; } - ~ASTExprWithCleanups() = default; + ~ASTExprWithCleanups() override { + subExpr = nullptr; + } void SetSubExpr(ASTExpr *sub) { subExpr = sub; @@ -1788,7 +1886,7 @@ class ASTMaterializeTemporaryExpr : public ASTExpr { kASTOpMaterializeTemporary) { (void)allocatorIn; } - ~ASTMaterializeTemporaryExpr() = default; + ~ASTMaterializeTemporaryExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -1800,7 +1898,7 @@ class ASTSubstNonTypeTemplateParmExpr : public ASTExpr { kASTOpSubstNonTypeTemplateParm) { (void)allocatorIn; } - ~ASTSubstNonTypeTemplateParmExpr() = default; + ~ASTSubstNonTypeTemplateParmExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -1812,7 +1910,7 @@ class ASTDependentScopeDeclRefExpr : public ASTExpr { kASTOpDependentScopeDeclRef) { 
(void)allocatorIn; } - ~ASTDependentScopeDeclRefExpr() = default; + ~ASTDependentScopeDeclRefExpr() override = default; private: UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; @@ -1832,7 +1930,9 @@ class ASTAtomicExpr : public ASTExpr { varName(FEUtils::GetSequentialName("ret.var.")) { (void)allocatorIn; } - ~ASTAtomicExpr() = default; + ~ASTAtomicExpr() override { + val2Type = nullptr; + } void SetRefType(MIRType *ref) { refType = ref; @@ -1946,7 +2046,9 @@ class ASTExprStmtExpr : public ASTExpr { explicit ASTExprStmtExpr(MapleAllocator &allocatorIn) : ASTExpr(allocatorIn, kASTOpStmtExpr) { (void)allocatorIn; } - ~ASTExprStmtExpr() = default; + ~ASTExprStmtExpr() override { + cpdStmt = nullptr; + } void SetCompoundStmt(ASTStmt *sub) { cpdStmt = sub; } diff --git a/src/hir2mpl/ast_input/clang/include/ast_function.h b/src/hir2mpl/ast_input/clang/include/ast_function.h index 814c3c34a212947db099de9191eacbc47466a029..2bb55e71629a8878639fb2ae9296d281e7475578 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_function.h +++ b/src/hir2mpl/ast_input/clang/include/ast_function.h @@ -22,7 +22,7 @@ class ASTFunction : public FEFunction { public: ASTFunction(const ASTFunc2FEHelper &argMethodHelper, MIRFunction &mirFunc, const std::unique_ptr &argPhaseResultTotal); - virtual ~ASTFunction() = default; + ~ASTFunction() override = default; protected: bool GenerateGeneralStmt(const std::string &phaseName) override { diff --git a/src/hir2mpl/ast_input/clang/include/ast_op.h b/src/hir2mpl/ast_input/clang/include/ast_op.h index a1f8802e8aae2d2aa71398e33509e905f8cd676c..6511ac8dafcae4e148aa3902970d46e74ea874ff 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_op.h +++ b/src/hir2mpl/ast_input/clang/include/ast_op.h @@ -136,6 +136,7 @@ enum ASTStmtOp { kASTStmtLabel, kASTStmtAddrOfLabelExpr, + kASTStmtMemberExpr, kASTStmtDo, kASTStmtFor, diff --git a/src/hir2mpl/ast_input/clang/include/ast_parser.h b/src/hir2mpl/ast_input/clang/include/ast_parser.h index 
7c0fcfebdaf9636ab00fa58aa02835247b052686..67bf2e673efa234ac3ad9a911955e74e7e08404e 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_parser.h +++ b/src/hir2mpl/ast_input/clang/include/ast_parser.h @@ -91,6 +91,7 @@ class ASTParser { ASTStmt *PROCESS_STMT(DeclRefExpr); ASTStmt *PROCESS_STMT(UnaryExprOrTypeTraitExpr); ASTStmt *PROCESS_STMT(AddrLabelExpr); + ASTStmt *PROCESS_STMT(MemberExpr); bool HasDefault(const clang::Stmt &stmt); // ProcessExpr @@ -194,7 +195,7 @@ class ASTParser { void SetInitExprForASTVar(MapleAllocator &allocator, const clang::VarDecl &varDecl, const GenericAttrs &attrs, ASTVar &astVar); void SetAlignmentForASTVar(const clang::VarDecl &varDecl, ASTVar &astVar) const; -#define PROCESS_DECL(CLASS) ProcessDecl##CLASS##Decl(MapleAllocator &allocator, const clang::CLASS##Decl&) +#define PROCESS_DECL(CLASS) ProcessDecl##CLASS##Decl(MapleAllocator &allocator, const clang::CLASS##Decl &decl) ASTDecl *PROCESS_DECL(Field); ASTDecl *PROCESS_DECL(Record); ASTDecl *PROCESS_DECL(Var); @@ -263,16 +264,16 @@ class ASTParser { ASTExpr *BuildExprToComputeSizeFromVLA(MapleAllocator &allocator, const clang::QualType &qualType); ASTExpr *ProcessExprBinaryOperatorComplex(MapleAllocator &allocator, const clang::BinaryOperator &bo); bool CheckIncContinueStmtExpr(const clang::Stmt &bodyStmt) const; - void CheckVarNameValid(std::string varName); + void CheckVarNameValid(const std::string &varName) const; void ParserExprVLASizeExpr(MapleAllocator &allocator, const clang::Type &type, ASTExpr &expr); void ParserStmtVLASizeExpr(MapleAllocator &allocator, const clang::Type &type, std::list &stmts); void SetAtomExprValType(MapleAllocator &allocator, const clang::AtomicExpr &atomicExpr, ASTAtomicExpr &astExpr); void SetAtomExchangeType(MapleAllocator &allocator, const clang::AtomicExpr &atomicExpr, ASTAtomicExpr &astExpr); - clang::Expr *GetAtomValExpr(clang::Expr *valExpr); - clang::QualType GetPointeeType(const clang::Expr &expr); + clang::Expr *GetAtomValExpr(clang::Expr 
*valExpr) const; + clang::QualType GetPointeeType(const clang::Expr &expr) const; bool IsNeedGetPointeeType(const clang::FunctionDecl &funcDecl) const; MapleVector CvtFuncTypeAndRetType(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, - clang::QualType qualType); + const clang::QualType &qualType) const; void CheckAtomicClearArg(const clang::CallExpr &expr) const; std::string GetFuncNameFromFuncDecl(const clang::FunctionDecl &funcDecl) const; using FuncPtrBuiltinFunc = ASTExpr *(ASTParser::*)(MapleAllocator &allocator, const clang::CallExpr &expr, @@ -289,9 +290,12 @@ ASTExpr *ParseBuiltinFunc(MapleAllocator &allocator, const clang::CallExpr &expr ASTExpr *PARSE_BUILTIIN_FUNC(Isinfsign); ASTExpr *PARSE_BUILTIIN_FUNC(HugeVal); ASTExpr *PARSE_BUILTIIN_FUNC(HugeValf); + ASTExpr *PARSE_BUILTIIN_FUNC(HugeVall); ASTExpr *PARSE_BUILTIIN_FUNC(Inf); + ASTExpr *PARSE_BUILTIIN_FUNC(Infl); ASTExpr *PARSE_BUILTIIN_FUNC(Inff); ASTExpr *PARSE_BUILTIIN_FUNC(Nan); + ASTExpr *PARSE_BUILTIIN_FUNC(Nanl); ASTExpr *PARSE_BUILTIIN_FUNC(Nanf); ASTExpr *PARSE_BUILTIIN_FUNC(Signbit); ASTExpr *PARSE_BUILTIIN_FUNC(SignBitf); @@ -323,6 +327,7 @@ ASTExpr *ParseBuiltinFunc(MapleAllocator &allocator, const clang::CallExpr &expr MapleList &astFileScopeAsms; MapleList &astEnums; MapleMap vlaSizeMap; + std::unordered_map> structFileNameMap; }; } // namespace maple #endif // HIR2MPL_AST_INPUT_INCLUDE_AST_PARSER_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_stmt.h b/src/hir2mpl/ast_input/clang/include/ast_stmt.h index 06cca3d5361e81a5d5993be196d1f980137a244b..7bcc761b921e61222fe294a4b90ea58d0f26b214 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_stmt.h +++ b/src/hir2mpl/ast_input/clang/include/ast_stmt.h @@ -101,7 +101,7 @@ class ASTStmt { class ASTStmtDummy : public ASTStmt { public: explicit ASTStmtDummy(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDummy) {} - ~ASTStmtDummy() = default; + ~ASTStmtDummy() override = default; private: std::list 
Emit2FEStmtImpl() const override; @@ -111,7 +111,7 @@ class ASTCompoundStmt : public ASTStmt { public: explicit ASTCompoundStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCompound), astStmts(allocatorIn.Adapter()) {} - ~ASTCompoundStmt() = default; + ~ASTCompoundStmt() override = default; void SetASTStmt(ASTStmt *astStmt); void InsertASTStmtsAtFront(const std::list &stmts); const MapleList &GetASTStmtList() const; @@ -144,7 +144,7 @@ class ASTCompoundStmt : public ASTStmt { class ASTReturnStmt : public ASTStmt { public: explicit ASTReturnStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtReturn) {} - ~ASTReturnStmt() = default; + ~ASTReturnStmt() override = default; private: std::list Emit2FEStmtImpl() const override; @@ -315,7 +315,7 @@ class ASTContinueStmt : public ASTStmt { class ASTUnaryOperatorStmt : public ASTStmt { public: explicit ASTUnaryOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtUO) {} - ~ASTUnaryOperatorStmt() = default; + ~ASTUnaryOperatorStmt() override = default; private: std::list Emit2FEStmtImpl() const override; @@ -334,7 +334,7 @@ class ASTGotoStmt : public ASTStmt { public: explicit ASTGotoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtGoto), labelName("", allocatorIn.GetMemPool()) {} - ~ASTGotoStmt() = default; + ~ASTGotoStmt() override = default; std::string GetLabelName() const { return labelName.c_str() == nullptr ? 
"" : labelName.c_str(); @@ -352,7 +352,7 @@ class ASTGotoStmt : public ASTStmt { class ASTIndirectGotoStmt : public ASTStmt { public: explicit ASTIndirectGotoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtIndirectGoto) {} - ~ASTIndirectGotoStmt() = default; + ~ASTIndirectGotoStmt() override = default; protected: std::list Emit2FEStmtImpl() const override; @@ -361,7 +361,9 @@ class ASTIndirectGotoStmt : public ASTStmt { class ASTSwitchStmt : public ASTStmt { public: explicit ASTSwitchStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtSwitch) {} - ~ASTSwitchStmt() = default; + ~ASTSwitchStmt() override { + condStmt = nullptr; + } void SetCondStmt(ASTStmt *cond) { condStmt = cond; @@ -412,7 +414,9 @@ class ASTSwitchStmt : public ASTStmt { class ASTCaseStmt : public ASTStmt { public: explicit ASTCaseStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCase) {} - ~ASTCaseStmt() = default; + ~ASTCaseStmt() override { + subStmt = nullptr; + } void SetLHS(ASTExpr *l) { lhs = l; @@ -466,7 +470,9 @@ class ASTCaseStmt : public ASTStmt { class ASTDefaultStmt : public ASTStmt { public: explicit ASTDefaultStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDefault) {} - ~ASTDefaultStmt() = default; + ~ASTDefaultStmt() override { + child = nullptr; + } void SetChildStmt(ASTStmt* ch) { child = ch; @@ -484,7 +490,7 @@ class ASTDefaultStmt : public ASTStmt { class ASTNullStmt : public ASTStmt { public: explicit ASTNullStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtNull) {} - ~ASTNullStmt() = default; + ~ASTNullStmt() override = default; private: std::list Emit2FEStmtImpl() const override; @@ -496,7 +502,7 @@ class ASTDeclStmt : public ASTStmt { : ASTStmt(allocatorIn, kASTStmtDecl), subDecls(allocatorIn.Adapter()), subDeclInfos(allocatorIn.Adapter()) {} - ~ASTDeclStmt() = default; + ~ASTDeclStmt() override = default; void SetSubDecl(ASTDecl *decl) { subDecls.emplace_back(decl); @@ -739,5 +745,15 @@ class 
ASTUOAddrOfLabelExprStmt : public ASTStmt { private: std::list Emit2FEStmtImpl() const override; }; + +class ASTMemberExprStmt : public ASTStmt { + public: + explicit ASTMemberExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtMemberExpr) {} + ~ASTMemberExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + } // namespace maple #endif // HIR2MPL_AST_INPUT_INCLUDE_AST_STMT_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h b/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h index 3879607e44882f576f68a004eeb99765fb685e61..27aac4bad053b879cb8184ea9c82010f436c9827 100644 --- a/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h +++ b/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h @@ -22,7 +22,7 @@ namespace maple { class ASTStruct2FEHelper : public FEInputStructHelper { public: ASTStruct2FEHelper(MapleAllocator &allocator, ASTStruct &structIn); - ~ASTStruct2FEHelper() = default; + ~ASTStruct2FEHelper() override = default; const ASTStruct &GetASTStruct() const { return astStruct; @@ -52,7 +52,7 @@ class ASTGlobalVar2FEHelper : public FEInputGlobalVarHelper { ASTGlobalVar2FEHelper(MapleAllocator &allocatorIn, const ASTVar &varIn) : FEInputGlobalVarHelper(allocatorIn), astVar(varIn) {} - ~ASTGlobalVar2FEHelper() = default; + ~ASTGlobalVar2FEHelper() override = default; protected: bool ProcessDeclImpl(MapleAllocator &allocator) override; @@ -64,7 +64,7 @@ class ASTFileScopeAsm2FEHelper : public FEInputFileScopeAsmHelper { ASTFileScopeAsm2FEHelper(MapleAllocator &allocatorIn, const ASTFileScopeAsm &astAsmIn) : FEInputFileScopeAsmHelper(allocatorIn), astAsm(astAsmIn) {} - ~ASTFileScopeAsm2FEHelper() = default; + ~ASTFileScopeAsm2FEHelper() override = default; protected: bool ProcessDeclImpl(MapleAllocator &allocator) override; @@ -76,7 +76,7 @@ class ASTEnum2FEHelper : public FEInputEnumHelper { ASTEnum2FEHelper(MapleAllocator &allocatorIn, const ASTEnumDecl 
&astEnumIn) : FEInputEnumHelper(allocatorIn), astEnum(astEnumIn) {} - ~ASTEnum2FEHelper() = default; + ~ASTEnum2FEHelper() override = default; protected: bool ProcessDeclImpl(MapleAllocator &allocator) override; @@ -88,7 +88,7 @@ class ASTStructField2FEHelper : public FEInputFieldHelper { ASTStructField2FEHelper(MapleAllocator &allocator, ASTField &fieldIn, MIRType &structTypeIn) : FEInputFieldHelper(allocator), field(fieldIn), structType(structTypeIn) {} - ~ASTStructField2FEHelper() = default; + ~ASTStructField2FEHelper() override = default; protected: bool ProcessDeclImpl(MapleAllocator &allocator) override; @@ -104,7 +104,7 @@ class ASTFunc2FEHelper : public FEInputMethodHelper { func(funcIn) { srcLang = kSrcLangC; } - ~ASTFunc2FEHelper() = default; + ~ASTFunc2FEHelper() override = default; ASTFunc &GetMethod() const { return func; } diff --git a/src/hir2mpl/ast_input/clang/include/builtin_func_parse.def b/src/hir2mpl/ast_input/clang/include/builtin_func_parse.def index 35808d2f31e5c1a31cbab2d0709ba55d16e8fe67..907b64e0676dc78a8718de5ecd6905dba8aa25e6 100644 --- a/src/hir2mpl/ast_input/clang/include/builtin_func_parse.def +++ b/src/hir2mpl/ast_input/clang/include/builtin_func_parse.def @@ -3,14 +3,14 @@ BUILTIN_FUNC_PARSE("__builtin_constant_p", &ASTParser::ParseBuiltinConstantP) BUILTIN_FUNC_PARSE("__builtin_isinf_sign", &ASTParser::ParseBuiltinIsinfsign) BUILTIN_FUNC_PARSE("__builtin_huge_val", &ASTParser::ParseBuiltinHugeVal) BUILTIN_FUNC_PARSE("__builtin_huge_valf", &ASTParser::ParseBuiltinHugeValf) -BUILTIN_FUNC_PARSE("__builtin_huge_vall", &ASTParser::ParseBuiltinHugeVal) +BUILTIN_FUNC_PARSE("__builtin_huge_vall", &ASTParser::ParseBuiltinHugeVall) BUILTIN_FUNC_PARSE("__builtin_inf", &ASTParser::ParseBuiltinInf) BUILTIN_FUNC_PARSE("__builtin_inff", &ASTParser::ParseBuiltinInff) -BUILTIN_FUNC_PARSE("__builtin_infl", &ASTParser::ParseBuiltinInf) +BUILTIN_FUNC_PARSE("__builtin_infl", &ASTParser::ParseBuiltinInfl) BUILTIN_FUNC_PARSE("__builtin_nan", 
&ASTParser::ParseBuiltinNan) BUILTIN_FUNC_PARSE("__builtin_nanf", &ASTParser::ParseBuiltinNanf) -BUILTIN_FUNC_PARSE("__builtin_nanl", &ASTParser::ParseBuiltinNan) -BUILTIN_FUNC_PARSE("__builtin_signbit", &ASTParser::ParseBuiltinSignbit) +BUILTIN_FUNC_PARSE("__builtin_nanl", &ASTParser::ParseBuiltinNanl) +BUILTIN_FUNC_PARSE("__builtin_signbit", &ASTParser::ParseBuiltinSignBitl) BUILTIN_FUNC_PARSE("__builtin_signbitf", &ASTParser::ParseBuiltinSignBitf) BUILTIN_FUNC_PARSE("__builtin_signbitl", &ASTParser::ParseBuiltinSignBitl) BUILTIN_FUNC_PARSE("__builtin_trap", &ASTParser::ParseBuiltinTrap) @@ -18,4 +18,5 @@ BUILTIN_FUNC_PARSE("__builtin_copysignf", &ASTParser::ParseBuiltinCopysignf) BUILTIN_FUNC_PARSE("__builtin_copysign", &ASTParser::ParseBuiltinCopysign) BUILTIN_FUNC_PARSE("__builtin_copysignl", &ASTParser::ParseBuiltinCopysignl) BUILTIN_FUNC_PARSE("__atomic_clear", &ASTParser::ParseBuiltinAtomicClear) -BUILTIN_FUNC_PARSE("__atomic_test_and_set", &ASTParser::ParseBuiltinAtomicTestAndSet) \ No newline at end of file +BUILTIN_FUNC_PARSE("__atomic_test_and_set", &ASTParser::ParseBuiltinAtomicTestAndSet) +BUILTIN_FUNC_PARSE("__builtin_abort", &ASTParser::ParseBuiltinTrap) diff --git a/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp b/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp index 9a777a01dfa681d2a1066b16e869ca62f8594e51..a228e053a7728aafa523426028e7da29af46155a 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp +++ b/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp @@ -13,6 +13,7 @@ * See the Mulan PSL v2 for more details. 
*/ #include "ast_interface.h" +#include #include "mpl_logging.h" #include "ast_util.h" #include "fe_utils.h" @@ -323,6 +324,40 @@ void LibAstFile::CollectFuncReturnVarAttrs(const clang::CallExpr &expr, GenericA } } +void LibAstFile::SetAttrVisibility(const clang::DeclaratorDecl &decl, GenericAttrs &genAttrs) const { + if (decl.getLinkageAndVisibility().isVisibilityExplicit()) { + auto visibilityInfo = decl.getLinkageAndVisibility().getVisibility(); + switch (visibilityInfo) { + case clang::Visibility::HiddenVisibility: + genAttrs.SetAttr(GENATTR_visibility_hidden); + break; + case clang::Visibility::ProtectedVisibility: + genAttrs.SetAttr(GENATTR_visibility_protected); + break; + default: + break; + } + } +} + +void LibAstFile::SetAttrTLSModel(const clang::VarDecl &decl, GenericAttrs &genAttrs) const { + if (decl.hasAttr()) { + const clang::TLSModelAttr *tlsAttr = decl.getAttr(); + const std::string tlsModelName = tlsAttr->getModel().str(); + if (tlsModelName == "local-exec") { + genAttrs.SetAttr(GENATTR_local_exec); + } else if (tlsModelName == "local-dynamic") { + genAttrs.SetAttr(GENATTR_local_dynamic); + } else if (tlsModelName == "initial-exec") { + genAttrs.SetAttr(GENATTR_initial_exec); + } else if (tlsModelName == "global-dynamic") { + genAttrs.SetAttr(GENATTR_global_dynamic); + } else { + CHECK_FATAL(false, "TLSMODE: %s is not support", tlsModelName.c_str()); + } + } +} + void LibAstFile::CollectFuncAttrs(const clang::FunctionDecl &decl, GenericAttrs &genAttrs, AccessKind access) const { CollectAttrs(decl, genAttrs, access); if (decl.isVirtualAsWritten()) { @@ -419,18 +454,7 @@ void LibAstFile::CollectFuncAttrs(const clang::FunctionDecl &decl, GenericAttrs genAttrs.ResetAttr(GENATTR_extern); } } - if (decl.getLinkageAndVisibility().isVisibilityExplicit()) { - auto visibilityInfo = decl.getLinkageAndVisibility().getVisibility(); - switch (visibilityInfo) { - case clang::Visibility::HiddenVisibility: - genAttrs.SetAttr(GENATTR_visibility_hidden); - break; - 
case clang::Visibility::ProtectedVisibility: - genAttrs.SetAttr(GENATTR_visibility_protected); - break; - default: break; - } - } + SetAttrVisibility(decl, genAttrs); CheckUnsupportedFuncAttrs(decl); } @@ -456,18 +480,8 @@ void LibAstFile::CheckUnsupportedFuncAttrs(const clang::FunctionDecl &decl) cons void LibAstFile::CollectVarAttrs(const clang::VarDecl &decl, GenericAttrs &genAttrs, AccessKind access) const { CollectAttrs(decl, genAttrs, access); - if (decl.getLinkageAndVisibility().isVisibilityExplicit()) { - auto visibilityInfo = decl.getLinkageAndVisibility().getVisibility(); - switch (visibilityInfo) { - case clang::Visibility::HiddenVisibility: - genAttrs.SetAttr(GENATTR_visibility_hidden); - break; - case clang::Visibility::ProtectedVisibility: - genAttrs.SetAttr(GENATTR_visibility_protected); - break; - default: break; - } - } + SetAttrVisibility(decl, genAttrs); + SetAttrTLSModel(decl, genAttrs); // handle __thread if (decl.getTLSKind() == clang::VarDecl::TLS_Static) { genAttrs.SetAttr(GENATTR_tls_static); @@ -582,7 +596,44 @@ void LibAstFile::EmitQualifierName(const clang::QualType qualType, std::stringst } } +void LibAstFile::BuildFieldName(std::stringstream &recordLayoutStr, const clang::FieldDecl &fieldDecl) { + std::string canonicalTypeName = fieldDecl.getType().getCanonicalType().getAsString(); + std::string fieldName = GetMangledName(fieldDecl); + if (fieldName.empty() && fieldDecl.isAnonymousStructOrUnion()) { + fieldName = GetOrCreateMappedUnnamedName(*(fieldDecl.getType()->getAsRecordDecl())); + } + FEUtils::EraseFileNameforClangTypeStr(canonicalTypeName); + recordLayoutStr << fieldName << "-" << canonicalTypeName; + const auto *fieldRDcl = fieldDecl.getType()->getAsRecordDecl(); + if (fieldRDcl != nullptr) { + recordLayoutStr << ":{"; + for (const auto field : fieldRDcl->fields()) { + BuildFieldName(recordLayoutStr, *field); + } + recordLayoutStr << "}"; + } + recordLayoutStr << ","; +} + const std::string 
LibAstFile::GetOrCreateMappedUnnamedName(const clang::Decl &decl) { + if (FEOptions::GetInstance().GetWPAA()) { + std::stringstream recordLayoutStr; + const clang::RecordDecl *myRecordDecl = nullptr; + if (const auto *fieldDecl = llvm::dyn_cast(&decl)) { + myRecordDecl = fieldDecl->getType()->getAsRecordDecl(); + } else if (const auto *recordDecl = llvm::dyn_cast(&decl)) { + myRecordDecl = recordDecl; + } + if (myRecordDecl) { + recordLayoutStr << myRecordDecl->getKindName().str() << ":{"; + for (const auto field : myRecordDecl->fields()) { + BuildFieldName(recordLayoutStr, *field); + } + recordLayoutStr << "}"; + } + return "unnamed." + FEUtils::GetHashStr(recordLayoutStr.str()); + } + uint32 uid; if (FEOptions::GetInstance().GetFuncInlineSize() != 0 && !decl.getLocation().isMacroID()) { // use loc as key for wpaa mode @@ -660,30 +711,39 @@ void LibAstFile::EmitTypeName(const clang::RecordType &recordType, std::stringst } else { ss << GetOrCreateMappedUnnamedName(*recordDecl); } - if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { - std::string recordStr = recordDecl->getDefinition() == nullptr ? "" : GetRecordLayoutString(*recordDecl); - std::string filename = astContext->getSourceManager().getFilename(recordDecl->getLocation()).str(); - ss << FEUtils::GetFileNameHashStr(filename + recordStr); + if (FEOptions::GetInstance().GetFuncInlineSize() != 0 || FEOptions::GetInstance().GetWPAA()) { + std::string layout = recordDecl->getDefinition() == nullptr ? 
"" : GetRecordLayoutString(*recordDecl); + ss << FEUtils::GetFileNameHashStr(layout); } CHECK_FATAL(ss.rdbuf()->in_avail() != 0, "stringstream is empty"); } +void LibAstFile::BuildFieldLayoutString(std::stringstream &recordLayoutStr, const clang::FieldDecl &fieldDecl) { + std::string canonicalTypeName = fieldDecl.getType().getCanonicalType().getAsString(); + std::string fieldName = GetMangledName(fieldDecl); + if (fieldName.empty() && fieldDecl.isAnonymousStructOrUnion()) { + fieldName = GetOrCreateMappedUnnamedName(fieldDecl); + } + + FEUtils::EraseFileNameforClangTypeStr(canonicalTypeName); + recordLayoutStr << fieldName << "-" << canonicalTypeName; + const auto *fieldCl = fieldDecl.getType()->getAsRecordDecl(); + if (fieldCl != nullptr) { + recordLayoutStr << ":{"; + for (const auto field : fieldCl->fields()) { + BuildFieldLayoutString(recordLayoutStr, *field); + } + recordLayoutStr << "}"; + } + recordLayoutStr << ","; +} std::string LibAstFile::GetRecordLayoutString(const clang::RecordDecl &recordDecl) { std::stringstream recordLayoutStr; - const clang::ASTRecordLayout &recordLayout = GetContext()->getASTRecordLayout(&recordDecl); - unsigned int fieldCount = recordLayout.getFieldCount(); - uint64_t recordSize = static_cast(recordLayout.getSize().getQuantity()); - recordLayoutStr << std::to_string(fieldCount) << std::to_string(recordSize); - clang::RecordDecl::field_iterator it = recordDecl.field_begin(); - for (unsigned i = 0, e = recordLayout.getFieldCount(); i != e; ++i, ++it) { - const clang::FieldDecl *fieldDecl = *it; - recordLayoutStr << std::to_string(recordLayout.getFieldOffset(i)); - std::string fieldName = GetMangledName(*fieldDecl); - if (fieldName.empty()) { - fieldName = GetOrCreateMappedUnnamedName(*fieldDecl); - } - recordLayoutStr << fieldName; + recordLayoutStr << recordDecl.getKindName().str() << ":{"; + for (auto field : recordDecl.fields()) { + BuildFieldLayoutString(recordLayoutStr, *field); } + recordLayoutStr << "}"; return 
recordLayoutStr.str(); } @@ -699,4 +759,96 @@ std::string LibAstFile::GetTypedefNameFromUnnamedStruct(const clang::RecordDecl } return std::string(); } -} // namespace maple +// with macro enpanded, preproceeded code +std::string LibAstFile::GetSourceText(const clang::Stmt &stmt) { + std::string s; + llvm::raw_string_ostream sos(s); + clang::PrintingPolicy pp(astContext->getLangOpts()); + stmt.printPretty(sos, nullptr, pp); + sos.flush(); + return s; +} + +// non preproceeded code +std::string LibAstFile::GetSourceTextRaw(const clang::SourceRange range, const clang::SourceManager &sm) const { + clang::LangOptions lo; + auto startLoc = sm.getSpellingLoc(range.getBegin()); + auto lastTokenLoc = sm.getSpellingLoc(range.getEnd()); + auto endLoc = clang::Lexer::getLocForEndOfToken(lastTokenLoc, 0, sm, lo); + auto printableRange = clang::SourceRange{startLoc, endLoc}; + return clang::Lexer::getSourceText(clang::CharSourceRange::getCharRange(printableRange), sm, clang::LangOptions()) + .str(); +} + +// when function is considered unique, return true; +bool LibAstFile::CheckAndBuildStaticFunctionLayout(const clang::FunctionDecl &funcDecl, + std::stringstream &funcNameStream, + std::unordered_set &visitedCalls) { + if (!funcDecl.isStatic()) { + return false; + } + if (!funcDecl.getBody()) { + return false; + } + std::string funcSignature = BuildStaticFunctionSignature(funcDecl); + funcNameStream << funcSignature << "{"; + std::string funcSourceCode = ""; + funcSourceCode = GetSourceText(*(funcDecl.getBody())); + funcNameStream << funcSourceCode << "};"; + CallCollector collector = CallCollector(astContext); + + collector.TraverseStmt(funcDecl.getBody()); + if (collector.IsNeedToBeUniq()) { + return true; + } + auto callExprs = collector.GetCallExprs(); + funcNameStream << "->${"; + for (auto const &pair : callExprs) { + if (visitedCalls.count(pair.first) > 0) { + // recursive call + continue; + } + (void)visitedCalls.insert(pair.first); + if (const clang::FunctionDecl 
*currentFuncDecl = pair.second->getDirectCallee()) { + if (CheckAndBuildStaticFunctionLayout(*currentFuncDecl, funcNameStream, visitedCalls)) { + return true; + } + } + } + funcNameStream << "}$, "; + return false; +} + +void LibAstFile::BuildStaticFunctionLayout(const clang::FunctionDecl &funcDecl, std::string &funcName) { + std::stringstream funcNameStream; + funcNameStream << funcName; + std::unordered_set visitedCalls; + if (CheckAndBuildStaticFunctionLayout(funcDecl, funcNameStream, visitedCalls)) { + funcName = funcName + ".static_involved" + this->GetAstFileNameHashStr(); + } else { + funcName = funcName + FEUtils::GetFileNameHashStr(funcNameStream.str()); + } +} + +std::string LibAstFile::BuildStaticFunctionSignature(const clang::FunctionDecl &funcDecl) { + std::string signature; + signature += funcDecl.getReturnType().getCanonicalType().getAsString(); + signature += " "; + signature += funcDecl.getNameAsString(); + signature += "("; + + // If the function definition is a noprototype declaration, + // which is allowed in c99, the parameter information will not be recorded, as well as func(void); + if (const auto *funcType = funcDecl.getType()->getAs()) { + for (unsigned int i = 0; i < funcType->getNumParams(); i++) { + const auto paramType = funcType->getParamType(i).getCanonicalType(); + signature += paramType.getAsString() + ","; + } + if (signature.back() == ',') { + signature.pop_back(); + } + } + signature += ");"; + return signature; +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/lib/ast_interface.h b/src/hir2mpl/ast_input/clang/lib/ast_interface.h index 4f2593df1977c370f019d9c1b096435994004950..070ac333570dde6498855eb86acda6df6c04f81e 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_interface.h +++ b/src/hir2mpl/ast_input/clang/lib/ast_interface.h @@ -53,7 +53,15 @@ class LibAstFile { void EmitTypeName(const clang::RecordType &recordType, std::stringstream &ss); void EmitQualifierName(const clang::QualType qualType, 
std::stringstream &ss) const; std::string GetTypedefNameFromUnnamedStruct(const clang::RecordDecl &recoDecl) const; + void BuildFieldName(std::stringstream &recordLayoutStr, const clang::FieldDecl &fieldDecl); + std::string GetSourceText(const clang::Stmt &stmt); + std::string GetSourceTextRaw(const clang::SourceRange range, const clang::SourceManager &sm) const; + std::string BuildStaticFunctionSignature(const clang::FunctionDecl &funcDecl); + void BuildStaticFunctionLayout(const clang::FunctionDecl &funcDecl, std::string &funcName); + bool CheckAndBuildStaticFunctionLayout(const clang::FunctionDecl &funcDecl, std::stringstream &funcNameStream, + std::unordered_set &visitedCalls); std::string GetRecordLayoutString(const clang::RecordDecl &recordDecl); + void BuildFieldLayoutString(std::stringstream &recordLayoutStr, const clang::FieldDecl &fieldDecl); void CollectBaseEltTypeAndSizesFromConstArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, TypeAttrs &elemAttr, std::vector &operands, bool isSourceType); @@ -73,6 +81,8 @@ class LibAstFile { void CollectAttrs(const clang::NamedDecl &decl, GenericAttrs &genAttrs, AccessKind access) const; void CollectFuncAttrs(const clang::FunctionDecl &decl, GenericAttrs &genAttrs, AccessKind access) const; void CollectFuncReturnVarAttrs(const clang::CallExpr &expr, GenericAttrs &genAttrs) const; + void SetAttrVisibility(const clang::DeclaratorDecl &decl, GenericAttrs &genAttrs) const; + void SetAttrTLSModel(const clang::VarDecl &decl, GenericAttrs &genAttrs) const; void CheckUnsupportedFuncAttrs(const clang::FunctionDecl &decl) const; void CollectVarAttrs(const clang::VarDecl &decl, GenericAttrs &genAttrs, AccessKind access) const; void CheckUnsupportedVarAttrs(const clang::VarDecl &decl) const; @@ -92,8 +102,9 @@ class LibAstFile { MIRType *CvtFieldType(const clang::NamedDecl &decl); MIRType *CvtComplexType(const clang::QualType srcType) const; MIRType *CvtVectorType(const clang::QualType srcType); - MIRType 
*CvtVectorSizeType(MIRType *elemType, MIRType *destType, uint32_t arrLen, uint32_t vecLen, - uint32 alignNum); + MIRType *CvtVectorSizeType(const MIRType &elemType, MIRType *destType, uint32_t arrLen, uint32_t vecLen, + uint32 alignNum) const; + bool CheckSourceTypeNameNotNull(const clang::QualType &currQualType, MIRType *&elemType, bool isSourceType); MIRType *CvtTypedef(const clang::QualType &qualType); MIRType *CvtTypedefDecl(const clang::TypedefNameDecl &typedefDecl); bool TypeHasMayAlias(const clang::QualType srcType) const; @@ -116,13 +127,13 @@ class LibAstFile { uint32 RetrieveAggTypeAlign(const clang::Type *ty) const; private: - MapleSet recordDeclSet; + MapleSet recordDeclSet; MapleMap unnamedSymbolMap; MapleMap compoundLiteralExprInitSymbolMap; MIRModule *module = nullptr; - MapleList &recordDecles; - MapleList &enumDecles; + MapleList &recordDecles; + MapleList &enumDecles; clang::ASTContext *astContext = nullptr; clang::TranslationUnitDecl *astUnitDecl = nullptr; @@ -132,5 +143,37 @@ class LibAstFile { MapleString astFileName; static std::map unnamedLocMap; }; -} // namespace maple -#endif // HIR2MPL_AST_FILE_INCLUDE_AST_INTERFACE_H + +class CallCollector : public clang::RecursiveASTVisitor { + public: + explicit CallCollector(clang::ASTContext *myAstContext) : astContext(myAstContext) {} + + bool VisitCallExpr(clang::CallExpr *expr) { + callExprs.insert(std::pair(expr->getID(*astContext), expr)); + return true; + } + + bool VisitDeclRefExpr(clang::DeclRefExpr *expr) { + if (auto *varDecl = llvm::dyn_cast(expr->getDecl())) { + if (!(varDecl->getType().isConstQualified()) && (varDecl->getStorageClass() == clang::SC_Static)) { + needToBeUnique = true; + return false; + } + } + return true; + } + std::map GetCallExprs() { + return callExprs; + } + + bool IsNeedToBeUniq() { + return needToBeUnique; + } + + private: + clang::ASTContext *astContext; + std::map callExprs; + bool needToBeUnique = false; +}; +} // namespace maple +#endif // 
HIR2MPL_AST_FILE_INCLUDE_AST_INTERFACE_H diff --git a/src/hir2mpl/ast_input/clang/lib/ast_type.cpp b/src/hir2mpl/ast_input/clang/lib/ast_type.cpp index b83bfefc0fc1239df57231f16662ad9778f6ea27..de9e8806ad52a8d0acc2182a409072026ba2d73e 100644 --- a/src/hir2mpl/ast_input/clang/lib/ast_type.cpp +++ b/src/hir2mpl/ast_input/clang/lib/ast_type.cpp @@ -26,12 +26,13 @@ std::map LibAstFile::unnamedLocMap; static constexpr size_t kNumber16PowOf2 = 4; static constexpr size_t k8BytesPowOf2Number = 3; static constexpr size_t kSignedTypes = 2; +static constexpr size_t kMaxPrimTypeSize = 128; static PrimType vectorTypeMap[kNumber16PowOf2 + 1][k8BytesPowOf2Number + 1][kSignedTypes] = { - {{PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_v1i64, PTY_v1u64}}, - {{PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_v2i32, PTY_v2u32}, {PTY_v2i64, PTY_v2u64}}, - {{PTY_begin, PTY_begin}, {PTY_v4i16, PTY_v4u16}, {PTY_v4i32, PTY_v4u32}, {PTY_begin, PTY_begin}}, - {{PTY_v8i8, PTY_v8u8}, {PTY_v8i16, PTY_v8u16}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}}, - {{PTY_v16i8, PTY_v16u8}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}}, + {{PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_v1i64, PTY_v1u64}}, + {{PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_v2i32, PTY_v2u32}, {PTY_v2i64, PTY_v2u64}}, + {{PTY_begin, PTY_begin}, {PTY_v4i16, PTY_v4u16}, {PTY_v4i32, PTY_v4u32}, {PTY_begin, PTY_begin}}, + {{PTY_v8i8, PTY_v8u8}, {PTY_v8i16, PTY_v8u16}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}}, + {{PTY_v16i8, PTY_v16u8}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}, {PTY_begin, PTY_begin}}, }; MIRType *LibAstFile::CvtPrimType(const clang::QualType qualType, bool isSourceType) const { @@ -118,10 +119,10 @@ PrimType LibAstFile::CvtPrimType(const clang::BuiltinType::Kind kind, bool isSou case clang::BuiltinType::Float: return PTY_f32; case clang::BuiltinType::Double: - case clang::BuiltinType::LongDouble: return 
PTY_f64; case clang::BuiltinType::Float128: - return PTY_f64; + case clang::BuiltinType::LongDouble: + return PTY_f128; case clang::BuiltinType::NullPtr: // default 64-bit, need to update return PTY_a64; case clang::BuiltinType::Half: // PTY_f16, NOTYETHANDLED @@ -303,9 +304,9 @@ MIRType *LibAstFile::CvtRecordType(const clang::QualType qualType) { std::string name(ss.str()); if (!recordDecl->isDefinedOutsideFunctionOrMethod()) { Loc l = GetLOC(recordDecl->getLocation()); - std::stringstream ss; - ss << name << "_" << l.line << "_" << l.column; - name = ss.str(); + std::stringstream ssLocal; + ssLocal << name << "_" << l.line << "_" << l.column; + name = ssLocal.str(); } type = FEManager::GetTypeManager().GetOrCreateStructType(name); type->SetMIRTypeKind(srcType->isUnionType() ? kTypeUnion : kTypeStruct); @@ -383,26 +384,6 @@ MIRType *LibAstFile::CvtFunctionType(const clang::QualType srcType, bool isSourc MIRType *retType = CvtType(funcType->getReturnType(), isSourceType); std::vector argsVec; std::vector attrsVec; - bool isFirstArgRet = false; - const clang::QualType &retQualType = funcType->getReturnType().getCanonicalType(); - // setup first_arg_retrun if ret struct size > 16 - if (!isSourceType && retQualType->isRecordType()) { - const auto *recordType = llvm::cast(retQualType); - clang::RecordDecl *recordDecl = recordType->getDecl(); - const clang::ASTRecordLayout &layout = astContext->getASTRecordLayout(recordDecl->getDefinition()); - const unsigned twoByteSize = 16; - if (layout.getSize().getQuantity() > twoByteSize) { - MIRType *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*retType); - GenericAttrs genAttrs; - if (IsOneElementVector(retQualType)) { - genAttrs.SetAttr(GENATTR_oneelem_simd); - } - attrsVec.push_back(genAttrs.ConvertToTypeAttrs()); - argsVec.push_back(ptrType->GetTypeIndex()); - retType = GlobalTables::GetTypeTable().GetVoid(); - isFirstArgRet = true; - } - } if (funcType->isFunctionProtoType()) { const auto *funcProtoType = 
funcType->castAs(); using ItType = clang::FunctionProtoType::param_type_iterator; @@ -428,13 +409,9 @@ MIRType *LibAstFile::CvtFunctionType(const clang::QualType srcType, bool isSourc } MIRType *mirFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( retType->GetTypeIndex(), argsVec, attrsVec); - if (isFirstArgRet) { - static_cast(mirFuncType)->SetFirstArgReturn(); - } return GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirFuncType); } - void LibAstFile::CollectBaseEltTypeAndSizesFromConstArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, TypeAttrs &elemAttr, std::vector &operands, bool isSourceType) { @@ -462,15 +439,23 @@ void LibAstFile::CollectBaseEltTypeAndSizesFromConstArrayDecl(const clang::QualT } } -void LibAstFile::CollectBaseEltTypeAndDimFromVariaArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, - TypeAttrs &elemAttr, uint8_t &dim, bool isSourceType) { +bool LibAstFile::CheckSourceTypeNameNotNull(const clang::QualType &currQualType, MIRType *&elemType, + bool isSourceType) { if (isSourceType) { MIRType *nameType = CvtTypedef(currQualType); if (nameType != nullptr) { elemType = nameType; - return; + return true; } } + return false; +} + +void LibAstFile::CollectBaseEltTypeAndDimFromVariaArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, + TypeAttrs &elemAttr, uint8_t &dim, bool isSourceType) { + if (CheckSourceTypeNameNotNull(currQualType, elemType, isSourceType)) { + return; + } const clang::Type *ptrType = currQualType.getTypePtrOrNull(); ASSERT(ptrType != nullptr, "Null type", currQualType.getAsString().c_str()); if (ptrType->isArrayType()) { @@ -485,12 +470,8 @@ void LibAstFile::CollectBaseEltTypeAndDimFromVariaArrayDecl(const clang::QualTyp void LibAstFile::CollectBaseEltTypeAndDimFromDependentSizedArrayDecl( const clang::QualType currQualType, MIRType *&elemType, TypeAttrs &elemAttr, std::vector &operands, bool isSourceType) { - if (isSourceType) { - MIRType *nameType = 
CvtTypedef(currQualType); - if (nameType != nullptr) { - elemType = nameType; - return; - } + if (CheckSourceTypeNameNotNull(currQualType, elemType, isSourceType)) { + return; } const clang::Type *ptrType = currQualType.getTypePtrOrNull(); ASSERT(ptrType != nullptr, "ERROR:null pointer!"); @@ -521,15 +502,15 @@ void LibAstFile::CollectBaseEltTypeFromArrayDecl(const clang::QualType &currQual } } -MIRType *LibAstFile::CvtVectorSizeType(MIRType *elemType, MIRType *destType, uint32_t arrLen, uint32_t vecLen, - uint32 alignNum) { +MIRType *LibAstFile::CvtVectorSizeType(const MIRType &elemType, MIRType *destType, uint32_t arrLen, uint32_t vecLen, + uint32 alignNum) const{ MIRStructType *structType = nullptr; MIRType *arrayType = nullptr; TypeAttrs elemAttrs; FieldAttrs attrs; FieldPair mirFieldPair; std::string fieldName = "val"; - std::string typeName = elemType->GetMplTypeName(); + std::string typeName = elemType.GetMplTypeName(); std::vector mirFieldVector; std::string name = typeName + "x" + std::to_string(vecLen) + "x" + std::to_string(arrLen) + "_t"; @@ -543,7 +524,7 @@ MIRType *LibAstFile::CvtVectorSizeType(MIRType *elemType, MIRType *destType, uin mirFieldPair.first = idx; mirFieldPair.second.first = arrayType->GetTypeIndex(); mirFieldPair.second.second = attrs; - mirFieldVector.emplace(mirFieldVector.begin(), mirFieldPair); + (void)mirFieldVector.emplace(mirFieldVector.begin(), mirFieldPair); structType->SetFields(mirFieldVector); return structType; } @@ -554,22 +535,22 @@ MIRType *LibAstFile::CvtVectorType(const clang::QualType srcType) { unsigned numElems = vectorType->getNumElements(); MIRType *destType = nullptr; auto elemTypeSize = elemType->GetSize(); - uint32_t vecSize = numElems * elemTypeSize * 8; - CHECK_FATAL(!(vecSize & (vecSize - 1)), "VectorSize is not Multiples of 2"); - if (vecSize > 128) { - uint32_t arrayLen = vecSize / 128; + uint32_t vecSize = static_cast(numElems * elemTypeSize * 8); + CHECK_FATAL((vecSize & (vecSize - 1)) == 0, "VectorSize 
is not Multiples of 2"); + if (vecSize > kMaxPrimTypeSize) { + uint32_t arrayLen = vecSize / kMaxPrimTypeSize; numElems = numElems / arrayLen; } - auto powOf2NumElements = static_cast(__builtin_ctz(numElems)); - auto powOf2ElementByteSize = static_cast(__builtin_ctzl(elemType->GetSize())); + auto powOf2NumElements = static_cast(static_cast(__builtin_ctz(numElems))); + auto powOf2ElementByteSize = static_cast(static_cast(__builtin_ctzl(elemType->GetSize()))); auto isSigned = IsPrimitiveUnsigned(elemType->GetPrimType()) ? 1ULL : 0ULL; auto primType = vectorTypeMap[powOf2NumElements][powOf2ElementByteSize][isSigned]; CHECK_FATAL(primType != PTY_begin, "unexpected vector type"); destType = GlobalTables::GetTypeTable().GetPrimType(primType); - if (vecSize > 128) { - uint32_t arrayLen = vecSize / 128; + if (vecSize > kMaxPrimTypeSize) { + uint32_t arrayLen = vecSize / kMaxPrimTypeSize; uint32_t vecLen = vecSize / (arrayLen * elemType->GetSize() * 8); - return CvtVectorSizeType(elemType, destType, arrayLen, vecLen, numElems * elemTypeSize); + return CvtVectorSizeType(*elemType, destType, arrayLen, vecLen, static_cast(numElems * elemTypeSize)); } return destType; } diff --git a/src/hir2mpl/ast_input/clang/lib/sys/arm_neon.h b/src/hir2mpl/ast_input/clang/lib/sys/arm_neon.h index bb090a902fd5846310baf0d9d4ca297bc6a620a7..304744346e331ab76c2d837d7885001d7772e9a1 100644 --- a/src/hir2mpl/ast_input/clang/lib/sys/arm_neon.h +++ b/src/hir2mpl/ast_input/clang/lib/sys/arm_neon.h @@ -2214,16 +2214,16 @@ uint32x4_t __builtin_mpl_vector_mulq_laneq_v4u32(uint32x4_t a, uint32x4_t b, con #define vmulq_laneq_u32(a, b, c) __builtin_mpl_vector_mulq_laneq_v4u32(a, b, c) int32x4_t __builtin_mpl_vector_mull_n_v4i32(int16x4_t a, int16_t b); -#define vmull_n_s16(a, b) (vmull_s16(a, ((int16x4_t){b,b,b,b}))) +#define vmull_n_s16(a, b) (vmull_s16(a, ((int16x4_t){b, b, b, b}))) int64x2_t __builtin_mpl_vector_mull_n_v2i64(int32x2_t a, int32_t b); -#define vmull_n_s32(a, b) (vmull_s32(a, 
((int32x2_t){b,b}))) +#define vmull_n_s32(a, b) (vmull_s32(a, ((int32x2_t){b, b}))) uint32x4_t __builtin_mpl_vector_mull_n_v4u32(uint16x4_t a, uint16_t b); -#define vmull_n_u16(a, b) (vmull_u16(a, ((uint16x4_t){b,b,b,b}))) +#define vmull_n_u16(a, b) (vmull_u16(a, ((uint16x4_t){b, b, b, b}))) uint64x2_t __builtin_mpl_vector_mull_n_v2u64(uint32x2_t a, uint32_t b); -#define vmull_n_u32(a, b) (vmull_u32(a, ((uint32x2_t){b,b}))) +#define vmull_n_u32(a, b) (vmull_u32(a, ((uint32x2_t){b, b}))) int32x4_t __builtin_mpl_vector_mull_high_n_v4i32(int16x8_t a, int16_t b); #define vmull_high_n_s16(a, b) vmull_n_s16((vget_high_s16(a)), b) diff --git a/src/hir2mpl/ast_input/clang/src/ast_expr.cpp b/src/hir2mpl/ast_input/clang/src/ast_expr.cpp index 51a8bc194711c7ffcdd7765235d21bac21a8ac24..1d405739720a4d4121bbe0de3950919b2fc3810b 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_expr.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_expr.cpp @@ -174,6 +174,10 @@ MIRConst *ASTValue::Translate2MIRConst() const { return FEManager::GetModule().GetMemPool()->New( val.f64, *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); } + case PTY_f128: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(val.f128), *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + } case PTY_a64: { return FEManager::GetModule().GetMemPool()->New( val.strIdx, *GlobalTables::GetTypeTable().GetPrimType(PTY_a64)); @@ -297,7 +301,7 @@ UniqueFEIRExpr ASTDeclRefExpr::Emit2FEExprImpl(std::list &stmts) UniqueFEIRVar feirVar = FEIRBuilder::CreateVarNameForC(refedDecl->GenerateUniqueVarName(), *mirType, refedDecl->IsGlobal(), false); feirVar->SetAttrs(attrs); - if (mirType->GetKind() == kTypeArray) { + if (mirType->GetKind() == kTypeArray || (isVectorType && isAddrOfType)) { feirRefExpr = FEIRBuilder::CreateExprAddrofVar(std::move(feirVar)); } else { feirRefExpr = FEIRBuilder::CreateExprDRead(std::move(feirVar)); @@ -336,7 +340,7 @@ void ASTCallExpr::AddArgsExpr(const std::unique_ptr &callStmt, s 
UniqueFEIRExpr expr = args[i]->Emit2FEExpr(stmts); callStmt->AddExprArgReverse(std::move(expr)); } - if (IsFirstArgRet()) { + if (IsReturnInMemory(*mirType)) { UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *mirType, false, false); UniqueFEIRExpr expr = FEIRBuilder::CreateExprAddrofVar(var->Clone()); callStmt->AddExprArgReverse(std::move(expr)); @@ -363,7 +367,7 @@ UniqueFEIRExpr ASTCallExpr::AddRetExpr(const std::unique_ptr &ca UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *mirType, false, false); var->SetAttrs(GetReturnVarAttrs()); UniqueFEIRVar dreadVar = var->Clone(); - if (!IsFirstArgRet()) { + if (!IsReturnInMemory(*mirType)) { callStmt->SetVar(var->Clone()); } return FEIRBuilder::CreateExprDRead(dreadVar->Clone()); @@ -387,7 +391,7 @@ std::unique_ptr ASTCallExpr::GenCallStmt() const { ASSERT_NOT_NULL(info); info->SetFuncAttrs(funcAttrs); FEIRTypeNative *retTypeInfo = nullptr; - if (IsFirstArgRet()) { + if (IsReturnInMemory(*mirType)) { retTypeInfo = mp->New(*GlobalTables::GetTypeTable().GetPrimType(PTY_void)); } else { retTypeInfo = mp->New(*mirType); @@ -438,6 +442,8 @@ MIRConst *ASTCastExpr::GenerateMIRConstImpl() const { return GenerateMIRDoubleConst(); } else if (dst->GetPrimType() == PTY_f32) { return GenerateMIRFloatConst(); + } else if (dst->GetPrimType() == PTY_f128) { + return GenerateMIRFloat128Const(); } else { return GenerateMIRIntConst(); } @@ -467,6 +473,48 @@ MIRConst *ASTCastExpr::GenerateMIRDoubleConst() const { static_cast(static_cast(childConst)->GetValue()), *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); } + case kConstFloat128Const: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetDoubleValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + } + default: { + CHECK_FATAL(false, "Unsupported pty type: %d", GetConstantValue()->pty); + return nullptr; + } + } +} + +MIRConst *ASTCastExpr::GenerateMIRFloat128Const() const { + MIRConst 
*childConst = child->GenerateMIRConst(); + switch (childConst->GetKind()) { + case kConstFloatConst: { + std::pair floatInt = static_cast(childConst)->GetFloat128Value(); + uint64 arr[2] = {floatInt.first, floatInt.second}; + return FEManager::GetModule().GetMemPool()->New(arr, + *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + } + case kConstInt: { + std::pair floatInt = MIRDoubleConst( + static_cast(childConst)->GetValue().GetExtValue(), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)).GetFloat128Value(); + + uint64 arr[2] = {floatInt.first, floatInt.second}; + return FEManager::GetModule().GetMemPool()->New(arr, + *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + } + case kConstDoubleConst: { + std::pair floatInt = static_cast(childConst)->GetFloat128Value(); + uint64 arr[2] = {floatInt.first, floatInt.second}; + return FEManager::GetModule().GetMemPool()->New( + static_cast(arr), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + } + case kConstFloat128Const: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetIntValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + } default: { CHECK_FATAL(false, "Unsupported pty type: %d", GetConstantValue()->pty); return nullptr; @@ -717,7 +765,7 @@ MIRIntrinsicID intrinsic; CHECK_FATAL(false, "Unhandled vector type in CreateExprVdupAnyVector"); } UniqueFEIRType feType = FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPrimType(primtype)); - std::vector> argOpnds; + std::vector argOpnds; argOpnds.push_back(std::move(subExpr)); return std::make_unique(std::move(feType), intrinsic, argOpnds); } @@ -796,7 +844,8 @@ UniqueFEIRExpr ASTUnaryOperatorExpr::ASTUOSideEffectExpr(Opcode op, std::list(*GlobalTables::GetTypeTable(). GetPrimType(PTY_i64)) : std::make_unique(*GlobalTables::GetTypeTable().GetPrimType(PTY_ptr)); UniqueFEIRExpr subExpr = (subPrimType == PTY_ptr) ? 
std::make_unique(pointeeLen, PTY_i32) : - FEIRBuilder::CreateExprConstAnyScalar(subPrimType, 1); + ((subPrimType != PTY_f128) ? FEIRBuilder::CreateExprConstAnyScalar(subPrimType, 1) : + FEIRBuilder::CreateExprConstAnyScalar(subPrimType, 0x3FFFLL << 48)); UniqueFEIRExpr sideEffectExpr = FEIRBuilder::CreateExprMathBinary(op, childFEIRExpr->Clone(), subExpr->Clone()); UniqueFEIRStmt sideEffectStmt = FEIRBuilder::AssginStmtField(childFEIRExpr->Clone(), std::move(sideEffectExpr), 0); if (isVariableArrayType) { @@ -1485,9 +1534,9 @@ bool ASTInitListExpr::SolveInitListPartialOfZero(std::variantGetBitOffsetFromBaseAddr(fieldIdOfLastZero) + - static_cast(fieldSizeOfLastZero * kOneByte)) - initBitSize; + uint64 fieldsBitSize = + static_cast((baseStructMirType->GetBitOffsetFromBaseAddr(fieldIdOfLastZero) + + static_cast(fieldSizeOfLastZero * kOneByte)) - initBitSize); if (fieldsCount >= 2 && fieldsBitSize % kOneByte == 0 && (fieldsBitSize / kOneByte) % 4 == 0) { auto addrOfExpr = CalculateStartAddressForMemset(var, static_cast(initBitSize / 8), fieldID, base); ProcessImplicitInit(addrOfExpr->Clone(), 0, static_cast(fieldsBitSize / kOneByte), 1, stmts, @@ -1636,7 +1685,7 @@ void ASTInitListExpr::ProcessStructInitList(std::variantGetKind() == kTypeStruct && fieldMirType->GetKind() != kTypeBitField && // skip bitfield type field because it not follows byte alignment initList.initExprs[i]->GetEvaluatedFlag() == kEvaluatedAsZero && - (baseStructMirType->GetBitOffsetFromBaseAddr(fieldID) / kOneByte) % 4 == 0) { + (static_cast(baseStructMirType->GetBitOffsetFromBaseAddr(fieldID)) / kOneByte) % 4 == 0) { if (SolveInitListPartialOfZero(base, fieldID, i, initList, stmts)) { continue; } @@ -1651,7 +1700,8 @@ void ASTInitListExpr::ProcessStructInitList(std::variantGetASTOp() == kASTOpInitListExpr || initList.initExprs[i]->GetASTOp() == kASTASTDesignatedInitUpdateExpr) { - SolveInitListExprOrDesignatedInitUpdateExpr(fieldInfo, *(initList.initExprs[i]), baseStructFEPtrType, base, stmts); + 
SolveInitListExprOrDesignatedInitUpdateExpr(fieldInfo, *(initList.initExprs[i]), + baseStructFEPtrType, base, stmts); } else if (fieldMirType->GetKind() == kTypeArray && initList.initExprs[i]->GetASTOp() == kASTStringLiteral) { SolveStructFieldOfArrayTypeInitWithStringLiteral(fieldInfo, *(initList.initExprs[i]), baseStructFEPtrType, base, stmts); @@ -1664,7 +1714,7 @@ void ASTInitListExpr::ProcessStructInitList(std::variantGetKind() == kTypeUnion) { UniqueFEIRExpr addrOfExpr = std::make_unique(var->Clone(), baseFieldID); ProcessImplicitInit(addrOfExpr->Clone(), curFieldTypeSize, - curStructMirType->GetSize(), 1, stmts, initList.GetSrcLoc()); + static_cast(curStructMirType->GetSize()), 1, stmts, initList.GetSrcLoc()); } } @@ -1710,8 +1760,8 @@ void ASTInitListExpr::HandleImplicitInitSections(const UniqueFEIRExpr &addrOfArr } CHECK_FATAL(elemSize != 0, "elemSize should not 0"); auto allElemCnt = allSize / elemSize; - ProcessImplicitInit(addrOfArray->Clone(), static_cast(initList.initExprs.size()), allElemCnt, elemSize, stmts, - initList.GetSrcLoc()); + ProcessImplicitInit(addrOfArray->Clone(), static_cast(initList.initExprs.size()), + static_cast(allElemCnt), static_cast(elemSize), stmts, initList.GetSrcLoc()); } void ASTInitListExpr::ProcessArrayInitList(const UniqueFEIRExpr &addrOfArray, const ASTInitListExpr &initList, @@ -1789,7 +1839,7 @@ void ASTInitListExpr::ProcessVectorInitList(std::variantEmit2FEExpr(stmts); - std::vector> argOpnds; + std::vector argOpnds; argOpnds.push_back(std::move(elemExpr)); argOpnds.push_back(dreadVar->Clone()); argOpnds.push_back(std::move(indexExpr)); @@ -1801,31 +1851,31 @@ void ASTInitListExpr::ProcessVectorInitList(std::variantClone(), std::move(offsetExpr)); } +MIRIntrinsicID ASTArraySubscriptExpr::SetVectorGetLane(const MIRType &type) const { + MIRIntrinsicID intrinsic; + switch (type.GetPrimType()) { +#define GET_LANE(TY) \ + case PTY_##TY: \ + intrinsic = INTRN_vector_get_lane_##TY; \ + break + + GET_LANE(v2i32); + 
GET_LANE(v4i16); + GET_LANE(v8i8); + GET_LANE(v2u32); + GET_LANE(v4u16); + GET_LANE(v8u8); + GET_LANE(v1i64); + GET_LANE(v1u64); + default: + CHECK_FATAL(false, "Unhandled vector type"); + return INTRN_UNDEFINED; + } + return intrinsic; +} + +MIRIntrinsicID ASTArraySubscriptExpr::SetVectorGetQLane(const MIRType &type) const { + MIRIntrinsicID intrinsic; + switch (type.GetPrimType()) { +#define GETQ_LANE(TY) \ + case PTY_##TY: \ + intrinsic = INTRN_vector_getq_lane_##TY; \ + break + + GETQ_LANE(v2i64); + GETQ_LANE(v4i32); + GETQ_LANE(v8i16); + GETQ_LANE(v16i8); + GETQ_LANE(v2u64); + GETQ_LANE(v4u32); + GETQ_LANE(v8u16); + GETQ_LANE(v16u8); + default: + CHECK_FATAL(false, "Unhandled vector type"); + return INTRN_UNDEFINED; + } + return intrinsic; +} + UniqueFEIRExpr ASTArraySubscriptExpr::Emit2FEExprImpl(std::list &stmts) const { std::list subStmts; // To delete redundant bounds checks in one ASTArraySubscriptExpr stmts. auto baseAddrFEExpr = baseExpr->Emit2FEExpr(subStmts); @@ -2036,11 +2132,25 @@ UniqueFEIRExpr ASTArraySubscriptExpr::Emit2FEExprImpl(std::list } else { addrOfArray = SolveOtherArrayType(baseAddrFEExpr, subStmts); } - if (InsertBoundaryChecking(subStmts, addrOfArray->Clone(), std::move(baseAddrFEExpr))) { + if (InsertBoundaryChecking(subStmts, addrOfArray->Clone(), baseAddrFEExpr->Clone())) { addrOfArray->SetIsBoundaryChecking(true); } stmts.splice(stmts.end(), subStmts); - return FEIRBuilder::CreateExprIRead(std::move(retFEType), fePtrType->Clone(), addrOfArray->Clone()); + if (isVectorType && idxExpr->GetASTOp() == kASTIntegerLiteral) { + MIRIntrinsicID intrinsic; + if (arrayType->GetSize() < 16) { // vectortype size < 128 bits. 
+ intrinsic = SetVectorGetLane(*arrayType); + } else { + intrinsic = SetVectorGetQLane(*arrayType); + } + std::vector argOpnds; + UniqueFEIRExpr idxFEIRExpr = FEIRBuilder::CreateExprConstI32(idxExpr->GetConstantValue()->val.i32); + argOpnds.push_back(baseAddrFEExpr->Clone()); + argOpnds.push_back(idxFEIRExpr->Clone()); + UniqueFEIRType srcType = FEIRTypeHelper::CreateTypeNative(*mirType); + return std::make_unique(std::move(srcType), intrinsic, argOpnds); + } + return FEIRBuilder::CreateExprIRead(std::move(retFEType), fePtrType->Clone(), std::move(addrOfArray)); } UniqueFEIRExpr ASTExprUnaryExprOrTypeTraitExpr::Emit2FEExprImpl(std::list &stmts) const { @@ -2572,6 +2682,26 @@ UniqueFEIRExpr ASTAssignExpr::Emit2FEExprImpl(std::list &stmts) rightFEExpr->Clone(), fieldID); stmts.emplace_back(std::move(preStmt)); return leftFEExpr; + } else if (leftFEExpr->GetKind() == FEIRNodeKind::kExprIntrinsicop) { + auto vectorArrayExpr = static_cast(leftExpr); + MIRType *arrayType = vectorArrayExpr->GetArrayType(); + MIRIntrinsicID intrinsic = SetVectorSetLane(*arrayType); + std::vector argOpnds; + auto baseAddrFEExpr = vectorArrayExpr->GetBaseExpr()->Emit2FEExpr(stmts); + auto vectorDecl = vectorArrayExpr->GetBaseExpr()->GetASTDecl(); + auto idxFEIRExpr = FEIRBuilder::CreateExprConstI32(vectorArrayExpr->GetIdxExpr()->GetConstantValue()->val.i32); + GetActualRightExpr(rightFEExpr, leftFEExpr); + argOpnds.push_back(rightFEExpr->Clone()); // Intrinsicop_set_lane arg0 : value + argOpnds.push_back(baseAddrFEExpr->Clone()); // Intrinsicop_set_lane arg1 : vectortype + argOpnds.push_back(idxFEIRExpr->Clone()); // Intrinsicop_set_lane arg2 : index + UniqueFEIRType srcType = FEIRTypeHelper::CreateTypeNative(*arrayType); + UniqueFEIRExpr intrinsicFEIRExpr = std::make_unique(std::move(srcType), + intrinsic, argOpnds); + UniqueFEIRVar feirVar = FEIRBuilder::CreateVarNameForC(vectorDecl->GenerateUniqueVarName(), *arrayType, + vectorDecl->IsGlobal()); + auto preStmt = 
FEIRBuilder::CreateStmtDAssignAggField(feirVar->Clone(), intrinsicFEIRExpr->Clone(), 0); + stmts.emplace_back(std::move(preStmt)); + return leftFEExpr; } return nullptr; } @@ -2617,27 +2747,33 @@ UniqueFEIRExpr ASTParenExpr::Emit2FEExprImpl(std::list &stmts) c // ---------- ASTIntegerLiteral ---------- MIRConst *ASTIntegerLiteral::GenerateMIRConstImpl() const { - return GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(val), - *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); + PrimType pty = GetType()->GetPrimType(); + if (IsInt128Ty(pty)) { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, + *GlobalTables::GetTypeTable().GetPrimType(PTY_i128)); + } + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); } UniqueFEIRExpr ASTIntegerLiteral::Emit2FEExprImpl(std::list &stmts) const { (void)stmts; - UniqueFEIRExpr constExpr = std::make_unique(val, mirType->GetPrimType()); - return constExpr; + return std::make_unique(val, mirType->GetPrimType()); } // ---------- ASTFloatingLiteral ---------- MIRConst *ASTFloatingLiteral::GenerateMIRConstImpl() const { MemPool *mp = FEManager::GetModule().GetMemPool(); - MIRConst *cst; + MIRConst *cst = nullptr; MIRType *type; if (kind == FloatKind::F32) { type = GlobalTables::GetTypeTable().GetPrimType(PTY_f32); - cst = mp->New(static_cast(val), *type); - } else { + cst = mp->New(GetDoubleVal(), *type); + } else if (kind == FloatKind::F64) { type = GlobalTables::GetTypeTable().GetPrimType(PTY_f64); - cst = mp->New(val, *type); + cst = mp->New(GetDoubleVal(), *type); + } else { + type = GlobalTables::GetTypeTable().GetPrimType(PTY_f128); + cst = mp->New(std::get<1>(val).data(), *type); } return cst; } @@ -2646,9 +2782,11 @@ UniqueFEIRExpr ASTFloatingLiteral::Emit2FEExprImpl(std::list &st (void)stmts; UniqueFEIRExpr expr; if (kind == FloatKind::F32) { - expr = FEIRBuilder::CreateExprConstF32(static_cast(val)); + expr = 
FEIRBuilder::CreateExprConstF32(static_cast(GetDoubleVal())); + } else if (kind == FloatKind::F64) { + expr = FEIRBuilder::CreateExprConstF64(GetDoubleVal()); } else { - expr = FEIRBuilder::CreateExprConstF64(val); + expr = FEIRBuilder::CreateExprConstF128(std::get<1>(val).data()); } CHECK_NULL_FATAL(expr); return expr; @@ -2870,6 +3008,9 @@ VaArgInfo ASTVAArgExpr::ProcessValistArgInfo(const MIRType &type) const { case PTY_f64: // double info = { false, 16, 8, false, nullptr }; break; + case PTY_f128: + info = { false, 16, 16, false, nullptr }; + break; case PTY_i32: case PTY_u32: case PTY_i64: @@ -2888,6 +3029,20 @@ VaArgInfo ASTVAArgExpr::ProcessValistArgInfo(const MIRType &type) const { MIRStructType structType = static_cast(type); size_t size = structType.GetSize(); size = (size + 7) & -8; // size round up 8 +#ifdef TARGAARCH64 + PrimType baseType = PTY_begin; + size_t elemNum = 0; + if (IsHomogeneousAggregates(type, baseType, elemNum)) { + // homogeneous aggregates is passed by fp register + info = { false, static_cast(elemNum * k16BitSize), static_cast(size), false, + GlobalTables::GetTypeTable().GetPrimType(baseType) }; + } else if (size > k16BitSize) { + // aggregates size > 16-byte, is passed by address + info = { true, k8BitSize, k8BitSize, true, nullptr }; + } else { + info = { true, static_cast(size), static_cast(size), false, nullptr }; + } +#else if (size > 16) { info = { true, 8, 8, true, nullptr }; } else { @@ -2899,6 +3054,7 @@ VaArgInfo ASTVAArgExpr::ProcessValistArgInfo(const MIRType &type) const { info = { true, static_cast(size), static_cast(size), false, nullptr }; } } +#endif // TARGAARCH64 } else { CHECK_FATAL(false, "unsupport mirtype"); } diff --git a/src/hir2mpl/ast_input/clang/src/ast_parser.cpp b/src/hir2mpl/ast_input/clang/src/ast_parser.cpp index 54020ba1fcac8b90141e5fde094869727dd50436..ae93c4bcb993e92c2e8cad6c126f60f70059b20b 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_parser.cpp +++ 
b/src/hir2mpl/ast_input/clang/src/ast_parser.cpp @@ -26,6 +26,7 @@ #include "fe_manager.h" #include "enhance_c_checker.h" #include "fe_macros.h" +#include "int128_util.h" namespace maple { @@ -165,6 +166,7 @@ ASTStmt *ASTParser::ProcessStmt(MapleAllocator &allocator, const clang::Stmt &st STMT_CASE(DeclRefExpr); STMT_CASE(UnaryExprOrTypeTraitExpr); STMT_CASE(AddrLabelExpr); + STMT_CASE(MemberExpr); default: { CHECK_FATAL(false, "ASTStmt: %s NIY", stmt.getStmtClassName()); return nullptr; @@ -681,6 +683,16 @@ ASTStmt *ASTParser::ProcessStmtAddrLabelExpr(MapleAllocator &allocator, const cl return astStmt; } +ASTStmt *ASTParser::ProcessStmtMemberExpr(MapleAllocator &allocator, const clang::MemberExpr &expr) { + ASTMemberExprStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + CHECK_FATAL(astExpr != nullptr, "astExpr is nullptr"); + astStmt->SetCallAlloca(astExpr->IsCallAlloca()); + astStmt->SetASTExpr(astExpr); + return astStmt; +} + ASTStmt *ASTParser::ProcessStmtCaseStmt(MapleAllocator &allocator, const clang::CaseStmt &caseStmt) { ASTCaseStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); @@ -841,8 +853,8 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, } astValue->pty = PTY_i64; break; - case PTY_i128: - astValue->val.i64 = static_cast(result.Val.getInt().getSExtValue()); + case PTY_i128: { + Int128Util::CopyInt128(astValue->val.i128, result.Val.getInt().getRawData()); astValue->pty = PTY_i128; static bool i128Warning = true; if (i128Warning) { @@ -852,6 +864,7 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, i128Warning = false; } break; + } case PTY_u8: astValue->val.u8 = static_cast(result.Val.getInt().getExtValue()); astValue->pty = PTY_u8; @@ -872,8 +885,8 @@ ASTValue 
*ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, } astValue->pty = PTY_u64; break; - case PTY_u128: - astValue->val.u64 = static_cast(result.Val.getInt().getZExtValue()); + case PTY_u128: { + Int128Util::CopyInt128(astValue->val.i128, result.Val.getInt().getRawData()); astValue->pty = PTY_u128; static bool u128Warning = true; if (u128Warning) { @@ -883,6 +896,7 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, u128Warning = false; } break; + } case PTY_u1: astValue->val.u8 = (result.Val.getInt().getExtValue() == 0 ? 0 : 1); astValue->pty = PTY_u1; @@ -921,6 +935,12 @@ ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, llvm::APFloatBase::rmNearestTiesToAway, &losesInfo); astValue->val.f64 = fValue.convertToDouble(); + } else if (constMirType->GetPrimType() == PTY_f128) { + (void)fValue.convert(llvm::APFloat::IEEEquad(), llvm::APFloatBase::rmNearestTiesToAway, + &losesInfo); + llvm::APInt intValue = fValue.bitcastToAPInt(); + astValue->val.f128[0] = intValue.getRawData()[0]; + astValue->val.f128[1] = intValue.getRawData()[1]; } else { (void)fValue.convert(llvm::APFloat::IEEEsingle(), llvm::APFloatBase::rmNearestTiesToAway, @@ -1052,8 +1072,9 @@ ASTExpr *ASTParser::EvaluateExprAsConst(MapleAllocator &allocator, const clang:: if (constVal.isInt()) { ASTIntegerLiteral *intExpr = allocator.New(allocator); llvm::APSInt intVal = constVal.getInt(); - intExpr->SetVal(intVal.getExtValue()); - if (intVal.getExtValue() == 0) { + intExpr->SetVal(IntVal(intVal.getRawData(), intVal.getBitWidth(), intVal.isSigned())); + + if (intVal == 0) { intExpr->SetEvaluatedFlag(kEvaluatedAsZero); } else { intExpr->SetEvaluatedFlag(kEvaluatedAsNonZero); @@ -1074,10 +1095,11 @@ ASTExpr *ASTParser::EvaluateExprAsConst(MapleAllocator &allocator, const clang:: floatExpr->SetVal(val); } else if (&fltSem == &llvm::APFloat::IEEEquad() || &fltSem == &llvm::APFloat::x87DoubleExtended()) { bool losesInfo; - 
(void)floatVal.convert(llvm::APFloat::IEEEdouble(), llvm::APFloatBase::rmNearestTiesToAway, &losesInfo); - val = static_cast(floatVal.convertToDouble()); - floatExpr->SetKind(FloatKind::F64); - floatExpr->SetVal(val); + (void)floatVal.convert(llvm::APFloat::IEEEquad(), + llvm::APFloatBase::rmNearestTiesToAway, &losesInfo); + llvm::APInt intValue = floatVal.bitcastToAPInt(); + floatExpr->SetKind(FloatKind::F128); + floatExpr->SetVal(intValue.getRawData()); } else { return nullptr; } @@ -1158,7 +1180,6 @@ ASTExpr *ASTParser::ProcessExpr(MapleAllocator &allocator, const clang::Expr *ex EXPR_CASE(GenericSelectionExpr); default: CHECK_FATAL(false, "ASTExpr %s NIY", expr->getStmtClassName()); - return nullptr; } } @@ -1265,8 +1286,8 @@ ASTExpr *ASTParser::ProcessExprUnaryOperator(MapleAllocator &allocator, const cl ASTExpr *vlaTypeSizeExpr = BuildExprToComputeSizeFromVLA(allocator, desugaredType); astUOExpr->SetVariableArrayExpr(vlaTypeSizeExpr); } else { - const clang::QualType desugaredType = qualType.getDesugaredType(*(astFile->GetContext())); - len = astFile->GetContext()->getTypeSizeInChars(desugaredType).getQuantity(); + const clang::QualType desugaredTyp = qualType.getDesugaredType(*(astFile->GetContext())); + len = astFile->GetContext()->getTypeSizeInChars(desugaredTyp).getQuantity(); astUOExpr->SetPointeeLen(len); } } @@ -1607,18 +1628,22 @@ ASTExpr *ASTParser::ProcessExprArraySubscriptExpr(MapleAllocator &allocator, con base = PeelParen2(*base); ASTExpr *idxExpr = ProcessExpr(allocator, expr.getIdx()); - astArraySubscriptExpr->SetIdxExpr(idxExpr); - astArraySubscriptExpr->SetCallAlloca(idxExpr != nullptr && idxExpr->IsCallAlloca()); clang::QualType arrayQualType = base->getType().getCanonicalType(); if (base->getStmtClass() == clang::Stmt::ImplicitCastExprClass && !static_cast(base)->isPartOfExplicitCast()) { arrayQualType = static_cast(base)->getSubExpr()->getType().getCanonicalType(); } - - CHECK_FATAL(!arrayQualType->isVectorType(), "Unsupported vector type 
in astArraySubscriptExpr"); - - auto arrayMirType = astFile->CvtType(arrayQualType); + MIRType *arrayMirType = astFile->CvtType(arrayQualType); + if (arrayQualType->isVectorType()) { + if (arrayMirType->GetSize() <= 16) { // vectortype size <= 128 bits. + astArraySubscriptExpr->SetIsVectorType(true); + } else { + CHECK_FATAL(false, "Unsupported vectortype size > 128 in astArraySubscriptExpr"); + } + } + astArraySubscriptExpr->SetIdxExpr(idxExpr); astArraySubscriptExpr->SetArrayType(arrayMirType); + astArraySubscriptExpr->SetCallAlloca(idxExpr != nullptr && idxExpr->IsCallAlloca()); clang::QualType exprType = expr.getType().getCanonicalType(); if (arrayQualType->isVariablyModifiedType()) { @@ -1627,6 +1652,10 @@ ASTExpr *ASTParser::ProcessExprArraySubscriptExpr(MapleAllocator &allocator, con astArraySubscriptExpr->SetVLASizeExpr(vlaTypeSizeExpr); } ASTExpr *astBaseExpr = ProcessExpr(allocator, base); + if (astBaseExpr->GetASTOp() == kASTOpRef && idxExpr->GetASTOp() != kASTIntegerLiteral) { + auto refExpr = static_cast(astBaseExpr); + refExpr->SetIsAddrOfType(true); + } astArraySubscriptExpr->SetCallAlloca(astBaseExpr != nullptr && astBaseExpr->IsCallAlloca()); astArraySubscriptExpr->SetBaseExpr(astBaseExpr); auto *mirType = astFile->CvtType(exprType); @@ -1747,7 +1776,7 @@ ASTExpr *ASTParser::BuildExprToComputeSizeFromVLA(MapleAllocator &allocator, con if (sizeExpr == nullptr) { return nullptr; } - MapleMap::const_iterator iter = vlaSizeMap.find(sizeExpr); + MapleMap::const_iterator iter = std::as_const(vlaSizeMap).find(sizeExpr); if (iter != vlaSizeMap.cend()) { return iter->second; } @@ -2183,7 +2212,7 @@ ASTExpr *ASTParser::ProcessExprFloatingLiteral(MapleAllocator &allocator, const llvm::APFloatBase::rmNearestTiesToAway, &losesInfo); val = static_cast(apf.convertToDouble()); - astFloatingLiteral->SetKind(FloatKind::F64); + astFloatingLiteral->SetKind(FloatKind::F128); astFloatingLiteral->SetVal(val); } else { CHECK_FATAL(false, "unsupported floating literal"); 
@@ -2321,6 +2350,15 @@ ASTExpr *ASTParser::ProcessExprDeclRefExpr(MapleAllocator &allocator, const clan astDecl = ProcessDecl(allocator, *(expr.getDecl()->getCanonicalDecl())); } } + clang::QualType type = expr.getType(); + if (llvm::isa(type)) { + auto typedefType = llvm::dyn_cast(type); + if (llvm::isa(typedefType->desugar())) { + astRefExpr->SetIsVectorType(true); + } + } else if (llvm::isa(type)) { + astRefExpr->SetIsVectorType(true); + } astRefExpr->SetASTDecl(astDecl); astRefExpr->SetType(astDecl->GetTypeDesc().front()); return astRefExpr; @@ -2525,7 +2563,7 @@ ASTExpr *ASTParser::ProcessExprArrayInitIndexExpr(MapleAllocator &allocator, return astExpr; } -clang::Expr *ASTParser::GetAtomValExpr(clang::Expr *valExpr) { +clang::Expr *ASTParser::GetAtomValExpr(clang::Expr *valExpr) const { clang::Expr *atomValExpr = valExpr; while (llvm::isa(atomValExpr) || llvm::isa(atomValExpr) || llvm::isa(atomValExpr)) { @@ -2540,7 +2578,7 @@ clang::Expr *ASTParser::GetAtomValExpr(clang::Expr *valExpr) { return atomValExpr; } -clang::QualType ASTParser::GetPointeeType(const clang::Expr &expr) { +clang::QualType ASTParser::GetPointeeType(const clang::Expr &expr) const { clang::QualType type = expr.getType().getCanonicalType(); if (type->isPointerType() && !type->getPointeeType()->isRecordType()) { type = type->getPointeeType(); @@ -2598,15 +2636,15 @@ ASTExpr *ASTParser::ProcessExprAtomicExpr(MapleAllocator &allocator, CHECK_FATAL(astExpr != nullptr, "astCastExpr is nullptr"); astExpr->SetObjExpr(ProcessExpr(allocator, atomicExpr.getPtr())); astExpr->SetType(astFile->CvtType(atomicExpr.getPtr()->getType())); - astExpr->SetRefType(astFile->CvtType(atomicExpr.getPtr()->getType()->getPointeeType())); + const clang::QualType firstArgPointeeType = GetPointeeType(*atomicExpr.getPtr()); + astExpr->SetRefType(astFile->CvtType(firstArgPointeeType)); if (atomicExpr.getOp() != clang::AtomicExpr::AO__atomic_load_n) { SetAtomExprValType(allocator, atomicExpr, *astExpr); if 
(atomicExpr.getOp() == clang::AtomicExpr::AO__atomic_exchange) { SetAtomExchangeType(allocator, atomicExpr, *astExpr); } } else { - const clang::QualType valType = GetPointeeType(atomicExpr); - astExpr->SetVal1Type(astFile->CvtType(valType)); + astExpr->SetVal1Type(astFile->CvtType(firstArgPointeeType)); } astExpr->SetOrderExpr(ProcessExpr(allocator, atomicExpr.getOrder())); @@ -2718,14 +2756,14 @@ bool ASTParser::PreProcessAST() { #define SET_LOC(astDeclaration, decl, astFile) \ do { \ - if (astDeclaration != nullptr) { \ - astDeclaration->SetGlobal(decl.isDefinedOutsideFunctionOrMethod()); \ - if (astDeclaration->GetSrcFileIdx() == 0) { \ - Loc loc = astFile->GetLOC(decl.getLocation()); \ - astDeclaration->SetSrcLoc(loc); \ + if ((astDeclaration) != nullptr) { \ + (astDeclaration)->SetGlobal((decl).isDefinedOutsideFunctionOrMethod()); \ + if ((astDeclaration)->GetSrcFileIdx() == 0) { \ + Loc loc = (astFile)->GetLOC((decl).getLocation()); \ + (astDeclaration)->SetSrcLoc(loc); \ } \ } \ - } while(0) + } while (0) #define DECL_CASE(CLASS) \ case clang::Decl::CLASS: { \ @@ -2752,7 +2790,6 @@ ASTDecl *ASTParser::ProcessDecl(MapleAllocator &allocator, const clang::Decl &de DECL_CASE(FileScopeAsm); default: CHECK_FATAL(false, "ASTDecl: %s NIY", decl.getDeclKindName()); - return nullptr; } } @@ -2851,14 +2888,17 @@ GenericAttrs ASTParser::SolveFunctionAttributes(const clang::FunctionDecl &funcD astFile->CollectFuncAttrs(funcDecl, attrs, kPublic); // for inline optimize if (attrs.GetAttr(GENATTR_static) && FEOptions::GetInstance().GetFuncInlineSize() != 0) { - funcName = funcName + astFile->GetAstFileNameHashStr(); + if (FEOptions::GetInstance().GetWPAA() && FEOptions::GetInstance().IsEnableFuncMerge()) { + astFile->BuildStaticFunctionLayout(funcDecl, funcName); + } else { + funcName = funcName + astFile->GetAstFileNameHashStr(); + } } // set inline functions as weak symbols as it's in C++ if (opts::inlineAsWeak == true && attrs.GetAttr(GENATTR_inline) && 
!attrs.GetAttr(GENATTR_static)) { attrs.SetAttr(GENATTR_weak); } - return attrs; } @@ -2876,7 +2916,7 @@ ASTStmt *ASTParser::SolveFunctionBody(MapleAllocator &allocator, } MapleVector ASTParser::CvtFuncTypeAndRetType(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, - clang::QualType qualType) { + const clang::QualType &qualType) const { MapleVector typeDescIn(allocator.Adapter()); clang::QualType funcQualType = funcDecl.getType(); MIRType *mirFuncType = astFile->CvtType(funcQualType); @@ -3057,7 +3097,7 @@ void ASTParser::SetAlignmentForASTVar(const clang::VarDecl &varDecl, ASTVar &ast } } -void ASTParser::CheckVarNameValid(std::string varName) { +void ASTParser::CheckVarNameValid(const std::string &varName) const { CHECK_FATAL(isalpha(varName[0]) || varName[0] == '_', "%s' varName is invalid", varName.c_str()); for (size_t i = 1; i < varName.size(); i++) { /* check valid varName in C, but unsupport Unicode */ @@ -3092,6 +3132,13 @@ ASTDecl *ASTParser::ProcessDeclVarDecl(MapleAllocator &allocator, const clang::V FE_ERR(kLncErr, astFile->GetLOC(varDecl.getLocation()), "tentative definition of variable '%s' has incomplete" " struct type 'struct '%s''", varName.c_str(), varType->GetName().c_str()); } + if (varDecl.hasInit() && attrs.GetAttr(GENATTR_extern)) { + attrs.ResetAttr(GENATTR_extern); + } + if (attrs.GetAttr(GENATTR_extern) && attrs.GetAttr(GENATTR_visibility_hidden)) { + attrs.ResetAttr(GENATTR_extern); + attrs.SetAttr(GENATTR_static); + } astVar = ASTDeclsBuilder::GetInstance(allocator).ASTVarBuilder( allocator, fileName, varName, MapleVector({varType}, allocator.Adapter()), attrs, varDecl.getID()); if (FEOptions::GetInstance().IsDbgFriendly()) { @@ -3141,7 +3188,11 @@ ASTDecl *ASTParser::ProcessDeclParmVarDecl(MapleAllocator &allocator, const clan const clang::QualType parmQualType = parmVarDecl.getType(); std::string parmName = parmVarDecl.getNameAsString(); if (parmName.length() == 0) { - parmName = FEUtils::GetSequentialName("arg|"); + if 
(FEOptions::GetInstance().GetWPAA() && FEOptions::GetInstance().IsEnableFuncMerge()) { + parmName = "arg|"; + } else { + parmName = FEUtils::GetSequentialName("arg|"); + } } MIRType *paramType = astFile->CvtType(parmQualType); if (paramType == nullptr) { @@ -3299,14 +3350,41 @@ bool ASTParser::RetrieveStructs(MapleAllocator &allocator) { recDecl = recDeclDef; } } - ASTStruct *curStructOrUnion = static_cast(ProcessDecl(allocator, *recDecl)); - if (curStructOrUnion == nullptr) { - return false; - } - auto itor = std::find(astStructs.cbegin(), astStructs.cend(), curStructOrUnion); - if (itor != astStructs.end()) { - } else { + if (FEOptions::GetInstance().GetWPAA()) { + std::string srcFileName = GetSourceFileName(); + std::stringstream recName; + clang::QualType qType = recDecl->getTypeForDecl()->getCanonicalTypeInternal(); + astFile->EmitTypeName(*qType->getAs(), recName); + std::string recordName = recName.str(); + auto itFile = structFileNameMap.find(srcFileName); + if (itFile != structFileNameMap.end()) { + auto itIdxSet = itFile->second; + auto itIdx = itIdxSet.find(recordName); + if (itIdx == itIdxSet.end()) { + (void)itIdxSet.insert(recordName); + } else { + continue; + } + } else { + std::unordered_set structIdxSet; + (void)structIdxSet.insert(recordName); + structFileNameMap.insert(std::pair>(srcFileName, structIdxSet)); + } + ASTStruct *curStructOrUnion = static_cast(ProcessDecl(allocator, *recDecl)); + if (curStructOrUnion == nullptr) { + return false; + } astStructs.emplace_back(curStructOrUnion); + } else { + ASTStruct *curStructOrUnion = static_cast(ProcessDecl(allocator, *recDecl)); + if (curStructOrUnion == nullptr) { + return false; + } + auto itor = std::find(astStructs.cbegin(), astStructs.cend(), curStructOrUnion); + if (itor != astStructs.end()) { + } else { + (void)astStructs.emplace_back(curStructOrUnion); + } } } return true; diff --git a/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp 
b/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp index 32f3db3f9c3ac3e1eb0729a24301ee37cf7134e0..7d81bd14f875ab6e8b0a3c7bb25988110bac8378 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp @@ -115,26 +115,23 @@ UniqueFEIRExpr ASTCallExpr::CreateBinaryExpr(std::list &stmts, O } UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, bool &isFinish) const { - // process a kind of builtinFunc - std::string prefix = "__builtin_mpl_vector_load"; - if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { - return EmitBuiltinVectorLoad(stmts, isFinish); - } - prefix = "__builtin_mpl_vector_store"; - if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { - return EmitBuiltinVectorStore(stmts, isFinish); - } - prefix = "__builtin_mpl_vector_shli"; - if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { - return EmitBuiltinVectorShli(stmts, isFinish); - } - prefix = "__builtin_mpl_vector_shri"; - if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { - return EmitBuiltinVectorShri(stmts, isFinish); - } - prefix = "__builtin_mpl_vector_shru"; - if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { - return EmitBuiltinVectorShru(stmts, isFinish); + // process a kind of builtinFunc & unsupport __builtin_mpl_vector_st_call function + using BuiltinFunc = UniqueFEIRExpr(*)(std::list &stmtsBuiltin, bool &isFinishBuiltin, + const MapleVector &argsBuiltin, MIRType &mirTypeBuiltin, + const std::pair &funcMessageBuiltin); + static std::map> builtinFuncKindMap = { + {"__builtin_mpl_vector_load", &EmitBuiltinVectorLoad}, + {"__builtin_mpl_vector_store", &EmitBuiltinVectorStore}, + {"__builtin_mpl_vector_shli", &EmitBuiltinVectorShli}, + {"__builtin_mpl_vector_shri", &EmitBuiltinVectorShri}, + {"__builtin_mpl_vector_shru", &EmitBuiltinVectorShru}, + {"__builtin_mpl_vector_st", &EmitBuiltinVectorStFunc} + }; + const std::pair funcMessage = 
{GetFuncName(), GetSrcLoc()}; + for (auto &it : builtinFuncKindMap) { + if (GetFuncName().compare(0, it.first.size(), it.first) == 0) { + return (it.second)(stmts, isFinish, args, *mirType, funcMessage); + } } // process a single builtinFunc auto ptrFunc = builtingFuncPtrMap.find(GetFuncName()); @@ -144,7 +141,7 @@ UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, } isFinish = false; if (FEOptions::GetInstance().GetDumpLevel() >= FEOptions::kDumpLevelInfo) { - prefix = "__builtin"; + std::string prefix = "__builtin"; if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "%s:%d BuiltinFunc (%s) has not been implemented", FEManager::GetModule().GetFileNameFromFileNum(GetSrcFileIdx()).c_str(), GetSrcFileLineNum(), @@ -171,50 +168,80 @@ UniqueFEIRExpr ASTCallExpr::EmitBuiltin##STR(std::list &stmts) c #include "intrinsic_vector_new.def" #undef DEF_MIR_INTRINSIC -UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorLoad(std::list &stmts, bool &isFinish) const { - auto argExpr = args[0]->Emit2FEExpr(stmts); - UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*mirType); +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorLoad(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage) { + (void)funcMessage; + auto argExpr = callArgs[0]->Emit2FEExpr(stmts); + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(mirType); UniqueFEIRType ptrType = FEIRTypeHelper::CreateTypeNative( - *GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType)); + *GlobalTables::GetTypeTable().GetOrCreatePointerType(mirType)); isFinish = true; return FEIRBuilder::CreateExprIRead(std::move(type), std::move(ptrType), std::move(argExpr)); } -UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorStore(std::list &stmts, bool &isFinish) const { - auto arg1Expr = args[0]->Emit2FEExpr(stmts); - auto arg2Expr = args[1]->Emit2FEExpr(stmts); +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorStore(std::list 
&stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage) { + (void)funcMessage; + (void)mirType; + auto arg1Expr = callArgs[0]->Emit2FEExpr(stmts); + auto arg2Expr = callArgs[1]->Emit2FEExpr(stmts); UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative( - *GlobalTables::GetTypeTable().GetOrCreatePointerType(*args[1]->GetType())); + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*callArgs[1]->GetType())); auto stmt = FEIRBuilder::CreateStmtIAssign(std::move(type), std::move(arg1Expr), std::move(arg2Expr)); (void)stmts.emplace_back(std::move(stmt)); isFinish = true; return nullptr; } -UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShli(std::list &stmts, bool &isFinish) const { +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShli(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage) { + (void)funcMessage; + (void)mirType; isFinish = true; - UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); - auto arg1Expr = args[0]->Emit2FEExpr(stmts); - auto arg2Expr = args[1]->Emit2FEExpr(stmts); + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*callArgs[0]->GetType()); + auto arg1Expr = callArgs[0]->Emit2FEExpr(stmts); + auto arg2Expr = callArgs[1]->Emit2FEExpr(stmts); return FEIRBuilder::CreateExprBinary(std::move(type), OP_shl, std::move(arg1Expr), std::move(arg2Expr)); } -UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShri(std::list &stmts, bool &isFinish) const { +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShri(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage) { + (void)funcMessage; + (void)mirType; isFinish = true; - UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); - auto arg1Expr = args[0]->Emit2FEExpr(stmts); - auto arg2Expr = args[1]->Emit2FEExpr(stmts); + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*callArgs[0]->GetType()); + 
auto arg1Expr = callArgs[0]->Emit2FEExpr(stmts); + auto arg2Expr = callArgs[1]->Emit2FEExpr(stmts); return FEIRBuilder::CreateExprBinary(std::move(type), OP_ashr, std::move(arg1Expr), std::move(arg2Expr)); } -UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShru(std::list &stmts, bool &isFinish) const { +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShru(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage) { + (void)funcMessage; + (void)mirType; isFinish = true; - UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); - auto arg1Expr = args[0]->Emit2FEExpr(stmts); - auto arg2Expr = args[1]->Emit2FEExpr(stmts); + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*callArgs[0]->GetType()); + auto arg1Expr = callArgs[0]->Emit2FEExpr(stmts); + auto arg2Expr = callArgs[1]->Emit2FEExpr(stmts); return FEIRBuilder::CreateExprBinary(std::move(type), OP_lshr, std::move(arg1Expr), std::move(arg2Expr)); } +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorStFunc(std::list &stmts, bool &isFinish, + const MapleVector &callArgs, MIRType &mirType, + const std::pair &funcMessage) { + (void)stmts; + (void)isFinish; + (void)callArgs; + (void)mirType; + FE_ERR(kLncErr, funcMessage.second, "Unsupport arm_neon intrinsic_call %s", funcMessage.first.c_str()); + return std::make_unique(funcMessage.second.line); +} + UniqueFEIRExpr ASTCallExpr::EmitBuiltinVaStart(std::list &stmts) const { // args auto exprArgList = std::make_unique>(); @@ -700,12 +727,15 @@ UniqueFEIRExpr ASTCallExpr::EmitBuiltinExpect(std::list &stmts) } UniqueFEIRExpr ASTCallExpr::EmitBuiltinAbs(std::list &stmts) const { - auto arg = args[0]->Emit2FEExpr(stmts); - CHECK_NULL_FATAL(mirType); - auto abs = std::make_unique(FEIRTypeHelper::CreateTypeNative(*mirType), OP_abs, std::move(arg)); - auto feType = std::make_unique(*mirType); - abs->SetType(std::move(feType)); - return abs; + if (mirType->GetPrimType() != PTY_f128) { + auto arg = 
args[0]->Emit2FEExpr(stmts); + CHECK_NULL_FATAL(mirType); + auto abs = std::make_unique(FEIRTypeHelper::CreateTypeNative(*mirType), OP_abs, std::move(arg)); + auto feType = std::make_unique(*mirType); + abs->SetType(std::move(feType)); + return abs; + } + return CreateIntrinsicopForC(stmts, INTRN_C_fabsl); } UniqueFEIRExpr ASTCallExpr::EmitBuiltinACos(std::list &stmts) const { @@ -785,11 +815,19 @@ UniqueFEIRExpr ASTCallExpr::EmitBuiltinBswap16(std::list &stmts) } UniqueFEIRExpr ASTCallExpr::EmitBuiltinFmax(std::list &stmts) const { - return CreateBinaryExpr(stmts, OP_max); + if (mirType->GetPrimType() != PTY_f128) { + return CreateBinaryExpr(stmts, OP_max); + } else { + return CreateIntrinsicopForC(stmts, INTRN_C_fmaxl); + } } UniqueFEIRExpr ASTCallExpr::EmitBuiltinFmin(std::list &stmts) const { - return CreateBinaryExpr(stmts, OP_min); + if (mirType->GetPrimType() != PTY_f128) { + return CreateBinaryExpr(stmts, OP_min); + } else { + return CreateIntrinsicopForC(stmts, INTRN_C_fminl); + } } UniqueFEIRExpr ASTCallExpr::EmitBuiltinLog(std::list &stmts) const { @@ -967,7 +1005,9 @@ ASTExpr *ASTParser::ParseBuiltinIsinfsign(MapleAllocator &allocator, const clang MIRType *mirType = astFile->CvtType(expr.getArg(0)->getType()); if (mirType != nullptr) { PrimType type = mirType->GetPrimType(); - if (type == PTY_f64) { + if (type == PTY_f128) { + ss << "__isinfl"; + } else if (type == PTY_f64) { ss << "__isinf"; } else if (type == PTY_f32) { ss << "__isinff"; @@ -989,6 +1029,18 @@ ASTExpr *ASTParser::ParseBuiltinHugeVal(MapleAllocator &allocator, const clang:: return astFloatingLiteral; } +ASTExpr *ASTParser::ParseBuiltinHugeVall(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss, ASTCallExpr &astCallExpr) const { + (void)astCallExpr; + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F128); + ASTFloatingLiteral::floatArraySizes initVal = 
{0x0, 0x7fff000000000000}; + astFloatingLiteral->SetVal(std::move(initVal)); + return astFloatingLiteral; +} + ASTExpr *ASTParser::ParseBuiltinHugeValf(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss, ASTCallExpr &astCallExpr) const { (void)astCallExpr; @@ -1011,6 +1063,18 @@ ASTExpr *ASTParser::ParseBuiltinInf(MapleAllocator &allocator, const clang::Call return astFloatingLiteral; } +ASTExpr *ASTParser::ParseBuiltinInfl(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss, ASTCallExpr &astCallExpr) const { + (void)astCallExpr; + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F128); + ASTFloatingLiteral::floatArraySizes initVal = {0x0, 0x7fff000000000000}; + astFloatingLiteral->SetVal(std::move(initVal)); + return astFloatingLiteral; +} + ASTExpr *ASTParser::ParseBuiltinInff(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss, ASTCallExpr &astCallExpr) const { (void)astCallExpr; @@ -1033,6 +1097,18 @@ ASTExpr *ASTParser::ParseBuiltinNan(MapleAllocator &allocator, const clang::Call return astFloatingLiteral; } +ASTExpr *ASTParser::ParseBuiltinNanl(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss, ASTCallExpr &astCallExpr) const { + (void)astCallExpr; + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F128); + ASTFloatingLiteral::floatArraySizes initVal = {0x0, 0x7fff800000000000}; + astFloatingLiteral->SetVal(std::move(initVal)); + return astFloatingLiteral; +} + ASTExpr *ASTParser::ParseBuiltinNanf(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss, ASTCallExpr &astCallExpr) const { (void)astCallExpr; @@ -1053,6 +1129,9 @@ ASTExpr *ASTParser::ParseBuiltinSignBitf(MapleAllocator &allocator, const clang: ASTExpr 
*ASTParser::ParseBuiltinSignBitl(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss, ASTCallExpr &astCallExpr) const { (void)astCallExpr; + if (astFile->CvtType(expr.getArg(0)->getType())->GetPrimType() != PTY_f128) { + return ProcessBuiltinFuncByName(allocator, expr, ss, "__signbit"); + } return ProcessBuiltinFuncByName(allocator, expr, ss, "__signbitl"); } @@ -1082,6 +1161,8 @@ ASTExpr *ASTParser::ParseBuiltinCopysignl(MapleAllocator &allocator, const clang ASTExpr *ASTParser::ParseBuiltinAtomicClear(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss, ASTCallExpr &astCallExpr) const { + (void)allocator; + (void)ss; (void)astCallExpr; CheckAtomicClearArg(expr); return nullptr; @@ -1089,6 +1170,9 @@ ASTExpr *ASTParser::ParseBuiltinAtomicClear(MapleAllocator &allocator, const cla ASTExpr *ASTParser::ParseBuiltinAtomicTestAndSet(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss, ASTCallExpr &astCallExpr) const { + (void)allocator; + (void)ss; + (void)expr; astCallExpr.SetType(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u8)); return nullptr; } diff --git a/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp b/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp index 5e9a00f605183a1481483dd23553db59c7557911..153d8d04eb2a2374672e06ab2702b7fbb298adf2 100644 --- a/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp @@ -686,4 +686,12 @@ std::list ASTUOAddrOfLabelExprStmt::Emit2FEStmtImpl() const { } return stmts; } + +std::list ASTMemberExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + for (auto expr : exprs) { + (void)expr->Emit2FEExpr(stmts); + } + return stmts; +} } // namespace maple diff --git a/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp b/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp index e7c0a5ddc7eb619ba00d3d1c6c3e61a7eae6d38a..c934f6d02b81dd2112b409e611800d002290fe1d 100644 --- 
a/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp +++ b/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp @@ -339,7 +339,7 @@ void ASTFunc2FEHelper::SolveReturnAndArgTypesImpl(MapleAllocator &allocator) { retMIRType = returnAndArgTypeNames[1]; // skip funcType and returnType (void)argMIRTypes.insert(argMIRTypes.cbegin(), returnAndArgTypeNames.cbegin() + 2, returnAndArgTypeNames.cend()); - if (retMIRType->GetPrimType() == PTY_agg && retMIRType->GetSize() > 16) { + if (IsReturnInMemory(*retMIRType)) { firstArgRet = true; MIRType *retPointerType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*retMIRType); (void)argMIRTypes.insert(argMIRTypes.cbegin(), retPointerType); diff --git a/src/hir2mpl/ast_input/common/include/ast_compiler_component-inl.h b/src/hir2mpl/ast_input/common/include/ast_compiler_component-inl.h index 680d471f86dbe3ce6e376320c56f44b87aa48a2b..9ad96d8b39fc4247f4f12510f42380890aa202d7 100644 --- a/src/hir2mpl/ast_input/common/include/ast_compiler_component-inl.h +++ b/src/hir2mpl/ast_input/common/include/ast_compiler_component-inl.h @@ -87,25 +87,51 @@ void ASTCompilerComponent::ParseInputStructs() { template void ASTCompilerComponent::ParseInputFuncs() { - for (auto &astFunc : astInput.GetASTFuncs()) { - auto it = funcNameMap.find(astFunc->GetName()); - if (it != funcNameMap.cend()) { - // save the function with funcbody - if (it->second->HasCode() && !astFunc->HasCode()) { - continue; + if (!FEOptions::GetInstance().GetWPAA()) { + for (auto &astFunc : astInput.GetASTFuncs()) { + auto it = funcNameMap.find(astFunc->GetName()); + if (it != funcNameMap.cend()) { + // save the function with funcbody + if (it->second->HasCode() && !astFunc->HasCode()) { + continue; + } else { + (void)funcNameMap.erase(it); + auto itHelper = std::find_if(std::begin(globalFuncHelpers), std::end(globalFuncHelpers), + [&astFunc](FEInputMethodHelper *s) -> bool { + return (s->GetMethodName(false) == astFunc->GetName()); + }); + CHECK_FATAL(itHelper != 
globalFuncHelpers.end(), "astFunc not found"); + (void)globalFuncHelpers.erase(itHelper); + } + } + FEInputMethodHelper *funcHelper = allocator.GetMemPool()->New(allocator, *astFunc); + globalFuncHelpers.emplace_back(funcHelper); + funcNameMap.insert(std::make_pair(astFunc->GetName(), funcHelper)); + } + } else { + int i = 1; + for (auto &astFunc : astInput.GetASTFuncs()) { + FETimer timer; + std::stringstream ss; + ss << "ReadASTFunc[" << (i++) << "/" << astInput.GetASTFuncs().size() << "]: " << astFunc->GetName(); + timer.StartAndDump(ss.str()); + auto it = funcIdxMap.find(astFunc->GetName()); + if (it != funcIdxMap.cend()) { + // save the function with funcbody + if (it->second < globalLTOFuncHelpers.size() && globalLTOFuncHelpers[it->second]->HasCode() && + !astFunc->HasCode()) { + continue; + } else { + FEInputMethodHelper *funcHelper = allocator.GetMemPool()->New(allocator, *astFunc); + globalLTOFuncHelpers[it->second] = funcHelper; + } } else { - (void)funcNameMap.erase(it); - auto itHelper = std::find_if(std::begin(globalFuncHelpers), std::end(globalFuncHelpers), - [&astFunc](FEInputMethodHelper *s) -> bool { - return (s->GetMethodName(false) == astFunc->GetName()); - }); - CHECK_FATAL(itHelper != globalFuncHelpers.end(), "astFunc not found"); - (void)globalFuncHelpers.erase(itHelper); + funcIdxMap.insert(std::make_pair(astFunc->GetName(), globalLTOFuncHelpers.size())); + FEInputMethodHelper *funcHelper = allocator.GetMemPool()->New(allocator, *astFunc); + globalLTOFuncHelpers.push_back(funcHelper); } + timer.StopAndDumpTimeMS(ss.str()); } - FEInputMethodHelper *funcHelper = allocator.GetMemPool()->New(allocator, *astFunc); - globalFuncHelpers.emplace_back(funcHelper); - funcNameMap.insert(std::make_pair(astFunc->GetName(), funcHelper)); } } @@ -144,20 +170,38 @@ bool ASTCompilerComponent::ProcessFunctionSerialImpl() { timer.StartAndDump(ss.str()); bool success = true; FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process %s =====", ss.str().c_str()); - 
for (FEInputMethodHelper *methodHelper : globalFuncHelpers) { - ASSERT_NOT_NULL(methodHelper); - if (methodHelper->HasCode()) { - ASTFunc2FEHelper *astFuncHelper = static_cast(methodHelper); - std::unique_ptr feFunction = CreatFEFunction(methodHelper); - feFunction->SetSrcFileName(astFuncHelper->GetSrcFileName()); - bool processResult = feFunction->Process(); - if (!processResult) { - (void)compileFailedFEFunctions.insert(feFunction.get()); + if (!FEOptions::GetInstance().GetWPAA()) { + for (FEInputMethodHelper *methodHelper : globalFuncHelpers) { + ASSERT_NOT_NULL(methodHelper); + if (methodHelper->HasCode()) { + ASTFunc2FEHelper *astFuncHelper = static_cast(methodHelper); + std::unique_ptr feFunction = CreatFEFunction(methodHelper); + feFunction->SetSrcFileName(astFuncHelper->GetSrcFileName()); + bool processResult = feFunction->Process(); + if (!processResult) { + (void)compileFailedFEFunctions.insert(feFunction.get()); + } + success = success && processResult; + feFunction->Finish(); + } + funcSize++; + } + } else { + for (FEInputMethodHelper *methodHelper : globalLTOFuncHelpers) { + ASSERT_NOT_NULL(methodHelper); + if (methodHelper->HasCode()) { + ASTFunc2FEHelper *astFuncHelper = static_cast(methodHelper); + std::unique_ptr feFunction = CreatFEFunction(methodHelper); + feFunction->SetSrcFileName(astFuncHelper->GetSrcFileName()); + bool processResult = feFunction->Process(); + if (!processResult) { + (void)compileFailedFEFunctions.insert(feFunction.get()); + } + success = success && processResult; + feFunction->Finish(); } - success = success && processResult; - feFunction->Finish(); + funcSize++; } - funcSize++; } timer.StopAndDumpTimeMS(ss.str()); return success; diff --git a/src/hir2mpl/ast_input/common/include/ast_compiler_component.h b/src/hir2mpl/ast_input/common/include/ast_compiler_component.h index be00e27ab356d0c60bd1e70dfa29084da80d24b6..81308564c6655b75f00a7643c79dc634161ffc6d 100644 --- 
a/src/hir2mpl/ast_input/common/include/ast_compiler_component.h +++ b/src/hir2mpl/ast_input/common/include/ast_compiler_component.h @@ -51,6 +51,7 @@ class ASTCompilerComponent : public HIR2MPLCompilerComponent { ASTInput astInput; std::unordered_set structNameSet; std::unordered_map funcNameMap; + std::unordered_map funcIdxMap; void ParseInputStructs(); void ParseInputFuncs(); diff --git a/src/hir2mpl/ast_input/common/include/ast_decl.h b/src/hir2mpl/ast_input/common/include/ast_decl.h index 6603e63ea314e9d6b2b0813f3dc8599fae6392c6..8383bc369451b26119b7d7450d7edcd6cf8b5543 100644 --- a/src/hir2mpl/ast_input/common/include/ast_decl.h +++ b/src/hir2mpl/ast_input/common/include/ast_decl.h @@ -219,7 +219,7 @@ class ASTField : public ASTDecl { genAttrs = genAttrsIn; declKind = kASTField; } - ~ASTField() = default; + ~ASTField() override = default; bool IsAnonymousField() const { return isAnonymousField; } @@ -286,7 +286,7 @@ class ASTStruct : public ASTDecl { genAttrs = genAttrsIn; declKind = kASTStruct; } - ~ASTStruct() = default; + ~ASTStruct() override = default; std::string GetStructName(bool mapled) const; @@ -322,7 +322,9 @@ class ASTVar : public ASTDecl { genAttrs = genAttrsIn; declKind = kASTVar; } - virtual ~ASTVar() = default; + virtual ~ASTVar() override { + variableArrayExpr = nullptr; + } void SetInitExpr(ASTExpr *init) { initExpr = init; @@ -373,7 +375,7 @@ class ASTFileScopeAsm : public ASTDecl { : ASTDecl(srcFile, MapleString("", allocatorIn.GetMemPool()), MapleVector(allocatorIn.Adapter())) { declKind = kASTFileScopeAsm; } - ~ASTFileScopeAsm() = default; + ~ASTFileScopeAsm() override = default; void SetAsmStr(const std::string &str) { asmStr = str; @@ -395,7 +397,7 @@ class ASTEnumConstant : public ASTDecl { genAttrs = genAttrsIn; declKind = kASTEnumConstant; } - ~ASTEnumConstant() = default; + ~ASTEnumConstant() override = default; void SetValue(const IntVal &val); const IntVal &GetValue() const; @@ -413,7 +415,7 @@ class ASTEnumDecl : public ASTDecl 
{ genAttrs = genAttrsIn; declKind = kASTEnumDecl; } - ~ASTEnumDecl() = default; + ~ASTEnumDecl() override = default; void PushConstant(ASTEnumConstant *c) { consts.emplace_back(c); @@ -437,7 +439,9 @@ class ASTTypedefDecl : public ASTDecl { genAttrs = genAttrsIn; declKind = kASTTypedefDecl; } - ~ASTTypedefDecl() = default; + ~ASTTypedefDecl() override { + subTypedefDecl = nullptr; + } void SetSubTypedefDecl(ASTTypedefDecl *decl) { subTypedefDecl = decl; @@ -451,7 +455,6 @@ class ASTTypedefDecl : public ASTDecl { void GenerateInitStmtImpl(std::list &stmts) override; ASTTypedefDecl* subTypedefDecl = nullptr; - }; } // namespace maple #endif // HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_H diff --git a/src/hir2mpl/ast_input/common/include/ast_input-inl.h b/src/hir2mpl/ast_input/common/include/ast_input-inl.h index f32bc241d043a9e39dff26650f05084c027dfaf4..5e988d09af12b35536f6788156cc3a5eaf307e02 100644 --- a/src/hir2mpl/ast_input/common/include/ast_input-inl.h +++ b/src/hir2mpl/ast_input/common/include/ast_input-inl.h @@ -12,6 +12,7 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ +#include "fe_timer.h" #include "ast_input.h" #include "global_tables.h" #include "fe_macros.h" @@ -68,4 +69,4 @@ void ASTInput::RegisterFileInfo(const std::string &fileName) { module.PushFileInfoPair(MIRInfoPair(fileInfoIdx, fileNameIdx)); module.PushFileInfoIsString(true); } -} \ No newline at end of file +} diff --git a/src/hir2mpl/ast_input/common/include/ast_input.h b/src/hir2mpl/ast_input/common/include/ast_input.h index 37634ea8e5e7e58307eeeb452c7572336c0f7f46..cd0a4e9481f5514739f03f770e7af4b5403132c0 100644 --- a/src/hir2mpl/ast_input/common/include/ast_input.h +++ b/src/hir2mpl/ast_input/common/include/ast_input.h @@ -40,8 +40,8 @@ class ASTInput { } void AddASTStruct(ASTStruct *astStruct) { - auto itor = std::find(astStructs.begin(), astStructs.end(), astStruct); - if (itor == astStructs.end()) { + auto itor = std::find(astStructs.cbegin(), astStructs.cend(), astStruct); + if (itor == astStructs.cend()) { astStructs.emplace_back(astStruct); } } @@ -95,4 +95,4 @@ class ASTInput { MapleList astEnums; }; } -#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_INPUT_H \ No newline at end of file +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_INPUT_H diff --git a/src/hir2mpl/ast_input/common/src/ast_decl.cpp b/src/hir2mpl/ast_input/common/src/ast_decl.cpp index f59df61ed582e6d4092680caa8ae53ff322eac09..f57d818ce67475f11c201f67e78e1310848e82f8 100644 --- a/src/hir2mpl/ast_input/common/src/ast_decl.cpp +++ b/src/hir2mpl/ast_input/common/src/ast_decl.cpp @@ -282,7 +282,11 @@ std::list ASTFunc::EmitASTStmtToFEIR() const { if (!typeDesc[1]->IsScalarType()) { retType = PTY_i32; } - retExpr = FEIRBuilder::CreateExprConstAnyScalar(retType, static_cast(0)); + if (retType != PTY_f128) { + retExpr = FEIRBuilder::CreateExprConstAnyScalar(retType, static_cast(0)); + } else { + retExpr = FEIRBuilder::CreateExprConstAnyScalar(retType, {0, 0}); + } } UniqueFEIRStmt retStmt = std::make_unique(std::move(retExpr)); endLoc.column = 0; diff --git 
a/src/hir2mpl/bytecode_input/class/include/jbc_attr.h b/src/hir2mpl/bytecode_input/class/include/jbc_attr.h index ea6ccc9f535724d6bab36cea2fb6cd49bb74efcc..7ca68c4168c8bf971ab9675ed38d88e86db5e7cf 100644 --- a/src/hir2mpl/bytecode_input/class/include/jbc_attr.h +++ b/src/hir2mpl/bytecode_input/class/include/jbc_attr.h @@ -228,7 +228,7 @@ class JBCAttrCode : public JBCAttr { class JBCAttrStackMapTable : public JBCAttr { public: JBCAttrStackMapTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrStackMapTable() = default; + ~JBCAttrStackMapTable() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -245,7 +245,7 @@ class JBCAttrStackMapTable : public JBCAttr { class JBCAttrException : public JBCAttr { public: JBCAttrException(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrException() = default; + ~JBCAttrException() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -262,7 +262,7 @@ class JBCAttrException : public JBCAttr { class JBCAttrInnerClass : public JBCAttr { public: JBCAttrInnerClass(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrInnerClass() = default; + ~JBCAttrInnerClass() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -305,7 +305,7 @@ class JBCAttrEnclosingMethod : public JBCAttr { class JBCAttrSynthetic : public JBCAttr { public: JBCAttrSynthetic(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrSynthetic() = default; + ~JBCAttrSynthetic() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -374,7 +374,7 @@ class JBCAttrSourceDebugEx : public JBCAttr { class JBCAttrLineNumberTable : public JBCAttr { public: 
JBCAttrLineNumberTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrLineNumberTable() = default; + ~JBCAttrLineNumberTable() override = default; const MapleVector &GetLineNums() const { return lineNums; } @@ -394,7 +394,7 @@ class JBCAttrLineNumberTable : public JBCAttr { class JBCAttrLocalVariableTable : public JBCAttr { public: JBCAttrLocalVariableTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrLocalVariableTable() = default; + ~JBCAttrLocalVariableTable() override = default; const MapleVector &GetLocalVarInfos() const { return localVarInfos; } @@ -414,7 +414,7 @@ class JBCAttrLocalVariableTable : public JBCAttr { class JBCAttrLocalVariableTypeTable : public JBCAttr { public: JBCAttrLocalVariableTypeTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrLocalVariableTypeTable() = default; + ~JBCAttrLocalVariableTypeTable() override = default; const MapleVector &GetLocalVarTypeInfos() const { return localVarTypeInfos; } @@ -434,7 +434,7 @@ class JBCAttrLocalVariableTypeTable : public JBCAttr { class JBCAttrDeprecated : public JBCAttr { public: JBCAttrDeprecated(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrDeprecated() = default; + ~JBCAttrDeprecated() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -448,7 +448,7 @@ class JBCAttrDeprecated : public JBCAttr { class JBCAttrRTAnnotations : public JBCAttr { public: JBCAttrRTAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, uint32 length); - virtual ~JBCAttrRTAnnotations() = default; + virtual ~JBCAttrRTAnnotations() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -462,13 +462,13 @@ class JBCAttrRTAnnotations : public JBCAttr { class JBCAttrRTVisAnnotations : public JBCAttrRTAnnotations { public: 
JBCAttrRTVisAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrRTVisAnnotations() = default; + ~JBCAttrRTVisAnnotations() override = default; }; class JBCAttrRTInvisAnnotations : public JBCAttrRTAnnotations { public: JBCAttrRTInvisAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrRTInvisAnnotations() = default; + ~JBCAttrRTInvisAnnotations() override = default; }; // RuntimeParamAnnoations Attribute @@ -477,7 +477,7 @@ class JBCAttrRTInvisAnnotations : public JBCAttrRTAnnotations { class JBCAttrRTParamAnnotations : public JBCAttr { public: JBCAttrRTParamAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, uint32 length); - virtual ~JBCAttrRTParamAnnotations() = default; + virtual ~JBCAttrRTParamAnnotations() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -506,7 +506,7 @@ class JBCAttrRTInvisParamAnnotations : public JBCAttrRTParamAnnotations { class JBCAttrRTTypeAnnotations : public JBCAttr { public: JBCAttrRTTypeAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, uint32 length); - virtual ~JBCAttrRTTypeAnnotations() = default; + virtual ~JBCAttrRTTypeAnnotations() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -520,13 +520,13 @@ class JBCAttrRTTypeAnnotations : public JBCAttr { class JBCAttrRTVisTypeAnnotations : public JBCAttrRTTypeAnnotations { public: JBCAttrRTVisTypeAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrRTVisTypeAnnotations() = default; + ~JBCAttrRTVisTypeAnnotations() override = default; }; class JBCAttrRTInvisTypeAnnotations : public JBCAttrRTTypeAnnotations { public: JBCAttrRTInvisTypeAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrRTInvisTypeAnnotations() = default; + 
~JBCAttrRTInvisTypeAnnotations() override = default; }; // AnnotationDefault Attribute @@ -550,7 +550,7 @@ class JBCAttrAnnotationDefault : public JBCAttr { class JBCAttrBootstrapMethods : public JBCAttr { public: JBCAttrBootstrapMethods(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrBootstrapMethods() = default; + ~JBCAttrBootstrapMethods() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; @@ -567,7 +567,7 @@ class JBCAttrBootstrapMethods : public JBCAttr { class JBCAttrMethodParameters : public JBCAttr { public: JBCAttrMethodParameters(MapleAllocator &allocator, uint16 nameIdx, uint32 length); - ~JBCAttrMethodParameters() = default; + ~JBCAttrMethodParameters() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_attr_item.h b/src/hir2mpl/bytecode_input/class/include/jbc_attr_item.h index e94c7d12c58877e5f036c4dc968083bb0cf2e75a..d6afc3bef7f00361e8a2d2e1faa68fb4dc6143bf 100644 --- a/src/hir2mpl/bytecode_input/class/include/jbc_attr_item.h +++ b/src/hir2mpl/bytecode_input/class/include/jbc_attr_item.h @@ -146,7 +146,7 @@ enum StackMapFrameItemTag : uint8 { class StackMapFrameItem : public JBCAttrItem { public: StackMapFrameItem(uint8 frameTypeIn, StackMapFrameItemTag tagIn); - virtual ~StackMapFrameItem() = default; + virtual ~StackMapFrameItem() override = default; static std::map InitTagName(); static std::string TagName(StackMapFrameItemTag tag); static StackMapFrameItemTag FrameType2Tag(uint8 frameType); @@ -161,7 +161,7 @@ class StackMapFrameItem : public JBCAttrItem { class StackMapFrameItemSame : public StackMapFrameItem { public: explicit StackMapFrameItemSame(uint8 frameType); - ~StackMapFrameItemSame() = default; + ~StackMapFrameItemSame() override = default; protected: bool ParseFileImpl(MapleAllocator 
&allocator, BasicIORead &io) override; @@ -201,7 +201,7 @@ class StackMapFrameItemSameLocals1Ex : public StackMapFrameItem { class StackMapFrameItemChop : public StackMapFrameItem { public: explicit StackMapFrameItemChop(uint8 frameType); - ~StackMapFrameItemChop() = default; + ~StackMapFrameItemChop() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -215,7 +215,7 @@ class StackMapFrameItemChop : public StackMapFrameItem { class StackMapFrameItemSameEx : public StackMapFrameItem { public: explicit StackMapFrameItemSameEx(uint8 frameType); - ~StackMapFrameItemSameEx() = default; + ~StackMapFrameItemSameEx() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -229,7 +229,7 @@ class StackMapFrameItemSameEx : public StackMapFrameItem { class StackMapFrameItemAppend : public StackMapFrameItem { public: StackMapFrameItemAppend(MapleAllocator &allocator, uint8 frameType); - ~StackMapFrameItemAppend() = default; + ~StackMapFrameItemAppend() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -244,7 +244,7 @@ class StackMapFrameItemAppend : public StackMapFrameItem { class StackMapFrameItemFull : public StackMapFrameItem { public: StackMapFrameItemFull(MapleAllocator &allocator, uint8 frameType); - ~StackMapFrameItemFull() = default; + ~StackMapFrameItemFull() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -297,7 +297,7 @@ class InnerClassItem : public JBCAttrItem { class LineNumberTableItem : public JBCAttrItem { public: LineNumberTableItem(); - ~LineNumberTableItem() = default; + ~LineNumberTableItem() override = default; uint16 GetStartPC() const { return startPC; } @@ -423,7 +423,7 @@ class LocalVariableTypeTableItem : public JBCAttrItem { class ElementValueItem : public JBCAttrItem { public: ElementValueItem(ElementValueKind kindIn, char tagIn); 
- virtual ~ElementValueItem() = default; + virtual ~ElementValueItem() override = default; static std::map InitTagKindMap(); static std::map InitKindNameMap(); static std::string KindName(ElementValueKind kind); @@ -499,7 +499,7 @@ class Annotation : public JBCAttrItem { class ParamAnnotationItem : public JBCAttrItem { public: explicit ParamAnnotationItem(MapleAllocator &allocator); - ~ParamAnnotationItem() = default; + ~ParamAnnotationItem() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -634,7 +634,7 @@ class ElementValueAnnotation : public ElementValueItem { class ElementValueArray : public ElementValueItem { public: explicit ElementValueArray(MapleAllocator &allocator); - ~ElementValueArray() = default; + ~ElementValueArray() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -690,7 +690,7 @@ enum TargetInfoItemType : uint8 { class TargetInfoItem : public JBCAttrItem { public: explicit TargetInfoItem(TargetInfoItemTag tagIn); - virtual ~TargetInfoItem() = default; + virtual ~TargetInfoItem() override = default; static std::map InitTypeTagMap(); static TargetInfoItemTag TargetType2Tag(TargetInfoItemType type); static TargetInfoItem *NewItem(MapleAllocator &allocator, BasicIORead &io, TargetInfoItemType targetType); @@ -703,7 +703,7 @@ class TargetInfoItem : public JBCAttrItem { class TargetTypeParam : public TargetInfoItem { public: TargetTypeParam(); - ~TargetTypeParam() = default; + ~TargetTypeParam() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -717,7 +717,7 @@ class TargetTypeParam : public TargetInfoItem { class TargetSuperType : public TargetInfoItem { public: TargetSuperType(); - ~TargetSuperType() = default; + ~TargetSuperType() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -731,7 +731,7 @@ class TargetSuperType : 
public TargetInfoItem { class TargetTypeParamBound : public TargetInfoItem { public: TargetTypeParamBound(); - ~TargetTypeParamBound() = default; + ~TargetTypeParamBound() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -746,7 +746,7 @@ class TargetTypeParamBound : public TargetInfoItem { class TargetEmpty : public TargetInfoItem { public: TargetEmpty(); - ~TargetEmpty() = default; + ~TargetEmpty() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -757,7 +757,7 @@ class TargetEmpty : public TargetInfoItem { class TargetFormalParam : public TargetInfoItem { public: TargetFormalParam(); - ~TargetFormalParam() = default; + ~TargetFormalParam() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -771,7 +771,7 @@ class TargetFormalParam : public TargetInfoItem { class TargetThrows : public TargetInfoItem { public: TargetThrows(); - ~TargetThrows() = default; + ~TargetThrows() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -785,7 +785,7 @@ class TargetThrows : public TargetInfoItem { class TargetLocalVarItem : public TargetInfoItem { public: TargetLocalVarItem(); - ~TargetLocalVarItem() = default; + ~TargetLocalVarItem() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -801,7 +801,7 @@ class TargetLocalVarItem : public TargetInfoItem { class TargetLocalVar : public TargetInfoItem { public: explicit TargetLocalVar(MapleAllocator &allocator); - ~TargetLocalVar() = default; + ~TargetLocalVar() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -816,7 +816,7 @@ class TargetLocalVar : public TargetInfoItem { class TargetCatch : public TargetInfoItem { public: TargetCatch(); - ~TargetCatch() = default; + ~TargetCatch() override = default; 
protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -830,7 +830,7 @@ class TargetCatch : public TargetInfoItem { class TargetOffset : public TargetInfoItem { public: TargetOffset(); - ~TargetOffset() = default; + ~TargetOffset() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -844,7 +844,7 @@ class TargetOffset : public TargetInfoItem { class TargetTypeArg : public TargetInfoItem { public: TargetTypeArg(); - ~TargetTypeArg() = default; + ~TargetTypeArg() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -859,7 +859,7 @@ class TargetTypeArg : public TargetInfoItem { class TypePathItem : public JBCAttrItem { public: TypePathItem(); - ~TypePathItem() = default; + ~TypePathItem() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; @@ -876,7 +876,7 @@ class TypePathItem : public JBCAttrItem { class TypePath : public JBCAttrItem { public: explicit TypePath(MapleAllocator &allocator); - ~TypePath() = default; + ~TypePath() override = default; protected: bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class.h b/src/hir2mpl/bytecode_input/class/include/jbc_class.h index 39e20721cab501ead4c94a95fe9072a88112108f..de3e136ca7e4542a7ca87031f51191b37a6e7edb 100644 --- a/src/hir2mpl/bytecode_input/class/include/jbc_class.h +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class.h @@ -96,7 +96,7 @@ class JBCClassElem { class JBCClassField : public JBCClassElem { public: JBCClassField(MapleAllocator &allocator, const JBCClass &argKlass); - ~JBCClassField() = default; + ~JBCClassField() override = default; protected: SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) override; @@ -105,7 +105,7 @@ class JBCClassField : public JBCClassElem { class 
JBCClassMethod : public JBCClassElem { public: JBCClassMethod(MapleAllocator &allocator, const JBCClass &argKlass); - ~JBCClassMethod() = default; + ~JBCClassMethod() override = default; bool PreProcess(); const JBCAttrCode *GetCode() const; bool IsVirtual() const; @@ -118,7 +118,7 @@ class JBCClassMethod : public JBCClassElem { class JBCClass { public: - JBCClass(MapleAllocator &allocatorIn); + explicit JBCClass(MapleAllocator &allocatorIn); LLT_MOCK_TARGET ~JBCClass() = default; bool ParseFile(BasicIORead &io); diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_access.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_access.h index d38ba68c4e20e8760d230b020689f7f367c9510f..acf0cffed0d50f9c9086b916249cbfcdec1c0e48 100644 --- a/src/hir2mpl/bytecode_input/class/include/jbc_class_access.h +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_access.h @@ -14,6 +14,7 @@ */ #ifndef HIR2MPL_INCLUDE_JBC_CLASS_ACCESS_H #define HIR2MPL_INCLUDE_JBC_CLASS_ACCESS_H +#include "types_def.h" namespace maple { namespace jbc { diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_const.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_const.h index e15f6648e1ee93f0f5d8619f4b2d609a0e5e3a37..facd4706e57aa483ed09204f59a5ed956120592d 100644 --- a/src/hir2mpl/bytecode_input/class/include/jbc_class_const.h +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_const.h @@ -127,7 +127,7 @@ class JBCConstUTF8 : public JBCConst { public: JBCConstUTF8(MapleAllocator &alloc, JBCConstTag t); JBCConstUTF8(MapleAllocator &alloc, JBCConstTag t, const std::string &argStr); - ~JBCConstUTF8() = default; + ~JBCConstUTF8() override = default; const std::string GetString() const { return MapleStringToStd(str); @@ -153,7 +153,7 @@ class JBCConst4Byte : public JBCConst { JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t); JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t, int32 arg); JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t, float arg); - ~JBCConst4Byte() 
= default; + ~JBCConst4Byte() override = default; int32 GetInt32() const { return value.ivalue; } @@ -184,7 +184,7 @@ class JBCConst8Byte : public JBCConst { JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t); JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t, int64 arg); JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t, double arg); - ~JBCConst8Byte() = default; + ~JBCConst8Byte() override = default; int64 GetInt64() const { return value.lvalue; @@ -258,7 +258,7 @@ class JBCConstString : public JBCConst { public: JBCConstString(MapleAllocator &alloc, JBCConstTag t); JBCConstString(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argStringIdx); - ~JBCConstString() = default; + ~JBCConstString() override = default; void SetValue(const GStrIdx &argStrIdx) { strIdx = argStrIdx; str = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_const_pool.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_const_pool.h index 0acdb08a6e2c2b8292631939b4f0204206626b66..9034a53afe3929309d8aa94f90c7788ae64669ea 100644 --- a/src/hir2mpl/bytecode_input/class/include/jbc_class_const_pool.h +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_const_pool.h @@ -20,7 +20,7 @@ namespace maple { namespace jbc { class JBCConstPool { public: - JBCConstPool(MapleAllocator &alloc); + explicit JBCConstPool(MapleAllocator &alloc); ~JBCConstPool() = default; uint16 InsertConst(JBCConst &objConst); void InsertConstDummyForWide(); diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_opcode.h b/src/hir2mpl/bytecode_input/class/include/jbc_opcode.h index d10e5e43654ea7ce48b5836809027bc27c1f8888..d8539b1c18ddd0638e9508d903aaff2e0658cd3d 100644 --- a/src/hir2mpl/bytecode_input/class/include/jbc_opcode.h +++ b/src/hir2mpl/bytecode_input/class/include/jbc_opcode.h @@ -15,7 +15,6 @@ #ifndef HIR2MPL_INCLUDE_JBC_OPCODE_H #define HIR2MPL_INCLUDE_JBC_OPCODE_H -#include #include #include #include @@ -29,7 +28,7 @@ namespace 
maple { namespace jbc { enum JBCOpcode : uint8 { #define JBC_OP(op, value, type, name, flag) \ - kOp##op = value, + kOp##op = (value), #include "jbc_opcode.def" #undef JBC_OP }; @@ -192,7 +191,7 @@ class JBCOp { class JBCOpUnused : public JBCOp { public: JBCOpUnused(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpUnused() = default; + ~JBCOpUnused() override = default; protected: bool ParseFileImpl(BasicIORead &io) override; @@ -201,7 +200,7 @@ class JBCOpUnused : public JBCOp { class JBCOpReversed : public JBCOp { public: JBCOpReversed(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpReversed() = default; + ~JBCOpReversed() override = default; protected: bool ParseFileImpl(BasicIORead &io) override; @@ -210,7 +209,7 @@ class JBCOpReversed : public JBCOp { class JBCOpDefault : public JBCOp { public: JBCOpDefault(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpDefault() = default; + ~JBCOpDefault() override = default; protected: bool ParseFileImpl(BasicIORead &io) override; @@ -247,7 +246,7 @@ class JBCOpDefault : public JBCOp { class JBCOpConst : public JBCOp { public: JBCOpConst(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpConst() = default; + ~JBCOpConst() override = default; std::string DumpBiPush() const; std::string DumpSiPush() const; std::string DumpLdc(const JBCConstPool &constPool) const; @@ -300,7 +299,7 @@ class JBCOpConst : public JBCOp { class JBCOpSlotOpr : public JBCOp { public: JBCOpSlotOpr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpSlotOpr() = default; + ~JBCOpSlotOpr() override = default; bool IsAStore() const; uint16 GetSlotIdx() const { return slotIdx; @@ -344,7 +343,7 @@ class JBCOpSlotOpr : public JBCOp { class JBCOpMathInc : public JBCOp { public: JBCOpMathInc(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - 
~JBCOpMathInc() = default; + ~JBCOpMathInc() override = default; uint16 GetIndex() const { return index; } @@ -377,7 +376,7 @@ class JBCOpMathInc : public JBCOp { class JBCOpBranch : public JBCOp { public: JBCOpBranch(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpBranch() = default; + ~JBCOpBranch() override = default; uint32 GetTarget() const { return target; } @@ -401,7 +400,7 @@ class JBCOpBranch : public JBCOp { class JBCOpGoto : public JBCOp { public: JBCOpGoto(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpGoto() = default; + ~JBCOpGoto() override = default; uint32 GetTarget() const { return target; } @@ -421,7 +420,7 @@ class JBCOpGoto : public JBCOp { class JBCOpSwitch : public JBCOp { public: JBCOpSwitch(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpSwitch() = default; + ~JBCOpSwitch() override = default; const MapleMap &GetTargets() const { return targets; } @@ -458,7 +457,7 @@ class JBCOpSwitch : public JBCOp { class JBCOpFieldOpr : public JBCOp { public: JBCOpFieldOpr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpFieldOpr() = default; + ~JBCOpFieldOpr() override = default; std::string GetFieldType(const JBCConstPool &constPool) const; uint16 GetFieldIdx() const { return fieldIdx; @@ -486,7 +485,7 @@ class JBCOpFieldOpr : public JBCOp { class JBCOpInvoke : public JBCOp { public: JBCOpInvoke(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpInvoke() = default; + ~JBCOpInvoke() override = default; std::string GetMethodDescription(const JBCConstPool &constPool) const; uint16 GetMethodIdx() const { return methodIdx; @@ -520,7 +519,7 @@ class JBCOpInvoke : public JBCOp { class JBCOpJsr : public JBCOp { public: JBCOpJsr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpJsr() = default; + ~JBCOpJsr() override = default; uint32 
GetTarget() const { return target; } @@ -559,7 +558,7 @@ class JBCOpJsr : public JBCOp { class JBCOpRet : public JBCOp { public: JBCOpRet(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpRet() = default; + ~JBCOpRet() override = default; uint16 GetIndex() const { return index; } @@ -591,7 +590,7 @@ class JBCOpNew : public JBCOp { }; JBCOpNew(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpNew() = default; + ~JBCOpNew() override = default; GStrIdx GetTypeNameIdx(const JBCConstPool &constPool) const; std::string GetTypeName(const JBCConstPool &constPool) const; @@ -631,7 +630,7 @@ class JBCOpNew : public JBCOp { class JBCOpMultiANewArray : public JBCOp { public: JBCOpMultiANewArray(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpMultiANewArray() = default; + ~JBCOpMultiANewArray() override = default; uint16 GetRefTypeIdx() const { return refTypeIdx; } @@ -663,7 +662,7 @@ class JBCOpMultiANewArray : public JBCOp { class JBCOpTypeCheck : public JBCOp { public: JBCOpTypeCheck(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); - ~JBCOpTypeCheck() = default; + ~JBCOpTypeCheck() override = default; uint16 GetTypeIdx() const { return typeIdx; } diff --git a/src/hir2mpl/bytecode_input/common/include/bc_compiler_component-inl.h b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component-inl.h index 4cc8a3fa7c6cd26bd7bd96bede8cc21af1549d75..bd74c9359c2387c7eb275cae3bb79b8f26953571 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_compiler_component-inl.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component-inl.h @@ -95,7 +95,7 @@ bool BCCompilerComponent::LoadOnDemandTypeImpl() { template bool BCCompilerComponent::LoadOnDemandType2BCClass(const std::unordered_set &allDepsSet, const std::unordered_set &allDefsSet, - std::list> &klassList) { + std::list> &klassList) const { FETimer timer; 
timer.StartAndDump("LoadOnDemandType2BCClass::Open dep dexfiles"); bool success = true; diff --git a/src/hir2mpl/bytecode_input/common/include/bc_compiler_component.h b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component.h index b243f570e03f64974d57be3663b74416d8b9bfec..0f7048c1b3230fabdba6855b84cded201142b3bd 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_compiler_component.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component.h @@ -16,7 +16,7 @@ #define HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_H #include "fe_macros.h" #include "hir2mpl_compiler_component.h" -#include "bc_input.h" +#include "bc_input-inl.h" namespace maple { namespace bc { @@ -39,7 +39,7 @@ class BCCompilerComponent : public HIR2MPLCompilerComponent { private: bool LoadOnDemandType2BCClass(const std::unordered_set &allDepsSet, const std::unordered_set &allDefsSet, - std::list> &klassList); + std::list> &klassList) const; bool LoadOnDemandBCClass2FEClass(const std::list> &klassList, std::list> &structHelpers, bool isEmitDepsMplt); @@ -50,5 +50,4 @@ class BCCompilerComponent : public HIR2MPLCompilerComponent { }; // class BCCompilerComponent } // namespace bc } // namespace maple -#include "bc_compiler_component-inl.h" #endif // HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_function.h b/src/hir2mpl/bytecode_input/common/include/bc_function.h index dedebf5387aeb9a1a17328c4820314f5a31944c0..c75f58b6a76c5afa6898e8e2a630a4d81f898acd 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_function.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_function.h @@ -23,7 +23,7 @@ class BCFunction : public FEFunction { public: BCFunction(const BCClassMethod2FEHelper &argMethodHelper, MIRFunction &mirFunc, const std::unique_ptr &argPhaseResultTotal); - virtual ~BCFunction() = default; + virtual ~BCFunction() override = default; protected: bool GenerateGeneralStmt(const std::string &phaseName) 
override { @@ -72,4 +72,4 @@ class BCFunction : public FEFunction { }; } // namespace bc } // namespace maple -#endif \ No newline at end of file +#endif diff --git a/src/hir2mpl/bytecode_input/common/include/bc_input-inl.h b/src/hir2mpl/bytecode_input/common/include/bc_input-inl.h index 8a211e390462fc8ecb5d6ecdb0d55f713e693eb2..13bf19db65c68274ce8c140071188d751f73e500 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_input-inl.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_input-inl.h @@ -14,8 +14,8 @@ */ #ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_INL_H_ #define HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_INL_H_ -#include "bc_input.h" #include +#include "bc_input.h" #include "dex_parser.h" #include "hir2mpl_env.h" @@ -40,7 +40,7 @@ bool BCInput::ReadBCFile(uint32 index, const std::string &fileName, const std ERR(kLncErr, "Verify file failed in : %s.", fileName.c_str()); return false; } - if (bcParser->RetrieveClasses(bcClasses) == false) { + if (!(bcParser->RetrieveClasses(bcClasses))) { ERR(kLncErr, "Retrieve classes failed in : %s.", fileName.c_str()); return false; } @@ -107,7 +107,7 @@ template bool BCInput::CollectAllDepTypeNamesOnAllBCFiles(std::unordered_set &allDepSet) { for (auto &item : bcParserMap) { std::unordered_set depSet; - if (item.second->CollectAllDepTypeNames(depSet) == false) { + if (!(item.second->CollectAllDepTypeNames(depSet))) { ERR(kLncErr, "Collect all dependent typenames failed in : %s.", item.first.c_str()); return false; } @@ -153,7 +153,7 @@ template bool BCInput::CollectClassNamesOnAllBCFiles(std::unordered_set &allClassSet) { for (auto &item : bcParserMap) { std::unordered_set classSet; - if (item.second->CollectAllClassNames(classSet) == false) { + if (!(item.second->CollectAllClassNames(classSet))) { ERR(kLncErr, "Collect all class names failed in : %s.", item.first.c_str()); return false; } diff --git a/src/hir2mpl/bytecode_input/common/include/bc_input.h b/src/hir2mpl/bytecode_input/common/include/bc_input.h index 
21d657fc3a5de219f8212f22e7e8252d394159f3..08938d030e025cc61a2acc0a1f98501373ec3938 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_input.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_input.h @@ -19,7 +19,7 @@ #include #include "mir_module.h" #include "bc_class.h" -#include "bc_parser.h" +#include "bc_parser-inl.h" namespace maple { namespace bc { @@ -41,7 +41,7 @@ class BCInput { private: bool CollectAllDepTypeNamesOnAllBCFiles(std::unordered_set &allDepSet); - bool CollectMethodDepTypeNamesOnAllBCFiles(std::unordered_set &allDepSet); + bool CollectMethodDepTypeNamesOnAllBCFiles(std::unordered_set &depSet); std::map> bcParserMap; // map MIRModule &module; @@ -51,5 +51,4 @@ class BCInput { }; } } -#include "bc_input-inl.h" -#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_H \ No newline at end of file +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_parser.h b/src/hir2mpl/bytecode_input/common/include/bc_parser.h index 25a5eb000594f13583850f8a34797020d3438270..4cc5f4177012728e16791b91c83b952e8c02e7a5 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_parser.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_parser.h @@ -29,19 +29,20 @@ template class BCParser : public BCParserBase { public: BCParser(uint32 fileIdxIn, const std::string &fileNameIn, const std::list &classNamesIn); - ~BCParser() = default; + ~BCParser() override = default; protected: - bool OpenFileImpl(); - uint32 CalculateCheckSumImpl(const uint8 *data, uint32 size) = 0; - bool ParseHeaderImpl() = 0; - bool VerifyImpl() = 0; - virtual bool RetrieveIndexTables() = 0; - bool RetrieveUserSpecifiedClasses(std::list> &klasses) = 0; - bool RetrieveAllClasses(std::list> &klasses) = 0; - bool CollectAllDepTypeNamesImpl(std::unordered_set &depSet) = 0; - bool CollectMethodDepTypeNamesImpl(std::unordered_set &depSet, BCClassMethod &bcMethod) const = 0; - bool CollectAllClassNamesImpl(std::unordered_set &classSet) = 0; + bool 
OpenFileImpl() override; + uint32 CalculateCheckSumImpl(const uint8 *data, uint32 size) override = 0; + bool ParseHeaderImpl() override = 0; + bool VerifyImpl() override = 0; + virtual bool RetrieveIndexTables() override = 0; + bool RetrieveUserSpecifiedClasses(std::list> &klasses) override = 0; + bool RetrieveAllClasses(std::list> &klasses) override = 0; + bool CollectAllDepTypeNamesImpl(std::unordered_set &depSet) override = 0; + bool CollectMethodDepTypeNamesImpl(std::unordered_set &depSet, BCClassMethod &bcMethod) + const override = 0; + bool CollectAllClassNamesImpl(std::unordered_set &classSet) override = 0; std::unique_ptr reader; }; @@ -56,7 +57,7 @@ class MethodProcessTask : public MplTask { klass(argKlass), idxPair(argIdxPair), parser(argParser) {} - virtual ~MethodProcessTask() = default; + virtual ~MethodProcessTask() override = default; protected: int RunImpl(MplTaskParam *param) override; @@ -93,5 +94,4 @@ class MethodProcessSchedular : public MplScheduler { }; } // namespace bc } // namespace maple -#include "bc_parser-inl.h" #endif // MPL_FE_BC_INPUT_BC_PARSER_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_pragma.h b/src/hir2mpl/bytecode_input/common/include/bc_pragma.h index cb33222911e76a5e5cf1cde05ddf86bda2f0a07e..581c77ea2d7e2e4ffd2cbdf347c049036f437fa1 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_pragma.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_pragma.h @@ -14,8 +14,6 @@ */ #ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_PRAGMA_H #define HIR2MPL_BC_INPUT_INCLUDE_BC_PRAGMA_H -#include -#include #include #include "mir_module.h" #include "mir_pragma.h" diff --git a/src/hir2mpl/bytecode_input/common/include/bc_util.h b/src/hir2mpl/bytecode_input/common/include/bc_util.h index 43ba35277f48bb07c5ba9b355515bf349ecd96f6..7ca26eb439634e13d83de9ce27fa23a8c021133e 100644 --- a/src/hir2mpl/bytecode_input/common/include/bc_util.h +++ b/src/hir2mpl/bytecode_input/common/include/bc_util.h @@ -15,7 +15,6 @@ #ifndef 
HIR2MPL_BC_INPUT_INCLUDE_BC_UTIL_H #define HIR2MPL_BC_INPUT_INCLUDE_BC_UTIL_H #include -#include #include #include #include "types_def.h" diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_class.h b/src/hir2mpl/bytecode_input/dex/include/dex_class.h index 98829312fa78fcc81e02109ee55c1ac1ffdc8736..198a605c85dcda4d277e47c28f0be50bbfbf4888 100644 --- a/src/hir2mpl/bytecode_input/dex/include/dex_class.h +++ b/src/hir2mpl/bytecode_input/dex/include/dex_class.h @@ -30,7 +30,7 @@ class DexClassField : public BCClassField { : BCClassField(klassIn, acc, nameIn, descIn), itemIdx(itemIdxIn), idx(idxIn) {} - ~DexClassField() = default; + ~DexClassField() override = default; protected: uint32 GetItemIdxImpl() const override; @@ -61,7 +61,7 @@ class DexClassMethod : public BCClassMethod { : BCClassMethod(klassIn, acc, isVirtualIn, nameIn, descIn), itemIdx(itemIdxIn), idx(idxIn) {} - ~DexClassMethod() = default; + ~DexClassMethod() override = default; protected: uint32 GetItemIdxImpl() const override; diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_class2fe_helper.h b/src/hir2mpl/bytecode_input/dex/include/dex_class2fe_helper.h index 1980671e617a0b466bd625dc741fddfd1e5bfa48..df57fbdd06f6cfbd99d8bd0eb297d67b89a0b8f8 100644 --- a/src/hir2mpl/bytecode_input/dex/include/dex_class2fe_helper.h +++ b/src/hir2mpl/bytecode_input/dex/include/dex_class2fe_helper.h @@ -21,7 +21,7 @@ namespace bc { class DexClass2FEHelper : public BCClass2FEHelper { public: DexClass2FEHelper(MapleAllocator &allocator, bc::BCClass &klassIn); - ~DexClass2FEHelper() = default; + ~DexClass2FEHelper() override = default; protected: void InitFieldHelpersImpl() override; @@ -33,7 +33,7 @@ class DexClassField2FEHelper : public BCClassField2FEHelper { public: DexClassField2FEHelper(MapleAllocator &allocator, const BCClassField &fieldIn) : BCClassField2FEHelper(allocator, fieldIn) {} - ~DexClassField2FEHelper() = default; + ~DexClassField2FEHelper() override = default; protected: FieldAttrs 
AccessFlag2AttributeImpl(uint32 accessFlag) const override; @@ -43,7 +43,7 @@ class DexClassMethod2FEHelper : public BCClassMethod2FEHelper { public: DexClassMethod2FEHelper(MapleAllocator &allocator, std::unique_ptr &methodIn) : BCClassMethod2FEHelper(allocator, methodIn) {} - ~DexClassMethod2FEHelper() = default; + ~DexClassMethod2FEHelper() override = default; protected: FuncAttrs GetAttrsImpl() const override; @@ -54,4 +54,4 @@ class DexClassMethod2FEHelper : public BCClassMethod2FEHelper { }; } } -#endif // HIR2MPL_BC_INPUT_INCLUDE_DEX_CLASS2FE_HELPER_H \ No newline at end of file +#endif // HIR2MPL_BC_INPUT_INCLUDE_DEX_CLASS2FE_HELPER_H diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_op.h b/src/hir2mpl/bytecode_input/dex/include/dex_op.h index 4a98e2425a1d43ee8822b4cc182f8b4a2f11ee50..e6689a5f4a8bf02613eb335909f0302197e7f7cd 100644 --- a/src/hir2mpl/bytecode_input/dex/include/dex_op.h +++ b/src/hir2mpl/bytecode_input/dex/include/dex_op.h @@ -651,7 +651,7 @@ class DexOpBinaryOpLit : public DexOp { }; // 0xfa ~ 0xfb -class DexOpInvokePolymorphic: public DexOpInvoke { +class DexOpInvokePolymorphic : public DexOpInvoke { public: DexOpInvokePolymorphic(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); ~DexOpInvokePolymorphic() = default; diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_parser.h b/src/hir2mpl/bytecode_input/dex/include/dex_parser.h index 180c69f23e988a9be63a8c2d4fb2da860228d782..dee0a4da8fb8c7304bb8579136c0d1ca88669ea9 100644 --- a/src/hir2mpl/bytecode_input/dex/include/dex_parser.h +++ b/src/hir2mpl/bytecode_input/dex/include/dex_parser.h @@ -14,7 +14,7 @@ */ #ifndef MPL_FE_DEX_INPUT_DEX_PARSER_H #define MPL_FE_DEX_INPUT_DEX_PARSER_H -#include "bc_parser.h" +#include "bc_parser-inl.h" #include "dex_reader.h" #include "dex_class.h" #include "types_def.h" diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_pragma.h b/src/hir2mpl/bytecode_input/dex/include/dex_pragma.h index 
d6d0b00accb6aa96a10c7e41d4e5f597aff557f3..b871821785696ae1f8f7e54a88c7948adbdafccd 100644 --- a/src/hir2mpl/bytecode_input/dex/include/dex_pragma.h +++ b/src/hir2mpl/bytecode_input/dex/include/dex_pragma.h @@ -39,8 +39,9 @@ class DexBCAnnotationElement { static uint64 GetUVal(const uint8 **data, uint8 len) { // get value, max 8 bytes, little-endian uint64 val = 0; + constexpr uint8 bitWidth = 3; for (uint8 j = 0; j <= len; j++) { - val |= (static_cast(*(*data)++) << (j << 3)); + val |= (static_cast(*(*data)++) << (j << bitWidth)); } return val; } @@ -279,4 +280,4 @@ class DexBCAnnotationsDirectory : public BCAnnotationsDirectory { }; } // namespace bc } // namespace maple -#endif // HIR2MPL_BC_INPUT_INCLUDE_DEX_PRAGMA_H \ No newline at end of file +#endif // HIR2MPL_BC_INPUT_INCLUDE_DEX_PRAGMA_H diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_reader.h b/src/hir2mpl/bytecode_input/dex/include/dex_reader.h index eb99cc1bd1baa4c6f467f9630e3ef0b10fadb256..d1d6f91a1b45f58debc1e9d7731c3be842d4a4de 100644 --- a/src/hir2mpl/bytecode_input/dex/include/dex_reader.h +++ b/src/hir2mpl/bytecode_input/dex/include/dex_reader.h @@ -24,13 +24,13 @@ namespace maple { namespace bc { -typedef std::map>> SrcLocalInfo; +using SrcLocalInfo = std::map>>; class DexReader : public BCReader { public: DexReader(uint32 fileIdxIn, const std::string &fileNameIn) : BCReader(fileNameIn), fileIdx(fileIdxIn) {} - ~DexReader() = default; + ~DexReader() override = default; void SetDexFile(std::unique_ptr dexFile); uint32 GetClassItemsSize() const; const char *GetClassJavaSourceFileName(uint32 classIdx) const; @@ -98,4 +98,4 @@ class DexReader : public BCReader { }; } // namespace bc } // namespace maple -#endif // MPL_FE_DEX_INPUT_DEX_READER_H \ No newline at end of file +#endif // MPL_FE_DEX_INPUT_DEX_READER_H diff --git a/src/hir2mpl/bytecode_input/dex/include/dexfile_interface.h b/src/hir2mpl/bytecode_input/dex/include/dexfile_interface.h index 
57e9ee0ac2b11b9fc63cea77aa2496cf39f55123..48ae638965cbcd4b1f61e17b1df5a0bfa465682a 100644 --- a/src/hir2mpl/bytecode_input/dex/include/dexfile_interface.h +++ b/src/hir2mpl/bytecode_input/dex/include/dexfile_interface.h @@ -50,6 +50,7 @@ enum ValueType { const uint16_t kIDexNumPackedOpcodes = 0x100; const uint16_t kIDexPackedSwitchSignature = 0x100; const uint16_t kIDexSparseSwitchSignature = 0x200; +const uint8_t kMaxShiftBitArgs = 7; // opcode in dex file enum IDexOpcode : uint8_t { diff --git a/src/hir2mpl/bytecode_input/dex/src/class_loader_context.cpp b/src/hir2mpl/bytecode_input/dex/src/class_loader_context.cpp index c47b6e6d60d3b8d1b15d6d00b83fac6f1b76f623..d9e6a6c25d21c648477a019a3b62789a2f4aecab 100644 --- a/src/hir2mpl/bytecode_input/dex/src/class_loader_context.cpp +++ b/src/hir2mpl/bytecode_input/dex/src/class_loader_context.cpp @@ -179,6 +179,7 @@ ClassLoaderInfo *ClassLoaderContext::ParseInternal(const std::string &spec) { CHECK_FATAL(false, "Invalid class loader spec: %s", currentSpec.c_str()); return nullptr; } else { + // 2: index of bracket starts and ends remaining = remaining.substr(sharedLibraryClose + 2, remaining.size() - sharedLibraryClose - 2); } } diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_encode_value.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_encode_value.cpp index cd0ef7a118b25118d72b6c76b999a7c3af59ff27..34f30021884e18fdcb42ce7f91230e7e22eb4c2c 100644 --- a/src/hir2mpl/bytecode_input/dex/src/dex_encode_value.cpp +++ b/src/hir2mpl/bytecode_input/dex/src/dex_encode_value.cpp @@ -21,7 +21,7 @@ uint64 DexEncodeValue::GetUVal(const uint8 **data, uint8 len) const { // get value, max 8 bytes, little-endian uint64 val = 0; for (uint8 j = 0; j <= len; j++) { - val |= (static_cast(*(*data)++) << (j << 3)); + val |= (static_cast(*(*data)++) << (j << 3)); // 3: left shift to 8 bytes } return val; } @@ -29,8 +29,8 @@ uint64 DexEncodeValue::GetUVal(const uint8 **data, uint8 len) const { MIRIntConst *DexEncodeValue::ProcessIntValue(const 
uint8 **data, uint8 valueArg, MIRType &type) { // sign extended val uint64 val = GetUVal(data, valueArg); - uint32 shiftBit = static_cast((7 - valueArg) * 8); // 7 : max shift bit args - CHECK_FATAL(valueArg <= 7, "shiftBit positive check"); + CHECK_FATAL(valueArg <= kMaxShiftBitArgs, "shiftBit positive check"); + uint32 shiftBit = static_cast((kMaxShiftBitArgs - valueArg) * 8); uint64 sVal = (static_cast(val) << shiftBit) >> shiftBit; MIRIntConst *intCst = mp.New(sVal, type); return intCst; @@ -136,7 +136,7 @@ void DexEncodeValue::ProcessEncodedValue(const uint8 **data, uint8 valueType, ui case kValueDouble: { val = GetUVal(data, valueArg); // fill 0s for least significant bits - valBuf.u = val << ((7 - valueArg) << 3); + valBuf.u = val << ((kMaxShiftBitArgs - valueArg) << 3); cst = mp.New(valBuf.d, *GlobalTables::GetTypeTable().GetDouble()); break; } diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_op.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_op.cpp index 24e85c957ad5ab3500414ed5e93d2e96bdd1c595..106c88ac0046734922ee4c92d5dfe62b1e0ed184 100644 --- a/src/hir2mpl/bytecode_input/dex/src/dex_op.cpp +++ b/src/hir2mpl/bytecode_input/dex/src/dex_op.cpp @@ -381,7 +381,7 @@ std::list DexOpMonitor::EmitToFEIRStmtsImpl() { UniqueFEIRExpr expr = std::make_unique(vA.GenFEIRVarReg()); exprs.emplace_back(std::move(expr)); if (opcode == kDexOpMonitorEnter) { - expr = std::make_unique(static_cast(2), PTY_i32); + expr = std::make_unique(static_cast(2), PTY_i32); // 2: constval i32 2 exprs.emplace_back(std::move(expr)); } UniqueFEIRStmt stmt = diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_pragma.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_pragma.cpp index da55cd4d955212289839918307aeed8bf6a8c1f3..1612a81a3f82d3413dd6ab8beb7ee4039a986072 100644 --- a/src/hir2mpl/bytecode_input/dex/src/dex_pragma.cpp +++ b/src/hir2mpl/bytecode_input/dex/src/dex_pragma.cpp @@ -92,7 +92,7 @@ void DexBCAnnotationElement::ProcessAnnotationEncodedValue(const uint8 **data, M case kValueDouble: 
{ val = GetUVal(data, valueArg); // fill 0s for least significant bits - element.SetU64Val(val << ((7 - valueArg) << 3)); + element.SetU64Val(val << ((kMaxShiftBitArgs - valueArg) << 3)); cst = mp.New(element.GetDoubleVal(), *GlobalTables::GetTypeTable().GetDouble()); break; } @@ -156,8 +156,8 @@ MIRIntConst *DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProces MIRType &type) { // sign extended val uint64 val = GetUVal(data, valueArg); - uint32 shiftBit = static_cast((7 - valueArg) * 8); - CHECK_FATAL(valueArg <= 7, "shiftBit positive check"); + CHECK_FATAL(valueArg <= kMaxShiftBitArgs, "shiftBit positive check"); + uint32 shiftBit = static_cast((kMaxShiftBitArgs - valueArg) * 8); uint64 sVal = (static_cast(val) << shiftBit) >> shiftBit; element.SetI64Val(static_cast(sVal)); MIRIntConst *intCst = mp.New(sVal, type); diff --git a/src/hir2mpl/bytecode_input/dex/src/dexfile_interface.cpp b/src/hir2mpl/bytecode_input/dex/src/dexfile_interface.cpp index 3027b0ce7182ba0c657d0a355e66b1dc1ab9fce6..3b94cb52801b4df77c4a1dcab7e4bf6e661dc7d2 100644 --- a/src/hir2mpl/bytecode_input/dex/src/dexfile_interface.cpp +++ b/src/hir2mpl/bytecode_input/dex/src/dexfile_interface.cpp @@ -15,7 +15,7 @@ #include "dexfile_interface.h" #include "mpl_logging.h" -namespace maple{ +namespace maple { void ResolvedMethodType::SignatureTypes(const std::string &mt, std::list &types) { // three pointers linear scan algo size_t startPos = 1; // pos 0 should be '(' diff --git a/src/hir2mpl/common/include/basic_io.h b/src/hir2mpl/common/include/basic_io.h index fd755bd4b6724b5583fd8ebb3ae212989d76201b..a6ea1230db667d3cee5ee66c41a92d6baf1d5f9b 100644 --- a/src/hir2mpl/common/include/basic_io.h +++ b/src/hir2mpl/common/include/basic_io.h @@ -202,6 +202,28 @@ class BasicIORead { return file.GetLength(); } + private: + template + void ReadBuffer8BitLong(T *dst, uint32 length) { + const uint8 *p = GetSafeBuffer(length); + pos += length; + errno_t err = memcpy_s(dst, length, p, length); + 
CHECK_FATAL(err == EOK, "memcpy_s failed"); + } + + template + void ReadBuffer8BitLong(T *dst, uint32 length, bool &success) { + const uint8 *p = GetBuffer(length); + if (p == nullptr) { + success = false; + return; + } + pos += length; + success = true; + errno_t err = memcpy_s(dst, length, p, length); + CHECK_FATAL(err == EOK, "memcpy_s failed"); + } + protected: BasicIOMapFile &file; bool isBigEndian; diff --git a/src/hir2mpl/common/include/enhance_c_checker.h b/src/hir2mpl/common/include/enhance_c_checker.h index a47d1f144c73b1212ec3df888f9f6a0b8cf00c9b..1f6e2e9078d23436ce3217a38aeaeae7f51ab908 100644 --- a/src/hir2mpl/common/include/enhance_c_checker.h +++ b/src/hir2mpl/common/include/enhance_c_checker.h @@ -18,6 +18,7 @@ #include "feir_stmt.h" #include "ast_expr.h" #include "ast_decl.h" +#include "fe_manager.h" namespace maple { constexpr int64 kUndefValue = 0xdeadbeef; @@ -90,6 +91,23 @@ class ENCChecker { static bool IsSafeRegion(const MIRBuilder &mirBuilder); static bool IsUnsafeRegion(const MIRBuilder &mirBuilder); static void CheckLenExpr(const ASTExpr &lenExpr, const std::list &nullstmts); + + private: + template + static void InsertBoundaryInFieldOrFuncAtts(T &attr, const BoundaryInfo &boundary) { + attr.GetAttrBoundary().SetIsBytedLen(boundary.isBytedLen); + if (boundary.lenParamIdx != -1) { + attr.GetAttrBoundary().SetLenParamIdx(boundary.lenParamIdx); + } + if (boundary.lenExpr == nullptr) { + return; + } + std::list nullStmts; + UniqueFEIRExpr lenExpr = boundary.lenExpr->Emit2FEExpr(nullStmts); + uint32 hash = lenExpr->Hash(); + FEManager::GetTypeManager().InsertBoundaryLenExprHashMap(hash, std::move(lenExpr)); // save expr cache + attr.GetAttrBoundary().SetLenExprHash(hash); + } }; // class ENCChecker } // namespace maple #endif // HIR2MPL_INCLUDE_COMMON_ENCCHECKER_H diff --git a/src/hir2mpl/common/include/fe_file_type.h b/src/hir2mpl/common/include/fe_file_type.h index 
f7ce0726405cbef407b21347befef998de75395b..3a1fe16ba910708ae3897f7eb6f833d0fd56aea7 100644 --- a/src/hir2mpl/common/include/fe_file_type.h +++ b/src/hir2mpl/common/include/fe_file_type.h @@ -29,6 +29,7 @@ class FEFileType { kDex, kAST, kMAST, + kO, }; inline static FEFileType &GetInstance() { diff --git a/src/hir2mpl/common/include/fe_input.h b/src/hir2mpl/common/include/fe_input.h index b9cdfd2d2ace67c6fce6270297a56baf618a7f53..bccf27fb6f871959b454910ab66d6115524230f9 100644 --- a/src/hir2mpl/common/include/fe_input.h +++ b/src/hir2mpl/common/include/fe_input.h @@ -75,7 +75,7 @@ class FEInputProgramUnit { class FEInputUnitMethod : public FEInputUnit { public: explicit FEInputUnitMethod(MapleAllocator &alloc); - ~FEInputUnitMethod() = default; + ~FEInputUnitMethod() override = default; protected: std::string GetCatagoryNameImpl() override; @@ -86,7 +86,7 @@ class FEInputUnitMethod : public FEInputUnit { class FEInputUnitVariable : public FEInputUnit { public: explicit FEInputUnitVariable(MapleAllocator &alloc); - ~FEInputUnitVariable() = default; + ~FEInputUnitVariable() override = default; protected: std::string GetCatagoryNameImpl() override; @@ -97,7 +97,7 @@ class FEInputUnitVariable : public FEInputUnit { class FEInputUnitStruct : public FEInputUnit { public: explicit FEInputUnitStruct(MapleAllocator &alloc); - virtual ~FEInputUnitStruct() = default; + ~FEInputUnitStruct() override = default; MIRTypeKind GetMIRTypeKind() const { return typeKind; } diff --git a/src/hir2mpl/common/include/fe_options.h b/src/hir2mpl/common/include/fe_options.h index d115838d32ead18bc749980137f21384201710eb..fa16b0fda78baa2f21d2e8fa2a61be4409352383 100644 --- a/src/hir2mpl/common/include/fe_options.h +++ b/src/hir2mpl/common/include/fe_options.h @@ -494,6 +494,14 @@ class FEOptions { return wpaa; } + void SetFuncMergeEnable(bool flag) { + funcMerge = flag; + } + + bool IsEnableFuncMerge() const { + return funcMerge; + } + private: static FEOptions options; // input control options 
@@ -573,7 +581,7 @@ class FEOptions { uint32 funcInlineSize = 0; bool wpaa = false; - + bool funcMerge = false; FEOptions(); ~FEOptions() = default; }; diff --git a/src/hir2mpl/common/include/fe_type_manager.h b/src/hir2mpl/common/include/fe_type_manager.h index 546e695b8baa3b460d8adec5d478fceefd5c785d..aee2926ad3088a990caca3a955ca27820f68567b 100644 --- a/src/hir2mpl/common/include/fe_type_manager.h +++ b/src/hir2mpl/common/include/fe_type_manager.h @@ -267,7 +267,7 @@ class FETypeManager { void UpdateNameFuncMapFromTypeTable(); void UpdateDupTypes(const GStrIdx &nameIdx, bool isInterface, const std::unordered_map::iterator &importedTypeIt); - void ReleaseMIRFuncCodeMempool(std::unordered_map &map); + void ReleaseMIRFuncCodeMempool(std::unordered_map &map) const; // MCC function void InitFuncMCCGetOrInsertLiteral(); diff --git a/src/hir2mpl/common/include/fe_utils.h b/src/hir2mpl/common/include/fe_utils.h index ba3146e016961e80bf8299328067e5d1145bd790..3a5a34f1a4c85cb36afe27ab4ec9ef815d49cc7b 100644 --- a/src/hir2mpl/common/include/fe_utils.h +++ b/src/hir2mpl/common/include/fe_utils.h @@ -48,7 +48,10 @@ class FEUtils { static std::string GetBaseTypeName(const std::string &typeName); static PrimType GetPrimType(const GStrIdx &typeNameIdx); static uint32 GetSequentialNumber(); + static std::string NormalizeFileName(std::string fileName); + static void EraseFileNameforClangTypeStr(std::string &typeStr); static std::string GetFileNameHashStr(const std::string &fileName, uint32 seed = 211); + static std::string GetHashStr(const std::string &str, uint32 seed = 211); static std::string GetSequentialName0(const std::string &prefix, uint32_t num); static std::string GetSequentialName(const std::string &prefix); static std::string CreateLabelName(); @@ -78,6 +81,7 @@ class FEUtils { static const std::string kDouble; static const std::string kVoid; static const std::string kThis; + static const std::string kDotDot; static const std::string kMCCStaticFieldGetBool; static 
const std::string kMCCStaticFieldGetByte; static const std::string kMCCStaticFieldGetChar; diff --git a/src/hir2mpl/common/include/feir_builder.h b/src/hir2mpl/common/include/feir_builder.h index 48feebcb33785fcc42110069458ae0c468e71649..ca3d765d02b27b0eeef367b0ad61bdaed37e1231 100644 --- a/src/hir2mpl/common/include/feir_builder.h +++ b/src/hir2mpl/common/include/feir_builder.h @@ -69,7 +69,9 @@ class FEIRBuilder { static UniqueFEIRExpr CreateExprConstF32(float val); static UniqueFEIRExpr CreateExprConstF64(double val); static UniqueFEIRExpr CreateExprConstPtr(int64 val); + static UniqueFEIRExpr CreateExprConstF128(const uint64_t val[2]); static UniqueFEIRExpr CreateExprConstAnyScalar(PrimType primType, int64 val); + static UniqueFEIRExpr CreateExprConstAnyScalar(PrimType primType, std::pair val); static UniqueFEIRExpr CreateExprVdupAnyVector(PrimType primtype, int64 val); static UniqueFEIRExpr CreateExprMathUnary(Opcode op, UniqueFEIRVar var0); static UniqueFEIRExpr CreateExprMathUnary(Opcode op, UniqueFEIRExpr expr); @@ -132,8 +134,8 @@ class FEIRBuilder { static UniqueFEIRStmt CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, std::list exprIndexs, UniqueFEIRType arrayType, const std::string &argArrayName); - static UniqueFEIRStmt CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, - UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType, + /* std::vector expr stores 0: exprElem; 1: exprArray; 2: exprIndex */ + static UniqueFEIRStmt CreateStmtArrayStoreOneStmtForC(std::vector expr, UniqueFEIRType arrayType, UniqueFEIRType elemType, const std::string &argArrayName); static std::list CreateStmtArrayLoad(UniqueFEIRVar varElem, UniqueFEIRVar varArray, UniqueFEIRVar varIndex); diff --git a/src/hir2mpl/common/include/feir_scope.h b/src/hir2mpl/common/include/feir_scope.h index 5988a29c938e74686aef686bd66d55a3ca2ea626..5df35af357898388ac52b74bbd94ced2a85e0ff5 100644 --- 
a/src/hir2mpl/common/include/feir_scope.h +++ b/src/hir2mpl/common/include/feir_scope.h @@ -74,11 +74,11 @@ class FEIRScope { UniqueFEIRStmt GenVLAStackRestoreStmt() const; UniqueFEIRScope Clone() const; - void ProcessVLAStack(std::list &stmts, bool isCallAlloca, Loc endLoc) { - FEIRStmt *feirStmt = GetVLASavedStackPtr(); - if (feirStmt != nullptr) { + void ProcessVLAStack(std::list &stmts, bool isCallAlloca, const Loc endLoc) { + FEIRStmt *vlaSavedStackStmt = GetVLASavedStackPtr(); + if (vlaSavedStackStmt != nullptr) { if (isCallAlloca) { - feirStmt->SetIsNeedGenMir(false); + vlaSavedStackStmt->SetIsNeedGenMir(false); } else { auto stackRestoreStmt = GenVLAStackRestoreStmt(); stackRestoreStmt->SetSrcLoc(endLoc); diff --git a/src/hir2mpl/common/include/feir_stmt.h b/src/hir2mpl/common/include/feir_stmt.h index 4b1a60bd2fcd2fb505d7799b2efea29154e37b33..311431ed1e6dc92035be63fa8aa78af987f5be0d 100644 --- a/src/hir2mpl/common/include/feir_stmt.h +++ b/src/hir2mpl/common/include/feir_stmt.h @@ -34,6 +34,7 @@ #include "fe_options.h" #include "feir_type_helper.h" #include "feir_dfg.h" +#include "int128_util.h" namespace maple { class FEIRBuilder; @@ -108,7 +109,7 @@ class FEIRStmt : public FELinkListNode { FEIRStmt() : kind(kStmt) {} // kStmt as default - virtual ~FEIRStmt() = default; + ~FEIRStmt() override = default; void RegisterDFGNodes2CheckPoint(FEIRStmtCheckPoint &checkPoint) { RegisterDFGNodes2CheckPointImpl(checkPoint); } @@ -560,6 +561,8 @@ union ConstExprValue { uint64 u64 = 0; int64 i64; double f64; + uint64 f128[2]; + Int128Arr i128; }; class FEIRExprConst : public FEIRExpr { @@ -567,10 +570,12 @@ class FEIRExprConst : public FEIRExpr { FEIRExprConst(); FEIRExprConst(int64 val, PrimType argType); FEIRExprConst(uint64 val, PrimType argType); + FEIRExprConst(const IntVal &val, PrimType argType); explicit FEIRExprConst(uint32 val); explicit FEIRExprConst(float val); explicit FEIRExprConst(double val); - ~FEIRExprConst() = default; + explicit FEIRExprConst(const 
uint64_t *val); + ~FEIRExprConst() override = default; FEIRExprConst(const FEIRExprConst&) = delete; FEIRExprConst& operator=(const FEIRExprConst&) = delete; @@ -592,7 +597,7 @@ class FEIRExprConst : public FEIRExpr { class FEIRExprSizeOfType : public FEIRExpr { public: explicit FEIRExprSizeOfType(UniqueFEIRType ty); - ~FEIRExprSizeOfType() = default; + ~FEIRExprSizeOfType() override = default; protected: std::unique_ptr CloneImpl() const override; @@ -611,7 +616,7 @@ class FEIRExprDRead : public FEIRExpr { public: explicit FEIRExprDRead(std::unique_ptr argVarSrc); FEIRExprDRead(std::unique_ptr argType, std::unique_ptr argVarSrc); - ~FEIRExprDRead() = default; + ~FEIRExprDRead() override = default; void SetVarSrc(std::unique_ptr argVarSrc); void SetTrans(UniqueFEIRVarTrans argTrans) { varSrc->SetTrans(std::move(argTrans)); @@ -686,7 +691,7 @@ class FEIRExprDRead : public FEIRExpr { class FEIRExprRegRead : public FEIRExpr { public: FEIRExprRegRead(PrimType pty, int32 regNumIn); - ~FEIRExprRegRead() = default; + ~FEIRExprRegRead() override = default; protected: std::unique_ptr CloneImpl() const override; @@ -710,7 +715,9 @@ class FEIRExprAddrofConstArray : public FEIRExpr { std::copy(arrayIn.begin(), arrayIn.end(), std::back_inserter(array)); } - ~FEIRExprAddrofConstArray() = default; + ~FEIRExprAddrofConstArray() override { + elemType = nullptr; + } uint32 GetStringLiteralSize() const { return static_cast(array.size()); @@ -752,7 +759,7 @@ class FEIRExprAddrOfLabel : public FEIRExpr { public: FEIRExprAddrOfLabel(const std::string &lbName, UniqueFEIRType exprType) : FEIRExpr(FEIRNodeKind::kExprAddrofLabel, std::move(exprType)), labelName(lbName) {} - ~FEIRExprAddrOfLabel() = default; + ~FEIRExprAddrOfLabel() override = default; protected: std::unique_ptr CloneImpl() const override; @@ -779,7 +786,9 @@ class FEIRExprAddrofVar : public FEIRExpr { : FEIRExpr(FEIRNodeKind::kExprAddrofVar, FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), 
varSrc(std::move(argVarSrc)), fieldID(id) {} - ~FEIRExprAddrofVar() = default; + ~FEIRExprAddrofVar() override { + cst = nullptr; + } void SetVarValue(MIRConst *val) { cst = val; @@ -822,7 +831,7 @@ class FEIRExprIAddrof : public FEIRExpr { ptrType(std::move(pointeeType)), fieldID(id), subExpr(std::move(expr)) {} - ~FEIRExprIAddrof() = default; + ~FEIRExprIAddrof() override = default; UniqueFEIRType GetClonedRetType() const { return type->Clone(); @@ -867,7 +876,7 @@ class FEIRExprAddrofFunc : public FEIRExpr { : FEIRExpr(FEIRNodeKind::kExprAddrofFunc, FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), funcAddr(addr) {} - ~FEIRExprAddrofFunc() = default; + ~FEIRExprAddrofFunc() override = default; const std::string &GetFuncAddr() const { return funcAddr; @@ -891,7 +900,7 @@ class FEIRExprAddrofArray : public FEIRExpr { public: FEIRExprAddrofArray(UniqueFEIRType argTypeNativeArray, UniqueFEIRExpr argExprArray, const std::string &argArrayName, std::list &argExprIndexs); - ~FEIRExprAddrofArray() = default; + ~FEIRExprAddrofArray() override = default; void SetIndexsExprs(std::list &exprs) { exprIndexs.clear(); @@ -948,7 +957,7 @@ class FEIRExprUnary : public FEIRExpr { public: FEIRExprUnary(Opcode argOp, std::unique_ptr argOpnd); FEIRExprUnary(std::unique_ptr argType, Opcode argOp, std::unique_ptr argOpnd); - ~FEIRExprUnary() = default; + ~FEIRExprUnary() override = default; void SetOpnd(std::unique_ptr argOpnd); const UniqueFEIRExpr &GetOpnd() const; static std::map InitMapOpNestableForExprUnary(); @@ -983,7 +992,7 @@ class FEIRExprTypeCvt : public FEIRExprUnary { public: FEIRExprTypeCvt(Opcode argOp, std::unique_ptr argOpnd); FEIRExprTypeCvt(std::unique_ptr exprType, Opcode argOp, std::unique_ptr argOpnd); - ~FEIRExprTypeCvt() = default; + ~FEIRExprTypeCvt() override = default; static std::map InitMapOpNestableForTypeCvt(); static Opcode ChooseOpcodeByFromVarAndToVar(const FEIRVar &fromVar, const FEIRVar &toVar); @@ -1025,7 +1034,7 @@ 
class FEIRExprExtractBits : public FEIRExprUnary { FEIRExprExtractBits(Opcode argOp, PrimType argPrimType, uint8 argBitOffset, uint8 argBitSize, std::unique_ptr argOpnd); FEIRExprExtractBits(Opcode argOp, PrimType argPrimType, std::unique_ptr argOpnd); - ~FEIRExprExtractBits() = default; + ~FEIRExprExtractBits() override = default; static std::map InitMapOpNestableForExtractBits(); void SetBitOffset(uint8 offset) { bitOffset = offset; @@ -1118,7 +1127,7 @@ class FEIRExprBinary : public FEIRExpr { FEIRExprBinary(Opcode argOp, std::unique_ptr argOpnd0, std::unique_ptr argOpnd1); FEIRExprBinary(std::unique_ptr exprType, Opcode argOp, std::unique_ptr argOpnd0, std::unique_ptr argOpnd1); - ~FEIRExprBinary() = default; + ~FEIRExprBinary() override = default; void SetOpnd0(std::unique_ptr argOpnd); void SetOpnd1(std::unique_ptr argOpnd); const std::unique_ptr &GetOpnd0() const; @@ -1168,7 +1177,7 @@ class FEIRExprTernary : public FEIRExpr { std::unique_ptr argOpnd2); FEIRExprTernary(Opcode argOp, std::unique_ptr argType, std::unique_ptr argOpnd0, std::unique_ptr argOpnd1, std::unique_ptr argOpnd2); - ~FEIRExprTernary() = default; + ~FEIRExprTernary() override = default; void SetOpnd(std::unique_ptr argOpnd, uint32 idx); protected: @@ -1199,7 +1208,7 @@ class FEIRExprTernary : public FEIRExpr { class FEIRExprNary : public FEIRExpr { public: explicit FEIRExprNary(Opcode argOp); - virtual ~FEIRExprNary() = default; + ~FEIRExprNary() override = default; void AddOpnd(std::unique_ptr argOpnd); void AddOpnds(const std::vector> &argOpnds); void ResetOpnd(); @@ -1231,7 +1240,7 @@ class FEIRExprNary : public FEIRExpr { class FEIRExprArray : public FEIRExprNary { public: FEIRExprArray(Opcode argOp, std::unique_ptr argArray, std::unique_ptr argIndex); - ~FEIRExprArray() = default; + ~FEIRExprArray() override = default; void SetOpndArray(std::unique_ptr opndArray); void SetOpndIndex(std::unique_ptr opndIndex); @@ -1257,7 +1266,7 @@ class FEIRExprIntrinsicop : public FEIRExprNary { 
FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, std::unique_ptr argParamType, const std::vector> &argOpnds); - ~FEIRExprIntrinsicop() = default; + ~FEIRExprIntrinsicop() override = default; protected: std::unique_ptr CloneImpl() const override; @@ -1285,7 +1294,7 @@ class FEIRExprIntrinsicopForC : public FEIRExprNary { public: FEIRExprIntrinsicopForC(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, const std::vector> &argOpnds); - ~FEIRExprIntrinsicopForC() = default; + ~FEIRExprIntrinsicopForC() override = default; MIRIntrinsicID GetIntrinsicID() const { return intrinsicID; @@ -1306,7 +1315,7 @@ class FEIRExprIntrinsicopForC : public FEIRExprNary { class FEIRExprJavaMerge : public FEIRExprNary { public: FEIRExprJavaMerge(std::unique_ptr mergedTypeArg, const std::vector> &argOpnds); - ~FEIRExprJavaMerge() = default; + ~FEIRExprJavaMerge() override = default; protected: std::unique_ptr CloneImpl() const override; @@ -1321,7 +1330,7 @@ class FEIRExprJavaNewInstance : public FEIRExpr { explicit FEIRExprJavaNewInstance(UniqueFEIRType argType); FEIRExprJavaNewInstance(UniqueFEIRType argType, uint32 argTypeID); FEIRExprJavaNewInstance(UniqueFEIRType argType, uint32 argTypeID, bool argIsRcPermanent); - ~FEIRExprJavaNewInstance() = default; + ~FEIRExprJavaNewInstance() override = default; protected: std::unique_ptr CloneImpl() const override; @@ -1339,7 +1348,7 @@ class FEIRExprJavaNewArray : public FEIRExpr { FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize, uint32 argTypeID); FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize, uint32 argTypeID, bool argIsRcPermanent); - ~FEIRExprJavaNewArray() = default; + ~FEIRExprJavaNewArray() override = default; void SetArrayType(UniqueFEIRType argArrayType) { CHECK_NULL_FATAL(argArrayType); arrayType = std::move(argArrayType); @@ -1369,7 +1378,7 @@ class FEIRExprJavaNewArray : public FEIRExpr { class FEIRExprJavaArrayLength : public FEIRExpr { 
public: explicit FEIRExprJavaArrayLength(UniqueFEIRExpr argExprArray); - ~FEIRExprJavaArrayLength() = default; + ~FEIRExprJavaArrayLength() override = default; void SetExprArray(UniqueFEIRExpr argExprArray) { CHECK_NULL_FATAL(argExprArray); exprArray = std::move(argExprArray); @@ -1390,7 +1399,7 @@ class FEIRExprJavaArrayLength : public FEIRExpr { class FEIRExprArrayLoad : public FEIRExpr { public: FEIRExprArrayLoad(UniqueFEIRExpr argExprArray, UniqueFEIRExpr argExprIndex, UniqueFEIRType argTypeArray); - ~FEIRExprArrayLoad() = default; + ~FEIRExprArrayLoad() override = default; const UniqueFEIRType GetElemType() const { UniqueFEIRType typeElem = typeArray->Clone(); (void)typeElem->ArrayDecrDim(); @@ -1424,7 +1433,9 @@ class FEIRExprArrayLoad : public FEIRExpr { class FEIRExprCStyleCast : public FEIRExpr { public: FEIRExprCStyleCast(MIRType *src, MIRType *dest, UniqueFEIRExpr sub, bool isArr2Pty); - ~FEIRExprCStyleCast() = default; + ~FEIRExprCStyleCast() override { + destType = nullptr; + } void SetArray2Pointer(bool isArr2Ptr) { isArray2Pointer = isArr2Ptr; } @@ -1488,7 +1499,9 @@ inline bool IsReturnAtomicOp(ASTAtomicOp atomicOp) { class FEIRExprAtomic : public FEIRExpr { public: FEIRExprAtomic(MIRType *ty, MIRType *ref, UniqueFEIRExpr obj, ASTAtomicOp atomOp); - ~FEIRExprAtomic() = default; + ~FEIRExprAtomic() override { + refType = nullptr; + } void SetVal1Type(MIRType *ty) { val1Type = ty; @@ -1525,7 +1538,7 @@ class FEIRExprAtomic : public FEIRExpr { protected: std::unique_ptr CloneImpl() const override; BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; - TyIdx GetTyIdx(MIRBuilder &mirBuilder) const; + TyIdx GetTyIdx() const; private: MIRType *mirType = nullptr; @@ -1547,7 +1560,7 @@ class FEIRExprAtomic : public FEIRExpr { class FEIRStmtNary : public FEIRStmt { public: FEIRStmtNary(Opcode opIn, std::list> argExprsIn); - virtual ~FEIRStmtNary() = default; + ~FEIRStmtNary() override = default; void SetOP(Opcode opIn) { op = opIn; @@ -1572,7 
+1585,7 @@ class FEIRStmtNary : public FEIRStmt { class FEIRStmtAssign : public FEIRStmt { public: FEIRStmtAssign(FEIRNodeKind argKind, std::unique_ptr argVar); - ~FEIRStmtAssign() = default; + ~FEIRStmtAssign() override = default; FEIRVar *GetVar() const { return var.get(); } @@ -1625,7 +1638,7 @@ class FEIRStmtAssign : public FEIRStmt { class FEIRStmtDAssign : public FEIRStmtAssign { public: FEIRStmtDAssign(std::unique_ptr argVar, std::unique_ptr argExpr, int32 argFieldID = 0); - ~FEIRStmtDAssign() = default; + ~FEIRStmtDAssign() override = default; FEIRExpr *GetExpr() const { return expr.get(); } @@ -1660,7 +1673,7 @@ class FEIRStmtIAssign : public FEIRStmt { addrExpr(std::move(argAddrExpr)), baseExpr(std::move(argBaseExpr)), fieldID(id) {} - ~FEIRStmtIAssign() = default; + ~FEIRStmtIAssign() override = default; protected: std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; @@ -1689,7 +1702,7 @@ class FEIRStmtJavaTypeCheck : public FEIRStmtAssign { std::unique_ptr argType, CheckKind argCheckKind); FEIRStmtJavaTypeCheck(std::unique_ptr argVar, std::unique_ptr argExpr, std::unique_ptr argType, CheckKind argCheckKind, uint32 argTypeID); - ~FEIRStmtJavaTypeCheck() = default; + ~FEIRStmtJavaTypeCheck() override = default; protected: std::string DumpDotStringImpl() const override; @@ -1706,7 +1719,7 @@ class FEIRStmtJavaTypeCheck : public FEIRStmtAssign { class FEIRStmtJavaConstClass : public FEIRStmtAssign { public: FEIRStmtJavaConstClass(std::unique_ptr argVar, std::unique_ptr argType); - ~FEIRStmtJavaConstClass() = default; + ~FEIRStmtJavaConstClass() override = default; protected: void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; @@ -1719,7 +1732,7 @@ class FEIRStmtJavaConstString : public FEIRStmtAssign { public: FEIRStmtJavaConstString(std::unique_ptr argVar, const std::string &argStrVal, uint32 argFileIdx, uint32 argStringID); - ~FEIRStmtJavaConstString() = default; + ~FEIRStmtJavaConstString() override = default; 
protected: void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; @@ -1736,7 +1749,9 @@ class FEIRStmtJavaFillArrayData : public FEIRStmtAssign { public: FEIRStmtJavaFillArrayData(std::unique_ptr arrayExprIn, const int8 *arrayDataIn, uint32 sizeIn, const std::string &arrayNameIn); - ~FEIRStmtJavaFillArrayData() = default; + ~FEIRStmtJavaFillArrayData() override { + arrayData = nullptr; + } protected: void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; @@ -1760,7 +1775,7 @@ class FEIRStmtJavaMultiANewArray : public FEIRStmtAssign { public: FEIRStmtJavaMultiANewArray(std::unique_ptr argVar, std::unique_ptr argElemType, std::unique_ptr argArrayType); - ~FEIRStmtJavaMultiANewArray() = default; + ~FEIRStmtJavaMultiANewArray() override = default; void AddVarSize(std::unique_ptr argVarSize); void AddVarSizeRev(std::unique_ptr argVarSize); void SetArrayType(std::unique_ptr argArrayType) { @@ -1793,7 +1808,7 @@ class FEIRStmtUseOnly : public FEIRStmt { public: FEIRStmtUseOnly(FEIRNodeKind argKind, Opcode argOp, std::unique_ptr argExpr); FEIRStmtUseOnly(Opcode argOp, std::unique_ptr argExpr); - virtual ~FEIRStmtUseOnly() = default; + ~FEIRStmtUseOnly() override = default; const std::unique_ptr &GetExpr() const { return expr; @@ -1848,7 +1863,7 @@ class FEIRStmtAssertNonnull : public FEIRStmtUseOnly { FEIRStmtAssertNonnull(Opcode argOp, std::unique_ptr argExpr) : FEIRStmtUseOnly(argOp, std::move(argExpr)) {} - ~FEIRStmtAssertNonnull() = default; + ~FEIRStmtAssertNonnull() override = default; protected: std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; @@ -1861,7 +1876,7 @@ class FEIRStmtCallAssertNonnull : public FEIRStmtUseOnly, public FEIRStmtSafetyC size_t paramIndex) : FEIRStmtUseOnly(argOp, std::move(argExpr)), FEIRStmtSafetyCallAssert(funcName, paramIndex) {} - ~FEIRStmtCallAssertNonnull() = default; + ~FEIRStmtCallAssertNonnull() override = default; protected: std::list GenMIRStmtsImpl(MIRBuilder 
&mirBuilder) const override; @@ -1874,7 +1889,7 @@ class FEIRStmtCallAssertBoundary : public FEIRStmtNary, public FEIRStmtSafetyCal size_t paramIndex) : FEIRStmtNary(opIn, std::move(argExprsIn)), FEIRStmtSafetyCallAssert(funcName, paramIndex) {} - ~FEIRStmtCallAssertBoundary() = default; + ~FEIRStmtCallAssertBoundary() override = default; protected: std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; @@ -1885,7 +1900,7 @@ class FEIRStmtAssertBoundary : public FEIRStmtNary { public: FEIRStmtAssertBoundary(Opcode opIn, std::list> argExprsIn) : FEIRStmtNary(opIn, std::move(argExprsIn)) {} - ~FEIRStmtAssertBoundary() = default; + ~FEIRStmtAssertBoundary() override = default; void SetIsComputable(bool flag) { isComputable = flag; @@ -1906,7 +1921,7 @@ class FEIRStmtAssertBoundary : public FEIRStmtNary { class FEIRStmtReturn : public FEIRStmtUseOnly { public: explicit FEIRStmtReturn(std::unique_ptr argExpr); - ~FEIRStmtReturn() = default; + ~FEIRStmtReturn() override = default; protected: std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; @@ -1919,7 +1934,7 @@ class FEIRStmtReturn : public FEIRStmtUseOnly { class FEIRStmtPesudoLabel : public FEIRStmt { public: explicit FEIRStmtPesudoLabel(uint32 argLabelIdx); - ~FEIRStmtPesudoLabel() = default; + ~FEIRStmtPesudoLabel() override = default; void GenerateLabelIdx(const MIRBuilder &mirBuilder); uint32 GetLabelIdx() const { @@ -1946,7 +1961,7 @@ class FEIRStmtPesudoLabel2 : public FEIRStmt { FEIRStmtPesudoLabel2(uint32 qIdx0, uint32 qIdx1) : FEIRStmt(FEIRNodeKind::kStmtPesudoLabel), labelIdxOuter(qIdx0), labelIdxInner(qIdx1) {} - ~FEIRStmtPesudoLabel2() = default; + ~FEIRStmtPesudoLabel2() override = default; static LabelIdx GenMirLabelIdx(const MIRBuilder &mirBuilder, uint32 qIdx0, uint32 qIdx1); std::pair GetLabelIdx() const; uint32 GetPos() const { @@ -2004,7 +2019,9 @@ class FEIRStmtGoto : public FEIRStmt { class FEIRStmtGoto2 : public FEIRStmt { public: FEIRStmtGoto2(uint32 qIdx0, uint32 
qIdx1); - virtual ~FEIRStmtGoto2() = default; + ~FEIRStmtGoto2() override { + stmtTarget = nullptr; + } std::pair GetLabelIdx() const; uint32 GetTarget() const { return labelIdxInner; @@ -2039,7 +2056,7 @@ class FEIRStmtGoto2 : public FEIRStmt { class FEIRStmtGotoForC : public FEIRStmt { public: explicit FEIRStmtGotoForC(const std::string &name); - virtual ~FEIRStmtGotoForC() = default; + ~FEIRStmtGotoForC() override = default; void SetLabelName(const std::string &name) { labelName = name; } @@ -2075,7 +2092,7 @@ class FEIRStmtGotoForC : public FEIRStmt { class FEIRStmtIGoto : public FEIRStmt { public: explicit FEIRStmtIGoto(UniqueFEIRExpr expr); - virtual ~FEIRStmtIGoto() = default; + ~FEIRStmtIGoto() override = default; protected: bool IsFallThroughImpl() const override { @@ -2096,7 +2113,7 @@ class FEIRStmtCondGotoForC : public FEIRStmt { public: explicit FEIRStmtCondGotoForC(UniqueFEIRExpr argExpr, Opcode op, const std::string &name) : FEIRStmt(FEIRNodeKind::kStmtCondGoto), expr(std::move(argExpr)), opCode(op), labelName(name) {} - virtual ~FEIRStmtCondGotoForC() = default; + ~FEIRStmtCondGotoForC() override = default; void SetLabelName(const std::string &name) { labelName = name; } @@ -2129,7 +2146,7 @@ class FEIRStmtCondGotoForC : public FEIRStmt { class FEIRStmtCondGoto : public FEIRStmtGoto { public: FEIRStmtCondGoto(Opcode argOp, uint32 argLabelIdx, UniqueFEIRExpr argExpr); - ~FEIRStmtCondGoto() = default; + ~FEIRStmtCondGoto() override = default; void SetOpcode(Opcode argOp) { op = argOp; } @@ -2166,7 +2183,7 @@ class FEIRStmtCondGoto : public FEIRStmtGoto { class FEIRStmtCondGoto2 : public FEIRStmtGoto2 { public: FEIRStmtCondGoto2(Opcode argOp, uint32 qIdx0, uint32 qIdx1, UniqueFEIRExpr argExpr); - ~FEIRStmtCondGoto2() = default; + ~FEIRStmtCondGoto2() override = default; protected: bool IsFallThroughImpl() const override { @@ -2309,7 +2326,7 @@ class FEIRStmtSwitch2 : public FEIRStmt { class FEIRStmtSwitchForC : public FEIRStmt { public: 
FEIRStmtSwitchForC(UniqueFEIRExpr argCondExpr, bool argHasDefault); - ~FEIRStmtSwitchForC() = default; + ~FEIRStmtSwitchForC() override = default; void AddFeirStmt(UniqueFEIRStmt stmt) { subStmts.emplace_back(std::move(stmt)); } @@ -2345,7 +2362,7 @@ class FEIRStmtCaseForC : public FEIRStmt { public: explicit FEIRStmtCaseForC(int64 label); void AddCaseTag2CaseVec(int64 lCaseTag, int64 rCaseTag); - ~FEIRStmtCaseForC() = default; + ~FEIRStmtCaseForC() override = default; void AddFeirStmt(UniqueFEIRStmt stmt) { subStmts.emplace_back(std::move(stmt)); } @@ -2368,7 +2385,7 @@ class FEIRStmtCaseForC : public FEIRStmt { class FEIRStmtDefaultForC : public FEIRStmt { public: explicit FEIRStmtDefaultForC(); - ~FEIRStmtDefaultForC() = default; + ~FEIRStmtDefaultForC() override = default; void AddFeirStmt(UniqueFEIRStmt stmt) { subStmts.emplace_back(std::move(stmt)); } @@ -2401,7 +2418,7 @@ class FEIRStmtArrayStore : public FEIRStmt { UniqueFEIRType argTypeArray, UniqueFEIRExpr argExprStruct, UniqueFEIRType argTypeStruct, const std::string &argArrayName); - ~FEIRStmtArrayStore() = default; + ~FEIRStmtArrayStore() override = default; void SetIndexsExprs(std::list &exprs) { exprIndexs.clear(); @@ -2442,7 +2459,7 @@ class FEIRStmtFieldStore : public FEIRStmt { bool argIsStatic); FEIRStmtFieldStore(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, FEStructFieldInfo &argFieldInfo, bool argIsStatic, int32 argDexFileHashCode); - ~FEIRStmtFieldStore() = default; + ~FEIRStmtFieldStore() override = default; protected: std::string DumpDotStringImpl() const override; @@ -2473,7 +2490,7 @@ class FEIRStmtFieldLoad : public FEIRStmtAssign { bool argIsStatic); FEIRStmtFieldLoad(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, FEStructFieldInfo &argFieldInfo, bool argIsStatic, int32 argDexFileHashCode); - ~FEIRStmtFieldLoad() = default; + ~FEIRStmtFieldLoad() override = default; protected: std::string DumpDotStringImpl() const override; @@ -2500,7 +2517,7 @@ class FEIRStmtFieldLoad : 
public FEIRStmtAssign { class FEIRStmtCallAssign : public FEIRStmtAssign { public: FEIRStmtCallAssign(FEStructMethodInfo &argMethodInfo, Opcode argMIROp, UniqueFEIRVar argVarRet, bool argIsStatic); - ~FEIRStmtCallAssign() = default; + ~FEIRStmtCallAssign() override = default; static std::map InitMapOpAssignToOp(); static std::map InitMapOpToOpAssign(); @@ -2528,7 +2545,7 @@ class FEIRStmtCallAssign : public FEIRStmtAssign { class FEIRStmtICallAssign : public FEIRStmtAssign { public: FEIRStmtICallAssign(); - ~FEIRStmtICallAssign() = default; + ~FEIRStmtICallAssign() override = default; void SetPrototype(UniqueFEIRType type) { prototype = std::move(type); @@ -2560,7 +2577,7 @@ class FEIRStmtIntrinsicCallAssign : public FEIRStmtAssign { bool isInStaticFuncIn); FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, UniqueFEIRType typeIn, UniqueFEIRVar argVarRet, uint32 typeIDIn); - ~FEIRStmtIntrinsicCallAssign() = default; + ~FEIRStmtIntrinsicCallAssign() override = default; protected: std::string DumpDotStringImpl() const override; @@ -2569,7 +2586,7 @@ class FEIRStmtIntrinsicCallAssign : public FEIRStmtAssign { private: void ConstructArgsForInvokePolyMorphic(MIRBuilder &mirBuilder, MapleVector &intrnCallargs) const; - std::list GenMIRStmtsForIntrnC(MIRBuilder &mirBuilder, TyIdx tyIdx = TyIdx(0)) const; + std::list GenMIRStmtsForIntrnC(MIRBuilder &mirBuilder, TyIdx returnTyIdx = TyIdx(0)) const; std::list GenMIRStmtsForFillNewArray(MIRBuilder &mirBuilder) const; std::list GenMIRStmtsForInvokePolyMorphic(MIRBuilder &mirBuilder) const; std::list GenMIRStmtsForClintCheck(MIRBuilder &mirBuilder) const; @@ -2590,7 +2607,7 @@ class FEIRStmtIntrinsicCallAssign : public FEIRStmtAssign { class FEIRStmtPesudoLOC : public FEIRStmt { public: FEIRStmtPesudoLOC(uint32 argSrcFileIdx, uint32 argLineNumber); - ~FEIRStmtPesudoLOC() = default; + ~FEIRStmtPesudoLOC() override = default; protected: std::string DumpDotStringImpl() const override; @@ -2601,7 +2618,7 @@ class FEIRStmtPesudoLOC : 
public FEIRStmt { class FEIRStmtPesudoJavaTry : public FEIRStmt { public: FEIRStmtPesudoJavaTry(); - ~FEIRStmtPesudoJavaTry() = default; + ~FEIRStmtPesudoJavaTry() override = default; void AddCatchLabelIdx(uint32 labelIdx) { catchLabelIdxVec.push_back(labelIdx); } @@ -2631,7 +2648,7 @@ class FEIRStmtPesudoJavaTry : public FEIRStmt { class FEIRStmtPesudoJavaTry2 : public FEIRStmt { public: explicit FEIRStmtPesudoJavaTry2(uint32 outerIdxIn); - ~FEIRStmtPesudoJavaTry2() = default; + ~FEIRStmtPesudoJavaTry2() override = default; void AddCatchLabelIdx(uint32 labelIdx) { catchLabelIdxVec.push_back(labelIdx); } @@ -2662,7 +2679,7 @@ class FEIRStmtPesudoJavaTry2 : public FEIRStmt { class FEIRStmtPesudoEndTry : public FEIRStmt { public: FEIRStmtPesudoEndTry(); - ~FEIRStmtPesudoEndTry() = default; + ~FEIRStmtPesudoEndTry() override = default; protected: std::string DumpDotStringImpl() const override; @@ -2673,7 +2690,7 @@ class FEIRStmtPesudoEndTry : public FEIRStmt { class FEIRStmtPesudoCatch : public FEIRStmtPesudoLabel { public: explicit FEIRStmtPesudoCatch(uint32 argLabelIdx); - ~FEIRStmtPesudoCatch() = default; + ~FEIRStmtPesudoCatch() override = default; void AddCatchTypeNameIdx(GStrIdx typeNameIdx); protected: @@ -2688,7 +2705,7 @@ class FEIRStmtPesudoCatch : public FEIRStmtPesudoLabel { class FEIRStmtPesudoCatch2 : public FEIRStmtPesudoLabel2 { public: explicit FEIRStmtPesudoCatch2(uint32 qIdx0, uint32 qIdx1); - ~FEIRStmtPesudoCatch2() = default; + ~FEIRStmtPesudoCatch2() override = default; void AddCatchTypeNameIdx(GStrIdx typeNameIdx); protected: @@ -2702,7 +2719,7 @@ class FEIRStmtPesudoCatch2 : public FEIRStmtPesudoLabel2 { class FEIRStmtPesudoSafe : public FEIRStmt { public: explicit FEIRStmtPesudoSafe(bool isEnd); - ~FEIRStmtPesudoSafe() = default; + ~FEIRStmtPesudoSafe() override = default; protected: std::string DumpDotStringImpl() const override; @@ -2713,7 +2730,7 @@ class FEIRStmtPesudoSafe : public FEIRStmt { class FEIRStmtPesudoUnsafe : public FEIRStmt { 
public: explicit FEIRStmtPesudoUnsafe(bool isEnd); - ~FEIRStmtPesudoUnsafe() = default; + ~FEIRStmtPesudoUnsafe() override = default; protected: std::string DumpDotStringImpl() const override; @@ -2726,7 +2743,7 @@ class FEIRStmtPesudoComment : public FEIRStmt { public: explicit FEIRStmtPesudoComment(FEIRNodeKind argKind = kStmtPesudoComment); explicit FEIRStmtPesudoComment(const std::string &argContent); - ~FEIRStmtPesudoComment() = default; + ~FEIRStmtPesudoComment() override = default; void SetContent(const std::string &argContent) { content = argContent; } @@ -2742,7 +2759,7 @@ class FEIRStmtPesudoComment : public FEIRStmt { class FEIRStmtPesudoCommentForInst : public FEIRStmtPesudoComment { public: FEIRStmtPesudoCommentForInst(); - ~FEIRStmtPesudoCommentForInst() = default; + ~FEIRStmtPesudoCommentForInst() override = default; void SetFileIdx(uint32 argFileIdx) { fileIdx = argFileIdx; } @@ -2773,7 +2790,7 @@ class FEIRStmtIf : public FEIRStmt { FEIRStmtIf(UniqueFEIRExpr argCondExpr, std::list &argThenStmts, std::list &argElseStmts); - ~FEIRStmtIf() = default; + ~FEIRStmtIf() override = default; void SetCondExpr(UniqueFEIRExpr argCondExpr) { CHECK_NULL_FATAL(argCondExpr); @@ -2827,7 +2844,7 @@ class FEIRStmtDoWhile : public FEIRStmt { opcode(argOpcode), condExpr(std::move(argCondExpr)), bodyStmts(std::move(argBodyStmts)) {} - ~FEIRStmtDoWhile() = default; + ~FEIRStmtDoWhile() override = default; const std::list &GetBodyStmts() const { return bodyStmts; @@ -2858,7 +2875,7 @@ class FEIRStmtDoWhile : public FEIRStmt { class FEIRStmtBreak : public FEIRStmt { public: FEIRStmtBreak(): FEIRStmt(FEIRNodeKind::kStmtBreak) {} - ~FEIRStmtBreak() = default; + ~FEIRStmtBreak() override = default; void SetBreakLabelName(std::string name) { breakLabelName = std::move(name); @@ -2878,7 +2895,7 @@ class FEIRStmtBreak : public FEIRStmt { class FEIRStmtContinue : public FEIRStmt { public: FEIRStmtContinue(): FEIRStmt(FEIRNodeKind::kStmtContinue) {} - ~FEIRStmtContinue() = 
default; + ~FEIRStmtContinue() override = default; void SetLabelName(std::string name) { labelName = std::move(name); @@ -2897,7 +2914,7 @@ class FEIRStmtContinue : public FEIRStmt { class FEIRStmtLabel : public FEIRStmt { public: explicit FEIRStmtLabel(const std::string &name) : FEIRStmt(FEIRNodeKind::kStmtLabel), labelName(name) {} - ~FEIRStmtLabel() = default; + ~FEIRStmtLabel() override = default; const std::string &GetLabelName() const { return labelName; @@ -2917,7 +2934,7 @@ class FEIRStmtLabel : public FEIRStmt { class FEIRStmtAtomic : public FEIRStmt { public: explicit FEIRStmtAtomic(UniqueFEIRExpr expr); - ~FEIRStmtAtomic() = default; + ~FEIRStmtAtomic() override = default; protected: std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; @@ -2928,7 +2945,7 @@ class FEIRStmtGCCAsm : public FEIRStmt { public: FEIRStmtGCCAsm(const std::string &str, bool isGotoArg, bool isVolatileArg) : FEIRStmt(FEIRNodeKind::kStmtGCCAsm), asmStr(str), isGoto(isGotoArg), isVolatile(isVolatileArg) {} - ~FEIRStmtGCCAsm() = default; + ~FEIRStmtGCCAsm() override = default; void SetLabels(const std::vector &labelsArg) { labels = labelsArg; diff --git a/src/hir2mpl/common/include/feir_type.h b/src/hir2mpl/common/include/feir_type.h index 7daf3670c114570076153c7e7068d6e128414015..601c5046d557b19338e458c12b2407b88ea81dc6 100644 --- a/src/hir2mpl/common/include/feir_type.h +++ b/src/hir2mpl/common/include/feir_type.h @@ -182,7 +182,7 @@ class FEIRTypeDefault : public FEIRType { explicit FEIRTypeDefault(PrimType argPrimType); FEIRTypeDefault(PrimType argPrimType, const GStrIdx &argTypeNameIdx); FEIRTypeDefault(PrimType argPrimType, const GStrIdx &argTypeNameIdx, TypeDim argDim); - ~FEIRTypeDefault() = default; + ~FEIRTypeDefault() override = default; FEIRTypeDefault(const FEIRTypeDefault&) = delete; FEIRTypeDefault &operator=(const FEIRTypeDefault&) = delete; void LoadFromJavaTypeName(const std::string &typeName, bool inMpl = true); @@ -262,7 +262,7 @@ class FEIRTypeDefault 
: public FEIRType { class FEIRTypeByName : public FEIRTypeDefault { public: FEIRTypeByName(PrimType argPrimType, const std::string &argTypeName, TypeDim argDim = 0); - ~FEIRTypeByName() = default; + ~FEIRTypeByName() override = default; FEIRTypeByName(const FEIRTypeByName&) = delete; FEIRTypeByName &operator=(const FEIRTypeByName&) = delete; @@ -292,7 +292,7 @@ class FEIRTypeByName : public FEIRTypeDefault { class FEIRTypeNative : public FEIRType { public: explicit FEIRTypeNative(MIRType &argMIRType); - ~FEIRTypeNative() = default; + ~FEIRTypeNative() override = default; FEIRTypeNative(const FEIRTypeNative&) = delete; FEIRTypeNative &operator=(const FEIRTypeNative&) = delete; diff --git a/src/hir2mpl/common/include/feir_var_name.h b/src/hir2mpl/common/include/feir_var_name.h index 713ec5603470998548b51bc4611e33a2afd8f127..7d6d5620af01cc55e37fb5c33747ddccad473c18 100644 --- a/src/hir2mpl/common/include/feir_var_name.h +++ b/src/hir2mpl/common/include/feir_var_name.h @@ -37,7 +37,7 @@ class FEIRVarName : public FEIRVar { FEIRVarName(const std::string &argName, std::unique_ptr argType, bool argWithType = false) : FEIRVarName(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(argName), std::move(argType), argWithType) {} - virtual ~FEIRVarName() = default; + ~FEIRVarName() override = default; protected: std::string GetNameImpl(const MIRType &mirType) const override; diff --git a/src/hir2mpl/common/include/feir_var_reg.h b/src/hir2mpl/common/include/feir_var_reg.h index a37fdfbdc78a2dfe64c840ef1fe5eb229f9f41c5..8f672df1538ec944820dc551fc56fb82ee89dcdc 100644 --- a/src/hir2mpl/common/include/feir_var_reg.h +++ b/src/hir2mpl/common/include/feir_var_reg.h @@ -33,7 +33,7 @@ class FEIRVarReg : public FEIRVar { : FEIRVar(kind, std::move(argType)), regNum(argRegNum) {} - ~FEIRVarReg() = default; + ~FEIRVarReg() override = default; uint32 GetRegNum() const { return regNum; } @@ -61,7 +61,7 @@ class FEIRVarAccumulator : public FEIRVarReg { FEIRVarAccumulator(uint32 argRegNum, 
std::unique_ptr argType) : FEIRVarReg(argRegNum, std::move(argType), FEIRVarKind::kFEIRVarAccumulator) {} - ~FEIRVarAccumulator() = default; + ~FEIRVarAccumulator() override = default; protected: std::string GetNameImpl(const MIRType &mirType) const override; diff --git a/src/hir2mpl/common/include/feir_var_type_scatter.h b/src/hir2mpl/common/include/feir_var_type_scatter.h index d23a717015f7da2dd553f666fd5bc2490b938a6e..77ae937f2e2199b29c3e966f3f11446d7049b069 100644 --- a/src/hir2mpl/common/include/feir_var_type_scatter.h +++ b/src/hir2mpl/common/include/feir_var_type_scatter.h @@ -22,7 +22,7 @@ namespace maple { class FEIRVarTypeScatter : public FEIRVar { public: explicit FEIRVarTypeScatter(UniqueFEIRVar argVar); - ~FEIRVarTypeScatter() = default; + ~FEIRVarTypeScatter() override = default; void AddScatterType(const UniqueFEIRType &type); const std::unordered_set &GetScatterTypes() const { return scatterTypes; diff --git a/src/hir2mpl/common/include/generic_attrs.h b/src/hir2mpl/common/include/generic_attrs.h index 6b7406d30dd1516e305de461292c3369d429bb86..fecce382c3f52ce90f321e9322b00f86b9f464d7 100644 --- a/src/hir2mpl/common/include/generic_attrs.h +++ b/src/hir2mpl/common/include/generic_attrs.h @@ -46,7 +46,7 @@ class GenericAttrs { } void ResetAttr(GenericAttrKind x) { - attrFlag.reset(x); + (void)attrFlag.reset(x); } bool GetAttr(GenericAttrKind x) const { diff --git a/src/hir2mpl/common/include/hir2mpl_compiler.h b/src/hir2mpl/common/include/hir2mpl_compiler.h index 423e67f82d95349ed2e18fb1ac23ad05f0010c1a..728f026c1eb81f26decd7636142018d32666a298 100644 --- a/src/hir2mpl/common/include/hir2mpl_compiler.h +++ b/src/hir2mpl/common/include/hir2mpl_compiler.h @@ -22,7 +22,7 @@ #include "hir2mpl_options.h" #include "jbc_compiler_component.h" #include "fe_options.h" -#include "bc_compiler_component.h" +#include "bc_compiler_component-inl.h" #include "ark_annotation_processor.h" #include "dex_reader.h" #include "ast_compiler_component.h" diff --git 
a/src/hir2mpl/common/include/hir2mpl_compiler_component.h b/src/hir2mpl/common/include/hir2mpl_compiler_component.h index 3e9040f0995b45669455bf44dad327450090daca..ccfb02f6db4fa16843d9ee4b40662ecdb5cbe8ce 100644 --- a/src/hir2mpl/common/include/hir2mpl_compiler_component.h +++ b/src/hir2mpl/common/include/hir2mpl_compiler_component.h @@ -26,7 +26,7 @@ namespace maple { class FEFunctionProcessTask : public MplTask { public: explicit FEFunctionProcessTask(std::unique_ptr argFunction); - virtual ~FEFunctionProcessTask() = default; + ~FEFunctionProcessTask() override = default; protected: int RunImpl(MplTaskParam *param) override; @@ -40,7 +40,7 @@ class FEFunctionProcessSchedular : public MplScheduler { public: explicit FEFunctionProcessSchedular(const std::string &name) : MplScheduler(name) {} - virtual ~FEFunctionProcessSchedular() = default; + ~FEFunctionProcessSchedular() override = default; void AddFunctionProcessTask(std::unique_ptr function); void SetDumpTime(bool arg) { dumpTime = arg; @@ -139,6 +139,7 @@ class HIR2MPLCompilerComponent { std::list enumHelpers; std::unique_ptr phaseResultTotal; std::set compileFailedFEFunctions; + std::vector globalLTOFuncHelpers; }; } // namespace maple #endif diff --git a/src/hir2mpl/common/include/hir2mpl_option.h b/src/hir2mpl/common/include/hir2mpl_option.h index 017cf2b842c92b777fbac0f3eefb637221e6d1ca..da0a12c696b15a6f0428c9e83ac8ffe872292599 100644 --- a/src/hir2mpl/common/include/hir2mpl_option.h +++ b/src/hir2mpl/common/include/hir2mpl_option.h @@ -36,7 +36,6 @@ extern maplecl::Option asciimplt; extern maplecl::Option dumpInstComment; extern maplecl::Option noMplFile; extern maplecl::Option dumpLevel; -extern maplecl::Option dumpTime; extern maplecl::Option dumpComment; extern maplecl::Option dumpLOC; extern maplecl::Option dumpPhaseTime; @@ -53,7 +52,6 @@ extern maplecl::Option dep; extern maplecl::Option depsamename; extern maplecl::Option dumpFEIRBB; extern maplecl::Option dumpFEIRCFGGraph; -extern maplecl::Option 
wpaa; extern maplecl::Option debug; } diff --git a/src/hir2mpl/common/include/hir2mpl_options.h b/src/hir2mpl/common/include/hir2mpl_options.h index fcc4b4b78df8d68df9850464397f8da387a1647e..e2885d9cb8128812c2079e4b037871632b8fa187 100644 --- a/src/hir2mpl/common/include/hir2mpl_options.h +++ b/src/hir2mpl/common/include/hir2mpl_options.h @@ -54,8 +54,8 @@ class HIR2MPLOptions { bool ProcessVersion(const maplecl::OptionInterface &) const; // input control options - bool ProcessInClass(const maplecl::OptionInterface &mpltSys) const; - bool ProcessInJar(const maplecl::OptionInterface &mpltApk) const; + bool ProcessInClass(const maplecl::OptionInterface &inClass) const; + bool ProcessInJar(const maplecl::OptionInterface &inJar) const; bool ProcessInDex(const maplecl::OptionInterface &inDex) const; bool ProcessInAST(const maplecl::OptionInterface &inAst) const; bool ProcessInMAST(const maplecl::OptionInterface &inMast) const; @@ -97,6 +97,7 @@ class HIR2MPLOptions { bool ProcessEnableVariableArray(const maplecl::OptionInterface &) const; bool ProcessFuncInlineSize(const maplecl::OptionInterface &funcInliceSize) const; bool ProcessWPAA(const maplecl::OptionInterface &) const; + bool ProcessFM(const maplecl::OptionInterface &) const; // ast compiler options bool ProcessUseSignedChar(const maplecl::OptionInterface &) const; diff --git a/src/hir2mpl/common/include/simple_xml.h b/src/hir2mpl/common/include/simple_xml.h index 3e07b9baf450d0c5c0879d9d706ce4048b3272d1..f289333e20b5b454e5f82b3db02548d24e044aec 100644 --- a/src/hir2mpl/common/include/simple_xml.h +++ b/src/hir2mpl/common/include/simple_xml.h @@ -99,7 +99,7 @@ class SimpleXMLElemMultiLine : public SimpleXMLElem { public: SimpleXMLElemMultiLine(MapleAllocator &alloc, const std::string &cat); SimpleXMLElemMultiLine(MapleAllocator &alloc, const std::string &cat, uint32 argID); - ~SimpleXMLElemMultiLine() = default; + ~SimpleXMLElemMultiLine() override = default; void AddLine(const std::string &line); protected: diff 
--git a/src/hir2mpl/common/src/basic_io.cpp b/src/hir2mpl/common/src/basic_io.cpp index 178142ec79e7ac710d8b1d43c0d8d8f62e5bce90..d31d814d70f43d5e708639c5d66d48d28da563b5 100644 --- a/src/hir2mpl/common/src/basic_io.cpp +++ b/src/hir2mpl/common/src/basic_io.cpp @@ -252,62 +252,29 @@ double BasicIORead::ReadDouble(bool &success) { } void BasicIORead::ReadBufferUInt8(uint8 *dst, uint32 length) { - const uint8 *p = GetSafeBuffer(length); - pos += length; - errno_t err = memcpy_s(dst, length, p, length); - CHECK_FATAL(err == EOK, "memcpy_s failed"); + ReadBuffer8BitLong(dst, length); } void BasicIORead::ReadBufferUInt8(uint8 *dst, uint32 length, bool &success) { - const uint8 *p = GetBuffer(length); - if (p == nullptr) { - success = false; - return; - } - pos += length; - success = true; - errno_t err = memcpy_s(dst, length, p, length); - CHECK_FATAL(err == EOK, "memcpy_s failed"); + ReadBuffer8BitLong(dst, length, success); } void BasicIORead::ReadBufferInt8(int8 *dst, uint32 length) { CHECK_NULL_FATAL(dst); - const uint8 *p = GetSafeBuffer(length); - pos += length; - errno_t err = memcpy_s(dst, length, p, length); - CHECK_FATAL(err == EOK, "memcpy_s failed"); + ReadBuffer8BitLong(dst, length); } void BasicIORead::ReadBufferInt8(int8 *dst, uint32 length, bool &success) { CHECK_NULL_FATAL(dst); - const uint8 *p = GetBuffer(length); - if (p == nullptr) { - success = false; - return; - } - pos += length; - success = true; - errno_t err = memcpy_s(dst, length, p, length); - CHECK_FATAL(err == EOK, "memcpy_s failed"); + ReadBuffer8BitLong(dst, length, success); } void BasicIORead::ReadBufferChar(char *dst, uint32 length) { - const uint8 *p = GetSafeBuffer(length); - pos += length; - errno_t err = memcpy_s(dst, length, p, length); - CHECK_FATAL(err == EOK, "memcpy_s failed"); + ReadBuffer8BitLong(dst, length); } void BasicIORead::ReadBufferChar(char *dst, uint32 length, bool &success) { - const uint8 *p = GetBuffer(length); - if (p == nullptr) { - success = false; - return; 
- } - pos += length; - success = true; - errno_t err = memcpy_s(dst, length, p, length); - CHECK_FATAL(err == EOK, "memcpy_s failed"); + ReadBuffer8BitLong(dst, length, success); } std::string BasicIORead::ReadString(uint32 length) { diff --git a/src/hir2mpl/common/src/enhance_c_checker.cpp b/src/hir2mpl/common/src/enhance_c_checker.cpp index e5d7604b7388d74309114f5f53f5d0a8069c65b6..e62bb1bc3d574037042170a9bb909c6440bbcf41 100644 --- a/src/hir2mpl/common/src/enhance_c_checker.cpp +++ b/src/hir2mpl/common/src/enhance_c_checker.cpp @@ -18,7 +18,6 @@ #include "ast_stmt.h" #include "ast_decl_builder.h" #include "feir_builder.h" -#include "fe_manager.h" #include "fe_macros.h" namespace maple { @@ -49,7 +48,8 @@ void ASTParser::ProcessNonnullFuncPtrAttrs(MapleAllocator &allocator, const clan retAttr.SetAttr(ATTR_nonnull); } MIRType *newFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( - funcType->GetRetTyIdx(), funcType->GetParamTypeList(), attrsVec, funcType->IsVarargs(), retAttr); + funcType->GetRetTyIdx(), funcType->GetParamTypeList(), + attrsVec, funcType->GetFuncAttrs(), retAttr); astVar.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); } @@ -568,7 +568,8 @@ void ASTParser::ProcessBoundaryFuncPtrAttrs(MapleAllocator &allocator, const cla } if (isUpdated) { MIRType *newFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( - funcType->GetRetTyIdx(), funcType->GetParamTypeList(), attrsVec, funcType->IsVarargs(), retAttr); + funcType->GetRetTyIdx(), funcType->GetParamTypeList(), + attrsVec, funcType->GetFuncAttrs(), retAttr); astDecl.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); } @@ -649,7 +650,8 @@ void ASTParser::ProcessBoundaryFuncPtrAttrsByIndex(MapleAllocator &allocator, co } if (isUpdated) { 
MIRType *newFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( - funcType.GetRetTyIdx(), funcType.GetParamTypeList(), attrsVec, funcType.IsVarargs(), retAttr); + funcType.GetRetTyIdx(), funcType.GetParamTypeList(), + attrsVec, funcType.GetFuncAttrs(), retAttr); astDecl.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); } @@ -1592,7 +1594,7 @@ void ASTFunc::InsertBoundaryCheckingInRet(std::list &stmts) cons exprs.emplace_back(std::move(baseExpr)); UniqueFEIRStmt stmt = std::make_unique(OP_returnassertle, std::move(exprs)); stmt->SetSrcLoc(stmts.back()->GetSrcLoc()); - stmts.insert(--stmts.cend(), std::move(stmt)); + (void)stmts.insert(--stmts.cend(), std::move(stmt)); } void ENCChecker::InsertBoundaryAssignChecking(MIRBuilder &mirBuilder, std::list &ans, @@ -1684,33 +1686,11 @@ void ENCChecker::InsertBoundaryLenExprInAtts(TypeAttrs &attr, const UniqueFEIREx } void ENCChecker::InsertBoundaryInAtts(FieldAttrs &attr, const BoundaryInfo &boundary) { - attr.GetAttrBoundary().SetIsBytedLen(boundary.isBytedLen); - if (boundary.lenParamIdx != -1) { - attr.GetAttrBoundary().SetLenParamIdx(boundary.lenParamIdx); - } - if (boundary.lenExpr == nullptr) { - return; - } - std::list nullStmts; - UniqueFEIRExpr lenExpr = boundary.lenExpr->Emit2FEExpr(nullStmts); - uint32 hash = lenExpr->Hash(); - FEManager::GetTypeManager().InsertBoundaryLenExprHashMap(hash, std::move(lenExpr)); // save expr cache - attr.GetAttrBoundary().SetLenExprHash(hash); + InsertBoundaryInFieldOrFuncAtts(attr, boundary); } void ENCChecker::InsertBoundaryInAtts(FuncAttrs &attr, const BoundaryInfo &boundary) { - attr.GetAttrBoundary().SetIsBytedLen(boundary.isBytedLen); - if (boundary.lenParamIdx != -1) { - attr.GetAttrBoundary().SetLenParamIdx(boundary.lenParamIdx); - } - if (boundary.lenExpr == nullptr) { - return; - } - std::list nullStmts; - UniqueFEIRExpr lenExpr = 
boundary.lenExpr->Emit2FEExpr(nullStmts); - uint32 hash = lenExpr->Hash(); - FEManager::GetTypeManager().InsertBoundaryLenExprHashMap(hash, std::move(lenExpr)); // save expr cache - attr.GetAttrBoundary().SetLenExprHash(hash); + InsertBoundaryInFieldOrFuncAtts(attr, boundary); } // --------------------------- @@ -1756,7 +1736,7 @@ void FEIRStmtDAssign::AssignBoundaryVarAndChecking(MIRBuilder &mirBuilder, std:: if (stmt != nullptr) { stmt->SetSrcLoc(loc); std::list stmtnodes = stmt->GenMIRStmts(mirBuilder); - ans.insert(ans.cend(), stmtnodes.cbegin(), stmtnodes.cend()); + (void)ans.insert(ans.cend(), stmtnodes.cbegin(), stmtnodes.cend()); } } ENCChecker::AssignBoundaryVar(mirBuilder, dstExpr, expr, lenExpr, ans); @@ -1787,7 +1767,7 @@ void FEIRStmtIAssign::AssignBoundaryVarAndChecking(MIRBuilder &mirBuilder, std:: if (stmt != nullptr) { stmt->SetSrcLoc(loc); std::list stmtnodes = stmt->GenMIRStmts(mirBuilder); - ans.insert(ans.cend(), stmtnodes.cbegin(), stmtnodes.cend()); + (void)ans.insert(ans.cend(), stmtnodes.cbegin(), stmtnodes.cend()); } } ENCChecker::AssignBoundaryVar(mirBuilder, dstExpr, baseExpr, lenExpr, ans); diff --git a/src/hir2mpl/common/src/fe_file_type.cpp b/src/hir2mpl/common/src/fe_file_type.cpp index 65e4a594e8b2e06e5ca70bf547af2703a713b9c4..77aed1c0df27a2cac2d7fc9e22279e20532ae28a 100644 --- a/src/hir2mpl/common/src/fe_file_type.cpp +++ b/src/hir2mpl/common/src/fe_file_type.cpp @@ -90,6 +90,8 @@ void FEFileType::LoadDefault() { RegisterMagicNumber(kDex, kMagicDex); RegisterExtName(kAST, "ast"); RegisterMagicNumber(kAST, kMagicAST); + RegisterExtName(kO, "o"); + RegisterMagicNumber(kO, kMagicAST); RegisterExtName(kMAST, "mast"); RegisterMagicNumber(kMAST, kMagicMAST); } diff --git a/src/hir2mpl/common/src/fe_function.cpp b/src/hir2mpl/common/src/fe_function.cpp index 2e55bb4b07110de0ee7fc69a394272a4d94da0a0..e347ed272edd2583dde3c2d021fc8f88230416d5 100644 --- a/src/hir2mpl/common/src/fe_function.cpp +++ b/src/hir2mpl/common/src/fe_function.cpp @@ 
-343,7 +343,7 @@ bool FEFunction::SetupFEIRStmtJavaTry(const std::string &phaseName) { if (stmt->GetKind() == FEIRNodeKind::kStmtPesudoJavaTry) { FEIRStmtPesudoJavaTry *stmtJavaTry = static_cast(stmt); for (uint32 labelIdx : stmtJavaTry->GetCatchLabelIdxVec()) { - auto it = mapLabelStmt.find(labelIdx); + std::map::const_iterator it = mapLabelStmt.find(labelIdx); CHECK_FATAL(it != mapLabelStmt.cend(), "label is not found"); stmtJavaTry->AddCatchTarget(*(it->second)); } @@ -377,7 +377,7 @@ bool FEFunction::SetupFEIRStmtBranch(const std::string &phaseName) { } bool FEFunction::SetupFEIRStmtGoto(FEIRStmtGoto &stmt) { - auto it = mapLabelStmt.find(stmt.GetLabelIdx()); + std::map::const_iterator it = mapLabelStmt.find(stmt.GetLabelIdx()); if (it == mapLabelStmt.cend()) { ERR(kLncErr, "target not found for stmt goto"); return false; @@ -388,7 +388,7 @@ bool FEFunction::SetupFEIRStmtGoto(FEIRStmtGoto &stmt) { bool FEFunction::SetupFEIRStmtSwitch(FEIRStmtSwitch &stmt) { // default target - auto itDefault = mapLabelStmt.find(stmt.GetDefaultLabelIdx()); + std::map::const_iterator itDefault = mapLabelStmt.find(stmt.GetDefaultLabelIdx()); if (itDefault == mapLabelStmt.cend()) { ERR(kLncErr, "target not found for stmt goto"); return false; @@ -397,7 +397,7 @@ bool FEFunction::SetupFEIRStmtSwitch(FEIRStmtSwitch &stmt) { // value targets for (const auto &itItem : stmt.GetMapValueLabelIdx()) { - auto itTarget = mapLabelStmt.find(itItem.second); + std::map::const_iterator itTarget = mapLabelStmt.find(itItem.second); if (itTarget == mapLabelStmt.cend()) { ERR(kLncErr, "target not found for stmt goto"); return false; @@ -551,7 +551,8 @@ void FEFunction::LinkBB(FEIRBB &predBB, FEIRBB &succBB) { } FEIRBB &FEFunction::GetFEIRBBByStmt(const FEIRStmt &stmt) { - auto it = feirStmtBBMap.find(&stmt); + std::map::const_iterator it = feirStmtBBMap.find(&stmt); + CHECK_FATAL(it != feirStmtBBMap.cend(), "FEIRStmt cannot be found."); return *(it->second); } @@ -592,9 +593,7 @@ void 
FEFunction::InsertCheckPointForTrys() { currTry = static_cast(currStmt); checkPointInTry = nullptr; } - if ((currTry != nullptr) && - (currStmt->IsThrowable()) && - ((checkPointInTry == nullptr) || currStmt->HasDef())) { + if ((currTry != nullptr) && (currStmt->IsThrowable()) && ((checkPointInTry == nullptr) || currStmt->HasDef())) { FEIRBB &currBB = GetFEIRBBByStmt(*currStmt); if (currStmt == currBB.GetStmtNoAuxHead()) { checkPointInTry = &(currBB.GetCheckPointIn()); @@ -633,12 +632,14 @@ void FEFunction::InitTrans4AllVars() { } FEIRStmtPesudoJavaTry2 &FEFunction::GetJavaTryByCheckPoint(FEIRStmtCheckPoint &checkPoint) { - auto it = checkPointJavaTryMap.find(&checkPoint); + std::map::const_iterator it = checkPointJavaTryMap.find(&checkPoint); + CHECK_FATAL(it != checkPointJavaTryMap.cend(), "FEIRStmtCheckPoint cannot be found."); return *(it->second); } FEIRStmtCheckPoint &FEFunction::GetCheckPointByFEIRStmt(const FEIRStmt &stmt) { - auto it = feirStmtCheckPointMap.find(&stmt); + std::map::const_iterator it = feirStmtCheckPointMap.find(&stmt); + CHECK_FATAL(it != feirStmtCheckPointMap.cend(), "FEIRStmt cannot be found."); return *(it->second); } @@ -722,7 +723,8 @@ void FEFunction::InsertDAssignStmt4TypeCvt(const FEIRVarTypeScatter &fromVar, co } FEIRStmt &FEFunction::GetStmtByDefVarTypeScatter(const FEIRVarTypeScatter &varTypeScatter) { - auto it = defVarTypeScatterStmtMap.find(&varTypeScatter); + std::map::const_iterator it = defVarTypeScatterStmtMap.find(&varTypeScatter); + CHECK_FATAL(it != defVarTypeScatterStmtMap.cend(), "varTypeScatter cannot be found."); return *(it->second); } diff --git a/src/hir2mpl/common/src/fe_function_phase_result.cpp b/src/hir2mpl/common/src/fe_function_phase_result.cpp index 16ce9e51bbf2048c9a7733a635e0cbb49b69feee..624e1f9264adc9608ae8bdb514142cdd0da02d03 100644 --- a/src/hir2mpl/common/src/fe_function_phase_result.cpp +++ b/src/hir2mpl/common/src/fe_function_phase_result.cpp @@ -33,7 +33,7 @@ void 
FEFunctionPhaseResult::Combine(const FEFunctionPhaseResult &result) { void FEFunctionPhaseResult::Dump() { for (const std::string &name :phaseNames) { - auto it = phaseTimes.find(name); + std::map::const_iterator it = phaseTimes.find(name); CHECK_FATAL(it != phaseTimes.cend(), "phase time is undefined for %s", name.c_str()); INFO(kLncInfo, "[PhaseTime] %s: %lld ns", name.c_str(), it->second); } diff --git a/src/hir2mpl/common/src/fe_input_helper.cpp b/src/hir2mpl/common/src/fe_input_helper.cpp index 2dcefcd3a12d6aaac05b3e4ef506e7b2adc22b4a..de3b3c9ac182fb43f195434461c9f7cc6d06e1b8 100644 --- a/src/hir2mpl/common/src/fe_input_helper.cpp +++ b/src/hir2mpl/common/src/fe_input_helper.cpp @@ -302,7 +302,7 @@ void FEInputStructHelper::ProcessExtraFields() { } } // insert at the beginning - structType->GetFields().insert(structType->GetFields().cbegin(), + (void)structType->GetFields().insert(structType->GetFields().cbegin(), FieldPair(fieldStrIdx, TyIdxFieldAttrPair(fieldType->GetTypeIndex(), typeAttrs))); } } diff --git a/src/hir2mpl/common/src/fe_options.cpp b/src/hir2mpl/common/src/fe_options.cpp index 848f0a03719f6121ef415d2a6a2241aa135645e2..1a03ba886c6e7c287c553842a4406b0e080a4016 100644 --- a/src/hir2mpl/common/src/fe_options.cpp +++ b/src/hir2mpl/common/src/fe_options.cpp @@ -55,7 +55,7 @@ void FEOptions::AddInputDexFile(const std::string &fileName) { void FEOptions::AddInputASTFile(const std::string &fileName) { FEFileType::FileType type = FEFileType::GetInstance().GetFileTypeByMagicNumber(fileName); - if (type == FEFileType::FileType::kAST) { + if (type == FEFileType::FileType::kAST || type == FEFileType::FileType::kO) { inputASTFiles.push_back(fileName); } else { WARN(kLncWarn, "invalid input AST file %s...skipped", fileName.c_str()); diff --git a/src/hir2mpl/common/src/fe_type_manager.cpp b/src/hir2mpl/common/src/fe_type_manager.cpp index 7f879fc929f5721e2be5ab6a3f3b5215d67d3897..23c956bb0d98a0dfba12b9608562845e105603f0 100644 --- 
a/src/hir2mpl/common/src/fe_type_manager.cpp +++ b/src/hir2mpl/common/src/fe_type_manager.cpp @@ -80,7 +80,7 @@ FETypeManager::~FETypeManager() { funcMCCStaticFieldSetObject = nullptr; } -void FETypeManager::ReleaseMIRFuncCodeMempool(std::unordered_map &map) { +void FETypeManager::ReleaseMIRFuncCodeMempool(std::unordered_map &map) const { for (auto it = map.begin(); it != map.end(); ++it) { it->second->ReleaseCodeMemory(); } diff --git a/src/hir2mpl/common/src/fe_utils.cpp b/src/hir2mpl/common/src/fe_utils.cpp index 9b56ec667fea73d2903b51b3bb9027c6c82694cb..17631a7edff50133cd56c6e80ea0b9b362618f34 100644 --- a/src/hir2mpl/common/src/fe_utils.cpp +++ b/src/hir2mpl/common/src/fe_utils.cpp @@ -30,6 +30,7 @@ const std::string FEUtils::kFloat = "F"; const std::string FEUtils::kDouble = "D"; const std::string FEUtils::kVoid = "V"; const std::string FEUtils::kThis = "_this"; +const std::string FEUtils::kDotDot = "/.."; const std::string FEUtils::kMCCStaticFieldGetBool = "MCC_StaticFieldGetBool"; const std::string FEUtils::kMCCStaticFieldGetByte = "MCC_StaticFieldGetByte"; const std::string FEUtils::kMCCStaticFieldGetShort = "MCC_StaticFieldGetShort"; @@ -193,14 +194,59 @@ uint32 FEUtils::GetSequentialNumber() { return unnamedSymbolIdx++; } -std::string FEUtils::GetFileNameHashStr(const std::string &fileName, uint32 seed) { - const char *name = fileName.c_str(); +// erase ".." in fileName +std::string FEUtils::NormalizeFileName(std::string fileName) { + auto locOfDotDot = fileName.find(kDotDot); + if (locOfDotDot == std::string::npos) { + return fileName; + } + + constexpr char forwardSlash = '/'; + // slash, '/', before ".." must not be the first character. + if (locOfDotDot <= 1 || fileName[locOfDotDot] != forwardSlash) { + return fileName; + } + + // calculate position of the slash before the backed-up directory. 
+ auto locOfBackedDir = fileName.rfind(forwardSlash, locOfDotDot - 1); + if (locOfBackedDir == std::string::npos) { + return fileName; + } + + // erase the backed-up directory and ".." from fileName. + // For example, + // /home/usrname/backed-up-dir/../nextDir/*.c ==> + // /home/usrname/nextDir/*.c + constexpr size_t charNumNeedSkip = 3U; + (void) fileName.erase(fileName.begin() + static_cast(locOfBackedDir), + fileName.begin() + static_cast(locOfDotDot + charNumNeedSkip)); + return NormalizeFileName(fileName); +} + +// struct __pthread_cond_s::(anonymous at /opt/RTOS/208.2.0//bits/thread-shared-types.h:97:5) +// ==> struct __pthread_cond_s +void FEUtils::EraseFileNameforClangTypeStr(std::string &typeStr) { + auto start = typeStr.find(':'); + auto end = typeStr.find(')'); + if (start == std::string::npos || end == std::string::npos) { + return; + } + (void) typeStr.erase(start, (end - start) + 1U); +} + +std::string FEUtils::GetHashStr(const std::string &str, uint32 seed) { + const char *name = str.c_str(); uint32 hash = 0; - while (*name) { + while (*name != 0) { uint8_t uName = *name++; hash = hash * seed + uName; } - return kRenameKeyWord + std::to_string(hash); + return std::to_string(hash); +} + +std::string FEUtils::GetFileNameHashStr(const std::string &fileName, uint32 seed) { + std::string result = kRenameKeyWord + GetHashStr(fileName, seed); + return result; } std::string FEUtils::GetSequentialName(const std::string &prefix) { @@ -321,6 +367,10 @@ MIRConst *FEUtils::CreateImplicitConst(MIRType *type) { return FEManager::GetModule().GetMemPool()->New( 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); } + case PTY_f128: { + return FEManager::GetModule().GetMemPool()->New( + nullptr, *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + } case PTY_ptr: { return GlobalTables::GetIntConstTable().GetOrCreateIntConst( 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); diff --git a/src/hir2mpl/common/src/feir_builder.cpp 
b/src/hir2mpl/common/src/feir_builder.cpp index 9b520cb468e88ced61d45c17609ff793a5bdb763..35af8d690a0c9676a717d1cdd4e04e13dc81ef66 100644 --- a/src/hir2mpl/common/src/feir_builder.cpp +++ b/src/hir2mpl/common/src/feir_builder.cpp @@ -197,6 +197,10 @@ UniqueFEIRExpr FEIRBuilder::CreateExprConstPtr(int64 val) { return std::make_unique(val, PTY_ptr); } +UniqueFEIRExpr FEIRBuilder::CreateExprConstF128(const uint64_t val[2]) { + return std::make_unique(val); +} + // Create a const expr of specified prime type with fixed value. // Note that loss of precision, byte value is only supported. UniqueFEIRExpr FEIRBuilder::CreateExprConstAnyScalar(PrimType primType, int64 val) { @@ -212,15 +216,15 @@ UniqueFEIRExpr FEIRBuilder::CreateExprConstAnyScalar(PrimType primType, int64 va case PTY_i64: case PTY_ptr: case PTY_a64: + case PTY_u128: + case PTY_i128: return std::make_unique(val, primType); - case PTY_f128: - // Not Implemented - CHECK_FATAL(false, "Not Implemented"); - return nullptr; case PTY_f32: return CreateExprConstF32(static_cast(val)); case PTY_f64: return CreateExprConstF64(static_cast(val)); + case PTY_f128: + return CreateExprConstAnyScalar(PTY_f128, std::pair({static_cast(val), 0})); default: if (IsPrimitiveVector(primType)) { return CreateExprVdupAnyVector(primType, val); @@ -230,6 +234,18 @@ UniqueFEIRExpr FEIRBuilder::CreateExprConstAnyScalar(PrimType primType, int64 va } } +UniqueFEIRExpr FEIRBuilder::CreateExprConstAnyScalar(PrimType primType, std::pair val) { + switch (primType) { + case PTY_f128: { + const uint64_t valArray[2] = {val.first, val.second}; + return CreateExprConstF128(static_cast(valArray)); + } + default: + CHECK_FATAL(false, "unsupported const prime type"); + return nullptr; + } +} + UniqueFEIRExpr FEIRBuilder::CreateExprVdupAnyVector(PrimType primtype, int64 val) { MIRIntrinsicID intrinsic; switch (primtype) { @@ -618,12 +634,12 @@ UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprE return stmt; } 
-UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, - UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType, +UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmtForC(std::vector expr, UniqueFEIRType arrayType, UniqueFEIRType elemType, const std::string &argArrayName) { - UniqueFEIRStmt stmt = std::make_unique(std::move(exprElem), std::move(exprArray), - std::move(exprIndex), std::move(arrayType), - std::move(elemType), argArrayName); + UniqueFEIRStmt stmt = std::make_unique(std::move(expr[0]), // 0: exprElem + std::move(expr[1]), // 1: exprArray + std::move(expr[2]), // 2: exprIndex + std::move(arrayType), std::move(elemType), argArrayName); return stmt; } diff --git a/src/hir2mpl/common/src/feir_stmt.cpp b/src/hir2mpl/common/src/feir_stmt.cpp index 1fa561c74432ca9cba2d3f702f9e6b3a5ec68675..2d8e916b634e8cbb4af9703c0f5b31e4e90b7264 100644 --- a/src/hir2mpl/common/src/feir_stmt.cpp +++ b/src/hir2mpl/common/src/feir_stmt.cpp @@ -2388,19 +2388,38 @@ FEIRExprConst::FEIRExprConst() value.u64 = 0; } -FEIRExprConst::FEIRExprConst(int64 val, PrimType argType) - : FEIRExpr(FEIRNodeKind::kExprConst) { +FEIRExprConst::FEIRExprConst(int64 val, PrimType argType) : FEIRExpr(FEIRNodeKind::kExprConst) { ASSERT(type != nullptr, "type is nullptr"); type->SetPrimType(argType); - value.i64 = val; + if (IsInt128Ty(argType)) { + value.i128[0] = static_cast(val); + value.i128[1] = val < 0 ? 
-1 : 0; + } else { + value.i64 = val; + } CheckRawValue2SetZero(); } -FEIRExprConst::FEIRExprConst(uint64 val, PrimType argType) - : FEIRExpr(FEIRNodeKind::kExprConst) { +FEIRExprConst::FEIRExprConst(uint64 val, PrimType argType) : FEIRExpr(FEIRNodeKind::kExprConst) { ASSERT(type != nullptr, "type is nullptr"); type->SetPrimType(argType); - value.u64 = val; + if (IsInt128Ty(argType)) { + value.i128[0] = val; + value.i128[1] = 0; + } else { + value.u64 = val; + } + CheckRawValue2SetZero(); +} + +FEIRExprConst::FEIRExprConst(const IntVal &val, PrimType argType) : FEIRExpr(FEIRNodeKind::kExprConst) { + ASSERT(type != nullptr, "type is nullptr"); + type->SetPrimType(argType); + if (!IsInt128Ty(argType)) { + value.i64 = val.GetExtValue(); + } else { + Int128Util::CopyInt128(value.i128, val.GetRawData()); + } CheckRawValue2SetZero(); } @@ -2427,10 +2446,21 @@ FEIRExprConst::FEIRExprConst(double val) CheckRawValue2SetZero(); } +FEIRExprConst::FEIRExprConst(const uint64_t *val) + : FEIRExpr(FEIRNodeKind::kExprConst) { + ASSERT(type != nullptr, "type is nullptr"); + type->SetPrimType(PTY_f128); + value.f128[0] = val[0]; + value.f128[1] = val[1]; + CheckRawValue2SetZero(); +} + std::unique_ptr FEIRExprConst::CloneImpl() const { std::unique_ptr expr = std::make_unique(); FEIRExprConst *exprConst = static_cast(expr.get()); - exprConst->value.u64 = value.u64; + constexpr size_t cpySize = sizeof(value); + errno_t err = memcpy_s(&exprConst->value, cpySize, &value, cpySize); + CHECK_FATAL(err == EOK, "memcpy_s failed"); ASSERT(type != nullptr, "type is nullptr"); exprConst->type->SetPrimType(type->GetPrimType()); exprConst->CheckRawValue2SetZero(); @@ -2450,15 +2480,22 @@ BaseNode *FEIRExprConst::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { case PTY_i16: case PTY_i32: case PTY_i64: - case PTY_i128: - case PTY_u128: case PTY_ref: case PTY_ptr: return mirBuilder.CreateIntConst(static_cast(value.i64), primType); + case PTY_i128: + case PTY_u128: + return 
mirBuilder.CreateInt128Const(value.i128, primType); case PTY_f32: return mirBuilder.CreateFloatConst(value.f32); case PTY_f64: return mirBuilder.CreateDoubleConst(value.f64); + case PTY_f128: { + uint64 v[2]; + errno_t err = memcpy_s(v, sizeof(v), value.f128, sizeof(long double)); + CHECK_FATAL(err == EOK, "memcpy_s failed"); + return mirBuilder.CreateFloat128Const(v); + } default: ERR(kLncErr, "unsupported const kind"); return nullptr; @@ -2471,8 +2508,10 @@ uint32 FEIRExprConst::HashImpl() const { } void FEIRExprConst::CheckRawValue2SetZero() { - if (value.u64 == 0) { - type->SetZero(true); + if (IsInt128Ty(type->GetPrimType())) { + type->SetZero(value.i128[0] == 0 && value.i128[1] == 0); + } else { + type->SetZero(value.u64 == 0); } } @@ -3849,14 +3888,11 @@ std::unique_ptr FEIRExprAtomic::CloneImpl() const { return expr; } -TyIdx FEIRExprAtomic::GetTyIdx(MIRBuilder &mirBuilder) const { +TyIdx FEIRExprAtomic::GetTyIdx() const { TyIdx typeIndex(0); - if (atomicOp == kAtomicOpExchange) { - typeIndex = val2Type->GetTypeIndex(); - } else { - typeIndex = val1Type->GetTypeIndex(); + if (refType != nullptr) { + typeIndex = refType->GetTypeIndex(); } - return typeIndex; } @@ -3907,7 +3943,7 @@ BaseNode *FEIRExprAtomic::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { if (atomicOp == kAtomicOpCompareExchange || atomicOp == kAtomicOpCompareExchangeN) { args.emplace_back(orderFailExpr->GenMIRNode(mirBuilder)); } - TyIdx typeIndex = GetTyIdx(mirBuilder); + TyIdx typeIndex = GetTyIdx(); return ret ? 
mirBuilder.CreateStmtIntrinsicCallAssigned(intrinsicID, std::move(args), retVar, typeIndex) : mirBuilder.CreateStmtIntrinsicCall(intrinsicID, std::move(args), typeIndex); } diff --git a/src/hir2mpl/common/src/feir_type.cpp b/src/hir2mpl/common/src/feir_type.cpp index 9052fa4f9de3fbfaf32df7652b7cdafd84a2d65c..378b18d74d96dc26df9c5dc1d85f298ad739dbc6 100644 --- a/src/hir2mpl/common/src/feir_type.cpp +++ b/src/hir2mpl/common/src/feir_type.cpp @@ -264,6 +264,8 @@ MIRType *FEIRTypeDefault::GenerateMIRTypeForPrim() const { return GlobalTables::GetTypeTable().GetFloat(); case PTY_f64: return GlobalTables::GetTypeTable().GetDouble(); + case PTY_f128: + return GlobalTables::GetTypeTable().GetFloat128(); case PTY_u1: return GlobalTables::GetTypeTable().GetUInt1(); case PTY_u8: diff --git a/src/hir2mpl/common/src/hir2mpl_compiler_component.cpp b/src/hir2mpl/common/src/hir2mpl_compiler_component.cpp index ff7abd5bb0ef6257f9a299b6296ef9c69fba5fe7..929d8c2c664903fcfd6f69e4565bd005972cfca2 100644 --- a/src/hir2mpl/common/src/hir2mpl_compiler_component.cpp +++ b/src/hir2mpl/common/src/hir2mpl_compiler_component.cpp @@ -88,9 +88,16 @@ bool HIR2MPLCompilerComponent::ProcessDeclImpl() { ASSERT_NOT_NULL(helper); success = helper->ProcessDecl() ? success : false; } - for (FEInputMethodHelper *helper : globalFuncHelpers) { - ASSERT_NOT_NULL(helper); - success = helper->ProcessDecl() ? success : false; + if (!FEOptions::GetInstance().GetWPAA()) { + for (FEInputMethodHelper *helper : globalFuncHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->ProcessDecl() ? success : false; + } + } else { + for (FEInputMethodHelper *helper : globalLTOFuncHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->ProcessDecl() ? success : false; + } } for (FEInputGlobalVarHelper *helper : globalVarHelpers) { ASSERT_NOT_NULL(helper); @@ -106,6 +113,8 @@ bool HIR2MPLCompilerComponent::ProcessDeclImpl() { success = helper->ProcessDecl() ? 
success : false; } } + // Traverse type table and update func type + UpdateMIRFuncTypeFirstArgRet(); timer.StopAndDumpTimeMS("HIR2MPLCompilerComponent::ProcessDecl()"); return success; } diff --git a/src/hir2mpl/common/src/hir2mpl_option.cpp b/src/hir2mpl/common/src/hir2mpl_option.cpp index 99c60d11b5688c8de2aea44b38c0a26af5a64ed4..11bb2ff96c1065c494e3b5f11fbfc6c9a47f3cfa 100644 --- a/src/hir2mpl/common/src/hir2mpl_option.cpp +++ b/src/hir2mpl/common/src/hir2mpl_option.cpp @@ -97,10 +97,6 @@ maplecl::Option dumpLevel({"--dump-level", "-d"}, " [3] dump debug info", {hir2mplCategory}); -maplecl::Option dumpTime({"--dump-time", "-dump-time"}, - " -dump-time : dump time", - {hir2mplCategory}); - maplecl::Option dumpComment({"--dump-comment", "-dump-comment"}, " -dump-comment : gen comment stmt", {hir2mplCategory}); @@ -176,11 +172,6 @@ maplecl::Option dumpFEIRCFGGraph({"-dump-cfg", "--dump-cfg"}, " : dump cfg graph to dot file", {hir2mplCategory}); -maplecl::Option wpaa({"-wpaa", "--wpaa"}, - " -dump-cfg funcname1,funcname2\n" \ - " -wpaa : enable whole program ailas analysis", - {hir2mplCategory}); - maplecl::Option debug({"-debug", "--debug"}, " -debug : dump enabled options", {hir2mplCategory}); diff --git a/src/hir2mpl/common/src/hir2mpl_options.cpp b/src/hir2mpl/common/src/hir2mpl_options.cpp index 89ea620b54422d0319d352a760a9dcbb796f3f9f..dc1951252c8d4298ca678398620cf71d1cbcc88e 100644 --- a/src/hir2mpl/common/src/hir2mpl_options.cpp +++ b/src/hir2mpl/common/src/hir2mpl_options.cpp @@ -83,7 +83,7 @@ bool HIR2MPLOptions::InitFactory() { // debug info control options RegisterFactoryFunction(&opts::hir2mpl::dumpLevel, &HIR2MPLOptions::ProcessDumpLevel); - RegisterFactoryFunction(&opts::hir2mpl::dumpTime, + RegisterFactoryFunction(&opts::dumpTime, &HIR2MPLOptions::ProcessDumpTime); RegisterFactoryFunction(&opts::hir2mpl::dumpComment, &HIR2MPLOptions::ProcessDumpComment); @@ -114,7 +114,7 @@ bool HIR2MPLOptions::InitFactory() { &HIR2MPLOptions::ProcessNoBarrier); // 
ast compiler options - RegisterFactoryFunction(&opts::usesignedchar, + RegisterFactoryFunction(&opts::useSignedChar, &HIR2MPLOptions::ProcessUseSignedChar); // On Demand Type Creation @@ -145,8 +145,10 @@ bool HIR2MPLOptions::InitFactory() { &HIR2MPLOptions::ProcessEnableVariableArray); RegisterFactoryFunction(&opts::funcInliceSize, &HIR2MPLOptions::ProcessFuncInlineSize); - RegisterFactoryFunction(&opts::hir2mpl::wpaa, + RegisterFactoryFunction(&opts::wpaa, &HIR2MPLOptions::ProcessWPAA); + RegisterFactoryFunction(&opts::fm, + &HIR2MPLOptions::ProcessFM); return true; } @@ -335,7 +337,7 @@ bool HIR2MPLOptions::ProcessNoMplFile(const maplecl::OptionInterface &) const { bool HIR2MPLOptions::ProcessDumpLevel(const maplecl::OptionInterface &outputName) const { unsigned int arg = outputName.GetCommonValue(); - FEOptions::GetInstance().SetDumpLevel(arg); + FEOptions::GetInstance().SetDumpLevel(static_cast(arg)); return true; } @@ -438,7 +440,7 @@ bool HIR2MPLOptions::ProcessNoBarrier(const maplecl::OptionInterface &) const { // ast compiler options bool HIR2MPLOptions::ProcessUseSignedChar(const maplecl::OptionInterface &) const { - FEOptions::GetInstance().SetUseSignedChar(opts::usesignedchar); + FEOptions::GetInstance().SetUseSignedChar(opts::useSignedChar); return true; } @@ -491,6 +493,7 @@ void HIR2MPLOptions::ProcessInputFiles(const std::vector &inputs) c FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "DEX file detected: %s", inputName.c_str()); FEOptions::GetInstance().AddInputDexFile(inputName); break; + case FEFileType::kO: case FEFileType::kAST: FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "AST file detected: %s", inputName.c_str()); FEOptions::GetInstance().AddInputASTFile(inputName); @@ -599,6 +602,13 @@ bool HIR2MPLOptions::ProcessWPAA(const maplecl::OptionInterface &) const { return true; } +// func merge +bool HIR2MPLOptions::ProcessFM(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetFuncMergeEnable(true); + return true; +} + + // 
AOT bool HIR2MPLOptions::ProcessAOT(const maplecl::OptionInterface &) const { FEOptions::GetInstance().SetIsAOT(true); diff --git a/src/hir2mpl/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp b/src/hir2mpl/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp index ae76647ccd0c811b831158d194a80f60fdb95f6a..0dbd3c6a4393ded666af0715cf9af83749fdf513 100644 --- a/src/hir2mpl/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp +++ b/src/hir2mpl/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp @@ -16,7 +16,7 @@ #include #include "hir2mpl_ut_environment.h" #define private public -#include "bc_compiler_component.h" +#include "bc_compiler_component-inl.h" #undef private #include "dexfile_factory.h" #include "dex_pragma.h" diff --git a/src/hir2mpl/test/bytecode_input/dex/bc_parser_test.cpp b/src/hir2mpl/test/bytecode_input/dex/bc_parser_test.cpp index 85ae3ff78000f0a5057115e356af5fe2a4c7f063..4e9d961944abc0a6bb378fbc6524329cd034653c 100644 --- a/src/hir2mpl/test/bytecode_input/dex/bc_parser_test.cpp +++ b/src/hir2mpl/test/bytecode_input/dex/bc_parser_test.cpp @@ -16,7 +16,7 @@ #include #include #include -#include "bc_parser.h" +#include "bc_parser-inl.h" #include "dex_parser.h" #include "bc_class.h" #include "types_def.h" diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn index 57b0e558f4a951fe8ab7072d41618783ad8ccd67..e92ba91a13402b82d9aff0539e138ee0c263e03d 100644 --- a/src/mapleall/maple_be/BUILD.gn +++ b/src/mapleall/maple_be/BUILD.gn @@ -109,6 +109,7 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_ico.cpp", "src/cg/aarch64/aarch64_insn.cpp", "src/cg/aarch64/aarch64_isa.cpp", + "src/cg/aarch64/aarch64_imm_valid.cpp", "src/cg/aarch64/aarch64_memlayout.cpp", "src/cg/aarch64/aarch64_args.cpp", "src/cg/aarch64/aarch64_live.cpp", @@ -132,6 +133,8 @@ src_libcgaarch64 = [ "src/cg/aarch64/aarch64_rematerialize.cpp", "src/cg/aarch64/aarch64_MPISel.cpp", "src/cg/aarch64/aarch64_standardize.cpp", + 
"src/cg/aarch64/aarch64_global_schedule.cpp", + "src/cg/aarch64/aarch64_local_schedule.cpp", "src/cg/aarch64/aarch64_aggressive_opt.cpp", ] @@ -240,6 +243,9 @@ src_libcgphases = [ "src/cg/data_dep_base.cpp", "src/cg/data_dep_analysis.cpp", "src/cg/global_schedule.cpp", + "src/cg/base_schedule.cpp", + "src/cg/list_scheduler.cpp", + "src/cg/local_schedule.cpp", "src/cg/cg_aggressive_opt.cpp", ] @@ -276,6 +282,7 @@ src_libcg = [ "src/cg/isa.cpp", "src/cg/insn.cpp", "src/cg/cg_phasemanager.cpp", + "src/cg/cg_callgraph_reorder.cpp", ] cflags_cc -= [ "-DRC_NO_MMAP" ] diff --git a/src/mapleall/maple_be/CMakeLists.txt b/src/mapleall/maple_be/CMakeLists.txt index 3b103a8d3a35f2f8df89e4329d83550ba1d9013f..f2c8378a28cab827e6bfc7f8eafbcdbb315a0f26 100755 --- a/src/mapleall/maple_be/CMakeLists.txt +++ b/src/mapleall/maple_be/CMakeLists.txt @@ -109,7 +109,10 @@ if(${TARGET} STREQUAL "aarch64" OR ${TARGET} STREQUAL "aarch64_ilp32") src/cg/aarch64/aarch64_rematerialize.cpp src/cg/aarch64/aarch64_MPISel.cpp src/cg/aarch64/aarch64_standardize.cpp + src/cg/aarch64/aarch64_global_schedule.cpp + src/cg/aarch64/aarch64_local_schedule.cpp src/cg/aarch64/aarch64_aggressive_opt.cpp + src/cg/aarch64/aarch64_imm_valid.cpp src/cg/cfi_generator.cpp src/cg/cfgo.cpp src/cg/local_opt.cpp @@ -145,7 +148,10 @@ if(${TARGET} STREQUAL "aarch64" OR ${TARGET} STREQUAL "aarch64_ilp32") src/cg/control_dep_analysis.cpp src/cg/data_dep_base.cpp src/cg/data_dep_analysis.cpp + src/cg/base_schedule.cpp src/cg/global_schedule.cpp + src/cg/local_schedule.cpp + src/cg/list_scheduler.cpp src/cg/cg_aggressive_opt.cpp ) endif() @@ -279,6 +285,7 @@ set(src_libcg src/cg/isa.cpp src/cg/insn.cpp src/cg/cg_phasemanager.cpp + src/cg/cg_callgraph_reorder.cpp ) string(REPLACE "-DRC_NO_MMAP" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") diff --git a/src/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td b/src/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td index 
bb1127d737296c7a1e95732956f677fc8495b33f..1bddca76621ddcea4e6bccf4eba6878aa4cc8e6b 100644 --- a/src/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td +++ b/src/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td @@ -114,6 +114,8 @@ Def Reservation : kLtAdrpLdr {6, [kUnitIdSlotS, nothing, kUnitIdSlotDAgen, kUnit kUnitIdLdAgu]}; Def Reservation : kLtClinitTail {8, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; +Def Reservation : kLtTlsRel {6, [kUnitIdSlotS]}; +Def Reservation : kLtTlsCall {10, [kUnitIdSlotS, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; Def Bypass {0, [kLtShift, kLtShiftReg], [kLtAlu]}; Def Bypass {1, [kLtShift], [kLtShift, kLtShiftReg, kLtAluShift, kLtAluShiftReg]}; diff --git a/src/mapleall/maple_be/include/be/becommon.h b/src/mapleall/maple_be/include/be/becommon.h index 17c8b24494949898ed097a13d063262123622ea5..1a7b4768f23bc5471c1678de4e00cf4a8e9c59c9 100644 --- a/src/mapleall/maple_be/include/be/becommon.h +++ b/src/mapleall/maple_be/include/be/becommon.h @@ -32,6 +32,10 @@ enum BitsPerByte : uint8 { kLog2BitsPerByte = 3 }; +inline uint32 GetPointerBitSize() { + return GetPointerSize() * kBitsPerByte; +} + class JClassFieldInfo { /* common java class field info */ public: /* constructors */ @@ -89,7 +93,7 @@ class BECommon { void GenObjSize(const MIRClassType &classType, FILE &outFile) const; - std::pair GetFieldOffset(MIRStructType &structType, FieldID fieldID); + OffsetPair GetJClassFieldOffset(MIRStructType &classType, FieldID fieldID) const; bool IsRefField(MIRStructType &structType, FieldID fieldID) const; @@ -154,7 +158,7 @@ class BECommon { return mirModule; } - uint64 GetTypeSize(uint32 idx) const { + uint64 GetClassTypeSize(uint32 idx) const { return typeSizeTable.at(idx); } uint32 GetSizeOfTypeSizeTable() const { @@ -179,9 +183,6 @@ class BECommon { } } - uint8 GetTypeAlign(uint32 idx) const { - return typeAlignTable.at(idx); - } size_t 
GetSizeOfTypeAlignTable() const { return typeAlignTable.size(); } @@ -239,6 +240,13 @@ class BECommon { */ MapleUnorderedMap jClassLayoutTable; MapleUnorderedMap funcReturnType; + + uint8 GetTypeAlign(uint32 idx) const { + return typeAlignTable.at(idx); + } + uint64 GetTypeSize(uint32 idx) const { + return typeSizeTable.at(idx); + } }; /* class BECommon */ } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/be/common_utils.h b/src/mapleall/maple_be/include/be/common_utils.h index 1d912a3546c4fabce5251b242bb4347d8f0580d9..5a8741dcfd579088e9116e276de1f965e59541c1 100644 --- a/src/mapleall/maple_be/include/be/common_utils.h +++ b/src/mapleall/maple_be/include/be/common_utils.h @@ -39,11 +39,13 @@ constexpr uint32 k5BitSize = 5; constexpr uint32 k6BitSize = 6; constexpr uint32 k7BitSize = 7; constexpr uint32 k8BitSize = 8; +constexpr uint32 k12BitSize = 12; constexpr uint32 k16BitSize = 16; constexpr uint32 k24BitSize = 24; constexpr uint32 k32BitSize = 32; constexpr uint32 k40BitSize = 40; constexpr uint32 k48BitSize = 48; +constexpr uint32 k52BitSize = 52; constexpr uint32 k56BitSize = 56; constexpr uint32 k64BitSize = 64; constexpr uint32 k128BitSize = 128; @@ -77,6 +79,7 @@ constexpr int32 kNegative256BitSize = -256; constexpr int32 kNegative512BitSize = -512; constexpr int32 kNegative1024BitSize = -1024; +constexpr uint32 k0ByteSize = 0; constexpr uint32 k1ByteSize = 1; constexpr uint32 k2ByteSize = 2; constexpr uint32 k3ByteSize = 3; diff --git a/src/mapleall/maple_be/include/be/lower.h b/src/mapleall/maple_be/include/be/lower.h index 7af7a8605f6b2e092e90bbdd811fd0f8d409b55a..522b6650b746feda88e62acce26890a6410a0f9a 100644 --- a/src/mapleall/maple_be/include/be/lower.h +++ b/src/mapleall/maple_be/include/be/lower.h @@ -69,6 +69,7 @@ class CGLowerer { } ~CGLowerer() { + funcProfData = nullptr; mirBuilder = nullptr; currentBlock = nullptr; } @@ -142,7 +143,7 @@ class CGLowerer { std::string GetFileNameSymbolName(const std::string &fileName) const; 
void SwitchAssertBoundary(StmtNode &stmt, MapleVector &argsPrintf); - StmtNode *CreateFflushStmt(StmtNode &stmt); + StmtNode *CreateFflushStmt(StmtNode &stmt) const; void LowerAssertBoundary(StmtNode &stmt, BlockNode &block, BlockNode &newBlk, std::vector &abortNode); @@ -175,7 +176,15 @@ class CGLowerer { bool uselvar = false, bool isIntrinAssign = false); BlockNode *LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall); BlockNode *LowerCallAssignedStmt(StmtNode &stmt, bool uselvar = false); - bool LowerStructReturn(BlockNode &newBlk, StmtNode *stmt, StmtNode *&nextStmt, bool &lvar, BlockNode *oldBlk); + /* Intrinsiccall will processe return and vector as a call separately. + * To be able to handle them in a unified manner, we lower intrinsiccall to Intrinsicsicop. + */ + BlockNode *LowerIntrinsiccallToIntrinsicop(StmtNode &stmt); + bool LowerStructReturnInRegs(BlockNode &newBlk, StmtNode &stmt, const MIRSymbol &retSym); + void LowerStructReturnInGpRegs(BlockNode &newBlk, const StmtNode &stmt, const MIRSymbol &symbol); + void LowerStructReturnInFpRegs(BlockNode &newBlk, const StmtNode &stmt, const MIRSymbol &symbol, + PrimType primType, size_t elemNum); + bool LowerStructReturn(BlockNode &newBlk, StmtNode &stmt, bool &lvar); BlockNode *LowerMemop(StmtNode &stmt); BaseNode *LowerRem(BaseNode &expr, BlockNode &blk); @@ -211,9 +220,9 @@ class CGLowerer { void LowerTypePtr(BaseNode &node) const; BaseNode *GetBitField(int32 byteOffset, BaseNode *baseAddr, PrimType fieldPrimType); - StmtNode *WriteBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + StmtNode *WriteBitField(const OffsetPair &byteBitOffsets, const MIRBitFieldType *fieldType, BaseNode *baseAddr, BaseNode *rhs, BlockNode *block); - BaseNode *ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *ReadBitField(const OffsetPair &byteBitOffsets, const MIRBitFieldType &fieldType, BaseNode *baseAddr); BaseNode 
*LowerDreadBitfield(DreadNode &dread); BaseNode *LowerIreadBitfield(IreadNode &iread); diff --git a/src/mapleall/maple_be/include/be/switch_lowerer.h b/src/mapleall/maple_be/include/be/switch_lowerer.h index 7d583517eecac2d5005a4cb72f7eae07d023c0a8..73631e0c1a4c82c09325e0503e4c1cad36a7104b 100644 --- a/src/mapleall/maple_be/include/be/switch_lowerer.h +++ b/src/mapleall/maple_be/include/be/switch_lowerer.h @@ -49,7 +49,7 @@ class SwitchLowerer { maple::MIRModule &mirModule; maple::SwitchNode *stmt; - CGLowerer *cgLowerer; + CGLowerer *cgLowerer = nullptr; /* * the original switch table is sorted and then each dense (in terms of the * case tags) region is condensed into 1 switch item; in the switchItems @@ -73,7 +73,7 @@ class SwitchLowerer { maple::CondGotoNode *BuildCondGotoNode(int32 idx, Opcode opCode, BaseNode &cond); maple::BlockNode *BuildCodeForSwitchItems(int32 start, int32 end, bool lowBlockNodeChecked, bool highBlockNodeChecked, FreqType freqSum, LabelIdx newLabelIdx = 0); - FreqType sumFreq(uint32 startIdx, uint32 endIdx); + FreqType SumFreq(uint32 startIdx, uint32 endIdx); }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h index 67ae1fefa6175ebf892620ab266c56806b42ac09..8dfee1a9488e2fa9ecff5a9845986d788a54c6f7 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h @@ -30,7 +30,7 @@ class AArch64MPIsel : public MPISel { void SelectReturn(NaryStmtNode &retNode) override; void SelectReturn(bool noOpnd) override; void SelectCall(CallNode &callNode) override; - void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; + void SelectIcall(IcallNode &iCallNode, Operand &opnd0) override; Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; Operand *SelectAddrof(AddrofNode &expr, const 
BaseNode &parent) override; @@ -40,8 +40,8 @@ class AArch64MPIsel : public MPISel { void SelectGoto(GotoNode &stmt) override; void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Operand &opndRhs) override; - void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, - const DassignNode &s) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRh, + const DassignNode &stmt) override; void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) override; @@ -96,8 +96,8 @@ class AArch64MPIsel : public MPISel { void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) override; void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) override; Insn &AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, - ListOperand ¶mOpnds, ListOperand &retOpnds); - void SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds); + ListOperand ¶mOpnds, ListOperand &retOpnds) const; + void SelectCalleeReturn(const MIRType *retType, ListOperand &retOpnds) const; /* Inline function implementation of va_start */ void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); @@ -106,9 +106,9 @@ class AArch64MPIsel : public MPISel { void SelectCVaStart(const IntrinsiccallNode &intrnNode); void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds); void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); - void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) const; void SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType); - Operand *SelectDivRem(RegOperand 
&opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode); + Operand *SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) const; RegOperand &GetTargetStackPointer(PrimType primType) override; RegOperand &GetTargetBasicPointer(PrimType primType) override; std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr); @@ -120,8 +120,8 @@ class AArch64MPIsel : public MPISel { bool IsParamStructCopy(const MIRSymbol &symbol); bool IsSymbolRequireIndirection(const MIRSymbol &symbol) override; void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; - Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, - const BaseNode &parent); + Operand *SelectIntrinsicOpWithOneParam(const IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, + const BaseNode /* &parent */); void SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType); void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, std::vector pt); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_aggressive_opt.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_aggressive_opt.h index 33c3bf27ed60f32d3f239b476440e758652c16fd..177b1190fcdb5d51af64c32bab9751293d38e69a 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_aggressive_opt.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_aggressive_opt.h @@ -23,6 +23,7 @@ class AArch64CombineRedundantX16Opt { public: explicit AArch64CombineRedundantX16Opt(CGFunc &func) : aarFunc(static_cast(func)) {} ~AArch64CombineRedundantX16Opt() { + recentX16DefPrevInsns = nullptr; recentX16DefInsn = nullptr; } @@ -30,8 +31,8 @@ class AArch64CombineRedundantX16Opt { private: struct UseX16InsnInfo { - void InsertAddPrevInsns(MapleVector *recentPrevInsns) { - for (auto insn : *recentPrevInsns) { + void InsertAddPrevInsns(MapleVector &recentPrevInsns) const { + for (auto 
insn : recentPrevInsns) { addPrevInsns->emplace_back(insn); } } @@ -93,13 +94,13 @@ class AArch64CombineRedundantX16Opt { void RecordUseX16InsnInfo(Insn &insn, MemPool *tmpMp, MapleAllocator *tmpAlloc); void ComputeValidAddImmInterval(UseX16InsnInfo &x16UseInfo, bool isPair); void FindCommonX16DefInsns(MemPool *tmpMp, MapleAllocator *tmpAlloc); - void ProcessSameAddImmCombineInfo(MemPool *tmpMp, MapleAllocator *tmpAlloc); + void ProcessSameAddImmCombineInfo(MemPool *tmpMp, MapleAllocator *tmpAlloc) const; void ProcessIntervalIntersectionCombineInfo(MemPool *tmpMp, MapleAllocator *tmpAlloc); void CombineRedundantX16DefInsns(BB &bb); - bool HasX16Def(Insn &insn); - bool HasX16Use(Insn &insn); - bool HasUseOpndReDef(Insn &insn); + bool HasX16Def(const Insn &insn) const; + bool HasX16Use(const Insn &insn) const; + bool HasUseOpndReDef(const Insn &insn) const; uint32 GetMemSizeFromMD(Insn &insn); RegOperand *GetAddUseOpnd(Insn &insn); uint32 GetMemOperandIdx(Insn &insn); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h index 6cc0951aa66125d64c93668f4f36f9ffbc766547..fcd208a1d2c0e6cfe4afbce6b0a7bd04e7852ecf 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h @@ -21,20 +21,6 @@ namespace maplebe { using namespace maple; -struct ArgInfo { - AArch64reg reg; - MIRType *mirTy; - uint32 symSize; - uint32 stkSize; - RegType regType; - MIRSymbol *sym; - const AArch64SymbolAlloc *symLoc; - uint8 memPairSecondRegSize; /* struct arg requiring two regs, size of 2nd reg */ - bool doMemPairOpt; - bool createTwoStores; - bool isTwoRegParm; -}; - class AArch64MoveRegArgs : public MoveRegArgs { public: explicit AArch64MoveRegArgs(CGFunc &func) : MoveRegArgs(func) { @@ -46,25 +32,19 @@ class AArch64MoveRegArgs : public MoveRegArgs { void Run() override; private: - RegOperand *baseReg = nullptr; - const MemSegment *lastSegment = 
nullptr; - void CollectRegisterArgs(std::map &argsList, std::vector &indexList, - std::map &pairReg, std::vector &numFpRegs, - std::vector &fpSize) const; - ArgInfo GetArgInfo(std::map &argsList, std::vector &numFpRegs, - std::vector &fpSize, uint32 argIndex) const; - bool IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const; - void GenOneInsn(const ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, int32 offset) const; - void GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo); - void GenerateStrInsn(const ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize); + // gen param to stack + // call foo(var $a) -> str X0, [memOpnd] void MoveRegisterArgs(); + + // gen param to preg + // call foo(%1) -> mov V201, X0 void MoveVRegisterArgs() const; void MoveLocalRefVarToRefLocals(MIRSymbol &mirSym) const; void LoadStackArgsToVReg(MIRSymbol &mirSym) const; void MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) const; Insn &CreateMoveArgsToVRegInsn(MOperator mOp, RegOperand &destOpnd, RegOperand &srcOpnd, PrimType primType) const; - AArch64CGFunc *aarFunc; + AArch64CGFunc *aarFunc = nullptr; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h index 29e3eaaaf451c7988f85ad0f8000c410fbfe991f..f12094faa75b41fa1166049ba7706ccde4871e15 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h @@ -19,102 +19,59 @@ #include "becommon.h" #include "call_conv.h" #include "aarch64_isa.h" +#include "aarch64_abi.h" namespace maplebe { using namespace maple; -/* - * We use the names used in ARM IHI 0055C_beta. $ 5.4.2. 
- * nextGeneralRegNO (= _int_parm_num) : Next General-purpose Register number - * nextFloatRegNO (= _float_parm_num): Next SIMD and Floating-point Register Number - * nextStackArgAdress (= _last_memOffset): Next Stacked Argument Address - * for processing an incoming or outgoing parameter list - */ +// We use the names used in Procedure Call Standard for the Arm 64-bit +// Architecture (AArch64) 2022Q3. $6.8.2 +// nextGeneralRegNO (= _int_parm_num) : Next General-purpose Register number +// nextFloatRegNO (= _float_parm_num): Next SIMD and Floating-point Register Number +// nextStackArgAdress (= _last_memOffset): Next Stacked Argument Address +// for processing an incoming or outgoing parameter list class AArch64CallConvImpl { public: explicit AArch64CallConvImpl(BECommon &be) : beCommon(be) {} ~AArch64CallConvImpl() = default; - /* Return size of aggregate structure copy on stack. */ - int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *tFunc = nullptr); + // Return size of aggregate structure copy on stack. 
+ uint64 LocateNextParm(MIRType &mirType, CCLocInfo &ploc, bool isFirst = false, + MIRFuncType *tFunc = nullptr); - int32 LocateRetVal(MIRType &retType, CCLocInfo &pLoc); + void LocateRetVal(const MIRType &retType, CCLocInfo &ploc) const; - void InitCCLocInfo(CCLocInfo &pLoc) const; + void InitCCLocInfo(CCLocInfo &ploc) const; - /* for lmbc */ - uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize); + // for lmbc + uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) const; - /* return value related */ - void InitReturnInfo(MIRType &retTy, CCLocInfo &ccLocInfo); + void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &ploc) const; - void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const; - - void SetupToReturnThroughMemory(CCLocInfo &pLoc) const { - pLoc.regCount = 1; - pLoc.reg0 = R8; - pLoc.primTypeOfReg0 = PTY_u64; + void SetupToReturnThroughMemory(CCLocInfo &ploc) const { + ploc.regCount = 1; + ploc.reg0 = R8; + ploc.primTypeOfReg0 = GetExactPtrPrimType(); } private: BECommon &beCommon; - uint64 paramNum = 0; /* number of all types of parameters processed so far */ - uint32 nextGeneralRegNO = 0; /* number of integer parameters processed so far */ - uint32 nextFloatRegNO = 0; /* number of float parameters processed so far */ - int32 nextStackArgAdress = 0; + uint32 nextGeneralRegNO = 0; //number of integer parameters processed so far + uint32 nextFloatRegNO = 0; // number of float parameters processed so far + uint64 nextStackArgAdress = 0; AArch64reg AllocateGPRegister() { return (nextGeneralRegNO < AArch64Abi::kNumIntParmRegs) ? 
AArch64Abi::kIntParmRegs[nextGeneralRegNO++] : kRinvalid; } - void AllocateTwoGPRegisters(CCLocInfo &pLoc) { - if ((nextGeneralRegNO + 1) < AArch64Abi::kNumIntParmRegs) { - pLoc.reg0 = AArch64Abi::kIntParmRegs[nextGeneralRegNO++]; - pLoc.reg1 = AArch64Abi::kIntParmRegs[nextGeneralRegNO++]; - } else { - pLoc.reg0 = kRinvalid; - } - } + void AllocateGPRegister(const MIRType &mirType, CCLocInfo &ploc, uint64 size, uint64 align); AArch64reg AllocateSIMDFPRegister() { return (nextFloatRegNO < AArch64Abi::kNumFloatParmRegs) ? AArch64Abi::kFloatParmRegs[nextFloatRegNO++] : kRinvalid; } - void AllocateNSIMDFPRegisters(CCLocInfo &ploc, uint32 num) { - if ((nextFloatRegNO + num - 1) < AArch64Abi::kNumFloatParmRegs) { - switch (num) { - case kOneRegister: - ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - break; - case kTwoRegister: - ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - ploc.reg1 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - break; - case kThreeRegister: - ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - ploc.reg1 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - ploc.reg2 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - break; - case kFourRegister: - ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - ploc.reg1 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - ploc.reg2 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - ploc.reg3 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; - break; - default: - CHECK_FATAL(0, "AllocateNSIMDFPRegisters: unsupported"); - } - } else { - ploc.reg0 = kRinvalid; - } - } - - void RoundNGRNUpToNextEven() { - nextGeneralRegNO = (nextGeneralRegNO + 1U) & ~1U; - } - - int32 ProcessPtyAggWhenLocateNextParm(MIRType &mirType, CCLocInfo &pLoc, uint64 &typeSize, int32 typeAlign); + uint64 AllocateRegisterForAgg(const MIRType &mirType, CCLocInfo &ploc, uint64 size, uint64 align); }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h 
b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h index f8f6e2e150f4447718704dd2f42e7999850fa673..a6b04172f898ced059220cc6abd64844f9800223 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h @@ -22,7 +22,7 @@ class AArch64CFGOptimizer : public CFGOptimizer { public: AArch64CFGOptimizer(CGFunc &func, MemPool &memPool) : CFGOptimizer(func, memPool) {} - ~AArch64CFGOptimizer() = default; + ~AArch64CFGOptimizer() override = default; void InitOptimizePatterns() override; }; @@ -30,12 +30,12 @@ class AArch64FlipBRPattern : public FlipBRPattern { public: explicit AArch64FlipBRPattern(CGFunc &func) : FlipBRPattern(func) {} - ~AArch64FlipBRPattern() = default; + ~AArch64FlipBRPattern() override = default; private: uint32 GetJumpTargetIdx(const Insn &insn) override; MOperator FlipConditionOp(MOperator flippedOp) override; }; -} /* namespace maplebe */ +} // namespace maplebe -#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H */ +#endif // MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h index c6287005cd09968551da8c7b58f8caf154116e1f..c71079223a798817ebc3908dbdc50693e7440622 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2020-2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -34,6 +34,8 @@ #include "aarch64_pgo_gen.h" #include "aarch64_MPISel.h" #include "aarch64_standardize.h" +#include "aarch64_global_schedule.h" +#include "aarch64_local_schedule.h" #include "aarch64_aggressive_opt.h" namespace maplebe { @@ -201,8 +203,8 @@ class AArch64CG : public CG { CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { return mp.New(mp, f, ssaInfo); } - ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { - return mp.New(f, ssaInfo); + ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const override { + return mp.New(mp, f, ssaInfo, ll); } RedundantComputeElim *CreateRedundantCompElim(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { return mp.New(f, ssaInfo, mp); @@ -210,6 +212,14 @@ class AArch64CG : public CG { TailCallOpt *CreateCGTailCallOpt(MemPool &mp, CGFunc &f) const override { return mp.New(mp, f); } + GlobalSchedule *CreateGlobalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cda, + InterDataDepAnalysis &idda) const override { + return mp.New(mp, f, cda, idda); + } + LocalSchedule *CreateLocalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cda, + InterDataDepAnalysis &idda) const override { + return mp.New(mp, f, cda, idda); + } CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const override { return mp.New(f, mp); } @@ -222,7 +232,7 @@ class AArch64CG : public CG { Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override { return mp.New(f); } - virtual CGAggressiveOpt *CreateAggressiveOpt(MemPool &mp, CGFunc &f) const override { + CGAggressiveOpt *CreateAggressiveOpt(MemPool &mp, CGFunc &f) const override { return mp.New(f); } /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2. 
diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 8bcd13704384ab5ec332277a6cc183b4626163cc..59db49179c8aeacc35e580fb2ef6dfa4bbda3bd0 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -57,7 +57,9 @@ class AArch64CGFunc : public CGFunc { immOpndsRequiringOffsetAdjustment(mallocator.Adapter()), immOpndsRequiringOffsetAdjustmentForRefloc(mallocator.Adapter()) { uCatch.regNOCatch = 0; - SetUseFP(CGOptions::UseFramePointer() || HasVLAOrAlloca() || !f.GetModule()->IsCModule() || + SetUseFP(HasVLAOrAlloca() || !f.GetModule()->IsCModule() || + (HasCall() && CGOptions::UseFramePointer() != CGOptions::kNoneFP) || + (!HasCall() && CGOptions::UseFramePointer() == CGOptions::kAllFP) || f.GetModule()->GetFlavor() == MIRFlavor::kFlavorLmbc); } @@ -103,7 +105,7 @@ class AArch64CGFunc : public CGFunc { void HandleRetCleanup(NaryStmtNode &retNode) override; void MergeReturn() override; RegOperand *ExtractNewMemBase(const MemOperand &memOpnd); - Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); + Operand *AArchHandleExpr(const BaseNode &parent, BaseNode &expr); void SelectDassign(DassignNode &stmt, Operand &opnd0) override; void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) override; void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; @@ -116,7 +118,8 @@ class AArch64CGFunc : public CGFunc { MemOperand *FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx); uint32 LmbcFindTotalStkUsed(std::vector ¶mList); uint32 LmbcTotalRegsUsed(); - bool LmbcSmallAggForRet(const BaseNode &bNode, const Operand *src, int32 offset = 0, bool skip1 = false); + bool LmbcSmallAggForRet(const BaseNode &bNode, const Operand &src, int32 offset = 0, + bool skip1 = false); bool LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList); bool 
GetNumReturnRegsForIassignfpoff(MIRType &rType, PrimType &primType, uint32 &numRegs); void GenIassignfpoffStore(Operand &srcOpnd, int32 offset, uint32 byteSize, PrimType primType); @@ -127,13 +130,16 @@ class AArch64CGFunc : public CGFunc { void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override; void SelectBlkassignoff(BlkassignoffNode &bNode, Operand &src) override; void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) override; + void GenLdStForAggIassign(uint64 ofst, uint32 rhsOffset, uint32 lhsOffset, RegOperand &rhsAddrOpnd, + Operand &lhsAddrOpnd, uint32 memOpndSize, regno_t vRegNO, bool isRefField); void SelectReturnSendOfStructInRegs(BaseNode *x) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; - bool IsFirstArgReturn(StmtNode &naryNode); - bool Is64x1vec(StmtNode &naryNode, BaseNode &argExpr, uint32 pnum); - PrimType GetParamPrimType(StmtNode &naryNode, uint32 pnum, bool isCallNative); bool DoCallerEnsureValidParm(RegOperand &destOpnd, RegOperand &srcOpnd, PrimType formalPType); + void SelectParmListSmallStruct(const MIRType &mirType, const CCLocInfo &ploc, + Operand &addr, ListOperand &srcOpnds); + void SelectParmListPassByStack(const MIRType &mirType, Operand &opnd, uint32 memOffset, + bool preCopyed, std::vector &insnForStackArgs); void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; void SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, @@ -144,6 +150,7 @@ class AArch64CGFunc : public CGFunc { void SelectCall(CallNode &callNode) override; void SelectIcall(IcallNode &icallNode, Operand &srcOpnd) override; void SelectIntrinsicCall(IntrinsiccallNode &intrinsicCallNode) override; + RegOperand *SelectIntrinsicOpLoadTlsAnchor(const IntrinsicopNode& intrinsicopNode, const BaseNode &parent) override; Operand 
*SelectAArch64ffs(Operand &argOpnd, PrimType argType); Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) override; Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, @@ -247,7 +254,7 @@ class AArch64CGFunc : public CGFunc { void SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, bool isDest, Insn &insn); void SelectAddAfterInsnBySize(Operand &resOpnd, Operand &opnd0, Operand &opnd1, uint32 size, bool isDest, Insn &insn); bool IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen); - bool IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx); + bool IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) const; Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; void SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opnd1, PrimType primType) override; Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; @@ -366,7 +373,7 @@ class AArch64CGFunc : public CGFunc { void SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType); void SelectStackSave(); void SelectStackRestore(const IntrinsiccallNode &intrnNode); - void SelectCDIVException(const IntrinsiccallNode &intrnNode); + void SelectCDIVException(); void SelectCVaStart(const IntrinsiccallNode &intrnNode); void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); @@ -432,7 +439,7 @@ class AArch64CGFunc : public CGFunc { } else { reg = RFP; } - return GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, kRegTyInt); + return GetOrCreatePhysicalRegisterOperand(reg, GetPointerBitSize(), kRegTyInt); } RegOperand &GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, PrimType baseType); @@ -446,7 +453,7 @@ class AArch64CGFunc : public CGFunc { MemOperand &HashMemOpnd(MemOperand 
&tMemOpnd); MemOperand &CreateMemOpnd(AArch64reg reg, int64 offset, uint32 size) { - RegOperand &baseOpnd = GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, kRegTyInt); + RegOperand &baseOpnd = GetOrCreatePhysicalRegisterOperand(reg, GetPointerBitSize(), kRegTyInt); return CreateMemOpnd(baseOpnd, offset, size); } @@ -476,7 +483,6 @@ class AArch64CGFunc : public CGFunc { Operand &GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const; void GenerateYieldpoint(BB &bb) override; - Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; void GenerateCleanupCode(BB &bb) override; bool NeedCleanup() override; void GenerateCleanupCodeForExtEpilog(BB &bb) override; @@ -736,6 +742,19 @@ class AArch64CGFunc : public CGFunc { RegOperand &GetZeroOpnd(uint32 bitLen) override; + uint32 GetNumIntregToCalleeSave() const { + return numIntregToCalleeSave; + } + void SetNumIntregToCalleeSave(uint32 val) { + numIntregToCalleeSave = val; + } + bool GetStoreFP() const { + return storeFP; + } + void SetStoreFP(bool val) { + storeFP = val; + } + private: enum RelationOperator : uint8 { kAND, @@ -801,6 +820,7 @@ class AArch64CGFunc : public CGFunc { uint32 alignPow = 5; /* function align pow defaults to 5 i.e. 2^5 */ LmbcArgInfo *lmbcArgInfo = nullptr; MIRType *lmbcCallReturnType = nullptr; + bool storeFP = false; void SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, AArch64isa::MemoryOrdering memOrd, bool isDirect); @@ -834,48 +854,41 @@ class AArch64CGFunc : public CGFunc { return (o.IsRegister() ? 
static_cast(o) : SelectCopy(o, sty, dty)); } - void SelectCopySmallAggToReg(uint32 symSize, RegOperand &parmOpnd, const MemOperand &memOpnd); - void CreateCallStructParamPassByStack(uint32 symSize, MIRSymbol *sym, RegOperand *addrOpnd, int32 baseOffset); - RegOperand *SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, const CCLocInfo &ploc, - int32 offset, uint32 parmNum); - void CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, ListOperand &srcOpnds, - FpParamState state, uint32 symSize); - void CreateCallStructParamMemcpy(const MIRSymbol &sym, uint32 structSize, int32 copyOffset, int32 fromOffset); - void CreateCallStructParamMemcpy(RegOperand &addrOpnd, uint32 structSize, int32 copyOffset, int32 fromOffset); - RegOperand *CreateCallStructParamCopyToStack(uint32 numMemOp, MIRSymbol *sym, RegOperand *addrOpd, - int32 copyOffset, int32 fromOffset, const CCLocInfo &ploc); - RegOperand *LoadIreadAddrForSamllAgg(BaseNode &iread); - void SelectParmListDreadSmallAggregate(MIRSymbol &sym, MIRType &structType, - ListOperand &srcOpnds, - int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID); - void SelectParmListIreadSmallAggregate(BaseNode &iread, MIRType &structType, ListOperand &srcOpnds, - int32 offset, AArch64CallConvImpl &parmLocator); - void SelectParmListDreadLargeAggregate(MIRSymbol &sym, MIRType &structType, - ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); - void SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); - void CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, AArch64CallConvImpl &parmLocator, - ListOperand &srcOpnds); - void GenAggParmForDread(const BaseNode &parent, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, - int32 &structCopyOffset, size_t argNo); - void GenAggParmForIread(const 
BaseNode &parent, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo); - void GenAggParmForIreadoff(BaseNode &parent, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo); - void GenAggParmForIreadfpoff(BaseNode &parent, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo); - void SelectParmListForAggregate(BaseNode &parent, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, - int32 &structCopyOffset, size_t argNo, PrimType &paramPType); - size_t SelectParmListGetStructReturnSize(StmtNode &naryNode); bool MarkParmListCall(BaseNode &expr); - void GenLargeStructCopyForDread(BaseNode &argExpr, int32 &structCopyOffset); - void GenLargeStructCopyForIread(BaseNode &argExpr, int32 &structCopyOffset); - void GenLargeStructCopyForIreadfpoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); - void GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); - void SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); - void SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs); + + struct AggregateDesc { + MIRType *mirType = nullptr; + MIRSymbol *sym = nullptr; + uint32 offset = 0; + bool isRefField = false; + }; + + struct ParamDesc { + ParamDesc(MIRType *type, BaseNode *expr, MIRSymbol *symbol = nullptr, + uint32 ofst = 0, bool copyed = false) + : mirType(type), argExpr(expr), sym(symbol), offset(ofst), preCopyed(copyed) {} + MIRType *mirType = nullptr; + BaseNode *argExpr = nullptr; // expr node + MIRSymbol *sym = nullptr; // agg sym + uint32 offset = 0; // agg offset, for preCopyed struct, RSP-based offset + bool preCopyed = false; // for large struct, pre-copied to stack + bool isSpecialArg = false; // such as: tls + }; + Operand *SelectClearStackCallParam(const AddrofNode
&expr, int64 &offsetValue); + std::pair GetCalleeFunction(StmtNode &naryNode) const; + Operand *GetSymbolAddressOpnd(const MIRSymbol &sym, int32 offset, bool useMem); + void SelectStructMemcpy(RegOperand &destOpnd, RegOperand &srcOpnd, uint32 structSize); + void SelectStructCopy(MemOperand &destOpnd, MemOperand &srcOpnd, uint32 structSize); + Operand *GetAddrOpndWithBaseNode(const BaseNode &argExpr, const MIRSymbol &sym, uint32 offset, + bool useMem = true); + void GetAggregateDescFromAggregateNode(BaseNode &argExpr, AggregateDesc &aggDesc); + void SelectParamPreCopy(const BaseNode &argExpr, AggregateDesc &aggDesc, uint32 mirSize, + int32 structCopyOffset, bool isArgUnused); + void SelectParmListPreprocessForAggregate(BaseNode &argExpr, int32 &structCopyOffset, + std::vector &argsDesc, bool isArgUnused); + bool SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::vector &argsDesc, + const MIRFunction *callee = nullptr); void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, std::vector &stackPostion); void SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned, bool is64Bits); @@ -890,10 +903,6 @@ class AArch64CGFunc : public CGFunc { Operand *SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0); Operand *SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, const BaseNode &parent); Operand *SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp /* false for align down */); - int64 GetOrCreatSpillRegLocation(regno_t vrNum) { - AArch64SymbolAlloc *symLoc = static_cast(GetMemlayout()->GetLocOfSpillRegister(vrNum)); - return static_cast(GetBaseOffset(*symLoc)); - } void SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, Operand &src, PrimType stype); void SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, uint32 dsize, Operand &src, PrimType stype); @@ -905,9 +914,15 @@ class 
AArch64CGFunc : public CGFunc { void SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccall); void SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType); void SelectAtomicStore(Operand &srcOpnd, Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder); + bool SelectTLSModelByAttr(Operand &result, StImmOperand &stImm, bool isShlib); + bool SelectTLSModelByOption(Operand &result, StImmOperand &stImm, bool isShlib); + bool SelectTLSModelByPreemptibility(Operand &result, StImmOperand &stImm, bool isShlib); void SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm); + void SelectThreadAnchor(Operand &result, StImmOperand &stImm); void SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm); void SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm); + void SelectCTlsGotDesc(Operand &result, StImmOperand &stImm); + void SelectCTlsLoad(Operand &result, StImmOperand &stImm); void SelectMPLClinitCheck(const IntrinsiccallNode &intrnNode); void SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode); void SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, @@ -915,7 +930,7 @@ class AArch64CGFunc : public CGFunc { Operand *GetOpndWithOneParam(const IntrinsicopNode &intrnNode); Operand *GetOpndFromIntrnNode(const IntrinsicopNode &intrnNode); - bool IslhsSizeAligned(uint64 lhsSizeCovered, uint32 newAlignUsed, uint64 lhsSize); + bool IslhsSizeAligned(uint64 lhsSizeCovered, uint32 newAlignUsed, uint64 lhsSize) const; RegOperand &GetRegOpnd(bool isAfterRegAlloc, PrimType primType); Operand *SelectAArch64CAtomicFetch(const IntrinsicopNode &intrinopNode, SyncAndAtomicOp op, bool fetchBefore); Operand *SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopNode, SyncAndAtomicOp op, bool fetchBefore); @@ -940,7 +955,7 @@ class AArch64CGFunc : public CGFunc { bool isIntactIndexed, bool isPostIndexed, bool isPreIndexed) const; Insn &GenerateGlobalLongCallAfterInsn(const 
MIRSymbol &func, ListOperand &srcOpnds); Insn &GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds); - Insn &GenerateGlobalNopltCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds); + Insn &GenerateGlobalNopltCallAfterInsn(const MIRSymbol &funcSym, ListOperand &srcOpnds); bool IsDuplicateAsmList(const MIRSymbol &sym) const; RegOperand *CheckStringIsCompressed(BB &bb, RegOperand &str, int32 countOffset, PrimType countPty, LabelIdx jumpLabIdx); @@ -961,6 +976,15 @@ class AArch64CGFunc : public CGFunc { LabelIdx GetLabelInInsn(Insn &insn) override { return static_cast(insn.GetOperand(AArch64isa::GetJumpTargetIdx(insn))).GetLabelIndex(); } + void CheckAndSetStackProtectInfoWithAddrof(const MIRSymbol &symbol) { + // 1. if me performs stack protection check, doesn't need to set stack protect + // 2. only addressing variables on the stack, need to set stack protect + // 3. retVar is generated internally by compiler, doesn't need to set stack protect + if (!GetFunction().IsMayWriteToAddrofStackChecked() && + symbol.GetStorageClass() == kScAuto && !symbol.IsReturnVar()) { + SetStackProtectInfo(kAddrofStack); + } + } }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_data_dep_base.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_data_dep_base.h index 2a90a4d25e7914d9ee963dfe21b22bb0fd88baa5..5ea8c86a0e8f3924f69fcf69807668936a720488 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_data_dep_base.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_data_dep_base.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -22,9 +22,10 @@ namespace maplebe { class AArch64DataDepBase : public DataDepBase { public: - AArch64DataDepBase(MemPool &mp, CGFunc &func, MAD &mad) : DataDepBase(mp, func, mad) {} + AArch64DataDepBase(MemPool &mp, CGFunc &func, MAD &mad, bool isIntraAna) : DataDepBase(mp, func, mad, isIntraAna) {} ~AArch64DataDepBase() override = default; + void InitCDGNodeDataInfo(MemPool &mp, MapleAllocator &alloc, CDGNode &cdgNode) override; void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) override; void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) override; bool IsFrameReg(const RegOperand &opnd) const override; @@ -40,7 +41,8 @@ class AArch64DataDepBase : public DataDepBase { void BuildDepsDirtyHeap(Insn &insn) override; void BuildOpndDependency(Insn &insn) override; void BuildSpecialInsnDependency(Insn &insn, const MapleVector &nodes) override; - void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) override; + void BuildAsmInsnDependency(Insn &insn) override; + void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, DepNode &sepNode) override; DepNode *BuildSeparatorNode() override; void BuildInterBlockMemDefUseDependency(DepNode &depNode, MemOperand &memOpnd, MemOperand *nextMemOpnd, bool isMemDef) override; @@ -48,10 +50,10 @@ class AArch64DataDepBase : public DataDepBase { MemOperand &memOpnd, MemOperand *nextMemOpnd) override; void BuildPredPathMemUseDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, MemOperand &memOpnd, MemOperand *nextMemOpnd) override; + void DumpNodeStyleInDot(std::ofstream &file, DepNode &depNode) override; void BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd); - bool NeedBuildDepsMem(const MemOperand &memOpnd, - const MemOperand *nextMemOpnd, const Insn &memInsn) const; + bool NeedBuildDepsMem(const MemOperand &memOpnd, const MemOperand *nextMemOpnd, const Insn &memInsn) const; 
protected: MemOperand *GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h index 2159f3132eadfd6e4674639192f3458818a7c1c3..520e2275a1d71310ad81733b1ae8dde7ebd864b0 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h @@ -23,7 +23,7 @@ using namespace maple; class AArch64AsmEmitter : public AsmEmitter { public: AArch64AsmEmitter(CG &cg, const std::string &asmFileName) : AsmEmitter(cg, asmFileName) {} - ~AArch64AsmEmitter() = default; + ~AArch64AsmEmitter() override = default; void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; @@ -57,14 +57,17 @@ class AArch64AsmEmitter : public AsmEmitter { void EmitStringIndexOf(Emitter &emitter, const Insn &insn) const; void EmitLazyBindingRoutine(Emitter &emitter, const Insn &insn) const; void EmitCheckThrowPendingException(Emitter &emitter) const; - void EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const; - void EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const; + void EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const; // emit intrinsic for local-exec TLS model + void EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const; // emit intrinsic for general-dynamic TLS model + void EmitCTlsDescGot(Emitter &emitter, const Insn &insn) const; // emit intrinsic for initial-exec TLS model + void EmitCTlsLoadTdata(Emitter &emitter, const Insn &insn) const; // emit intrinsic for warmup-dynamic TLS model + void EmitCTlsLoadTbss(Emitter &emitter, const Insn &insn) const; // emit intrinsic for warmup-dynamic TLS model void EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const; void PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn)
const; bool CheckInsnRefField(const Insn &insn, uint32 opndIndex) const; - void EmitCallWithLocalAlias(Emitter &emitter, FuncNameOperand &func, const std::string &mdName) const; + void EmitCallWithLocalAlias(Emitter &emitter, const std::string &funcName, const std::string &mdName) const; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h index dea57f8a82c2d00aea5339e480f96a0cff7e86c9..3ff1af1512cff0437edf8cba4bde2819ee4c0303 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h @@ -474,7 +474,6 @@ class ContinuousLdrPattern : public OptimizePattern { static bool IsMopMatch(const Insn &insn); bool IsUsedBySameCall(Insn &insn1, Insn &insn2, Insn &insn3) const; static bool IsMemValid(const MemOperand &memopnd); - static bool IsImmValid(MOperator mop, const ImmOperand &imm); static int64 GetMemOffsetValue(const Insn &insn); std::vector insnList; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_global_schedule.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global_schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..41cdf2e82cfc4cba108a5a4e346bc48cdbd64de3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global_schedule.h @@ -0,0 +1,38 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_GLOBAL_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_GLOBAL_SCHEDULE_H + +#include "global_schedule.h" + +namespace maplebe { +class AArch64GlobalSchedule : public GlobalSchedule { + public: + AArch64GlobalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cdAna, InterDataDepAnalysis &idda) + : GlobalSchedule(mp, f, cdAna, idda) {} + ~AArch64GlobalSchedule() override = default; + + /* Verify global scheduling */ + void VerifyingSchedule(CDGRegion ®ion) override; + + protected: + void InitInCDGNode(CDGRegion ®ion, CDGNode &cdgNode, MemPool *cdgNodeMp) override; + void FinishScheduling(CDGNode &cdgNode) override; + void DumpInsnInfoByScheduledOrder(BB &curBB) const override; +}; +} /* namespace maplebe */ + +#endif // MAPLEBE_INCLUDE_CG_AARCH64_GLOBAL_SCHEDULE_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h index 94de2fda6554084dc71df12e2c729cef756ec592..27e3cc41bbd65a6d9680564bac950d7cf1c80ca1 100755 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h @@ -35,11 +35,13 @@ class AArch64ICOPattern : public ICOPattern { protected: ConditionCode Encode(MOperator mOp, bool inverse) const; Insn *BuildCmpInsn(const Insn &condBr) const; - Insn *BuildCcmpInsn(ConditionCode ccCode, const Insn &cmpInsn) const; + Insn *BuildCcmpInsn(ConditionCode ccCode, ConditionCode ccCode2, const Insn &cmpInsn, Insn *&moveInsn) const; Insn *BuildCondSet(const Insn &branch, RegOperand ®, bool inverse) const; Insn *BuildCondSel(const Insn &branch, MOperator mOp, RegOperand &dst, RegOperand &src1, RegOperand &src2) const; static uint32 GetNZCV(ConditionCode ccCode, bool inverse); bool CheckMop(MOperator mOperator) const; + bool CheckMopOfCmp(MOperator mOperator) const; + bool IsReverseMop(MOperator mOperator1, MOperator mOperator2) const; }; /* If-Then-Else pattern */ @@ -51,15 +53,22 @@ class 
AArch64ICOIfThenElsePattern : public AArch64ICOPattern { KUseOrDef }; + struct DestSrcMap { + DestSrcMap(const std::map> &ifMap, + const std::map> &elseMap) + : ifDestSrcMap(ifMap), elseDestSrcMap(elseMap) {} + const std::map> &ifDestSrcMap; + const std::map> &elseDestSrcMap; + }; + explicit AArch64ICOIfThenElsePattern(CGFunc &func) : AArch64ICOPattern(func) {} ~AArch64ICOIfThenElsePattern() override { cmpBB = nullptr; } bool Optimize(BB &curBB) override; protected: - bool BuildCondMovInsn(const BB &bb, const std::map> &ifDestSrcMap, - const std::map> &elseDestSrcMap, bool elseBBIsProcessed, - std::vector &generateInsn) const; + bool BuildCondMovInsn(const BB &bb, const DestSrcMap &destSrcTempMap, bool elseBBIsProcessed, + std::vector &generateInsn, const Insn *toBeRremoved2CmpBB) const; bool DoOpt(BB *ifBB, BB *elseBB, BB &joinBB); void GenerateInsnForImm(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, RegOperand &destReg, std::vector &generateInsn) const; @@ -78,7 +87,7 @@ class AArch64ICOIfThenElsePattern : public AArch64ICOPattern { void UpdateTemps(std::vector &destRegs, std::vector &setInsn, std::map> &destSrcMap, const Insn &oldInsn, Insn *newInsn) const; Insn *MoveSetInsn2CmpBB(Insn &toBeRremoved2CmpBB, BB &currBB, - std::vector &anotherBranchDestRegs, std::map> &destSrcMap) const; + std::map> &destSrcMap) const; void RevertMoveInsns(BB *bb, Insn *prevInsnInBB, Insn *newInsnOfBB, Insn *insnInBBToBeRremovedOutOfCurrBB) const; bool IsExpansionMOperator(const Insn &insn) const; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_imm_valid.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_imm_valid.h new file mode 100644 index 0000000000000000000000000000000000000000..c9f2e7c296ea7af8d61188dd1358d6eedfc6b873 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_imm_valid.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. +*/ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_IMM_VALID_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_IMM_VALID_H + +#include "common_utils.h" +#include "types_def.h" + +namespace maplebe { +inline bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { + // mask1 is a 64bits number that is all 1 shifts left size bits + const uint64 mask1 = 0xffffffffffffffffUL << bitLen; + // mask2 is a 64 bits number that nlowerZeroBits are all 1, higher bits aro all 0 + uint64 mask2 = (1UL << static_cast(nLowerZeroBits)) - 1UL; + return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) >> nLowerZeroBits)) == 0UL; +}; + +// This is a copy from "operand.cpp", temporary fix for me_slp.cpp usage of this file +// was IsMoveWidableImmediate +inline bool IsMoveWidableImmediateCopy(uint64 val, uint32 bitLen) { + // When #imm is FFFFFFFF, ~val should return true, because ~val is 0, and it should be valid. + // But ~val and (~val & 0xffffffff) both will be 0, it will return false, so we judge this situation alone. + if (val == 0 || val == 0xffffffff) { + return true; + } + if (bitLen == k64BitSize) { + // 0xHHHH000000000000 or 0x0000HHHH00000000, return true + if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || + ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { + return true; + } + } else { + // get lower 32 bits + val &= static_cast(0xffffffff); + // If lower 32 bits are all 0, but higher 32 bits have 1, val will be 1 and return true, but it is false in fact. 
+ if (val == 0) { + return false; + } + } + // 0x00000000HHHH0000 or 0x000000000000HHHH, return true + return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || + (val & static_cast(0xffff)) == val); +} +namespace aarch64 { +bool IsBitmaskImmediate(uint64 val, uint32 bitLen); +} // namespace aarch64 + +using namespace aarch64; +inline bool IsSingleInstructionMovable32(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 32) || + IsMoveWidableImmediateCopy(~static_cast(value), 32) || + IsBitmaskImmediate(static_cast(value), 32)); +} + +inline bool IsSingleInstructionMovable64(int64 value) { + return (IsMoveWidableImmediateCopy(static_cast(value), 64) || + IsMoveWidableImmediateCopy(~static_cast(value), 64) || + IsBitmaskImmediate(static_cast(value), 64)); +} + +inline bool Imm12BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); + // for target linux-aarch64-gnu + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +// For the 32-bit variant: is the bitmask immediate +inline bool Imm12BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return false; + } + return IsBitmaskImmediate(static_cast(value), k32BitSize); +} + +inline bool Imm13BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, 0); + // for target linux-aarch64-gnu + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, kMaxImmVal13Bits); + return result; +} + +// For the 64-bit variant: is the bitmask immediate +inline bool Imm13BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return false; + } + return IsBitmaskImmediate(static_cast(value), k64BitSize); +} + +inline bool Imm16BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal16Bits, 0); + // for target linux-aarch64-gnu + // aarch64 assembly takes up to 24-bits 
immediate, generating + // either cmp or cmp with shift 12 encoding + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +// For the 32-bit variant: is the shift amount, in the range 0 to 31, opnd input is bitshiftopnd +inline bool BitShift5BitValid(uint32 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal5Bits, 0); + return result; +} + +// For the 64-bit variant: is the shift amount, in the range 0 to 63, opnd input is bitshiftopnd +inline bool BitShift6BitValid(uint32 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal6Bits, 0); + return result; +} + +// For the 32-bit variant: is the shift amount, in the range 0 to 31, opnd input is immopnd +inline bool BitShift5BitValidImm(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal5Bits, 0); + return result; +} + +// For the 64-bit variant: is the shift amount, in the range 0 to 63, opnd input is immopnd +inline bool BitShift6BitValidImm(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal6Bits, 0); + return result; +} + +// Is a 16-bit unsigned immediate, in the range 0 to 65535, used by BRK +inline bool Imm16BitValidImm(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal16Bits, 0); + return result; +} + +// Is the flag bit specifier, an immediate in the range 0 to 15, used by CCMP +inline bool Nzcv4BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), k4BitSize, 0); + return result; +} + +// For the 32-bit variant: is the bit number of the lsb of the source bitfield, in the range 0 to 31 +inline bool Lsb5BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal5Bits, 0); + return result; +} + +// For the 32-bit variant: is the width of the bitfield, in the range 1 to 32- +inline bool Width5BitValid(int64 value, int64 lsb) { + return (value >= 1) && (value <= 32 - 
lsb); +} + +// For the 32-bit variant: is the width of the bitfield, in the range 1 to 32, is used for only width verified +inline bool Width5BitOnlyValid(int64 value) { + return (value >= 1) && (value <= 32); +} + +// For the 64-bit variant: is the bit number of the lsb of the source bitfield, in the range 0 to 63 +inline bool Lsb6BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal6Bits, 0); + return result; +} + +// For the 64-bit variant: is the width of the bitfield, in the range 1 to 64- +inline bool Width6BitValid(int64 value, int64 lsb) { + return (value >= 1) && (value <= 64 - lsb); +} + +// For the 64-bit variant: is the width of the bitfield, in the range 1 to 64, is used for only width verified +inline bool Width6BitOnlyValid(int64 value) { + return (value >= 1) && (value <= 64); +} + +// Is the left shift amount to be applied after extension in the range 0 to 4, uint32 means value non-negative +inline bool ExtendShift0To4Valid(uint32 value) { + return (value <= k4BitSize); +} + +// Is the optional left shift to apply to the immediate, it can have the values: 0, 12 +inline bool LeftShift12Valid(uint32 value) { + return value == k0BitSize || value == k12BitSize; +} + +// For the 32-bit variant: is the amount by which to shift the immediate left, either 0 or 16 +inline bool ImmShift32Valid(uint32 value) { + return value == k0BitSize || value == k16BitSize; +} + +// For the 64-bit variant: is the amount by which to shift the immediate left, either 0, 16, 32 or 48 +inline bool ImmShift64Valid(uint32 value) { + return value == k0BitSize || value == k16BitSize || value == k32BitSize || value == k48BitSize; +} + +inline bool IsSIMMValid(int64 value) { + return (value <= kMaxSimm32) && (value >= kMinSimm32); +} + +inline bool IsPIMMValid(int64 value, uint wordSize) { + if ((value >= k0BitSize) && (value <= kMaxPimm[wordSize])) { + uint64 mask = (1U << wordSize) - 1U; + return (static_cast(value) & mask) > 0 ? 
false : true; + } + return false; +} + +// Used for backend str/ldr memopnd offset judgment +inline bool StrLdrInsnSignedOfstValid(int64 value, uint wordSize, bool IsIntactIndexed) { + return IsSIMMValid(value) || (IsPIMMValid(value, wordSize) && IsIntactIndexed); +} + +// 8bit : 0 +// halfword : 1 +// 32bit - word : 2 +// 64bit - word : 3 +// 128bit- word : 4 +inline bool StrLdrSignedOfstValid(int64 value, uint wordSize) { + return IsSIMMValid(value) || IsPIMMValid(value, wordSize); +} + +inline bool StrLdr8ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, 0); +} + +inline bool StrLdr16ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k1ByteSize); +} + +inline bool StrLdr32ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k2ByteSize); +} + +inline bool StrLdr32PairImmValid(int64 value) { + if ((value <= kMaxSimm32Pair) && (value >= kMinSimm32)) { + return (static_cast(value) & 3) > 0 ? false : true; + } + return false; +} + +inline bool StrLdr64ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k3ByteSize); +} + +inline bool StrLdr64PairImmValid(int64 value) { + if (value <= kMaxSimm64Pair && (value >= kMinSimm64)) { + return (static_cast(value) & 7) > 0 ? false : true; + } + return false; +} + +inline bool StrLdr128ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k4ByteSize); +} + +inline bool StrLdr128PairImmValid(int64 value) { + if (value < k1024BitSize && (value >= kNegative1024BitSize)) { + return (static_cast(value) & 0xf) > 0 ? 
false : true; + } + return false; +} +} // namespace maplebe + +#endif // MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_IMM_VALID_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h index fd05d56f47c9ebad1b27fab9c101bdc6ec061c15..769fd3e65b438db2be2507c43723c6ede4f939e8 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h @@ -23,8 +23,7 @@ namespace maplebe { class A64OpndEmitVisitor : public OpndEmitVisitor { public: A64OpndEmitVisitor(Emitter &emitter, const OpndDesc *operandProp) - : OpndEmitVisitor(emitter), - opndProp(operandProp) {} + : OpndEmitVisitor(emitter, operandProp) {} ~A64OpndEmitVisitor() override { opndProp = nullptr; } @@ -46,8 +45,6 @@ class A64OpndEmitVisitor : public OpndEmitVisitor { void EmitVectorOperand(const RegOperand &v); void EmitIntReg(const RegOperand &v, uint32 opndSz = kMaxSimm32); void Visit(const MIRSymbol &symbol, int64 offset); - - const OpndDesc *opndProp; }; class A64OpndDumpVisitor : public OpndDumpVisitor { diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h index 0fd1ad54ba65dc88638222d05e43d565ff5a11a6..c201b263220605358034a9e5fe895da851f1a1a2 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h @@ -23,7 +23,6 @@ namespace maplebe { enum AArch64MOP_t : maple::uint32 { #include "abstract_mmir.def" #include "aarch64_md.def" -#include "aarch64_mem_md.def" kMopLast }; #undef DEFINE_MOP @@ -38,7 +37,7 @@ constexpr int32 kAarch64StackPtrAlignmentInt = 16; constexpr int32 kOffsetAlign = 8; constexpr uint32 kIntregBytelen = 8; /* 64-bit */ constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ -constexpr int kSizeOfFplr = 16; +constexpr uint32 kSizeOfFplr = 16; enum StpLdpImmBound : int { kStpLdpImm64LowerBound = -512, @@ -125,7 
+124,7 @@ inline bool IsFPSIMDRegister(AArch64reg r) { return V0 <= r && r <= V31; } - inline bool IsPhysicalRegister(regno_t r) { +inline bool IsPhysicalRegister(regno_t r) { return r < kMaxRegNum; } @@ -167,6 +166,14 @@ bool IsSub(const Insn &insn); MOperator GetMopSub2Subs(const Insn &insn); MOperator FlipConditionOp(MOperator flippedOp); + +// Function: for immediate verification, memopnd ofstvalue is returned from opnd input. +// It's worth noting that 0 will be returned when kBOR memopnd is input. +int64 GetMemOpndOffsetValue(Operand *o); + +int32 GetTail0BitNum(int64 val); + +int32 GetHead0BitNum(int64 val); } /* namespace AArch64isa */ /* diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_local_schedule.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_local_schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..155a6c2e28d7d73436327824760b96ed8b22bb86 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_local_schedule.h @@ -0,0 +1,32 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_LOCAL_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_LOCAL_SCHEDULE_H + +#include "local_schedule.h" + +namespace maplebe { +class AArch64LocalSchedule : public LocalSchedule { + public: + AArch64LocalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cdAna, InterDataDepAnalysis &idda) + : LocalSchedule(mp, f, cdAna, idda) {} + ~AArch64LocalSchedule() = default; + + void FinishScheduling(CDGNode &cdgNode) override; + void DumpInsnInfoByScheduledOrder(BB &curBB) const override; +}; +} /* namespace maplebe */ + +#endif // MAPLEBE_INCLUDE_CG_AARCH64_LOCAL_SCHEDULE_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def index 95e0673b4b0d56a0527d8458c37fde83e2bd41fd..a07b1a1cf6752efe292a9afe1afb907432e5e5de 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -13,481 +13,481 @@ * See the Mulan PSL v1 for more details. 
*/ /* InsnDesc format: - * {mop, opndMD, properties, latency, name, format, atomicNum, validFunc(nullptr)} + * {mop, opndMD, properties, latency, name, format, atomicNum, validFunc(nullptr), splitFunc(nullptr)} */ /* AARCH64 MOVES */ /* MOP_xmovrr */ -DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISMOVE, kLtAlu, "mov", "0,1", 1) /* MOP_wmovrr */ -DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISMOVE, kLtAlu, "mov", "0,1", 1) /* MOP_wmovri32 */ -DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1, - IsSingleInstructionMovable32) +DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID, &OpndDesc::Imm32}, ISMOVE, kLtAlu, "mov", "0,1", 1, + MOP_wmovri32Valid, MOP_wmovri32Split) /* MOP_xmovri64 */ -DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1, - IsSingleInstructionMovable64) +DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID, &OpndDesc::Imm64}, ISMOVE, kLtAlu, "mov", "0,1", 1, + MOP_xmovri64Valid, MOP_xmovri64Split) /* MOP_xmovrr_uxtw -- Remove Redundant uxtw -- used in globalopt:UxtwMovPattern */ -DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) +DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS}, ISMOVE, kLtAlu, "mov", "0,1", 1) /* MOP_xvmovsr */ -DEFINE_MOP(MOP_xvmovsr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISMOVE,kLtR2f,"fmov","0,1",1) +DEFINE_MOP(MOP_xvmovsr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32IS}, ISMOVE, kLtR2f, "fmov", "0,1", 1) /* MOP_xvmovdr */ -DEFINE_MOP(MOP_xvmovdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISMOVE,kLtR2f,"fmov","0,1",1) +DEFINE_MOP(MOP_xvmovdr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS}, ISMOVE, kLtR2f, "fmov", "0,1", 1) /* MOP_xvmovrs */ -DEFINE_MOP(MOP_xvmovrs, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISMOVE,kLtF2r,"fmov","0,1",1) +DEFINE_MOP(MOP_xvmovrs, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISMOVE, kLtF2r, "fmov", "0,1", 1) /* MOP_xvmovrd */ -DEFINE_MOP(MOP_xvmovrd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISMOVE,kLtF2r,"fmov","0,1",1) +DEFINE_MOP(MOP_xvmovrd, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISMOVE, kLtF2r, "fmov", "0,1", 1) /* MOP_xvmovs */ -DEFINE_MOP(MOP_xvmovs, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISMOVE,kLtFpalu,"fmov","0,1",1) +DEFINE_MOP(MOP_xvmovs, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS}, ISMOVE, kLtFpalu, "fmov", "0,1", 1) /* MOP_xvmovd */ -DEFINE_MOP(MOP_xvmovd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISMOVE,kLtFpalu,"fmov","0,1",1) +DEFINE_MOP(MOP_xvmovd, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, ISMOVE, kLtFpalu, "fmov", "0,1", 1) /* Vector SIMD mov */ /* MOP_xmovrv */ -DEFINE_MOP(MOP_xvmovrv, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISMOVE,kLtF2r,"mov","0,1",1) +DEFINE_MOP(MOP_xvmovrv, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISMOVE, kLtF2r, "mov", "0,1", 1) /* MOP_xadrp */ -DEFINE_MOP(MOP_xadrp, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISLOADADDR,kLtShift,"adrp","0,1",1) +DEFINE_MOP(MOP_xadrp, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc}, ISLOADADDR, kLtShift, "adrp", "0,1", 1) /* MOP_xadr */ -DEFINE_MOP(MOP_xadri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISLOADADDR,kLtShift,"adr","0,1",1) +DEFINE_MOP(MOP_xadri64, {&OpndDesc::Reg64ID, &OpndDesc::Imm64}, ISLOADADDR, kLtShift, "adr", "0,1", 1) /* MOP_xadrpl12 */ -DEFINE_MOP(MOP_xadrpl12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Literal12Src},ISLOADADDR,kLtAlu,"add","0,1,2",1) +DEFINE_MOP(MOP_xadrpl12, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Literal12Src}, ISLOADADDR, kLtAlu, "add", "0,1,2", 1) /* AARCH64 Arithmetic: add */ /* MOP_xaddrrr */ -DEFINE_MOP(MOP_xaddrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"add","0,1,2",1) +DEFINE_MOP(MOP_xaddrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, 
&OpndDesc::Reg64IS}, 0, kLtAlu, "add", "0,1,2", 1) /* MOP_xaddsrrr */ -DEFINE_MOP(MOP_xaddsrrr, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"adds","1,2,3",1) +DEFINE_MOP(MOP_xaddsrrr, {&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "adds", "1,2,3", 1) /* MOP_xaddrrrs */ -DEFINE_MOP(MOP_xaddrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"add","0,1,2,3",1) +DEFINE_MOP(MOP_xaddrrrs, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAluShift, "add", "0,1,2,3", 1, MOP_xaddrrrsValid) /* MOP_xaddsrrrs */ -DEFINE_MOP(MOP_xaddsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"adds","1,2,3,4",1) +DEFINE_MOP(MOP_xaddsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAluShift, "adds", "1,2,3,4", 1, MOP_xaddsrrrsValid) /* MOP_xxwaddrrre */ -DEFINE_MOP(MOP_xxwaddrrre, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"add","0,1,2,3",1) +DEFINE_MOP(MOP_xxwaddrrre, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAluShift, "add", "0,1,2,3", 1, MOP_xxwaddrrreValid) /* MOP_xaddrri24 */ -DEFINE_MOP(MOP_xaddrri24, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtShift,"add","0,1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_xaddrri24, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm12, &OpndDesc::Lsl12}, 0, kLtShift, "add", "0,1,2,3", 1, MOP_xaddrri24Valid, MOP_xaddrri24Split) /* MOP_xaddrri12 */ -DEFINE_MOP(MOP_xaddrri12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"add","0,1,2",1,Imm12BitValid) +DEFINE_MOP(MOP_xaddrri12, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm12}, 0, kLtAlu, "add", "0,1,2", 1, MOP_xaddrri12Valid, 
MOP_xaddrri12Split) /* MOP_xaddsrri12 */ -DEFINE_MOP(MOP_xaddsrri12, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"adds","1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_xaddsrri12, {&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm12}, 0, kLtAlu, "adds", "1,2,3", 1, MOP_xaddsrri12Valid, MOP_xaddsrri12Split) /* MOP_waddrrr */ -DEFINE_MOP(MOP_waddrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"add","0,1,2",1) +DEFINE_MOP(MOP_waddrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "add", "0,1,2", 1) /* MOP_waddsrrr */ -DEFINE_MOP(MOP_waddsrrr, {&OpndDesc::CCD, &OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"adds","1,2,3",1) +DEFINE_MOP(MOP_waddsrrr, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "adds", "1,2,3", 1) /* MOP_waddrrrs */ -DEFINE_MOP(MOP_waddrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"add","0,1,2,3",1) +DEFINE_MOP(MOP_waddrrrs, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAluShift, "add", "0,1,2,3", 1, MOP_waddrrrsValid) /* MOP_waddsrrrs */ -DEFINE_MOP(MOP_waddsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"adds","1,2,3,4",1) +DEFINE_MOP(MOP_waddsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAluShift, "adds", "1,2,3,4", 1, MOP_waddsrrrsValid) /* MOP_xxwaddrrre */ -DEFINE_MOP(MOP_wwwaddrrre, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"add","0,1,2,3",1) +DEFINE_MOP(MOP_wwwaddrrre, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAluShift, "add", "0,1,2,3", 1, MOP_wwwaddrrreValid) /* MOP_waddrri24 */ -DEFINE_MOP(MOP_waddrri24, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"add","0,1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_waddrri24, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12, &OpndDesc::Lsl12}, 0, kLtAluShift, "add", "0,1,2,3", 1, MOP_waddrri24Valid, MOP_waddrri24Split) /* MOP_waddrri12 */ -DEFINE_MOP(MOP_waddrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"add","0,1,2",1,Imm12BitValid) +DEFINE_MOP(MOP_waddrri12, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "add", "0,1,2", 1, MOP_waddrri12Valid, MOP_waddrri12Split) /* MOP_waddsrri12 */ -DEFINE_MOP(MOP_waddsrri12, {&OpndDesc::CCD, &OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"adds","1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_waddsrri12, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "adds", "1,2,3", 1, MOP_waddsrri12Valid, MOP_waddsrri12Split) /* MOP_dadd */ -DEFINE_MOP(MOP_dadd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fadd","0,1,2",1) +DEFINE_MOP(MOP_dadd, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fadd", "0,1,2", 1) /* MOP_sadd */ -DEFINE_MOP(MOP_sadd, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fadd","0,1,2",1) +DEFINE_MOP(MOP_sadd, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fadd", "0,1,2", 1) /* AARCH64 Arithmetic: sub/subs */ /* MOP newly add to the following group should be related pairs with such order :{ sub, subs } */ /* MOP_xsubrrr */ -DEFINE_MOP(MOP_xsubrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"sub","0,1,2",1) +DEFINE_MOP(MOP_xsubrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "sub", "0,1,2", 1) /* MOP_xsubsrrr */ -DEFINE_MOP(MOP_xsubsrrr, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"subs","1,2,3",1) +DEFINE_MOP(MOP_xsubsrrr, {&OpndDesc::CCD, 
&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "subs", "1,2,3", 1) /* MOP_xsubrrrs */ -DEFINE_MOP(MOP_xsubrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"sub","0,1,2,3",1) +DEFINE_MOP(MOP_xsubrrrs, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAluShift, "sub", "0,1,2,3", 1, MOP_xsubrrrsValid) /* MOP_xsubsrrrs */ -DEFINE_MOP(MOP_xsubsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"subs","1,2,3,4",1) +DEFINE_MOP(MOP_xsubsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAluShift, "subs", "1,2,3,4", 1, MOP_xsubsrrrsValid) /* MOP_xsubrri24 */ -DEFINE_MOP(MOP_xsubrri24, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"sub","0,1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_xsubrri24, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm12, &OpndDesc::Lsl12}, 0, kLtAluShift, "sub", "0,1,2,3", 1, MOP_xsubrri24Valid, MOP_xsubrri24Split) /* MOP_xsubsrri24 */ -DEFINE_MOP(MOP_xsubsrri24, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"subs","1,2,3,4",1,Imm12BitValid) +DEFINE_MOP(MOP_xsubsrri24, {&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm12, &OpndDesc::Lsl12}, 0, kLtAluShift, "subs", "1,2,3,4", 1, MOP_xsubsrri24Valid) /* MOP_xsubrri12 */ -DEFINE_MOP(MOP_xsubrri12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"sub","0,1,2",1,Imm12BitValid) +DEFINE_MOP(MOP_xsubrri12, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm12}, 0, kLtAlu, "sub", "0,1,2", 1, MOP_xsubrri12Valid, MOP_xsubrri12Split) /* MOP_xsubsrri12 */ -DEFINE_MOP(MOP_xsubsrri12, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"subs","1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_xsubsrri12, 
{&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm12}, 0, kLtAlu, "subs", "1,2,3", 1, MOP_xsubsrri12Valid, MOP_xsubsrri12Split) /* MOP_wsubrrr */ -DEFINE_MOP(MOP_wsubrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"sub","0,1,2",1) +DEFINE_MOP(MOP_wsubrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "sub", "0,1,2", 1) /* MOP_wsubsrrr */ -DEFINE_MOP(MOP_wsubsrrr, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"subs","1,2,3",1) +DEFINE_MOP(MOP_wsubsrrr, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "subs", "1,2,3", 1) /* MOP_wsubrrrs */ -DEFINE_MOP(MOP_wsubrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"sub","0,1,2,3",1) +DEFINE_MOP(MOP_wsubrrrs, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAluShift, "sub", "0,1,2,3", 1, MOP_wsubrrrsValid) /* MOP_wsubsrrrs */ -DEFINE_MOP(MOP_wsubsrrrs, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"subs","1,2,3,4",1) +DEFINE_MOP(MOP_wsubsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAluShift, "subs", "1,2,3,4", 1, MOP_wsubsrrrsValid) /* MOP_wsubrri24 */ -DEFINE_MOP(MOP_wsubrri24, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"sub","0,1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_wsubrri24, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12, &OpndDesc::Lsl12}, 0, kLtAluShift, "sub", "0,1,2,3", 1, MOP_wsubrri24Valid, MOP_wsubrri24Split) /* MOP_wsubsrri24 */ -DEFINE_MOP(MOP_wsubsrri24, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"subs","1,2,3,4",1,Imm12BitValid) +DEFINE_MOP(MOP_wsubsrri24, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS, 
&OpndDesc::Imm12, &OpndDesc::Lsl12}, 0, kLtAluShift, "subs", "1,2,3,4", 1, MOP_wsubsrri24Valid) /* MOP_wsubrri12 */ -DEFINE_MOP(MOP_wsubrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"sub","0,1,2",1,Imm12BitValid) +DEFINE_MOP(MOP_wsubrri12, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "sub", "0,1,2", 1, MOP_wsubrri12Valid, MOP_wsubrri12Split) /* MOP_wsubsrri12 */ -DEFINE_MOP(MOP_wsubsrri12, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"subs","1,2,3",1,Imm12BitValid) +DEFINE_MOP(MOP_wsubsrri12, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "subs", "1,2,3", 1, MOP_wsubsrri12Valid, MOP_wsubsrri12Split) /* AARCH64 Arithmetic: sub */ /* MOP_xxwsubrrre */ -DEFINE_MOP(MOP_xxwsubrrre, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"sub","0,1,2,3",1) +DEFINE_MOP(MOP_xxwsubrrre, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAluShift, "sub", "0,1,2,3", 1, MOP_xxwsubrrreValid) /* MOP_wwwsubrrre */ -DEFINE_MOP(MOP_wwwsubrrre, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"sub","0,1,2,3",1) +DEFINE_MOP(MOP_wwwsubrrre, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAluShift, "sub", "0,1,2,3", 1, MOP_wwwsubrrreValid) /* MOP_dsub */ -DEFINE_MOP(MOP_dsub, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fsub","0,1,2",1) +DEFINE_MOP(MOP_dsub, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fsub", "0,1,2", 1) /* MOP_ssub */ -DEFINE_MOP(MOP_ssub, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fsub","0,1,2",1) +DEFINE_MOP(MOP_ssub, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fsub", "0,1,2", 1) /* AARCH64 Arithmetic: multiply */ /* MOP_Tbxmulrrr */ 
-DEFINE_MOP(MOP_xmulrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"mul","0,1,2",1) +DEFINE_MOP(MOP_xmulrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtMul, "mul", "0,1,2", 1) /* MOP_wmulrrr */ -DEFINE_MOP(MOP_wmulrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"mul","0,1,2",1) +DEFINE_MOP(MOP_wmulrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtMul, "mul", "0,1,2", 1) /* MOP_Tbxvmuls */ -DEFINE_MOP(MOP_xvmuls, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpmul,"fmul","0,1,2",1) +DEFINE_MOP(MOP_xvmuls, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, 0, kLtFpmul, "fmul", "0,1,2", 1) /* MOP_Tbxvmuld */ -DEFINE_MOP(MOP_xvmuld, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpmul,"fmul","0,1,2",1) +DEFINE_MOP(MOP_xvmuld, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, 0, kLtFpmul, "fmul", "0,1,2", 1) /*MOP_xsmullrrr */ -DEFINE_MOP(MOP_xsmullrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"smull","0,1,2",1) +DEFINE_MOP(MOP_xsmullrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtMul, "smull", "0,1,2", 1) /* AARCH64 Arithmetic: multiply first then add */ /* MOP_xmaddrrrr */ -DEFINE_MOP(MOP_xmaddrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"madd","0,1,2,3",1) +DEFINE_MOP(MOP_xmaddrrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtMul, "madd", "0,1,2,3", 1) /* MOP_wmaddrrrr */ -DEFINE_MOP(MOP_wmaddrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"madd","0,1,2,3",1) +DEFINE_MOP(MOP_wmaddrrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtMul, "madd", "0,1,2,3", 1) /* AARCH64 leading zeros, reverse bits (for trailing zeros) */ /* MOP_wclz */ -DEFINE_MOP(MOP_wclz, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"clz","0,1", 1) +DEFINE_MOP(MOP_wclz, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, 0, kLtAlu, "clz", "0,1", 1) /* MOP_xclz */ -DEFINE_MOP(MOP_xclz, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"clz","0,1", 1) +DEFINE_MOP(MOP_xclz, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, 0, kLtAlu, "clz", "0,1", 1) /* MOP_wrbit */ -DEFINE_MOP(MOP_wrbit, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rbit","0,1", 1) +DEFINE_MOP(MOP_wrbit, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, 0, kLtAlu, "rbit", "0,1", 1) /* MOP_xrbit */ -DEFINE_MOP(MOP_xrbit, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"rbit","0,1", 1) +DEFINE_MOP(MOP_xrbit, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, 0, kLtAlu, "rbit", "0,1", 1) /* AARCH64 Conversions */ /* MOP_xsxtb32 */ -DEFINE_MOP(MOP_xsxtb32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtb","0,1",1) +DEFINE_MOP(MOP_xsxtb32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "sxtb", "0,1", 1) /* MOP_xsxtb64 */ -DEFINE_MOP(MOP_xsxtb64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtb","0,1",1) +DEFINE_MOP(MOP_xsxtb64, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "sxtb", "0,1", 1) /* MOP_xsxth32 */ -DEFINE_MOP(MOP_xsxth32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxth","0,1",1) +DEFINE_MOP(MOP_xsxth32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "sxth", "0,1", 1) /* MOP_xsxth64 */ -DEFINE_MOP(MOP_xsxth64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxth","0,1",1) +DEFINE_MOP(MOP_xsxth64, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "sxth", "0,1", 1) /* MOP_xsxtw64 */ -DEFINE_MOP(MOP_xsxtw64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtw","0,1",1) +DEFINE_MOP(MOP_xsxtw64, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "sxtw", "0,1", 1) /* MOP_xuxtb32 */ -DEFINE_MOP(MOP_xuxtb32, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxtb","0,1",1) +DEFINE_MOP(MOP_xuxtb32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "uxtb", "0,1", 1) /* MOP_xuxth32 */ -DEFINE_MOP(MOP_xuxth32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxth","0,1",1) -/* MOP_xuxtw64 Same as mov w0,w0 */ -DEFINE_MOP(MOP_xuxtw64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxtw","0,1",1) +DEFINE_MOP(MOP_xuxth32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "uxth", "0,1", 1) +/* MOP_xuxtw64 Same as mov w0, w0 */ +DEFINE_MOP(MOP_xuxtw64, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS}, ISCONVERSION, kLtAluShift, "uxtw", "0,1", 1) /* MOP_xvcvtfd */ -DEFINE_MOP(MOP_xvcvtfd, {&OpndDesc::Reg32FD,&OpndDesc::Reg64FS},ISCONVERSION,kLtFpalu,"fcvt","0,1",1) +DEFINE_MOP(MOP_xvcvtfd, {&OpndDesc::Reg32FD, &OpndDesc::Reg64FS}, ISCONVERSION, kLtFpalu, "fcvt", "0,1", 1) /* MOP_xvcvtdf */ -DEFINE_MOP(MOP_xvcvtdf, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISCONVERSION,kLtFpalu,"fcvt","0,1",1) - -/* MOP_vcvtrf fcvtzs w,s */ -DEFINE_MOP(MOP_vcvtrf, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) -/* MOP_xvcvtrf fcvtzs x,s */ -DEFINE_MOP(MOP_xvcvtrf, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) -/* MOP_vcvturf fcvtzu w,s */ -DEFINE_MOP(MOP_vcvturf, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) -/* MOP_xvcvturf fcvtzu x,s */ -DEFINE_MOP(MOP_xvcvturf, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) - -/* MOP_vcvtas fcvtas w,s (for round) */ -DEFINE_MOP(MOP_vcvtas, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtas","0,1",1) -/* MOP_xvcvtas fcvtas x,s */ -DEFINE_MOP(MOP_xvcvtas, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtas","0,1",1) -/* MOP_vcvtms fcvtms w,s (for floor) */ -DEFINE_MOP(MOP_vcvtms, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtms","0,1",1) -/* MOP_xvcvtms fcvtms x,s */ -DEFINE_MOP(MOP_xvcvtms, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtms","0,1",1) -/* MOP_vcvtps fcvtps w,s (for ceil) */ -DEFINE_MOP(MOP_vcvtps, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtps","0,1",1) -/* MOP_xvcvtps fcvtps x,d */ -DEFINE_MOP(MOP_xvcvtps, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtps","0,1",1) - -/* MOP_vcvtrd fcvtzs w,d */ -DEFINE_MOP(MOP_vcvtrd, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) -/* MOP_xvcvtrd fcvtzs x,d */ -DEFINE_MOP(MOP_xvcvtrd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) -/* MOP_vcvturd fcvtzu w,d */ -DEFINE_MOP(MOP_vcvturd, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) -/* MOP_xvcvturd fcvtzu x,d */ -DEFINE_MOP(MOP_xvcvturd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) - -/* MOP_vcvtfr scvtf s,w */ -DEFINE_MOP(MOP_vcvtfr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) -/* MOP_xvcvtfr scvtf s,x */ -DEFINE_MOP(MOP_xvcvtfr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) -/* MOP_vcvtufr ucvtf s,w */ -DEFINE_MOP(MOP_vcvtufr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) -/* MOP_xvcvtufr ucvtf s,x */ -DEFINE_MOP(MOP_xvcvtufr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) - -/* MOP_vcvtdr scvtf d,w */ -DEFINE_MOP(MOP_vcvtdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) -/* MOP_xvcvtdr scvtf d,x */ -DEFINE_MOP(MOP_xvcvtdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) -/* MOP_vcvtudr ucvtf d,w */ -DEFINE_MOP(MOP_vcvtudr, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) -/* 
MOP_xvcvtudr ucvtf d,x */ -DEFINE_MOP(MOP_xvcvtudr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) +DEFINE_MOP(MOP_xvcvtdf, {&OpndDesc::Reg64FD, &OpndDesc::Reg32FS}, ISCONVERSION, kLtFpalu, "fcvt", "0,1", 1) + +/* MOP_vcvtrf fcvtzs w, s */ +DEFINE_MOP(MOP_vcvtrf, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISCONVERSION, kLtF2rCvt, "fcvtzs", "0,1", 1) +/* MOP_xvcvtrf fcvtzs x, s */ +DEFINE_MOP(MOP_xvcvtrf, {&OpndDesc::Reg64ID, &OpndDesc::Reg32FS}, ISCONVERSION, kLtF2rCvt, "fcvtzs", "0,1", 1) +/* MOP_vcvturf fcvtzu w, s */ +DEFINE_MOP(MOP_vcvturf, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISCONVERSION, kLtF2rCvt, "fcvtzu", "0,1", 1) +/* MOP_xvcvturf fcvtzu x, s */ +DEFINE_MOP(MOP_xvcvturf, {&OpndDesc::Reg64ID, &OpndDesc::Reg32FS}, ISCONVERSION, kLtF2rCvt, "fcvtzu", "0,1", 1) + +/* MOP_vcvtas fcvtas w, s (for round) */ +DEFINE_MOP(MOP_vcvtas, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISCONVERSION, kLtF2rCvt, "fcvtas", "0,1", 1) +/* MOP_xvcvtas fcvtas x, s */ +DEFINE_MOP(MOP_xvcvtas, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISCONVERSION, kLtF2rCvt, "fcvtas", "0,1", 1) +/* MOP_vcvtms fcvtms w, s (for floor) */ +DEFINE_MOP(MOP_vcvtms, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISCONVERSION, kLtF2rCvt, "fcvtms", "0,1", 1) +/* MOP_xvcvtms fcvtms x, s */ +DEFINE_MOP(MOP_xvcvtms, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISCONVERSION, kLtF2rCvt, "fcvtms", "0,1", 1) +/* MOP_vcvtps fcvtps w, s (for ceil) */ +DEFINE_MOP(MOP_vcvtps, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISCONVERSION, kLtF2rCvt, "fcvtps", "0,1", 1) +/* MOP_xvcvtps fcvtps x, d */ +DEFINE_MOP(MOP_xvcvtps, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISCONVERSION, kLtF2rCvt, "fcvtps", "0,1", 1) + +/* MOP_vcvtrd fcvtzs w, d */ +DEFINE_MOP(MOP_vcvtrd, {&OpndDesc::Reg32ID, &OpndDesc::Reg64FS}, ISCONVERSION, kLtF2rCvt, "fcvtzs", "0,1", 1) +/* MOP_xvcvtrd fcvtzs x, d */ +DEFINE_MOP(MOP_xvcvtrd, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISCONVERSION, kLtF2rCvt, "fcvtzs", "0,1", 1) +/* 
MOP_vcvturd fcvtzu w, d */ +DEFINE_MOP(MOP_vcvturd, {&OpndDesc::Reg32ID, &OpndDesc::Reg64FS}, ISCONVERSION, kLtF2rCvt, "fcvtzu", "0,1", 1) +/* MOP_xvcvturd fcvtzu x, d */ +DEFINE_MOP(MOP_xvcvturd, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISCONVERSION, kLtF2rCvt, "fcvtzu", "0,1", 1) + +/* MOP_vcvtfr scvtf s, w */ +DEFINE_MOP(MOP_vcvtfr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32IS}, ISCONVERSION, kLtR2fCvt, "scvtf", "0,1", 1) +/* MOP_xvcvtfr scvtf s, x */ +DEFINE_MOP(MOP_xvcvtfr, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS}, ISCONVERSION, kLtR2fCvt, "scvtf", "0,1", 1) +/* MOP_vcvtufr ucvtf s, w */ +DEFINE_MOP(MOP_vcvtufr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32IS}, ISCONVERSION, kLtR2fCvt, "ucvtf", "0,1", 1) +/* MOP_xvcvtufr ucvtf s, x */ +DEFINE_MOP(MOP_xvcvtufr, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS}, ISCONVERSION, kLtR2fCvt, "ucvtf", "0,1", 1) + +/* MOP_vcvtdr scvtf d, w */ +DEFINE_MOP(MOP_vcvtdr, {&OpndDesc::Reg64FD, &OpndDesc::Reg32IS}, ISCONVERSION, kLtR2fCvt, "scvtf", "0,1", 1) +/* MOP_xvcvtdr scvtf d, x */ +DEFINE_MOP(MOP_xvcvtdr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS}, ISCONVERSION, kLtR2fCvt, "scvtf", "0,1", 1) +/* MOP_vcvtudr ucvtf d, w */ +DEFINE_MOP(MOP_vcvtudr, {&OpndDesc::Reg64FD, &OpndDesc::Reg32IS}, ISCONVERSION, kLtR2fCvt, "ucvtf", "0,1", 1) +/* MOP_xvcvtudr ucvtf d, x */ +DEFINE_MOP(MOP_xvcvtudr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS}, ISCONVERSION, kLtR2fCvt, "ucvtf", "0,1", 1) /* MOP_xcsel */ -DEFINE_MOP(MOP_wcselrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csel","0,1,2,3",1) -DEFINE_MOP(MOP_xcselrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csel","0,1,2,3",1) +DEFINE_MOP(MOP_wcselrrrc, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csel", "0,1,2,3", 1) +DEFINE_MOP(MOP_xcselrrrc, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, 
&OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csel", "0,1,2,3", 1) /* MOP_xcset -- all conditions minus AL & NV */ -DEFINE_MOP(MOP_wcsetrc, {&OpndDesc::Reg32ID,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cset","0,1",1) -DEFINE_MOP(MOP_xcsetrc, {&OpndDesc::Reg64ID,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cset","0,1",1) +DEFINE_MOP(MOP_wcsetrc, {&OpndDesc::Reg32ID, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "cset", "0,1", 1) +DEFINE_MOP(MOP_xcsetrc, {&OpndDesc::Reg64ID, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "cset", "0,1", 1) /* MOP_xcinc */ -DEFINE_MOP(MOP_wcincrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cinc","0,1,2",1) -DEFINE_MOP(MOP_xcincrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cinc","0,1,2",1) +DEFINE_MOP(MOP_wcincrc, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "cinc", "0,1,2", 1) +DEFINE_MOP(MOP_xcincrc, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "cinc", "0,1,2", 1) /* MOP_xcsinc */ -DEFINE_MOP(MOP_wcsincrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinc","0,1,2,3",1) -DEFINE_MOP(MOP_xcsincrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinc","0,1,2,3",1) +DEFINE_MOP(MOP_wcsincrrrc, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csinc", "0,1,2,3", 1) +DEFINE_MOP(MOP_xcsincrrrc, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csinc", "0,1,2,3", 1) /* MOP_xcsinv */ -DEFINE_MOP(MOP_wcsinvrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinv","0,1,2,3",1) -DEFINE_MOP(MOP_xcsinvrrrc, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinv","0,1,2,3",1) +DEFINE_MOP(MOP_wcsinvrrrc, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csinv", "0,1,2,3", 1) +DEFINE_MOP(MOP_xcsinvrrrc, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csinv", "0,1,2,3", 1) /* MOP_xandrrr */ -DEFINE_MOP(MOP_xandrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"and","0,1,2",1) +DEFINE_MOP(MOP_xandrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "and", "0,1,2", 1) /* MOP_xandrrrs */ -DEFINE_MOP(MOP_xandrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"and","0,1,2,3",1) +DEFINE_MOP(MOP_xandrrrs, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAluShift, "and", "0,1,2,3", 1, MOP_xandrrrsValid) /* MOP_xandrri13 */ -DEFINE_MOP(MOP_xandrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"and","0,1,2",1,Imm13BitMaskValid) +DEFINE_MOP(MOP_xandrri13, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm13}, 0, kLtAlu, "and", "0,1,2", 1, MOP_xandrri13Valid) /* MOP_wandrrr */ -DEFINE_MOP(MOP_wandrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"and","0,1,2",1) +DEFINE_MOP(MOP_wandrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "and", "0,1,2", 1) /* MOP_wandrrrs */ -DEFINE_MOP(MOP_wandrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"and","0,1,2,3",1) +DEFINE_MOP(MOP_wandrrrs, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAluShift, "and", "0,1,2,3", 1, MOP_wandrrrsValid) /* MOP_wandrri12 */ -DEFINE_MOP(MOP_wandrri12, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"and","0,1,2",1,Imm12BitMaskValid) +DEFINE_MOP(MOP_wandrri12, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "and", "0,1,2", 1, MOP_wandrri12Valid) /* MOP_xbicrrr */ -DEFINE_MOP(MOP_xbicrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"bic","0,1,2",1) +DEFINE_MOP(MOP_xbicrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "bic", "0,1,2", 1) /* MOP_wbicrrr */ -DEFINE_MOP(MOP_wbicrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"bic","0,1,2",1) +DEFINE_MOP(MOP_wbicrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "bic", "0,1,2", 1) /* MOP_xiorrrr */ -DEFINE_MOP(MOP_xiorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"orr","0,1,2",1) +DEFINE_MOP(MOP_xiorrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "orr", "0,1,2", 1) /* MOP_xiorrrrs */ -DEFINE_MOP(MOP_xiorrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"orr","0,1,2,3",1) +DEFINE_MOP(MOP_xiorrrrs, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAlu, "orr", "0,1,2,3", 1, MOP_xiorrrrsValid) /* MOP_xiorrri13 */ -DEFINE_MOP(MOP_xiorrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"orr","0,1,2",1,Imm13BitMaskValid) +DEFINE_MOP(MOP_xiorrri13, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm13}, 0, kLtAlu, "orr", "0,1,2", 1, MOP_xiorrri13Valid) /* MOP_wiorrrr */ -DEFINE_MOP(MOP_wiorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"orr","0,1,2",1) +DEFINE_MOP(MOP_wiorrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "orr", "0,1,2", 1) /* MOP_wiorrrrs */ -DEFINE_MOP(MOP_wiorrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"orr","0,1,2,3",1) +DEFINE_MOP(MOP_wiorrrrs, 
{&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAlu, "orr", "0,1,2,3", 1, MOP_wiorrrrsValid) /* MOP_wiorrri12 */ -DEFINE_MOP(MOP_wiorrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"orr","0,1,2",1,Imm12BitMaskValid) +DEFINE_MOP(MOP_wiorrri12, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "orr", "0,1,2", 1, MOP_wiorrri12Valid) /* MOP_xeorrrr */ -DEFINE_MOP(MOP_xeorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"eor","0,1,2",1) +DEFINE_MOP(MOP_xeorrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "eor", "0,1,2", 1) /* MOP_xeorrrrs */ -DEFINE_MOP(MOP_xeorrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"eor","0,1,2,3",1) +DEFINE_MOP(MOP_xeorrrrs, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAlu, "eor", "0,1,2,3", 1, MOP_xeorrrrsValid) /* MOP_xeorrri13 */ -DEFINE_MOP(MOP_xeorrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"eor","0,1,2",1,Imm13BitMaskValid) +DEFINE_MOP(MOP_xeorrri13, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm13}, 0, kLtAlu, "eor", "0,1,2", 1, MOP_xeorrri13Valid) /* MOP_weorrrr */ -DEFINE_MOP(MOP_weorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"eor","0,1,2",1) +DEFINE_MOP(MOP_weorrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "eor", "0,1,2", 1) /* MOP_weorrrrs */ -DEFINE_MOP(MOP_weorrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"eor","0,1,2,3",1) +DEFINE_MOP(MOP_weorrrrs, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAlu, "eor", "0,1,2,3", 1, MOP_weorrrrsValid) /* MOP_weorrri12 */ -DEFINE_MOP(MOP_weorrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"eor","0,1,2",1,Imm12BitMaskValid) 
+DEFINE_MOP(MOP_weorrri12, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "eor", "0,1,2", 1, MOP_weorrri12Valid) /* MOP_xnotrr */ -DEFINE_MOP(MOP_xnotrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"mvn","0,1",1) +DEFINE_MOP(MOP_xnotrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, 0, kLtAlu, "mvn", "0,1", 1) /* MOP_wnotrr */ -DEFINE_MOP(MOP_wnotrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"mvn","0,1",1) +DEFINE_MOP(MOP_wnotrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, 0, kLtAlu, "mvn", "0,1", 1) /* MOP_vnotui */ -DEFINE_MOP(MOP_vnotui, {&OpndDesc::Reg64VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) +DEFINE_MOP(MOP_vnotui, {&OpndDesc::Reg64VD, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "mvni", "0,1", 1) /* MOP_vnotvi */ -DEFINE_MOP(MOP_vnotvi, {&OpndDesc::Reg128VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) +DEFINE_MOP(MOP_vnotvi, {&OpndDesc::Reg128VD, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "mvni", "0,1", 1) /* MOP_xrevrr */ -DEFINE_MOP(MOP_xrevrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"rev","0,1",1) +DEFINE_MOP(MOP_xrevrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, 0, kLtAlu, "rev", "0,1", 1) /* MOP_wrevrr */ -DEFINE_MOP(MOP_wrevrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rev","0,1",1) +DEFINE_MOP(MOP_wrevrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, 0, kLtAlu, "rev", "0,1", 1) /* MOP_xrevrr */ -DEFINE_MOP(MOP_wrevrr16, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rev16","0,1",1) +DEFINE_MOP(MOP_wrevrr16, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, 0, kLtAlu, "rev16", "0,1", 1) /* MOP_wfmaxrrr */ -DEFINE_MOP(MOP_wfmaxrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fmax","0,1,2",1) +DEFINE_MOP(MOP_wfmaxrrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fmax", "0,1,2", 1) /* MOP_xfmaxrrr */ -DEFINE_MOP(MOP_xfmaxrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fmax","0,1,2",1) +DEFINE_MOP(MOP_xfmaxrrr, 
{&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fmax", "0,1,2", 1) /* MOP_wfminrrr */ -DEFINE_MOP(MOP_wfminrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fmin","0,1,2",1) +DEFINE_MOP(MOP_wfminrrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fmin", "0,1,2", 1) /* MOP_xfminrrr */ -DEFINE_MOP(MOP_xfminrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fmin","0,1,2",1) +DEFINE_MOP(MOP_xfminrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fmin", "0,1,2", 1) /* MOP_wsdivrrr */ -DEFINE_MOP(MOP_wsdivrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},CANTHROW,kLtDiv,"sdiv","0,1,2",1) +DEFINE_MOP(MOP_wsdivrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, CANTHROW, kLtDiv, "sdiv", "0,1,2", 1) /* MOP_xsdivrrr */ -DEFINE_MOP(MOP_xsdivrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},CANTHROW,kLtDiv,"sdiv","0,1,2",1) +DEFINE_MOP(MOP_xsdivrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, CANTHROW, kLtDiv, "sdiv", "0,1,2", 1) /* MOP_wudivrrr */ -DEFINE_MOP(MOP_wudivrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},CANTHROW,kLtDiv,"udiv","0,1,2",1) +DEFINE_MOP(MOP_wudivrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, CANTHROW, kLtDiv, "udiv", "0,1,2", 1) /* MOP_xudivrrr */ -DEFINE_MOP(MOP_xudivrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},CANTHROW,kLtDiv,"udiv","0,1,2",1) +DEFINE_MOP(MOP_xudivrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, CANTHROW, kLtDiv, "udiv", "0,1,2", 1) /* MOP_wmsubrrrr */ -DEFINE_MOP(MOP_wmsubrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"msub","0,1,2,3",1) +DEFINE_MOP(MOP_wmsubrrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtMul, "msub", "0,1,2,3", 1) /* MOP_xmsubrrrr */ 
-DEFINE_MOP(MOP_xmsubrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"msub","0,1,2,3",1) +DEFINE_MOP(MOP_xmsubrrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtMul, "msub", "0,1,2,3", 1) /* MOP_wmnegrrr */ -DEFINE_MOP(MOP_wmnegrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"mneg","0,1,2",1) +DEFINE_MOP(MOP_wmnegrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtMul, "mneg", "0,1,2", 1) /* MOP_xmnegrrr */ -DEFINE_MOP(MOP_xmnegrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"mneg","0,1,2",1) +DEFINE_MOP(MOP_xmnegrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtMul, "mneg", "0,1,2", 1) /* MOP_wubfxrri5i5 */ -DEFINE_MOP(MOP_wubfxrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"ubfx","0,1,2,3",1) +DEFINE_MOP(MOP_wubfxrri5i5, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm5, &OpndDesc::Imm5}, 0, kLtAluShift, "ubfx", "0,1,2,3", 1, MOP_wubfxrri5i5Valid) /* MOP_xubfxrri6i6 */ -DEFINE_MOP(MOP_xubfxrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"ubfx","0,1,2,3",1) +DEFINE_MOP(MOP_xubfxrri6i6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm6, &OpndDesc::Imm6}, 0, kLtAluShift, "ubfx", "0,1,2,3", 1, MOP_xubfxrri6i6Valid) /* MOP_wsbfxrri5i5 -- Signed Bitfield Extract */ -DEFINE_MOP(MOP_wsbfxrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"sbfx","0,1,2,3",1) +DEFINE_MOP(MOP_wsbfxrri5i5, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm5, &OpndDesc::Imm5}, 0, kLtAluShift, "sbfx", "0,1,2,3", 1, MOP_wsbfxrri5i5Valid) /* MOP_xsbfxrri6i6 */ -DEFINE_MOP(MOP_xsbfxrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"sbfx","0,1,2,3",1) +DEFINE_MOP(MOP_xsbfxrri6i6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, 
&OpndDesc::Imm6, &OpndDesc::Imm6}, 0, kLtAluShift, "sbfx", "0,1,2,3", 1, MOP_xsbfxrri6i6Valid) /* MOP_wubfizrri5i5 -- Unsigned Bitfield Insert in Zero */ -DEFINE_MOP(MOP_wubfizrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"ubfiz","0,1,2,3",1) +DEFINE_MOP(MOP_wubfizrri5i5, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm5, &OpndDesc::Imm5}, 0, kLtAluShift, "ubfiz", "0,1,2,3", 1, MOP_wubfizrri5i5Valid) /* MOP_xubfizrri6i6 */ -DEFINE_MOP(MOP_xubfizrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"ubfiz","0,1,2,3",1) +DEFINE_MOP(MOP_xubfizrri6i6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm6, &OpndDesc::Imm6}, 0, kLtAluShift, "ubfiz", "0,1,2,3", 1, MOP_xubfizrri6i6Valid) /* MOP_xsbfizrri6i6 Signed Bitfield Insert in Zero */ -DEFINE_MOP(MOP_xsbfizrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"sbfiz","0,1,2,3",1) +DEFINE_MOP(MOP_xsbfizrri6i6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm6, &OpndDesc::Imm6}, 0, kLtAluShift, "sbfiz", "0,1,2,3", 1, MOP_xsbfizrri6i6Valid) /* MOP_wbfirri5i5 -- Bitfield Insert */ -DEFINE_MOP(MOP_wbfirri5i5, {&OpndDesc::Reg32IDS,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},ISMOVE,kLtAluShift,"bfi","0,1,2,3",1) +DEFINE_MOP(MOP_wbfirri5i5, {&OpndDesc::Reg32IDS, &OpndDesc::Reg32IS, &OpndDesc::Imm5, &OpndDesc::Imm5}, ISMOVE, kLtAluShift, "bfi", "0,1,2,3", 1, MOP_wbfirri5i5Valid) /* MOP_xbfirri6i6 */ -DEFINE_MOP(MOP_xbfirri6i6, {&OpndDesc::Reg64IDS,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},ISMOVE,kLtAluShift,"bfi","0,1,2,3",1) +DEFINE_MOP(MOP_xbfirri6i6, {&OpndDesc::Reg64IDS, &OpndDesc::Reg64IS, &OpndDesc::Imm6, &OpndDesc::Imm6}, ISMOVE, kLtAluShift, "bfi", "0,1,2,3", 1, MOP_xbfirri6i6Valid) -/* MOP_xlslrri6,--- Logical Shift Left */ -DEFINE_MOP(MOP_xlslrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"lsl","0,1,2",1) +/* MOP_xlslrri6, --- 
Logical Shift Left */ +DEFINE_MOP(MOP_xlslrri6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm6}, 0, kLtAluShift, "lsl", "0,1,2", 1, MOP_xlslrri6Valid) /* MOP_wlslrri5 */ -DEFINE_MOP(MOP_wlslrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"lsl","0,1,2",1) +DEFINE_MOP(MOP_wlslrri5, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm8}, 0, kLtAluShift, "lsl", "0,1,2", 1, MOP_wlslrri5Valid) /* MOP_xasrrri6, */ -DEFINE_MOP(MOP_xasrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"asr","0,1,2",1) +DEFINE_MOP(MOP_xasrrri6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm6}, 0, kLtAluShift, "asr", "0,1,2", 1, MOP_xasrrri6Valid) /* MOP_wasrrri5 */ -DEFINE_MOP(MOP_wasrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"asr","0,1,2",1) +DEFINE_MOP(MOP_wasrrri5, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm8}, 0, kLtAluShift, "asr", "0,1,2", 1, MOP_wasrrri5Valid) /* MOP_xlsrrri6, */ -DEFINE_MOP(MOP_xlsrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"lsr","0,1,2",1) +DEFINE_MOP(MOP_xlsrrri6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm6}, 0, kLtAluShift, "lsr", "0,1,2", 1, MOP_xlsrrri6Valid) /* MOP_wlsrrri5 */ -DEFINE_MOP(MOP_wlsrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"lsr","0,1,2",1) +DEFINE_MOP(MOP_wlsrrri5, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Imm8}, 0, kLtAluShift, "lsr", "0,1,2", 1, MOP_wlsrrri5Valid) /* MOP_xlslrrr, */ -DEFINE_MOP(MOP_xlslrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"lsl","0,1,2",1) +DEFINE_MOP(MOP_xlslrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAluShiftReg, "lsl", "0,1,2", 1) /* MOP_wlslrrr */ -DEFINE_MOP(MOP_wlslrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"lsl","0,1,2",1) +DEFINE_MOP(MOP_wlslrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, 
&OpndDesc::Reg32IS}, 0, kLtAluShiftReg, "lsl", "0,1,2", 1) /* MOP_xasrrrr, */ -DEFINE_MOP(MOP_xasrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"asr","0,1,2",1) +DEFINE_MOP(MOP_xasrrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAluShiftReg, "asr", "0,1,2", 1) /* MOP_wasrrrr */ -DEFINE_MOP(MOP_wasrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"asr","0,1,2",1) +DEFINE_MOP(MOP_wasrrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAluShiftReg, "asr", "0,1,2", 1) /* MOP_xlsrrrr, */ -DEFINE_MOP(MOP_xlsrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"lsr","0,1,2",1) +DEFINE_MOP(MOP_xlsrrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAluShiftReg, "lsr", "0,1,2", 1) /* MOP_wlsrrrr */ -DEFINE_MOP(MOP_wlsrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"lsr","0,1,2",1) +DEFINE_MOP(MOP_wlsrrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAluShiftReg, "lsr", "0,1,2", 1) /* MOP_xrorrrr */ -DEFINE_MOP(MOP_xrorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"ror","0,1,2",1) +DEFINE_MOP(MOP_xrorrrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAluShiftReg, "ror", "0,1,2", 1) /* MOP_wrorrrr */ -DEFINE_MOP(MOP_wrorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"ror","0,1,2",1) +DEFINE_MOP(MOP_wrorrrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAluShiftReg, "ror", "0,1,2", 1) /* MOP_wtstri32 */ -DEFINE_MOP(MOP_wtstri32, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Imm32},0,kLtAlu,"tst","1,2",1) +DEFINE_MOP(MOP_wtstri32, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Imm32}, 0, kLtAlu, "tst", "1,2", 1, MOP_wtstri32Valid) /* MOP_xtstri64 */ -DEFINE_MOP(MOP_xtstri64, {&OpndDesc::CCD,&OpndDesc::Reg64ID,&OpndDesc::Imm64},0,kLtAlu,"tst","1,2",1) 
+DEFINE_MOP(MOP_xtstri64, {&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Imm64}, 0, kLtAlu, "tst", "1,2", 1, MOP_xtstri64Valid) /* MOP_wtstrr */ -DEFINE_MOP(MOP_wtstrr, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"tst","1,2",1) +DEFINE_MOP(MOP_wtstrr, {&OpndDesc::CCD, &OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, 0, kLtAlu, "tst", "1,2", 1) /* MOP_xtstrr */ -DEFINE_MOP(MOP_xtstrr, {&OpndDesc::CCD,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"tst","1,2",1) +DEFINE_MOP(MOP_xtstrr, {&OpndDesc::CCD, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, 0, kLtAlu, "tst", "1,2", 1) /* MOP_wextrrrri5 -- Extracts a register from a pair of registers */ -DEFINE_MOP(MOP_wextrrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Imm5},0,kLtAluShift,"extr","0,1,2,3",1) +DEFINE_MOP(MOP_wextrrrri5, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Imm5}, 0, kLtAluShift, "extr", "0,1,2,3", 1, MOP_wextrrrri5Valid) /* MOP_xextrrrri6 */ -DEFINE_MOP(MOP_xextrrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"extr","0,1,2,3",1) +DEFINE_MOP(MOP_xextrrrri6, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm6}, 0, kLtAluShift, "extr", "0,1,2,3", 1, MOP_xextrrrri6Valid) /* MOP_wsfmovri imm8->s */ -DEFINE_MOP(MOP_wsfmovri, {&OpndDesc::Reg32FD,&OpndDesc::Imm8},ISMOVE,kLtFconst,"fmov","0,1",1) +DEFINE_MOP(MOP_wsfmovri, {&OpndDesc::Reg32FD, &OpndDesc::Imm8}, ISMOVE, kLtFconst, "fmov", "0,1", 1) /* MOP_xdfmovri imm8->d */ -DEFINE_MOP(MOP_xdfmovri, {&OpndDesc::Reg64FD,&OpndDesc::Imm8},ISMOVE,kLtFconst,"fmov","0,1",1) +DEFINE_MOP(MOP_xdfmovri, {&OpndDesc::Reg64FD, &OpndDesc::Imm8}, ISMOVE, kLtFconst, "fmov", "0,1", 1) /* MOP_xcsneg -- Conditional Select Negation */ -DEFINE_MOP(MOP_wcsnegrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csneg","0,1,2,3",1) -DEFINE_MOP(MOP_xcsnegrrrc, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csneg","0,1,2,3",1) -DEFINE_MOP(MOP_wcnegrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cneg","0,1,2",1) -DEFINE_MOP(MOP_xcnegrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cneg","0,1,2",1) +DEFINE_MOP(MOP_wcsnegrrrc, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csneg", "0,1,2,3", 1) +DEFINE_MOP(MOP_xcsnegrrrc, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "csneg", "0,1,2,3", 1) +DEFINE_MOP(MOP_wcnegrrrc, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "cneg", "0,1,2", 1) +DEFINE_MOP(MOP_xcnegrrrc, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtAlu, "cneg", "0,1,2", 1) /* MOP_sabsrr */ -DEFINE_MOP(MOP_sabsrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},0,kLtFpalu,"fabs","0,1",1) +DEFINE_MOP(MOP_sabsrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fabs", "0,1", 1) /* MOP_dabsrr */ -DEFINE_MOP(MOP_dabsrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},0,kLtFpalu,"fabs","0,1",1) +DEFINE_MOP(MOP_dabsrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fabs", "0,1", 1) /* MOP_winegrr */ -DEFINE_MOP(MOP_winegrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"neg","0,1",1) +DEFINE_MOP(MOP_winegrr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, 0, kLtAlu, "neg", "0,1", 1) /* MOP_winegrre */ -DEFINE_MOP(MOP_winegrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"neg","0,1,2",1) +DEFINE_MOP(MOP_winegrrs, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAlu, "neg", "0,1,2", 1, MOP_winegrrsValid) /* neg MOP_xinegrr */ -DEFINE_MOP(MOP_xinegrr, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"neg","0,1",1) +DEFINE_MOP(MOP_xinegrr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, 0, kLtAlu, "neg", "0,1", 1) /* neg MOP_xinegrrs */ -DEFINE_MOP(MOP_xinegrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"neg","0,1,2",1) +DEFINE_MOP(MOP_xinegrrs, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAlu, "neg", "0,1,2", 1, MOP_xinegrrsValid) /* neg f32 */ -DEFINE_MOP(MOP_wfnegrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},0,kLtFpalu,"fneg","0,1",1) +DEFINE_MOP(MOP_wfnegrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fneg", "0,1", 1) /* neg f64 */ -DEFINE_MOP(MOP_xfnegrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},0,kLtFpalu,"fneg","0,1",1) +DEFINE_MOP(MOP_xfnegrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fneg", "0,1", 1) /* MOP_sdivrrr */ -DEFINE_MOP(MOP_sdivrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtAdvsimdDivS,"fdiv","0,1,2",1) +DEFINE_MOP(MOP_sdivrrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, CANTHROW, kLtAdvsimdDivS, "fdiv", "0,1,2", 1) /* MOP_ddivrrr */ -DEFINE_MOP(MOP_ddivrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtAdvsimdDivD,"fdiv","0,1,2",1) +DEFINE_MOP(MOP_ddivrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, CANTHROW, kLtAdvsimdDivD, "fdiv", "0,1,2", 1) /* MOP_smadd */ -DEFINE_MOP(MOP_smadd, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fmadd","0,1,2,3",1) +DEFINE_MOP(MOP_smadd, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, CANTHROW, kLtFpmul, "fmadd", "0,1,2,3", 1) /* MOP_dmadd */ -DEFINE_MOP(MOP_dmadd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fmadd","0,1,2,3",1) +DEFINE_MOP(MOP_dmadd, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, CANTHROW, kLtFpmul, "fmadd", 
"0,1,2,3", 1) /* MOP_smsub */ -DEFINE_MOP(MOP_smsub, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fmsub","0,1,2,3",1) +DEFINE_MOP(MOP_smsub, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, CANTHROW, kLtFpmul, "fmsub", "0,1,2,3", 1) /* MOP_dmsub */ -DEFINE_MOP(MOP_dmsub, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fmsub","0,1,2,3",1) +DEFINE_MOP(MOP_dmsub, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, CANTHROW, kLtFpmul, "fmsub", "0,1,2,3", 1) /* MOP_snmul */ -DEFINE_MOP(MOP_snmul, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fnmul","0,1,2",1) +DEFINE_MOP(MOP_snmul, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, CANTHROW, kLtFpmul, "fnmul", "0,1,2", 1) /* MOP_dnmul */ -DEFINE_MOP(MOP_dnmul, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fnmul","0,1,2",1) +DEFINE_MOP(MOP_dnmul, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, CANTHROW, kLtFpmul, "fnmul", "0,1,2", 1) /* MOP_hcselrrrc --- Floating-point Conditional Select */ -DEFINE_MOP(MOP_hcselrrrc, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS,&OpndDesc::Reg16FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1) +DEFINE_MOP(MOP_hcselrrrc, {&OpndDesc::Reg16FD, &OpndDesc::Reg16FS, &OpndDesc::Reg16FS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtFpalu, "fcsel", "0,1,2,3", 1) /* MOP_scselrrrc */ -DEFINE_MOP(MOP_scselrrrc, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1) +DEFINE_MOP(MOP_scselrrrc, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtFpalu, "fcsel", "0,1,2,3", 1) /* MOP_dcselrrrc */ -DEFINE_MOP(MOP_dcselrrrc, 
{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1) +DEFINE_MOP(MOP_dcselrrrc, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Cond, &OpndDesc::CCS}, ISCONDDEF, kLtFpalu, "fcsel", "0,1,2,3", 1) /* MOP_wldli -- load 32-bit literal */ -DEFINE_MOP(MOP_wldli, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1) +DEFINE_MOP(MOP_wldli, {&OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1) /* MOP_xldli -- load 64-bit literal */ -DEFINE_MOP(MOP_xldli, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1) +DEFINE_MOP(MOP_xldli, {&OpndDesc::Reg64ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1) /* MOP_sldli -- load 32-bit literal */ -DEFINE_MOP(MOP_sldli, {&OpndDesc::Reg32FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1) +DEFINE_MOP(MOP_sldli, {&OpndDesc::Reg32FD, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1) /* MOP_dldli -- load 64-bit literal */ -DEFINE_MOP(MOP_dldli, {&OpndDesc::Reg64FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1) +DEFINE_MOP(MOP_dldli, {&OpndDesc::Reg64FD, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1) /* AArch64 branches/calls */ /* MOP_xbl -- branch with link (call); this is a special definition */ -DEFINE_MOP(MOP_xbl, {&OpndDesc::AddressName,&OpndDesc::ListSrc},ISCALL|CANTHROW,kLtBranch,"bl","0",1) +DEFINE_MOP(MOP_xbl, {&OpndDesc::AddressName, &OpndDesc::ListSrc}, ISCALL | CANTHROW, kLtBranch, "bl", "0", 1) /* MOP_xblr -- branch with link (call) to register; this is a special definition */ -DEFINE_MOP(MOP_xblr, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc},ISCALL|CANTHROW,kLtBranch,"blr","0",1) +DEFINE_MOP(MOP_xblr, {&OpndDesc::Reg64IS, &OpndDesc::ListSrc}, ISCALL | CANTHROW, kLtBranch, "blr", "0", 1) /* Tls descriptor */ /* * add x0, 
#:tprel_hi12:symbol, lsl #12 * add x0, #:tprel_lo12_nc:symbol */ -DEFINE_MOP(MOP_tls_desc_rel, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::LiteralSrc},SPINTRINSIC,kLtAlu,"tlsdescrel","0,1",2) +DEFINE_MOP(MOP_tls_desc_rel, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::LiteralSrc}, SPINTRINSIC, kLtAlu, "tlsdescrel", "0,1", 2) /* * adrp x0, , :tlsdesc:symbol @@ -496,17 +496,32 @@ DEFINE_MOP(MOP_tls_desc_rel, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::L * .tlsdesccall symbol * blr x1 */ -DEFINE_MOP(MOP_tls_desc_call, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::ListSrc},ISCALL|CANTHROW|SPINTRINSIC,kLtBranch,"tlsdesccall","0,1",2) +DEFINE_MOP(MOP_tls_desc_call, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::ListSrc}, ISCALL | CANTHROW | SPINTRINSIC, kLtBranch, "tlsdesccall", "0,1", 2) + +/* + * will be emitted as three instructions in a row: + * adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset + * ldr xd, [xd,#:got_lo12:__staticDecoupleValueOffset$$xxx+offset] + * ldr xd, [xd] + */ +DEFINE_MOP(MOP_tlsload_tdata, {&OpndDesc::Reg64ID}, CANTHROW, kLtAdrpLdr, "intrinsic_tlsload", "0", 3) +DEFINE_MOP(MOP_tlsload_tbss, {&OpndDesc::Reg64ID}, CANTHROW, kLtAdrpLdr, "intrinsic_tlsload", "0", 3) + +/* + * adrp x1, :gottprel:symbol + * ldr x1, [x1, #:gottprel_lo12:symbol] + */ +DEFINE_MOP(MOP_tls_desc_got, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc}, 0, kLtTlsRel, "tlsdescgot", "0,1", 2) /* System register access */ /* MOP_mrs */ -DEFINE_MOP(MOP_mrs, {&OpndDesc::Reg64ID,&OpndDesc::String0S},ISMOVE,kLtAlu,"mrs","0,1",1) +DEFINE_MOP(MOP_mrs, {&OpndDesc::Reg64ID, &OpndDesc::String0S}, ISMOVE, kLtAlu, "mrs", "0,1", 1) /* Inline asm */ /* Number of instructions generated by inline asm is arbitrary. Use a large number here.
*/ /* asm string, output list, clobber list, input list, output constraint, input constraint, out reg prefix, in reg prefix */ -DEFINE_MOP(MOP_asm, {&OpndDesc::String0S,&OpndDesc::ListDest,&OpndDesc::ListDest,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc},INLINEASM|CANTHROW|HASACQUIRE|HASRELEASE,kLtUndef,"asm","0,1,2,3",100) +DEFINE_MOP(MOP_asm, {&OpndDesc::String0S, &OpndDesc::ListDest, &OpndDesc::ListDest, &OpndDesc::ListSrc, &OpndDesc::ListSrc, &OpndDesc::ListSrc, &OpndDesc::ListSrc, &OpndDesc::ListSrc}, INLINEASM | CANTHROW | HASACQUIRE | HASRELEASE, kLtUndef, "asm", "0,1,2,3", 100) /* c sync builtins */ /* @@ -517,7 +532,7 @@ DEFINE_MOP(MOP_asm, {&OpndDesc::String0S,&OpndDesc::ListDest,&OpndDesc::ListDest * cbnz w1, label1 * dmb ish */ -DEFINE_MOP(MOP_sync_lock_test_setI, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_sync_lock_test_setI","0,1,2,3,4",5) +DEFINE_MOP(MOP_sync_lock_test_setI, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_sync_lock_test_setI", "0,1,2,3,4", 5) /* * intrinsic_sync_lock_test_setL x0, w1, x2, x3, lable1 @@ -527,835 +542,835 @@ DEFINE_MOP(MOP_sync_lock_test_setI, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&Opnd * cbnz w1, label1 * dmb ish */ -DEFINE_MOP(MOP_sync_lock_test_setL, {&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_sync_lock_test_setL","0,1,2,3,4",5) +DEFINE_MOP(MOP_sync_lock_test_setL, {&OpndDesc::Reg64ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_sync_lock_test_setL", "0,1,2,3,4", 5) /* AARCH64 LOADS */ /* MOP_wldrsb --- Load Register Signed Byte */ 
-DEFINE_MOP(MOP_wldrsb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb, {&OpndDesc::Reg32ID, &OpndDesc::Mem8S}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, MOP_wldrsbValid, MOP_wldrsbSplit) /* MOP_xldrsb --- Load Register Signed Byte */ -DEFINE_MOP(MOP_xldrsb, {&OpndDesc::Reg64ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb, {&OpndDesc::Reg64ID, &OpndDesc::Mem8S}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, MOP_xldrsbValid, MOP_xldrsbSplit) /* MOP_wldrb */ -DEFINE_MOP(MOP_wldrb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb, {&OpndDesc::Reg32ID, &OpndDesc::Mem8S}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, MOP_wldrbValid, MOP_wldrbSplit) /* MOP_wldrsh --- Load Register Signed Halfword */ -DEFINE_MOP(MOP_wldrsh, {&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, MOP_wldrshValid, MOP_wldrshSplit) /* MOP_xldrsh --- Load Register Signed Halfword */ -DEFINE_MOP(MOP_xldrsh, {&OpndDesc::Reg64ID,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh, {&OpndDesc::Reg64ID, &OpndDesc::Mem16S}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, MOP_xldrshValid, MOP_xldrshSplit) /* MOP_xldrsw --- Load Register Signed Word */ -DEFINE_MOP(MOP_xldrsw, {&OpndDesc::Reg64ID,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw, {&OpndDesc::Reg64ID, &OpndDesc::Mem32S}, ISLOAD | CANTHROW, kLtLoad1, "ldrsw", "0,1", 1, MOP_xldrswValid, MOP_xldrswSplit) /* MOP_wldrh */ -DEFINE_MOP(MOP_wldrh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh, {&OpndDesc::Reg32ID, 
&OpndDesc::Mem16S}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, MOP_wldrhValid, MOP_wldrhSplit) /* MOP_wldr */ -DEFINE_MOP(MOP_wldr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr, {&OpndDesc::Reg32ID, &OpndDesc::Mem32S}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, MOP_wldrValid, MOP_wldrSplit) /* MOP_xldr */ -DEFINE_MOP(MOP_xldr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xldr, {&OpndDesc::Reg64ID, &OpndDesc::Mem64S}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, MOP_xldrValid, MOP_xldrSplit) /* MOP_bldr */ -DEFINE_MOP(MOP_bldr, {&OpndDesc::Reg8FD,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtFLoad64,"ldr","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_bldr, {&OpndDesc::Reg8FD, &OpndDesc::Mem8S}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, MOP_bldrValid, MOP_bldrSplit) /* MOP_hldr */ -DEFINE_MOP(MOP_hldr, {&OpndDesc::Reg16FD,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtFLoad64,"ldr","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_hldr, {&OpndDesc::Reg16FD, &OpndDesc::Mem16S}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, MOP_hldrValid, MOP_hldrSplit) /* MOP_sldr */ -DEFINE_MOP(MOP_sldr, {&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr, {&OpndDesc::Reg32FD, &OpndDesc::Mem32S}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, MOP_sldrValid, MOP_sldrSplit) /* MOP_dldr */ -DEFINE_MOP(MOP_dldr, {&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr, {&OpndDesc::Reg64FD, &OpndDesc::Mem64S}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, MOP_dldrValid, MOP_dldrSplit) /* MOP_qldr */ -DEFINE_MOP(MOP_qldr, {&OpndDesc::Reg128FD,&OpndDesc::Mem128S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr, {&OpndDesc::Reg128FD, &OpndDesc::Mem128S}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", 
"0,1", 1, MOP_qldrValid, MOP_qldrSplit) /* AArch64 LDP/LDPSW */ /* MOP_wldp */ -DEFINE_MOP(MOP_wldp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Mem32S}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, MOP_wldpValid, MOP_wldpSplit) /* MOP_xldp */ -DEFINE_MOP(MOP_xldp, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Mem64S}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, MOP_xldpValid, MOP_xldpSplit) /* MOP_xldpsw */ -DEFINE_MOP(MOP_xldpsw, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Mem32S}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldpsw", "0,1,2", 1, MOP_xldpswValid, MOP_xldpswSplit) /* MOP_sldp */ -DEFINE_MOP(MOP_sldp, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Mem32S}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, MOP_sldpValid, MOP_sldpSplit) /* MOP_dldp */ -DEFINE_MOP(MOP_dldp, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Mem64S}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, MOP_dldpValid, MOP_dldpSplit) /* MOP_qldp */ -DEFINE_MOP(MOP_qldp, {&OpndDesc::Reg128FD,&OpndDesc::Reg128FD,&OpndDesc::Mem128S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) 
+DEFINE_MOP(MOP_qldp, {&OpndDesc::Reg128FD, &OpndDesc::Reg128FD, &OpndDesc::Mem128S}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, MOP_qldpValid, MOP_qldpSplit) /* AARCH64 Load with Acquire semantics */ /* MOP_wldarb */ -DEFINE_MOP(MOP_wldarb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldarb, {&OpndDesc::Reg32ID, &OpndDesc::Mem8S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldarb", "0,1", 1, MOP_wldarbValid) /* MOP_wldarh */ -DEFINE_MOP(MOP_wldarh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldarh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldarh", "0,1", 1, MOP_wldarhValid) /* MOP_wldar */ -DEFINE_MOP(MOP_wldar, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldar, {&OpndDesc::Reg32ID, &OpndDesc::Mem32S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldar", "0,1", 1, MOP_wldarValid) /* MOP_xldar */ -DEFINE_MOP(MOP_xldar, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xldar, {&OpndDesc::Reg64ID, &OpndDesc::Mem64S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldar", "0,1", 1, MOP_xldarValid) /* MOP_wmovkri16 */ -DEFINE_MOP(MOP_wmovkri16, {&OpndDesc::Reg32IDS,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movk","0,1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_wmovkri16, {&OpndDesc::Reg32IDS, &OpndDesc::Imm16, &OpndDesc::Lsl5}, ISMOVE, kLtShift, "movk", "0,1,2", 1, MOP_wmovkri16Valid) /* MOP_xmovkri16 */ -DEFINE_MOP(MOP_xmovkri16, {&OpndDesc::Reg64IDS,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movk","0,1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_xmovkri16, {&OpndDesc::Reg64IDS, &OpndDesc::Imm16, &OpndDesc::Lsl6}, ISMOVE, 
kLtShift, "movk", "0,1,2", 1, MOP_xmovkri16Valid) /* MOP_wmovzri16 */ -DEFINE_MOP(MOP_wmovzri16, {&OpndDesc::Reg32ID,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movz","0,1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_wmovzri16, {&OpndDesc::Reg32ID, &OpndDesc::Imm16, &OpndDesc::Lsl5}, ISMOVE, kLtShift, "movz", "0,1,2", 1, MOP_wmovzri16Valid) /* MOP_xmovzri16 */ -DEFINE_MOP(MOP_xmovzri16, {&OpndDesc::Reg64ID,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movz","0,1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_xmovzri16, {&OpndDesc::Reg64ID, &OpndDesc::Imm16, &OpndDesc::Lsl6}, ISMOVE, kLtShift, "movz", "0,1,2", 1, MOP_xmovzri16Valid) /* MOP_wmovnri16 */ -DEFINE_MOP(MOP_wmovnri16, {&OpndDesc::Reg32ID,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movn","0,1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_wmovnri16, {&OpndDesc::Reg32ID, &OpndDesc::Imm16, &OpndDesc::Lsl5}, ISMOVE, kLtShift, "movn", "0,1,2", 1, MOP_wmovnri16Valid) /* MOP_xmovnri16 */ -DEFINE_MOP(MOP_xmovnri16, {&OpndDesc::Reg64ID,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movn","0,1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_xmovnri16, {&OpndDesc::Reg64ID, &OpndDesc::Imm16, &OpndDesc::Lsl6}, ISMOVE, kLtShift, "movn", "0,1,2", 1, MOP_xmovnri16Valid) /* AARCH64 Load exclusive with/without acquire semantics */ -DEFINE_MOP(MOP_wldxrb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldxrh, {&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldxr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldxr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wldxrb, {&OpndDesc::Reg32ID, &OpndDesc::Mem8S}, ISLOAD | ISATOMIC | CANTHROW, kLtLoad1, "ldxrb", "0,1", 1, MOP_wldxrbValid) +DEFINE_MOP(MOP_wldxrh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S}, 
ISLOAD | ISATOMIC | CANTHROW, kLtLoad1, "ldxrh", "0,1", 1, MOP_wldxrhValid) +DEFINE_MOP(MOP_wldxr, {&OpndDesc::Reg32ID, &OpndDesc::Mem32S}, ISLOAD | ISATOMIC | CANTHROW, kLtLoad1, "ldxr", "0,1", 1, MOP_wldxrValid) +DEFINE_MOP(MOP_xldxr, {&OpndDesc::Reg64ID, &OpndDesc::Mem64S}, ISLOAD | ISATOMIC | CANTHROW, kLtLoad1, "ldxr", "0,1", 1, MOP_xldxrValid) -DEFINE_MOP(MOP_wldaxrb,{&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldaxrh,{&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldaxr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldaxr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wldaxrb, {&OpndDesc::Reg32ID, &OpndDesc::Mem8S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxrb", "0,1", 1, MOP_wldaxrbValid) +DEFINE_MOP(MOP_wldaxrh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxrh", "0,1", 1, MOP_wldaxrhValid) +DEFINE_MOP(MOP_wldaxr, {&OpndDesc::Reg32ID, &OpndDesc::Mem32S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxr", "0,1", 1, MOP_wldaxrValid) +DEFINE_MOP(MOP_xldaxr, {&OpndDesc::Reg64ID, &OpndDesc::Mem64S}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxr", "0,1", 1, MOP_xldaxrValid) -DEFINE_MOP(MOP_wldaxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxp","0,1,2",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldaxp, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxp","0,1,2",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wldaxp, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Mem32S}, ISLOAD | ISLOADPAIR | 
ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxp", "0,1,2", 1, MOP_wldaxpValid) +DEFINE_MOP(MOP_xldaxp, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Mem64S}, ISLOAD | ISLOADPAIR | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxp", "0,1,2", 1, MOP_xldaxpValid) /* MOP_vsqrts */ -DEFINE_MOP(MOP_vsqrts, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},CANTHROW,kLtAdvsimdDivS,"fsqrt","0,1",1) +DEFINE_MOP(MOP_vsqrts, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS}, CANTHROW, kLtAdvsimdDivS, "fsqrt", "0,1", 1) /* MOP_vsqrtd */ -DEFINE_MOP(MOP_vsqrtd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},CANTHROW,kLtAdvsimdDivD,"fsqrt","0,1",1) +DEFINE_MOP(MOP_vsqrtd, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, CANTHROW, kLtAdvsimdDivD, "fsqrt", "0,1", 1) /* # Non Definitions */ /* # As far as register allocation is concerned, the instructions below are non-definitions. */ /* MOP_bcs */ -DEFINE_MOP(MOP_bcs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bcs","1",1) +DEFINE_MOP(MOP_bcs, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bcs", "1", 1) /* MOP_bcc */ -DEFINE_MOP(MOP_bcc, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bcc","1",1) +DEFINE_MOP(MOP_bcc, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bcc", "1", 1) /* MOP_beq */ -DEFINE_MOP(MOP_beq, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"beq","1",1) +DEFINE_MOP(MOP_beq, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "beq", "1", 1) /* MOP_bne */ -DEFINE_MOP(MOP_bne, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bne","1",1) +DEFINE_MOP(MOP_bne, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bne", "1", 1) /* MOP_blt */ -DEFINE_MOP(MOP_blt, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"blt","1",1) +DEFINE_MOP(MOP_blt, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "blt", "1", 1) /* MOP_ble */ -DEFINE_MOP(MOP_ble, 
{&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"ble","1",1) +DEFINE_MOP(MOP_ble, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "ble", "1", 1) /* MOP_bgt */ -DEFINE_MOP(MOP_bgt, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bgt","1",1) +DEFINE_MOP(MOP_bgt, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bgt", "1", 1) /* MOP_bge */ -DEFINE_MOP(MOP_bge, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bge","1",1) +DEFINE_MOP(MOP_bge, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bge", "1", 1) /* MOP_blo equal to MOP_blt for unsigned comparison */ -DEFINE_MOP(MOP_blo, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"blo","1",1) +DEFINE_MOP(MOP_blo, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "blo", "1", 1) /* MOP_bls equal to MOP_bls for unsigned comparison */ -DEFINE_MOP(MOP_bls, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bls","1",1) +DEFINE_MOP(MOP_bls, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bls", "1", 1) /* MOP_bhs equal to MOP_bge for unsigned comparison */ -DEFINE_MOP(MOP_bhs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bhs","1",1) +DEFINE_MOP(MOP_bhs, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bhs", "1", 1) /* MOP_bhi equal to MOP_bgt for float comparison */ -DEFINE_MOP(MOP_bhi, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bhi","1",1) +DEFINE_MOP(MOP_bhi, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bhi", "1", 1) /* MOP_bpl equal to MOP_bge for float comparison */ -DEFINE_MOP(MOP_bpl, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bpl","1",1) -DEFINE_MOP(MOP_bmi, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bmi","1",1) -DEFINE_MOP(MOP_bvc, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bvc","1",1) -DEFINE_MOP(MOP_bvs, 
{&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bvs","1",1) +DEFINE_MOP(MOP_bpl, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bpl", "1", 1) +DEFINE_MOP(MOP_bmi, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bmi", "1", 1) +DEFINE_MOP(MOP_bvc, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bvc", "1", 1) +DEFINE_MOP(MOP_bvs, {&OpndDesc::CCS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "bvs", "1", 1) /* MOP_xret AARCH64 Specific */ -DEFINE_MOP(MOP_xret, {},CANTHROW,kLtBranch,"ret","",1) +DEFINE_MOP(MOP_xret, {}, CANTHROW, kLtBranch, "ret", "", 1) /* MOP_clrex AARCH64 Specific */ -DEFINE_MOP(MOP_clrex, {},CANTHROW,kLtBranch,"clrex","",1) +DEFINE_MOP(MOP_clrex, {}, CANTHROW, kLtBranch, "clrex", "", 1) /* AARCH64 Floating-Point COMPARES signaling versions */ /* MOP_hcmperi -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_hcmperi, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1) +DEFINE_MOP(MOP_hcmperi, {&OpndDesc::CCD, &OpndDesc::Reg16FS, &OpndDesc::FpImm8}, 0, kLtFpalu, "fcmpe", "1,2", 1) /* MOP_hcmperr -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_hcmperr, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::Reg16FS},0,kLtFpalu,"fcmpe","1,2",1) +DEFINE_MOP(MOP_hcmperr, {&OpndDesc::CCD, &OpndDesc::Reg16FS, &OpndDesc::Reg16FS}, 0, kLtFpalu, "fcmpe", "1,2", 1) /* MOP_scmperi -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_scmperi, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1) +DEFINE_MOP(MOP_scmperi, {&OpndDesc::CCD, &OpndDesc::Reg32FS, &OpndDesc::FpImm8}, 0, kLtFpalu, "fcmpe", "1,2", 1) /* MOP_scmperr */ -DEFINE_MOP(MOP_scmperr, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fcmpe","1,2",1) +DEFINE_MOP(MOP_scmperr, {&OpndDesc::CCD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fcmpe", "1,2", 1) /* MOP_dcmperi -- AArch64 cmp has no dest operand */ 
-DEFINE_MOP(MOP_dcmperi, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1) +DEFINE_MOP(MOP_dcmperi, {&OpndDesc::CCD, &OpndDesc::Reg64FS, &OpndDesc::FpImm8}, 0, kLtFpalu, "fcmpe", "1,2", 1) /* MOP_dcmperr */ -DEFINE_MOP(MOP_dcmperr, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fcmpe","1,2",1) +DEFINE_MOP(MOP_dcmperr, {&OpndDesc::CCD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fcmpe", "1,2", 1) /* AARCH64 Floating-Point COMPARES non-signaling (quiet) versions */ /* MOP_hcmpqri -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_hcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1) +DEFINE_MOP(MOP_hcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg16FS, &OpndDesc::FpImm8}, 0, kLtFpalu, "fcmp", "1,2", 1) /* MOP_hcmpqrr -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_hcmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::Reg16FS},0,kLtFpalu,"fcmp","1,2",1) +DEFINE_MOP(MOP_hcmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg16FS, &OpndDesc::Reg16FS}, 0, kLtFpalu, "fcmp", "1,2", 1) /* MOP_scmpqri -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_scmpqri, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1) +DEFINE_MOP(MOP_scmpqri, {&OpndDesc::CCD, &OpndDesc::Reg32FS, &OpndDesc::FpImm8}, 0, kLtFpalu, "fcmp", "1,2", 1) /* MOP_scmpqrr */ -DEFINE_MOP(MOP_scmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fcmp","1,2",1) +DEFINE_MOP(MOP_scmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, 0, kLtFpalu, "fcmp", "1,2", 1) /* MOP_dcmpqri -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_dcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1) +DEFINE_MOP(MOP_dcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg64FS, &OpndDesc::FpImm8}, 0, kLtFpalu, "fcmp", "1,2", 1) /* MOP_dcmpqrr */ -DEFINE_MOP(MOP_dcmpqrr, {&OpndDesc::CCD, 
&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fcmp","1,2",1) +DEFINE_MOP(MOP_dcmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, 0, kLtFpalu, "fcmp", "1,2", 1) /* AARCH64 Integer COMPARES */ /* MOP_wcmpri -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wcmpri, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"cmp","1,2",1,Imm12BitValid) +DEFINE_MOP(MOP_wcmpri, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "cmp", "1,2", 1, MOP_wcmpriValid) /* MOP_wcmprr -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wcmprr, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"cmp","1,2",1) +DEFINE_MOP(MOP_wcmprr, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "cmp", "1,2", 1) /* MOP_wcmprrs -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"cmp","1,2,3",1) +DEFINE_MOP(MOP_wcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAlu, "cmp", "1,2,3", 1, MOP_wcmprrsValid) /* MOP_wwcmprre -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmp","1,2,3",1) +DEFINE_MOP(MOP_wwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAlu, "cmp", "1,2,3", 1, MOP_wwcmprreValid) /* MOP_xcmpri -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xcmpri, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm16},0,kLtAlu,"cmp","1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_xcmpri, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Imm16}, 0, kLtAlu, "cmp", "1,2", 1, MOP_xcmpriValid) /* MOP_xcmprr -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xcmprr, {&OpndDesc::CCD, 
&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"cmp","1,2",1) +DEFINE_MOP(MOP_xcmprr, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "cmp", "1,2", 1) /* MOP_xcmprrs -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"cmp","1,2,3",1) +DEFINE_MOP(MOP_xcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAlu, "cmp", "1,2,3", 1, MOP_xcmprrsValid) /* MOP_xwcmprre -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmp","1,2,3",1) +DEFINE_MOP(MOP_xwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAlu, "cmp", "1,2,3", 1, MOP_xwcmprreValid) /* MOP_wccmpriic -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wccmpriic, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) +DEFINE_MOP(MOP_wccmpriic, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Imm5, &OpndDesc::Imm4, &OpndDesc::Cond, &OpndDesc::CCS}, 0, kLtAlu, "ccmp", "1,2,3,4", 1, MOP_wccmpriicValid, MOP_wccmpriicSplit) /* MOP_wccmprric -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wccmprric, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) +DEFINE_MOP(MOP_wccmprric, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Imm4, &OpndDesc::Cond, &OpndDesc::CCS}, 0, kLtAlu, "ccmp", "1,2,3,4", 1, MOP_wccmprricValid) /* MOP_xccmpriic -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xccmpriic, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm5,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) +DEFINE_MOP(MOP_xccmpriic, 
{&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Imm5, &OpndDesc::Imm4, &OpndDesc::Cond, &OpndDesc::CCS}, 0, kLtAlu, "ccmp", "1,2,3,4", 1, MOP_xccmpriicValid, MOP_xccmpriicSplit) /* MOP_xccmprric -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xccmprric, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) +DEFINE_MOP(MOP_xccmprric, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm4, &OpndDesc::Cond, &OpndDesc::CCS}, 0, kLtAlu, "ccmp", "1,2,3,4", 1, MOP_xccmprricValid) /* MOP_wcmnri -- AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wcmnri, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"cmn","1,2",1,Imm12BitValid) +DEFINE_MOP(MOP_wcmnri, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Imm12}, 0, kLtAlu, "cmn", "1,2", 1, MOP_wcmnriValid) /* MOP_wcmnrr -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"cmn","1,2",1) +DEFINE_MOP(MOP_wcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, 0, kLtAlu, "cmn", "1,2", 1) /* MOP_wcmnrrs -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"cmn","1,2,3",1) +DEFINE_MOP(MOP_wcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Bitshift32}, 0, kLtAlu, "cmn", "1,2,3", 1, MOP_wcmnrrsValid) /* MOP_wwcmnrre -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_wwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmn","1,2,3",1) +DEFINE_MOP(MOP_wwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAlu, "cmn", "1,2,3", 1, MOP_wwcmnrreValid) /* MOP_xcmnri -- AArch64 cmp has no dest operand */ 
-DEFINE_MOP(MOP_xcmnri, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm16},0,kLtAlu,"cmn","1,2",1,Imm16BitValid) +DEFINE_MOP(MOP_xcmnri, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Imm16}, 0, kLtAlu, "cmn", "1,2", 1, MOP_xcmnriValid) /* MOP_xcmnrr -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"cmn","1,2",1) +DEFINE_MOP(MOP_xcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, 0, kLtAlu, "cmn", "1,2", 1) /* MOP_xcmnrrs -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"cmn","1,2,3",1) +DEFINE_MOP(MOP_xcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, 0, kLtAlu, "cmn", "1,2,3", 1, MOP_xcmnrrsValid) /* MOP_xwcmnrre -- register, shifted register, AArch64 cmp has no dest operand */ -DEFINE_MOP(MOP_xwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmn","1,2,3",1) +DEFINE_MOP(MOP_xwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, 0, kLtAlu, "cmn", "1,2,3", 1, MOP_xwcmnrreValid) /* AArch64 branches */ /* MOP_xbr -- branch to register */ -DEFINE_MOP(MOP_xbr, {&OpndDesc::Reg64IS,&OpndDesc::LiteralSrc},ISUNCONDBRANCH,kLtBranch,"br","0",1) +DEFINE_MOP(MOP_xbr, {&OpndDesc::Reg64IS, &OpndDesc::LiteralSrc}, ISUNCONDBRANCH, kLtBranch, "br", "0", 1) /* MOP_Tbbuncond */ -DEFINE_MOP(MOP_xuncond, {&OpndDesc::AddressName},ISUNCONDBRANCH,kLtBranch,"b","0",1) +DEFINE_MOP(MOP_xuncond, {&OpndDesc::AddressName}, ISUNCONDBRANCH, kLtBranch, "b", "0", 1) /* MOP_wcbnz --- Compare and Branch on Nonzero */ -DEFINE_MOP(MOP_wcbnz, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbnz","0,1",1) +DEFINE_MOP(MOP_wcbnz, {&OpndDesc::Reg32IS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "cbnz", 
"0,1", 1) /* MOP_xcbnz */ -DEFINE_MOP(MOP_xcbnz, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbnz","0,1",1) +DEFINE_MOP(MOP_xcbnz, {&OpndDesc::Reg64IS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "cbnz", "0,1", 1) /* MOP_wcbz --- Compare and Branch on zero */ -DEFINE_MOP(MOP_wcbz, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbz","0,1",1) +DEFINE_MOP(MOP_wcbz, {&OpndDesc::Reg32IS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "cbz", "0,1", 1) /* MOP_xcbz */ -DEFINE_MOP(MOP_xcbz, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbz","0,1",1) +DEFINE_MOP(MOP_xcbz, {&OpndDesc::Reg64IS, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "cbz", "0,1", 1) /* MOP_wtbnz --- Test bit and Branch if Nonzero */ -DEFINE_MOP(MOP_wtbnz, {&OpndDesc::Reg32IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbnz","0,1,2",1) +DEFINE_MOP(MOP_wtbnz, {&OpndDesc::Reg32IS, &OpndDesc::Imm8, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "tbnz", "0,1,2", 1, MOP_wtbnzValid) /* MOP_xtbnz */ -DEFINE_MOP(MOP_xtbnz, {&OpndDesc::Reg64IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbnz","0,1,2",1) +DEFINE_MOP(MOP_xtbnz, {&OpndDesc::Reg64IS, &OpndDesc::Imm8, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "tbnz", "0,1,2", 1, MOP_xtbnzValid) /* MOP_wtbz --- Test bit and Branch if Zero */ -DEFINE_MOP(MOP_wtbz, {&OpndDesc::Reg32IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbz","0,1,2",1) +DEFINE_MOP(MOP_wtbz, {&OpndDesc::Reg32IS, &OpndDesc::Imm8, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "tbz", "0,1,2", 1, MOP_wtbzValid) /* MOP_xtbz */ -DEFINE_MOP(MOP_xtbz, {&OpndDesc::Reg64IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbz","0,1,2",1) +DEFINE_MOP(MOP_xtbz, {&OpndDesc::Reg64IS, &OpndDesc::Imm8, &OpndDesc::AddressName}, ISCONDBRANCH, kLtBranch, "tbz", "0,1,2", 1, MOP_xtbzValid) /* AARCH64 STORES */ /* MOP_wstrb -- Store Register Byte */ 
-DEFINE_MOP(MOP_wstrb, {&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb, {&OpndDesc::Reg32IS, &OpndDesc::Mem8D}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, MOP_wstrbValid, MOP_wstrbSplit) /* MOP_wstrh -- Store Register Halfword */ -DEFINE_MOP(MOP_wstrh, {&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh, {&OpndDesc::Reg32IS, &OpndDesc::Mem16D}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, MOP_wstrhValid, MOP_wstrhSplit) /* MOP_wstr -- Store Register Word */ -DEFINE_MOP(MOP_wstr, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr, {&OpndDesc::Reg32IS, &OpndDesc::Mem32D}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, MOP_wstrValid, MOP_wstrSplit) /* MOP_xstr -- Store Register Double word */ -DEFINE_MOP(MOP_xstr, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr, {&OpndDesc::Reg64IS, &OpndDesc::Mem64D}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, MOP_xstrValid, MOP_xstrSplit) /* MOP_sstr -- Store Register SIMD/FP Float */ -DEFINE_MOP(MOP_sstr, {&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr, {&OpndDesc::Reg32FS, &OpndDesc::Mem32D}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, MOP_sstrValid, MOP_sstrSplit) /* MOP_dstr -- Store Register SIMD/FP Double */ -DEFINE_MOP(MOP_dstr, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr, {&OpndDesc::Reg64FS, &OpndDesc::Mem64D}, ISSTORE | CANTHROW, kLtStore3plus, "str", "0,1", 1, MOP_dstrValid, MOP_dstrSplit) /* MOP_qstr -- Store Register SIMD/FP Double */ -DEFINE_MOP(MOP_qstr, {&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) 
+DEFINE_MOP(MOP_qstr, {&OpndDesc::Reg128VS, &OpndDesc::Mem128D}, ISSTORE | CANTHROW, kLtStore3plus, "str", "0,1", 1, MOP_qstrValid, MOP_qstrSplit) /* AArch64 STP. */ /* MOP_wstp */ -DEFINE_MOP(MOP_wstp, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Mem32D}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, MOP_wstpValid, MOP_wstpSplit) /* MOP_xstp */ -DEFINE_MOP(MOP_xstp, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xstp, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Mem64D}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, MOP_xstpValid, MOP_xstpSplit) /* AArch64 does not define STPSW. It has no practical value. */ /* MOP_sstp */ -DEFINE_MOP(MOP_sstp, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Mem32D}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, MOP_sstpValid, MOP_sstpSplit) /* MOP_dstp */ -DEFINE_MOP(MOP_dstp, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Mem64D}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, MOP_dstpValid, MOP_dstpSplit) /* MOP_qstp */ -DEFINE_MOP(MOP_qstp, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Mem128D}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, 
MOP_qstpValid, MOP_qstpSplit) /* AARCH64 Store with Release semantics */ /* MOP_wstlrb -- Store-Release Register Byte */ -DEFINE_MOP(MOP_wstlrb, {&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstlrb, {&OpndDesc::Reg32IS, &OpndDesc::Mem8D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlrb", "0,1", 1, MOP_wstlrbValid) /* MOP_wstlrh -- Store-Release Register Halfword */ -DEFINE_MOP(MOP_wstlrh, {&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstlrh, {&OpndDesc::Reg32IS, &OpndDesc::Mem16D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlrh", "0,1", 1, MOP_wstlrhValid) /* MOP_wstlr -- Store-Release Register Word */ -DEFINE_MOP(MOP_wstlr, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstlr, {&OpndDesc::Reg32IS, &OpndDesc::Mem32D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlr", "0,1", 1, MOP_wstlrValid) /* MOP_xstlr -- Store-Release Register Double word */ -DEFINE_MOP(MOP_xstlr, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstlr, {&OpndDesc::Reg64IS, &OpndDesc::Mem64D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlr", "0,1", 1, MOP_xstlrValid) /* AARCH64 Store exclusive with/without release semantics */ -DEFINE_MOP(MOP_wstxrb, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrb","0,1,2",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstxrh, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrh","0,1,2",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr32ImmValid) 
-DEFINE_MOP(MOP_xstxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wstxrb, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Mem8D}, ISSTORE | ISATOMIC | CANTHROW, kLtStore1, "stxrb", "0,1,2", 1, MOP_wstxrbValid) +DEFINE_MOP(MOP_wstxrh, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Mem16D}, ISSTORE | ISATOMIC | CANTHROW, kLtStore1, "stxrh", "0,1,2", 1, MOP_wstxrhValid) +DEFINE_MOP(MOP_wstxr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Mem32D}, ISSTORE | ISATOMIC | CANTHROW, kLtStore1, "stxr", "0,1,2", 1, MOP_wstxrValid) +DEFINE_MOP(MOP_xstxr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Mem64D}, ISSTORE | ISATOMIC | CANTHROW, kLtStore1, "stxr", "0,1,2", 1, MOP_xstxrValid) -DEFINE_MOP(MOP_wstlxrb,{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrb","0,1,2",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstlxrh,{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrh","0,1,2",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstlxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xstlxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wstlxrb, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Mem8D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlxrb", "0,1,2", 1, MOP_wstlxrbValid) +DEFINE_MOP(MOP_wstlxrh, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Mem16D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlxrh", "0,1,2", 1, MOP_wstlxrhValid) +DEFINE_MOP(MOP_wstlxr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Mem32D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlxr", "0,1,2", 1, 
MOP_wstlxrValid) +DEFINE_MOP(MOP_xstlxr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Mem64D}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlxr", "0,1,2", 1, MOP_xstlxrValid) -DEFINE_MOP(MOP_wstlxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstlxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wstlxp, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Mem64D}, ISSTORE | ISSTOREPAIR | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlxp", "0,1,2,3", 1, MOP_wstlxpValid) +DEFINE_MOP(MOP_xstlxp, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Mem64D}, ISSTORE | ISSTOREPAIR | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlxp", "0,1,2,3", 1, MOP_xstlxpValid) /* Generate a breakpoint instruction exception */ -DEFINE_MOP(MOP_brk, {&OpndDesc::Imm16}, 0, kLtAlu, "brk", "0", 1, Imm16BitValid) +DEFINE_MOP(MOP_brk, {&OpndDesc::Imm16}, 0, kLtAlu, "brk", "0", 1, MOP_brkValid) /* Memory barriers */ /* MOP_dmb_ishld */ -DEFINE_MOP(MOP_dmb_ishld, {}, HASACQUIRE|ISDMB,kLtBranch, "dmb\tishld", "",1) +DEFINE_MOP(MOP_dmb_ishld, {}, HASACQUIRE | ISDMB, kLtBranch, "dmb\tishld", "", 1) /* MOP_dmb_ishst */ -DEFINE_MOP(MOP_dmb_ishst, {}, HASRELEASE|ISDMB,kLtBranch, "dmb\tishst", "",1) +DEFINE_MOP(MOP_dmb_ishst, {}, HASRELEASE | ISDMB, kLtBranch, "dmb\tishst", "", 1) /* MOP_dmb_ish */ -DEFINE_MOP(MOP_dmb_ish, {}, HASACQUIRE|HASRELEASE|ISDMB,kLtBranch, "dmb\tish", "",1) +DEFINE_MOP(MOP_dmb_ish, {}, HASACQUIRE | HASRELEASE | ISDMB, kLtBranch, "dmb\tish", "", 1) /* Neon simd, r:nonvector reg, u:64b vector reg, v:128b vector reg */ /* Following ISMOVE vector instructions must be in a group, starting with vmovui and end with vmovvv */ 
-DEFINE_MOP(MOP_vmovui, {&OpndDesc::Reg64VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"movi","0,1",1) -DEFINE_MOP(MOP_vmovvi, {&OpndDesc::Reg128VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"movi","0,1",1) -DEFINE_MOP(MOP_vmovuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) -DEFINE_MOP(MOP_vmovvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) -DEFINE_MOP(MOP_vmovlaneuu, {&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) -DEFINE_MOP(MOP_vmovlanevu, {&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) -DEFINE_MOP(MOP_vmovlanevv, {&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) -DEFINE_MOP(MOP_vmov2vv, {&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) -DEFINE_MOP(MOP_vwmovru, {&OpndDesc::Reg32ID,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umov","0,1",1) -DEFINE_MOP(MOP_vwmovrv, {&OpndDesc::Reg32ID,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umov","0,1",1) -DEFINE_MOP(MOP_vxmovrv, {&OpndDesc::Reg64ID,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umov","0,1",1) -DEFINE_MOP(MOP_vwsmovru, {&OpndDesc::Reg32ID,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smov","0,1",1) -DEFINE_MOP(MOP_vwsmovrv, {&OpndDesc::Reg32ID,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smov","0,1",1) -DEFINE_MOP(MOP_vwdupur, {&OpndDesc::Reg64VD,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vwdupvr, {&OpndDesc::Reg128VD,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vxdupur, {&OpndDesc::Reg64VD,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vxdupvr, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vduprv, {&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vbdupru, {&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) 
-DEFINE_MOP(MOP_vbduprv, {&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vhdupru, {&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vhduprv, {&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vsdupru, {&OpndDesc::Reg32FD,&OpndDesc::Reg64VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vsduprv, {&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vdupuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vdupuv, {&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vdupvu, {&OpndDesc::Reg128VD,&OpndDesc::Reg64VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vdupvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) -DEFINE_MOP(MOP_vextuuui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ext","0,1,2,3",1) -DEFINE_MOP(MOP_vextvvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ext","0,1,2,3",1) -DEFINE_MOP(MOP_vsabdlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"sabdl","0,1,2",1) -DEFINE_MOP(MOP_vuabdlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uabdl","0,1,2",1) -DEFINE_MOP(MOP_vsabdl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sabdl2","0,1,2",1) -DEFINE_MOP(MOP_vuabdl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uabdl2","0,1,2",1) -DEFINE_MOP(MOP_vspadaluu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"sadalp","0,1",1) -DEFINE_MOP(MOP_vspadalvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"sadalp","0,1",1) 
-DEFINE_MOP(MOP_vupadaluu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uadalp","0,1",1) -DEFINE_MOP(MOP_vupadalvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"uadalp","0,1",1) -DEFINE_MOP(MOP_vsaddlpuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"saddlp","0,1",1) -DEFINE_MOP(MOP_vsaddlpvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"saddlp","0,1",1) -DEFINE_MOP(MOP_vuaddlpuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uaddlp","0,1",1) -DEFINE_MOP(MOP_vuaddlpvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"uaddlp","0,1",1) -DEFINE_MOP(MOP_vwinsur, {&OpndDesc::Reg64VDS,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) -DEFINE_MOP(MOP_vxinsur, {&OpndDesc::Reg64VDS,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) -DEFINE_MOP(MOP_vwinsvr, {&OpndDesc::Reg128VDS,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) -DEFINE_MOP(MOP_vxinsvr, {&OpndDesc::Reg128VDS,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) -DEFINE_MOP(MOP_vrev16dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev16","0,1",1) -DEFINE_MOP(MOP_vrev32dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev32","0,1",1) -DEFINE_MOP(MOP_vrev64dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev64","0,1",1) -DEFINE_MOP(MOP_vrev16qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev16","0,1",1) -DEFINE_MOP(MOP_vrev32qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev32","0,1",1) -DEFINE_MOP(MOP_vrev64qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev64","0,1",1) -DEFINE_MOP(MOP_vbaddvru,{&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) -DEFINE_MOP(MOP_vhaddvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) -DEFINE_MOP(MOP_vsaddvru,{&OpndDesc::Reg32FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) 
-DEFINE_MOP(MOP_vbaddvrv,{&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) -DEFINE_MOP(MOP_vhaddvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) -DEFINE_MOP(MOP_vsaddvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) -DEFINE_MOP(MOP_vdaddvrv,{&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addp","0,1",1) - -DEFINE_MOP(MOP_vzcmequu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) -DEFINE_MOP(MOP_vzcmgtuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) -DEFINE_MOP(MOP_vzcmgeuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) -DEFINE_MOP(MOP_vzcmltuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmlt","0,1,2",1) -DEFINE_MOP(MOP_vzcmleuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmle","0,1,2",1) -DEFINE_MOP(MOP_vzcmeqvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) -DEFINE_MOP(MOP_vzcmgtvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) -DEFINE_MOP(MOP_vzcmgevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) -DEFINE_MOP(MOP_vzcmltvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmlt","0,1,2",1) -DEFINE_MOP(MOP_vzcmlevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmle","0,1,2",1) -DEFINE_MOP(MOP_vcmequuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) -DEFINE_MOP(MOP_vcmgeuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) -DEFINE_MOP(MOP_vcmgtuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) 
-DEFINE_MOP(MOP_vcmhiuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmhi","0,1,2",1) -DEFINE_MOP(MOP_vcmhsuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmhs","0,1,2",1) -DEFINE_MOP(MOP_vcmeqvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) -DEFINE_MOP(MOP_vcmgevvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) -DEFINE_MOP(MOP_vcmgtvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) -DEFINE_MOP(MOP_vcmhivvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmhi","0,1,2",1) -DEFINE_MOP(MOP_vcmhsvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmhs","0,1,2",1) -DEFINE_MOP(MOP_vbsluuu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"bsl","0,1,2",1) -DEFINE_MOP(MOP_vbslvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"bsl","0,1,2",1) - -DEFINE_MOP(MOP_vshlrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sshl","0,1,2",1) -DEFINE_MOP(MOP_vshluuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sshl","0,1,2",1) -DEFINE_MOP(MOP_vshlvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sshl","0,1,2",1) -DEFINE_MOP(MOP_vushlrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"ushl","0,1,2",1) -DEFINE_MOP(MOP_vushluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ushl","0,1,2",1) -DEFINE_MOP(MOP_vushlvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ushl","0,1,2",1) - -DEFINE_MOP(MOP_vushlrri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shl","0,1,2",1) 
-DEFINE_MOP(MOP_vushluui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shl","0,1,2",1) -DEFINE_MOP(MOP_vushlvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shl","0,1,2",1) -DEFINE_MOP(MOP_vushrrri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushr","0,1,2",1) -DEFINE_MOP(MOP_vushruui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushr","0,1,2",1) -DEFINE_MOP(MOP_vushrvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushr","0,1,2",1) - -DEFINE_MOP(MOP_vxtnuv, {&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"xtn","0,1",1) -DEFINE_MOP(MOP_vsxtlvu, {&OpndDesc::Reg128VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sxtl","0,1",1) -DEFINE_MOP(MOP_vuxtlvu, {&OpndDesc::Reg128VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uxtl","0,1",1) -DEFINE_MOP(MOP_vxtn2uv, {&OpndDesc::Reg64VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"xtn2","0,1",1) -DEFINE_MOP(MOP_vsxtl2vv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sxtl2","0,1",1) -DEFINE_MOP(MOP_vuxtl2vv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uxtl2","0,1",1) - -DEFINE_MOP(MOP_vshruui, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshr","0,1,2",1) -DEFINE_MOP(MOP_vshrvvi, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshr","0,1,2",1) - -DEFINE_MOP(MOP_vsmaddvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smlal","0,1,2",1) -DEFINE_MOP(MOP_vumaddvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umlal","0,1,2",1) -DEFINE_MOP(MOP_vsmullvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smull","0,1,2",1) -DEFINE_MOP(MOP_vumullvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umull","0,1,2",1) 
-DEFINE_MOP(MOP_vsmullvuv,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smull","0,1,2",1) -DEFINE_MOP(MOP_vumullvuv,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umull","0,1,2",1) -DEFINE_MOP(MOP_vsmullvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smull","0,1,2",1) -DEFINE_MOP(MOP_vumullvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umull","0,1,2",1) -DEFINE_MOP(MOP_vsmull2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smull2","0,1,2",1) -DEFINE_MOP(MOP_vumull2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umull2","0,1,2",1) -DEFINE_MOP(MOP_vabsrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"abs","0,1",1) -DEFINE_MOP(MOP_vabsuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"abs","0,1",1) -DEFINE_MOP(MOP_vabsvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"abs","0,1",1) -DEFINE_MOP(MOP_vaddrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"add","0,1,2",1) -DEFINE_MOP(MOP_vadduuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"add","0,1,2",1) -DEFINE_MOP(MOP_vsaddlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddl","0,1,2",1) -DEFINE_MOP(MOP_vuaddlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddl","0,1,2",1) -DEFINE_MOP(MOP_vsaddl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddl2","0,1,2",1) -DEFINE_MOP(MOP_vuaddl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddl2","0,1,2",1) -DEFINE_MOP(MOP_vsaddwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddw","0,1,2",1) 
-DEFINE_MOP(MOP_vuaddwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddw","0,1,2",1) -DEFINE_MOP(MOP_vsaddw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddw2","0,1,2",1) -DEFINE_MOP(MOP_vuaddw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddw2","0,1,2",1) -DEFINE_MOP(MOP_vaddvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"add","0,1,2",1) -DEFINE_MOP(MOP_vmuluuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mul","0,1,2",1) -DEFINE_MOP(MOP_vmulvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mul","0,1,2",1) -DEFINE_MOP(MOP_vsubrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sub","0,1,2",1) -DEFINE_MOP(MOP_vsubuuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sub","0,1,2",1) -DEFINE_MOP(MOP_vsubvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sub","0,1,2",1) -DEFINE_MOP(MOP_vandrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"and","0,1,2",1) -DEFINE_MOP(MOP_vanduuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"and","0,1,2",1) -DEFINE_MOP(MOP_vandvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"and","0,1,2",1) -DEFINE_MOP(MOP_voruuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"orr","0,1,2",1) -DEFINE_MOP(MOP_vorvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"orr","0,1,2",1) -DEFINE_MOP(MOP_vxoruuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"eor","0,1,2",1) -DEFINE_MOP(MOP_vxorvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"eor","0,1,2",1) -DEFINE_MOP(MOP_vornuuu, 
{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"orn","0,1,2",1) -DEFINE_MOP(MOP_vornvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"orn","0,1,2",1) -DEFINE_MOP(MOP_vnotuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"not","0,1",1) -DEFINE_MOP(MOP_vnotvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"not","0,1",1) -DEFINE_MOP(MOP_vnegrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"neg","0,1",1) -DEFINE_MOP(MOP_vneguu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"neg","0,1",1) -DEFINE_MOP(MOP_vnegvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"neg","0,1",1) -DEFINE_MOP(MOP_vssublvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ssubl","0,1,2",1) -DEFINE_MOP(MOP_vusublvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"usubl","0,1,2",1) -DEFINE_MOP(MOP_vssubl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ssubl2","0,1,2",1) -DEFINE_MOP(MOP_vusubl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"usubl2","0,1,2",1) -DEFINE_MOP(MOP_vssubwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ssubw","0,1,2",1) -DEFINE_MOP(MOP_vusubwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"usubw","0,1,2",1) -DEFINE_MOP(MOP_vssubw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ssubw2","0,1,2",1) -DEFINE_MOP(MOP_vusubw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"usubw2","0,1,2",1) -DEFINE_MOP(MOP_vzip1uuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"zip1","0,1,2",1) -DEFINE_MOP(MOP_vzip1vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"zip1","0,1,2",1) 
-DEFINE_MOP(MOP_vzip2uuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"zip2","0,1,2",1) -DEFINE_MOP(MOP_vzip2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"zip2","0,1,2",1) -DEFINE_MOP(MOP_vbsqxtnrr,{&OpndDesc::Reg8FD,&OpndDesc::Reg16FS},ISVECTOR,kLtFpalu,"sqxtn","0,1",1) -DEFINE_MOP(MOP_vhsqxtnrr,{&OpndDesc::Reg16FD,&OpndDesc::Reg32FS},ISVECTOR,kLtFpalu,"sqxtn","0,1",1) -DEFINE_MOP(MOP_vssqxtnrr,{&OpndDesc::Reg32FD,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqxtn","0,1",1) -DEFINE_MOP(MOP_vsqxtnuv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqxtn","0,1",1) -DEFINE_MOP(MOP_vsqxtn2vv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqxtn2","0,1",1) -DEFINE_MOP(MOP_vbsqxtunrr,{&OpndDesc::Reg8FD,&OpndDesc::Reg16FS},ISVECTOR,kLtFpalu,"sqxtun","0,1",1) -DEFINE_MOP(MOP_vhsqxtunrr,{&OpndDesc::Reg16FD,&OpndDesc::Reg32FS},ISVECTOR,kLtFpalu,"sqxtun","0,1",1) -DEFINE_MOP(MOP_vssqxtunrr,{&OpndDesc::Reg32FD,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqxtun","0,1",1) -DEFINE_MOP(MOP_vsqxtunuv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqxtun","0,1",1) -DEFINE_MOP(MOP_vsqxtun2vv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqxtun2","0,1",1) -DEFINE_MOP(MOP_vbuqxtnrr,{&OpndDesc::Reg8FD,&OpndDesc::Reg16FS},ISVECTOR,kLtFpalu,"uqxtn","0,1",1) -DEFINE_MOP(MOP_vhuqxtnrr,{&OpndDesc::Reg16FD,&OpndDesc::Reg32FS},ISVECTOR,kLtFpalu,"uqxtn","0,1",1) -DEFINE_MOP(MOP_vsuqxtnrr,{&OpndDesc::Reg32FD,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"uqxtn","0,1",1) -DEFINE_MOP(MOP_vuqxtnuv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uqxtn","0,1",1) -DEFINE_MOP(MOP_vuqxtn2vv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uqxtn2","0,1",1) -DEFINE_MOP(MOP_vsabduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sabd","0,1,2",1) 
-DEFINE_MOP(MOP_vsabdvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sabd","0,1,2",1) -DEFINE_MOP(MOP_vuabduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uabd","0,1,2",1) -DEFINE_MOP(MOP_vuabdvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uabd","0,1,2",1) -DEFINE_MOP(MOP_vsminuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smin","0,1,2",1) -DEFINE_MOP(MOP_vsminvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smin","0,1,2",1) -DEFINE_MOP(MOP_vuminuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umin","0,1,2",1) -DEFINE_MOP(MOP_vuminvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umin","0,1,2",1) -DEFINE_MOP(MOP_vsmaxuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smax","0,1,2",1) -DEFINE_MOP(MOP_vsmaxvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smax","0,1,2",1) -DEFINE_MOP(MOP_vumaxuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umax","0,1,2",1) -DEFINE_MOP(MOP_vumaxvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umax","0,1,2",1) -DEFINE_MOP(MOP_vurecpeuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"urecpe","0,1",1) -DEFINE_MOP(MOP_vurecpevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"urecpe","0,1",1) -DEFINE_MOP(MOP_vaddprv,{&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addp","0,1",1) -DEFINE_MOP(MOP_vaddpuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addp","0,1,2",1) -DEFINE_MOP(MOP_vaddpvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addp","0,1,2",1) 
-DEFINE_MOP(MOP_vsmaxpuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smaxp","0,1,2",1) -DEFINE_MOP(MOP_vsmaxpvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smaxp","0,1,2",1) -DEFINE_MOP(MOP_vumaxpuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umaxp","0,1,2",1) -DEFINE_MOP(MOP_vumaxpvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umaxp","0,1,2",1) -DEFINE_MOP(MOP_vsminpuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sminp","0,1,2",1) -DEFINE_MOP(MOP_vsminpvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sminp","0,1,2",1) -DEFINE_MOP(MOP_vuminpuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uminp","0,1,2",1) -DEFINE_MOP(MOP_vuminpvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uminp","0,1,2",1) -DEFINE_MOP(MOP_vbsmaxvru,{&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smaxv","0,1",1) -DEFINE_MOP(MOP_vhsmaxvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smaxv","0,1",1) -DEFINE_MOP(MOP_vbsmaxvrv,{&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smaxv","0,1",1) -DEFINE_MOP(MOP_vhsmaxvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smaxv","0,1",1) -DEFINE_MOP(MOP_vssmaxvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smaxv","0,1",1) -DEFINE_MOP(MOP_vbsminvru,{&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sminv","0,1",1) -DEFINE_MOP(MOP_vhsminvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sminv","0,1",1) -DEFINE_MOP(MOP_vbsminvrv,{&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sminv","0,1",1) -DEFINE_MOP(MOP_vhsminvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sminv","0,1",1) -DEFINE_MOP(MOP_vssminvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sminv","0,1",1) 
-DEFINE_MOP(MOP_vbumaxvru,{&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umaxv","0,1",1) -DEFINE_MOP(MOP_vhumaxvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umaxv","0,1",1) -DEFINE_MOP(MOP_vbumaxvrv,{&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umaxv","0,1",1) -DEFINE_MOP(MOP_vhumaxvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umaxv","0,1",1) -DEFINE_MOP(MOP_vsumaxvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umaxv","0,1",1) -DEFINE_MOP(MOP_vbuminvru,{&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uminv","0,1",1) -DEFINE_MOP(MOP_vhuminvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uminv","0,1",1) -DEFINE_MOP(MOP_vbuminvrv,{&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uminv","0,1",1) -DEFINE_MOP(MOP_vhuminvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uminv","0,1",1) -DEFINE_MOP(MOP_vsuminvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uminv","0,1",1) -DEFINE_MOP(MOP_vtstrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"cmtst","0,1,2",1) -DEFINE_MOP(MOP_vtstuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmtst","0,1,2",1) -DEFINE_MOP(MOP_vtstvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmtst","0,1,2",1) -DEFINE_MOP(MOP_vmvnuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mvn","0,1",1) -DEFINE_MOP(MOP_vmvnvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mvn","0,1",1) -DEFINE_MOP(MOP_vclsuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cls","0,1",1) -DEFINE_MOP(MOP_vclsvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cls","0,1",1) -DEFINE_MOP(MOP_vclzuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"clz","0,1",1) -DEFINE_MOP(MOP_vclzvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"clz","0,1",1) 
-DEFINE_MOP(MOP_vcntuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cnt","0,1",1) -DEFINE_MOP(MOP_vcntvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cnt","0,1",1) -DEFINE_MOP(MOP_vbicuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"bic","0,1,2",1) -DEFINE_MOP(MOP_vbicvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"bic","0,1,2",1) -DEFINE_MOP(MOP_vrbituu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rbit","0,1",1) -DEFINE_MOP(MOP_vrbitvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rbit","0,1",1) -DEFINE_MOP(MOP_vuzp1uuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uzp1","0,1,2",1) -DEFINE_MOP(MOP_vuzp1vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uzp1","0,1,2",1) -DEFINE_MOP(MOP_vuzp2uuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uzp2","0,1,2",1) -DEFINE_MOP(MOP_vuzp2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uzp2","0,1,2",1) -DEFINE_MOP(MOP_vtrn1uuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"trn1","0,1,2",1) -DEFINE_MOP(MOP_vtrn1vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"trn1","0,1,2",1) -DEFINE_MOP(MOP_vtrn2uuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"trn2","0,1,2",1) -DEFINE_MOP(MOP_vtrn2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"trn2","0,1,2",1) -DEFINE_MOP(MOP_vld1ub,{&OpndDesc::Reg64VD,&OpndDesc::Mem64S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) -DEFINE_MOP(MOP_vld1vb,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) -DEFINE_MOP(MOP_vld1uh,{&OpndDesc::Reg64VD,&OpndDesc::Mem64S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) 
-DEFINE_MOP(MOP_vld1vh,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) -DEFINE_MOP(MOP_vld1uw,{&OpndDesc::Reg64VD,&OpndDesc::Mem64S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) -DEFINE_MOP(MOP_vld1vw,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) -DEFINE_MOP(MOP_vld1ud,{&OpndDesc::Reg64VD,&OpndDesc::Mem64S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) -DEFINE_MOP(MOP_vld1vd,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1","0,1",1) -DEFINE_MOP(MOP_vld1rub,{&OpndDesc::Reg64VD,&OpndDesc::Mem64S},ISLOAD|ISVECTOR,kLtFpalu,"ld1r","0,1",1) -DEFINE_MOP(MOP_vld1rvb,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1r","0,1",1) -DEFINE_MOP(MOP_vld1ruh,{&OpndDesc::Reg64VD,&OpndDesc::Mem64S},ISLOAD|ISVECTOR,kLtFpalu,"ld1r","0,1",1) -DEFINE_MOP(MOP_vld1rvh,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1r","0,1",1) -DEFINE_MOP(MOP_vld1ruw,{&OpndDesc::Reg64VD,&OpndDesc::Mem64S},ISLOAD|ISVECTOR,kLtFpalu,"ld1r","0,1",1) -DEFINE_MOP(MOP_vld1rvw,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1r","0,1",1) -DEFINE_MOP(MOP_vld1rvd,{&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISVECTOR,kLtFpalu,"ld1r","0,1",1) -DEFINE_MOP(MOP_vst1ub,{&OpndDesc::Reg64VS,&OpndDesc::Mem64D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) -DEFINE_MOP(MOP_vst1vb,{&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) -DEFINE_MOP(MOP_vst1uh,{&OpndDesc::Reg64VS,&OpndDesc::Mem64D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) -DEFINE_MOP(MOP_vst1vh,{&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) -DEFINE_MOP(MOP_vst1uw,{&OpndDesc::Reg64VS,&OpndDesc::Mem64D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) -DEFINE_MOP(MOP_vst1vw,{&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) -DEFINE_MOP(MOP_vst1ud,{&OpndDesc::Reg64VS,&OpndDesc::Mem64D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) 
-DEFINE_MOP(MOP_vst1vd,{&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|ISVECTOR,kLtFpalu,"st1","0,1",1) -DEFINE_MOP(MOP_vtbluvu,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"tbl","0,1,2",1) -DEFINE_MOP(MOP_vtblvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"tbl","0,1,2",1) -DEFINE_MOP(MOP_vtbxuvu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"tbx","0,1,2",1) -DEFINE_MOP(MOP_vtbxvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"tbx","0,1,2",1) -DEFINE_MOP(MOP_vshadduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"shadd","0,1,2",1) -DEFINE_MOP(MOP_vshaddvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"shadd","0,1,2",1) -DEFINE_MOP(MOP_vuhadduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uhadd","0,1,2",1) -DEFINE_MOP(MOP_vuhaddvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uhadd","0,1,2",1) -DEFINE_MOP(MOP_vsrhadduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"srhadd","0,1,2",1) -DEFINE_MOP(MOP_vsrhaddvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"srhadd","0,1,2",1) -DEFINE_MOP(MOP_vurhadduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"urhadd","0,1,2",1) -DEFINE_MOP(MOP_vurhaddvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"urhadd","0,1,2",1) -DEFINE_MOP(MOP_vaddhnuvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addhn","0,1,2",1) -DEFINE_MOP(MOP_vaddhn2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addhn2","0,1,2",1) -DEFINE_MOP(MOP_vraddhnuvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"raddhn","0,1,2",1) 
-DEFINE_MOP(MOP_vraddhn2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"raddhn2","0,1,2",1) -DEFINE_MOP(MOP_vdsqaddrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqadd","0,1,2",1) -DEFINE_MOP(MOP_vsqadduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqadd","0,1,2",1) -DEFINE_MOP(MOP_vsqaddvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqadd","0,1,2",1) -DEFINE_MOP(MOP_vduqaddrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"uqadd","0,1,2",1) -DEFINE_MOP(MOP_vuqadduuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uqadd","0,1,2",1) -DEFINE_MOP(MOP_vuqaddvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uqadd","0,1,2",1) -DEFINE_MOP(MOP_vdsuqaddrr,{&OpndDesc::Reg64FDS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"suqadd","0,1",1) -DEFINE_MOP(MOP_vsuqadduu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"suqadd","0,1",1) -DEFINE_MOP(MOP_vsuqaddvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"suqadd","0,1",1) -DEFINE_MOP(MOP_vdusqaddrr,{&OpndDesc::Reg64FDS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"usqadd","0,1",1) -DEFINE_MOP(MOP_vusqadduu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"usqadd","0,1",1) -DEFINE_MOP(MOP_vusqaddvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"usqadd","0,1",1) -DEFINE_MOP(MOP_vmlauuu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mla","0,1,2",1) -DEFINE_MOP(MOP_vmlauuv,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mla","0,1,2",1) -DEFINE_MOP(MOP_vmlavvu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mla","0,1,2",1) -DEFINE_MOP(MOP_vmlavvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mla","0,1,2",1) 
-DEFINE_MOP(MOP_vmlsuuu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mls","0,1,2",1) -DEFINE_MOP(MOP_vmlsuuv,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mls","0,1,2",1) -DEFINE_MOP(MOP_vmlsvvu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mls","0,1,2",1) -DEFINE_MOP(MOP_vmlsvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mls","0,1,2",1) -DEFINE_MOP(MOP_vsmlalvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smlal","0,1,2",1) -DEFINE_MOP(MOP_vumlalvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umlal","0,1,2",1) -DEFINE_MOP(MOP_vsmlalvuv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smlal","0,1,2",1) -DEFINE_MOP(MOP_vumlalvuv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umlal","0,1,2",1) -DEFINE_MOP(MOP_vsmlal2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smlal2","0,1,2",1) -DEFINE_MOP(MOP_vumlal2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umlal2","0,1,2",1) -DEFINE_MOP(MOP_vsmlslvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smlsl","0,1,2",1) -DEFINE_MOP(MOP_vumlslvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umlsl","0,1,2",1) -DEFINE_MOP(MOP_vsmlslvuv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smlsl","0,1,2",1) -DEFINE_MOP(MOP_vumlslvuv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umlsl","0,1,2",1) -DEFINE_MOP(MOP_vsmlsl2vvu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smlsl2","0,1,2",1) -DEFINE_MOP(MOP_vumlsl2vvu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umlsl2","0,1,2",1) 
-DEFINE_MOP(MOP_vsmlsl2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smlsl2","0,1,2",1) -DEFINE_MOP(MOP_vumlsl2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umlsl2","0,1,2",1) -DEFINE_MOP(MOP_vsqdmulhuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqdmulhuuv,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqdmulhvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqdmulhvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqrdmulhuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqrdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqrdmulhuuv,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqrdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqrdmulhvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqrdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqrdmulhvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqrdmulh","0,1,2",1) -DEFINE_MOP(MOP_vsqdmulluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmull","0,1,2",1) -DEFINE_MOP(MOP_vsqdmullvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmull","0,1,2",1) -DEFINE_MOP(MOP_vsqdmullvuv,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmull","0,1,2",1) -DEFINE_MOP(MOP_vsqdmull2vvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmull2","0,1,2",1) -DEFINE_MOP(MOP_vsqdmull2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmull2","0,1,2",1) 
-DEFINE_MOP(MOP_vsqdmlalvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmlal","0,1,2",1) -DEFINE_MOP(MOP_vsqdmlalvuv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmlal","0,1,2",1) -DEFINE_MOP(MOP_vsqdmlal2vvu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmlal2","0,1,2",1) -DEFINE_MOP(MOP_vsqdmlal2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmlal2","0,1,2",1) -DEFINE_MOP(MOP_vsqdmlslvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmlsl","0,1,2",1) -DEFINE_MOP(MOP_vsqdmlslvuv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmlsl","0,1,2",1) -DEFINE_MOP(MOP_vsqdmlsl2vvu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqdmlsl2","0,1,2",1) -DEFINE_MOP(MOP_vsqdmlsl2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqdmlsl2","0,1,2",1) -DEFINE_MOP(MOP_vshsubuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"shsub","0,1,2",1) -DEFINE_MOP(MOP_vshsubvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"shsub","0,1,2",1) -DEFINE_MOP(MOP_vuhsubuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uhsub","0,1,2",1) -DEFINE_MOP(MOP_vuhsubvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uhsub","0,1,2",1) -DEFINE_MOP(MOP_vsubhnuvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"subhn","0,1,2",1) -DEFINE_MOP(MOP_vsubhn2vvv,{&OpndDesc::Reg64VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"subhn2","0,1,2",1) -DEFINE_MOP(MOP_vrsubhnuvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rsubhn","0,1,2",1) 
-DEFINE_MOP(MOP_vrsubhn2vvv,{&OpndDesc::Reg64VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rsubhn2","0,1,2",1) -DEFINE_MOP(MOP_vdsqsubrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqsub","0,1,2",1) -DEFINE_MOP(MOP_vsqsubuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqsub","0,1,2",1) -DEFINE_MOP(MOP_vsqsubvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqsub","0,1,2",1) -DEFINE_MOP(MOP_vduqsubrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"uqsub","0,1,2",1) -DEFINE_MOP(MOP_vuqsubuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uqsub","0,1,2",1) -DEFINE_MOP(MOP_vuqsubvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uqsub","0,1,2",1) -DEFINE_MOP(MOP_vsabauuu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saba","0,1,2",1) -DEFINE_MOP(MOP_vsabavvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saba","0,1,2",1) -DEFINE_MOP(MOP_vuabauuu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaba","0,1,2",1) -DEFINE_MOP(MOP_vuabavvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaba","0,1,2",1) -DEFINE_MOP(MOP_vsabalvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sabal","0,1,2",1) -DEFINE_MOP(MOP_vuabalvuu,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uabal","0,1,2",1) -DEFINE_MOP(MOP_vsabal2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sabal2","0,1,2",1) -DEFINE_MOP(MOP_vuabal2vvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uabal2","0,1,2",1) -DEFINE_MOP(MOP_vdsqabsrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqabs","0,1",1) 
-DEFINE_MOP(MOP_vsqabsuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqabs","0,1",1) -DEFINE_MOP(MOP_vsqabsvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqabs","0,1",1) -DEFINE_MOP(MOP_vursqrteuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ursqrte","0,1",1) -DEFINE_MOP(MOP_vursqrtevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ursqrte","0,1",1) -DEFINE_MOP(MOP_vhsaddlvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddlv","0,1",1) -DEFINE_MOP(MOP_vhsaddlvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddlv","0,1",1) -DEFINE_MOP(MOP_vwsaddlvru,{&OpndDesc::Reg32FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddlv","0,1",1) -DEFINE_MOP(MOP_vwsaddlvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddlv","0,1",1) -DEFINE_MOP(MOP_vdsaddlvrv,{&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddlv","0,1",1) -DEFINE_MOP(MOP_vhuaddlvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddlv","0,1",1) -DEFINE_MOP(MOP_vhuaddlvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddlv","0,1",1) -DEFINE_MOP(MOP_vwuaddlvru,{&OpndDesc::Reg32FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddlv","0,1",1) -DEFINE_MOP(MOP_vwuaddlvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddlv","0,1",1) -DEFINE_MOP(MOP_vduaddlvrv,{&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddlv","0,1",1) -DEFINE_MOP(MOP_vdsqshlrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqshl","0,1,2",1) -DEFINE_MOP(MOP_vsqshluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqshl","0,1,2",1) -DEFINE_MOP(MOP_vsqshlvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqshl","0,1,2",1) -DEFINE_MOP(MOP_vduqshlrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"uqshl","0,1,2",1) 
-DEFINE_MOP(MOP_vuqshluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uqshl","0,1,2",1) -DEFINE_MOP(MOP_vuqshlvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uqshl","0,1,2",1) -DEFINE_MOP(MOP_vdsqshlrri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshl","0,1,2",1) -DEFINE_MOP(MOP_vsqshluui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshl","0,1,2",1) -DEFINE_MOP(MOP_vsqshlvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshl","0,1,2",1) -DEFINE_MOP(MOP_vduqshlrri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"uqshl","0,1,2",1) -DEFINE_MOP(MOP_vuqshluui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"uqshl","0,1,2",1) -DEFINE_MOP(MOP_vuqshlvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"uqshl","0,1,2",1) -DEFINE_MOP(MOP_vdsqshlurri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshlu","0,1,2",1) -DEFINE_MOP(MOP_vsqshluuui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshlu","0,1,2",1) -DEFINE_MOP(MOP_vsqshluvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshlu","0,1,2",1) -DEFINE_MOP(MOP_vdsrshlurrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"srshl","0,1,2",1) -DEFINE_MOP(MOP_vsrshluuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"srshl","0,1,2",1) -DEFINE_MOP(MOP_vsrshluvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"srshl","0,1,2",1) -DEFINE_MOP(MOP_vdurshlurrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"urshl","0,1,2",1) -DEFINE_MOP(MOP_vurshluuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"urshl","0,1,2",1) 
-DEFINE_MOP(MOP_vurshluvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"urshl","0,1,2",1) -DEFINE_MOP(MOP_vdsqrshlrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqrshl","0,1,2",1) -DEFINE_MOP(MOP_vsqrshluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqrshl","0,1,2",1) -DEFINE_MOP(MOP_vsqrshlvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqrshl","0,1,2",1) -DEFINE_MOP(MOP_vduqrshlrrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"uqrshl","0,1,2",1) -DEFINE_MOP(MOP_vuqrshluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uqrshl","0,1,2",1) -DEFINE_MOP(MOP_vuqrshlvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uqrshl","0,1,2",1) -DEFINE_MOP(MOP_vsshllvui,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshll","0,1,2",1) -DEFINE_MOP(MOP_vushllvui,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushll","0,1,2",1) -DEFINE_MOP(MOP_vsshll2vvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshll2","0,1,2",1) -DEFINE_MOP(MOP_vushll2vvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushll2","0,1,2",1) -DEFINE_MOP(MOP_vshllvui,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shll","0,1,2",1) -DEFINE_MOP(MOP_vshll2vvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shll2","0,1,2",1) -DEFINE_MOP(MOP_vdslirri,{&OpndDesc::Reg64FDS,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sli","0,1,2",1) -DEFINE_MOP(MOP_vsliuui,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sli","0,1,2",1) -DEFINE_MOP(MOP_vslivvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sli","0,1,2",1) 
-DEFINE_MOP(MOP_vdsrshrrri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"srshr","0,1,2",1) -DEFINE_MOP(MOP_vsrshruui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"srshr","0,1,2",1) -DEFINE_MOP(MOP_vsrshrvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"srshr","0,1,2",1) -DEFINE_MOP(MOP_vdurshrrri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"urshr","0,1,2",1) -DEFINE_MOP(MOP_vurshruui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"urshr","0,1,2",1) -DEFINE_MOP(MOP_vurshrvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"urshr","0,1,2",1) -DEFINE_MOP(MOP_vdssrarri,{&OpndDesc::Reg64FDS,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ssra","0,1,2",1) -DEFINE_MOP(MOP_vssrauui,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ssra","0,1,2",1) -DEFINE_MOP(MOP_vssravvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ssra","0,1,2",1) -DEFINE_MOP(MOP_vdusrarri,{&OpndDesc::Reg64FDS,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"usra","0,1,2",1) -DEFINE_MOP(MOP_vusrauui,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"usra","0,1,2",1) -DEFINE_MOP(MOP_vusravvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"usra","0,1,2",1) -DEFINE_MOP(MOP_vdsrsrarri,{&OpndDesc::Reg64FDS,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"srsra","0,1,2",1) -DEFINE_MOP(MOP_vsrsrauui,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"srsra","0,1,2",1) -DEFINE_MOP(MOP_vsrsravvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"srsra","0,1,2",1) -DEFINE_MOP(MOP_vdursrarri,{&OpndDesc::Reg64FDS,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ursra","0,1,2",1) 
-DEFINE_MOP(MOP_vursrauui,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ursra","0,1,2",1) -DEFINE_MOP(MOP_vursravvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ursra","0,1,2",1) -DEFINE_MOP(MOP_vshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shrn","0,1,2",1) -DEFINE_MOP(MOP_vshrn2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shrn2","0,1,2",1) -DEFINE_MOP(MOP_vsqshrunuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshrun","0,1,2",1) -DEFINE_MOP(MOP_vsqshrun2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshrun2","0,1,2",1) -DEFINE_MOP(MOP_vsqshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshrn","0,1,2",1) -DEFINE_MOP(MOP_vuqshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"uqshrn","0,1,2",1) -DEFINE_MOP(MOP_vsqshrn2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqshrn2","0,1,2",1) -DEFINE_MOP(MOP_vuqshrn2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"uqshrn2","0,1,2",1) -DEFINE_MOP(MOP_vsqrshrunuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqrshrun","0,1,2",1) -DEFINE_MOP(MOP_vsqrshrun2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqrshrun2","0,1,2",1) -DEFINE_MOP(MOP_vsqrshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqrshrn","0,1,2",1) -DEFINE_MOP(MOP_vsqrshrn2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sqrshrn2","0,1,2",1) -DEFINE_MOP(MOP_vuqrshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"uqrshrn","0,1,2",1) -DEFINE_MOP(MOP_vuqrshrn2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"uqrshrn2","0,1,2",1) 
-DEFINE_MOP(MOP_vrshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"rshrn","0,1,2",1) -DEFINE_MOP(MOP_vrshrn2vvi,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"rshrn2","0,1,2",1) -DEFINE_MOP(MOP_vdsrirri,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sri","0,1,2",1) -DEFINE_MOP(MOP_vsriuui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sri","0,1,2",1) -DEFINE_MOP(MOP_vsrivvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sri","0,1,2",1) -DEFINE_MOP(MOP_vbsqnegrr,{&OpndDesc::Reg8FD,&OpndDesc::Reg8FS},ISVECTOR,kLtFpalu,"sqneg","0,1",1) -DEFINE_MOP(MOP_vhsqnegrr,{&OpndDesc::Reg16FD,&OpndDesc::Reg16FS},ISVECTOR,kLtFpalu,"sqneg","0,1",1) -DEFINE_MOP(MOP_vssqnegrr,{&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISVECTOR,kLtFpalu,"sqneg","0,1",1) -DEFINE_MOP(MOP_vdsqnegrr,{&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISVECTOR,kLtFpalu,"sqneg","0,1",1) -DEFINE_MOP(MOP_vsqneguu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sqneg","0,1",1) -DEFINE_MOP(MOP_vsqnegvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqneg","0,1",1) +DEFINE_MOP(MOP_vmovui, {&OpndDesc::Reg64VD, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "movi", "0,1", 1) +DEFINE_MOP(MOP_vmovvi, {&OpndDesc::Reg128VD, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "movi", "0,1", 1) +DEFINE_MOP(MOP_vmovuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISMOVE | ISVECTOR, kLtFpalu, "mov", "0,1", 1) +DEFINE_MOP(MOP_vmovvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISMOVE | ISVECTOR, kLtFpalu, "mov", "0,1", 1) +DEFINE_MOP(MOP_vmovlaneuu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS}, ISMOVE | ISVECTOR, kLtFpalu, "mov", "0,1", 1) +DEFINE_MOP(MOP_vmovlanevu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS}, ISMOVE | ISVECTOR, kLtFpalu, "mov", "0,1", 1) +DEFINE_MOP(MOP_vmovlanevv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISMOVE | ISVECTOR, kLtFpalu, "mov", "0,1", 1) 
+DEFINE_MOP(MOP_vmov2vv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISMOVE | ISVECTOR, kLtFpalu, "mov", "0,1", 1) +DEFINE_MOP(MOP_vwmovru, {&OpndDesc::Reg32ID, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umov", "0,1", 1) +DEFINE_MOP(MOP_vwmovrv, {&OpndDesc::Reg32ID, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umov", "0,1", 1) +DEFINE_MOP(MOP_vxmovrv, {&OpndDesc::Reg64ID, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umov", "0,1", 1) +DEFINE_MOP(MOP_vwsmovru, {&OpndDesc::Reg32ID, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smov", "0,1", 1) +DEFINE_MOP(MOP_vwsmovrv, {&OpndDesc::Reg32ID, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smov", "0,1", 1) +DEFINE_MOP(MOP_vwdupur, {&OpndDesc::Reg64VD, &OpndDesc::Reg32IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vwdupvr, {&OpndDesc::Reg128VD, &OpndDesc::Reg32IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vxdupur, {&OpndDesc::Reg64VD, &OpndDesc::Reg64IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vxdupvr, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vduprv, {&OpndDesc::Reg64FD, &OpndDesc::Reg128VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vbdupru, {&OpndDesc::Reg8FD, &OpndDesc::Reg64VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vbduprv, {&OpndDesc::Reg8FD, &OpndDesc::Reg128VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vhdupru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vhduprv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vsdupru, {&OpndDesc::Reg32FD, &OpndDesc::Reg64VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vsduprv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vdupuu, {&OpndDesc::Reg64VD, 
&OpndDesc::Reg64VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vdupuv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vdupvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vdupvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "dup", "0,1", 1) +DEFINE_MOP(MOP_vextuuui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ext", "0,1,2,3", 1) +DEFINE_MOP(MOP_vextvvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ext", "0,1,2,3", 1) +DEFINE_MOP(MOP_vsabdlvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtAlu, "sabdl", "0,1,2", 1) +DEFINE_MOP(MOP_vuabdlvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtAlu, "uabdl", "0,1,2", 1) +DEFINE_MOP(MOP_vsabdl2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sabdl2", "0,1,2", 1) +DEFINE_MOP(MOP_vuabdl2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uabdl2", "0,1,2", 1) +DEFINE_MOP(MOP_vspadaluu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS}, ISVECTOR, kLtAlu, "sadalp", "0,1", 1) +DEFINE_MOP(MOP_vspadalvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtAlu, "sadalp", "0,1", 1) +DEFINE_MOP(MOP_vupadaluu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS}, ISVECTOR, kLtAlu, "uadalp", "0,1", 1) +DEFINE_MOP(MOP_vupadalvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtAlu, "uadalp", "0,1", 1) +DEFINE_MOP(MOP_vsaddlpuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtAlu, "saddlp", "0,1", 1) +DEFINE_MOP(MOP_vsaddlpvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtAlu, "saddlp", "0,1", 1) +DEFINE_MOP(MOP_vuaddlpuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtAlu, "uaddlp", 
"0,1", 1) +DEFINE_MOP(MOP_vuaddlpvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtAlu, "uaddlp", "0,1", 1) +DEFINE_MOP(MOP_vwinsur, {&OpndDesc::Reg64VDS, &OpndDesc::Reg32IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "ins", "0,1", 1) +DEFINE_MOP(MOP_vxinsur, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "ins", "0,1", 1) +DEFINE_MOP(MOP_vwinsvr, {&OpndDesc::Reg128VDS, &OpndDesc::Reg32IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "ins", "0,1", 1) +DEFINE_MOP(MOP_vxinsvr, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64IS}, ISVECTOR | SPINTRINSIC, kLtFpalu, "ins", "0,1", 1) +DEFINE_MOP(MOP_vrev16dd, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "rev16", "0,1", 1) +DEFINE_MOP(MOP_vrev32dd, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "rev32", "0,1", 1) +DEFINE_MOP(MOP_vrev64dd, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "rev64", "0,1", 1) +DEFINE_MOP(MOP_vrev16qq, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "rev16", "0,1", 1) +DEFINE_MOP(MOP_vrev32qq, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "rev32", "0,1", 1) +DEFINE_MOP(MOP_vrev64qq, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "rev64", "0,1", 1) +DEFINE_MOP(MOP_vbaddvru, {&OpndDesc::Reg8FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "addv", "0,1", 1) +DEFINE_MOP(MOP_vhaddvru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "addv", "0,1", 1) +DEFINE_MOP(MOP_vsaddvru, {&OpndDesc::Reg32FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "addv", "0,1", 1) +DEFINE_MOP(MOP_vbaddvrv, {&OpndDesc::Reg8FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addv", "0,1", 1) +DEFINE_MOP(MOP_vhaddvrv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addv", "0,1", 1) +DEFINE_MOP(MOP_vsaddvrv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addv", "0,1", 1) +DEFINE_MOP(MOP_vdaddvrv, {&OpndDesc::Reg64FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addp", "0,1", 1) + 
+DEFINE_MOP(MOP_vzcmequu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmeq", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmgtuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmgt", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmgeuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmge", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmltuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmlt", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmleuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmle", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmeqvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmeq", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmgtvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmgt", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmgevv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmge", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmltvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmlt", "0,1,2", 1) +DEFINE_MOP(MOP_vzcmlevv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "cmle", "0,1,2", 1) +DEFINE_MOP(MOP_vcmequuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cmeq", "0,1,2", 1) +DEFINE_MOP(MOP_vcmgeuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cmge", "0,1,2", 1) +DEFINE_MOP(MOP_vcmgtuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cmgt", "0,1,2", 1) +DEFINE_MOP(MOP_vcmhiuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cmhi", "0,1,2", 1) +DEFINE_MOP(MOP_vcmhsuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cmhs", "0,1,2", 1) +DEFINE_MOP(MOP_vcmeqvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, 
ISVECTOR, kLtFpalu, "cmeq", "0,1,2", 1) +DEFINE_MOP(MOP_vcmgevvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "cmge", "0,1,2", 1) +DEFINE_MOP(MOP_vcmgtvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "cmgt", "0,1,2", 1) +DEFINE_MOP(MOP_vcmhivvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "cmhi", "0,1,2", 1) +DEFINE_MOP(MOP_vcmhsvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "cmhs", "0,1,2", 1) +DEFINE_MOP(MOP_vbsluuu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "bsl", "0,1,2", 1) +DEFINE_MOP(MOP_vbslvvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "bsl", "0,1,2", 1) + +DEFINE_MOP(MOP_vshlrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sshl", "0,1,2", 1) +DEFINE_MOP(MOP_vshluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sshl", "0,1,2", 1) +DEFINE_MOP(MOP_vshlvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sshl", "0,1,2", 1) +DEFINE_MOP(MOP_vushlrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "ushl", "0,1,2", 1) +DEFINE_MOP(MOP_vushluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "ushl", "0,1,2", 1) +DEFINE_MOP(MOP_vushlvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "ushl", "0,1,2", 1) + +DEFINE_MOP(MOP_vushlrri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "shl", "0,1,2", 1) +DEFINE_MOP(MOP_vushluui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "shl", "0,1,2", 1) +DEFINE_MOP(MOP_vushlvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "shl", "0,1,2", 1) +DEFINE_MOP(MOP_vushrrri, 
{&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ushr", "0,1,2", 1) +DEFINE_MOP(MOP_vushruui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ushr", "0,1,2", 1) +DEFINE_MOP(MOP_vushrvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ushr", "0,1,2", 1) + +DEFINE_MOP(MOP_vxtnuv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "xtn", "0,1", 1) +DEFINE_MOP(MOP_vsxtlvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sxtl", "0,1", 1) +DEFINE_MOP(MOP_vuxtlvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uxtl", "0,1", 1) +DEFINE_MOP(MOP_vxtn2uv, {&OpndDesc::Reg64VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "xtn2", "0,1", 1) +DEFINE_MOP(MOP_vsxtl2vv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sxtl2", "0,1", 1) +DEFINE_MOP(MOP_vuxtl2vv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uxtl2", "0,1", 1) + +DEFINE_MOP(MOP_vshruui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sshr", "0,1,2", 1) +DEFINE_MOP(MOP_vshrvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sshr", "0,1,2", 1) + +DEFINE_MOP(MOP_vsmaddvvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smlal", "0,1,2", 1) +DEFINE_MOP(MOP_vumaddvvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umlal", "0,1,2", 1) +DEFINE_MOP(MOP_vsmullvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smull", "0,1,2", 1) +DEFINE_MOP(MOP_vumullvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umull", "0,1,2", 1) +DEFINE_MOP(MOP_vsmullvuv, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smull", "0,1,2", 1) +DEFINE_MOP(MOP_vumullvuv, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, 
ISVECTOR, kLtFpalu, "umull", "0,1,2", 1) +DEFINE_MOP(MOP_vsmullvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smull", "0,1,2", 1) +DEFINE_MOP(MOP_vumullvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umull", "0,1,2", 1) +DEFINE_MOP(MOP_vsmull2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smull2", "0,1,2", 1) +DEFINE_MOP(MOP_vumull2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umull2", "0,1,2", 1) +DEFINE_MOP(MOP_vabsrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "abs", "0,1", 1) +DEFINE_MOP(MOP_vabsuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "abs", "0,1", 1) +DEFINE_MOP(MOP_vabsvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "abs", "0,1", 1) +DEFINE_MOP(MOP_vaddrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "add", "0,1,2", 1) +DEFINE_MOP(MOP_vadduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "add", "0,1,2", 1) +DEFINE_MOP(MOP_vsaddlvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "saddl", "0,1,2", 1) +DEFINE_MOP(MOP_vuaddlvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uaddl", "0,1,2", 1) +DEFINE_MOP(MOP_vsaddl2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "saddl2", "0,1,2", 1) +DEFINE_MOP(MOP_vuaddl2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uaddl2", "0,1,2", 1) +DEFINE_MOP(MOP_vsaddwvvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "saddw", "0,1,2", 1) +DEFINE_MOP(MOP_vuaddwvvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uaddw", "0,1,2", 1) +DEFINE_MOP(MOP_vsaddw2vvv, {&OpndDesc::Reg128VD, 
&OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "saddw2", "0,1,2", 1) +DEFINE_MOP(MOP_vuaddw2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uaddw2", "0,1,2", 1) +DEFINE_MOP(MOP_vaddvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "add", "0,1,2", 1) +DEFINE_MOP(MOP_vmuluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "mul", "0,1,2", 1) +DEFINE_MOP(MOP_vmulvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "mul", "0,1,2", 1) +DEFINE_MOP(MOP_vsubrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sub", "0,1,2", 1) +DEFINE_MOP(MOP_vsubuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sub", "0,1,2", 1) +DEFINE_MOP(MOP_vsubvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sub", "0,1,2", 1) +DEFINE_MOP(MOP_vandrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "and", "0,1,2", 1) +DEFINE_MOP(MOP_vanduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "and", "0,1,2", 1) +DEFINE_MOP(MOP_vandvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "and", "0,1,2", 1) +DEFINE_MOP(MOP_voruuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "orr", "0,1,2", 1) +DEFINE_MOP(MOP_vorvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "orr", "0,1,2", 1) +DEFINE_MOP(MOP_vxoruuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "eor", "0,1,2", 1) +DEFINE_MOP(MOP_vxorvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "eor", "0,1,2", 1) +DEFINE_MOP(MOP_vornuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "orn", "0,1,2", 1) 
+DEFINE_MOP(MOP_vornvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "orn", "0,1,2", 1) +DEFINE_MOP(MOP_vnotuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "not", "0,1", 1) +DEFINE_MOP(MOP_vnotvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "not", "0,1", 1) +DEFINE_MOP(MOP_vnegrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "neg", "0,1", 1) +DEFINE_MOP(MOP_vneguu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "neg", "0,1", 1) +DEFINE_MOP(MOP_vnegvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "neg", "0,1", 1) +DEFINE_MOP(MOP_vssublvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "ssubl", "0,1,2", 1) +DEFINE_MOP(MOP_vusublvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "usubl", "0,1,2", 1) +DEFINE_MOP(MOP_vssubl2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "ssubl2", "0,1,2", 1) +DEFINE_MOP(MOP_vusubl2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "usubl2", "0,1,2", 1) +DEFINE_MOP(MOP_vssubwvvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "ssubw", "0,1,2", 1) +DEFINE_MOP(MOP_vusubwvvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "usubw", "0,1,2", 1) +DEFINE_MOP(MOP_vssubw2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "ssubw2", "0,1,2", 1) +DEFINE_MOP(MOP_vusubw2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "usubw2", "0,1,2", 1) +DEFINE_MOP(MOP_vzip1uuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "zip1", "0,1,2", 1) +DEFINE_MOP(MOP_vzip1vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "zip1", "0,1,2", 1) +DEFINE_MOP(MOP_vzip2uuu, 
{&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "zip2", "0,1,2", 1) +DEFINE_MOP(MOP_vzip2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "zip2", "0,1,2", 1) +DEFINE_MOP(MOP_vbsqxtnrr, {&OpndDesc::Reg8FD, &OpndDesc::Reg16FS}, ISVECTOR, kLtFpalu, "sqxtn", "0,1", 1) +DEFINE_MOP(MOP_vhsqxtnrr, {&OpndDesc::Reg16FD, &OpndDesc::Reg32FS}, ISVECTOR, kLtFpalu, "sqxtn", "0,1", 1) +DEFINE_MOP(MOP_vssqxtnrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqxtn", "0,1", 1) +DEFINE_MOP(MOP_vsqxtnuv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqxtn", "0,1", 1) +DEFINE_MOP(MOP_vsqxtn2vv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqxtn2", "0,1", 1) +DEFINE_MOP(MOP_vbsqxtunrr, {&OpndDesc::Reg8FD, &OpndDesc::Reg16FS}, ISVECTOR, kLtFpalu, "sqxtun", "0,1", 1) +DEFINE_MOP(MOP_vhsqxtunrr, {&OpndDesc::Reg16FD, &OpndDesc::Reg32FS}, ISVECTOR, kLtFpalu, "sqxtun", "0,1", 1) +DEFINE_MOP(MOP_vssqxtunrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqxtun", "0,1", 1) +DEFINE_MOP(MOP_vsqxtunuv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqxtun", "0,1", 1) +DEFINE_MOP(MOP_vsqxtun2vv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqxtun2", "0,1", 1) +DEFINE_MOP(MOP_vbuqxtnrr, {&OpndDesc::Reg8FD, &OpndDesc::Reg16FS}, ISVECTOR, kLtFpalu, "uqxtn", "0,1", 1) +DEFINE_MOP(MOP_vhuqxtnrr, {&OpndDesc::Reg16FD, &OpndDesc::Reg32FS}, ISVECTOR, kLtFpalu, "uqxtn", "0,1", 1) +DEFINE_MOP(MOP_vsuqxtnrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "uqxtn", "0,1", 1) +DEFINE_MOP(MOP_vuqxtnuv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uqxtn", "0,1", 1) +DEFINE_MOP(MOP_vuqxtn2vv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uqxtn2", "0,1", 1) +DEFINE_MOP(MOP_vsabduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sabd", "0,1,2", 1) 
+DEFINE_MOP(MOP_vsabdvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sabd", "0,1,2", 1) +DEFINE_MOP(MOP_vuabduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uabd", "0,1,2", 1) +DEFINE_MOP(MOP_vuabdvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uabd", "0,1,2", 1) +DEFINE_MOP(MOP_vsminuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smin", "0,1,2", 1) +DEFINE_MOP(MOP_vsminvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smin", "0,1,2", 1) +DEFINE_MOP(MOP_vuminuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umin", "0,1,2", 1) +DEFINE_MOP(MOP_vuminvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umin", "0,1,2", 1) +DEFINE_MOP(MOP_vsmaxuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smax", "0,1,2", 1) +DEFINE_MOP(MOP_vsmaxvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smax", "0,1,2", 1) +DEFINE_MOP(MOP_vumaxuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umax", "0,1,2", 1) +DEFINE_MOP(MOP_vumaxvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umax", "0,1,2", 1) +DEFINE_MOP(MOP_vurecpeuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "urecpe", "0,1", 1) +DEFINE_MOP(MOP_vurecpevv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "urecpe", "0,1", 1) +DEFINE_MOP(MOP_vaddprv, {&OpndDesc::Reg64FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addp", "0,1", 1) +DEFINE_MOP(MOP_vaddpuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "addp", "0,1,2", 1) +DEFINE_MOP(MOP_vaddpvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addp", 
"0,1,2", 1) +DEFINE_MOP(MOP_vsmaxpuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smaxp", "0,1,2", 1) +DEFINE_MOP(MOP_vsmaxpvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smaxp", "0,1,2", 1) +DEFINE_MOP(MOP_vumaxpuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umaxp", "0,1,2", 1) +DEFINE_MOP(MOP_vumaxpvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umaxp", "0,1,2", 1) +DEFINE_MOP(MOP_vsminpuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sminp", "0,1,2", 1) +DEFINE_MOP(MOP_vsminpvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sminp", "0,1,2", 1) +DEFINE_MOP(MOP_vuminpuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uminp", "0,1,2", 1) +DEFINE_MOP(MOP_vuminpvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uminp", "0,1,2", 1) +DEFINE_MOP(MOP_vbsmaxvru, {&OpndDesc::Reg8FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smaxv", "0,1", 1) +DEFINE_MOP(MOP_vhsmaxvru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smaxv", "0,1", 1) +DEFINE_MOP(MOP_vbsmaxvrv, {&OpndDesc::Reg8FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smaxv", "0,1", 1) +DEFINE_MOP(MOP_vhsmaxvrv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smaxv", "0,1", 1) +DEFINE_MOP(MOP_vssmaxvrv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smaxv", "0,1", 1) +DEFINE_MOP(MOP_vbsminvru, {&OpndDesc::Reg8FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sminv", "0,1", 1) +DEFINE_MOP(MOP_vhsminvru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sminv", "0,1", 1) +DEFINE_MOP(MOP_vbsminvrv, {&OpndDesc::Reg8FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sminv", "0,1", 1) +DEFINE_MOP(MOP_vhsminvrv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, 
ISVECTOR, kLtFpalu, "sminv", "0,1", 1) +DEFINE_MOP(MOP_vssminvrv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sminv", "0,1", 1) +DEFINE_MOP(MOP_vbumaxvru, {&OpndDesc::Reg8FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umaxv", "0,1", 1) +DEFINE_MOP(MOP_vhumaxvru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umaxv", "0,1", 1) +DEFINE_MOP(MOP_vbumaxvrv, {&OpndDesc::Reg8FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umaxv", "0,1", 1) +DEFINE_MOP(MOP_vhumaxvrv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umaxv", "0,1", 1) +DEFINE_MOP(MOP_vsumaxvrv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umaxv", "0,1", 1) +DEFINE_MOP(MOP_vbuminvru, {&OpndDesc::Reg8FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uminv", "0,1", 1) +DEFINE_MOP(MOP_vhuminvru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uminv", "0,1", 1) +DEFINE_MOP(MOP_vbuminvrv, {&OpndDesc::Reg8FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uminv", "0,1", 1) +DEFINE_MOP(MOP_vhuminvrv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uminv", "0,1", 1) +DEFINE_MOP(MOP_vsuminvrv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uminv", "0,1", 1) +DEFINE_MOP(MOP_vtstrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "cmtst", "0,1,2", 1) +DEFINE_MOP(MOP_vtstuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cmtst", "0,1,2", 1) +DEFINE_MOP(MOP_vtstvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "cmtst", "0,1,2", 1) +DEFINE_MOP(MOP_vmvnuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "mvn", "0,1", 1) +DEFINE_MOP(MOP_vmvnvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "mvn", "0,1", 1) +DEFINE_MOP(MOP_vclsuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cls", "0,1", 1) +DEFINE_MOP(MOP_vclsvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, 
ISVECTOR, kLtFpalu, "cls", "0,1", 1) +DEFINE_MOP(MOP_vclzuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "clz", "0,1", 1) +DEFINE_MOP(MOP_vclzvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "clz", "0,1", 1) +DEFINE_MOP(MOP_vcntuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "cnt", "0,1", 1) +DEFINE_MOP(MOP_vcntvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "cnt", "0,1", 1) +DEFINE_MOP(MOP_vbicuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "bic", "0,1,2", 1) +DEFINE_MOP(MOP_vbicvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "bic", "0,1,2", 1) +DEFINE_MOP(MOP_vrbituu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "rbit", "0,1", 1) +DEFINE_MOP(MOP_vrbitvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "rbit", "0,1", 1) +DEFINE_MOP(MOP_vuzp1uuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uzp1", "0,1,2", 1) +DEFINE_MOP(MOP_vuzp1vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uzp1", "0,1,2", 1) +DEFINE_MOP(MOP_vuzp2uuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uzp2", "0,1,2", 1) +DEFINE_MOP(MOP_vuzp2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uzp2", "0,1,2", 1) +DEFINE_MOP(MOP_vtrn1uuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "trn1", "0,1,2", 1) +DEFINE_MOP(MOP_vtrn1vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "trn1", "0,1,2", 1) +DEFINE_MOP(MOP_vtrn2uuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "trn2", "0,1,2", 1) +DEFINE_MOP(MOP_vtrn2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "trn2", "0,1,2", 1) +DEFINE_MOP(MOP_vld1ub, 
{&OpndDesc::Reg64VD, &OpndDesc::Mem64S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1vb, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1uh, {&OpndDesc::Reg64VD, &OpndDesc::Mem64S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1vh, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1uw, {&OpndDesc::Reg64VD, &OpndDesc::Mem64S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1vw, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1ud, {&OpndDesc::Reg64VD, &OpndDesc::Mem64S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1vd, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1", "0,1", 1) +DEFINE_MOP(MOP_vld1rub, {&OpndDesc::Reg64VD, &OpndDesc::Mem64S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1r", "0,1", 1) +DEFINE_MOP(MOP_vld1rvb, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1r", "0,1", 1) +DEFINE_MOP(MOP_vld1ruh, {&OpndDesc::Reg64VD, &OpndDesc::Mem64S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1r", "0,1", 1) +DEFINE_MOP(MOP_vld1rvh, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1r", "0,1", 1) +DEFINE_MOP(MOP_vld1ruw, {&OpndDesc::Reg64VD, &OpndDesc::Mem64S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1r", "0,1", 1) +DEFINE_MOP(MOP_vld1rvw, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1r", "0,1", 1) +DEFINE_MOP(MOP_vld1rvd, {&OpndDesc::Reg128VD, &OpndDesc::Mem128S}, ISLOAD | ISVECTOR, kLtFpalu, "ld1r", "0,1", 1) +DEFINE_MOP(MOP_vst1ub, {&OpndDesc::Reg64VS, &OpndDesc::Mem64D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", "0,1", 1) +DEFINE_MOP(MOP_vst1vb, {&OpndDesc::Reg128VS, &OpndDesc::Mem128D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", "0,1", 1) +DEFINE_MOP(MOP_vst1uh, {&OpndDesc::Reg64VS, &OpndDesc::Mem64D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", 
"0,1", 1) +DEFINE_MOP(MOP_vst1vh, {&OpndDesc::Reg128VS, &OpndDesc::Mem128D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", "0,1", 1) +DEFINE_MOP(MOP_vst1uw, {&OpndDesc::Reg64VS, &OpndDesc::Mem64D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", "0,1", 1) +DEFINE_MOP(MOP_vst1vw, {&OpndDesc::Reg128VS, &OpndDesc::Mem128D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", "0,1", 1) +DEFINE_MOP(MOP_vst1ud, {&OpndDesc::Reg64VS, &OpndDesc::Mem64D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", "0,1", 1) +DEFINE_MOP(MOP_vst1vd, {&OpndDesc::Reg128VS, &OpndDesc::Mem128D}, ISSTORE | ISVECTOR, kLtFpalu, "st1", "0,1", 1) +DEFINE_MOP(MOP_vtbluvu, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "tbl", "0,1,2", 1) +DEFINE_MOP(MOP_vtblvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "tbl", "0,1,2", 1) +DEFINE_MOP(MOP_vtbxuvu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "tbx", "0,1,2", 1) +DEFINE_MOP(MOP_vtbxvvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "tbx", "0,1,2", 1) +DEFINE_MOP(MOP_vshadduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "shadd", "0,1,2", 1) +DEFINE_MOP(MOP_vshaddvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "shadd", "0,1,2", 1) +DEFINE_MOP(MOP_vuhadduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uhadd", "0,1,2", 1) +DEFINE_MOP(MOP_vuhaddvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uhadd", "0,1,2", 1) +DEFINE_MOP(MOP_vsrhadduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "srhadd", "0,1,2", 1) +DEFINE_MOP(MOP_vsrhaddvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "srhadd", "0,1,2", 1) +DEFINE_MOP(MOP_vurhadduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, 
"urhadd", "0,1,2", 1) +DEFINE_MOP(MOP_vurhaddvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "urhadd", "0,1,2", 1) +DEFINE_MOP(MOP_vaddhnuvv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addhn", "0,1,2", 1) +DEFINE_MOP(MOP_vaddhn2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "addhn2", "0,1,2", 1) +DEFINE_MOP(MOP_vraddhnuvv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "raddhn", "0,1,2", 1) +DEFINE_MOP(MOP_vraddhn2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "raddhn2", "0,1,2", 1) +DEFINE_MOP(MOP_vdsqaddrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqadd", "0,1,2", 1) +DEFINE_MOP(MOP_vsqadduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqadd", "0,1,2", 1) +DEFINE_MOP(MOP_vsqaddvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqadd", "0,1,2", 1) +DEFINE_MOP(MOP_vduqaddrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "uqadd", "0,1,2", 1) +DEFINE_MOP(MOP_vuqadduuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uqadd", "0,1,2", 1) +DEFINE_MOP(MOP_vuqaddvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uqadd", "0,1,2", 1) +DEFINE_MOP(MOP_vdsuqaddrr, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "suqadd", "0,1", 1) +DEFINE_MOP(MOP_vsuqadduu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "suqadd", "0,1", 1) +DEFINE_MOP(MOP_vsuqaddvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "suqadd", "0,1", 1) +DEFINE_MOP(MOP_vdusqaddrr, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "usqadd", "0,1", 1) +DEFINE_MOP(MOP_vusqadduu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS}, 
ISVECTOR, kLtFpalu, "usqadd", "0,1", 1) +DEFINE_MOP(MOP_vusqaddvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "usqadd", "0,1", 1) +DEFINE_MOP(MOP_vmlauuu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "mla", "0,1,2", 1) +DEFINE_MOP(MOP_vmlauuv, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "mla", "0,1,2", 1) +DEFINE_MOP(MOP_vmlavvu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "mla", "0,1,2", 1) +DEFINE_MOP(MOP_vmlavvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "mla", "0,1,2", 1) +DEFINE_MOP(MOP_vmlsuuu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "mls", "0,1,2", 1) +DEFINE_MOP(MOP_vmlsuuv, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "mls", "0,1,2", 1) +DEFINE_MOP(MOP_vmlsvvu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "mls", "0,1,2", 1) +DEFINE_MOP(MOP_vmlsvvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "mls", "0,1,2", 1) +DEFINE_MOP(MOP_vsmlalvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smlal", "0,1,2", 1) +DEFINE_MOP(MOP_vumlalvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umlal", "0,1,2", 1) +DEFINE_MOP(MOP_vsmlalvuv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smlal", "0,1,2", 1) +DEFINE_MOP(MOP_vumlalvuv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umlal", "0,1,2", 1) +DEFINE_MOP(MOP_vsmlal2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smlal2", "0,1,2", 1) +DEFINE_MOP(MOP_vumlal2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umlal2", "0,1,2", 1) 
+DEFINE_MOP(MOP_vsmlslvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smlsl", "0,1,2", 1) +DEFINE_MOP(MOP_vumlslvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umlsl", "0,1,2", 1) +DEFINE_MOP(MOP_vsmlslvuv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smlsl", "0,1,2", 1) +DEFINE_MOP(MOP_vumlslvuv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umlsl", "0,1,2", 1) +DEFINE_MOP(MOP_vsmlsl2vvu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "smlsl2", "0,1,2", 1) +DEFINE_MOP(MOP_vumlsl2vvu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "umlsl2", "0,1,2", 1) +DEFINE_MOP(MOP_vsmlsl2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "smlsl2", "0,1,2", 1) +DEFINE_MOP(MOP_vumlsl2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "umlsl2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmulhuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmulhuuv, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmulhvvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmulhvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrdmulhuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqrdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrdmulhuuv, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqrdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrdmulhvvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, 
kLtFpalu, "sqrdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrdmulhvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqrdmulh", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmulluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmull", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmullvuu, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmull", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmullvuv, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmull", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmull2vvu, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmull2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmull2vvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmull2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlalvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmlal", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlalvuv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmlal", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlal2vvu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmlal2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlal2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmlal2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlslvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmlsl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlslvuv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmlsl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlsl2vvu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqdmlsl2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqdmlsl2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqdmlsl2", "0,1,2", 1) +DEFINE_MOP(MOP_vshsubuuu, 
{&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "shsub", "0,1,2", 1) +DEFINE_MOP(MOP_vshsubvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "shsub", "0,1,2", 1) +DEFINE_MOP(MOP_vuhsubuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uhsub", "0,1,2", 1) +DEFINE_MOP(MOP_vuhsubvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uhsub", "0,1,2", 1) +DEFINE_MOP(MOP_vsubhnuvv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "subhn", "0,1,2", 1) +DEFINE_MOP(MOP_vsubhn2vvv, {&OpndDesc::Reg64VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "subhn2", "0,1,2", 1) +DEFINE_MOP(MOP_vrsubhnuvv, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "rsubhn", "0,1,2", 1) +DEFINE_MOP(MOP_vrsubhn2vvv, {&OpndDesc::Reg64VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "rsubhn2", "0,1,2", 1) +DEFINE_MOP(MOP_vdsqsubrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqsub", "0,1,2", 1) +DEFINE_MOP(MOP_vsqsubuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqsub", "0,1,2", 1) +DEFINE_MOP(MOP_vsqsubvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqsub", "0,1,2", 1) +DEFINE_MOP(MOP_vduqsubrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "uqsub", "0,1,2", 1) +DEFINE_MOP(MOP_vuqsubuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uqsub", "0,1,2", 1) +DEFINE_MOP(MOP_vuqsubvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uqsub", "0,1,2", 1) +DEFINE_MOP(MOP_vsabauuu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "saba", "0,1,2", 1) +DEFINE_MOP(MOP_vsabavvv, 
{&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "saba", "0,1,2", 1) +DEFINE_MOP(MOP_vuabauuu, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uaba", "0,1,2", 1) +DEFINE_MOP(MOP_vuabavvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uaba", "0,1,2", 1) +DEFINE_MOP(MOP_vsabalvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sabal", "0,1,2", 1) +DEFINE_MOP(MOP_vuabalvuu, {&OpndDesc::Reg128VDS, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uabal", "0,1,2", 1) +DEFINE_MOP(MOP_vsabal2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sabal2", "0,1,2", 1) +DEFINE_MOP(MOP_vuabal2vvv, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uabal2", "0,1,2", 1) +DEFINE_MOP(MOP_vdsqabsrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqabs", "0,1", 1) +DEFINE_MOP(MOP_vsqabsuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqabs", "0,1", 1) +DEFINE_MOP(MOP_vsqabsvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqabs", "0,1", 1) +DEFINE_MOP(MOP_vursqrteuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "ursqrte", "0,1", 1) +DEFINE_MOP(MOP_vursqrtevv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "ursqrte", "0,1", 1) +DEFINE_MOP(MOP_vhsaddlvru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "saddlv", "0,1", 1) +DEFINE_MOP(MOP_vhsaddlvrv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "saddlv", "0,1", 1) +DEFINE_MOP(MOP_vwsaddlvru, {&OpndDesc::Reg32FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "saddlv", "0,1", 1) +DEFINE_MOP(MOP_vwsaddlvrv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "saddlv", "0,1", 1) +DEFINE_MOP(MOP_vdsaddlvrv, {&OpndDesc::Reg64FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "saddlv", 
"0,1", 1) +DEFINE_MOP(MOP_vhuaddlvru, {&OpndDesc::Reg16FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uaddlv", "0,1", 1) +DEFINE_MOP(MOP_vhuaddlvrv, {&OpndDesc::Reg16FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uaddlv", "0,1", 1) +DEFINE_MOP(MOP_vwuaddlvru, {&OpndDesc::Reg32FD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uaddlv", "0,1", 1) +DEFINE_MOP(MOP_vwuaddlvrv, {&OpndDesc::Reg32FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uaddlv", "0,1", 1) +DEFINE_MOP(MOP_vduaddlvrv, {&OpndDesc::Reg64FD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uaddlv", "0,1", 1) +DEFINE_MOP(MOP_vdsqshlrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshlvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vduqshlrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "uqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vuqshluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vuqshlvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vdsqshlrri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshluui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshlvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vduqshlrri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "uqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vuqshluui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "uqshl", "0,1,2", 1) 
+DEFINE_MOP(MOP_vuqshlvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "uqshl", "0,1,2", 1) +DEFINE_MOP(MOP_vdsqshlurri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshlu", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshluuui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshlu", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshluvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshlu", "0,1,2", 1) +DEFINE_MOP(MOP_vdsrshlurrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "srshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsrshluuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "srshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsrshluvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "srshl", "0,1,2", 1) +DEFINE_MOP(MOP_vdurshlurrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "urshl", "0,1,2", 1) +DEFINE_MOP(MOP_vurshluuuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "urshl", "0,1,2", 1) +DEFINE_MOP(MOP_vurshluvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "urshl", "0,1,2", 1) +DEFINE_MOP(MOP_vdsqrshlrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqrshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrshluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqrshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrshlvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqrshl", "0,1,2", 1) +DEFINE_MOP(MOP_vduqrshlrrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "uqrshl", "0,1,2", 1) +DEFINE_MOP(MOP_vuqrshluuu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "uqrshl", "0,1,2", 1) 
+DEFINE_MOP(MOP_vuqrshlvvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "uqrshl", "0,1,2", 1) +DEFINE_MOP(MOP_vsshllvui, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sshll", "0,1,2", 1) +DEFINE_MOP(MOP_vushllvui, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ushll", "0,1,2", 1) +DEFINE_MOP(MOP_vsshll2vvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sshll2", "0,1,2", 1) +DEFINE_MOP(MOP_vushll2vvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ushll2", "0,1,2", 1) +DEFINE_MOP(MOP_vshllvui, {&OpndDesc::Reg128VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "shll", "0,1,2", 1) +DEFINE_MOP(MOP_vshll2vvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "shll2", "0,1,2", 1) +DEFINE_MOP(MOP_vdslirri, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sli", "0,1,2", 1) +DEFINE_MOP(MOP_vsliuui, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sli", "0,1,2", 1) +DEFINE_MOP(MOP_vslivvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sli", "0,1,2", 1) +DEFINE_MOP(MOP_vdsrshrrri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "srshr", "0,1,2", 1) +DEFINE_MOP(MOP_vsrshruui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "srshr", "0,1,2", 1) +DEFINE_MOP(MOP_vsrshrvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "srshr", "0,1,2", 1) +DEFINE_MOP(MOP_vdurshrrri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "urshr", "0,1,2", 1) +DEFINE_MOP(MOP_vurshruui, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "urshr", "0,1,2", 1) +DEFINE_MOP(MOP_vurshrvvi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS, 
&OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "urshr", "0,1,2", 1) +DEFINE_MOP(MOP_vdssrarri, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ssra", "0,1,2", 1) +DEFINE_MOP(MOP_vssrauui, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ssra", "0,1,2", 1) +DEFINE_MOP(MOP_vssravvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ssra", "0,1,2", 1) +DEFINE_MOP(MOP_vdusrarri, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "usra", "0,1,2", 1) +DEFINE_MOP(MOP_vusrauui, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "usra", "0,1,2", 1) +DEFINE_MOP(MOP_vusravvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "usra", "0,1,2", 1) +DEFINE_MOP(MOP_vdsrsrarri, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "srsra", "0,1,2", 1) +DEFINE_MOP(MOP_vsrsrauui, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "srsra", "0,1,2", 1) +DEFINE_MOP(MOP_vsrsravvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "srsra", "0,1,2", 1) +DEFINE_MOP(MOP_vdursrarri, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ursra", "0,1,2", 1) +DEFINE_MOP(MOP_vursrauui, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ursra", "0,1,2", 1) +DEFINE_MOP(MOP_vursravvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "ursra", "0,1,2", 1) +DEFINE_MOP(MOP_vshrnuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "shrn", "0,1,2", 1) +DEFINE_MOP(MOP_vshrn2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "shrn2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshrunuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshrun", "0,1,2", 1) 
+DEFINE_MOP(MOP_vsqshrun2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshrun2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshrnuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshrn", "0,1,2", 1) +DEFINE_MOP(MOP_vuqshrnuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "uqshrn", "0,1,2", 1) +DEFINE_MOP(MOP_vsqshrn2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqshrn2", "0,1,2", 1) +DEFINE_MOP(MOP_vuqshrn2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "uqshrn2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrshrunuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqrshrun", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrshrun2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqrshrun2", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrshrnuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqrshrn", "0,1,2", 1) +DEFINE_MOP(MOP_vsqrshrn2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sqrshrn2", "0,1,2", 1) +DEFINE_MOP(MOP_vuqrshrnuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "uqrshrn", "0,1,2", 1) +DEFINE_MOP(MOP_vuqrshrn2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "uqrshrn2", "0,1,2", 1) +DEFINE_MOP(MOP_vrshrnuvi, {&OpndDesc::Reg64VD, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "rshrn", "0,1,2", 1) +DEFINE_MOP(MOP_vrshrn2vvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "rshrn2", "0,1,2", 1) +DEFINE_MOP(MOP_vdsrirri, {&OpndDesc::Reg64FDS, &OpndDesc::Reg64FS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sri", "0,1,2", 1) +DEFINE_MOP(MOP_vsriuui, {&OpndDesc::Reg64VDS, &OpndDesc::Reg64VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sri", "0,1,2", 1) 
+DEFINE_MOP(MOP_vsrivvi, {&OpndDesc::Reg128VDS, &OpndDesc::Reg128VS, &OpndDesc::Imm8}, ISVECTOR, kLtFpalu, "sri", "0,1,2", 1) +DEFINE_MOP(MOP_vbsqnegrr, {&OpndDesc::Reg8FD, &OpndDesc::Reg8FS}, ISVECTOR, kLtFpalu, "sqneg", "0,1", 1) +DEFINE_MOP(MOP_vhsqnegrr, {&OpndDesc::Reg16FD, &OpndDesc::Reg16FS}, ISVECTOR, kLtFpalu, "sqneg", "0,1", 1) +DEFINE_MOP(MOP_vssqnegrr, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS}, ISVECTOR, kLtFpalu, "sqneg", "0,1", 1) +DEFINE_MOP(MOP_vdsqnegrr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, ISVECTOR, kLtFpalu, "sqneg", "0,1", 1) +DEFINE_MOP(MOP_vsqneguu, {&OpndDesc::Reg64VD, &OpndDesc::Reg64VS}, ISVECTOR, kLtFpalu, "sqneg", "0,1", 1) +DEFINE_MOP(MOP_vsqnegvv, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VS}, ISVECTOR, kLtFpalu, "sqneg", "0,1", 1) /* * MOP_clinit * will be emit to four instructions in a row: * adrp xd, :got:__classinfo__Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B - * ldr xd, [xd,#:got_lo12:__classinfo__Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] - * ldr xd, [xd,#112] + * ldr xd, [xd, #:got_lo12:__classinfo__Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] + * ldr xd, [xd, #112] * ldr wzr, [xd] */ -DEFINE_MOP(MOP_clinit, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtClinit,"intrinsic_clinit","0,1",4) +DEFINE_MOP(MOP_clinit, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc}, ISATOMIC | CANTHROW, kLtClinit, "intrinsic_clinit", "0,1", 4) /* * MOP_counter * will be emit to five instructions in a row: * adrp x1, :got:__profile_table + idx - * ldr w17, [x1,#:got_lo12:__profile_table] + * ldr w17, [x1, #:got_lo12:__profile_table] * add w17, w17, #1 - * str w17,[x1,,#:got_lo12:__profile_table] + * str w17, [x1, , #:got_lo12:__profile_table] */ -DEFINE_MOP(MOP_counter, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtClinit,"intrinsic_counter","0,1", 4) +DEFINE_MOP(MOP_counter, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc}, ISATOMIC | CANTHROW, kLtClinit, "intrinsic_counter", "0,1", 4) /* * 
MOP_c_counter * will be emit to seven instructions in a row: * str x30, [sp , #-16]! * adrp x16, :got:__profile_table - * ldr x16, [x16,#:got_lo12:__profile_table] + * ldr x16, [x16, #:got_lo12:__profile_table] * ldr x30, [x16 + offset] * add x30, x30, #1 * str w30, [x16 + offset] * ldr x30, [sp], #16 */ -DEFINE_MOP(MOP_c_counter, {&OpndDesc::LiteralSrc,&OpndDesc::Imm64, &OpndDesc::Reg64ID},ISATOMIC|CANTHROW,kLtClinit,"intrinsic_counter","0,1", 8) +DEFINE_MOP(MOP_c_counter, {&OpndDesc::LiteralSrc, &OpndDesc::Imm64, &OpndDesc::Reg64ID}, ISATOMIC | CANTHROW, kLtClinit, "intrinsic_counter", "0,1", 8) /* * will be emit to two instrunctions in a row: * ldr wd, [xs] // xd and xs should be differenct register * ldr wd, [xd] */ -DEFINE_MOP(MOP_lazy_ldr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISATOMIC|CANTHROW|SPINTRINSIC,kLtClinitTail,"intrinsic_lazyload","0,1",2) +DEFINE_MOP(MOP_lazy_ldr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISATOMIC | CANTHROW | SPINTRINSIC, kLtClinitTail, "intrinsic_lazyload", "0,1", 2) /* * will be emit to three instrunctions in a row: * adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset - * ldr xd, [xd,#:got_lo12:__staticDecoupleValueOffset$$xx+offset] + * ldr xd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xx+offset] * ldr xzr, [xd] */ -DEFINE_MOP(MOP_lazy_ldr_static, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtAdrpLdr,"intrinsic_lazyloadstatic","0,1",3) +DEFINE_MOP(MOP_lazy_ldr_static, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc}, ISATOMIC | CANTHROW, kLtAdrpLdr, "intrinsic_lazyloadstatic", "0,1", 3) /* A pseudo instruction followed MOP_lazy_ldr, to make sure xs and xd be allocated to different physical registers. 
*/ -DEFINE_MOP(MOP_lazy_tail, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},0,kLtUndef,"pseudo_lazy_tail","",0) +DEFINE_MOP(MOP_lazy_tail, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, 0, kLtUndef, "pseudo_lazy_tail", "", 0) /* will be emit to two instructions in a row: * adrp xd, _PTR__cinf_Ljava_2Flang_2FSystem_3B * ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] * MOP_adrp_ldr */ -DEFINE_MOP(MOP_adrp_ldr, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtAdrpLdr,"intrinsic_adrpldr","0,1",2) +DEFINE_MOP(MOP_adrp_ldr, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc}, ISATOMIC | CANTHROW, kLtAdrpLdr, "intrinsic_adrpldr", "0,1", 2) /* will be emit to two instructions in a row: * adrp xd, label * add xd, xd, #:lo12:label */ -DEFINE_MOP(MOP_adrp_label, {&OpndDesc::Reg64ID, &OpndDesc::Imm64},0,kLtAlu,"intrinsic_adrplabel","0,1", 2) +DEFINE_MOP(MOP_adrp_label, {&OpndDesc::Reg64ID, &OpndDesc::Imm64}, 0, kLtAlu, "intrinsic_adrplabel", "0,1", 2) /* * will be emit to three instrunctions in a row: * adrp xd, :got:__arrayClassCacheTable$$xxx+offset - * ldr xd, [xd,#:got_lo12:__arrayClassCacheTable$$xx+offset] + * ldr xd, [xd, #:got_lo12:__arrayClassCacheTable$$xx+offset] * ldr xzr, [xd] */ -DEFINE_MOP(MOP_arrayclass_cache_ldr, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtAdrpLdr,"intrinsic_loadarrayclass","0,1",3) +DEFINE_MOP(MOP_arrayclass_cache_ldr, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc}, ISATOMIC | CANTHROW, kLtAdrpLdr, "intrinsic_loadarrayclass", "0,1", 3) /* - * ldr x17, [xs,#112] + * ldr x17, [xs, #112] * ldr wzr, [x17] */ -DEFINE_MOP(MOP_clinit_tail, {&OpndDesc::Reg64IS},ISATOMIC|CANTHROW|SPINTRINSIC,kLtClinitTail,"intrinsic_clinit_tail","0",2) +DEFINE_MOP(MOP_clinit_tail, {&OpndDesc::Reg64IS}, ISATOMIC | CANTHROW | SPINTRINSIC, kLtClinitTail, "intrinsic_clinit_tail", "0", 2) /* * intrinsic Unsafe.getAndAddInt @@ -1367,7 +1382,7 @@ DEFINE_MOP(MOP_clinit_tail, {&OpndDesc::Reg64IS},ISATOMIC|CANTHROW|SPINTRINSIC,k * stlxr ws, wt, [xt] * 
cbnz ws, label */ -DEFINE_MOP(MOP_get_and_addI, {&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_add_int","",5) +DEFINE_MOP(MOP_get_and_addI, {&OpndDesc::Reg32ID, &OpndDesc::Reg64ID, &OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_get_add_int", "", 5) /* * intrinsic Unsafe.getAndAddLong * intrinsic_get_add_long x0, xt, xs, ws, x1, x2, x3, ws, label @@ -1378,7 +1393,7 @@ DEFINE_MOP(MOP_get_and_addI, {&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::R * stlxr ws, x2, [xt] * cbnz ws, label */ -DEFINE_MOP(MOP_get_and_addL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_add_long","",5) +DEFINE_MOP(MOP_get_and_addL, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_get_add_long", "", 5) /* * intrinsic Unsafe.getAndSetInt @@ -1389,7 +1404,7 @@ DEFINE_MOP(MOP_get_and_addL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::R * stlxr w2, w3, [xt] * cbnz w2, label */ -DEFINE_MOP(MOP_get_and_setI, {&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_set_int","0,1,2,3,4",4) +DEFINE_MOP(MOP_get_and_setI, {&OpndDesc::Reg32ID, &OpndDesc::Reg64ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_get_set_int", 
"0,1,2,3,4", 4) /* * intrinsic Unsafe.getAndSetLong * intrinsic_get_set_long x0, x1, x2, x3, label @@ -1399,7 +1414,7 @@ DEFINE_MOP(MOP_get_and_setI, {&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::R * stlxr w2, x3, [xt] * cbnz w2, label */ -DEFINE_MOP(MOP_get_and_setL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_set_long","0,1,2,3,4",4) +DEFINE_MOP(MOP_get_and_setL, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_get_set_long", "0,1,2,3,4", 4) /* * intrinsic Unsafe.compareAndSwapInt @@ -1414,7 +1429,7 @@ DEFINE_MOP(MOP_get_and_setL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::R * label2: * cset x0, eq */ -DEFINE_MOP(MOP_compare_and_swapI, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_compare_swap_int","0,1,2,3,4,5,6",7) +DEFINE_MOP(MOP_compare_and_swapI, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::AddressName, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_compare_swap_int", "0,1,2,3,4,5,6", 7) /* * intrinsic Unsafe.compareAndSwapLong * intrinsic_compare_swap_long x0, xt, xs, x1, x2, x3, x4, lable1, label2 @@ -1428,7 +1443,7 @@ DEFINE_MOP(MOP_compare_and_swapI, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDe * label2: * cset x0, eq */ -DEFINE_MOP(MOP_compare_and_swapL, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_compare_swap_long","0,1,2,3,4,5,6",7) +DEFINE_MOP(MOP_compare_and_swapL, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_compare_swap_long", "0,1,2,3,4,5,6", 7) /* * intrinsic String.indexOf(Ljava/lang/String;)I @@ -1444,9 +1459,9 @@ DEFINE_MOP(MOP_compare_and_swapL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDe * add x1, x1, x2 * neg x4, x4 * neg x2, x2 - * ldr x5, [x3,x4] + * ldr x5, [x3, x4] * .Label.FIRST_LOOP: - * ldr x7, [x1,x2] + * ldr x7, [x1, x2] * cmp x5, x7 * b.eq .Label.STR1_LOOP * .Label.STR2_NEXT: @@ -1458,8 +1473,8 @@ DEFINE_MOP(MOP_compare_and_swapL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDe * add x9, x2, #8 * b.ge .Label.LAST_WORD * .Label.STR1_NEXT: - * ldr x6, [x3,x8] - * ldr x7, [x1,x9] + * ldr x6, [x3, x8] + * ldr x7, [x1, x9] * cmp x6, x7 * b.ne .Label.STR2_NEXT * adds x8, x8, #8 @@ -1468,7 +1483,7 @@ DEFINE_MOP(MOP_compare_and_swapL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDe * .Label.LAST_WORD: * ldr x6, [x3] * sub x9, x1, x4 - * ldr x7, [x9,x2] + * ldr x7, [x9, x2] * cmp x6, x7 * b.ne .Label.STR2_NEXT * add w0, w10, w2 @@ -1477,66 +1492,66 @@ DEFINE_MOP(MOP_compare_and_swapL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDe * mov w0, #-1 * .Label.RET: */ -DEFINE_MOP(MOP_string_indexof, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IDS,&OpndDesc::Reg32IDS,&OpndDesc::Reg64IDS,&OpndDesc::Reg32IDS,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_string_indexof","0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17",36) +DEFINE_MOP(MOP_string_indexof, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IDS, &OpndDesc::Reg32IDS, &OpndDesc::Reg64IDS, &OpndDesc::Reg32IDS, &OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg32ID, &OpndDesc::AddressName, &OpndDesc::AddressName, &OpndDesc::AddressName, &OpndDesc::AddressName, &OpndDesc::AddressName, &OpndDesc::AddressName, &OpndDesc::AddressName}, HASLOOP | CANTHROW | SPINTRINSIC, kLtBranch, "intrinsic_string_indexof", "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17", 36) /* MOP_tail_call_opt_xbl -- branch without link (call); this is a special definition */ -DEFINE_MOP(MOP_tail_call_opt_xbl, {&OpndDesc::AddressName,&OpndDesc::ListSrc},CANTHROW|ISTAILCALL,kLtBranch,"b","0", 1) +DEFINE_MOP(MOP_tail_call_opt_xbl, {&OpndDesc::AddressName, &OpndDesc::ListSrc}, CANTHROW | ISTAILCALL, kLtBranch, "b", "0", 1) /* MOP_tail_call_opt_xblr -- branch without link (call) to register; this is a special definition */ -DEFINE_MOP(MOP_tail_call_opt_xblr, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc},CANTHROW|ISTAILCALL,kLtBranch,"br","0", 1) +DEFINE_MOP(MOP_tail_call_opt_xblr, {&OpndDesc::Reg64IS, &OpndDesc::ListSrc}, CANTHROW | ISTAILCALL, kLtBranch, "br", "0", 1) /* MOP_pseudo_param_def_x, */ -DEFINE_MOP(MOP_pseudo_param_def_x, {&OpndDesc::Reg64ID},0,kLtUndef,"//MOP_pseudo_param_def","0", 0) +DEFINE_MOP(MOP_pseudo_param_def_x, {&OpndDesc::Reg64ID}, 0, kLtUndef, "//MOP_pseudo_param_def", "0", 0) /* MOP_pseudo_param_def_w, */ 
-DEFINE_MOP(MOP_pseudo_param_def_w, {&OpndDesc::Reg32ID},0,kLtUndef,"//MOP_pseudo_param_def","0", 0) +DEFINE_MOP(MOP_pseudo_param_def_w, {&OpndDesc::Reg32ID}, 0, kLtUndef, "//MOP_pseudo_param_def", "0", 0) /* MOP_pseudo_param_def_d, */ -DEFINE_MOP(MOP_pseudo_param_def_d, {&OpndDesc::Reg64FD},0,kLtUndef,"//MOP_pseudo_param_def","0", 0) +DEFINE_MOP(MOP_pseudo_param_def_d, {&OpndDesc::Reg64FD}, 0, kLtUndef, "//MOP_pseudo_param_def", "0", 0) /* MOP_pseudo_param_def_s, */ -DEFINE_MOP(MOP_pseudo_param_def_s, {&OpndDesc::Reg32FD},0,kLtUndef,"//MOP_pseudo_param_def","0", 0) +DEFINE_MOP(MOP_pseudo_param_def_s, {&OpndDesc::Reg32FD}, 0, kLtUndef, "//MOP_pseudo_param_def", "0", 0) /* MOP_pseudo_param_store_x, */ -DEFINE_MOP(MOP_pseudo_param_store_x, {&OpndDesc::Mem64D},0,kLtUndef,"//MOP_pseudo_param_store_x","0", 0) +DEFINE_MOP(MOP_pseudo_param_store_x, {&OpndDesc::Mem64D}, 0, kLtUndef, "//MOP_pseudo_param_store_x", "0", 0) /* MOP_pseudo_param_store_w, */ -DEFINE_MOP(MOP_pseudo_param_store_w, {&OpndDesc::Mem32D},0,kLtUndef,"//MOP_pseudo_param_store_w","0", 0) +DEFINE_MOP(MOP_pseudo_param_store_w, {&OpndDesc::Mem32D}, 0, kLtUndef, "//MOP_pseudo_param_store_w", "0", 0) /* MOP_pseudo_ref_init_x, */ -DEFINE_MOP(MOP_pseudo_ref_init_x, {&OpndDesc::Mem64D},0,kLtUndef,"//MOP_pseudo_ref_init_x","0", 0) +DEFINE_MOP(MOP_pseudo_ref_init_x, {&OpndDesc::Mem64D}, 0, kLtUndef, "//MOP_pseudo_ref_init_x", "0", 0) /* MOP_pseudo_ret_int, */ -DEFINE_MOP(MOP_pseudo_ret_int, {&OpndDesc::Reg64IS},0,kLtUndef,"//MOP_pseudo_ret_int","", 0) +DEFINE_MOP(MOP_pseudo_ret_int, {&OpndDesc::Reg64IS}, 0, kLtUndef, "//MOP_pseudo_ret_int", "", 0) /* MOP_pseudo_ret_float, */ -DEFINE_MOP(MOP_pseudo_ret_float, {&OpndDesc::Reg64FS},0,kLtUndef,"//MOP_pseudo_ret_float","", 0) +DEFINE_MOP(MOP_pseudo_ret_float, {&OpndDesc::Reg64FS}, 0, kLtUndef, "//MOP_pseudo_ret_float", "", 0) /* When exception occurs, R0 and R1 may be defined by runtime code. 
*/ /* MOP_pseudo_eh_def_x, */ -DEFINE_MOP(MOP_pseudo_eh_def_x, {&OpndDesc::Reg64ID},0,kLtUndef,"//MOP_pseudo_eh_def_x","0", 0) +DEFINE_MOP(MOP_pseudo_eh_def_x, {&OpndDesc::Reg64ID}, 0, kLtUndef, "//MOP_pseudo_eh_def_x", "0", 0) /*MOP_nop */ -DEFINE_MOP(MOP_nop, {},ISNOP,kLtAlu,"nop","", 1) +DEFINE_MOP(MOP_nop, {}, ISNOP, kLtAlu, "nop", "", 1) /* phi node for SSA form */ /* MOP_xphirr */ -DEFINE_MOP(MOP_xphirr, {&OpndDesc::Reg64ID,&OpndDesc::ListSrc},ISPHI,kLtAlu,"//phi","0,1",1) +DEFINE_MOP(MOP_xphirr, {&OpndDesc::Reg64ID, &OpndDesc::ListSrc}, ISPHI, kLtAlu, "//phi", "0,1", 1) /* MOP_wphirr */ -DEFINE_MOP(MOP_wphirr, {&OpndDesc::Reg32ID,&OpndDesc::ListSrc},ISPHI,kLtAlu,"//phi","0,1",1) +DEFINE_MOP(MOP_wphirr, {&OpndDesc::Reg32ID, &OpndDesc::ListSrc}, ISPHI, kLtAlu, "//phi", "0,1", 1) /* MOP_xvphis */ -DEFINE_MOP(MOP_xvphis, {&OpndDesc::Reg32FD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"//phi","0,1",1) +DEFINE_MOP(MOP_xvphis, {&OpndDesc::Reg32FD, &OpndDesc::ListSrc}, ISPHI, kLtFpalu, "//phi", "0,1", 1) /* MOP_xvphid */ -DEFINE_MOP(MOP_xvphid, {&OpndDesc::Reg64FD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"//phi","0,1",1) +DEFINE_MOP(MOP_xvphid, {&OpndDesc::Reg64FD, &OpndDesc::ListSrc}, ISPHI, kLtFpalu, "//phi", "0,1", 1) /* MOP_xvphivd */ -DEFINE_MOP(MOP_xvphivd, {&OpndDesc::Reg128VD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"movi","0,1",1) +DEFINE_MOP(MOP_xvphivd, {&OpndDesc::Reg128VD, &OpndDesc::ListSrc}, ISPHI, kLtFpalu, "movi", "0,1", 1) /* A pseudo instruction that used for seperating dependence graph. */ /* MOP_pseudo_dependence_seperator, */ -DEFINE_MOP(MOP_pseudo_dependence_seperator, {},0,kLtUndef,"//MOP_pseudo_dependence_seperator","0", 0) +DEFINE_MOP(MOP_pseudo_dependence_seperator, {}, 0, kLtUndef, "//MOP_pseudo_dependence_seperator", "0", 0) /* A pseudo instruction that used for replacing MOP_clinit_tail after clinit merge in scheduling. 
*/ /* MOP_pseudo_none, */ -DEFINE_MOP(MOP_pseudo_none, {},0,kLtUndef,"//MOP_pseudo_none","0", 0) +DEFINE_MOP(MOP_pseudo_none, {}, 0, kLtUndef, "//MOP_pseudo_none", "0", 0) /* end of AArch64 instructions */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_mem_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mem_md.def index 574452818de95db43195c10463c938a729ebb53e..50997e7f3bd721fbe446af912258bca0964d23f5 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_mem_md.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mem_md.def @@ -1,356 +1,369 @@ +/* + * Copyright (c) [2022-2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ /* Load Register Signed Byte */ -DEFINE_MOP(MOP_wldrsb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrsb_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_ri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_rr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_rex, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) 
+DEFINE_MOP(MOP_wldrsb_rls, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_rlo, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_pri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_poi, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_l, {&OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) /* ldrsb */ -DEFINE_MOP(MOP_xldrsb_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_poi, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_xldrsb_l, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_ri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rex, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rls, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rlo, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_pri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_poi, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_l, {&OpndDesc::Reg64ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldrsb", "0,1", 1, StrLdr8ImmValid) /* ldrb */ -DEFINE_MOP(MOP_wldrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_rr, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldrb_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_ri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_rr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1,"ldrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_rex, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_rls, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_rlo, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) 
+DEFINE_MOP(MOP_wldrb_pri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_poi, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_l, {&OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldrb", "0,1", 1, StrLdr8ImmValid) /* Load Register Signed Halfword */ -DEFINE_MOP(MOP_wldrsh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrsh_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrss","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_ri, {&OpndDesc::Reg32ID, 
&OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1,"ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_rr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_rex, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_rls, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_rlo, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_pri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_poi, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrsh_l, {&OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldrsh", "0,1", 1, StrLdr16ImmValid) /* ldrsh */ -DEFINE_MOP(MOP_xldrsh_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_rls,
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_xldrsh_l, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_ri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_rr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_rex, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_rls, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_rlo, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_pri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_poi, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, 
StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_l, {&OpndDesc::Reg64ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad2, "ldrsh", "0,1", 1, StrLdr16ImmValid) /* ldrh */ -DEFINE_MOP(MOP_wldrh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldrh_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_ri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rex, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, 
&OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rls, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rlo, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_pri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_poi, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_l, {&OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldrh", "0,1", 1, StrLdr16ImmValid) /* Load Register Signed Word */ -DEFINE_MOP(MOP_xldrsw_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldrsw_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldrsw_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldrsw_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldrsw_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldrsw_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldrsw_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) 
-DEFINE_MOP(MOP_xldrsw_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldrsw_l, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_ri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rex, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rls, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rlo, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_pri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_poi, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_l, {&OpndDesc::Reg64ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad2, "ldrsw", "0,1", 1, StrLdr32ImmValid) /* ldr to w */ -DEFINE_MOP(MOP_wldr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_rr, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wldr_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_ri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rex, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rls, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rlo, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_pri, 
{&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1,"ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_poi, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_l, {&OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad1, "ldr", "0,1", 1, StrLdr32ImmValid) /* ldr to x */ -DEFINE_MOP(MOP_xldr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldr_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_ri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) 
+DEFINE_MOP(MOP_xldr_rr, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_rex, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_rls, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_rlo, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_pri, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_poi, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_l, {&OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtLoad2, "ldr", "0,1", 1, StrLdr32ImmValid) /* ldr to float reg */ -DEFINE_MOP(MOP_sldr_r, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_ri, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_rr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_rex, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_rls, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_rlo, 
{&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_pri, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_poi, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sldr_l, {&OpndDesc::Reg32FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_r, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_ri, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rr, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rex, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rls, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rlo, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_pri, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_poi, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_l, {&OpndDesc::Reg32FD, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtFLoad64, "ldr", "0,1", 1, StrLdr32ImmValid) -DEFINE_MOP(MOP_dldr_r, 
{&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_ri, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_rr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_rex, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_rls, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_rlo, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_pri, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_poi, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dldr_l, {&OpndDesc::Reg64FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_r, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_ri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rex, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rls, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS, 
&OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rlo, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_pri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_poi, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_l, {&OpndDesc::Reg64FD, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr64ImmValid) -DEFINE_MOP(MOP_qldr_r, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_ri, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_rr, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_rex, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_rls, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_rlo, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_pri, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_poi, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qldr_l, 
{&OpndDesc::Reg128VD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_r, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_ri, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rr, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rex, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rls, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rlo, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_pri, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_poi, {&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_l, {&OpndDesc::Reg128VD, &OpndDesc::AddressName}, ISLOAD | CANTHROW, kLtFLoadMany, "ldr", "0,1", 1, StrLdr128ImmValid) /* ldr to pair reg */ -DEFINE_MOP(MOP_wldp_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_rr, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wldp_l, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_ri, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rr, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rex, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", 
"0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rls, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rlo, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_pri, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_poi, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_l, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::AddressName}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad2, "ldp", "0,1,2", 1, StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldp_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_rlo, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldp_l, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_ri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rex, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rls, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rlo,{&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_pri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, 
&OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_poi, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_l, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::AddressName}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldp", "0,1,2", 1, StrLdr64PairImmValid) -DEFINE_MOP(MOP_xldpsw_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xldpsw_l, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_ri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rr, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rex, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rls, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rlo, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_pri, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_poi, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_l, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::AddressName}, ISLOAD | ISLOADPAIR | CANTHROW, kLtLoad3plus, "ldpsw", "0,1,2", 1, StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_r, 
{&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_ri, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_rr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_rex, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_rls, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_rlo, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_pri, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_poi, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sldp_l, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_r, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_ri, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rr, 
{&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rex, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rls, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rlo, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_pri, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_poi, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_l, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FD, &OpndDesc::AddressName}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoad64, "ldp", "0,1,2", 1, StrLdr32PairImmValid) -DEFINE_MOP(MOP_dldp_r, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_ri, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_rr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_rex, 
{&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_rls, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_rlo, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_pri, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_poi, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dldp_l, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_r, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_ri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rr, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rex, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rls, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, 
&OpndDesc::Bitshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rlo, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_pri, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_poi, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_l, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FD, &OpndDesc::AddressName}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr64PairImmValid) -DEFINE_MOP(MOP_qldp_r, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_ri, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_rr, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_rex, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_rls, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_rlo, 
{&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_pri, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_poi, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qldp_l, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_r, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_ri, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rr, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rex, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rls, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rlo, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_pri, {&OpndDesc::Reg128VD, 
&OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_poi, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_l, {&OpndDesc::Reg128VD, &OpndDesc::Reg128VD, &OpndDesc::AddressName}, ISLOAD | ISLOADPAIR | CANTHROW, kLtFLoadMany, "ldp", "0,1,2", 1, StrLdr128PairImmValid) /* Load with Acquire semantics */ -DEFINE_MOP(MOP_wldarb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldarh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldar_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldar_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wldarb_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldarb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldarh_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldarh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldar_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldar", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldar_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad2, "ldar", "0,1", 1, StrLdr64ImmValid) /* Load exclusive with/without acquire semantics */ -DEFINE_MOP(MOP_wldxrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldxrh_r, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldxr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldxr_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wldxrb_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | CANTHROW, kLtLoad1, "ldxrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldxrh_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | CANTHROW, kLtLoad1, "ldxrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldxr_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | CANTHROW, kLtLoad1, "ldxr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldxr_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | CANTHROW, kLtLoad2, "ldxr", "0,1", 1, StrLdr64ImmValid) -DEFINE_MOP(MOP_wldaxrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wldaxrh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wldaxr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldaxr_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_wldaxp_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad2,"ldaxp","0,1,2",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xldaxp_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad2,"ldaxp","0,1,2",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wldaxrb_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, 
kLtLoad1, "ldaxrb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wldaxrh_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxrh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wldaxr_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad1, "ldaxr", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldaxr_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad2, "ldaxr", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_wldaxp_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad2, "ldaxp", "0,1,2", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xldaxp_r, {&OpndDesc::Reg64ID, &OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISLOAD | ISLOADPAIR | ISATOMIC | HASACQUIRE | CANTHROW, kLtLoad3plus, "ldaxp", "0,1,2", 1, StrLdr64ImmValid) /* Store Register Byte */ -DEFINE_MOP(MOP_wstrb_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstrb_ri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstrb_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstrb_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstrb_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstrb_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstrb_pri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) 
-DEFINE_MOP(MOP_wstrb_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstrb_l, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_ri, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rr, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rex, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rls, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rlo, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_pri, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_poi, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_l, {&OpndDesc::Reg32IS, &OpndDesc::AddressName}, ISSTORE | CANTHROW, kLtStore1, "strb", "0,1", 1, StrLdr8ImmValid) /* Store Register Halfword */ -DEFINE_MOP(MOP_wstrh_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstrh_ri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) 
-DEFINE_MOP(MOP_wstrh_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstrh_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstrh_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstrh_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstrh_pri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstrh_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstrh_l, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_ri, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rr, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rex, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rls, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rlo, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | CANTHROW, 
kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_pri, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_poi, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_l, {&OpndDesc::Reg32IS, &OpndDesc::AddressName}, ISSTORE | CANTHROW, kLtStore1, "strh", "0,1", 1, StrLdr16ImmValid) /* Store Register Word */ -DEFINE_MOP(MOP_wstr_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_ri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_pri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_wstr_l, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_ri, 
{&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rr, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rex, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rls, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rlo, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_pri, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_poi, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_l, {&OpndDesc::Reg32IS, &OpndDesc::AddressName}, ISSTORE | CANTHROW, kLtStore1, "str", "0,1", 1, StrLdr32ImmValid) /* Store Register Double word */ -DEFINE_MOP(MOP_xstr_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_ri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_rr, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_rex, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_rls, 
{&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_rlo, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_pri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_poi, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstr_l, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_ri, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rr, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rex, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rls, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rlo, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_pri, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_poi, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) 
+DEFINE_MOP(MOP_xstr_l, {&OpndDesc::Reg64IS, &OpndDesc::AddressName}, ISSTORE | CANTHROW, kLtStore2, "str", "0,1", 1, StrLdr64ImmValid) /* Store Register SIMD/FP Float */ -DEFINE_MOP(MOP_sstr_r, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_ri, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_rr, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_rex, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_rls, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_rlo, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_pri, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_poi, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_sstr_l, {&OpndDesc::Reg32FS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_r, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_ri, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rr, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rex, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS, 
&OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rls, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rlo, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_pri, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_poi, {&OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_l, {&OpndDesc::Reg32FS, &OpndDesc::AddressName}, ISSTORE | CANTHROW, kLtFStore64, "str", "0,1", 1, StrLdr32ImmValid) /* Store Register SIMD/FP Double */ -DEFINE_MOP(MOP_dstr_r, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_ri, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_rr, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_rex, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_rls, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_rlo, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_pri, 
{&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_poi, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_dstr_l, {&OpndDesc::Reg64FS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_r, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_ri, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rr, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rex, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rls, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rlo, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_pri, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_poi, {&OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_l, {&OpndDesc::Reg64FS, &OpndDesc::AddressName}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr64ImmValid) /* MOP_qstr -- Store Register SIMD/FP Double */ -DEFINE_MOP(MOP_qstr_r, 
{&OpndDesc::Reg128VS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_ri, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_rr, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_rex, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_rls, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_rlo, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_pri, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_poi, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) -DEFINE_MOP(MOP_qstr_l, {&OpndDesc::Reg128VS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_r, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_ri, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_rr, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_rex, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) 
+DEFINE_MOP(MOP_qstr_rls, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_rlo, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_pri, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_poi, {&OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_l, {&OpndDesc::Reg128VS, &OpndDesc::AddressName}, ISSTORE | CANTHROW, kLtFStoreMany, "str", "0,1", 1, StrLdr128ImmValid) /* store to pair reg */ -DEFINE_MOP(MOP_wstp_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_ri, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_pri, 
{&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_wstp_l, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_ri, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_rr, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_rex, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_rls, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_rlo, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_pri, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_poi, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, 
ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_l, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::AddressName}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore2, "stp", "0,1,2", 1, StrLdr32PairImmValid) /* MOP_xstp */ -DEFINE_MOP(MOP_xstp_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_ri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_rr, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_rex, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_rls, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_rlo, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_pri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_poi, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_xstp_l, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_r, 
{&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_ri, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rr, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rex, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rls, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rlo, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_pri, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_poi, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_l, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::AddressName}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtStore3plus, "stp", "0,1,2", 1, StrLdr32PairImmValid) /* AArch64 does not define STPSW. It has no practical value. 
*/ /* MOP_sstp */ -DEFINE_MOP(MOP_sstp_r, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_ri, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_rr, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_rex, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_rls, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_rlo, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_pri, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_poi, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) -DEFINE_MOP(MOP_sstp_l, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_r, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_ri, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | 
ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rr, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rex, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rls, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rlo, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_pri, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_poi, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_l, {&OpndDesc::Reg32FS, &OpndDesc::Reg32FS, &OpndDesc::AddressName}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStore64, "stp", "0,1,2", 1, StrLdr32PairImmValid) /* MOP_dstp */ -DEFINE_MOP(MOP_dstp_r, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_ri, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_rr, 
{&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_rex, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_rls, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_rlo, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_pri, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_poi, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) -DEFINE_MOP(MOP_dstp_l, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_r, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_ri, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rr, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rex, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, 
&OpndDesc::Extendshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rls, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rlo, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_pri, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_poi, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_l, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FS, &OpndDesc::AddressName}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtFStoreMany, "stp", "0,1,2", 1, StrLdr64PairImmValid) /* MOP_qstp */ -DEFINE_MOP(MOP_qstp_r, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_ri, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_rr, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_rex, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_rls, 
{&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_rlo, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_pri, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_poi, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) -DEFINE_MOP(MOP_qstp_l, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_r, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_ri, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rr, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rex, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Reg32IS, &OpndDesc::Extendshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rls, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Bitshift64}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rlo, 
{&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::AddressName, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_pri, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_poi, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::Reg64IS, &OpndDesc::Imm8}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_l, {&OpndDesc::Reg128VS, &OpndDesc::Reg128VS, &OpndDesc::AddressName}, ISSTORE | ISSTOREPAIR | CANTHROW, kLtAdvsimdMulQ, "stp", "0,1,2", 1, StrLdr128PairImmValid) /* AARCH64 Store with Release semantics */ /* MOP_wstlrb -- Store-Release Register Byte */ -DEFINE_MOP(MOP_wstlrb_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstlrb_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlrb", "0,1", 1, StrLdr8ImmValid) /* MOP_wstlrh -- Store-Release Register Halfword */ -DEFINE_MOP(MOP_wstlrh_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstlrh_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlrh", "0,1", 1, StrLdr16ImmValid) /* MOP_wstlr -- Store-Release Register Word */ -DEFINE_MOP(MOP_wstlr_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstlr_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlr", "0,1", 1, StrLdr32ImmValid) /* MOP_xstlr -- Store-Release Register Double word */ -DEFINE_MOP(MOP_xstlr_r, 
{&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstlr_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore2, "stlr", "0,1", 1, StrLdr64ImmValid) -DEFINE_MOP(MOP_wstxrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxrb","0,1,2",1,StrLdr8ImmValid) -DEFINE_MOP(MOP_wstxrh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxrh","0,1,2",1,StrLdr16ImmValid) -DEFINE_MOP(MOP_wstxr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr32ImmValid) -DEFINE_MOP(MOP_xstxr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr64ImmValid) - -DEFINE_MOP(MOP_wstlxp_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) -DEFINE_MOP(MOP_xstlxp_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wstxrb_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stxrb", "0,1,2", 1, StrLdr8ImmValid) +DEFINE_MOP(MOP_wstxrh_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stxrh", "0,1,2", 1, StrLdr16ImmValid) +DEFINE_MOP(MOP_wstxr_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stxr", "0,1,2", 1, StrLdr32ImmValid) +DEFINE_MOP(MOP_xstxr_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISATOMIC | 
HASRELEASE | CANTHROW, kLtStore2, "stxr", "0,1,2", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_wstlxp_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | ISATOMIC | HASRELEASE | CANTHROW, kLtStore1, "stlxp", "0,1,2,3", 1, StrLdr64ImmValid) +DEFINE_MOP(MOP_xstlxp_r, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISSTORE | ISSTOREPAIR | ISATOMIC | HASRELEASE | CANTHROW, kLtStore2, "stlxp", "0,1,2,3", 1, StrLdr64ImmValid) diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h index 0b3a1d7942b4a194c52bec19c6cfe0642d7660fe..7b1b41233a76ae91869cfccb797788431c6f2f8d 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h @@ -149,8 +149,6 @@ class AArch64MemLayout : public MemLayout { void AssignSpillLocationsToPseudoRegisters() override; - SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) override; - uint64 StackFrameSize() const; uint32 RealStackFrameSize() const; @@ -195,7 +193,6 @@ class AArch64MemLayout : public MemLayout { private: MemSegment segRefLocals = MemSegment(kMsRefLocals); /* callee saved register R19-R28 (10) */ - MemSegment segSpillReg = MemSegment(kMsSpillReg); MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */ MemSegment segGrSaveArea = MemSegment(kMsGrSaveArea); MemSegment segVrSaveArea = MemSegment(kMsVrSaveArea); @@ -208,6 +205,10 @@ class AArch64MemLayout : public MemLayout { void LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays); void LayoutEAVariales(std::vector &tempVar); void LayoutReturnRef(std::vector &returnDelays, int32 &structCopySize, int32 &maxParmStackSize); + + SymbolAlloc *CreateSymbolAlloc() const override { + return memAllocator->GetMemPool()->New(); + } }; } /* namespace maplebe */ diff --git 
a/src/mapleall/maple_be/include/cg/aarch64/aarch64_mop_split.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mop_split.h new file mode 100644 index 0000000000000000000000000000000000000000..63febdbb5710490e45ac08de416064647ffe55d2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mop_split.h @@ -0,0 +1,698 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. +*/ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MOP_SPLIT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MOP_SPLIT_H + +namespace maplebe { +// Supply a new reg operand for insn split process, which type is kRegTyInt for immediate. +// Before regalloc: create a new virtual reg; +// After regalloc: use R16 to be a temporary physical reg. +inline RegOperand *GetSplitBaseReg(bool isAfterRegAlloc, bool is64Bits, OperandBuilder *opndBuilder) { + RegOperand *resOpnd = nullptr; + if (!isAfterRegAlloc) { + resOpnd = &opndBuilder->CreateVReg((is64Bits ? k64BitSize : k32BitSize), kRegTyInt); + } else { + resOpnd = &opndBuilder->CreatePReg(R16, (is64Bits ? k64BitSize : k32BitSize), kRegTyInt); + } + return resOpnd; +} + +// Judging valid range of the immediate by passing in bitLen & forPair parameter, return the closest valid value to +// ofstVal, getting the remainder simultaneously. The valid value will be input in new memopnd, and the remainder +// will be input in add insn. 
+inline ImmOperand &SplitGetRemained(const MemOperand &memOpnd, uint32 bitLen, int64 ofstVal, bool forPair, + OperandBuilder *opndBuilder) { + // opndVal == Q0 * 32760(16380) + R0 + // R0 == Q1 * 8(4) + R1 + // ADDEND == Q0 * 32760(16380) + R1 + // NEW_OFFSET = Q1 * 8(4) + // we want to generate two instructions: + // ADD TEMP_REG, X29, ADDEND + // LDR/STR TEMP_REG, [ TEMP_REG, #NEW_OFFSET ] + int32 maxPimm = 0; + if (!forPair) { + maxPimm = MemOperand::GetMaxPIMM(bitLen); + } else { + maxPimm = MemOperand::GetMaxPairPIMM(bitLen); + } + ASSERT(maxPimm != 0, "get max pimm failed"); + int64 q0 = ofstVal / maxPimm + (ofstVal < 0 ? -1 : 0); + int64 addend = q0 * maxPimm; + uint64 r0 = static_cast(ofstVal - addend); + uint64 alignment = static_cast(static_cast(MemOperand::GetImmediateOffsetAlignment(bitLen))); + auto q1 = r0 >> alignment; + auto r1 = static_cast(r0 & ((1u << alignment) - 1)); + auto remained = static_cast(q1 << alignment); + addend = addend + r1; + if (addend > 0) { + uint64 suffixClear = 0xfff; + if (forPair) { + suffixClear = 0xff; + } + int64 remainedTmp = remained + static_cast(static_cast(addend) & suffixClear); + if (!MemOperand::IsPIMMOffsetOutOfRange(static_cast(remainedTmp), bitLen) && + ((static_cast(remainedTmp) & ((1u << alignment) - 1)) == 0)) { + addend = static_cast(static_cast(addend) & ~suffixClear); + } + } + ImmOperand &immAddend = opndBuilder->CreateImm(k64BitSize, addend, true); + if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + immAddend.SetVary(kUnAdjustVary); + } + return immAddend; +} + +// Split Add Insn add reg, reg, #imm, steps as follows: +// If #imm value range 0 ~ 2^24 - 1, insn will be split into add reg, reg, #imm(, LSL 12) +// If #imm value out of range 2^24, insn will be split as follows: +// add x0, x1, #imm ====> mov x2, #imm +// add x0, x1, x2 +inline void AddInsnSplit(Insn *insn, bool is64Bits, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + if 
(insn->VerifySelf()) { return; } + Operand *opnd0 = &insn->GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn->GetOperand(kInsnSecondOpnd); + Operand *opnd2 = &insn->GetOperand(kInsnThirdOpnd); + ImmOperand *immOpnd = static_cast(opnd2); + MOperator mOpCode = MOP_undef; + if (immOpnd->IsNegative()) { + immOpnd->Negate(); + mOpCode = is64Bits ? MOP_xsubrri12 : MOP_wsubrri12; + insn->SetMOP(AArch64CG::kMd[mOpCode]); + if (!insn->VerifySelf()) { + insn->SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } + return; + } + BB *bb = insn->GetBB(); + // lower 24 bits has 1, higher bits are all 0 + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + // lower 12 bits and higher 12 bits both has 1 + Operand *newOpnd1 = opnd1; + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + // process higher 12 bits + ImmOperand &immOpnd2 = opndBuilder->CreateImm(immOpnd->GetSize(), + static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits), immOpnd->IsSignedValue()); + mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24; + RegOperand *tmpRes = GetSplitBaseReg(isAfterRegAlloc, is64Bits, opndBuilder); + BitShiftOperand &shiftopnd = opndBuilder->CreateBitShift(BitShiftOperand::kShiftLSL, k12BitSize, k64BitSize); + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *tmpRes, *opnd1, immOpnd2, shiftopnd); + newInsn.SetBB(bb); + ASSERT(newInsn.VerifySelf(), "immOpnd2 appears invalid"); + (void)bb->InsertInsnBefore(*insn, newInsn); + // get lower 12 bits value + immOpnd->ModuloByPow2(kMaxImmVal12Bits); + newOpnd1 = tmpRes; + } + // process lower 12 bits value + mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12; + // It`s worth noting that if immOpnd->IsInBitSize(12, 12) returns true, gcc assembler can compile correctly, + // so we pass immOpnd directly as a parameter. 
+ Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *opnd0, *newOpnd1, *immOpnd); + newInsn.SetBB(bb); + ASSERT(newInsn.VerifySelf(), "immOpnd appears invalid"); + bb->ReplaceInsn(*insn, newInsn); + return; + } else { + // load into register + int64 immVal = immOpnd->GetValue(); + int32 tail0bitNum = AArch64isa::GetTail0BitNum(immVal); + int32 head0bitNum = AArch64isa::GetHead0BitNum(immVal); + const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum; + RegOperand *movOpnd = GetSplitBaseReg(isAfterRegAlloc, is64Bits, opndBuilder); + regno_t regNO0 = static_cast(opnd1)->GetRegisterNumber(); + // The content of the next if code block is when immvalue can be moved in one insn, we do next: + // add x0, x1, #imm1 ====> mov x2, #imm2 + // add x0, x1, x2, LSL + // #imm2 = #imm1 >> shift + // addrrrs do not support sp + if (bitNum <= k16BitSizeInt && regNO0 != RSP) { + int64 newImm = static_cast((static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF); + ImmOperand &immOpnd1 = opndBuilder->CreateImm(k16BitSize, newImm, false); + mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = insnBuilder->BuildInsn(mOpCode, *movOpnd, immOpnd1); + movInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*insn, movInsn); + mOpCode = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; + // bitLen means bitshiftopnd size: 64bits -> 6, 32bits ->5 + uint32 bitLen = is64Bits ? k6BitSize : k5BitSize; + BitShiftOperand &bitShiftOpnd = opndBuilder->CreateBitShift(BitShiftOperand::kShiftLSL, + static_cast(tail0bitNum), bitLen); + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *opnd0, *opnd1, *movOpnd, bitShiftOpnd); + newInsn.SetBB(bb); + bb->ReplaceInsn(*insn, newInsn); + return; + } + mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = insnBuilder->BuildInsn(mOpCode, *movOpnd, *immOpnd); + movInsn.SetBB(bb); + mOpCode = is64Bits ? 
MOP_xaddrrr : MOP_waddrrr; + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *opnd0, *opnd1, *movOpnd); + newInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*insn, movInsn); + // If #imm of mov is invalid, split mov insn + if (!movInsn.VerifySelf()) { + movInsn.SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } + bb->ReplaceInsn(*insn, newInsn); + } +} + +// Split Sub Insn sub reg, reg, #imm, the same split steps as add +inline void SubInsnSplit(Insn *insn, bool is64Bits, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + if (insn->VerifySelf()) { return; } + Operand *opnd0 = &insn->GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn->GetOperand(kInsnSecondOpnd); + Operand *opnd2 = &insn->GetOperand(kInsnThirdOpnd); + ImmOperand *immOpnd = static_cast(opnd2); + MOperator mOpCode = MOP_undef; + if (immOpnd->IsNegative()) { + immOpnd->Negate(); + mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12; + insn->SetMOP(AArch64CG::kMd[mOpCode]); + if (!insn->VerifySelf()) { + insn->SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } + return; + } + BB *bb = insn->GetBB(); + int64 higher12BitVal = static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits); + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) { + // SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers + // SUB Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers + // imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + // aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + // large offset is treated as sub (higher 12 bits + 4096) + add + // it gives opportunities for combining add + ldr due to the characteristics of aarch64's load/store + bool isSplitSub = false; + Operand *newOpnd1 = opnd1; + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + isSplitSub = true; + // process higher 12 bits + ImmOperand &immOpnd2 = + 
opndBuilder->CreateImm(immOpnd->GetSize(), higher12BitVal + 1, immOpnd->IsSignedValue()); + mOpCode = is64Bits ? MOP_xsubrri24 : MOP_wsubrri24; + RegOperand *resOpnd = GetSplitBaseReg(isAfterRegAlloc, is64Bits, opndBuilder); + BitShiftOperand &shiftopnd = opndBuilder->CreateBitShift(BitShiftOperand::kShiftLSL, k12BitSize, k64BitSize); + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *resOpnd, *opnd1, immOpnd2, shiftopnd); + newInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*insn, newInsn); + immOpnd->ModuloByPow2(kMaxImmVal12Bits); + immOpnd->SetValue(static_cast(kMax12UnsignedImm) - immOpnd->GetValue()); + newOpnd1 = resOpnd; + } + // process lower 12 bits + mOpCode = isSplitSub ? (is64Bits ? MOP_xaddrri12 : MOP_waddrri12) : (is64Bits ? MOP_xsubrri12 : MOP_wsubrri12); + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *opnd0, *newOpnd1, *immOpnd); + newInsn.SetBB(bb); + bb->ReplaceInsn(*insn, newInsn); + return; + } else { + // load into register + int64 immVal = immOpnd->GetValue(); + int32 tail0bitNum = AArch64isa::GetTail0BitNum(immVal); + int32 head0bitNum = AArch64isa::GetHead0BitNum(immVal); + const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum; + RegOperand *movOpnd = GetSplitBaseReg(isAfterRegAlloc, is64Bits, opndBuilder); + // The content of the next if code block is when immvalue can be moved in one insn, we do next: + // sub x0, x1, #imm1 ====> mov x2, #imm2 + // sub x0, x1, x2, LSL + // #imm2 = #imm1 >> shift + // subrrrs supports sp, so do not need to check whether regNo is RSP + if (bitNum <= k16BitSizeInt) { + int64 newImm = static_cast((static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF); + ImmOperand &immOpnd1 = opndBuilder->CreateImm(k16BitSize, newImm, false); + mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = insnBuilder->BuildInsn(mOpCode, *movOpnd, immOpnd1); + movInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*insn, movInsn); + mOpCode = is64Bits ? 
MOP_xsubrrrs : MOP_wsubrrrs; + // bitLen means bitshiftopnd size: 64bits -> 6, 32bits ->5 + uint32 bitLen = is64Bits ? k6BitSize : k5BitSize; + BitShiftOperand &bitShiftOpnd = opndBuilder->CreateBitShift(BitShiftOperand::kShiftLSL, + static_cast(tail0bitNum), bitLen); + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *opnd0, *opnd1, *movOpnd, bitShiftOpnd); + newInsn.SetBB(bb); + bb->ReplaceInsn(*insn, newInsn); + return; + } + mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = insnBuilder->BuildInsn(mOpCode, *movOpnd, *immOpnd); + movInsn.SetBB(bb); + mOpCode = is64Bits ? MOP_xsubrrr : MOP_wsubrrr; + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *opnd0, *opnd1, *movOpnd); + newInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*insn, movInsn); + // If #imm of mov is invalid, split mov insn + if (!movInsn.VerifySelf()) { + movInsn.SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } + bb->ReplaceInsn(*insn, newInsn); + } +} + +// adds & subs updates the condition flags based on the result, so it cannot be split into multiple instructions +// of the same type, because we don`t know when the result is out of range and changes flags. The solution is: +// adds/subs x0, x1, #imm ====> mov x2, #imm +// adds/subs x0, x1, x2 +// isAdds: true -> adds, false -> subs +inline void AddsSubsInsnSplit(Insn *insn, bool isAdds, bool is64Bits, bool isAfterRegAlloc, + InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) { + if (insn->VerifySelf()) { return; } + Operand *opnd0 = &insn->GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn->GetOperand(kInsnSecondOpnd); + Operand *opnd2 = &insn->GetOperand(kInsnThirdOpnd); + Operand *opnd3 = &insn->GetOperand(kInsnFourthOpnd); + ImmOperand *immOpnd = static_cast(opnd3); + MOperator mOpCode = MOP_undef; + BB *bb = insn->GetBB(); + RegOperand *movOpnd = GetSplitBaseReg(isAfterRegAlloc, is64Bits, opndBuilder); + mOpCode = is64Bits ? 
MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = insnBuilder->BuildInsn(mOpCode, *movOpnd, *immOpnd); + movInsn.SetBB(bb); + mOpCode = isAdds ? (is64Bits ? MOP_xaddsrrr : MOP_waddsrrr) : (is64Bits ? MOP_xsubsrrr : MOP_wsubsrrr); + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, *opnd0, *opnd1, *opnd2, *movOpnd); + newInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*insn, movInsn); + // If #imm of mov is invalid, split mov first. + if (!movInsn.VerifySelf()) { + movInsn.SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } + (void)bb->InsertInsnBefore(*insn, newInsn); + bb->RemoveInsn(*insn); +} + +// Split Add/Sub Insn with BitShiftOperand (LSL 12), steps as follows: +// add/sub x0, x1, #imm, LSL 12 ====> add/sub x0, x1, #newimm ====> Add/SubInsnSplit +// isAdd: true -> add, false -> sub +inline void AddSubWithLslSplit(Insn *insn, bool isAdd, bool is64Bits, bool isAfterRegAlloc, + InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) { + if (insn->VerifySelf()) { return; } + uint32 size = is64Bits ? k64BitSize : k32BitSize; + ImmOperand &immOpnd = static_cast(insn->GetOperand(kInsnThirdOpnd)); + // add/sub x0, x1, #imm, LSL 12 ====> imm value range can be split: 2^12 ~ 2^52 - 1 + // add/sub w0, w1, #imm, LSL 12 ====> imm value range can be split: 2^12 ~ 2^20 - 1 + // If imm is out of range, insn will not be split + if (!immOpnd.IsInBitSize((size - k12BitSize), k0BitSize)) { + return; + } + BB *bb = insn->GetBB(); + ImmOperand &newImmOpnd = opndBuilder->CreateImm(size, static_cast(immOpnd.GetValue()) << k12BitSize); + MOperator mOpCode = is64Bits ? (isAdd ? MOP_xaddrri12 : MOP_xsubrri12) : (isAdd ? 
MOP_waddrri12 : MOP_wsubrri12); + Insn &newInsn = insnBuilder->BuildInsn(mOpCode, insn->GetOperand(kInsnFirstOpnd), + insn->GetOperand(kInsnSecondOpnd), newImmOpnd); + newInsn.SetBB(bb); + bb->ReplaceInsn(*insn, newInsn); + if (!newInsn.VerifySelf()) { + newInsn.SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } +} + +// Split memoperand with invalid offset value to a new valid memoperand and add insn with remainder. +inline MemOperand &MemOfstSplitWithAdd(const MemOperand &memOpnd, uint32 bitLen, bool isAfterRegAlloc, + Insn *insn, bool forPair, InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) { + ASSERT((memOpnd.GetAddrMode() == MemOperand::kBOI), "expect kBOI memOpnd"); + ASSERT(memOpnd.IsIntactIndexed(), "expect intactIndexed memOpnd"); + BB *bb = insn->GetBB(); + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + int64 ofstVal = ofstOpnd->GetOffsetValue(); + RegOperand *resOpnd = GetSplitBaseReg(isAfterRegAlloc, true, opndBuilder); + ImmOperand &immAddend = SplitGetRemained(memOpnd, bitLen, ofstVal, forPair, opndBuilder); + int64 remained = (ofstVal - immAddend.GetValue()); + RegOperand *origBaseReg = memOpnd.GetBaseRegister(); + ASSERT(origBaseReg != nullptr, "nullptr check"); + // Provide add insn to split offset, where add insn is 64-bit explicitly + Insn &addInsn = insnBuilder->BuildInsn(MOP_xaddrri12, *resOpnd, *origBaseReg, immAddend); + addInsn.SetBB(bb); + bb->InsertInsnBefore(*insn, addInsn); + if (!addInsn.VerifySelf()) { + addInsn.SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } + ImmOperand &remainedOpnd = opndBuilder->CreateImm(k32BitSize, remained, true); + MemOperand &newMemOpnd = opndBuilder->CreateMem(bitLen, *resOpnd, remainedOpnd); + newMemOpnd.SetStackMem(memOpnd.IsStackMem()); + return newMemOpnd; +} + +// Split a load/store insn with invalid offset value to a new valid insn and add insn. 
+// idx: memOperand index of insn +inline void LoadStoreInsnSplit(Insn *insn, uint32 idx, bool forPair, bool isAfterRegAlloc, + InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) { + if (insn->VerifySelf()) { return; } + MemOperand &memOpnd = static_cast(insn->GetOperand(idx)); + if (!(memOpnd.GetAddrMode() == MemOperand::kBOI && memOpnd.IsIntactIndexed())) { + return; + } + uint32 bitLen = insn->GetDesc()->GetOpndDes(idx)->GetSize(); + MemOperand &newMemOpnd = MemOfstSplitWithAdd(memOpnd, bitLen, isAfterRegAlloc, insn, forPair, + insnBuilder, opndBuilder); + insn->SetOperand(idx, newMemOpnd); +} + +// ccmp updates the condition flags based on the result, so it cannot be split into multiple instructions +// of the same type, because we don`t know when the result is out of range and changes flags. The solution is: +// ccmp , #, #, ====> mov , # +// ccmp , , #, +inline void CondCompareInsnSplit(Insn *insn, bool is64Bits, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + if (insn->VerifySelf()) { return; } + ImmOperand &immOpnd = static_cast(insn->GetOperand(kInsnSecondOpnd)); + MOperator mOpCode = MOP_undef; + BB *bb = insn->GetBB(); + RegOperand *movOpnd = GetSplitBaseReg(isAfterRegAlloc, is64Bits, opndBuilder); + mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = insnBuilder->BuildInsn(mOpCode, *movOpnd, immOpnd); + movInsn.SetBB(bb); + bb->InsertInsnBefore(*insn, movInsn); + if (!movInsn.VerifySelf()) { + movInsn.SplitSelf(isAfterRegAlloc, insnBuilder, opndBuilder); + } + mOpCode = is64Bits ? MOP_xccmprric : MOP_wccmprric; + insn->SetMOP(AArch64CG::kMd[mOpCode]); + insn->SetOperand(kInsnSecondOpnd, *movOpnd); +} + +// split mov w0, #imm to mov and movk +inline void MOP_wmovri32Split(Insn *curInsn, bool /* isAfterRegAlloc */, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + // If higher 32bits of immVal have 1, we will truncate and keep lower 32 bits. 
+ int64 immVal = (static_cast(curInsn->GetOperand(kInsnSecondOpnd)).GetValue()) & 0x00000000FFFFFFFFULL; + ImmOperand &immOpnd = opndBuilder->CreateImm(k64BitSize, immVal, true); + curInsn->SetOperand(kInsnSecondOpnd, immOpnd); + if (curInsn->VerifySelf()) { return; } + RegOperand &destReg = static_cast(curInsn->GetOperand(kInsnFirstOpnd)); + auto *bb = curInsn->GetBB(); + uint64 chunkVal0 = static_cast(immVal) & 0x0000FFFFULL; + ImmOperand &src0 = opndBuilder->CreateImm(k16BitSize, static_cast(chunkVal0), false); + Insn &movInsn = insnBuilder->BuildInsn(MOP_wmovri32, destReg, src0); + movInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*curInsn, movInsn); + uint64 chunkVal1 = (static_cast(immVal) >> k16BitSize) & 0x0000FFFFULL; + ImmOperand &src16 = opndBuilder->CreateImm(k16BitSize, static_cast(chunkVal1), false); + BitShiftOperand *lslOpnd = &opndBuilder->CreateBitShift(BitShiftOperand::kShiftLSL, + static_cast(k16BitSize), 6); + Insn &movkInsn = insnBuilder->BuildInsn(MOP_wmovkri16, destReg, src16, *lslOpnd); + movkInsn.SetBB(bb); + (void)bb->InsertInsnBefore(*curInsn, movkInsn); + bb->RemoveInsn(*curInsn); +} + +// split mov x0, #imm to movz/movn and movk +inline void MOP_xmovri64Split(Insn *curInsn, bool /* isAfterRegAlloc */, + InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) { + if (curInsn->VerifySelf()) { return; } + RegOperand &destReg = static_cast(curInsn->GetOperand(kInsnFirstOpnd)); + int64 immVal = static_cast(curInsn->GetOperand(kInsnSecondOpnd)).GetValue(); + bool useMovz = BetterUseMOVZ(static_cast(immVal)); + bool useMovk = false; + // get lower 32 bits of the immediate + uint64 chunkLval = static_cast(immVal) & 0xFFFFFFFFULL; + // get upper 32 bits of the immediate + uint64 chunkHval = (static_cast(immVal) >> k32BitSize) & 0xFFFFFFFFULL; + int32 maxLoopTime = 4; + if (chunkLval == chunkHval) { + // compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed + maxLoopTime = 2; + } + uint64 sa = 0; + auto *bb = 
curInsn->GetBB(); + for (int64 i = 0 ; i < maxLoopTime; ++i, sa += k16BitSize) { + // create an imm opereand which represents the i-th 16-bit chunk of the immediate + uint64 chunkVal = (static_cast(immVal) >> sa) & 0x0000FFFFULL; + if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) { + continue; + } + ImmOperand &src16 = opndBuilder->CreateImm(k16BitSize, static_cast(chunkVal), false); + BitShiftOperand *lslOpnd = &opndBuilder->CreateBitShift(BitShiftOperand::kShiftLSL, static_cast(sa), 6); + Insn *newInsn = nullptr; + if (!useMovk) { + // use movz or movn + if (!useMovz) { + src16.BitwiseNegate(); + } + MOperator mOpCode = useMovz ? MOP_xmovzri16 : MOP_xmovnri16; + newInsn = &insnBuilder->BuildInsn(mOpCode, destReg, src16, *lslOpnd); + newInsn->SetBB(bb); + useMovk = true; + } else { + newInsn = &insnBuilder->BuildInsn(MOP_xmovkri16, destReg, src16, *lslOpnd); + newInsn->SetBB(bb); + } + (void)bb->InsertInsnBefore(*curInsn, *newInsn); + } + if (maxLoopTime == 2) { + // copy lower 32 bits to higher 32 bits + ImmOperand &immOpnd = opndBuilder->CreateImm(k8BitSize, k32BitSize, false); + Insn &insn = insnBuilder->BuildInsn(MOP_xbfirri6i6, destReg, destReg, immOpnd, immOpnd); + insn.SetBB(bb); + (void)bb->InsertInsnBefore(*curInsn, insn); + } + bb->RemoveInsn(*curInsn); +} + +inline void MOP_xaddrri24Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddSubWithLslSplit(insn, true, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xaddrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddInsnSplit(insn, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xaddsrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddsSubsInsnSplit(insn, true, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_waddrri24Split(Insn *insn, bool isAfterRegAlloc, + 
InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) { + AddSubWithLslSplit(insn, true, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_waddrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddInsnSplit(insn, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_waddsrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddsSubsInsnSplit(insn, true, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xsubrri24Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddSubWithLslSplit(insn, false, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xsubrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + SubInsnSplit(insn, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xsubsrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddsSubsInsnSplit(insn, false, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wsubrri24Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddSubWithLslSplit(insn, false, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wsubrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + SubInsnSplit(insn, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wsubsrri12Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + AddsSubsInsnSplit(insn, false, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wldrsbSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, 
isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xldrsbSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wldrbSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wldrshSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xldrshSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xldrswSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wldrhSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wldrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xldrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_bldrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, 
isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_hldrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_sldrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_dldrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_qldrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wldpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xldpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xldpswSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_sldpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_dldpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, 
insnBuilder, opndBuilder); +} + +inline void MOP_qldpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wccmpriicSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + CondCompareInsnSplit(insn, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xccmpriicSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + CondCompareInsnSplit(insn, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wstrbSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wstrhSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wstrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xstrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_sstrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_dstrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline 
void MOP_qstrSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnSecondOpnd, false, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_wstpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_xstpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_sstpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_dstpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} + +inline void MOP_qstpSplit(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, + OperandBuilder *opndBuilder) { + LoadStoreInsnSplit(insn, kInsnThirdOpnd, true, isAfterRegAlloc, insnBuilder, opndBuilder); +} +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MOP_SPLIT_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_mop_valid.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mop_valid.h new file mode 100644 index 0000000000000000000000000000000000000000..9d364431f555f4daf41214e591f3d154381a9894 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mop_valid.h @@ -0,0 +1,1033 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. +*/ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MOP_VALID_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MOP_VALID_H +#include "aarch64_imm_valid.h" +#include "aarch64_isa.h" +#include "mempool_allocator.h" +#include "operand.h" + +namespace maplebe { +// Immediate verification for a byte from/to memory. simm: -256 ~ 255; pimm: 0 ~ 4095. +inline bool StrLdr8Valid(Operand *o) { + return StrLdrInsnSignedOfstValid(AArch64isa::GetMemOpndOffsetValue(o), k0ByteSize, + static_cast(o)->IsIntactIndexed()); +} + +// Immediate verification for half word from/to memory. simm: -256 ~ 255; pimm: 0 ~ 8190, multiple of 2. +inline bool StrLdr16Valid(Operand *o) { + return StrLdrInsnSignedOfstValid(AArch64isa::GetMemOpndOffsetValue(o), k1ByteSize, + static_cast(o)->IsIntactIndexed()); +} + +// Immediate verification for a word from/to memory. simm: -256 ~ 255; pimm: 0 ~ 16380, multiple of 4. +inline bool StrLdr32Valid(Operand *o) { + return StrLdrInsnSignedOfstValid(AArch64isa::GetMemOpndOffsetValue(o), k2ByteSize, + static_cast(o)->IsIntactIndexed()); +} + +// Immediate verification: value range -256 ~ 252, multiple of 4. +inline bool StrLdr32PairValid(Operand *o) { + int64 value = AArch64isa::GetMemOpndOffsetValue(o); + if ((value <= kMaxSimm32Pair) && (value >= kMinSimm32)) { + return (static_cast(value) & 3) > 0 ? false : true; + } + return false; +} + +// Immediate verification for 2 words from/to memory. simm: -256 ~ 255; pimm: 0 ~ 32760, multiple of 8. 
+inline bool StrLdr64Valid(Operand *o) { + return StrLdrInsnSignedOfstValid(AArch64isa::GetMemOpndOffsetValue(o), k3ByteSize, + static_cast(o)->IsIntactIndexed()); +} + +// Immediate verification: value range -512 ~ 504, multiple of 8. +inline bool StrLdr64PairValid(Operand *o) { + int64 value = AArch64isa::GetMemOpndOffsetValue(o); + if (value <= kMaxSimm64Pair && (value >= kMinSimm64)) { + return (static_cast(value) & 7) > 0 ? false : true; + } + return false; +} + +// Immediate verification for 4 words from/to memory. simm: -256 ~ 255; pimm: 0 ~ 65520, multiple of 16. +inline bool StrLdr128Valid(Operand *o) { + return StrLdrInsnSignedOfstValid(AArch64isa::GetMemOpndOffsetValue(o), k4ByteSize, + static_cast(o)->IsIntactIndexed()); +} + +// Immediate verification: value range -1024 ~ 1008, multiple of 16. +inline bool StrLdr128PairValid(Operand *o) { + int64 value = AArch64isa::GetMemOpndOffsetValue(o); + if (value < k1024BitSize && (value >= kNegative1024BitSize)) { + return (static_cast(value) & 0xf) > 0 ? false : true; + } + return false; +} + +// Load-Acquire & Store-Release & Load/Store-Exclusive offset value must be #0 if not absent +inline bool IsOfstZero(Operand *o) { + int64 value = AArch64isa::GetMemOpndOffsetValue(o); + if (value == static_cast(k0BitSize)) { + return true; + } + return false; +} + +inline bool MOP_wmovri32Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + IsSingleInstructionMovable32(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : + true; + return checkSecond; +} + +inline bool MOP_xmovri64Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + IsSingleInstructionMovable64(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : + true; + return checkSecond; +} + +inline bool MOP_xaddrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? 
+ BitShift6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xaddsrrrsValid(const MapleVector &opnds) { + bool checkFifth = (opnds[kInsnFifthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFifthOpnd])->GetValue()) : true; + return checkFifth; +} + +inline bool MOP_xxwaddrrreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_xaddrri24Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + LeftShift12Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; +} + +inline bool MOP_xaddrri12Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xaddsrri12Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_waddrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_waddsrrrsValid(const MapleVector &opnds) { + bool checkFifth = (opnds[kInsnFifthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFifthOpnd])->GetValue()) : true; + return checkFifth; +} + +inline bool MOP_wwwaddrrreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? 
+ ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_waddrri24Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + LeftShift12Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; +} + +inline bool MOP_waddrri12Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_waddsrri12Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xsubrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xsubsrrrsValid(const MapleVector &opnds) { + bool checkFifth = (opnds[kInsnFifthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFifthOpnd])->GetValue()) : true; + return checkFifth; +} + +inline bool MOP_xsubrri24Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + LeftShift12Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; +} + +inline bool MOP_xsubsrri24Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + bool checkFifth = (opnds[kInsnFifthOpnd] != nullptr) ? 
+ LeftShift12Valid(static_cast(opnds[kInsnFifthOpnd])->GetValue()) : true; + return checkFourth && checkFifth; +} + +inline bool MOP_xsubrri12Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xsubsrri12Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_wsubrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_wsubsrrrsValid(const MapleVector &opnds) { + bool checkFifth = (opnds[kInsnFifthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFifthOpnd])->GetValue()) : true; + return checkFifth; +} + +inline bool MOP_wsubrri24Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + LeftShift12Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; +} + +inline bool MOP_wsubsrri24Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + bool checkFifth = (opnds[kInsnFifthOpnd] != nullptr) ? + LeftShift12Valid(static_cast(opnds[kInsnFifthOpnd])->GetValue()) : true; + return checkFourth && checkFifth; +} + +inline bool MOP_wsubrri12Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
+ Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wsubsrri12Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xxwsubrrreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_wwwsubrrreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_xandrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xandrri13Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm13BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wandrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_wandrri12Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xiorrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xiorrri13Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
+ Imm13BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wiorrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_wiorrri12Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xeorrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xeorrri13Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm13BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_weorrrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_weorrri12Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wubfxrri5i5Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb5BitValid(lsb) && Width5BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb5BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? 
+ Width5BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_xubfxrri6i6Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb6BitValid(lsb) && Width6BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb6BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Width6BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_wsbfxrri5i5Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb5BitValid(lsb) && Width5BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb5BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Width5BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_xsbfxrri6i6Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb6BitValid(lsb) && Width6BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb6BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? 
+ Width6BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_wubfizrri5i5Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb5BitValid(lsb) && Width5BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb5BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Width5BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_xubfizrri6i6Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb6BitValid(lsb) && Width6BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb6BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Width6BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_xsbfizrri6i6Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb6BitValid(lsb) && Width6BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb6BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? 
+ Width6BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_wbfirri5i5Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb5BitValid(lsb) && Width5BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb5BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Width5BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_xbfirri6i6Valid(const MapleVector &opnds) { + if ((opnds[kInsnThirdOpnd] != nullptr) && (opnds[kInsnFourthOpnd] != nullptr)) { + int64 lsb = static_cast(opnds[kInsnThirdOpnd])->GetValue(); + int64 width = static_cast(opnds[kInsnFourthOpnd])->GetValue(); + return Lsb6BitValid(lsb) && Width6BitValid(width, lsb); + } else { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Lsb6BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Width6BitOnlyValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; + } +} + +inline bool MOP_xlslrri6Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift6BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wlslrri5Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift5BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xasrrri6Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
+ BitShift6BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wasrrri5Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift5BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xlsrrri6Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift6BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wlsrrri5Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift5BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wtstri32Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xtstri64Valid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm13BitMaskValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wextrrrri5Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Lsb5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xextrrrri6Valid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Lsb6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_winegrrsValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xinegrrsValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
+ BitShift6BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wldrsbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr8Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xldrsbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr8Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldrbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr8Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldrshValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr16Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xldrshValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr16Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xldrswValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr32Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldrhValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr16Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr32Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xldrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr64Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_bldrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? 
StrLdr8Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_hldrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr16Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_sldrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr32Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_dldrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr64Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_qldrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr128Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr32PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_xldpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr64PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_xldpswValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr32PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_sldpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr32PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_dldpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr64PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_qldpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
StrLdr128PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wldarbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldarhValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldarValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xldarValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wmovkri16Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + Imm16BitValid(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + ImmShift32Valid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkSecond && checkThird; +} + +inline bool MOP_xmovkri16Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + Imm16BitValid(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + ImmShift64Valid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkSecond && checkThird; +} + +inline bool MOP_wmovzri16Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + Imm16BitValid(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
+ ImmShift32Valid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkSecond && checkThird; +} + +inline bool MOP_xmovzri16Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + Imm16BitValid(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + ImmShift64Valid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkSecond && checkThird; +} + +inline bool MOP_wmovnri16Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + Imm16BitValid(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + ImmShift32Valid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkSecond && checkThird; +} + +inline bool MOP_xmovnri16Valid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + Imm16BitValid(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + ImmShift64Valid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkSecond && checkThird; +} + +inline bool MOP_wldxrbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldxrhValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldxrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xldxrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? 
IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldaxrbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldaxrhValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldaxrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xldaxrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wldaxpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_xldaxpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wcmpriValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wcmprrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_wwcmprreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_xcmpriValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
+ Imm16BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xcmprrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xwcmprreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_wccmpriicValid(const MapleVector &opnds) { + // Is a five bit unsigned (positive) immediate, range 0 to 31 + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift5BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Nzcv4BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; +} + +inline bool MOP_wccmprricValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Nzcv4BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xccmpriicValid(const MapleVector &opnds) { + // Is a five bit unsigned (positive) immediate, range 0 to 31 + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + BitShift5BitValidImm(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Nzcv4BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkThird && checkFourth; +} + +inline bool MOP_xccmprricValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + Nzcv4BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_wcmnriValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
+ Imm12BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_wcmnrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift5BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_wwcmnrreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_xcmnriValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? + Imm16BitValid(static_cast(opnds[kInsnThirdOpnd])->GetValue()) : true; + return checkThird; +} + +inline bool MOP_xcmnrrsValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + BitShift6BitValid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : true; + return checkFourth; +} + +inline bool MOP_xwcmnrreValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? + ExtendShift0To4Valid(static_cast(opnds[kInsnFourthOpnd])->GetValue()) : + true; + return checkFourth; +} + +inline bool MOP_wtbnzValid(const MapleVector &opnds) { + // Is the bit number to be tested, in the range 0 to 63 + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + BitShift6BitValidImm(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + return checkSecond; +} + +inline bool MOP_xtbnzValid(const MapleVector &opnds) { + // Is the bit number to be tested, in the range 0 to 63 + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + BitShift6BitValidImm(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + return checkSecond; +} + +inline bool MOP_wtbzValid(const MapleVector &opnds) { + // Is the bit number to be tested, in the range 0 to 63 + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? 
+ BitShift6BitValidImm(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + return checkSecond; +} + +inline bool MOP_xtbzValid(const MapleVector &opnds) { + // Is the bit number to be tested, in the range 0 to 63 + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? + BitShift6BitValidImm(static_cast(opnds[kInsnSecondOpnd])->GetValue()) : true; + return checkSecond; +} + +inline bool MOP_wstrbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr8Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wstrhValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr16Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wstrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr32Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xstrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr64Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_sstrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr32Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_dstrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr64Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_qstrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? StrLdr128Valid(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wstpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr32PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_xstpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
StrLdr64PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_sstpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr32PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_dstpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr64PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_qstpValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? StrLdr128PairValid(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wstlrbValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wstlrhValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wstlrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_xstlrValid(const MapleVector &opnds) { + bool checkSecond = (opnds[kInsnSecondOpnd] != nullptr) ? IsOfstZero(opnds[kInsnSecondOpnd]) : true; + return checkSecond; +} + +inline bool MOP_wstxrbValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wstxrhValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wstxrValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? 
IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_xstxrValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wstlxrbValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wstlxrhValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wstlxrValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_xstlxrValid(const MapleVector &opnds) { + bool checkThird = (opnds[kInsnThirdOpnd] != nullptr) ? IsOfstZero(opnds[kInsnThirdOpnd]) : true; + return checkThird; +} + +inline bool MOP_wstlxpValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? IsOfstZero(opnds[kInsnFourthOpnd]) : true; + return checkFourth; +} + +inline bool MOP_xstlxpValid(const MapleVector &opnds) { + bool checkFourth = (opnds[kInsnFourthOpnd] != nullptr) ? IsOfstZero(opnds[kInsnFourthOpnd]) : true; + return checkFourth; +} + +inline bool MOP_brkValid(const MapleVector &opnds) { + bool checkFirst = (opnds[kInsnFirstOpnd] != nullptr) ? 
+ Imm16BitValidImm(static_cast(opnds[kInsnFirstOpnd])->GetValue()) : true; + return checkFirst; +} +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MOP_VALID_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h index 9b3743b671d2543eae9e5804984b631a06fe5379..be159ce8f9d282d43295685aeac5b8c17358cfe6 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h @@ -29,12 +29,14 @@ class AArch64FPLROffsetAdjustment : public FrameFinalize { isLmbc(aarchCGFunc->GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc), stackBaseReg((isLmbc || aarchCGFunc->UseFP()) ? R29 : RSP) {} - ~AArch64FPLROffsetAdjustment() override = default; + ~AArch64FPLROffsetAdjustment() override { + aarchCGFunc = nullptr; + } void Run() override; private: - void AdjustmentOffsetForOpnd(Insn &insn); + void AdjustmentOffsetForOpnd(Insn &insn) const; void AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index) const; /* frame pointer(x29) is available as a general-purpose register if useFP is set as false */ void AdjustmentStackPointer(Insn &insn) const; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_opt_utiles.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_opt_utiles.h index 52aa468d8e5d6f178460fed68a13f90bd3a46d89..1706afa55979b3275c0eecf79834ce4a2a27a5fe 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_opt_utiles.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_opt_utiles.h @@ -46,6 +46,8 @@ enum LsMOpType : uint8 { kLwNeg, /* MOP_winegrr | MOP_winegrrs */ kLxIor, /* MOP_xiorrrr | MOP_xiorrrrs */ kLwIor, /* MOP_wiorrrr | MOP_wiorrrrs */ + kLxAnd, /* MOP_xandrrr | MOP_xandrrrs */ + kLwAnd, /* MOP_wandrrr | MOP_wandrrrs */ }; enum SuffixType : uint8 { diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h 
b/src/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h index 52ab2539cefc57769d8ccfea5859f4bccd33490b..0a1384cb65011012322bccc70f80dfb7677aa9a3 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h @@ -41,6 +41,7 @@ class AArch64InsnVisitor : public InsnVisitor { void ReTargetSuccBB(BB &bb, LabelIdx newTarget) const override; void FlipIfBB(BB &bb, LabelIdx ftLabel) const override; BB *CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const override; + void ModifyFathruBBToGotoBB(BB &bb, LabelIdx labelIdx) const override; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h index 4cf26dc9062f13d658d2027cebab1809085e67a9..536d896be1f6b97a6cd85565f00f97bbe5baa6b1 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -22,13 +22,13 @@ #include "mir_builder.h" namespace maplebe { -class AArch64CGPeepHole : CGPeepHole { +class AArch64CGPeepHole : public CGPeepHole { public: /* normal constructor */ AArch64CGPeepHole(CGFunc &f, MemPool *memPool) : CGPeepHole(f, memPool) {}; /* constructor for ssa */ AArch64CGPeepHole(CGFunc &f, MemPool *memPool, CGSSAInfo *cgssaInfo) : CGPeepHole(f, memPool, cgssaInfo) {}; - ~AArch64CGPeepHole() = default; + ~AArch64CGPeepHole() override = default; void Run() override; bool DoSSAOptimize(BB &bb, Insn &insn) override; @@ -454,6 +454,38 @@ class NormRevTbzToTbzPattern : public CGPeepPattern { Insn *tbzInsn = nullptr; }; +// Add/Sub & load/store insn mergence pattern: +// add x0, x0, #255 +// ldr w1, [x0] ====> ldr w1, [x0, #255]! +// +// stp w1, w2, [x0] +// sub x0, x0, #256 ====> stp w1, w2, [x0], #-256 +// If new load/store insn is invalid and should be split, the pattern optimization will not work. 
+class AddSubMergeLdStPattern : public CGPeepPattern { + public: + AddSubMergeLdStPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + AddSubMergeLdStPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~AddSubMergeLdStPattern() override = default; + std::string GetPatternName() override { + return "AddSubMergeLdStPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + + private: + bool CheckIfCanBeMerged(Insn *adjacentInsn, Insn &insn); + Insn *nextInsn = nullptr; + Insn *prevInsn = nullptr; + Insn *insnToBeReplaced = nullptr; + bool isAddSubFront = false; + bool isLdStFront = false; + bool isInsnAdd = false; + RegOperand *insnDefReg = nullptr; + RegOperand *insnUseReg = nullptr; +}; + class CombineSameArithmeticPattern : public CGPeepPattern { public: CombineSameArithmeticPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) @@ -828,17 +860,19 @@ class LsrAndToUbfxPattern : public CGPeepPattern { ~LsrAndToUbfxPattern() override = default; void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; + bool CheckIntersectedCondition(Insn &insn, Insn &prevInsn); std::string GetPatternName() override { return "LsrAndToUbfxPattern"; } private: Insn *prevInsn = nullptr; + bool isWXSumOutOfRange = false; }; /* * lsl w1, w2, #m - * and w3, w1, #2^n-1 ---> if n > m : ubfiz w3, w2, #m, #n-m + * and w3, w1, #[(2^n-1 << m) ~ (2^n-1)] ---> if n > m : ubfiz w3, w2, #m, #n-m * * and w1, w2, #2^n-1 ---> ubfiz w3, w2, #m, #n * lsl w3, w1, #m @@ -986,7 +1020,9 @@ class MulImmToShiftPattern : public CGPeepPattern { public: MulImmToShiftPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : CGPeepPattern(cgFunc, currBB, currInsn, info) {} - ~MulImmToShiftPattern() override = default; + ~MulImmToShiftPattern() override { + movInsn = nullptr; + } std::string 
GetPatternName() override { return "MulImmToShiftPattern"; } @@ -1027,13 +1063,14 @@ class CombineContiLoadAndStorePattern : public CGPeepPattern { * str x21, [x19, #16] */ bool IsRegNotSameMemUseInInsn(const Insn &insn, regno_t regNO, bool isStore, int64 baseOfst) const; - bool IsValidNormalLoadOrStorePattern(Insn &insn, Insn &prevInsn, MemOperand &memOpnd, int64 curOfstVal, int64 prevOfstVal); - bool IsValidStackArgLoadOrStorePattern(Insn &curInsn, Insn &prevInsn, MemOperand &curMemOpnd, MemOperand &prevMemOpnd, - int64 curOfstVal, int64 prevOfstVal); + bool IsValidNormalLoadOrStorePattern(const Insn &insn, const Insn &prevInsn, const MemOperand &memOpnd, + int64 curOfstVal, int64 prevOfstVal); + bool IsValidStackArgLoadOrStorePattern(const Insn &curInsn, const Insn &prevInsn, const MemOperand &curMemOpnd, + const MemOperand &prevMemOpnd, int64 curOfstVal, int64 prevOfstVal) const; MOperator GetNewMemMop(MOperator mop) const; Insn *GenerateMemPairInsn(MOperator newMop, RegOperand &curDestOpnd, RegOperand &prevDestOpnd, MemOperand &combineMemOpnd, bool isCurDestFirst); - bool FindUseX16AfterInsn(BB &bb, Insn &curInsn); + bool FindUseX16AfterInsn(const Insn &curInsn) const; void RemoveInsnAndKeepComment(BB &bb, Insn &insn, Insn &prevInsn) const; bool doAggressiveCombine = false; @@ -1347,7 +1384,9 @@ class AndCmpBranchesToCsetPattern : public CGPeepPattern { public: AndCmpBranchesToCsetPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : CGPeepPattern(cgFunc, currBB, currInsn, info) {} - ~AndCmpBranchesToCsetPattern() override = default; + ~AndCmpBranchesToCsetPattern() override { + prevCmpInsn = nullptr; + } void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; std::string GetPatternName() override { @@ -1605,10 +1644,10 @@ class AddCmpZeroAArch64 : public PeepPattern { void Run(BB &bb, Insn &insn) override; private: - bool CheckAddCmpZeroCheckAdd (const Insn &insn, regno_t regNO); - bool CheckAddCmpZeroContinue (const 
Insn &insn, regno_t regNO); - bool CheckAddCmpZeroCheckCond (const Insn &insn); - Insn* CheckAddCmpZeroAArch64Pattern(Insn &insn, regno_t regNO); + bool CheckAddCmpZeroCheckAdd(const Insn &prevInsn, const Insn &insn) const; + bool CheckAddCmpZeroContinue(const Insn &insn, const RegOperand &opnd) const; + bool CheckAddCmpZeroCheckCond(const Insn &insn) const; + Insn* CheckAddCmpZeroAArch64Pattern(Insn &insn, const RegOperand &opnd); }; /* @@ -1619,14 +1658,21 @@ class AddCmpZeroAArch64 : public PeepPattern { * uxtw x1, w0 * lsl x2, x1, #3 ====> ubfiz x2, x0, #3, #32 */ -class ComplexExtendWordLslAArch64 : public PeepPattern { +class ComplexExtendWordLslPattern : public CGPeepPattern { public: - explicit ComplexExtendWordLslAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} - ~ComplexExtendWordLslAArch64() override = default; + ComplexExtendWordLslPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ComplexExtendWordLslPattern() override { + useInsn = nullptr; + } void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ComplexExtendWordLslPattern"; + } private: - bool IsExtendWordLslPattern(const Insn &insn) const; + Insn *useInsn = nullptr; }; @@ -1647,7 +1693,9 @@ class AddCmpZeroPatternSSA : public CGPeepPattern { public: AddCmpZeroPatternSSA(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) : CGPeepPattern(cgFunc, currBB, currInsn, info) {} - ~AddCmpZeroPatternSSA() override = default; + ~AddCmpZeroPatternSSA() override { + prevAddInsn = nullptr; + } void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; std::string GetPatternName() override { diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_pgo_gen.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_pgo_gen.h index e5d90290f5cdf36835757817f698e20df3b15de8..04cf666e6b8c013a38035b066d45be9ebf0f1893 100644 --- 
a/src/mapleall/maple_be/include/cg/aarch64/aarch64_pgo_gen.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_pgo_gen.h @@ -20,7 +20,7 @@ namespace maplebe { class AArch64ProfGen : public CGProfGen { public: AArch64ProfGen(CGFunc &curF, MemPool &mp) : CGProfGen(curF, mp) {} - virtual ~AArch64ProfGen() = default; + ~AArch64ProfGen() override = default; void InstrumentBB(BB &bb, MIRSymbol &countTab, uint32 offset) override; void CreateCallForDump(BB &bb, const MIRSymbol &dumpCall) override; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def index fd8598509da6b626a83d2a449de1a023ba8a2236..6b14fd85479566752ebab82fbb67f4877044313f 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -1,5 +1,5 @@ /* - * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2022-2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under the Mulan PSL v1. * You can use this software according to the terms and conditions of the Mulan PSL v1. 
@@ -37,8 +37,8 @@ ADDTARGETPHASE("cgprepeephole", CGOptions::DoPrePeephole()); ADDTARGETPHASE("ebo", CGOptions::DoEBO()); ADDTARGETPHASE("prepeephole", CGOptions::DoPrePeephole()) + ADDTARGETPHASE("precfgo", CGOptions::DoCFGO()); ADDTARGETPHASE("ico", CGOptions::DoICO()) - ADDTARGETPHASE("cfgo", !GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA()); @@ -64,13 +64,12 @@ ADDTARGETPHASE("generateproepilog", true); ADDTARGETPHASE("framefinalize", true); ADDTARGETPHASE("cfgo", GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); - ADDTARGETPHASE("peephole0", CGOptions::DoPeephole()) + ADDTARGETPHASE("peephole0", CGOptions::DoPeephole()); ADDTARGETPHASE("postebo", CGOptions::DoEBO()); ADDTARGETPHASE("postcfgo", CGOptions::DoCFGO()); - ADDTARGETPHASE("cgpostpeephole", CGOptions::DoPeephole()) - ADDTARGETPHASE("gencfi", !GetMIRModule()->IsCModule() || CGOptions::GetInstance().IsUnwindTables() || GetMIRModule()->IsWithDbgInfo()); - ADDTARGETPHASE("dbgfixcallframeoffsets", true); + ADDTARGETPHASE("cgpostpeephole", CGOptions::DoPeephole()); ADDTARGETPHASE("yieldpoint", GetMIRModule()->IsJavaModule() && CGOptions::IsInsertYieldPoint()); + ADDTARGETPHASE("localschedule", false); ADDTARGETPHASE("scheduling", CGOptions::DoSchedule()); ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoLiteProfGen()); ADDTARGETPHASE("cgpgogen", CGOptions::DoLiteProfGen()); @@ -78,4 +77,7 @@ ADDTARGETPHASE("alignanalysis", GetMIRModule()->IsCModule() && CGOptions::DoAlignAnalysis() && !CGOptions::DoLiteProfGen()); ADDTARGETPHASE("fixshortbranch", true); ADDTARGETPHASE("cgaggressiveopt", GetMIRModule()->IsCModule() && CGOptions::DoAggrOpt()); + ADDTARGETPHASE("gencfi", !GetMIRModule()->IsCModule() || CGOptions::GetInstance().IsUnwindTables() || GetMIRModule()->IsWithDbgInfo()); + ADDTARGETPHASE("dbgfixcallframeoffsets", true); + 
ADDTARGETPHASE("cgirverify", true); ADDTARGETPHASE("cgemit", true); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h index aef5b287716a2842c2ac5cf37e668fa01fbac347..d39f29a339d4fa746192336867895ef39f6c74f1 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h @@ -36,6 +36,16 @@ class AArch64GenProEpilog : public GenProEpilog { } else { stackBaseReg = useFP ? R29 : RSP; } + AArch64CGFunc &aarchCGFunc = static_cast(func); + const MapleVector &calleeSavedRegs = aarchCGFunc.GetCalleeSavedRegs(); + if (useFP) { + storeFP = true; + } else if (find(calleeSavedRegs.begin(), calleeSavedRegs.end(), RFP) != calleeSavedRegs.end()) { + storeFP = true; + } else if (find(calleeSavedRegs.begin(), calleeSavedRegs.end(), R29) != calleeSavedRegs.end()) { + storeFP = true; + } + aarchCGFunc.SetStoreFP(storeFP); } ~AArch64GenProEpilog() override = default; @@ -52,7 +62,7 @@ class AArch64GenProEpilog : public GenProEpilog { MemOperand *GetDownStack(); void GenStackGuard(); void AddStackGuard(BB &bb); - BB &GenStackGuardCheckInsn(BB &bb); + void GenStackGuardCheckInsn(BB &bb); void AppendInstructionAllocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty); void AppendInstructionAllocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty); void GeneratePushRegs(); @@ -86,7 +96,12 @@ class AArch64GenProEpilog : public GenProEpilog { static constexpr const int32 kOffset16MemPos = 16; BB *fastPathReturnBB = nullptr; - bool useFP = true; + bool useFP = false; + // To be compatible with previous code more easily,we use storeFP boolean to indicate the case + // (1) use FP to address + // (2) FP is clobbered + // need to delete this and optimize the callee save process. 
+ bool storeFP = false; /* frame pointer(x29) is available as a general-purpose register if useFP is set as false */ AArch64reg stackBaseReg = RFP; }; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h index 6fd1f355d7b3c51fb6382799545f940b9024f9a3..1e303c4c30c4d9671f36ced06d877692ab9d3ac7 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h @@ -70,7 +70,8 @@ class A64StrLdrProp { bool ReplaceMemOpnd(const MemOperand &currMemOpnd); MemOperand *SelectReplaceMem(const MemOperand &currMemOpnd); RegOperand *GetReplaceReg(RegOperand &a64Reg); - MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, uint32 memSize) const; + MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, + uint32 memSize, VaryType varyType = kNotVary) const; MemOperand *SelectReplaceExt(RegOperand &base, uint32 amount, bool isSigned, uint32 memSize); bool CheckNewMemOffset(const Insn &insn, MemOperand &newMemOpnd, uint32 opndIdx) const; void DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn); @@ -104,14 +105,14 @@ class A64ConstProp { cgFunc(&f), ssaInfo(&sInfo), curInsn(&insn) {} - void DoOpt(); + void DoOpt() const; /* false : default lsl #0 true: lsl #12 (only support 12 bit left shift in aarch64) */ static MOperator GetRegImmMOP(MOperator regregMop, bool withLeftShift); static MOperator GetReversalMOP(MOperator arithMop); static MOperator GetFoldMopAndVal(int64 &newVal, int64 constVal, const Insn &arithInsn); private: - bool ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd); + bool ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) const; /* use xzr/wzr in aarch64 to shrink register live range */ void ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg) const; @@ -123,11 +124,11 @@ class A64ConstProp { bool MovConstReplace(DUInsnInfo 
&useDUInfo, ImmOperand &constOpnd) const; bool ArithConstReplaceForOneOpnd(Insn &useInsn, DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) const; - bool ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT); + bool ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) const; bool ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, ArithmeticType aT) const; bool ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) const; bool BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) const; - bool ReplaceCmpToCmn(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) const; + bool ReplaceCmpToCmn(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) const; MemPool *constPropMp; CGFunc *cgFunc; @@ -183,51 +184,6 @@ class RedundantPhiProp : public PropOptimizePattern { VRegVersion *srcVersion = nullptr; }; -class RedundantExpandProp : public PropOptimizePattern { - public: - RedundantExpandProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} - ~RedundantExpandProp() override { - destVersion = nullptr; - srcVersion = nullptr; - } - bool CheckCondition(Insn &insn) final; - void Optimize(Insn &insn) final; - void Run() final; - - protected: - void Init() final { - destVersion = nullptr; - srcVersion = nullptr; - } - - private: - VRegVersion *destVersion = nullptr; - VRegVersion *srcVersion = nullptr; -}; - -class ValidBitNumberProp : public PropOptimizePattern { - public: - ValidBitNumberProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} - ~ValidBitNumberProp() override { - destVersion = nullptr; - srcVersion = nullptr; - } - bool CheckCondition(Insn &insn) final; - void Optimize(Insn &insn) final; - void Run() final; - - protected: - void Init() final { - destVersion = nullptr; - srcVersion = nullptr; - } - private: - bool IsImplicitUse(const RegOperand &dstOpnd, const RegOperand 
&srcOpnd) const; - bool IsPhiToMopX(const RegOperand &defOpnd) const; - VRegVersion *destVersion = nullptr; - VRegVersion *srcVersion = nullptr; -}; - /* * frame pointer and stack pointer will not be varied in function body * treat them as const diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h index 0817f110c6131496bec192ec4a78f397c4357529..e5652df16a74222171bffaf9c466a595aa48ad68 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h @@ -158,8 +158,7 @@ class AArch64RegSavesOpt : public RegSavesOpt { void RevertToRestoreAtEpilog(AArch64reg reg); void DetermineCalleeSaveLocationsPre(); void DetermineCalleeRestoreLocations(); - int32 FindCalleeBase() const; - void SetupRegOffsets(); + int32 GetCalleeBaseOffset() const; void InsertCalleeSaveCode(); void InsertCalleeRestoreCode(); void PrintSaveLocs(AArch64reg reg); @@ -204,16 +203,19 @@ class AArch64RegSavesOpt : public RegSavesOpt { } } - void ResetCalleeBit(CalleeBitsType * data, BBID bid, regno_t reg) const { - CalleeBitsType mask = 1ULL << RegBitMap(reg); - data[bid] = GetBBCalleeBits(data, bid) & ~mask; - } - bool IsCalleeBitSet(CalleeBitsType * data, BBID bid, regno_t reg) const { CalleeBitsType mask = 1ULL << RegBitMap(reg); return GetBBCalleeBits(data, bid) & mask; } + bool IsCalleeBitSetDef(BBID bid, regno_t reg) const { + return IsCalleeBitSet(calleeBitsDef, bid, reg); + } + + bool IsCalleeBitSetUse(BBID bid, regno_t reg) const { + return IsCalleeBitSet(calleeBitsUse, bid, reg); + } + /* AArch64 specific callee-save registers bit positions 0 9 10 33 -- position R19 .. R28 V8 .. V15 V16 .. 
V31 -- regs */ @@ -257,12 +259,54 @@ class AArch64RegSavesOpt : public RegSavesOpt { CalleeBitsType *calleeBitsDef = nullptr; CalleeBitsType *calleeBitsUse = nullptr; CalleeBitsType *calleeBitsAcc = nullptr; - MapleVector bbSavedRegs; /* set of regs to be saved in a BB */ - MapleVector regSavedBBs; /* set of BBs to be saved for a reg */ - MapleMap regOffset; /* save offset of each register */ - MapleSet visited; /* temp */ - MapleMap id2bb; /* bbid to bb* mapping */ + MapleVector bbSavedRegs; // set of regs to be saved in a BB */ + MapleVector regSavedBBs; // set of BBs to be saved for a reg */ + MapleMap regOffset; // save offset of each register + MapleSet visited; // temp + MapleMap id2bb; // bbid to bb* mapping }; + +// callee reg finder, return two reg for stp/ldp +class AArch64RegFinder { + public: + AArch64RegFinder(const CGFunc &func, const AArch64RegSavesOpt ®Save) : + regAlloced(func.GetTargetRegInfo()->GetAllRegNum(), true) { + CalcRegUsedInSameBBsMat(func, regSave); + CalcRegUsedInBBsNum(func, regSave); + SetCalleeRegUnalloc(func); + } + + // get callee reg1 and reg2 for stp/ldp; if reg1 is invalid, all reg is alloced + std::pair GetPairCalleeeReg(); + + void Dump() const; + private: + // two reg is used in same bb's num + // such as: BB1 use r1,r2; BB2 use r1,r3; BB3 use r1,r2,r3 + // r1 r2 r3 + // r1 / 2 2 + // r2 2 / 1 + // r3 2 1 / + std::vector> regUsedInSameBBsMat; + // reg is used in bb's num + std::map regUsedInBBsNum; + std::vector regAlloced; // callee reg is alloced, true is alloced + + void CalcRegUsedInSameBBsMat(const CGFunc &func, const AArch64RegSavesOpt ®Save); + void CalcRegUsedInBBsNum(const CGFunc &func, const AArch64RegSavesOpt ®Save); + void SetCalleeRegUnalloc(const CGFunc &func); + + // find an unalloced reg, which has max UsedInBBsNum + regno_t FindMaxUnallocRegUsedInBBsNum() const { + for (regno_t i = kRinvalid; i < regAlloced.size(); ++i) { + if (!regAlloced[i]) { + return i; + } + } + return kRinvalid; + } +}; + } /* 
namespace maplebe */ #endif /* MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h index dc22d8b88f2d34654abc1484d5402d8c75ac73b5..22f88dcaed010ce2344f7f0a7fcb3b9a1cf03ebd 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h @@ -27,104 +27,6 @@ enum RegisterType : uint8 { kRegisterLast, }; -class ScheduleProcessInfo { - public: - explicit ScheduleProcessInfo(uint32 size) { - availableReadyList.reserve(size); - scheduledNodes.reserve(size); - } - - virtual ~ScheduleProcessInfo() = default; - - uint32 GetLastUpdateCycle() const { - return lastUpdateCycle; - } - - void SetLastUpdateCycle(uint32 updateCycle) { - lastUpdateCycle = updateCycle; - } - - uint32 GetCurrCycle() const { - return currCycle; - } - - void IncCurrCycle() { - ++currCycle; - } - - void DecAdvanceCycle() { - advanceCycle--; - } - - uint32 GetAdvanceCycle() const { - return advanceCycle; - } - - void SetAdvanceCycle(uint32 cycle) { - advanceCycle = cycle; - } - - void ClearAvailableReadyList() { - availableReadyList.clear(); - } - - void PushElemIntoAvailableReadyList(DepNode *node) { - availableReadyList.emplace_back(node); - } - - size_t SizeOfAvailableReadyList() const { - return availableReadyList.size(); - } - - bool AvailableReadyListIsEmpty() const { - return availableReadyList.empty(); - } - - void SetAvailableReadyList(const std::vector &tempReadyList) { - availableReadyList = tempReadyList; - } - - const std::vector &GetAvailableReadyList() const { - return availableReadyList; - } - - const std::vector &GetAvailableReadyList() { - return availableReadyList; - } - - void PushElemIntoScheduledNodes(DepNode *node) { - node->SetState(kScheduled); - node->SetSchedCycle(currCycle); - node->OccupyUnits(); - scheduledNodes.emplace_back(node); - } - - bool IsFirstSeparator() const { - return 
isFirstSeparator; - } - - void ResetIsFirstSeparator() { - isFirstSeparator = false; - } - - size_t SizeOfScheduledNodes() const { - return scheduledNodes.size(); - } - - const std::vector &GetScheduledNodes() const { - return scheduledNodes; - } - - private: - std::vector availableReadyList; - std::vector scheduledNodes; - uint32 lastUpdateCycle = 0; - uint32 currCycle = 0; - uint32 advanceCycle = 0; - bool isFirstSeparator = true; -}; - - class AArch64ScheduleProcessInfo : public ScheduleProcessInfo { public: explicit AArch64ScheduleProcessInfo(uint32 size) : ScheduleProcessInfo(size) {} diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h index 866bce5f439ecd1d54acc049dddd7e3929a9d4d7..7b2249a5b8487b99d4c1356a87d784291672ce85 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_standardize.h @@ -59,7 +59,7 @@ class AArch64Standardize : public Standardize { Operand *UpdateRegister(Operand &opnd, std::map ®Map, bool allocate); void TraverseOperands(Insn *insn, std::map ®Map, bool allocate); - Operand *GetInsnResult(Insn *insn); + Operand *GetInsnResult(Insn *insn) const; Insn *HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order); void SelectTargetInsn(Insn *insn); }; diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h index 24abdced20c9218d2320c21ad9246c1b6b24bc71..2b14760d8fce4fb20f9ac6a7e6cc9a09b373aeee 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h @@ -48,7 +48,8 @@ class AArch64StoreLoadOpt : public StoreLoadOpt { bool CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, regno_t replaceRegNo); bool CheckDefInsn(Insn &defInsn, Insn &currInsn); bool CheckNewMemOffset(const Insn 
&insn, MemOperand *newMemOpnd, uint32 opndIdx); - MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal); + MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, + VaryType varyType = kNotVary); MemOperand *SelectReplaceMem(Insn &defInsn, Insn &curInsn, RegOperand &base, Operand *offset); MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned); bool CanDoMemProp(const Insn *insn); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_tailcall.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_tailcall.h index 9eba2c0e42c64c8116672e41c9a4013d1b4c7a24..ec0cad26803d7a82d53c8a9e8c05e78319744717 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_tailcall.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_tailcall.h @@ -35,10 +35,8 @@ class AArch64TailCallOpt : public TailCallOpt { bool InsnIsCall(Insn &insn) const override; bool InsnIsUncondJump(Insn &insn) const override; bool InsnIsAddWithRsp(Insn &insn) const override; - bool OpndIsStackRelatedReg(RegOperand &opnd) const override; bool OpndIsR0Reg(RegOperand &opnd) const override; bool OpndIsCalleeSaveReg(RegOperand &opnd) const override; - bool IsAddOrSubOp(MOperator mOp) const override; void ReplaceInsnMopWithTailCall(Insn &insn) override; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h index 8e5350314c6e38c5ac4bf460a5e8614e746dd1bf..50a24442c78501211fc9e2cc357273aeb0376ad8 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h @@ -22,12 +22,25 @@ namespace maplebe { class AArch64ValidBitOpt : public ValidBitOpt { public: - AArch64ValidBitOpt(CGFunc &f, CGSSAInfo &info) : ValidBitOpt(f, info) {} + AArch64ValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &info, LiveIntervalAnalysis &ll) : 
ValidBitOpt(mp, f, info, ll) {} ~AArch64ValidBitOpt() override = default; - void DoOpt(BB &bb, Insn &insn) override; + void DoOpt() override; void SetValidBits(Insn &insn) override; bool SetPhiValidBits(Insn &insn) override; + private: + void OptPatternWithImplicitCvt(BB &bb, Insn &insn); + void OptCvt(BB &bb, Insn &insn); + void OptPregCvt(BB &bb, Insn &insn); +}; + +class PropPattern : public ValidBitPattern { + public: + PropPattern(CGFunc &cgFunc, CGSSAInfo &info, LiveIntervalAnalysis &ll) : ValidBitPattern(cgFunc, info, ll) {} + ~PropPattern() override {} + protected: + void VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn); + void ReplaceImplicitCvtAndProp(VRegVersion *destVersion, VRegVersion *srcVersion); }; /* @@ -40,12 +53,14 @@ class AArch64ValidBitOpt : public ValidBitOpt { * and w6[16], w0[16], #FF00[16] mov w6, w0 * asr w6, w6[16], #8[4] ===> asr w6, w6 */ -class AndValidBitPattern : public ValidBitPattern { +class AndValidBitPattern : public PropPattern { public: - AndValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + AndValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info, LiveIntervalAnalysis &ll) : PropPattern(cgFunc, info, ll) {} ~AndValidBitPattern() override { desReg = nullptr; srcReg = nullptr; + destVersion = nullptr; + srcVersion = nullptr; } void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; @@ -58,6 +73,8 @@ class AndValidBitPattern : public ValidBitPattern { MOperator newMop = MOP_undef; RegOperand *desReg = nullptr; RegOperand *srcReg = nullptr; + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; }; /* @@ -71,12 +88,14 @@ class AndValidBitPattern : public ValidBitPattern { * ===> * mov w1, w2 */ -class ExtValidBitPattern : public ValidBitPattern { +class ExtValidBitPattern : public PropPattern { public: - ExtValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ExtValidBitPattern(CGFunc 
&cgFunc, CGSSAInfo &info, LiveIntervalAnalysis &ll) : PropPattern(cgFunc, info, ll) {} ~ExtValidBitPattern() override { newDstOpnd = nullptr; newSrcOpnd = nullptr; + destVersion = nullptr; + srcVersion = nullptr; } void Run(BB &bb, Insn &insn) override; bool CheckCondition(Insn &insn) override; @@ -85,11 +104,37 @@ class ExtValidBitPattern : public ValidBitPattern { } private: + bool CheckValidCvt(const Insn &insn); + bool RealUseMopX(const RegOperand &defOpnd, InsnSet &visitedInsn); RegOperand *newDstOpnd = nullptr; RegOperand *newSrcOpnd = nullptr; + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; MOperator newMop = MOP_undef; }; +class RedundantExpandProp : public PropPattern { + public: + RedundantExpandProp(CGFunc &cgFunc, CGSSAInfo &info, LiveIntervalAnalysis &ll) : PropPattern(cgFunc, info, ll) {} + ~RedundantExpandProp() override { + newDstOpnd = nullptr; + newSrcOpnd = nullptr; + destVersion = nullptr; + srcVersion = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RedundantExpandProp"; + } + + private: + RegOperand *newDstOpnd = nullptr; + RegOperand *newSrcOpnd = nullptr; + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; +}; + /* * cmp w0, #0 * cset w1, NE --> mov w1, w0 diff --git a/src/mapleall/maple_be/include/cg/abstract_mmir.def b/src/mapleall/maple_be/include/cg/abstract_mmir.def index acc419fec286734e05873cb7833c42c8453983c7..83056802d1fa7e08c1a8e758565f25b0b68f479c 100644 --- a/src/mapleall/maple_be/include/cg/abstract_mmir.def +++ b/src/mapleall/maple_be/include/cg/abstract_mmir.def @@ -15,162 +15,162 @@ /* Abstract Maple Machine IR */ /* {mop, opnds, prop, latency, name, format, length} */ - DEFINE_MOP(MOP_undef, {}, ISABSTRACT,0,"","",0) + DEFINE_MOP(MOP_undef, {}, ISABSTRACT, 0, "", "", 0) /* conversion between all types and registers */ - DEFINE_MOP(MOP_copy_ri_8, 
{&OpndDesc::Reg8ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_8","",1) - DEFINE_MOP(MOP_copy_rr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISMOVE,0,"copy_rr_8","",1) - DEFINE_MOP(MOP_copy_ri_16, {&OpndDesc::Reg16ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_16","",1) - DEFINE_MOP(MOP_copy_rr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISMOVE,0,"copy_rr_16","",1) - DEFINE_MOP(MOP_copy_ri_32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_32","",1) - DEFINE_MOP(MOP_copy_rr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISMOVE,0,"copy_rr_32","",1) - DEFINE_MOP(MOP_copy_ri_64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_ri_64","",1) - DEFINE_MOP(MOP_copy_rr_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS},ISABSTRACT|ISMOVE,0,"copy_rr_64","",1) - - DEFINE_MOP(MOP_copy_fi_8, {&OpndDesc::Reg8FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_8","",1) - DEFINE_MOP(MOP_copy_ff_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS},ISABSTRACT|ISMOVE,0,"copy_ff_8","",1) - DEFINE_MOP(MOP_copy_fi_16, {&OpndDesc::Reg16FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_16","",1) - DEFINE_MOP(MOP_copy_ff_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS},ISABSTRACT|ISMOVE,0,"copy_ff_16","",1) - DEFINE_MOP(MOP_copy_fi_32, {&OpndDesc::Reg32FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_32","",1) - DEFINE_MOP(MOP_copy_ff_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISABSTRACT|ISMOVE,0,"copy_ff_32","",1) - DEFINE_MOP(MOP_copy_fi_64, {&OpndDesc::Reg64FD,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_fi_64","",1) - DEFINE_MOP(MOP_copy_ff_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS},ISABSTRACT|ISMOVE,0,"copy_ff_64","",1) + DEFINE_MOP(MOP_copy_ri_8, {&OpndDesc::Reg8ID, &OpndDesc::Imm32}, ISABSTRACT | ISMOVE, 0, "copy_ri_8", "", 1) + DEFINE_MOP(MOP_copy_rr_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISMOVE, 0, "copy_rr_8", "", 1) + DEFINE_MOP(MOP_copy_ri_16, {&OpndDesc::Reg16ID, &OpndDesc::Imm32}, ISABSTRACT | ISMOVE, 0, 
"copy_ri_16", "", 1) + DEFINE_MOP(MOP_copy_rr_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISMOVE, 0, "copy_rr_16", "", 1) + DEFINE_MOP(MOP_copy_ri_32, {&OpndDesc::Reg32ID, &OpndDesc::Imm32}, ISABSTRACT | ISMOVE, 0, "copy_ri_32", "", 1) + DEFINE_MOP(MOP_copy_rr_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISMOVE, 0, "copy_rr_32", "", 1) + DEFINE_MOP(MOP_copy_ri_64, {&OpndDesc::Reg64ID, &OpndDesc::Imm64}, ISABSTRACT | ISMOVE, 0, "copy_ri_64", "", 1) + DEFINE_MOP(MOP_copy_rr_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISMOVE, 0, "copy_rr_64", "", 1) + + DEFINE_MOP(MOP_copy_fi_8, {&OpndDesc::Reg8FD, &OpndDesc::Imm32}, ISABSTRACT | ISMOVE, 0, "copy_fi_8", "", 1) + DEFINE_MOP(MOP_copy_ff_8, {&OpndDesc::Reg8FD, &OpndDesc::Reg8FS}, ISABSTRACT | ISMOVE, 0, "copy_ff_8", "", 1) + DEFINE_MOP(MOP_copy_fi_16, {&OpndDesc::Reg16FD, &OpndDesc::Imm32}, ISABSTRACT | ISMOVE, 0, "copy_fi_16", "", 1) + DEFINE_MOP(MOP_copy_ff_16, {&OpndDesc::Reg16FD, &OpndDesc::Reg16FS}, ISABSTRACT | ISMOVE, 0, "copy_ff_16", "", 1) + DEFINE_MOP(MOP_copy_fi_32, {&OpndDesc::Reg32FD, &OpndDesc::Imm32}, ISABSTRACT | ISMOVE, 0, "copy_fi_32", "", 1) + DEFINE_MOP(MOP_copy_ff_32, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS}, ISABSTRACT | ISMOVE, 0, "copy_ff_32", "", 1) + DEFINE_MOP(MOP_copy_fi_64, {&OpndDesc::Reg64FD, &OpndDesc::Imm64}, ISABSTRACT | ISMOVE, 0, "copy_fi_64", "", 1) + DEFINE_MOP(MOP_copy_ff_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, ISABSTRACT | ISMOVE, 0, "copy_ff_64", "", 1) /* register extend */ - DEFINE_MOP(MOP_zext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r8","",1) - DEFINE_MOP(MOP_sext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r8","",1) - DEFINE_MOP(MOP_zext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r8","",1) - DEFINE_MOP(MOP_sext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r8","",1) - 
DEFINE_MOP(MOP_zext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r16","",1) - DEFINE_MOP(MOP_sext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r16","",1) - - DEFINE_MOP(MOP_zext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r8","",1) - DEFINE_MOP(MOP_sext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r8","",1) - DEFINE_MOP(MOP_zext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r16","",1) - DEFINE_MOP(MOP_sext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r16","",1) - DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1) - DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1) + DEFINE_MOP(MOP_zext_rr_16_8, {&OpndDesc::Reg16ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r16_r8", "", 1) + DEFINE_MOP(MOP_sext_rr_16_8, {&OpndDesc::Reg16ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r16_r8", "", 1) + DEFINE_MOP(MOP_zext_rr_32_8, {&OpndDesc::Reg32ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r32_r8", "", 1) + DEFINE_MOP(MOP_sext_rr_32_8, {&OpndDesc::Reg32ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r32_r8", "", 1) + DEFINE_MOP(MOP_zext_rr_32_16, {&OpndDesc::Reg32ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r32_r16", "", 1) + DEFINE_MOP(MOP_sext_rr_32_16, {&OpndDesc::Reg32ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r32_r16", "", 1) + + DEFINE_MOP(MOP_zext_rr_64_8, {&OpndDesc::Reg64ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r64_r8", "", 1) + DEFINE_MOP(MOP_sext_rr_64_8, {&OpndDesc::Reg64ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r64_r8", "", 1) + DEFINE_MOP(MOP_zext_rr_64_16, {&OpndDesc::Reg64ID, 
&OpndDesc::Reg16IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r64_r16", "", 1) + DEFINE_MOP(MOP_sext_rr_64_16, {&OpndDesc::Reg64ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r64_r16", "", 1) + DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r64_r32", "", 1) + DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r64_r32", "", 1) /* register truncate */ - DEFINE_MOP(MOP_zext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r16","",1) - DEFINE_MOP(MOP_sext_rr_8_16, {&OpndDesc::Reg8ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r16","",1) - DEFINE_MOP(MOP_zext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r32","",1) - DEFINE_MOP(MOP_sext_rr_8_32, {&OpndDesc::Reg8ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r32","",1) - DEFINE_MOP(MOP_zext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r32","",1) - DEFINE_MOP(MOP_sext_rr_16_32, {&OpndDesc::Reg16ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r32","",1) - - DEFINE_MOP(MOP_zext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r8_r64","",1) - DEFINE_MOP(MOP_sext_rr_8_64, {&OpndDesc::Reg8ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r8_r64","",1) - DEFINE_MOP(MOP_zext_rr_16_64, {&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r64","",1) - DEFINE_MOP(MOP_sext_rr_16_64, {&OpndDesc::Reg16ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r64","",1) - DEFINE_MOP(MOP_zext_rr_32_64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r64","",1) - DEFINE_MOP(MOP_sext_rr_32_64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r64","",1) + DEFINE_MOP(MOP_zext_rr_8_16, {&OpndDesc::Reg8ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISCONVERSION, 0, 
"zext_r8_r16", "", 1) + DEFINE_MOP(MOP_sext_rr_8_16, {&OpndDesc::Reg8ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r8_r16", "", 1) + DEFINE_MOP(MOP_zext_rr_8_32, {&OpndDesc::Reg8ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r8_r32", "", 1) + DEFINE_MOP(MOP_sext_rr_8_32, {&OpndDesc::Reg8ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r8_r32", "", 1) + DEFINE_MOP(MOP_zext_rr_16_32, {&OpndDesc::Reg16ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r16_r32", "", 1) + DEFINE_MOP(MOP_sext_rr_16_32, {&OpndDesc::Reg16ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r16_r32", "", 1) + + DEFINE_MOP(MOP_zext_rr_8_64, {&OpndDesc::Reg8ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r8_r64", "", 1) + DEFINE_MOP(MOP_sext_rr_8_64, {&OpndDesc::Reg8ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r8_r64", "", 1) + DEFINE_MOP(MOP_zext_rr_16_64, {&OpndDesc::Reg16ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r16_r64", "", 1) + DEFINE_MOP(MOP_sext_rr_16_64, {&OpndDesc::Reg16ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r16_r64", "", 1) + DEFINE_MOP(MOP_zext_rr_32_64, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "zext_r32_r64", "", 1) + DEFINE_MOP(MOP_sext_rr_32_64, {&OpndDesc::Reg32ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "sext_r32_r64", "", 1) /* int2float conversion */ - DEFINE_MOP(MOP_cvt_f32_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u32","",1) - DEFINE_MOP(MOP_cvt_f64_u32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u32","",1) - DEFINE_MOP(MOP_cvt_f32_u64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_u64","",1) - DEFINE_MOP(MOP_cvt_f64_u64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_u64","",1) - DEFINE_MOP(MOP_cvt_f32_i32, 
{&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i32","",1) - DEFINE_MOP(MOP_cvt_f64_i32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i32","",1) - DEFINE_MOP(MOP_cvt_f32_i64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f32_i64","",1) - DEFINE_MOP(MOP_cvt_f64_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_f64_i64","",1) + DEFINE_MOP(MOP_cvt_f32_u32, {&OpndDesc::Reg32FD, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f32_u32", "", 1) + DEFINE_MOP(MOP_cvt_f64_u32, {&OpndDesc::Reg64FD, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f64_u32", "", 1) + DEFINE_MOP(MOP_cvt_f32_u64, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f32_u64", "", 1) + DEFINE_MOP(MOP_cvt_f64_u64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f64_u64", "", 1) + DEFINE_MOP(MOP_cvt_f32_i32, {&OpndDesc::Reg32FD, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f32_i32", "", 1) + DEFINE_MOP(MOP_cvt_f64_i32, {&OpndDesc::Reg64FD, &OpndDesc::Reg32IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f64_i32", "", 1) + DEFINE_MOP(MOP_cvt_f32_i64, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f32_i64", "", 1) + DEFINE_MOP(MOP_cvt_f64_i64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_f64_i64", "", 1) /* float2int conversion */ - DEFINE_MOP(MOP_cvt_u32_f32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f32","",1) - DEFINE_MOP(MOP_cvt_u64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f32","",1) - DEFINE_MOP(MOP_cvt_u32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u32_f64","",1) - DEFINE_MOP(MOP_cvt_u64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_u64_f64","",1) - DEFINE_MOP(MOP_cvt_i32_f32, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f32","",1) - DEFINE_MOP(MOP_cvt_i64_f32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f32","",1) - DEFINE_MOP(MOP_cvt_i32_f64, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i32_f64","",1) - DEFINE_MOP(MOP_cvt_i64_f64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_i64_f64","",1) + DEFINE_MOP(MOP_cvt_u32_f32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_u32_f32", "", 1) + DEFINE_MOP(MOP_cvt_u64_f32, {&OpndDesc::Reg64ID, &OpndDesc::Reg32FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_u64_f32", "", 1) + DEFINE_MOP(MOP_cvt_u32_f64, {&OpndDesc::Reg32ID, &OpndDesc::Reg64FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_u32_f64", "", 1) + DEFINE_MOP(MOP_cvt_u64_f64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_u64_f64", "", 1) + DEFINE_MOP(MOP_cvt_i32_f32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_i32_f32", "", 1) + DEFINE_MOP(MOP_cvt_i64_f32, {&OpndDesc::Reg64ID, &OpndDesc::Reg32FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_i64_f32", "", 1) + DEFINE_MOP(MOP_cvt_i32_f64, {&OpndDesc::Reg32ID, &OpndDesc::Reg64FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_i32_f64", "", 1) + DEFINE_MOP(MOP_cvt_i64_f64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_i64_f64", "", 1) /* float conversion */ - DEFINE_MOP(MOP_cvt_ff_64_32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_ff_64_32","",1) - DEFINE_MOP(MOP_cvt_ff_32_64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_ff_32_64","",1) + DEFINE_MOP(MOP_cvt_ff_64_32, {&OpndDesc::Reg64FD, &OpndDesc::Reg32FS}, ISABSTRACT | ISCONVERSION, 0, "cvt_ff_64_32", "", 1) + DEFINE_MOP(MOP_cvt_ff_32_64, {&OpndDesc::Reg32FD, &OpndDesc::Reg64IS}, ISABSTRACT | ISCONVERSION, 0, "cvt_ff_32_64", "", 1) /* Support transformation between memory and registers */ - 
DEFINE_MOP(MOP_str_8, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_8","",1) - DEFINE_MOP(MOP_str_16, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_16","",1) - DEFINE_MOP(MOP_str_32, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_32","",1) - DEFINE_MOP(MOP_str_64, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_64","",1) - DEFINE_MOP(MOP_load_8, {&OpndDesc::Reg8ID,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_8","",1) - DEFINE_MOP(MOP_load_16, {&OpndDesc::Reg16ID,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_16","",1) - DEFINE_MOP(MOP_load_32, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_32","",1) - DEFINE_MOP(MOP_load_64, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_64","",1) - DEFINE_MOP(MOP_str_f_8, {&OpndDesc::Reg8FS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_f_8","",1) - DEFINE_MOP(MOP_str_f_16, {&OpndDesc::Reg16FS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_f_16","",1) - DEFINE_MOP(MOP_str_f_32, {&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_f_32","",1) - DEFINE_MOP(MOP_str_f_64, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_f_64","",1) - DEFINE_MOP(MOP_load_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_f_8","",1) - DEFINE_MOP(MOP_load_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_f_16","",1) - DEFINE_MOP(MOP_load_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_f_32","",1) - DEFINE_MOP(MOP_load_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_f_64","",1) + DEFINE_MOP(MOP_str_8, {&OpndDesc::Reg8IS, &OpndDesc::Mem8D}, ISABSTRACT | ISSTORE, 0, "str_8", "", 1) + DEFINE_MOP(MOP_str_16, {&OpndDesc::Reg16IS, &OpndDesc::Mem16D}, ISABSTRACT | ISSTORE, 0, "str_16", "", 1) + DEFINE_MOP(MOP_str_32, {&OpndDesc::Reg32IS, &OpndDesc::Mem32D}, ISABSTRACT | ISSTORE, 0, "str_32", "", 1) + DEFINE_MOP(MOP_str_64, {&OpndDesc::Reg64IS, 
&OpndDesc::Mem64D}, ISABSTRACT | ISSTORE, 0, "str_64", "", 1) + DEFINE_MOP(MOP_load_8, {&OpndDesc::Reg8ID, &OpndDesc::Mem8S}, ISABSTRACT | ISLOAD, 0, "load_8", "", 1) + DEFINE_MOP(MOP_load_16, {&OpndDesc::Reg16ID, &OpndDesc::Mem16S}, ISABSTRACT | ISLOAD, 0, "load_16", "", 1) + DEFINE_MOP(MOP_load_32, {&OpndDesc::Reg32ID, &OpndDesc::Mem32S}, ISABSTRACT | ISLOAD, 0, "load_32", "", 1) + DEFINE_MOP(MOP_load_64, {&OpndDesc::Reg64ID, &OpndDesc::Mem64S}, ISABSTRACT | ISLOAD, 0, "load_64", "", 1) + DEFINE_MOP(MOP_str_f_8, {&OpndDesc::Reg8FS, &OpndDesc::Mem8D}, ISABSTRACT | ISSTORE, 0, "str_f_8", "", 1) + DEFINE_MOP(MOP_str_f_16, {&OpndDesc::Reg16FS, &OpndDesc::Mem16D}, ISABSTRACT | ISSTORE, 0, "str_f_16", "", 1) + DEFINE_MOP(MOP_str_f_32, {&OpndDesc::Reg32FS, &OpndDesc::Mem32D}, ISABSTRACT | ISSTORE, 0, "str_f_32", "", 1) + DEFINE_MOP(MOP_str_f_64, {&OpndDesc::Reg64FS, &OpndDesc::Mem64D}, ISABSTRACT | ISSTORE, 0, "str_f_64", "", 1) + DEFINE_MOP(MOP_load_f_8, {&OpndDesc::Reg8FD, &OpndDesc::Mem8S}, ISABSTRACT | ISLOAD, 0, "load_f_8", "", 1) + DEFINE_MOP(MOP_load_f_16, {&OpndDesc::Reg16FD, &OpndDesc::Mem16S}, ISABSTRACT | ISLOAD, 0, "load_f_16", "", 1) + DEFINE_MOP(MOP_load_f_32, {&OpndDesc::Reg32FD, &OpndDesc::Mem32S}, ISABSTRACT | ISLOAD, 0, "load_f_32", "", 1) + DEFINE_MOP(MOP_load_f_64, {&OpndDesc::Reg64FD, &OpndDesc::Mem64S}, ISABSTRACT | ISLOAD, 0, "load_f_64", "", 1) /* Support three address basic operations */ - DEFINE_MOP(MOP_add_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"add_8","",1) - DEFINE_MOP(MOP_add_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"add_16","",1) - DEFINE_MOP(MOP_add_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"add_32","",1) - DEFINE_MOP(MOP_add_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"add_64","",1) - DEFINE_MOP(MOP_sub_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, 
&OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"sub_8","",1) - DEFINE_MOP(MOP_sub_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"sub_16","",1) - DEFINE_MOP(MOP_sub_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"sub_32","",1) - DEFINE_MOP(MOP_sub_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"sub_64","",1) - DEFINE_MOP(MOP_or_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"or_8","",1) - DEFINE_MOP(MOP_or_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"or_16","",1) - DEFINE_MOP(MOP_or_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"or_32","",1) - DEFINE_MOP(MOP_or_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"or_64","",1) - DEFINE_MOP(MOP_xor_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"xor_8","",1) - DEFINE_MOP(MOP_xor_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"xor_16","",1) - DEFINE_MOP(MOP_xor_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"xor_32","",1) - DEFINE_MOP(MOP_xor_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"xor_64","",1) - DEFINE_MOP(MOP_and_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"and_8","",1) - DEFINE_MOP(MOP_and_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"and_16","",1) - DEFINE_MOP(MOP_and_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"and_32","",1) - DEFINE_MOP(MOP_and_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"and_64","",1) + DEFINE_MOP(MOP_add_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISBASICOP, 0, "add_8", 
"", 1) + DEFINE_MOP(MOP_add_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISBASICOP, 0, "add_16", "", 1) + DEFINE_MOP(MOP_add_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, ISABSTRACT | ISBASICOP, 0, "add_32", "", 1) + DEFINE_MOP(MOP_add_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISBASICOP, 0, "add_64", "", 1) + DEFINE_MOP(MOP_sub_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISBASICOP, 0, "sub_8", "", 1) + DEFINE_MOP(MOP_sub_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISBASICOP, 0, "sub_16", "", 1) + DEFINE_MOP(MOP_sub_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, ISABSTRACT | ISBASICOP, 0, "sub_32", "", 1) + DEFINE_MOP(MOP_sub_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISBASICOP, 0, "sub_64", "", 1) + DEFINE_MOP(MOP_or_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISBASICOP, 0, "or_8", "", 1) + DEFINE_MOP(MOP_or_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISBASICOP, 0, "or_16", "", 1) + DEFINE_MOP(MOP_or_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, ISABSTRACT | ISBASICOP, 0, "or_32", "", 1) + DEFINE_MOP(MOP_or_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISBASICOP, 0, "or_64", "", 1) + DEFINE_MOP(MOP_xor_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISBASICOP, 0, "xor_8", "", 1) + DEFINE_MOP(MOP_xor_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISBASICOP, 0, "xor_16", "", 1) + DEFINE_MOP(MOP_xor_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, ISABSTRACT | ISBASICOP, 0, "xor_32", "", 1) + DEFINE_MOP(MOP_xor_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISBASICOP, 0, "xor_64", "", 1) + DEFINE_MOP(MOP_and_8, {&OpndDesc::Reg8ID, 
&OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISBASICOP, 0, "and_8", "", 1) + DEFINE_MOP(MOP_and_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISBASICOP, 0, "and_16", "", 1) + DEFINE_MOP(MOP_and_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, ISABSTRACT | ISBASICOP, 0, "and_32", "", 1) + DEFINE_MOP(MOP_and_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISBASICOP, 0, "and_64", "", 1) /* Support three address basic operations (Floating point) */ - DEFINE_MOP(MOP_and_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"and_f_8","",1) - DEFINE_MOP(MOP_and_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"and_f_16","",1) - DEFINE_MOP(MOP_and_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"and_f_32","",1) - DEFINE_MOP(MOP_and_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"and_f_64","",1) - DEFINE_MOP(MOP_add_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"add_f_8","",1) - DEFINE_MOP(MOP_add_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"add_f_16","",1) - DEFINE_MOP(MOP_add_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"add_f_32","",1) - DEFINE_MOP(MOP_add_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"add_f_64","",1) - DEFINE_MOP(MOP_sub_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"sub_f_8","",1) - DEFINE_MOP(MOP_sub_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"sub_f_16","",1) - DEFINE_MOP(MOP_sub_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"sub_f_32","",1) - DEFINE_MOP(MOP_sub_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, 
&OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"sub_f_64","",1) + DEFINE_MOP(MOP_and_f_8, {&OpndDesc::Reg8FD, &OpndDesc::Reg8FS, &OpndDesc::Reg8FS}, ISABSTRACT | ISBASICOP, 0, "and_f_8", "", 1) + DEFINE_MOP(MOP_and_f_16, {&OpndDesc::Reg16FD, &OpndDesc::Reg16FS, &OpndDesc::Reg16FS}, ISABSTRACT | ISBASICOP, 0, "and_f_16", "", 1) + DEFINE_MOP(MOP_and_f_32, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, ISABSTRACT | ISBASICOP, 0, "and_f_32", "", 1) + DEFINE_MOP(MOP_and_f_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISABSTRACT | ISBASICOP, 0, "and_f_64", "", 1) + DEFINE_MOP(MOP_add_f_8, {&OpndDesc::Reg8FD, &OpndDesc::Reg8FS, &OpndDesc::Reg8FS}, ISABSTRACT | ISBASICOP, 0, "add_f_8", "", 1) + DEFINE_MOP(MOP_add_f_16, {&OpndDesc::Reg16FD, &OpndDesc::Reg16FS, &OpndDesc::Reg16FS}, ISABSTRACT | ISBASICOP, 0, "add_f_16", "", 1) + DEFINE_MOP(MOP_add_f_32, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, ISABSTRACT | ISBASICOP, 0, "add_f_32", "", 1) + DEFINE_MOP(MOP_add_f_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISABSTRACT | ISBASICOP, 0, "add_f_64", "", 1) + DEFINE_MOP(MOP_sub_f_8, {&OpndDesc::Reg8FD, &OpndDesc::Reg8FS, &OpndDesc::Reg8FS}, ISABSTRACT | ISBASICOP, 0, "sub_f_8", "", 1) + DEFINE_MOP(MOP_sub_f_16, {&OpndDesc::Reg16FD, &OpndDesc::Reg16FS, &OpndDesc::Reg16FS}, ISABSTRACT | ISBASICOP, 0, "sub_f_16", "", 1) + DEFINE_MOP(MOP_sub_f_32, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS, &OpndDesc::Reg32FS}, ISABSTRACT | ISBASICOP, 0, "sub_f_32", "", 1) + DEFINE_MOP(MOP_sub_f_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS, &OpndDesc::Reg64FS}, ISABSTRACT | ISBASICOP, 0, "sub_f_64", "", 1) /* shift -- shl/ashr/lshr */ - DEFINE_MOP(MOP_shl_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"shl_8","",1) - DEFINE_MOP(MOP_shl_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"shl_16","",1) - DEFINE_MOP(MOP_shl_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, 
&OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_32","",1) - DEFINE_MOP(MOP_shl_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"shl_64","",1) - DEFINE_MOP(MOP_ashr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"ashr_8","",1) - DEFINE_MOP(MOP_ashr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"ashr_16","",1) - DEFINE_MOP(MOP_ashr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_32","",1) - DEFINE_MOP(MOP_ashr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"ashr_64","",1) - DEFINE_MOP(MOP_lshr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"lshr_8","",1) - DEFINE_MOP(MOP_lshr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"lshr_16","",1) - DEFINE_MOP(MOP_lshr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_32","",1) - DEFINE_MOP(MOP_lshr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"lshr_64","",1) + DEFINE_MOP(MOP_shl_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISSHIFT, 0, "shl_8", "", 1) + DEFINE_MOP(MOP_shl_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISSHIFT, 0, "shl_16", "", 1) + DEFINE_MOP(MOP_shl_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, ISABSTRACT | ISSHIFT, 0, "shl_32", "", 1) + DEFINE_MOP(MOP_shl_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISSHIFT, 0, "shl_64", "", 1) + DEFINE_MOP(MOP_ashr_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISSHIFT, 0, "ashr_8", "", 1) + DEFINE_MOP(MOP_ashr_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISSHIFT, 0, "ashr_16", "", 1) + DEFINE_MOP(MOP_ashr_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, 
&OpndDesc::Reg32IS}, ISABSTRACT | ISSHIFT, 0, "ashr_32", "", 1) + DEFINE_MOP(MOP_ashr_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISSHIFT, 0, "ashr_64", "", 1) + DEFINE_MOP(MOP_lshr_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS, &OpndDesc::Reg8IS}, ISABSTRACT | ISSHIFT, 0, "lshr_8", "", 1) + DEFINE_MOP(MOP_lshr_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS, &OpndDesc::Reg16IS}, ISABSTRACT | ISSHIFT, 0, "lshr_16", "", 1) + DEFINE_MOP(MOP_lshr_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS, &OpndDesc::Reg32IS}, ISABSTRACT | ISSHIFT, 0, "lshr_32", "", 1) + DEFINE_MOP(MOP_lshr_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS, &OpndDesc::Reg64IS}, ISABSTRACT | ISSHIFT, 0, "lshr_64", "", 1) /* Support two address basic operations */ - DEFINE_MOP(MOP_neg_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"neg_8","",1) - DEFINE_MOP(MOP_neg_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"neg_16","",1) - DEFINE_MOP(MOP_neg_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"neg_32","",1) - DEFINE_MOP(MOP_neg_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"neg_64","",1) - DEFINE_MOP(MOP_neg_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS},ISABSTRACT|ISUNARYOP,0,"neg_f_8","",1) - DEFINE_MOP(MOP_neg_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS},ISABSTRACT|ISUNARYOP,0,"neg_f_16","",1) - DEFINE_MOP(MOP_neg_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISABSTRACT|ISUNARYOP,0,"neg_f_32","",1) - DEFINE_MOP(MOP_neg_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISABSTRACT|ISUNARYOP,0,"neg_f_64","",1) - DEFINE_MOP(MOP_not_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"not_8","",1) - DEFINE_MOP(MOP_not_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"not_16","",1) - DEFINE_MOP(MOP_not_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"not_32","",1) - DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1) + 
DEFINE_MOP(MOP_neg_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISUNARYOP, 0, "neg_8", "", 1) + DEFINE_MOP(MOP_neg_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISUNARYOP, 0, "neg_16", "", 1) + DEFINE_MOP(MOP_neg_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISUNARYOP, 0, "neg_32", "", 1) + DEFINE_MOP(MOP_neg_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISUNARYOP, 0, "neg_64", "", 1) + DEFINE_MOP(MOP_neg_f_8, {&OpndDesc::Reg8FD, &OpndDesc::Reg8FS}, ISABSTRACT | ISUNARYOP, 0, "neg_f_8", "", 1) + DEFINE_MOP(MOP_neg_f_16, {&OpndDesc::Reg16FD, &OpndDesc::Reg16FS}, ISABSTRACT | ISUNARYOP, 0, "neg_f_16", "", 1) + DEFINE_MOP(MOP_neg_f_32, {&OpndDesc::Reg32FD, &OpndDesc::Reg32FS}, ISABSTRACT | ISUNARYOP, 0, "neg_f_32", "", 1) + DEFINE_MOP(MOP_neg_f_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS}, ISABSTRACT | ISUNARYOP, 0, "neg_f_64", "", 1) + DEFINE_MOP(MOP_not_8, {&OpndDesc::Reg8ID, &OpndDesc::Reg8IS}, ISABSTRACT | ISUNARYOP, 0, "not_8", "", 1) + DEFINE_MOP(MOP_not_16, {&OpndDesc::Reg16ID, &OpndDesc::Reg16IS}, ISABSTRACT | ISUNARYOP, 0, "not_16", "", 1) + DEFINE_MOP(MOP_not_32, {&OpndDesc::Reg32ID, &OpndDesc::Reg32IS}, ISABSTRACT | ISUNARYOP, 0, "not_32", "", 1) + DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS}, ISABSTRACT | ISUNARYOP, 0, "not_64", "", 1) /* MOP_comment */ - DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT|ISCOMMENT,0,"//","0", 0) + DEFINE_MOP(MOP_comment, {&OpndDesc::String0S}, ISABSTRACT | ISCOMMENT, 0, "//", "0", 0) diff --git a/src/mapleall/maple_be/include/cg/asm_info.h b/src/mapleall/maple_be/include/cg/asm_info.h index 1e070975a780fc2475212e3655e1dd8d58096796..2112894ddd69a4aa2eb5c88e4d81a39872cb7738 100644 --- a/src/mapleall/maple_be/include/cg/asm_info.h +++ b/src/mapleall/maple_be/include/cg/asm_info.h @@ -16,6 +16,7 @@ #define MAPLEBE_INCLUDE_CG_ASM_INFO_H #include "maple_string.h" +#include "types_def.h" namespace maplebe { enum AsmLabel : uint8 { @@ -148,9 +149,9 @@ 
class AsmInfo { } explicit AsmInfo(MemPool &memPool) -#if TARGX86 || TARGX86_64 +#if (defined(TARGX86) && TARGX86) || (defined(TARGX86_64) && TARGX86_64) : asmCmnt("\t//\t", &memPool), -#elif TARGARM32 +#elif defined(TARGARM32) && TARGARM32 : asmCmnt("\t@\t", &memPool), #else : asmCmnt("\t#\t", &memPool), diff --git a/src/mapleall/maple_be/include/cg/base_schedule.h b/src/mapleall/maple_be/include/cg/base_schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..94ae4ec581237b825bf9db27f72736cca086090a --- /dev/null +++ b/src/mapleall/maple_be/include/cg/base_schedule.h @@ -0,0 +1,59 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ +#ifndef MAPLEBE_INCLUDE_CG_BASE_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_BASE_SCHEDULE_H + +#include "cgfunc.h" +#include "control_dep_analysis.h" +#include "data_dep_analysis.h" +#include "list_scheduler.h" + +namespace maplebe { +class BaseSchedule { + public: + BaseSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cdAna, bool doDelay = false) + : schedMP(mp), schedAlloc(&mp), cgFunc(f), cda(cdAna), doDelayHeu(doDelay) {} + virtual ~BaseSchedule() { + listScheduler = nullptr; + } + + virtual void Run() = 0; + void DoLocalSchedule(CDGRegion ®ion); + bool DoDelayHeu() { + return doDelayHeu; + } + void SetDelayHeu() { + doDelayHeu = true; + } + + protected: + void InitInsnIdAndLocInsn(); + void InitInRegion(CDGRegion ®ion) const; + void DumpRegionInfoBeforeSchedule(CDGRegion ®ion) const; + void DumpCDGNodeInfoBeforeSchedule(CDGNode &cdgNode) const; + void DumpCDGNodeInfoAfterSchedule(CDGNode &cdgNode) const; + virtual void DumpInsnInfoByScheduledOrder(BB &curBB) const = 0; + + MemPool &schedMP; + MapleAllocator schedAlloc; + CGFunc &cgFunc; + ControlDepAnalysis &cda; + CommonScheduleInfo *commonSchedInfo = nullptr; + ListScheduler *listScheduler = nullptr; + bool doDelayHeu = false; +}; +} /* namespace maplebe */ + +#endif // MAPLEBE_INCLUDE_CG_BASE_SCHEDULE_H diff --git a/src/mapleall/maple_be/include/cg/call_conv.h b/src/mapleall/maple_be/include/cg/call_conv.h index 19661f0e3a28bdd92175c46daffad0a7d3eec1b5..977605fbec53051c6f84af5f78e8b60923be4136 100644 --- a/src/mapleall/maple_be/include/cg/call_conv.h +++ b/src/mapleall/maple_be/include/cg/call_conv.h @@ -17,6 +17,7 @@ #include "types_def.h" #include "becommon.h" +#include "isa.h" namespace maplebe { using namespace maple; @@ -36,6 +37,23 @@ struct CCLocInfo { PrimType primTypeOfReg1; /* the primitive type stored in reg1 */ PrimType primTypeOfReg2; PrimType primTypeOfReg3; + + void Clear() { + reg0 = kInvalidRegNO; + reg1 = kInvalidRegNO; + reg2 = kInvalidRegNO; + reg3 = kInvalidRegNO; + memOffset = 0; + 
memSize = 0; + fpSize = 0; + numFpPureRegs = 0; + regCount = 0; + primTypeOfReg0 = PTY_begin; + primTypeOfReg1 = PTY_begin; + primTypeOfReg2 = PTY_begin; + primTypeOfReg3 = PTY_begin; + } + uint8 GetRegCount() const { return regCount; } diff --git a/src/mapleall/maple_be/include/cg/cfgo.h b/src/mapleall/maple_be/include/cg/cfgo.h index 340078e4f97469cafccab4a8652c19d71ff7c781..8e531a1038e220c7533a3654b62ba42123ecc52d 100644 --- a/src/mapleall/maple_be/include/cg/cfgo.h +++ b/src/mapleall/maple_be/include/cg/cfgo.h @@ -32,8 +32,8 @@ class ChainingPattern : public OptimizationPattern { patternName = "BB Chaining"; dotColor = kCfgoChaining; } + ~ChainingPattern() override = default; - virtual ~ChainingPattern() = default; bool Optimize(BB &curBB) override; protected: @@ -53,12 +53,14 @@ class SequentialJumpPattern : public OptimizationPattern { dotColor = kCfgoSj; } - virtual ~SequentialJumpPattern() = default; + ~SequentialJumpPattern() override = default; bool Optimize(BB &curBB) override; protected: void SkipSucBB(BB &curBB, BB &sucBB) const; void UpdateSwitchSucc(BB &curBB, BB &sucBB) const; + // If the sucBB has one invalid predBB, the sucBB can not be skipped + bool HasInvalidPred(BB &sucBB) const; }; class FlipBRPattern : public OptimizationPattern { @@ -68,7 +70,7 @@ class FlipBRPattern : public OptimizationPattern { dotColor = kCfgoFlipCond; } - virtual ~FlipBRPattern() = default; + ~FlipBRPattern() override = default; bool Optimize(BB &curBB) override; CfgoPhase GetPhase() const { @@ -95,7 +97,7 @@ class UnreachBBPattern : public OptimizationPattern { func.GetTheCFG()->FindAndMarkUnreachable(*cgFunc); } - virtual ~UnreachBBPattern() = default; + ~UnreachBBPattern() override = default; bool Optimize(BB &curBB) override; }; @@ -110,7 +112,7 @@ class DuplicateBBPattern : public OptimizationPattern { dotColor = kCfgoDup; } - virtual ~DuplicateBBPattern() = default; + ~DuplicateBBPattern() override = default; bool Optimize(BB &curBB) override; private: @@ -127,7 
+129,7 @@ class EmptyBBPattern : public OptimizationPattern { dotColor = kCfgoEmpty; } - virtual ~EmptyBBPattern() = default; + ~EmptyBBPattern() override = default; bool Optimize(BB &curBB) override; }; @@ -137,7 +139,7 @@ class CFGOptimizer : public Optimizer { name = "CFGO"; } - virtual ~CFGOptimizer() = default; + ~CFGOptimizer() override = default; CfgoPhase GetPhase() const { return phase; } @@ -147,6 +149,8 @@ class CFGOptimizer : public Optimizer { CfgoPhase phase = kCfgoDefault; }; +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPreCfgo, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgCfgo, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostCfgo, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/cfi.h b/src/mapleall/maple_be/include/cg/cfi.h index 26e0faea86dfe426b6885828786cc33321d9bc2d..5647d8f5e395676313f7ac9a48c33a9cc649713d 100644 --- a/src/mapleall/maple_be/include/cg/cfi.h +++ b/src/mapleall/maple_be/include/cg/cfi.h @@ -69,7 +69,7 @@ class CfiInsn : public maplebe::Insn { maplebe::Operand &opnd2) : Insn(memPool, op, opnd0, opnd1, opnd2) {} - ~CfiInsn() = default; + ~CfiInsn() override = default; bool IsMachineInstruction() const override { return false; @@ -91,12 +91,10 @@ class CfiInsn : public maplebe::Insn { bool IsRegDefined(maplebe::regno_t regNO) const override { CHECK_FATAL(false, "cfi do not def regs"); - return false; } std::set GetDefRegs() const override{ CHECK_FATAL(false, "cfi do not def regs"); - return std::set(); } uint32 GetBothDefUseOpnd() const override { @@ -111,7 +109,7 @@ class RegOperand : public maplebe::OperandVisitable { public: RegOperand(uint32 no, uint32 size) : OperandVisitable(kOpdRegister, size), regNO(no) {} - ~RegOperand() = default; + ~RegOperand() override = default; using OperandVisitable::OperandVisitable; uint32 GetRegisterNO() const { @@ -137,7 +135,7 @@ class ImmOperand : public maplebe::OperandVisitable { public: ImmOperand(int64 val, uint32 
size) : OperandVisitable(kOpdImmediate, size), val(val) {} - ~ImmOperand() = default; + ~ImmOperand() override = default; using OperandVisitable::OperandVisitable; Operand *Clone(MemPool &memPool) const override { @@ -164,7 +162,7 @@ class SymbolOperand : public maplebe::OperandVisitable { SymbolOperand(const maple::MIRSymbol &mirSymbol, uint8 size) : OperandVisitable(kOpdStImmediate, size), symbol(&mirSymbol) {} - ~SymbolOperand() { + ~SymbolOperand() override { symbol = nullptr; } using OperandVisitable::OperandVisitable; @@ -191,7 +189,7 @@ class StrOperand : public maplebe::OperandVisitable { public: StrOperand(const std::string &str, MemPool &memPool) : OperandVisitable(kOpdString, 0), str(str, &memPool) {} - ~StrOperand() = default; + ~StrOperand() override = default; using OperandVisitable::OperandVisitable; Operand *Clone(MemPool &memPool) const override { @@ -219,7 +217,7 @@ class LabelOperand : public maplebe::OperandVisitable { LabelOperand(const std::string &parent, LabelIdx labIdx, MemPool &memPool) : OperandVisitable(kOpdBBAddress, 0), parentFunc(parent, &memPool), labelIndex(labIdx) {} - ~LabelOperand() = default; + ~LabelOperand() override = default; using OperandVisitable::OperandVisitable; Operand *Clone(MemPool &memPool) const override { @@ -254,7 +252,7 @@ class CFIOpndEmitVisitor : public maplebe::OperandVisitorBase, LabelOperand> { public: explicit CFIOpndEmitVisitor(maplebe::Emitter &asmEmitter) : emitter(asmEmitter) {} - virtual ~CFIOpndEmitVisitor() = default; + ~CFIOpndEmitVisitor() override = default; protected: maplebe::Emitter &emitter; private: diff --git a/src/mapleall/maple_be/include/cg/cfi_generator.h b/src/mapleall/maple_be/include/cg/cfi_generator.h index 1844935ae3569ed9d62d8b82b000cfec54efb2b8..42d1129e2ccce7d69852e0d165749a18c2856971 100644 --- a/src/mapleall/maple_be/include/cg/cfi_generator.h +++ b/src/mapleall/maple_be/include/cg/cfi_generator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All 
rights reserved. + * Copyright (c) [2022-2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -59,7 +59,7 @@ class GenCfi { virtual void GenerateRegisterSaveDirective(BB &bb) {} virtual void GenerateRegisterRestoreDirective(BB &bb) {} - /* It is do insert a start location information for each function in debugging mode */ + /* It inserts a start location information for each function in debugging mode */ void InsertFirstLocation(BB &bb); }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/cg.h b/src/mapleall/maple_be/include/cg/cg.h index a1621518fa96cd6d951eff988b19684d95f0167e..575174141133ebfd90b92f33c59204f2997a0ed4 100644 --- a/src/mapleall/maple_be/include/cg/cg.h +++ b/src/mapleall/maple_be/include/cg/cg.h @@ -52,6 +52,10 @@ class RedundantComputeElim; class TailCallOpt; class Rematerializer; class CGProfGen; +class GlobalSchedule; +class LocalSchedule; +class ControlDepAnalysis; +class InterDataDepAnalysis; class CGAggressiveOpt; class Globals { @@ -87,11 +91,15 @@ class Globals { return mad; } - void SetOptimLevel(int32 opLevel) { + void ClearMAD() { + mad = nullptr; + } + + void SetOptimLevel(uint32 opLevel) { optimLevel = opLevel; } - int32 GetOptimLevel() const { + uint32 GetOptimLevel() const { return optimLevel; } @@ -101,7 +109,7 @@ class Globals { private: BECommon *beCommon = nullptr; MAD *mad = nullptr; - int32 optimLevel = 0; + uint32 optimLevel = 0; CG *cg = nullptr; Globals() = default; }; @@ -173,7 +181,7 @@ class CG { return cgOption.GenerateDebugFriendlyCode(); } - int32 GetOptimizeLevel() const { + uint32 GetOptimizeLevel() const { return cgOption.GetOptimizeLevel(); } @@ -304,9 +312,17 @@ class CG { virtual PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; virtual CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, 
LiveIntervalAnalysis &ll) const = 0; virtual CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; - virtual ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; + virtual ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const = 0; virtual RedundantComputeElim *CreateRedundantCompElim(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; virtual TailCallOpt *CreateCGTailCallOpt(MemPool &mp, CGFunc &f) const = 0; + virtual GlobalSchedule *CreateGlobalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cda, + InterDataDepAnalysis &idda) const { + return nullptr; + } + virtual LocalSchedule *CreateLocalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cda, + InterDataDepAnalysis &idda) const { + return nullptr; + } virtual LocalOpt *CreateLocalOpt(MemPool &mp, CGFunc &f, ReachingDefinition&) const { return nullptr; }; diff --git a/src/mapleall/maple_be/include/cg/cg_callgraph_reorder.h b/src/mapleall/maple_be/include/cg/cg_callgraph_reorder.h new file mode 100644 index 0000000000000000000000000000000000000000..2dc6ec647262e3749bfb2e18e5860ead00c20cd3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_callgraph_reorder.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLE_INCLUDE_CG_CALLGRAPH_REORDER_H +#define MAPLE_INCLUDE_CG_CALLGRAPH_REORDER_H + +#include "mpl_logging.h" +#include "types_def.h" + +namespace maple { +std::map ReorderAccordingProfile(const std::string &path); +} + +#endif \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/cg_cdg.h b/src/mapleall/maple_be/include/cg/cg_cdg.h index 8af8f3384add402811958aa07a20304d3135f6f1..b9513543673425b1c7d0f772c80e4f8e9d039390 100644 --- a/src/mapleall/maple_be/include/cg/cg_cdg.h +++ b/src/mapleall/maple_be/include/cg/cg_cdg.h @@ -37,8 +37,9 @@ class CDGNode { public: CDGNode(CDGNodeId nId, BB &bb, MapleAllocator &alloc) : id(nId), bb(&bb), outEdges(alloc.Adapter()), inEdges(alloc.Adapter()), - topoPreds(alloc.Adapter()), lastComments(alloc.Adapter()), predCDGNodes(alloc.Adapter()) {} + lastComments(alloc.Adapter()), dataNodes(alloc.Adapter()), cfiInsns(alloc.Adapter()) {} virtual ~CDGNode() { + topoPredInRegion = nullptr; lastFrameDef = nullptr; bb = nullptr; regUses = nullptr; @@ -88,6 +89,10 @@ class CDGNode { region = &cdgRegion; } + void ClearRegion() { + region = nullptr; + } + bool IsEntryNode() const { return isEntryNode; } @@ -128,6 +133,30 @@ class CDGNode { return inEdges.size(); } + void SetVisitedInTopoSort(bool isVisited) { + isVisitedInTopoSort = isVisited; + } + + bool IsVisitedInTopoSort() { + return isVisitedInTopoSort; + } + + void SetVisitedInExtendedFind() { + isVisitedInExtendedFind = true; + } + + bool IsVisitedInExtendedFind() const { + return isVisitedInExtendedFind; + } + + uint32 GetInsnNum() const { + return insnNum; + } + + void SetInsnNum(uint32 num) { + insnNum = num; + } + bool HasAmbiRegs() const { return hasAmbiRegs; } @@ -160,19 +189,12 @@ class CDGNode { lastFrameDef = frameInsn; } - MapleVector &GetTopoPreds() { - return topoPreds; + void InitTopoInRegionInfo(MemPool &tmpMp, MapleAllocator &tmpAlloc) { + topoPredInRegion = tmpMp.New>(tmpAlloc.Adapter()); } - void AddTopoPred(CDGNode *pred) { - 
(void)topoPreds.emplace_back(pred); - } - - void RemoveTopoPred(CDGNode *pred) { - auto it = std::find(topoPreds.begin(), topoPreds.end(), pred); - if (it != topoPreds.end()) { - (void)topoPreds.erase(it); - } + void ClearTopoInRegionInfo() { + topoPredInRegion = nullptr; } void InitDataDepInfo(MemPool &tmpMp, MapleAllocator &tmpAlloc, uint32 maxRegNum) { @@ -196,7 +218,6 @@ class CDGNode { lastCallInsn = nullptr; lastFrameDef = nullptr; lastComments.clear(); - predCDGNodes.clear(); regDefs = nullptr; regUses = nullptr; @@ -230,19 +251,19 @@ class CDGNode { ambiInsns->clear(); } - Insn *GetLatestDefInsn(regno_t regNO) const { + Insn *GetLatestDefInsn(regno_t regNO) { return (*regDefs)[regNO]; } - void SetLatestDefInsn(regno_t regNO, Insn *defInsn) const { + void SetLatestDefInsn(regno_t regNO, Insn *defInsn) { (*regDefs)[regNO] = defInsn; } - RegList *GetUseInsnChain(regno_t regNO) const { + RegList *GetUseInsnChain(regno_t regNO) { return (*regUses)[regNO]; } - void AppendUseInsnChain(regno_t regNO, Insn *useInsn, MemPool &mp, bool beforeRA) const { + void AppendUseInsnChain(regno_t regNO, Insn *useInsn, MemPool &mp, bool beforeRA) { CHECK_FATAL(useInsn != nullptr, "invalid useInsn"); auto *newUse = mp.New(); newUse->insn = useInsn; @@ -266,15 +287,15 @@ class CDGNode { } } - void ClearUseInsnChain(regno_t regNO) const { + void ClearUseInsnChain(regno_t regNO) { (*regUses)[regNO] = nullptr; } - MapleVector &GetStackUseInsns() const { + MapleVector &GetStackUseInsns() { return *stackUses; } - void AddStackUseInsn(Insn *stackInsn) const { + void AddStackUseInsn(Insn *stackInsn) { stackUses->emplace_back(stackInsn); } @@ -282,7 +303,7 @@ class CDGNode { return *stackDefs; } - void AddStackDefInsn(Insn *stackInsn) const { + void AddStackDefInsn(Insn *stackInsn) { stackDefs->emplace_back(stackInsn); } @@ -290,7 +311,7 @@ class CDGNode { return *heapUses; } - void AddHeapUseInsn(Insn *heapInsn) { + void AddHeapUseInsn(Insn *heapInsn) const { 
heapUses->emplace_back(heapInsn); } @@ -298,7 +319,7 @@ class CDGNode { return *heapDefs; } - void AddHeapDefInsn(Insn *heapInsn) { + void AddHeapDefInsn(Insn *heapInsn) const { heapDefs->emplace_back(heapInsn); } @@ -314,10 +335,18 @@ class CDGNode { return *ambiInsns; } - void AddAmbiguousInsn(Insn *ambiInsn) { + void AddAmbiguousInsn(Insn *ambiInsn) const { ambiInsns->emplace_back(ambiInsn); } + MapleSet &GetTopoPredInRegion() { + return *topoPredInRegion; + } + + void InsertVisitedTopoPredInRegion(CDGNodeId nodeId) const { + topoPredInRegion->insert(nodeId); + } + MapleVector &GetLastComments() { return lastComments; } @@ -335,7 +364,7 @@ class CDGNode { lastComments.clear(); } - void AddPseudoSepNodes(DepNode *node) { + void AddPseudoSepNodes(DepNode *node) const { pseudoSepNodes->emplace_back(node); } @@ -343,6 +372,60 @@ class CDGNode { return *ehInRegs; } + MapleVector &GetAllDataNodes() { + return dataNodes; + } + + void AddDataNode(DepNode *depNode) { + (void)dataNodes.emplace_back(depNode); + } + + void ClearDataNodes() { + dataNodes.clear(); + } + + MapleVector &GetCfiInsns() { + return cfiInsns; + } + + void AddCfiInsn(Insn *cfiInsn) { + (void)cfiInsns.emplace_back(cfiInsn); + } + + void RemoveDepNodeFromDataNodes(DepNode &depNode) { + for (auto iter = dataNodes.begin(); iter != dataNodes.end(); ++iter) { + if (*iter == &depNode) { + void(dataNodes.erase(iter)); + break; + } + } + } + + void InitPredNodeSumInRegion(int32 predSum) { + CHECK_FATAL(predSum >= 0, "invalid predSum"); + predNodesInRegion = predSum; + } + + void DecPredNodeSumInRegion() { + predNodesInRegion--; + } + + bool IsAllPredInRegionProcessed() const { + return (predNodesInRegion == 0); + } + + uint32 &GetNodeSum() { + return nodeSum; + } + + void AccNodeSum() { + nodeSum++; + } + + void SetNodeSum(uint32 sum) { + nodeSum = sum; + } + bool operator!=(const CDGNode &node) { if (this != &node) { return true; @@ -364,6 +447,9 @@ class CDGNode { bool isExitNode = false; MapleVector 
outEdges; MapleVector inEdges; + bool isVisitedInTopoSort = false; // for sorting nodes in region by topological order + bool isVisitedInExtendedFind = false; // for finding a fallthrough path as a region + uint32 insnNum = 0; // record insn total num of BB /* * The following structures are used to record data flow infos in building data dependence among insns */ @@ -371,7 +457,6 @@ class CDGNode { Insn *membarInsn = nullptr; Insn *lastCallInsn = nullptr; Insn *lastFrameDef = nullptr; - MapleVector topoPreds; // For visit nodes by topological order in the region, it will change dynamically MapleVector *regDefs = nullptr; // the index is regNO, record the latest defInsn in the curBB MapleVector *regUses = nullptr; // the index is regNO MapleVector *stackUses = nullptr; @@ -382,8 +467,21 @@ class CDGNode { MapleVector *ambiInsns = nullptr; MapleVector *pseudoSepNodes = nullptr; MapleSet *ehInRegs = nullptr; + MapleSet *topoPredInRegion = nullptr; MapleVector lastComments; - MapleVector predCDGNodes; // predecessor nodes of the curBB in CFG + MapleVector dataNodes; + MapleVector cfiInsns; + /* + * For computing topological order of cdgNodes in a region, + * which is initialized to the number of pred nodes in CFG at the beginning of processing the region, + * and change dynamically + */ + int32 predNodesInRegion = -1; + /* + * For intra-block dda: it accumulates from the first insn (nodeSum = 1) of bb + * For inter-block dda: it accumulates from the maximum of nodeSum in all the predecessor of cur cdgNode + */ + uint32 nodeSum = 0; }; class CDGEdge { @@ -457,7 +555,9 @@ class CDGRegion { public: CDGRegion(CDGRegionId rId, MapleAllocator &alloc) : id(rId), memberNodes(alloc.Adapter()), cdEdges(alloc.Adapter()) {} - virtual ~CDGRegion() = default; + virtual ~CDGRegion() { + root = nullptr; + } CDGRegionId GetRegionId() { return id; @@ -486,8 +586,13 @@ class CDGRegion { (void)memberNodes.erase(it); } - void ClearMemberNodes() { - memberNodes.clear(); + CDGNode 
*GetCDGNodeById(CDGNodeId nodeId) { + for (auto cdgNode : memberNodes) { + if (cdgNode->GetNodeId() == nodeId) { + return cdgNode; + } + } + return nullptr; } MapleVector &GetCDEdges() { @@ -512,10 +617,23 @@ class CDGRegion { return maxId; } + void SetRegionRoot(CDGNode &node) { + root = &node; + } + + CDGNode *GetRegionRoot() { + return root; + } + private: CDGRegionId id; - MapleVector memberNodes; // the nodes in CDGRegion by out-of-order - MapleVector cdEdges; // the control dependence sets of the parent node + MapleVector memberNodes; // The nodes in CDGRegion by topological order + /* + * The control dependence sets of the parent node. + * If it is a general non-linear region, the cdEdges is empty. + */ + MapleVector cdEdges; + CDGNode *root = nullptr; }; /* @@ -580,7 +698,7 @@ class FCDG { private: MapleVector nodes; // all CDGNodes in FCDG that use nodeId as the index MapleVector fcds; // all forward-control-dependence in FCDG - MapleVector regions; // all regions in FCDG that use CDGRegionId as the index + MapleVector regions; // all regions in FCDG that use CDGRegionId as the index }; struct CDGOutEdgeComparator { diff --git a/src/mapleall/maple_be/include/cg/cg_cfg.h b/src/mapleall/maple_be/include/cg/cg_cfg.h index 180b2463a32ed658815cb04e31365d2246c4294d..ebf63f87c8710a8f7d9fbf4eae0b813bdd8a9f50 100644 --- a/src/mapleall/maple_be/include/cg/cg_cfg.h +++ b/src/mapleall/maple_be/include/cg/cg_cfg.h @@ -64,6 +64,10 @@ class InsnVisitor { virtual void FlipIfBB(BB &bb, LabelIdx ftLabel) const = 0; virtual BB *CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const = 0; + // Change ftBB to gotoBB: + // Append new jumpInsn in curBB. 
+ virtual void ModifyFathruBBToGotoBB(BB &bb, LabelIdx labelIdx) const = 0; + private: CGFunc *cgFunc; }; /* class InsnVisitor; */ @@ -74,7 +78,7 @@ class CGCFG { ~CGCFG() = default; - void BuildCFG() const; + void BuildCFG(); void CheckCFG(); void CheckCFGFreq(); uint32 ComputeCFGHash(); @@ -101,6 +105,12 @@ class CGCFG { /* Skip the successor of bb, directly jump to bb's successor'ssuccessor */ void RetargetJump(BB &srcBB, BB &targetBB) const; + /* + * Update the preds of CommonExitBB after changing cfg, + * We'd better do it once after cfgo opt + */ + void UpdateCommonExitBBInfo(); + /* Loop up if the given label is in the exception tables in LSDA */ static bool InLSDA(LabelIdx label, const EHFunc *ehFunc); static bool InSwitchTable(LabelIdx label, const CGFunc &func); diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h index 5b0297abbc65b48a7c94cca511049ff5fe577910..2ddc6aa432417b53f5467ad4ce2ac76b73f33256 100644 --- a/src/mapleall/maple_be/include/cg/cg_irbuilder.h +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -38,6 +38,7 @@ class InsnBuilder { Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2); Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3); Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4); + Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4, Operand &o5); Insn &BuildInsn(MOperator opCode, std::vector &opnds); Insn &BuildCfiInsn(MOperator opCode); @@ -66,13 +67,15 @@ class OperandBuilder { /* create an operand in cgfunc when no mempool is supplied */ ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); + ImmOperand &CreateImm(uint32 size, int64 value, bool isSigned, MemPool *mp = nullptr); ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); OfstOperand &CreateOfst(int64 
offset, uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr); MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size, MemPool *mp = nullptr); - MemOperand &CreateMem(uint32 size, RegOperand &baseOpnd, ImmOperand &offImm, MemPool *mp = nullptr); - MemOperand &CreateMem(uint32 size, RegOperand &baseOpnd, ImmOperand &offImm, const MIRSymbol &symbol, + MemOperand &CreateMem(uint32 size, RegOperand &baseOpnd, ImmOperand &ofstOperand, MemPool *mp = nullptr); + MemOperand &CreateMem(uint32 size, RegOperand &baseOpnd, ImmOperand &ofstOperand, const MIRSymbol &symbol, MemPool *mp = nullptr); + BitShiftOperand &CreateBitShift(BitShiftOperand::ShiftOp op, uint32 amount, uint32 bitLen, MemPool *mp = nullptr); RegOperand &CreateVReg(uint32 size, RegType type, MemPool *mp = nullptr); RegOperand &CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp = nullptr); RegOperand &CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp = nullptr); diff --git a/src/mapleall/maple_be/include/cg/cg_mc_ssa_pre.h b/src/mapleall/maple_be/include/cg/cg_mc_ssa_pre.h index b50e94d35ca48ae27582d29bff66b69fdaeaa942..c4b131b70c7f488a05b039ab543d194b356f930e 100644 --- a/src/mapleall/maple_be/include/cg/cg_mc_ssa_pre.h +++ b/src/mapleall/maple_be/include/cg/cg_mc_ssa_pre.h @@ -72,18 +72,20 @@ class McSSAPre : public SSAPre { occ2RGNodeMap(preAllocator.Adapter()), maxFlowRoutes(preAllocator.Adapter()), minCut(preAllocator.Adapter()) {} - ~McSSAPre() = default; + ~McSSAPre() { + sink = nullptr; + } void ApplyMCSSAPre(); private: // step 8 willbeavail - void ResetMCWillBeAvail(PhiOcc *phiOcc) const; + void ResetMCWillBeAvail(PhiOcc *occ) const; void ComputeMCWillBeAvail() const; // step 7 max flow/min cut - bool AmongMinCut(RGNode *, uint32 idx) const; + bool AmongMinCut(const RGNode *nd, uint32 idx) const; void DumpRGToFile(); // dump reduced graph to dot file - bool IncludedEarlier(Visit **cut, Visit *curVisit, uint32 
nextRouteIdx); - void RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route *route); + bool IncludedEarlier(Visit **cut, const Visit *curVisit, uint32 nextRouteIdx) const; + void RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route *route) const; bool SearchRelaxedMinCut(Visit **cut, std::unordered_multiset &cutSet, uint32 nextRouteIdx, FreqType flowSoFar); bool SearchMinCut(Visit **cut, std::unordered_multiset &cutSet, uint32 nextRouteIdx, FreqType flowSoFar); @@ -109,9 +111,9 @@ class McSSAPre : public SSAPre { uint32 numSourceEdges = 0; MapleVector maxFlowRoutes; uint32 nextRGNodeId = 1; // 0 is reserved - FreqType maxFlowValue; + FreqType maxFlowValue = 0; // relax maxFlowValue to avoid excessive mincut search time when number of routes is large - FreqType relaxedMaxFlowValue; + FreqType relaxedMaxFlowValue = 0; MapleVector minCut; // an array of Visits* to represent the minCut }; diff --git a/src/mapleall/maple_be/include/cg/cg_occur.h b/src/mapleall/maple_be/include/cg/cg_occur.h index 6db5e0fdd393eca1f81973d776e8b5688c8463df..94aba474d8abe97c0d3e15719e31f8e0866950b1 100644 --- a/src/mapleall/maple_be/include/cg/cg_occur.h +++ b/src/mapleall/maple_be/include/cg/cg_occur.h @@ -132,7 +132,7 @@ class CgUseOcc : public CgOccur { : CgOccur(kOccUse, bb, insn, opnd), needReload(false) {} - ~CgUseOcc() = default; + ~CgUseOcc() override = default; bool Reload() const { return needReload; @@ -163,7 +163,7 @@ class CgUseOcc : public CgOccur { class CgStoreOcc : public CgOccur { public: CgStoreOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccStore, bb, insn, opnd) {} - ~CgStoreOcc() = default; + ~CgStoreOcc() override = default; bool Reload() const { return needReload; @@ -194,7 +194,7 @@ class CgStoreOcc : public CgOccur { class CgDefOcc : public CgOccur { public: CgDefOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccDef, bb, insn, opnd) {} - ~CgDefOcc() = default; + ~CgDefOcc() override = default; bool Loaded() const { return needStore; 
@@ -237,7 +237,7 @@ class CgPhiOcc : public CgOccur { isDownSafe(!bb.IsCatch()), phiOpnds(alloc.Adapter()) {} - virtual ~CgPhiOcc() = default; + ~CgPhiOcc() override = default; bool IsDownSafe() const { return isDownSafe; @@ -320,7 +320,7 @@ class CgPhiOpndOcc : public CgOccur { hasRealUse(false), phiOcc(defPhi) {} - ~CgPhiOpndOcc() = default; + ~CgPhiOpndOcc() override = default; bool HasRealUse() const { return hasRealUse; diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h index a077d4c02b9581d75d764459b0b9770587f20275..ebe111189e8b2c73ad1cf86a1f45627917bd7732 100644 --- a/src/mapleall/maple_be/include/cg/cg_option.h +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -96,9 +96,17 @@ class CGOptions { }; enum VisibilityType : uint8 { - kDefault, - kHidden, - kProtected + kDefaultVisibility, + kHiddenVisibility, + kProtectedVisibility + }; + + enum TLSModel : uint8 { + kDefaultTLSModel, + kLocalExecTLSModel, + kLocalDynamicTLSModel, + kGlobalDynamicTLSModel, + kInitialExecTLSModel, }; enum EmitFileType : uint8 { @@ -106,6 +114,12 @@ class CGOptions { kObj, kEmitNone, }; + + enum FramePointerType : uint8 { + kNoneFP, + kNonLeafFP, + kAllFP, + }; /* * The default CG option values are: * Don't BE_QUITE; verbose, @@ -291,10 +305,14 @@ class CGOptions { void SetRange(const std::string &str, const std::string &cmd, Range &subRange) const; void SetTargetMachine(const std::string &str); - int32 GetOptimizeLevel() const { + uint32 GetOptimizeLevel() const { return optimizeLevel; } + static bool IsOptimized() { + return CGOptions::GetInstance().GetOptimizeLevel() > kLevel0; + } + bool IsRunCG() const { return runCGFlag; } @@ -481,6 +499,21 @@ class CGOptions { duplicateAsmFile = fileName; } + static const std::string &GetCPU() { + return cpu; + } + + static void SetCPU(const std::string &core) { + cpu = core; + } + + static bool IsCortexA53() { + if (cpu == "cortex-a53") { + return true; + } + return false; + } + 
static bool UseRange() { return range.enable; } @@ -627,6 +660,38 @@ class CGOptions { return doCGSSA && !flavorLmbc; } + static void DisableLayoutColdPath() { + doLayoutColdPath = false; + } + + static void EnableLayoutColdPath() { + doLayoutColdPath = true; + } + + static bool DoLayoutColdPath() { + return doLayoutColdPath; + } + + static void DisableGlobalSchedule() { + doGlobalSchedule = false; + } + + static void EnableGlobalSchedule() { + doGlobalSchedule = true; + } + + static bool DoGlobalSchedule() { + return doGlobalSchedule; + } + + static bool DoLocalSchedule() { + return doLocalSchedule; + } + + static bool DoVerifySchedule() { + return doVerifySchedule; + } + static void DisableCalleeEnsureParam() { calleeEnsureParam = false; } @@ -1018,6 +1083,10 @@ class CGOptions { return picMode > kClose; } + static bool IsShlib() { + return IsPIC() && !IsPIE(); + } + void SetPICOptionHelper(CGOptions::PICMode mode) { SetPICMode(mode); SetOption(CGOptions::kGenPic); @@ -1262,15 +1331,11 @@ class CGOptions { return functionSections; } - static void EnableFramePointer() { - useFramePointer = true; - } - - static void DisableFramePointer() { - useFramePointer = false; + static void SetFramePointer(FramePointerType fpType) { + useFramePointer = fpType; } - static bool UseFramePointer() { + static FramePointerType UseFramePointer() { return useFramePointer; } @@ -1362,6 +1427,18 @@ class CGOptions { return funcAlignPow; } + static bool DoLiteProfVerify() { + return liteProfVerify; + } + + static void EnableLiteProfVerify() { + liteProfVerify = true; + } + + static void DisableLiteProfVerify() { + liteProfVerify = false; + } + static bool DoLiteProfGen() { return liteProfGen; } @@ -1398,6 +1475,22 @@ class CGOptions { return functionProrityFile; } + static void SetFunctionReorderAlgorithm(std::string algorithm) { + functionReorderAlgorithm = algorithm; + } + + static std::string GetFunctionReorderAlgorithm() { + return functionReorderAlgorithm; + } + + static void 
SetFunctionReorderProfile(std::string profile) { + functionReorderProfile = profile; + } + + static std::string GetFunctionReorderProfile() { + return functionReorderProfile; + } + static void SetLitePgoOutputFunction(std::string iofile) { litePgoOutputFunction = iofile; } @@ -1428,9 +1521,9 @@ class CGOptions { static void SetVisibilityType(const std::string &type) { if (type == "hidden" || type == "internal") { - visibilityType = kHidden; + visibilityType = kHiddenVisibility; } else if (type == "protected") { - visibilityType = kProtected; + visibilityType = kProtectedVisibility; } else { CHECK_FATAL(type == "default", "unsupported visibility type. Only support: default|hidden|protected|internal"); } @@ -1448,12 +1541,30 @@ class CGOptions { return noplt; } + static void SetTLSModel(const std::string &model) { + if (model == "global-dynamic") { + tlsModel = kGlobalDynamicTLSModel; + } else if (model == "local-dynamic") { + tlsModel = kLocalDynamicTLSModel; + } else if (model == "initial-exec") { + tlsModel = kInitialExecTLSModel; + } else if (model == "local-exec") { + tlsModel = kLocalExecTLSModel; + } else { + CHECK_FATAL_FALSE("unsupported tls model."); + } + } + + static TLSModel GetTLSModel() { + return tlsModel; + } + private: std::vector phaseSequence; bool runCGFlag = true; bool generateObjectMap = true; uint32 parserOption = 0; - int32 optimizeLevel = 0; + uint32 optimizeLevel = 0; GenerateFlag generateFlag = 0; OptionFlag options = kUndefined; @@ -1473,6 +1584,7 @@ class CGOptions { static std::string skipAfter; static std::string dumpFunc; static std::string duplicateAsmFile; + static std::string cpu; static bool optForSize; static bool enableHotColdSplit; static bool useBarriersForVolatile; @@ -1480,6 +1592,10 @@ class CGOptions { static bool cgBigEndian; static bool doEBO; static bool doCGSSA; + static bool doLayoutColdPath; + static bool doGlobalSchedule; + static bool doLocalSchedule; + static bool doVerifySchedule; static bool calleeEnsureParam; 
static bool doIPARA; static bool doCFGO; @@ -1528,7 +1644,7 @@ class CGOptions { /* if true generate adrp/ldr/blr */ static bool genLongCalls; static bool functionSections; - static bool useFramePointer; + static FramePointerType useFramePointer; static bool gcOnly; static bool doPreSchedule; static bool emitBlockMarker; @@ -1561,13 +1677,18 @@ class CGOptions { static uint32 funcAlignPow; static bool liteProfGen; static bool liteProfUse; + static bool liteProfVerify; static std::string litePgoOutputFunction; static std::string litePgoWhiteList; static std::string instrumentationOutPutPath; static std::string liteProfile; static std::string functionProrityFile; + static std::string functionReorderAlgorithm; + static std::string functionReorderProfile; static bool doAggrOpt; static VisibilityType visibilityType; + static TLSModel tlsModel; + static bool doTlsGlobalWarmUpOpt; static bool noplt; }; } /* namespace maplebe */ @@ -1576,7 +1697,8 @@ class CGOptions { #define SET_END(SET) ((SET).end()) #define IS_STR_IN_SET(SET, NAME) (SET_FIND(SET, NAME) != SET_END(SET)) -#define CG_DEBUG_FUNC(f) \ +#define \ + CG_DEBUG_FUNC(f) \ (!maplebe::CGOptions::GetDumpPhases().empty() && maplebe::CGOptions::IsDumpFunc((f).GetName()) && \ maplebe::CGOptions::GetDumpPhases().find(PhaseName()) != maplebe::CGOptions::GetDumpPhases().end()) #ifndef TRACE_PHASE diff --git a/src/mapleall/maple_be/include/cg/cg_options.h b/src/mapleall/maple_be/include/cg/cg_options.h index 50a7109e9d2f0c564b470fb1fb81c6417b7acfbf..ae36abada222163e0299e0dc17490361a9d95d76 100644 --- a/src/mapleall/maple_be/include/cg/cg_options.h +++ b/src/mapleall/maple_be/include/cg/cg_options.h @@ -118,10 +118,13 @@ extern maplecl::Option filetype; extern maplecl::Option longCalls; extern maplecl::Option functionSections; extern maplecl::Option omitFramePointer; +extern maplecl::Option omitLeafFramePointer; extern maplecl::Option fastMath; extern maplecl::Option tailcall; extern maplecl::Option alignAnalysis; extern 
maplecl::Option cgSsa; +extern maplecl::Option layoutColdPath; +extern maplecl::Option globalSchedule; extern maplecl::Option calleeEnsureParam; extern maplecl::Option common; extern maplecl::Option condbrAlign; @@ -136,6 +139,7 @@ extern maplecl::Option instrumentationFile; extern maplecl::Option litePgoWhiteList; extern maplecl::Option litePgoFile; extern maplecl::Option functionPriority; +extern maplecl::Option litePgoVerify; } #endif /* MAPLE_BE_INCLUDE_CG_OPTIONS_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_pgo_gen.h b/src/mapleall/maple_be/include/cg/cg_pgo_gen.h index 99bf8c9d99e5fefea4150bfb1fd951c79cbea944..89a7f0f6092b94a39de11e285c0535ae661a90b3 100644 --- a/src/mapleall/maple_be/include/cg/cg_pgo_gen.h +++ b/src/mapleall/maple_be/include/cg/cg_pgo_gen.h @@ -23,7 +23,9 @@ class CGProfGen { CGProfGen(CGFunc &curF, MemPool &mp) : f(&curF), instrumenter(mp) {} - virtual ~CGProfGen() = default; + virtual ~CGProfGen() { + f = nullptr; + } void InstrumentFunction(); void CreateProfileCalls(); diff --git a/src/mapleall/maple_be/include/cg/cg_pgo_use.h b/src/mapleall/maple_be/include/cg/cg_pgo_use.h index 58eaa806a9576e6264671b395f00dc71f3978953..9da77c62bcfd9a94887bb318f67d9955a4a99640 100644 --- a/src/mapleall/maple_be/include/cg/cg_pgo_use.h +++ b/src/mapleall/maple_be/include/cg/cg_pgo_use.h @@ -113,11 +113,8 @@ class CGProfUse { domInfo(dom), bbSplit(newbbinsplit), instrumenter(mp), - bb2chain(puAlloc.Adapter()), - readyToLayoutChains(puAlloc.Adapter()), layoutBBs(puAlloc.Adapter()), - laidOut(puAlloc.Adapter()), - frequencyReversePostOrderBBList(puAlloc.Adapter()) {} + laidOut(puAlloc.Adapter()) {} bool ApplyPGOData(); void LayoutBBwithProfile(); @@ -128,27 +125,12 @@ class CGProfUse { DomAnalysis *domInfo = nullptr; MapleSet bbSplit; private: - struct BBOrderEle { - BBOrderEle(uint32 f, uint32 rpoIdx, BB *ibb) - : frequency(f), - reversePostOrderIdx(rpoIdx), - bb(ibb) {} - uint32 frequency; - uint32 reversePostOrderIdx; - BB* bb; - bool operator 
< (const BBOrderEle &bbEle) const { - if (frequency == bbEle.frequency) { - return reversePostOrderIdx < bbEle.reversePostOrderIdx; - } else { - return frequency > bbEle.frequency; - } - } - }; PGOInstrumentTemplate> instrumenter; std::unordered_map*> bbProfileInfo; void ApplyOnBB(); - bool VerifyProfiledata(const std::vector &iBBs, LiteProfile::BBInfo &bbInfo); + // verify profile data according to 1. cfg hash 2. bb counter number + bool VerifyProfileData(const std::vector &iBBs, LiteProfile::BBInfo &bbInfo); void InitBBEdgeInfo(); /* compute all edge freq in the cfg without consider exception */ void ComputeEdgeFreq(); @@ -158,34 +140,15 @@ class CGProfUse { BBUseInfo *GetOrCreateBBUseInfo(const maplebe::BB &bb, bool notCreate = false); void SetEdgeCount(maple::BBUseEdge &e, size_t count); - /* functions && members for PGO layout */ - void BuildChainForFunc(); - void BuildChainForLoops(); - void BuildChainForLoop(CGFuncLoops &loop, MapleVector *context); - void InitBBChains(); - void DoBuildChain(const BB &header, BBChain &chain, const MapleVector *context); - BB *GetBestSucc(BB &bb, const BBChain &chain, const MapleVector *context, bool considerBetterPred); - BB *FindBestStartBBForLoop(CGFuncLoops &loop, const MapleVector *context); - - bool IsBBInCurrContext(const BB &bb, const MapleVector *context) const; - bool IsCandidateSucc(const BB &bb, const BB &succ, const MapleVector *context); - bool HasBetterLayoutPred(const BB &bb, const BB &succ) const; - void AddBBProf(BB &bb); void AddBB(BB &bb); void ReTargetSuccBB(BB &bb, BB &fallthru); - void ChangeToFallthruFromGoto(BB &bb); + void ChangeToFallthruFromGoto(BB &bb) const; LabelIdx GetOrCreateBBLabIdx(BB &bb) const; - void InitFrequencyReversePostOrderBBList(); - - MapleVector bb2chain; - MapleSet readyToLayoutChains; bool debugChainLayout = false; - uint32 rpoSearchPos = 0; // reverse post order search beginning position MapleVector layoutBBs; // gives the determined layout order MapleVector laidOut; // 
indexed by bbid to tell if has been laid out - MapleSet frequencyReversePostOrderBBList; // frequency first, post order second; }; MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPgoUse, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/cg_profile_use.h b/src/mapleall/maple_be/include/cg/cg_profile_use.h index f33bb8bb058ce9dac614e08d29e00cbf6510099b..33a821d75c1381b497d196cefe646a3852f0da28 100644 --- a/src/mapleall/maple_be/include/cg/cg_profile_use.h +++ b/src/mapleall/maple_be/include/cg/cg_profile_use.h @@ -35,7 +35,9 @@ class CgProfUse { : cgFunc(&f), memPool(&mp), alloc(&mp), allEdges(alloc.Adapter()), BB2InEdges(alloc.Adapter()), BB2OutEdges(alloc.Adapter()) {} - virtual ~CgProfUse() = default; + virtual ~CgProfUse() { + memPool = nullptr; + } void setupProf(); @@ -92,7 +94,7 @@ class CgProfUse { // Type 1 inference uint32 knownEdges1 = 0; FreqType freqSum1 = 0; - Edge * unknownEdge1 = nullptr; + Edge *unknownEdge1 = nullptr; MapleMap>::iterator iit = BB2InEdges.find(bb); if ((iit != BB2InEdges.end()) && (iit->second.size() != 0)) { for (Edge *e : iit->second) { diff --git a/src/mapleall/maple_be/include/cg/cg_rce.h b/src/mapleall/maple_be/include/cg/cg_rce.h index 0b0ce08d833d3eef259f6af752690db3bab91a48..ffe8697b8228471eafef88d01938dc3f8e1ce356 100644 --- a/src/mapleall/maple_be/include/cg/cg_rce.h +++ b/src/mapleall/maple_be/include/cg/cg_rce.h @@ -22,12 +22,13 @@ namespace maplebe { #define CG_RCE_DUMP CG_DEBUG_FUNC(*cgFunc) -using InsnSet = std::set; static uint32 g_count = 0; class RedundantComputeElim { public: RedundantComputeElim(CGFunc &f, CGSSAInfo &info, MemPool &mp) : cgFunc(&f), ssaInfo(&info), rceAlloc(&mp) {} - virtual ~RedundantComputeElim() = default; + virtual ~RedundantComputeElim() { + ssaInfo = nullptr; + } std::string PhaseName() const { return "cgredundantcompelim"; diff --git a/src/mapleall/maple_be/include/cg/cg_ssa.h b/src/mapleall/maple_be/include/cg/cg_ssa.h index 
9319847ce4e03ac36aab82d0a42ac62ba1b3267c..06d5d740f3f18fd9acdbc12735291428ed27c08f 100644 --- a/src/mapleall/maple_be/include/cg/cg_ssa.h +++ b/src/mapleall/maple_be/include/cg/cg_ssa.h @@ -274,7 +274,7 @@ class SSAOperandVisitor : public OperandVisitorBase, public: SSAOperandVisitor(Insn &cInsn, const OpndDesc &cDes, uint32 idx) : insn(&cInsn), opndDes(&cDes), idx(idx) {} SSAOperandVisitor() = default; - virtual ~SSAOperandVisitor() = default; + ~SSAOperandVisitor() override = default; void SetInsnOpndInfo(Insn &cInsn, const OpndDesc &cDes, uint32 index) { insn = &cInsn; opndDes = &cDes; @@ -292,7 +292,7 @@ class SSAOperandDumpVisitor : public OperandVisitorBase, public OperandVisitor { public: explicit SSAOperandDumpVisitor(const MapleUnorderedMap &allssa) : allSSAOperands(allssa) {} - virtual ~SSAOperandDumpVisitor() = default; + ~SSAOperandDumpVisitor() override = default; void SetHasDumped() { hasDumped = true; } diff --git a/src/mapleall/maple_be/include/cg/cg_ssa_pre.h b/src/mapleall/maple_be/include/cg/cg_ssa_pre.h index 37dd2fb09000f34fbff005f7458561bf5a3de93c..ad3d66c524214ffe4f8bc99ac576426d12c61b18 100644 --- a/src/mapleall/maple_be/include/cg/cg_ssa_pre.h +++ b/src/mapleall/maple_be/include/cg/cg_ssa_pre.h @@ -74,7 +74,7 @@ class Occ { class RealOcc : public Occ { public: explicit RealOcc(BB *bb): Occ(kAOccReal, bb) {} - virtual ~RealOcc() = default; + ~RealOcc() override = default; void Dump() const override { LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId(); @@ -90,7 +90,7 @@ class PhiOcc; class PhiOpndOcc : public Occ { public: explicit PhiOpndOcc(BB *bb): Occ(kAOccPhiOpnd, bb) {} - virtual ~PhiOpndOcc() = default; + ~PhiOpndOcc() override = default; void Dump() const override { LogInfo::MapleLogger() << "PhiOpndOcc at bb" << cgbb->GetId() << " classId" << classId; @@ -106,7 +106,7 @@ class PhiOcc : public Occ { public: PhiOcc(BB *bb, MapleAllocator &alloc) : Occ(kAOccPhi, bb), phiOpnds(alloc.Adapter()) {} - virtual ~PhiOcc() = default; + 
~PhiOcc() override = default; bool WillBeAvail() const { return isCanBeAvail && !isLater; @@ -136,7 +136,7 @@ class PhiOcc : public Occ { class ExitOcc : public Occ { public: explicit ExitOcc(BB *bb) : Occ(kAOccExit, bb) {} - virtual ~ExitOcc() = default; + ~ExitOcc() override = default; void Dump() const override { LogInfo::MapleLogger() << "ExitOcc at bb" << cgbb->GetId(); diff --git a/src/mapleall/maple_be/include/cg/cg_ssu_pre.h b/src/mapleall/maple_be/include/cg/cg_ssu_pre.h index ffb451135ff2fdccfef8863073d4703b75ec4bef..7f64ac6c1163e934012fd8cc30b1d55db1476943 100644 --- a/src/mapleall/maple_be/include/cg/cg_ssu_pre.h +++ b/src/mapleall/maple_be/include/cg/cg_ssu_pre.h @@ -77,7 +77,7 @@ class SOcc { class SRealOcc : public SOcc { public: explicit SRealOcc(BB *bb) : SOcc(kSOccReal, bb) {} - virtual ~SRealOcc() = default; + ~SRealOcc() override = default; void Dump() const override { LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId(); @@ -107,7 +107,7 @@ class SLambdaOcc : public SOcc { public: SLambdaOcc(BB *bb, MapleAllocator &alloc) : SOcc(kSOccLambda, bb), lambdaRes(alloc.Adapter()) {} - virtual ~SLambdaOcc() = default; + ~SLambdaOcc() override = default; bool WillBeAnt() const { return isCanBeAnt && !isEarlier; @@ -133,7 +133,7 @@ class SLambdaOcc : public SOcc { class SEntryOcc : public SOcc { public: explicit SEntryOcc(BB *bb) : SOcc(kSOccEntry, bb) {} - virtual ~SEntryOcc() = default; + ~SEntryOcc() override = default; void Dump() const override { LogInfo::MapleLogger() << "EntryOcc at bb" << cgbb->GetId(); @@ -143,7 +143,7 @@ class SEntryOcc : public SOcc { class SKillOcc : public SOcc { public: explicit SKillOcc(BB *bb) : SOcc(kSOccKill, bb) {} - virtual ~SKillOcc() = default; + ~SKillOcc() override = default; void Dump() const override { LogInfo::MapleLogger() << "KillOcc at bb" << cgbb->GetId(); diff --git a/src/mapleall/maple_be/include/cg/cg_validbit_opt.h b/src/mapleall/maple_be/include/cg/cg_validbit_opt.h index 
d60abdf83300e061da7dce433c2199f436cf3bba..9a010a48c14a13e5455438f069c6ec99709359b4 100644 --- a/src/mapleall/maple_be/include/cg/cg_validbit_opt.h +++ b/src/mapleall/maple_be/include/cg/cg_validbit_opt.h @@ -20,15 +20,19 @@ #include "bb.h" #include "insn.h" #include "cg_ssa.h" +#include "reg_coalesce.h" +#include "cg_dce.h" namespace maplebe { #define CG_VALIDBIT_OPT_DUMP CG_DEBUG_FUNC(*cgFunc) class ValidBitPattern { public: ValidBitPattern(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {} + ValidBitPattern(CGFunc &f, CGSSAInfo &info, LiveIntervalAnalysis &ll) : cgFunc(&f), ssaInfo(&info), regll(&ll) {} virtual ~ValidBitPattern() { cgFunc = nullptr; ssaInfo = nullptr; + regll = nullptr; } std::string PhaseName() const { return "cgvalidbitopt"; @@ -37,20 +41,25 @@ class ValidBitPattern { virtual std::string GetPatternName() = 0; virtual bool CheckCondition(Insn &insn) = 0; virtual void Run(BB &bb, Insn &insn) = 0; - InsnSet GetAllUseInsn(const RegOperand &defReg); + InsnSet GetAllUseInsn(const RegOperand &defReg) const; void DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn); protected: CGFunc *cgFunc; CGSSAInfo *ssaInfo; + LiveIntervalAnalysis *regll = nullptr; }; class ValidBitOpt { public: - ValidBitOpt(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {} + ValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &info, LiveIntervalAnalysis &ll) :memPool(&mp), cgFunc(&f), ssaInfo(&info), regll(&ll) { + cgDce = f.GetCG()->CreateCGDce(mp, f, info); + } virtual ~ValidBitOpt() { + memPool = nullptr; cgFunc = nullptr; ssaInfo = nullptr; + regll = nullptr; } void Run(); static uint32 GetImmValidBit(int64 value, uint32 size) { @@ -75,19 +84,28 @@ class ValidBitOpt { } template - void Optimize(BB &bb, Insn &insn) const { + void OptimizeProp(BB &bb, Insn &insn) const { + VBOpt opt(*cgFunc, *ssaInfo, *regll); + opt.Run(bb, insn); + } + + template + void OptimizeNoProp(BB &bb, Insn &insn) const { VBOpt opt(*cgFunc, *ssaInfo); 
opt.Run(bb, insn); } - virtual void DoOpt(BB &bb, Insn &insn) = 0; + virtual void DoOpt() = 0; void RectifyValidBitNum(); void RecoverValidBitNum(); virtual void SetValidBits(Insn &insn) = 0; virtual bool SetPhiValidBits(Insn &insn) = 0; protected: + MemPool *memPool; CGFunc *cgFunc; CGSSAInfo *ssaInfo; + LiveIntervalAnalysis *regll; + CGDce *cgDce = nullptr; }; MAPLE_FUNC_PHASE_DECLARE(CgValidBitOpt, maplebe::CGFunc) } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h index 0e49b6da40395c07a38fb7fe7d2f6d933e3148cd..31bf7b1f93f41c61c456934305c61234af141582 100644 --- a/src/mapleall/maple_be/include/cg/cgbb.h +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -22,6 +22,7 @@ #endif #include "insn.h" #include "sparse_datainfo.h" +#include "base_graph_node.h" /* Maple IR headers */ #include "mir_nodes.h" @@ -76,7 +77,7 @@ class CGFuncLoops; class CGFunc; class CDGNode; -class BB { +class BB : public maple::BaseGraphNode { public: enum BBKind : uint8 { kBBFallthru, /* default */ @@ -92,7 +93,7 @@ class BB { }; BB(uint32 bbID, MapleAllocator &mallocator) - : id(bbID), + : BaseGraphNode(bbID), // id(bbID), kind(kBBFallthru), /* kBBFallthru default kind */ labIdx(MIRLabelTable::GetDummyLabel()), preds(mallocator.Adapter()), @@ -109,7 +110,7 @@ class BB { rangeGotoLabelVec(mallocator.Adapter()), phiInsnList(mallocator.Adapter()) {} - virtual ~BB() = default; + ~BB() override = default; virtual BB *Clone(MemPool &memPool) const { BB *bb = memPool.Clone(*this); @@ -195,6 +196,27 @@ class BB { internalFlag1++; } + void AppendOtherBBInsn(Insn &insn) { + if (insn.GetPrev() != nullptr) { + insn.GetPrev()->SetNext(insn.GetNext()); + } + if (insn.GetNext() != nullptr) { + insn.GetNext()->SetPrev(insn.GetPrev()); + } + if (firstInsn != nullptr && lastInsn != nullptr) { + lastInsn->SetNext(&insn); + insn.SetPrev(lastInsn); + insn.SetNext(nullptr); + lastInsn = &insn; + } else { + firstInsn = lastInsn = &insn; + 
insn.SetPrev(nullptr); + insn.SetNext(nullptr); + } + insn.SetBB(this); + internalFlag1++; + } + void ReplaceInsn(Insn &insn, Insn &newInsn); void RemoveInsn(Insn &insn); @@ -206,7 +228,7 @@ class BB { void RemoveInsnSequence(Insn &insn, const Insn &nextInsn); /* prepend all insns from bb before insn */ - void InsertBeforeInsn(BB &fromBB, Insn &beforeInsn); + void InsertBeforeInsn(BB &fromBB, Insn &beforeInsn) const; /* append all insns from bb into this bb */ void AppendBBInsns(BB &bb); @@ -275,7 +297,7 @@ class BB { /* Number of instructions excluding DbgInsn and comments */ int32 NumInsn() const; uint32 GetId() const { - return id; + return GetID(); } uint32 GetLevel() const { return level; @@ -283,6 +305,9 @@ class BB { void SetLevel(uint32 arg) { level = arg; } + FreqType GetNodeFrequency() const override { + return static_cast(frequency); + } uint32 GetFrequency() const { return frequency; } @@ -295,7 +320,7 @@ class BB { void SetProfFreq(FreqType arg) { profFreq = arg; } - bool IsInColdSection() { + bool IsInColdSection() const { return inColdSection; } void SetColdSection() { @@ -365,7 +390,7 @@ class BB { FOR_BB_INSNS_REV(insn, this) { #if TARGAARCH64 if (insn->IsMachineInstruction() && !AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) { -#elif TARGX86_64 +#elif defined(TARGX86_64) && TARGX86_64 if (insn->IsMachineInstruction()) { #endif return insn; @@ -400,6 +425,30 @@ class BB { const std::size_t GetSuccsSize() const { return succs.size(); } + + // override interface of BaseGraphNode + const std::string GetIdentity() final { + return "BBId: " + std::to_string(GetID()); + } + + void GetOutNodes(std::vector &outNodes) const final { + outNodes.resize(succs.size(), nullptr); + std::copy(succs.begin(), succs.end(), outNodes.begin()); + } + + void GetOutNodes(std::vector &outNodes) final { + static_cast(this)->GetOutNodes(outNodes); + } + + void GetInNodes(std::vector &inNodes) const final { + inNodes.resize(preds.size(), nullptr); + 
std::copy(preds.begin(), preds.end(), inNodes.begin()); + } + + void GetInNodes(std::vector &inNodes) final { + static_cast(this)->GetInNodes(inNodes); + } + const MapleList &GetEhPreds() const { return ehPreds; } @@ -463,8 +512,8 @@ class BB { void PushFrontSuccs(BB &bb) { succs.push_front(&bb); } - void ErasePreds(MapleList::const_iterator it) { - preds.erase(it); + MapleList::iterator ErasePreds(MapleList::const_iterator it) { + return preds.erase(it); } void EraseSuccs(MapleList::const_iterator it) { succs.erase(it); @@ -535,8 +584,8 @@ class BB { CGFuncLoops *GetLoop() const { return loop; } - void SetLoop(CGFuncLoops &arg) { - loop = &arg; + void SetLoop(CGFuncLoops *arg) { + loop = arg; } bool GetLiveInChange() const { return liveInChange; @@ -690,16 +739,16 @@ class BB { void SetLiveIn(SparseDataInfo &arg) { liveIn = &arg; } - void SetLiveInBit(uint32 arg) const { + void SetLiveInBit(uint32 arg) { liveIn->SetBit(arg); } - void SetLiveInInfo(const SparseDataInfo &arg) const { + void SetLiveInInfo(const SparseDataInfo &arg) { *liveIn = arg; } - void LiveInOrBits(const SparseDataInfo &arg) const { + void LiveInOrBits(const SparseDataInfo &arg) { liveIn->OrBits(arg); } - void LiveInEnlargeCapacity(uint32 arg) const { + void LiveInEnlargeCapacity(uint32 arg) { liveIn->EnlargeCapacityToAdaptSize(arg); } void LiveInClearDataInfo() { @@ -715,13 +764,13 @@ class BB { void SetLiveOut(SparseDataInfo &arg) { liveOut = &arg; } - void SetLiveOutBit(uint32 arg) const { + void SetLiveOutBit(uint32 arg) { liveOut->SetBit(arg); } - void LiveOutOrBits(const SparseDataInfo &arg) const { + void LiveOutOrBits(const SparseDataInfo &arg) { liveOut->OrBits(arg); } - void LiveOutEnlargeCapacity(uint32 arg) const { + void LiveOutEnlargeCapacity(uint32 arg) { liveOut->EnlargeCapacityToAdaptSize(arg); } void LiveOutClearDataInfo() { @@ -734,13 +783,13 @@ class BB { void SetDef(SparseDataInfo &arg) { def = &arg; } - void SetDefBit(uint32 arg) const { + void SetDefBit(uint32 arg) { 
def->SetBit(arg); } - void DefResetAllBit() const { + void DefResetAllBit() { def->ResetAllBit(); } - void DefResetBit(uint32 arg) const { + void DefResetBit(uint32 arg) { def->ResetBit(arg); } void DefClearDataInfo() { @@ -753,13 +802,13 @@ class BB { void SetUse(SparseDataInfo &arg) { use = &arg; } - void SetUseBit(uint32 arg) const { + void SetUseBit(uint32 arg) { use->SetBit(arg); } - void UseResetAllBit() const { + void UseResetAllBit() { use->ResetAllBit(); } - void UseResetBit(uint32 arg) const { + void UseResetBit(uint32 arg) { use->ResetBit(arg); } void UseClearDataInfo() { @@ -795,6 +844,16 @@ class BB { succsFreq.resize(succs.size()); } + FreqType GetEdgeFrequency(const BaseGraphNode &node) const override { + auto edgeFreq = GetEdgeFreq(static_cast(node)); + return static_cast(edgeFreq); + } + + FreqType GetEdgeFrequency(size_t idx) const override { + auto edgeFreq = GetEdgeFreq(idx); + return static_cast(edgeFreq); + } + uint64 GetEdgeFreq(const BB &bb) const { auto iter = std::find(succs.begin(), succs.end(), &bb); if (iter == std::end(succs) || succs.size() > succsFreq.size()) { @@ -857,9 +916,26 @@ class BB { succsProfFreq[idx] = freq; } + bool HasMachineInsn() { + FOR_BB_INSNS(insn, this) { + if (insn->IsMachineInstruction()) { + return true; + } + } + return false; + } + + bool IsAdrpLabel() const { + return isAdrpLabel; + } + + void SetIsAdrpLabel() { + isAdrpLabel = true; + } + private: static const std::string bbNames[kBBLast]; - uint32 id; + // uint32 id uint32 level = 0; uint32 frequency = 0; FreqType profFreq = 0; // profileUse @@ -950,6 +1026,8 @@ class BB { uint32 alignNopNum = 0; CDGNode *cdgNode = nullptr; + + bool isAdrpLabel = false; // Indicate whether the address of this BB is referenced by adrp_label insn }; /* class BB */ struct BBIdCmp { diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index 5b2d58ed7d39c825c21a89ae88cc63c2de4d84d9..19d3da1a7f33ee1422dedf04d2fe832ba1018745 100644 
--- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -34,6 +34,7 @@ #include "maple_phase_manager.h" /* Maple MP header */ #include "mempool_allocator.h" +#include "safe_cast.h" namespace maplebe { constexpr int32 kBBLimit = 100000; @@ -178,6 +179,7 @@ class CGFunc { LmbcFormalParamInfo *GetLmbcFormalParamInfo(uint32 offset); virtual void LmbcGenSaveSpForAlloca() = 0; void RemoveUnreachableBB(); + void MarkAdrpLabelBB(); Insn &BuildLocInsn(int64 fileNum, int64 lineNum, int64 columnNum); Insn &BuildScopeInsn(int64 id, bool isEnd); void GenerateLoc(StmtNode &stmt, SrcPosition &lastSrcPos, SrcPosition &lastMplPos); @@ -343,7 +345,6 @@ class CGFunc { virtual Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) = 0; virtual Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) = 0; virtual void GenerateYieldpoint(BB &bb) = 0; - virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0; virtual Operand &GetOrCreateRflag() = 0; virtual const Operand *GetRflag() const = 0; @@ -413,6 +414,9 @@ class CGFunc { virtual RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) = 0; virtual RegOperand *SelectVectorIntrinsics(const IntrinsicopNode &intrinsicNode) = 0; + virtual RegOperand *SelectIntrinsicOpLoadTlsAnchor(const IntrinsicopNode& intrinsicopNode, + const BaseNode &parent) = 0; + virtual void HandleFuncCfg(CGCFG *cfg) { AddCommonExitBB(); } /* For ebo issue. 
*/ @@ -536,6 +540,8 @@ class CGFunc { void PatchLongBranch(); + void VerifyAllInsn(); + virtual uint32 MaxCondBranchDistance() { return INT_MAX; } @@ -781,6 +787,11 @@ class CGFunc { return commonExitBB; } + BB *GetCommonEntryBB() { + ASSERT(bbVec[0]->GetId() == 0 && bbVec[0] != firstBB, "there is no commonEntryBB"); + return bbVec[0]; + } + LabelIdx GetFirstCGGenLabelIdx() const { return firstCGGenLabelIdx; } @@ -801,8 +812,8 @@ class CGFunc { return exitBBVec.empty(); } - void EraseExitBBsVec(MapleVector::iterator it) { - exitBBVec.erase(it); + MapleVector::iterator EraseExitBBsVec(MapleVector::iterator it) { + return exitBBVec.erase(it); } void PushBackExitBBsVec(BB &bb) { @@ -842,10 +853,18 @@ class CGFunc { return exitBBVec.at(index); } + MapleVector::iterator EraseNoReturnCallBB(MapleVector::iterator it) { + return noReturnCallBBVec.erase(it); + } + void PushBackNoReturnCallBBsVec(BB &bb) { noReturnCallBBVec.emplace_back(&bb); } + MapleVector &GetNoRetCallBBVec() { + return noReturnCallBBVec; + } + void SetLab2BBMap(int32 index, BB &bb) { lab2BBMap[index] = &bb; } @@ -880,7 +899,7 @@ class CGFunc { memLayout = &layout; } - RegisterInfo *GetTargetRegInfo() { + RegisterInfo *GetTargetRegInfo() const { return targetRegInfo; } @@ -957,11 +976,11 @@ class CGFunc { CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); - LabelIdx labelIdx = lblConst->GetValue(); - CHECK_FATAL(switchLabelCnt[labelIdx] > 0, "error labelIdx"); - --switchLabelCnt[labelIdx]; - if (switchLabelCnt[labelIdx] == 0) { - (void)switchLabelCnt.erase(labelIdx); + LabelIdx tmpLabelIdx = lblConst->GetValue(); + CHECK_FATAL(switchLabelCnt[tmpLabelIdx] > 0, "error tmpLabelIdx"); + --switchLabelCnt[tmpLabelIdx]; + if (switchLabelCnt[tmpLabelIdx] == 0) { + (void)switchLabelCnt.erase(tmpLabelIdx); } } (void)emitStVec.erase(id); @@ -1085,7 +1104,7 @@ class CGFunc { } bool GetHasProEpilogue() 
const { - return hasProEpilogue; + return hasProEpilogue || useFP; } void SetHasProEpilogue(bool state) { @@ -1128,7 +1147,7 @@ class CGFunc { } auto it = func.GetLastFreqMap().find(stmt.GetStmtID()); if (it != func.GetLastFreqMap().end()) { - frequency = it->second; + frequency = static_cast(it->second); } } @@ -1161,7 +1180,7 @@ class CGFunc { return bb; } - void SetCurBBKind(BB::BBKind bbKind) const { + void SetCurBBKind(BB::BBKind bbKind) { curBB->SetKind(bbKind); } @@ -1280,6 +1299,26 @@ class CGFunc { return priority; } + static bool UsePlt(const MIRSymbol *funcSt = nullptr) { + if (CGOptions::GetNoplt() || CGOptions::IsNoSemanticInterposition() || + CGOptions::GetVisibilityType() == CGOptions::kHiddenVisibility) { + return false; + } + + if (funcSt && funcSt->IsHiddenVisibility()) { + return false; + } + + return true; + } + + void SetExitBBLost(bool val) { + exitBBLost = val; + } + bool GetExitBBLost() { + return exitBBLost; + } + protected: uint32 firstNonPregVRegNO; VregInfo vReg; /* for assigning a number for each CG virtual register */ @@ -1350,9 +1389,8 @@ class CGFunc { RegType regType = vRegNode.GetType(); ASSERT(regType == kRegTyInt || regType == kRegTyFloat, ""); uint32 size = vRegNode.GetSize(); /* in bytes */ - ASSERT(size == sizeof(int32) || size == sizeof(int64), ""); - return (regType == kRegTyInt ? (size == sizeof(int32) ? PTY_i32 : PTY_i64) - : (size == sizeof(float) ? PTY_f32 : PTY_f64)); + return (regType == kRegTyInt ? (size <= sizeof(int32) ? PTY_i32 : PTY_i64) : + (size <= sizeof(float) ? 
PTY_f32 : PTY_f64)); } int64 GetPseudoRegisterSpillLocation(PregIdx idx) { @@ -1360,6 +1398,11 @@ class CGFunc { return static_cast(GetBaseOffset(*symLoc)); } + int64 GetOrCreatSpillRegLocation(regno_t vrNum, uint32 memByteSize) { + auto *symLoc = GetMemlayout()->GetLocOfSpillRegister(vrNum, memByteSize); + return static_cast(GetBaseOffset(*symLoc)); + } + virtual MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx idx) = 0; uint32 GetSpillLocation(uint32 size) { @@ -1370,8 +1413,8 @@ class CGFunc { /* See if the symbol is a structure parameter that requires a copy. */ bool IsParamStructCopy(const MIRSymbol &symbol) { - if (symbol.GetStorageClass() == kScFormal && - GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + auto *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(symbol.GetTyIdx()); + if (symbol.GetStorageClass() == kScFormal && IsParamStructCopyToMemory(*mirType)) { return true; } return false; @@ -1390,12 +1433,10 @@ class CGFunc { } } - BB *CreateAtomicBuiltinBB(bool isBBIf = true) { + BB *CreateAtomicBuiltinBB() { LabelIdx atomicBBLabIdx = CreateLabel(); BB *atomicBB = CreateNewBB(); - if (isBBIf) { - atomicBB->SetKind(BB::kBBIf); - } + atomicBB->SetKind(BB::kBBIf); atomicBB->SetAtomicBuiltIn(); atomicBB->AddLabel(atomicBBLabIdx); SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); @@ -1403,6 +1444,26 @@ class CGFunc { return atomicBB; } + // clone old mem and add offset + // oldMem: [base, imm:12] -> newMem: [base, imm:(12 + offset)] + MemOperand &GetMemOperandAddOffset(const MemOperand &oldMem, uint32 offset, uint32 newSize) { + auto &newMem = static_cast(*oldMem.Clone(*GetMemoryPool())); + auto &oldOffset = *oldMem.GetOffsetOperand(); + auto &newOffst = static_cast(*oldOffset.Clone(*GetMemoryPool())); + newOffst.SetValue(oldOffset.GetValue() + offset); + newMem.SetOffsetOperand(newOffst); + newMem.SetSize(newSize); + return newMem; + } + + void AddAdrpLabel(LabelIdx label) { + (void)adrpLabels.emplace_back(label); + 
} + + MapleVector &GetAdrpLabels() { + return adrpLabels; + } + private: CGFunc &operator=(const CGFunc &cgFunc); CGFunc(const CGFunc&); @@ -1467,6 +1528,13 @@ class CGFunc { /* cross reference isel class pointer */ MPISel *isel = nullptr; + + // mark exitBB is unreachable + bool exitBBLost = false; + + // Record adrp address labels when generating instructions, + // only used in MarkArdpLabelBB of handlefunction + MapleVector adrpLabels; }; /* class CGFunc */ MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLayoutFrame, maplebe::CGFunc) @@ -1477,6 +1545,8 @@ MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPatchLongBranch, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixCFLocOsft, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgVerify, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc) MAPLE_FUNC_PHASE_DECLARE_END MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgEmission, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/control_dep_analysis.h b/src/mapleall/maple_be/include/cg/control_dep_analysis.h index a905c92cc23bbf53723535b889d56703467b5361..1a292a9a689f7987798bc52ac436c89520f86f68 100644 --- a/src/mapleall/maple_be/include/cg/control_dep_analysis.h +++ b/src/mapleall/maple_be/include/cg/control_dep_analysis.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -15,6 +15,7 @@ #ifndef MAPLEBE_INCLUDE_CG_PDG_ANALYSIS_H #define MAPLEBE_INCLUDE_CG_PDG_ANALYSIS_H +#include #include "cfg_mst.h" #include "instrument.h" #include "cg_cdg.h" @@ -23,50 +24,83 @@ #include "loop.h" namespace maplebe { +#define CONTROL_DEP_ANALYSIS_DUMP CG_DEBUG_FUNC(cgFunc) /* Analyze Control Dependence */ class ControlDepAnalysis { public: - ControlDepAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, PostDomAnalysis &pd, - CFGMST, maplebe::BB> &cfgmst) - : cgFunc(func), pdom(&pd), cfgMST(&cfgmst), cdgMemPool(memPool), tmpMemPool(&tmpPool), cdgAlloc(&memPool), - tmpAlloc(&tmpPool), nonPdomEdges(tmpAlloc.Adapter()), curCondNumOfBB(tmpAlloc.Adapter()) {} - ControlDepAnalysis(CGFunc &func, MemPool &memPool) + ControlDepAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, DomAnalysis &d, PostDomAnalysis &pd, + CFGMST, maplebe::BB> *cfgmst, std::string pName = "") + : cgFunc(func), dom(&d), pdom(&pd), cfgMST(cfgmst), cdgMemPool(memPool), tmpMemPool(&tmpPool), + cdgAlloc(&memPool), tmpAlloc(&tmpPool), nonPdomEdges(tmpAlloc.Adapter()), + curCondNumOfBB(tmpAlloc.Adapter()), phaseName(std::move(pName)) {} + ControlDepAnalysis(CGFunc &func, MemPool &memPool, std::string pName = "", bool isSingle = true) : cgFunc(func), cdgMemPool(memPool), cdgAlloc(&memPool), tmpAlloc(&memPool), - nonPdomEdges(cdgAlloc.Adapter()), curCondNumOfBB(cdgAlloc.Adapter()) {} + nonPdomEdges(cdgAlloc.Adapter()), curCondNumOfBB(cdgAlloc.Adapter()), + phaseName(std::move(pName)), isSingleBB(isSingle) {} virtual ~ControlDepAnalysis() { + dom = nullptr; fcdg = nullptr; cfgMST = nullptr; tmpMemPool = nullptr; pdom = nullptr; } + std::string PhaseName() const { + if (phaseName.empty()) { + return "controldepanalysis"; + } else { + return phaseName; + } + } + void SetIsSingleBB(bool isSingle) { + isSingleBB = isSingle; + } + /* The entry of analysis */ void Run(); + /* Provide scheduling-related interfaces */ + void ComputeSingleBBRegions(); // For local-scheduling in a 
single BB + void GetEquivalentNodesInRegion(CDGRegion ®ion, CDGNode &cdgNode, std::vector &equivalentNodes) const; + /* Interface for obtaining PDGAnalysis infos */ FCDG *GetFCDG() { return fcdg; } + CFGMST, maplebe::BB> *GetCFGMst() { + return cfgMST; + } /* Print forward-control-dependence-graph in dot syntax */ void GenerateFCDGDot() const; /* Print control-flow-graph with condition at edges in dot syntax */ void GenerateCFGDot() const; - void CreateAllCDGNodes(); + /* Print control-flow-graph with only bbId */ + void GenerateSimplifiedCFGDot() const; + /* Print control-flow-graph of the region in dot syntax */ + void GenerateCFGInRegionDot(CDGRegion ®ion) const; protected: void BuildCFGInfo(); void ConstructFCDG(); - void ComputeRegions(); + void ComputeRegions(bool doCDRegion); + void ComputeGeneralNonLinearRegions(); + void FindInnermostLoops(std::vector &innermostLoops, std::unordered_map &visited, + CGFuncLoops *loop); + void FindFallthroughPath(std::vector ®ionMembers, BB *curBB, bool isRoot); + void CreateRegionForSingleBB(); + bool AddRegionNodesInTopologicalOrder(CDGRegion ®ion, CDGNode &root, const MapleVector &members); + void ComputeSameCDRegions(bool considerNonDep); void ComputeRegionForCurNode(uint32 curBBId, std::vector &visited); void CreateAndDivideRegion(uint32 pBBId); void ComputeRegionForNonDepNodes(); - CDGRegion *FindExistRegion(CDGNode &node); + CDGRegion *FindExistRegion(CDGNode &node) const; bool IsISEqualToCDs(CDGNode &parent, CDGNode &child); void MergeRegions(CDGNode &mergeNode, CDGNode &candiNode); CDGEdge *BuildControlDependence(const BB &fromBB, const BB &toBB, int32 condition); CDGRegion *CreateFCDGRegion(CDGNode &curNode); + void CreateAllCDGNodes(); void AddNonPdomEdges(BBEdge *bbEdge) { nonPdomEdges.emplace_back(bbEdge); @@ -96,6 +130,7 @@ class ControlDepAnalysis { } CGFunc &cgFunc; + DomAnalysis *dom = nullptr; PostDomAnalysis *pdom = nullptr; CFGMST, maplebe::BB> *cfgMST = nullptr; MemPool &cdgMemPool; @@ -106,6 +141,8 @@ 
class ControlDepAnalysis { MapleUnorderedMap curCondNumOfBB; // FCDG *fcdg = nullptr; uint32 lastRegionId = 0; + std::string phaseName; + bool isSingleBB = false; }; MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgControlDepAnalysis, maplebe::CGFunc); diff --git a/src/mapleall/maple_be/include/cg/data_dep_analysis.h b/src/mapleall/maple_be/include/cg/data_dep_analysis.h index 1db0223ba7c2f3ea7fe6682ee14d6a338a03efd2..2588e1c0f63d1d6a003ee0565107ee38d2e3f304 100644 --- a/src/mapleall/maple_be/include/cg/data_dep_analysis.h +++ b/src/mapleall/maple_be/include/cg/data_dep_analysis.h @@ -1,5 +1,5 @@ /* -* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -30,6 +30,8 @@ #include "cg_cdg.h" namespace maplebe { +constexpr uint32 kMaxDumpRegionNodeNum = 6; + /* Analyze IntraBlock Data Dependence */ class IntraDataDepAnalysis { public: @@ -53,43 +55,30 @@ class IntraDataDepAnalysis { class InterDataDepAnalysis { public: InterDataDepAnalysis(CGFunc &f, MemPool &memPool, DataDepBase &dataDepBase) - : cgFunc(f), interAlloc(&memPool), ddb(dataDepBase), - readyNodes(interAlloc.Adapter()), restNodes(interAlloc.Adapter()) {} + : cgFunc(f), interMp(memPool), interAlloc(&memPool), ddb(dataDepBase) {} virtual ~InterDataDepAnalysis() = default; - void AddReadyNode(CDGNode *node) { - (void)readyNodes.emplace_back(node); - } - void RemoveReadyNode(CDGNode *node) { - auto it = std::find(readyNodes.begin(), readyNodes.end(), node); - if (it != readyNodes.end()) { - (void)readyNodes.erase(it); - } - } - void InitRestNodes(const MapleVector &nodes) { - restNodes = nodes; - } - void RemoveRestNode(CDGNode *node) { - auto it = std::find(restNodes.begin(), restNodes.end(), node); - if (it != restNodes.end()) { - (void)restNodes.erase(it); - } - } - - void Run(CDGRegion ®ion, 
MapleVector &dataNodes); - void GlobalInit(MapleVector &dataNodes); - void LocalInit(BB &bb, CDGNode &cdgNode, MapleVector &dataNodes, std::size_t idx); - void GenerateInterDDGDot(MapleVector &dataNodes); + void Run(CDGRegion ®ion); + void GenerateDataDepGraphDotOfRegion(CDGRegion ®ion); protected: - void ComputeTopologicalOrderInRegion(CDGRegion ®ion); + void InitInfoInRegion(MemPool ®ionMp, MapleAllocator ®ionAlloc, CDGRegion ®ion); + void InitInfoInCDGNode(MemPool ®ionMp, MapleAllocator ®ionAlloc, BB &bb, CDGNode &cdgNode); + void ClearInfoInRegion(MemPool *regionMp, MapleAllocator *regionAlloc, CDGRegion ®ion); + void AddBeginSeparatorNode(CDGNode *rootNode); + void SeparateDependenceGraph(CDGRegion ®ion, CDGNode &cdgNode); + void BuildDepsForNewSeparator(CDGRegion ®ion, CDGNode &cdgNode, DepNode &newSepNode); + void BuildDepsForPrevSeparator(CDGNode &cdgNode, DepNode &depNode, CDGRegion &curRegion); + void BuildSpecialInsnDependency(Insn &insn, CDGNode &cdgNode, CDGRegion ®ion, MapleAllocator &alloc); + void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, CDGNode &cdgNode); + void AddEndSeparatorNode(CDGRegion ®ion, CDGNode &cdgNode); + void UpdateReadyNodesInfo(CDGNode &cdgNode, const CDGNode &root) const; private: CGFunc &cgFunc; + MemPool &interMp; MapleAllocator interAlloc; DataDepBase &ddb; - MapleVector readyNodes; - MapleVector restNodes; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/data_dep_base.h b/src/mapleall/maple_be/include/cg/data_dep_base.h index f0f48b8d9974a08489a7797b92ae52fbb12bd173..31d75fb6c00efe7e3942f11df0771c91a740b252 100644 --- a/src/mapleall/maple_be/include/cg/data_dep_base.h +++ b/src/mapleall/maple_be/include/cg/data_dep_base.h @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. 
* You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -22,12 +22,15 @@ namespace maplebe { using namespace maple; constexpr maple::uint32 kMaxDependenceNum = 200; +constexpr maple::uint32 kMaxInsnNum = 220; class DataDepBase { public: - DataDepBase(MemPool &memPool, CGFunc &func, MAD &mad) - : memPool(memPool), alloc(&memPool), cgFunc(func), mad(mad), beforeRA(!cgFunc.IsAfterRegAlloc()) {} + DataDepBase(MemPool &memPool, CGFunc &func, MAD &mad, bool isIntraAna) + : memPool(memPool), alloc(&memPool), cgFunc(func), mad(mad), + beforeRA(!cgFunc.IsAfterRegAlloc()), isIntra(isIntraAna) {} virtual ~DataDepBase() { + curRegion = nullptr; curCDGNode = nullptr; } @@ -56,6 +59,12 @@ class DataDepBase { CDGNode *GetCDGNode() { return curCDGNode; } + void SetCDGRegion(CDGRegion *region) { + curRegion = region; + } + CDGRegion *GetCDGRegion() { + return curRegion; + } void SetLastFrameDefInsn(Insn *insn) const { curCDGNode->SetLastFrameDefInsn(insn); } @@ -81,7 +90,6 @@ class DataDepBase { void CombineDependence(DepNode &firstNode, const DepNode &secondNode, bool isAcrossSeparator, bool isMemCombine = false); - bool IsIntraBlockAnalysis() const; bool IfInAmbiRegs(regno_t regNO) const; void AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType); void RemoveSelfDeps(Insn &insn); @@ -98,7 +106,6 @@ class DataDepBase { void SeparateDependenceGraph(MapleVector &nodes, uint32 &nodeSum); DepNode *GenerateDepNode(Insn &insn, MapleVector &nodes, uint32 &nodeSum, MapleVector &comments); void UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn); - void BuildSeparatorNodeDependency(MapleVector &dataNodes, Insn &insn); void BuildInterBlockDefUseDependency(DepNode &curDepNode, regno_t regNO, DepType depType, bool isDef); void BuildPredPathDefDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, regno_t regNO, DepType depType); @@ -109,6 +116,7 @@ class DataDepBase { void 
BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vector &visited, bool needCmp, DepNode &depNode, DepType depType, DataDepBase::DataFlowInfoType infoType); + virtual void InitCDGNodeDataInfo(MemPool &mp, MapleAllocator &alloc, CDGNode &cdgNode) = 0; virtual void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) = 0; virtual void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) = 0; virtual bool IsFrameReg(const RegOperand&) const = 0; @@ -124,7 +132,8 @@ class DataDepBase { virtual void BuildDepsDirtyHeap(Insn &insn) = 0; virtual void BuildOpndDependency(Insn &insn) = 0; virtual void BuildSpecialInsnDependency(Insn &insn, const MapleVector &nodes) = 0; - virtual void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) = 0; + virtual void BuildAsmInsnDependency(Insn &insn) = 0; + virtual void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, DepNode &sepNode) = 0; virtual DepNode *BuildSeparatorNode() = 0; virtual void BuildInterBlockMemDefUseDependency(DepNode &depNode, MemOperand &memOpnd, MemOperand *nextMemOpnd, bool isMemDef) = 0; @@ -132,14 +141,17 @@ class DataDepBase { MemOperand &memOpnd, MemOperand *nextMemOpnd) = 0; virtual void BuildPredPathMemUseDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, MemOperand &memOpnd, MemOperand *nextMemOpnd) = 0; + virtual void DumpNodeStyleInDot(std::ofstream &file, DepNode &depNode) = 0; protected: MemPool &memPool; MapleAllocator alloc; CGFunc &cgFunc; MAD &mad; - CDGNode *curCDGNode = nullptr; bool beforeRA; + bool isIntra; + CDGNode *curCDGNode = nullptr; + CDGRegion *curRegion = nullptr; uint32 separatorIndex = 0; }; } diff --git a/src/mapleall/maple_be/include/cg/dbg.h b/src/mapleall/maple_be/include/cg/dbg.h index f0520c77cfaa9ac312e3752ac3c975787d21c426..5bcca0bfe4e08fd65ba2d061aac05221677dba12 100644 --- a/src/mapleall/maple_be/include/cg/dbg.h +++ b/src/mapleall/maple_be/include/cg/dbg.h @@ 
-48,7 +48,7 @@ class DbgInsn : public maplebe::Insn { maplebe::Operand &opnd2) : Insn(memPool, op, opnd0, opnd1, opnd2) {} - ~DbgInsn() = default; + ~DbgInsn() override = default; bool IsMachineInstruction() const override { return false; @@ -92,7 +92,7 @@ class ImmOperand : public maplebe::OperandVisitable { public: explicit ImmOperand(int64 val) : OperandVisitable(kOpdImmediate, 32), val(val) {} - ~ImmOperand() = default; + ~ImmOperand() override = default; using OperandVisitable::OperandVisitable; Operand *Clone(MemPool &memPool) const override { @@ -119,7 +119,7 @@ class DBGOpndEmitVisitor : public maplebe::OperandVisitorBase, public maplebe::OperandVisitor { public: explicit DBGOpndEmitVisitor(maplebe::Emitter &asmEmitter): emitter(asmEmitter) {} - virtual ~DBGOpndEmitVisitor() = default; + ~DBGOpndEmitVisitor() override = default; protected: maplebe::Emitter &emitter; private: diff --git a/src/mapleall/maple_be/include/cg/dependence.h b/src/mapleall/maple_be/include/cg/dependence.h index ba92b5651d2906e0978b8fb50e7f5891b9ab64d2..803c8298b0b292cb61900c527d5fdb4e418058e0 100644 --- a/src/mapleall/maple_be/include/cg/dependence.h +++ b/src/mapleall/maple_be/include/cg/dependence.h @@ -20,9 +20,6 @@ namespace maplebe { using namespace maple; -namespace { -constexpr maple::uint32 kMaxDependenceNum = 200; -}; class DepAnalysis { public: diff --git a/src/mapleall/maple_be/include/cg/deps.h b/src/mapleall/maple_be/include/cg/deps.h index 1faf86beb0352f844d84779863e0d8523961567a..a16c8afcc5916cd801062d39cf090bdcca2c91dd 100644 --- a/src/mapleall/maple_be/include/cg/deps.h +++ b/src/mapleall/maple_be/include/cg/deps.h @@ -20,7 +20,7 @@ #include "pressure.h" namespace maplebe { #define PRINT_STR_VAL(STR, VAL) \ - (LogInfo::MapleLogger() << std::left << std::setw(12) << STR << VAL << " | "); + (LogInfo::MapleLogger() << std::left << std::setw(12) << (STR) << VAL << " | "); #define PRINT_VAL(VAL) \ (LogInfo::MapleLogger() << std::left << std::setw(12) << VAL << " | "); @@ 
-54,6 +54,8 @@ enum NodeType : uint8 { enum ScheduleState : uint8 { kNormal, + kCandidate, + kWaiting, kReady, kScheduled, }; @@ -93,14 +95,10 @@ class DepLink { class DepNode { public: - bool CanBeScheduled() const; - void OccupyUnits() const; - uint32 GetUnitKind() const; - DepNode(Insn &insn, MapleAllocator &alloc) : insn(&insn), units(nullptr), reservation(nullptr), unitNum(0), eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), index(0), simulateCycle(0), - schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), + schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), topoPredsSize(0), preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), cfiInsns(alloc.Adapter()), clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), defRegnos(alloc.Adapter()), regPressure(nullptr) {} @@ -109,12 +107,108 @@ class DepNode { : insn(&insn), units(unit), reservation(&rev), unitNum(num), eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), index(0), simulateCycle(0), schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), - preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), cfiInsns(alloc.Adapter()), - clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), defRegnos(alloc.Adapter()), - regPressure(nullptr) {} + topoPredsSize(0), preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), + cfiInsns(alloc.Adapter()), clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), + defRegnos(alloc.Adapter()), regPressure(nullptr) {} virtual ~DepNode() = default; + /* + * If all unit of this node need when it be scheduling is free, this node can be scheduled, + * Return true. 
+ */ + bool IsResourceFree() const { + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + if (!unit->IsFree(i)) { + return false; + } + } + } + return true; + } + + /* Mark those unit that this node need occupy unit when it is being scheduled. */ + void OccupyUnits() { + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + unit->Occupy(*insn, i); + } + } + } + + /* Get unit kind of this node's units[0]. */ + uint32 GetUnitKind() const { + uint32 retValue = 0; + if ((units == nullptr) || (units[0] == nullptr)) { + return retValue; + } + + switch (units[0]->GetUnitId()) { + case kUnitIdSlotD: + retValue |= kUnitKindSlot0; + break; + case kUnitIdAgen: + case kUnitIdSlotSAgen: + retValue |= kUnitKindAgen; + break; + case kUnitIdSlotDAgen: + retValue |= kUnitKindAgen; + retValue |= kUnitKindSlot0; + break; + case kUnitIdHazard: + case kUnitIdSlotSHazard: + retValue |= kUnitKindHazard; + break; + case kUnitIdCrypto: + retValue |= kUnitKindCrypto; + break; + case kUnitIdMul: + case kUnitIdSlotSMul: + retValue |= kUnitKindMul; + break; + case kUnitIdDiv: + retValue |= kUnitKindDiv; + break; + case kUnitIdBranch: + case kUnitIdSlotSBranch: + retValue |= kUnitKindBranch; + break; + case kUnitIdStAgu: + retValue |= kUnitKindStAgu; + break; + case kUnitIdLdAgu: + retValue |= kUnitKindLdAgu; + break; + case kUnitIdFpAluS: + case kUnitIdFpAluD: + retValue |= kUnitKindFpAlu; + break; + case kUnitIdFpMulS: + case kUnitIdFpMulD: + retValue |= kUnitKindFpMul; + break; + case kUnitIdFpDivS: + case kUnitIdFpDivD: + retValue |= kUnitKindFpDiv; + break; + case kUnitIdSlot0LdAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindLdAgu; + break; + case kUnitIdSlot0StAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindStAgu; + break; + default: + break; + } + + return retValue; + } + Insn *GetInsn() const { return insn; } @@ -125,7 +219,7 @@ class DepNode { units = unit; } const Unit 
*GetUnitByIndex(uint32 idx) const { - ASSERT(index < unitNum, "out of units"); + ASSERT(idx < unitNum, "out of units"); return units[idx]; } Reservation *GetReservation() const { @@ -152,6 +246,12 @@ class DepNode { void SetLStart(uint32 start) { lStart = start; } + uint32 GetDelay() const { + return delay; + } + void SetDelay(uint32 prio) { + delay = prio; + } uint32 GetVisit() const { return visit; } @@ -203,7 +303,7 @@ class DepNode { uint32 GetValidPredsSize() const { return validPredsSize; } - void DescreaseValidPredsSize() { + void DecreaseValidPredsSize() { --validPredsSize; } void IncreaseValidPredsSize() { @@ -215,6 +315,21 @@ class DepNode { void SetValidSuccsSize(uint32 size) { validSuccsSize = size; } + void DecreaseValidSuccsSize() { + --validSuccsSize; + } + uint32 GetTopoPredsSize() { + return topoPredsSize; + } + void SetTopoPredsSize(uint32 size) { + topoPredsSize = size; + } + void IncreaseTopoPredsSize() { + ++topoPredsSize; + } + void DecreaseTopoPredsSize() { + --topoPredsSize; + } const MapleVector &GetPreds() const { return preds; } @@ -426,6 +541,7 @@ class DepNode { uint32 unitNum; uint32 eStart; uint32 lStart; + uint32 delay = 0; // Identify the critical path priority uint32 visit; NodeType type; ScheduleState state; @@ -438,6 +554,9 @@ class DepNode { uint32 validPredsSize; uint32 validSuccsSize; + /* For compute eStart by topological order */ + uint32 topoPredsSize; + /* Dependence links. 
*/ MapleVector preds; MapleVector succs; diff --git a/src/mapleall/maple_be/include/cg/emit.h b/src/mapleall/maple_be/include/cg/emit.h index c922291216885d3548116bc4cf9c3eb41b174da6..0732d23affff3daf294b50eaf61587b251baa647 100644 --- a/src/mapleall/maple_be/include/cg/emit.h +++ b/src/mapleall/maple_be/include/cg/emit.h @@ -34,13 +34,6 @@ #include "debug_info.h" #include "alignment.h" -namespace maple { -const char *GetDwTagName(unsigned n); -const char *GetDwFormName(unsigned n); -const char *GetDwAtName(unsigned n); -const char *GetDwOpName(unsigned n); -} /* namespace maple */ - #if defined(TARGRISCV64) && TARGRISCV64 #define CMNT "\t# " #else @@ -241,6 +234,7 @@ class Emitter { return *this; } + void InsertAnchor(std::string anchorName, int64 offset); // provide anchor in specific postion for better assembly void EmitLabelRef(LabelIdx labIdx); void EmitStmtLabel(LabelIdx labIdx); void EmitLabelPair(const LabelPair &pairLabel); @@ -266,7 +260,7 @@ class Emitter { void EmitDIDebugRangesSection(); void EmitDIDebugLineSection(); void EmitDIDebugStrSection(); - MIRFunction *GetDwTagSubprogram(const MapleVector &attrvec, DebugInfo &di); + MIRFunction *GetDwTagSubprogram(const MapleVector &attrvec, DebugInfo &di) const; void EmitDIAttrValue(DBGDie *die, DBGDieAttr *attr, DwAt attrName, DwTag tagName, DebugInfo *di); void EmitDIFormSpecification(unsigned int dwform); void EmitDIFormSpecification(const DBGDieAttr *attr) { @@ -359,7 +353,9 @@ class Emitter { void EmitDWRef(const std::string &name); void InitRangeIdx2PerfixStr(); void EmitAddressString(const std::string &address); - void EmitAliasAndRef(const MIRSymbol &sym); /* handle function symbol which has alias and weak ref */ + void EmitAliasAndRef(const MIRSymbol &sym); // handle function symbol which has alias and weak ref + // collect all global TLS together -- better perfomance for local dynamic + void EmitTLSBlock(const std::vector &tdataVec, const std::vector &tbssVec); CG *cg; MOperator currentMop = 
UINT_MAX; @@ -380,6 +376,10 @@ class Emitter { #endif MapleMap labdie2labidxTable; MapleMap fileMap; + + // for global warmup localDynamicOpt + std::vector globalTlsDataVec; + std::vector globalTlsBssVec; }; class OpndEmitVisitor : public OperandVisitorBase, @@ -396,9 +396,13 @@ class OpndEmitVisitor : public OperandVisitorBase, ExtendShiftOperand, CommentOperand> { public: - explicit OpndEmitVisitor(Emitter &asmEmitter): emitter(asmEmitter) {} - virtual ~OpndEmitVisitor() = default; - uint8 GetSlot() { + explicit OpndEmitVisitor(Emitter &asmEmitter, const OpndDesc *operandProp) + : emitter(asmEmitter), + opndProp(operandProp) {} + ~OpndEmitVisitor() override { + opndProp = nullptr; + } + uint8 GetSlot() const { return slot; } void SetSlot(uint8 startIndex) { @@ -419,6 +423,7 @@ class OpndEmitVisitor : public OperandVisitorBase, * asm {{destReg0, destReg2 ...}, srcReg} -----> slot = 0 */ uint8 slot = 255; + const OpndDesc *opndProp; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/global_schedule.h b/src/mapleall/maple_be/include/cg/global_schedule.h index 29a45c5f7ee5e721ae4ea1fe39613dc2e8e7c85c..fdf3139ccc31115ba69bb3988d0b745fc96ef4b3 100644 --- a/src/mapleall/maple_be/include/cg/global_schedule.h +++ b/src/mapleall/maple_be/include/cg/global_schedule.h @@ -1,5 +1,5 @@ /* -* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -15,27 +15,35 @@ #ifndef MAPLEBE_INCLUDE_CG_GLOBAL_SCHEDULE_H #define MAPLEBE_INCLUDE_CG_GLOBAL_SCHEDULE_H -#include "cgfunc.h" -#include "control_dep_analysis.h" -#include "data_dep_analysis.h" +#include "base_schedule.h" namespace maplebe { -class GlobalSchedule { +#define GLOBAL_SCHEDULE_DUMP CG_DEBUG_FUNC(cgFunc) + +class GlobalSchedule : public BaseSchedule { public: - GlobalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cdAna, InterDataDepAnalysis &interDDA) - : gsMempool(mp), gsAlloc(&mp), cgFunc(f), cda(cdAna), idda(interDDA), - dataNodes(gsAlloc.Adapter()) {} + GlobalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cdAna, InterDataDepAnalysis &idda) + : BaseSchedule(mp, f, cdAna), interDDA(idda) {} virtual ~GlobalSchedule() = default; - void Run(); + std::string PhaseName() const { + return "globalschedule"; + } + void Run() override; + bool CheckCondition(CDGRegion ®ion); + /* Region-based global scheduling entry, using the list scheduling algorithm for scheduling insns in bb */ + void DoGlobalSchedule(CDGRegion ®ion); + + /* Verifying the Correctness of Global Scheduling */ + virtual void VerifyingSchedule(CDGRegion ®ion) = 0; protected: - MemPool &gsMempool; - MapleAllocator gsAlloc; - CGFunc &cgFunc; - ControlDepAnalysis &cda; - InterDataDepAnalysis &idda; - MapleVector dataNodes; + virtual void InitInCDGNode(CDGRegion ®ion, CDGNode &cdgNode, MemPool *cdgNodeMp) = 0; + virtual void FinishScheduling(CDGNode &cdgNode) = 0; + void ClearCDGNodeInfo(CDGRegion ®ion, CDGNode &cdgNode, MemPool *cdgNodeMp); + void DumpInsnInfoByScheduledOrder(BB &curBB) const override {}; + + InterDataDepAnalysis &interDDA; }; MAPLE_FUNC_PHASE_DECLARE(CgGlobalSchedule, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/ico.h b/src/mapleall/maple_be/include/cg/ico.h index 9155fd3a9c2580e4e7e726d7752acdfc2b4a1860..976ad01d2e9731189b2a575d6ec16dd5f704252d 100644 --- a/src/mapleall/maple_be/include/cg/ico.h +++ b/src/mapleall/maple_be/include/cg/ico.h @@ 
-39,7 +39,7 @@ class ICOPattern : public OptimizationPattern { protected: Insn *FindLastCmpInsn(BB &bb) const; - std::vector GetLabelOpnds(Insn &insn) const; + std::vector GetLabelOpnds(const Insn &insn) const; }; MAPLE_FUNC_PHASE_DECLARE(CgIco, maplebe::CGFunc) diff --git a/src/mapleall/maple_be/include/cg/insn.h b/src/mapleall/maple_be/include/cg/insn.h index ae8cf8c95cf9aa8a60d9534a1b21712c300ede0b..307601fc0cdf3112cdf624d0c6d560a098e7f65d 100644 --- a/src/mapleall/maple_be/include/cg/insn.h +++ b/src/mapleall/maple_be/include/cg/insn.h @@ -33,6 +33,8 @@ class BB; class CG; class Emitter; class DepNode; +class InsnBuilder; +class OperandBuilder; struct VectorRegSpec { VectorRegSpec() : vecLane(-1), vecLaneMax(0), vecElementSize(0), compositeOpnds(0) {} @@ -176,17 +178,30 @@ class Insn { return retSize; } + // Insn Function: check legitimacy of opnds. + bool VerifySelf() const { + if (this->IsCfiInsn() || this->IsDbgInsn()) { + return true; + } + return md->Verify(opnds); + } + + void SplitSelf(bool isAfterRegAlloc, InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) { + md->Split(this, isAfterRegAlloc, insnBuilder, opndBuilder); + } + virtual bool IsMachineInstruction() const; - bool OpndIsDef(uint32 id) const; + bool OpndIsDef(uint32 opndId) const; - bool OpndIsUse(uint32 id) const; + bool OpndIsUse(uint32 opndId) const; virtual bool IsPCLoad() const { return false; } Operand *GetMemOpnd() const; + uint32 GetMemOpndIdx() const; void SetMemOpnd(MemOperand *memOpnd); @@ -506,6 +521,14 @@ class Insn { return !isCallReturnUnsigned; } + void SetMayTailCall() { + mayTailCall = true; + } + + bool GetMayTailCall() const { + return mayTailCall; + } + void SetRetType(RetType retTy) { this->retType = retTy; } @@ -549,7 +572,7 @@ class Insn { return isPhiMovInsn; } - Insn *Clone(const MemPool &memPool) const; + Insn *Clone(const MemPool /* &memPool */) const; void SetInsnDescrption(const InsnDesc &newMD) { md = &newMD; @@ -664,6 +687,7 @@ class Insn { bool 
asmDefCondCode = false; bool asmModMem = false; bool needSplit = false; + bool mayTailCall = false; /* for dynamic language to mark reference counting */ int32 refSkipIdx = -1; diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h index 67f02d7668a2bc8bd0250606887bb5c011037fc2..d0744429c5a9664b78f508ca9cebb2319048f07f 100644 --- a/src/mapleall/maple_be/include/cg/isa.h +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -20,6 +20,14 @@ #include "operand.h" namespace maplebe { +// For verify & split insn +#define VERIFY_INSN(INSN) (INSN)->VerifySelf() +#define SPLIT_INSN(INSN, FUNC) \ + (INSN)->SplitSelf((FUNC)->IsAfterRegAlloc(), (FUNC)->GetInsnBuilder(), (FUNC)->GetOpndBuilder()) +// circular dependency exists, no other choice +class Insn; +class InsnBuilder; +class OperandBuilder; enum MopProperty : maple::uint8 { kInsnIsAbstract, kInsnIsMove, @@ -142,7 +150,7 @@ enum AbstractMOP : MOperator { #define DEF_MIR_INTRINSIC(op, ...) op, enum VectorIntrinsicID { - #include "intrinsic_vector_new.def" +#include "intrinsic_vector_new.def" #undef DEF_MIR_INTRINSIC kVectorIntrinsicLast }; @@ -192,10 +200,21 @@ struct InsnDesc { atomicNum = 1; }; - /* for hard-coded machine description */ + InsnDesc(MOperator op, std::vector opndmd, uint64 props, uint64 ltype, + const std::string &inName, const std::string &inFormat, uint32 anum) + : opc(op), + opndMD(opndmd), + properties(props), + latencyType(ltype), + name(inName), + format(inFormat), + atomicNum(anum) { + }; + + // for hard-coded machine description. InsnDesc(MOperator op, std::vector opndmd, uint64 props, uint64 ltype, const std::string &inName, const std::string &inFormat, uint32 anum, - std::function vFunc = nullptr) + std::function)> vFunc) : opc(op), opndMD(opndmd), properties(props), @@ -206,6 +225,22 @@ struct InsnDesc { validFunc(vFunc) { }; + // for hard-coded machine description. 
+ InsnDesc(MOperator op, std::vector opndmd, uint64 props, uint64 ltype, + const std::string &inName, const std::string &inFormat, uint32 anum, + std::function)> vFunc, + std::function sFunc) + : opc(op), + opndMD(opndmd), + properties(props), + latencyType(ltype), + name(inName), + format(inFormat), + atomicNum(anum), + validFunc(vFunc), + splitFunc(sFunc) { + }; + MOperator opc; std::vector opndMD; uint64 properties; @@ -213,7 +248,10 @@ struct InsnDesc { const std::string name; const std::string format; uint32 atomicNum; /* indicate how many asm instructions it will emit. */ - std::function validFunc = nullptr; /* If insn has immOperand, this function needs to be implemented. */ + // If insn has immOperand, this function needs to be implemented. + std::function)> validFunc = nullptr; + // If insn needs to be split, this function needs to be implemented. + std::function splitFunc = nullptr; bool IsSame(const InsnDesc &left, std::function cmp) const; @@ -300,11 +338,28 @@ struct InsnDesc { return (properties & SPINTRINSIC) != 0; } bool IsComment() const { - return properties & ISCOMMENT; + return (properties & ISCOMMENT) != 0; } MOperator GetOpc() const { return opc; } + + bool Verify(const MapleVector &opnds) const { + if (!validFunc) { + return true; + } + if (opnds.size() != opndMD.size()) { + CHECK_FATAL_FALSE("The size of opnds is wrong."); + } + return validFunc(opnds); + } + + void Split(Insn *insn, bool isAfterRegAlloc, InsnBuilder *insnBuilder, OperandBuilder *opndBuilder) const { + if (!splitFunc) { + return; + } + splitFunc(insn, isAfterRegAlloc, insnBuilder, opndBuilder); + } const OpndDesc *GetOpndDes(size_t index) const { return opndMD[index]; } @@ -319,12 +374,6 @@ struct InsnDesc { bool Is64Bit() const { return GetOperandSize() == k64BitSize; } - bool IsValidImmOpnd(int64 val) const { - if (!validFunc) { - return true; - } - return validFunc(val); - } uint32 GetLatencyType() const { return latencyType; } diff --git 
a/src/mapleall/maple_be/include/cg/isel.h b/src/mapleall/maple_be/include/cg/isel.h index 2293ab80963e038f33460dab897af248bbb3a946..47eb7735f2900b08fe59093215dce4e43f987087 100644 --- a/src/mapleall/maple_be/include/cg/isel.h +++ b/src/mapleall/maple_be/include/cg/isel.h @@ -43,7 +43,7 @@ class MPISel { Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); void SelectDassign(const DassignNode &stmt, Operand &opndRhs); - void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0); + void SelectDassignoff(const DassignoffNode &stmt, Operand &opnd0); void SelectIassign(const IassignNode &stmt, Operand &opndAddr, Operand &opndRhs); void SelectIassignoff(const IassignoffNode &stmt); RegOperand *SelectRegread(RegreadNode &expr); @@ -58,8 +58,8 @@ class MPISel { virtual Operand* SelectExtractbits(const BaseNode &parent, ExtractbitsNode &node, Operand &opnd0); virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; Operand *SelectAlloca(UnaryNode &node, Operand &opnd0); - Operand *SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent); - ImmOperand *SelectIntConst(MIRIntConst &intConst, PrimType primType) const; + Operand *SelectCGArrayElemAdd(const BinaryNode &node); + ImmOperand *SelectIntConst(const MIRIntConst &intConst, PrimType primType) const; void SelectCallCommon(StmtNode &stmt, const MPISel &iSel) const; void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); @@ -128,9 +128,9 @@ class MPISel { virtual void SelectAsm(AsmNode &node) = 0; virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &rOpnd, const DassignNode &s) = 0; Operand *SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); - Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); - Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const 
BaseNode &parent); - Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0); + Operand *SelectMin(const BinaryNode &node, Operand &opnd0, Operand &opnd1); + Operand *SelectMax(const BinaryNode &node, Operand &opnd0, Operand &opnd1); + Operand *SelectRetype(const TypeCvtNode &node, Operand &opnd0); void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); template diff --git a/src/mapleall/maple_be/include/cg/list_scheduler.h b/src/mapleall/maple_be/include/cg/list_scheduler.h new file mode 100644 index 0000000000000000000000000000000000000000..bb524b32bf70b7e7db4b61d9c409be4e440f6a82 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/list_scheduler.h @@ -0,0 +1,311 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#ifndef MAPLEBE_INCLUDE_CG_LIST_SCHEDULER_H +#define MAPLEBE_INCLUDE_CG_LIST_SCHEDULER_H + +#include + +#include "cg.h" +#include "deps.h" +#include "cg_cdg.h" +#include "schedule_heuristic.h" + +namespace maplebe { +#define LIST_SCHEDULE_DUMP CG_DEBUG_FUNC(cgFunc) +typedef bool (*SchedRankFunctor)(const DepNode *node1, const DepNode *node2); + +constexpr uint32 kClinitAdvanceCycle = 12; +constexpr uint32 kAdrpLdrAdvanceCycle = 4; +constexpr uint32 kClinitTailAdvanceCycle = 6; + +static uint32 maxUnitIdx = 0; + +class CommonScheduleInfo { + public: + explicit CommonScheduleInfo(MemPool &memPool) + : csiAlloc(&memPool), candidates(csiAlloc.Adapter()), schedResults(csiAlloc.Adapter()) {} + ~CommonScheduleInfo() = default; + + bool IsDepNodeInCandidates(const CDGNode &curCDGNode, const DepNode &depNode) { + ASSERT(depNode.GetInsn() != nullptr, "get insn from depNode failed"); + ASSERT(curCDGNode.GetBB() != nullptr, "get bb from cdgNode failed"); + if (depNode.GetInsn()->GetBB()->GetId() == curCDGNode.GetBB()->GetId()) { + return true; + } + for (auto candiNode : candidates) { + if (&depNode == candiNode) { + return true; + } + } + return false; + } + + MapleVector &GetCandidates() { + return candidates; + } + void AddCandidates(DepNode *depNode) { + (void)candidates.emplace_back(depNode); + } + void EraseNodeFromCandidates(const DepNode *depNode) { + for (auto iter = candidates.begin(); iter != candidates.end(); ++iter) { + if (*iter == depNode) { + candidates.erase(iter); + return; + } + } + } + MapleVector::iterator EraseIterFromCandidates(MapleVector::iterator depIter) { + return candidates.erase(depIter); + } + MapleVector &GetSchedResults() { + return schedResults; + } + void AddSchedResults(DepNode *depNode) { + (void)schedResults.emplace_back(depNode); + } + std::size_t GetSchedResultsSize() { + return schedResults.size(); + } + + private: + MapleAllocator csiAlloc; + /* + * Candidate instructions list of current BB, + * by control flow sequence + */ + 
MapleVector candidates; + /* + * Scheduled results list of current BB + * for global-scheduler, it stored only to the last depNode of current BB + */ + MapleVector schedResults; +}; + +class ListScheduler { + public: + ListScheduler(MemPool &memPool, CGFunc &func, SchedRankFunctor rankFunc, bool delayHeu = true, std::string pName = "") + : listSchedMp(memPool), listSchedAlloc(&memPool), cgFunc(func), rankScheduleInsns(rankFunc), + doDelayHeuristics(delayHeu), phaseName(std::move(pName)), + waitingQueue(listSchedAlloc.Adapter()), readyList(listSchedAlloc.Adapter()) {} + ListScheduler(MemPool &memPool, CGFunc &func, bool delayHeu = true, std::string pName = "") + : listSchedMp(memPool), listSchedAlloc(&memPool), cgFunc(func), + doDelayHeuristics(delayHeu), phaseName(std::move(pName)), + waitingQueue(listSchedAlloc.Adapter()), readyList(listSchedAlloc.Adapter()) {} + virtual ~ListScheduler() { + mad = nullptr; + } + + std::string PhaseName() const { + if (phaseName.empty()) { + return "listscheduler"; + } else { + return phaseName; + } + } + + /* + * The entry of list-scheduler + * cdgNode: current scheduled BB + */ + void DoListScheduling(); + void ComputeDelayPriority(); + /* Compute the earliest start cycle, update maxEStart */ + void ComputeEStart(uint32 cycle); + /* Compute the latest start cycle */ + void ComputeLStart(); + /* Calculate the most used unitKind index */ + void CalculateMostUsedUnitKindCount(); + + void SetCommonSchedInfo(CommonScheduleInfo &csi) { + commonSchedInfo = &csi; + } + void SetCDGRegion(CDGRegion &cdgRegion) { + region = &cdgRegion; + } + void SetCDGNode(CDGNode &cdgNode) { + curCDGNode = &cdgNode; + } + uint32 GetCurrCycle() const { + return currCycle; + } + uint32 GetMaxLStart() const { + return maxLStart; + } + uint32 GetMaxDelay() const { + return maxDelay; + } + + protected: + void Init(); + void InitInfoBeforeCompEStart(uint32 cycle, std::vector &traversalList); + void InitInfoBeforeCompLStart(std::vector &traversalList); + void 
UpdateInfoBeforeSelectNode(); + void SortReadyList(); + void UpdateEStart(DepNode &schedNode); + void UpdateInfoAfterSelectNode(DepNode &schedNode); + void UpdateNodesInReadyList(); + void UpdateAdvanceCycle(const DepNode &schedNode); + void CountUnitKind(const DepNode &depNode, std::array &unitKindCount) const; + void DumpWaitingQueue() const; + void DumpReadyList() const; + void DumpScheduledResult() const; + void DumpDelay() const; + void DumpEStartLStartOfAllNodes(); + void DumpDepNodeInfo(const BB &curBB, MapleVector &nodes, const std::string state) const; + void DumpReservation(const DepNode &depNode) const; + + void EraseNodeFromReadyList(const DepNode *depNode) { + for (auto iter = readyList.begin(); iter != readyList.end(); ++iter) { + if (*iter == depNode) { + (void)readyList.erase(iter); + return; + } + } + } + MapleVector::iterator EraseIterFromReadyList(MapleVector::iterator depIter) { + return readyList.erase(depIter); + } + + void EraseNodeFromWaitingQueue(const DepNode *depNode) { + for (auto iter = waitingQueue.begin(); iter != waitingQueue.end(); ++iter) { + if (*iter == depNode) { + (void)waitingQueue.erase(iter); + return; + } + } + } + MapleVector::iterator EraseIterFromWaitingQueue(MapleVector::iterator depIter) { + return waitingQueue.erase(depIter); + } + + /* + * Sort by priority in descending order, + * that is the first node in list has the highest priority + */ + static bool CriticalPathRankScheduleInsns(const DepNode *node1, const DepNode *node2) { + // p as an acronym for priority + CompareLStart compareLStart; + int p1 = compareLStart(*node1, *node2); + if (p1 != 0) { + return p1 > 0; + } + + CompareEStart compareEStart; + int p2 = compareEStart(*node1, *node2); + if (p2 != 0) { + return p2 > 0; + } + + CompareSuccNodeSize compareSuccNodeSize; + int p3 = compareSuccNodeSize(*node1, *node2); + if (p3 != 0) { + return p3 > 0; + } + + CompareUnitKindNum compareUnitKindNum(maxUnitIdx); + int p4 = compareUnitKindNum(*node1, *node2); + if 
(p4 != 0) { + return p4 > 0; + } + + CompareSlotType compareSlotType; + int p5 = compareSlotType(*node1, *node2); + if (p5 != 0) { + return p5 > 0; + } + + CompareInsnID compareInsnId; + int p6 = compareInsnId(*node1, *node2); + if (p6 != 0) { + return p6 > 0; + } + + // default + return true; + } + + /* + * Rank function by delay heuristic + */ + static bool DelayRankScheduleInsns(const DepNode *node1, const DepNode *node2) { + // p as an acronym for priority + CompareDelay compareDelay; + int p1 = compareDelay(*node1, *node2); + if (p1 != 0) { + return p1 > 0; + } + + CompareSuccNodeSize compareSuccNodeSize; + int p2 = compareSuccNodeSize(*node1, *node2); + if (p2 != 0) { + return p2 > 0; + } + + CompareUnitKindNum compareUnitKindNum(maxUnitIdx); + int p3 = compareUnitKindNum(*node1, *node2); + if (p3 != 0) { + return p3 > 0; + } + + CompareSlotType compareSlotType; + int p4 = compareSlotType(*node1, *node2); + if (p4 != 0) { + return p4 > 0; + } + + CompareInsnID compareInsnId; + int p5 = compareInsnId(*node1, *node2); + if (p5 != 0) { + return p5 > 0; + } + + // default + return true; + } + + MemPool &listSchedMp; + MapleAllocator listSchedAlloc; + CGFunc &cgFunc; + MAD *mad = nullptr; // CPU resources + CDGRegion *region = nullptr; // the current region + CDGNode *curCDGNode = nullptr; // the current scheduled BB + CommonScheduleInfo *commonSchedInfo = nullptr; // common scheduling info that prepared by other scheduler + /* + * The function ptr that computes instruction priority based on heuristic rules, + * list-scheduler provides default implementations and supports customization by other schedulers + */ + SchedRankFunctor rankScheduleInsns = nullptr; + bool doDelayHeuristics = true; // true: compute delay; false: compute eStart & lStart + std::string phaseName; // for dumping log + /* + * A node is moved from [candidates] to [waitingQueue] when it's all data dependency are met + */ + MapleVector waitingQueue; + /* + * A node is moved from [waitingQueue] to 
[readyList] when resources required by it are free and + * estart-cycle <= curr-cycle + */ + MapleVector readyList; + uint32 currCycle = 0; // Simulates the CPU clock during scheduling + uint32 advancedCycle = 0; // Using after an instruction is scheduled, record its execution cycles + uint32 maxEStart = 0; // Update when the eStart of depNodes are recalculated + uint32 maxLStart = 0; // Ideal total cycles that is equivalent to critical path length + uint32 maxDelay = 0; // Ideal total cycles that is equivalent to max delay + uint32 scheduledNodeNum = 0; +}; +} /* namespace maplebe */ + +#endif // MAPLEBE_INCLUDE_CG_LIST_SCHEDULER_H diff --git a/src/mapleall/maple_be/include/cg/live.h b/src/mapleall/maple_be/include/cg/live.h index b4c29c783d21cc2cf7dce6b5e015b3d3a3742a46..a651f345f256fe27d285c4d582fba3e1b257a172 100644 --- a/src/mapleall/maple_be/include/cg/live.h +++ b/src/mapleall/maple_be/include/cg/live.h @@ -46,7 +46,7 @@ class LiveAnalysis : public AnalysisResult { void ProcessListOpnd(BB &bb, Operand &opnd, bool isDef) const; void ProcessMemOpnd(BB &bb, Operand &opnd) const; void ProcessCondOpnd(BB &bb) const; - void CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse) const; + void CollectLiveInfo(BB &bb, const Operand &opnd, bool isDef, bool isUse) const; SparseDataInfo *NewLiveIn(uint32 maxRegCount) { return memPool->New(maxRegCount, alloc); diff --git a/src/mapleall/maple_be/include/cg/local_schedule.h b/src/mapleall/maple_be/include/cg/local_schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..7bdd693bf73e24d307a41a3c08dd30d3a93abe5a --- /dev/null +++ b/src/mapleall/maple_be/include/cg/local_schedule.h @@ -0,0 +1,48 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#ifndef MAPLEBE_INCLUDE_CG_LOCAL_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_LOCAL_SCHEDULE_H + +#include "base_schedule.h" + +namespace maplebe { +#define LOCAL_SCHEDULE_DUMP CG_DEBUG_FUNC(cgFunc) + +class LocalSchedule : public BaseSchedule { + public: + LocalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cdAna, InterDataDepAnalysis &idda) + : BaseSchedule(mp, f, cdAna), interDDA(idda) {} + ~LocalSchedule() override = default; + + std::string PhaseName() const { + return "localschedule"; + } + void Run() override; + bool CheckCondition(CDGRegion ®ion) const; + void DoLocalSchedule(CDGNode &cdgNode); + + protected: + void InitInCDGNode(CDGNode &cdgNode); + virtual void FinishScheduling(CDGNode &cdgNode) = 0; + void DumpInsnInfoByScheduledOrder(BB &curBB) const override {}; + + InterDataDepAnalysis &interDDA; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLocalSchedule, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif // MAPLEBE_INCLUDE_CG_LOCAL_SCHEDULE_H diff --git a/src/mapleall/maple_be/include/cg/memlayout.h b/src/mapleall/maple_be/include/cg/memlayout.h index 17980e0d2378854c5babdcbe74ad2d08f66a7acd..a5b5c6fec0ed3eb9c972f6ce725b7221c2b7ca0a 100644 --- a/src/mapleall/maple_be/include/cg/memlayout.h +++ b/src/mapleall/maple_be/include/cg/memlayout.h @@ -177,8 +177,6 @@ class MemLayout { */ virtual void AssignSpillLocationsToPseudoRegisters() = 0; - virtual SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) = 0; - virtual int32 GetCalleeSaveBaseLoc() const { return 0; } @@ -197,11 +195,21 @@ class MemLayout { return spillLocTable.at(index); } - SymbolAlloc *GetLocOfSpillRegister(regno_t 
vrNum) { + SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum, uint32 memByteSize) { + auto *symLoc = CreateSymbolAlloc(); + symLoc->SetMemSegment(segSpillReg); + segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), memByteSize)); + symLoc->SetOffset(segSpillReg.GetSize()); + segSpillReg.SetSize(segSpillReg.GetSize() + memByteSize); + SetSpillRegLocInfo(vrNum, *symLoc); + return symLoc; + } + + SymbolAlloc *GetLocOfSpillRegister(regno_t vrNum, uint32 memByteSize) { SymbolAlloc *loc = nullptr; auto pos = spillRegLocMap.find(vrNum); if (pos == spillRegLocMap.end()) { - loc = AssignLocationToSpillReg(vrNum); + loc = AssignLocationToSpillReg(vrNum, memByteSize); } else { loc = pos->second; } @@ -265,6 +273,7 @@ class MemLayout { MemSegment segArgsStkPassed; MemSegment segArgsRegPassed; MemSegment segArgsToStkPass; + MemSegment segSpillReg = MemSegment(kMsSpillReg); MapleVector symAllocTable; /* index is stindex from StIdx */ MapleVector spillLocTable; /* index is preg idx */ MapleUnorderedMap spillRegLocMap; @@ -272,6 +281,8 @@ class MemLayout { MapleAllocator *memAllocator; CGFunc *cgFunc = nullptr; const uint32 stackPtrAlignment; + + virtual SymbolAlloc *CreateSymbolAlloc() const = 0; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/operand.def b/src/mapleall/maple_be/include/cg/operand.def index 249f048046382c302518f830f1250c9b6570fdc4..2fce04f3d3412886a097e11265d4231a91c7ec6f 100644 --- a/src/mapleall/maple_be/include/cg/operand.def +++ b/src/mapleall/maple_be/include/cg/operand.def @@ -80,6 +80,7 @@ DEFINE_MOP(Literal12Src, {Operand::kOpdStImmediate, operand::kLiteralLow12, 12}) /* for movk */ DEFINE_MOP(Lsl4, {Operand::kOpdShift, operand::kIsUse, 4}) +DEFINE_MOP(Lsl5, {Operand::kOpdShift, operand::kIsUse, 5}) DEFINE_MOP(Lsl6, {Operand::kOpdShift, operand::kIsUse, 6}) DEFINE_MOP(Lsl12, {Operand::kOpdShift, operand::kIsUse, 12}) /* for shift */ diff --git a/src/mapleall/maple_be/include/cg/operand.h 
b/src/mapleall/maple_be/include/cg/operand.h index 31ade845f248c4810dd679e6f5c0aaff5dde1147..b3da55a95bde546eab6ce61b05073bf1fb92e3d1 100644 --- a/src/mapleall/maple_be/include/cg/operand.h +++ b/src/mapleall/maple_be/include/cg/operand.h @@ -17,6 +17,7 @@ #include "becommon.h" #include "cg_option.h" +#include "aarch64/aarch64_imm_valid.h" #include "visitor_common.h" /* maple_ir */ @@ -32,8 +33,6 @@ namespace maplebe { class OpndDesc; class Emitter; -bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits); -bool IsBitmaskImmediate(uint64 val, uint32 bitLen); bool IsMoveWidableImmediate(uint64 val, uint32 bitLen); bool BetterUseMOVZ(uint64 val); @@ -471,20 +470,20 @@ class ImmOperand : public OperandVisitable { return relocs; } - bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const { - return maplebe::IsBitSizeImmediate(static_cast(value), size, nLowerZeroBits); + bool IsInBitSize(uint32 size, uint32 nLowerZeroBits) const { + return IsBitSizeImmediate(static_cast(value), size, nLowerZeroBits); } bool IsBitmaskImmediate() const { ASSERT(!IsZero(), " 0 is reserved for bitmask immediate"); ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate"); - return maplebe::IsBitmaskImmediate(static_cast(value), static_cast(size)); + return maplebe::aarch64::IsBitmaskImmediate(static_cast(value), static_cast(size)); } bool IsBitmaskImmediate(uint32 destSize) const { ASSERT(!IsZero(), " 0 is reserved for bitmask immediate"); ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate"); - return maplebe::IsBitmaskImmediate(static_cast(value), static_cast(destSize)); + return maplebe::aarch64::IsBitmaskImmediate(static_cast(value), static_cast(destSize)); } bool IsSingleInstructionMovable() const { @@ -800,6 +799,10 @@ class ExtendShiftOperand : public OperandVisitable { return extendOp; } + uint32 GetValue() const { + return shiftAmount; + } + bool Less(const Operand &right) const override; void Dump() const override { @@ -841,10 +844,10 @@ class 
BitShiftOperand : public OperandVisitable { public: enum ShiftOp : uint8 { kUndef, - kLSL, /* logical shift left */ - kLSR, /* logical shift right */ - kASR, /* arithmetic shift right */ - kROR, /* rotate shift right */ + kShiftLSL, /* logical shift left */ + kShiftLSR, /* logical shift right */ + kShiftASR, /* arithmetic shift right */ + kShiftROR, /* rotate shift right */ }; /* bitlength is equal to 5 or 6 */ @@ -885,6 +888,10 @@ class BitShiftOperand : public OperandVisitable { return shiftOp; } + uint32 GetValue() const { + return GetShiftAmount(); + } + void Dump() const override { CHECK_FATAL_FALSE("dont run here"); } @@ -1033,6 +1040,7 @@ class MemOperand : public OperandVisitable { lsOpnd(memOpnd.lsOpnd), symbol(memOpnd.symbol), memoryOrder(memOpnd.memoryOrder), + accessSize(memOpnd.accessSize), addrMode(memOpnd.addrMode), isStackMem(memOpnd.isStackMem), isStackArgMem(memOpnd.isStackArgMem) {} @@ -1289,7 +1297,7 @@ class MemOperand : public OperandVisitable { std::string GetExtendAsString() const { if (addrMode == kBOL) { CHECK_NULL_FATAL(lsOpnd); - CHECK_FATAL(lsOpnd->GetShiftOp() == BitShiftOperand::kLSL, "check bitshiftop!"); + CHECK_FATAL(lsOpnd->GetShiftOp() == BitShiftOperand::kShiftLSL, "check bitshiftop!"); return "LSL"; } else if (addrMode == kBOE) { CHECK_NULL_FATAL(exOpnd); @@ -1313,7 +1321,6 @@ class MemOperand : public OperandVisitable { } else { return false; } - return true; } bool NeedFixIndex() const { @@ -1873,7 +1880,7 @@ class OpndDumpVisitor : public OperandVisitorBase, protected: virtual void DumpOpndPrefix() { - LogInfo::MapleLogger() << " (opnd:"; + LogInfo::MapleLogger() << "(opnd:"; } virtual void DumpOpndSuffix() { LogInfo::MapleLogger() << " )"; @@ -1885,6 +1892,19 @@ class OpndDumpVisitor : public OperandVisitorBase, return opndDesc; } + void DumpOpndDesc() const { + LogInfo::MapleLogger() << " ["; + if (opndDesc->IsDef()) { + LogInfo::MapleLogger() << "DEF"; + } + if (opndDesc->IsDef() && opndDesc->IsUse()) { + 
LogInfo::MapleLogger() << ","; + } + if (opndDesc->IsUse()) { + LogInfo::MapleLogger() << "USE"; + } + LogInfo::MapleLogger() << "]"; + } private: const OpndDesc *opndDesc; }; diff --git a/src/mapleall/maple_be/include/cg/optimize_common.h b/src/mapleall/maple_be/include/cg/optimize_common.h index a57dafdefe9e1b622f5096b3eacd2b1e6d533f97..31e27af04cc1def87489c0ab93d07c5b2fdfb497 100644 --- a/src/mapleall/maple_be/include/cg/optimize_common.h +++ b/src/mapleall/maple_be/include/cg/optimize_common.h @@ -78,6 +78,10 @@ class Optimizer { void Run(const std::string &funcName, bool checkOnly = false); virtual void InitOptimizePatterns() = 0; + bool IsOptimized() const { + return doOptWithSinglePassPatterns; + } + protected: CGFunc *cgFunc; const char *name; @@ -87,6 +91,7 @@ class Optimizer { MapleVector diffPassPatterns; /* patterns can run in a single pass of cgFunc */ MapleVector singlePassPatterns; + bool doOptWithSinglePassPatterns = false; }; class OptimizeLogger { diff --git a/src/mapleall/maple_be/include/cg/peep.h b/src/mapleall/maple_be/include/cg/peep.h index 2d3277a739c190779ca618f9f6c753881b49980f..23b66bd8e7dcf9c5042974a5e8b33951049ec404 100644 --- a/src/mapleall/maple_be/include/cg/peep.h +++ b/src/mapleall/maple_be/include/cg/peep.h @@ -71,6 +71,13 @@ class PeepOptimizeManager { BB *currBB; Insn *currInsn; CGSSAInfo *ssaInfo; + /* + * The flag indicates whether the optimization pattern is successful, + * this prevents the next optimization pattern that processs the same mop from failing to get the validInsn, + * which was changed by previous pattern. + * + * Set the flag to true when the pattern optimize successfully. 
+ */ bool optSuccess = false; }; @@ -86,7 +93,9 @@ class CGPeepHole { : cgFunc(&f), peepMemPool(memPool), ssaInfo(cgssaInfo) {} - virtual ~CGPeepHole() = default; + virtual ~CGPeepHole() { + ssaInfo = nullptr; + } virtual void Run() = 0; virtual bool DoSSAOptimize(BB &bb, Insn &insn) = 0; diff --git a/src/mapleall/maple_be/include/cg/reg_alloc_color_ra.h b/src/mapleall/maple_be/include/cg/reg_alloc_color_ra.h index e69a396ddbafb23952ee8555f3f12496f8d4eb8d..2ac99836823f2e5f37d9b95171d32bc9b88b7ad7 100644 --- a/src/mapleall/maple_be/include/cg/reg_alloc_color_ra.h +++ b/src/mapleall/maple_be/include/cg/reg_alloc_color_ra.h @@ -24,6 +24,8 @@ #include "rematerialize.h" namespace maplebe { +// reserve some regs for subsequent phases, enabled by default +// such as: aarch64 reserved x16 for memOffset split #define RESERVED_REGS #define USE_LRA @@ -90,14 +92,10 @@ inline bool FindIn(const MapleList &list, const T &item) { return std::find(list.begin(), list.end(), item) != list.end(); } -inline bool IsBitArrElemSet(const uint64 *vec, const uint32 num) { - size_t index = num / kU64; - uint64 bit = num % kU64; - return (vec[index] & (1ULL << bit)) != 0; -} - -inline bool IsBBsetOverlap(const uint64 *vec1, const uint64 *vec2, uint32 bbBuckets) { - for (uint32 i = 0; i < bbBuckets; ++i) { +// check bb set has overlap +inline bool IsBBsetOverlap(const MapleVector &vec1, const MapleVector &vec2) { + ASSERT(vec1.size() == vec2.size(), "size must be same"); + for (uint32 i = 0; i < vec1.size(); ++i) { if ((vec1[i] & vec2[i]) != 0) { return true; } @@ -105,6 +103,21 @@ inline bool IsBBsetOverlap(const uint64 *vec1, const uint64 *vec2, uint32 bbBuck return false; } +// foreach bb, and run functor +template +inline void ForEachBBArrElem(const MapleVector &vec, const Func functor) { + for (uint32 iBBArrElem = 0; iBBArrElem < vec.size(); ++iBBArrElem) { + if (vec[iBBArrElem] == 0) { + continue; + } + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] 
& (1ULL << bBBArrElem)) != 0) { + functor(iBBArrElem * kU64 + bBBArrElem); + } + } + } +} + /* For each bb, record info pertain to allocation */ /* * This is per bb per LR. @@ -115,7 +128,7 @@ class LiveUnit { LiveUnit() = default; ~LiveUnit() = default; - void PrintLiveUnit() const; + void Dump() const; uint32 GetBegin() const { return begin; @@ -203,20 +216,177 @@ enum RefType : uint8 { kIsCall = 0x4, }; -/* LR is for each global vreg. */ +// AdjMatrix is an undirected graph, used to temporarily indicate a register conflict +class AdjMatrix { + public: + using BitElem = uint64; + + explicit AdjMatrix(uint32 maxVregNum) + : bucket((maxVregNum + kBitElemSize - 1) / kBitElemSize), + matrix(maxVregNum, std::vector(bucket, BitElem(0))) {} + + ~AdjMatrix() = default; + + // add an edge to the graph to represent a conflict between n1 and n2. + void AddEdge(uint32 n1, uint32 n2) { + ASSERT(n1 < matrix.size(), "out of range!"); + ASSERT(n2 < matrix.size(), "out of range!"); + AddOneEdge(n1, n2); + AddOneEdge(n2, n1); + } + + // convert all edges of a node to a vector + std::vector ConvertEdgeToVec(uint32 index) const { + ASSERT(index < matrix.size(), "out of range!"); + std::vector res; + const auto &row = matrix[index]; + for (uint32 i = 0; i < row.size(); ++i) { + if (row[i] == BitElem(0)) { + continue; + } + for (uint32 j = 0; j < kBitElemSize; ++j) { + if ((row[i] & (BitElem(1) << j)) != 0) { + (void)res.emplace_back(i * kBitElemSize + j); + } + } + } + return res; + } + + void Dump() const; + private: + static constexpr uint32 kBitElemSize = sizeof(BitElem) * CHAR_BIT; // matrix elem bit size + uint32 bucket = 0; + std::vector> matrix; + + void AddOneEdge(uint32 n1, uint32 n2) { + uint32 offset = n2 / kBitElemSize; + BitElem mask = BitElem(1) << (n2 % kBitElemSize); + matrix[n1][offset] |= mask; + } +}; + +// live range's confilct info +class ConfilctInfo { + public: + explicit ConfilctInfo(uint32 maxRegNum, MapleAllocator &allocator) + : pregveto(maxRegNum, false, 
allocator.Adapter()), + callDef(maxRegNum, false, allocator.Adapter()), + forbidden(maxRegNum, false, allocator.Adapter()), + conflict(allocator.Adapter()), + prefs(allocator.Adapter()) {} + + ~ConfilctInfo() = default; + + void Dump() const; + + const MapleBitVector &GetPregveto() const { + return pregveto; + } + + void InsertElemToPregveto(regno_t regno) { + if (!pregveto[regno]) { + pregveto[regno] = true; + ++numPregveto; + } + } + + uint32 GetPregvetoSize() const { + return numPregveto; + } + + const MapleBitVector &GetCallDef() const { + return callDef; + } + + void InsertElemToCallDef(regno_t regno) { + callDef[regno] = true; + } + + const MapleBitVector &GetForbidden() const { + return forbidden; + } + + void InsertElemToForbidden(regno_t regno) { + if (!forbidden[regno]) { + forbidden[regno] = true; + ++numForbidden; + } + } + + void EraseElemFromForbidden(regno_t regno) { + if (forbidden[regno]) { + forbidden[regno] = false; + --numForbidden; + } + } + + void ClearForbidden() { + numForbidden = 0; + forbidden.clear(); + } + + uint32 GetForbiddenSize() const { + return numForbidden; + } + + void InsertConflict(regno_t regno) { + (void)conflict.insert(regno); + } + + void EraseConflict(regno_t regno) { + (void)conflict.erase(regno); + } + + const MapleSet &GetConflict() const { + return conflict; + } + + const MapleSet &GetPrefs() const { + return prefs; + } + + void InsertElemToPrefs(regno_t regno) { + (void)prefs.insert(regno); + } + + void EraseElemFromPrefs(regno_t regno) { + (void)prefs.erase(regno); + } + + private: + // preg have conflicted with lr before RA, cannot be allocated or modified. + MapleBitVector pregveto; + // when lr crosses calls and IPARA is enabled, callDef is set to indicate + // the caller-save register used by the invoked function. + // therefore, callDef cannot be modified, but may be allocated. + MapleBitVector callDef; + // from conflictLr, indicating that the current LR cannot re-allocate these registers. 
+ // SplitLr may clear forbidden + MapleBitVector forbidden; + uint32 numPregveto = 0; + uint32 numForbidden = 0; + + MapleSet conflict; // vreg interference from graph neighbors + MapleSet prefs; // pregs that prefer, if preg in pregveto, does we need to delete it? +}; + +// LR is for each global vreg. class LiveRange { public: explicit LiveRange(uint32 maxRegNum, MapleAllocator &allocator) : lrAlloca(&allocator), - pregveto(maxRegNum, false, allocator.Adapter()), - callDef(maxRegNum, false, allocator.Adapter()), - forbidden(maxRegNum, false, allocator.Adapter()), - prefs(allocator.Adapter()), + bbMember(allocator.Adapter()), + confilctInfo(maxRegNum, allocator), refMap(allocator.Adapter()), luMap(allocator.Adapter()) {} ~LiveRange() = default; + void DumpLiveUnitMap() const; + void DumpLiveBB() const; + void Dump(const std::string &str) const; + regno_t GetRegNO() const { return regNO; } @@ -277,207 +447,130 @@ class LiveRange { mustAssigned = true; } - void SetBBBuckets(uint32 bucketNum) { - bbBuckets = bucketNum; - } - - void SetRegBuckets(uint32 bucketNum) { - regBuckets = bucketNum; - } - - uint32 GetNumBBMembers() const { - return numBBMembers; + void InsertElemToPregveto(regno_t regno) { + confilctInfo.InsertElemToPregveto(regno); } - void IncNumBBMembers() { - ++numBBMembers; + void InsertElemToCallDef(regno_t regno) { + confilctInfo.InsertElemToCallDef(regno); } - void DecNumBBMembers() { - --numBBMembers; + void InsertElemToForbidden(regno_t regno) { + confilctInfo.InsertElemToForbidden(regno); } - void InitBBMember(MemPool &memPool, size_t size) { - bbMember = memPool.NewArray(size); - errno_t ret = memset_s(bbMember, size * sizeof(uint64), 0, size * sizeof(uint64)); - CHECK_FATAL(ret == EOK, "call memset_s failed"); + void EraseElemFromForbidden(regno_t regno) { + confilctInfo.EraseElemFromForbidden(regno); } - uint64 *GetBBMember() { - return bbMember; + void ClearForbidden() { + confilctInfo.ClearForbidden(); } - const uint64 *GetBBMember() const { - 
return bbMember; + void InsertElemToPrefs(regno_t regno) { + confilctInfo.InsertElemToPrefs(regno); } - uint64 GetBBMemberElem(uint32 index) const { - return bbMember[index]; + void EraseElemFromPrefs(regno_t regno) { + confilctInfo.EraseElemFromPrefs(regno); } - void SetBBMemberElem(int32 index, uint64 elem) { - bbMember[index] = elem; + void InsertConflict(regno_t regno) { + confilctInfo.InsertConflict(regno); } - void SetMemberBitArrElem(uint32 bbID) { - uint32 index = bbID / kU64; - uint64 bit = bbID % kU64; - uint64 mask = 1ULL << bit; - if ((GetBBMemberElem(index) & mask) == 0) { - IncNumBBMembers(); - SetBBMemberElem(index, GetBBMemberElem(index) | mask); - } + void EraseConflict(regno_t regno) { + confilctInfo.EraseConflict(regno); } - void UnsetMemberBitArrElem(uint32 bbID) { - uint32 index = bbID / kU64; - uint64 bit = bbID % kU64; - uint64 mask = 1ULL << bit; - if ((GetBBMemberElem(index) & mask) != 0) { - DecNumBBMembers(); - SetBBMemberElem(index, GetBBMemberElem(index) & (~mask)); - } + uint32 GetPregvetoSize() const { + return confilctInfo.GetPregvetoSize(); } - void SetConflictBitArrElem(regno_t regno) { - uint32 index = regno / kU64; - uint64 bit = regno % kU64; - uint64 mask = 1ULL << bit; - if ((GetBBConflictElem(index) & mask) == 0) { - IncNumBBConflicts(); - SetBBConflictElem(index, GetBBConflictElem(index) | mask); - } + uint32 GetForbiddenSize() const { + return confilctInfo.GetForbiddenSize(); } - void UnsetConflictBitArrElem(regno_t regno) { - uint32 index = regno / kU64; - uint64 bit = regno % kU64; - uint64 mask = 1ULL << bit; - if ((GetBBConflictElem(index) & mask) != 0) { - DecNumBBConflicts(); - SetBBConflictElem(index, GetBBConflictElem(index) & (~mask)); - } + uint32 GetConflictSize() const { + return static_cast(confilctInfo.GetConflict().size()); } - void InitPregveto() { - pregveto.assign(pregveto.size(), false); - callDef.assign(callDef.size(), false); + const MapleBitVector &GetPregveto() const { + return 
confilctInfo.GetPregveto(); } bool GetPregveto(regno_t regno) const { - return pregveto[regno]; - } - - size_t GetPregvetoSize() const { - return numPregveto; - } - - void InsertElemToPregveto(regno_t regno) { - if (!pregveto[regno]) { - pregveto[regno] = true; - ++numPregveto; - } + return GetPregveto()[regno]; } bool GetCallDef(regno_t regno) const { - return callDef[regno]; - } - - void InsertElemToCallDef(regno_t regno) { - if (!callDef[regno]) { - callDef[regno] = true; - ++numCallDef; - } - } - - void SetCrossCall() { - crossCall = true; + return confilctInfo.GetCallDef()[regno]; } - bool GetCrossCall() const { - return crossCall; - } - - void InitForbidden() { - forbidden.assign(forbidden.size(), false); - } - - const MapleVector &GetForbidden() const { - return forbidden; + const MapleBitVector &GetForbidden() const { + return confilctInfo.GetForbidden(); } bool GetForbidden(regno_t regno) const { - return forbidden[regno]; - } - - size_t GetForbiddenSize() const { - return numForbidden; - } - - void InsertElemToForbidden(regno_t regno) { - if (!forbidden[regno]) { - forbidden[regno] = true; - ++numForbidden; - } - } - - void EraseElemFromForbidden(regno_t regno) { - if (forbidden[regno]) { - forbidden[regno] = false; - --numForbidden; - } - } - - void ClearForbidden() { - forbidden.clear(); + return GetForbidden()[regno]; } - uint32 GetNumBBConflicts() const { - return numBBConflicts; + const MapleSet &GetConflict() const { + return confilctInfo.GetConflict(); } - void IncNumBBConflicts() { - ++numBBConflicts; + const MapleSet &GetPrefs() const { + return confilctInfo.GetPrefs(); } - void DecNumBBConflicts() { - --numBBConflicts; + // check whether the lr conflicts with the preg + bool HaveConflict(regno_t preg) const { + return GetPregveto(preg) || GetForbidden(preg); } - void InitBBConflict(MemPool &memPool, size_t size) { - bbConflict = memPool.NewArray(size); - errno_t ret = memset_s(bbConflict, size * sizeof(uint64), 0, size * sizeof(uint64)); - 
CHECK_FATAL(ret == EOK, "call memset_s failed"); + uint32 GetNumBBMembers() const { + return numBBMembers; } - const uint64 *GetBBConflict() const { - return bbConflict; + void InitBBMember(size_t bbNum) { + bbMember.clear(); + bbMember.resize((bbNum + kU64 - 1) / kU64, 0); } - uint64 GetBBConflictElem(int32 index) const { - ASSERT(index < regBuckets, "out of bbConflict"); - return bbConflict[index]; + const MapleVector &GetBBMember() const { + return bbMember; } - void SetBBConflictElem(uint32 index, uint64 elem) { - ASSERT(index < regBuckets, "out of bbConflict"); - bbConflict[index] = elem; + bool GetBBMember(uint32 bbID) const { + uint32 offset = bbID / kU64; + uint64 mask = uint64(1) << (bbID % kU64); + return (bbMember[offset] & mask) != 0; } - void SetOldConflict(uint64 *conflict) { - oldConflict = conflict; + void SetBBMember(uint32 bbID) { + uint32 offset = bbID / kU64; + uint64 mask = uint64(1) << (bbID % kU64); + if ((bbMember[offset] & mask) == 0) { + ++numBBMembers; + bbMember[offset] |= mask; + } } - const uint64 *GetOldConflict() const { - return oldConflict; + void UnsetBBMember(uint32 bbID) { + uint32 offset = bbID / kU64; + uint64 mask = uint64(1) << (bbID % kU64); + if ((bbMember[offset] & mask) != 0) { + --numBBMembers; + bbMember[offset] &= (~mask); + } } - const MapleSet &GetPrefs() const { - return prefs; + void SetCrossCall() { + crossCall = true; } - void InsertElemToPrefs(regno_t regno) { - (void)prefs.insert(regno); + bool GetCrossCall() const { + return crossCall; } const MapleMap*> GetRefs() const { @@ -605,6 +698,22 @@ class LiveRange { spillSize = size; } + uint32 GetMaxDefSize() const { + return maxDefSize; + } + + void SetMaxDefSize(uint32 size) { + maxDefSize = size; + } + + uint32 GetMaxUseSize() const { + return maxUseSize; + } + + void SetMaxUseSize(uint32 size) { + maxUseSize = size; + } + bool IsSpilled() const { return spilled; } @@ -629,8 +738,9 @@ class LiveRange { proccessed = true; } - bool IsNonLocal() const { - return 
isNonLocal; + // if true, lr will be alloced by LRA + bool IsLocalReg() const { + return !splitLr && (numBBMembers == 1) && !isNonLocal; } void SetIsNonLocal(bool isNonLocalVal) { @@ -693,23 +803,10 @@ class LiveRange { RegType regType = kRegTyUndef; float priority = 0.0; bool mustAssigned = false; - uint32 bbBuckets = 0; /* size of bit array for bb (each bucket == 64 bits) */ - uint32 regBuckets = 0; /* size of bit array for reg (each bucket == 64 bits) */ - uint32 numBBMembers = 0; /* number of bits set in bbMember */ - uint64 *bbMember = nullptr; /* Same as smember, but use bit array */ - - MapleBitVector pregveto; /* pregs cannot be assigned -- SplitLr may clear forbidden */ - MapleBitVector callDef; /* pregs cannot be assigned -- SplitLr may clear forbidden */ - MapleBitVector forbidden; /* pregs cannot be assigned */ - uint32 numPregveto = 0; - uint32 numCallDef = 0; - uint32 numForbidden = 0; + uint32 numBBMembers = 0; // number of bits set in bbMember + MapleVector bbMember; // use bit array. 
if true, reg is used in BB bool crossCall = false; - - uint32 numBBConflicts = 0; /* number of bits set in bbConflict */ - uint64 *bbConflict = nullptr; /* vreg interference from graph neighbors (bit) */ - uint64 *oldConflict = nullptr; - MapleSet prefs; /* pregs that prefer */ + ConfilctInfo confilctInfo; // lr conflict info MapleMap*> refMap; MapleMap luMap; /* info for each bb */ LiveRange *splitLr = nullptr; /* The 1st part of the split */ @@ -720,11 +817,13 @@ class LiveRange { #endif /* OPTIMIZE_FOR_PROLOG */ MemOperand *spillMem = nullptr; /* memory operand used for spill, if any */ regno_t spillReg = 0; /* register operand for spill at current point */ - uint32 spillSize = 0; + uint32 spillSize = 0; /* use min(maxDefSize, maxUseSize) */ + uint32 maxDefSize = 0; + uint32 maxUseSize = 0; bool spilled = false; /* color assigned */ bool hasDefUse = false; /* has regDS */ bool proccessed = false; - bool isNonLocal = false; + bool isNonLocal = true; bool isSpSave = false; /* contain SP in case of alloca */ Rematerializer *rematerializer = nullptr; }; @@ -860,8 +959,8 @@ class FinalizeRegisterInfo { useDefOperands.emplace_back(idx, &opnd); } - int32 GetMemOperandIdx() const { - return static_cast(memOperandIdx); + uint32 GetMemOperandIdx() const { + return memOperandIdx; } const Operand *GetBaseOperand() const { @@ -1118,7 +1217,6 @@ class GraphColorRegAllocator : public RegAllocator { fpCalleeUsed(alloc.Adapter()) { constexpr uint32 kNumInsnThreashold = 30000; numVregs = cgFunc.GetMaxVReg(); - regBuckets = (numVregs / kU64) + 1; localRegVec.resize(cgFunc.NumBBs()); bbRegInfo.resize(cgFunc.NumBBs()); if (CGOptions::DoMultiPassColorRA() && cgFunc.GetMirModule().IsCModule()) { @@ -1147,14 +1245,6 @@ class GraphColorRegAllocator : public RegAllocator { kSpillMemPost, }; - LiveRange *GetLiveRange(regno_t regNO) { - MapleMap::const_iterator it = lrMap.find(regNO); - if (it != lrMap.cend()) { - return it->second; - } else { - return nullptr; - } - } LiveRange 
*GetLiveRange(regno_t regNO) const { auto it = lrMap.find(regNO); if (it != lrMap.end()) { @@ -1185,51 +1275,40 @@ class GraphColorRegAllocator : public RegAllocator { } }; - template - void ForEachBBArrElem(const uint64 *vec, Func functor) const; - - template - void ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const; - - template - void ForEachRegArrElem(const uint64 *vec, Func functor) const; - - void PrintLiveUnitMap(const LiveRange &lr) const; - void PrintLiveRangeConflicts(const LiveRange &lr) const; - void PrintLiveBBBit(const LiveRange &lr) const; - void PrintLiveRange(const LiveRange &lr, const std::string &str) const; void PrintLiveRanges() const; void PrintLocalRAInfo(const std::string &str) const; void PrintBBAssignInfo() const; - void PrintBBs() const; void InitFreeRegPool(); LiveRange *NewLiveRange(); void CalculatePriority(LiveRange &lr) const; + void CalculatePriority() const; bool CreateLiveRangeHandleLocal(regno_t regNO, const BB &bb, bool isDef); - LiveRange *CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, uint32 currId); + LiveRange *CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, uint32 currId); void CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, uint32 currId, bool updateCount); - bool SetupLiveRangeByOpHandlePhysicalReg(const RegOperand ®Opnd, Insn &insn, regno_t regNO, bool isDef); - void SetupLiveRangeByOp(Operand &op, Insn &insn, bool isDef, uint32 &numUses); - void SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint); - bool UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) const; + void SetupLiveRangeByPhysicalReg(const Insn &insn, regno_t regNO, bool isDef); + void SetupLiveRangeByRegOpnd(const Insn &insn, const RegOperand ®Opnd, uint32 regSize, bool isDef); + void ComputeLiveRangeByLiveOut(BB &bb, uint32 currPoint); + void ComputeLiveRangeByLiveIn(BB &bb, uint32 currPoint); + void UpdateAdjMatrixByLiveIn(BB &bb, AdjMatrix &adjMat); + using 
RegOpndInfo = std::pair; // first is opnd, second is regSize + void CollectRegOpndInfo(const Insn &insn, std::vector &defOpnds, + std::vector &useOpnds); void UpdateCallInfo(uint32 bbId, uint32 currPoint, const Insn &insn); - void ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, - const Operand &opnd) const; - void SetOpndConflict(const Insn &insn, bool onlyDef); - void UpdateOpndConflict(const Insn &insn, bool multiDef); - void SetLrMustAssign(const RegOperand *regOpnd); + void InsertRegLive(regno_t regNO); // insert live reg to regLive + void RemoveRegLive(regno_t regNO); // remove reg from regLive + void UpdateAdjMatrixByRegNO(regno_t regNO, AdjMatrix &adjMat); + void UpdateAdjMatrix(const Insn &insn, const std::vector &defOpnds, + const std::vector &useOpnds, AdjMatrix &adjMat); + void ComputeLiveRangeForDefOperands(const Insn &insn, std::vector &defOpnds); + void ComputeLiveRangeForUseOperands(const Insn &insn, std::vector &useOpnds); + void UpdateRegLive(const Insn &insn, const std::vector &useOpnds); + void SetLrMustAssign(const RegOperand ®Opnd); void SetupMustAssignedLiveRanges(const Insn &insn); - void ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef); - void ComputeLiveRangesForEachUseOperand(Insn &insn); - void ComputeLiveRangesUpdateIfInsnIsCall(const Insn &insn); - void ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, uint32 currPoint); - void ComputeLiveRanges(); - MemOperand *CreateSpillMem(uint32 spillIdx, uint32 memSize, SpillMemCheck check); - bool CheckOverlap(uint64 val, uint32 i, LiveRange &lr1, LiveRange &lr2) const; - void CheckInterference(LiveRange &lr1, LiveRange &lr2) const; - void BuildInterferenceGraphSeparateIntFp(std::vector &intLrVec, std::vector &fpLrVec); - void BuildInterferenceGraph(); + void AddConflictAndPregvetoToLr(const std::vector &conflict, LiveRange &lr, bool isInt); + void ConvertAdjMatrixToConflict(const AdjMatrix &adjMat); + void ComputeLiveRangesAndConflict(); + MemOperand 
*CreateSpillMem(uint32 spillIdx, RegType regType, SpillMemCheck check); void SetBBInfoGlobalAssigned(uint32 bbID, regno_t regNO); bool HaveAvailableColor(const LiveRange &lr, uint32 num) const; void Separate(); @@ -1237,7 +1316,6 @@ class GraphColorRegAllocator : public RegAllocator { void SplitAndColor(); void ColorForOptPrologEpilog(); bool IsLocalReg(regno_t regNO) const; - bool IsLocalReg(const LiveRange &lr) const; void HandleLocalRaDebug(regno_t regNO, const LocalRegAllocator &localRa, bool isInt) const; void HandleLocalRegAssignment(regno_t regNO, LocalRegAllocator &localRa, bool isInt); void UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAllocator &localRa, bool isDef) const; @@ -1256,11 +1334,13 @@ class GraphColorRegAllocator : public RegAllocator { void SpillOperandForSpillPre(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); void SpillOperandForSpillPost(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); - MemOperand *GetConsistentReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, + MemOperand *GetConsistentReuseMem(const MapleSet &conflict, + const std::set &usedMemOpnd, uint32 size, RegType regType); - MemOperand *GetCommonReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, - RegType regType); - MemOperand *GetReuseMem(const LiveRange &lr); + MemOperand *GetCommonReuseMem(const MapleSet &conflict, + const std::set &usedMemOpnd, uint32 size, + RegType regType) const; + MemOperand *GetReuseMem(const LiveRange &lr) const; MemOperand *GetSpillMem(uint32 vregNO, uint32 spillSize, bool isDest, Insn &insn, regno_t regNO, bool &isOutOfRange); bool SetAvailableSpillReg(std::unordered_set &cannotUseReg, LiveRange &lr, @@ -1278,8 +1358,6 @@ class GraphColorRegAllocator : public RegAllocator { MapleBitVector &usedRegMask); bool EncountPrevRef(const BB &pred, LiveRange &lr, bool isDef, std::vector& visitedMap); bool FoundPrevBeforeCall(Insn 
&insn, LiveRange &lr, bool isDef); - bool EncountNextRef(const BB &succ, LiveRange &lr, bool isDef, std::vector& visitedMap); - bool FoundNextBeforeCall(Insn &insn, LiveRange &lr, bool isDef); bool HavePrevRefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const; bool HaveNextDefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const; bool NeedCallerSave(Insn &insn, LiveRange &lr, bool isDef); @@ -1288,7 +1366,7 @@ class GraphColorRegAllocator : public RegAllocator { RegOperand *GetReplaceUseDefOpnd(Insn &insn, const Operand &opnd, uint32 &spillIdx, MapleBitVector &usedRegMask); void MarkCalleeSaveRegs(); - void MarkUsedRegs(Operand &opnd, MapleBitVector &usedRegMask); + void MarkUsedRegs(Operand &opnd, MapleBitVector &usedRegMask) const; bool FinalizeRegisterPreprocess(FinalizeRegisterInfo &fInfo, const Insn &insn, MapleBitVector &usedRegMask); void SplitVregAroundLoop(const CGFuncLoops &loop, const std::vector &lrs, @@ -1328,8 +1406,8 @@ class GraphColorRegAllocator : public RegAllocator { bool LrCanBeColored(const LiveRange &lr, const BB &bbAdded, std::unordered_set &conflictRegs); void MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb) const; bool ContainsLoop(const CGFuncLoops &loop, const std::set &loops) const; - void GetAllLrMemberLoops(LiveRange &lr, std::set &loops); - bool SplitLrShouldSplit(LiveRange &lr); + void GetAllLrMemberLoops(const LiveRange &lr, std::set &loops); + bool SplitLrShouldSplit(const LiveRange &lr); bool SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, std::unordered_set &conflictRegs); void SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr, const std::set &origLoops, const std::set &newLoops); @@ -1368,8 +1446,6 @@ class GraphColorRegAllocator : public RegAllocator { MapleSet fpCalleeUsed; Bfs *bfs = nullptr; - uint32 bbBuckets = 0; /* size of bit array for bb (each bucket == 64 bits) */ - uint32 regBuckets = 0; /* size of bit array for reg (each bucket == 64 bits) */ uint32 intRegNum = 0; /* total available int 
preg */ uint32 fpRegNum = 0; /* total available fp preg */ uint32 numVregs = 0; /* number of vregs when starting */ @@ -1384,7 +1460,8 @@ class GraphColorRegAllocator : public RegAllocator { * sp_reg1 <- [spillMemOpnds[1]] * sp_reg2 <- [spillMemOpnds[2]] */ - std::array spillMemOpnds = { nullptr }; + std::array intSpillMemOpnds = { nullptr }; + std::array fpSpillMemOpnds = { nullptr }; bool operandSpilled[kSpillMemOpndNum]; bool needExtraSpillReg = false; #ifdef USE_LRA @@ -1410,7 +1487,7 @@ class CallerSavePre : public CGPre { regAllocator(regAlloc), loopHeadBBs(ssaPreAllocator.Adapter()) {} - ~CallerSavePre() { + ~CallerSavePre() override { func = nullptr; regAllocator = nullptr; workLr = nullptr; diff --git a/src/mapleall/maple_be/include/cg/reg_alloc_lsra.h b/src/mapleall/maple_be/include/cg/reg_alloc_lsra.h index e5dd9b8a2ea66ec5a78ca278a614dd3b29f35505..8c17ba7b6761d43ca8844cb48281354c04cc065b 100644 --- a/src/mapleall/maple_be/include/cg/reg_alloc_lsra.h +++ b/src/mapleall/maple_be/include/cg/reg_alloc_lsra.h @@ -469,7 +469,7 @@ class LSRALinearScanRegAllocator : public RegAllocator { void ComputeLiveInterval(); void FindLowestPrioInActive(LiveInterval *&targetLi, RegType regType = kRegTyInt, bool startRa = false); void LiveIntervalAnalysis(); - bool OpndNeedAllocation(const Insn &insn, Operand &opnd, bool isDef, uint32 insnNum); + bool OpndNeedAllocation(Operand &opnd, bool isDef, uint32 insnNum); void InsertParamToActive(Operand &opnd); void InsertToActive(Operand &opnd, uint32 insnNum); void ReturnPregToSet(const LiveInterval &li, uint32 preg); diff --git a/src/mapleall/maple_be/include/cg/reg_info.h b/src/mapleall/maple_be/include/cg/reg_info.h index 1b6797ac6b50f72afa4c32fc0b6cd359a2a543fc..40f5a51af743c307ccf4c90c88013617d7a5959b 100644 --- a/src/mapleall/maple_be/include/cg/reg_info.h +++ b/src/mapleall/maple_be/include/cg/reg_info.h @@ -100,13 +100,13 @@ class VregInfo { ++virtualRegCount; return temp; } - void Inc(uint32 v) { + void Inc(uint32 v) 
const { virtualRegCount += v; } uint32 GetCount() const { return virtualRegCount; } - void SetCount(uint32 v) { + void SetCount(uint32 v) const { /* Vreg number can only increase. */ if (virtualRegCount < v) { virtualRegCount = v; @@ -120,7 +120,7 @@ class VregInfo { void SetMaxRegCount(uint32 num) { maxRegCount = num; } - void IncMaxRegCount(uint32 num) { + void IncMaxRegCount(uint32 num) const { maxRegCount += num; } @@ -137,16 +137,16 @@ class VregInfo { RegType VRegTableGetType(uint32 idx) const { return vRegTable[idx].GetType(); } - VirtualRegNode &VRegTableElementGet(uint32 idx) { + VirtualRegNode &VRegTableElementGet(uint32 idx) const { return vRegTable[idx]; } void VRegTableElementSet(uint32 idx, VirtualRegNode *node) { vRegTable[idx] = *node; } - void VRegTableValuesSet(uint32 idx, RegType rt, uint32 sz) { + void VRegTableValuesSet(uint32 idx, RegType rt, uint32 sz) const { new (&vRegTable[idx]) VirtualRegNode(rt, sz); } - void VRegOperandTableSet(regno_t regNO, RegOperand *rp) { + void VRegOperandTableSet(regno_t regNO, RegOperand *rp) const { vRegOperandTable[regNO] = rp; } }; diff --git a/src/mapleall/maple_be/include/cg/regsaves.h b/src/mapleall/maple_be/include/cg/regsaves.h index 5f20313a9ec66c01957725e113a60512bcf47f45..b4ddd18089e200eee7adb8bfce952719b5d2ce04 100644 --- a/src/mapleall/maple_be/include/cg/regsaves.h +++ b/src/mapleall/maple_be/include/cg/regsaves.h @@ -31,7 +31,7 @@ class RegSavesOpt { virtual void Run() {} std::string PhaseName() const { - return "regsavesopt"; + return "regsaves"; } CGFunc *GetCGFunc() const { diff --git a/src/mapleall/maple_be/include/cg/rematerialize.h b/src/mapleall/maple_be/include/cg/rematerialize.h index f1cd0b1933bda292443e1e508ba0550c50b7af40..93b16f0fed1f4c8d44d3e849625e219b2e838eb7 100644 --- a/src/mapleall/maple_be/include/cg/rematerialize.h +++ b/src/mapleall/maple_be/include/cg/rematerialize.h @@ -49,6 +49,7 @@ class Rematerializer { void SetRematLevel(RematLevel val) { rematLevel = val; } + 
RematLevel GetRematLevel() const { return rematLevel; } @@ -88,4 +89,4 @@ class Rematerializer { }; } /* namespace maplebe */ -#endif /* MAPLEBE_INCLUDE_CG_REMATERIALIZE_H */ \ No newline at end of file +#endif /* MAPLEBE_INCLUDE_CG_REMATERIALIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/schedule.h b/src/mapleall/maple_be/include/cg/schedule.h index 052f850d798cd39108eaba62e91d8f9a88d1e708..6949fc3fa666ee81dd73849bb2246204ae975e73 100644 --- a/src/mapleall/maple_be/include/cg/schedule.h +++ b/src/mapleall/maple_be/include/cg/schedule.h @@ -25,6 +25,103 @@ namespace maplebe { #define LIST_SCHED_DUMP_NEWPM CG_DEBUG_FUNC(f) #define LIST_SCHED_DUMP_REF CG_DEBUG_FUNC(cgFunc) +class ScheduleProcessInfo { + public: + explicit ScheduleProcessInfo(uint32 size) { + availableReadyList.reserve(size); + scheduledNodes.reserve(size); + } + + virtual ~ScheduleProcessInfo() = default; + + uint32 GetLastUpdateCycle() const { + return lastUpdateCycle; + } + + void SetLastUpdateCycle(uint32 updateCycle) { + lastUpdateCycle = updateCycle; + } + + uint32 GetCurrCycle() const { + return currCycle; + } + + void IncCurrCycle() { + ++currCycle; + } + + void DecAdvanceCycle() { + advanceCycle--; + } + + uint32 GetAdvanceCycle() const { + return advanceCycle; + } + + void SetAdvanceCycle(uint32 cycle) { + advanceCycle = cycle; + } + + void ClearAvailableReadyList() { + availableReadyList.clear(); + } + + void PushElemIntoAvailableReadyList(DepNode *node) { + availableReadyList.emplace_back(node); + } + + size_t SizeOfAvailableReadyList() const { + return availableReadyList.size(); + } + + bool AvailableReadyListIsEmpty() const { + return availableReadyList.empty(); + } + + void SetAvailableReadyList(const std::vector &tempReadyList) { + availableReadyList = tempReadyList; + } + + const std::vector &GetAvailableReadyList() const { + return availableReadyList; + } + + const std::vector &GetAvailableReadyList() { + return availableReadyList; + } + + void 
PushElemIntoScheduledNodes(DepNode *node) { + node->SetState(kScheduled); + node->SetSchedCycle(currCycle); + node->OccupyUnits(); + scheduledNodes.emplace_back(node); + } + + bool IsFirstSeparator() const { + return isFirstSeparator; + } + + void ResetIsFirstSeparator() { + isFirstSeparator = false; + } + + size_t SizeOfScheduledNodes() const { + return scheduledNodes.size(); + } + + const std::vector &GetScheduledNodes() const { + return scheduledNodes; + } + + private: + std::vector availableReadyList; + std::vector scheduledNodes; + uint32 lastUpdateCycle = 0; + uint32 currCycle = 0; + uint32 advanceCycle = 0; + bool isFirstSeparator = true; +}; + class RegPressureSchedule { public: RegPressureSchedule(CGFunc &func, MapleAllocator &alloc) diff --git a/src/mapleall/maple_be/include/cg/schedule_heuristic.h b/src/mapleall/maple_be/include/cg/schedule_heuristic.h new file mode 100644 index 0000000000000000000000000000000000000000..599fa418c07383c49279f201e32e0d984d5787c5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/schedule_heuristic.h @@ -0,0 +1,125 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#ifndef MAPLEBE_INCLUDE_CG_SCHEDULE_HEURISTIC_H +#define MAPLEBE_INCLUDE_CG_SCHEDULE_HEURISTIC_H + +#include "deps.h" + +/* + * Define a series of priority comparison function objects. + * @ReturnValue: + * - positive: node1 has higher priority + * - negative: node2 has higher priority + * - zero: node1 == node2 + * And ensure the sort is stable. 
+ */ +namespace maplebe { +/* Prefer max delay priority */ +class CompareDelay { + public: + int operator() (const DepNode &node1, const DepNode &node2) { + return static_cast(node1.GetDelay() - node2.GetDelay()); + } +}; + +/* Prefer min eStart */ +class CompareEStart { + public: + int operator() (const DepNode &node1, const DepNode &node2) { + return static_cast(node2.GetEStart() - node1.GetEStart()); + } +}; + +/* Prefer less lStart */ +class CompareLStart { + public: + int operator() (const DepNode &node1, const DepNode &node2) { + return static_cast(node2.GetLStart() - node1.GetLStart()); + } +}; + +/* Prefer using more unit kind */ +class CompareUnitKindNum { + public: + explicit CompareUnitKindNum(uint32 maxUnitIndex) : maxUnitIdx(maxUnitIndex) {} + + int operator() (const DepNode &node1, const DepNode &node2) { + bool use1 = IsUseUnitKind(node1); + bool use2 = IsUseUnitKind(node2); + if ((use1 && use2) || (!use1 && !use2)) { + return 0; + } else if (!use2) { + return 1; + } else { + return -1; + } + } + + private: + /* Check if a node use a specific unit kind */ + bool IsUseUnitKind(const DepNode &depNode) const { + uint32 unitKind = depNode.GetUnitKind(); + auto idx = static_cast(__builtin_ffs(static_cast(unitKind))); + while (idx != 0) { + ASSERT(maxUnitIdx < kUnitKindLast, "invalid unit index"); + if (idx == maxUnitIdx) { + return true; + } + unitKind &= ~(1u << (idx - 1u)); + idx = static_cast(__builtin_ffs(static_cast(unitKind))); + } + return false; + } + + uint32 maxUnitIdx = 0; +}; + +/* Prefer slot0 */ +class CompareSlotType { + public: + int operator() (const DepNode &node1, const DepNode &node2) { + SlotType slotType1 = node1.GetReservation()->GetSlot(); + SlotType slotType2 = node2.GetReservation()->GetSlot(); + if (slotType1 == kSlots) { + slotType1 = kSlot0; + } + if (slotType2 == kSlots) { + slotType2 = kSlot0; + } + return (slotType2 - slotType1); + } +}; + +/* Prefer more succNodes */ +class CompareSuccNodeSize { + public: + int operator() 
(const DepNode &node1, const DepNode &node2) { + return static_cast(node1.GetSuccs().size() - node2.GetSuccs().size()); + } +}; + +/* Default order */ +class CompareInsnID { + public: + int operator() (const DepNode &node1, const DepNode &node2) { + Insn *insn1 = node1.GetInsn(); + ASSERT(insn1 != nullptr, "get insn from depNode failed"); + Insn *insn2 = node2.GetInsn(); + ASSERT(insn2 != nullptr, "get insn from depNode failed"); + return static_cast(insn2->GetId() - insn1->GetId()); + } +}; +} /* namespace maplebe */ +#endif // MAPLEBE_INCLUDE_CG_SCHEDULE_HEURISTIC_H diff --git a/src/mapleall/maple_be/include/cg/sparse_datainfo.h b/src/mapleall/maple_be/include/cg/sparse_datainfo.h index 048a5197d3c19e1025667c6b1fa8c4ccee8203d0..f0db416a9f0e3ec3bb01939edd7ff4a642badcf0 100644 --- a/src/mapleall/maple_be/include/cg/sparse_datainfo.h +++ b/src/mapleall/maple_be/include/cg/sparse_datainfo.h @@ -118,7 +118,7 @@ class SparseDataInfo { info.Clear(); } - void EnlargeCapacityToAdaptSize(uint32 bitNO) const { + void EnlargeCapacityToAdaptSize(uint32 /* bitNO */) const { /* add one more size for each enlarge action */ } diff --git a/src/mapleall/maple_be/include/cg/standardize.h b/src/mapleall/maple_be/include/cg/standardize.h index 322497a9e3c59479fb846a8ebe95d53ae310d271..86442a1dc12fd23d7996e1a20cea639c14b4a517 100644 --- a/src/mapleall/maple_be/include/cg/standardize.h +++ b/src/mapleall/maple_be/include/cg/standardize.h @@ -49,7 +49,7 @@ class Standardize { void SetAddressMapping(bool needMapping) { needAddrMapping = needMapping; } - bool NeedAddressMapping(const Insn &insn) { + bool NeedAddressMapping(const Insn &insn) const { /* Operand number for two addressing mode is 2 */ /* and 3 for three addressing mode */ return needAddrMapping && ((insn.GetOperandSize() > 2) || (insn.IsUnaryOp())); diff --git a/src/mapleall/maple_be/include/cg/tailcall.h b/src/mapleall/maple_be/include/cg/tailcall.h index 
914471302e43b68df3ed86be6c3d696bb2fe2f9b..f8136e9c92cf2599b1752c3956c53d57f8cb2de9 100644 --- a/src/mapleall/maple_be/include/cg/tailcall.h +++ b/src/mapleall/maple_be/include/cg/tailcall.h @@ -42,10 +42,10 @@ class TailCallOpt { void Run(); bool DoTailCallOpt(); void TideExitBB(); - bool OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &exitBB) const; - void TailCallBBOpt(BB &bb, MapleSet &callInsns, BB &exitBB); - void ConvertToTailCalls(MapleSet &callInsnsMap); - MapleMap> &GetExitBB2CallSitesMap() { + bool OptimizeTailBB(BB &bb, MapleSet &callInsns) const; + void TailCallBBOpt(BB &bb, MapleSet &callInsns, BB &exitBB); + void ConvertToTailCalls(MapleSet &callInsnsMap); + MapleMap, BBIdCmp> &GetExitBB2CallSitesMap() { return exitBB2CallSitesMap; } void SetCurTailcallExitBB(BB *bb) { @@ -67,19 +67,16 @@ class TailCallOpt { virtual bool InsnIsCall(Insn &insn) const = 0; virtual bool InsnIsUncondJump(Insn &insn) const = 0; virtual bool InsnIsAddWithRsp(Insn &insn) const = 0; - virtual bool OpndIsStackRelatedReg(RegOperand &opnd) const = 0; virtual bool OpndIsR0Reg(RegOperand &opnd) const = 0; virtual bool OpndIsCalleeSaveReg(RegOperand &opnd) const = 0; - virtual bool IsAddOrSubOp(MOperator mOp) const = 0; virtual void ReplaceInsnMopWithTailCall(Insn &insn) = 0; - bool IsStackAddrTaken(); protected: CGFunc &cgFunc; MemPool *memPool; MapleAllocator tmpAlloc; bool stackProtect = false; - MapleMap> exitBB2CallSitesMap; + MapleMap, BBIdCmp> exitBB2CallSitesMap; BB *curTailcallExitBB = nullptr; }; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h index 9551b5d26797a0bd7e9e7c4b1f2c82c9f042ad22..48938e08a6800849a2be010f5a57cad6bc0eac73 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h @@ -30,8 +30,6 @@ #include "x64_rematerialize.h" namespace maplebe { -constexpr int32 kIntRegTypeNum = 5; - class X64CG : public CG { public: X64CG(MIRModule 
&mod, const CGOptions &opts) : CG(mod, opts) {} @@ -128,10 +126,11 @@ class X64CG : public CG { (void)ssaInfo; return nullptr; } - ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const override { (void)mp; (void)f; (void)ssaInfo; + (void)ll; return nullptr; } RedundantComputeElim *CreateRedundantCompElim(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { @@ -148,14 +147,17 @@ class X64CG : public CG { /* Used for GCTIB pattern merging */ std::string FindGCTIBPatternName(const std::string &name) const override; - static std::array, kIntRegTypeNum> intRegNames; enum : uint8 { - kR8LowList, - kR8HighList, - kR16List, - kR32List, - kR64List + kR8LowList = 0, + kR8HighList = 1, + kR16List = 2, + kR32List = 3, + kR64List = 4, + kR128List = 5, + kIntRegTypeNum }; + static std::array, kIntRegTypeNum> intRegNames; + bool IsEffectiveCopy(Insn &insn) const final; bool IsTargetInsn(MOperator mOp) const final; bool IsClinitInsn(MOperator mOp) const final; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h index 4c776972e731df1116f951f70898f731c6e398ef..c5af805e6b3ec148951d223e50450cf099472cd3 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h @@ -165,7 +165,6 @@ class X64CGFunc : public CGFunc { Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) override; Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) override; void GenerateYieldpoint(BB &bb) override; - Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; Operand &GetOrCreateRflag() override; const Operand *GetRflag() const override; const Operand *GetFloatRflag() const override; @@ -268,11 +267,8 @@ class X64CGFunc : public CGFunc { }; MemOperand 
*GetOrCreatSpillMem(regno_t vrNum, uint32 memSize) override; + RegOperand *SelectIntrinsicOpLoadTlsAnchor(const IntrinsicopNode& intrinsicopNode, const BaseNode &parent) override; void FreeSpillRegMem(regno_t vrNum) override; - int64 GetOrCreatSpillRegLocation(regno_t vrNum) { - auto symLoc = GetMemlayout()->GetLocOfSpillRegister(vrNum); - return static_cast(GetBaseOffset(*symLoc)); - } private: MapleSet calleeSavedRegs; uint32 numIntregToCalleeSave = 0; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h b/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h index 4970611d8f8fa893847d0106ac2c6d0771319510..c74d3e8b832c528a221accd087c6e05c6d8191ed 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h @@ -38,7 +38,8 @@ class X64Emitter : public AsmEmitter { class X64OpndEmitVisitor : public OpndEmitVisitor { public: - X64OpndEmitVisitor(Emitter &emitter) : OpndEmitVisitor(emitter) {} + X64OpndEmitVisitor(Emitter &emitter, const OpndDesc *operandProp) + : OpndEmitVisitor(emitter, operandProp) {} ~X64OpndEmitVisitor() override = default; void Visit(RegOperand *v) final; @@ -53,6 +54,8 @@ class X64OpndEmitVisitor : public OpndEmitVisitor { void Visit(ExtendShiftOperand *v) final; void Visit(CommentOperand *v) final; void Visit(OfstOperand *v) final; + private: + void Visit(maplebe::RegOperand *v, uint32 regSize); }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h b/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h index 505a717837c5a837a750dd1da07b92446f4493e3..08694380247b5c9fec409d7844842d1b104f861a 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h @@ -28,7 +28,7 @@ constexpr int kX64StackPtrAlignment = 16; constexpr int32 kOffsetAlign = 8; constexpr uint32 kIntregBytelen = 8; /* 64-bit */ constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ -constexpr int 
kSizeOfFplr = 16; +constexpr uint32 kSizeOfFplr = 16; class Insn; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h b/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h index 59708ac59fb3e7795e09a6e3426d33a19f6911cd..3d4017f0b579e1a43523dbc3042bd6f64909708a 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h @@ -22,12 +22,12 @@ namespace maplebe { namespace x64 { /* register, imm , memory, cond */ -#define DEF_X64_CMP_MAPPING_INT(SIZE) \ -static const X64MOP_t cmpIselMap##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = {\ - {MOP_cmp##SIZE##_r_r, MOP_begin, MOP_cmp##SIZE##_r_m, MOP_begin}, \ - {MOP_cmp##SIZE##_i_r, MOP_begin, MOP_cmp##SIZE##_i_m, MOP_begin}, \ - {MOP_cmp##SIZE##_m_r, MOP_begin, MOP_begin, MOP_begin}, \ - {MOP_begin, MOP_begin, MOP_begin, MOP_begin}, \ +#define DEF_X64_CMP_MAPPING_INT(SIZE) \ +static const X64MOP_t cmpIselMap##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ + {MOP_cmp##SIZE##_r_r, MOP_begin, MOP_cmp##SIZE##_r_m, MOP_begin}, \ + {MOP_cmp##SIZE##_i_r, MOP_begin, MOP_cmp##SIZE##_i_m, MOP_begin}, \ + {MOP_cmp##SIZE##_m_r, MOP_begin, MOP_begin, MOP_begin}, \ + {MOP_begin, MOP_begin, MOP_begin, MOP_begin}, \ }; DEF_X64_CMP_MAPPING_INT(b) DEF_X64_CMP_MAPPING_INT(w) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h b/src/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h index af7df9c4c72d13f4295eada723f4ea470a8e534e..8197ad99d6a0454a42980593603fbbaec6db066f 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h @@ -17,11 +17,11 @@ #define MAPLEBE_INCLUDE_X64_LOCALO_H #include "local_opt.h" -namespace maplebe{ +namespace maplebe { class X64LocalOpt : public LocalOpt { public: - X64LocalOpt(MemPool &memPool, CGFunc &func, ReachingDefinition& rd) - : LocalOpt(memPool, func, rd){} + X64LocalOpt(MemPool &memPool, 
CGFunc &func, ReachingDefinition &rd) + : LocalOpt(memPool, func, rd) {} ~X64LocalOpt() = default; private: void DoLocalCopyProp() override; diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_md.def b/src/mapleall/maple_be/include/cg/x86_64/x64_md.def index 04c55e4618780ad5795aa0aef37ae397db5bbfc8..a3f3183c75e695cf2c4ad65fb24939d2feee79b8 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_md.def +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_md.def @@ -55,6 +55,7 @@ DEFINE_MOP(MOP_movq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE,kLtAlu," /* floating point mov */ DEFINE_MOP(MOP_movd_fr_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg32ID},ISSTORE,kLtAlu,"movd","0,1",1) +DEFINE_MOP(MOP_movd_r_fr, {&OpndDesc::Reg32IS,&OpndDesc::Reg128FD},ISSTORE,kLtAlu,"movd","0,1",1) DEFINE_MOP(MOP_movq_fr_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg64ID},ISSTORE,kLtAlu,"movq","0,1",1) DEFINE_MOP(MOP_movq_r_fr, {&OpndDesc::Reg64IS,&OpndDesc::Reg128FD},ISSTORE,kLtAlu,"movq","0,1",1) DEFINE_MOP(MOP_movfs_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg128FD},ISLOAD,kLtAlu,"movss","0,1",1) @@ -70,7 +71,7 @@ DEFINE_MOP(MOP_movabs_l_r, {&OpndDesc::Lbl64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu," /* push & pop & lea */ DEFINE_MOP(MOP_pushq_r, {&OpndDesc::Reg64IS},0,kLtAlu,"pushq","0",1) -DEFINE_MOP(MOP_popq_r, {&OpndDesc::Reg32IS},0,kLtAlu,"popq","0",1) +DEFINE_MOP(MOP_popq_r, {&OpndDesc::Reg64IS},0,kLtAlu,"popq","0",1) DEFINE_MOP(MOP_leaq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) DEFINE_MOP(MOP_leal_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h b/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h index a46a8ba5622246011e3e346755e6f78012a345b3..30a127ea23e001db0f8de8c5e3a42e3582bf22c6 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h @@ -108,8 +108,6 @@ class X64MemLayout : public MemLayout { */ 
virtual void AssignSpillLocationsToPseudoRegisters() override; - virtual SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) override; - uint32 GetSizeOfSpillReg() const { return segSpillReg.GetSize(); } @@ -149,7 +147,10 @@ class X64MemLayout : public MemLayout { MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */ MemSegment segGrSaveArea = MemSegment(kMsGrSaveArea); MemSegment segVrSaveArea = MemSegment(kMsVrSaveArea); - MemSegment segSpillReg = MemSegment(kMsSpillReg); + + SymbolAlloc *CreateSymbolAlloc() const override { + return memAllocator->GetMemPool()->New(); + } }; } #endif // MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h b/src/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h index 055658b0a5bc152b0033b6b494b2abe427626655..ae2b89a64cb656c2e3294b220f1649930ff5b943 100644 --- a/src/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h @@ -40,6 +40,7 @@ class X64InsnVisitor : public InsnVisitor { void ReTargetSuccBB(BB &bb, LabelIdx newTarget) const override; void FlipIfBB(BB &bb, LabelIdx ftLabel) const override; BB *CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const override; + void ModifyFathruBBToGotoBB(BB &bb, LabelIdx labelIdx) const override; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/ad/mad.cpp b/src/mapleall/maple_be/src/ad/mad.cpp index 367c25f67831a461f309867c63d49c921d38d7aa..8d4e74ba449b37dd77be4826dda1766accc85e9d 100644 --- a/src/mapleall/maple_be/src/ad/mad.cpp +++ b/src/mapleall/maple_be/src/ad/mad.cpp @@ -16,7 +16,7 @@ #include #if TARGAARCH64 #include "aarch64_operand.h" -#elif TARGRISCV64 +#elif defined(TARGRISCV64) && TARGRISCV64 #include "riscv64_operand.h" #endif #include "schedule.h" @@ -278,6 +278,9 @@ bool AluShiftBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { return 
&(defInsn.GetOperand(kInsnFirstOpnd)) != &(useInsn.GetOperand(kInsnSecondOpnd)); } +/* + * AccumulatorBypass: from multiplier to summator + */ bool AccumulatorBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { /* * hook condition diff --git a/src/mapleall/maple_be/src/be/bbt.cpp b/src/mapleall/maple_be/src/be/bbt.cpp index bc00a8b515ecc5d0059a4de275bb7ce1eefdb897..664554ac7881f12e7388fb49648f780e1c322906 100644 --- a/src/mapleall/maple_be/src/be/bbt.cpp +++ b/src/mapleall/maple_be/src/be/bbt.cpp @@ -14,7 +14,7 @@ */ #include "bbt.h" namespace maplebe { -#if DEBUG +#if defined(DEBUG) && DEBUG void BBT::Dump(const MIRModule &mod) const { if (IsTry()) { LogInfo::MapleLogger() << "Try" << '\n'; diff --git a/src/mapleall/maple_be/src/be/becommon.cpp b/src/mapleall/maple_be/src/be/becommon.cpp index 58e1e8a0614164aa1eb7e6eb6d6e36f2bb4e75bf..748d1c7712290ae36d20406ef1e73c913fd0dc3e 100644 --- a/src/mapleall/maple_be/src/be/becommon.cpp +++ b/src/mapleall/maple_be/src/be/becommon.cpp @@ -197,7 +197,11 @@ void BECommon::ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { } else { /* for unions, bitfields are treated as non-bitfields */ allocedSize = std::max(allocedSize, static_cast(fieldTypeSize)); } - SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), fieldAlign)); + if (structType.HasZeroWidthBitField()) { + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), originAlign)); + } else { + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), fieldAlign)); + } /* C99 * Last struct element of a struct with more than one member * is a flexible array if it is an array of size 0. 
@@ -478,10 +482,10 @@ void BECommon::GenFieldOffsetMap(const std::string &className) { fieldSize = RTSupport::GetRTSupportInstance().GetFieldSize(); } - std::pair p = GetFieldOffset(*classType, i); - CHECK_FATAL(p.second == 0, "expect p.second equals 0"); + OffsetPair p = GetJClassFieldOffset(*classType, i); + CHECK_FATAL(p.bitOffset == 0, "expect p.second equals 0"); LogInfo::MapleLogger() << "CLASS_FIELD_OFFSET_MAP(" << className.c_str() << "," << fieldName.c_str() << "," - << p.first << "," << fieldSize << ")\n"; + << p.bitOffset << "," << fieldSize << ")\n"; } } @@ -521,10 +525,10 @@ void BECommon::GenFieldOffsetMap(MIRClassType &classType, FILE &outFile) { fieldSize = RTSupport::GetRTSupportInstance().GetFieldSize(); } - std::pair p = GetFieldOffset(classType, i); - CHECK_FATAL(p.second == 0, "expect p.second equals 0"); + OffsetPair p = GetJClassFieldOffset(classType, i); + CHECK_FATAL(p.bitOffset == 0, "expect p.second equals 0"); (void)fprintf(&outFile, "__MRT_CLASS_FIELD(%s, %s, %d, %lu)\n", className.c_str(), fieldName.c_str(), - p.first, fieldSize); + p.bitOffset, fieldSize); } } @@ -547,135 +551,16 @@ void BECommon::GenObjSize(const MIRClassType &classType, FILE &outFile) const { fprintf(&outFile, "__MRT_CLASS(%s, %" PRIu64 ", %s)\n", className.c_str(), objSize, parentName); } -/* - * compute the offset of the field given by fieldID within the structure type - * structy; it returns the answer in the pair (byteoffset, bitoffset) such that - * if it is a bitfield, byteoffset gives the offset of the container for - * extracting the bitfield and bitoffset is with respect to the container - */ -std::pair BECommon::GetFieldOffset(MIRStructType &structType, FieldID fieldID) { - CHECK_FATAL(fieldID <= GetStructFieldCount(structType.GetTypeIndex()), "GetFieldOFfset: fieldID too large"); - uint64 allocedSize = 0; - uint64 allocedSizeInBits = 0; - FieldID curFieldID = 1; +// compute the offset of the field given by fieldID within the java class +OffsetPair 
BECommon::GetJClassFieldOffset(MIRStructType &classType, FieldID fieldID) const { + CHECK_FATAL(fieldID <= GetStructFieldCount(classType.GetTypeIndex()), "GetFieldOFfset: fieldID too large"); if (fieldID == 0) { - return std::pair(0, 0); - } - - if (structType.GetKind() == kTypeClass) { - CHECK_FATAL(HasJClassLayout(static_cast(structType)), "Cannot found java class layout information"); - const JClassLayout &layout = GetJClassLayout(static_cast(structType)); - CHECK_FATAL(static_cast(fieldID) - 1 < layout.size(), "subscript out of range"); - return std::pair(static_cast(layout[fieldID - 1].GetOffset()), 0); - } - - /* process the struct fields */ - FieldVector fields = structType.GetFields(); - auto structPack = static_cast(structType.GetTypeAttrs().GetPack()); - for (uint32 j = 0; j < fields.size(); ++j) { - TyIdx fieldTyIdx = fields[j].second.first; - auto fieldAttr = fields[j].second.second; - MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); - uint32 fieldTypeSize = GetTypeSize(fieldTyIdx); - uint64 fieldSizeBits = fieldTypeSize * kBitsPerByte; - auto originAlign = std::max(GetTypeAlign(fieldTyIdx), static_cast(fieldAttr.GetAlign())); - uint64 fieldAlign = static_cast( - static_cast(fieldAttr.IsPacked() ? 1 : std::min(originAlign, structPack))); - uint64 fieldAlignBits = static_cast(static_cast(fieldAlign * kBitsPerByte)); - CHECK_FATAL(fieldAlign != 0, "fieldAlign should not equal 0"); - if (structType.GetKind() != kTypeUnion) { - if (fieldType->GetKind() == kTypeBitField) { - uint32 fieldSize = static_cast(fieldType)->GetFieldSize(); - /* - * Is this field is crossing the align boundary of its base type? Or, - * is field a zero-with bit field? - * Refer to C99 standard (§6.7.2.1) : - * > As a special case, a bit-field structure member with a width of 0 indicates that no further - * > bit-field is to be packed into the unit in which the previous bit-field, if any, was placed. 
- * - * We know that A zero-width bit field can cause the next field to be aligned on the next container - * boundary where the container is the same size as the underlying type of the bit field. - */ - if ((!structType.GetTypeAttrs().IsPacked() && - ((allocedSizeInBits / fieldSizeBits) != ((allocedSizeInBits + fieldSize - 1u) / fieldSizeBits))) || - fieldSize == 0) { - /* - * the field is crossing the align boundary of its base type; - * align alloced_size_in_bits to fieldAlign - */ - allocedSizeInBits = RoundUp(allocedSizeInBits, fieldSizeBits); - } - /* allocate the bitfield */ - if (curFieldID == fieldID) { - return std::pair( - static_cast(static_cast((allocedSizeInBits / fieldAlignBits) * fieldAlign)), - static_cast(static_cast(allocedSizeInBits % fieldAlignBits))); - } else { - ++curFieldID; - } - allocedSizeInBits += fieldSize; - allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); - } else { - bool leftOverBits = false; - uint64 offset = 0; - - if (allocedSizeInBits == allocedSize * k8BitSize) { - allocedSize = RoundUp(allocedSize, fieldAlign); - offset = allocedSize; - } else { - /* still some leftover bits on allocated words, we calculate things based on bits then. 
*/ - if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fieldSizeBits - k1BitSize) / fieldAlignBits) { - /* the field is crossing the align boundary of its base type */ - allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlignBits); - } - allocedSize = RoundUp(allocedSize, fieldAlign); - offset = (allocedSizeInBits / fieldAlignBits) * fieldAlign; - leftOverBits = true; - } - - if (curFieldID == fieldID) { - return std::pair(static_cast(static_cast(offset)), 0); - } else { - MIRStructType *subStructType = fieldType->EmbeddedStructType(); - if (subStructType == nullptr) { - ++curFieldID; - } else { - if ((curFieldID + GetStructFieldCount(subStructType->GetTypeIndex())) < fieldID) { - curFieldID += GetStructFieldCount(subStructType->GetTypeIndex()) + 1; - } else { - std::pair result = GetFieldOffset(*subStructType, fieldID - curFieldID); - return std::pair(result.first + static_cast(offset), result.second); - } - } - } - - if (leftOverBits) { - allocedSizeInBits += fieldSizeBits; - allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); - } else { - allocedSize += fieldTypeSize; - allocedSizeInBits = allocedSize * kBitsPerByte; - } - } - } else { /* for unions, bitfields are treated as non-bitfields */ - if (curFieldID == fieldID) { - return std::pair(0, 0); - } else { - MIRStructType *subStructType = fieldType->EmbeddedStructType(); - if (subStructType == nullptr) { - curFieldID++; - } else { - if ((curFieldID + GetStructFieldCount(subStructType->GetTypeIndex())) < fieldID) { - curFieldID += GetStructFieldCount(subStructType->GetTypeIndex()) + 1; - } else { - return GetFieldOffset(*subStructType, fieldID - curFieldID); - } - } - } - } + return {0, 0}; } - CHECK_FATAL(false, "GetFieldOffset() fails to find field"); - return std::pair(0, 0); + CHECK_FATAL(HasJClassLayout(static_cast(classType)), "Cannot found java class layout information"); + const JClassLayout &layout = GetJClassLayout(static_cast(classType)); + 
CHECK_FATAL(static_cast(fieldID) - 1 < layout.size(), "subscript out of range"); + return {static_cast(layout[fieldID - 1].GetOffset()), 0}; } bool BECommon::TyIsInSizeAlignTable(const MIRType &ty) const { @@ -749,15 +634,15 @@ BaseNode *BECommon::GetAddressOfNode(const BaseNode &node) { uint32 index = static_cast(GlobalTables::GetTypeTable().GetTypeTable().at( iNode.GetTyIdx()))->GetPointedTyIdx(); MIRType *pointedType = GlobalTables::GetTypeTable().GetTypeTable().at(index); - std::pair byteBitOffset = - GetFieldOffset(static_cast(*pointedType), iNode.GetFieldID()); + OffsetPair byteBitOffset = + GetJClassFieldOffset(static_cast(*pointedType), iNode.GetFieldID()); #if TARGAARCH64 || TARGRISCV64 ASSERT(GetAddressPrimType() == GetLoweredPtrType(), "incorrect address type, expect a GetLoweredPtrType()"); #endif return mirModule.GetMIRBuilder()->CreateExprBinary( OP_add, *GlobalTables::GetTypeTable().GetPrimType(GetAddressPrimType()), static_cast(iNode.Opnd(0)), - mirModule.GetMIRBuilder()->CreateIntConst(static_cast(static_cast(byteBitOffset.first)), + mirModule.GetMIRBuilder()->CreateIntConst(static_cast(static_cast(byteBitOffset.byteOffset)), PTY_u32)); } default: @@ -772,7 +657,7 @@ bool BECommon::CallIsOfAttr(FuncAttrKind attr, const StmtNode *narynode) const { /* For now, all 64x1_t types object are not propagated to become pregs by mplme, so the following is not needed for now. 
We need to revisit this later when types are enhanced with attributes */ -#if TO_BE_RESURRECTED +#if defined(TO_BE_RESURRECTED) && TO_BE_RESURRECTED bool attrFunc = false; if (narynode->GetOpCode() == OP_call) { CallNode *callNode = static_cast(narynode); diff --git a/src/mapleall/maple_be/src/be/lower.cpp b/src/mapleall/maple_be/src/be/lower.cpp index c005828f4e168a8448f9746b3342282a0574d878..5b9c3a3309a4314e757a4effb62c972dac0e7490 100644 --- a/src/mapleall/maple_be/src/be/lower.cpp +++ b/src/mapleall/maple_be/src/be/lower.cpp @@ -49,31 +49,7 @@ using namespace maple; enum ExtFuncT : uint8 { kFmodDouble, - kFmodFloat, - kfloatsitf, - kfloatunsitf, - kfloatditf, - kfloatunditf, - kextendsftf2, - lextenddftf2, - kfixtfsi, - kfixunstfsi, - kfixtfdi, - kfixunstfdi, - ktrunctfsf2, - ktrunctfdf2, - kaddtf3, - ksubtf3, - kmultf3, - kdivtf3, - knegtf3, - kcmptf2, - kletf2, - kgetf2, - klttf2, - kgttf2, - knetf2, - keqtf2 + kFmodFloat }; struct ExtFuncDescrT { @@ -101,34 +77,7 @@ std::pair cgBuiltins[] = { ExtFuncDescrT extFnDescrs[] = { { kFmodDouble, "fmod", PTY_f64, { PTY_f64, PTY_f64, kPtyInvalid } }, - { kFmodFloat, "fmodf", PTY_f32, { PTY_f32, PTY_f32, kPtyInvalid } }, - - { kfloatsitf, "__floatsitf", PTY_f128, { PTY_i32, kPtyInvalid } }, - { kfloatunsitf, "__floatunsitf", PTY_f128, { PTY_u32, kPtyInvalid } }, - { kfloatditf, "__floatditf", PTY_f128, { PTY_i64, kPtyInvalid } }, - { kfloatunditf, "__floatunditf", PTY_f128, { PTY_i64, kPtyInvalid } }, - { kextendsftf2, "__extendsftf2", PTY_f128, { PTY_f32, kPtyInvalid } }, - { lextenddftf2, "__extenddftf2", PTY_f128, { PTY_f64, kPtyInvalid } }, - - { kfixtfsi, "__fixtfsi", PTY_i32, { PTY_f128, kPtyInvalid } }, - { kfixunstfsi, "__fixunstfsi", PTY_u32, { PTY_f128, kPtyInvalid } }, - { kfixtfdi, "__fixtfdi", PTY_i64, { PTY_f128, kPtyInvalid } }, - { kfixunstfdi, "__fixunstfdi", PTY_u64, { PTY_f128, kPtyInvalid } }, - { ktrunctfsf2, "__trunctfsf2", PTY_f32, { PTY_f128, kPtyInvalid } }, - { ktrunctfdf2, "__trunctfdf2", 
PTY_f64, { PTY_f128, kPtyInvalid } }, - - { kaddtf3, "__addtf3", PTY_f128, { PTY_f128, kPtyInvalid } }, - { ksubtf3, "__subtf3", PTY_f128, { PTY_f128, kPtyInvalid } }, - { kmultf3, "__multf3", PTY_f128, { PTY_f128, kPtyInvalid } }, - { kdivtf3, "__divtf3", PTY_f128, { PTY_f128, kPtyInvalid } }, - { knegtf3, "__negtf2", PTY_f128, { PTY_f128, kPtyInvalid } }, - { kcmptf2, "__cmptf2", PTY_i32, { PTY_f128, kPtyInvalid } }, - { kletf2, "__letf2", PTY_i32, { PTY_f128, kPtyInvalid } }, - { kgetf2, "__getf2", PTY_i32, { PTY_f128, kPtyInvalid } }, - { klttf2, "__lttf2", PTY_i32, { PTY_f128, kPtyInvalid } }, - { kgttf2, "__gttf2", PTY_i32, { PTY_f128, kPtyInvalid } }, - { knetf2, "__netf2", PTY_i32, { PTY_f128, kPtyInvalid } }, - { keqtf2, "__eqtf2", PTY_i32, { PTY_f128, kPtyInvalid } }, + { kFmodFloat, "fmodf", PTY_f32, { PTY_f32, PTY_f32, kPtyInvalid } } }; std::vector> extFuncs; @@ -244,7 +193,9 @@ BaseNode *CGLowerer::LowerIaddrof(const IreadNode &iaddrof) { MIRStructType *structTy = static_cast( GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx())); CHECK_FATAL(structTy != nullptr, "LowerIaddrof: non-zero fieldID for non-structure"); - int32 offset = beCommon.GetFieldOffset(*structTy, iaddrof.GetFieldID()).first; + int32 offset = structTy->GetKind() == kTypeClass ? 
+ beCommon.GetJClassFieldOffset(*structTy, iaddrof.GetFieldID()).byteOffset : + structTy->GetFieldOffsetFromBaseAddr(iaddrof.GetFieldID()).byteOffset; if (offset == 0) { return iaddrof.Opnd(0); } @@ -272,8 +223,8 @@ BaseNode *CGLowerer::SplitBinaryNodeOpnd1(BinaryNode &bNode, BlockNode &blkNode) BaseNode *opnd1 = bNode.Opnd(1); MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(opnd1->GetPrimType())); - MIRSymbol *dnodeSt = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *ty); - DassignNode *dnode = mirbuilder->CreateStmtDassign(const_cast(*dnodeSt), 0, opnd1); + MIRSymbol *dnodeSt = mirbuilder->GetOrCreateLocalDecl(name, *ty); + DassignNode *dnode = mirbuilder->CreateStmtDassign(*dnodeSt, 0, opnd1); blkNode.InsertAfter(blkNode.GetLast(), dnode); BaseNode *dreadNode = mirbuilder->CreateExprDread(*dnodeSt); @@ -294,8 +245,8 @@ BaseNode *CGLowerer::SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent name.append(std::to_string(val++)); MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(tNode.GetPrimType())); - MIRSymbol *dassignNodeSym = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *ty); - DassignNode *dassignNode = mirbuilder->CreateStmtDassign(const_cast(*dassignNodeSym), 0, &tNode); + MIRSymbol *dassignNodeSym = mirbuilder->GetOrCreateLocalDecl(name, *ty); + DassignNode *dassignNode = mirbuilder->CreateStmtDassign(*dassignNodeSym, 0, &tNode); blkNode.InsertAfter(blkNode.GetLast(), dassignNode); BaseNode *dreadNode = mirbuilder->CreateExprDread(*dassignNodeSym); @@ -401,7 +352,7 @@ BaseNode *CGLowerer::LowerComplexSelect(const TernaryNode &tNode, BaseNode &pare static uint32 val = 0; std::string name("ComplexSelectTmp"); name.append(std::to_string(val++)); - cplxSelRes.resSym = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *resultTy); + cplxSelRes.resSym = mirbuilder->GetOrCreateLocalDecl(name, *resultTy); DassignNode *dassignTrue = mirbuilder->CreateStmtDassign(*cplxSelRes.resSym, 0, tNode.Opnd(1)); // 
Fallthru: update the frequence 1 func->SetFirstFreqMap(dassignTrue->GetStmtID(), fallthruStmtFreq); @@ -636,7 +587,7 @@ BaseNode *CGLowerer::LowerArray(ArrayNode &array, const BaseNode &parent) { int32 dim = arrayType->GetDim(); BaseNode *resNode = LowerArrayDim(array, dim); BaseNode *rMul = nullptr; - size_t eSize = beCommon.GetTypeSize(arrayType->GetElemTyIdx().GetIdx()); + size_t eSize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx())->GetSize(); Opcode opAdd = OP_add; MIRType &arrayTypes = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType())); if (resNode->GetOpCode() == OP_constval) { @@ -698,8 +649,8 @@ BaseNode *CGLowerer::LowerCArray(ArrayNode &array) { nestedArray = true; do { innerArrayType = static_cast(innerType); - elemSize = RoundUp(beCommon.GetTypeSize(innerArrayType->GetElemTyIdx().GetIdx()), - beCommon.GetTypeAlign(arrayType->GetElemTyIdx().GetIdx())); + elemSize = RoundUp(GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx())->GetSize(), + GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx())->GetAlign()); dim++; innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); } while (innerType->GetKind() == kTypeArray); @@ -708,7 +659,18 @@ BaseNode *CGLowerer::LowerCArray(ArrayNode &array) { int32 numIndex = static_cast(array.NumOpnds()) - 1; MIRArrayType *curArrayType = arrayType; - BaseNode *resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(0)); + // OP_CG_array_elem_add process in handlefunc is not suitable for CModule + // so here we add a forced type conversion to make it avoid OP_CG_array_elem_add + BaseNode *resNode = nullptr; + if (mirModule.IsCModule()) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt); + static_cast(resNode)->SetFromType(array.GetIndex(0)->GetPrimType()); + static_cast(resNode)->SetPrimType(array.GetPrimType()); + static_cast(resNode)->SetOpnd(array.GetIndex(0), 0); + } else { + resNode = 
NodeConvert(array.GetPrimType(), *array.GetIndex(0)); + } + if (dim > 1) { BaseNode *prevNode = nullptr; for (int i = 0; (i < dim) && (i < numIndex); i++) { @@ -790,7 +752,7 @@ BaseNode *CGLowerer::LowerCArray(ArrayNode &array) { if (nestedArray) { esize = elemSize; } else { - esize = beCommon.GetTypeSize(arrayType->GetElemTyIdx().GetIdx()); + esize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx())->GetSize(); } Opcode opadd = OP_add; if (resNode->op == OP_constval) { @@ -822,19 +784,19 @@ BaseNode *CGLowerer::LowerCArray(ArrayNode &array) { return rAdd; } -StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, +StmtNode *CGLowerer::WriteBitField(const OffsetPair &byteBitOffsets, const MIRBitFieldType *fieldType, BaseNode *baseAddr, BaseNode *rhs, BlockNode *block) { auto bitSize = fieldType->GetFieldSize(); auto primType = fieldType->GetPrimType(); - auto byteOffset = byteBitOffsets.first; - auto bitOffset = byteBitOffsets.second; + auto byteOffset = byteBitOffsets.byteOffset; + auto bitOffset = byteBitOffsets.bitOffset; auto *builder = mirModule.GetMIRBuilder(); auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeSize = GetPrimTypeSize(primType); auto primTypeBitSize = GetPrimTypeBitSize(primType); if ((static_cast(bitOffset) + bitSize) <= primTypeBitSize) { if (CGOptions::IsBigEndian()) { - bitOffset = static_cast(static_cast(beCommon.GetTypeSize(fieldType->GetTypeIndex()) * - kBitsPerByte) - bitOffset) - bitSize; + bitOffset = (static_cast(primTypeBitSize) - bitOffset) - bitSize; } auto depositBits = builder->CreateExprDepositbits(OP_depositbits, primType, static_cast(bitOffset), bitSize, bitField, rhs); @@ -857,11 +819,11 @@ StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets auto *extractedHigherBits = builder->CreateExprExtractbits(OP_extractbits, primType, bitsExtracted, bitsRemained, rhs); auto *bitFieldRemained = 
builder->CreateExprIreadoff(primType, - byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr); + byteOffset + static_cast(primTypeSize), baseAddr); auto *depositedHigherBits = builder->CreateExprDepositbits(OP_depositbits, primType, 0, bitsRemained, bitFieldRemained, extractedHigherBits); auto *assignedHigherBits = builder->CreateStmtIassignoff(primType, - byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr, depositedHigherBits); + byteOffset + static_cast(primTypeSize), baseAddr, depositedHigherBits); if (funcProfData) { funcProfData->CopyStmtFreq(assignedLowerBits->GetStmtID(), block->GetStmtID()); funcProfData->CopyStmtFreq(assignedHigherBits->GetStmtID(), block->GetStmtID()); @@ -869,19 +831,19 @@ StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets return assignedHigherBits; } -BaseNode *CGLowerer::ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, +BaseNode *CGLowerer::ReadBitField(const OffsetPair &byteBitOffsets, const MIRBitFieldType &fieldType, BaseNode *baseAddr) { - auto bitSize = fieldType->GetFieldSize(); - auto primType = fieldType->GetPrimType(); - auto byteOffset = byteBitOffsets.first; - auto bitOffset = byteBitOffsets.second; + auto bitSize = fieldType.GetFieldSize(); + auto primType = fieldType.GetPrimType(); + auto byteOffset = byteBitOffsets.byteOffset; + auto bitOffset = byteBitOffsets.bitOffset; auto *builder = mirModule.GetMIRBuilder(); auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeSize = GetPrimTypeSize(primType); auto primTypeBitSize = GetPrimTypeBitSize(primType); if ((static_cast(bitOffset) + bitSize) <= primTypeBitSize) { if (CGOptions::IsBigEndian()) { - bitOffset = static_cast(static_cast(beCommon.GetTypeSize(fieldType->GetTypeIndex()) * - kBitsPerByte) - bitOffset) - bitSize; + bitOffset = (static_cast(primTypeBitSize) - bitOffset) - bitSize; } return builder->CreateExprExtractbits(OP_extractbits, primType, 
static_cast(bitOffset), bitSize, bitField); } @@ -893,7 +855,7 @@ BaseNode *CGLowerer::ReadBitField(const std::pair &byteBitOffsets, auto *extractedLowerBits = builder->CreateExprExtractbits(OP_extractbits, primType, static_cast(bitOffset), bitSize - bitsRemained, bitField); auto *bitFieldRemained = builder->CreateExprIreadoff(primType, - byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr); + byteOffset + static_cast(primTypeSize), baseAddr); auto *result = builder->CreateExprDepositbits(OP_depositbits, primType, bitSize - bitsRemained, bitsRemained, extractedLowerBits, bitFieldRemained); return result; @@ -909,8 +871,8 @@ BaseNode *CGLowerer::LowerDreadBitfield(DreadNode &dread) { } auto *builder = mirModule.GetMIRBuilder(); auto *baseAddr = builder->CreateExprAddrof(0, dread.GetStIdx()); - auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dread.GetFieldID()); - return ReadBitField(byteBitOffsets, static_cast(fType), baseAddr); + auto byteBitOffsets = structTy->GetFieldOffsetFromBaseAddr(dread.GetFieldID()); + return ReadBitField(byteBitOffsets, *static_cast(fType), baseAddr); } BaseNode *CGLowerer::LowerIreadBitfield(IreadNode &iread) { @@ -930,8 +892,8 @@ BaseNode *CGLowerer::LowerIreadBitfield(IreadNode &iread) { if (fType->GetKind() != kTypeBitField) { return &iread; } - auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iread.GetFieldID()); - return ReadBitField(byteBitOffsets, static_cast(fType), iread.Opnd(0)); + auto byteBitOffsets = structTy->GetFieldOffsetFromBaseAddr(iread.GetFieldID()); + return ReadBitField(byteBitOffsets, *static_cast(fType), iread.Opnd(0)); } // input node must be cvt, retype, zext or sext @@ -984,10 +946,6 @@ BlockNode *CGLowerer::LowerReturnStructUsingFakeParm(NaryStmtNode &retNode) { funcProfData->CopyStmtFreq(blk->GetStmtID(), retNode.GetStmtID()); } ASSERT(opnd0 != nullptr, "opnd0 should not be nullptr"); - if ((beCommon.GetTypeSize(retTy->GetPointedTyIdx().GetIdx()) <= k16ByteSize) && 
(opnd0->GetPrimType() == PTY_agg)) { - /* struct goes into register. */ - curFunc->SetStructReturnedInRegs(); - } iassign->SetFieldID(0); iassign->SetRHS(opnd0); if (retSt->IsPreg()) { @@ -1044,7 +1002,7 @@ StmtNode *CGLowerer::LowerDassignBitfield(DassignNode &dassign, BlockNode &newBl } auto *builder = mirModule.GetMIRBuilder(); auto *baseAddr = builder->CreateExprAddrof(0, dassign.GetStIdx()); - auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dassign.GetFieldID()); + auto byteBitOffsets = structTy->GetFieldOffsetFromBaseAddr(dassign.GetFieldID()); return WriteBitField(byteBitOffsets, static_cast(fType), baseAddr, dassign.GetRHS(), &newBlk); } @@ -1076,7 +1034,7 @@ StmtNode *CGLowerer::LowerIassignBitfield(IassignNode &iassign, BlockNode &newBl if (fType->GetKind() != kTypeBitField) { return &iassign; } - auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iassign.GetFieldID()); + auto byteBitOffsets = structTy->GetFieldOffsetFromBaseAddr(iassign.GetFieldID()); auto *bitFieldType = static_cast(fType); return WriteBitField(byteBitOffsets, bitFieldType, iassign.Opnd(0), iassign.GetRHS(), &newBlk); } @@ -1333,7 +1291,7 @@ BlockNode *CGLowerer::GenBlockNode(StmtNode &newCall, const CallReturnVector &p2 bool sizeIs0 = false; if (sym != nullptr) { retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); - if (beCommon.GetTypeSize(retType->GetTypeIndex().GetIdx()) == 0) { + if (retType->GetSize() == 0) { sizeIs0 = true; } } @@ -1388,7 +1346,7 @@ BlockNode *CGLowerer::GenBlockNode(StmtNode &newCall, const CallReturnVector &p2 blk->AddStatement(cmnt); } CHECK_FATAL(dStmt == nullptr || dStmt->GetNext() == nullptr, "make sure dStmt or dStmt's next is nullptr"); - LowerCallStmt(newCall, dStmt, *blk, retType, uselvar ? 
true : false, opcode == OP_intrinsiccallassigned); + LowerCallStmt(newCall, dStmt, *blk, retType, uselvar, opcode == OP_intrinsiccallassigned); if (!uselvar && dStmt != nullptr) { dStmt->SetSrcPos(newCall.GetSrcPos()); blk->AddStatement(dStmt); @@ -1400,7 +1358,7 @@ BlockNode *CGLowerer::GenBlockNode(StmtNode &newCall, const CallReturnVector &p2 // try to expand memset and memcpy BlockNode *CGLowerer::LowerMemop(StmtNode &stmt) { auto memOpKind = SimplifyOp::ComputeOpKind(stmt); - if (memOpKind == MEM_OP_unknown) { + if (memOpKind == kMemOpUnknown) { return nullptr; } auto *prev = stmt.GetPrev(); @@ -1441,7 +1399,8 @@ BlockNode *CGLowerer::LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode auto firstArgTypeIdx = intrinsicCall.GetTyIdx(); CHECK_FATAL(firstArgTypeIdx != 0, "firstArgTypeIdx should not be 0"); PrimType primType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(firstArgTypeIdx)->GetPrimType(); - auto *intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, primType, firstArgTypeIdx, opndVector); + auto *intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, primType, + firstArgTypeIdx, opndVector); auto &returnVector = intrinsicCall.GetReturnVec(); StmtNode *newStmt = nullptr; if (returnVector.size() == 0) { @@ -1490,11 +1449,12 @@ BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { } case OP_intrinsiccallassigned: case OP_xintrinsiccallassigned: { + BlockNode *blockNode = LowerIntrinsiccallToIntrinsicop(stmt); + if (blockNode) { + return blockNode; + } IntrinsiccallNode &intrinCall = static_cast(stmt); auto intrinsicID = intrinCall.GetIntrinsic(); - if (IntrinDesc::intrinTable[intrinsicID].IsAtomic()) { - return LowerIntrinsiccallAassignedToAssignStmt(intrinCall); - } if (intrinsicID == INTRN_JAVA_POLYMORPHIC_CALL) { BaseNode *contextClassArg = GetBaseNodeFromCurFunc(*mirModule.CurFunction(), false); constexpr int kContextIdx = 4; /* stable index in MCC_DexPolymorphicCall, never out of 
range */ @@ -1511,10 +1471,9 @@ BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { break; } case OP_intrinsiccallwithtypeassigned: { - IntrinsiccallNode &intrinCall = static_cast(stmt); - auto intrinsicID = intrinCall.GetIntrinsic(); - if (IntrinDesc::intrinTable[intrinsicID].IsAtomic()) { - return LowerIntrinsiccallAassignedToAssignStmt(intrinCall); + BlockNode *blockNode = LowerIntrinsiccallToIntrinsicop(stmt); + if (blockNode) { + return blockNode; } auto &origCall = static_cast(stmt); newCall = GenIntrinsiccallNode(stmt, funcCalled, handledAtLowerLevel, origCall); @@ -1535,6 +1494,10 @@ BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { return nullptr; } + if (stmt.GetMayTailCall()) { + newCall->SetMayTailcall(); + } + /* transfer srcPosition location info */ newCall->SetSrcPos(stmt.GetSrcPos()); if (funcProfData) { @@ -1544,44 +1507,18 @@ BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { return GenBlockNode(*newCall, *p2nRets, stmt.GetOpCode(), funcCalled, handledAtLowerLevel, uselvar); } -#if TARGAARCH64 -static PrimType IsStructElementSame(MIRType *ty) { - if (ty->GetKind() == kTypeArray) { - MIRArrayType *arrtype = static_cast(ty); - MIRType *pty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrtype->GetElemTyIdx()); - if (pty->GetKind() == kTypeArray || pty->GetKind() == kTypeStruct) { - return IsStructElementSame(pty); - } - return pty->GetPrimType(); - } else if (ty->GetKind() == kTypeStruct) { - MIRStructType *sttype = static_cast(ty); - FieldVector fields = sttype->GetFields(); - PrimType oldtype = PTY_void; - for (uint32 fcnt = 0; fcnt < fields.size(); ++fcnt) { - TyIdx fieldtyidx = fields[fcnt].second.first; - MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); - PrimType ptype = IsStructElementSame(fieldty); - if (oldtype != PTY_void && oldtype != ptype) { - return PTY_void; - } else { - oldtype = ptype; - } - } - return oldtype; - } else { - 
return ty->GetPrimType(); +BlockNode *CGLowerer::LowerIntrinsiccallToIntrinsicop(StmtNode &stmt) { + IntrinsiccallNode &intrinCall = static_cast(stmt); + auto intrinsicID = intrinCall.GetIntrinsic(); + if (IntrinDesc::intrinTable[intrinsicID].IsAtomic()) { + return LowerIntrinsiccallAassignedToAssignStmt(intrinCall); } + return nullptr; } -#endif -// return true if successfully lowered; nextStmt is in/out, and is made to point -// to its following statement if lowering of the struct return is successful -bool CGLowerer::LowerStructReturn(BlockNode &newBlk, StmtNode *stmt, - StmtNode *&nextStmt, bool &lvar, BlockNode *oldBlk) { - if (!nextStmt) { - return false; - } - CallReturnVector *p2nrets = stmt->GetCallReturnVector(); +// return true if successfully lowered +bool CGLowerer::LowerStructReturn(BlockNode &newBlk, StmtNode &stmt, bool &lvar) { + CallReturnVector *p2nrets = stmt.GetCallReturnVector(); if (p2nrets->size() == 0) { return false; } @@ -1593,268 +1530,212 @@ bool CGLowerer::LowerStructReturn(BlockNode &newBlk, StmtNode *stmt, if (retSym->GetType()->GetPrimType() != PTY_agg) { return false; } - if (nextStmt->op != OP_dassign) { - // introduce a temporary and insert a dassign whose rhs is this temporary - // and whose lhs is retSym - MIRSymbol *temp = CreateNewRetVar(*retSym->GetType(), kUserRetValPrefix); - BaseNode *rhs = mirModule.GetMIRBuilder()->CreateExprDread(*temp->GetType(), 0, *temp); - DassignNode *dass = mirModule.GetMIRBuilder()->CreateStmtDassign( - retPair.first, retPair.second.GetFieldID(), rhs); - oldBlk->InsertBefore(nextStmt, dass); - if (funcProfData) { - funcProfData->CopyStmtFreq(dass->GetStmtID(), stmt->GetStmtID()); - } - nextStmt = dass; - // update CallReturnVector to the new temporary - (*p2nrets)[0].first = temp->GetStIdx(); - (*p2nrets)[0].second.SetFieldID(0); - } - // now, it is certain that nextStmt is a dassign - BaseNode *bnode = static_cast(nextStmt)->GetRHS(); - if (bnode->GetOpCode() != OP_dread) { + if 
(IsReturnInMemory(*retSym->GetType())) { + lvar = true; + } else if (!LowerStructReturnInRegs(newBlk, stmt, *retSym)) { return false; } - DreadNode *dnode = static_cast(bnode); - MIRType *dtype = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dnode->GetStIdx())->GetType(); -#if TARGAARCH64 - PrimType ty = IsStructElementSame(dtype); - if (ty == PTY_f32 || ty == PTY_f64 || IsPrimitiveVector(ty)) { + return true; +} + +bool CGLowerer::LowerStructReturnInRegs(BlockNode &newBlk, StmtNode &stmt, + const MIRSymbol &retSym) { + // lower callassigned -> call + if (stmt.GetOpCode() == OP_callassigned) { + auto &callNode = static_cast(stmt); + for (size_t i = 0; i < callNode.GetNopndSize(); ++i) { + auto *newOpnd = LowerExpr(callNode, *callNode.GetNopndAt(i), newBlk); + callNode.SetOpnd(newOpnd, i); + } + auto *callStmt = mirModule.GetMIRBuilder()->CreateStmtCall(callNode.GetPUIdx(), + callNode.GetNopnd()); + callStmt->SetSrcPos(callNode.GetSrcPos()); + newBlk.AddStatement(callStmt); + if (funcProfData) { + funcProfData->CopyStmtFreq(callStmt->GetStmtID(), stmt.GetStmtID()); + } + } else if (stmt.GetOpCode() == OP_icallassigned || stmt.GetOpCode() == OP_icallprotoassigned) { + auto &icallNode = static_cast(stmt); + for (size_t i = 0; i < icallNode.GetNopndSize(); ++i) { + auto *newOpnd = LowerExpr(icallNode, *icallNode.GetNopndAt(i), newBlk); + icallNode.SetOpnd(newOpnd, i); + } + IcallNode *icallStmt = nullptr; + if (stmt.GetOpCode() == OP_icallassigned) { + icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcall(icallNode.GetNopnd()); + } else { + icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcallproto(icallNode.GetNopnd(), + icallNode.GetRetTyIdx()); + } + icallStmt->SetSrcPos(icallNode.GetSrcPos()); + newBlk.AddStatement(icallStmt); + if (funcProfData) { + funcProfData->CopyStmtFreq(icallStmt->GetStmtID(), stmt.GetStmtID()); + } + } else { return false; } -#endif - if (dnode->GetPrimType() != PTY_agg) { - return false; + +#if TARGAARCH64 + PrimType primType = 
PTY_begin; + size_t elemNum = 0; + if (IsHomogeneousAggregates(*retSym.GetType(), primType, elemNum)) { + LowerStructReturnInFpRegs(newBlk, stmt, retSym, primType, elemNum); + } else { + LowerStructReturnInGpRegs(newBlk, stmt, retSym); } - CallReturnPair pair = (*p2nrets)[0]; - if (pair.first != dnode->GetStIdx() || pair.second.GetFieldID() != dnode->GetFieldID()) { - return false; +#else + LowerStructReturnInGpRegs(newBlk, stmt, retSym); +#endif // TARGAARCH64 + return true; +} + +// struct passed in gpregs, lowered into +// call &foo +// regassign u64 %1 (regread u64 %%retval0) +// regassign ptr %2 (addrof ptr $s) +// iassign <* u64> 0 (regread ptr %2, regread u64 %1) +void CGLowerer::LowerStructReturnInGpRegs(BlockNode &newBlk, const StmtNode &stmt, + const MIRSymbol &symbol) { + auto size = static_cast(symbol.GetType()->GetSize()); + + if (size == 0) { + return; + } else if (size <= k8ByteSize) { + // size <= 8-Byte, lowerd into + // call &foo + // dassign agg $s (regread agg %%retval0) + auto *regread = mirBuilder->CreateExprRegread(symbol.GetType()->GetPrimType(), -kSregRetval0); + auto *dStmt = mirBuilder->CreateStmtDassign(symbol.GetStIdx(), 0, regread); + newBlk.AddStatement(dStmt); + if (funcProfData) { + funcProfData->CopyStmtFreq(dStmt->GetStmtID(), stmt.GetStmtID()); + } + return; } - auto *dnodeStmt = static_cast(nextStmt); - if (dnodeStmt->GetFieldID() != 0) { - return false; + + // save retval0, retval1 + PregIdx pIdx1R = 0, pIdx2R = 0; + auto genRetvalSave = [this, &newBlk, &stmt](PregIdx &pIdx, SpecialReg sreg) { + auto *regreadNode = mirBuilder->CreateExprRegread(PTY_u64, -sreg); + pIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); + auto *aStmt = mirBuilder->CreateStmtRegassign(PTY_u64, pIdx, regreadNode); + newBlk.AddStatement(aStmt); + if (funcProfData) { + funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt.GetStmtID()); + } + }; + genRetvalSave(pIdx1R, kSregRetval0); + genRetvalSave(pIdx2R, kSregRetval1); + + // save &s + BaseNode 
*regAddr = mirBuilder->CreateExprAddrof(0, symbol); + LowerTypePtr(*regAddr); + PregIdx pIdxL = GetCurrentFunc()->GetPregTab()->CreatePreg(GetLoweredPtrType()); + auto *aStmt = mirBuilder->CreateStmtRegassign(PTY_a64, pIdxL, regAddr); + newBlk.AddStatement(aStmt); + if (funcProfData) { + funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt.GetStmtID()); } - if (dtype->GetSize() > k16ByteSize) { - (*p2nrets)[0].first = dnodeStmt->GetStIdx(); - (*p2nrets)[0].second.SetFieldID(dnodeStmt->GetFieldID()); - lvar = true; - // set ATTR_firstarg_return for callee - if (stmt->GetOpCode() == OP_callassigned) { - CallNode *callNode = static_cast(stmt); - MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); - f->SetFirstArgReturn(); - f->GetMIRFuncType()->SetFirstArgReturn(); - } else { - // for icall, front-end already set ATTR_firstarg_return + + // str retval to &s + for (uint32 curSize = 0; curSize < size;) { + // calc addr + BaseNode *addrNode = mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL); + if (curSize != 0) { + MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetLoweredPtrType()); + addrNode = mirBuilder->CreateExprBinary(OP_add, *addrType, addrNode, + mirBuilder->CreateIntConst(curSize, PTY_i32)); } - } else { /* struct <= 16 passed in regs lowered into - call &foo - regassign u64 %1 (regread u64 %%retval0) - regassign ptr %2 (addrof ptr $s) - iassign <* u64> 0 (regread ptr %2, regread u64 %1) */ - MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dnodeStmt->GetStIdx()); - auto *structType = static_cast(symbol->GetType()); - auto size = static_cast(structType->GetSize()); - if (stmt->GetOpCode() == OP_callassigned) { - auto *callNode = static_cast(stmt); - for (size_t i = 0; i < callNode->GetNopndSize(); ++i) { - BaseNode *newOpnd = LowerExpr(*callNode, *callNode->GetNopndAt(i), newBlk); - callNode->SetOpnd(newOpnd, i); - } - CallNode *callStmt = 
mirModule.GetMIRBuilder()->CreateStmtCall(callNode->GetPUIdx(), callNode->GetNopnd()); - callStmt->SetSrcPos(callNode->GetSrcPos()); - newBlk.AddStatement(callStmt); - if (funcProfData) { - funcProfData->CopyStmtFreq(callStmt->GetStmtID(), stmt->GetStmtID()); - } - } else if (stmt->GetOpCode() == OP_icallassigned || stmt->GetOpCode() == OP_icallprotoassigned) { - auto *icallNode = static_cast(stmt); - for (size_t i = 0; i < icallNode->GetNopndSize(); ++i) { - BaseNode *newOpnd = LowerExpr(*icallNode, *icallNode->GetNopndAt(i), newBlk); - icallNode->SetOpnd(newOpnd, i); + + PregIdx pIdxR = (curSize < k8ByteSize) ? pIdx1R : pIdx2R; + uint32 strSize = size - curSize; + // gen str retval to &s + offset + auto genStrRetval2Memory = + [this, &newBlk, &stmt, &addrNode, &curSize, &pIdxR](PrimType primType) { + uint32 shiftSize = (curSize * kBitsPerByte) % k64BitSize; + if (CGOptions::IsBigEndian()) { + shiftSize = k64BitSize - GetPrimTypeBitSize(primType) + shiftSize; } - IcallNode *icallStmt = nullptr; - if (stmt->GetOpCode() == OP_icallassigned) { - icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcall(icallNode->GetNopnd()); - } else { - icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcallproto(icallNode->GetNopnd(), icallNode->GetRetTyIdx()); + BaseNode *regreadExp = mirBuilder->CreateExprRegread(PTY_u64, pIdxR); + if (shiftSize != 0) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); + regreadExp = mirBuilder->CreateExprBinary(OP_lshr, *type, regreadExp, + mirBuilder->CreateIntConst(shiftSize, PTY_i32)); } - icallStmt->SetSrcPos(icallNode->GetSrcPos()); - newBlk.AddStatement(icallStmt); + auto *pointedType = GlobalTables::GetTypeTable().GetPrimType(primType); + auto *iassignStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*pointedType), 0, addrNode, regreadExp); + newBlk.AddStatement(iassignStmt); if (funcProfData) { - funcProfData->CopyStmtFreq(icallStmt->GetStmtID(), stmt->GetStmtID()); + 
funcProfData->CopyStmtFreq(iassignStmt->GetStmtID(), stmt.GetStmtID()); } + curSize += GetPrimTypeSize(primType); + }; + if (strSize >= k8ByteSize) { + genStrRetval2Memory(PTY_u64); + } else if (strSize >= k4ByteSize) { + genStrRetval2Memory(PTY_u32); + } else if (strSize >= k2ByteSize) { + genStrRetval2Memory(PTY_u16); } else { - return false; + genStrRetval2Memory(PTY_u8); } + } +} - uint32 origSize = size; - PregIdx pIdxR, pIdx1R, pIdx2R; - StmtNode *aStmt = nullptr; - RegreadNode *reg = nullptr; - - /* save x0 */ - reg = mirBuilder->CreateExprRegread(PTY_u64, -kSregRetval0); - pIdx1R = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); - aStmt = mirBuilder->CreateStmtRegassign(PTY_u64, pIdx1R, reg); +// struct passed in fpregs, lowered into +// call &foo +// regassign f64 %1 (regread f64 %%retval0) +// regassign ptr %2 (addrof ptr $s) +// iassign <* f64> 0 (regread ptr %2, regread f64 %1) +void CGLowerer::LowerStructReturnInFpRegs(BlockNode &newBlk, const StmtNode &stmt, + const MIRSymbol &symbol, PrimType primType, + size_t elemNum) { + // save retvals + static constexpr std::array sregs = {kSregRetval0, kSregRetval1, kSregRetval2, kSregRetval3}; + std::vector pIdxs(sregs.size(), 0); + for (uint32 i = 0; i < elemNum; ++i) { + auto *regreadNode = mirBuilder->CreateExprRegread(primType, -sregs[i]); + pIdxs[i] = GetCurrentFunc()->GetPregTab()->CreatePreg(primType); + auto *aStmt = mirBuilder->CreateStmtRegassign(primType, pIdxs[i], regreadNode); newBlk.AddStatement(aStmt); if (funcProfData) { - funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt->GetStmtID()); + funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt.GetStmtID()); } + } - /* save x1 */ - if (origSize > k8ByteSize) { - reg = mirBuilder->CreateExprRegread(PTY_u64, -kSregRetval1); - pIdx2R = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); - aStmt = mirBuilder->CreateStmtRegassign(PTY_u64, pIdx2R, reg); - newBlk.AddStatement(aStmt); - if (funcProfData) { - 
funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt->GetStmtID()); - } + // save &s + BaseNode *regAddr = mirBuilder->CreateExprAddrof(0, symbol); + LowerTypePtr(*regAddr); + PregIdx pIdxL = GetCurrentFunc()->GetPregTab()->CreatePreg(GetLoweredPtrType()); + auto *aStmt = mirBuilder->CreateStmtRegassign(PTY_a64, pIdxL, regAddr); + newBlk.AddStatement(aStmt); + if (funcProfData) { + funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt.GetStmtID()); + } + + // str retvals to &s + for (uint32 i = 0; i < elemNum; ++i) { + uint32 offsetSize = i * GetPrimTypeSize(primType); + BaseNode *addrNode = mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL); + // addr add offset + if (offsetSize != 0) { + MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetLoweredPtrType()); + addrNode = mirBuilder->CreateExprBinary(OP_add, *addrType, addrNode, + mirBuilder->CreateIntConst(offsetSize, PTY_i32)); } - - /* save &s */ - BaseNode *regAddr = mirBuilder->CreateExprAddrof(0, *symbol); - LowerTypePtr(*regAddr); - PregIdx pIdxL = GetCurrentFunc()->GetPregTab()->CreatePreg(GetLoweredPtrType()); - aStmt = mirBuilder->CreateStmtRegassign(PTY_a64, pIdxL, regAddr); - newBlk.AddStatement(aStmt); + // gen iassigen to addr + auto *pointedType = GlobalTables::GetTypeTable().GetPrimType(primType); + auto *iassignStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*pointedType), 0, addrNode, + mirBuilder->CreateExprRegread(PTY_u64, pIdxs[i])); + newBlk.AddStatement(iassignStmt); if (funcProfData) { - funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt->GetStmtID()); - } - - uint32 curSize = 0; - PregIdx pIdxS; - while (size > 0) { - pIdxR = pIdx1R; - if (curSize >= k8ByteSize) { - pIdxR = pIdx2R; - } - BaseNode *addr; - BaseNode *shift; - BaseNode *regreadExp; - if (origSize != size) { - MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetLoweredPtrType()); - addr = mirBuilder->CreateExprBinary(OP_add, *addrType, - 
mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL), - mirBuilder->CreateIntConst(origSize - size, PTY_i32)); - } else { - addr = mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL); - } - if (size >= k8ByteSize) { - aStmt = mirBuilder->CreateStmtIassign( - *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt64()), - 0, addr, mirBuilder->CreateExprRegread(PTY_u64, pIdxR)); - size -= k8ByteSize; - curSize += k8ByteSize; - } else if (size >= k4ByteSize) { - MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); - - if (CGOptions::IsBigEndian()) { - regreadExp = mirBuilder->CreateExprBinary( - OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k64BitSize - k32BitSize, PTY_i32)); - } else { - regreadExp = mirBuilder->CreateExprRegread(PTY_u32, pIdxR); - } - - aStmt = mirBuilder->CreateStmtIassign( - *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt32()), - 0, addr, regreadExp); - - if (CGOptions::IsBigEndian()) { - shift = mirBuilder->CreateExprBinary(OP_shl, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k32BitSize, PTY_i32)); - } else { - shift = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k32BitSize, PTY_i32)); - } - - pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); - StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift); - - pIdx1R = pIdx2R = pIdxS; - newBlk.AddStatement(sStmp); - if (funcProfData) { - funcProfData->CopyStmtFreq(sStmp->GetStmtID(), stmt->GetStmtID()); - } - size -= k4ByteSize; - curSize += k4ByteSize; - } else if (size >= k2ByteSize) { - MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); - - if (CGOptions::IsBigEndian()) { - regreadExp = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k64BitSize - 
k16BitSize, PTY_i32)); - } else { - regreadExp = mirBuilder->CreateExprRegread(PTY_u16, pIdxR); - } - - aStmt = mirBuilder->CreateStmtIassign( - *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt16()), - 0, addr, regreadExp); - - if (CGOptions::IsBigEndian()) { - shift = mirBuilder->CreateExprBinary(OP_shl, *type, - mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k64BitSize - k16BitSize, PTY_i32)); - } else { - shift = mirBuilder->CreateExprBinary(OP_lshr, *type, - mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k16BitSize, PTY_i32)); - } - - pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); - StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift); - - pIdx1R = pIdx2R = pIdxS; - newBlk.AddStatement(sStmp); - if (funcProfData) { - funcProfData->CopyStmtFreq(sStmp->GetStmtID(), stmt->GetStmtID()); - } - size -= k2ByteSize; - curSize += k2ByteSize; - } else { - MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); - - if (CGOptions::IsBigEndian()) { - regreadExp = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k64BitSize - k8BitSize, PTY_i32)); - } else { - regreadExp = mirBuilder->CreateExprRegread(PTY_u8, pIdxR); - } - - aStmt = mirBuilder->CreateStmtIassign( - *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt8()), - 0, addr, regreadExp); - - if (CGOptions::IsBigEndian()) { - shift = mirBuilder->CreateExprBinary(OP_shl, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k64BitSize - k8BitSize, PTY_i32)); - } else { - shift = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), - mirBuilder->CreateIntConst(k8BitSize, PTY_i32)); - } - - pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); - StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift); - - 
pIdx1R = pIdx2R = pIdxS; - newBlk.AddStatement(sStmp); - if (funcProfData) { - funcProfData->CopyStmtFreq(sStmp->GetStmtID(), stmt->GetStmtID()); - } - size -= k1ByteSize; - curSize += k1ByteSize; - } - newBlk.AddStatement(aStmt); - if (funcProfData) { - funcProfData->CopyStmtFreq(aStmt->GetStmtID(), stmt->GetStmtID()); - } + funcProfData->CopyStmtFreq(iassignStmt->GetStmtID(), stmt.GetStmtID()); } } - nextStmt = nextStmt->GetNext(); // skip the dassign - return true; } void CGLowerer::LowerStmt(StmtNode &stmt, BlockNode &newBlk) { @@ -1912,7 +1793,7 @@ bool CGLowerer::IsSwitchToRangeGoto(const BlockNode &blk) const { return false; } -StmtNode *CGLowerer::CreateFflushStmt(StmtNode &stmt) { +StmtNode *CGLowerer::CreateFflushStmt(StmtNode &stmt) const { MIRFunction *fflush = mirBuilder->GetOrCreateFunction("fflush", TyIdx(PTY_i32)); fflush->GetFuncSymbol()->SetAppearsInCode(true); MapleVector argsFflush(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -2111,8 +1992,7 @@ BlockNode *CGLowerer::LowerBlock(BlockNode &block) { case OP_icallprotoassigned: { // pass the addr of lvar if this is a struct call assignment bool lvar = false; - // nextStmt could be changed by the call to LowerStructReturn - if (!LowerStructReturn(*newBlk, stmt, nextStmt, lvar, &block)) { + if (!LowerStructReturn(*newBlk, *stmt, lvar)) { newBlk->AppendStatementsFromBlock(*LowerCallAssignedStmt(*stmt, lvar)); } break; @@ -2239,10 +2119,10 @@ void CGLowerer::SimplifyBlock(BlockNode &block) const { break; } auto *oldFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callStmt->GetPUIdx()); - if (asmMap.find(oldFunc->GetName()) == asmMap.end()) { + if (kAsmMap.find(oldFunc->GetName()) == kAsmMap.end()) { break; } - auto *newFunc = theMIRModule->GetMIRBuilder()->GetOrCreateFunction(asmMap.at(oldFunc->GetName()), + auto *newFunc = theMIRModule->GetMIRBuilder()->GetOrCreateFunction(kAsmMap.at(oldFunc->GetName()), callStmt->GetTyIdx()); MIRSymbol *funcSym = newFunc->GetFuncSymbol(); 
funcSym->SetStorageClass(kScExtern); @@ -2255,7 +2135,6 @@ void CGLowerer::SimplifyBlock(BlockNode &block) const { } } } while (nextStmt != nullptr); - return; } MIRType *CGLowerer::GetArrayNodeType(BaseNode &baseNode) { @@ -2383,7 +2262,7 @@ StmtNode *CGLowerer::LowerCall( return &callNode; } - if (!uselvar && retTy && beCommon.GetTypeSize(retTy->GetTypeIndex().GetIdx()) <= k16ByteSize) { + if (!uselvar && retTy && retTy->GetSize() <= k16ByteSize) { /* return structure fitting in one or two regs. */ return &callNode; } @@ -2458,26 +2337,15 @@ StmtNode *CGLowerer::LowerCall( void CGLowerer::LowerEntry(MIRFunction &func) { // determine if needed to insert fake parameter to return struct for current function + bool firstArgRet = func.IsFirstArgReturn(); if (func.IsReturnStruct()) { - MIRType *retType = func.GetReturnType(); -#if TARGAARCH64 - PrimType pty = IsStructElementSame(retType); - if (pty == PTY_f32 || pty == PTY_f64 || IsPrimitiveVector(pty)) { - func.SetStructReturnedInRegs(); - return; - } -#endif - if (retType->GetPrimType() != PTY_agg) { - return; - } - if (retType->GetSize() > k16ByteSize) { - func.SetFirstArgReturn(); - func.GetMIRFuncType()->SetFirstArgReturn(); + if (IsReturnInMemory(*func.GetReturnType())) { + firstArgRet = true; } else { func.SetStructReturnedInRegs(); } } - if (func.IsFirstArgReturn() && func.GetReturnType()->GetPrimType() != PTY_void) { + if (firstArgRet && func.GetReturnType()->GetPrimType() != PTY_void) { MIRSymbol *retSt = func.GetSymTab()->CreateSymbol(kScopeLocal); retSt->SetStorageClass(kScFormal); retSt->SetSKind(kStVar); @@ -2498,10 +2366,9 @@ void CGLowerer::LowerEntry(MIRFunction &func) { beCommon.AddElementToFuncReturnType(func, func.GetReturnTyIdx()); - func.UpdateFuncTypeAndFormalsAndReturnType(formals, TyIdx(PTY_void), true); + func.UpdateFuncTypeAndFormalsAndReturnType(formals, TyIdx(PTY_void), true, true); auto *funcType = func.GetMIRFuncType(); ASSERT(funcType != nullptr, "null ptr check"); - 
funcType->SetFirstArgReturn(); beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); } } @@ -2626,7 +2493,7 @@ void CGLowerer::LowerTryCatchBlocks(BlockNode &body) { return; } -#if DEBUG +#if defined(DEBUG) && DEBUG BBT::ValidateStmtList(nullptr, nullptr); #endif auto memPool = std::make_unique(memPoolCtrler, "CreateNewBB mempool"); @@ -2635,7 +2502,7 @@ void CGLowerer::LowerTryCatchBlocks(BlockNode &body) { bool generateEHCode = GenerateExceptionHandlingCode(); tryCatchLower.SetGenerateEHCode(generateEHCode); tryCatchLower.TraverseBBList(); -#if DEBUG +#if defined(DEBUG) && DEBUG tryCatchLower.CheckTryCatchPattern(); #endif } @@ -2897,7 +2764,7 @@ BaseNode *CGLowerer::LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkN if (expr.GetOpCode() != OP_intrinsicop && expr.GetPrimType() == PTY_u1) { #if TARGAARCH64 expr.SetPrimType(PTY_i32); -#elif TARGX86_64 +#elif defined(TARGX86_64) && TARGX86_64 expr.SetPrimType(PTY_u8); #else CHECK_FATAL(false, "target not supported"); @@ -2975,7 +2842,8 @@ BaseNode *CGLowerer::LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkN case OP_sizeoftype: { CHECK(static_cast(expr).GetTyIdx() < beCommon.GetSizeOfTypeSizeTable(), "index out of range in CGLowerer::LowerExpr"); - uint64 typeSize = beCommon.GetTypeSize(static_cast(expr).GetTyIdx()); + uint64 typeSize = GlobalTables:: + GetTypeTable().GetTypeFromTyIdx(static_cast(expr).GetTyIdx())->GetSize(); return mirModule.GetMIRBuilder()->CreateIntConst(typeSize, PTY_u32); } @@ -3332,7 +3200,7 @@ StmtNode *CGLowerer::CreateStmtCallWithReturnValue(const IntrinsicopNode &intrin } BaseNode *CGLowerer::LowerIntrinJavaMerge(const BaseNode &parent, IntrinsicopNode &intrinNode) const { - BaseNode *resNode = static_cast(&intrinNode); + BaseNode *resNode = static_cast(&intrinNode); CHECK_FATAL(intrinNode.GetNumOpnds() > 0, "invalid JAVA_MERGE intrinsic node"); BaseNode *candidate = intrinNode.Opnd(0); ASSERT(candidate != nullptr, "candidate 
should not be nullptr"); @@ -3400,7 +3268,7 @@ BaseNode *CGLowerer::LowerIntrinJavaMerge(const BaseNode &parent, IntrinsicopNod } BaseNode *CGLowerer::LowerIntrinJavaArrayLength(const BaseNode &parent, IntrinsicopNode &intrinNode) { - BaseNode *resNode = static_cast(&intrinNode); + BaseNode *resNode = static_cast(&intrinNode); PUIdx bFunc = GetBuiltinToUse(intrinNode.GetIntrinsic()); CHECK_FATAL(bFunc != kFuncNotFound, "bFunc should not be kFuncNotFound"); MIRFunction *biFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(bFunc); @@ -3468,7 +3336,7 @@ BaseNode *CGLowerer::LowerIntrinJavaArrayLength(const BaseNode &parent, Intrinsi } BaseNode *CGLowerer::LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode) { - BaseNode *resNode = static_cast(&intrinNode); + BaseNode *resNode = static_cast(&intrinNode); if (intrinNode.GetIntrinsic() == INTRN_JAVA_MERGE) { resNode = LowerIntrinJavaMerge(parent, intrinNode); } else if (intrinNode.GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH) { @@ -3648,7 +3516,7 @@ BaseNode *CGLowerer::GetClassInfoExpr(const std::string &classInfo) const { } BaseNode *CGLowerer::LowerIntrinsicopWithType(const BaseNode &parent, IntrinsicopNode &intrinNode) { - BaseNode *resNode = static_cast(&intrinNode); + BaseNode *resNode = static_cast(&intrinNode); if ((intrinNode.GetIntrinsic() == INTRN_JAVA_CONST_CLASS) || (intrinNode.GetIntrinsic() == INTRN_JAVA_INSTANCE_OF)) { PUIdx bFunc = GetBuiltinToUse(intrinNode.GetIntrinsic()); CHECK_FATAL(bFunc != kFuncNotFound, "bFunc not founded"); @@ -3731,6 +3599,11 @@ BaseNode *CGLowerer::LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &i if (intrinDesc.IsVectorOp() || intrinDesc.IsAtomic()) { return &intrinNode; } + + if (intrnID == INTRN_C___tls_get_tbss_anchor || intrnID == INTRN_C___tls_get_tdata_anchor) { + return &intrinNode; + } + CHECK_FATAL(false, "unexpected intrinsic type in CGLowerer::LowerIntrinsicop"); return &intrinNode; } @@ -3956,16 +3829,16 @@ StmtNode 
*CGLowerer::LowerSyncEnterSyncExit(StmtNode &stmt) { MIRIntConst *intConst = safe_cast(cst->GetConstVal()); switch (intConst->GetExtValue()) { case kMCCSyncEnterFast0: - id = INTRN_FIRST_SYNC_ENTER; + id = static_cast(INTRN_FIRST_SYNC_ENTER); break; case kMCCSyncEnterFast1: - id = INTRN_SECOND_SYNC_ENTER; + id = static_cast(INTRN_SECOND_SYNC_ENTER); break; case kMCCSyncEnterFast2: - id = INTRN_THIRD_SYNC_ENTER; + id = static_cast(INTRN_THIRD_SYNC_ENTER); break; case kMCCSyncEnterFast3: - id = INTRN_FOURTH_SYNC_ENTER; + id = static_cast(INTRN_FOURTH_SYNC_ENTER); break; default: CHECK_FATAL(false, "wrong kind for syncenter"); @@ -3973,7 +3846,7 @@ StmtNode *CGLowerer::LowerSyncEnterSyncExit(StmtNode &stmt) { } } else { CHECK_FATAL(nStmt.NumOpnds() == 1, "wrong args for syncexit"); - id = INTRN_YNC_EXIT; + id = static_cast(INTRN_YNC_EXIT); } PUIdx bFunc = GetBuiltinToUse(id); CHECK_FATAL(bFunc != kFuncNotFound, "bFunc should be found"); @@ -4190,6 +4063,8 @@ bool CGLowerer::IsIntrinsicOpHandledAtLowerLevel(MIRIntrinsicID intrinsic) const case INTRN_C_ctz32: case INTRN_C_ctz64: case INTRN_C_fabsl: + case INTRN_C_fmaxl: + case INTRN_C_fminl: case INTRN_C_popcount32: case INTRN_C_popcount64: case INTRN_C_parity32: diff --git a/src/mapleall/maple_be/src/be/switch_lowerer.cpp b/src/mapleall/maple_be/src/be/switch_lowerer.cpp index a017d61b039eb0931937b6b6ace73066b4b959e3..60c7532afa4793cca96be5e3d33ae43ed56e86b4 100644 --- a/src/mapleall/maple_be/src/be/switch_lowerer.cpp +++ b/src/mapleall/maple_be/src/be/switch_lowerer.cpp @@ -173,7 +173,7 @@ CondGotoNode *SwitchLowerer::BuildCondGotoNode(int32 idx, Opcode opCode, BaseNod return cGotoStmt; } -FreqType SwitchLowerer::sumFreq(uint32 startIdx, uint32 endIdx) { +FreqType SwitchLowerer::SumFreq(uint32 startIdx, uint32 endIdx) { ASSERT(startIdx >= 0 && endIdx >=0 && endIdx >= startIdx, "startIdx or endIdx is invalid"); if (Options::profileUse && cgLowerer->GetLabel2Freq().size() > 0 && (startIdx <= switchItems.size() - 1) && 
(endIdx <= switchItems.size() - 1) && @@ -201,7 +201,7 @@ FreqType SwitchLowerer::sumFreq(uint32 startIdx, uint32 endIdx) { } return valid ? freqSum : -1; } else { - return -1; + return -1; } } @@ -240,31 +240,34 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l } } } - rangeGoto = BuildRangeGotoNode(switchItems[start].first, switchItems[start].second, newLabelIdx); + rangeGoto = BuildRangeGotoNode(switchItems[static_cast(start)].first, + switchItems[static_cast(start)].second, newLabelIdx); if (Options::profileUse && funcProfData != nullptr) { funcProfData->SetStmtFreq(rangeGoto->GetStmtID(), freqSum - freqSumChecked); - freqSumChecked += sumFreq(start, start); + freqSumChecked += SumFreq(static_cast(start), static_cast(start)); } if (stmt->GetDefaultLabel() == 0) { localBlk->AddStatement(rangeGoto); } else { - cmpNode = BuildCmpNode(OP_le, switchItems[start].second); + cmpNode = BuildCmpNode(OP_le, switchItems[static_cast(start)].second); ifStmt = static_cast(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode)); ifStmt->GetThenPart()->AddStatement(rangeGoto); if (Options::profileUse && funcProfData != nullptr) { - funcProfData->SetStmtFreq(ifStmt->GetThenPart()->GetStmtID(), freqSum + sumFreq(start, start)); - funcProfData->SetStmtFreq(ifStmt->GetStmtID(), freqSum + sumFreq(start, start)); + funcProfData->SetStmtFreq(ifStmt->GetThenPart()->GetStmtID(), + freqSum + SumFreq(static_cast(start), static_cast(start))); + funcProfData->SetStmtFreq(ifStmt->GetStmtID(), + freqSum + SumFreq(static_cast(start), static_cast(start))); } localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false)); } if (start < end) { - lowBlockNodeChecked = (stmt->GetCasePair(switchItems[start].second).first + 1 == - stmt->GetCasePair(switchItems[start + 1].first).first); + lowBlockNodeChecked = (stmt->GetCasePair(switchItems[static_cast(start)].second).first + 1 == + stmt->GetCasePair(switchItems[static_cast(start) + 1].first).first); } 
++start; } /* if high side starts with a dense item, handle it also */ - while ((start <= end) && (switchItems[end].second != 0)) { + while ((start <= end) && (switchItems[static_cast(end)].second != 0)) { if (!highBlockNodeChecked) { cGoto = BuildCondGotoNode(-1, OP_brtrue, *BuildCmpNode(OP_gt, switchItems[end].second)); if (cGoto != nullptr) { @@ -275,29 +278,32 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l } highBlockNodeChecked = true; } - rangeGoto = BuildRangeGotoNode(switchItems[end].first, switchItems[end].second, newLabelIdx); + rangeGoto = BuildRangeGotoNode(switchItems[static_cast(end)].first, + switchItems[static_cast(end)].second, newLabelIdx); if (Options::profileUse && funcProfData != nullptr) { funcProfData->SetStmtFreq(rangeGoto->GetStmtID(), freqSum - freqSumChecked); - freqSumChecked += sumFreq(end, end); + freqSumChecked += SumFreq(static_cast(end), static_cast(end)); } if (stmt->GetDefaultLabel() == 0) { localBlk->AddStatement(rangeGoto); } else { - cmpNode = BuildCmpNode(OP_ge, switchItems[end].first); + cmpNode = BuildCmpNode(OP_ge, switchItems[static_cast(end)].first); ifStmt = static_cast(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode)); ifStmt->GetThenPart()->AddStatement(rangeGoto); if (Options::profileUse && funcProfData != nullptr) { - funcProfData->SetStmtFreq(ifStmt->GetThenPart()->GetStmtID(), freqSum + sumFreq(end, end)); - funcProfData->SetStmtFreq(ifStmt->GetStmtID(), freqSum + sumFreq(end, end)); + funcProfData->SetStmtFreq(ifStmt->GetThenPart()->GetStmtID(), + freqSum + SumFreq(static_cast(end), static_cast(end))); + funcProfData->SetStmtFreq(ifStmt->GetStmtID(), + freqSum + SumFreq(static_cast(end), static_cast(end))); } localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false)); } if (start < end) { highBlockNodeChecked = - (stmt->GetCasePair(switchItems[end].first).first - 1 == - stmt->GetCasePair(switchItems[end - 1].first).first) || - 
(stmt->GetCasePair(switchItems[end].first).first - 1 == - stmt->GetCasePair(switchItems[end - 1].second).first); + (stmt->GetCasePair(switchItems[static_cast(end)].first).first - 1 == + stmt->GetCasePair(switchItems[static_cast(end) - 1].first).first) || + (stmt->GetCasePair(switchItems[static_cast(end)].first).first - 1 == + stmt->GetCasePair(switchItems[static_cast(end) - 1].second).first); } --end; } @@ -316,7 +322,7 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l } if ((start == end) && lowBlockNodeChecked && highBlockNodeChecked) { /* only 1 case with 1 tag remains */ - auto *gotoStmt = BuildGotoNode(switchItems[static_cast(start)].first); + auto *gotoStmt = BuildGotoNode(switchItems[static_cast(start)].first); if (gotoStmt != nullptr) { localBlk->AddStatement(gotoStmt); if (Options::profileUse && funcProfData != nullptr) { @@ -331,7 +337,7 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l int32 lastIdx = -1; bool freqPriority = false; // The setting of kClusterSwitchDensityLow to such a lower value (0.2) makes other strategies less useful - if (Options::profileUse && funcProfData != nullptr && cgLowerer->GetLabel2Freq().size()) { + if (Options::profileUse && funcProfData != nullptr && (cgLowerer->GetLabel2Freq().size() != 0)) { for (int32 idx = start; idx <= end; idx++) { if (switchItems[static_cast(idx)].second == 0) { freq2case.push_back(std::make_pair(cgLowerer->GetLabel2Freq()[stmt->GetCasePair(static_cast( @@ -351,7 +357,7 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l if (Options::profileUse && funcProfData != nullptr && freqPriority) { for (std::pair f2c : freq2case) { uint32 idx = static_cast(f2c.second); - cGoto = BuildCondGotoNode(idx, OP_brtrue, *BuildCmpNode(OP_eq, idx)); + cGoto = BuildCondGotoNode(static_cast(idx), OP_brtrue, *BuildCmpNode(OP_eq, idx)); if (cGoto != nullptr) { localBlk->AddStatement(cGoto); 
funcProfData->SetStmtFreq(cGoto->GetStmtID(), freqSum - freqSumChecked); @@ -374,21 +380,22 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l cGoto = reinterpret_cast(BuildGotoNode(switchItems[static_cast(start)].first)); } else { cGoto = BuildCondGotoNode(switchItems[static_cast(start)].first, OP_brtrue, - *BuildCmpNode(OP_eq, switchItems[static_cast(start)].first)); + *BuildCmpNode(OP_eq, static_cast(switchItems[static_cast(start)].first))); } if (cGoto != nullptr) { localBlk->AddStatement(cGoto); } if (lowBlockNodeChecked && (start < end)) { - lowBlockNodeChecked = (stmt->GetCasePair(switchItems[static_cast(start)].first).first + 1 == - stmt->GetCasePair(switchItems[static_cast(start + 1)].first).first); + lowBlockNodeChecked = ( + stmt->GetCasePair(static_cast(switchItems[static_cast(start)].first)).first + 1 == + stmt->GetCasePair(static_cast(switchItems[static_cast(start + 1)].first)).first); } ++start; } } if (start <= end) { /* recursive call */ BlockNode *tmp = BuildCodeForSwitchItems(start, end, lowBlockNodeChecked, highBlockNodeChecked, - sumFreq(start, end) ); + SumFreq(static_cast(start), static_cast(end))); CHECK_FATAL(tmp != nullptr, "tmp should not be nullptr"); localBlk->AppendStatementsFromBlock(*tmp); } else if (!lowBlockNodeChecked || !highBlockNodeChecked) { @@ -401,8 +408,8 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l return localBlk; } - int64 lowestTag = stmt->GetCasePair(switchItems[static_cast(start)].first).first; - int64 highestTag = stmt->GetCasePair(switchItems[static_cast(end)].first).first; + int64 lowestTag = stmt->GetCasePair(static_cast(switchItems[static_cast(start)].first)).first; + int64 highestTag = stmt->GetCasePair(static_cast(switchItems[static_cast(end)].first)).first; /* * if lowestTag and higesttag have the same sign, use difference @@ -417,24 +424,24 @@ BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool l : (highestTag + 
lowestTag) / 2; /* find the mid-point in switch_items between start and end */ int32 mid = start; - while (stmt->GetCasePair(switchItems[mid].first).first < middleTag) { + while (stmt->GetCasePair(switchItems[static_cast(mid)].first).first < middleTag) { ++mid; } ASSERT(mid >= start, "switch lowering logic mid should greater than or equal start"); ASSERT(mid <= end, "switch lowering logic mid should less than or equal end"); /* generate test for binary search */ if (stmt->GetDefaultLabel() != 0) { - cmpNode = BuildCmpNode(OP_lt, static_cast(switchItems[static_cast(mid)].first)); + cmpNode = BuildCmpNode(OP_lt, static_cast(switchItems[static_cast(mid)].first)); ifStmt = static_cast(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode)); - bool leftHighBNdChecked = (stmt->GetCasePair(switchItems.at(mid - 1).first).first + 1 == - stmt->GetCasePair(switchItems.at(mid).first).first) || - (stmt->GetCasePair(switchItems.at(mid - 1).second).first + 1 == - stmt->GetCasePair(switchItems.at(mid).first).first); + bool leftHighBNdChecked = (stmt->GetCasePair(static_cast(switchItems.at(mid - 1).first)).first + 1 == + stmt->GetCasePair(static_cast(switchItems.at(mid).first)).first) || + (stmt->GetCasePair(static_cast(switchItems.at(mid - 1).second)).first + 1 == + stmt->GetCasePair(static_cast(switchItems.at(mid).first)).first); if (Options::profileUse && funcProfData != nullptr) { ifStmt->SetThenPart(BuildCodeForSwitchItems(start, mid - 1, lowBlockNodeChecked, leftHighBNdChecked, - sumFreq(start, mid - 1))); + SumFreq(static_cast(start), static_cast(mid) - 1))); ifStmt->SetElsePart(BuildCodeForSwitchItems(mid, end, true, highBlockNodeChecked, - sumFreq(mid, end))); + SumFreq(static_cast(mid), static_cast(end)))); } else { ifStmt->SetThenPart(BuildCodeForSwitchItems(start, mid - 1, lowBlockNodeChecked, leftHighBNdChecked, -1)); ifStmt->SetElsePart(BuildCodeForSwitchItems(mid, end, true, highBlockNodeChecked, -1)); @@ -478,7 +485,7 @@ BlockNode *SwitchLowerer::LowerSwitch(LabelIdx 
newLabelIdx) { FindClusters(clusters); InitSwitchItems(clusters); BlockNode *blkNode = BuildCodeForSwitchItems(0, static_cast(switchItems.size()) - 1, false, false, - sumFreq(0, switchItems.size() - 1), newLabelIdx); + SumFreq(0, static_cast(switchItems.size() - 1)), newLabelIdx); if (!jumpToDefaultBlockGenerated) { GotoNode *gotoDft = BuildGotoNode(-1); if (gotoDft != nullptr) { diff --git a/src/mapleall/maple_be/src/be/trycatchblockslower.cpp b/src/mapleall/maple_be/src/be/trycatchblockslower.cpp index 8dc8a37f072fe0e98a1888eb1dec12db50d919c2..80f1f7c234d7bce9904699987802c76faa8d7fe2 100644 --- a/src/mapleall/maple_be/src/be/trycatchblockslower.cpp +++ b/src/mapleall/maple_be/src/be/trycatchblockslower.cpp @@ -34,7 +34,7 @@ StmtNode *TryCatchBlocksLower::MoveCondGotoIntoTry(BBT &jtBB, BBT &condbrBB, con StmtNode *firstStmtMovedIn = nullptr; const MapleVector &bbs = labeledBBsInTry; StmtNode *jtStmt = jtBB.GetKeyStmt(); -#if DEBUG +#if defined(DEBUG) && DEBUG StmtNode *js = jtBB.GetFirstStmt(); while (js->GetOpCode() != OP_try) { js = js->GetNext(); @@ -208,7 +208,7 @@ void TryCatchBlocksLower::RecoverBasicBlock() { break; } case OP_catch: { -#if DEBUG +#if defined(DEBUG) && DEBUG StmtNode *ss = stmt->GetPrev(); while ((ss != nullptr) && (ss->GetOpCode() == OP_comment)) { ss = ss->GetPrev(); @@ -678,7 +678,7 @@ void TryCatchBlocksLower::PalceCatchSeenSofar(BBT &insertAfter) { lastBB = lastBB->GetFallthruBranch(); } -#if DEBUG +#if defined(DEBUG) && DEBUG BBT::ValidateStmtList(bodyFirst); #endif if (lastBB->GetFallthruBranch() != nullptr) { @@ -826,7 +826,7 @@ void TryCatchBlocksLower::TraverseBBList() { if (tryEndTryBlock.GetEndTryBB()->GetLastStmt() == body.GetLast()) { bodyEndWithEndTry = true; } -#if DEBUG +#if defined(DEBUG) && DEBUG for (size_t i = 0; i < tryEndTryBlock.GetEnclosedBBsSize(); ++i) { CHECK_FATAL(tryEndTryBlock.GetEnclosedBBsElem(i), "there should not be nullptr in enclosedBBs"); } @@ -837,7 +837,7 @@ void TryCatchBlocksLower::TraverseBBList() { 
BBT *insertAfter = FindInsertAfterBB(); PlaceRelocatedBB(*insertAfter); -#if DEBUG +#if defined(DEBUG) && DEBUG CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); BBT::ValidateStmtList(bodyFirst); #endif diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp index b3f6ab85e5b8fb362502d0fa625eab87fcde947f..833e9c8649f34e035524cf209ae0b9f118a1776e 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp @@ -46,7 +46,8 @@ MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, MIRStructType *structType = static_cast(mirType); symType = structType->GetFieldType(fieldId)->GetPrimType(); if (baseReg || !isCopy) { - fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + fieldOffset = static_cast(static_cast(structType-> + GetFieldOffsetFromBaseAddr(fieldId).byteOffset)); } } uint32 opndSz = (symType == PTY_agg) ? 
k64BitSize : GetPrimTypeBitSize(symType); @@ -58,21 +59,21 @@ MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, OfstOperand *ofstOpnd = &a64func->GetOrCreateOfstOpnd(fieldOffset, k32BitSize); return *a64func->CreateMemOperand(opndSz, *baseReg, *ofstOpnd); } else { - return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, static_cast(fieldOffset)); } } MemOperand &AArch64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const { return static_cast(cgFunc)->GetOrCreateMemOpnd(symbol, offset, opndSize); } -Operand *AArch64MPIsel::SelectFloatingConst(MIRConst &mirConst, PrimType primType, const BaseNode &parent) const { +Operand *AArch64MPIsel::SelectFloatingConst(MIRConst &floatingConst, PrimType primType, const BaseNode &parent) const { CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const"); AArch64CGFunc *a64Func = static_cast(cgFunc); if (primType == PTY_f64) { - auto *dblConst = safe_cast(mirConst); + auto *dblConst = safe_cast(floatingConst); return a64Func->HandleFmovImm(primType, dblConst->GetIntValue(), *dblConst, parent); } else { - auto *floatConst = safe_cast(mirConst); + auto *floatConst = safe_cast(floatingConst); return a64Func->HandleFmovImm(primType, floatConst->GetIntValue(), *floatConst, parent); } } @@ -101,17 +102,17 @@ void AArch64MPIsel::SelectReturn(bool noOpnd) { } } -void AArch64MPIsel::CreateCallStructParamPassByStack(const MemOperand &memOpnd, uint32 symSize, int32 baseOffset) { - uint32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); +void AArch64MPIsel::CreateCallStructParamPassByStack(const MemOperand &addrOpnd, uint32 symSize, int32 baseOffset) { + uint32 copyTime = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); for (uint32 i = 0; i < copyTime; ++i) { MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); - 
addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); - ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + addrMemOpnd.SetBaseRegister(*addrOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*addrOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); addrMemOpnd.SetOffsetOperand(newImmOpnd); RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RSP, k64BitSize, kRegTyInt); Operand &stMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(spOpnd, - (baseOffset + i * GetPointerSize()), k64BitSize); + static_cast((static_cast(baseOffset) + i * GetPointerSize())), k64BitSize); SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); } } @@ -158,7 +159,7 @@ void AArch64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, AArch64CallCon /* create call struct param pass */ if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) { - CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset); + CreateCallStructParamPassByStack(memOpnd, static_cast(argSize), ploc.memOffset); } else { CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); @@ -187,7 +188,7 @@ void AArch64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds) { bool AArch64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { if (symbol.GetStorageClass() == kScFormal && - cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + GlobalTables::GetTypeTable().GetTypeFromTyIdx(symbol.GetTyIdx().GetIdx())->GetSize() > k16ByteSize) { return true; } return false; @@ -300,7 +301,7 @@ void AArch64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd, Opera } Insn &AArch64MPIsel::AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, - ListOperand ¶mOpnds, ListOperand &retOpnds) { + ListOperand ¶mOpnds, ListOperand &retOpnds) const { Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, 
AArch64CG::kMd[mOp]); callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); cgFunc->GetCurBB()->AppendInsn(callInsn); @@ -309,7 +310,7 @@ Insn &AArch64MPIsel::AppendCall(AArch64MOP_t mOp, Operand &targetOpnd, return callInsn; } -void AArch64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) { +void AArch64MPIsel::SelectCalleeReturn(const MIRType *retType, ListOperand &retOpnds) const { if (retType == nullptr) { return; } @@ -344,7 +345,6 @@ void AArch64MPIsel::SelectGoto(GotoNode &stmt) { cgFunc->GetCurBB()->AppendInsn(jmpInsn); jmpInsn.AddOpndChain(targetOpnd); cgFunc->SetCurBBKind(BB::kBBGoto); - return; } void AArch64MPIsel::SelectIgoto(Operand &opnd0) { @@ -353,7 +353,6 @@ void AArch64MPIsel::SelectIgoto(Operand &opnd0) { Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, AArch64CG::kMd[mOp]); jmpInsn.AddOpndChain(opnd0); cgFunc->GetCurBB()->AppendInsn(jmpInsn); - return; } /* The second parameter in function va_start does not need to be concerned here, @@ -423,7 +422,7 @@ void AArch64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOp /* load the displacement into a register by accessing memory at base + index*8 */ AArch64CGFunc *a64func = static_cast(cgFunc); - BitShiftOperand &bitOpnd = a64func->CreateBitShiftOperand(BitShiftOperand::kLSL, k3BitSize, k8BitShift); + BitShiftOperand &bitOpnd = a64func->CreateBitShiftOperand(BitShiftOperand::kShiftLSL, k3BitSize, k8BitShift); Operand *disp = static_cast(cgFunc)->CreateMemOperand(k64BitSize, baseOpnd, *indexOpnd, bitOpnd); RegOperand &tgt = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); SelectAdd(tgt, baseOpnd, *disp, PTY_u64); @@ -466,8 +465,8 @@ void AArch64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode) { Opcode condOp = condGotoNode.GetOpCode(); if (condNode.GetOpCode() == OP_constval) { auto &constValNode = static_cast(condNode); - if (((OP_brfalse == condOp) && constValNode.GetConstVal()->IsZero()) || - 
((OP_brtrue == condOp) && !constValNode.GetConstVal()->IsZero())) { + if (((condOp == OP_brfalse) && constValNode.GetConstVal()->IsZero()) || + ((condOp == OP_brtrue) && !constValNode.GetConstVal()->IsZero())) { auto *gotoStmt = cgFunc->GetMemoryPool()->New(OP_goto); gotoStmt->SetOffset(condGotoNode.GetOffset()); HandleGoto(*gotoStmt, *this); // isel's @@ -551,11 +550,8 @@ Operand *AArch64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opn return cgFunc->SelectRem(node, opnd0, opnd1, parent); } -Operand *AArch64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) { - (void)opnd0; - (void)opnd1; - (void)primType; - (void)opcode; +Operand *AArch64MPIsel::SelectDivRem(RegOperand& /* opnd0 */, RegOperand& /* opnd1 */, + PrimType /* primType */, Opcode /* opcode */) const { CHECK_FATAL_FALSE("Invalid MPISel function"); return nullptr; } @@ -564,7 +560,7 @@ Operand *AArch64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand & return cgFunc->SelectCmpOp(node, opnd0, opnd1, parent); } -void AArch64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) { +void AArch64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) const { (void)opnd0; (void)opnd1; (void)primType; @@ -585,8 +581,8 @@ void AArch64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, a64func->SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType); } -Operand *AArch64MPIsel::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name, Operand &opnd0, - const BaseNode &parent) { +Operand *AArch64MPIsel::SelectIntrinsicOpWithOneParam(const IntrinsicopNode &intrnNode, std::string name, + Operand &opnd0, const BaseNode /* &parent */) { PrimType ptype = intrnNode.Opnd(0)->GetPrimType(); Operand *opnd = &opnd0; AArch64CGFunc *a64func = static_cast(cgFunc); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_aggressive_opt.cpp 
b/src/mapleall/maple_be/src/cg/aarch64/aarch64_aggressive_opt.cpp index 37e39f3e0616b4a0482242e128f16132358fe43d..c41ea2cf4c7e2efbdea363072c6655e34607b85a 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_aggressive_opt.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_aggressive_opt.cpp @@ -26,6 +26,10 @@ void AArch64CombineRedundantX16Opt::Run() { if (!insn->IsMachineInstruction()) { continue; } + if (insn->GetMachineOpcode() == MOP_c_counter) { + ASSERT(bb->GetFirstInsn() == insn, "invalid pgo counter-insn"); + continue; + } if (HasUseOpndReDef(*insn)) { hasUseOpndReDef = true; } @@ -224,7 +228,8 @@ bool AArch64CombineRedundantX16Opt::IsUseX16MemInsn(Insn &insn) { if (baseOpnd == nullptr || baseOpnd->GetRegisterNumber() != R16) { return false; } - CHECK_FATAL(memOpnd.GetAddrMode() == MemOperand::kBOI, "invalid mem instruction which uses x16"); + CHECK_FATAL(memOpnd.GetAddrMode() == MemOperand::kBOI || memOpnd.GetAddrMode() == MemOperand::kLo12Li, + "invalid mem instruction which uses x16"); return true; } @@ -235,7 +240,7 @@ void AArch64CombineRedundantX16Opt::RecordUseX16InsnInfo(Insn &insn, MemPool *tm auto *x16UseInfo = tmpMp->New(); x16UseInfo->memInsn = &insn; x16UseInfo->addPrevInsns = tmpMp->New>(tmpAlloc->Adapter()); - x16UseInfo->InsertAddPrevInsns(recentX16DefPrevInsns); + x16UseInfo->InsertAddPrevInsns(*recentX16DefPrevInsns); x16UseInfo->addInsn = recentX16DefInsn; x16UseInfo->curAddImm = recentAddImm; x16UseInfo->curOfst = (ofstOpnd == nullptr ? 
0 : ofstOpnd->GetOffsetValue()); @@ -297,7 +302,7 @@ void AArch64CombineRedundantX16Opt::FindCommonX16DefInsns(MemPool *tmpMp, MapleA } } -void AArch64CombineRedundantX16Opt::ProcessSameAddImmCombineInfo(MemPool *tmpMp, MapleAllocator *tmpAlloc) { +void AArch64CombineRedundantX16Opt::ProcessSameAddImmCombineInfo(MemPool *tmpMp, MapleAllocator *tmpAlloc) const { CHECK_FATAL(recentSplitUseOpnd != nullptr && recentAddImm != 0, "find split insn info failed"); auto *newCombineInfo = tmpMp->New(); newCombineInfo->combineAddImm = recentAddImm; @@ -366,7 +371,7 @@ void AArch64CombineRedundantX16Opt::CombineRedundantX16DefInsns(BB &bb) { if (combineInfos->empty()) { return; } - for (uint32 i = 0; i < combineInfos->size(); ++i ) { + for (uint32 i = 0; i < combineInfos->size(); ++i) { CombineInfo *combineInfo = (*combineInfos)[i]; if (combineInfo->combineUseInfos->size() <= 1) { continue; @@ -374,7 +379,7 @@ void AArch64CombineRedundantX16Opt::CombineRedundantX16DefInsns(BB &bb) { UseX16InsnInfo *firstInsnInfo = (*combineInfo->combineUseInfos)[0]; auto &oldImmOpnd = static_cast(firstInsnInfo->addInsn->GetOperand(kInsnThirdOpnd)); auto &commonAddImmOpnd = aarFunc.CreateImmOperand( - combineInfo->combineAddImm,oldImmOpnd.GetSize(), oldImmOpnd.IsSignedValue()); + combineInfo->combineAddImm, oldImmOpnd.GetSize(), oldImmOpnd.IsSignedValue()); uint32 size = combineInfo->addUseOpnd->GetSize(); aarFunc.SelectAddAfterInsnBySize(firstInsnInfo->addInsn->GetOperand(kInsnFirstOpnd), *combineInfo->addUseOpnd, commonAddImmOpnd, size, false, *firstInsnInfo->addInsn); @@ -395,7 +400,7 @@ void AArch64CombineRedundantX16Opt::CombineRedundantX16DefInsns(BB &bb) { } } -bool AArch64CombineRedundantX16Opt::HasX16Def(Insn &insn) { +bool AArch64CombineRedundantX16Opt::HasX16Def(const Insn &insn) const { for (uint32 defRegNo : insn.GetDefRegs()) { if (defRegNo == R16) { return true; @@ -404,7 +409,7 @@ bool AArch64CombineRedundantX16Opt::HasX16Def(Insn &insn) { return false; } -bool 
AArch64CombineRedundantX16Opt::HasUseOpndReDef(Insn &insn) { +bool AArch64CombineRedundantX16Opt::HasUseOpndReDef(const Insn &insn) const { for (uint32 defRegNo : insn.GetDefRegs()) { if (recentSplitUseOpnd != nullptr && defRegNo == recentSplitUseOpnd->GetRegisterNumber()) { return true; @@ -413,7 +418,7 @@ bool AArch64CombineRedundantX16Opt::HasUseOpndReDef(Insn &insn) { return false; } -bool AArch64CombineRedundantX16Opt::HasX16Use(Insn &insn) { +bool AArch64CombineRedundantX16Opt::HasX16Use(const Insn &insn) const{ MOperator mop = insn.GetMachineOpcode(); if (mop == MOP_wmovri32 || mop == MOP_xmovri64) { return false; @@ -435,7 +440,8 @@ bool AArch64CombineRedundantX16Opt::HasX16Use(Insn &insn) { uint32 AArch64CombineRedundantX16Opt::GetMemSizeFromMD(Insn &insn) { const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; ASSERT(md != nullptr, "get md failed"); - const OpndDesc *od = md->GetOpndDes(kInsnFirstOpnd); + uint32 memOpndIdx = GetMemOperandIdx(insn); + const OpndDesc *od = md->GetOpndDes(memOpndIdx); ASSERT(od != nullptr, "get od failed"); return od->GetSize(); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp index a62973366fe927b370ac462ace93b32a624a1409..22db1862c483bc9465a610e712a1c2d11544eb9a 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp @@ -292,11 +292,19 @@ void AArch64AlignAnalysis::AddNopAfterMark() { Insn *detect = insn->GetPrev(); BB *region = bb; while (detect != nullptr || region != aarFunc->GetFirstBB()) { + bool isBreak = false; while (detect == nullptr) { - ASSERT(region->GetPrev() != nullptr, "get region prev failed"); + // If region's prev bb and detect both are nullptr, it should end the while loop. 
+ if (region->GetPrev() == nullptr) { + isBreak = true; + break; + } region = region->GetPrev(); detect = region->GetLastInsn(); } + if (isBreak) { + break; + } if (detect->GetMachineOpcode() == MOP_xuncond || detect->GetMachineOpcode() == MOP_xret || detect->GetMachineOpcode() == MOP_xbr) { findIsland = true; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp index fda8d6055b615a8b1bc906fee7bcd655632675d3..1f5f236e465139b4b8fb7eaa00db24a1441c9301 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp @@ -21,305 +21,61 @@ namespace maplebe { using namespace maple; void AArch64MoveRegArgs::Run() { + BB *formerCurBB = aarFunc->GetCurBB(); MoveVRegisterArgs(); MoveRegisterArgs(); + aarFunc->SetCurBB(*formerCurBB); } -void AArch64MoveRegArgs::CollectRegisterArgs(std::map &argsList, - std::vector &indexList, - std::map &pairReg, - std::vector &numFpRegs, - std::vector &fpSize) const { - uint32 numFormal = static_cast(aarFunc->GetFunction().GetFormalCount()); - numFpRegs.resize(numFormal); - fpSize.resize(numFormal); +void AArch64MoveRegArgs::MoveRegisterArgs() { + aarFunc->GetDummyBB()->ClearInsns(); + aarFunc->SetCurBB(*aarFunc->GetDummyBB()); + + auto &mirFunc = aarFunc->GetFunction(); AArch64CallConvImpl parmlocator(aarFunc->GetBecommon()); CCLocInfo ploc; - uint32 start = 0; - if (numFormal > 0) { - MIRFunction *func = aarFunc->GetBecommon().GetMIRModule().CurFunction(); - if (func->IsReturnStruct() && func->IsFirstArgReturn()) { - TyIdx tyIdx = func->GetFuncRetStructTyIdx(); - if (aarFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { - start = 1; - } - } - } - for (uint32 i = start; i < numFormal; ++i) { - MIRType *ty = aarFunc->GetFunction().GetNthParamType(i); - parmlocator.LocateNextParm(*ty, ploc, i == 0, &aarFunc->GetFunction()); + for (uint32 i = 0; i < mirFunc.GetFormalCount(); ++i) { + MIRType *ty = 
mirFunc.GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, mirFunc.GetMIRFuncType()); if (ploc.reg0 == kRinvalid) { continue; } - AArch64reg reg0 = static_cast(ploc.reg0); - MIRSymbol *sym = aarFunc->GetFunction().GetFormal(i); + auto *sym = mirFunc.GetFormal(i); if (sym->IsPreg()) { continue; } - argsList[i] = reg0; - indexList.emplace_back(i); - if (ploc.reg1 == kRinvalid) { - continue; - } - if (ploc.numFpPureRegs > 0) { - uint32 index = i; - numFpRegs[index] = ploc.numFpPureRegs; - fpSize[index] = ploc.fpSize; - continue; - } - pairReg[i] = static_cast(ploc.reg1); - } -} - -ArgInfo AArch64MoveRegArgs::GetArgInfo(std::map &argsList, std::vector &numFpRegs, - std::vector &fpSize, uint32 argIndex) const { - ArgInfo argInfo; - argInfo.reg = argsList[argIndex]; - argInfo.mirTy = aarFunc->GetFunction().GetNthParamType(argIndex); - argInfo.symSize = aarFunc->GetBecommon().GetTypeSize(argInfo.mirTy->GetTypeIndex()); - argInfo.memPairSecondRegSize = 0; - argInfo.doMemPairOpt = false; - argInfo.createTwoStores = false; - argInfo.isTwoRegParm = false; - - if (GetVecLanes(argInfo.mirTy->GetPrimType()) > 0) { - /* vector type */ - argInfo.stkSize = argInfo.symSize; - } else if ((argInfo.symSize > k8ByteSize) && (argInfo.symSize <= k16ByteSize)) { - argInfo.isTwoRegParm = true; - if (numFpRegs[argIndex] > kOneRegister) { - argInfo.symSize = argInfo.stkSize = fpSize[argIndex]; - } else { - if (argInfo.symSize > k12ByteSize) { - argInfo.memPairSecondRegSize = k8ByteSize; - } else { - /* Round to 4 the stack space required for storing the struct */ - argInfo.memPairSecondRegSize = k4ByteSize; - } - argInfo.doMemPairOpt = true; - if (CGOptions::IsArm64ilp32()) { - argInfo.symSize = argInfo.stkSize = k8ByteSize; - } else { - argInfo.symSize = argInfo.stkSize = GetPointerSize(); + auto *symLoc = aarFunc->GetMemlayout()->GetSymAllocInfo(sym->GetStIndex()); + auto *baseOpnd = aarFunc->GetBaseReg(*symLoc); + auto offset = aarFunc->GetBaseOffset(*symLoc); + + auto 
generateStrInsn = + [this, baseOpnd, &offset, sym, symLoc](AArch64reg reg, PrimType primType) { + RegOperand ®Opnd = aarFunc->GetOrCreatePhysicalRegisterOperand(reg, + GetPrimTypeBitSize(primType), aarFunc->GetRegTyFromPrimTy(primType)); + OfstOperand &ofstOpnd = aarFunc->CreateOfstOpnd(offset, k32BitSize); + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + ofstOpnd.SetVary(kUnAdjustVary); } - } - } else if (argInfo.symSize > k16ByteSize) { - /* For large struct passing, a pointer to the copy is used. */ - if (CGOptions::IsArm64ilp32()) { - argInfo.symSize = argInfo.stkSize = k8ByteSize; - } else { - argInfo.symSize = argInfo.stkSize = GetPointerSize(); - } - } else if ((argInfo.mirTy->GetPrimType() == PTY_agg) && (argInfo.symSize < k8ByteSize)) { - /* - * For small aggregate parameter, set to minimum of 8 bytes. - * B.5:If the argument type is a Composite Type then the size of the argument is rounded up to the - * nearest multiple of 8 bytes. - */ - argInfo.symSize = argInfo.stkSize = k8ByteSize; - } else if (numFpRegs[argIndex] > kOneRegister) { - argInfo.isTwoRegParm = true; - argInfo.symSize = argInfo.stkSize = fpSize[argIndex]; - } else { - argInfo.stkSize = (argInfo.symSize < k4ByteSize) ? k4ByteSize : argInfo.symSize; - if (argInfo.symSize > k4ByteSize) { - argInfo.symSize = k8ByteSize; - } - } - argInfo.regType = (argInfo.reg < V0) ? kRegTyInt : kRegTyFloat; - argInfo.sym = aarFunc->GetFunction().GetFormal(argIndex); - CHECK_NULL_FATAL(argInfo.sym); - argInfo.symLoc = - static_cast(aarFunc->GetMemlayout()->GetSymAllocInfo(argInfo.sym->GetStIndex())); - CHECK_NULL_FATAL(argInfo.symLoc); - if (argInfo.doMemPairOpt && ((static_cast(aarFunc->GetBaseOffset(*(argInfo.symLoc))) & 0x7) != 0)) { - /* Do not optimize for struct reg pair for unaligned access. - * However, this symbol requires two parameter registers, separate stores must be generated. 
- */ - argInfo.symSize = GetPointerSize(); - argInfo.doMemPairOpt = false; - argInfo.createTwoStores = true; - } - return argInfo; -} - -bool AArch64MoveRegArgs::IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const { - if (firstArgInfo.symLoc->GetMemSegment() != secondArgInfo.symLoc->GetMemSegment()) { - return false; - } - if (firstArgInfo.symSize != secondArgInfo.symSize) { - return false; - } - if (firstArgInfo.symSize != k4ByteSize && firstArgInfo.symSize != k8ByteSize) { - return false; - } - if (firstArgInfo.regType != secondArgInfo.regType) { - return false; - } - return firstArgInfo.symLoc->GetOffset() + firstArgInfo.stkSize == secondArgInfo.symLoc->GetOffset(); -} - -void AArch64MoveRegArgs::GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) { - RegOperand *baseOpnd = static_cast(aarFunc->GetBaseReg(*firstArgInfo.symLoc)); - RegOperand ®Opnd = aarFunc->GetOrCreatePhysicalRegisterOperand(firstArgInfo.reg, - firstArgInfo.stkSize * kBitsPerByte, - firstArgInfo.regType); - MOperator mOp = firstArgInfo.regType == kRegTyInt ? ((firstArgInfo.stkSize > k4ByteSize) ? MOP_xstp : MOP_wstp) - : ((firstArgInfo.stkSize > k4ByteSize) ? MOP_dstp : MOP_sstp); - RegOperand *regOpnd2 = &aarFunc->GetOrCreatePhysicalRegisterOperand(secondArgInfo.reg, - firstArgInfo.stkSize * kBitsPerByte, - firstArgInfo.regType); - if (firstArgInfo.doMemPairOpt && firstArgInfo.isTwoRegParm) { - AArch64reg regFp2 = static_cast(firstArgInfo.reg + kOneRegister); - regOpnd2 = &aarFunc->GetOrCreatePhysicalRegisterOperand(regFp2, - firstArgInfo.stkSize * kBitsPerByte, - firstArgInfo.regType); - } - - int32 limit = (secondArgInfo.stkSize > k4ByteSize) ? 
kStpLdpImm64UpperBound : kStpLdpImm32UpperBound; - int32 stOffset = aarFunc->GetBaseOffset(*firstArgInfo.symLoc); - MemOperand *memOpnd = nullptr; - if (stOffset > limit || baseReg != nullptr) { - if (baseReg == nullptr || lastSegment != firstArgInfo.symLoc->GetMemSegment()) { - ImmOperand &immOpnd = - aarFunc->CreateImmOperand(stOffset - firstArgInfo.symLoc->GetOffset(), k64BitSize, false); - baseReg = &aarFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); - lastSegment = firstArgInfo.symLoc->GetMemSegment(); - aarFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, GetLoweredPtrType()); - } - uint64 offVal = static_cast(firstArgInfo.symLoc->GetOffset()); - OfstOperand &offsetOpnd = aarFunc->CreateOfstOpnd(offVal, k32BitSize); - if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { - offsetOpnd.SetVary(kUnAdjustVary); - } - memOpnd = aarFunc->CreateMemOperand(firstArgInfo.stkSize * kBitsPerByte, *baseReg, offsetOpnd); - } else { - OfstOperand &offsetOpnd = aarFunc->CreateOfstOpnd(static_cast(static_cast(stOffset)), - k32BitSize); - if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { - offsetOpnd.SetVary(kUnAdjustVary); - } - memOpnd = aarFunc->CreateMemOperand(firstArgInfo.stkSize * kBitsPerByte, *baseOpnd, offsetOpnd); - } - Insn &pushInsn = aarFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *regOpnd2, *memOpnd); - if (aarFunc->GetCG()->GenerateVerboseCG()) { - std::string argName = firstArgInfo.sym->GetName() + " " + secondArgInfo.sym->GetName(); - pushInsn.SetComment(std::string("store param: ").append(argName)); - } - aarFunc->GetCurBB()->AppendInsn(pushInsn); -} - -void AArch64MoveRegArgs::GenOneInsn(const ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, - int32 offset) const { - MOperator mOp = aarFunc->PickStInsn(stBitSize, argInfo.mirTy->GetPrimType()); - RegOperand ®Opnd = aarFunc->GetOrCreatePhysicalRegisterOperand(dest, stBitSize, argInfo.regType); - - OfstOperand 
&offsetOpnd = aarFunc->CreateOfstOpnd(static_cast(static_cast(offset)), k32BitSize); - if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { - offsetOpnd.SetVary(kUnAdjustVary); - } - MemOperand *memOpnd = aarFunc->CreateMemOperand(stBitSize, baseOpnd, offsetOpnd); - Insn &insn = aarFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *memOpnd); - if (aarFunc->GetCG()->GenerateVerboseCG()) { - insn.SetComment(std::string("store param: ").append(argInfo.sym->GetName())); - } - aarFunc->GetCurBB()->AppendInsn(insn); -} - -void AArch64MoveRegArgs::GenerateStrInsn(const ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize) { - int32 stOffset = aarFunc->GetBaseOffset(*argInfo.symLoc); - auto *baseOpnd = static_cast(aarFunc->GetBaseReg(*argInfo.symLoc)); - RegOperand ®Opnd = - aarFunc->GetOrCreatePhysicalRegisterOperand(argInfo.reg, argInfo.stkSize * kBitsPerByte, argInfo.regType); - MemOperand *memOpnd = nullptr; - if (MemOperand::IsPIMMOffsetOutOfRange(stOffset, argInfo.symSize * kBitsPerByte) || - (baseReg != nullptr && (lastSegment == argInfo.symLoc->GetMemSegment()))) { - if (baseReg == nullptr || lastSegment != argInfo.symLoc->GetMemSegment()) { - ImmOperand &immOpnd = aarFunc->CreateImmOperand(stOffset - argInfo.symLoc->GetOffset(), k64BitSize, - false); - baseReg = &aarFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); - lastSegment = argInfo.symLoc->GetMemSegment(); - aarFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, PTY_a64); - } - OfstOperand &offsetOpnd = aarFunc->CreateOfstOpnd(static_cast(argInfo.symLoc->GetOffset()), k32BitSize); - if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { - offsetOpnd.SetVary(kUnAdjustVary); - } - memOpnd = aarFunc->CreateMemOperand(argInfo.symSize * kBitsPerByte, *baseReg, offsetOpnd); - } else { - OfstOperand &offsetOpnd = aarFunc->CreateOfstOpnd(static_cast(static_cast(stOffset)), - k32BitSize); - if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == 
kMsArgsStkPassed) { - offsetOpnd.SetVary(kUnAdjustVary); - } - memOpnd = aarFunc->CreateMemOperand(argInfo.symSize * kBitsPerByte, *baseOpnd, offsetOpnd); - } - - MOperator mOp = aarFunc->PickStInsn(argInfo.symSize * kBitsPerByte, argInfo.mirTy->GetPrimType()); - Insn &insn = aarFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *memOpnd); - if (aarFunc->GetCG()->GenerateVerboseCG()) { - insn.SetComment(std::string("store param: ").append(argInfo.sym->GetName())); - } - aarFunc->GetCurBB()->AppendInsn(insn); + auto *memOpnd = aarFunc->CreateMemOperand(GetPrimTypeBitSize(primType), *baseOpnd, ofstOpnd); - if (argInfo.createTwoStores || argInfo.doMemPairOpt) { - /* second half of the struct passing by registers. */ - uint32 part2BitSize = argInfo.memPairSecondRegSize * kBitsPerByte; - GenOneInsn(argInfo, *baseOpnd, part2BitSize, reg2, (stOffset + GetPointerSize())); - } else if (numFpRegs > kOneRegister) { - uint32 fpSizeBits = fpSize * kBitsPerByte; - AArch64reg regFp2 = static_cast(argInfo.reg + kOneRegister); - GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp2, (stOffset + static_cast(fpSize))); - if (numFpRegs > kTwoRegister) { - AArch64reg regFp3 = static_cast(argInfo.reg + kTwoRegister); - GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp3, (stOffset + static_cast(fpSize * k4BitShift))); + MOperator mOp = aarFunc->PickStInsn(GetPrimTypeBitSize(primType), primType); + Insn &insn = aarFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *memOpnd); + if (aarFunc->GetCG()->GenerateVerboseCG()) { + insn.SetComment(std::string("store param: ").append(sym->GetName())); + } + aarFunc->GetCurBB()->AppendInsn(insn); + offset += static_cast(GetPrimTypeSize(primType)); + }; + generateStrInsn(static_cast(ploc.GetReg0()), ploc.GetPrimTypeOfReg0()); + if (ploc.GetReg1() != kRinvalid) { + generateStrInsn(static_cast(ploc.GetReg1()), ploc.GetPrimTypeOfReg1()); } - if (numFpRegs > kThreeRegister) { - AArch64reg regFp3 = static_cast(argInfo.reg + kThreeRegister); - GenOneInsn(argInfo, 
*baseOpnd, fpSizeBits, regFp3, (stOffset + static_cast(fpSize * k8BitShift))); + if (ploc.GetReg2() != kRinvalid) { + generateStrInsn(static_cast(ploc.GetReg2()), ploc.GetPrimTypeOfReg2()); } - } -} - -void AArch64MoveRegArgs::MoveRegisterArgs() { - BB *formerCurBB = aarFunc->GetCurBB(); - aarFunc->GetDummyBB()->ClearInsns(); - aarFunc->SetCurBB(*aarFunc->GetDummyBB()); - - std::map movePara; - std::vector moveParaIndex; - std::map pairReg; - std::vector numFpRegs; - std::vector fpSize; - CollectRegisterArgs(movePara, moveParaIndex, pairReg, numFpRegs, fpSize); - - std::vector::iterator it; - std::vector::iterator next; - for (it = moveParaIndex.begin(); it != moveParaIndex.end(); ++it) { - uint32 firstIndex = *it; - ArgInfo firstArgInfo = GetArgInfo(movePara, numFpRegs, fpSize, firstIndex); - next = it; - ++next; - if ((next != moveParaIndex.end()) || (firstArgInfo.doMemPairOpt)) { - uint32 secondIndex = (firstArgInfo.doMemPairOpt) ? firstIndex : *next; - ArgInfo secondArgInfo = GetArgInfo(movePara, numFpRegs, fpSize, secondIndex); - secondArgInfo.reg = (firstArgInfo.doMemPairOpt) ? pairReg[firstIndex] : movePara[secondIndex]; - secondArgInfo.symSize = (firstArgInfo.doMemPairOpt) ? firstArgInfo.memPairSecondRegSize : secondArgInfo.symSize; - secondArgInfo.symLoc = (firstArgInfo.doMemPairOpt) ? 
secondArgInfo.symLoc : - static_cast(aarFunc->GetMemlayout()->GetSymAllocInfo( - secondArgInfo.sym->GetStIndex())); - /* Make sure they are in same segment if want to use stp */ - if (((firstArgInfo.isTwoRegParm && secondArgInfo.isTwoRegParm) || - (!firstArgInfo.isTwoRegParm && !secondArgInfo.isTwoRegParm)) && - (firstArgInfo.doMemPairOpt || IsInSameSegment(firstArgInfo, secondArgInfo))) { - GenerateStpInsn(firstArgInfo, secondArgInfo); - if (!firstArgInfo.doMemPairOpt) { - it = next; - } - continue; - } + if (ploc.GetReg3() != kRinvalid) { + generateStrInsn(static_cast(ploc.GetReg3()), ploc.GetPrimTypeOfReg3()); } - GenerateStrInsn(firstArgInfo, pairReg[firstIndex], numFpRegs[firstIndex], fpSize[firstIndex]); } if (cgFunc->GetCG()->IsLmbc() && (cgFunc->GetSpSaveReg() != 0)) { @@ -329,7 +85,6 @@ void AArch64MoveRegArgs::MoveRegisterArgs() { /* Java requires insertion at begining as it has fast unwind and other features */ aarFunc->GetFirstBB()->InsertAtBeginning(*aarFunc->GetDummyBB()); } - aarFunc->SetCurBB(*formerCurBB); } void AArch64MoveRegArgs::MoveLocalRefVarToRefLocals(MIRSymbol &mirSym) const { @@ -375,6 +130,9 @@ void AArch64MoveRegArgs::LoadStackArgsToVReg(MIRSymbol &mirSym) const { } void AArch64MoveRegArgs::MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) const { + // when args parameter type i128, reg1 will be used. + // but, i128 is not supported in back-end + CHECK_FATAL(ploc.reg2 == kRinvalid, "NIY"); RegType regType = (ploc.reg0 < V0) ? 
kRegTyInt : kRegTyFloat; PrimType sType = mirSym.GetType()->GetPrimType(); uint32 byteSize = GetPrimTypeSize(sType); @@ -437,30 +195,19 @@ Insn &AArch64MoveRegArgs::CreateMoveArgsToVRegInsn(MOperator mOp, RegOperand &de } void AArch64MoveRegArgs::MoveVRegisterArgs() const { - BB *formerCurBB = aarFunc->GetCurBB(); aarFunc->GetDummyBB()->ClearInsns(); aarFunc->SetCurBB(*aarFunc->GetDummyBB()); AArch64CallConvImpl parmlocator(aarFunc->GetBecommon()); CCLocInfo ploc; - auto formalCount = static_cast(aarFunc->GetFunction().GetFormalCount()); - uint32 start = 0; - if (formalCount > 0) { - MIRFunction *func = aarFunc->GetBecommon().GetMIRModule().CurFunction(); - if (func->IsReturnStruct() && func->IsFirstArgReturn()) { - TyIdx tyIdx = func->GetFuncRetStructTyIdx(); - if (aarFunc->GetBecommon().GetTypeSize(tyIdx) <= k16BitSize) { - start = 1; - } - } - } - for (uint32 i = start; i < formalCount; ++i) { - MIRType *ty = aarFunc->GetFunction().GetNthParamType(i); - parmlocator.LocateNextParm(*ty, ploc, i == 0, &aarFunc->GetFunction()); - MIRSymbol *sym = aarFunc->GetFunction().GetFormal(i); + auto &mirFunc = aarFunc->GetFunction(); + for (size_t i = 0; i < mirFunc.GetFormalCount(); ++i) { + MIRType *ty = mirFunc.GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, (i == 0), mirFunc.GetMIRFuncType()); + MIRSymbol *sym = mirFunc.GetFormal(i); /* load locarefvar formals to store in the reflocals. 
*/ - if (aarFunc->GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar) && ploc.reg0 == kRinvalid) { + if (mirFunc.GetNthParamAttr(i).GetAttr(ATTR_localrefvar) && ploc.reg0 == kRinvalid) { MoveLocalRefVarToRefLocals(*sym); } @@ -483,6 +230,5 @@ void AArch64MoveRegArgs::MoveVRegisterArgs() const { /* Java requires insertion at begining as it has fast unwind and other features */ aarFunc->GetFirstBB()->InsertAtBeginning(*aarFunc->GetDummyBB()); } - aarFunc->SetCurBB(*formerCurBB); } } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp index a95de8680d171fd1071728e8e60ce0cf32e53825..ad4588eef35b86f71e970dd3ebc94526177e4539 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp @@ -19,660 +19,271 @@ namespace maplebe { using namespace maple; -namespace { -constexpr int kMaxRegCount = 4; - -/* - * Refer to ARM IHI 0055C_beta: Procedure Call Standard for - * ARM 64-bit Architecture. Table 1. 
- */ -enum AArch64ArgumentClass : uint8 { - kAArch64NoClass, - kAArch64IntegerClass, - kAArch64FloatClass, - kAArch64MemoryClass -}; - -int32 ProcessNonStructAndNonArrayWhenClassifyAggregate(const MIRType &mirType, - AArch64ArgumentClass classes[kMaxRegCount], - size_t classesLength) { - CHECK_FATAL(classesLength > 0, "classLength must > 0"); - /* scalar type */ - switch (mirType.GetPrimType()) { - case PTY_u1: - case PTY_u8: - case PTY_i8: - case PTY_u16: - case PTY_i16: - case PTY_a32: - case PTY_u32: - case PTY_i32: - case PTY_a64: - case PTY_ptr: - case PTY_ref: - case PTY_u64: - case PTY_i64: - classes[0] = kAArch64IntegerClass; - return 1; - case PTY_f32: - case PTY_f64: - case PTY_c64: - case PTY_c128: - classes[0] = kAArch64FloatClass; - return 1; - default: - CHECK_FATAL(false, "NYI"); +// external interface to look for pure float struct +uint32 AArch64CallConvImpl::FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) const { + PrimType baseType = PTY_begin; + size_t elemNum = 0; + if (!IsHomogeneousAggregates(structType, baseType, elemNum)) { + return 0; } - - /* should not reach to this point */ - return 0; + fpSize = GetPrimTypeSize(baseType); + return static_cast(elemNum); } -PrimType TraverseStructFieldsForFp(MIRType *ty, uint32 &numRegs) { - if (ty->GetKind() == kTypeArray) { - MIRArrayType *arrtype = static_cast(ty); - MIRType *pty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrtype->GetElemTyIdx()); - if (pty->GetKind() == kTypeArray || pty->GetKind() == kTypeStruct) { - return TraverseStructFieldsForFp(pty, numRegs); - } - for (uint32 i = 0; i < arrtype->GetDim(); ++i) { - numRegs += arrtype->GetSizeArrayItem(i); - } - return pty->GetPrimType(); - } else if (ty->GetKind() == kTypeStruct) { - MIRStructType *sttype = static_cast(ty); - FieldVector fields = sttype->GetFields(); - PrimType oldtype = PTY_void; - for (uint32 fcnt = 0; fcnt < fields.size(); ++fcnt) { - TyIdx fieldtyidx = fields[fcnt].second.first; - MIRType *fieldty = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); - PrimType ptype = TraverseStructFieldsForFp(fieldty, numRegs); - if (oldtype != PTY_void && oldtype != ptype) { - return PTY_void; - } else { - oldtype = ptype; - } - } - return oldtype; - } else { - numRegs++; - return ty->GetPrimType(); +static void AllocateHomogeneousAggregatesRegister(CCLocInfo &ploc, const AArch64reg *regList, + uint32 maxRegNum, PrimType baseType, + uint32 allocNum, uint32 begin = 0) { + CHECK_FATAL(allocNum + begin - 1 < maxRegNum, "NIY, out of range."); + if (allocNum >= kOneRegister) { + ploc.reg0 = regList[begin++]; + ploc.primTypeOfReg0 = baseType; } -} - -int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, AArch64ArgumentClass classes[kMaxRegCount], - size_t classesLength, uint32 &fpSize); - -uint32 ProcessStructAndUnionWhenClassifyAggregate(const BECommon &be, MIRStructType &structType, - AArch64ArgumentClass classes[kMaxRegCount], - size_t classesLength, uint32 &fpSize) { - CHECK_FATAL(classesLength > 0, "classLength must > 0"); - uint32 sizeOfTyInDwords = static_cast( - RoundUp(be.GetTypeSize(structType.GetTypeIndex()), k8ByteSize) >> k8BitShift); - bool isF32 = false; - bool isF64 = false; - uint32 numRegs = 0; - for (uint32 f = 0; f < structType.GetFieldsSize(); ++f) { - TyIdx fieldTyIdx = structType.GetFieldsElemt(f).second.first; - MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); - PrimType pType = TraverseStructFieldsForFp(fieldType, numRegs); - if (pType == PTY_f32) { - if (isF64) { - isF64 = false; - break; - } - isF32 = true; - } else if (pType == PTY_f64) { - if (isF32) { - isF32 = false; - break; - } - isF64 = true; - } else if (IsPrimitiveVector(pType)) { - isF64 = true; - break; - } else { - isF32 = isF64 = false; - break; - } + if (allocNum >= kTwoRegister) { + ploc.reg1 = regList[begin++]; + ploc.primTypeOfReg1 = baseType; } - if (isF32 || isF64) { - CHECK_FATAL(numRegs <= classesLength, "ClassifyAggregate: num regs 
exceed limit"); - for (uint32 i = 0; i < numRegs; ++i) { - classes[i] = kAArch64FloatClass; - } - - fpSize = isF32 ? k4ByteSize : k8ByteSize; - if (structType.GetKind() == kTypeUnion) { - /* For Union, numRegs is calculated for the maximum size element in this Union */ - return sizeOfTyInDwords; - } - return numRegs; + if (allocNum >= kThreeRegister) { + ploc.reg2 = regList[begin++]; + ploc.primTypeOfReg2 = baseType; } - - classes[0] = kAArch64IntegerClass; - if (sizeOfTyInDwords == kDwordSizeTwo) { - classes[1] = kAArch64IntegerClass; + if (allocNum >= kFourRegister) { + ploc.reg3 = regList[begin++]; + ploc.primTypeOfReg3 = baseType; } - ASSERT(sizeOfTyInDwords <= classesLength, "sizeOfTyInDwords exceed limit"); - return sizeOfTyInDwords; + ploc.regCount = allocNum; } -/* - * Analyze the given aggregate using the rules given by the ARM 64-bit ABI and - * return the number of doublewords to be passed in registers; the classes of - * the doublewords are returned in parameter "classes"; if 0 is returned, it - * means the whole aggregate is passed in memory. - */ -int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, AArch64ArgumentClass classes[kMaxRegCount], - size_t classesLength, uint32 &fpSize) { - CHECK_FATAL(classesLength > 0, "invalid index"); - uint64 sizeOfTy = be.GetTypeSize(mirType.GetTypeIndex()); - /* Rule B.3. - * If the argument type is a Composite Type that is larger than 16 bytes - * then the argument is copied to memory allocated by the caller and - * the argument is replaced by a pointer to the copy. - */ - if ((sizeOfTy > k16ByteSize) || (sizeOfTy == 0)) { - return 0; +void AArch64CallConvImpl::InitCCLocInfo(CCLocInfo &ploc) const { + ploc.Clear(); + ploc.memOffset = nextStackArgAdress; +} + +// instantiated with the type of the function return value, it describes how +// the return value is to be passed back to the caller +// +// Refer to Procedure Call Standard for the Arm 64-bit +// Architecture (AArch64) 2022Q3. 
$6.9 +// "If the type, T, of the result of a function is such that +// void func(T arg) +// would require that arg be passed as a value in a register (or set of registers) +// according to the rules in Parameter passing, then the result is returned in the +// same registers as would be used for such an argument." +void AArch64CallConvImpl::LocateRetVal(const MIRType &retType, CCLocInfo &ploc) const { + InitCCLocInfo(ploc); + size_t retSize = retType.GetSize(); + if (retSize == 0) { + return; // size 0 ret val } - /* - * An argument of any Integer class takes up an integer register - * which is a single double-word. - * Rule B.4. The size of an argument of composite type is rounded up to the nearest - * multiple of 8 bytes. - */ - int64 sizeOfTyInDwords = static_cast(RoundUp(sizeOfTy, k8ByteSize) >> k8BitShift); - ASSERT(sizeOfTyInDwords > 0, "sizeOfTyInDwords should be sizeOfTyInDwords > 0"); - ASSERT(sizeOfTyInDwords <= kTwoRegister, "sizeOfTyInDwords should be <= 2"); - int64 i; - for (i = 0; i < sizeOfTyInDwords; ++i) { - classes[i] = kAArch64NoClass; + PrimType primType = retType.GetPrimType(); + if (IsPrimitiveFloat(primType) || IsPrimitiveVector(primType)) { + // float or vector, return in v0 + ploc.reg0 = AArch64Abi::kFloatReturnRegs[0]; + ploc.primTypeOfReg0 = primType; + ploc.regCount = 1; + return; } - if ((mirType.GetKind() != kTypeStruct) && (mirType.GetKind() != kTypeArray) && (mirType.GetKind() != kTypeUnion)) { - return ProcessNonStructAndNonArrayWhenClassifyAggregate(mirType, classes, classesLength); + if (IsPrimitiveInteger(primType) && GetPrimTypeBitSize(primType) <= k64BitSize) { + // interger and size <= 64-bit, return in x0 + ploc.reg0 = AArch64Abi::kIntReturnRegs[0]; + ploc.primTypeOfReg0 = primType; + ploc.regCount = 1; + return; } - if (mirType.GetKind() == kTypeStruct || mirType.GetKind() == kTypeUnion) { - MIRStructType &structType = static_cast(mirType); - return static_cast(ProcessStructAndUnionWhenClassifyAggregate(be, structType, 
classes, - classesLength, fpSize)); + PrimType baseType = PTY_begin; + size_t elemNum = 0; + if (IsHomogeneousAggregates(retType, baseType, elemNum)) { + // homogeneous aggregates, return in v0-v3 + AllocateHomogeneousAggregatesRegister(ploc, AArch64Abi::kFloatReturnRegs, + AArch64Abi::kNumFloatParmRegs, baseType, static_cast(elemNum)); + return; } - /* post merger clean-up */ - for (i = 0; i < sizeOfTyInDwords; ++i) { - if (classes[i] == kAArch64MemoryClass) { - return 0; + if (retSize <= k16ByteSize) { + // agg size <= 16-byte or int128, return in x0-x1 + ploc.reg0 = AArch64Abi::kIntReturnRegs[0]; + ploc.primTypeOfReg0 = PTY_u64; + if (retSize > k8ByteSize) { + ploc.reg1 = AArch64Abi::kIntReturnRegs[1]; + ploc.primTypeOfReg1 = PTY_u64; } + ploc.regCount = retSize <= k8ByteSize ? kOneRegister : kTwoRegister; + return; } - return static_cast(sizeOfTyInDwords); } + +uint64 AArch64CallConvImpl::AllocateRegisterForAgg(const MIRType &mirType, CCLocInfo &ploc, + uint64 size, uint64 align) { + uint64 aggCopySize = 0; + PrimType baseType = PTY_begin; + size_t elemNum = 0; + if (IsHomogeneousAggregates(mirType, baseType, elemNum)) { + if ((nextFloatRegNO + elemNum - 1) < AArch64Abi::kNumFloatParmRegs) { + // C.2 If the argument is an HFA or an HVA and there are sufficient unallocated SIMD and + // Floating-point registers (NSRN + number of members <= 8), then the argument is + // allocated to SIMD and Floating-point registers (with one register per member of + // the HFA or HVA). The NSRN is incremented by the number of registers used. + // The argument has now been allocated + AllocateHomogeneousAggregatesRegister(ploc, AArch64Abi::kFloatReturnRegs, + AArch64Abi::kNumFloatParmRegs, baseType, elemNum, nextFloatRegNO); + nextFloatRegNO += elemNum; + } else { + // C.3 If the argument is an HFA or an HVA then the NSRN is set to 8 and the size of the + // argument is rounded up to the nearest multiple of 8 bytes. 
+ nextFloatRegNO = AArch64Abi::kNumFloatParmRegs; + ploc.reg0 = kRinvalid; + } + } else if (size <= k16ByteSize) { + // small struct, passed by general purpose register + AllocateGPRegister(mirType, ploc, size, align); + } else { + // large struct, a pointer to the copy is used + ploc.reg0 = AllocateGPRegister(); + ploc.primTypeOfReg0 = PTY_a64; + ploc.memSize = k8ByteSize; + aggCopySize = RoundUp(size, k8ByteSize); + } + return aggCopySize; } -/* external interface to look for pure float struct */ -uint32 AArch64CallConvImpl::FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) { - if (structType.GetSize() > k32ByteSize) { - return 0; +// allocate general purpose register +void AArch64CallConvImpl::AllocateGPRegister(const MIRType &mirType, CCLocInfo &ploc, + uint64 size, uint64 align) { + if (IsPrimitiveInteger(mirType.GetPrimType()) && size <= k8ByteSize) { + // C.9 If the argument is an Integral or Pointer Type, the size of the argument is less + // than or equal to 8 bytes and the NGRN is less than 8, the argument is copied to + // the least significant bits in x[NGRN]. The NGRN is incremented by one. + // The argument has now been allocated. + ploc.reg0 = AllocateGPRegister(); + ploc.primTypeOfReg0 = mirType.GetPrimType(); + return; } - AArch64ArgumentClass classes[kMaxRegCount]; - uint32 numRegs = ProcessStructAndUnionWhenClassifyAggregate(beCommon, structType, classes, kMaxRegCount, fpSize); - if (numRegs == 0) { - return 0; + if (align == k16ByteSize) { + // C.10 If the argument has an alignment of 16 then the NGRN is rounded up to the next + // even number. 
+ nextGeneralRegNO = (nextGeneralRegNO + 1U) & ~1U; } - - bool isPure = true; - for (uint i = 0; i < numRegs; ++i) { - CHECK_FATAL(i < kMaxRegCount, "i should be lower than kMaxRegCount"); - if (classes[i] != kAArch64FloatClass) { - isPure = false; - break; + if (mirType.GetPrimType() == PTY_i128 || mirType.GetPrimType() == PTY_u128) { + // C.11 If the argument is an Integral Type, the size of the argument is equal to 16 + // and the NGRN is less than 7, the argument is copied to x[NGRN] and x[NGRN+1]. + // x[NGRN] shall contain the lower addressed double-word of the memory + // representation of the argument. The NGRN is incremented by two. + // The argument has now been allocated. + if (nextGeneralRegNO < AArch64Abi::kNumIntParmRegs - 1) { + ASSERT(size == k16ByteSize, "NIY, size must be 16-byte."); + ploc.reg0 = AllocateGPRegister(); + ploc.primTypeOfReg0 = PTY_u64; + ploc.reg1 = AllocateGPRegister(); + ploc.primTypeOfReg1 = PTY_u64; + return; + } + } else if (size <= k16ByteSize) { + // C.12 If the argument is a Composite Type and the size in double-words of the argument + // is not more than 8 minus NGRN, then the argument is copied into consecutive + // general-purpose registers, starting at x[NGRN]. The argument is passed as though + // it had been loaded into the registers from a double-word-aligned address with + // an appropriate sequence of LDR instructions loading consecutive registers from + // memory (the contents of any unused parts of the registers are unspecified by this + // standard). The NGRN is incremented by the number of registers used. + // The argument has now been allocated. + ASSERT(mirType.GetPrimType() == PTY_agg, "NIY, primType must be PTY_agg."); + auto regNum = (size <= k8ByteSize) ? kOneRegister : kTwoRegister; + if (nextGeneralRegNO + regNum - 1 < AArch64Abi::kNumIntParmRegs) { + ploc.reg0 = AllocateGPRegister(); + ploc.primTypeOfReg0 = (size <= k4ByteSize && !CGOptions::IsBigEndian()) ? 
PTY_u32 : PTY_u64; + if (regNum == kTwoRegister) { + ploc.reg1 = AllocateGPRegister(); + ploc.primTypeOfReg1 = + (size <= k12ByteSize && !CGOptions::IsBigEndian()) ? PTY_u32 : PTY_u64; + } + return; } } - if (isPure) { - return numRegs; - } - return 0; -} -void AArch64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const { - pLoc.reg0 = kRinvalid; - pLoc.reg1 = kRinvalid; - pLoc.reg2 = kRinvalid; - pLoc.reg3 = kRinvalid; - pLoc.memOffset = nextStackArgAdress; - pLoc.fpSize = 0; - pLoc.numFpPureRegs = 0; + // C.13 The NGRN is set to 8. + ploc.reg0 = kRinvalid; + nextGeneralRegNO = AArch64Abi::kNumIntParmRegs; } -int32 AArch64CallConvImpl::LocateRetVal(MIRType &retType, CCLocInfo &pLoc) { - InitCCLocInfo(pLoc); - uint32 retSize = beCommon.GetTypeSize(retType.GetTypeIndex().GetIdx()); - if (retSize == 0) { - return 0; /* size 0 ret val */ +static void SetupCCLocInfoRegCount(CCLocInfo &ploc) { + if (ploc.reg0 == kRinvalid) { + return; } - if (retSize <= k16ByteSize) { - /* For return struct size less or equal to 16 bytes, the values */ - /* are returned in register pairs. */ - AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; /* Max of four floats. */ - uint32 fpSize; - uint32 numRegs = static_cast(ClassifyAggregate(beCommon, retType, classes, sizeof(classes), fpSize)); - if (classes[0] == kAArch64FloatClass) { - CHECK_FATAL(numRegs <= kMaxRegCount, "LocateNextParm: illegal number of regs"); - AllocateNSIMDFPRegisters(pLoc, numRegs); - pLoc.numFpPureRegs = numRegs; - pLoc.fpSize = fpSize; - return 0; - } else { - CHECK_FATAL(numRegs <= kTwoRegister, "LocateNextParm: illegal number of regs"); - if (numRegs == kOneRegister) { - pLoc.reg0 = AllocateGPRegister(); - } else { - AllocateTwoGPRegisters(pLoc); - } - return 0; - } - } else { - /* For return struct size > 16 bytes the pointer returns in x8. 
*/ - pLoc.reg0 = R8; - return GetPointerSize(); + ploc.regCount = kOneRegister; + if (ploc.reg1 == kRinvalid) { + return; + } + ploc.regCount++; + if (ploc.reg2 == kRinvalid) { + return; + } + ploc.regCount++; + if (ploc.reg3 == kRinvalid) { + return; } + ploc.regCount++; } -/* - * Refer to ARM IHI 0055C_beta: Procedure Call Standard for - * the ARM 64-bit Architecture. $5.4.2 - * - * For internal only functions, we may want to implement - * our own rules as Apple IOS has done. Maybe we want to - * generate two versions for each of externally visible functions, - * one conforming to the ARM standard ABI, and the other for - * internal only use. - * - * LocateNextParm should be called with each parameter in the parameter list - * starting from the beginning, one call per parameter in sequence; it returns - * the information on how each parameter is passed in pLoc - * - * *** CAUTION OF USE: *** - * If LocateNextParm is called for function formals, third argument isFirst is true. - * LocateNextParm is then checked against a function parameter list. All other calls - * of LocateNextParm are against caller's argument list must not have isFirst set, - * or it will be checking the caller's enclosing function. - */ -int32 AArch64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFunction *tFunc) { - InitCCLocInfo(pLoc); +// Refer to Procedure Call Standard for the Arm 64-bit +// Architecture (AArch64) 2022Q3. $6.8.2 +// +// LocateNextParm should be called with each parameter in the parameter list +// starting from the beginning, one call per parameter in sequence; it returns +// the information on how each parameter is passed in ploc +// +// *** CAUTION OF USE: *** +// If LocateNextParm is called for function formals, third argument isFirst is true. +// LocateNextParm is then checked against a function parameter list. 
All other calls +// of LocateNextParm are against caller's argument list must not have isFirst set, +// or it will be checking the caller's enclosing function. +uint64 AArch64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &ploc, bool isFirst, MIRFuncType *tFunc) { + InitCCLocInfo(ploc); - bool is64x1vec = false; - if (tFunc != nullptr && tFunc->GetParamSize() > 0) { - is64x1vec = tFunc->GetNthParamAttr(paramNum).GetAttr(ATTR_oneelem_simd) != 0; - } - - if (isFirst) { - MIRFunction *func = tFunc != nullptr ? tFunc : beCommon.GetMIRModule().CurFunction(); - if (func->IsFirstArgReturn()) { - TyIdx tyIdx = func->GetFuncRetStructTyIdx(); - size_t size = beCommon.GetTypeSize(tyIdx); - if (size == 0) { - /* For return struct size 0 there is no return value. */ - return 0; - } - /* For return struct size > 16 bytes the pointer returns in x8. */ - pLoc.reg0 = R8; - return GetPointerSize(); - } - } - uint64 typeSize = beCommon.GetTypeSize(mirType.GetTypeIndex()); + uint64 typeSize = mirType.GetSize(); if (typeSize == 0) { return 0; } - int32 typeAlign = beCommon.GetTypeAlign(mirType.GetTypeIndex()); - /* - * Rule C.12 states that we do round nextStackArgAdress up before we use its value - * according to the alignment requirement of the argument being processed. - * We do the rounding up at the end of LocateNextParm(), - * so we want to make sure our rounding up is correct. 
- */ - ASSERT((nextStackArgAdress & (std::max(typeAlign, static_cast(k8ByteSize)) - 1)) == 0 || - (typeAlign == k16ByteSize && ((nextStackArgAdress & (static_cast(k16ByteSize) - 1)) % k8ByteSize == 0)), - "C.12 alignment requirement is violated"); - pLoc.memSize = static_cast(typeSize); - ++paramNum; - int32 aggCopySize = 0; - switch (mirType.GetPrimType()) { - case PTY_u1: - case PTY_u8: - case PTY_i8: - case PTY_u16: - case PTY_i16: - case PTY_a32: - case PTY_u32: - case PTY_i32: - case PTY_ptr: - case PTY_ref: - case PTY_a64: - case PTY_u64: - case PTY_i64: - case PTY_i128: - case PTY_u128: - /* Rule C.7 */ - typeSize = k8ByteSize; - pLoc.reg0 = is64x1vec ? AllocateSIMDFPRegister() : AllocateGPRegister(); - ASSERT(nextGeneralRegNO <= AArch64Abi::kNumIntParmRegs, "RegNo should be pramRegNO"); - break; - /* - * for c64 complex numbers, we assume - * - callers marshall the two f32 numbers into one f64 register - * - callees de-marshall one f64 value into the real and the imaginery part - */ - case PTY_f32: - case PTY_f64: - case PTY_c64: - case PTY_v2i32: - case PTY_v4i16: - case PTY_v8i8: - case PTY_v2u32: - case PTY_v4u16: - case PTY_v8u8: - case PTY_v2f32: - case PTY_v1i64: - case PTY_v1u64: - /* Rule C.1 */ - ASSERT(GetPrimTypeSize(PTY_f64) == k8ByteSize, "unexpected type size"); - typeSize = k8ByteSize; - pLoc.reg0 = AllocateSIMDFPRegister(); - break; - /* - * for c128 complex numbers, we assume - * - callers marshall the two f64 numbers into one f128 register - * - callees de-marshall one f128 value into the real and the imaginery part - */ - case PTY_f128: - case PTY_c128: - case PTY_v2i64: - case PTY_v4i32: - case PTY_v8i16: - case PTY_v16i8: - case PTY_v2u64: - case PTY_v4u32: - case PTY_v8u16: - case PTY_v16u8: - case PTY_v2f64: - case PTY_v4f32: - /* SIMD-FP registers have 128-bits. 
*/ - pLoc.reg0 = AllocateSIMDFPRegister(); - ASSERT(nextFloatRegNO <= AArch64Abi::kNumFloatParmRegs, "regNO should not be greater than kNumFloatParmRegs"); - ASSERT(typeSize == k16ByteSize, "unexpected type size"); - break; - /* - * case of quad-word integer: - * we don't support java yet. - * if (has-16-byte-alignment-requirement) - * nextGeneralRegNO = (nextGeneralRegNO+1) & ~1; // C.8 round it up to the next even number - * try allocate two consecutive registers at once. - */ - /* case PTY_agg */ - case PTY_agg: { - aggCopySize = ProcessPtyAggWhenLocateNextParm(mirType, pLoc, typeSize, typeAlign); - break; + if (isFirst) { + auto *func = (tFunc != nullptr) ? tFunc : + beCommon.GetMIRModule().CurFunction()->GetMIRFuncType(); + if (func->FirstArgReturn()) { + // For return struct in memory, the pointer returns in x8. + SetupToReturnThroughMemory(ploc); + return GetPointerSize(); } - default: - CHECK_FATAL(false, "NYI"); } - /* Rule C.12 */ - if (pLoc.reg0 == kRinvalid) { - /* being passed in memory */ - nextStackArgAdress = pLoc.memOffset + static_cast(static_cast(typeSize)); - } - return aggCopySize; -} + uint64 typeAlign = mirType.GetAlign(); -int32 AArch64CallConvImpl::ProcessPtyAggWhenLocateNextParm(MIRType &mirType, CCLocInfo &pLoc, uint64 &typeSize, - int32 typeAlign) { - /* - * In AArch64, integer-float or float-integer - * argument passing is not allowed. All should go through - * integer-integer. - * In the case where a struct is homogeneous composed of one of the fp types, - * either all single fp or all double fp, then it can be passed by float-float. - */ - AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; - typeSize = beCommon.GetTypeSize(mirType.GetTypeIndex().GetIdx()); - int32 aggCopySize = 0; - if (typeSize > k16ByteSize) { - aggCopySize = static_cast(RoundUp(typeSize, GetPointerSize())); - } - /* - * alignment requirement - * Note. This is one of a few things iOS diverges from - * the ARM 64-bit standard. 
They don't observe the round-up requirement. - */ - if (typeAlign == k16ByteSizeInt) { - RoundNGRNUpToNextEven(); - } + ploc.memSize = static_cast(typeSize); - uint32 fpSize; - uint32 numRegs = static_cast( - ClassifyAggregate(beCommon, mirType, classes, sizeof(classes) / sizeof(AArch64ArgumentClass), fpSize)); - if (classes[0] == kAArch64FloatClass) { - CHECK_FATAL(numRegs <= kMaxRegCount, "LocateNextParm: illegal number of regs"); - typeSize = k8ByteSize; - AllocateNSIMDFPRegisters(pLoc, numRegs); - pLoc.numFpPureRegs = numRegs; - pLoc.fpSize = fpSize; - } else if (numRegs == 1) { - /* passing in registers */ - typeSize = k8ByteSize; - if (classes[0] == kAArch64FloatClass) { - CHECK_FATAL(false, "param passing in FP reg not allowed here"); - } else { - pLoc.reg0 = AllocateGPRegister(); - /* Rule C.11 */ - ASSERT((pLoc.reg0 != kRinvalid) || (nextGeneralRegNO == AArch64Abi::kNumIntParmRegs), - "reg0 should not be kRinvalid or nextGeneralRegNO should equal kNumIntParmRegs"); - } - } else if (numRegs == kTwoRegister) { - /* Other aggregates with 8 < size <= 16 bytes can be allocated in reg pair */ - ASSERT(classes[0] == kAArch64IntegerClass || classes[0] == kAArch64NoClass, - "classes[0] must be either integer class or no class"); - ASSERT(classes[1] == kAArch64IntegerClass || classes[1] == kAArch64NoClass, - "classes[1] must be either integer class or no class"); - AllocateTwoGPRegisters(pLoc); - /* Rule C.11 */ - if (pLoc.reg0 == kRinvalid) { - nextGeneralRegNO = AArch64Abi::kNumIntParmRegs; - } + uint64 aggCopySize = 0; + if (IsPrimitiveFloat(mirType.GetPrimType()) || IsPrimitiveVector(mirType.GetPrimType())) { + // float or vector, passed by float or SIMD register + ploc.reg0 = AllocateSIMDFPRegister(); + ploc.primTypeOfReg0 = mirType.GetPrimType(); + } else if (IsPrimitiveInteger(mirType.GetPrimType())) { + // integer, passed by general purpose register + AllocateGPRegister(mirType, ploc, typeSize, typeAlign); } else { - /* - * 0 returned from 
ClassifyAggregate(). This means the whole data - * is passed thru memory. - * Rule B.3. - * If the argument type is a Composite Type that is larger than 16 - * bytes then the argument is copied to memory allocated by the - * caller and the argument is replaced by a pointer to the copy. - * - * Try to allocate an integer register - */ - typeSize = k8ByteSize; - pLoc.reg0 = AllocateGPRegister(); - pLoc.memSize = k8ByteSizeInt; /* byte size of a pointer in AArch64 */ - if (pLoc.reg0 != kRinvalid) { - numRegs = 1; - } + CHECK_FATAL(mirType.GetPrimType() == PTY_agg, "NIY"); + aggCopySize = AllocateRegisterForAgg(mirType, ploc, typeSize, typeAlign); } - /* compute rightpad */ - if ((numRegs == 0) || (pLoc.reg0 == kRinvalid)) { - /* passed in memory */ - typeSize = RoundUp(static_cast(static_cast(pLoc.memSize)), k8ByteSize); - } - return aggCopySize; -} - -/* - * instantiated with the type of the function return value, it describes how - * the return value is to be passed back to the caller - * - * Refer to ARM IHI 0055C_beta: Procedure Call Standard for - * the ARM 64-bit Architecture. $5.5 - * "If the type, T, of the result of a function is such that - * void func(T arg) - * would require that 'arg' be passed as a value in a register - * (or set of registers) according to the rules in $5.4 Parameter - * Passing, then the result is returned in the same registers - * as would be used for such an argument. - */ -void AArch64CallConvImpl::InitReturnInfo(MIRType &retTy, CCLocInfo &ccLocInfo) { - PrimType pType = retTy.GetPrimType(); - switch (pType) { - case PTY_void: - break; - case PTY_u1: - case PTY_u8: - case PTY_i8: - case PTY_u16: - case PTY_i16: - case PTY_a32: - case PTY_u32: - case PTY_i32: - ccLocInfo.regCount = 1; - ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; - ccLocInfo.primTypeOfReg0 = IsSignedInteger(pType) ? 
PTY_i32 : PTY_u32; /* promote the type */ - return; - - case PTY_ptr: - case PTY_ref: - CHECK_FATAL(false, "PTY_ptr should have been lowered"); - return; - case PTY_a64: - case PTY_u64: - case PTY_i64: - case PTY_i128: - case PTY_u128: - ccLocInfo.regCount = 1; - ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; - ccLocInfo.primTypeOfReg0 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; /* promote the type */ - return; - - /* - * for c64 complex numbers, we assume - * - callers marshall the two f32 numbers into one f64 register - * - callees de-marshall one f64 value into the real and the imaginery part - */ - case PTY_f32: - case PTY_f64: - case PTY_c64: - case PTY_v2i32: - case PTY_v4i16: - case PTY_v8i8: - case PTY_v2u32: - case PTY_v4u16: - case PTY_v8u8: - case PTY_v2f32: - case PTY_v1i64: - case PTY_v1u64: - - /* - * for c128 complex numbers, we assume - * - callers marshall the two f64 numbers into one f128 register - * - callees de-marshall one f128 value into the real and the imaginery part - */ - case PTY_f128: - case PTY_c128: - case PTY_v2i64: - case PTY_v4i32: - case PTY_v8i16: - case PTY_v16i8: - case PTY_v2u64: - case PTY_v4u32: - case PTY_v8u16: - case PTY_v16u8: - case PTY_v2f64: - case PTY_v4f32: - ccLocInfo.regCount = 1; - ccLocInfo.reg0 = AArch64Abi::kFloatReturnRegs[0]; - ccLocInfo.primTypeOfReg0 = pType; - return; - - /* - * Refer to ARM IHI 0055C_beta: Procedure Call Standard for - * the ARM 64-bit Architecture. $5.5 - * "Otherwise, the caller shall reserve a block of memory of - * sufficient size and alignment to hold the result. The - * address of the memory block shall be passed as an additional - * argument to the function in x8. The callee may modify the - * result memory block at any point during the execution of the - * subroutine (there is no requirement for the callee to preserve - * the value stored in x8)." 
- */ - case PTY_agg: { - uint64 size = beCommon.GetTypeSize(retTy.GetTypeIndex()); - if ((size > k16ByteSize) || (size == 0)) { - /* - * The return value is returned via memory. - * The address is in X8 and passed by the caller. - */ - SetupToReturnThroughMemory(ccLocInfo); - return; - } - uint32 fpSize; - AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; - ccLocInfo.regCount = static_cast(ClassifyAggregate(beCommon, retTy, classes, - sizeof(classes) / sizeof(AArch64ArgumentClass), fpSize)); - if (classes[0] == kAArch64FloatClass) { - switch (ccLocInfo.regCount) { - case kFourRegister: - ccLocInfo.reg3 = AArch64Abi::kFloatReturnRegs[3]; - break; - case kThreeRegister: - ccLocInfo.reg2 = AArch64Abi::kFloatReturnRegs[2]; - break; - case kTwoRegister: - ccLocInfo.reg1 = AArch64Abi::kFloatReturnRegs[1]; - break; - case kOneRegister: - ccLocInfo.reg0 = AArch64Abi::kFloatReturnRegs[0]; - break; - default: - CHECK_FATAL(0, "AArch64CallConvImpl: unsupported"); - } - if (fpSize == k4ByteSize) { - ccLocInfo.primTypeOfReg0 = ccLocInfo.primTypeOfReg1 = PTY_f32; - } else { - ccLocInfo.primTypeOfReg0 = ccLocInfo.primTypeOfReg1 = PTY_f64; - } - return; - } else if (ccLocInfo.regCount == 0) { - SetupToReturnThroughMemory(ccLocInfo); - return; - } else { - if (ccLocInfo.regCount == 1) { - /* passing in registers */ - if (classes[0] == kAArch64FloatClass) { - ccLocInfo.reg0 = AArch64Abi::kFloatReturnRegs[0]; - ccLocInfo.primTypeOfReg0 = PTY_f64; - } else { - ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; - ccLocInfo.primTypeOfReg0 = PTY_i64; - } - } else { - ASSERT(ccLocInfo.regCount <= k2ByteSize, "reg count from ClassifyAggregate() should be 0, 1, or 2"); - ASSERT(classes[0] == kAArch64IntegerClass, "error val :classes[0]"); - ASSERT(classes[1] == kAArch64IntegerClass, "error val :classes[1]"); - ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; - ccLocInfo.primTypeOfReg0 = PTY_i64; - ccLocInfo.reg1 = AArch64Abi::kIntReturnRegs[1]; - ccLocInfo.primTypeOfReg1 = 
PTY_i64; - } - return; - } - } - default: - CHECK_FATAL(false, "NYI"); + SetupCCLocInfoRegCount(ploc); + if (ploc.reg0 == kRinvalid) { + // being passed in memory + typeAlign = (typeAlign <= k8ByteSize) ? k8ByteSize : typeAlign; + nextStackArgAdress = RoundUp(nextStackArgAdress, typeAlign); + ploc.memOffset = static_cast(nextStackArgAdress); + // large struct, passed with pointer + nextStackArgAdress += (aggCopySize != 0 ? k8ByteSize : typeSize); } + return aggCopySize; } -void AArch64CallConvImpl::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const { - ASSERT(pLoc.reg1 == kRinvalid, "make sure reg1 equal kRinvalid"); +void AArch64CallConvImpl::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &ploc) const { + ASSERT(ploc.reg1 == kRinvalid, "make sure reg1 equal kRinvalid"); PrimType pType = retTy2.GetPrimType(); switch (pType) { case PTY_void: @@ -690,62 +301,11 @@ void AArch64CallConvImpl::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pL case PTY_a64: case PTY_u64: case PTY_i64: - pLoc.reg1 = AArch64Abi::kIntReturnRegs[1]; - pLoc.primTypeOfReg1 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; /* promote the type */ + ploc.reg1 = AArch64Abi::kIntReturnRegs[1]; + ploc.primTypeOfReg1 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; // promote the type break; default: CHECK_FATAL(false, "NYI"); } } - -/* - * From "ARM Procedure Call Standard for ARM 64-bit Architecture" - * ARM IHI 0055C_beta, 6th November 2013 - * $ 5.1 machine Registers - * $ 5.1.1 General-Purpose Registers - * Note - * SP Stack Pointer - * R30/LR Link register Stores the return address. - * We push it into stack along with FP on function - * entry using STP and restore it on function exit - * using LDP even if the function is a leaf (i.e., - * it does not call any other function) because it - * is free (we have to store FP anyway). So, if a - * function is a leaf, we may use it as a temporary - * register. 
- * R29/FP Frame Pointer - * R19-R28 Callee-saved - * registers - * R18 Platform reg Can we use it as a temporary register? - * R16,R17 IP0,IP1 Maybe used as temporary registers. Should be - * given lower priorities. (i.e., we push them - * into the free register stack before the others) - * R9-R15 Temporary registers, caller-saved - * Note: - * R16 and R17 may be used by a linker as a scratch register between - * a routine and any subroutine it calls. They can also be used within a - * routine to hold intermediate values between subroutine calls. - * - * The role of R18 is platform specific. If a platform ABI has need of - * a dedicated general purpose register to carry inter-procedural state - * (for example, the thread context) then it should use this register for - * that purpose. If the platform ABI has no such requirements, then it should - * use R18 as an additional temporary register. The platform ABI specification - * must document the usage for this register. - * - * A subroutine invocation must preserve the contents of the registers R19-R29 - * and SP. All 64 bits of each value stored in R19-R29 must be preserved, even - * when using the ILP32 data model. - * - * $ 5.1.2 SIMD and Floating-Point Registers - * - * The first eight registers, V0-V7, are used to pass argument values into - * a subroutine and to return result values from a function. They may also - * be used to hold intermediate values within a routine. - * - * V8-V15 must be preserved by a callee across subroutine calls; the - * remaining registers do not need to be preserved( or caller-saved). - * Additionally, only the bottom 64 bits of each value stored in V8- - * V15 need to be preserved. 
- */ } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp index 1048dc54911647dc82221e97aa52cedd69400183..4397cba6b81bfd6cb1b57802d1e0b2300ddfb3c1 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp @@ -20,8 +20,10 @@ namespace maplebe { /* Initialize cfg optimization patterns */ void AArch64CFGOptimizer::InitOptimizePatterns() { (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); - (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); - AArch64FlipBRPattern *brOpt = memPool->New(*cgFunc); + if (cgFunc->GetMirModule().IsCModule()) { + (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + } + auto *brOpt = memPool->New(*cgFunc); if (GetPhase() == kCfgoPostRegAlloc) { brOpt->SetPhase(kCfgoPostRegAlloc); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfi_generator.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfi_generator.cpp index 8a9bd188958e550501588f247595734d5ead73ae..e265aa84dc7df4e4be98992c29719ea65971a463 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfi_generator.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfi_generator.cpp @@ -17,20 +17,21 @@ #include "aarch64_cgfunc.h" namespace maplebe { void AArch64GenCfi::GenerateRegisterSaveDirective(BB &bb) { - int32 stackFrameSize = static_cast( + auto stackFrameSize = static_cast( static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); - int32 argsToStkPassSize = static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); + auto argsToStkPassSize = static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); int32 cfiOffset = stackFrameSize; Insn &stackDefNextInsn = FindStackDefNextInsn(bb); InsertCFIDefCfaOffset(bb, stackDefNextInsn, cfiOffset); cfiOffset = static_cast(GetOffsetFromCFA() - argsToStkPassSize); - AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + auto &aarchCGFunc = 
static_cast(cgFunc); if (useFP) { (void)bb.InsertInsnBefore(stackDefNextInsn, aarchCGFunc.CreateCfiOffsetInsn(stackBaseReg, -cfiOffset, k64BitSize)); } + int32 RLROffset = static_cast(cgFunc).GetStoreFP() ? kOffset8MemPos : 0; (void)bb.InsertInsnBefore(stackDefNextInsn, - aarchCGFunc.CreateCfiOffsetInsn(RLR, -cfiOffset + kOffset8MemPos, k64BitSize)); + aarchCGFunc.CreateCfiOffsetInsn(RLR, -cfiOffset + RLROffset, k64BitSize)); /* change CFA register and offset */ if (useFP) { @@ -58,9 +59,10 @@ void AArch64GenCfi::GenerateRegisterSaveDirective(BB &bb) { } auto it = regsToSave.begin(); - /* skip the first two registers */ - CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); - ++it; + // skip the RFP + if (*it == RFP) { + ++it; + } CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); ++it; int32 offset = cgFunc.GetMemlayout()->GetCalleeSaveBaseLoc(); @@ -83,8 +85,10 @@ void AArch64GenCfi::GenerateRegisterRestoreDirective(BB &bb) { CHECK_NULL_FATAL(returnInsn); if (!regsToSave.empty()) { auto it = regsToSave.begin(); - CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); - ++it; + // skip the RFP + if (*it == RFP ) { + ++it; + } CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); ++it; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp index aead8709591e217329050cb97d1af280b9524a75..b7dd5a37d387df92271b8d0cebac0f6f804b8266 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp @@ -13,18 +13,18 @@ * See the Mulan PSL v2 for more details. */ #include "aarch64_cg.h" +#include "aarch64_mop_split.h" +#include "aarch64_mop_valid.h" #include "mir_builder.h" #include "becommon.h" #include "label_creation.h" #include "alignment.h" namespace maplebe { -#include "immvalid.def" #define DEFINE_MOP(...) 
{__VA_ARGS__}, const InsnDesc AArch64CG::kMd[kMopLast] = { #include "abstract_mmir.def" #include "aarch64_md.def" -#include "aarch64_mem_md.def" }; #undef DEFINE_MOP @@ -351,7 +351,8 @@ void AArch64CG::EnrollTargetPhases(MaplePhaseManager *pm) const { Insn &AArch64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) { ASSERT(defOpnd.IsRegister(), "build SSA on register operand"); - CHECK_FATAL(defOpnd.IsOfIntClass() || defOpnd.IsOfFloatOrSIMDClass(), " unknown operand type "); + /* There are cases that CCRegs need add phi insn. */ + CHECK_FATAL(defOpnd.IsOfIntClass() || defOpnd.IsOfFloatOrSIMDClass() || defOpnd.IsOfCC(), " unknown operand type "); bool is64bit = defOpnd.GetSize() == k64BitSize; MOperator mop = MOP_nop; if (defOpnd.GetSize() == k128BitSize) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index 17554f2f6fab041fb5d1ae694c0afed8f79fde1f..5306a4849c280cdcdff650782fb527798cc3bdcd 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -12,6 +12,7 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ +#include "aarch64_cgfunc.h" #include #include #include @@ -27,7 +28,6 @@ #include "metadata_layout.h" #include "emit.h" #include "simplify.h" -#include "aarch64_cgfunc.h" #include "cg_irbuilder.h" namespace maplebe { @@ -52,7 +52,7 @@ CondOperand AArch64CGFunc::ccOperands[kCcLast] = { CondOperand(CC_AL), }; -Operand *AArch64CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr) { +Operand *AArch64CGFunc::AArchHandleExpr(const BaseNode &parent, BaseNode &expr) { #ifdef NEWCG Operand *opnd; if (CGOptions::UseNewCg()) { @@ -436,6 +436,14 @@ void AArch64CGFunc::SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype) { uint32 dsize = GetPrimTypeBitSize(dtype); + // If the type size of the parent node is smaller than the type size of the child node, + // the number of child node needs to be truncated. + if (dsize < src.GetSize()) { + uint64 value = static_cast(src.GetValue()); + uint64 mask = (1UL << dsize) - 1; + int64 newValue = static_cast(value & mask); + src.SetValue(newValue); + } ASSERT(IsPrimitiveInteger(dtype), "The type of destination operand must be Integer"); ASSERT(((dsize == k8BitSize) || (dsize == k16BitSize) || (dsize == k32BitSize) || (dsize == k64BitSize)), "The destination operand must be >= 8-bit"); @@ -447,7 +455,7 @@ void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype return; } if (src.IsSingleInstructionMovable()) { - MOperator mOp = (dsize == k32BitSize) ? MOP_wmovri32 : MOP_xmovri64; + MOperator mOp = (dsize <= k32BitSize) ? MOP_wmovri32 : MOP_xmovri64; GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, src)); return; } @@ -891,34 +899,40 @@ bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint3 } } -bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) { +// This api is used to judge whether opnd is legal for mop. 
+// It is implemented by calling verify api of mop (InsnDesc -> Verify). +bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) const { const InsnDesc *md = &AArch64CG::kMd[mOp]; auto *opndProp = md->opndMD[opndIdx]; - + MemPool *localMp = memPoolCtrler.NewMemPool("opnd verify mempool", true); + auto *localAlloc = new MapleAllocator(localMp); + MapleVector testOpnds(md->opndMD.size(), localAlloc->Adapter()); + testOpnds[opndIdx] = o; + bool flag = true; Operand::OperandType opndTy = opndProp->GetOperandType(); if (opndTy == Operand::kOpdMem) { auto *memOpnd = static_cast(o); CHECK_FATAL(memOpnd != nullptr, "memOpnd should not be nullptr"); if (memOpnd->GetAddrMode() == MemOperand::kBOR) { + delete localAlloc; + memPoolCtrler.DeleteMemPool(localMp); return true; } OfstOperand *ofStOpnd = memOpnd->GetOffsetImmediate(); int64 offsetValue = ofStOpnd ? ofStOpnd->GetOffsetValue() : 0LL; if (md->IsLoadStorePair() || (memOpnd->GetAddrMode() == MemOperand::kBOI)) { - if (ofStOpnd && ofStOpnd->GetVary() == kUnAdjustVary) { - offsetValue += static_cast(static_cast(GetMemlayout())->RealStackFrameSize() + - 0xffL); - } - return md->IsValidImmOpnd(offsetValue); + flag = md->Verify(testOpnds); } else if (memOpnd->GetAddrMode() == MemOperand::kLo12Li) { - return offsetValue == 0; + flag = offsetValue == 0; } else if (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed()) { - return (offsetValue <= static_cast(k256BitSizeInt) && offsetValue >= kNegative256BitSize); + flag = (offsetValue <= static_cast(k256BitSizeInt) && offsetValue >= kNegative256BitSize); } } else if (opndTy == Operand::kOpdImmediate) { - return md->IsValidImmOpnd(static_cast(o)->GetValue()); + flag = md->Verify(testOpnds); } - return true; + delete localAlloc; + memPoolCtrler.DeleteMemPool(localMp); + return flag; } MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, @@ -948,7 +962,7 @@ RegOperand *AArch64CGFunc::GetBaseRegForSplit(uint32 baseRegNum) { resOpnd = 
&CreateRegisterOperandOfType(PTY_i64); } else if (AArch64isa::IsPhysicalRegister(baseRegNum)) { resOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(baseRegNum), - GetPointerSize() * kBitsPerByte, kRegTyInt); + GetPointerBitSize(), kRegTyInt); } else { resOpnd = &GetOrCreateVirtualRegisterOperand(baseRegNum); } @@ -1123,7 +1137,8 @@ void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPTyp if (fieldId != 0) { MIRStructType *structType = static_cast(symbol->GetType()); ASSERT(structType != nullptr, "SelectDassign: non-zero fieldID for non-structure"); - offset = GetBecommon().GetFieldOffset(*structType, fieldId).first; + offset = structType->GetKind() == kTypeClass ? GetBecommon().GetJClassFieldOffset(*structType, fieldId).byteOffset : + structType->GetFieldOffsetFromBaseAddr(fieldId).byteOffset; parmCopy = IsParamStructCopy(*symbol); } uint32 regSize = GetPrimTypeBitSize(rhsPType); @@ -1214,7 +1229,7 @@ void AArch64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { } void AArch64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { - Operand *opnd0 = HandleExpr(stmt, *stmt.Opnd(0)); + Operand *opnd0 = AArchHandleExpr(stmt, *stmt.Opnd(0)); RegOperand &baseReg = LoadIntoRegister(*opnd0, PTY_a64); auto &zwr = GetZeroOpnd(k32BitSize); auto &mem = CreateMemOpnd(baseReg, 0, k32BitSize); @@ -1227,9 +1242,9 @@ void AArch64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { } void AArch64CGFunc::SelectAbort() { - RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); - auto &mem = CreateMemOpnd(inOpnd, 0, k64BitSize); - Insn &movXzr = GetInsnBuilder()->BuildInsn(MOP_xmovri64, inOpnd, CreateImmOperand(0, k64BitSize, false)); + RegOperand *inOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k64BitSize), k64BitSize, kRegTyInt); + auto &mem = CreateMemOpnd(*inOpnd, 0, k64BitSize); + Insn &movXzr = GetInsnBuilder()->BuildInsn(MOP_xmovri64, *inOpnd, CreateImmOperand(0, k64BitSize, false)); Insn &loadRef = 
GetInsnBuilder()->BuildInsn(MOP_wldr, GetZeroOpnd(k64BitSize), mem); loadRef.SetDoNotRemove(true); movXzr.SetDoNotRemove(true); @@ -1372,8 +1387,8 @@ void AArch64CGFunc::SelectAsm(AsmNode &node) { } case OP_add: { BinaryNode *addNode = static_cast(node.Opnd(i)); - Operand *inOpnd = SelectAdd(*addNode, *HandleExpr(*addNode, *addNode->Opnd(0)), - *HandleExpr(*addNode, *addNode->Opnd(1)), node); + Operand *inOpnd = SelectAdd(*addNode, *AArchHandleExpr(*addNode, *addNode->Opnd(0)), + *AArchHandleExpr(*addNode, *addNode->Opnd(1)), node); listInputOpnd->PushOpnd(static_cast(*inOpnd)); PrimType pType = addNode->GetPrimType(); listInRegPrefix->stringList.push_back(static_cast(&CreateStringOperand( @@ -1666,9 +1681,7 @@ MemOperand *AArch64CGFunc::FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, u MemOperand *AArch64CGFunc::GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 align, int64 offset, bool needLow12) { MemOperand *memOpnd = nullptr; uint32 memSize = align * kBitsPerByte; - if (sym.GetStorageClass() == kScFormal && GetBecommon().GetTypeSize(sym.GetTyIdx()) > k16ByteSize) { - /* formal of size of greater than 16 is copied by the caller and the pointer to it is passed. */ - /* otherwise it is passed in register and is accessed directly. 
*/ + if (IsParamStructCopy(sym)) { memOpnd = &GetOrCreateMemOpnd(sym, 0, memSize); RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *memOpnd); @@ -1720,7 +1733,7 @@ Insn *AArch64CGFunc::AggtStrLdrInsert(bool bothUnion, Insn *lastStrLdr, Insn &ne return &newStrLdr; } -bool AArch64CGFunc::IslhsSizeAligned(uint64 lhsSizeCovered, uint32 newAlignUsed, uint64 lhsSize) { +bool AArch64CGFunc::IslhsSizeAligned(uint64 lhsSizeCovered, uint32 newAlignUsed, uint64 lhsSize) const { CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); if ((lhsSizeCovered + newAlignUsed) > lhsSize) { return true; @@ -1737,11 +1750,13 @@ void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { MIRStructType *structType = static_cast(lhsSymbol->GetType()); ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); lhsType = structType->GetFieldType(stmt.GetFieldID()); - lhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first); + lhsOffset = structType->GetKind() == kTypeClass ? 
+ static_cast(GetBecommon().GetJClassFieldOffset(*structType, stmt.GetFieldID()).byteOffset) : + static_cast(structType->GetFieldOffsetFromBaseAddr(stmt.GetFieldID()).byteOffset); bothUnion = bothUnion || (structType->GetKind() == kTypeUnion); } - uint32 lhsAlign = GetBecommon().GetTypeAlign(lhsType->GetTypeIndex()); - uint64 lhsSize = GetBecommon().GetTypeSize(lhsType->GetTypeIndex()); + uint32 lhsAlign = lhsType->GetAlign(); + uint64 lhsSize = lhsType->GetSize(); uint32 rhsAlign; uint32 alignUsed; @@ -1754,11 +1769,13 @@ void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { MIRStructType *structType = static_cast(rhsSymbol->GetType()); ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); rhsType = structType->GetFieldType(rhsDread->GetFieldID()); - rhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first); + rhsOffset = structType->GetKind() == kTypeClass ? + static_cast(GetBecommon().GetJClassFieldOffset(*structType, rhsDread->GetFieldID()).byteOffset) : + static_cast(structType->GetFieldOffsetFromBaseAddr(rhsDread->GetFieldID()).byteOffset); bothUnion = bothUnion && (structType->GetKind() == kTypeUnion); } bothUnion = bothUnion && (rhsSymbol == lhsSymbol); - rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + rhsAlign = rhsType->GetAlign(); alignUsed = std::min(lhsAlign, rhsAlign); ASSERT(alignUsed != 0, "expect non-zero"); uint32 copySize = GetAggCopySize(lhsOffset, rhsOffset, alignUsed); @@ -1781,7 +1798,7 @@ void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { int64 lhsOffsetVal = lhsOffstOpnd->GetValue(); bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kLo12Li); bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kLo12Li); - if (lhsSize > kParmMemcpySize) { + if (lhsSize > kParmMemcpySize * 2) { // expand to doule size of memcpy limit size std::vector opndVec; RegOperand *regResult = 
&CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); opndVec.push_back(regResult); /* result */ @@ -1892,7 +1909,7 @@ void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { } } else if (stmt.GetRHS()->GetOpCode() == OP_iread) { IreadNode *rhsIread = static_cast(stmt.GetRHS()); - RegOperand *addrOpnd = static_cast(HandleExpr(*rhsIread, *rhsIread->Opnd(0))); + RegOperand *addrOpnd = static_cast(AArchHandleExpr(*rhsIread, *rhsIread->Opnd(0))); addrOpnd = &LoadIntoRegister(*addrOpnd, rhsIread->Opnd(0)->GetPrimType()); MIRPtrType *rhsPointerType = static_cast( GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx())); @@ -1903,10 +1920,12 @@ void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { MIRStructType *rhsStructType = static_cast(rhsType); ASSERT(rhsStructType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID()); - rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); + rhsOffset = rhsStructType->GetKind() == kTypeClass ? 
+ static_cast(GetBecommon().GetJClassFieldOffset(*rhsStructType, rhsIread->GetFieldID()).byteOffset) : + static_cast(rhsStructType->GetFieldOffsetFromBaseAddr(rhsIread->GetFieldID()).byteOffset); isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); } - rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + rhsAlign = rhsType->GetAlign(); alignUsed = std::min(lhsAlign, rhsAlign); ASSERT(alignUsed != 0, "expect non-zero"); uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); @@ -1916,7 +1935,7 @@ void AArch64CGFunc::SelectAggDassign(const DassignNode &stmt) { CHECK_NULL_FATAL(lhsOffstOpnd); int64 lhsOffsetVal = lhsOffstOpnd->GetValue(); bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kLo12Li); - if (lhsSize > kParmMemcpySize) { + if (lhsSize > kParmMemcpySize * 2) { // expand to doule size of memcpy limit size std::vector opndVec; RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); opndVec.push_back(regResult); /* result */ @@ -2091,7 +2110,9 @@ void AArch64CGFunc::SelectIassign(IassignNode &stmt) { } ASSERT(structType != nullptr, "SelectIassign: non-zero fieldID for non-structure"); pointedType = structType->GetFieldType(stmt.GetFieldID()); - offset = GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first; + offset = structType->GetKind() == kTypeClass ? 
+ GetBecommon().GetJClassFieldOffset(*structType, stmt.GetFieldID()).byteOffset : + structType->GetFieldOffsetFromBaseAddr(stmt.GetFieldID()).byteOffset; isRefField = GetBecommon().IsRefField(*structType, stmt.GetFieldID()); } else { pointedType = GetPointedToType(*pointerType); @@ -2105,7 +2126,7 @@ void AArch64CGFunc::SelectIassign(IassignNode &stmt) { } PrimType styp = stmt.GetRHS()->GetPrimType(); - Operand *valOpnd = HandleExpr(stmt, *stmt.GetRHS()); + Operand *valOpnd = AArchHandleExpr(stmt, *stmt.GetRHS()); Operand &srcOpnd = LoadIntoRegister(*valOpnd, (IsPrimitiveInteger(styp) || IsPrimitiveVectorInteger(styp)), GetPrimTypeBitSize(styp)); @@ -2143,7 +2164,7 @@ void AArch64CGFunc::SelectIassignoff(IassignoffNode &stmt) { auto dataSize = GetPrimTypeBitSize(destType); memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? ConstraintOffsetToSafeRegion(dataSize, memOpnd, nullptr) : memOpnd; - Operand *valOpnd = HandleExpr(stmt, *stmt.GetBOpnd(1)); + Operand *valOpnd = AArchHandleExpr(stmt, *stmt.GetBOpnd(1)); Operand &srcOpnd = LoadIntoRegister(*valOpnd, true, GetPrimTypeBitSize(destType)); SelectCopy(memOpnd, destType, srcOpnd, destType); } @@ -2174,8 +2195,7 @@ MemOperand *AArch64CGFunc::GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AA bool AArch64CGFunc::GetNumReturnRegsForIassignfpoff(MIRType &rType, PrimType &primType, uint32 &numRegs) { bool isPureFp = false; uint32 rSize = static_cast(rType.GetSize()); - CHECK_FATAL(rSize <= k16ByteSize, "SelectIassignfpoff invalid agg size"); - uint32 fpSize; + uint32 fpSize = 0; numRegs = FloatParamRegRequired(static_cast(&rType), fpSize); if (numRegs > 0) { primType = (fpSize == k4ByteSize) ? 
PTY_f32 : PTY_f64; @@ -2283,11 +2303,11 @@ MIRType *AArch64CGFunc::LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector(static_cast(src)->GetRegisterNumber()); + auto regno = static_cast(static_cast(src).GetRegisterNumber()); MIRFunction *func = &GetFunction(); if (!func->IsReturnStruct()) { @@ -2298,7 +2318,7 @@ bool AArch64CGFunc::LmbcSmallAggForRet(const BaseNode &bNode, const Operand *src uint32 numRegs = 0; if (static_cast(bNode).GetNext()->GetOpCode() == OP_return) { MIRStructType *ty = static_cast(func->GetReturnType()); - uint32 tySize = static_cast(GetBecommon().GetTypeSize(ty->GetTypeIndex())); + uint32 tySize = static_cast(ty->GetSize()); uint32 fpregs = FloatParamRegRequired(ty, size); if (fpregs > 0) { /* pure floating point in agg */ @@ -2442,12 +2462,12 @@ void AArch64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand &src) { LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); SetLmbcArgInfo(p); } - if (LmbcSmallAggForRet(bNode, &src)) { + if (LmbcSmallAggForRet(bNode, src)) { return; } else if (LmbcSmallAggForCall(bNode, &src, &parmList)) { return; } - Operand *dest = HandleExpr(bNode, *bNode.Opnd(0)); + Operand *dest = AArchHandleExpr(bNode, *bNode.Opnd(0)); RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); /* memcpy for agg assign OR large agg for arg/ret */ int32 offset = bNode.offset; @@ -2540,9 +2560,11 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { MIRStructType *structType = static_cast(lhsType); ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure"); lhsType = structType->GetFieldType(stmt.GetFieldID()); - lhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first); + lhsOffset = structType->GetKind() == kTypeClass ? 
+ static_cast(GetBecommon().GetJClassFieldOffset(*structType, stmt.GetFieldID()).byteOffset) : + static_cast(structType->GetFieldOffsetFromBaseAddr(stmt.GetFieldID()).byteOffset); } else if (lhsType->GetKind() == kTypeArray) { -#if DEBUG +#if defined(DEBUG) && DEBUG MIRArrayType *arrayLhsType = static_cast(lhsType); /* access an array element */ MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayLhsType->GetElemTyIdx()); @@ -2552,7 +2574,7 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { "unexpected array element type in iassign"); #endif } else if (lhsType->GetKind() == kTypeFArray) { -#if DEBUG +#if defined(DEBUG) && DEBUG MIRFarrayType *farrayLhsType = static_cast(lhsType); /* access an array element */ MIRType *lhsElemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayLhsType->GetElemTyIdx()); @@ -2562,8 +2584,8 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { "unexpected array element type in iassign"); #endif } - uint32 lhsAlign = GetBecommon().GetTypeAlign(lhsType->GetTypeIndex()); - uint64 lhsSize = GetBecommon().GetTypeSize(lhsType->GetTypeIndex()); + uint32 lhsAlign = lhsType->GetAlign(); + uint64 lhsSize = lhsType->GetSize(); uint32 rhsAlign; uint32 alignUsed; @@ -2576,9 +2598,12 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { MIRStructType *structType = static_cast(rhsSymbol->GetType()); ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure"); rhsType = structType->GetFieldType(rhsDread->GetFieldID()); - rhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first); + rhsOffset = structType->GetKind() == kTypeClass ? 
+ static_cast(GetBecommon().GetJClassFieldOffset(*structType, rhsDread->GetFieldID()).byteOffset) : + static_cast(structType->GetFieldOffsetFromBaseAddr(rhsDread->GetFieldID()).byteOffset); } - rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + rhsAlign = rhsType->GetAlign(); + alignUsed = std::min(lhsAlign, rhsAlign); ASSERT(alignUsed != 0, "expect non-zero"); uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); @@ -2594,7 +2619,7 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { CHECK_NULL_FATAL(rhsOffstOpnd); int64 rhsOffsetVal = rhsOffstOpnd->GetValue(); bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kLo12Li); - if (lhsSize > kParmMemcpySize) { + if (lhsSize > kParmMemcpySize * 2) { // expand to doule size of memcpy limit size std::vector opndVec; RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); opndVec.push_back(regResult); /* result */ @@ -2610,12 +2635,12 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { return; } for (uint32 i = 0; i < (lhsSize / copySize); ++i) { - uint32 rhsBaseOffset = static_cast(static_cast(rhsOffsetVal) + i * copySize); + int64 rhsBaseOffset = rhsOffsetVal + static_cast(i * copySize); uint32 lhsBaseOffset = lhsOffset + i * copySize; uint32 memSize = copySize * k8BitSize; MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; ImmOperand &rhsOfstOpnd = - CreateImmOperand(static_cast(static_cast(rhsBaseOffset)), k32BitSize, false); + CreateImmOperand(rhsBaseOffset, k32BitSize, false, rhsOffstOpnd->GetVary()); MemOperand *rhsMemOpnd = nullptr; if (sym) { rhsMemOpnd = CreateMemOperand(memSize, *rhsBaseReg, rhsOfstOpnd, *sym); @@ -2665,7 +2690,8 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { uint32 newMemSize = newAlignUsed * k8BitSize; MIRSymbol *sym = rhsIsLo12 ? 
rhsSymbol : nullptr; uint64 rhsOffVal = lhsSizeCovered + static_cast(rhsOffsetVal); - ImmOperand &rhsOfstOpnd = CreateImmOperand(static_cast(rhsOffVal), k32BitSize, false); + ImmOperand &rhsOfstOpnd = CreateImmOperand(static_cast(rhsOffVal), k32BitSize, false, + rhsOffstOpnd->GetVary()); MemOperand *rhsMemOpnd = nullptr; if (sym) { rhsMemOpnd = CreateMemOperand(newMemSize, *rhsBaseReg, rhsOfstOpnd, *sym); @@ -2690,7 +2716,7 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { } else { /* rhs is iread */ ASSERT(stmt.GetRHS()->GetOpCode() == OP_iread, "SelectAggDassign: NYI"); IreadNode *rhsIread = static_cast(stmt.GetRHS()); - RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*rhsIread, *rhsIread->Opnd(0))); + RegOperand *rhsAddrOpnd = static_cast(AArchHandleExpr(*rhsIread, *rhsIread->Opnd(0))); rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, rhsIread->Opnd(0)->GetPrimType()); MIRPtrType *rhsPointerType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx())); @@ -2701,14 +2727,16 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { MIRStructType *rhsStructType = static_cast(rhsType); ASSERT(rhsStructType, "SelectAggDassign: non-zero fieldID for non-structure"); rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID()); - rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); + rhsOffset = rhsStructType->GetKind() == kTypeClass ? 
+ static_cast(GetBecommon().GetJClassFieldOffset(*rhsStructType, rhsIread->GetFieldID()).byteOffset) : + static_cast(rhsStructType->GetFieldOffsetFromBaseAddr(rhsIread->GetFieldID()).byteOffset); isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); } - rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + rhsAlign = rhsType->GetAlign(); alignUsed = std::min(lhsAlign, rhsAlign); ASSERT(alignUsed != 0, "expect non-zero"); uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); - if (lhsSize > kParmMemcpySize) { + if (lhsSize > kParmMemcpySize * 2) { // expand to doule size of memcpy limit size std::vector opndVec; RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); opndVec.push_back(regResult); /* result */ @@ -2724,6 +2752,8 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { return; } ASSERT(copySize != 0, "expect non-zero"); + // If the program goes through the following for loop, hasPairOrTwoWords returns true. 
+ bool hasPairOrTwoWords = false; for (uint32 i = 0; i < (lhsSize / copySize); i++) { /* generate the load */ uint32 operandSize = copySize * k8BitSize; @@ -2763,189 +2793,126 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { lhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(lhsMemOpnd), operandSize, kInsnSecondOpnd); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); } + hasPairOrTwoWords = true; } /* take care of extra content at the end less than the unit */ uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; uint32 newAlignUsed = copySize; + // Insn can be reduced when lhsSizeNotCovered = 3 | 5 | 6 | 7 + // 3: h + b -> w ; 5: w + b -> x ; 6: w + h -> x ; 7: w + h + b -> x + uint32 lhsSizeNotCovered = lhsSize - lhsSizeCovered; + if (hasPairOrTwoWords && + (lhsSizeNotCovered == k3BitSize || ((lhsSizeNotCovered >= k5BitSize) && (lhsSizeNotCovered <= k7BitSize)))) { + uint64 ofst = (lhsSizeNotCovered == k3BitSize) ? (lhsSize - k4BitSize) : (lhsSize - k8BitSize); + uint32 memOpndSize = (lhsSizeNotCovered == k3BitSize) ? k32BitSize : k64BitSize; + regno_t vRegNO = NewVReg(kRegTyInt, (lhsSizeNotCovered == k3BitSize) ? 
k4BitSize : k8BitSize); + GenLdStForAggIassign(ofst, rhsOffset, lhsOffset, *rhsAddrOpnd, lhsAddrOpnd, memOpndSize, vRegNO, isRefField); + lhsSizeCovered += lhsSizeNotCovered; + } while (lhsSizeCovered < lhsSize) { newAlignUsed = newAlignUsed >> 1; if (IslhsSizeAligned(lhsSizeCovered, newAlignUsed, lhsSize)) { continue; } - /* generate the load */ - int64 rhsOffValCoverd = static_cast(rhsOffset + lhsSizeCovered); - ImmOperand &rhsOfstOpnd = CreateImmOperand(rhsOffValCoverd, k32BitSize, false); uint32 memOpndSize = newAlignUsed * k8BitSize; - MemOperand *rhsMemOpnd = CreateMemOperand(memOpndSize, static_cast(*rhsAddrOpnd), rhsOfstOpnd); regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed)); - RegOperand &result = CreateVirtualRegisterOperand(vRegNO); - MOperator mOpLD = PickLdInsn(memOpndSize, PTY_u32); - rhsMemOpnd = FixLargeMemOpnd(mOpLD, *rhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); - Insn &insn = GetInsnBuilder()->BuildInsn(mOpLD, result, *rhsMemOpnd); - insn.MarkAsAccessRefField(isRefField); - GetCurBB()->AppendInsn(insn); - /* generate the store */ - int64 lhsOffValWithCover = static_cast(lhsOffset + lhsSizeCovered); - ImmOperand &lhsOfstOpnd = CreateImmOperand(lhsOffValWithCover, k32BitSize, false); - MemOperand *lhsMemOpnd = CreateMemOperand(memOpndSize, static_cast(lhsAddrOpnd), lhsOfstOpnd); - MOperator mOpST = PickStInsn(memOpndSize, PTY_u32); - lhsMemOpnd = FixLargeMemOpnd(mOpST, *lhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpST, result, *lhsMemOpnd)); + GenLdStForAggIassign(lhsSizeCovered, rhsOffset, lhsOffset, *rhsAddrOpnd, lhsAddrOpnd, memOpndSize, vRegNO, + isRefField); lhsSizeCovered += newAlignUsed; } } } +void AArch64CGFunc::GenLdStForAggIassign(uint64 ofst, uint32 rhsOffset, uint32 lhsOffset, RegOperand &rhsAddrOpnd, + Operand &lhsAddrOpnd, uint32 memOpndSize, regno_t vRegNO, bool isRefField) { + /* generate the load */ + int64 rhsOffValCovered = 
static_cast(rhsOffset + ofst); + ImmOperand &rhsOfstOpnd = CreateImmOperand(rhsOffValCovered, k32BitSize, false); + MemOperand *rhsMemOpnd = CreateMemOperand(memOpndSize, rhsAddrOpnd, rhsOfstOpnd); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + MOperator mOpLD = PickLdInsn(memOpndSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOpLD, *rhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + Insn &insn = GetInsnBuilder()->BuildInsn(mOpLD, result, *rhsMemOpnd); + insn.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(insn); + /* generate the store */ + int64 lhsOffValWithCover = static_cast(lhsOffset + ofst); + ImmOperand &lhsOfstOpnd = CreateImmOperand(lhsOffValWithCover, k32BitSize, false); + MemOperand *lhsMemOpnd = CreateMemOperand(memOpndSize, static_cast(lhsAddrOpnd), lhsOfstOpnd); + MOperator mOpST = PickStInsn(memOpndSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOpST, *lhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpST, result, *lhsMemOpnd)); +} + void AArch64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) { - uint32 offset = 0; - if (x->GetOpCode() == OP_dread) { - DreadNode *dread = static_cast(x); - MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(dread->GetStIdx()); - MIRType *mirType = sym->GetType(); - if (dread->GetFieldID() != 0) { - MIRStructType *structType = static_cast(mirType); - mirType = structType->GetFieldType(dread->GetFieldID()); - offset = static_cast(GetBecommon().GetFieldOffset(*structType, dread->GetFieldID()).first); - } - uint32 typeSize = static_cast(GetBecommon().GetTypeSize(mirType->GetTypeIndex())); - /* generate move to regs for agg return */ - AArch64CallConvImpl parmlocator(GetBecommon()); - CCLocInfo pLoc; - (void)parmlocator.LocateNextParm(*mirType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); - /* aggregates are 8 byte aligned. 
*/ - Operand *rhsmemopnd = nullptr; - RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */ - uint32 loadSize; - uint32 numRegs; - RegType regType; - PrimType retPty; - bool fpParm = false; - if (pLoc.numFpPureRegs > 0) { - loadSize = pLoc.fpSize; - numRegs = pLoc.numFpPureRegs; - fpParm = true; - regType = kRegTyFloat; - retPty = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; - } else { - if (CGOptions::IsBigEndian()) { - loadSize = k8ByteSize; - numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; - regType = kRegTyInt; - retPty = PTY_u64; - } else { - loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; - numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; - regType = kRegTyInt; - retPty = PTY_u32; - } - } - bool parmCopy = IsParamStructCopy(*sym); - for (uint32 i = 0; i < numRegs; i++) { - if (parmCopy) { - rhsmemopnd = &LoadStructCopyBase(*sym, - (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), - static_cast(loadSize * kBitsPerByte)); - } else { - rhsmemopnd = &GetOrCreateMemOpnd(*sym, - (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), - (loadSize * kBitsPerByte)); - } - result[i] = &CreateVirtualRegisterOperand(NewVReg(regType, loadSize)); - MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, retPty); - Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), *rhsmemopnd); - GetCurBB()->AppendInsn(ld); - } - AArch64reg regs[kFourRegister]; - regs[0] = static_cast(pLoc.reg0); - regs[1] = static_cast(pLoc.reg1); - regs[2] = static_cast(pLoc.reg2); - regs[3] = static_cast(pLoc.reg3); - RegOperand *dest; - for (uint32 i = 0; i < numRegs; i++) { - AArch64reg preg; - MOperator mop2; - if (fpParm) { - preg = regs[i]; - mop2 = (loadSize == k4ByteSize) ? MOP_xvmovs : MOP_xvmovd; - } else { - preg = (i == 0 ? R0 : R1); - mop2 = (loadSize == k4ByteSize) ? 
MOP_wmovrr : MOP_xmovrr; - } - dest = &GetOrCreatePhysicalRegisterOperand(preg, (loadSize * kBitsPerByte), regType); - Insn &mov = GetInsnBuilder()->BuildInsn(mop2, *dest, *(result[i])); - GetCurBB()->AppendInsn(mov); - } - /* Create artificial dependency to extend the live range */ - for (uint32 i = 0; i < numRegs; i++) { - AArch64reg preg; - MOperator mop3; - if (fpParm) { - preg = regs[i]; - mop3 = MOP_pseudo_ret_float; - } else { - preg = (i == 0 ? R0 : R1); - mop3 = MOP_pseudo_ret_int; - } - dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, regType); - Insn &pseudo = GetInsnBuilder()->BuildInsn(mop3, *dest); - GetCurBB()->AppendInsn(pseudo); - } - return; - } else if (x->GetOpCode() == OP_iread) { - IreadNode *iread = static_cast(x); - RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*iread, *iread->Opnd(0))); - rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, iread->Opnd(0)->GetPrimType()); - MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx())); - MIRType *mirType = static_cast(ptrType->GetPointedType()); - bool isRefField = false; - if (iread->GetFieldID() != 0) { - MIRStructType *structType = static_cast(mirType); - mirType = structType->GetFieldType(iread->GetFieldID()); - offset = static_cast(GetBecommon().GetFieldOffset(*structType, iread->GetFieldID()).first); - isRefField = GetBecommon().IsRefField(*structType, iread->GetFieldID()); - } - uint32 typeSize = static_cast(GetBecommon().GetTypeSize(mirType->GetTypeIndex())); - /* generate move to regs. */ - RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */ - uint32 loadSize; - if (CGOptions::IsBigEndian()) { - loadSize = k8ByteSize; - } else { - loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; - } - uint32 numRegs = (typeSize <= k8ByteSize) ? 
kOneRegister : kTwoRegister; - for (uint32 i = 0; i < numRegs; i++) { - ImmOperand &rhsOffOpnd = CreateImmOperand(offset + i * loadSize, loadSize * kBitsPerByte, false); - MemOperand *rhsmemopnd = CreateMemOperand(loadSize * kBitsPerByte, *rhsAddrOpnd, rhsOffOpnd); - result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize)); - MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32); - Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), *rhsmemopnd); - ld.MarkAsAccessRefField(isRefField); - GetCurBB()->AppendInsn(ld); - } - RegOperand *dest; - for (uint32 i = 0; i < numRegs; i++) { - AArch64reg preg = (i == 0 ? R0 : R1); - dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); - Insn &mov = GetInsnBuilder()->BuildInsn(MOP_xmovrr, *dest, *(result[i])); - GetCurBB()->AppendInsn(mov); - } - /* Create artificial dependency to extend the live range */ - for (uint32 i = 0; i < numRegs; i++) { - AArch64reg preg = (i == 0 ? R0 : R1); - dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); - Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *dest); - GetCurBB()->AppendInsn(pseudo); - } - return; - } else { // dummy return of 0 inserted by front-end at absence of return - ASSERT(x->GetOpCode() == OP_constval, "SelectReturnSendOfStructInRegs: unexpected return operand"); - uint32 typeSize = GetPrimTypeSize(x->GetPrimType()); - RegOperand &dest = GetOrCreatePhysicalRegisterOperand(R0, typeSize * kBitsPerByte, kRegTyInt); - ImmOperand &src = CreateImmOperand(0, k16BitSize, false); + if (x->GetOpCode() != OP_dread && x->GetOpCode() != OP_iread) { + // dummy return of 0 inserted by front-end at absence of return + ASSERT(x->GetOpCode() == OP_constval, "NIY: unexpected return operand"); + uint32 typeSize = GetPrimTypeBitSize(x->GetPrimType()); + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(R0, typeSize, kRegTyInt); + ImmOperand &src = CreateImmOperand(0, typeSize, false); 
GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, src)); return; } + + AggregateDesc aggDesc; + GetAggregateDescFromAggregateNode(*x, aggDesc); + auto *addrOpnd = GetAddrOpndWithBaseNode(*x, *aggDesc.sym, aggDesc.offset); + + // generate move to regs for agg return + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo retMatch; + parmlocator.LocateRetVal(*aggDesc.mirType, retMatch); + if (retMatch.GetReg0() == kRinvalid) { + return; + } + + uint32 offset = 0; + std::vector result; + // load memOpnd to return reg + // ldr x0, [base] + // ldr x1, [base + 8] + auto generateReturnValToRegs = + [this, &result, &offset, &aggDesc, &addrOpnd](regno_t regno, PrimType primType) { + bool isFpReg = IsPrimitiveFloat(primType) || IsPrimitiveVector(primType); + RegType regType = isFpReg ? kRegTyFloat : kRegTyInt; + if (CGOptions::IsBigEndian() && !isFpReg) { + primType = PTY_u64; + } else if (GetPrimTypeSize(primType) <= k4ByteSize) { + primType = isFpReg ? PTY_f32 : PTY_u32; + } + auto &phyReg = GetOrCreatePhysicalRegisterOperand(static_cast(regno), + GetPrimTypeBitSize(primType), regType); + ASSERT(addrOpnd->IsMemoryAccessOperand(), "NIY, must be mem opnd"); + auto *newMemOpnd = &GetMemOperandAddOffset(static_cast(*addrOpnd), offset, + GetPrimTypeBitSize(primType)); + MOperator ldMop = PickLdInsn(GetPrimTypeBitSize(primType), primType); + newMemOpnd = FixLargeMemOpnd(ldMop, *newMemOpnd, GetPrimTypeBitSize(primType), kSecondOpnd); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(ldMop, phyReg, *newMemOpnd); + ldInsn.MarkAsAccessRefField(aggDesc.isRefField); + GetCurBB()->AppendInsn(ldInsn); + + offset += GetPrimTypeSize(primType); + result.push_back(&phyReg); + }; + generateReturnValToRegs(retMatch.GetReg0(), retMatch.GetPrimTypeOfReg0()); + if (retMatch.GetReg1() != kRinvalid) { + generateReturnValToRegs(retMatch.GetReg1(), retMatch.GetPrimTypeOfReg1()); + } + if (retMatch.GetReg2() != kRinvalid) { + generateReturnValToRegs(retMatch.GetReg2(), 
retMatch.GetPrimTypeOfReg2()); + } + if (retMatch.GetReg3() != kRinvalid) { + generateReturnValToRegs(retMatch.GetReg3(), retMatch.GetPrimTypeOfReg3()); + } + + // Create artificial dependency to extend the live range + for (auto *opnd : result) { + auto pseudoMop = (opnd->GetRegisterType() == kRegTyInt) ? MOP_pseudo_ret_int : + MOP_pseudo_ret_float; + Insn &pseudoInsn = GetInsnBuilder()->BuildInsn(pseudoMop, *opnd); + GetCurBB()->AppendInsn(pseudoInsn); + } } Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { @@ -2955,7 +2922,7 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { /* use the second register return by __builtin_eh_return(). */ AArch64CallConvImpl retLocator(GetBecommon()); CCLocInfo retMech; - retLocator.InitReturnInfo(*type, retMech); + retLocator.LocateRetVal(*type, retMech); retLocator.SetupSecondRetReg(*type, retMech); return &GetOrCreatePhysicalRegisterOperand(static_cast(retMech.GetReg1()), k64BitSize, kRegTyInt); } @@ -2968,7 +2935,9 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { MIRStructType *structType = static_cast(symbol->GetType()); ASSERT(structType != nullptr, "SelectDread: non-zero fieldID for non-structure"); symType = structType->GetFieldType(fieldId)->GetPrimType(); - offset = static_cast(GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first); + offset = structType->GetKind() == kTypeClass ? 
+ static_cast(GetBecommon().GetJClassFieldOffset(*structType, expr.GetFieldID()).byteOffset) : + static_cast(structType->GetFieldOffsetFromBaseAddr(expr.GetFieldID()).byteOffset); parmCopy = IsParamStructCopy(*symbol); } @@ -2977,7 +2946,7 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { PrimType resultType = expr.GetPrimType(); if (symType == PTY_agg) { if (expr.GetPrimType() == PTY_agg) { - aggSize = static_cast(GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx())); + aggSize = static_cast(symbol->GetType()->GetSize()); dataSize = ((expr.GetFieldID() == 0) ? GetPointerSize() : aggSize) << 3; resultType = PTY_u64; symType = resultType; @@ -2988,9 +2957,9 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { MemOperand *memOpnd = nullptr; if (aggSize > k8ByteSize) { if (parent.op == OP_eval) { + Operand &dest = GetZeroOpnd(k64BitSize); if (symbol->GetAttr(ATTR_volatile)) { /* Need to generate loads for the upper parts of the struct. */ - Operand &dest = GetZeroOpnd(k64BitSize); uint32 numLoads = static_cast(RoundUp(aggSize, k64BitSize) / k64BitSize); for (uint32 o = 0; o < numLoads; ++o) { if (parmCopy) { @@ -3006,6 +2975,7 @@ Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { } else { /* No side-effects. No need to generate anything for eval. 
*/ } + return &dest; } else { if (expr.GetFieldID() != 0) { CHECK_FATAL(false, "SelectDread: Illegal agg size"); @@ -3055,9 +3025,12 @@ RegOperand *AArch64CGFunc::SelectRegread(RegreadNode &expr) { void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field) { const MIRSymbol *symbol = stImm.GetSymbol(); - if (!GetFunction().IsMayWriteToAddrofStackChecked() && symbol->GetStorageClass() == kScAuto) { - SetStackProtectInfo(kAddrofStack); + if (symbol->GetName() == ".tbss_start_" + GetMirModule().GetTlsAnchorHashString() || + symbol->GetName() == ".tdata_start_" + GetMirModule().GetTlsAnchorHashString()) { + SelectThreadAnchor(result, stImm); + return; } + CheckAndSetStackProtectInfoWithAddrof(*symbol); if ((symbol->GetStorageClass() == kScAuto) || (symbol->GetStorageClass() == kScFormal)) { if (!CGOptions::IsQuiet()) { maple::LogInfo::MapleLogger(kLlErr) << @@ -3129,7 +3102,7 @@ void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID f /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); - auto size = GetPointerSize() * kBitsPerByte; + auto size = GetPointerBitSize(); MemOperand *memOpnd = CreateMemOperand(static_cast(size), static_cast(*srcOpnd), offset, *symbol); GetCurBB()->AppendInsn( @@ -3145,6 +3118,12 @@ void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID f } } +void AArch64CGFunc::SelectThreadAnchor(Operand &result, StImmOperand &stImm) { + auto &r0opnd = GetOrCreatePhysicalRegisterOperand (R0, k64BitSize, GetRegTyFromPrimTy(PTY_u64)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_call, r0opnd, result, stImm)); + SelectCopy(result, PTY_u64, r0opnd, PTY_u64); +} + void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field) { const MIRSymbol *symbol = memOpnd.GetSymbol(); if (symbol->GetStorageClass() == kScAuto) { @@ -3152,9 +3131,7 @@ 
void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID f Operand &immOpnd = CreateImmOperand(offsetOpnd->GetOffsetValue(), PTY_u32, false); ASSERT(memOpnd.GetBaseRegister() != nullptr, "nullptr check"); SelectAdd(result, *memOpnd.GetBaseRegister(), immOpnd, PTY_u32); - if (!GetFunction().IsMayWriteToAddrofStackChecked()) { - SetStackProtectInfo(kAddrofStack); - } + CheckAndSetStackProtectInfoWithAddrof(*symbol); } else if (!IsAfterRegAlloc()) { // Create a new vreg/preg for the upper bits of the address PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64); @@ -3181,22 +3158,23 @@ void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID f Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) { MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); - int32 offset = 0; + uint32 offset = 0; AddrofoffNode &addrofoffExpr = static_cast(static_cast(expr)); if (isAddrofoff) { - offset = addrofoffExpr.offset; + offset = static_cast(addrofoffExpr.offset); } else { if (expr.GetFieldID() != 0) { MIRStructType *structType = static_cast(symbol->GetType()); /* with array of structs, it is possible to have nullptr */ if (structType != nullptr) { - offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + offset = structType->GetKind() == kTypeClass ? + static_cast(GetBecommon().GetJClassFieldOffset(*structType, expr.GetFieldID()).byteOffset) : + static_cast(structType->GetFieldOffsetFromBaseAddr(expr.GetFieldID()).byteOffset); } } } if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && - ((!isAddrofoff && expr.GetFieldID() != 0) || - (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { + ((!isAddrofoff && expr.GetFieldID() != 0) || (symbol->GetType()->GetSize() > k16ByteSize))) { /* * Struct param is copied on the stack by caller if struct size > 16. 
* Else if size < 16 then struct param is copied into one or two registers. @@ -3205,12 +3183,12 @@ Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, b /* load the base address of the struct copy from stack. */ SelectAddrof(*stackAddr, CreateStImmOperand(*symbol, 0, 0)); Operand *structAddr; - if (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) <= k16ByteSize) { + if (!IsParamStructCopy(*symbol)) { isAggParamInReg = true; structAddr = stackAddr; } else { MemOperand *mo = - CreateMemOperand(GetPointerSize() * kBitsPerByte, *stackAddr, CreateImmOperand(0, k32BitSize, false)); + CreateMemOperand(GetPointerBitSize(), *stackAddr, CreateImmOperand(0, k32BitSize, false)); structAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xldr, *structAddr, *mo)); } @@ -3299,6 +3277,7 @@ Operand &AArch64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode (instrSize == k2ByteSize) ? 
PTY_u16 : PTY_u8; Operand &dst = GetOrCreateResOperand(parent, primType); Operand &immOpnd = CreateImmOperand(expr.GetOffset(), k64BitSize, false); + AddAdrpLabel(expr.GetOffset()); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd)); return dst; } @@ -3309,7 +3288,8 @@ Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ire auto bitSize = GetPrimTypeBitSize(primType); auto *baseAddr = ireadoff.Opnd(0); auto *result = &CreateRegisterOperandOfType(primType); - auto *addrOpnd = HandleExpr(ireadoff, *baseAddr); + auto *addrOpnd = AArchHandleExpr(ireadoff, *baseAddr); + ASSERT_NOT_NULL(addrOpnd); if (primType == PTY_agg && parent.GetOpCode() == OP_regassign) { auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, PTY_a64), offset, bitSize); auto mop = PickLdInsn(64, PTY_a64); @@ -3317,7 +3297,7 @@ Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ire auto ®AssignNode = static_cast(parent); PregIdx pIdx = regAssignNode.GetRegIdx(); CHECK_FATAL(IsSpecialPseudoRegister(pIdx), "SelectIreadfpoff of agg"); - (void)LmbcSmallAggForRet(parent, addrOpnd, offset, true); + (void)LmbcSmallAggForRet(parent, *addrOpnd, offset, true); // result not used } else { auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, PTY_a64), offset, bitSize); @@ -3342,7 +3322,7 @@ RegOperand *AArch64CGFunc::LmbcStructReturnLoad(int32 offset) { MIRFunction &func = GetFunction(); CHECK_FATAL(func.IsReturnStruct(), "LmbcStructReturnLoad: not struct return"); MIRType *ty = func.GetReturnType(); - uint32 sz = static_cast(GetBecommon().GetTypeSize(ty->GetTypeIndex())); + uint32 sz = static_cast(ty->GetSize()); uint32 fpSize; uint32 numFpRegs = FloatParamRegRequired(static_cast(ty), fpSize); if (numFpRegs > 0) { @@ -3413,7 +3393,9 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, ASSERT(structType != nullptr, "SelectIread: non-zero fieldID for non-structure"); pointedType = 
structType->GetFieldType(expr.GetFieldID()); - offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + offset = structType->GetKind() == kTypeClass ? + GetBecommon().GetJClassFieldOffset(*structType, expr.GetFieldID()).byteOffset : + structType->GetFieldOffsetFromBaseAddr(expr.GetFieldID()).byteOffset; isRefField = GetBecommon().IsRefField(*structType, expr.GetFieldID()); } else { pointedType = GetPointedToType(*pointerType); @@ -3433,7 +3415,7 @@ Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, if (regType == kRegTyFloat) { /* regsize is correct */ } else { - uint32 sz = GetBecommon().GetTypeSize(pointedType->GetTypeIndex().GetIdx()); + uint32 sz = static_cast(pointedType->GetSize()); regSize = (sz <= k4ByteSize) ? k4ByteSize : k8ByteSize; } } else if (regSize < k4ByteSize) { @@ -3629,16 +3611,16 @@ Operand *AArch64CGFunc::HandleFmovImm(PrimType stype, int64 val, MIRConst &mirCo CHECK_FATAL(GetPrimTypeBitSize(stype) != k128BitSize, "Couldn't process Float128 at HandleFmovImm method"); Operand *result; bool is64Bits = (GetPrimTypeBitSize(stype) == k64BitSize); - uint64 val_unsigned = static_cast(val); - uint64 canRepreset = is64Bits ? (val_unsigned & 0xffffffffffff) : (val_unsigned & 0x7ffff); - uint32 val1 = is64Bits ? (val_unsigned >> 61) & 0x3 : (val_unsigned >> 29) & 0x3; - uint32 val2 = is64Bits ? (val_unsigned >> 54) & 0xff : (val_unsigned >> 25) & 0x1f; + uint64 valUnsigned = static_cast(val); + uint64 canRepreset = is64Bits ? (valUnsigned & 0xffffffffffff) : (valUnsigned & 0x7ffff); + uint32 val1 = is64Bits ? (valUnsigned >> 61) & 0x3 : (valUnsigned >> 29) & 0x3; + uint32 val2 = is64Bits ? (valUnsigned >> 54) & 0xff : (valUnsigned >> 25) & 0x1f; bool isSame = is64Bits ? 
((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f)); - canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame; + canRepreset = static_cast((canRepreset == 0) && (((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) != 0) && isSame); if (canRepreset > 0) { - uint64 temp1 = is64Bits ? (val_unsigned >> 63) << 7 : (val_unsigned >> 31) << 7; - uint64 temp2 = is64Bits ? val_unsigned >> 48 : val_unsigned >> 19; - int64 imm8 = (temp2 & 0x7f) | temp1; + uint64 temp1 = is64Bits ? (valUnsigned >> 63) << 7 : (valUnsigned >> 31) << 7; + uint64 temp2 = is64Bits ? valUnsigned >> 48 : valUnsigned >> 19; + int64 imm8 = static_cast((temp2 & 0x7f) | temp1); Operand *newOpnd0 = &CreateImmOperand(imm8, k8BitSize, true, kNotVary, true); result = &GetOrCreateResOperand(parent, stype); MOperator mopFmov = (is64Bits ? MOP_xdfmovri : MOP_wsfmovri); @@ -3728,40 +3710,6 @@ static inline void AppendInstructionTo(Insn &i, CGFunc &f) { f.GetCurBB()->AppendInsn(i); } -/* - * Returns the number of leading 0-bits in x, starting at the most significant bit position. - * If x is 0, the result is -1. - */ -static int32 GetHead0BitNum(int64 val) { - uint32 bitNum = 0; - for (; bitNum < k64BitSize; bitNum++) { - if ((0x8000000000000000ULL >> static_cast(bitNum)) & static_cast(val)) { - break; - } - } - if (bitNum == k64BitSize) { - return -1; - } - return bitNum; -} - -/* - * Returns the number of trailing 0-bits in x, starting at the least significant bit position. - * If x is 0, the result is -1. 
- */ -static int32 GetTail0BitNum(int64 val) { - uint32 bitNum = 0; - for (; bitNum < k64BitSize; bitNum++) { - if ((static_cast(1) << static_cast(bitNum)) & static_cast(val)) { - break; - } - } - if (bitNum == k64BitSize) { - return -1; - } - return bitNum; -} - /* * If the input integer is power of 2, return log2(input) * else return -1 @@ -3932,8 +3880,8 @@ void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcod */ void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr) { ASSERT(expr.GetOpCode() == OP_cmp, "unexpect opcode"); - Operand *opnd0 = HandleExpr(expr, *expr.Opnd(0)); - Operand *opnd1 = HandleExpr(expr, *expr.Opnd(1)); + Operand *opnd0 = AArchHandleExpr(expr, *expr.Opnd(0)); + Operand *opnd1 = AArchHandleExpr(expr, *expr.Opnd(1)); CompareNode *node = static_cast(&expr); bool isFloat = IsPrimitiveFloat(node->GetOpndType()); opnd0 = &LoadIntoRegister(*opnd0, node->GetOpndType()); @@ -3969,8 +3917,8 @@ void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr) { */ void AArch64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &expr) { auto &cmpNode = static_cast(expr); - Operand *opnd0 = HandleExpr(cmpNode, *cmpNode.Opnd(0)); - Operand *opnd1 = HandleExpr(cmpNode, *cmpNode.Opnd(1)); + Operand *opnd0 = AArchHandleExpr(cmpNode, *cmpNode.Opnd(0)); + Operand *opnd1 = AArchHandleExpr(cmpNode, *cmpNode.Opnd(1)); PrimType operandType = cmpNode.GetOpndType(); opnd0 = opnd0->IsRegister() ? 
static_cast(opnd0) : &SelectCopy(*opnd0, operandType, operandType); @@ -4112,66 +4060,12 @@ void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, return; } else { /* add reg, #imm */ - ImmOperand *immOpnd = static_cast(&opnd1); - if (immOpnd->IsNegative()) { - immOpnd->Negate(); - SelectSub(resOpnd, opnd0, *immOpnd, primType); - return; - } - if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { - /* - * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers - * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers - * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 - * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 - */ - MOperator mOpCode = MOP_undef; - Operand *newOpnd0 = &opnd0; - if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || - immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { - /* process higher 12 bits */ - ImmOperand &immOpnd2 = - CreateImmOperand(static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits), - immOpnd->GetSize(), immOpnd->IsSignedValue()); - mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24; - Operand *tmpRes = IsAfterRegAlloc() ? &resOpnd : &CreateRegisterOperandOfType(primType); - BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); - Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, *tmpRes, opnd0, immOpnd2, shiftopnd); - GetCurBB()->AppendInsn(newInsn); - immOpnd->ModuloByPow2(kMaxImmVal12Bits); - newOpnd0 = tmpRes; - } - /* process lower 12 bits */ - mOpCode = is64Bits ? 
MOP_xaddrri12 : MOP_waddrri12; - Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd); - GetCurBB()->AppendInsn(newInsn); - return; - } - /* load into register */ - int64 immVal = immOpnd->GetValue(); - int32 tail0bitNum = GetTail0BitNum(immVal); - int32 head0bitNum = GetHead0BitNum(immVal); - const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum; - RegOperand ®Opnd = GetRegOpnd(isAfterRegAlloc, primType); - regno_t regNO0 = static_cast(opnd0).GetRegisterNumber(); - /* addrrrs do not support sp */ - if (bitNum <= k16ValidBit && regNO0 != RSP) { - int64 newImm = (static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF; - ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); - SelectCopyImm(regOpnd, immOpnd1, primType); - uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; - int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; - BitShiftOperand &bitShiftOpnd = - CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0bitNum), static_cast(bitLen)); - Insn &newInsn = GetInsnBuilder()->BuildInsn(mopBadd, resOpnd, opnd0, regOpnd, bitShiftOpnd); - GetCurBB()->AppendInsn(newInsn); - return; + MOperator mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12; + Insn &addInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, opnd1); + GetCurBB()->AppendInsn(addInsn); + if (!VERIFY_INSN(&addInsn)) { + SPLIT_INSN(&addInsn, this); } - - SelectCopyImm(regOpnd, *immOpnd, primType); - MOperator mOpCode = is64Bits ? 
MOP_xaddrrr : MOP_waddrrr; - Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, regOpnd); - GetCurBB()->AppendInsn(newInsn); } } @@ -4257,71 +4151,12 @@ void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, SelectSub(resOpnd, *opnd0Bak, SelectCopy(opnd1, primType, primType), primType); return; } - - ImmOperand *immOpnd = static_cast(&opnd1); - if (immOpnd->IsNegative()) { - immOpnd->Negate(); - SelectAdd(resOpnd, *opnd0Bak, *immOpnd, primType); - return; - } - - int64 higher12BitVal = static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits); - if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) { - /* - * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers - * SUB Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers - * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 - * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 - * large offset is treated as sub (higher 12 bits + 4096) + add - * it gives opportunities for combining add + ldr due to the characteristics of aarch64's load/store - */ - MOperator mOpCode = MOP_undef; - bool isSplitSub = false; - if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || - immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { - isSplitSub = true; - /* process higher 12 bits */ - ImmOperand &immOpnd2 = - CreateImmOperand(higher12BitVal + 1, immOpnd->GetSize(), immOpnd->IsSignedValue()); - - mOpCode = is64Bits ? MOP_xsubrri24 : MOP_wsubrri24; - BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); - Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, immOpnd2, shiftopnd); - GetCurBB()->AppendInsn(newInsn); - immOpnd->ModuloByPow2(kMaxImmVal12Bits); - immOpnd->SetValue(static_cast(kMax12UnsignedImm) - immOpnd->GetValue()); - opnd0Bak = &resOpnd; - } - /* process lower 12 bits */ - mOpCode = isSplitSub ? (is64Bits ? 
MOP_xaddrri12 : MOP_waddrri12) : (is64Bits ? MOP_xsubrri12 : MOP_wsubrri12); - Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, *immOpnd); - GetCurBB()->AppendInsn(newInsn); - return; - } - - /* load into register */ - int64 immVal = immOpnd->GetValue(); - int32 tail0bitNum = GetTail0BitNum(immVal); - int32 head0bitNum = GetHead0BitNum(immVal); - const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum; - RegOperand ®Opnd = GetRegOpnd(isAfterRegAlloc, primType); - if (bitNum <= k16ValidBit) { - int64 newImm = (static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF; - ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); - SelectCopyImm(regOpnd, immOpnd1, primType); - uint32 mopBsub = is64Bits ? MOP_xsubrrrs : MOP_wsubrrrs; - int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; - BitShiftOperand &bitShiftOpnd = - CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0bitNum), static_cast(bitLen)); - GetCurBB()->AppendInsn( - GetInsnBuilder()->BuildInsn(mopBsub, resOpnd, *opnd0Bak, regOpnd, bitShiftOpnd)); - return; + MOperator mOpCode = is64Bits ? MOP_xsubrri12 : MOP_wsubrri12; + Insn &subInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, opnd1); + GetCurBB()->AppendInsn(subInsn); + if (!VERIFY_INSN(&subInsn)) { + SPLIT_INSN(&subInsn, this); } - - SelectCopyImm(regOpnd, *immOpnd, primType); - MOperator mOpCode = is64Bits ? MOP_xsubrrr : MOP_wsubrrr; - Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, regOpnd); - GetCurBB()->AppendInsn(newInsn); } Operand *AArch64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { @@ -4464,7 +4299,7 @@ void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opn SelectShift(tmpOpnd, opnd0, CreateImmOperand(dsize - 1, dsize, false), kShiftAright, primType); uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; int32 bitLen = is64Bits ? 
kBitLenOfShift64Bits : kBitLenOfShift32Bits; - BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSR, + BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kShiftLSR, dsize - static_cast(shiftNumber), static_cast(bitLen)); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBadd, tmpOpnd, opnd0, tmpOpnd, shiftOpnd)); SelectShift(resOpnd, tmpOpnd, shiftNum, kShiftAright, primType); @@ -5044,8 +4879,8 @@ void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operan GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); } else { int64 immVal = immOpnd->GetValue(); - int32 tail0BitNum = GetTail0BitNum(immVal); - int32 head0BitNum = GetHead0BitNum(immVal); + int32 tail0BitNum = AArch64isa::GetTail0BitNum(immVal); + int32 head0BitNum = AArch64isa::GetHead0BitNum(immVal); const int32 bitNum = (k64BitSizeInt - head0BitNum) - tail0BitNum; RegOperand ®Opnd = CreateRegisterOperandOfType(primType); @@ -5055,7 +4890,7 @@ void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operan SelectCopyImm(regOpnd, immOpnd1, primType); MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, false, true); int32 bitLen = is64Bits ? 
kBitLenOfShift64Bits : kBitLenOfShift32Bits; - BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0BitNum), + BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kShiftLSL, static_cast(tail0BitNum), static_cast(bitLen)); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd, shiftOpnd)); } else { @@ -5654,7 +5489,7 @@ void AArch64CGFunc::SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, Pri Operand *AArch64CGFunc::GetOpndWithOneParam(const IntrinsicopNode &intrnNode) { BaseNode *argexpr = intrnNode.Opnd(0); PrimType ptype = argexpr->GetPrimType(); - Operand *opnd = HandleExpr(intrnNode, *argexpr); + Operand *opnd = AArchHandleExpr(intrnNode, *argexpr); if (opnd->IsMemoryAccessOperand()) { RegOperand &ldDest = CreateRegisterOperandOfType(ptype); Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); @@ -5692,7 +5527,7 @@ Operand *AArch64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, for (BaseNode *argexpr : argNodes) { PrimType ptype = argexpr->GetPrimType(); - Operand *opnd = HandleExpr(intrnNode, *argexpr); + Operand *opnd = AArchHandleExpr(intrnNode, *argexpr); if (opnd->IsMemoryAccessOperand()) { RegOperand &ldDest = CreateRegisterOperandOfType(ptype); Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); @@ -5707,6 +5542,41 @@ Operand *AArch64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, return retOpnd; } +RegOperand *AArch64CGFunc::SelectIntrinsicOpLoadTlsAnchor(const IntrinsicopNode& intrinsicopNode, + const BaseNode &parent) { + auto intrinsicId = intrinsicopNode.GetIntrinsic(); + RegOperand &result = GetOrCreateResOperand(parent, PTY_u64); + if (opts::aggressiveTlsLocalDynamicOpt) { + if (intrinsicId == INTRN_C___tls_get_tbss_anchor) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tlsload_tbss, result)); + } else { + 
GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tlsload_tdata, result)); + } + auto tpidr = &CreateCommentOperand("tpidr_el0"); + RegOperand *specialFunc = &CreateRegisterOperandOfType(PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, *specialFunc, *tpidr)); + SelectAdd(result, result, *specialFunc, PTY_u64); + return &result; + } else if (CGOptions::IsShlib()) { + auto &r0Opnd = GetOrCreatePhysicalRegisterOperand (R0, k64BitSize, GetRegTyFromPrimTy(PTY_u64)); + RegOperand *tlsAddr = &CreateRegisterOperandOfType(PTY_u64); + RegOperand *specialFunc = &CreateRegisterOperandOfType(PTY_u64); + StImmOperand *stImm = nullptr; + if (intrinsicId == INTRN_C___tls_get_tbss_anchor) { + stImm = &CreateStImmOperand(*GetMirModule().GetTbssAnchor(), 0, 0); + } else { + stImm = &CreateStImmOperand(*GetMirModule().GetTdataAnchor(), 0, 0); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_call, r0Opnd, *tlsAddr, *stImm)); + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, *specialFunc, *tpidr)); + SelectAdd(result, r0Opnd, *specialFunc, PTY_u64); + return &result; + } else { + CHECK_FATAL(false, "Tls anchor intrinsic should be used in local dynamic or aggressive Tls opt"); + } +} + /* According to gcc.target/aarch64/ffs.c */ Operand *AArch64CGFunc::SelectAArch64ffs(Operand &argOpnd, PrimType argType) { RegOperand &destOpnd = LoadIntoRegister(argOpnd, argType); @@ -5841,7 +5711,7 @@ Operand *AArch64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0) { uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3; uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f; bool isSame = is64Bits ? 
((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f)); - canRepreset = (canRepreset == 0) && (((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) != 0) && isSame; + canRepreset = static_cast((canRepreset == 0) && (((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) != 0) && isSame); Operand *newOpnd0 = &opnd0; if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType) && (canRepreset != 0)) { uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7; @@ -6233,7 +6103,7 @@ void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOp GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); /* load the displacement into a register by accessing memory at base + index*8 */ - BitShiftOperand &bitOpnd = CreateBitShiftOperand(BitShiftOperand::kLSL, k3BitSize, k8BitShift); + BitShiftOperand &bitOpnd = CreateBitShiftOperand(BitShiftOperand::kShiftLSL, k3BitSize, k8BitShift); Operand *disp = CreateMemOperand(k64BitSize, baseOpnd, *addOpnd, bitOpnd); RegOperand &tgt = CreateRegisterOperandOfType(PTY_a64); SelectAdd(tgt, baseOpnd, *disp, PTY_u64); @@ -6322,7 +6192,7 @@ Operand *AArch64CGFunc::SelectGCMalloc(GCMallocNode &node) { /* Get the size and alignment of the type. */ TyIdx tyIdx = node.GetTyIdx(); - uint64 size = GetBecommon().GetTypeSize(tyIdx); + uint64 size = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize(); int64 align = static_cast(RTSupport::GetRTSupportInstance().GetObjectAlignment()); /* Generate the call to MCC_NewObj */ @@ -6539,6 +6409,19 @@ void AArch64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &r realSaveRegs.insert(preg); } } + if (UsePlt(funcSt)) { + // When plt is used during function call, x0-x9 and v0-v7 is saved on the stack. 
+ for (uint32 i = R10; i <= R29; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast(i))) { + realSaveRegs.insert(i); + } + } + for (uint32 i = V8; i <= V31; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast(i))) { + realSaveRegs.insert(i); + } + } + } return; } } @@ -6573,7 +6456,7 @@ bool AArch64CGFunc::IsFrameReg(const RegOperand &opnd) const { bool AArch64CGFunc::IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) const { AArch64CallConvImpl retLocator(cgBeCommon); CCLocInfo retMechanism; - retLocator.InitReturnInfo(mirType, retMechanism); + retLocator.LocateRetVal(mirType, retMechanism); if (retMechanism.GetRegCount() > 0) { return reg.GetRegisterNumber() == retMechanism.GetReg0() || reg.GetRegisterNumber() == retMechanism.GetReg1() || reg.GetRegisterNumber() == retMechanism.GetReg2() || reg.GetRegisterNumber() == retMechanism.GetReg3(); @@ -6585,7 +6468,7 @@ bool AArch64CGFunc::IsSPOrFP(const RegOperand &opnd) const { const RegOperand ®Opnd = static_cast(opnd); regno_t regNO = opnd.GetRegisterNumber(); return (regOpnd.IsPhysicalRegister() && - (regNO == RSP || regNO == RFP || (regNO == R29 && CGOptions::UseFramePointer()))); + (regNO == RSP || regNO == RFP || (regNO == R29 && (CGOptions::UseFramePointer() != 0)))); } bool AArch64CGFunc::IsReturnReg(const RegOperand &opnd) const { @@ -6632,11 +6515,11 @@ void AArch64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { SetCurBB(bb); RegOperand ®Opnd0 = - GetOrCreatePhysicalRegisterOperand(R0, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); + GetOrCreatePhysicalRegisterOperand(R0, GetPointerBitSize(), GetRegTyFromPrimTy(PTY_a64)); RegOperand ®Opnd1 = - GetOrCreatePhysicalRegisterOperand(R1, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); + GetOrCreatePhysicalRegisterOperand(R1, GetPointerBitSize(), GetRegTyFromPrimTy(PTY_a64)); /* allocate 16 bytes to store reg0 and reg1 (each reg has 8 bytes) */ - MemOperand &frameAlloc = CreateCallFrameOperand(-16, 
GetPointerSize() * kBitsPerByte); + MemOperand &frameAlloc = CreateCallFrameOperand(-16, GetPointerBitSize()); Insn &allocInsn = GetInsnBuilder()->BuildInsn(MOP_xstp, regOpnd0, regOpnd1, frameAlloc); allocInsn.SetDoNotRemove(true); AppendInstructionTo(allocInsn, *this); @@ -6644,7 +6527,7 @@ void AArch64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { /* invoke MCC_CleanupLocalStackRef(). */ HandleRCCall(false); /* deallocate 16 bytes which used to store reg0 and reg1 */ - MemOperand &frameDealloc = CreateCallFrameOperand(16, GetPointerSize() * kBitsPerByte); + MemOperand &frameDealloc = CreateCallFrameOperand(16, GetPointerBitSize()); GenRetCleanup(cleanEANode, true); Insn &deallocInsn = GetInsnBuilder()->BuildInsn(MOP_xldp, regOpnd0, regOpnd1, frameDealloc); deallocInsn.SetDoNotRemove(true); @@ -6874,11 +6757,11 @@ BitShiftOperand *AArch64CGFunc::GetLogicalShiftLeftOperand(uint32 shiftAmount, b } AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = { - BitShiftOperand(BitShiftOperand::kLSL, 0, 4), BitShiftOperand(BitShiftOperand::kLSL, 16, 4), - BitShiftOperand(BitShiftOperand::kLSL, static_cast(-1), 0), /* invalid entry */ - BitShiftOperand(BitShiftOperand::kLSL, static_cast(-1), 0), /* invalid entry */ - BitShiftOperand(BitShiftOperand::kLSL, 0, 6), BitShiftOperand(BitShiftOperand::kLSL, 16, 6), - BitShiftOperand(BitShiftOperand::kLSL, 32, 6), BitShiftOperand(BitShiftOperand::kLSL, 48, 6), + BitShiftOperand(BitShiftOperand::kShiftLSL, 0, 4), BitShiftOperand(BitShiftOperand::kShiftLSL, 16, 4), + BitShiftOperand(BitShiftOperand::kShiftLSL, static_cast(-1), 0), /* invalid entry */ + BitShiftOperand(BitShiftOperand::kShiftLSL, static_cast(-1), 0), /* invalid entry */ + BitShiftOperand(BitShiftOperand::kShiftLSL, 0, 6), BitShiftOperand(BitShiftOperand::kShiftLSL, 16, 6), + BitShiftOperand(BitShiftOperand::kShiftLSL, 32, 6), BitShiftOperand(BitShiftOperand::kShiftLSL, 48, 6), }; MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size) { 
@@ -6969,7 +6852,7 @@ void AArch64CGFunc::GenSaveMethodInfoCode(BB &bb) { */ if ((GetFunction().GetAttr(FUNCATTR_native) || GetFunction().GetAttr(FUNCATTR_fast_native)) && !GetFunction().GetAttr(FUNCATTR_critical_native) && !GetFunction().GetAttr(FUNCATTR_bridge)) { - RegOperand &fpReg = GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt); + RegOperand &fpReg = GetOrCreatePhysicalRegisterOperand(RFP, GetPointerBitSize(), kRegTyInt); ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); @@ -7049,31 +6932,15 @@ void AArch64CGFunc::GenerateYieldpoint(BB &bb) { bb.AppendInsn(yieldPoint); } -Operand &AArch64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) { - return GetTargetRetOperand(primType, sReg); -} - Operand &AArch64CGFunc::GetTargetRetOperand(PrimType primType, int32 sReg) { - uint32 bitSize = GetPrimTypeBitSize(primType) < k32BitSize ? k32BitSize : GetPrimTypeBitSize(primType); - AArch64reg pReg; - if (sReg < 0) { - return GetOrCreatePhysicalRegisterOperand( - IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0, - bitSize, GetRegTyFromPrimTy(primType)); - } else { - switch (sReg) { - case kSregRetval0: - pReg = IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0; - break; - case kSregRetval1: - pReg = R1; - break; - default: - pReg = RLAST_INT_REG; - ASSERT(0, "GetTargetRetOperand: NYI"); - } - return GetOrCreatePhysicalRegisterOperand(pReg, bitSize, GetRegTyFromPrimTy(primType)); + if (IsSpecialPseudoRegister(-sReg)) { + return GetOrCreateSpecialRegisterOperand(sReg, primType); } + bool useFpReg = !IsPrimitiveInteger(primType) || IsPrimitiveVectorFloat(primType); + uint32 bitSize = GetPrimTypeBitSize(primType); + bitSize = bitSize <= k32BitSize ? k32BitSize : bitSize; + return GetOrCreatePhysicalRegisterOperand(useFpReg ? 
V0 : R0, bitSize, + GetRegTyFromPrimTy(primType)); } RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(PrimType primType) { @@ -7282,7 +7149,7 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for /* optimization for little slot cleanup */ if (realMax == realMin && !forEA) { RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); - Operand &stackLoc = CreateStkTopOpnd(static_cast(realMin), GetPointerSize() * kBitsPerByte); + Operand &stackLoc = CreateStkTopOpnd(static_cast(realMin), GetPointerBitSize()); Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); GetCurBB()->AppendInsn(ldrInsn); @@ -7363,7 +7230,7 @@ bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool for RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg) const { RegOperand *res = memPool->New(vRegNO, size, kind, flg); - vReg.vRegOperandTable[vRegNO] = res; + maplebe::VregInfo::vRegOperandTable[vRegNO] = res; return res; } @@ -7376,14 +7243,14 @@ RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { - auto it = vReg.vRegOperandTable.find(vRegNO); - return (it != vReg.vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); + auto it = maplebe::VregInfo::vRegOperandTable.find(vRegNO); + return (it != maplebe::VregInfo::vRegOperandTable.end()) ? 
*(it->second) : CreateVirtualRegisterOperand(vRegNO); } RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) { regno_t regNO = regOpnd.GetRegisterNumber(); - auto it = vReg.vRegOperandTable.find(regNO); - if (it != vReg.vRegOperandTable.end()) { + auto it = maplebe::VregInfo::vRegOperandTable.find(regNO); + if (it != maplebe::VregInfo::vRegOperandTable.end()) { it->second->SetSize(regOpnd.GetSize()); it->second->SetRegisterNumber(regNO); it->second->SetRegisterType(regOpnd.GetRegisterType()); @@ -7396,7 +7263,7 @@ RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd SetMaxRegNum(newRegNO + kRegIncrStepLen); vReg.VRegTableResize(GetMaxRegNum()); } - vReg.vRegOperandTable[newRegNO] = newRegOpnd; + maplebe::VregInfo::vRegOperandTable[newRegNO] = newRegOpnd; VirtualRegNode *vregNode = memPool->New(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize()); vReg.VRegTableElementSet(newRegNO, vregNode); vReg.SetCount(GetMaxRegNum()); @@ -7494,7 +7361,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { while (ind < pairNum) { int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + pairRefBytes * ind; Operand &zeroOp = GetZeroOpnd(k64BitSize); - Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerBitSize()); Insn &setInc = GetInsnBuilder()->BuildInsn(MOP_xstp, zeroOp, zeroOp, stackLoc); GetCurBB()->AppendInsn(setInc); ind++; @@ -7502,7 +7369,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { if (singleNum > 0) { int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + kIntregBytelen * (refNum - 1); Operand &zeroOp = GetZeroOpnd(k64BitSize); - Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerBitSize()); Insn &setInc = 
GetInsnBuilder()->BuildInsn(MOP_xstr, zeroOp, stackLoc); GetCurBB()->AppendInsn(setInc); } @@ -7516,7 +7383,7 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { if ((refNum == 1) && !begin && (retRef == nullptr)) { RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); Operand &stackLoc = CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), - GetPointerSize() * kBitsPerByte); + GetPointerBitSize()); Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); GetCurBB()->AppendInsn(ldrInsn); @@ -7546,10 +7413,10 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { if (stOffset == 0) { /* just have to Dec the next one. */ stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()) + kIntregBytelen, - GetPointerSize() * kBitsPerByte); + GetPointerBitSize()); } else { /* just have to Dec the current one. */ - stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), GetPointerSize() * kBitsPerByte); + stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), GetPointerBitSize()); } Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, *stackLoc); GetCurBB()->AppendInsn(ldrInsn); @@ -7657,1009 +7524,451 @@ void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { } } -void AArch64CGFunc::SelectParmListDreadSmallAggregate(MIRSymbol &sym, MIRType &structType, ListOperand &srcOpnds, - int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID) { - /* - * in two param regs if possible - * If struct is <= 8 bytes, then it fits into one param reg. - * If struct is <= 16 bytes, then it fits into two param regs. - * Otherwise, it goes onto the stack. - * If the number of available param reg is less than what is - * needed to fit the entire struct into them, then the param - * reg is skipped and the struct goes onto the stack. - * Example 1. 
- * struct size == 8 bytes. - * param regs x0 to x6 are used. - * struct is passed in x7. - * Example 2. - * struct is 16 bytes. - * param regs x0 to x5 are used. - * struct is passed in x6 and x7. - * Example 3. - * struct is 16 bytes. - * param regs x0 to x6 are used. x7 alone is not enough to pass the struct. - * struct is passed on the stack. - * x7 is not used, as the following param will go onto the stack also. - */ - uint32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); - CCLocInfo ploc; - parmLocator.LocateNextParm(structType, ploc); - if (ploc.reg0 == 0) { - /* No param regs available, pass on stack. */ - /* If symSize is <= 8 bytes then use 1 reg, else 2 */ - CreateCallStructParamPassByStack(symSize, &sym, nullptr, ploc.memOffset); - } else { - /* pass by param regs. */ - RegOperand *parmOpnd0 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 0); - srcOpnds.PushOpnd(*parmOpnd0); - if (ploc.reg1 > 0) { - RegOperand *parmOpnd1 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 1); - srcOpnds.PushOpnd(*parmOpnd1); - } - if (ploc.reg2 > 0) { - RegOperand *parmOpnd2 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 2); - srcOpnds.PushOpnd(*parmOpnd2); - } - if (ploc.reg3 > 0) { - RegOperand *parmOpnd3 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 3); - srcOpnds.PushOpnd(*parmOpnd3); - } - } -} - -RegOperand *AArch64CGFunc::LoadIreadAddrForSamllAgg(BaseNode &iread) { - RegOperand *addrOpnd1 = nullptr; - if (iread.GetOpCode() == OP_iread) { - RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); - addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); - } else if (iread.GetOpCode() == OP_ireadfpoff) { - IreadFPoffNode &ireadoff = static_cast(iread); - RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); - RegOperand *addrOpnd0 = &CreateRegisterOperandOfType(PTY_a64); - ImmOperand &immOpnd = 
CreateImmOperand(ireadoff.GetOffset(), k32BitSize, true); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *addrOpnd0, *rfp, immOpnd)); - addrOpnd1 = &LoadIntoRegister(*addrOpnd0, PTY_i64); - } else if (iread.GetOpCode() == OP_ireadoff) { - IreadoffNode &ireadoff = static_cast(iread); - RegOperand *addrOpnd0 = static_cast(HandleExpr(ireadoff, *(ireadoff.Opnd(0)))); - addrOpnd1 = &LoadIntoRegister(*addrOpnd0, PTY_i64); - } - CHECK_FATAL(addrOpnd1 != nullptr, "addrOpnd for iread cannot be null"); - return addrOpnd1; -} - -void AArch64CGFunc::SelectParmListIreadSmallAggregate(BaseNode &iread, MIRType &structType, - ListOperand &srcOpnds, int32 offset, - AArch64CallConvImpl &parmLocator) { - uint32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); - uint32 passSize = 0; - RegOperand *addrOpnd1 = LoadIreadAddrForSamllAgg(iread); - CCLocInfo ploc; - parmLocator.LocateNextParm(structType, ploc); - if (ploc.reg0 == 0) { - /* No param regs available, pass on stack. */ - CreateCallStructParamPassByStack(symSize, nullptr, addrOpnd1, ploc.memOffset); - } else { - /* pass by param regs. */ - FpParamState state = kStateUnknown; - uint32 memSize = 0; - switch (ploc.fpSize) { - case k0BitSize: - state = kNotFp; - memSize = k64BitSize; - break; - case k4BitSize: - state = kFp32Bit; - memSize = k32BitSize; - break; - case k8BitSize: - state = kFp64Bit; - memSize = k64BitSize; - break; - default: - break; - } - passSize = (symSize / k8ByteSize) > 0 ? k8ByteSize : symSize; - ImmOperand &offOpnd0 = CreateImmOperand(static_cast(offset), k32BitSize, false); - MemOperand *mopnd = CreateMemOperand(memSize, *addrOpnd1, offOpnd0); - CreateCallStructParamPassByReg(ploc.reg0, *mopnd, srcOpnds, state, passSize); - if (ploc.reg1 > 0) { - passSize = (symSize / k8ByteSize) > 1 ? k8ByteSize : symSize % k8ByteSize; - OfstOperand *offOpnd1 = &GetOrCreateOfstOpnd((((ploc.fpSize > 0) ? 
ploc.fpSize : GetPointerSize()) + - static_cast(offset)), k32BitSize); - mopnd = CreateMemOperand(memSize, *addrOpnd1, *offOpnd1); - CreateCallStructParamPassByReg(ploc.reg1, *mopnd, srcOpnds, state, passSize); - } - if (ploc.reg2 > 0) { - OfstOperand *offOpnd2 = - &GetOrCreateOfstOpnd((((ploc.fpSize > 0) ? (ploc.fpSize * k4BitShift) : GetPointerSize()) - + static_cast(offset)), k32BitSize); - mopnd = CreateMemOperand(memSize, *addrOpnd1, *offOpnd2); - CreateCallStructParamPassByReg(ploc.reg2, *mopnd, srcOpnds, state, passSize); - } - if (ploc.reg3 > 0) { - OfstOperand *offOpnd3 = &GetOrCreateOfstOpnd((((ploc.fpSize > 0) ? - (ploc.fpSize * k8BitShift) : GetPointerSize()) + static_cast(offset)), k32BitSize); - mopnd = CreateMemOperand(memSize, *addrOpnd1, *offOpnd3); - CreateCallStructParamPassByReg(ploc.reg3, *mopnd, srcOpnds, state, passSize); - } - } -} - -void AArch64CGFunc::SelectParmListDreadLargeAggregate(MIRSymbol &sym, MIRType &structType, - ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, - int32 fromOffset) { - /* - * Pass larger sized struct on stack. - * Need to copy the entire structure onto the stack. - * The pointer to the starting address of the copied struct is then - * used as the parameter for the struct. - * This pointer is passed as the next parameter. - * Example 1: - * struct is 23 bytes. - * param regs x0 to x5 are used. - * First around up 23 to 24, so 3 of 8-byte slots. - * Copy struct to a created space on the stack. - * Pointer of copied struct is passed in x6. - * Example 2: - * struct is 25 bytes. - * param regs x0 to x7 are used. - * First around up 25 to 32, so 4 of 8-byte slots. - * Copy struct to a created space on the stack. - * Pointer of copied struct is passed on stack as the 9th parameter. 
- */ - uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); - CCLocInfo ploc; - parmLocator.LocateNextParm(structType, ploc); - uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */ - /* Create the struct copies. */ - RegOperand *parmOpnd = CreateCallStructParamCopyToStack(numMemOp, &sym, nullptr, structCopyOffset, - fromOffset, ploc); - if (parmOpnd) { - srcOpnds.PushOpnd(*parmOpnd); - } - structCopyOffset += static_cast(numMemOp * GetPointerSize()); -} - -void AArch64CGFunc::SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, - ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, - int32 &structCopyOffset, int32 fromOffset) { - uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); - RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); - RegOperand *addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); - CCLocInfo ploc; - parmLocator.LocateNextParm(structType, ploc); - uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */ - RegOperand *parmOpnd = - CreateCallStructParamCopyToStack(numMemOp, nullptr, addrOpnd1, structCopyOffset, fromOffset, ploc); - structCopyOffset += static_cast(numMemOp * GetPointerSize()); - if (parmOpnd) { - srcOpnds.PushOpnd(*parmOpnd); - } -} - -void AArch64CGFunc::CreateCallStructParamPassByStack(uint32 symSize, MIRSymbol *sym, - RegOperand *addrOpnd, int32 baseOffset) { - if (symSize == 0) { - return; +/* preprocess call in parmlist */ +bool AArch64CGFunc::MarkParmListCall(BaseNode &expr) { + if (!CGOptions::IsPIC()) { + return false; } - MemOperand *ldMopnd = nullptr; - MemOperand *stMopnd = nullptr; - uint32 numRegNeeded = (static_cast(symSize) <= k8ByteSize) ? 
kOneRegister : kTwoRegister; - for (int j = 0; j < static_cast(numRegNeeded); j++) { - if (sym) { - if (CGOptions::IsArm64ilp32()) { - ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(k8ByteSize)), k64BitSize); - } else { - ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(GetPointerSize())), k64BitSize); - } - } else { - CHECK_NULL_FATAL(addrOpnd); - if (CGOptions::IsArm64ilp32()) { - ldMopnd = CreateMemOperand(k64BitSize, *addrOpnd, - CreateImmOperand(static_cast(j) * k8ByteSize, k32BitSize, false)); - } else { - ldMopnd = CreateMemOperand(k64BitSize, *addrOpnd, - CreateImmOperand(static_cast(j) * GetPointerSize(), k32BitSize, false)); + switch (expr.GetOpCode()) { + case OP_addrof: { + auto &addrNode = static_cast(expr); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrNode.GetStIdx()); + if (symbol->IsThreadLocal()) { + return true; } - } - RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); - if (CGOptions::IsArm64ilp32()) { - stMopnd = &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * static_cast(k8ByteSize))), k64BitSize); - } else { - stMopnd = &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * GetPointerSize())), k64BitSize); - } - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); - } -} - -RegOperand *AArch64CGFunc::SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, - const CCLocInfo &ploc, int32 offset, uint32 parmNum) { - uint32 memSize; - PrimType primType; - RegOperand *parmOpnd; - uint32 dataSizeBits; - AArch64reg reg; - switch (parmNum) { - case 0: - reg = static_cast(ploc.reg0); - break; - case 1: - reg = static_cast(ploc.reg1); break; - case 2: - reg = static_cast(ploc.reg2); - break; - case 3: - reg = static_cast(ploc.reg3); - break; - default: - CHECK_FATAL(false, "Exceeded maximum allowed fp parameter registers for 
struct passing"); - } - if (ploc.fpSize == 0) { - memSize = k64BitSize; - primType = PTY_i64; - dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; - parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); - } else if (ploc.fpSize == k4ByteSize) { - memSize = k32BitSize; - primType = PTY_f32; - dataSizeBits = GetPrimTypeSize(PTY_f32) * kBitsPerByte; - parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); - } else if (ploc.fpSize == k8ByteSize) { - memSize = k64BitSize; - primType = PTY_f64; - dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; - parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); - } else { - CHECK_FATAL(false, "Unknown call parameter state"); - } - MemOperand *memOpnd; - if (sym.GetStorageClass() == kScFormal && fieldID > 0) { - MIRType *ty = sym.GetType(); - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); - /* sym passed by address, need to be dereference */ - if (symSize > k16ByteSize) { - MemOperand &baseOpnd = GetOrCreateMemOpnd(sym, 0, memSize); - RegOperand &base = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), base, baseOpnd)); - memOpnd = &CreateMemOpnd(base, (static_cast(offset) + parmNum * GetPointerSize()), memSize); - } else { /* sym passed by register , no need to be dereference */ - if (CGOptions::IsArm64ilp32()) { - memOpnd = &GetOrCreateMemOpnd(sym, (k8ByteSize * parmNum + static_cast(offset)), memSize); - } else { - memOpnd = &GetOrCreateMemOpnd(sym, (GetPointerSize() * parmNum + static_cast(offset)), memSize); - } } - } else if (ploc.fpSize > 0) { - memOpnd = &GetOrCreateMemOpnd(sym, (ploc.fpSize * parmNum + static_cast(offset)), memSize); - } else { - if (CGOptions::IsArm64ilp32()) { - memOpnd = &GetOrCreateMemOpnd(sym, (k8ByteSize * parmNum + static_cast(offset)), memSize); - } else { - memOpnd = &GetOrCreateMemOpnd(sym, 
(GetPointerSize() * parmNum + static_cast(offset)), memSize); + default: { + for (size_t i = 0; i < expr.GetNumOpnds(); i++) { + if (expr.Opnd(i)) { + if (MarkParmListCall(*expr.Opnd(i))) { + return true; + } + } + } + break; } } - MOperator selectedMop = PickLdInsn(dataSizeBits, primType); - if ((memOpnd->GetAddrMode() == MemOperand::kBOI) && - !IsOperandImmValid(selectedMop, memOpnd, kInsnSecondOpnd)) { - memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSizeBits); - } - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, *memOpnd)); - - return parmOpnd; + return false; } -/* - * generate ldr and orr for small agg copy in parm pass - * ldr exact size of mem to reg based on agg size -*/ -void AArch64CGFunc::SelectCopySmallAggToReg(uint32 symSize, RegOperand &parmOpnd, const MemOperand &memOpnd) { - CHECK_FATAL(0 < symSize && symSize <= k8ByteSize, "small agg is less or equal to 64 bit"); - std::vector tmpRegs(symSize + 1); - std::vector tmpRegs2(symSize + 1); - uint32 bitSize = 0; - PrimType pty = PTY_begin; - if (symSize % k8ByteSize == 0) { - bitSize = k64BitSize; - pty = PTY_u64; - } else if (symSize % k4ByteSize == 0) { - bitSize = k32BitSize; - pty = PTY_u32; - } else { - bitSize = k8BitSize; - pty = PTY_u8; - } - RegOperand *base = memOpnd.GetBaseRegister(); - uint64 offset = static_cast(memOpnd.GetOffsetOperand()->GetValue()); - MOperator selectedMop = PickLdInsn(bitSize, pty); - // generate ldr mem -> reg - size_t lastPos = symSize / (bitSize / kBitsPerByte); - for (size_t i = 0; i < lastPos; i++) { - tmpRegs[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, bitSize)); - MemOperand *mOpnd = CreateMemOperand(bitSize, *base, - GetOrCreateOfstOpnd(static_cast(offset + (i * bitSize / kBitsPerByte)), k32BitSize)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *tmpRegs[i], *mOpnd)); - tmpRegs2[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - SelectCopyRegOpnd(*tmpRegs2[i], PTY_i64, 
Operand::kOpdRegister, k64BitSize, *tmpRegs[i], pty); - } - // use orr to reconstruct the original agg - for (size_t i = 0; i < lastPos - 1; i++) { - MOperator xorMop = MOP_xiorrrrs; - Operand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSL, - static_cast(bitSize * (i + 1)), static_cast(8)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(xorMop, *tmpRegs2[i + 1], - *tmpRegs2[i], *tmpRegs2[i + 1], shiftOpnd)); - } - SelectCopyRegOpnd(parmOpnd, PTY_i64, Operand::kOpdRegister, k64BitSize, - *tmpRegs2[lastPos - 1], PTY_i64); -} - -void AArch64CGFunc::CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, ListOperand &srcOpnds, - FpParamState state, uint32 symSize) { - RegOperand *parmOpnd; - uint32 dataSizeBits = 0; - PrimType pType = PTY_void; - parmOpnd = nullptr; - AArch64reg reg = static_cast(regno); - if (state == kNotFp) { - parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); - dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; - pType = PTY_i64; - } else if (state == kFp32Bit) { - parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); - dataSizeBits = GetPrimTypeSize(PTY_f32) * kBitsPerByte; - pType = PTY_f32; - } else if (state == kFp64Bit) { - parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); - dataSizeBits = GetPrimTypeSize(PTY_f64) * kBitsPerByte; - pType = PTY_f64; - } else { - ASSERT(0, "CreateCallStructParamPassByReg: Unknown state"); - } - - if (state == kNotFp && !CGOptions::IsBigEndian()) { - /* load exact size agg (BigEndian not support yet) */ - SelectCopySmallAggToReg(symSize, *parmOpnd, memOpnd); - } else { - MOperator selectedMop = PickLdInsn(dataSizeBits, pType); - if (!IsOperandImmValid(selectedMop, &memOpnd, kInsnSecondOpnd)) { - memOpnd = SplitOffsetWithAddInstruction(memOpnd, dataSizeBits); - } - ASSERT(parmOpnd != nullptr, "parmOpnd should not be nullptr"); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, 
memOpnd)); - } - srcOpnds.PushOpnd(*parmOpnd); -} - -void AArch64CGFunc::CreateCallStructParamMemcpy(const MIRSymbol &sym, uint32 structSize, - int32 copyOffset, int32 fromOffset) { - std::vector opndVec; - - RegOperand *vreg1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); - opndVec.push_back(vreg1); /* result */ - - RegOperand *parmOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); - RegOperand *spReg = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); - ImmOperand *offsetOpnd0 = &CreateImmOperand(copyOffset, k64BitSize, false); - SelectAdd(*parmOpnd, *spReg, *offsetOpnd0, PTY_a64); - opndVec.push_back(parmOpnd); /* param 0 */ - +Operand *AArch64CGFunc::GetSymbolAddressOpnd(const MIRSymbol &sym, int32 offset, bool useMem) { + RegOperand *rhsBaseOpnd = nullptr; + int32 rhsOffset = 0; + VaryType varyType = kNotVary; if (sym.GetStorageClass() == kScGlobal || sym.GetStorageClass() == kScExtern) { - StImmOperand &stopnd = CreateStImmOperand(sym, fromOffset, 0); - RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); - SelectAddrof(staddropnd, stopnd); - opndVec.push_back(&staddropnd); /* param 1 */ - } else if (sym.GetStorageClass() == kScAuto || sym.GetStorageClass() == kScFormal) { - RegOperand *parm1Reg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - AArch64SymbolAlloc *symloc = static_cast(GetMemlayout()->GetSymAllocInfo(sym.GetStIndex())); - RegOperand *baseOpnd = static_cast(GetBaseReg(*symloc)); - int32 stoffset = GetBaseOffset(*symloc); - ImmOperand *offsetOpnd1 = &CreateImmOperand(static_cast(stoffset), k64BitSize, false); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *parm1Reg, *baseOpnd, *offsetOpnd1)); - if (sym.GetStorageClass() == kScFormal) { - MemOperand *ldmopnd = CreateMemOperand(k64BitSize, *parm1Reg, CreateImmOperand(0, k32BitSize, false)); - RegOperand *tmpreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - RegOperand 
*vreg2 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), *tmpreg, *ldmopnd)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *vreg2, *tmpreg, - CreateImmOperand(fromOffset, k64BitSize, false))); - parm1Reg = vreg2; - } - opndVec.push_back(parm1Reg); /* param 1 */ + rhsBaseOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + StImmOperand &stOpnd = CreateStImmOperand(sym, offset, 0); + SelectAddrof(*rhsBaseOpnd, stOpnd); + } else if (sym.GetStorageClass() == kScAuto) { + auto *symloc = GetMemlayout()->GetSymAllocInfo(sym.GetStIndex()); + rhsBaseOpnd = GetBaseReg(*symloc); + rhsOffset = GetBaseOffset(*symloc) + offset; + } else if (sym.GetStorageClass() == kScFormal) { + if (IsParamStructCopy(sym)) { // sym passed by address, need to be dereference + auto &baseOpnd = GetOrCreateMemOpnd(sym, 0, k64BitSize); + rhsBaseOpnd = &SelectCopy(baseOpnd, PTY_a64, PTY_a64); + rhsOffset = offset; + } else { // sym passed by register , no need to be dereference + auto *symloc = GetMemlayout()->GetSymAllocInfo(sym.GetStIndex()); + rhsBaseOpnd = GetBaseReg(*symloc); + rhsOffset = GetBaseOffset(*symloc) + offset; + // sym passed by stack, offset need set kUnAdjustVary + if (symloc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + varyType = kUnAdjustVary; + } + } } else if (sym.GetStorageClass() == kScPstatic || sym.GetStorageClass() == kScFstatic) { CHECK_FATAL(sym.GetSKind() != kStConst, "Unsupported sym const for struct param"); - StImmOperand *stopnd = &CreateStImmOperand(sym, 0, 0); - RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, staddropnd, *stopnd)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, staddropnd, staddropnd, *stopnd)); - opndVec.push_back(&staddropnd); /* param 1 */ + StImmOperand &stOpnd = 
CreateStImmOperand(sym, 0, 0); + rhsBaseOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, *rhsBaseOpnd, stOpnd)); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(MOP_xadrpl12, *rhsBaseOpnd, *rhsBaseOpnd, stOpnd)); } else { CHECK_FATAL(false, "Unsupported sym for struct param"); } - RegOperand &vreg3 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); - ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, vreg3, sizeOpnd)); - opndVec.push_back(&vreg3); /* param 2 */ + auto &ofstOpnd = CreateImmOperand(rhsOffset, k64BitSize, false, varyType); + if (useMem) { + // create mem opnd and return + return CreateMemOperand(k64BitSize, *rhsBaseOpnd, ofstOpnd); + } - SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + // calc mem address, return reg opnd + auto &rhsOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAdd(rhsOpnd, *rhsBaseOpnd, ofstOpnd, PTY_a64); + return &rhsOpnd; } -void AArch64CGFunc::CreateCallStructParamMemcpy(RegOperand &addrOpnd, uint32 structSize, - int32 copyOffset, int32 fromOffset) { +void AArch64CGFunc::SelectStructMemcpy(RegOperand &destOpnd, RegOperand &srcOpnd, uint32 structSize) { std::vector opndVec; - - RegOperand *vreg1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); - opndVec.push_back(vreg1); /* result */ - - RegOperand *parmOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); - RegOperand *spReg = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); - ImmOperand *offsetOpnd0 = &CreateImmOperand(copyOffset, k64BitSize, false); - SelectAdd(*parmOpnd, *spReg, *offsetOpnd0, PTY_a64); - opndVec.push_back(parmOpnd); /* param 0 */ - - if (fromOffset != 0) { - RegOperand &p1vreg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - ImmOperand &fromImm = 
CreateImmOperand(fromOffset, k64BitSize, true); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, p1vreg, addrOpnd, fromImm)); - opndVec.push_back(&p1vreg); /* param 1 */ - } else { - opndVec.push_back(&addrOpnd); /* param 1 */ - } - - RegOperand &vreg3 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); - ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, vreg3, sizeOpnd)); - opndVec.push_back(&vreg3); /* param 2 */ - + opndVec.push_back(&CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize))); // result + opndVec.push_back(&destOpnd); // param 0 + opndVec.push_back(&srcOpnd); // param 1 + opndVec.push_back(&CreateImmOperand(structSize, k64BitSize, false)); // param 2 SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); } -RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMemOp, MIRSymbol *sym, - RegOperand *addrOpd, int32 copyOffset, - int32 fromOffset, const CCLocInfo &ploc) { - /* Create the struct copies. 
*/ - MemOperand *ldMopnd = nullptr; - MemOperand *stMopnd = nullptr; - for (uint32 j = 0; j < numMemOp; j++) { - uint64 offVal = j * GetPointerSize() + static_cast(static_cast(fromOffset)); - if (sym != nullptr) { - if (sym->GetStorageClass() == kScFormal) { - MemOperand &base = GetOrCreateMemOpnd(*sym, 0, k64BitSize); - RegOperand &vreg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), vreg, base); - GetCurBB()->AppendInsn(ldInsn); - ldMopnd = CreateMemOperand(k64BitSize, vreg, CreateImmOperand(static_cast(offVal), k32BitSize, false)); - } else { - if (CGOptions::IsArm64ilp32()) { - ldMopnd = &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k32BitSize); - } else { - ldMopnd = &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k64BitSize); - } - } +void AArch64CGFunc::SelectStructCopy(MemOperand &destOpnd, MemOperand &srcOpnd, uint32 structSize) { + for (uint32 offset = 0; offset < structSize;) { + PrimType primType; + auto loadSize = structSize - offset; + if (CGOptions::IsBigEndian() || loadSize >= k8ByteSize) { + primType = PTY_u64; // load exact size agg (BigEndian not support yet) + } else if (loadSize >= k4ByteSize) { + primType = PTY_u32; + } else if (loadSize >= k2ByteSize) { + primType = PTY_u16; } else { - CHECK_NULL_FATAL(addrOpd); - ldMopnd = CreateMemOperand(k64BitSize, *addrOpd, - CreateImmOperand(static_cast(offVal), k32BitSize, false)); + primType = PTY_u8; } - if (CGOptions::IsArm64ilp32()) { - RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k4ByteSize)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k32BitSize, PTY_i32), *vreg, *ldMopnd)); + auto bitSize = GetPrimTypeBitSize(primType); + auto *src = &GetMemOperandAddOffset(srcOpnd, offset, bitSize); + auto ldMop = PickLdInsn(bitSize, primType); + src = FixLargeMemOpnd(ldMop, *src, bitSize, kSecondOpnd); + auto &tmpOpnd 
= CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(primType))); + auto &ldInsn = GetInsnBuilder()->BuildInsn(ldMop, tmpOpnd, *src); + GetCurBB()->AppendInsn(ldInsn); - stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k32BitSize); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k32BitSize, PTY_i32), *vreg, *stMopnd)); - } else { - RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); + auto *dest = &GetMemOperandAddOffset(destOpnd, offset, bitSize); + auto strMop = PickStInsn(bitSize, primType); + dest = FixLargeMemOpnd(ldMop, *dest, bitSize, kSecondOpnd); + auto &strInsn = GetInsnBuilder()->BuildInsn(strMop, tmpOpnd, *dest); + GetCurBB()->AppendInsn(strInsn); + offset += GetPrimTypeSize(primType); + } +} - stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k64BitSize); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); +void AArch64CGFunc::GetAggregateDescFromAggregateNode(BaseNode &argExpr, AggregateDesc &aggDesc) { + MIRType *mirType = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + auto &dread = static_cast(argExpr); + aggDesc.sym = GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + mirType = aggDesc.sym->GetType(); + if (dread.GetFieldID() != 0) { + ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "NIY, non-structure"); + auto *structType = static_cast(mirType); + mirType = structType->GetFieldType(dread.GetFieldID()); + aggDesc.offset = static_cast(structType->GetFieldOffsetFromBaseAddr(dread.GetFieldID()).byteOffset); + aggDesc.isRefField = GetBecommon().IsRefField(*structType, dread.GetFieldID()); + } + } else if (argExpr.GetOpCode() == OP_iread) { + auto &iread = static_cast(argExpr); + auto *ptrType = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx()); + CHECK_FATAL(ptrType->IsMIRPtrType(), "NIY, non-ptr"); + mirType = static_cast(ptrType)->GetPointedType(); + if (iread.GetFieldID() != 0) { + ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "NIY, non-structure"); + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(iread.GetFieldID()); + aggDesc.offset = static_cast(structType->GetFieldOffsetFromBaseAddr(iread.GetFieldID()).byteOffset); + aggDesc.isRefField = GetBecommon().IsRefField(*structType, iread.GetFieldID()); } - } - /* Create the copy address parameter for the struct */ - RegOperand *fpopnd = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); - ImmOperand *offset = &CreateImmOperand(copyOffset, k64BitSize, false); - if (ploc.reg0 == kRinvalid) { - RegOperand &res = CreateRegisterOperandOfType(PTY_u64); - SelectAdd(res, *fpopnd, *offset, PTY_u64); - MemOperand &stMopnd2 = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); - GetCurBB()->AppendInsn( - GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), res, stMopnd2)); - return nullptr; } else { - RegOperand *parmOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), - k64BitSize, kRegTyInt); - SelectAdd(*parmOpnd, *fpopnd, *offset, PTY_a64); - return parmOpnd; + CHECK_FATAL(false, "NIY, unkown opcode."); } + aggDesc.mirType = mirType; } -void AArch64CGFunc::CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, - AArch64CallConvImpl &parmLocator, ListOperand &srcOpnds) { - RegOperand &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); - ImmOperand &offsetOpnd = CreateImmOperand(structCopyOffset, k64BitSize, false); - - CCLocInfo ploc; - parmLocator.LocateNextParm(structType, ploc); - if (ploc.reg0 != 0) { - RegOperand &res = GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), k64BitSize, kRegTyInt); - SelectAdd(res, spReg, offsetOpnd, PTY_a64); - 
srcOpnds.PushOpnd(res); +Operand *AArch64CGFunc::GetAddrOpndWithBaseNode(const BaseNode &argExpr, const MIRSymbol &sym, + uint32 offset, bool useMem) { + Operand *rhsOpnd = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + rhsOpnd = GetSymbolAddressOpnd(sym, static_cast(offset), useMem); + } else if (argExpr.GetOpCode() == OP_iread) { + auto &iread = static_cast(argExpr); + auto *baseOpnd = AArchHandleExpr(iread, *iread.Opnd(0)); + auto &offsetOpnd = CreateImmOperand(static_cast(offset), k32BitSize, false); + if (useMem) { + rhsOpnd = CreateMemOperand(k64BitSize, LoadIntoRegister(*baseOpnd, PTY_a64), offsetOpnd); + } else { + rhsOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAdd(*rhsOpnd, *baseOpnd, offsetOpnd, PTY_a64); + } } else { - RegOperand &parmOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); - SelectAdd(parmOpnd, spReg, offsetOpnd, PTY_a64); - MemOperand &stmopnd = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), parmOpnd, stmopnd)); + CHECK_FATAL(false, "NIY, unkown opcode"); + } + if (useMem) { + CHECK_FATAL(rhsOpnd->IsMemoryAccessOperand(), "NIY, must be mem opnd"); } + return rhsOpnd; } -void AArch64CGFunc::GenAggParmForDread(const BaseNode &parent, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { - int32 rhsOffset = 0; - BaseNode &argExpr = *parent.Opnd(argNo); - auto &dread = static_cast(argExpr); - MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); - CHECK_FATAL(sym != nullptr, "sym should not be nullptr"); - MIRType *ty = sym->GetType(); - if (dread.GetFieldID() != 0) { - auto *structType = static_cast(ty); - ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structType->GetFieldTyIdx(dread.GetFieldID())); - rhsOffset = GetBecommon().GetFieldOffset(*structType, dread.GetFieldID()).first; - } - uint64 symSize = 
GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); - if (symSize <= k16ByteSize) { - SelectParmListDreadSmallAggregate(*sym, *ty, srcOpnds, rhsOffset, parmLocator, dread.GetFieldID()); - } else if (symSize > kParmMemcpySize) { - CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); - structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); - } else { - SelectParmListDreadLargeAggregate(*sym, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); - } -} -void AArch64CGFunc::GenAggParmForIread(const BaseNode &parent, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { - int32 rhsOffset = 0; - BaseNode &argExpr = *parent.Opnd(argNo); - IreadNode &iread = static_cast(argExpr); - MIRPtrType *pointerty = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); - MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); - if (iread.GetFieldID() != 0) { - MIRStructType *structty = static_cast(ty); - ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); - rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; - } - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); - if (symSize <= k16ByteSize) { - SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); - } else if (symSize > kParmMemcpySize) { - RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); - if (rhsOffset > 0) { - RegOperand *addrOpnd = &LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); - regno_t vRegNO = NewVReg(kRegTyInt, k8ByteSize); - RegOperand *result = &CreateVirtualRegisterOperand(vRegNO); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *addrOpnd, - CreateImmOperand(rhsOffset, k64BitSize, false))); - } - CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); - 
structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); - } else { - SelectParmListIreadLargeAggregate(iread, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); - } -} - -void AArch64CGFunc::GenAggParmForIreadoff(BaseNode &parent, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { - int32 rhsOffset = 0; - BaseNode &argExpr = *parent.Opnd(argNo); - IreadoffNode &iread = static_cast(argExpr); - MIRStructType *ty = GetLmbcStructArgType(parent, argNo); - if (ty == nullptr) { - return; - } - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); - if (symSize <= k16ByteSize) { - rhsOffset = iread.GetOffset(); - SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); +void AArch64CGFunc::SelectParamPreCopy(const BaseNode &argExpr, AggregateDesc &aggDesc, + uint32 mirSize, int32 structCopyOffset, bool isArgUnused) { + auto &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + auto &offsetOpnd = CreateImmOperand(structCopyOffset, k64BitSize, false); + if (mirSize > kParmMemcpySize) { + auto *rhsOpnd = GetAddrOpndWithBaseNode(argExpr, *aggDesc.sym, aggDesc.offset, false); + ASSERT(rhsOpnd->IsRegister(), "NIY, must be reg"); + if (!isArgUnused) { // skip unused args + auto &destAddr = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAdd(destAddr, spReg, offsetOpnd, PTY_a64); + SelectStructMemcpy(destAddr, static_cast(*rhsOpnd), mirSize); + } } else { - CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); - structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + auto *rhsOpnd = GetAddrOpndWithBaseNode(argExpr, *aggDesc.sym, aggDesc.offset, true); + ASSERT(rhsOpnd->IsMemoryAccessOperand(), "NIY, must be mem opnd"); + if (!isArgUnused) { // skip unused args + auto *destMemOpnd = CreateMemOperand(k64BitSize, spReg, offsetOpnd); + SelectStructCopy(*destMemOpnd, static_cast(*rhsOpnd), 
mirSize); + } } } -void AArch64CGFunc::GenAggParmForIreadfpoff(BaseNode &parent, ListOperand &srcOpnds, - AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { - int32 rhsOffset = 0; - BaseNode &argExpr = *parent.Opnd(argNo); - IreadFPoffNode &iread = static_cast(argExpr); - MIRStructType *ty = GetLmbcStructArgType(parent, argNo); - if (ty == nullptr) { /* param < arg */ +void AArch64CGFunc::SelectParmListPreprocessForAggregate(BaseNode &argExpr, int32 &structCopyOffset, + std::vector &argsDesc, + bool isArgUnused) { + AggregateDesc aggDesc; + GetAggregateDescFromAggregateNode(argExpr, aggDesc); + + auto mirSize = aggDesc.mirType->GetSize(); + if (mirSize <= k16BitSize) { + (void)argsDesc.emplace_back(aggDesc.mirType, &argExpr, aggDesc.sym, aggDesc.offset); return; } - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); - if (symSize <= k16ByteSize) { - SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); - } else { - CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); - structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); - } -} -void AArch64CGFunc::SelectParmListForAggregate(BaseNode &parent, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, - int32 &structCopyOffset, size_t argNo, PrimType ¶mPType) { - BaseNode &argExpr = *parent.Opnd(argNo); - if (argExpr.GetOpCode() == OP_dread) { - GenAggParmForDread(parent, srcOpnds, parmLocator, structCopyOffset, argNo); - } else if (argExpr.GetOpCode() == OP_iread) { - GenAggParmForIread(parent, srcOpnds, parmLocator, structCopyOffset, argNo); - } else if (argExpr.GetOpCode() == OP_ireadfpoff) { - GenAggParmForIreadfpoff(parent, srcOpnds, parmLocator, structCopyOffset, argNo); - } else if (argExpr.GetOpCode() == OP_ireadoff) { - GenAggParmForIreadoff(parent, srcOpnds, parmLocator, structCopyOffset, argNo); - } else if (argExpr.GetOpCode() == OP_constval) { - paramPType = argExpr.GetPrimType(); + PrimType 
baseType = PTY_begin; + size_t elemNum = 0; + if (IsHomogeneousAggregates(*aggDesc.mirType, baseType, elemNum)) { + // B.3 If the argument type is an HFA or an HVA, then the argument is used unmodified. + (void)argsDesc.emplace_back(aggDesc.mirType, &argExpr, aggDesc.sym, aggDesc.offset); return; - } else { - CHECK_FATAL(false, "NYI"); } -} -size_t AArch64CGFunc::SelectParmListGetStructReturnSize(StmtNode &naryNode) { - if (naryNode.GetOpCode() == OP_call) { - CallNode &callNode = static_cast(naryNode); - MIRFunction *callFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); - TyIdx retIdx = callFunc->GetReturnTyIdx(); - if (callFunc->IsFirstArgReturn()) { - MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(callFunc->GetFormalDefVec()[0].formalTyIdx); - return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); - } - size_t retSize = GetBecommon().GetTypeSize(retIdx.GetIdx()); - if ((retSize == 0) && callFunc->IsReturnStruct()) { - TyIdx tyIdx = callFunc->GetFuncRetStructTyIdx(); - return GetBecommon().GetTypeSize(tyIdx); - } - return retSize; - } else if (naryNode.GetOpCode() == OP_icall) { - IcallNode &icallNode = static_cast(naryNode); - CallReturnVector *p2nrets = &icallNode.GetReturnVec(); - if (p2nrets->size() == k1ByteSize) { - StIdx stIdx = (*p2nrets)[0].first; - MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); - if (sym != nullptr) { - return GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()); - } - } - } else if (naryNode.GetOpCode() == OP_icallproto) { - IcallNode &icallProto = static_cast(naryNode); - MIRFuncType *funcTy = static_cast( - GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallProto.GetRetTyIdx())); - if (funcTy->FirstArgReturn()) { - MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTy->GetNthParamType(0)); - return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); - } - return 
GetBecommon().GetTypeSize(funcTy->GetRetTyIdx()); - } - return 0; -} + // B.4 If the argument type is a Composite Type that is larger than 16 bytes, + // then the argument is copied to memory allocated by the caller + // and the argument is replaced by a pointer to the copy. + uint32 align = aggDesc.mirType->GetAlign() > k8ByteSize ? k16ByteSize : k8ByteSize; + structCopyOffset = static_cast(RoundUp(static_cast(structCopyOffset), align)); -void AArch64CGFunc::GenLargeStructCopyForDread(BaseNode &argExpr, int32 &structCopyOffset) { - int32 rhsOffset = 0; - DreadNode &dread = static_cast(argExpr); - MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); - CHECK_FATAL(sym != nullptr, "sym should not be nullptr"); - MIRType *ty = sym->GetType(); - if (dread.GetFieldID() != 0) { - MIRStructType *structty = static_cast(ty); - ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(dread.GetFieldID())); - rhsOffset = GetBecommon().GetFieldOffset(*structty, dread.GetFieldID()).first; - } - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); - if (symSize > kParmMemcpySize) { - CreateCallStructParamMemcpy(*sym, static_cast(symSize), structCopyOffset, rhsOffset); - structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); - } else if (symSize > k16ByteSize) { - uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); - structCopyOffset += static_cast(numMemOp * GetPointerSize()); - } -} - -void AArch64CGFunc::GenLargeStructCopyForIread(BaseNode &argExpr, int32 &structCopyOffset) { - int32 rhsOffset = 0; - IreadNode &iread = static_cast(argExpr); - MIRPtrType *pointerty = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); - MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); - if (iread.GetFieldID() != 0) { - MIRStructType *structty = static_cast(ty); - ty = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); - rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; - } - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); - if (symSize > kParmMemcpySize) { - RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); - RegOperand &addrOpnd = LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); - CreateCallStructParamMemcpy(addrOpnd, static_cast(symSize), structCopyOffset, rhsOffset); - structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); - } else if (symSize > k16ByteSize) { - uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); - structCopyOffset += static_cast(numMemOp * GetPointerSize()); - } -} - -void AArch64CGFunc::GenLargeStructCopyForIreadfpoff(BaseNode &parent, BaseNode &argExpr, - int32 &structCopyOffset, size_t argNo) { - IreadFPoffNode &ireadoff = static_cast(argExpr); - MIRStructType *ty = GetLmbcStructArgType(parent, argNo); - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex()); - if (symSize > k16ByteSize) { /* kParmMemcpySize */ - RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); - RegOperand &addrOpnd = CreateRegisterOperandOfType(PTY_a64); - ImmOperand &immOpnd = CreateImmOperand(ireadoff.GetOffset(), k32BitSize, true); - GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, addrOpnd, *rfp, immOpnd)); - CreateCallStructParamMemcpy(addrOpnd, static_cast(symSize), structCopyOffset, 0); - structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); - } -} - -void AArch64CGFunc::GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, - int32 &structCopyOffset, size_t argNo) { - IreadoffNode &ireadoff = static_cast(argExpr); - MIRStructType *ty = GetLmbcStructArgType(parent, argNo); - uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex()); - if (symSize > k16ByteSize) { /* 
kParmMemcpySize */ - RegOperand *addrOpnd = static_cast( - HandleExpr(ireadoff, *(ireadoff.Opnd(0)))); - int32 fromOffset = ireadoff.GetOffset(); - CreateCallStructParamMemcpy(*addrOpnd, static_cast(symSize), structCopyOffset, fromOffset); - structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); - } -} - -void AArch64CGFunc::SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, - int32 &structCopyOffset, size_t argNo) { - if (argExpr.GetOpCode() == OP_dread) { - GenLargeStructCopyForDread(argExpr, structCopyOffset); - } else if (argExpr.GetOpCode() == OP_iread) { - GenLargeStructCopyForIread(argExpr, structCopyOffset); - } else if (argExpr.GetOpCode() == OP_ireadfpoff) { - GenLargeStructCopyForIreadfpoff(parent, argExpr, structCopyOffset, argNo); - } else if (argExpr.GetOpCode() == OP_ireadoff) { - GenLargeStructCopyForIreadoff(parent, argExpr, structCopyOffset, argNo); - } -} + SelectParamPreCopy(argExpr, aggDesc, static_cast(mirSize), structCopyOffset, isArgUnused); -/* preprocess call in parmlist */ -bool AArch64CGFunc::MarkParmListCall(BaseNode &expr) { - if (!CGOptions::IsPIC()) { - return false; - } - switch (expr.GetOpCode()) { - case OP_addrof: { - auto &addrNode = static_cast(expr); - MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrNode.GetStIdx()); - if (symbol->IsThreadLocal()) { - return true; - } - break; - } - default: { - for (size_t i = 0; i < expr.GetNumOpnds(); i++) { - if (expr.Opnd(i)) { - if (MarkParmListCall(*expr.Opnd(i))) { - return true; - } - } - } - break; - } - } - return false; + (void)argsDesc.emplace_back(aggDesc.mirType, nullptr, nullptr, + static_cast(structCopyOffset), true); + structCopyOffset += static_cast(RoundUp(mirSize, k8ByteSize)); } -void AArch64CGFunc::SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs) { - size_t i = start; +// Stage B - Pre-padding and extension of arguments +bool AArch64CGFunc::SelectParmListPreprocess(StmtNode &naryNode, 
size_t start, std::vector &argsDesc, + const MIRFunction *callee) { + bool hasSpecialArg = false; int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize(); - for (; i < naryNode.NumOpnds(); ++i) { + for (size_t i = start; i < naryNode.NumOpnds(); ++i) { BaseNode *argExpr = naryNode.Opnd(i); + ASSERT(argExpr != nullptr, "not null check"); PrimType primType = argExpr->GetPrimType(); - if (MarkParmListCall(*argExpr)) { - (void)specialArgs.emplace(i); - } ASSERT(primType != PTY_void, "primType should not be void"); - if (primType != PTY_agg) { - continue; + if (primType == PTY_agg) { + SelectParmListPreprocessForAggregate(*argExpr, structCopyOffset, argsDesc, + (callee && callee->GetFuncDesc().IsArgUnused(i))); + } else { + auto *mirType = GlobalTables::GetTypeTable().GetPrimType(primType); + (void)argsDesc.emplace_back(mirType, argExpr); + } + + if (MarkParmListCall(*argExpr)) { + argsDesc.rbegin()->isSpecialArg = true; + hasSpecialArg = true; } - SelectParmListPreprocessLargeStruct(naryNode, *argExpr, structCopyOffset, i); } + return hasSpecialArg; } -bool AArch64CGFunc::IsFirstArgReturn(StmtNode &naryNode) { +std::pair AArch64CGFunc::GetCalleeFunction(StmtNode &naryNode) const { MIRFunction *callee = nullptr; + MIRFuncType *calleeType = nullptr; if (dynamic_cast(&naryNode) != nullptr) { auto calleePuIdx = static_cast(naryNode).GetPUIdx(); callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); - return callee->IsFirstArgReturn(); + calleeType = callee->GetMIRFuncType(); } else if (naryNode.GetOpCode() == OP_icallproto) { auto *iCallNode = &static_cast(naryNode); MIRType *protoType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode->GetRetTyIdx()); - MIRFuncType *funcType = nullptr; if (protoType->IsMIRPtrType()) { - funcType = static_cast(protoType)->GetPointedFuncType(); + calleeType = static_cast(protoType)->GetPointedFuncType(); } else if (protoType->IsMIRFuncType()) { - funcType = static_cast(protoType); + calleeType = 
static_cast(protoType); } - CHECK_FATAL(funcType != nullptr, "cannot find prototype for icall"); - return funcType->FirstArgReturn(); } - return false; + return {callee, calleeType}; } -bool AArch64CGFunc::Is64x1vec(StmtNode &naryNode, BaseNode &argExpr, uint32 pnum) { - bool is64x1vec = false; - if (dynamic_cast(&naryNode) != nullptr) { - auto calleePuIdx = static_cast(naryNode).GetPUIdx(); - MIRFunction *callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); - if (pnum < callee->GetFormalCount() && callee->GetFormal(pnum) != nullptr) { - is64x1vec = callee->GetFormal(pnum)->GetAttr(ATTR_oneelem_simd); - } - } - switch (argExpr.op) { - case OP_dread: { - auto &dNode = static_cast(argExpr); - MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(dNode.GetStIdx()); - ASSERT(symbol != nullptr, "nullptr check"); - if (dNode.GetFieldID() != 0) { - auto *structType = static_cast(symbol->GetType()); - ASSERT(structType != nullptr, "SelectParmList: non-zero fieldID for non-structure"); - FieldAttrs fa = structType->GetFieldAttrs(dNode.GetFieldID()); - is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); - } else { - is64x1vec = symbol->GetAttr(ATTR_oneelem_simd); - } - break; - } - case OP_iread: { - auto &iNode = static_cast(argExpr); - MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iNode.GetTyIdx()); - auto *ptrTyp = static_cast(type); - ASSERT(ptrTyp != nullptr, "expect a pointer type at iread node"); - MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyp->GetPointedTyIdx()); - if (iNode.GetFieldID() != 0) { - auto *structType = static_cast(pointedTy); - FieldAttrs fa = structType->GetFieldAttrs(iNode.GetFieldID()); - is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); - } else { - TypeAttrs ta = static_cast(ptrTyp)->GetTypeAttrs(); - is64x1vec = ta.GetAttr(ATTR_oneelem_simd); - } - break; - } - case OP_constval: { - CallNode *call = safe_cast(&naryNode); - if (call == nullptr) { - break; - } - MIRFunction *fn = 
GlobalTables::GetFunctionTable().GetFunctionFromPuidx(call->GetPUIdx()); - if (fn == nullptr || fn->GetFormalCount() == 0 || fn->GetFormalCount() <= pnum) { - break; +void AArch64CGFunc::SelectParmListSmallStruct(const MIRType &mirType, const CCLocInfo &ploc, + Operand &addr, ListOperand &srcOpnds) { + uint32 offset = 0; + ASSERT(addr.IsMemoryAccessOperand(), "NIY, must be mem opnd"); + uint64 size = mirType.GetSize(); + auto &memOpnd = static_cast(addr); + // ldr memOpnd to parmReg + auto loadParamFromMem = + [this, &offset, &memOpnd, &srcOpnds, &size](AArch64reg regno, PrimType primType) { + auto &phyReg = GetOrCreatePhysicalRegisterOperand( + regno, GetPrimTypeBitSize(primType), GetRegTyFromPrimTy(primType)); + bool isFpReg = !IsPrimitiveInteger(primType) || IsPrimitiveVectorFloat(primType); + if (!CGOptions::IsBigEndian() && !isFpReg && (size - offset < k8ByteSize)) { + // load exact size agg (BigEndian not support yet) + RegOperand *valOpnd = nullptr; + for (uint32 exactOfst = 0; exactOfst < (size - offset);) { + PrimType exactPrimType; + auto loadSize = size - offset - exactOfst; + if (loadSize >= k4ByteSize) { + exactPrimType = PTY_u32; + } else if (loadSize >= k2ByteSize) { + exactPrimType = PTY_u16; + } else { + exactPrimType = PTY_u8; + } + auto ldBitSize = GetPrimTypeBitSize(exactPrimType); + auto *ldOpnd = &GetMemOperandAddOffset(memOpnd, exactOfst + offset, ldBitSize); + auto ldMop = PickLdInsn(ldBitSize, exactPrimType); + ldOpnd = FixLargeMemOpnd(ldMop, *ldOpnd, ldBitSize, kSecondOpnd); + auto &tmpValOpnd = + CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(exactPrimType))); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(ldMop, tmpValOpnd, *ldOpnd); + GetCurBB()->AppendInsn(ldInsn); + if (exactOfst != 0) { + auto &shiftOpnd = CreateImmOperand(exactOfst * kBitsPerByte, k32BitSize, false); + SelectShift(tmpValOpnd, tmpValOpnd, shiftOpnd, kShiftLeft, primType); + } + if (valOpnd) { + SelectBior(*valOpnd, *valOpnd, tmpValOpnd, primType); + } 
else { + valOpnd = &tmpValOpnd; + } + exactOfst += GetPrimTypeSize(exactPrimType); } - is64x1vec = fn->GetFormalDefAt(pnum).formalAttrs.GetAttr(ATTR_oneelem_simd); - break; - } - default: - break; + SelectCopy(phyReg, primType, *valOpnd, primType); + } else { + auto *ldOpnd = &GetMemOperandAddOffset(memOpnd, offset, GetPrimTypeBitSize(primType)); + auto ldMop = PickLdInsn(GetPrimTypeBitSize(primType), primType); + ldOpnd = FixLargeMemOpnd(ldMop, *ldOpnd, GetPrimTypeBitSize(primType), kSecondOpnd); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(ldMop, phyReg, *ldOpnd); + GetCurBB()->AppendInsn(ldInsn); + } + srcOpnds.PushOpnd(phyReg); + offset += GetPrimTypeSize(primType); + }; + loadParamFromMem(static_cast(ploc.reg0), ploc.primTypeOfReg0); + if (ploc.reg1 != kRinvalid) { + loadParamFromMem(static_cast(ploc.reg1), ploc.primTypeOfReg1); + } + if (ploc.reg2 != kRinvalid) { + loadParamFromMem(static_cast(ploc.reg2), ploc.primTypeOfReg2); + } + if (ploc.reg3 != kRinvalid) { + loadParamFromMem(static_cast(ploc.reg3), ploc.primTypeOfReg3); } - return is64x1vec; } -/* - SelectParmList generates an instrunction for each of the parameters - to load the parameter value into the corresponding register. - We return a list of registers to the call instruction because - they may be needed in the register allocation phase. - */ +void AArch64CGFunc::SelectParmListPassByStack(const MIRType &mirType, Operand &opnd, + uint32 memOffset, bool preCopyed, + std::vector &insnForStackArgs) { + if (!preCopyed && mirType.GetPrimType() == PTY_agg) { + ASSERT(opnd.IsMemoryAccessOperand(), "NIY, must be mem opnd"); + auto &actOpnd = CreateMemOpnd(RSP, memOffset, k64BitSize); + SelectStructCopy(actOpnd, static_cast(opnd), static_cast(mirType.GetSize())); + return; + } + + PrimType primType = preCopyed ? 
PTY_a64 : mirType.GetPrimType(); + CHECK_FATAL(primType != PTY_i128 && primType != PTY_u128, "NIY, i128 is unsupported"); + auto &valReg = LoadIntoRegister(opnd, primType); + auto &actMemOpnd = CreateMemOpnd(RSP, memOffset, GetPrimTypeBitSize(primType)); + Insn &strInsn = GetInsnBuilder()->BuildInsn( + PickStInsn(GetPrimTypeBitSize(primType), primType), valReg, actMemOpnd); + actMemOpnd.SetStackArgMem(true); + if (Globals::GetInstance()->GetOptimLevel() == static_cast(CGOptions::kLevel2) && + insnForStackArgs.size() < kShiftAmount12) { + (void)insnForStackArgs.emplace_back(&strInsn); + } else { + GetCurBB()->AppendInsn(strInsn); + } +} + +// SelectParmList generates an instrunction for each of the parameters +// to load the parameter value into the corresponding register. +// We return a list of registers to the call instruction because +// they may be needed in the register allocation phase. void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative) { size_t opndIdx = 0; - /* the first opnd of ICallNode is not parameter of function */ + // the first opnd of ICallNode is not parameter of function if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto || isCallNative) { opndIdx++; } - bool firstArgReturn = IsFirstArgReturn(naryNode); - std::set specialArgs; - SelectParmListPreprocess(naryNode, opndIdx, specialArgs); - bool specialArg = false; + auto [callee, calleeType] = GetCalleeFunction(naryNode); + + std::vector argsDesc; + bool hasSpecialArg = SelectParmListPreprocess(naryNode, opndIdx, argsDesc, callee); BB *curBBrecord = GetCurBB(); BB *tmpBB = nullptr; - if (!specialArgs.empty()) { + if (hasSpecialArg) { tmpBB = CreateNewBB(); - specialArg = true; } + AArch64CallConvImpl parmLocator(GetBecommon()); CCLocInfo ploc; - int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize(); std::vector insnForStackArgs; - uint32 stackArgsCount = 0; - for (uint32 pnum = 0; opndIdx < naryNode.NumOpnds(); 
++opndIdx, ++pnum) { - if (specialArg) { + + for (size_t i = 0; i < argsDesc.size(); ++i) { + if (hasSpecialArg) { ASSERT(tmpBB, "need temp bb for lower priority args"); - SetCurBB((specialArgs.count(opndIdx) > 0) ? *curBBrecord : *tmpBB); - } - MIRType *ty = nullptr; - BaseNode *argExpr = naryNode.Opnd(opndIdx); - ASSERT(argExpr != nullptr, "invalid expr"); - bool is64x1vec = Is64x1vec(naryNode, *argExpr, pnum); - - PrimType paramPType = GetParamPrimType(naryNode, pnum, isCallNative); - ASSERT(paramPType != PTY_void, "primType should not be void"); - /* use alloca */ - if (paramPType == PTY_agg) { - SelectParmListForAggregate(naryNode, srcOpnds, parmLocator, structCopyOffset, opndIdx, paramPType); - continue; + SetCurBB(argsDesc[i].isSpecialArg ? *curBBrecord : *tmpBB); } - ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(paramPType)]; - RegOperand *expRegOpnd = nullptr; - Operand *opnd = HandleExpr(naryNode, *argExpr); - if (opnd->GetKind() == Operand::kOpdRegister && static_cast(opnd)->GetIF64Vec()) { - is64x1vec = true; - } - if (!opnd->IsRegister()) { - opnd = &LoadIntoRegister(*opnd, paramPType); - } - expRegOpnd = static_cast(opnd); - if ((pnum == 0) && firstArgReturn) { - parmLocator.InitCCLocInfo(ploc); - ploc.reg0 = R8; - } else { - parmLocator.LocateNextParm(*ty, ploc); - } - /* is64x1vec should be an int64 value in an FP/simd reg for ABI compliance, - convert R-reg to equivalent V-reg */ - PrimType destPrimType = paramPType; - if (is64x1vec && ploc.reg0 != kRinvalid && ploc.reg0 < R7) { - ploc.reg0 = AArch64Abi::kFloatParmRegs[static_cast(ploc.reg0) - 1]; - destPrimType = PTY_f64; + auto *mirType = argsDesc[i].mirType; + + // get param opnd, for unpreCody agg, opnd must be mem opnd + Operand *opnd = nullptr; + auto preCopyed = argsDesc[i].preCopyed; + if (preCopyed) { // preCopyed agg, passed by address + naryNode.SetMayTailcall(false); // has preCopyed arguments, don't do tailcall + opnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, 
k8ByteSize)); + auto &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + SelectAdd(*opnd, spReg, CreateImmOperand(argsDesc[i].offset, k64BitSize, false), PTY_a64); + } else if (mirType->GetPrimType() == PTY_agg) { + opnd = GetAddrOpndWithBaseNode(*argsDesc[i].argExpr, *argsDesc[i].sym, argsDesc[i].offset); + } else { // base type, clac true val + opnd = &LoadIntoRegister(*AArchHandleExpr(naryNode, *argsDesc[i].argExpr), mirType->GetPrimType()); } + parmLocator.LocateNextParm(*mirType, ploc, (i == 0), calleeType); - /* skip unused args */ - if (dynamic_cast(&naryNode) != nullptr) { - auto calleePuIdx = static_cast(naryNode).GetPUIdx(); - MIRFunction *callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); - if (callee->GetFuncDesc().IsArgUnused(pnum)) { - continue; - } + // skip unused args + if (callee && callee->GetFuncDesc().IsArgUnused(i)) { + continue; } - if (ploc.reg0 != kRinvalid) { /* load to the register. */ - CHECK_FATAL(expRegOpnd != nullptr, "null ptr check"); - RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( - static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType)); - if (!DoCallerEnsureValidParm(parmRegOpnd, *expRegOpnd, paramPType)) { - SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, paramPType); - } - srcOpnds.PushOpnd(parmRegOpnd); - } else { /* store to the memory segment for stack-passsed arguments. 
*/ - if (CGOptions::IsBigEndian()) { - if (GetPrimTypeBitSize(paramPType) < k64BitSize) { - ploc.memOffset = ploc.memOffset + static_cast(k4BitSize); - } - } - MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(paramPType)); - Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(paramPType), paramPType), *expRegOpnd, - actMemOpnd); - actMemOpnd.SetStackArgMem(true); - if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel2 && stackArgsCount < kShiftAmount12) { - (void)insnForStackArgs.emplace_back(&strInsn); - stackArgsCount++; + if (ploc.reg0 != kRinvalid) { // load to the register. + if (mirType->GetPrimType() == PTY_agg && !preCopyed) { + SelectParmListSmallStruct(*mirType, ploc, *opnd, srcOpnds); } else { - GetCurBB()->AppendInsn(strInsn); + CHECK_FATAL(ploc.reg1 == kRinvalid, "NIY"); + auto &phyReg = GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), + GetPrimTypeBitSize(ploc.primTypeOfReg0), GetRegTyFromPrimTy(ploc.primTypeOfReg0)); + ASSERT(opnd->IsRegister(), "NIY, must be reg"); + if (!DoCallerEnsureValidParm(phyReg, static_cast(*opnd), + ploc.primTypeOfReg0)) { + SelectCopy(phyReg, ploc.primTypeOfReg0, *opnd, ploc.primTypeOfReg0); + } + srcOpnds.PushOpnd(phyReg); } + continue; } - ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + + // store to the memory segment for stack-passsed arguments. 
+ if (CGOptions::IsBigEndian() && ploc.memSize < static_cast(k8ByteSize)) { + ploc.memOffset = ploc.memOffset + static_cast(k4ByteSize); + } + SelectParmListPassByStack(*mirType, *opnd, static_cast(ploc.memOffset), preCopyed, insnForStackArgs); } - if (specialArg) { + // if we have stack-passed arguments, don't do tailcall + parmLocator.InitCCLocInfo(ploc); + if (ploc.memOffset != 0) { + naryNode.SetMayTailcall(false); + } + if (hasSpecialArg) { ASSERT(tmpBB, "need temp bb for lower priority args"); curBBrecord->InsertAtEnd(*tmpBB); SetCurBB(*curBBrecord); @@ -8669,55 +7978,6 @@ void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bo } } -PrimType AArch64CGFunc::GetParamPrimType(StmtNode &naryNode, uint32 pnum, bool isCallNative) { - MIRFunction *callee = nullptr; - PrimType formalPType = maple::PTY_unknown; - if (dynamic_cast(&naryNode) != nullptr) { - /* - * For call, we can get the function instance, so we get info of parameters from MIRFunction. - */ - auto calleePuIdx = static_cast(naryNode).GetPUIdx(); - callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); - if (pnum < callee->GetFormalCount()) { /* avoid varags */ - MIRType *formalType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(callee->GetFormalDefAt(pnum).formalTyIdx); - ASSERT(formalType != nullptr, "get null mirType by tyIdx"); - formalPType = formalType->GetPrimType(); - } else { - pnum = (isCallNative ? ++pnum : pnum); - BaseNode *argExpr = naryNode.Opnd(pnum); - formalPType = argExpr->GetPrimType(); - } - } else if (naryNode.GetOpCode() == maple::OP_icallproto) { - /* - * For icallProto, we cannot get the function instance, but we can get funcTyIdx from icallNode, - * so we get info of parameters from MIRFuncType. 
- */ - auto &icallNode = static_cast(naryNode); - MIRType *protoType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx()); - MIRFuncType *funcType = nullptr; - if (protoType->IsMIRPtrType()) { - funcType = static_cast(protoType)->GetPointedFuncType(); - } else if (protoType->IsMIRFuncType()) { - funcType = static_cast(protoType); - } - ASSERT(funcType != nullptr, "gget funcType faild from icall"); - if (pnum < funcType->GetParamTypeList().size()) { /* avoid varags */ - MIRType *formalType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetParamTypeList()[pnum]); - ASSERT(formalType != nullptr, "get null mirType by tyIdx"); - formalPType = formalType->GetPrimType(); - } - } - if (formalPType == PTY_unknown) { - /* - * For icall or varargs ..., we cannot get either the function instance or the function type, - * so we get primType from operand of icallnode, but it may be imprecise. - */ - BaseNode *argExpr = naryNode.Opnd(++pnum); - formalPType = argExpr->GetPrimType(); - } - return formalPType; -} - bool AArch64CGFunc::DoCallerEnsureValidParm(RegOperand &destOpnd, RegOperand &srcOpnd, PrimType formalPType) { if (CGOptions::CalleeEnsureParam()) { return false; @@ -9054,7 +8314,7 @@ void AArch64CGFunc::IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymb FieldID fieldID = GetMirModule().GetMIRBuilder()->GetStructFieldIDFromFieldNameParentFirst(stringType, "count"); MIRType *fieldType = stringType->GetFieldType(fieldID); PrimType countPty = fieldType->GetPrimType(); - int32 offset = GetBecommon().GetFieldOffset(*stringType, fieldID).first; + int32 offset = GetBecommon().GetJClassFieldOffset(*stringType, fieldID).byteOffset; LabelIdx callBBLabIdx = CreateLabel(); RegOperand *srcCountOpnd = CheckStringIsCompressed(*GetCurBB(), *srcString, offset, countPty, callBBLabIdx); @@ -9138,12 +8398,12 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { BaseNode *funcArgExpr = callNode.Opnd(0); PrimType ptype = funcArgExpr->GetPrimType(); - 
Operand *funcOpnd = HandleExpr(callNode, *funcArgExpr); - RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, + Operand *funcOpnd = AArchHandleExpr(callNode, *funcArgExpr); + RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R9, GetPointerBitSize(), GetRegTyFromPrimTy(PTY_a64)); SelectCopy(livein, ptype, *funcOpnd, ptype); - RegOperand &extraOpnd = GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + RegOperand &extraOpnd = GetOrCreatePhysicalRegisterOperand(R9, GetPointerBitSize(), kRegTyInt); srcOpnds->PushOpnd(extraOpnd); } const std::string &funcName = fsym->GetName(); @@ -9155,6 +8415,9 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { return; } Insn &callInsn = AppendCall(*fsym, *srcOpnds); + if (callNode.GetMayTailCall()) { + callInsn.SetMayTailCall(); + } GetCurBB()->SetHasCall(); if (retType != nullptr) { callInsn.SetRetSize(static_cast(retType->GetSize())); @@ -9206,18 +8469,19 @@ void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) { RegOperand *regOpnd = static_cast(fptrOpnd); Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, *regOpnd, *srcOpnds); + if (icallNode.GetMayTailCall()) { + callInsn.SetMayTailCall(); + } + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx()); if (retType != nullptr) { callInsn.SetRetSize(static_cast(retType->GetSize())); callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); } - /* check if this icall use stack slot to return */ - CallReturnVector *p2nrets = &icallNode.GetReturnVec(); - if (p2nrets->size() == k1ByteSize) { - StIdx stIdx = (*p2nrets)[0].first; - MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); - if (sym != nullptr && (GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()) > k16ByteSize)) { + // check if this icall use stack slot to return + if (icallNode.GetReturnVec().size() == k1ByteSize) 
{ + if (retType != nullptr && IsReturnInMemory(*retType)) { SetStackProtectInfo(kRetureStackSlot); } } @@ -9268,7 +8532,7 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { MIRType *retTyp = GetFunction().GetReturnType(); AArch64CallConvImpl retLocator(GetBecommon()); CCLocInfo retMech; - retLocator.InitReturnInfo(*retTyp, retMech); + retLocator.LocateRetVal(*retTyp, retMech); if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) { RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()); PrimType oriPrimType = is64x1vec ? GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0(); @@ -9310,14 +8574,11 @@ void AArch64CGFunc::SelectReturn(Operand *opnd0) { } RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType) { - AArch64reg reg = R0; switch (sregIdx) { case kSregSp: - reg = RSP; - break; + return GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); case kSregFp: - reg = RFP; - break; + return GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); case kSregGp: { MIRSymbol *sym = GetCG()->GetGP(); if (sym == nullptr) { @@ -9341,21 +8602,37 @@ RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, Pr return GetOrCreateVirtualRegisterOperand(uCatch.regNOCatch); } } - case kSregRetval0: - if (!IsPrimitiveInteger(primType) || IsPrimitiveVectorFloat(primType)) { - reg = V0; - } - break; case kSregMethodhdl: if (methodHandleVreg == regno_t(-1)) { methodHandleVreg = NewVReg(kRegTyInt, k8BitSize); } return GetOrCreateVirtualRegisterOperand(methodHandleVreg); + default: + break; + } + + bool useFpReg = !IsPrimitiveInteger(primType) || IsPrimitiveVectorFloat(primType); + AArch64reg pReg = RLAST_INT_REG; + switch (sregIdx) { + case kSregRetval0: + pReg = useFpReg ? V0 : R0; + break; + case kSregRetval1: + pReg = useFpReg ? 
V1 : R1; + break; + case kSregRetval2: + pReg = V2; + break; + case kSregRetval3: + pReg = V3; + break; default: ASSERT(false, "Special pseudo registers NYI"); break; } - return GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); + uint32 bitSize = GetPrimTypeBitSize(primType); + bitSize = bitSize <= k32BitSize ? k32BitSize : bitSize; + return GetOrCreatePhysicalRegisterOperand(pReg, bitSize, GetRegTyFromPrimTy(primType)); } RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(std::string &asmAttr) { @@ -9496,7 +8773,7 @@ void AArch64CGFunc::SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, st if (CGOptions::IsPIC() && symbol->NeedGOT(CGOptions::IsPIE())) { /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); - MemOperand *memOpnd = CreateMemOperand(GetPointerSize() * kBitsPerByte, static_cast(*srcOpnd), + MemOperand *memOpnd = CreateMemOperand(GetPointerBitSize(), static_cast(*srcOpnd), offset, *symbol); (void)rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn( memOpnd->GetSize() == k64BitSize ? MOP_xldr : MOP_wldr, result, *memOpnd)); @@ -9697,7 +8974,7 @@ MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uin RegOperand &AArch64CGFunc::GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, PrimType baseType) { - RegOperand *index = &LoadIntoRegister(*HandleExpr(indexExpr, *(indexExpr.Opnd(0))), PTY_a64); + RegOperand *index = &LoadIntoRegister(*AArchHandleExpr(indexExpr, *(indexExpr.Opnd(0))), PTY_a64); RegOperand *srcOpnd = &CreateRegisterOperandOfType(PTY_a64); ImmOperand *imm = &CreateImmOperand(PTY_a64, shift); SelectShift(*srcOpnd, *index, *imm, kShiftLeft, PTY_a64); @@ -9817,7 +9094,7 @@ MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const Bas } /* 8 is 1 << 3; 4 is 1 << 2; 2 is 1 << 1; 1 is 1 << 0 */ uint32 shift = (unsignedScale == 8) ? 
3 : ((unsignedScale == 4) ? 2 : ((unsignedScale == 2) ? 1 : 0)); - RegOperand &base = static_cast(LoadIntoRegister(*HandleExpr(addrExpr, *baseExpr), PTY_a64)); + RegOperand &base = static_cast(LoadIntoRegister(*AArchHandleExpr(addrExpr, *baseExpr), PTY_a64)); TypeCvtNode *typeCvtNode = static_cast(indexExpr); PrimType fromType = typeCvtNode->FromType(); PrimType toType = typeCvtNode->GetPrimType(); @@ -9828,13 +9105,13 @@ MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const Bas MemOperand *memOpnd = nullptr; if ((fromType == PTY_i32) && (toType == PTY_a64)) { RegOperand &index = - static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32)); + static_cast(LoadIntoRegister(*AArchHandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32)); ExtendShiftOperand &extendOperand = CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, shift, k8BitSize); memOpnd = CreateMemOperand(GetPrimTypeBitSize(ptype), base, index, extendOperand); } else if ((fromType == PTY_u32) && (toType == PTY_a64)) { RegOperand &index = - static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32)); + static_cast(LoadIntoRegister(*AArchHandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32)); ExtendShiftOperand &extendOperand = CreateExtendShiftOperand(ExtendShiftOperand::kUXTW, shift, k8BitSize); memOpnd = CreateMemOperand(GetPrimTypeBitSize(ptype), base, index, extendOperand); @@ -9847,14 +9124,14 @@ MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode Operand *addrOpnd = nullptr; if ((addrExpr.GetOpCode() == OP_add || addrExpr.GetOpCode() == OP_sub) && addrExpr.Opnd(1)->GetOpCode() == OP_constval) { - addrOpnd = HandleExpr(addrExpr, *addrExpr.Opnd(0)); + addrOpnd = AArchHandleExpr(addrExpr, *addrExpr.Opnd(0)); ConstvalNode *constOfstNode = static_cast(addrExpr.Opnd(1)); ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); MIRIntConst *intOfst = 
safe_cast(constOfstNode->GetConstVal()); CHECK_FATAL(intOfst != nullptr, "just checking"); offset = (addrExpr.GetOpCode() == OP_add) ? offset + intOfst->GetSXTValue() : offset - intOfst->GetSXTValue(); } else { - addrOpnd = HandleExpr(parent, addrExpr); + addrOpnd = AArchHandleExpr(parent, addrExpr); } addrOpnd = static_cast(&LoadIntoRegister(*addrOpnd, PTY_a64)); Insn *lastInsn = GetCurBB() == nullptr ? nullptr : GetCurBB()->GetLastInsn(); @@ -9938,8 +9215,8 @@ void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vector pt, PrimType retPrimType, bool is2ndRet) { std::string newName = funcName; // Check whether we have a maple version of libcall and we want to use it instead. - if (!CGOptions::IsDuplicateAsmFileEmpty() && asmMap.find(funcName) != asmMap.end()) { - newName = asmMap.at(funcName); + if (!CGOptions::IsDuplicateAsmFileEmpty() && kAsmMap.find(funcName) != kAsmMap.end()) { + newName = kAsmMap.at(funcName); } MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); st->SetNameStrIdx(newName); @@ -9994,7 +9271,7 @@ void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vector(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits), immOpnd->GetSize(), immOpnd->IsSignedValue()); mOpCode = is64Bits ? 
MOP_xaddrri24 : MOP_waddrri24; - BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); + BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kShiftLSL, kShiftAmount12, k64BitSize); Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, immOpnd2, shiftopnd); ASSERT(IsOperandImmValid(mOpCode, &immOpnd2, kInsnThirdOpnd), "immOpnd2 appears invalid"); if (isDest) { @@ -10162,6 +9439,9 @@ void AArch64CGFunc::SelectAddAfterInsnBySize(Operand &resOpnd, Operand &opnd0, O (void)insn.GetBB()->InsertInsnBefore(insn, movInsn); (void)insn.GetBB()->InsertInsnBefore(insn, newInsn); } + if (!VERIFY_INSN(&movInsn)) { + SPLIT_INSN(&movInsn, this); + } } } @@ -10231,7 +9511,8 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 memSize) { if (vrNum >= vReg.VRegTableSize()) { CHECK_FATAL(false, "index out of range in AArch64CGFunc::FreeSpillRegMem"); } - uint32 memBitSize = memSize <= k64BitSize ? k64BitSize : k128BitSize; + uint32 memBitSize = (memSize <= k32BitSize) ? k32BitSize : + (memSize <= k64BitSize) ? 
k64BitSize : k128BitSize; auto it = reuseSpillLocMem.find(memBitSize); if (it != reuseSpillLocMem.end()) { MemOperand *memOpnd = it->second->GetOne(); @@ -10242,7 +9523,7 @@ MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 memSize) { } RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand(); - int64 offset = GetOrCreatSpillRegLocation(vrNum); + int64 offset = GetOrCreatSpillRegLocation(vrNum, memBitSize / kBitsPerByte); MemOperand *memOpnd = nullptr; ImmOperand &offsetOpnd = CreateImmOperand(offset, k64BitSize, false); memOpnd = CreateMemOperand(memBitSize, baseOpnd, offsetOpnd); @@ -10283,7 +9564,7 @@ MemOperand *AArch64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx i) { AArch64reg AArch64CGFunc::GetReturnRegisterNumber() { AArch64CallConvImpl retLocator(GetBecommon()); CCLocInfo retMech; - retLocator.InitReturnInfo(*(GetFunction().GetReturnType()), retMech); + retLocator.LocateRetVal(*(GetFunction().GetReturnType()), retMech); if (retMech.GetRegCount() > 0) { return static_cast(retMech.GetReg0()); } @@ -10425,7 +9706,7 @@ Insn &AArch64CGFunc::GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, List OfstOperand &offsetOpnd = CreateOfstOpnd(*symbol, 0); Insn &adrpInsn = GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpReg, stOpnd); GetCurBB()->AppendInsn(adrpInsn); - MemOperand *memOrd = CreateMemOperand(GetPointerSize() * kBitsPerByte, static_cast(tmpReg), + MemOperand *memOrd = CreateMemOperand(GetPointerBitSize(), static_cast(tmpReg), offsetOpnd, *symbol); Insn &ldrInsn = GetInsnBuilder()->BuildInsn(memOrd->GetSize() == k64BitSize ? 
MOP_xldr : MOP_wldr, tmpReg, *memOrd); GetCurBB()->AppendInsn(ldrInsn); @@ -10480,18 +9761,26 @@ Insn &AArch64CGFunc::GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListO * srcOpnds : list operand of the function need to be called * Return: the 'blr' instruction */ -Insn &AArch64CGFunc::GenerateGlobalNopltCallAfterInsn(const MIRSymbol &sym, ListOperand &srcOpnds) { - MIRFunction *func = sym.GetValue().mirFunc; - if ((CGOptions::IsPIE() && !func->GetBody()) || (CGOptions::IsPIC() && !func->IsStatic())) { - StImmOperand &stOpnd = CreateStImmOperand(sym, 0, 0); - RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); - SelectAddrof(tmpReg, stOpnd); - Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, tmpReg, srcOpnds); +Insn &AArch64CGFunc::GenerateGlobalNopltCallAfterInsn(const MIRSymbol &funcSym, ListOperand &srcOpnds) { + MIRFunction *func = funcSym.GetValue().mirFunc; + if (func && func->IsDefaultVisibility() && + ((CGOptions::IsPIE() && !func->GetBody()) || (CGOptions::IsShlib() && !func->IsStatic()))) { + StImmOperand &stOpnd = CreateStImmOperand(funcSym, 0, 0); + RegOperand *tmpReg = nullptr; + if (!IsAfterRegAlloc()) { + tmpReg = &CreateRegisterOperandOfType(PTY_u64); + } else { + // After RA, we use reserved X16 as tmpReg. 
+ // Utill now it will not clobber other X16 def + tmpReg = &GetOrCreatePhysicalRegisterOperand(R16, maple::k64BitSize, kRegTyInt); + } + SelectAddrof(*tmpReg, stOpnd); + Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, *tmpReg, srcOpnds); GetCurBB()->AppendInsn(callInsn); GetCurBB()->SetHasCall(); return callInsn; } else { - Operand &targetOpnd = GetOrCreateFuncNameOpnd(sym); + Operand &targetOpnd = GetOrCreateFuncNameOpnd(funcSym); Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xbl, targetOpnd, srcOpnds); GetCurBB()->AppendInsn(callInsn); GetCurBB()->SetHasCall(); @@ -10617,7 +9906,6 @@ void AArch64CGFunc::SelectMPLClinitCheck(const IntrinsiccallNode &intrnNode) { AddrofNode *addrof = static_cast(arg); MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrof->GetStIdx()); ASSERT(symbol->GetName().find(CLASSINFO_PREFIX_STR) == 0, "must be a symbol with __classinfo__"); - if (!symbol->IsMuidDataUndefTab()) { std::string ptrName = namemangler::kPtrPrefixStr + symbol->GetName(); MIRType *ptrType = GlobalTables::GetTypeTable().GetPtr(); @@ -10661,10 +9949,10 @@ void AArch64CGFunc::SelectMPLClinitCheck(const IntrinsiccallNode &intrnNode) { } } void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { - /* FPLR only pushed in regalloc() after intrin function */ + // FPLR only pushed in regalloc() after intrin function Operand &stkOpnd = GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); - /* __stack */ + // __stack ImmOperand *offsOpnd; if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */ @@ -10679,12 +9967,12 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { } else { SelectAdd(vReg, stkOpnd, *offsOpnd, GetLoweredPtrType()); /* stack pointer */ } - /* mem operand in va_list struct (lhs) */ + // mem operand in va_list struct (lhs) MemOperand *strOpnd = CreateMemOperand(k64BitSize, opnd, 
CreateImmOperand(0, k32BitSize, false)); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd)); - /* __gr_top ; it's the same as __stack before the 1st va_arg */ + // __gr_top ; it's the same as __stack before the 1st va_arg ImmOperand *offOpnd = nullptr; if (CGOptions::IsArm64ilp32()) { offOpnd = &CreateImmOperand(GetPointerSize(), k64BitSize, false); @@ -10739,7 +10027,7 @@ void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) { */ isIntrnCallForC = true; BaseNode *argExpr = intrnNode.Opnd(0); - Operand *opnd = HandleExpr(intrnNode, *argExpr); + Operand *opnd = AArchHandleExpr(intrnNode, *argExpr); RegOperand &opnd0 = LoadIntoRegister(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */ /* Find beginning of unnamed arg on stack. @@ -10761,7 +10049,6 @@ void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) { } else { stkSize = static_cast(RoundUp(stkSize, GetPointerSize())); } - GenCVaStartIntrin(opnd0, stkSize); } @@ -10775,8 +10062,8 @@ void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) { */ void AArch64CGFunc::SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccall) { auto primType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(intrinsiccall.GetTyIdx())->GetPrimType(); - auto *addr = HandleExpr(intrinsiccall, *intrinsiccall.Opnd(0)); - auto *value = HandleExpr(intrinsiccall, *intrinsiccall.Opnd(1)); + auto *addr = AArchHandleExpr(intrinsiccall, *intrinsiccall.Opnd(0)); + auto *value = AArchHandleExpr(intrinsiccall, *intrinsiccall.Opnd(1)); auto *memOrderOpnd = intrinsiccall.Opnd(kInsnThirdOpnd); std::memory_order memOrder = std::memory_order_seq_cst; if (memOrderOpnd->IsConstval()) { @@ -10800,8 +10087,8 @@ void AArch64CGFunc::SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccall) void AArch64CGFunc::SelectCAtomicStore(const IntrinsiccallNode &intrinsiccall) { auto primType = GlobalTables::GetTypeTable(). 
GetTypeFromTyIdx(intrinsiccall.GetTyIdx())->GetPrimType(); - auto *addrOpnd = HandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnFirstOpnd)); - auto *valueOpnd = HandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnSecondOpnd)); + auto *addrOpnd = AArchHandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnFirstOpnd)); + auto *valueOpnd = AArchHandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnSecondOpnd)); auto *memOrderOpnd = intrinsiccall.Opnd(kInsnThirdOpnd); std::memory_order memOrder = std::memory_order_seq_cst; if (memOrderOpnd->IsConstval()) { @@ -10820,13 +10107,101 @@ void AArch64CGFunc::SelectAtomicStore( GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, LoadIntoRegister(srcOpnd, primType), memOpnd)); } -void AArch64CGFunc::SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm) { - if (CGOptions::IsPIC() && !CGOptions::IsPIE()) { - SelectCTlsGlobalDesc(result, stImm); +bool AArch64CGFunc::SelectTLSModelByAttr(Operand &result, StImmOperand &stImm, [[maybe_unused]] bool /* isShlib */) { + const MIRSymbol *symbol = stImm.GetSymbol(); + if (symbol->GetAttr(ATTR_local_exec)) { + SelectCTlsLocalDesc(result, stImm); // local-exec + } else if (symbol->GetAttr(ATTR_initial_exec)) { + SelectCTlsGotDesc(result, stImm); // initial-exec + } else if (symbol->GetAttr(ATTR_local_dynamic)) { + if (stImm.GetSymbol()->GetStorageClass() != kScExtern) { + // gcc does not enable local dynamic opt by attr, so now we keep consist with gcc. 
+ SelectCTlsGlobalDesc(result, stImm); // can opt + } else { + SelectCTlsGlobalDesc(result, stImm); + } + } else if (symbol->GetAttr(ATTR_global_dynamic)) { + SelectCTlsGlobalDesc(result, stImm); // global-dynamic + } else { + SelectCTlsGlobalDesc(result, stImm); // global-dynamic + } + return false; +} + +bool AArch64CGFunc::SelectTLSModelByOption(Operand &result, StImmOperand &stImm, bool isShlib) { + CGOptions::TLSModel ftlsModel = CGOptions::GetTLSModel(); + if (ftlsModel == CGOptions::kLocalExecTLSModel) { // local-exec model has same output with or without PIC + if (stImm.GetSymbol()->GetStorageClass() != kScExtern) { + SelectCTlsLocalDesc(result, stImm); + } else { + SelectCTlsGlobalDesc(result, stImm); + } + } else { + if (isShlib) { + if (ftlsModel == CGOptions::kInitialExecTLSModel) { + if (stImm.GetSymbol()->GetStorageClass() != kScExtern) { + SelectCTlsGotDesc(result, stImm); + } else { + SelectCTlsGlobalDesc(result, stImm); + } + } else if (ftlsModel == CGOptions::kLocalDynamicTLSModel) { + if (stImm.GetSymbol()->GetStorageClass() != kScExtern) { + // now local dynamic & warmup are implemented in mpl2mpl + // gcc does not enable local dynamic opt by flag, too. 
+ // When moved to cg, we should consider whether SelectCTlsLoad(result, stImm) should be called + SelectCTlsGlobalDesc(result, stImm); // local-dynamic + } else { + SelectCTlsGlobalDesc(result, stImm); + } + } else if (ftlsModel == CGOptions::kGlobalDynamicTLSModel) { + SelectCTlsGlobalDesc(result, stImm); + } + } else { // no PIC + if (stImm.GetSymbol()->GetStorageClass() == kScExtern) { + SelectCTlsGotDesc(result, stImm); // extern TLS symbol needs to use initial-exec model without fPIC option + } else { + SelectCTlsLocalDesc(result, stImm); + } + } + } + return false; +} + +bool AArch64CGFunc::SelectTLSModelByPreemptibility(Operand &result, StImmOperand &stImm, bool isShlib) { + bool isLocal = stImm.GetSymbol()->GetStorageClass() != kScExtern; + if (!isShlib) { + if (isLocal) { + SelectCTlsLocalDesc(result, stImm); // local-exec + } else { + SelectCTlsGotDesc(result, stImm); // initial-exec + } } else { - SelectCTlsLocalDesc(result, stImm); + if (isLocal) { + SelectCTlsGlobalDesc(result, stImm); // local-dynamic + } else { + SelectCTlsGlobalDesc(result, stImm); // global-dynamic + } + } + return false; +} + +void AArch64CGFunc::SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm) { + bool isWarmUp = false; + bool isShlib = CGOptions::IsShlib(); + // judge the model of this TLS symbol by this order: + if (!stImm.GetSymbol()->IsDefaultTLSModel()) { + // 1. if it has its already-defined non-default tls_model attribute + isWarmUp = SelectTLSModelByAttr(result, stImm, isShlib); + } else { + if (CGOptions::GetTLSModel() != CGOptions::kDefaultTLSModel) { + // 2. if it does not has already-defined model attribute, check for file-wide '-ftls-model' option + isWarmUp = SelectTLSModelByOption(result, stImm, isShlib); + } else { + // 3. 
if no attribute or option, choose its model by fPIC option and its preemptibility + isWarmUp = SelectTLSModelByPreemptibility(result, stImm, isShlib); + } } - if (stImm.GetOffset() > 0) { + if (stImm.GetOffset() > 0 && !isWarmUp) { // warmup-dynamic does not need this extern ADD insn auto &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); SelectAdd(result, result, immOpnd, PTY_u64); } @@ -10851,6 +10226,58 @@ void AArch64CGFunc::SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm) { SelectAdd(result, r0opnd, *specialFunc, PTY_u64); } +void AArch64CGFunc::SelectCTlsGotDesc(Operand &result, StImmOperand &stImm) { + auto tpidr = &CreateCommentOperand("tpidr_el0"); + // mrs x0, tpidr_el0 + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, result, *tpidr)); + // MOP_tls_desc_got + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_u64), GetPrimTypeSize(PTY_u64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_got, vReg1, stImm)); + // add x0, x1, x0 + SelectAdd(result, vReg1, result, PTY_u64); +} + +void AArch64CGFunc::SelectCTlsLoad(Operand &result, StImmOperand &stImm) { + const MIRSymbol *st = stImm.GetSymbol(); + if (!st->IsConst()) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tlsload_tbss, result)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tlsload_tdata, result)); + } + auto tpidr = &CreateCommentOperand("tpidr_el0"); + RegOperand *specialFunc = &CreateRegisterOperandOfType(PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, *specialFunc, *tpidr)); + Operand *immOpnd = nullptr; + int64 offset = 0; + if (!st->IsConst()) { + MapleMap &tbssVarOffset = GetMirModule().GetTbssVarOffset(); + if (tbssVarOffset.find(st) != tbssVarOffset.end()) { + offset = static_cast(tbssVarOffset.at(st)); + offset += stImm.GetOffset(); + if (offset != 0) { + immOpnd = &CreateImmOperand(offset, k32BitSize, false); + 
SelectAdd(result, result, *immOpnd, PTY_u64); + } + } else { + CHECK_FATAL(false, "All uninitialized TLS should be in tbssVarOffset"); + } + } else { + MapleMap &tdataVarOffset = GetMirModule().GetTdataVarOffset(); + if (tdataVarOffset.find(st) != tdataVarOffset.end()) { + offset = static_cast(tdataVarOffset.at(st)); + offset += stImm.GetOffset(); + if (offset != 0) { + immOpnd = &CreateImmOperand(offset, k32BitSize, false); + SelectAdd(result, result, *immOpnd, PTY_u64); + } + } else { + CHECK_FATAL(false, "All initialized TLS should be in tdataVarOffset"); + } + } + SelectAdd(result, result, *specialFunc, PTY_u64); +} + void AArch64CGFunc::SelectIntrinsicCall(IntrinsiccallNode &intrinsicCallNode) { MIRIntrinsicID intrinsic = intrinsicCallNode.GetIntrinsic(); @@ -10911,7 +10338,7 @@ void AArch64CGFunc::SelectIntrinsicCall(IntrinsiccallNode &intrinsicCallNode) { SelectStackRestore(intrinsicCallNode); return; case INTRN_C___builtin_division_exception: - SelectCDIVException(intrinsicCallNode); + SelectCDIVException(); return; default: break; @@ -10920,7 +10347,7 @@ void AArch64CGFunc::SelectIntrinsicCall(IntrinsiccallNode &intrinsicCallNode) { ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); for (size_t i = 0; i < intrinsicCallNode.NumOpnds(); i++) { BaseNode *argExpr = intrinsicCallNode.Opnd(i); - Operand *opnd = HandleExpr(intrinsicCallNode, *argExpr); + Operand *opnd = AArchHandleExpr(intrinsicCallNode, *argExpr); operands.emplace_back(opnd); if (!opnd->IsRegister()) { opnd = &LoadIntoRegister(*opnd, argExpr->GetPrimType()); @@ -11018,9 +10445,10 @@ void AArch64CGFunc::SelectIntrinsicCall(IntrinsiccallNode &intrinsicCallNode) { Operand *AArch64CGFunc::GetOpndFromIntrnNode(const IntrinsicopNode &intrnNode) { BaseNode *argexpr = intrnNode.Opnd(0); PrimType ptype = argexpr->GetPrimType(); - Operand *opnd = HandleExpr(intrnNode, *argexpr); + Operand *opnd = AArchHandleExpr(intrnNode, *argexpr); RegOperand &ldDest = CreateRegisterOperandOfType(ptype); 
+ ASSERT_NOT_NULL(opnd); if (opnd->IsMemoryAccessOperand()) { Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); GetCurBB()->AppendInsn(insn); @@ -11080,7 +10508,6 @@ Operand *AArch64CGFunc::SelectCctz(IntrinsicopNode &intrnNode) { * w0 -> ret */ Operand *AArch64CGFunc::SelectCpopcount(IntrinsicopNode &intrnNode) { - PrimType pType = intrnNode.Opnd(kInsnFirstOpnd)->GetPrimType(); bool is32Bits = (GetPrimTypeSize(pType) == k4ByteSize); Operand *opnd = GetOpndFromIntrnNode(intrnNode); @@ -11162,8 +10589,8 @@ Operand *AArch64CGFunc::SelectCclrsb(IntrinsicopNode &intrnNode) { Operand *AArch64CGFunc::SelectCisaligned(IntrinsicopNode &intrnNode) { BaseNode *argexpr0 = intrnNode.Opnd(0); PrimType ptype0 = argexpr0->GetPrimType(); - Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); - + Operand *opnd0 = AArchHandleExpr(intrnNode, *argexpr0); + ASSERT_NOT_NULL(opnd0); RegOperand &ldDest0 = CreateRegisterOperandOfType(ptype0); if (opnd0->IsMemoryAccessOperand()) { GetCurBB()->AppendInsn( @@ -11176,7 +10603,7 @@ Operand *AArch64CGFunc::SelectCisaligned(IntrinsicopNode &intrnNode) { BaseNode *argexpr1 = intrnNode.Opnd(1); PrimType ptype1 = argexpr1->GetPrimType(); - Operand *opnd1 = HandleExpr(intrnNode, *argexpr1); + Operand *opnd1 = AArchHandleExpr(intrnNode, *argexpr1); RegOperand &ldDest1 = CreateRegisterOperandOfType(ptype1); if (opnd1->IsMemoryAccessOperand()) { @@ -11238,13 +10665,13 @@ Operand *AArch64CGFunc::SelectAArch64CAtomicFetch(const IntrinsicopNode &intrino auto primType = intrinopNode.GetPrimType(); /* Create BB which includes atomic built_in function */ BB *atomicBB = CreateAtomicBuiltinBB(); - /* keep variables inside same BB */ + /* keep variables inside same BB in O0 to avoid obtaining parameter value across BB blocks */ if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { SetCurBB(*atomicBB); } /* handle built_in args */ - Operand *addrOpnd = HandleExpr(intrinopNode, 
*intrinopNode.GetNopndAt(kInsnFirstOpnd)); - Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *addrOpnd = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *valueOpnd = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { @@ -11308,14 +10735,16 @@ Operand *AArch64CGFunc::SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopN Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, bool retBool) { PrimType primType = intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType(); ASSERT(primType == intrinopNode.GetNopndAt(kInsnThirdOpnd)->GetPrimType(), "gcc built_in rule"); + /* Create BB which includes atomic built_in function */ BB *atomicBB = CreateAtomicBuiltinBB(); + /* keep variables inside same BB in O0 to avoid obtaining parameter value across BB blocks */ if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { SetCurBB(*atomicBB); } /* handle built_in args */ - Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); - Operand *oldVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); - Operand *newVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnThirdOpnd)); + Operand *addrOpnd = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *oldVal = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *newVal = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnThirdOpnd)); if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { SetCurBB(*atomicBB); } @@ -11353,7 +10782,7 @@ Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, BB 
*nextBB = CreateNewBB(); nextBB->AddLabel(nextBBLableIdx); nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); - /* special handle for boolean return type */ + /* special handle for boolean return type */ if (intrinopNode.GetPrimType() == PTY_u1) { nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, *regLoaded, *regLoaded, CreateImmOperand(PTY_u32, 1))); @@ -11389,14 +10818,21 @@ Operand *AArch64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) { Operand *AArch64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) { auto primType = intrinopNode.GetPrimType(); - Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); - Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + /* Create BB which includes atomic built_in function */ + BB *atomicBB = CreateAtomicBuiltinBB(); + /* keep variables inside same BB in O0 to avoid obtaining parameter value across BB blocks */ + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /*handle builtin args */ + Operand *addrOpnd = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *valueOpnd = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); - /* Create BB which includes atomic built_in function */ - BB *atomicBB = CreateAtomicBuiltinBB(); - SetCurBB(*atomicBB); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } /* load from pointed address */ auto primTypeP2Size = GetPrimTypeP2Size(primType); auto *regLoaded = &CreateRegisterOperandOfType(primType); @@ -11420,7 +10856,7 @@ Operand *AArch64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, Pr } void 
AArch64CGFunc::SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType) { - auto *addrOpnd = HandleExpr(intrinsiccall, *intrinsiccall.GetNopndAt(kInsnFirstOpnd)); + auto *addrOpnd = AArchHandleExpr(intrinsiccall, *intrinsiccall.GetNopndAt(kInsnFirstOpnd)); auto primTypeBitSize = GetPrimTypeBitSize(primType); auto mOp = PickStInsn(primTypeBitSize, primType, AArch64isa::kMoRelease); auto &zero = GetZeroOpnd(primTypeBitSize); @@ -11461,7 +10897,7 @@ AArch64isa::MemoryOrdering AArch64CGFunc::PickMemOrder(std::memory_order memOrde * a load-acquire would replace ldr if memorder is not 0 */ Operand *AArch64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) { - auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0)); + auto *addrOpnd = AArchHandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0)); auto *memOrderOpnd = intrinsicopNode.Opnd(1); auto primType = intrinsicopNode.GetPrimType(); std::memory_order memOrder = std::memory_order_seq_cst; @@ -11486,8 +10922,8 @@ Operand *AArch64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) { void AArch64CGFunc::SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccall) { auto primType = GlobalTables::GetTypeTable(). 
GetTypeFromTyIdx(intrinsiccall.GetTyIdx())->GetPrimType(); - auto *addrOpnd = HandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnFirstOpnd)); - auto *retOpnd = HandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnSecondOpnd)); + auto *addrOpnd = AArchHandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnFirstOpnd)); + auto *retOpnd = AArchHandleExpr(intrinsiccall, *intrinsiccall.Opnd(kInsnSecondOpnd)); auto *memOrderOpnd = intrinsiccall.Opnd(kInsnThirdOpnd); std::memory_order memOrder = std::memory_order_seq_cst; if (memOrderOpnd->IsConstval()) { @@ -11524,15 +10960,15 @@ Operand *AArch64CGFunc::SelectCAtomicExchangeN(const IntrinsicopNode &intrinsico } bool aquire = memOrder == std::memory_order_acquire || memOrder >= std::memory_order_acq_rel; bool release = memOrder >= std::memory_order_release; - + /* Create BB which includes atomic built_in function */ BB *atomicBB = CreateAtomicBuiltinBB(); - /* keep variables inside same BB */ + /* keep variables inside same BB in O0 to avoid obtaining parameter value across BB blocks */ if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { SetCurBB(*atomicBB); } /* handle args */ - auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnFirstOpnd)); - auto *valueOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnSecondOpnd)); + auto *addrOpnd = AArchHandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnFirstOpnd)); + auto *valueOpnd = AArchHandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnSecondOpnd)); addrOpnd = &LoadIntoRegister(*addrOpnd, intrinsicopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); valueOpnd = &LoadIntoRegister(*valueOpnd, intrinsicopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { @@ -11590,20 +11026,19 @@ void AArch64CGFunc::SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccall } bool aquire = memOrder == std::memory_order_acquire || memOrder >= std::memory_order_acq_rel; bool release 
= memOrder >= std::memory_order_release; - - /* load value from ptr */ - auto *valueOpnd = &CreateRegisterOperandOfType(primType); - auto *srcOpnd = HandleExpr(intrinsiccallNode, *srcNode); - auto &srcMemOpnd = CreateMemOpnd(LoadIntoRegister(*srcOpnd, primType), 0, GetPrimTypeBitSize(primType)); - SelectCopy(*valueOpnd, primType, srcMemOpnd, primType); - + /* Create BB which includes atomic built_in function */ BB *atomicBB = CreateAtomicBuiltinBB(); - /* keep variables inside same BB */ + /* keep variables inside same BB in O0 to avoid obtaining parameter value across BB blocks */ if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { SetCurBB(*atomicBB); } + /* load value from ptr */ + auto *valueOpnd = &CreateRegisterOperandOfType(primType); + auto *srcOpnd = AArchHandleExpr(intrinsiccallNode, *srcNode); + auto &srcMemOpnd = CreateMemOpnd(LoadIntoRegister(*srcOpnd, primType), 0, GetPrimTypeBitSize(primType)); + SelectCopy(*valueOpnd, primType, srcMemOpnd, primType); /* handle opnds */ - auto *addrOpnd = HandleExpr(intrinsiccallNode, *ptrNode); + auto *addrOpnd = AArchHandleExpr(intrinsiccallNode, *ptrNode); addrOpnd = &LoadIntoRegister(*addrOpnd, ptrNode->GetPrimType()); if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { SetCurBB(*atomicBB); @@ -11624,28 +11059,31 @@ void AArch64CGFunc::SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccall BB *nextBB = CreateNewBB(); GetCurBB()->AppendBB(*nextBB); SetCurBB(*nextBB); - auto *retOpnd = HandleExpr(intrinsiccallNode, *retNode); + auto *retOpnd = AArchHandleExpr(intrinsiccallNode, *retNode); auto &resultMemOpnd = CreateMemOpnd(LoadIntoRegister(*retOpnd, primType), 0, GetPrimTypeBitSize(primType)); SelectCopy(resultMemOpnd, primType, *regLoaded, primType); } /* * regassign %1 (intrinsicop C___Atomic_compare_exchange(ptr, expected, desired, weak, sucMemOrder, failMemOrder)) + * (O0)label .L_x: * let %1 -> x0 * let ptr -> x1 * let expected -> x2 * let desired -> x3 * implement to asm: - * label .L_x: + * 
(O2)label .L_x: * ldxr/ldaxr x4, [x1] * cmp x4, x2 * bne .L_y + * bb: * stxr/stlxr w5, x3, [x1] * cmp w5, 0 / cbnz w5, .L_x * label .L_y: * cset x0, eq * cmp x0, 0 * bne .L_z + * bb: * str x1, [x2] * a load-acquire would replace ldaxr if acquire needed * a store-relase would replace stlxr if release needed @@ -11653,20 +11091,26 @@ void AArch64CGFunc::SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccall */ Operand *AArch64CGFunc::SelectCAtomicCompareExchange(const IntrinsicopNode &intrinsicopNode, bool isCompareExchangeN) { auto primType = intrinsicopNode.GetPrimType(); + /* Create BB which includes atomic built_in function */ + BB *atomicBB1 = CreateAtomicBuiltinBB(); + /* keep variables inside same BB in O0 to avoid obtaining parameter value across BB blocks */ + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB1); + } /* handle args */ /* the first param */ - Operand *addrOpnd1 = HandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *addrOpnd1 = AArchHandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnFirstOpnd)); addrOpnd1 = &LoadIntoRegister(*addrOpnd1, intrinsicopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); auto &memOpnd1 = CreateMemOpnd(*static_cast(addrOpnd1), 0, GetPrimTypeBitSize(primType)); /* the second param */ - Operand *addrOpnd2 = HandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *addrOpnd2 = AArchHandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnSecondOpnd)); addrOpnd2 = &LoadIntoRegister(*addrOpnd2, intrinsicopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); auto &memOpnd2 = CreateMemOpnd(*static_cast(addrOpnd2), 0, GetPrimTypeBitSize(primType)); auto *regOpnd2 = &CreateRegisterOperandOfType(primType); SelectCopy(*regOpnd2, primType, memOpnd2, primType); /* the third param */ - Operand *opnd3 = HandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnThirdOpnd)); + Operand *opnd3 = AArchHandleExpr(intrinsicopNode, 
*intrinsicopNode.GetNopndAt(kInsnThirdOpnd)); /* opnd3 can be an address operand or a value operand */ opnd3 = &LoadIntoRegister(*opnd3, intrinsicopNode.GetNopndAt(kInsnThirdOpnd)->GetPrimType()); if (!isCompareExchangeN) { @@ -11699,50 +11143,61 @@ Operand *AArch64CGFunc::SelectCAtomicCompareExchange(const IntrinsicopNode &intr (sucMemOrder == std::memory_order_release && faiMemOrder == std::memory_order_relaxed)); bool release = sucMemOrder >= std::memory_order_release; - BB *atomicBB = CreateAtomicBuiltinBB(false); - SetCurBB(*atomicBB); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB1); + } /* load from pointed address */ auto primTypeP2Size = GetPrimTypeP2Size(primType); auto *regLoaded = &CreateRegisterOperandOfType(primType); auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, acquire); - atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd1)); + atomicBB1->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd1)); /* compare the first param's value with the second */ SelectAArch64Cmp(*regLoaded, *regOpnd2, true, GetPrimTypeBitSize(primType)); /* bne */ - BB *nextAtomicBB = CreateAtomicBuiltinBB(false); - LabelOperand &targetOpnd = GetOrCreateLabelOperand(*nextAtomicBB); - atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_bne, GetOrCreateRflag(), targetOpnd)); - + BB *stxrBB = CreateNewBB(); + atomicBB1->AppendBB(*stxrBB); + SetCurBB(*stxrBB); + BB *atomicBB2 = CreateAtomicBuiltinBB(); + LabelOperand &atomicBB2Opnd = GetOrCreateLabelOperand(*atomicBB2); + atomicBB1->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_bne, GetOrCreateRflag(), atomicBB2Opnd)); /* store to pointed address */ auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, release); - atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, *opnd3, memOpnd1)); + stxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, 
*opnd3, memOpnd1)); if (weak) { /* cmp */ SelectAArch64Cmp(*accessStatus, GetZeroOpnd(primType), true, GetPrimTypeBitSize(primType)); } else { /* cbnz */ - auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); - atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + auto &atomicBB1Opnd = GetOrCreateLabelOperand(*atomicBB1); + stxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBB1Opnd)); + stxrBB->SetKind(BB::kBBIf); } - SetCurBB(*nextAtomicBB); + + SetCurBB(*atomicBB2); /* cset */ auto *returnOpnd = &CreateRegisterOperandOfType(primType); SelectAArch64CSet(*returnOpnd, GetCondOperand(CC_EQ), false); /* cmp */ SelectAArch64Cmp(*returnOpnd, GetZeroOpnd(primType), true, GetPrimTypeBitSize(primType)); /* bne */ - BB *nextBB = CreateNewBB(); - auto &nextBBOpnd = GetOrCreateLabelOperand(*nextBB); - nextAtomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_bne, GetOrCreateRflag(), nextBBOpnd)); + LabelIdx lastBBLableIdx = CreateLabel(); + auto &lastBBOpnd = GetOrCreateLabelOperand(lastBBLableIdx); + atomicBB2->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_bne, GetOrCreateRflag(), lastBBOpnd)); + + BB *strBB = CreateNewBB(); + atomicBB2->AppendBB(*strBB); /* store the first param's value into the third */ auto mOpStr = PickStInsn(GetPrimTypeBitSize(primType), primType); - nextAtomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStr, *regLoaded, memOpnd2)); + strBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStr, *regLoaded, memOpnd2)); - nextAtomicBB->AppendBB(*nextBB); - SetCurBB(*nextBB); + BB *lastBB = CreateNewBB(); + lastBB->AddLabel(lastBBLableIdx); + SetLab2BBMap(static_cast(lastBBLableIdx), *lastBB); + strBB->AppendBB(*lastBB); + SetCurBB(*lastBB); return returnOpnd; } @@ -11761,10 +11216,14 @@ Operand *AArch64CGFunc::SelectCAtomicCompareExchange(const IntrinsicopNode &intr */ Operand *AArch64CGFunc::SelectCAtomicTestAndSet(const IntrinsicopNode &intrinsicopNode) { auto primType = 
intrinsicopNode.GetPrimType(); - + /* Create BB which includes atomic built_in function */ BB *atomicBB = CreateAtomicBuiltinBB(); + /* keep variables inside same BB in O0 to avoid obtaining parameter value across BB blocks */ + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } /* handle built_in args */ - Operand *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *addrOpnd = AArchHandleExpr(intrinsicopNode, *intrinsicopNode.GetNopndAt(kInsnFirstOpnd)); addrOpnd = &LoadIntoRegister(*addrOpnd, intrinsicopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(primType)); auto *memOrderOpnd = intrinsicopNode.GetNopndAt(kInsnSecondOpnd); @@ -11778,7 +11237,9 @@ Operand *AArch64CGFunc::SelectCAtomicTestAndSet(const IntrinsicopNode &intrinsic /* mov reg, 1 */ auto *regOperated = &CreateRegisterOperandOfType(PTY_u32); SelectCopyImm(*regOperated, CreateImmOperand(PTY_u32, 1), PTY_u32); - SetCurBB(*atomicBB); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } /* load from pointed address */ auto primTypeP2Size = GetPrimTypeP2Size(PTY_u8); auto *regLoaded = &CreateRegisterOperandOfType(PTY_u32); @@ -11811,7 +11272,7 @@ void AArch64CGFunc::SelectCAtomicClear(const IntrinsiccallNode &intrinsiccallNod auto primType = intrinsiccallNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType(); /* handle built_in args */ - Operand *addrOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.GetNopndAt(kInsnFirstOpnd)); + Operand *addrOpnd = AArchHandleExpr(intrinsiccallNode, *intrinsiccallNode.GetNopndAt(kInsnFirstOpnd)); addrOpnd = &LoadIntoRegister(*addrOpnd, intrinsiccallNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(primType)); auto *memOrderOpnd = intrinsiccallNode.GetNopndAt(kInsnSecondOpnd); @@ -11838,7 +11299,7 @@ Operand 
*AArch64CGFunc::SelectAtomicLoad(Operand &addrOpnd, PrimType primType, A Operand *AArch64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode) { if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_extract_return_addr) { ASSERT(intrinopNode.GetNumOpnds() == 1, "expect one parameter"); - Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *addrOpnd = AArchHandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); return &LoadIntoRegister(*addrOpnd, PTY_a64); } else if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_return_address) { BaseNode *argexpr0 = intrinopNode.Opnd(0); @@ -11875,13 +11336,13 @@ Operand *AArch64CGFunc::SelectAArch64align(const IntrinsicopNode &intrnNode, boo /* Handle Two args */ BaseNode *argexpr0 = intrnNode.Opnd(0); PrimType ptype0 = argexpr0->GetPrimType(); - Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); + Operand *opnd0 = AArchHandleExpr(intrnNode, *argexpr0); PrimType resultPtype = intrnNode.GetPrimType(); RegOperand &ldDest0 = LoadIntoRegister(*opnd0, ptype0); BaseNode *argexpr1 = intrnNode.Opnd(1); PrimType ptype1 = argexpr1->GetPrimType(); - Operand *opnd1 = HandleExpr(intrnNode, *argexpr1); + Operand *opnd1 = AArchHandleExpr(intrnNode, *argexpr1); RegOperand &arg1 = LoadIntoRegister(*opnd1, ptype1); ASSERT(IsPrimitiveInteger(ptype0) && IsPrimitiveInteger(ptype1), "align integer type only"); Operand *ldDest1 = &static_cast(CreateRegisterOperandOfType(ptype0)); @@ -11922,13 +11383,13 @@ void AArch64CGFunc::SelectStackSave() { void AArch64CGFunc::SelectStackRestore(const IntrinsiccallNode &intrnNode) { BaseNode *argexpr0 = intrnNode.Opnd(0); - Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); + Operand *opnd0 = AArchHandleExpr(intrnNode, *argexpr0); Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); Insn &restoreInsn = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spOpnd, *opnd0); GetCurBB()->AppendInsn(restoreInsn); } -void 
AArch64CGFunc::SelectCDIVException(const IntrinsiccallNode &intrnNode) { +void AArch64CGFunc::SelectCDIVException() { uint32 breakImm = 1000; ImmOperand &immOpnd = CreateImmOperand(breakImm, maplebe::k16BitSize, false); GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_brk, immOpnd)); @@ -12976,7 +12437,7 @@ static int16 ResolveLaneNumber(const IntrinsicopNode &expr, const IntrinsicOpndD return opndDesc.laneNumber; } if (opndDesc.opndId != -1) { - auto *laneExpr = expr.Opnd(opndDesc.opndId); + auto *laneExpr = expr.Opnd(static_cast(opndDesc.opndId)); CHECK_FATAL(laneExpr->IsConstval(), "unexpected opnd type"); auto *mirConst = static_cast(laneExpr)->GetConstVal(); return static_cast(safe_cast(mirConst)->GetExtValue()); @@ -12987,7 +12448,8 @@ static int16 ResolveLaneNumber(const IntrinsicopNode &expr, const IntrinsicOpndD RegOperand *AArch64CGFunc::SelectVectorIntrinsics(const IntrinsicopNode &intrinsicOp) { auto intrinsicId = intrinsicOp.GetIntrinsic(); CHECK_FATAL(intrinsicId >= maple::INTRN_vector_get_lane_v8u8, "unexpected intrinsic"); - size_t vectorIntrinsicIndex = intrinsicId - maple::INTRN_vector_get_lane_v8u8; + size_t vectorIntrinsicIndex = static_cast(intrinsicId) - + static_cast(maple::INTRN_vector_get_lane_v8u8); auto &aarch64IntrinsicDesc = vectorIntrinsicMap[vectorIntrinsicIndex]; ASSERT(vectorIntrinsicIndex == aarch64IntrinsicDesc.id, "intrinsic map error!"); auto resultType = intrinsicOp.GetPrimType(); @@ -12996,8 +12458,8 @@ RegOperand *AArch64CGFunc::SelectVectorIntrinsics(const IntrinsicopNode &intrins auto returnOpndIndex = aarch64IntrinsicDesc.returnOpndIndex; auto *result = &CreateRegisterOperandOfType(resultType); if (returnOpndIndex != -1) { - auto *srcExpr = intrinsicOp.Opnd(returnOpndIndex); - auto *srcOpnd = HandleExpr(intrinsicOp, *srcExpr); + auto *srcExpr = intrinsicOp.Opnd(static_cast(returnOpndIndex)); + auto *srcOpnd = AArchHandleExpr(intrinsicOp, *srcExpr); auto srcType = srcExpr->GetPrimType(); SelectCopy(*result, 
resultType, *srcOpnd, srcType); } @@ -13016,8 +12478,8 @@ RegOperand *AArch64CGFunc::SelectVectorIntrinsics(const IntrinsicopNode &intrins for (size_t i = 0; i < aarch64IntrinsicDesc.opndOrder.size(); ++i) { auto opndId = aarch64IntrinsicDesc.opndOrder[i]; - auto *opndExpr = intrinsicOp.Opnd(opndId); - auto *opnd = HandleExpr(intrinsicOp, *opndExpr); + auto *opndExpr = intrinsicOp.Opnd(static_cast(opndId)); + auto *opnd = AArchHandleExpr(intrinsicOp, *opndExpr); auto &intrinsicDesc = IntrinDesc::intrinTable[intrinsicId]; if (intrinsicDesc.argTypes[opndId + 1] == kArgTyPtr) { ASSERT(opnd->IsRegister(), "NIY, must be register"); @@ -13083,13 +12545,13 @@ void AArch64CGFunc::HandleFuncCfg(CGCFG *cfg) { cfg->WontExitAnalysis(); } CG *cg = GetCG(); - if (cg->GetCGOptions().IsLazyBinding() && cg->IsLibcore()) { + if (maplebe::CGOptions::IsLazyBinding() && cg->IsLibcore()) { ProcessLazyBinding(); } if (cg->DoPatchLongBranch()) { PatchLongBranch(); } - if (cg->GetCGOptions().DoEnableHotColdSplit()) { + if (maplebe::CGOptions::DoEnableHotColdSplit()) { cfg->CheckCFGFreq(); } NeedStackProtect(); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_data_dep_base.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_data_dep_base.cpp index 0445283baabefbbe5ca122bf65f9add40c8a1fc1..b37d7c9ee097e55d1581ef604cc18522c6a22441 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_data_dep_base.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_data_dep_base.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -15,11 +15,17 @@ #include "aarch64_cg.h" #include "aarch64_operand.h" #include "pressure.h" +#include "cg_irbuilder.h" #include "aarch64_data_dep_base.h" -#include "cg_irbuilder.h" + /* For building dependence graph, The entry is AArch64DataDepBase::Run. */ namespace maplebe { +void AArch64DataDepBase::InitCDGNodeDataInfo(MemPool &mp, MapleAllocator &alloc, CDGNode &cdgNode) { + uint32 maxRegNum = (cgFunc.IsAfterRegAlloc() ? AArch64reg::kAllRegNum : cgFunc.GetMaxVReg()); + cdgNode.InitDataDepInfo(mp, alloc, maxRegNum); +} + void AArch64DataDepBase::ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn& newInsn, bool isFromClinit) const { if (isFromClinit) { @@ -183,12 +189,12 @@ void AArch64DataDepBase::BuildDepsAccessStImmMem(Insn &insn, bool isDest) { /* Build data dependence of memory bars instructions */ void AArch64DataDepBase::BuildDepsMemBar(Insn &insn) { - if (IsIntraBlockAnalysis()) { + if (isIntra || curRegion->GetRegionNodeSize() == 1 || curRegion->GetRegionRoot() == curCDGNode) { AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetStackUseInsns(), insn, kDependenceTypeMembar); AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetHeapUseInsns(), insn, kDependenceTypeMembar); AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetStackDefInsns(), insn, kDependenceTypeMembar); AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetHeapDefInsns(), insn, kDependenceTypeMembar); - } else { + } else if (curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), true, kDependenceTypeMembar, kStackUses); BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), true, kDependenceTypeMembar, kHeapUses); BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), true, kDependenceTypeMembar, kStackDefs); @@ -202,18 +208,18 @@ void AArch64DataDepBase::BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) { aarchMemOpnd.SetAccessSize(insn.GetMemoryByteSize()); RegOperand *baseRegister = 
aarchMemOpnd.GetBaseRegister(); MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); - if (IsIntraBlockAnalysis()) { + if (isIntra || curRegion->GetRegionNodeSize() == 1 || curRegion->GetRegionRoot() == curCDGNode) { /* Stack memory address */ - MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + MapleVector &stackDefs = curCDGNode->GetStackDefInsns(); for (auto defInsn : stackDefs) { if (defInsn->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *defInsn)) { AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); } } /* Heap memory address */ - MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + MapleVector &heapDefs = curCDGNode->GetHeapDefInsns(); AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); - } else { + } else if (curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockMemDefUseDependency(*insn.GetDepNode(), aarchMemOpnd, nextMemOpnd, false); } if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) { @@ -224,7 +230,7 @@ void AArch64DataDepBase::BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) { Insn *membarInsn = curCDGNode->GetMembarInsn(); if (membarInsn != nullptr) { AddDependence(*membarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); - } else if (!IsIntraBlockAnalysis()) { + } else if (!isIntra && curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeMembar, kMembar); } @@ -237,16 +243,16 @@ void AArch64DataDepBase::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); aarchMemOpnd.SetAccessSize(insn.GetMemoryByteSize()); - if (IsIntraBlockAnalysis()) { + if (isIntra || curRegion->GetRegionNodeSize() == 1 || curRegion->GetRegionRoot() == curCDGNode) { /* Build anti dependence */ - MapleVector stackUses = curCDGNode->GetStackUseInsns(); + MapleVector &stackUses = 
curCDGNode->GetStackUseInsns(); for (auto *stackUse : stackUses) { if (NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *stackUse)) { AddDependence(*stackUse->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); } } /* Build output dependence */ - MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + MapleVector &stackDefs = curCDGNode->GetStackDefInsns(); for (auto stackDef : stackDefs) { if (stackDef->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *stackDef)) { AddDependence(*stackDef->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); @@ -255,16 +261,16 @@ void AArch64DataDepBase::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { /* Heap memory * Build anti dependence */ - MapleVector heapUses = curCDGNode->GetHeapUseInsns(); + MapleVector &heapUses = curCDGNode->GetHeapUseInsns(); AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); /* Build output dependence */ - MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + MapleVector &heapDefs = curCDGNode->GetHeapDefInsns(); AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); /* Memory definition can not across may-throw insns */ - MapleVector mayThrows = curCDGNode->GetMayThrowInsns(); + MapleVector &mayThrows = curCDGNode->GetMayThrowInsns(); AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); - } else { + } else if (curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockMemDefUseDependency(*insn.GetDepNode(), aarchMemOpnd, nextMemOpnd, true); BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kMayThrows); } @@ -274,7 +280,7 @@ void AArch64DataDepBase::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { if (lastCallInsn != nullptr) { /* Build a dependence between stack passed arguments and call */ AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); - } else if (!IsIntraBlockAnalysis()) { + } else if (!isIntra && curRegion->GetRegionRoot() != 
curCDGNode) { BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeControl, kLastCall); } } @@ -282,7 +288,7 @@ void AArch64DataDepBase::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { Insn *membarInsn = curCDGNode->GetMembarInsn(); if (membarInsn != nullptr) { AddDependence(*membarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); - } else if (!IsIntraBlockAnalysis()) { + } else if (!isIntra && curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeMembar, kMembar); } @@ -361,14 +367,14 @@ bool AArch64DataDepBase::NeedBuildDepsMem(const MemOperand &memOpnd, */ void AArch64DataDepBase::BuildCallerSavedDeps(Insn &insn) { /* Build anti dependence and output dependence. */ - for (uint32 i = R0; i <= R7; ++i) { + for (uint32 i = R0; i <= R9; ++i) { BuildDepsDefReg(insn, i); } for (uint32 i = V0; i <= V7; ++i) { BuildDepsDefReg(insn, i); } if (!beforeRA) { - for (uint32 i = R8; i <= R18; ++i) { + for (uint32 i = R9; i <= R18; ++i) { BuildDepsDefReg(insn, i); } for (uint32 i = RLR; i <= RSP; ++i) { @@ -389,7 +395,7 @@ void AArch64DataDepBase::BuildCallerSavedDeps(Insn &insn) { * insn : a call instruction (call/tail-call) */ void AArch64DataDepBase::BuildStackPassArgsDeps(Insn &insn) { - MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + MapleVector &stackDefs = curCDGNode->GetStackDefInsns(); for (auto stackDefInsn : stackDefs) { if (stackDefInsn->IsCall()) { continue; @@ -408,10 +414,10 @@ void AArch64DataDepBase::BuildStackPassArgsDeps(Insn &insn) { /* Some insns may dirty all stack memory, such as "bl MCC_InitializeLocalStackRef" */ void AArch64DataDepBase::BuildDepsDirtyStack(Insn &insn) { /* Build anti dependence */ - MapleVector stackUses = curCDGNode->GetStackUseInsns(); + MapleVector &stackUses = curCDGNode->GetStackUseInsns(); AddDependence4InsnInVectorByType(stackUses, insn, kDependenceTypeAnti); /* Build output dependence */ - 
MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + MapleVector &stackDefs = curCDGNode->GetStackDefInsns(); AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeOutput); curCDGNode->AddStackDefInsn(&insn); } @@ -419,17 +425,17 @@ void AArch64DataDepBase::BuildDepsDirtyStack(Insn &insn) { /* Some call insns may use all stack memory, such as "bl MCC_CleanupLocalStackRef_NaiveRCFast" */ void AArch64DataDepBase::BuildDepsUseStack(Insn &insn) { /* Build true dependence */ - MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + MapleVector &stackDefs = curCDGNode->GetStackDefInsns(); AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeTrue); } /* Some insns may dirty all heap memory, such as a call insn */ void AArch64DataDepBase::BuildDepsDirtyHeap(Insn &insn) { /* Build anti dependence */ - MapleVector heapUses = curCDGNode->GetHeapUseInsns(); + MapleVector &heapUses = curCDGNode->GetHeapUseInsns(); AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); /* Build output dependence */ - MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + MapleVector &heapDefs = curCDGNode->GetHeapDefInsns(); AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); Insn *membarInsn = curCDGNode->GetMembarInsn(); if (membarInsn != nullptr) { @@ -614,7 +620,37 @@ void AArch64DataDepBase::BuildSpecialInsnDependency(Insn &insn, const MapleVecto } } -void AArch64DataDepBase::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) { +void AArch64DataDepBase::BuildAsmInsnDependency(Insn &insn) { + if (!insn.IsAsmInsn()) { + return; + } + ASSERT(insn.GetOperand(kInsnSecondOpnd).IsList(), "invalid opnd of asm insn"); + ASSERT(insn.GetOperand(kInsnThirdOpnd).IsList(), "invalid opnd of asm insn"); + ASSERT(insn.GetOperand(kInsnFourthOpnd).IsList(), "invalid opnd of asm insn"); + auto &outputList = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &clobberList = static_cast(insn.GetOperand(kInsnThirdOpnd)); 
+ auto &inputList = static_cast(insn.GetOperand(kInsnFourthOpnd)); + for (auto *defOpnd : outputList.GetOperands()) { + if (defOpnd == nullptr) { + continue; + } + BuildDepsDefReg(insn, defOpnd->GetRegisterNumber()); + } + for (auto *defOpnd : clobberList.GetOperands()) { + if (defOpnd == nullptr) { + continue; + } + BuildDepsDefReg(insn, defOpnd->GetRegisterNumber()); + } + for (auto *useOpnd : inputList.GetOperands()) { + if (useOpnd == nullptr) { + continue; + } + BuildDepsUseReg(insn, useOpnd->GetRegisterNumber()); + } +} + +void AArch64DataDepBase::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, DepNode &sepNode) { /* Update reg use */ const auto &useRegnos = depNode.GetUseRegnos(); if (beforeRA) { @@ -627,10 +663,9 @@ void AArch64DataDepBase::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, CHECK_FATAL(curCDGNode->GetUseInsnChain(regNO)->insn != nullptr, "get useInsn failed"); depNode.SetRegUses(*curCDGNode->GetUseInsnChain(regNO)); if (curCDGNode->GetLatestDefInsn(regNO) == nullptr) { - curCDGNode->SetLatestDefInsn(regNO, nodes[separatorIndex]->GetInsn()); - nodes[separatorIndex]->AddDefReg(regNO); - nodes[separatorIndex]->SetRegDefs(nodes[separatorIndex]->GetDefRegnos().size(), - curCDGNode->GetUseInsnChain(regNO)); + curCDGNode->SetLatestDefInsn(regNO, sepNode.GetInsn()); + sepNode.AddDefReg(regNO); + sepNode.SetRegDefs(sepNode.GetDefRegnos().size(),curCDGNode->GetUseInsnChain(regNO)); } } } @@ -660,6 +695,7 @@ void AArch64DataDepBase::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, /* Build a pseudo node to separate data dependence graph */ DepNode *AArch64DataDepBase::BuildSeparatorNode() { Insn &pseudoSepInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_dependence_seperator); + pseudoSepInsn.SetId(separatorIndex); auto *separatorNode = memPool.New(pseudoSepInsn, alloc); separatorNode->SetType(kNodeTypeSeparator); pseudoSepInsn.SetDepNode(*separatorNode); @@ -673,11 +709,10 @@ DepNode *AArch64DataDepBase::BuildSeparatorNode() { 
void AArch64DataDepBase::BuildInterBlockMemDefUseDependency(DepNode &depNode, MemOperand &memOpnd, MemOperand *nextMemOpnd, bool isMemDef) { - CHECK_FATAL(!IsIntraBlockAnalysis(), "must be inter block data dependence analysis"); + CHECK_FATAL(!isIntra, "must be inter block data dependence analysis"); + CHECK_FATAL(curRegion->GetRegionRoot() != curCDGNode, "for the root node, cross-BB search is not required"); BB *curBB = curCDGNode->GetBB(); CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); - CDGRegion *curRegion = curCDGNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); std::vector visited(curRegion->GetMaxBBIdInRegion(), false); if (isMemDef) { BuildPredPathMemDefDependencyDFS(*curBB, visited, depNode, memOpnd, nextMemOpnd); @@ -693,20 +728,24 @@ void AArch64DataDepBase::BuildPredPathMemDefDependencyDFS(BB &curBB, std::vector } CDGNode *cdgNode = curBB.GetCDGNode(); CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); - CDGRegion *curRegion = cdgNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); - if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + CDGRegion *region = cdgNode->GetRegion(); + CHECK_FATAL(region != nullptr, "get region from cdgNode failed"); + if (region->GetRegionId() != curRegion->GetRegionId()) { + return; + } + // Ignore back-edge + if (cdgNode == curRegion->GetRegionRoot()) { return; } visited[curBB.GetId()] = true; - MapleVector stackUses = cdgNode->GetStackUseInsns(); + MapleVector &stackUses = cdgNode->GetStackUseInsns(); for (auto *stackUse : stackUses) { if (NeedBuildDepsMem(memOpnd, nextMemOpnd, *stackUse)) { AddDependence(*stackUse->GetDepNode(), depNode, kDependenceTypeAnti); } } /* Build output dependence */ - MapleVector stackDefs = cdgNode->GetStackDefInsns(); + MapleVector &stackDefs = cdgNode->GetStackDefInsns(); for (auto stackDef : stackDefs) { if (stackDef->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, 
*stackDef)) { AddDependence(*stackDef->GetDepNode(), depNode, kDependenceTypeOutput); @@ -715,13 +754,16 @@ void AArch64DataDepBase::BuildPredPathMemDefDependencyDFS(BB &curBB, std::vector /* Heap memory * Build anti dependence */ - MapleVector heapUses = curCDGNode->GetHeapUseInsns(); + MapleVector &heapUses = curCDGNode->GetHeapUseInsns(); AddDependence4InsnInVectorByType(heapUses, *depNode.GetInsn(), kDependenceTypeAnti); /* Build output dependence */ - MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + MapleVector &heapDefs = curCDGNode->GetHeapDefInsns(); AddDependence4InsnInVectorByType(heapDefs, *depNode.GetInsn(), kDependenceTypeOutput); for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { - BuildPredPathMemDefDependencyDFS(**predIt, visited, depNode, memOpnd, nextMemOpnd); + // Ignore back-edge of self-loop + if (*predIt != &curBB) { + BuildPredPathMemDefDependencyDFS(**predIt, visited, depNode, memOpnd, nextMemOpnd); + } } } @@ -732,24 +774,39 @@ void AArch64DataDepBase::BuildPredPathMemUseDependencyDFS(BB &curBB, std::vector } CDGNode *cdgNode = curBB.GetCDGNode(); CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); - CDGRegion *curRegion = cdgNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); - if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + CDGRegion *region = cdgNode->GetRegion(); + CHECK_FATAL(region != nullptr, "get region from cdgNode failed"); + if (region->GetRegionId() != curRegion->GetRegionId()) { return; } visited[curBB.GetId()] = true; /* Stack memory address */ - MapleVector stackDefs = cdgNode->GetStackDefInsns(); + MapleVector &stackDefs = cdgNode->GetStackDefInsns(); for (auto stackDef : stackDefs) { if (stackDef->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, *stackDef)) { AddDependence(*stackDef->GetDepNode(), depNode, kDependenceTypeTrue); } } /* Heap memory address */ - MapleVector heapDefs = 
cdgNode->GetHeapDefInsns(); + MapleVector &heapDefs = cdgNode->GetHeapDefInsns(); AddDependence4InsnInVectorByType(heapDefs, *depNode.GetInsn(), kDependenceTypeTrue); + // Ignore back-edge + if (cdgNode == curRegion->GetRegionRoot()) { + return; + } for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { - BuildPredPathMemUseDependencyDFS(**predIt, visited, depNode, memOpnd, nextMemOpnd); + // Ignore back-edge of self-loop + if (*predIt != &curBB) { + BuildPredPathMemUseDependencyDFS(**predIt, visited, depNode, memOpnd, nextMemOpnd); + } } } + +void AArch64DataDepBase::DumpNodeStyleInDot(std::ofstream &file, DepNode &depNode) { + MOperator mOp = depNode.GetInsn()->GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + file << " insn_" << depNode.GetInsn() << "["; + file << "label = \"" << depNode.GetInsn()->GetId() << ":\n"; + file << "{ " << md->name << "}\"];\n"; +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp index 377f3bb386cadfaa0d7a23a366d97973125cdf43..1dd0c7fbfd9fb71032ed9cc66e926f5138b6ccb5 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp @@ -305,26 +305,16 @@ void AArch64Ebo::DefineCallerSaveRegisters(InsnInfo &insnInfo) { } ASSERT(insn->IsCall() || insn->IsTailCall(), "insn should be a call insn."); if (CGOptions::DoIPARA()) { - auto *targetOpnd = insn->GetCallTargetOperand(); - CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); - if (targetOpnd->IsFuncNameOpnd()) { - FuncNameOperand *target = static_cast(targetOpnd); - const MIRSymbol *funcSt = target->GetFunctionSymbol(); - ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); - MIRFunction *func = funcSt->GetFunction(); - if (func != nullptr && func->IsReferedRegsValid()) { - for (auto preg : func->GetReferedRegs()) 
{ - if (AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { - continue; - } - RegOperand *opnd = &a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(preg), k64BitSize, - AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? kRegTyFloat : kRegTyInt); - OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); - opndInfo->insnInfo = &insnInfo; - } - return; - } + std::set callerSaveRegs; + a64CGFunc->GetRealCallerSaveRegs(*insn, callerSaveRegs); + for (auto preg : callerSaveRegs) { + auto &opnd = a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(preg), + k64BitSize, + (AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? kRegTyFloat : kRegTyInt)); + auto *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, opnd); + opndInfo->insnInfo = &insnInfo; } + return; } for (auto opnd : callerSaveRegTable) { OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); @@ -766,7 +756,9 @@ bool AArch64Ebo::SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOper ImmOperand *immOperand = &a64CGFunc->CreateImmOperand(val, opndSize, false); if (!immOperand->IsSingleInstructionMovable()) { ASSERT(res->IsRegister(), " expect a register operand"); - static_cast(cgFunc)->SplitMovImmOpndInstruction(val, *(static_cast(res)), &insn); + Insn &movInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xmovri64, *res, *immOperand); + bb.InsertInsnBefore(insn, movInsn); + SPLIT_INSN(&movInsn, cgFunc); bb.RemoveInsn(insn); } else { MOperator newmOp = opndSize == k64BitSize ? 
MOP_xmovri64 : MOP_wmovri32; @@ -885,7 +877,7 @@ bool AArch64Ebo::CombineExtensionAndLoad(Insn *insn, const MapleVectorIsOperandImmValid(newPreMop, newMemOp, prevInsn->GetMemOpndIdx())) { return false; } prevInsn->SetMemOpnd(newMemOp); @@ -986,7 +978,7 @@ bool AArch64Ebo::CombineMultiplySub(Insn &insn, OpndInfo *opndInfo, bool is64bit return false; } -bool CheckInsnRefField(const Insn &insn, size_t opndIndex) { +bool CheckInsnRefField(const Insn &insn, uint32 opndIndex) { if (insn.IsAccessRefField() && insn.AccessMem()) { Operand &opnd0 = insn.GetOperand(opndIndex); if (opnd0.IsRegister()) { @@ -1219,7 +1211,7 @@ bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origI auto &immOpnd = static_cast(insn1->GetOperand(kInsnThirdOpnd)); uint32 xLslrriBitLen = 6; uint32 wLslrriBitLen = 5; - Operand &shiftOpnd = aarchFunc->CreateBitShiftOperand(BitShiftOperand::kLSL, + Operand &shiftOpnd = aarchFunc->CreateBitShiftOperand(BitShiftOperand::kShiftLSL, static_cast(immOpnd.GetValue()), (opCode == MOP_xlslrri6) ? xLslrriBitLen : wLslrriBitLen); MOperator mOp = (is64bits ? 
MOP_xaddrrrs : MOP_waddrrrs); insn.GetBB()->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, op0, opnd1, shiftOpnd)); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp index b5507e8eacb3004041b54d0322db95a87f55825a..248347293d63a39a22ebba877036a05f611d7051 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp @@ -423,27 +423,21 @@ static void InsertNopAfterLastCall(AArch64CGFunc &cgFunc) { } } -void AArch64AsmEmitter::EmitCallWithLocalAlias(Emitter &emitter, FuncNameOperand &func, const std::string &mdName) const { - const MIRSymbol *funcSymbol = func.GetFunctionSymbol(); - if (!funcSymbol->IsStatic() && funcSymbol->GetFunction()->HasBody()) { - std::string funcName = func.GetName(); - std::string funcAliasName = funcName + ".localalias"; - /* emit set alias instruction */ - (void)emitter.Emit("\t.set\t"); - (void)emitter.Emit(funcAliasName).Emit(", "); - (void)emitter.Emit(funcName).Emit("\n"); - - /* emit call instruction */ - (void)emitter.Emit("\t").Emit(mdName).Emit("\t"); - (void)emitter.Emit(funcAliasName).Emit("\n"); - } +void AArch64AsmEmitter::EmitCallWithLocalAlias(Emitter &emitter, const std::string &funcName, + const std::string &mdName) const { + std::string funcAliasName = funcName + ".localalias"; + // emit call instruction + (void)emitter.Emit("\t").Emit(mdName).Emit("\t"); + (void)emitter.Emit(funcAliasName).Emit("\n"); } void HandleSpecificSec(Emitter &emitter, CGFunc &cgFunc) { const std::string §ionName = cgFunc.GetFunction().GetAttrs().GetPrefixSectionName(); emitter.Emit("\t.section\t" + sectionName); if (cgFunc.GetPriority() != 0) { - emitter.Emit(".").Emit(cgFunc.GetPriority()); + if (!opts::linkerTimeOpt.IsEnabledByUser()) { + emitter.Emit(".").Emit(cgFunc.GetPriority()); + } } bool isInInitArray = sectionName == ".init_array"; bool isInFiniArray = sectionName == 
".fini_array"; @@ -462,6 +456,13 @@ void HandleSpecificSec(Emitter &emitter, CGFunc &cgFunc) { } } +static void EmitLocalAliasOfFuncName(Emitter &emitter, const std::string &funcName) { + auto funcAliasName = funcName + ".localalias"; + (void)emitter.Emit("\t.set\t"); + (void)emitter.Emit(funcAliasName).Emit(", "); + (void)emitter.Emit(funcName).Emit("\n"); +} + void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); @@ -487,7 +488,11 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { (void)emitter.Emit("\t.section\t.text.startup").Emit(",\"ax\",@progbits\n"); } else { if (cgFunc.GetPriority() != 0) { - (void)emitter.Emit("\t.section\tperf_hot.").Emit(cgFunc.GetPriority()).Emit(",\"ax\",@progbits\n"); + if (opts::linkerTimeOpt.IsEnabledByUser()) { + (void)emitter.Emit("\t.section\tperf_hot").Emit(",\"ax\",@progbits\n"); + } else { + (void)emitter.Emit("\t.section\tperf_hot.").Emit(cgFunc.GetPriority()).Emit(",\"ax\",@progbits\n"); + } } else { (void)emitter.Emit("\t.text\n"); } @@ -540,10 +545,10 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { /* if no visibility set individually, set it to be same as the -fvisibility value */ if (!func->IsStatic() && func->IsDefaultVisibility()) { switch (CGOptions::GetVisibilityType()) { - case CGOptions::kHidden: + case CGOptions::kHiddenVisibility: func->SetAttr(FUNCATTR_visibility_hidden); break; - case CGOptions::kProtected: + case CGOptions::kProtectedVisibility: func->SetAttr(FUNCATTR_visibility_protected); break; default: @@ -613,6 +618,10 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { } if (boundaryBB && bb->GetNext() == boundaryBB) { (void)emitter.Emit("\t.size\t" + funcStName + ", . 
- " + funcStName + "\n"); + if (CGOptions::IsNoSemanticInterposition() && !cgFunc.GetFunction().IsStatic() && + cgFunc.GetFunction().IsDefaultVisibility()) { + EmitLocalAliasOfFuncName(emitter, funcStName); + } std::string sectionName = ".text.unlikely." + funcStName + ".cold"; (void)emitter.Emit("\t.section " + sectionName + ",\"ax\"\n"); (void)emitter.Emit("\t.align 5\n"); @@ -623,13 +632,19 @@ void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { /* Emit a label for calculating method size */ (void)emitter.Emit(".Label.end." + funcStName + ":\n"); } + if (cgFunc.GetExitBBLost()) { + EmitAArch64CfiInsn(emitter, cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_endproc)); + } if (boundaryBB) { (void)emitter.Emit("\t.size\t" + funcStName + ".cold, .-").Emit(funcStName + ".cold\n"); } else { (void)emitter.Emit("\t.size\t" + funcStName + ", .-").Emit(funcStName + "\n"); } - + if (!boundaryBB && CGOptions::IsNoSemanticInterposition() && !cgFunc.GetFunction().IsStatic() && + cgFunc.GetFunction().IsDefaultVisibility()) { + EmitLocalAliasOfFuncName(emitter, funcStName); + } auto constructorAttr = funcSt->GetFunction()->GetAttrs().GetConstructorPriority(); if (constructorAttr != -1) { (void)emitter.Emit("\t.section\t.init_array." 
+ std::to_string(constructorAttr) + ",\"aw\"\n"); @@ -802,6 +817,14 @@ void AArch64AsmEmitter::EmitAArch64Insn(maplebe::Emitter &emitter, Insn &insn) c case MOP_pseudo_none: { return; } + case MOP_tlsload_tdata: { + EmitCTlsLoadTdata(emitter, insn); + return; + } + case MOP_tlsload_tbss: { + EmitCTlsLoadTbss(emitter, insn); + return; + } case MOP_tls_desc_call: { EmitCTlsDescCall(emitter, insn); return; @@ -810,6 +833,10 @@ void AArch64AsmEmitter::EmitAArch64Insn(maplebe::Emitter &emitter, Insn &insn) c EmitCTlsDescRel(emitter, insn); return; } + case MOP_tls_desc_got: { + EmitCTlsDescGot(emitter, insn); + return; + } case MOP_sync_lock_test_setI: case MOP_sync_lock_test_setL: { EmitSyncLockTestSet(emitter, insn); @@ -828,9 +855,14 @@ void AArch64AsmEmitter::EmitAArch64Insn(maplebe::Emitter &emitter, Insn &insn) c } } /* if fno-semantic-interposition is enabled, print function alias instead */ - if (md->IsCall() && insn.GetOperand(kInsnFirstOpnd).IsFuncNameOpnd() && CGOptions::IsNoSemanticInterposition()) { - EmitCallWithLocalAlias(emitter, static_cast(insn.GetOperand(kInsnFirstOpnd)), md->GetName()); - return; + if ((md->IsCall() || md->IsTailCall()) && insn.GetOperand(kInsnFirstOpnd).IsFuncNameOpnd() && + CGOptions::IsNoSemanticInterposition()) { + const MIRSymbol *funcSymbol = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetFunctionSymbol(); + MIRFunction *mirFunc = funcSymbol->GetFunction(); + if (mirFunc && !mirFunc->IsStatic() && mirFunc->HasBody() && mirFunc->IsDefaultVisibility()) { + EmitCallWithLocalAlias(emitter, funcSymbol->GetName(), md->GetName()); + return; + } } std::string format(md->format); @@ -1020,6 +1052,7 @@ static void AsmStringOutputRegNum( if (isInt) { newRegno = regno - intBase; } else { + CHECK_FATAL(regno >= 35, "The input type must be float."); newRegno = regno - fpBase; } if (newRegno > (kDecimalMax - 1)) { @@ -1099,9 +1132,11 @@ void AArch64AsmEmitter::EmitInlineAsm(Emitter &emitter, const Insn &insn) const } else if (c == '{') { c = 
asmStr[++i]; CHECK_FATAL(((c >= '0') && (c <= '9')), "Inline asm : invalid register constraint number"); - auto val = static_cast(char(c)) - static_cast(char('0')); + auto val = static_cast(static_cast(c)) - + static_cast(static_cast('0')); if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') { - val = val * kDecimalMax + static_cast(char(asmStr[++i])) - static_cast(char('0')); + val = val * kDecimalMax + static_cast(static_cast(asmStr[++i])) - + static_cast(static_cast('0')); } regno_t regno; bool isAddr = false; @@ -2052,7 +2087,6 @@ void AArch64AsmEmitter::EmitCTlsDescRel(Emitter &emitter, const Insn &insn) cons result->Accept(resultVisitor); (void)emitter.Emit(", #:tprel_lo12_nc:").Emit(symName).Emit("\n"); } - void AArch64AsmEmitter::EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const { const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_call]; Operand *func = &insn.GetOperand(kInsnSecondOpnd); @@ -2079,6 +2113,63 @@ void AArch64AsmEmitter::EmitCTlsDescCall(Emitter &emitter, const Insn &insn) con (void)emitter.Emit("\n"); } +void AArch64AsmEmitter::EmitCTlsLoadTdata(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_tlsload_tdata]; + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[0]); + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", :got:tdata_addr_" + GetCG()->GetMIRModule()->GetTlsAnchorHashString() + "\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", ["); + result->Accept(resultVisitor); + (void)emitter.Emit(", #:got_lo12:tdata_addr_" + GetCG()->GetMIRModule()->GetTlsAnchorHashString() + "]\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", ["); + result->Accept(resultVisitor); + (void)emitter.Emit("]\n"); +} + +void AArch64AsmEmitter::EmitCTlsLoadTbss(Emitter &emitter, const 
Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_tlsload_tbss]; + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[0]); + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", :got:tbss_addr_" + GetCG()->GetMIRModule()->GetTlsAnchorHashString() + "\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", ["); + result->Accept(resultVisitor); + (void)emitter.Emit(", #:got_lo12:tbss_addr_" + GetCG()->GetMIRModule()->GetTlsAnchorHashString() + "]\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", ["); + result->Accept(resultVisitor); + (void)emitter.Emit("]\n"); +} + +void AArch64AsmEmitter::EmitCTlsDescGot(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_got]; + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + Operand *symbol = &insn.GetOperand(kInsnSecondOpnd); + auto stImmOpnd = static_cast(symbol); + std::string symName = stImmOpnd->GetName(); + symName += stImmOpnd->GetSymbol()->GetStorageClass() == kScPstatic ? 
+ std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx()) : ""; + A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[0]); + emitter.Emit("\t").Emit("adrp").Emit("\t"); + result->Accept(resultVisitor); + emitter.Emit(", :gottprel:").Emit(symName).Emit("\n"); + emitter.Emit("\t").Emit("ldr").Emit("\t"); + result->Accept(resultVisitor); + emitter.Emit(", ["); + result->Accept(resultVisitor); + emitter.Emit(", #:gottprel_lo12:").Emit(symName).Emit("]\n"); +} + void AArch64AsmEmitter::EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const { const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; auto *result = &insn.GetOperand(kInsnFirstOpnd); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp index 2f573eb087e4e6d18e6fc58561447404dc05533f..4da4a6bdc1c72dbdeed3a113e235a05bffe32af5 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp @@ -131,7 +131,7 @@ void AArch64FixShortBranch::FixShortBranches() const { uint32 GetLabelIdx(Insn &insn) { uint32 res = 0; uint32 foundCount = 0; - for (size_t i = 0; i < insn.GetOperandSize(); ++i) { + for (uint32 i = 0; i < insn.GetOperandSize(); ++i) { Operand &opnd = insn.GetOperand(i); if (opnd.GetKind() == Operand::kOpdBBAddress) { res = i; @@ -228,7 +228,10 @@ bool CgFixShortBranch::PhaseRun(maplebe::CGFunc &f) { if (LiteProfile::IsInWhiteList(f.GetName()) && CGOptions::DoLiteProfUse()) { LiteProfile::BBInfo *bbInfo = f.GetFunction().GetModule()->GetLiteProfile().GetFuncBBProf(f.GetName()); if (bbInfo) { - fixShortBranch->FixShortBranchesForSplitting(); + CHECK_FATAL(bbInfo->verified.first, "Must verified pgo data in pgo use"); + if (bbInfo->verified.second) { + fixShortBranch->FixShortBranchesForSplitting(); + } } } return false; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp 
b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp index 75bf005f76703150719bee6b414ef315c80204a8..c9a86dced3fea5f7ca78c8c6fa1e1272f7879943 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp @@ -406,9 +406,6 @@ void ForwardPropPattern::Optimize(Insn &insn) { } void ForwardPropPattern::RemoveMopUxtwToMov(Insn &insn) { - if (CGOptions::DoCGSSA()) { - CHECK_FATAL(false, "check case in ssa"); - } auto &secondOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); auto &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); uint32 destRegNo = destOpnd.GetRegisterNumber(); @@ -545,6 +542,10 @@ bool BackPropPattern::CheckSrcOpndDefAndUseInsns(Insn &insn) { defInsnForSecondOpnd->IsTailCall()) { return false; } + if (cgFunc.IsAfterRegAlloc() && defInsnForSecondOpnd->GetMachineOpcode() == insn.GetMachineOpcode() && + static_cast(defInsnForSecondOpnd->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == firstRegNO) { + return false; + } /* unconcerned regs. 
*/ if ((secondRegNO >= RLR && secondRegNO <= RZR) || secondRegNO == RFP) { return false; @@ -1191,7 +1192,7 @@ bool LocalVarSaveInsnPattern::CheckLiveRange(const Insn &firstInsn) { } bool LocalVarSaveInsnPattern::CheckCondition(Insn &firstInsn) { - secondInsn = firstInsn.GetNext(); + secondInsn = firstInsn.GetNextMachineInsn(); if (secondInsn == nullptr) { return false; } @@ -1438,13 +1439,13 @@ void ExtendShiftOptPattern::SelectExtendOrShift(const Insn &def) { case MOP_xuxtw64: extendOp = ExtendShiftOperand::kUXTW; break; case MOP_wlslrri5: - case MOP_xlslrri6: shiftOp = BitShiftOperand::kLSL; + case MOP_xlslrri6: shiftOp = BitShiftOperand::kShiftLSL; break; case MOP_xlsrrri6: - case MOP_wlsrrri5: shiftOp = BitShiftOperand::kLSR; + case MOP_wlsrrri5: shiftOp = BitShiftOperand::kShiftLSR; break; case MOP_xasrrri6: - case MOP_wasrrri5: shiftOp = BitShiftOperand::kASR; + case MOP_wasrrri5: shiftOp = BitShiftOperand::kShiftASR; break; default: { extendOp = ExtendShiftOperand::kUndef; @@ -1478,51 +1479,86 @@ bool ExtendShiftOptPattern::CheckDefUseInfo(Insn &use, uint32 size) { (regDefSrc.GetSize() > regOperand.GetSize() || useDefOpnd.GetSize() != size)) { return false; } - if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) && + if ((shiftOp == BitShiftOperand::kShiftLSR || shiftOp == BitShiftOperand::kShiftASR) && (defSrcOpnd.GetSize() > size)) { return false; } regno_t defSrcRegNo = regDefSrc.GetRegisterNumber(); /* check regDefSrc */ + // If srcOpnd of defInsn redefine in the loop, and the curInsn which is to combine the srcOpnd is in the same loop, + // it can not be combined. + // e.g. + // BB1 + // sxtw R103, R112 (srcOpnd) ---> R103 defInsn + // | + // BB3 (loop header) ------ + // add R127, R126, R103, LSL 2 | ---> can not prop R112 to R103 in this insn + // | | + // ... 
| + // BB11 (loop bottom) ------ + // sub R112, R112, #1 InsnSet defSrcSet = cgFunc.GetRD()->FindDefForRegOpnd(use, defSrcRegNo, true); + if (use.GetBB()->GetLoop() != nullptr && defInsn->GetBB() != use.GetBB()) { + for (auto defSrcIt = defSrcSet.begin(); defSrcIt != defSrcSet.end(); ++defSrcIt) { + if ((*defSrcIt)->GetBB() != use.GetBB() && (*defSrcIt)->GetBB()->GetLoop() != nullptr && + (*defSrcIt)->GetBB()->GetLoop() == use.GetBB()->GetLoop() && + (defInsn->GetBB()->GetLoop() == nullptr || defInsn->GetBB()->GetLoop() != (*defSrcIt)->GetBB()->GetLoop())) { + return false; + } + } + } /* The first defSrcInsn must be closest to useInsn */ if (defSrcSet.empty()) { return false; } - Insn *defSrcInsn = *defSrcSet.begin(); - const InsnDesc *md = defSrcInsn->GetDesc(); - if ((size != regOperand.GetSize()) && md->IsMove()) { - return false; - } - if (defInsn->GetBB() == use.GetBB()) { - /* check replace reg def between defInsn and currInsn */ - Insn *tmpInsn = defInsn->GetNext(); - while (tmpInsn != &use) { - if (tmpInsn == defSrcInsn || tmpInsn == nullptr) { - return false; - } - tmpInsn = tmpInsn->GetNext(); - } - } else { /* def use not in same BB */ - if (defSrcInsn->GetBB() != defInsn->GetBB()) { + // Need to check every defSrcDefInsn in defSrcSet: + // e.g. 
+ // useOpnd: R103 + // defSrcOpnd: R122 + // defSrcSet: {uxtw[0], asr[1]} + // BB1 + // uxtw R122, R1 + // sxtw R103, R122 (defInsn) + // | + // BB5 + // / \ + // BB6 \ + // asr R122, R121, #1 \ + // \ / + // BB7 + // add R135, R132, R103 (useInsn) ==/==> can not use (R122 SXTW) instead of R103 in this add + for (auto *defSrcInsn : defSrcSet) { + const InsnDesc *md = defSrcInsn->GetDesc(); + if ((size != regOperand.GetSize()) && md->IsMove()) { return false; } - if (defSrcInsn->GetId() > defInsn->GetId()) { - return false; + if (defInsn->GetBB() == use.GetBB()) { + /* check replace reg def between defInsn and currInsn */ + Insn *tmpInsn = defInsn->GetNext(); + while (tmpInsn != &use) { + if (tmpInsn == defSrcInsn || tmpInsn == nullptr) { + return false; + } + tmpInsn = tmpInsn->GetNext(); + } + } else { /* def use not in same BB */ + if (defSrcInsn->GetBB() != defInsn->GetBB()) { + return false; + } + if (defSrcInsn->GetId() > defInsn->GetId()) { + return false; + } } - } - /* case: - * lsl w0, w0, #5 - * eor w0, w2, w0 - * ---> - * eor w0, w2, w0, lsl 5 - */ - if (defSrcInsn == defInsn) { - InsnSet replaceRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsn, defSrcRegNo, true); - if (replaceRegUseSet.size() != k1BitSize) { - return false; + // case: + // lsl w0, w0, #5 + // eor w0, w2, w0 ---> eor w0, w2, w0, lsl 5 + if (defSrcInsn == defInsn) { + InsnSet replaceRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsn, defSrcRegNo, true); + if (replaceRegUseSet.size() != k1BitSize) { + return false; + } + removeDefInsn = true; } - removeDefInsn = true; } return true; } @@ -1674,7 +1710,7 @@ bool ExtendShiftOptPattern::CheckCondition(Insn &insn) { if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) { return false; } - RegOperand ®Operand = static_cast(insn.GetOperand(replaceIdx)); + auto ®Operand = static_cast(insn.GetOperand(replaceIdx)); if (regOperand.IsPhysicalRegister()) { return false; } @@ -2212,10 +2248,15 @@ bool 
SameRHSPropPattern::CheckCondition(Insn &insn) { if (std::find(candidates.begin(), candidates.end(), mOp) == candidates.end()) { return false; } + ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "insn first operand must be register"); + /* Do not optimize r16-related to avoid tmp-reg is redefined */ + if (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == R16) { + return false; + } if (!FindSameRHSInsnInBB(insn)) { return false; } - CHECK_FATAL(prevInsn->GetOperand(kInsnFirstOpnd).IsRegister(), "prevInsn first operand must be register"); + ASSERT(prevInsn->GetOperand(kInsnFirstOpnd).IsRegister(), "prevInsn first operand must be register"); if (prevInsn->GetOperand(kInsnSecondOpnd).IsRegister() && RegOperand::IsSameReg(prevInsn->GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnSecondOpnd))) { return false; @@ -2305,10 +2346,6 @@ bool ContinuousLdrPattern::IsMemValid(const MemOperand &memopnd) { return memopnd.GetAddrMode() == MemOperand::kBOI; } -bool ContinuousLdrPattern::IsImmValid(MOperator mop, const ImmOperand &imm) { - return AArch64CG::kMd[mop].IsValidImmOpnd(imm.GetValue()); -} - int64 ContinuousLdrPattern::GetMemOffsetValue(const Insn &insn) { return static_cast(insn.GetOperand(kSecondOpnd)).GetOffsetOperand()->GetValue(); } @@ -2407,7 +2444,8 @@ void ContinuousLdrPattern::Optimize(Insn &insn) { std::swap(ldpRt1, ldpRt2); } - if (IsImmValid(mop, *static_cast(ldpRt1->GetOperand(kSecondOpnd)).GetOffsetOperand())) { + Operand &opnd = ldpRt1->GetOperand(kSecondOpnd); + if (aarch64CGFunc.IsOperandImmValid(mop, &opnd, kThirdOpnd)) { auto &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mop, ldpRt1->GetOperand(kFirstOpnd), ldpRt2->GetOperand(kFirstOpnd), ldpRt1->GetOperand(kSecondOpnd)); auto &ubfxInsn = cgFunc.GetInsnBuilder()->BuildInsn(ubfx, currInsn->GetOperand(kFirstOpnd), diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_global_schedule.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global_schedule.cpp new file mode 100644 
index 0000000000000000000000000000000000000000..4229208b647800a01c5da6cbc5057f6d57b2bb2e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global_schedule.cpp @@ -0,0 +1,190 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#include "aarch64_global_schedule.h" +#include "aarch64_cg.h" + +namespace maplebe { +/* + * To verify the correctness of the dependency graph, + * by only scheduling the instructions of nodes in the region based on the inter-block data dependency information. 
+ */ +void AArch64GlobalSchedule::VerifyingSchedule(CDGRegion ®ion) { + for (auto cdgNode : region.GetRegionNodes()) { + MemPool *cdgNodeMp = memPoolCtrler.NewMemPool("global-scheduler cdgNode memPool", true); + + InitInCDGNode(region, *cdgNode, cdgNodeMp); + uint32 scheduledNodeNum = 0; + + /* Schedule independent instructions sequentially */ + MapleVector candidates = commonSchedInfo->GetCandidates(); + MapleVector schedResults = commonSchedInfo->GetSchedResults(); + auto depIter = candidates.begin(); + while (!candidates.empty()) { + DepNode *depNode = *depIter; + // the depNode can be scheduled + if (depNode->GetValidPredsSize() == 0) { + Insn *insn = depNode->GetInsn(); + depNode->SetState(kScheduled); + schedResults.emplace_back(depNode); + for (auto succLink : depNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DecreaseValidPredsSize(); + } + depIter = commonSchedInfo->EraseIterFromCandidates(depIter); + if (insn->GetBB()->GetId() == cdgNode->GetBB()->GetId()) { + scheduledNodeNum++; + } + } else { + depIter++; + } + if (depIter == candidates.end()) { + depIter = candidates.begin(); + } + // When all instructions in the cdgNode are scheduled, the scheduling ends + if (scheduledNodeNum == cdgNode->GetInsnNum()) { + break; + } + } + + /* Reorder the instructions of BB based on the scheduling result */ + FinishScheduling(*cdgNode); + ClearCDGNodeInfo(region, *cdgNode, cdgNodeMp); + } +} + +void AArch64GlobalSchedule::InitInCDGNode(CDGRegion ®ion, CDGNode &cdgNode, MemPool *cdgNodeMp) { + commonSchedInfo = cdgNodeMp->New(*cdgNodeMp); + // 1. The instructions of the current node + MapleVector &curDataNodes = cdgNode.GetAllDataNodes(); + // For verify, the node is stored in reverse order and for global, the node is stored in sequence + for (auto depNode : curDataNodes) { + commonSchedInfo->AddCandidates(depNode); + depNode->SetState(kCandidate); + } + // 2. 
The instructions of the equivalent candidate nodes of the current node + std::vector equivalentNodes; + cda.GetEquivalentNodesInRegion(region, cdgNode, equivalentNodes); + for (auto equivNode : equivalentNodes) { + BB *equivBB = equivNode->GetBB(); + ASSERT(equivBB != nullptr, "get bb from cdgNode failed"); + if (equivBB->IsAtomicBuiltInBB()) { + continue; + } + for (auto depNode : equivNode->GetAllDataNodes()) { + Insn *insn = depNode->GetInsn(); + CHECK_FATAL(insn != nullptr, "get insn from depNode failed"); + // call & branch insns cannot be moved across BB + if (insn->IsBranch() || insn->IsCall()) { + continue; + } + commonSchedInfo->AddCandidates(depNode); + depNode->SetState(kCandidate); + } + } + listScheduler->SetCommonSchedInfo(*commonSchedInfo); + + // Init insnNum of curCDGNode + uint32 insnNum = 0; + BB *curBB = cdgNode.GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + FOR_BB_INSNS_CONST(insn, curBB) { + if (insn->IsMachineInstruction()) { + insnNum++; + } + } + cdgNode.SetInsnNum(insnNum); + + if (GLOBAL_SCHEDULE_DUMP) { + DumpCDGNodeInfoBeforeSchedule(cdgNode); + } +} + +void AArch64GlobalSchedule::FinishScheduling(CDGNode &cdgNode) { + BB *curBB = cdgNode.GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + curBB->ClearInsns(); + + MapleVector schedResults = commonSchedInfo->GetSchedResults(); + for (auto depNode : schedResults) { + CHECK_FATAL(depNode->GetInsn() != nullptr, "get insn from depNode failed"); + if (!depNode->GetClinitInsns().empty()) { + for (auto clinitInsn : depNode->GetClinitInsns()) { + curBB->AppendInsn(*clinitInsn); + } + } + + BB *bb = depNode->GetInsn()->GetBB(); + if (bb->GetId() != curBB->GetId()) { + CDGNode *node = bb->GetCDGNode(); + CHECK_FATAL(node != nullptr, "get cdgNode from bb failed"); + node->RemoveDepNodeFromDataNodes(*depNode); + // Remove the instruction & depNode from the candidate BB + bb->RemoveInsn(*depNode->GetInsn()); + // Append the instruction of candidateBB + 
curBB->AppendOtherBBInsn(*depNode->GetInsn()); + } else { + // Append debug & comment infos of curBB + for (auto commentInsn : depNode->GetComments()) { + if (commentInsn->GetPrev() != nullptr && commentInsn->GetPrev()->IsDbgInsn()) { + curBB->AppendInsn(*commentInsn->GetPrev()); + } + curBB->AppendInsn(*commentInsn); + } + if (depNode->GetInsn()->GetPrev() != nullptr && depNode->GetInsn()->GetPrev()->IsDbgInsn()) { + curBB->AppendInsn(*depNode->GetInsn()->GetPrev()); + } + // Append the instruction of curBB + curBB->AppendInsn(*depNode->GetInsn()); + } + } + for (auto lastComment : cdgNode.GetLastComments()) { + curBB->AppendInsn(*lastComment); + } + cdgNode.ClearLastComments(); + ASSERT(curBB->NumInsn() >= static_cast(cdgNode.GetInsnNum()), + "The number of instructions after global-scheduling is unexpected"); +} + +void AArch64GlobalSchedule::DumpInsnInfoByScheduledOrder(BB &curBB) const { + LogInfo::MapleLogger() << " ------------------------------------------------\n"; + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(6) << "insn" << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(8) << "mop" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(6) << "bb" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(14) << "succs(latency)" << std::resetiosflags(std::ios::right) << "\n"; + LogInfo::MapleLogger() << " ------------------------------------------------\n"; + FOR_BB_INSNS_CONST(insn, &curBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(6) << insn->GetId() << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(8); + const InsnDesc *md = &AArch64CG::kMd[insn->GetMachineOpcode()]; + LogInfo::MapleLogger() << md->name << std::resetiosflags(std::ios::right) << + 
std::setiosflags(std::ios::right) << std::setw(6) << curBB.GetId() << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(14); + const DepNode *depNode = insn->GetDepNode(); + ASSERT(depNode != nullptr, "get depNode from insn failed"); + for (auto succLink : depNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + LogInfo::MapleLogger() << succNode.GetInsn()->GetId() << "(" << succLink->GetLatency() << "), "; + } + LogInfo::MapleLogger() << std::resetiosflags(std::ios::right) << "\n"; + } + LogInfo::MapleLogger() << " ------------------------------------------------\n"; + LogInfo::MapleLogger() << "\n"; +} +} \ No newline at end of file diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp index 69a6069c506ee7f1220f18d950d541d52e0855fe..7349cb1acfd1ab2cd7e072a5a141d6033cbd0a68 100755 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp @@ -19,6 +19,8 @@ #include "aarch64_isa.h" #include "aarch64_insn.h" #include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "aarch64_isa.h" /* * This phase implements if-conversion optimization, @@ -27,35 +29,66 @@ namespace maplebe { void AArch64IfConversionOptimizer::InitOptimizePatterns() { singlePassPatterns.emplace_back(memPool->New(*cgFunc)); - singlePassPatterns.emplace_back(memPool->New(*cgFunc)); + if (cgFunc->GetMirModule().IsCModule()) { + singlePassPatterns.emplace_back(memPool->New(*cgFunc)); + } singlePassPatterns.emplace_back(memPool->New(*cgFunc)); } /* build ccmp Insn */ -Insn *AArch64ICOPattern::BuildCcmpInsn(ConditionCode ccCode, const Insn &cmpInsn) const { +Insn *AArch64ICOPattern::BuildCcmpInsn(ConditionCode ccCode, ConditionCode ccCode2, + const Insn &cmpInsn, Insn *&moveInsn) const { Operand &opnd0 = cmpInsn.GetOperand(kInsnFirstOpnd); Operand &opnd1 = cmpInsn.GetOperand(kInsnSecondOpnd); Operand &opnd2 = 
cmpInsn.GetOperand(kInsnThirdOpnd); + if (!opnd2.IsImmediate() && !opnd2.IsRegister()) { + return nullptr; + } + /* ccmp has only int opnd */ + if (opnd2.IsImmediate() && !static_cast(opnd2).IsIntImmediate()) { + return nullptr; + } + if (opnd2.IsRegister() && !static_cast(opnd1).IsOfIntClass()) { + return nullptr; + } /* ccmp has only int opnd */ if (!static_cast(opnd1).IsOfIntClass()) { return nullptr; } AArch64CGFunc *func = static_cast(cgFunc); - uint32 nzcv = GetNZCV(ccCode, false); + uint32 nzcv = GetNZCV(ccCode2, false); if (nzcv == k16BitSize) { return nullptr; } ImmOperand &opnd3 = func->CreateImmOperand(PTY_u8, nzcv); CondOperand &cond = static_cast(cgFunc)->GetCondOperand(ccCode); uint32 dSize = opnd1.GetSize(); - bool isIntTy = opnd2.IsIntImmediate(); - MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xccmpriic : MOP_wccmpriic) - : (dSize == k64BitSize ? MOP_xccmprric : MOP_wccmprric); - /* cmp opnd2 in the range 0-4095, ccmp opnd2 in the range 0-31 */ - if (isIntTy && static_cast(opnd2).GetRegisterNumber() >= k32BitSize) { + + MOperator mOpCode = (dSize == k64BitSize ? MOP_xccmprric : MOP_wccmprric); + auto moveAble = static_cast(opnd2).IsSingleInstructionMovable( + AArch64CG::kMd[cmpInsn.GetMachineOpcode()].GetOpndDes(1)->GetSize()); + if (!moveAble) { return nullptr; } - return &cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnd0, opnd1, opnd2, opnd3, cond); + auto *newReg = &opnd2; + if (opnd2.IsImmediate()) { + if (static_cast(opnd2).GetValue() >= k32BitSize) { + newReg = cgFunc->GetTheCFG()->CreateVregFromReg(static_cast(opnd1)); + uint32 mOp = (opnd2.GetSize() == 64 ? (opnd2.IsImmediate() ? MOP_xmovri64 : MOP_xmovrr) : + (opnd2.IsImmediate() ? MOP_wmovri32 : MOP_wmovrr)); + moveInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mOp, *newReg, opnd2); + } else { + mOpCode = (dSize == k64BitSize ? 
MOP_xccmpriic : MOP_wccmpriic); + } + } + std::vector opnds; + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(newReg); + opnds.emplace_back(&opnd3); + opnds.emplace_back(&cond); + opnds.emplace_back(&opnd0); + return &cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnds); } /* Rooted ccCode resource NZCV */ @@ -265,6 +298,9 @@ RegOperand *AArch64ICOIfThenElsePattern::GenerateRegAndTempInsn(Operand &dest, c return &cgFunc->GetZeroOpnd(destReg.GetSize()); } Insn &tempInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *reg, tempSrcElse); + if (!VERIFY_INSN(&tempInsn)) { + SPLIT_INSN(&tempInsn, cgFunc); + } generateInsn.emplace_back(&tempInsn); return reg; } else { @@ -334,10 +370,10 @@ bool AArch64ICOIfThenElsePattern::CheckHasSameDestSize(std::vector &lInsn } bool AArch64ICOIfThenElsePattern::BuildCondMovInsn(const BB &bb, - const std::map> &ifDestSrcMap, - const std::map> &elseDestSrcMap, + const DestSrcMap &destSrcTempMap, bool elseBBIsProcessed, - std::vector &generateInsn) const { + std::vector &generateInsn, + const Insn *toBeRremoved2CmpBB) const { Insn *branchInsn = cgFunc->GetTheCFG()->FindLastCondBrInsn(*cmpBB); FOR_BB_INSNS_CONST(insn, (&bb)) { if (!insn->IsMachineInstruction() || insn->IsBranch()) { @@ -352,8 +388,8 @@ bool AArch64ICOIfThenElsePattern::BuildCondMovInsn(const BB &bb, ASSERT(dest->IsRegister(), "register check"); RegOperand *destReg = static_cast(dest); - Operand *elseDest = GetDestReg(elseDestSrcMap, *destReg); - Operand *ifDest = GetDestReg(ifDestSrcMap, *destReg); + Operand *elseDest = GetDestReg(destSrcTempMap.elseDestSrcMap, *destReg); + Operand *ifDest = GetDestReg(destSrcTempMap.ifDestSrcMap, *destReg); if (elseBBIsProcessed) { if (elseDest != nullptr) { @@ -361,13 +397,17 @@ bool AArch64ICOIfThenElsePattern::BuildCondMovInsn(const BB &bb, } elseDest = dest; ASSERT(ifDest != nullptr, "null ptr check"); - if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) { + if 
(!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber()) && insn != toBeRremoved2CmpBB) { + // When another branch does not assign a value to destReg and destReg is not used outside bb, + // it means that the instruction is redundant. + // However, when the instruction is modified in MoveSetInsn2CmpBB, + // the instruction cannot be deleted because there is a use point in the bb. continue; } } else { ASSERT(elseDest != nullptr, "null ptr check"); if (ifDest == nullptr) { - if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) { + if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber()) && insn != toBeRremoved2CmpBB) { continue; } ifDest = dest; @@ -425,7 +465,23 @@ bool AArch64ICOIfThenElsePattern::CheckModifiedRegister(Insn &insn, std::map(destSrcPair.first); if (mapSrcReg->GetRegisterNumber() == srcReg.GetRegisterNumber()) { if (toBeRremovedOutOfCurrBB == nullptr && mapSrcReg->IsVirtualRegister() && - !insn.GetBB()->GetLiveOut()->TestBit(srcReg.GetRegisterNumber())) { + !insn.GetBB()->GetLiveOut()->TestBit(srcReg.GetRegisterNumber()) && + // If the insn is Has2SrcOpndSetInsn, the insn cannot be moved: + // =========init=========: + // cmp R4, R5 + // mov R102, imm:1 + // lsl R0, R102, R100 + // =========after MoveSetInsn2CmpBB=========: + // mov R1226, imm:1 + // cmp R4, R5 + // mov R102, R1226 + // lsl R0, R102, R100 + // =========after mov Has2SrcOpndSetInsn before cmpInsn=========: + // mov R1226, imm:1 + // lsl R0, R102, R100 ====> error: use undefined reg R102 + // cmp R4, R5 + // mov R102, R122 + !Has2SrcOpndSetInsn(insn)) { ASSERT(dest2InsnMap.find(mapSrcReg) != dest2InsnMap.end(), "must find"); toBeRremovedOutOfCurrBB = dest2InsnMap[mapSrcReg]; continue; @@ -476,7 +532,7 @@ bool AArch64ICOIfThenElsePattern::CheckCondMoveBB(BB *bb, std::map &destRegs, std::vector &setInsn, Insn *&toBeRremovedOutOfCurrBB) const { std::map dest2InsnMap; // CheckModifiedRegister will ensure that dest is defined only once. 
if (bb == nullptr) { - return false; + return true; } FOR_BB_INSNS(insn, bb) { if (!insn->IsMachineInstruction() || insn->IsBranch()) { @@ -705,54 +761,30 @@ void AArch64ICOIfThenElsePattern::RevertMoveInsns(BB *bb, Insn *prevInsnInBB, In } Insn *AArch64ICOIfThenElsePattern::MoveSetInsn2CmpBB(Insn &toBeRremoved2CmpBB, BB &currBB, - std::vector &anotherBranchDestRegs, std::map> &destSrcMap) const { - Insn *newInsn = nullptr; - bool findInAnotherBB = false; - for (auto *tempReg: anotherBranchDestRegs) { - if (static_cast(tempReg)->Equals(static_cast(toBeRremoved2CmpBB.GetOperand(0)))) { - findInAnotherBB = true; - } - } - if (findInAnotherBB) { - // If the target register w0 is both the target register in the if and else branches, do opt like: - // cmpBB: cmpBB: - // uxth w2, w1 (change) - // cmp w5, #1 cmp w5, #1 - // beq beq - // - // if bb: if bb: - // eor w0, w3, w4 => eor w0, w3, w4 - // - // else bb: else bb: - // uxth w0, w1 mov w0, w2 (change) - auto &oldDestReg = static_cast(static_cast(toBeRremoved2CmpBB.GetOperand(0))); - ASSERT(oldDestReg.IsVirtualRegister(), "must be vreg"); - auto &newDestReg = cgFunc->CreateVirtualRegisterOperand( - cgFunc->NewVReg(oldDestReg.GetRegisterType(), oldDestReg.GetSize())); - toBeRremoved2CmpBB.SetOperand(0, newDestReg); - uint32 mOp = (oldDestReg.GetSize() == 64) ? 
MOP_xmovrr : MOP_wmovrr; - newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(mOp, oldDestReg, newDestReg)); - (void)currBB.InsertInsnBefore(toBeRremoved2CmpBB, *newInsn); - currBB.RemoveInsn(toBeRremoved2CmpBB); - (void)cmpBB->InsertInsnBefore(*cmpInsn, toBeRremoved2CmpBB); - destSrcMap[&oldDestReg].clear(); - destSrcMap[&oldDestReg].push_back(&newDestReg); - } else { - // If the target register w0 is in if or else branch, do opt like: - // cmpBB: cmpBB: - // mov w4, #40961 (change) - // cmp w5, #1 cmp w5, #1 - // beq beq - // - // if bb: if bb: - // mov w4, #40961 (change) - // eor w1, w3, w4 => eor w1, w3, w4 - // - // else bb: else bb: - // uxth w0, w1 uxth w0, w1 - toBeRremoved2CmpBB.GetBB()->RemoveInsn(toBeRremoved2CmpBB); - (void)cmpBB->InsertInsnBefore(*cmpInsn, toBeRremoved2CmpBB); - } + std::map> &destSrcMap) const { + // When moving the instruction to the front of cmp, need to create a new register because it may change the semantics: + // cmpBB: cmpBB: + // uxth w2, w1 (change) + // cmp w5, #1 cmp w5, #1 + // beq beq + // + // if bb: if bb: + // eor w0, w3, w4 => eor w0, w3, w4 + // + // else bb: else bb: + // uxth w0, w1 mov w0, w2 (change) + auto &oldDestReg = static_cast(static_cast(toBeRremoved2CmpBB.GetOperand(0))); + ASSERT(oldDestReg.IsVirtualRegister(), "must be vreg"); + auto &newDestReg = cgFunc->CreateVirtualRegisterOperand( + cgFunc->NewVReg(oldDestReg.GetRegisterType(), oldDestReg.GetSize() / k8BitSize)); + toBeRremoved2CmpBB.SetOperand(0, newDestReg); + uint32 mOp = (oldDestReg.GetSize() == 64) ? 
MOP_xmovrr : MOP_wmovrr; + auto *newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(mOp, oldDestReg, newDestReg)); + (void)currBB.InsertInsnBefore(toBeRremoved2CmpBB, *newInsn); + currBB.RemoveInsn(toBeRremoved2CmpBB); + (void)cmpBB->InsertInsnBefore(*cmpInsn, toBeRremoved2CmpBB); + destSrcMap[&oldDestReg].clear(); + destSrcMap[&oldDestReg].push_back(&newDestReg); return newInsn; } @@ -808,6 +840,11 @@ bool AArch64ICOIfThenElsePattern::DoOpt(BB *ifBB, BB *elseBB, BB &joinBB) { !CheckModifiedInCmpInsn(*insnInIfBBToBeRremovedOutOfCurrBB, true)) { return false; } + // If ifBB and elseBB are the same bb, there is no need to move statement forward. + if (ifBB == elseBB && + (insnInElseBBToBeRremovedOutOfCurrBB != nullptr || insnInIfBBToBeRremovedOutOfCurrBB != nullptr)) { + return false; + } if (!CheckHasSameDest(ifSetInsn, elseSetInsn) || !CheckHasSameDest(elseSetInsn, ifSetInsn) || !CheckHasSameDestSize(ifSetInsn, elseSetInsn)) { return false; @@ -844,15 +881,13 @@ bool AArch64ICOIfThenElsePattern::DoOpt(BB *ifBB, BB *elseBB, BB &joinBB) { if (insnInElseBBToBeRremovedOutOfCurrBB != nullptr) { prevInsnInElseBB = insnInElseBBToBeRremovedOutOfCurrBB->GetPrev(); ASSERT_NOT_NULL(elseBB); - newInsnOfElseBB = MoveSetInsn2CmpBB( - *insnInElseBBToBeRremovedOutOfCurrBB, *elseBB, ifDestRegs, elseDestSrcMap); + newInsnOfElseBB = MoveSetInsn2CmpBB(*insnInElseBBToBeRremovedOutOfCurrBB, *elseBB, elseDestSrcMap); UpdateTemps(elseDestRegs, elseSetInsn, elseDestSrcMap, *insnInElseBBToBeRremovedOutOfCurrBB, newInsnOfElseBB); } if (insnInIfBBToBeRremovedOutOfCurrBB != nullptr) { prevInsnInIfBB = insnInIfBBToBeRremovedOutOfCurrBB->GetPrev(); ASSERT_NOT_NULL(ifBB); - newInsnOfIfBB = MoveSetInsn2CmpBB( - *insnInIfBBToBeRremovedOutOfCurrBB, *ifBB, elseDestRegs, ifDestSrcMap); + newInsnOfIfBB = MoveSetInsn2CmpBB(*insnInIfBBToBeRremovedOutOfCurrBB, *ifBB, ifDestSrcMap); UpdateTemps(ifDestRegs, ifSetInsn, ifDestSrcMap, *insnInIfBBToBeRremovedOutOfCurrBB, newInsnOfIfBB); } @@ -860,14 +895,15 @@ 
bool AArch64ICOIfThenElsePattern::DoOpt(BB *ifBB, BB *elseBB, BB &joinBB) { std::vector elseGenerateInsn; std::vector ifGenerateInsn; bool elseBBProcessResult = false; + DestSrcMap destSrcTempMap(ifDestSrcMap, elseDestSrcMap); if (elseBB != nullptr) { - elseBBProcessResult = BuildCondMovInsn(*elseBB, ifDestSrcMap, elseDestSrcMap, false, elseGenerateInsn); + elseBBProcessResult = BuildCondMovInsn(*elseBB, destSrcTempMap, false, elseGenerateInsn, newInsnOfElseBB); } bool ifBBProcessResult = false; if (ifBB != nullptr) { - ifBBProcessResult = BuildCondMovInsn(*ifBB, ifDestSrcMap, elseDestSrcMap, true, ifGenerateInsn); + ifBBProcessResult = BuildCondMovInsn(*ifBB, destSrcTempMap, true, ifGenerateInsn, newInsnOfIfBB); } - if (!elseBBProcessResult || (ifBB != nullptr && !ifBBProcessResult)) { + if ((elseBB != nullptr && !elseBBProcessResult) || (ifBB != nullptr && !ifBBProcessResult)) { RevertMoveInsns(elseBB, prevInsnInElseBB, newInsnOfElseBB, insnInElseBBToBeRremovedOutOfCurrBB); RevertMoveInsns(ifBB, prevInsnInIfBB, newInsnOfIfBB, insnInIfBBToBeRremovedOutOfCurrBB); return false; @@ -975,13 +1011,18 @@ bool AArch64ICOIfThenElsePattern::Optimize(BB &curBB) { ifBB = nullptr; elseBB = elseDest; joinBB = thenDest; + } else if (thenDest->NumPreds() == 1 && thenDest->NumSuccs() == 1 && thenDest->GetSuccs().front() == elseDest) { + ifBB = thenDest; + elseBB = nullptr; + joinBB = elseDest; } else { /* not a form we can handle */ return false; } - ASSERT(elseBB != nullptr, "elseBB should not be nullptr"); - if (CGCFG::InLSDA(elseBB->GetLabIdx(), cgFunc->GetEHFunc()) || - CGCFG::InSwitchTable(elseBB->GetLabIdx(), *cgFunc)) { + if (elseBB != nullptr && + (CGCFG::InLSDA(elseBB->GetLabIdx(), cgFunc->GetEHFunc()) || + CGCFG::InSwitchTable(elseBB->GetLabIdx(), *cgFunc) || + !elseBB->HasMachineInsn())) { return false; } @@ -1001,16 +1042,56 @@ bool AArch64ICOSameCondPattern::Optimize(BB &secondIfBB) { if (secondIfBB.GetKind() != BB::kBBIf || secondIfBB.NumPreds() != 1) { return 
false; } - BB *firstIfBB = secondIfBB.GetPrev(); - BB *nextBB = firstIfBB->GetNext(); - CHECK_FATAL(nextBB != nullptr, "nextBB is null in AArch64ICOSameCondPattern::Optimize"); + if (secondIfBB.GetPreds().size() != 1) { + return false; + } + BB *firstIfBB = *secondIfBB.GetPredsBegin(); /* firstIfBB's nextBB is secondIfBB */ - if (firstIfBB == nullptr || firstIfBB->GetKind() != BB::kBBIf || nextBB->GetId() != secondIfBB.GetId()) { + if (firstIfBB->GetKind() != BB::kBBIf) { + return false; + } + if (firstIfBB->GetNext() == nullptr || secondIfBB.GetNext() == nullptr) { + return false; + } + if (firstIfBB->GetSuccs().size() != kOperandNumBinary || secondIfBB.GetSuccs().size() != kOperandNumBinary) { + return false; + } + auto *firstIfSucc0 = firstIfBB->GetSuccs().front(); + auto *firstIfSucc1 = firstIfBB->GetSuccs().back(); + if (firstIfBB->GetNext()->GetLabIdx() != firstIfSucc0->GetLabIdx() && + firstIfBB->GetNext()->GetLabIdx() != firstIfSucc1->GetLabIdx()) { + return false; + } + auto *secondIfSucc0 = secondIfBB.GetSuccs().front(); + auto *secondIfSucc1 = secondIfBB.GetSuccs().back(); + if (secondIfBB.GetNext()->GetLabIdx() != secondIfSucc0->GetLabIdx() && + secondIfBB.GetNext()->GetLabIdx() != secondIfSucc1->GetLabIdx()) { + return false; + } + if (secondIfBB.GetLabIdx() != firstIfSucc0->GetLabIdx() && + secondIfBB.GetLabIdx() != firstIfSucc1->GetLabIdx()) { return false; } return DoOpt(*firstIfBB, secondIfBB); } +bool AArch64ICOPattern::IsReverseMop(MOperator mOperator1, MOperator mOperator2) const { + return (mOperator1 == MOP_beq && mOperator2 == MOP_bne) || + (mOperator1 == MOP_blt && mOperator2 == MOP_bge) || + (mOperator1 == MOP_ble && mOperator2 == MOP_bgt) || + (mOperator1 == MOP_blo && mOperator2 == MOP_bhs) || + (mOperator1 == MOP_bhi && mOperator2 == MOP_bls) || + (mOperator1 == MOP_bpl && mOperator2 == MOP_bmi) || + (mOperator1 == MOP_bvc && mOperator2 == MOP_bvs) || + (mOperator1 == MOP_bne && mOperator2 == MOP_beq) || + (mOperator1 == MOP_bge && 
mOperator2 == MOP_blt) || + (mOperator1 == MOP_bgt && mOperator2 == MOP_ble) || + (mOperator1 == MOP_bhs && mOperator2 == MOP_blo) || + (mOperator1 == MOP_bls && mOperator2 == MOP_bhi) || + (mOperator1 == MOP_bmi && mOperator2 == MOP_bpl) || + (mOperator1 == MOP_bvs && mOperator2 == MOP_bvc); +} + bool AArch64ICOPattern::CheckMop(MOperator mOperator) const { switch (mOperator) { case MOP_beq: @@ -1033,6 +1114,18 @@ bool AArch64ICOPattern::CheckMop(MOperator mOperator) const { } } +bool AArch64ICOPattern::CheckMopOfCmp(MOperator mOperator) const { + switch (mOperator) { + case MOP_wcmpri: + case MOP_wcmprr: + case MOP_xcmpri: + case MOP_xcmprr: + return true; + default: + return false; + } +} + /* branchInsn1 is firstIfBB's LastCondBrInsn * branchInsn2 is secondIfBB's LastCondBrInsn * @@ -1050,20 +1143,33 @@ bool AArch64ICOSameCondPattern::DoOpt(BB &firstIfBB, BB &secondIfBB) const { if (cmpInsn1 == nullptr || cmpInsn2 == nullptr) { return false; } - - /* tbz and cbz will not be optimized */ - if (mOperator1 != mOperator2 || !CheckMop(mOperator1)) { + if (!CheckMopOfCmp(cmpInsn1->GetMachineOpcode()) || !CheckMopOfCmp(cmpInsn2->GetMachineOpcode())) { + return false; + } + /* tbz and cbz will not be optimizsed */ + if (!CheckMop(mOperator1) || !CheckMop(mOperator2)) { return false; } - /* two BB has same branch */ std::vector labelOpnd1 = GetLabelOpnds(*branchInsn1); std::vector labelOpnd2 = GetLabelOpnds(*branchInsn2); - if (labelOpnd1.size() != 1 || labelOpnd1.size() != 1 || - labelOpnd1[0]->GetLabelIndex() != labelOpnd2[0]->GetLabelIndex()) { + + if (labelOpnd1.size() != 1 || labelOpnd1.size() != 1) { return false; } - + auto fallthruBBOfFirstIf = firstIfBB.GetNext(); + bool fallthruIsSendIfBB = (fallthruBBOfFirstIf->GetLabIdx() == secondIfBB.GetLabIdx()); + // Determine if two if bbs have the same successor. 
+ if (fallthruIsSendIfBB) { + if (labelOpnd1[0]->GetLabelIndex() != labelOpnd2[0]->GetLabelIndex() && + labelOpnd1[0]->GetLabelIndex() != secondIfBB.GetNext()->GetLabIdx()) { + return false; + } + } else { + if (fallthruBBOfFirstIf->GetLabIdx() != labelOpnd2[0]->GetLabelIndex()) { + return false; + } + } /* secondifBB only has branchInsn and cmpInsn */ FOR_BB_INSNS_REV(insn, &secondIfBB) { if (!insn->IsMachineInstruction()) { @@ -1073,23 +1179,51 @@ bool AArch64ICOSameCondPattern::DoOpt(BB &firstIfBB, BB &secondIfBB) const { return false; } } - - /* build ccmp Insn */ - ConditionCode ccCode = Encode(branchInsn1->GetMachineOpcode(), true); + ConditionCode ccCode = Encode(branchInsn1->GetMachineOpcode(), fallthruIsSendIfBB); + bool inverseCCCode2 = false; + // Determine the value of flag NZCV. + if (fallthruIsSendIfBB) { + if (labelOpnd2[0]->GetLabelIndex() == labelOpnd1[0]->GetLabelIndex()) { + inverseCCCode2 = true; + } + } else { + if (labelOpnd2[0]->GetLabelIndex() == fallthruBBOfFirstIf->GetLabIdx()) { + inverseCCCode2 = true; + } + } + ConditionCode ccCode2 = Encode(branchInsn2->GetMachineOpcode(), inverseCCCode2); ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast"); - Insn *ccmpInsn = BuildCcmpInsn(ccCode, *cmpInsn2); + Insn *movInsn = nullptr; + /* build ccmp Insn */ + Insn *ccmpInsn = BuildCcmpInsn(ccCode, ccCode2, *cmpInsn2, movInsn); if (ccmpInsn == nullptr) { return false; } - + auto *nextBB = secondIfBB.GetNext(); /* insert ccmp Insn */ firstIfBB.InsertInsnBefore(*branchInsn1, *ccmpInsn); - - /* Remove secondIfBB */ - BB *nextBB = secondIfBB.GetNext(); + if (movInsn != nullptr) { + firstIfBB.InsertInsnBefore(*cmpInsn1, *movInsn); + } + if (fallthruIsSendIfBB) { + firstIfBB.ReplaceInsn(*branchInsn1, *branchInsn2); + } else { + LabelOperand &targetOpnd = cgFunc->GetOrCreateLabelOperand(*nextBB); + auto &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(AArch64isa::FlipConditionOp(branchInsn2->GetMachineOpcode()), + branchInsn2->GetOperand(0), 
targetOpnd); + firstIfBB.ReplaceInsn(*branchInsn1, newInsn); + } + auto secondIfLabel = secondIfBB.GetLabIdx(); + auto &label2BBMap = cgFunc->GetLab2BBMap(); + if (secondIfLabel != MIRLabelTable::GetDummyLabel()) { + label2BBMap.erase(secondIfLabel); + } cgFunc->GetTheCFG()->RemoveBB(secondIfBB); - firstIfBB.PushFrontSuccs(*nextBB); - nextBB->PushFrontPreds(firstIfBB); + if (nextBB->GetLabIdx() != labelOpnd1[0]->GetLabelIndex()) { + label2BBMap.emplace(nextBB->GetLabIdx(), nextBB); + firstIfBB.PushFrontSuccs(*nextBB); + nextBB->PushFrontPreds(firstIfBB); + } return true; } /* diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/src/cg/aarch64/aarch64_imm_valid.cpp similarity index 39% rename from src/mapleall/maple_be/include/cg/immvalid.def rename to src/mapleall/maple_be/src/cg/aarch64/aarch64_imm_valid.cpp index cd631230591a1553a722f47bce5ce0dbd90f9af2..a9e7b8bd738602f1d7ab8b9fc0ac5060a934434b 100644 --- a/src/mapleall/maple_be/include/cg/immvalid.def +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_imm_valid.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. * * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -12,40 +12,20 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ -static std::set ValidBitmaskImmSet = { +#include "aarch64_imm_valid.h" +#include +#include +#include + +namespace maplebe { +const std::set ValidBitmaskImmSet = { #include "valid_bitmask_imm.txt" }; constexpr uint32 kMaxBitTableSize = 5; constexpr std::array kBitmaskImmMultTable = { 0x0000000100000001UL, 0x0001000100010001UL, 0x0101010101010101UL, 0x1111111111111111UL, 0x5555555555555555UL, }; - -bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { - /* mask1 is a 64bits number that is all 1 shifts left size bits */ - const uint64 mask1 = 0xffffffffffffffffUL << bitLen; - /* mask2 is a 64 bits number that nlowerZeroBits are all 1, higher bits aro all 0 */ - uint64 mask2 = (1UL << static_cast(nLowerZeroBits)) - 1UL; - return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) >> nLowerZeroBits)) == 0UL; -}; - -/* This is a copy from "operand.cpp", temporary fix for me_slp.cpp usage of this file */ -/* was IsMoveWidableImmediate */ -bool IsMoveWidableImmediateCopy(uint64 val, uint32 bitLen) { - if (bitLen == k64BitSize) { - /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ - if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || - ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { - return true; - } - } else { - /* get lower 32 bits */ - val &= static_cast(0xffffffff); - } - /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ - return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || - (val & ((static_cast(0xffff)) << 0)) == val); -} - +namespace aarch64 { bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { ASSERT(val != 0, "IsBitmaskImmediate() don's accept 0 or -1"); ASSERT(static_cast(val) != -1, "IsBitmaskImmediate() don's accept 0 or -1"); @@ -79,8 +59,10 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { tmpVal = tmpVal & (tmpVal - 1); if (tmpVal == 0) { if (!expectedOutcome) { +#if defined(DEBUG) && DEBUG LogInfo::MapleLogger() << "0x" << std::hex << std::setw(static_cast(k16ByteSize)) << 
std::setfill('0') << static_cast(val) << "\n"; +#endif return false; } ASSERT(expectedOutcome, "incorrect implementation: not valid value but returning true"); @@ -108,113 +90,6 @@ bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { return val == pattern * kBitmaskImmMultTable[kMaxBitTableSize - logDiff]; #endif } +} // namespace aarch64 +} // namespace maplebe -bool IsSingleInstructionMovable32(int64 value) { - return (IsMoveWidableImmediateCopy(static_cast(value), 32) || - IsMoveWidableImmediateCopy(~static_cast(value), 32) || - IsBitmaskImmediate(static_cast(value), 32)); -} - -bool IsSingleInstructionMovable64(int64 value) { - return (IsMoveWidableImmediateCopy(static_cast(value), 64) || - IsMoveWidableImmediateCopy(~static_cast(value), 64) || - IsBitmaskImmediate(static_cast(value), 64)); -} - -bool Imm12BitValid(int64 value) { - bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); - // for target linux-aarch64-gnu - result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); - return result; -} - -bool Imm12BitMaskValid(int64 value) { - if (value == 0 || static_cast(value) == -1) { - return false; - } - return IsBitmaskImmediate(static_cast(value), k32BitSize); -} - -bool Imm13BitValid(int64 value) { - bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, 0); - // for target linux-aarch64-gnu - result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, kMaxImmVal13Bits); - return result; -} - -bool Imm13BitMaskValid(int64 value) { - if (value == 0 || static_cast(value) == -1) { - return false; - } - return IsBitmaskImmediate(static_cast(value), k64BitSize); -} - -bool Imm16BitValid(int64 value) { - bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal16Bits, 0); - /* - * for target linux-aarch64-gnu - * aarch64 assembly takes up to 24-bits immediate, generating - * either cmp or cmp with shift 12 encoding - */ - result = result || 
IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); - return result; -} - -/* - * 8bit : 0 - * halfword : 1 - * 32bit - word : 2 - * 64bit - word : 3 - * 128bit- word : 4 - */ -bool StrLdrSignedOfstValid(int64 value, uint wordSize) { - if (value <= k256BitSize && value >= kNegative256BitSize) { - return true; - } else if ((value > k256BitSize) && (value <= kMaxPimm[wordSize])) { - uint64 mask = (1U << wordSize) - 1U; - return (static_cast(value) & mask) > 0 ? false : true; - } - return false; -} - - -bool StrLdr8ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, 0); -} - -bool StrLdr16ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k1ByteSize); -} - -bool StrLdr32ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k2ByteSize); -} - -bool StrLdr32PairImmValid(int64 value) { - if ((value <= kMaxSimm32Pair) && (value >= kMinSimm32)) { - return (static_cast(value) & 3) > 0 ? false : true; - } - return false; -} - -bool StrLdr64ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k3ByteSize); -} - -bool StrLdr64PairImmValid(int64 value) { - if (value <= kMaxSimm64Pair && (value >= kMinSimm64)) { - return (static_cast(value) & 7) > 0 ? false : true; - } - return false; -} - -bool StrLdr128ImmValid(int64 value) { - return StrLdrSignedOfstValid(value, k4ByteSize); -} - -bool StrLdr128PairImmValid(int64 value) { - if (value < k1024BitSize && (value >= kNegative1024BitSize)) { - return (static_cast(value) & 0xf) > 0 ? 
false : true; - } - return false; -} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp index af06cfc2a72653aacc520d61e76f696306a098c9..350bfb1e1af0b7995bab90781f0b5b299ac0c085 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp @@ -73,10 +73,7 @@ void A64OpndEmitVisitor::Visit(maplebe::ImmOperand *v) { } int64 value = v->GetValue(); - bool isNegative = (value < 0); if (!v->IsFmov()) { - value = (v->GetSize() == k64BitSize ? value : (isNegative ? - static_cast(static_cast(value)) : static_cast(static_cast(value)))); (void)emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#").Emit(value); return; } @@ -322,16 +319,16 @@ void A64OpndEmitVisitor::Visit(ExtendShiftOperand *v) { void A64OpndEmitVisitor::Visit(BitShiftOperand *v) { std::string shiftOp; switch (v->GetShiftOp()) { - case BitShiftOperand::kLSL: + case BitShiftOperand::kShiftLSL: shiftOp = "LSL #"; break; - case BitShiftOperand::kLSR: + case BitShiftOperand::kShiftLSR: shiftOp = "LSR #"; break; - case BitShiftOperand::kASR: + case BitShiftOperand::kShiftASR: shiftOp = "ASR #"; break; - case BitShiftOperand::kROR: + case BitShiftOperand::kShiftROR: shiftOp = "ROR #"; break; default: @@ -495,6 +492,9 @@ void A64OpndDumpVisitor::Visit(ImmOperand *v) { } else { LogInfo::MapleLogger() << "imm:" << v->GetValue(); } + if (v->GetVary() == kUnAdjustVary) { + LogInfo::MapleLogger() << " vary"; + } } void A64OpndDumpVisitor::Visit(MemOperand *a64v) { @@ -572,8 +572,8 @@ void A64OpndDumpVisitor::Visit(StImmOperand *v) { void A64OpndDumpVisitor::Visit(BitShiftOperand *v) { BitShiftOperand::ShiftOp shiftOp = v->GetShiftOp(); uint32 shiftAmount = v->GetShiftAmount(); - LogInfo::MapleLogger() << ((shiftOp == BitShiftOperand::kLSL) ? "LSL: " : - ((shiftOp == BitShiftOperand::kLSR) ? 
"LSR: " : "ASR: ")); + LogInfo::MapleLogger() << ((shiftOp == BitShiftOperand::kShiftLSL) ? "LSL: " : + ((shiftOp == BitShiftOperand::kShiftLSR) ? "LSR: " : "ASR: ")); LogInfo::MapleLogger() << shiftAmount; } void A64OpndDumpVisitor::Visit(ExtendShiftOperand *v) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp index 808377e86c41be263b540c8569dcc8188e231247..641ec48ebd312e3ce6bc3913b4d0c326a6c91bcf 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp @@ -73,6 +73,8 @@ MOperator FlipConditionOp(MOperator flippedOp) { return AArch64MOP_t::MOP_beq; case AArch64MOP_t::MOP_bpl: return AArch64MOP_t::MOP_bmi; + case AArch64MOP_t::MOP_bmi: + return AArch64MOP_t::MOP_bpl; case AArch64MOP_t::MOP_xcbnz: return AArch64MOP_t::MOP_xcbz; case AArch64MOP_t::MOP_wcbnz: @@ -89,6 +91,10 @@ MOperator FlipConditionOp(MOperator flippedOp) { return AArch64MOP_t::MOP_xtbz; case AArch64MOP_t::MOP_xtbz: return AArch64MOP_t::MOP_xtbnz; + case AArch64MOP_t::MOP_bvc: + return AArch64MOP_t::MOP_bvs; + case AArch64MOP_t::MOP_bvs: + return AArch64MOP_t::MOP_bvc; default: break; } @@ -181,5 +187,49 @@ MOperator GetMopSub2Subs(const Insn &insn) { return curMop; } } + +// This api is only used for cgir verify, implemented by calling the memopndofst interface. +int64 GetMemOpndOffsetValue(Operand *o) { + auto *memOpnd = static_cast(o); + CHECK_FATAL(memOpnd != nullptr, "memOpnd should not be nullptr"); + // kBOR memOpnd has no offsetvalue, so return 0 for verify. + if (memOpnd->GetAddrMode() == MemOperand::kBOR) { + return 0; + } + OfstOperand *ofStOpnd = memOpnd->GetOffsetImmediate(); + int64 offsetValue = ofStOpnd ? ofStOpnd->GetOffsetValue() : 0LL; + return offsetValue; +} + +// Returns the number of trailing 0-bits in x, starting at the least significant bit position. +// If x is 0, the result is -1. 
+int32 GetTail0BitNum(int64 val) { + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((static_cast(1) << static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + +// Returns the number of leading 0-bits in x, starting at the most significant bit position. +// If x is 0, the result is -1. +int32 GetHead0BitNum(int64 val) { + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((0x8000000000000000ULL >> static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + } /* namespace AArch64isa */ } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_isolate_fastpath.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isolate_fastpath.cpp index d7a4b3e60c61fbe59836ea73059c7659b9904f79..f3f795011c5a029d22d10a3625a37156362f7385 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_isolate_fastpath.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isolate_fastpath.cpp @@ -416,7 +416,7 @@ void AArch64IsolateFastPath::IsolateFastPathOpt() { cgFunc.GetExitBBsVec().push_back(returnBB); MapleList::const_iterator predIt = std::find(tgtBB->GetPredsBegin(), tgtBB->GetPredsEnd(), returnBB); - tgtBB->ErasePreds(predIt); + (void)tgtBB->ErasePreds(predIt); tgtBB->ClearInsns(); returnBB->ClearSuccs(); if (tgtBB->GetPrev() != nullptr && tgtBB->GetNext() != nullptr) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_local_schedule.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_local_schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..409beb1ecbf395ff51929500d90efaefe515000c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_local_schedule.cpp @@ -0,0 +1,95 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. 
+* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include "aarch64_local_schedule.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64LocalSchedule::FinishScheduling(CDGNode &cdgNode) { + BB *curBB = cdgNode.GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + curBB->ClearInsns(); + + const Insn *prevLocInsn = (curBB->GetPrev() != nullptr ? curBB->GetPrev()->GetLastLoc() : nullptr); + MapleVector schedResults = commonSchedInfo->GetSchedResults(); + for (auto depNode : schedResults) { + Insn *curInsn = depNode->GetInsn(); + CHECK_FATAL(curInsn != nullptr, "get insn from depNode failed"); + + // Append comments + for (auto comment : depNode->GetComments()) { + if (comment->GetPrev() != nullptr && comment->GetPrev()->IsDbgInsn()) { + curBB->AppendInsn(*comment->GetPrev()); + } + curBB->AppendInsn(*comment); + } + + // Append clinit insns + if (!depNode->GetClinitInsns().empty()) { + for (auto clinitInsn : depNode->GetClinitInsns()) { + curBB->AppendInsn(*clinitInsn); + } + } else { + // Append debug insns + if (curInsn->GetPrev() != nullptr && curInsn->GetPrev()->IsDbgInsn()) { + curBB->AppendInsn(*curInsn->GetPrev()); + } + // Append insn + curBB->AppendInsn(*curInsn); + } + } + + curBB->SetLastLoc(prevLocInsn); + for (auto lastComment : cdgNode.GetLastComments()) { + curBB->AppendInsn(*lastComment); + } + cdgNode.ClearLastComments(); + ASSERT(curBB->NumInsn() >= static_cast(cdgNode.GetInsnNum()), + "The number of instructions after local-scheduling is unexpected"); + + commonSchedInfo = nullptr; +} + +void 
AArch64LocalSchedule::DumpInsnInfoByScheduledOrder(BB &curBB) const { + LogInfo::MapleLogger() << " ------------------------------------------------\n"; + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(6) << "insn" << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(8) << "mop" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << "bb" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(14) << "succs(latency)" << std::resetiosflags(std::ios::right) << "\n"; + LogInfo::MapleLogger() << " ------------------------------------------------\n"; + FOR_BB_INSNS_CONST(insn, &curBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(6) << insn->GetId() << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(8); + const InsnDesc *md = &AArch64CG::kMd[insn->GetMachineOpcode()]; + LogInfo::MapleLogger() << md->name << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << curBB.GetId() << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(14); + const DepNode *depNode = insn->GetDepNode(); + ASSERT(depNode != nullptr, "get depNode from insn failed"); + for (auto succLink : depNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + LogInfo::MapleLogger() << succNode.GetInsn()->GetId() << "(" << succLink->GetLatency() << "), "; + } + LogInfo::MapleLogger() << std::resetiosflags(std::ios::right) << "\n"; + } + LogInfo::MapleLogger() << " ------------------------------------------------\n"; + LogInfo::MapleLogger() << "\n"; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp index 
d60902e976d2f7e61168b84f632ed246aab775ab..9c921b9eca9afff057cbb3d09428dbae314bbb72 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -97,7 +97,7 @@ uint32 AArch64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, in } } CCLocInfo ploc; - aggCopySize += parmLocator.LocateNextParm(*ty, ploc); + aggCopySize += static_cast(parmLocator.LocateNextParm(*ty, ploc)); if (ploc.reg0 != 0) { continue; /* passed in register, so no effect on actual area */ } @@ -108,8 +108,9 @@ uint32 AArch64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, in } void AArch64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const { - if (be.GetTypeSize(typeIdx) > k16ByteSize) { - /* size > 16 is passed on stack, the formal is just a pointer to the copy on stack. */ + auto *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx); + if (IsParamStructCopyToMemory(*mirType)) { + // passed on stack, the formal is just a pointer to the copy on stack. 
if (CGOptions::IsArm64ilp32()) { align = k8ByteSize; size = k8ByteSize; @@ -118,8 +119,8 @@ void AArch64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint size = GetPointerSize(); } } else { - align = be.GetTypeAlign(typeIdx); - size = static_cast(be.GetTypeSize(typeIdx)); + align = mirType->GetAlign(); + size = static_cast(mirType->GetSize()); } } @@ -143,14 +144,13 @@ void AArch64MemLayout::LayoutVarargParams() { for (uint32 i = 0; i < func->GetFormalCount(); i++) { if (i == 0) { if (func->IsFirstArgReturn() && func->GetReturnType()->GetPrimType() != PTY_void) { - TyIdx tyIdx = func->GetFuncRetStructTyIdx(); - if (be.GetTypeSize(tyIdx.GetIdx()) <= k16ByteSize) { + if (!IsReturnInMemory(*func->GetReturnType())) { continue; } } } MIRType *ty = func->GetNthParamType(i); - parmlocator.LocateNextParm(*ty, ploc, i == 0, func); + parmlocator.LocateNextParm(*ty, ploc, i == 0, func->GetMIRFuncType()); if (ploc.reg0 != kRinvalid) { if (ploc.reg0 >= R0 && ploc.reg0 <= R7) { nIntRegs++; @@ -209,20 +209,17 @@ void AArch64MemLayout::LayoutFormalParams() { if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) { symLoc->SetMemSegment(GetSegArgsRegPassed()); symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); - TyIdx tyIdx = mirFunction->GetFuncRetStructTyIdx(); - if (be.GetTypeSize(tyIdx.GetIdx()) > k16ByteSize) { - if (CGOptions::IsArm64ilp32()) { - segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize); - } else { - segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + GetPointerSize()); - } + if (CGOptions::IsArm64ilp32()) { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize); + } else { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + GetPointerSize()); } continue; } } MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx); uint32 ptyIdx = ty->GetTypeIndex(); - parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction); + parmLocator.LocateNextParm(*ty, 
ploc, i == 0, mirFunction->GetMIRFuncType()); if (ploc.reg0 != kRinvalid) { /* register */ symLoc->SetRegisters(static_cast(ploc.reg0), static_cast(ploc.reg1), static_cast(ploc.reg2), static_cast(ploc.reg3)); @@ -305,9 +302,11 @@ void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, st continue; } symLoc->SetMemSegment(segRefLocals); - segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + segRefLocals.SetSize(static_cast(RoundUp(segRefLocals.GetSize(), + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetAlign()))); symLoc->SetOffset(segRefLocals.GetSize()); - segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + segRefLocals.SetSize(segRefLocals.GetSize() + + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize())); } else { if (sym->GetName() == "__EARetTemp__" || sym->GetName().substr(0, kEARetTempNameSize) == "__EATemp__") { @@ -316,7 +315,7 @@ void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, st } symLoc->SetMemSegment(segLocals); MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); - uint32 align = be.GetTypeAlign(tyIdx); + uint32 align = ty->GetAlign(); uint32 tSize = 0; if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) || AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) { @@ -328,7 +327,8 @@ void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, st segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); } symLoc->SetOffset(segLocals.GetSize()); - segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + segLocals.SetSize(segLocals.GetSize() + + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize())); } if (cgFunc->GetCG()->GetCGOptions().WithDwarf()) { cgFunc->AddDIESymbolLocation(*sym, symLoc, false); @@ -344,9 +344,11 @@ void AArch64MemLayout::LayoutEAVariales(std::vector &tempVar) { SetSymAllocInfo(stIndex, *symLoc); 
ASSERT(!symLoc->IsRegister(), "expect not register"); symLoc->SetMemSegment(segRefLocals); - segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetAlign())); symLoc->SetOffset(segRefLocals.GetSize()); - segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + segRefLocals.SetSize(segRefLocals.GetSize() + + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize())); } } @@ -361,9 +363,11 @@ void AArch64MemLayout::LayoutReturnRef(std::vector &returnDelays, ASSERT(sym->IsRefType(), "expect reftype "); symLoc->SetMemSegment(segRefLocals); - segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetAlign())); symLoc->SetOffset(segRefLocals.GetSize()); - segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + segRefLocals.SetSize(segRefLocals.GetSize() + + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize())); } segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); @@ -415,7 +419,8 @@ void AArch64MemLayout::LayoutActualParams() { */ MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx); uint32 ptyIdx = ty->GetTypeIndex(); - static_cast(cgFunc)->GetOrCreateMemOpnd(*sym, 0, be.GetTypeAlign(ptyIdx) * kBitsPerByte); + static_cast(cgFunc)->GetOrCreateMemOpnd(*sym, 0, + GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx)->GetAlign() * kBitsPerByte); } } } @@ -469,7 +474,7 @@ void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters() { segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType))); symLoc->SetOffset(segLocals.GetSize()); MIRType *mirTy = 
GlobalTables::GetTypeTable().GetTypeTable()[pType]; - segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex())); + segLocals.SetSize(segLocals.GetSize() + static_cast(mirTy->GetSize())); spillLocTable[i] = symLoc; } @@ -501,17 +506,6 @@ void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters() { } } -SymbolAlloc *AArch64MemLayout::AssignLocationToSpillReg(regno_t vrNum) { - AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); - symLoc->SetMemSegment(segSpillReg); - uint32 regSize = cgFunc->IsExtendReg(vrNum) ? k8ByteSize : cgFunc->GetVRegSize(vrNum); - segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), regSize)); - symLoc->SetOffset(segSpillReg.GetSize()); - segSpillReg.SetSize(segSpillReg.GetSize() + regSize); - SetSpillRegLocInfo(vrNum, *symLoc); - return symLoc; -} - uint64 AArch64MemLayout::StackFrameSize() const { uint64 total = segArgsRegPassed.GetSize() + static_cast(cgFunc)->SizeOfCalleeSaved() + GetSizeOfRefLocals() + Locals().GetSize() + GetSizeOfSpillReg(); @@ -567,17 +561,18 @@ int32 AArch64MemLayout::GetVRSaveAreaBaseLoc() const { } int32 AArch64MemLayout::GetCalleeSaveBaseLoc() const { - auto offset = StackFrameSize() - static_cast(cgFunc)->SizeOfCalleeSaved(); - if (cgFunc->GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { - offset -= GetSizeOfLocals(); - } else { - offset = (offset - SizeOfArgsToStackPass()) + kSizeOfFplr; - } + uint32 offset = RealStackFrameSize() - static_cast(cgFunc)->SizeOfCalleeSaved(); + offset = (offset - SizeOfArgsToStackPass()) + kSizeOfFplr; if (cgFunc->GetMirModule().IsCModule() && cgFunc->GetFunction().GetAttr(FUNCATTR_varargs)) { /* GR/VR save areas are above the callee save area */ - auto saveareasize = RoundUp(GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + - RoundUp(GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize); + // According to AAPCS64 document: + // __gr_top: set to the address of the byte immediately following the general register argument save area, the + 
// end of the save area being aligned to a 16 byte boundary. + // __vr_top: set to the address of the byte immediately following the FP/SIMD register argument save area, the + // end of the save area being aligned to a 16 byte boundary. + auto saveareasize = RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment) + + RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); offset -= saveareasize; } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp index 864e6607be5e0d7db71960ef8bc8035686965e71..92b87f6d7f07b3005aa217b6026380ff15b9348a 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -40,7 +40,7 @@ void AArch64FPLROffsetAdjustment::Run() { #endif } -void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn) { +void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn) const { uint32 opndNum = insn.GetOperandSize(); bool replaceFP = false; for (uint32 i = 0; i < opndNum; ++i) { @@ -109,9 +109,7 @@ void AArch64FPLROffsetAdjustment::AdjustMemOfstVary(Insn &insn, uint32 i) const if (ofstOpnd->GetVary() == kAdjustVary || ofstOpnd->GetVary() == kNotVary) { bool condition = aarchCGFunc->IsOperandImmValid(insn.GetMachineOpcode(), &currMemOpnd, i); if (!condition) { - MemOperand &newMemOpnd = aarchCGFunc->SplitOffsetWithAddInstruction( - currMemOpnd, currMemOpnd.GetSize(), static_cast(R16), false, &insn, insn.IsLoadStorePair()); - insn.SetOperand(i, newMemOpnd); + SPLIT_INSN(&insn, aarchCGFunc); } } } @@ -132,7 +130,10 @@ void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 } } if (!aarchCGFunc->IsOperandImmValid(insn.GetMachineOpcode(), &immOpnd, index)) { - if (insn.GetMachineOpcode() >= MOP_xaddrri24 && insn.GetMachineOpcode() <= MOP_waddrri12) { + if (insn.GetMachineOpcode() == MOP_xaddsrri12 || insn.GetMachineOpcode() == 
MOP_waddsrri12) { + insn.Dump(); + CHECK_FATAL(false, "NYI, need a better away to process 'adds' "); + } else if (insn.GetMachineOpcode() >= MOP_xaddrri24 && insn.GetMachineOpcode() <= MOP_waddrri12) { PrimType destTy = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize ? PTY_i64 : PTY_i32; RegOperand *resOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); @@ -177,9 +178,7 @@ void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn) const { memOpnd->SetOffsetOperand(*newOfstOpnd); uint32 i = insn.IsLoadStorePair() ? kInsnThirdOpnd : kInsnSecondOpnd; if (!aarchCGFunc->IsOperandImmValid(insn.GetMachineOpcode(), memOpnd, i)) { - MemOperand &newMemOpnd = aarchCGFunc->SplitOffsetWithAddInstruction( - *memOpnd, memOpnd->GetSize(), static_cast(R16), false, &insn, insn.IsLoadStorePair()); - insn.SetOperand(i, newMemOpnd); + SPLIT_INSN(&insn, aarchCGFunc); } } else { switch (insn.GetMachineOpcode()) { @@ -205,8 +204,11 @@ void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn) const { case MOP_xsubrri12: { ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, "regNumber should be changed in AdjustmentOffsetForOpnd"); - ImmOperand &subend = static_cast(insn.GetOperand(kInsnThirdOpnd)); - subend.SetValue(subend.GetValue() - offset); + auto &tempReg = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + auto &offsetReg = aarchCGFunc->CreateImmOperand(offset, k64BitSize, false); + aarchCGFunc->SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), + offsetReg, PTY_i64, false, insn); + insn.SetOperand(kInsnSecondOpnd, tempReg); break; } case MOP_xsubrri24: { @@ -231,6 +233,50 @@ void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn) const { } break; } + case MOP_xaddsrri12: { + ASSERT(static_cast(insn.GetOperand(kInsnThirdOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + auto *newAddImmOpnd = static_cast( + 
static_cast(insn.GetOperand(kInsnFourthOpnd)).Clone(*cgFunc->GetMemoryPool())); + newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() + offset); + insn.SetOperand(kInsnFourthOpnd, *newAddImmOpnd); + AdjustmentOffsetForImmOpnd(insn, kInsnFourthOpnd); /* legalize imm opnd */ + break; + } + case MOP_waddsrri12: { + if (!CGOptions::IsArm64ilp32()) { + insn.Dump(); + CHECK_FATAL(false, "Unexpect offset adjustment insn"); + } else { + ASSERT(static_cast(insn.GetOperand(kInsnThirdOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + ImmOperand &addend = static_cast(insn.GetOperand(kInsnFourthOpnd)); + addend.SetValue(addend.GetValue() + offset); + AdjustmentOffsetForImmOpnd(insn, kInsnFourthOpnd); /* legalize imm opnd */ + } + break; + } + case MOP_xaddrrr: { + // Later when use of SP or FP is refacored, this case can be omitted + RegOperand *offsetReg = nullptr; + Insn *newInsn = nullptr; + if (static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP) { + offsetReg = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + } else if (static_cast(insn.GetOperand(kInsnThirdOpnd)).GetRegisterNumber() == RSP) { + offsetReg = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + } else { + break; + } + if (insn.GetOperand(kInsnSecondOpnd).GetSize() == k64BitSize) { + ImmOperand &offsetImm = aarchCGFunc->CreateImmOperand(offset, k64BitSize, false); + newInsn = &aarchCGFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *offsetReg, *offsetReg, offsetImm); + } else { + ImmOperand &offsetImm = aarchCGFunc->CreateImmOperand(offset, k32BitSize, false); + newInsn = &aarchCGFunc->GetInsnBuilder()->BuildInsn(MOP_waddrri12, *offsetReg, *offsetReg, offsetImm); + } + (void)insn.GetBB()->InsertInsnBefore(insn, *newInsn); + break; + } default: insn.Dump(); CHECK_FATAL(false, "Unexpect offset adjustment insn"); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp 
b/src/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp index 38b49b550054196599fd59af1e270ee3979fc32c..45d04e0daaed2f87967b503fbcc05f791c8ff922 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp @@ -31,16 +31,6 @@ void AArch64InsnVisitor::ModifyJumpTarget(Operand &targetOperand, BB &bb) { } CHECK_FATAL(modified, "ModifyJumpTarget: Could not change jump target"); return; - } else if (bb.GetKind() == BB::kBBGoto) { - for (Insn *insn = bb.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { - if (insn->GetMachineOpcode() == MOP_adrp_label) { - maple::LabelIdx labidx = static_cast(targetOperand).GetLabelIndex(); - LabelOperand &label = static_cast(GetCGFunc())->GetOrCreateLabelOperand(labidx); - insn->SetOperand(1, label); - break; - } - } - // fallthru below to patch the branch insn } bb.GetLastInsn()->SetOperand(AArch64isa::GetJumpTargetIdx(*bb.GetLastInsn()), targetOperand); } @@ -194,4 +184,13 @@ BB *AArch64InsnVisitor::CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTar newBB->PushBackPreds(bb); return newBB; } + +void AArch64InsnVisitor::ModifyFathruBBToGotoBB(BB &bb, LabelIdx labelIdx) const { + CHECK_FATAL(bb.GetKind() == BB::kBBFallthru, "invalid kind of bb"); + CGFunc *cgFunc = GetCGFunc(); + LabelOperand &labelOpnd = cgFunc->GetOrCreateLabelOperand(labelIdx); + Insn &jumpInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xuncond, labelOpnd); + bb.AppendInsn(jumpInsn); + bb.SetKind(BB::kBBGoto); +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp index a303c20babff3e6c1eae0d7c9705e58f00e3e93e..43b68051718f8593ac30b8b9015b46f64d3ebe5f 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -1136,6 +1136,20 @@ void ZeroCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) { } } 
+bool LsrAndToUbfxPattern::CheckIntersectedCondition(Insn &insn, Insn &prevInsn) { + MOperator curMop = insn.GetMachineOpcode(); + MOperator prevMop = prevInsn.GetMachineOpcode(); + int64 lsb = static_cast(prevInsn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 width = __builtin_popcountll(static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue()); + if (lsb + width <= k32BitSize) { + return true; + } else if (curMop == MOP_wandrri12 && prevMop == MOP_xlsrrri6 && lsb >= k32BitSize && (lsb + width) <= k64BitSize) { + isWXSumOutOfRange = true; + return isWXSumOutOfRange; + } + return false; +} + bool LsrAndToUbfxPattern::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wandrri12 && curMop != MOP_xandrri13) { @@ -1156,10 +1170,10 @@ bool LsrAndToUbfxPattern::CheckCondition(Insn &insn) { if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6) { return false; } - auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); - auto &currUseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); - /* check def-use reg size found by ssa */ - CHECK_FATAL(prevDstOpnd.GetSize() == currUseOpnd.GetSize(), "def-use reg size must be same"); + if (((curMop == MOP_wandrri12 && prevMop == MOP_xlsrrri6) || (curMop == MOP_xandrri13 && prevMop == MOP_wlsrrri5)) && + !CheckIntersectedCondition(insn, *prevInsn)) { + return false; + } auto &andDstReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); VRegVersion *andDstVersion = ssaInfo->FindSSAVersion(andDstReg.GetRegisterNumber()); ASSERT(andDstVersion != nullptr, "find destReg Version failed"); @@ -1185,7 +1199,8 @@ void LsrAndToUbfxPattern::Run(BB &bb, Insn &insn) { return; } auto *aarFunc = static_cast(cgFunc); - bool is64Bits = (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize); + // If isWXSumOutOfRange returns true, newInsn will be 64bit + bool is64Bits = isWXSumOutOfRange ? 
true : (insn.GetOperandSize(kInsnFirstOpnd) == k64BitSize); Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd); Operand &srcOpnd = prevInsn->GetOperand(kInsnSecondOpnd); int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); @@ -1276,7 +1291,6 @@ bool LslAndToUbfizPattern::CheckUseInsnMop(const Insn &useInsn) const { case MOP_xsxth64: case MOP_xuxtb32: case MOP_xuxth32: - case MOP_xuxtw64: case MOP_xsxtw64: case MOP_xubfxrri6i6: case MOP_xcmprr: @@ -1320,41 +1334,49 @@ void LslAndToUbfizPattern::Run(BB &bb, Insn &insn) { } } -/* Build ubfiz insn or mov insn */ +// Build ubfiz insn or mov insn Insn *LslAndToUbfizPattern::BuildNewInsn(const Insn &andInsn, const Insn &lslInsn, const Insn &useInsn) const { uint64 andImmValue = static_cast(static_cast(andInsn.GetOperand(kInsnThirdOpnd)).GetValue()); - /* Check whether the value of immValue is 2^n-1. */ - uint64 judgment = andImmValue & (andImmValue + 1); + uint64 lslImmValue = static_cast(static_cast(lslInsn.GetOperand(kInsnThirdOpnd)).GetValue()); + MOperator useMop = useInsn.GetMachineOpcode(); + // isLslAnd means true -> lsl + and, false -> and + lsl + bool isLslAnd = (useMop == MOP_wandrri12) || (useMop == MOP_xandrri13); + // judgment need to set non-zero value + uint64 judgment = 1; + // When useInsn is lsl, check whether the value of immValue is 2^n-1. 
+ // When useInsn is and, check whether the value of immValue is (2^n-1) << m + if (isLslAnd) { + if ((andImmValue >> lslImmValue) != 0) { + judgment = (andImmValue >> lslImmValue) & ((andImmValue >> lslImmValue) + 1); + } + } else { + judgment = andImmValue & (andImmValue + 1); + } if (judgment != 0) { return nullptr; } - MOperator mop = andInsn.GetMachineOpcode(); - MOperator useMop = useInsn.GetMachineOpcode(); RegOperand &ubfizOpnd1 = static_cast(useInsn.GetOperand(kInsnFirstOpnd)); uint32 opnd1Size = ubfizOpnd1.GetSize(); RegOperand &ubfizOpnd2 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); uint32 opnd2Size = ubfizOpnd2.GetSize(); ImmOperand &ubfizOpnd3 = static_cast(lslInsn.GetOperand(kInsnThirdOpnd)); uint32 mValue = static_cast(ubfizOpnd3.GetValue()); - uint32 nValue = static_cast(__builtin_popcountll(andImmValue)); + uint32 nValue = 0; + if (isLslAnd) { + nValue = static_cast(__builtin_popcountll(andImmValue >> lslImmValue)); + } else { + nValue = static_cast(__builtin_popcountll(andImmValue)); + } auto *aarFunc = static_cast(cgFunc); - if (opnd1Size != opnd2Size) { + if (opnd1Size != opnd2Size || (mValue + nValue) > opnd1Size) { return nullptr; } - if (nValue > mValue || useMop == MOP_wlslrri5 || useMop == MOP_xlslrri6) { - MOperator newMop = (mop == MOP_wandrri12) ? MOP_wubfizrri5i5 : MOP_xubfizrri6i6; - uint32 size = (mop == MOP_wandrri12) ? kMaxImmVal5Bits : kMaxImmVal6Bits; - int64 val = 0; - if (useMop == MOP_wlslrri5 || useMop == MOP_xlslrri6) { - val = opnd1Size > (nValue + mValue) ? nValue : opnd1Size - mValue; - } else { - val = nValue - mValue; - } - ImmOperand &ubfizOpnd4 = aarFunc->CreateImmOperand(val, size, false); - Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, ubfizOpnd1, ubfizOpnd2, ubfizOpnd3, ubfizOpnd4); - return &newInsn; - } - return nullptr; + MOperator addMop = andInsn.GetMachineOpcode(); + MOperator newMop = (addMop == MOP_wandrri12) ? 
MOP_wubfizrri5i5 : MOP_xubfizrri6i6; + uint32 size = (addMop == MOP_wandrri12) ? kMaxImmVal5Bits : kMaxImmVal6Bits; + ImmOperand &ubfizOpnd4 = aarFunc->CreateImmOperand(nValue, size, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, ubfizOpnd1, ubfizOpnd2, ubfizOpnd3, ubfizOpnd4); + return &newInsn; } bool MvnAndToBicPattern::CheckCondition(Insn &insn) { @@ -1602,10 +1624,10 @@ bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) { int64 prevImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); auto &shiftOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); uint32 shiftAmount = shiftOpnd.GetShiftAmount(); - if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSL && (prevMop == MOP_wlsrrri5 || prevMop == MOP_xlsrrri6)) { + if (shiftOpnd.GetShiftOp() == BitShiftOperand::kShiftLSL && (prevMop == MOP_wlsrrri5 || prevMop == MOP_xlsrrri6)) { prevLsrInsn = prevInsn; shiftValue = prevImm; - } else if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSR && + } else if (shiftOpnd.GetShiftOp() == BitShiftOperand::kShiftLSR && (prevMop == MOP_wlslrri5 || prevMop == MOP_xlslrri6)) { prevLslInsn = prevInsn; shiftValue = shiftAmount; @@ -2104,7 +2126,7 @@ bool ElimSpecificExtensionPattern::CheckCondition(Insn &insn) { return true; } -void ElimSpecificExtensionPattern::Run(BB &bb, Insn &insn) { +void ElimSpecificExtensionPattern::Run(BB& /* bb */, Insn &insn) { if (!CheckCondition(insn)) { return; } @@ -2366,12 +2388,18 @@ void AArch64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { case MOP_xsxth64: case MOP_xsxtw64: { manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + if (!manager->OptSuccess() && thisMop == MOP_xsxtw64) { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + } break; } case MOP_xuxtb32: case MOP_xuxth32: case MOP_xuxtw64: { manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + if (!manager->OptSuccess() && thisMop == MOP_xuxtw64) { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + } break; } case 
MOP_wsdivrrr: { @@ -2396,6 +2424,11 @@ void AArch64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { } break; } + case MOP_xaddrri12: + case MOP_xsubrri12: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } default: break; } @@ -2462,18 +2495,12 @@ void AArch64PrePeepHole::Run(BB &bb, Insn &insn) { void AArch64PrePeepHole1::InitOpts() { optimizations.resize(kPeepholeOptsNum); - optimizations[kComplexExtendWordLslOpt] = optOwnMemPool->New(cgFunc); optimizations[kAddCmpZeroOpt] = optOwnMemPool->New(cgFunc); } void AArch64PrePeepHole1::Run(BB &bb, Insn &insn) { MOperator thisMop = insn.GetMachineOpcode(); switch (thisMop) { - case MOP_xsxtw64: - case MOP_xuxtw64: { - (static_cast(optimizations[kComplexExtendWordLslOpt]))->Run(bb, insn); - break; - } case MOP_wcmpri: case MOP_xcmpri: { (static_cast(optimizations[kAddCmpZeroOpt]))->Run(bb, insn); @@ -2503,7 +2530,9 @@ void RemoveIdenticalLoadAndStorePattern::Run(BB &bb, Insn &insn) { bb.RemoveInsn(insn); } } else if ((mop1 == MOP_wstr && mop2 == MOP_wldr) || (mop1 == MOP_xstr && mop2 == MOP_xldr)) { - if (IsMemOperandsIdentical(insn, *nextInsn)) { + uint32 srcOpndSize1 = insn.GetOperand(kInsnFirstOpnd).GetSize(); + uint32 srcOpndSize2 = nextInsn->GetOperand(kInsnFirstOpnd).GetSize(); + if (srcOpndSize1 == srcOpndSize2 && IsMemOperandsIdentical(insn, *nextInsn)) { bb.RemoveInsn(*nextInsn); } } @@ -2757,12 +2786,13 @@ std::vector CombineContiLoadAndStorePattern::FindPrevStrLdr(Insn &insn, r return prevContiInsns; } /* record continuous STD/LDR insn */ - if (!curInsn->IsLoadStorePair() && ((insn.IsStore() && curInsn->IsStore()) || (insn.IsLoad() && curInsn->IsLoad()))) { + if (!curInsn->IsLoadStorePair() && ((insn.IsStore() && curInsn->IsStore()) || + (insn.IsLoad() && curInsn->IsLoad()))) { auto *memOperand = static_cast(curInsn->GetMemOpnd()); /* do not combine ldr r0, label */ if (memOperand != nullptr) { auto *baseRegOpnd = static_cast(memOperand->GetBaseRegister()); - ASSERT(baseRegOpnd == nullptr 
|| !baseRegOpnd->IsVirtualRegister(), "physical register has not been allocated?"); + ASSERT(baseRegOpnd == nullptr || !baseRegOpnd->IsVirtualRegister(), "physical reg has not been allocated?"); if ((memOperand->GetAddrMode() == MemOperand::kBOI) && baseRegOpnd->GetRegisterNumber() == memBaseRegNO) { prevContiInsns.emplace_back(curInsn); } @@ -2826,25 +2856,23 @@ void CombineContiLoadAndStorePattern::Run(BB &bb, Insn &insn) { if (IsValidNormalLoadOrStorePattern(insn, *prevContiInsn, *curMemOpnd, curOfstVal, prevOfstVal)) { /* Process normal mem pair */ MOperator newMop = GetNewMemMop(insn.GetMachineOpcode()); - Insn *combineInsn = GenerateMemPairInsn(newMop, curDestOpnd, prevDestOpnd, *combineMemOpnd, curOfstVal < prevOfstVal); + Insn *combineInsn = GenerateMemPairInsn(newMop, curDestOpnd, prevDestOpnd, *combineMemOpnd, + curOfstVal < prevOfstVal); ASSERT(combineInsn != nullptr, "create combineInsn failed"); bb.InsertInsnAfter(*prevContiInsn, *combineInsn); if (!(static_cast(*cgFunc).IsOperandImmValid(newMop, combineMemOpnd, - isPairAfterCombine ? kInsnThirdOpnd : kInsnSecondOpnd))) { - if (FindUseX16AfterInsn(bb, *prevContiInsn)) { + isPairAfterCombine ? kInsnThirdOpnd : kInsnSecondOpnd))) { + if (FindUseX16AfterInsn(*prevContiInsn)) { /* Do not combine Insns when x16 was used after curInsn */ bb.RemoveInsn(*combineInsn); return; } - const InsnDesc *md = &AArch64CG::kMd[combineInsn->GetMachineOpcode()]; - auto *opndProp = md->opndMD[kInsnFirstOpnd]; - MemOperand &newMemOpnd = static_cast(cgFunc)->SplitOffsetWithAddInstruction( - *combineMemOpnd, opndProp->GetSize(), static_cast(R16), false, combineInsn, isPairAfterCombine); - combineInsn->SetOperand(isPairAfterCombine ? 
kInsnThirdOpnd : kInsnSecondOpnd, newMemOpnd); + SPLIT_INSN(combineInsn, cgFunc); } RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); return; - } else if (IsValidStackArgLoadOrStorePattern(insn, *prevContiInsn, *curMemOpnd, *prevMemOpnd, curOfstVal, prevOfstVal)) { + } else if (IsValidStackArgLoadOrStorePattern(insn, *prevContiInsn, *curMemOpnd, *prevMemOpnd, + curOfstVal, prevOfstVal)) { /* Process stack-arg mem pair */ regno_t curDestRegNo = curDestOpnd.GetRegisterNumber(); regno_t prevDestRegNo = prevDestOpnd.GetRegisterNumber(); @@ -2864,7 +2892,7 @@ void CombineContiLoadAndStorePattern::Run(BB &bb, Insn &insn) { } } -bool CombineContiLoadAndStorePattern::FindUseX16AfterInsn(BB &bb, Insn &curInsn) { +bool CombineContiLoadAndStorePattern::FindUseX16AfterInsn(const Insn &curInsn) const { for (Insn *cursor = curInsn.GetNext(); cursor != nullptr; cursor = cursor->GetNext()) { if (!cursor->IsMachineInstruction()) { continue; @@ -2891,13 +2919,16 @@ bool CombineContiLoadAndStorePattern::FindUseX16AfterInsn(BB &bb, Insn &curInsn) return false; } -Insn *CombineContiLoadAndStorePattern::GenerateMemPairInsn(MOperator newMop, RegOperand &curDestOpnd, RegOperand &prevDestOpnd, - MemOperand &combineMemOpnd, bool isCurDestFirst) { +Insn *CombineContiLoadAndStorePattern::GenerateMemPairInsn(MOperator newMop, RegOperand &curDestOpnd, + RegOperand &prevDestOpnd, MemOperand &combineMemOpnd, + bool isCurDestFirst) { ASSERT(newMop != MOP_undef, "invalid MOperator"); Insn *combineInsn = nullptr; if (isPairAfterCombine) { /* for ldr/str --> ldp/stp */ - combineInsn = (isCurDestFirst) ? &cgFunc->GetInsnBuilder()->BuildInsn(newMop, curDestOpnd, prevDestOpnd, combineMemOpnd) : - &cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevDestOpnd, curDestOpnd, combineMemOpnd); + combineInsn = (isCurDestFirst) ? 
&cgFunc->GetInsnBuilder()->BuildInsn(newMop, curDestOpnd, + prevDestOpnd, combineMemOpnd) : + &cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevDestOpnd, + curDestOpnd, combineMemOpnd); } else { /* for strb/strh --> strh/str, curDestOpnd == preDestOpnd */ combineInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, curDestOpnd, combineMemOpnd); combineMemOpnd.SetSize(newMop == MOP_wstrh ? maplebe::k16BitSize : maplebe::k32BitSize); @@ -2905,7 +2936,8 @@ Insn *CombineContiLoadAndStorePattern::GenerateMemPairInsn(MOperator newMop, Reg return combineInsn; } -bool CombineContiLoadAndStorePattern::IsValidNormalLoadOrStorePattern(Insn &insn, Insn &prevInsn, MemOperand &memOpnd, +bool CombineContiLoadAndStorePattern::IsValidNormalLoadOrStorePattern(const Insn &insn, const Insn &prevInsn, + const MemOperand &memOpnd, int64 curOfstVal, int64 prevOfstVal) { if (memOpnd.IsStackArgMem()) { return false; @@ -2914,7 +2946,8 @@ bool CombineContiLoadAndStorePattern::IsValidNormalLoadOrStorePattern(Insn &insn ASSERT(prevInsn.GetOperand(kInsnFirstOpnd).IsRegister(), "unexpect operand"); auto &curDestOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); auto &prevDestOpnd = static_cast(prevInsn.GetOperand(kInsnFirstOpnd)); - if (prevDestOpnd.GetRegisterType() != curDestOpnd.GetRegisterType() || curDestOpnd.GetSize() != prevDestOpnd.GetSize()) { + if (prevDestOpnd.GetRegisterType() != curDestOpnd.GetRegisterType() || + curDestOpnd.GetSize() != prevDestOpnd.GetSize()) { return false; } uint32 memSize = insn.GetMemoryByteSize(); @@ -2952,9 +2985,10 @@ bool CombineContiLoadAndStorePattern::IsValidNormalLoadOrStorePattern(Insn &insn return false; } -bool CombineContiLoadAndStorePattern::IsValidStackArgLoadOrStorePattern(Insn &curInsn, Insn &prevInsn, MemOperand &curMemOpnd, - MemOperand &prevMemOpnd, int64 curOfstVal, - int64 prevOfstVal) { +bool CombineContiLoadAndStorePattern::IsValidStackArgLoadOrStorePattern(const Insn &curInsn, const Insn &prevInsn, + const MemOperand &curMemOpnd, + 
const MemOperand &prevMemOpnd, + int64 curOfstVal, int64 prevOfstVal) const { if (!curInsn.IsStore()) { return false; } @@ -3065,12 +3099,16 @@ void EliminateSpecifcSXTPattern::Run(BB &bb, Insn &insn) { if (value >= static_cast(0xFFFFFFFFFFFFFF80) && value <= 0x7F && immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) { bb.RemoveInsn(insn); + optSuccess = true; + return; } } else if (thisMop == MOP_xsxth32) { /* value should in range between -32678 and 32678 */ if (value >= static_cast(0xFFFFFFFFFFFF8000) && value <= 0x7FFF && immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) { bb.RemoveInsn(insn); + optSuccess = true; + return; } } else { uint64 flag = 0xFFFFFFFFFFFFFF80; /* initialize the flag with fifty-nine 1s at top */ @@ -3086,6 +3124,8 @@ void EliminateSpecifcSXTPattern::Run(BB &bb, Insn &insn) { prevInsn->SetOperand(kInsnFirstOpnd, dstOpnd); prevInsn->SetMOP(AArch64CG::kMd[MOP_xmovri64]); bb.RemoveInsn(insn); + optSuccess = true; + return; } } } @@ -3125,6 +3165,7 @@ void EliminateSpecifcUXTPattern::Run(BB &bb, Insn &insn) { (thisMop == MOP_xuxth32 && retSize <= k2ByteSize) || (thisMop == MOP_xuxtw64 && retSize <= k4ByteSize))) { bb.RemoveInsn(insn); + optSuccess = true; } return; } @@ -3148,6 +3189,8 @@ void EliminateSpecifcUXTPattern::Run(BB &bb, Insn &insn) { /* check the top 56 bits of value */ if ((static_cast(value) & 0xFFFFFFFFFFFFFF00) == 0) { bb.RemoveInsn(insn); + optSuccess = true; + return; } } } else if (prevInsn->GetMachineOpcode() == MOP_wldrb) { @@ -3156,6 +3199,8 @@ void EliminateSpecifcUXTPattern::Run(BB &bb, Insn &insn) { return; } bb.RemoveInsn(insn); + optSuccess = true; + return; } } else if (thisMop == MOP_xuxth32) { if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) { @@ -3169,6 +3214,8 @@ void EliminateSpecifcUXTPattern::Run(BB &bb, Insn &insn) { int64 value = immOpnd.GetValue(); if ((static_cast(value) & 0xFFFFFFFFFFFF0000) == 0) { bb.RemoveInsn(insn); + optSuccess = true; + 
return; } } } else if (prevInsn->GetMachineOpcode() == MOP_wldrh) { @@ -3177,6 +3224,8 @@ void EliminateSpecifcUXTPattern::Run(BB &bb, Insn &insn) { return; } bb.RemoveInsn(insn); + optSuccess = true; + return; } } else { /* this_mop == MOP_xuxtw64 */ @@ -3189,6 +3238,8 @@ void EliminateSpecifcUXTPattern::Run(BB &bb, Insn &insn) { } /* 32-bit ldr does zero-extension by default, so this conversion can be skipped */ bb.RemoveInsn(insn); + optSuccess = true; + return; } } } @@ -3580,7 +3631,7 @@ constexpr uint32 kRefSize = 32; constexpr uint32 kRefSize = 64; #endif -bool InlineReadBarriersPattern::CheckCondition(Insn &insn) { +bool InlineReadBarriersPattern::CheckCondition(Insn& /* insn */) { /* Inline read barriers only enabled for GCONLY. */ if (!CGOptions::IsGCOnly()) { return false; @@ -3719,7 +3770,7 @@ void ReplaceDivToMultiPattern::Run(BB &bb, Insn &insn) { aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(sdivOpnd1RegNum), k64BitSize, kRegTyInt); /* shift bit amount is thirty-one at this insn */ - BitShiftOperand &addLsrOpnd = aarch64CGFunc->CreateBitShiftOperand(BitShiftOperand::kLSR, 31, 6); + BitShiftOperand &addLsrOpnd = aarch64CGFunc->CreateBitShiftOperand(BitShiftOperand::kShiftLSR, 31, 6); Insn &addLsrInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrrrs, extSdivO0, tempOpnd, extSdivO1, addLsrOpnd); bb.InsertInsnBefore(*prePrevInsn, addLsrInsn); @@ -3865,19 +3916,29 @@ bool AndCbzBranchesToTstPattern::CheckCondition(Insn &insn) { (nextInsn->GetMachineOpcode() != MOP_wcbz && nextInsn->GetMachineOpcode() != MOP_xcbz)) { return false; } - auto &andRegOp = static_cast(insn.GetOperand(kInsnFirstOpnd)); - regno_t andRegNO1 = andRegOp.GetRegisterNumber(); - auto &cbzRegOp2 = static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); - regno_t cbzRegNO2 = cbzRegOp2.GetRegisterNumber(); - if (andRegNO1 != cbzRegNO2) { + auto &andRegOp1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t andRegNo1 = andRegOp1.GetRegisterNumber(); + auto &cbzRegOp1 = 
static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); + regno_t cbzRegNo1 = cbzRegOp1.GetRegisterNumber(); + if (andRegNo1 != cbzRegNo1) { return false; } /* If the reg will be used later, we shouldn't optimize the and insn here */ - if (IfOperandIsLiveAfterInsn(andRegOp, *nextInsn)) { + if (IfOperandIsLiveAfterInsn(andRegOp1, *nextInsn)) { + return false; + } + auto &andRegOp2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd); + if (andOpnd3.IsImmediate() && !static_cast(andOpnd3).IsBitmaskImmediate(andRegOp2.GetSize())) { + return false; + } + /* avoid redefine cc-reg */ + if (static_cast(cgFunc)->GetRflag() != nullptr) { return false; } return true; } + void AndCbzBranchesToTstPattern::Run(BB &bb, Insn &insn) { if (!CheckCondition(insn)) { return; @@ -3885,22 +3946,17 @@ void AndCbzBranchesToTstPattern::Run(BB &bb, Insn &insn) { Insn *nextInsn = insn.GetNextMachineInsn(); CHECK_NULL_FATAL(nextInsn); /* build tst insn */ - Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd); auto &andRegOp2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); - auto &andRegOp3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd); MOperator newTstOp = MOP_undef; if (andOpnd3.IsRegister()) { - newTstOp = (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstrr : MOP_xtstrr; + newTstOp = (insn.GetMachineOpcode() == MOP_wandrrr) ? MOP_wtstrr : MOP_xtstrr; } else { - newTstOp = (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstri32 : MOP_xtstri64; + newTstOp = (insn.GetMachineOpcode() == MOP_wandrri12) ? 
MOP_wtstri32 : MOP_xtstri64; } Operand &rflag = static_cast(cgFunc)->GetOrCreateRflag(); Insn &newInsnTst = cgFunc->GetInsnBuilder()->BuildInsn(newTstOp, rflag, andRegOp2, andOpnd3); - if (andOpnd3.IsImmediate()) { - if (!static_cast(andOpnd3).IsBitmaskImmediate(andRegOp2.GetSize())) { - return; - } - } + /* build beq insn */ MOperator opCode = nextInsn->GetMachineOpcode(); bool reverse = (opCode == MOP_xcbz || opCode == MOP_wcbz); @@ -4376,6 +4432,12 @@ void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn) { return; } + auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MemOperand *newMemOpnd = + aarch64CGFunc->CreateMemOperand(memOpnd->GetSize(), newBaseOpnd, offOpnd, *stImmOpnd.GetSymbol()); + if (!aarch64CGFunc->IsOperandImmValid(nextMop, newMemOpnd, nextInsn->GetMemOpndIdx())) { + return; + } if (cgFunc.GetMirModule().IsCModule()) { Insn *prevInsn = insn.GetPrev(); MOperator prevMop = prevInsn->GetMachineOpcode(); @@ -4386,9 +4448,6 @@ void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn) { prevStImmOpnd.SetOffset(offOpnd.GetValue()); } } - auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); - MemOperand *newMemOpnd = - aarch64CGFunc->CreateMemOperand(memOpnd->GetSize(), newBaseOpnd, offOpnd, *stImmOpnd.GetSymbol()); nextInsn->SetMemOpnd(newMemOpnd); bb.RemoveInsn(insn); CHECK_FATAL(!CGOptions::IsLazyBinding() || cgFunc.GetCG()->IsLibcore(), @@ -4793,7 +4852,97 @@ void NormRevTbzToTbzPattern::Run(BB &bb, Insn &insn) { } } -void UbfxAndCbzToTbzPattern::Run(BB &bb, Insn &insn) { +bool AddSubMergeLdStPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + prevInsn = insn.GetPreviousMachineInsn(); + isAddSubFront = CheckIfCanBeMerged(nextInsn, insn); + isLdStFront = CheckIfCanBeMerged(prevInsn, insn); + // If prev & next all can be merged, only one will be merged, otherwise #imm will be add/sub twice. 
+ if (isAddSubFront && isLdStFront) { + isLdStFront = false; + } + return isAddSubFront || isLdStFront; +} + +bool AddSubMergeLdStPattern::CheckIfCanBeMerged(Insn *adjacentInsn, Insn &insn) { + if (adjacentInsn == nullptr || adjacentInsn->IsVectorOp() || (!adjacentInsn->AccessMem())) { + return false; + } + Operand &opnd = adjacentInsn->IsLoadStorePair() ? adjacentInsn->GetOperand(kInsnThirdOpnd) + : adjacentInsn->GetOperand(kInsnSecondOpnd); + if (opnd.GetKind() != Operand::kOpdMem) { + return false; + } + MemOperand *memOpnd = &static_cast(opnd); + // load/store memopnd offset value must be #0 + if (memOpnd->GetAddrMode() != MemOperand::kBOI || + AArch64isa::GetMemOpndOffsetValue(memOpnd) != static_cast(k0BitSize)) { + return false; + } + insnDefReg = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + insnUseReg = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + RegOperand *memUseReg = memOpnd->GetBaseRegister(); + regno_t insnDefRegNO = insnDefReg->GetRegisterNumber(); + regno_t insnUseRegNO = insnUseReg->GetRegisterNumber(); + regno_t memUseRegNO = memUseReg->GetRegisterNumber(); + if ((insnDefRegNO != memUseRegNO) || (insnDefRegNO != insnUseRegNO) || (insnUseRegNO != memUseRegNO)) { + return false; + } + // When load/store insn def & use regno are the same, it will trigger unpredictable transfer with writeback. + regno_t ldstDefRegNO0 = static_cast(adjacentInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (ldstDefRegNO0 == memUseRegNO) { + return false; + } + if (adjacentInsn->IsLoadStorePair()) { + regno_t ldstDefRegNO1 = static_cast(adjacentInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + if (ldstDefRegNO1 == memUseRegNO) { + return false; + } + } + return true; +} + +void AddSubMergeLdStPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + insnToBeReplaced = isAddSubFront ? nextInsn : prevInsn; + // isInsnAdd returns true -- add, isInsnAdd returns false -- sub. 
+ isInsnAdd = (insn.GetMachineOpcode() == MOP_xaddrri12); + int64 immVal = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + // Pre/Post-index simm cannot be absent, when ofstVal is #0, the assembly file will appear memopnd: [x0]! + if (immVal == static_cast(k0BitSize)) { + return; + } + Operand &opnd = insnToBeReplaced->IsLoadStorePair() ? insnToBeReplaced->GetOperand(kInsnThirdOpnd) + : insnToBeReplaced->GetOperand(kInsnSecondOpnd); + MemOperand *memOpnd = &static_cast(opnd); + ImmOperand &newImmOpnd = + static_cast(cgFunc)->CreateImmOperand((isInsnAdd ? immVal : (-immVal)), k64BitSize, true); + MemOperand *newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + memOpnd->GetSize(), *insnUseReg, newImmOpnd, (isAddSubFront ? MemOperand::kPreIndex : MemOperand::kPostIndex)); + Insn *newInsn = nullptr; + if (insnToBeReplaced->IsLoadStorePair()) { + newInsn = &static_cast(cgFunc)->GetInsnBuilder()->BuildInsn( + insnToBeReplaced->GetMachineOpcode(), insnToBeReplaced->GetOperand(kInsnFirstOpnd), + insnToBeReplaced->GetOperand(kInsnSecondOpnd), *newMemOpnd); + } else { + newInsn = &static_cast(cgFunc)->GetInsnBuilder()->BuildInsn( + insnToBeReplaced->GetMachineOpcode(), insnToBeReplaced->GetOperand(kInsnFirstOpnd), *newMemOpnd); + } + if (!VERIFY_INSN(newInsn)) { + return; + } else { + // Both [RSP, #imm]! and [RSP], #imm should be set true for stackdef. 
+ if (insnUseReg->GetRegisterNumber() == RSP) { + newInsn->SetStackDef(true); + } + (void)bb.ReplaceInsn(*insnToBeReplaced, *newInsn); + (void)bb.RemoveInsn(insn); + } +} + +void UbfxAndCbzToTbzPattern::Run(BB& /* bb */, Insn &insn) { Operand &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); ImmOperand &imm3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); if (!CheckCondition(insn)) { @@ -4854,17 +5003,33 @@ bool UbfxAndCbzToTbzPattern::CheckCondition(Insn &insn) { return false; } -bool AddCmpZeroAArch64::CheckAddCmpZeroCheckAdd(const Insn &insn, regno_t regNO) { - MOperator mop = insn.GetMachineOpcode(); +bool AddCmpZeroAArch64::CheckAddCmpZeroCheckAdd(const Insn &prevInsn, const Insn &insn) const { + MOperator mop = prevInsn.GetMachineOpcode(); switch (mop) { case MOP_xaddrrr: - case MOP_xaddrri12: case MOP_waddrrr: - case MOP_waddrri12: case MOP_xaddrrrs: case MOP_waddrrrs: { - regno_t regNO0 = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); - if (regNO0 == regNO) { + RegOperand opnd0 = static_cast(prevInsn.GetOperand(kInsnFirstOpnd)); + RegOperand opnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (opnd0.Equals(opnd) && insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize() == + prevInsn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize()) { + return true; + } else { + return false; + } + } + case MOP_waddrri12: + case MOP_xaddrri12: { + RegOperand opnd0 = static_cast(prevInsn.GetOperand(kInsnFirstOpnd)); + RegOperand opnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (!(opnd0.Equals(opnd) && insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize() == + prevInsn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize())) { + return false; + } + auto &immOpnd = static_cast(prevInsn.GetOperand(kInsnThirdOpnd)); + auto *aarch64CGFunc = static_cast(&cgFunc); + if (aarch64CGFunc->IsOperandImmValid(prevInsn.GetMachineOpcode(), &immOpnd, kInsnThirdOpnd)) { return true; } else { return false; @@ -4876,14 +5041,14 @@ bool 
AddCmpZeroAArch64::CheckAddCmpZeroCheckAdd(const Insn &insn, regno_t regNO) return false; } -bool AddCmpZeroAArch64::CheckAddCmpZeroContinue(const Insn &insn, regno_t regNO) { +bool AddCmpZeroAArch64::CheckAddCmpZeroContinue(const Insn &insn, const RegOperand &opnd) const { for (uint32 i = 0; i < insn.GetOperandSize(); ++i) { if (insn.GetDesc()->GetOpndDes(i) == &OpndDesc::CCS) { return false; } if (insn.GetOperand(i).IsRegister()) { - RegOperand &opnd = static_cast(insn.GetOperand(i)); - if (insn.GetDesc()->GetOpndDes(i)->IsDef() && regNO == opnd.GetRegisterNumber()) { + RegOperand &opnd0 = static_cast(insn.GetOperand(i)); + if (insn.GetDesc()->GetOpndDes(i)->IsDef() && opnd0.RegNumEqual(opnd)) { return false; } } @@ -4891,21 +5056,21 @@ bool AddCmpZeroAArch64::CheckAddCmpZeroContinue(const Insn &insn, regno_t regNO) return true; } -Insn* AddCmpZeroAArch64::CheckAddCmpZeroAArch64Pattern(Insn &insn, regno_t regNO) { +Insn* AddCmpZeroAArch64::CheckAddCmpZeroAArch64Pattern(Insn &insn, const RegOperand &opnd) { Insn *prevInsn = insn.GetPrev(); while (prevInsn != nullptr) { if (!prevInsn->IsMachineInstruction()) { prevInsn = prevInsn->GetPrev(); continue; } - if (CheckAddCmpZeroCheckAdd(*prevInsn, regNO)) { + if (CheckAddCmpZeroCheckAdd(*prevInsn, insn)) { if (CheckAddCmpZeroCheckCond(insn)) { return prevInsn; } else { return nullptr; } } - if (!CheckAddCmpZeroContinue(*prevInsn, regNO)) { + if (!CheckAddCmpZeroContinue(*prevInsn, opnd)) { return nullptr; } prevInsn = prevInsn->GetPrev(); @@ -4913,7 +5078,7 @@ Insn* AddCmpZeroAArch64::CheckAddCmpZeroAArch64Pattern(Insn &insn, regno_t regNO return nullptr; } -bool AddCmpZeroAArch64::CheckAddCmpZeroCheckCond(const Insn &insn) { +bool AddCmpZeroAArch64::CheckAddCmpZeroCheckCond(const Insn &insn) const { Insn *nextInsn = insn.GetNext(); while (nextInsn != nullptr) { if (!nextInsn->IsMachineInstruction()) { @@ -4940,13 +5105,13 @@ void AddCmpZeroAArch64::Run(BB &bb, Insn &insn) { if (mop != MOP_wcmpri && mop != MOP_xcmpri) 
{ return; } + auto &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); auto &opnd3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); if (!opnd3.IsZero()) { return; } - auto &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); - regno_t regNO = opnd2.GetRegisterNumber(); - Insn *prevAddInsn = CheckAddCmpZeroAArch64Pattern(insn, regNO); + + Insn *prevAddInsn = CheckAddCmpZeroAArch64Pattern(insn, opnd2); if (!prevAddInsn) { return; } @@ -4969,57 +5134,56 @@ void AddCmpZeroAArch64::Run(BB &bb, Insn &insn) { bb.RemoveInsn(insn); } -bool ComplexExtendWordLslAArch64::IsExtendWordLslPattern(const Insn &insn) const { - Insn *nextInsn = insn.GetNext(); - if (nextInsn == nullptr) { +bool ComplexExtendWordLslPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xsxtw64 && insn.GetMachineOpcode() != MOP_xuxtw64) { return false; } - MOperator nextMop = nextInsn->GetMachineOpcode(); + useInsn = insn.GetNextMachineInsn(); + if (useInsn == nullptr) { + return false; + } + MOperator nextMop = useInsn->GetMachineOpcode(); if (nextMop != MOP_xlslrri6) { return false; } return true; } -void ComplexExtendWordLslAArch64::Run(BB &bb, Insn &insn) { - if (!IsExtendWordLslPattern(insn)) { +void ComplexExtendWordLslPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { return; } - MOperator mop = insn.GetMachineOpcode(); - Insn *nextInsn = insn.GetNext(); - auto &nextOpnd2 = static_cast(nextInsn->GetOperand(kInsnThirdOpnd)); - if (nextOpnd2.GetValue() > k32BitSize) { + MOperator curMop = insn.GetMachineOpcode(); + auto &lslImmOpnd = static_cast(useInsn->GetOperand(kInsnThirdOpnd)); + ASSERT(lslImmOpnd.GetValue() >= 0, "invalid immOpnd of lsl"); + if (lslImmOpnd.GetValue() > k32BitSize) { return; } - auto &opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); - auto &nextOpnd1 = static_cast(nextInsn->GetOperand(kInsnSecondOpnd)); - regno_t regNO0 = opnd0.GetRegisterNumber(); - regno_t nextRegNO1 = nextOpnd1.GetRegisterNumber(); - if (regNO0 != nextRegNO1 || 
IfOperandIsLiveAfterInsn(opnd0, *nextInsn)) { + auto &extDefOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &lslUseOpnd = static_cast(useInsn->GetOperand(kInsnSecondOpnd)); + regno_t extDefRegNO = extDefOpnd.GetRegisterNumber(); + regno_t lslUseRegNO = lslUseOpnd.GetRegisterNumber(); + if (extDefRegNO != lslUseRegNO || IfOperandIsLiveAfterInsn(extDefOpnd, *useInsn)) { return; } - auto &opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); - auto &nextOpnd0 = static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); - regno_t regNO1 = opnd1.GetRegisterNumber(); - cgFunc.InsertExtendSet(regNO1); - MOperator mopNew = mop == MOP_xsxtw64 ? MOP_xsbfizrri6i6 : MOP_xubfizrri6i6; - auto *aarch64CGFunc = static_cast(&cgFunc); - RegOperand ®1 = aarch64CGFunc->GetOrCreateVirtualRegisterOperand(regNO1); - ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(k32BitSize, k6BitSize, false); - Insn &newInsnSbfiz = cgFunc.GetInsnBuilder()->BuildInsn(mopNew, - nextOpnd0, reg1, nextOpnd2, newImm); - bb.RemoveInsn(*nextInsn); - bb.ReplaceInsn(insn, newInsnSbfiz); -} + MOperator mopNew = (curMop == MOP_xsxtw64 ? 
MOP_xsbfizrri6i6 : MOP_xubfizrri6i6); + auto &extUseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &lslDefOpnd = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + ImmOperand &newImmOpnd = static_cast(cgFunc)->CreateImmOperand(k32BitSize, k6BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopNew, lslDefOpnd, extUseOpnd, lslImmOpnd, newImmOpnd); + bb.RemoveInsn(*useInsn); + bb.ReplaceInsn(insn, newInsn); + optSuccess = true; +} bool AddCmpZeroPatternSSA::CheckCondition(Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); if (curMop != MOP_wcmpri && curMop != MOP_xcmpri) { return false; } - auto &ImmOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); - if (!ImmOpnd.IsZero()) { + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (!immOpnd.IsZero()) { return false; } @@ -5063,14 +5227,14 @@ void AddCmpZeroPatternSSA::Run(BB &bb, Insn &insn) { MOperator newAddMop = GetMopUpdateAPSR(prevAddMop, isShiftAdd); ASSERT(newAddMop != MOP_undef, "unknown Add code"); /* - * Since new opnd can be defined in SSA ReplaceInsn, we should avoid pattern matching again. + * Since new opnd can not be defined in SSA ReplaceInsn, we should avoid pattern matching again. * For "adds" can only be inserted in this phase, so we could do a simple check. 
*/ Insn *nextInsn = insn.GetNext(); while (nextInsn != nullptr) { if (!nextInsn->IsMachineInstruction()) { nextInsn = nextInsn->GetNext(); - return; + continue; } MOperator nextMop = nextInsn->GetMachineOpcode(); if (nextMop == newAddMop) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp index 2d64d15f10663fbfe2c426de2520e312f31ba44f..95512e8912c9ea1a7ca994672d0c52042336bc54 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp @@ -83,8 +83,8 @@ bool AArch64GenProEpilog::NeedProEpilog() { auto &aarchCGFunc = static_cast(cgFunc); const MapleVector ®sToRestore = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ? aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); - size_t calleeSavedRegSize = kTwoRegister; - CHECK_FATAL(regsToRestore.size() >= calleeSavedRegSize, "Forgot FP and LR ?"); + size_t calleeSavedRegSize = kOneRegister; + CHECK_FATAL(regsToRestore.size() >= calleeSavedRegSize, "Forgot LR ?"); if (funcHasCalls || regsToRestore.size() > calleeSavedRegSize || aarchCGFunc.HasStackLoadStore() || static_cast(cgFunc.GetMemlayout())->GetSizeOfLocals() > 0 || cgFunc.GetFunction().GetAttr(FUNCATTR_callersensitive)) { @@ -110,7 +110,7 @@ MemOperand *AArch64GenProEpilog::GetDownStack() { stkSize -= static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()); } int32 memSize = (stkSize - kOffset8MemPos) - static_cast(vArea); - MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, GetPointerSize() * kBitsPerByte); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, GetPointerBitSize()); if (downStk->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); @@ -128,10 +128,10 @@ void 
AArch64GenProEpilog::GenStackGuard() { GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); RegOperand &stAddrOpnd = - aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerBitSize(), kRegTyInt); aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); - MemOperand *guardMemOp = aarchCGFunc.CreateMemOperand(GetPointerSize() * kBitsPerByte, stAddrOpnd, + MemOperand *guardMemOp = aarchCGFunc.CreateMemOperand(GetPointerBitSize(), stAddrOpnd, aarchCGFunc.CreateImmOperand(0, k32BitSize, false)); MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *guardMemOp); @@ -148,8 +148,8 @@ void AArch64GenProEpilog::AddStackGuard(BB &bb) { aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); GenStackGuard(); RegOperand &stAddrOpnd = - aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); - auto mOp = aarchCGFunc.PickStInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerBitSize(), kRegTyInt); + auto mOp = aarchCGFunc.PickStInsn(GetPointerBitSize(), PTY_u64); Insn &tmpInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *GetDownStack()); tmpInsn.SetDoNotRemove(true); cgFunc.GetCurBB()->AppendInsn(tmpInsn); @@ -159,19 +159,19 @@ void AArch64GenProEpilog::AddStackGuard(BB &bb) { cgFunc.SetCurBB(*formerCurBB); } -BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { +void AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { if (!stackProtect) { - return bb; + return; } BB *formerCurBB = cgFunc.GetCurBB(); auto &aarchCGFunc = static_cast(cgFunc); GenStackGuard(); RegOperand &stAddrOpnd = - aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + 
aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerBitSize(), kRegTyInt); RegOperand &checkOp = - aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R10, GetPointerSize() * kBitsPerByte, kRegTyInt); - auto mOp = aarchCGFunc.PickLdInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R10, GetPointerBitSize(), kRegTyInt); + auto mOp = aarchCGFunc.PickLdInsn(GetPointerBitSize(), PTY_u64); Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, checkOp, *GetDownStack()); newInsn.SetDoNotRemove(true); cgFunc.GetCurBB()->AppendInsn(newInsn); @@ -181,16 +181,37 @@ BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { aarchCGFunc.SelectCondGoto(aarchCGFunc.GetOrCreateLabelOperand(failLable), OP_brtrue, OP_ne, stAddrOpnd, aarchCGFunc.CreateImmOperand(0, k64BitSize, false), PTY_u64, false); - bb.AppendBBInsns(*(cgFunc.GetCurBB())); - LabelIdx nextBBLableIdx = aarchCGFunc.CreateLabel(); - BB *nextBB = cgFunc.CreateNewBB(nextBBLableIdx, bb.IsUnreachable(), bb.GetKind(), bb.GetFrequency()); - bb.AppendBB(*nextBB); - bb.PushBackSuccs(*nextBB); - nextBB->PushBackPreds(bb); - if (cgFunc.GetLastBB() == &bb) { - cgFunc.SetLastBB(*nextBB); + auto chkBB = cgFunc.CreateNewBB(bb.GetLabIdx(), bb.IsUnreachable(), BB::kBBIf, bb.GetFrequency()); + chkBB->AppendBBInsns(bb); + bb.ClearInsns(); + Insn *lastInsn = chkBB->GetLastInsn(); + while (lastInsn != nullptr && (!lastInsn->IsMachineInstruction() || + AArch64isa::IsPseudoInstruction(lastInsn->GetMachineOpcode()))) { + lastInsn = lastInsn->GetPrev(); } + bool isTailCall = lastInsn == nullptr ? 
false : lastInsn->IsTailCall(); + if (isTailCall) { + chkBB->RemoveInsn(*lastInsn); + bb.AppendInsn(*lastInsn); + } + if (&bb == cgFunc.GetFirstBB()) { + cgFunc.SetFirstBB(*chkBB); + } + chkBB->AppendBBInsns(*(cgFunc.GetCurBB())); + bb.PrependBB(*chkBB); + chkBB->PushBackSuccs(bb); + auto &originPreds = bb.GetPreds(); + for (auto pred : originPreds) { + pred->RemoveSuccs(bb); + pred->PushBackSuccs(*chkBB); + chkBB->PushBackPreds(*pred); + } + LabelIdx nextLable = aarchCGFunc.CreateLabel(); + bb.SetLabIdx(nextLable); + cgFunc.SetLab2BBMap(nextLable, bb); + bb.ClearPreds(); + bb.PushBackPreds(*chkBB); BB *newBB = aarchCGFunc.CreateNewBB(failLable, bb.IsUnreachable(), BB::kBBGoto, bb.GetFrequency()); cgFunc.SetCurBB(*newBB); @@ -199,16 +220,15 @@ BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { ListOperand *srcOpnds = aarchCGFunc.CreateListOpnd(*cgFunc.GetFuncScopeAllocator()); Insn &callInsn = aarchCGFunc.AppendCall(*failFunc, *srcOpnds); callInsn.SetDoNotRemove(true); - LabelOperand &targetOpnd = cgFunc.GetOrCreateLabelOperand(nextBB->GetLabIdx()); + LabelOperand &targetOpnd = cgFunc.GetOrCreateLabelOperand(bb.GetLabIdx()); newBB->AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); - nextBB->AppendBB(*newBB); - bb.PushBackSuccs(*newBB); - newBB->PushBackPreds(bb); - newBB->PushBackSuccs(*nextBB); + bb.AppendBB(*newBB); + chkBB->PushBackSuccs(*newBB); + newBB->PushBackPreds(*chkBB); + newBB->PushBackSuccs(bb); + bb.PushBackPreds(*newBB); - bb.SetKind(BB::kBBIf); cgFunc.SetCurBB(*formerCurBB); - return *nextBB; } MemOperand *AArch64GenProEpilog::SplitStpLdpOffsetForCalleeSavedWithAddInstruction(CGFunc &cgFunc, @@ -234,11 +254,11 @@ void AArch64GenProEpilog::AppendInstructionPushPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset) { auto &aarchCGFunc = static_cast(cgFunc); MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; - Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, 
GetPointerSize() * kBitsPerByte, rty); - Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); - Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); + Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerBitSize()); - uint32 dataSize = GetPointerSize() * kBitsPerByte; + uint32 dataSize = GetPointerBitSize(); CHECK_FATAL(offset >= 0, "offset must >= 0"); if (offset > kStpLdpImm64UpperBound) { o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, *static_cast(o2), dataSize, R16); @@ -253,11 +273,11 @@ void AArch64GenProEpilog::AppendInstructionPushSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset) { auto &aarchCGFunc = static_cast(cgFunc); MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopSingle]; - Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty); - Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerBitSize(), rty); + Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerBitSize()); MemOperand *aarchMemO1 = static_cast(o1); - uint32 dataSize = GetPointerSize() * kBitsPerByte; + uint32 dataSize = GetPointerBitSize(); if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R16); @@ -282,10 +302,12 @@ Insn &AArch64GenProEpilog::AppendInstructionForAllocateOrDeallocateCallFrame(int } if (argsToStkPassSize <= kStrLdrImm64UpperBound - kOffset8MemPos) { mOp = isAllocate ? 
pushPopOps[kRegsPushOp][rty][kPushPopSingle] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; - RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), size * kBitsPerByte); - Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o2); - AppendInstructionTo(insn1, cgFunc); + if (storeFP) { + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o2); + AppendInstructionTo(insn1, cgFunc); + } RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize + size), size * kBitsPerByte); @@ -296,11 +318,13 @@ Insn &AArch64GenProEpilog::AppendInstructionForAllocateOrDeallocateCallFrame(int RegOperand &oo = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, size * kBitsPerByte, kRegTyInt); ImmOperand &io1 = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k64BitSize, true); aarchCGFunc.SelectCopyImm(oo, io1, PTY_i64); - RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); RegOperand &rsp = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, size * kBitsPerByte, kRegTyInt); MemOperand *mo = aarchCGFunc.CreateMemOperand(size * kBitsPerByte, rsp, oo); - Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? MOP_xstr : MOP_xldr, o0, *mo); - AppendInstructionTo(insn1, cgFunc); + if (storeFP) { + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? 
MOP_xstr : MOP_xldr, o0, *mo); + AppendInstructionTo(insn1, cgFunc); + } ImmOperand &io2 = aarchCGFunc.CreateImmOperand(size, k64BitSize, true); aarchCGFunc.SelectAdd(oo, oo, io2, PTY_i64); RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); @@ -316,16 +340,18 @@ Insn &AArch64GenProEpilog::CreateAndAppendInstructionForAllocateCallFrame(int64 RegType rty) { auto &aarchCGFunc = static_cast(cgFunc); CG *currCG = cgFunc.GetCG(); - MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + MOperator mOp = (storeFP || argsToStkPassSize > kStrLdrPerPostUpperBound) ? + pushPopOps[kRegsPushOp][rty][kPushPopPair] : pushPopOps[kRegsPushOp][rty][kPushPopSingle]; Insn *allocInsn = nullptr; if (argsToStkPassSize > kStpLdpImm64UpperBound) { allocInsn = &AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, true); } else { - Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); - Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), - GetPointerSize() * kBitsPerByte); - allocInsn = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + GetPointerBitSize()); + allocInsn = (storeFP || argsToStkPassSize > kStrLdrPerPostUpperBound) ? 
+ &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2) : &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, *o2); AppendInstructionTo(*allocInsn, cgFunc); } if (currCG->InstrumentWithDebugTraceCall()) { @@ -376,11 +402,13 @@ void AArch64GenProEpilog::AppendInstructionAllocateCallFrame(AArch64reg reg0, AA } else { offset = stackFrameSize; } - MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; - RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); - RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); - MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(static_cast(-offset), GetPointerSize() * kBitsPerByte); - ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + MOperator mOp = (storeFP || offset > kStrLdrPerPostUpperBound) ? + pushPopOps[kRegsPushOp][rty][kPushPopPair] : pushPopOps[kRegsPushOp][rty][kPushPopSingle]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(static_cast(-offset), GetPointerBitSize()); + ipoint = (storeFP || offset > kStrLdrPerPostUpperBound) ? 
+ &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2) : &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, o2); AppendInstructionTo(*ipoint, cgFunc); if (currCG->InstrumentWithDebugTraceCall()) { aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); @@ -437,21 +465,25 @@ void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg ipoint = cgFunc.GetCurBB()->GetLastInsn(); ipoint->SetStackDef(true); } else { - MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; - RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); - RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); - MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(-stackFrameSize, GetPointerSize() * kBitsPerByte); - ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + MOperator mOp = (storeFP || stackFrameSize > kStrLdrPerPostUpperBound) ? + pushPopOps[kRegsPushOp][rty][kPushPopPair] : pushPopOps[kRegsPushOp][rty][kPushPopSingle]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(-stackFrameSize, GetPointerBitSize()); + ipoint = (storeFP || stackFrameSize > kStrLdrPerPostUpperBound) ? 
+ &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2) : &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, o2); AppendInstructionTo(*ipoint, cgFunc); ipoint->SetStackDef(true); } if (useStpSub) { - MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; - RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); - RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); - MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, GetPointerSize() * kBitsPerByte); - ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + MOperator mOp = storeFP ? pushPopOps[kRegsPushOp][rty][kPushPopPair] : + pushPopOps[kRegsPushOp][rty][kPushPopSingle]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, GetPointerBitSize()); + ipoint = storeFP ? &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2) : + &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, *o2); AppendInstructionTo(*ipoint, cgFunc); } if (currCG->InstrumentWithDebugTraceCall()) { @@ -526,9 +558,10 @@ void AArch64GenProEpilog::GeneratePushRegs() { } MapleVector::const_iterator it = regsToSave.begin(); - /* skip the first two registers */ - CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); - ++it; + // skip the RFP & RLR + if (*it == RFP) { + ++it; + } CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); ++it; @@ -565,7 +598,10 @@ void AArch64GenProEpilog::GeneratePushRegs() { for (; it != regsToSave.end(); ++it) { AArch64reg reg = *it; - CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?"); + // skip the RFP + if (reg == RFP) { + continue; + } CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?"); RegType regType = AArch64isa::IsGPRegister(reg) ? 
kRegTyInt : kRegTyFloat; AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; @@ -804,10 +840,10 @@ bool AArch64GenProEpilog::TestPredsOfRetBB(const BB &exitBB) { void AArch64GenProEpilog::AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset) { auto &aarchCGFunc = static_cast(cgFunc); MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopSingle]; - Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty); - Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerBitSize(), rty); + Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerBitSize()); MemOperand *aarchMemO1 = static_cast(o1); - uint32 dataSize = GetPointerSize() * kBitsPerByte; + uint32 dataSize = GetPointerBitSize(); if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R16); } @@ -821,11 +857,11 @@ void AArch64GenProEpilog::AppendInstructionPopPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset) { auto &aarchCGFunc = static_cast(cgFunc); MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; - Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); - Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); - Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); + Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerBitSize()); - uint32 
dataSize = GetPointerSize() * kBitsPerByte; + uint32 dataSize = GetPointerBitSize(); CHECK_FATAL(offset >= 0, "offset must >= 0"); if (offset > kStpLdpImm64UpperBound) { o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, *static_cast(o2), dataSize, R16); @@ -839,8 +875,8 @@ void AArch64GenProEpilog::AppendInstructionPopPair(CGFunc &cgFunc, void AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty) { auto &aarchCGFunc = static_cast(cgFunc); MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; - Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); - Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); int32 stackFrameSize = static_cast( static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); @@ -853,7 +889,7 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, Operand *o2 = nullptr; if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { - o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), GetPointerSize() * kBitsPerByte); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), GetPointerBitSize()); } else { if (stackFrameSize > kStpLdpImm64UpperBound) { useLdpAdd = true; @@ -862,7 +898,7 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, } else { offset = stackFrameSize; } - o2 = &aarchCGFunc.CreateCallFrameOperand(offset, GetPointerSize() * kBitsPerByte); + o2 = &aarchCGFunc.CreateCallFrameOperand(offset, GetPointerBitSize()); } if (useLdpAdd) { @@ -891,8 +927,8 @@ void 
AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty) { auto &aarchCGFunc = static_cast(cgFunc); MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; - Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); - Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerBitSize(), rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerBitSize(), rty); int32 stackFrameSize = static_cast( static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); int32 argsToStkPassSize = static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); @@ -909,24 +945,32 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg r lmbcOffset = argsToStkPassSize; } if (stackFrameSize > kStpLdpImm64UpperBound || isLmbc) { - Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, (isLmbc ? lmbcOffset : 0), GetPointerSize() * kBitsPerByte); - Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, (isLmbc ? lmbcOffset : 0), GetPointerBitSize()); + mOp = storeFP ? pushPopOps[kRegsPopOp][rty][kPushPopPair] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + Insn &deallocInsn = storeFP ? 
cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2) : + cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, *o2); cgFunc.GetCurBB()->AppendInsn(deallocInsn); Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); } else { - MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(stackFrameSize, GetPointerSize() * kBitsPerByte); - Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(stackFrameSize, GetPointerBitSize()); + mOp = (storeFP || stackFrameSize > kStrLdrPerPostUpperBound) ? + pushPopOps[kRegsPopOp][rty][kPushPopPair] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + Insn &deallocInsn = (storeFP || stackFrameSize > kStrLdrPerPostUpperBound) ? + cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2) : cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, o2); cgFunc.GetCurBB()->AppendInsn(deallocInsn); } } else { Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), - GetPointerSize() * kBitsPerByte); + GetPointerBitSize()); if (argsToStkPassSize > kStpLdpImm64UpperBound) { (void)AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, false); } else { - Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + mOp = (storeFP || argsToStkPassSize > kStrLdrPerPostUpperBound) ? + pushPopOps[kRegsPopOp][rty][kPushPopPair] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + Insn &deallocInsn = (storeFP || argsToStkPassSize > kStrLdrPerPostUpperBound) ? 
+ cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2) : cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, *o2); cgFunc.GetCurBB()->AppendInsn(deallocInsn); } @@ -961,8 +1005,10 @@ void AArch64GenProEpilog::GeneratePopRegs() { * Make sure this is reflected when computing calleeSavedRegs.size() * skip the first two registers */ - CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); - ++it; + // skip the RFP & RLR + if (*it == RFP) { + ++it; + } CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); ++it; @@ -1002,9 +1048,10 @@ void AArch64GenProEpilog::GeneratePopRegs() { */ for (; it != regsToRestore.end(); ++it) { AArch64reg reg = *it; - CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?"); + if (reg == RFP) { + continue; + } CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?"); - RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; if (firstHalf == kRinvalid) { @@ -1042,7 +1089,8 @@ void AArch64GenProEpilog::AppendJump(const MIRSymbol &funcSymbol) { } void AArch64GenProEpilog::AppendBBtoEpilog(BB &epilogBB, BB &newBB) { - if (epilogBB.GetPreds().empty() && cgFunc.GetMirModule().IsCModule() && CGOptions::DoTailCallOpt()) { + if (epilogBB.GetPreds().empty() && &epilogBB != cgFunc.GetFirstBB() && + cgFunc.GetMirModule().IsCModule() && CGOptions::DoTailCallOpt()) { epilogBB.SetNeedRestoreCfi(false); Insn &junk = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none); epilogBB.AppendInsn(junk); @@ -1079,20 +1127,7 @@ void AArch64GenProEpilog::GenerateEpilog(BB &bb) { return; } /* generate stack protected instruction */ - BB &epilogBB = GenStackGuardCheckInsn(bb); - - if (&bb != &epilogBB) { - auto curBBIt = std::find(cgFunc.GetExitBBsVec().begin(), cgFunc.GetExitBBsVec().end(), &bb); - CHECK_FATAL(curBBIt != cgFunc.GetExitBBsVec().end(), "check case in GenerateEpilog"); - 
(void)cgFunc.GetExitBBsVec().erase(curBBIt); - cgFunc.GetExitBBsVec().push_back(&epilogBB); - BB *commonExit = cgFunc.GetCommonExitBB(); - auto exitPredIt = std::find(commonExit->GetPredsBegin(), commonExit->GetPredsEnd(), &bb); - if (exitPredIt != commonExit->GetPredsEnd()) { - commonExit->ErasePreds(exitPredIt); - commonExit->PushBackPreds(epilogBB); - } - } + GenStackGuardCheckInsn(bb); auto &aarchCGFunc = static_cast(cgFunc); CG *currCG = cgFunc.GetCG(); @@ -1136,9 +1171,9 @@ void AArch64GenProEpilog::GenerateEpilog(BB &bb) { } GenerateRet(*(cgFunc.GetCurBB())); - AppendBBtoEpilog(epilogBB, *cgFunc.GetCurBB()); + AppendBBtoEpilog(bb, *cgFunc.GetCurBB()); if (cgFunc.GetCurBB()->GetHasCfi()) { - epilogBB.SetHasCfi(); + bb.SetHasCfi(); } cgFunc.SetCurBB(*formerCurBB); @@ -1169,7 +1204,7 @@ void AArch64GenProEpilog::Run() { if (cgFunc.IsExitBBsVecEmpty()) { if (cgFunc.GetCleanupBB() != nullptr && cgFunc.GetCleanupBB()->GetPrev() != nullptr) { cgFunc.PushBackExitBBsVec(*cgFunc.GetCleanupBB()->GetPrev()); - } else { + } else if (!cgFunc.GetMirModule().IsCModule()) { cgFunc.PushBackExitBBsVec(*cgFunc.GetLastBB()->GetPrev()); } } @@ -1177,6 +1212,10 @@ void AArch64GenProEpilog::Run() { GenerateProlog(*(cgFunc.GetPrologureBB())); for (auto *exitBB : cgFunc.GetExitBBsVec()) { + // Do not generate epilog in fast-path-return BB + if (exitBB->IsFastPathReturn()) { + continue; + } GenerateEpilog(*exitBB); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp index 71587fc7a1446eceb41ae1bfcedf488d49b0fa95..2ab84731ea0b06b7c7b012d19f929ae97acd2605 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp @@ -66,7 +66,6 @@ bool AArch64Prop::IsInLimitCopyRange(VRegVersion *toBeReplaced) { void AArch64Prop::CopyProp() { PropOptimizeManager optManager; optManager.Optimize(*cgFunc, GetSSAInfo(), GetRegll()); - optManager.Optimize(*cgFunc, 
GetSSAInfo()); optManager.Optimize(*cgFunc, GetSSAInfo()); } @@ -77,7 +76,7 @@ void AArch64Prop::TargetProp(Insn &insn) { a64StrLdrProp.DoOpt(); } -void A64ConstProp::DoOpt() { +void A64ConstProp::DoOpt() const { if (curInsn->GetMachineOpcode() == MOP_wmovri32 || curInsn->GetMachineOpcode() == MOP_xmovri64) { Operand &destOpnd = curInsn->GetOperand(kInsnFirstOpnd); CHECK_FATAL(destOpnd.IsRegister(), "must be reg operand"); @@ -213,15 +212,15 @@ MOperator A64ConstProp::GetFoldMopAndVal(int64 &newVal, int64 constVal, const In uint32 amount = shiftOpnd.GetShiftAmount(); BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); switch (sOp) { - case BitShiftOperand::kLSL: { + case BitShiftOperand::kShiftLSL: { newVal = constVal + static_cast((static_cast(constVal)) << amount); break; } - case BitShiftOperand::kLSR: { + case BitShiftOperand::kShiftLSR: { newVal = constVal + (static_cast(constVal) >> amount); break; } - case BitShiftOperand::kASR: { + case BitShiftOperand::kShiftASR: { newVal = constVal + (static_cast(constVal) >> amount); break; } @@ -244,15 +243,15 @@ MOperator A64ConstProp::GetFoldMopAndVal(int64 &newVal, int64 constVal, const In uint32 amount = shiftOpnd.GetShiftAmount(); BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); switch (sOp) { - case BitShiftOperand::kLSL: { + case BitShiftOperand::kShiftLSL: { newVal = constVal - static_cast((static_cast(constVal)) << amount); break; } - case BitShiftOperand::kLSR: { + case BitShiftOperand::kShiftLSR: { newVal = constVal - static_cast((static_cast(constVal) >> amount)); break; } - case BitShiftOperand::kASR: { + case BitShiftOperand::kShiftASR: { newVal = constVal - static_cast((static_cast(constVal) >> amount)); break; } @@ -320,7 +319,7 @@ bool A64ConstProp::ArithConstReplaceForOneOpnd(Insn &useInsn, DUInsnInfo &useDUI /* try aarch64 imm shift mode */ tempImm->SetValue(static_cast(tempImm->GetValue()) >> 12); if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd) && - 
static_cast(CGOptions::GetInstance().GetOptimizeLevel()) < CGOptions::kLevel0) { + CGOptions::GetInstance().GetOptimizeLevel() < static_cast(CGOptions::kLevel0)) { ASSERT(false, "NIY"); } auto *zeroImm = &(static_cast(cgFunc)-> @@ -352,7 +351,7 @@ bool A64ConstProp::ArithConstReplaceForOneOpnd(Insn &useInsn, DUInsnInfo &useDUI return true; } -bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) { +bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) const { Insn *useInsn = useDUInfo.GetInsn(); CHECK_FATAL(useInsn != nullptr, "get useInsn failed"); if (useDUInfo.GetOperands().size() == 1) { @@ -409,11 +408,11 @@ bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &co if (useOpndIdx == kInsnThirdOpnd) { auto &shiftBit = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); int64 val = constOpnd.GetValue(); - if (shiftBit.GetShiftOp() == BitShiftOperand::kLSL) { + if (shiftBit.GetShiftOp() == BitShiftOperand::kShiftLSL) { val = static_cast(static_cast(val) << shiftBit.GetShiftAmount()); - } else if (shiftBit.GetShiftOp() == BitShiftOperand::kLSR) { + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kShiftLSR) { val = static_cast(static_cast(val) >> shiftBit.GetShiftAmount()); - } else if (shiftBit.GetShiftOp() == BitShiftOperand::kASR) { + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kShiftASR) { val = static_cast(static_cast(val) >> shiftBit.GetShiftAmount()); } else { CHECK_FATAL(false, "shift type is not defined"); @@ -433,7 +432,7 @@ bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &co return false; } -bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) { +bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) const { MOperator curMop = useDUInfo.GetInsn()->GetMachineOpcode(); switch (curMop) { case MOP_xmovrr: @@ -490,7 +489,7 @@ bool 
A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) { return false; } -bool A64ConstProp::ReplaceCmpToCmn(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) const { +bool A64ConstProp::ReplaceCmpToCmn(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) const { Insn *useInsn = useDUInfo.GetInsn(); if (useDUInfo.GetOperands().size() != 1) { return false; @@ -845,6 +844,8 @@ MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { RegOperand *replace = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); if (replace != nullptr) { auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + // sub can not prop vary imm + CHECK_FATAL(immOpnd.GetVary() != kUnAdjustVary, "NIY, imm wrong vary type"); int64 defVal = -(immOpnd.GetValue()); newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); } @@ -856,7 +857,8 @@ MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { if (replace != nullptr) { auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); int64 defVal = immOpnd.GetValue(); - newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + newMemOpnd = + HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize(), immOpnd.GetVary()); } break; } @@ -867,6 +869,7 @@ MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); auto &shiftOpnd = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + CHECK_FATAL(immOpnd.GetVary() != kUnAdjustVary, "NIY, imm wrong vary type"); auto defVal = static_cast(static_cast(immOpnd.GetValue()) << shiftOpnd.GetShiftAmount()); newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); } @@ -879,6 +882,7 @@ MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { auto &immOpnd = 
static_cast(defInsn->GetOperand(kInsnThirdOpnd)); auto &shiftOpnd = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + CHECK_FATAL(immOpnd.GetVary() != kUnAdjustVary, "NIY, imm wrong vary type"); int64 defVal = -static_cast(static_cast(immOpnd.GetValue()) << shiftOpnd.GetShiftAmount()); newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); } @@ -923,7 +927,7 @@ MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { RegOperand *newIndexOpnd = GetReplaceReg( static_cast(defInsn->GetOperand(kInsnThirdOpnd))); auto &shift = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); - if (shift.GetShiftOp() != BitShiftOperand::kLSL) { + if (shift.GetShiftOp() != BitShiftOperand::kShiftLSL) { break; } if (newBaseOpnd != nullptr && newIndexOpnd != nullptr && @@ -971,6 +975,7 @@ MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { OfstOperand *newOffset = &static_cast(cgFunc)->CreateOfstOpnd( static_cast(imm->GetValue()), k32BitSize); CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newOffset->SetVary(imm->GetVary()); newMemOpnd = static_cast(cgFunc)->CreateMemOperand(currMemOpnd.GetSize(), *base, *newOffset); } break; @@ -988,7 +993,7 @@ MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { if ((memPropMode == kPropOffset || memPropMode == kPropShift) && MemOperand::CheckNewAmount(currMemOpnd.GetSize(), shift)) { BitShiftOperand &shiftOperand = - static_cast(cgFunc)->CreateBitShiftOperand(BitShiftOperand::kLSL, shift, k8BitSize); + static_cast(cgFunc)->CreateBitShiftOperand(BitShiftOperand::kShiftLSL, shift, k8BitSize); newMemOpnd = static_cast(cgFunc)->CreateMemOperand( currMemOpnd.GetSize(), *base, *newOfst, shiftOperand); } @@ -1029,8 +1034,8 @@ RegOperand *A64StrLdrProp::GetReplaceReg(RegOperand &a64Reg) { return nullptr; } -MemOperand *A64StrLdrProp::HandleArithImmDef(RegOperand &replace, Operand 
*oldOffset, - int64 defVal, uint32 memSize) const { +MemOperand *A64StrLdrProp::HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, + uint32 memSize, VaryType varyType) const { if (memPropMode != kPropBase) { return nullptr; } @@ -1044,6 +1049,7 @@ MemOperand *A64StrLdrProp::HandleArithImmDef(RegOperand &replace, Operand *oldOf static_cast(defVal + ofstOpnd->GetValue()), k32BitSize); } CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + newOfstImm->SetVary(varyType); return static_cast(cgFunc)->CreateMemOperand(memSize, replace, *newOfstImm); } @@ -1089,7 +1095,6 @@ void AArch64Prop::PropPatternOpt() { optManager.Optimize(*cgFunc, GetSSAInfo()); optManager.Optimize(*cgFunc, GetSSAInfo()); optManager.Optimize(*cgFunc, GetSSAInfo()); - optManager.Optimize(*cgFunc, GetSSAInfo()); } bool ExtendShiftPattern::IsSwapInsn(const Insn &insn) const { @@ -1099,6 +1104,8 @@ bool ExtendShiftPattern::IsSwapInsn(const Insn &insn) const { case MOP_waddrrr: case MOP_xiorrrr: case MOP_wiorrrr: + case MOP_wandrrr: + case MOP_xandrrr: return true; default: return false; @@ -1248,6 +1255,16 @@ void ExtendShiftPattern::SetLsMOpType(const Insn &use) { lsMOpType = kLwIor; break; } + case MOP_xandrrr: + case MOP_xandrrrs: { + lsMOpType = kLxAnd; + break; + } + case MOP_wandrrr: + case MOP_wandrrrs: { + lsMOpType = kLwAnd; + break; + } default: { lsMOpType = kLsUndef; } @@ -1272,16 +1289,18 @@ void ExtendShiftPattern::SelectExtendOrShift(const Insn &def) { case MOP_xuxtw64: extendOp = ExtendShiftOperand::kUXTW; break; case MOP_wlslrri5: - case MOP_xlslrri6: shiftOp = BitShiftOperand::kLSL; + case MOP_xlslrri6: shiftOp = BitShiftOperand::kShiftLSL; break; case MOP_xlsrrri6: - case MOP_wlsrrri5: shiftOp = BitShiftOperand::kLSR; + case MOP_wlsrrri5: + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: shiftOp = BitShiftOperand::kShiftLSR; break; case MOP_xasrrri6: - case MOP_wasrrri5: shiftOp = BitShiftOperand::kASR; + case MOP_wasrrri5: shiftOp = 
BitShiftOperand::kShiftASR; break; case MOP_wextrrrri5: - case MOP_xextrrrri6: shiftOp = BitShiftOperand::kROR; + case MOP_xextrrrri6: shiftOp = BitShiftOperand::kShiftROR; break; default: { extendOp = ExtendShiftOperand::kUndef; @@ -1326,7 +1345,7 @@ SuffixType ExtendShiftPattern::CheckOpType(const Operand &lastOpnd) const { } constexpr uint32 kExMopTypeSize = 9; -constexpr uint32 kLsMopTypeSize = 15; +constexpr uint32 kLsMopTypeSize = 17; MOperator exMopTable[kExMopTypeSize] = { MOP_undef, MOP_xxwaddrrre, MOP_wwwaddrrre, MOP_xxwsubrrre, MOP_wwwsubrrre, @@ -1335,7 +1354,8 @@ MOperator exMopTable[kExMopTypeSize] = { MOperator lsMopTable[kLsMopTypeSize] = { MOP_undef, MOP_xaddrrrs, MOP_waddrrrs, MOP_xsubrrrs, MOP_wsubrrrs, MOP_xcmnrrs, MOP_wcmnrrs, MOP_xcmprrs, MOP_wcmprrs, MOP_xeorrrrs, - MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs + MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs, + MOP_xandrrrs, MOP_wandrrrs }; /* new Insn extenType: * ===================== @@ -1425,7 +1445,7 @@ void ExtendShiftPattern::Optimize(Insn &insn) { amount = lastExtendOpnd.GetShiftAmount(); } if (shiftOp != BitShiftOperand::kUndef) { - auto &immOpnd = (shiftOp == BitShiftOperand::kROR ? + auto &immOpnd = (shiftOp == BitShiftOperand::kShiftROR ? 
static_cast(defInsn->GetOperand(kInsnFourthOpnd)) : static_cast(defInsn->GetOperand(kInsnThirdOpnd))); offset = static_cast(immOpnd.GetValue()); @@ -1501,7 +1521,7 @@ bool ExtendShiftPattern::CheckCondition(Insn &insn) { } Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd); CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!"); - if (shiftOp == BitShiftOperand::kROR) { + if (shiftOp == BitShiftOperand::kShiftROR) { if (lsMOpType != kLxEor && lsMOpType != kLwEor && lsMOpType != kLxIor && lsMOpType != kLwIor) { return false; } @@ -1530,7 +1550,32 @@ bool ExtendShiftPattern::CheckCondition(Insn &insn) { if (useVersion->HasImplicitCvt() && shiftOp != BitShiftOperand::kUndef) { return false; } - if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) && + // Check pattern ubfx merged into bitshiftOperand LSR, for example: + // ubfx w0, w1, #m, #n + // and w2, w0, w3 ===> and w2, w3, w1, LSR #m + // Condition: m + n = regsize + // Check pattern ubfx merged into extendshiftOperand UXTH/UXTB, for example: + // ubfx w0, w1, #0, #16 + // add w2, w0, w3 ===> add w2, w3, w1, UXTH + MOperator defMop = defInsn->GetMachineOpcode(); + if (defMop == MOP_xubfxrri6i6 || defMop == MOP_wubfxrri5i5) { + int64 mValue = static_cast(defInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 nValue = static_cast(defInsn->GetOperand(kInsnFourthOpnd)).GetValue(); + int64 size = is64BitSize ? 
static_cast(k64BitSize) : static_cast(k32BitSize); + if (mValue == static_cast(k0BitSize) && nValue == static_cast(k16BitSize)) { + shiftOp = BitShiftOperand::kUndef; + extendOp = ExtendShiftOperand::kUXTH; + return true; + } else if (mValue == static_cast(k0BitSize) && nValue == static_cast(k8BitSize)) { + shiftOp = BitShiftOperand::kUndef; + extendOp = ExtendShiftOperand::kUXTB; + return true; + } + if ((mValue + nValue) != size) { + return false; + } + } + if ((shiftOp == BitShiftOperand::kShiftLSR || shiftOp == BitShiftOperand::kShiftASR) && (defSrcOpnd.GetSize() > regOperand.GetSize())) { return false; } @@ -1538,7 +1583,7 @@ bool ExtendShiftPattern::CheckCondition(Insn &insn) { /* check regDefSrc */ VRegVersion *replaceUseV = optSsaInfo->FindSSAVersion(defSrcRegNo); CHECK_FATAL(replaceUseV != nullptr, "useVRegVersion must not be null based on ssa"); - if (replaceUseV->GetAllUseInsns().size() > 1 && shiftOp != BitShiftOperand::kROR) { + if (replaceUseV->GetAllUseInsns().size() > 1 && shiftOp != BitShiftOperand::kShiftROR) { return false; } return true; @@ -1741,7 +1786,6 @@ bool CopyRegProp::IsValidCopyProp(const RegOperand &dstReg, const RegOperand &sr if (useInsn->IsPhi() && dstReg.GetSize() != srcReg.GetSize()) { return false; } - dstll = regll->GetLiveInterval(dstRegNO); srcll = regll->GetLiveInterval(srcRegNO); ASSERT(dstll != nullptr, "dstll should not be nullptr"); @@ -1775,18 +1819,12 @@ bool CopyRegProp::CheckCondition(Insn &insn) { insn.SetOperand(kInsnSecondOpnd, cgFunc.CreateImmOperand(PTY_u64, 0)); } if (destReg.IsSSAForm() && srcReg.IsSSAForm()) { - /* case for ExplicitExtendProp */ - auto &propInsns = optSsaInfo->GetSafePropInsns(); - bool isSafeCvt = std::find(propInsns.begin(), propInsns.end(), insn.GetId()) != propInsns.end(); - if (destReg.GetSize() != srcReg.GetSize() && !isSafeCvt) { + if (destReg.GetSize() != srcReg.GetSize()) { VaildateImplicitCvt(destReg, srcReg, insn); return false; } if (destReg.GetValidBitsNum() >= 
srcReg.GetValidBitsNum()) { destReg.SetValidBitsNum(srcReg.GetValidBitsNum()); - } else if (!isSafeCvt) { - CHECK_FATAL(false, "do not support explicit extract bit in mov"); - return false; } destVersion = optSsaInfo->FindSSAVersion(destReg.GetRegisterNumber()); ASSERT(destVersion != nullptr, "find Version failed"); @@ -1903,54 +1941,6 @@ void RedundantPhiProp::Run() { } } -void RedundantExpandProp::Optimize(Insn &insn) { - insn.SetMOP(AArch64CG::kMd[MOP_xmovrr]); -} - -bool RedundantExpandProp::CheckCondition(Insn &insn) { - if (insn.GetMachineOpcode() != MOP_xuxtw64) { - return false; - } - auto *destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); - if (destOpnd != nullptr && destOpnd->IsSSAForm()) { - destVersion = optSsaInfo->FindSSAVersion(destOpnd->GetRegisterNumber()); - ASSERT(destVersion != nullptr, "find Version failed"); - for (auto destUseIt : destVersion->GetAllUseInsns()) { - Insn *useInsn = destUseIt.second->GetInsn(); - int32 lastOpndId = static_cast(useInsn->GetOperandSize() - 1); - const InsnDesc *md = useInsn->GetDesc(); - for (int32 i = lastOpndId; i >= 0; --i) { - auto *reg = (md->opndMD[i]); - auto &opnd = useInsn->GetOperand(i); - if (reg->IsUse() && opnd.IsRegister() && static_cast(opnd).GetRegisterNumber() == destOpnd->GetRegisterNumber()) { - if (opnd.GetSize() == k32BitSize && reg->GetSize() == k32BitSize) { - continue; - } else { - return false; - } - } - } - } - return true; - } - return false; -} - -void RedundantExpandProp::Run() { - FOR_ALL_BB(bb, &cgFunc) { - FOR_BB_INSNS(insn, bb) { - if (!insn->IsMachineInstruction()) { - continue; - } - Init(); - if (!CheckCondition(*insn)) { - continue; - } - Optimize(*insn); - } - } -} - void RedundantPhiProp::Optimize(Insn &insn) { optSsaInfo->ReplaceAllUse(destVersion, srcVersion); } @@ -1970,119 +1960,6 @@ bool RedundantPhiProp::CheckCondition(Insn &insn) { return false; } -/* - * case : ubfx v2 v1 0 32 - * phi v3 v2 - * mopX v4 v3 - */ -bool ValidBitNumberProp::IsPhiToMopX(const 
RegOperand &defOpnd) const { - VRegVersion *destVersion = optSsaInfo->FindSSAVersion(defOpnd.GetRegisterNumber()); - for (auto destUseIt : destVersion->GetAllUseInsns()) { - Insn *useInsn = destUseIt.second->GetInsn(); - if (useInsn->IsPhi()) { - return true; - } - const InsnDesc *useMD = &AArch64CG::kMd[useInsn->GetMachineOpcode()]; - for (auto &opndUseIt : as_const(destUseIt.second->GetOperands())) { - const OpndDesc *useProp = useMD->GetOpndDes(opndUseIt.first); - if (useProp->GetSize() == k64BitSize) { - return true; - } - } - } - return false; -} - -bool ValidBitNumberProp::IsImplicitUse(const RegOperand &dstOpnd, const RegOperand &srcOpnd) const { - for (auto destUseIt : destVersion->GetAllUseInsns()) { - Insn *useInsn = destUseIt.second->GetInsn(); - if (useInsn->GetMachineOpcode() == MOP_xuxtw64) { - return true; - } - if (useInsn->GetMachineOpcode() == MOP_xubfxrri6i6) { - auto &lsbOpnd = static_cast(useInsn->GetOperand(kInsnThirdOpnd)); - auto &widthOpnd = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); - if (lsbOpnd.GetValue() == k0BitSize && widthOpnd.GetValue() == k32BitSize) { - return false; - } - } - if (useInsn->IsPhi()) { - auto &defOpnd = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); - if (IsPhiToMopX(defOpnd)) { - return true; - } - if (defOpnd.GetSize() == k32BitSize) { - return false; - } - } - /* if srcOpnd upper 32 bits are valid, it can not prop to mop_x */ - if (srcOpnd.GetSize() == k64BitSize && dstOpnd.GetSize() == k64BitSize) { - const InsnDesc *useMD = &AArch64CG::kMd[useInsn->GetMachineOpcode()]; - for (auto &opndUseIt : as_const(destUseIt.second->GetOperands())) { - const OpndDesc *useProp = useMD->GetOpndDes(opndUseIt.first); - if (useProp->GetSize() == k64BitSize) { - return true; - } - } - } - } - return false; -} - -bool ValidBitNumberProp::CheckCondition(Insn &insn) { - /* extend to all shift pattern in future */ - RegOperand *destOpnd = nullptr; - RegOperand *srcOpnd = nullptr; - if (insn.GetMachineOpcode() == 
MOP_xuxtw64) { - destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); - srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); - } - if (insn.GetMachineOpcode() == MOP_xubfxrri6i6) { - destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); - srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); - auto &lsb = static_cast(insn.GetOperand(kInsnThirdOpnd)); - auto &width = static_cast(insn.GetOperand(kInsnFourthOpnd)); - if ((lsb.GetValue() != 0) || (width.GetValue() != k32BitSize)) { - return false; - } - } - if (destOpnd != nullptr && destOpnd->IsSSAForm() && srcOpnd != nullptr && srcOpnd->IsSSAForm()) { - destVersion = optSsaInfo->FindSSAVersion(destOpnd->GetRegisterNumber()); - ASSERT(destVersion != nullptr, "find Version failed"); - srcVersion = optSsaInfo->FindSSAVersion(srcOpnd->GetRegisterNumber()); - ASSERT(srcVersion != nullptr, "find Version failed"); - if (destVersion->HasImplicitCvt()) { - return false; - } - if (IsImplicitUse(*destOpnd, *srcOpnd)) { - return false; - } - srcVersion->SetImplicitCvt(); - return true; - } - return false; -} - -void ValidBitNumberProp::Optimize(Insn &insn) { - optSsaInfo->ReplaceAllUse(destVersion, srcVersion); - cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); -} - -void ValidBitNumberProp::Run() { - FOR_ALL_BB(bb, &cgFunc) { - FOR_BB_INSNS(insn, bb) { - if (!insn->IsMachineInstruction()) { - continue; - } - Init(); - if (!CheckCondition(*insn)) { - continue; - } - Optimize(*insn); - } - } -} - void FpSpConstProp::Run() { FOR_ALL_BB(bb, &cgFunc) { FOR_BB_INSNS(insn, bb) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 9124fc4d075ed4bb63cc52321fb920a16d1dfd36..ef9bea776f8ae851877c6faeae1a30cdbfb14f5e 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -30,13 +30,13 @@ void 
AArch64ReachingDefinition::InitStartGen() { CCLocInfo pLoc; for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); ++i) { MIRType *type = cgFunc->GetFunction().GetNthParamType(i); - (void)parmLocator.LocateNextParm(*type, pLoc, i == 0, &cgFunc->GetFunction()); + (void)parmLocator.LocateNextParm(*type, pLoc, i == 0, cgFunc->GetFunction().GetMIRFuncType()); if (pLoc.reg0 == 0) { /* If is a large frame, parameter addressing mode is based vreg:Vra. */ continue; } - uint64 symSize = cgFunc->GetBecommon().GetTypeSize(type->GetTypeIndex()); + uint64 symSize = type->GetSize(); if ((cgFunc->GetMirModule().GetSrcLang() == kSrcLangC) && (symSize > k8ByteSize)) { /* For C structure passing in one or two registers. */ symSize = k8ByteSize; @@ -106,7 +106,7 @@ void AArch64ReachingDefinition::InitStartGen() { static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(firstSym->GetStIndex())); int32 stOffset = cgFunc->GetBaseOffset(*firstSymLoc); MIRType *firstType = cgFunc->GetFunction().GetNthParamType(i); - uint32 firstSymSize = cgFunc->GetBecommon().GetTypeSize(firstType->GetTypeIndex()); + uint32 firstSymSize = firstType->GetSize(); uint32 firstStackSize = firstSymSize < k4ByteSize ? 
k4ByteSize : firstSymSize; MemOperand *memOpnd = aarchCGFunc->CreateStackMemOpnd(RFP, stOffset, firstStackSize * kBitsPerByte); @@ -178,7 +178,7 @@ void AArch64ReachingDefinition::AddRetPseudoInsns() { if (exitBBSize == 0) { if (cgFunc->GetCleanupBB() != nullptr && cgFunc->GetCleanupBB()->GetPrev() != nullptr) { AddRetPseudoInsn(*cgFunc->GetCleanupBB()->GetPrev()); - } else { + } else if (!cgFunc->GetMirModule().IsCModule()) { AddRetPseudoInsn(*cgFunc->GetLastBB()->GetPrev()); } } else { @@ -1170,6 +1170,9 @@ void AArch64ReachingDefinition::InitInfoForMemOperand(Insn &insn, Operand &opnd, CHECK_FATAL(index == nullptr, "Existing [x29 + index] Memory Address"); ASSERT(memOpnd.GetOffsetImmediate(), "offset must be a immediate value"); int64 offsetVal = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (offsetVal > stackSize) { + return; + } if (offsetVal < 0) { offsetVal = static_cast(stackSize) + offsetVal; } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp index fd67c42114f80a72a4ce56d4fcc2502d93bf2f0e..43a08b136c9c44ed30027658ad417c6c370f2e74 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp @@ -151,7 +151,7 @@ void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachUseOperand(Insn &in } } -/* handle live range for bb->live_out */ +// handle live range for bb->live_out void AArch64LiveIntervalAnalysis::SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint) { --currPoint; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp index 179d7f998977d401f01246a79347fd3b6650d91a..22fd7f2974285f716c333c297e3b012ec0bfca5d 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp @@ -43,7 +43,8 @@ void 
AArch64RegInfo::Init() { void AArch64RegInfo::Fini() { AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); - a64CGFunc->AddtoCalleeSaved(RFP); + // add a placeholder for RFP + a64CGFunc->SetNumIntregToCalleeSave(a64CGFunc->GetNumIntregToCalleeSave() + 1); a64CGFunc->AddtoCalleeSaved(RLR); a64CGFunc->NoteFPLRAddedToCalleeSavedList(); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp index dee1d2aa1a43fadffbbec79fb43ebba96d74ece2..7ce4e7430e3634fd19871c6e7fb4632ca76a47f9 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp @@ -24,8 +24,7 @@ namespace maplebe { -#define RS_DUMP 0 -#define M_LOG LogInfo::MapleLogger() +#define RS_DUMP CG_DEBUG_FUNC(*cgFunc) #define SKIP_FPLR(REG) \ if (REG >= R29 && REG < V8) { \ @@ -77,7 +76,7 @@ void AArch64RegSavesOpt::CollectLiveInfo(const BB &bb, const Operand &opnd, bool } if (isDef) { /* First def */ - if (!IsCalleeBitSet(GetCalleeBitsDef(), bb.GetId(), regNO)) { + if (!IsCalleeBitSetDef(bb.GetId(), regNO)) { SetCalleeBit(GetCalleeBitsDef(), bb.GetId(), regNO); } } @@ -225,12 +224,13 @@ void AArch64RegSavesOpt::GenRegDefUse() { GenAccDefs(); -#if RS_DUMP - M_LOG << "CalleeBits for " << cgFunc->GetName() << ":\n"; - for (BBID i = 1; i < cgFunc->NumBBs(); ++i) { - M_LOG << i << " : " << calleeBitsDef[i] << " " << calleeBitsUse[i] << " " << calleeBitsAcc[i] << "\n"; + if (RS_DUMP) { + LogInfo::MapleLogger() << "CalleeBits for " << cgFunc->GetName() << ":\n"; + for (BBID i = 1; i < cgFunc->NumBBs(); ++i) { + LogInfo::MapleLogger() << i << " : " << calleeBitsDef[i] << " " << + calleeBitsUse[i] << " " << calleeBitsAcc[i] << "\n"; + } } -#endif } bool AArch64RegSavesOpt::CheckForUseBeforeDefPath() { @@ -248,43 +248,44 @@ bool AArch64RegSavesOpt::CheckForUseBeforeDefPath() { } } if (found != 0) { -#if RS_DUMP - CalleeBitsType mask = 1; - for (uint32 i = 0; i < 
static_cast(sizeof(CalleeBitsType) << k3BitSize); ++i) { - regno_t reg = ReverseRegBitMap(i); - if ((use & mask) != 0 && (acc & mask) == 0) { - M_LOG << "R" << (reg - 1) << " in BB" << found << " is in a use before def path\n"; + if (RS_DUMP) { + CalleeBitsType mask = 1; + for (uint32 i = 0; i < static_cast(sizeof(CalleeBitsType) << k3BitSize); ++i) { + regno_t reg = ReverseRegBitMap(i); + if ((use & mask) != 0 && (acc & mask) == 0) { + LogInfo::MapleLogger() << "R" << (reg - 1) << " in BB" << + found << " is in a use before def path\n"; + } + mask <<= 1; + } } - mask <<= 1; - } -#endif return true; } return false; } void AArch64RegSavesOpt::PrintBBs() const { - M_LOG << "RegSaves LiveIn/Out of BFS nodes:\n"; + LogInfo::MapleLogger() << "RegSaves LiveIn/Out of BFS nodes:\n"; for (auto *bb : bfs->sortedBBs) { - M_LOG << "< === > "; - M_LOG << bb->GetId(); - M_LOG << " pred:["; + LogInfo::MapleLogger() << "< === > "; + LogInfo::MapleLogger() << bb->GetId(); + LogInfo::MapleLogger() << " pred:["; for (auto predBB : bb->GetPreds()) { - M_LOG << " " << predBB->GetId(); + LogInfo::MapleLogger() << " " << predBB->GetId(); } - M_LOG << "] succs:["; + LogInfo::MapleLogger() << "] succs:["; for (auto succBB : bb->GetSuccs()) { - M_LOG << " " << succBB->GetId(); + LogInfo::MapleLogger() << " " << succBB->GetId(); } - M_LOG << "]\n LiveIn of [" << bb->GetId() << "]: "; + LogInfo::MapleLogger() << "]\n LiveIn of [" << bb->GetId() << "]: "; for (auto liveIn: bb->GetLiveInRegNO()) { - M_LOG << liveIn << " "; + LogInfo::MapleLogger() << liveIn << " "; } - M_LOG << "\n LiveOut of [" << bb->GetId() << "]: "; + LogInfo::MapleLogger() << "\n LiveOut of [" << bb->GetId() << "]: "; for (auto liveOut: bb->GetLiveOutRegNO()) { - M_LOG << liveOut << " "; + LogInfo::MapleLogger() << liveOut << " "; } - M_LOG << "\n"; + LogInfo::MapleLogger() << "\n"; } } @@ -312,19 +313,20 @@ int32 AArch64RegSavesOpt::CheckCriteria(BB *bb, regno_t reg) const { bool 
AArch64RegSavesOpt::AlreadySavedInDominatorList(const BB &bb, regno_t reg) const { BB *aBB = GetDomInfo()->GetDom(bb.GetId()); -#if RS_DUMP - M_LOG << "Checking dom list starting " << bb.GetId() << " for saved R" << (reg - 1) << ":\n "; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << "Checking dom list starting " << + bb.GetId() << " for saved R" << (reg - 1) << ":\n "; + } while (!aBB->GetPreds().empty()) { /* can't go beyond prolog */ -#if RS_DUMP - M_LOG << aBB->GetId() << " "; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << aBB->GetId() << " "; + } int t = CheckCriteria(aBB, reg); if (t != 0) { -#if RS_DUMP - std::string str = t == 1 ? " saved here, skip!\n" : " has livein/out, skip!\n"; - M_LOG << " --R" << (reg - 1) << str; -#endif + if (RS_DUMP) { + std::string str = t == 1 ? " saved here, skip!\n" : " has livein/out, skip!\n"; + LogInfo::MapleLogger() << " --R" << (reg - 1) << str; + } return true; /* previously saved, inspect next reg */ } aBB = GetDomInfo()->GetDom(aBB->GetId()); @@ -354,9 +356,10 @@ void AArch64RegSavesOpt::CheckAndRemoveBlksFromCurSavedList(SavedBBInfo &sp, con /* Found! Don't plan to save in abb */ sp.RemoveBB(sbb); bbSavedRegs[sbb->GetId()]->RemoveSaveReg(reg); -#if RS_DUMP - M_LOG << " --R" << (reg - 1) << " save removed from BB" << sbb->GetId() << "\n"; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << " --R" << (reg - 1) << " save removed from BB" << + sbb->GetId() << "\n"; + } break; } abb = GetDomInfo()->GetDom(abb->GetId()); @@ -368,13 +371,14 @@ void AArch64RegSavesOpt::CheckAndRemoveBlksFromCurSavedList(SavedBBInfo &sp, con Save is needed for a 1st def callee-save register at its dominator block outside any loop. 
*/ void AArch64RegSavesOpt::DetermineCalleeSaveLocationsDoms() { -#if RS_DUMP - M_LOG << "Determining regsave sites using dom list for " << cgFunc->GetName() << ":\n"; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << "Determining regsave sites using dom list for " << + cgFunc->GetName() << ":\n"; + } for (auto *bb : bfs->sortedBBs) { -#if RS_DUMP - M_LOG << "BB: " << bb->GetId() << "\n"; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << "BB: " << bb->GetId() << "\n"; + } CalleeBitsType c = GetBBCalleeBits(GetCalleeBitsDef(), bb->GetId()); if (c == 0) { continue; @@ -410,10 +414,10 @@ void AArch64RegSavesOpt::DetermineCalleeSaveLocationsDoms() { regSavedBBs[creg]->InsertBB(bbDom); BBID bid = bbDom->GetId(); -#if RS_DUMP - M_LOG << " --R" << (reg - 1); - M_LOG << " to save in " << bid << "\n"; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << " --R" << (reg - 1); + LogInfo::MapleLogger() << " to save in " << bid << "\n"; + } SavedRegInfo *ctx = GetbbSavedRegsEntry(bid); if (!ctx->ContainSaveReg(reg)) { ctx->InsertSaveReg(reg); @@ -430,33 +434,31 @@ void AArch64RegSavesOpt::DetermineCalleeSaveLocationsDoms() { } void AArch64RegSavesOpt::DetermineCalleeSaveLocationsPre() { - AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto *aarchCGFunc = static_cast(cgFunc); MapleAllocator sprealloc(memPool); -#if RS_DUMP - M_LOG << "Determining regsave sites using ssa_pre for " << cgFunc->GetName() << ":\n"; -#endif - const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); - /* do 2 regs at a time to force store pairs */ - for (uint32 i = 0; i < callees.size(); ++i) { - AArch64reg reg1 = callees[i]; - SKIP_FPLR(reg1); - AArch64reg reg2 = kRinvalid; - if ((i + 1) < callees.size()) { - reg2 = callees[i + 1]; - SKIP_FPLR(reg2); - ++i; + if (RS_DUMP) { + LogInfo::MapleLogger() << "Determining regsave sites using ssa_pre for " << + cgFunc->GetName() << ":\n"; + } + AArch64RegFinder regFinder(*cgFunc, *this); + if (RS_DUMP) { + regFinder.Dump(); + } + while (true) 
{ + // get reg1 and reg2 + auto [reg1, reg2] = regFinder.GetPairCalleeeReg(); + if (reg1 == kRinvalid) { + break; } SsaPreWorkCand wkCand(&sprealloc); for (BBID bid = 1; bid < static_cast(bbSavedRegs.size()); ++bid) { /* Set the BB occurrences of this callee-saved register */ - if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg1) || - IsCalleeBitSet(GetCalleeBitsUse(), bid, reg1)) { + if (IsCalleeBitSetDef(bid, reg1) || IsCalleeBitSetUse(bid, reg1)) { (void)wkCand.occBBs.insert(bid); } if (reg2 != kRinvalid) { - if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg2) || - IsCalleeBitSet(GetCalleeBitsUse(), bid, reg2)) { + if (IsCalleeBitSetDef(bid, reg2) || IsCalleeBitSetUse(bid, reg2)) { (void)wkCand.occBBs.insert(bid); } } @@ -475,29 +477,29 @@ void AArch64RegSavesOpt::DetermineCalleeSaveLocationsPre() { in prolog/epilog */ MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); if (std::find(pe.begin(), pe.end(), reg1) == pe.end()) { - pe.push_back(reg1); + pe.push_back(static_cast(reg1)); } if (reg2 != kRinvalid && std::find(pe.begin(), pe.end(), reg2) == pe.end()) { - pe.push_back(reg2); + pe.push_back(static_cast(reg2)); } -#if RS_DUMP - M_LOG << "Save R" << (reg1 - 1) << " n/a, do in Pro/Epilog\n"; - if (reg2 != kRinvalid) { - M_LOG << " R " << (reg2 - 1) << " n/a, do in Pro/Epilog\n"; + if (RS_DUMP) { + LogInfo::MapleLogger() << "Save R" << (reg1 - 1) << " n/a, do in Pro/Epilog\n"; + if (reg2 != kRinvalid) { + LogInfo::MapleLogger() << " R " << (reg2 - 1) << " n/a, do in Pro/Epilog\n"; + } } -#endif continue; } if (!wkCand.saveAtEntryBBs.empty()) { for (BBID entBB : wkCand.saveAtEntryBBs) { -#if RS_DUMP - std::string r = reg1 <= R28 ? "R" : "V"; - M_LOG << "BB " << entBB << " save for : " << r << (reg1 - 1) << "\n"; - if (reg2 != kRinvalid) { - std::string r2 = reg2 <= R28 ? "R" : "V"; - M_LOG << " : " << r2 << (reg2 - 1) << "\n"; + if (RS_DUMP) { + std::string r = reg1 <= R28 ? 
"R" : "V"; + LogInfo::MapleLogger() << "BB " << entBB << " save for : " << r << (reg1 - 1) << "\n"; + if (reg2 != kRinvalid) { + std::string r2 = reg2 <= R28 ? "R" : "V"; + LogInfo::MapleLogger() << " : " << r2 << (reg2 - 1) << "\n"; + } } -#endif GetbbSavedRegsEntry(entBB)->InsertSaveReg(reg1); if (reg2 != kRinvalid) { GetbbSavedRegsEntry(entBB)->InsertSaveReg(reg2); @@ -531,19 +533,19 @@ void AArch64RegSavesOpt::RevertToRestoreAtEpilog(AArch64reg reg) { if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { pe.push_back(reg); } -#if RS_DUMP - M_LOG << "Restore R" << (reg - 1) << " n/a, do in Pro/Epilog\n"; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << "Restore R" << (reg - 1) << " n/a, do in Pro/Epilog\n"; + } } /* Determine calleesave regs restore locations by calling ssu-pre, previous bbSavedRegs memory is cleared and restore locs recorded in it */ void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { - AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto *aarchCGFunc = static_cast(cgFunc); MapleAllocator sprealloc(memPool); -#if RS_DUMP - M_LOG << "Determining Callee Restore Locations:\n"; -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << "Determining Callee Restore Locations:\n"; + } const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); for (auto reg : callees) { SKIP_FPLR(reg); @@ -557,9 +559,8 @@ void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { (void)wkCand.saveBBs.insert(bid); } } - /* Set the BB occurrences of this callee-saved register */ - if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg) || - IsCalleeBitSet(GetCalleeBitsUse(), bid, reg)) { + // Set the BB occurrences of this callee-saved register + if (IsCalleeBitSetDef(bid, reg) || IsCalleeBitSetUse(bid, reg)) { (void)wkCand.occBBs.insert(bid); } } @@ -574,10 +575,10 @@ void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { } if (!wkCand.restoreAtEntryBBs.empty() || !wkCand.restoreAtExitBBs.empty()) { for (BBID entBB : wkCand.restoreAtEntryBBs) { -#if 
RS_DUMP - std::string r = reg <= R28 ? "r" : "v"; - M_LOG << "BB " << entBB << " restore: " << r << (reg - 1) << "\n"; -#endif + if (RS_DUMP) { + std::string r = reg <= R28 ? "r" : "v"; + LogInfo::MapleLogger() << "BB " << entBB << " restore: " << r << (reg - 1) << "\n"; + } GetbbSavedRegsEntry(entBB)->InsertEntryReg(reg); } for (BBID exitBB : wkCand.restoreAtExitBBs) { @@ -602,48 +603,28 @@ void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { /* otherwise, BB_FT etc */ GetbbSavedRegsEntry(exitBB)->InsertExitReg(reg); } -#if RS_DUMP + if (RS_DUMP) { std::string r = reg <= R28 ? "R" : "V"; - M_LOG << "BB " << exitBB << " restore: " << r << (reg - 1) << "\n"; -#endif + LogInfo::MapleLogger() << "BB " << exitBB << " restore: " << r << (reg - 1) << "\n"; + } } } } } -int32 AArch64RegSavesOpt::FindCalleeBase() const { - int32 offset = static_cast( - (static_cast(cgFunc->GetMemlayout())->RealStackFrameSize()) - - (static_cast(cgFunc)->SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* FP/LR */) - - (cgFunc->GetMemlayout()->SizeOfArgsToStackPass())); - - if (cgFunc->GetFunction().GetAttr(FUNCATTR_varargs)) { - /* GR/VR save areas are above the callee save area */ - AArch64MemLayout *ml = static_cast(cgFunc->GetMemlayout()); - int saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + - RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); - offset -= saveareasize; - } - return offset; -} - -void AArch64RegSavesOpt::SetupRegOffsets() { +int32 AArch64RegSavesOpt::GetCalleeBaseOffset() const { AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); const MapleVector &proEpilogRegs = aarchCGFunc->GetProEpilogSavedRegs(); - int32 regsInProEpilog = static_cast(proEpilogRegs.size() - 2); - const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); - - int32 offset = FindCalleeBase(); - for (auto reg : callees) { - SKIP_FPLR(reg); - if (std::count(proEpilogRegs.begin(), proEpilogRegs.end(), reg) != 0) { - continue; - } 
- if (regOffset.find(reg) == regOffset.end()) { - regOffset[reg] = static_cast(offset + (regsInProEpilog * kBitsPerByte)); - offset += static_cast(kIntregBytelen); - } + int32 regsInProEpilog = static_cast(proEpilogRegs.size()); + // for RLR + regsInProEpilog--; + // for RFP + if (std::find(proEpilogRegs.begin(), proEpilogRegs.end(), RFP) != proEpilogRegs.end()) { + regsInProEpilog--; } + + int32 offset = aarchCGFunc->GetMemlayout()->GetCalleeSaveBaseLoc(); + return offset + (regsInProEpilog * kBitsPerByte); } void AArch64RegSavesOpt::InsertCalleeSaveCode() { @@ -651,10 +632,10 @@ void AArch64RegSavesOpt::InsertCalleeSaveCode() { BB *saveBB = cgFunc->GetCurBB(); AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); -#if RS_DUMP - M_LOG << "Inserting Save for " << cgFunc->GetName() << ":\n"; -#endif - int32 offset = FindCalleeBase(); + if (RS_DUMP) { + LogInfo::MapleLogger() << "Inserting Save for " << cgFunc->GetName() << ":\n"; + } + int32 offset = GetCalleeBaseOffset(); for (BB *bb : bfs->sortedBBs) { bid = bb->GetId(); if (bbSavedRegs[bid] != nullptr && !bbSavedRegs[bid]->GetSaveSet().empty()) { @@ -667,17 +648,14 @@ void AArch64RegSavesOpt::InsertCalleeSaveCode() { RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; std::string r = reg <= R28 ? 
"R" : "V"; - /* If reg not seen before, record offset and then update */ + // assign and record stack offset to each register if (regOffset.find(areg) == regOffset.end()) { - regOffset[areg] = static_cast(offset); + regOffset[areg] = offset; offset += static_cast(kIntregBytelen); } if (firstHalf == kRinvalid) { /* 1st half in reg pair */ firstHalf = reg; -#if RS_DUMP - M_LOG << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg]<< "\n"; -#endif } else { if (regOffset[reg] == (regOffset[firstHalf] + k8ByteSize)) { /* firstHalf & reg consecutive, make regpair */ @@ -695,9 +673,10 @@ void AArch64RegSavesOpt::InsertCalleeSaveCode() { static_cast(regOffset[reg])); } firstHalf = kRinvalid; -#if RS_DUMP - M_LOG << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg]<< "\n"; -#endif + } + if (RS_DUMP) { + LogInfo::MapleLogger() << r << (reg - 1) << " save in BB" << + bid << " Offset = " << regOffset[reg]<< "\n"; } } @@ -717,13 +696,13 @@ void AArch64RegSavesOpt::InsertCalleeSaveCode() { } void AArch64RegSavesOpt::PrintSaveLocs(AArch64reg reg) { - M_LOG << " for save @BB [ "; + LogInfo::MapleLogger() << " for save @BB [ "; for (size_t b = 1; b < bbSavedRegs.size(); ++b) { if (bbSavedRegs[b] != nullptr && bbSavedRegs[b]->ContainSaveReg(reg)) { - M_LOG << b << " "; + LogInfo::MapleLogger() << b << " "; } } - M_LOG << "]\n"; + LogInfo::MapleLogger() << "]\n"; } void AArch64RegSavesOpt::InsertCalleeRestoreCode() { @@ -731,10 +710,9 @@ void AArch64RegSavesOpt::InsertCalleeRestoreCode() { BB *saveBB = cgFunc->GetCurBB(); AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); -#if RS_DUMP - M_LOG << "Inserting Restore: \n"; -#endif - int32 offset = FindCalleeBase(); + if (RS_DUMP) { + LogInfo::MapleLogger() << "Inserting Restore: \n"; + } for (BB *bb : bfs->sortedBBs) { bid = bb->GetId(); SavedRegInfo *sp = bbSavedRegs[bid]; @@ -747,12 +725,13 @@ void AArch64RegSavesOpt::InsertCalleeRestoreCode() { cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); 
for (auto areg : sp->GetEntrySet()) { AArch64reg reg = static_cast(areg); - offset = static_cast(regOffset[areg]); -#if RS_DUMP - std::string r = reg <= R28 ? "R" : "V"; - M_LOG << r << (reg - 1) << " entry restore in BB " << bid << " Saved Offset = " << offset << "\n"; - PrintSaveLocs(reg); -#endif + int32 offset = static_cast(regOffset[areg]); + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + LogInfo::MapleLogger() << r << (reg - 1) << " entry restore in BB " << + bid << " Saved Offset = " << offset << "\n"; + PrintSaveLocs(reg); + } /* restore is always the same from saved offset */ RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; @@ -767,12 +746,13 @@ void AArch64RegSavesOpt::InsertCalleeRestoreCode() { cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); for (auto areg : sp->GetExitSet()) { AArch64reg reg = static_cast(areg); - offset = static_cast(regOffset[areg]); -#if RS_DUMP - std::string r = reg <= R28 ? "R" : "V"; - M_LOG << r << (reg - 1) << " exit restore in BB " << bid << " Offset = " << offset << "\n"; - PrintSaveLocs(reg); -#endif + int32 offset = static_cast(regOffset[areg]); + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + LogInfo::MapleLogger() << r << (reg - 1) << " exit restore in BB " << + bid << " Offset = " << offset << "\n"; + PrintSaveLocs(reg); + } /* restore is always single from saved offset */ RegType regType = AArch64isa::IsGPRegister(reg) ? 
kRegTyInt : kRegTyFloat; @@ -802,10 +782,10 @@ void AArch64RegSavesOpt::Run() { Bfs localBfs(*cgFunc, *memPool); bfs = &localBfs; bfs->ComputeBlockOrder(); -#if RS_DUMP - M_LOG << "##Calleeregs Placement for: " << cgFunc->GetName() << "\n"; - PrintBBs(); -#endif + if (RS_DUMP) { + LogInfo::MapleLogger() << "##Calleeregs Placement for: " << cgFunc->GetName() << "\n"; + PrintBBs(); + } /* Determined 1st def and last use of all callee-saved registers used for all BBs */ @@ -837,23 +817,136 @@ void AArch64RegSavesOpt::Run() { /* Verify saves/restores are in pair */ std::vector rlist = { R19, R20, R21, R22, R23, R24, R25, R26, R27, R28 }; for (auto reg : rlist) { - M_LOG << "Verify calleeregs_placement data for R" << (reg - 1) << ":\n"; + LogInfo::MapleLogger() << "Verify calleeregs_placement data for R" << (reg - 1) << ":\n"; std::set visited; uint32 saveBid = 0; uint32 restoreBid = 0; Verify(reg, cgFunc->GetFirstBB(), &visited, &saveBid, &restoreBid); - M_LOG << "\nVerify Done\n"; + LogInfo::MapleLogger() << "\nVerify Done\n"; } #endif - /* Assign stack offset to each shrinkwrapped register, skip over the offsets - for registers saved in prolog */ - SetupRegOffsets(); - /* Generate callee save instrs at found sites */ InsertCalleeSaveCode(); /* Generate callee restores at found sites */ InsertCalleeRestoreCode(); } + +void AArch64RegFinder::CalcRegUsedInSameBBsMat(const CGFunc &func, + const AArch64RegSavesOpt ®Save) { + auto &aarchCGFunc = static_cast(func); + auto regNum = aarchCGFunc.GetTargetRegInfo()->GetAllRegNum(); + regUsedInSameBBsMat.resize(regNum, std::vector(regNum, 0)); + + // regtype for distinguishing between int and fp + std::vector gpCallees; + std::vector fpCallees; + for (auto reg : aarchCGFunc.GetCalleeSavedRegs()) { + SKIP_FPLR(reg); + if (AArch64isa::IsGPRegister(reg)) { + gpCallees.push_back(reg); + } else { + fpCallees.push_back(reg); + } + } + + // calc regUsedInSameBBsMat + // if r1 and r2 is used in same bb, ++regUsedInSameBBsMat[r1][r2] + 
auto CalcCalleeRegInSameBBWithCalleeRegVec = + [this, &func, ®Save](const std::vector &callees) { + for (BBID bid = 1; bid < static_cast(func.NumBBs()); ++bid) { + std::vector usedRegs; + for (auto reg : callees) { + if (regSave.IsCalleeBitSetDef(bid, reg) || regSave.IsCalleeBitSetUse(bid, reg)) { + usedRegs.push_back(reg); + } + } + for (uint32 i = 0; i < usedRegs.size(); ++i) { + for (uint32 j = i + 1; j < usedRegs.size(); ++j) { + ++regUsedInSameBBsMat[usedRegs[i]][usedRegs[j]]; + ++regUsedInSameBBsMat[usedRegs[j]][usedRegs[i]]; + } + } + } + }; + + CalcCalleeRegInSameBBWithCalleeRegVec(gpCallees); + CalcCalleeRegInSameBBWithCalleeRegVec(fpCallees); +} + +void AArch64RegFinder::CalcRegUsedInBBsNum(const CGFunc &func, const AArch64RegSavesOpt ®Save) { + // calc callee reg used in BBs num + auto &aarchCGFunc = static_cast(func); + for (BBID bid = 1; bid < static_cast(aarchCGFunc.NumBBs()); ++bid) { + for (auto reg : aarchCGFunc.GetCalleeSavedRegs()) { + SKIP_FPLR(reg); + if (regSave.IsCalleeBitSetDef(bid, reg) || regSave.IsCalleeBitSetUse(bid, reg)) { + ++regUsedInBBsNum[reg]; + } + } + } +} + +void AArch64RegFinder::SetCalleeRegUnalloc(const CGFunc &func) { + // set callee reg is unalloc + auto &aarchCGFunc = static_cast(func); + for (auto reg : aarchCGFunc.GetCalleeSavedRegs()) { + SKIP_FPLR(reg); + regAlloced[reg] = false; + } +} + +std::pair AArch64RegFinder::GetPairCalleeeReg() { + regno_t reg1 = kRinvalid; + regno_t reg2 = kRinvalid; + + // find max same bb num with reg1 and reg2 + uint32 maxSameNum = 0; + for (uint32 i = 0; i < regUsedInSameBBsMat.size(); ++i) { + if (regAlloced[i]) { + continue; + } + for (uint32 j = i + 1; j < regUsedInSameBBsMat[i].size(); ++j) { + if (regAlloced[j]) { + continue; + } + if (regUsedInSameBBsMat[i][j] > maxSameNum) { + reg1 = i; + reg2 = j; + maxSameNum = regUsedInSameBBsMat[i][j]; + } + } + } + + // not found, use max RegUsedInBBsNum + if (reg1 == kRinvalid) { + reg1 = FindMaxUnallocRegUsedInBBsNum(); + } + 
regAlloced[reg1] = true; // set reg1 is alloced + if (reg2 == kRinvalid) { + reg2 = FindMaxUnallocRegUsedInBBsNum(); + } + regAlloced[reg2] = true; // set reg2 is alloced + + return {reg1, reg2}; +} + +void AArch64RegFinder::Dump() const { + LogInfo::MapleLogger() << "Dump reg finder:\n"; + LogInfo::MapleLogger() << "regUsedInSameBBsMat:\n"; + for (uint32 i = 0; i < regUsedInSameBBsMat.size(); ++i) { + if (regAlloced[i]) { + continue; + } + LogInfo::MapleLogger() << "Mat[" << i << "]:"; + for (uint32 j = 0; j < regUsedInSameBBsMat[i].size(); ++j) { + if (regUsedInSameBBsMat[i][j] == 0) { + continue; + } + LogInfo::MapleLogger() << "[" << j << ":" << regUsedInSameBBsMat[i][j] << "],"; + } + LogInfo::MapleLogger() << "\n"; + } +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_rematerialize.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_rematerialize.cpp index 28bbc00d8a80989b05300ee4633b638c9fcd86a3..54d2c7de35d23fa160de0a59a46b137bfc8d9e93 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_rematerialize.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_rematerialize.cpp @@ -68,11 +68,10 @@ std::vector AArch64Rematerializer::RematerializeForAddrof(CGFunc &cgFunc, } else { Insn *insn = &cgFunc.GetInsnBuilder()->BuildInsn(MOP_xadrp, regOp, stImm); insns.push_back(insn); - if (!addrUpper && CGOptions::IsPIC() && ((symbol->GetStorageClass() == kScGlobal) || - (symbol->GetStorageClass() == kScExtern))) { - /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + if (!addrUpper && CGOptions::IsPIC() && symbol->NeedGOT(CGOptions::IsPIE())) { + /* ldr x0, [x0, #:got_lo12:globalVar] */ OfstOperand &offsetOp = a64Func.CreateOfstOpnd(*symbol, offset, 0); - MemOperand *memOpnd = a64Func.CreateMemOperand(GetPointerSize() * kBitsPerByte, regOp, offsetOp, *symbol); + MemOperand *memOpnd = a64Func.CreateMemOperand(GetPointerBitSize(), regOp, offsetOp, *symbol); MOperator ldOp = (memOpnd->GetSize() == k64BitSize) ? 
MOP_xldr : MOP_wldr; insn = &cgFunc.GetInsnBuilder()->BuildInsn(ldOp, regOp, *memOpnd); insns.push_back(insn); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp index 1a6828a90767e65ead422b2945017fa93a495f5b..f5cd11a33eda2b460e6ac3bc6e099366927e4a80 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp @@ -32,9 +32,6 @@ */ namespace maplebe { namespace { -constexpr uint32 kClinitAdvanceCycle = 10; -constexpr uint32 kAdrpLdrAdvanceCycle = 2; -constexpr uint32 kClinitTailAdvanceCycle = 4; constexpr uint32 kSecondToLastNode = 2; } @@ -158,7 +155,7 @@ void AArch64Schedule::MemoryAccessPairOpt() { /* add readNode's succs to readyList or memList. */ for (auto succLink : readNode->GetSuccs()) { DepNode &succNode = succLink->GetTo(); - succNode.DescreaseValidPredsSize(); + succNode.DecreaseValidPredsSize(); if (succNode.GetValidPredsSize() == 0) { ASSERT(succNode.GetState() == kNormal, "schedule state should be kNormal"); succNode.SetState(kReady); @@ -381,102 +378,6 @@ void AArch64Schedule::UpdateELStartsOnCycle(uint32 cycle) { ComputeLstart(ComputeEstart(cycle)); } -/* - * If all unit of this node need when it be scheduling is free, this node can be scheduled, - * Return true. - */ -bool DepNode::CanBeScheduled() const { - for (uint32 i = 0; i < unitNum; ++i) { - Unit *unit = units[i]; - if (unit != nullptr) { - if (!unit->IsFree(i)) { - return false; - } - } - } - return true; -} - -/* Mark those unit that this node need occupy unit when it is being scheduled. */ -void DepNode::OccupyUnits() const { - for (uint32 i = 0; i < unitNum; ++i) { - Unit *unit = units[i]; - if (unit != nullptr) { - unit->Occupy(*insn, i); - } - } -} - -/* Get unit kind of this node's units[0]. 
*/ -uint32 DepNode::GetUnitKind() const { - uint32 retValue = 0; - if ((units == nullptr) || (units[0] == nullptr)) { - return retValue; - } - - switch (units[0]->GetUnitId()) { - case kUnitIdSlotD: - retValue |= kUnitKindSlot0; - break; - case kUnitIdAgen: - case kUnitIdSlotSAgen: - retValue |= kUnitKindAgen; - break; - case kUnitIdSlotDAgen: - retValue |= kUnitKindAgen; - retValue |= kUnitKindSlot0; - break; - case kUnitIdHazard: - case kUnitIdSlotSHazard: - retValue |= kUnitKindHazard; - break; - case kUnitIdCrypto: - retValue |= kUnitKindCrypto; - break; - case kUnitIdMul: - case kUnitIdSlotSMul: - retValue |= kUnitKindMul; - break; - case kUnitIdDiv: - retValue |= kUnitKindDiv; - break; - case kUnitIdBranch: - case kUnitIdSlotSBranch: - retValue |= kUnitKindBranch; - break; - case kUnitIdStAgu: - retValue |= kUnitKindStAgu; - break; - case kUnitIdLdAgu: - retValue |= kUnitKindLdAgu; - break; - case kUnitIdFpAluS: - case kUnitIdFpAluD: - retValue |= kUnitKindFpAlu; - break; - case kUnitIdFpMulS: - case kUnitIdFpMulD: - retValue |= kUnitKindFpMul; - break; - case kUnitIdFpDivS: - case kUnitIdFpDivD: - retValue |= kUnitKindFpDiv; - break; - case kUnitIdSlot0LdAgu: - retValue |= kUnitKindSlot0; - retValue |= kUnitKindLdAgu; - break; - case kUnitIdSlot0StAgu: - retValue |= kUnitKindSlot0; - retValue |= kUnitKindStAgu; - break; - default: - break; - } - - return retValue; -} - /* Count unit kinds to an array. Each element of the array indicates the unit kind number of a node set. 
*/ void AArch64Schedule::CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const { (void)arraySize; @@ -594,7 +495,7 @@ void AArch64Schedule::SelectNode(AArch64ScheduleProcessInfo &scheduleInfo) { } } /* The priority of free-reg node is higher than pipeline */ - while (!targetNode->CanBeScheduled()) { + while (!targetNode->IsResourceFree()) { scheduleInfo.IncCurrCycle(); mad->AdvanceCycle(); } @@ -676,7 +577,7 @@ bool AArch64Schedule::CheckSchedulable(AArch64ScheduleProcessInfo &info) const { if (GetConsiderRegPressure()) { info.PushElemIntoAvailableReadyList(node); } else { - if (node->CanBeScheduled() && node->GetEStart() <= info.GetCurrCycle()) { + if (node->IsResourceFree() && node->GetEStart() <= info.GetCurrCycle()) { info.PushElemIntoAvailableReadyList(node); } } @@ -1017,7 +918,7 @@ uint32 AArch64Schedule::SimulateOnly() { } DepNode *targetNode = nodes[i]; - if ((currCycle >= targetNode->GetEStart()) && targetNode->CanBeScheduled()) { + if ((currCycle >= targetNode->GetEStart()) && targetNode->IsResourceFree()) { targetNode->SetSimulateCycle(currCycle); targetNode->OccupyUnits(); @@ -1073,6 +974,10 @@ void AArch64Schedule::FinalizeScheduling(BB &bb, const DataDepBase &dataDepBase) } bb.AppendInsn(*comment); } + /* Append cfi instructions. */ + for (auto cfi : node->GetCfiInsns()) { + bb.AppendInsn(*cfi); + } /* Append insn */ if (!node->GetClinitInsns().empty()) { for (auto clinit : node->GetClinitInsns()) { @@ -1084,11 +989,6 @@ void AArch64Schedule::FinalizeScheduling(BB &bb, const DataDepBase &dataDepBase) } bb.AppendInsn(*node->GetInsn()); } - - /* Append cfi instructions. 
*/ - for (auto cfi : node->GetCfiInsns()) { - bb.AppendInsn(*cfi); - } } bb.SetLastLoc(prevLocInsn); @@ -1180,7 +1080,7 @@ void AArch64Schedule::IterateBruteForce(DepNode &targetNode, MapleVectorCanBeScheduled()) { + if (node->IsResourceFree()) { availableReadyList.emplace_back(node); } } @@ -1263,7 +1163,7 @@ uint32 AArch64Schedule::DoBruteForceSchedule() { void AArch64Schedule::UpdateReadyList(DepNode &targetNode, MapleVector &readyList, bool updateEStart) { for (auto succLink : targetNode.GetSuccs()) { DepNode &succNode = succLink->GetTo(); - succNode.DescreaseValidPredsSize(); + succNode.DecreaseValidPredsSize(); if (succNode.GetValidPredsSize() == 0) { readyList.emplace_back(&succNode); succNode.SetState(kReady); @@ -1480,8 +1380,8 @@ void AArch64Schedule::ListScheduling(bool beforeRA) { } // construct cdgNode for each BB auto *cda = memPool.New(cgFunc, memPool); - cda->CreateAllCDGNodes(); - ddb = memPool.New(memPool, cgFunc, *mad); + cda->ComputeSingleBBRegions(); + ddb = memPool.New(memPool, cgFunc, *mad, true); intraDDA = memPool.New(memPool, cgFunc, *ddb); FOR_ALL_BB(bb, &cgFunc) { if (bb->IsUnreachable()) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp index 9a16279c9f73f4e05462d7f9932aee9adff2f077..c8cccb066178dfa65861f1998d9b8ce06e5cd56f 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_standardize.cpp @@ -171,7 +171,7 @@ static AbstractIR2Target abstract2TargetTable[abstract::kMopLast] { {abstract::MOP_comment, {{MOP_nop, {kAbtractNone}, {}}}}, }; -Operand *AArch64Standardize::GetInsnResult(Insn *insn) { +Operand *AArch64Standardize::GetInsnResult(Insn *insn) const { for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { if (insn->OpndIsDef(i)) { return &(insn->GetOperand(i)); @@ -181,14 +181,14 @@ Operand *AArch64Standardize::GetInsnResult(Insn *insn) { } Insn 
*AArch64Standardize::HandleTargetImm(Insn *insn, Insn *newInsn, uint32 idx, MOperator targetMop, uint8 order) { - const InsnDesc *md = &AArch64CG::kMd[targetMop]; - ImmOperand &immOpnd = static_cast(insn->GetOperand(idx)); - if (md->IsValidImmOpnd(immOpnd.GetValue())) { + Operand &opnd = insn->GetOperand(idx); + ImmOperand &immOpnd = static_cast(opnd); + AArch64CGFunc *a64func = static_cast(GetCgFunc()); + if (a64func->IsOperandImmValid(targetMop, &opnd, idx)) { newInsn->SetOperand(order, immOpnd); } else { Operand *resOpnd = GetInsnResult(insn); CHECK_FATAL(resOpnd, "SelectTargetInsn: No result operand"); - AArch64CGFunc *a64func = static_cast(GetCgFunc()); BB &saveCurBB = *GetCgFunc()->GetCurBB(); a64func->GetDummyBB()->ClearInsns(); GetCgFunc()->SetCurBB(*a64func->GetDummyBB()); diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp index 43e99c75d8756782fb41eb6c68fb91664d42e65b..00002cae493f5c89c1fa45522258c49ad072353d 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp @@ -440,14 +440,14 @@ MemOperand *AArch64StoreLoadOpt::SelectReplaceExt(const Insn &defInsn, RegOperan newMemOpnd = static_cast(cgFunc).CreateMemOperand(memSize, base, *newOffset, extendOperand); } else if (propMode == kPropShift && MemOperand::CheckNewAmount(memSize, amount)) { BitShiftOperand &bitOperand = - static_cast(cgFunc).CreateBitShiftOperand(BitShiftOperand::kLSL, amount, k32BitSize); + static_cast(cgFunc).CreateBitShiftOperand(BitShiftOperand::kShiftLSL, amount, k32BitSize); newMemOpnd = static_cast(cgFunc).CreateMemOperand(memSize, base, *newOffset, bitOperand); } return newMemOpnd; } -MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(RegOperand &replace, - Operand *oldOffset, int64 defVal) { +MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(RegOperand &replace, Operand *oldOffset, + int64 defVal, VaryType varyType) { if (propMode 
!= kPropBase) { return nullptr; } @@ -460,6 +460,8 @@ MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(RegOperand &replace, int64 newOffVal = defVal + ofstOpnd->GetValue(); newOfstImm = &static_cast(cgFunc).CreateOfstOpnd(static_cast(newOffVal), k32BitSize); } + CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + newOfstImm->SetVary(varyType); return static_cast(cgFunc).CreateMemOperand(memSize, replace, *newOfstImm); } @@ -530,6 +532,8 @@ MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn, break; } auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + // sub can not prop vary imm + CHECK_FATAL(immOpnd.GetVary() != kUnAdjustVary, "NIY, imm wrong vary type"); int64 defVal = -(immOpnd.GetValue()); newMemOpnd = HandleArithImmDef(*replace, offset, defVal); break; @@ -538,7 +542,7 @@ MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn, case MOP_waddrri12: { auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); int64 defVal = immOpnd.GetValue(); - newMemOpnd = HandleArithImmDef(*replace, offset, defVal); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, immOpnd.GetVary()); break; } case MOP_xaddrrr: @@ -623,7 +627,7 @@ MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn, if (propMode == kPropOffset) { if (MemOperand::CheckNewAmount(memSize, shift)) { BitShiftOperand &shiftOperand = - static_cast(cgFunc).CreateBitShiftOperand(BitShiftOperand::kLSL, shift, k8BitSize); + static_cast(cgFunc).CreateBitShiftOperand(BitShiftOperand::kShiftLSL, shift, k8BitSize); newMemOpnd = static_cast(cgFunc).CreateMemOperand(memSize, base, *newOffset, shiftOperand); } @@ -631,7 +635,7 @@ MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn, shift += amount; if (MemOperand::CheckNewAmount(memSize, shift)) { BitShiftOperand &shiftOperand = - static_cast(cgFunc).CreateBitShiftOperand(BitShiftOperand::kLSL, shift, k8BitSize); + 
static_cast(cgFunc).CreateBitShiftOperand(BitShiftOperand::kShiftLSL, shift, k8BitSize); newMemOpnd = static_cast(cgFunc).CreateMemOperand(memSize, base, *newOffset, shiftOperand); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_tailcall.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_tailcall.cpp index df44817e5197f6325e654be6a4b4df4f4701eaa9..900c3b62daad1c0063bf759a61c882ebc5128e0c 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_tailcall.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_tailcall.cpp @@ -68,10 +68,6 @@ bool AArch64TailCallOpt::InsnIsAddWithRsp(Insn &insn) const { return false; } -bool AArch64TailCallOpt::OpndIsStackRelatedReg(RegOperand &opnd) const { - return (opnd.GetRegisterNumber() == R29 || opnd.GetRegisterNumber() == R31 || opnd.GetRegisterNumber() == RSP); -} - bool AArch64TailCallOpt::OpndIsR0Reg(RegOperand &opnd) const { return (opnd.GetRegisterNumber() == R0); } @@ -80,23 +76,6 @@ bool AArch64TailCallOpt::OpndIsCalleeSaveReg(RegOperand &opnd) const { return AArch64Abi::IsCalleeSavedReg(static_cast(opnd.GetRegisterNumber())); } -bool AArch64TailCallOpt::IsAddOrSubOp(MOperator mOp) const { - switch (mOp) { - case MOP_xaddrrr: - case MOP_xaddrrrs: - case MOP_xxwaddrrre: - case MOP_xaddrri24: - case MOP_xaddrri12: - case MOP_xsubrrr: - case MOP_xsubrrrs: - case MOP_xxwsubrrre: - case MOP_xsubrri12: - return true; - default: - return false; - } -} - void AArch64TailCallOpt::ReplaceInsnMopWithTailCall(Insn &insn) { MOperator insnMop = insn.GetMachineOpcode(); switch (insnMop) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp index e491b1f7c6f6686e658e7e87f67042c8634ea8bd..1c3387562441138aa30703b8ad5471fbc92265a9 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp @@ -16,14 +16,136 @@ #include "aarch64_cg.h" namespace maplebe { -void 
AArch64ValidBitOpt::DoOpt(BB &bb, Insn &insn) { + +void PropPattern::VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn) { + ASSERT(movInsn.GetMachineOpcode() == MOP_xmovrr || movInsn.GetMachineOpcode() == MOP_wmovrr, "NIY explicit CVT"); + if (destReg.GetSize() == k64BitSize && srcReg.GetSize() == k32BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xuxtw64]); + } else if (destReg.GetSize() == k32BitSize && srcReg.GetSize() == k64BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xubfxrri6i6]); + movInsn.AddOperand(cgFunc->CreateImmOperand(PTY_i64, 0)); + movInsn.AddOperand(cgFunc->CreateImmOperand(PTY_i64, k32BitSize)); + } else { + return; + } +} + +// prop ssa info and change implicit cvt to uxtw / ubfx +void PropPattern::ReplaceImplicitCvtAndProp(VRegVersion *destVersion, VRegVersion *srcVersion) { + MapleUnorderedMap useList = destVersion->GetAllUseInsns(); + ssaInfo->ReplaceAllUse(destVersion, srcVersion); + for (auto it = useList.begin(); it != useList.end(); ++it) { + Insn *useInsn = it->second->GetInsn(); + if (useInsn->GetMachineOpcode() == MOP_xmovrr || useInsn->GetMachineOpcode() == MOP_wmovrr) { + auto &dstOpnd = useInsn->GetOperand(kFirstOpnd); + auto &srcOpnd = useInsn->GetOperand(kSecondOpnd); + ASSERT(dstOpnd.IsRegister() && srcOpnd.IsRegister(), "must be"); + auto &destReg = static_cast(dstOpnd); + auto &srcReg = static_cast(srcOpnd); + // for preg case, do not change mop because preg can not be proped later. 
+ if (useInsn->GetMachineOpcode() == MOP_wmovrr && destReg.IsPhysicalRegister()) { + ssaInfo->InsertSafePropInsn(useInsn->GetId()); + continue; + } + VaildateImplicitCvt(destReg, srcReg, *useInsn); + } + } +} + +void AArch64ValidBitOpt::DoOpt() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + OptPatternWithImplicitCvt(*bb, *insn); + } + } + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + OptCvt(*bb, *insn); + } + } + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + OptPregCvt(*bb, *insn); + } + } +} + +void RedundantExpandProp::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + insn.SetMOP(AArch64CG::kMd[MOP_xmovrr]); +} + +bool RedundantExpandProp::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xuxtw64) { + return false; + } + auto *destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (destOpnd != nullptr && destOpnd->IsSSAForm()) { + destVersion = ssaInfo->FindSSAVersion(destOpnd->GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + for (auto destUseIt : destVersion->GetAllUseInsns()) { + Insn *useInsn = destUseIt.second->GetInsn(); + auto &propInsns = ssaInfo->GetSafePropInsns(); + bool isSafeCvt = std::find(propInsns.begin(), propInsns.end(), useInsn->GetId()) != propInsns.end(); + if (useInsn->IsPhi() || isSafeCvt) { + return false; + } + int32 lastOpndId = static_cast(useInsn->GetOperandSize() - 1); + const InsnDesc *md = useInsn->GetDesc(); + // i should be int + for (int32 i = lastOpndId; i >= 0; --i) { + auto *reg = (md->opndMD[static_cast(i)]); + auto &opnd = useInsn->GetOperand(static_cast(i)); + if (reg->IsUse() && opnd.IsRegister() && + static_cast(opnd).GetRegisterNumber() == destOpnd->GetRegisterNumber()) { + if (opnd.GetSize() == k32BitSize && reg->GetSize() == k32BitSize) { + continue; + } 
else { + return false; + } + } + } + } + return true; + } + return false; +} + +// Patterns that may have implicit cvt +void AArch64ValidBitOpt::OptPatternWithImplicitCvt(BB &bb, Insn &insn) { MOperator curMop = insn.GetMachineOpcode(); switch (curMop) { - case MOP_wandrri12: - case MOP_xandrri13: { - Optimize(bb, insn); + case MOP_bge: + case MOP_blt: { + OptimizeNoProp(bb, insn); break; } + case MOP_wcsetrc: + case MOP_xcsetrc: { + OptimizeNoProp(bb, insn); + break; + } + default: + break; + } +} + +// In OptCvt, optimize all cvt +// Should convert implicit mov to explicit uxtw / ubfx in pattern. +void AArch64ValidBitOpt::OptCvt(BB &bb, Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { case MOP_xuxtb32: case MOP_xuxth32: case MOP_xuxtw64: @@ -32,17 +154,25 @@ void AArch64ValidBitOpt::DoOpt(BB &bb, Insn &insn) { case MOP_xubfxrri6i6: case MOP_wsbfxrri5i5: case MOP_xsbfxrri6i6: { - Optimize(bb, insn); + OptimizeProp(bb, insn); break; } - case MOP_wcsetrc: - case MOP_xcsetrc: { - Optimize(bb, insn); + case MOP_wandrri12: + case MOP_xandrri13: { + OptimizeProp(bb, insn); break; } - case MOP_bge: - case MOP_blt: { - Optimize(bb, insn); + default: + break; + } +} + +// patterns with uxtw vreg preg +void AArch64ValidBitOpt::OptPregCvt(BB &bb, Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { + case MOP_xuxtw64: { + OptimizeProp(bb, insn); break; } default: @@ -53,6 +183,13 @@ void AArch64ValidBitOpt::DoOpt(BB &bb, Insn &insn) { void AArch64ValidBitOpt::SetValidBits(Insn &insn) { MOperator mop = insn.GetMachineOpcode(); switch (mop) { + // for case sbfx 32 64 + // we can not deduce that dst opnd valid bit num; + case MOP_xsbfxrri6i6: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + dstOpnd.SetValidBitsNum(static_cast(k64BitSize)); + break; + } case MOP_wcsetrc: case MOP_xcsetrc: { auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); @@ -88,13 +225,13 @@ void 
AArch64ValidBitOpt::SetValidBits(Insn &insn) { case MOP_xlsrrri6: { Operand &opnd = insn.GetOperand(kInsnThirdOpnd); ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); - auto shiftBits = static_cast(static_cast(opnd).GetValue()); + auto shiftBits = static_cast(static_cast(opnd).GetValue()); auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); - if ((srcOpnd.GetValidBitsNum() - shiftBits) <= 0) { + if ((static_cast(srcOpnd.GetValidBitsNum()) - shiftBits) <= 0) { dstOpnd.SetValidBitsNum(k1BitSize); } else { - dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - shiftBits); + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - static_cast(shiftBits)); } break; } @@ -117,12 +254,12 @@ void AArch64ValidBitOpt::SetValidBits(Insn &insn) { (mop == MOP_xasrrri6 && srcOpnd.GetValidBitsNum() < k64BitSize)) { Operand &opnd = insn.GetOperand(kInsnThirdOpnd); ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); - auto shiftBits = static_cast(static_cast(opnd).GetValue()); + auto shiftBits = static_cast(static_cast(opnd).GetValue()); auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); - if ((srcOpnd.GetValidBitsNum() - shiftBits) <= 0) { + if ((static_cast(srcOpnd.GetValidBitsNum()) - shiftBits) <= 0) { dstOpnd.SetValidBitsNum(k1BitSize); } else { - dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - shiftBits); + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - static_cast(shiftBits)); } } break; @@ -179,7 +316,17 @@ void AArch64ValidBitOpt::SetValidBits(Insn &insn) { dstOpnd.SetValidBitsNum(newVB); break; } - case MOP_wiorrrr: + case MOP_wiorrrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum(); + uint32 newVB = (src1VB >= src2VB ? 
src1VB : src2VB); + if (newVB > k32BitSize) { + newVB = k32BitSize; + } + dstOpnd.SetValidBitsNum(newVB); + break; + } case MOP_xiorrrr: { auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); @@ -188,7 +335,18 @@ void AArch64ValidBitOpt::SetValidBits(Insn &insn) { dstOpnd.SetValidBitsNum(newVB); break; } - case MOP_wiorrri12: + case MOP_wiorrri12: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()); + uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB); + if (newVB > k32BitSize) { + newVB = k32BitSize; + } + dstOpnd.SetValidBitsNum(newVB); + break; + } case MOP_xiorrri13: { auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); @@ -243,7 +401,7 @@ static bool IsZeroRegister(const Operand &opnd) { bool AndValidBitPattern::CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const { if ((__builtin_ffs(static_cast(andImm)) - 1 == shiftImm) && ((static_cast(andImm) >> static_cast(shiftImm)) == - ((1 << (andImmVB - static_cast(shiftImm))) - 1))) { + ((1UL << (andImmVB - static_cast(shiftImm))) - 1))) { return true; } return false; @@ -298,17 +456,108 @@ void AndValidBitPattern::Run(BB &bb, Insn &insn) { if (!CheckCondition(insn)) { return; } - Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *desReg, *srcReg); - bb.ReplaceInsn(insn, newInsn); - /* update ssa info */ - ssaInfo->ReplaceInsn(insn, newInsn); - ssaInfo->InsertSafePropInsn(newInsn.GetId()); - /* dump pattern info */ - if (CG_VALIDBIT_OPT_DUMP) { - std::vector prevs; - prevs.emplace_back(&insn); - DumpAfterPattern(prevs, &insn, &newInsn); + if (desReg != nullptr && desReg->IsSSAForm() && srcReg != 
nullptr && srcReg->IsSSAForm()) { + destVersion = ssaInfo->FindSSAVersion(desReg->GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = ssaInfo->FindSSAVersion(srcReg->GetRegisterNumber()); + ASSERT(srcVersion != nullptr, "find Version failed"); + //prop ssa info + cgFunc->InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + ReplaceImplicitCvtAndProp(destVersion, srcVersion); + } else { + return; + } +} + +bool ExtValidBitPattern::RealUseMopX(const RegOperand &defOpnd, InsnSet &visitedInsn) { + VRegVersion *vdestVersion = ssaInfo->FindSSAVersion(defOpnd.GetRegisterNumber()); + for (auto destUseIt : vdestVersion->GetAllUseInsns()) { + Insn *useInsn = destUseIt.second->GetInsn(); + if (visitedInsn.count(useInsn) != 0) { + continue; + } + visitedInsn.insert(useInsn); + if (useInsn->IsPhi()) { + auto &phiDefOpnd = useInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(phiDefOpnd.IsRegister(), "must be register"); + auto &phiRegDefOpnd = static_cast(phiDefOpnd); + if (RealUseMopX(phiRegDefOpnd, visitedInsn)) { + return true; + } + } + if (useInsn->GetMachineOpcode() == MOP_xuxtw64) { + return true; + } + const InsnDesc *useMD = &AArch64CG::kMd[useInsn->GetMachineOpcode()]; + for (auto &opndUseIt : as_const(destUseIt.second->GetOperands())) { + const OpndDesc *useProp = useMD->GetOpndDes(opndUseIt.first); + if (useProp->GetSize() == k64BitSize) { + return true; + } + } } + return false; +} + +bool ExtValidBitPattern::CheckValidCvt(const Insn &insn) { + // extend to all shift pattern in future + RegOperand *destOpnd = nullptr; + RegOperand *srcOpnd = nullptr; + if (insn.GetMachineOpcode() == MOP_xuxtw64) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + } + if (insn.GetMachineOpcode() == MOP_xubfxrri6i6) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &lsb = 
static_cast(insn.GetOperand(kInsnThirdOpnd)); + auto &width = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if ((lsb.GetValue() != 0) || (width.GetValue() != k32BitSize)) { + return false; + } + } + if (destOpnd != nullptr && destOpnd->IsSSAForm() && srcOpnd != nullptr && srcOpnd->IsSSAForm()) { + destVersion = ssaInfo->FindSSAVersion(destOpnd->GetRegisterNumber()); + srcVersion = ssaInfo->FindSSAVersion(srcOpnd->GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + for (auto destUseIt : destVersion->GetAllUseInsns()) { + Insn *useInsn = destUseIt.second->GetInsn(); + // check case: + // uxtw R1 R0 + // uxtw R2 R1 + if (useInsn->GetMachineOpcode() == MOP_xuxtw64) { + return false; + } + // recursively check all real use mop, if there is one mop that use 64 bit size reg, do not optimize + if (useInsn->IsPhi()) { + auto &defOpnd = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + InsnSet visitedInsn; + (void)visitedInsn.insert(useInsn); + if (RealUseMopX(defOpnd, visitedInsn)) { + return false; + } + } + int32 lastOpndId = static_cast(useInsn->GetOperandSize() - 1); + const InsnDesc *md = useInsn->GetDesc(); + // check case: + // uxtw R1 R0 + // mopX R2 R1(64) + for (int32 i = lastOpndId; i >= 0; --i) { + auto *reg = (md->opndMD[static_cast(i)]); + auto &opnd = useInsn->GetOperand(static_cast(i)); + if (reg->IsUse() && opnd.IsRegister() && + static_cast(opnd).GetRegisterNumber() == destOpnd->GetRegisterNumber()) { + if (reg->GetSize() == k32BitSize) { + continue; + } else { + return false; + } + } + } + } + return true; + } + return false; } bool ExtValidBitPattern::CheckCondition(Insn &insn) { @@ -327,12 +576,15 @@ bool ExtValidBitPattern::CheckCondition(Insn &insn) { break; } case MOP_xuxtw64: { - if (static_cast(srcOpnd).GetValidBitsNum() > k32BitSize || - static_cast(srcOpnd).IsPhysicalRegister()) { // Do not optimize callee ensuring vb of parameter - return false; + if (CheckValidCvt(insn) || 
(static_cast(srcOpnd).GetValidBitsNum() <= k32BitSize && + !static_cast(srcOpnd).IsPhysicalRegister())) { // Do not optimize callee ensuring vb of parameter + if (static_cast(srcOpnd).IsSSAForm() && srcVersion != nullptr) { + srcVersion->SetImplicitCvt(); + } + newMop = MOP_wmovrr; + break; } - newMop = MOP_wmovrr; - break; + return false; } case MOP_xsxtw64: { if (static_cast(srcOpnd).GetValidBitsNum() >= k32BitSize) { @@ -351,6 +603,13 @@ bool ExtValidBitPattern::CheckCondition(Insn &insn) { CHECK_FATAL(immOpnd2.IsImmediate(), "must be immediate"); int64 lsb = static_cast(immOpnd1).GetValue(); int64 width = static_cast(immOpnd2).GetValue(); + if (CheckValidCvt(insn)) { + if (static_cast(srcOpnd).IsSSAForm() && srcVersion != nullptr) { + srcVersion->SetImplicitCvt(); + } + newMop = MOP_xmovrr; + break; + } if (lsb != 0 || static_cast(srcOpnd).GetValidBitsNum() > width) { return false; } @@ -358,6 +617,9 @@ bool ExtValidBitPattern::CheckCondition(Insn &insn) { static_cast(srcOpnd).GetValidBitsNum() == width) { return false; } + if (static_cast(srcOpnd).IsSSAForm() && srcVersion != nullptr) { + srcVersion->SetImplicitCvt(); + } if (mOp == MOP_wubfxrri5i5 || mOp == MOP_wsbfxrri5i5) { newMop = MOP_wmovrr; } else if (mOp == MOP_xubfxrri6i6 || mOp == MOP_xsbfxrri6i6) { @@ -382,29 +644,30 @@ void ExtValidBitPattern::Run(BB &bb, Insn &insn) { case MOP_xuxtb32: case MOP_xuxth32: case MOP_xuxtw64: - case MOP_xsxtw64: { - insn.SetMOP(AArch64CG::kMd[newMop]); - if (newDstOpnd->GetSize() > newSrcOpnd->GetSize()) { - ssaInfo->InsertSafePropInsn(insn.GetId()); - } - break; - } + case MOP_xsxtw64: case MOP_wubfxrri5i5: case MOP_xubfxrri6i6: case MOP_wsbfxrri5i5: case MOP_xsbfxrri6i6: { - Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *newDstOpnd, *newSrcOpnd); - bb.ReplaceInsn(insn, newInsn); - /* update ssa info */ - ssaInfo->ReplaceInsn(insn, newInsn); - if (newDstOpnd->GetSize() > newSrcOpnd->GetSize() || newDstOpnd->GetSize() != newDstOpnd->GetValidBitsNum()) { - 
ssaInfo->InsertSafePropInsn(newInsn.GetId()); + // if dest is preg, change mop because there is no ssa version for preg + if (newDstOpnd != nullptr && newDstOpnd->IsPhysicalRegister() && newSrcOpnd != nullptr && + newSrcOpnd->IsSSAForm()) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *newDstOpnd, *newSrcOpnd); + bb.ReplaceInsn(insn, newInsn); + ssaInfo->ReplaceInsn(insn, newInsn); + if (newDstOpnd->GetSize() > newSrcOpnd->GetSize() || newDstOpnd->GetSize() != newDstOpnd->GetValidBitsNum()) { + ssaInfo->InsertSafePropInsn(newInsn.GetId()); + } + return; } - /* dump pattern info */ - if (CG_VALIDBIT_OPT_DUMP) { - std::vector prevs; - prevs.emplace_back(&insn); - DumpAfterPattern(prevs, &insn, &newInsn); + if (newDstOpnd != nullptr && newDstOpnd->IsSSAForm() && newSrcOpnd != nullptr && newSrcOpnd->IsSSAForm()) { + destVersion = ssaInfo->FindSSAVersion(newDstOpnd->GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = ssaInfo->FindSSAVersion(newSrcOpnd->GetRegisterNumber()); + ASSERT(srcVersion != nullptr, "find Version failed"); + cgFunc->InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + ReplaceImplicitCvtAndProp(destVersion, srcVersion); + return; } } default: diff --git a/src/mapleall/maple_be/src/cg/base_schedule.cpp b/src/mapleall/maple_be/src/cg/base_schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..25c201765af52aeb320931eba98f66404a1aa3c6 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/base_schedule.cpp @@ -0,0 +1,89 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include "base_schedule.h" + +namespace maplebe { +/* + * Set insnId to guarantee default priority, + * Set locInsn to maintain debug info + */ +void BaseSchedule::InitInsnIdAndLocInsn() { + uint32 id = 0; + FOR_ALL_BB(bb, &cgFunc) { + bb->SetLastLoc(bb->GetPrev() ? bb->GetPrev()->GetLastLoc() : nullptr); + FOR_BB_INSNS(insn, bb) { + insn->SetId(id++); +#if defined(DEBUG) && DEBUG + insn->AppendComment(" Insn id: " + std::to_string(insn->GetId())); +#endif + if (insn->IsImmaterialInsn() && !insn->IsComment()) { + bb->SetLastLoc(insn); + } else if (!bb->GetFirstLoc() && insn->IsMachineInstruction()) { + bb->SetFirstLoc(*bb->GetLastLoc()); + } + } + } +} + +void BaseSchedule::InitInRegion(CDGRegion ®ion) const { + // Init valid dependency size for scheduling + for (auto cdgNode : region.GetRegionNodes()) { + for (auto depNode : cdgNode->GetAllDataNodes()) { + depNode->SetState(kNormal); + depNode->SetValidPredsSize(depNode->GetPreds().size()); + depNode->SetValidSuccsSize(depNode->GetSuccs().size()); + } + } +} + +void BaseSchedule::DumpRegionInfoBeforeSchedule(CDGRegion ®ion) const { + LogInfo::MapleLogger() << "---------------- Schedule Region_" << region.GetRegionId() << " ----------------\n\n"; + LogInfo::MapleLogger() << "## total number of blocks: " << region.GetRegionNodeSize() << "\n\n"; + LogInfo::MapleLogger() << "## topological order of blocks in region: {"; + for (uint32 i = 0; i < region.GetRegionNodeSize(); ++i) { + BB *bb = region.GetRegionNodes()[i]->GetBB(); + ASSERT(bb != nullptr, "get bb from cdgNode failed"); + LogInfo::MapleLogger() << "bb_" << bb->GetId(); + if (i != region.GetRegionNodeSize() - 1) { 
+ LogInfo::MapleLogger() << ", "; + } else { + LogInfo::MapleLogger() << "}\n\n"; + } + } +} + +void BaseSchedule::DumpCDGNodeInfoBeforeSchedule(CDGNode &cdgNode) const { + BB *curBB = cdgNode.GetBB(); + ASSERT(curBB != nullptr, "get bb from cdgNode failed"); + LogInfo::MapleLogger() << "= = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n"; + LogInfo::MapleLogger() << "## -- bb_" << curBB->GetId() << " before schedule --\n\n"; + LogInfo::MapleLogger() << " >> candidates info of bb_" << curBB->GetId() << " <<\n\n"; + curBB->Dump(); + LogInfo::MapleLogger() << "\n"; + DumpInsnInfoByScheduledOrder(*curBB); +} + +void BaseSchedule::DumpCDGNodeInfoAfterSchedule(CDGNode &cdgNode) const { + BB *curBB = cdgNode.GetBB(); + ASSERT(curBB != nullptr, "get bb from cdgNode failed"); + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "## -- bb_" << curBB->GetId() << " after schedule --\n"; + LogInfo::MapleLogger() << " ideal total cycles: " << (doDelayHeu ? listScheduler->GetMaxDelay() : listScheduler->GetMaxLStart()) << "\n"; + LogInfo::MapleLogger() << " sched total cycles: " << listScheduler->GetCurrCycle() << "\n\n"; + curBB->Dump(); + LogInfo::MapleLogger() << " = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n\n"; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cfgo.cpp b/src/mapleall/maple_be/src/cg/cfgo.cpp index 4ad528807e78beac36ea1ac796543403702e64fc..523b226145c57caba98571937f71e0e5b76799ed 100644 --- a/src/mapleall/maple_be/src/cg/cfgo.cpp +++ b/src/mapleall/maple_be/src/cg/cfgo.cpp @@ -325,8 +325,8 @@ bool SequentialJumpPattern::Optimize(BB &curBB) { if (curBB.GetKind() == BB::kBBGoto && !curBB.IsEmpty()) { BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); CHECK_FATAL(sucBB != nullptr, "sucBB is null in SequentialJumpPattern::Optimize"); - BB *tragetBB = CGCFG::GetTargetSuc(*sucBB); - if ((sucBB != &curBB) && sucBB->IsSoloGoto() && tragetBB != nullptr && tragetBB != sucBB) { + BB *targetBB = 
CGCFG::GetTargetSuc(*sucBB); + if (sucBB != &curBB && sucBB->IsSoloGoto() && targetBB != nullptr && targetBB != sucBB && !HasInvalidPred(*sucBB)) { Log(curBB.GetId()); if (checkOnly) { return false; @@ -337,14 +337,35 @@ bool SequentialJumpPattern::Optimize(BB &curBB) { } } else if (curBB.GetKind() == BB::kBBIf) { for (BB *sucBB : curBB.GetSuccs()) { - BB *tragetBB = CGCFG::GetTargetSuc(*sucBB); - if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && - tragetBB != nullptr && tragetBB != sucBB) { + BB *sucTargetBB = CGCFG::GetTargetSuc(*sucBB); + if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && sucTargetBB != nullptr && sucTargetBB != sucBB && + !HasInvalidPred(*sucBB)) { Log(curBB.GetId()); if (checkOnly) { return false; } - cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB); + // e.g. + // BB12[if] (curBB) + // beq label:11 + // / \ + // / BB25[if] (label: 11) + // \ / + // BB13[goto] (sucBB) + // | + // BB6[ft] (targetBB) (label: 6) + // For the above case, the ifBB can not modify the target label of the conditional jump insn, + // because the target of the conditional jump insn is the other succBB(BB25). + BB *ifTargetBB = CGCFG::GetTargetSuc(curBB); + CHECK_NULL_FATAL(ifTargetBB); + // In addition, if the targetBB(ifTargetBB) of the ifBB is not the gotoBB(sucBB), and the targetBB(sucTargetBB) + // of the sucBB is not its next, we can not do the optimization, because it will change the layout of the + // sucTargetBB, and it does not necessarily improve performance. 
+ if (ifTargetBB != sucBB && sucBB->GetNext() != sucTargetBB) { + return false; + } + if (ifTargetBB == sucBB) { + cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB); + } SkipSucBB(curBB, *sucBB); return true; } @@ -352,7 +373,7 @@ bool SequentialJumpPattern::Optimize(BB &curBB) { } else if (curBB.GetKind() == BB::kBBRangeGoto) { bool changed = false; for (BB *sucBB : curBB.GetSuccs()) { - if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && + if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && !HasInvalidPred(*sucBB) && cgFunc->GetTheCFG()->GetTargetSuc(*sucBB) != nullptr) { Log(curBB.GetId()); if (checkOnly) { @@ -381,7 +402,7 @@ void SequentialJumpPattern::UpdateSwitchSucc(BB &curBB, BB &sucBB) const { } for (size_t i = 0; i < labelVec.size(); ++i) { if (labelVec[i] == sucBB.GetLabIdx()) { - curBB.SetRangeGotoLabel(i, gotoTarget->GetLabIdx()); + curBB.SetRangeGotoLabel(static_cast(i), gotoTarget->GetLabIdx()); } } cgFunc->UpdateEmitSt(curBB, sucBB.GetLabIdx(), gotoTarget->GetLabIdx()); @@ -431,6 +452,34 @@ void SequentialJumpPattern::UpdateSwitchSucc(BB &curBB, BB &sucBB) const { } } +bool SequentialJumpPattern::HasInvalidPred(BB &sucBB) const { + for (auto predIt = sucBB.GetPredsBegin(); predIt != sucBB.GetPredsEnd(); ++predIt) { + if ((*predIt)->GetKind() != BB::kBBGoto && (*predIt)->GetKind() != BB::kBBIf && + (*predIt)->GetKind() != BB::kBBRangeGoto && (*predIt)->GetKind() != BB::kBBFallthru) { + return true; + } + if ((*predIt)->GetKind() == BB::kBBIf) { + BB *ifTargetBB = CGCFG::GetTargetSuc(**predIt); + CHECK_NULL_FATAL(ifTargetBB); + BB *sucTargetBB = CGCFG::GetTargetSuc(sucBB); + CHECK_NULL_FATAL(sucTargetBB); + if (ifTargetBB != &sucBB && sucBB.GetNext() != sucTargetBB) { + return true; + } + } + if ((*predIt)->GetKind() == BB::kBBIf || (*predIt)->GetKind() == BB::kBBRangeGoto) { + if ((*predIt)->GetNext() == &sucBB) { + return true; + } + } else if ((*predIt)->GetKind() == BB::kBBGoto) { + if (*predIt == &sucBB || (*predIt)->IsEmpty()) { + return 
true; + } + } + } + return false; +} + /* * preCond: * sucBB is one of curBB's successor. @@ -444,7 +493,60 @@ void SequentialJumpPattern::SkipSucBB(BB &curBB, BB &sucBB) const { curBB.PushBackSuccs(*gotoTarget); sucBB.RemovePreds(curBB); gotoTarget->PushBackPreds(curBB); + // If the sucBB needs to be skipped, all preds of the sucBB must skip it and update cfg info. + // e.g. + // BB3[if] (curBB) + // / \ + // / BB6[if] + // \ / + // BB10[goto] (sucBB) + // | + // BB8 (gotoTarget) + for (auto *predBB : sucBB.GetPreds()) { + if (predBB->GetKind() == BB::kBBGoto) { + cgFunc->GetTheCFG()->RetargetJump(sucBB, *predBB); + } else if (predBB->GetKind() == BB::kBBIf) { + BB *ifTargetBB = CGCFG::GetTargetSuc(*predBB); + if (ifTargetBB == &sucBB) { + cgFunc->GetTheCFG()->RetargetJump(sucBB, *predBB); + } + } else if (predBB->GetKind() == BB::kBBFallthru) { + // e.g. + // (curBB) BB70[goto] BB27[if] + // \ / \ + // \ / \ + // \ BB71[ft] (iterPredBB) \ + // \ / \ + // BB48[goto] (sucBB) BB28[ft] + // | / + // | / + // BB29[if] (gotoTarget) + ASSERT_NOT_NULL(cgFunc->GetTheCFG()->GetInsnModifier()); + cgFunc->GetTheCFG()->GetInsnModifier()->ModifyFathruBBToGotoBB(*predBB, gotoTarget->GetLabIdx()); + } else if (predBB->GetKind() == BB::kBBRangeGoto) { + UpdateSwitchSucc(*predBB, sucBB); + } + predBB->RemoveSuccs(sucBB); + sucBB.RemovePreds(*predBB); + predBB->PushBackSuccs(*gotoTarget); + gotoTarget->PushBackPreds(*predBB); + } cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(sucBB, *cgFunc); + // LastBB cannot be removed from the preds of succBB by FlushUnReachableStatusAndRemoveRelations, Why? + // We'll do a separate process below for the case that sucBB is LastBB. + if (sucBB.GetKind() == BB::kBBGoto && &sucBB == cgFunc->GetLastBB()) { + // gotoBB has only one succ. 
+ ASSERT(sucBB.GetSuccsSize() == 1, "invalid gotoBB"); + sucBB.SetUnreachable(true); + sucBB.SetFirstInsn(nullptr); + sucBB.SetLastInsn(nullptr); + gotoTarget->RemovePreds(sucBB); + sucBB.RemoveSuccs(*gotoTarget); + } + // Remove the unreachableBB which has been skipped + if (sucBB.IsUnreachable()) { + cgFunc->GetTheCFG()->RemoveBB(sucBB); + } } /* @@ -691,26 +793,46 @@ bool FlipBRPattern::Optimize(BB &curBB) { /* remove a basic block that contains nothing */ bool EmptyBBPattern::Optimize(BB &curBB) { - if (curBB.IsUnreachable()) { + // Can not remove the BB whose address is referenced by adrp_label insn + if (curBB.IsUnreachable() || curBB.IsAdrpLabel()) { return false; } /* Empty bb and it's not a cleanupBB/returnBB/lastBB/catchBB. */ - if (curBB.GetPrev() != nullptr && !curBB.IsCleanup() && - ((curBB.GetFirstInsn() == nullptr && curBB.GetLastInsn() == nullptr) || - (curBB.GetFirstInsn()->IsDbgInsn() && curBB.GetLastInsn() == curBB.GetFirstInsn())) && - &curBB != cgFunc->GetLastBB() && - curBB.GetKind() != BB::kBBReturn && !IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx())) { + if (curBB.GetPrev() == nullptr || curBB.IsCleanup() || &curBB == cgFunc->GetLastBB() || + curBB.GetKind() == BB::kBBReturn || IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx())) { + return false; + } + + if (curBB.GetFirstInsn() == nullptr && curBB.GetLastInsn() == nullptr) { + // empty BB Log(curBB.GetId()); if (checkOnly) { return false; } - BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); if (sucBB == nullptr || sucBB->IsCleanup()) { return false; } cgFunc->GetTheCFG()->RemoveBB(curBB); - /* removeBB may do nothing. since no need to repeat, always ret false here. */ + // removeBB may do nothing. since no need to repeat, always ret false here. 
+ return false; + } else if (!curBB.HasMachineInsn()) { + // BB only has dbg insn + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + if (sucBB == nullptr || sucBB->IsCleanup()) { + return false; + } + // For Now We try to sink first conservatively. + // All dbg insns should not be dropped. Later hoist or copy case should be considered. + if (curBB.NumSuccs() == 1) { + BB *succBB = curBB.GetSuccs().front(); + succBB->InsertAtBeginning(curBB); + cgFunc->GetTheCFG()->RemoveBB(curBB); + } return false; } return false; @@ -822,6 +944,9 @@ bool UnreachBBPattern::Optimize(BB &curBB) { * 2. curBB can't have cfi instruction when postcfgo. */ bool DuplicateBBPattern::Optimize(BB &curBB) { + if (!cgFunc->IsAfterRegAlloc()) { + return false; + } if (curBB.IsUnreachable()) { return false; } @@ -879,6 +1004,9 @@ bool DuplicateBBPattern::Optimize(BB &curBB) { } bb->RemoveInsn(*bb->GetLastInsn()); FOR_BB_INSNS(insn, (&curBB)) { + if (!insn->IsMachineInstruction()) { + continue; + } Insn *clonedInsn = cgFunc->GetTheCFG()->CloneInsn(*insn); clonedInsn->SetPrev(nullptr); clonedInsn->SetNext(nullptr); @@ -901,6 +1029,27 @@ bool DuplicateBBPattern::Optimize(BB &curBB) { } /* === new pm === */ +bool CgPreCfgo::PhaseRun(maplebe::CGFunc &f) { + CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + const std::string &name = funcClass + funcName; + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("before-precfgo", f, f.GetMirModule()); + } + cfgOptimizer->Run(name); + // This phase modifies the cfg, which affects the loop analysis result, + // so we need to run loop-analysis again. 
+ GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLoopAnalysis::id); + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + if (CFGO_DUMP_NEWPM) { + f.GetTheCFG()->CheckCFG(); + DotGenerator::GenerateDot("after-precfgo", f, f.GetMirModule()); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPreCfgo, precfgo) + bool CgCfgo::PhaseRun(maplebe::CGFunc &f) { CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); if (f.IsAfterRegAlloc()) { @@ -930,6 +1079,8 @@ bool CgPostCfgo::PhaseRun(maplebe::CGFunc &f) { const std::string &funcClass = f.GetFunction().GetBaseClassName(); const std::string &funcName = f.GetFunction().GetBaseFuncName(); const std::string &name = funcClass + funcName; + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + cfgOptimizer->SetPhase(kPostCfgo); if (CFGO_DUMP_NEWPM) { DotGenerator::GenerateDot("before-postcfgo", f, f.GetMirModule()); } @@ -937,6 +1088,8 @@ bool CgPostCfgo::PhaseRun(maplebe::CGFunc &f) { if (CFGO_DUMP_NEWPM) { DotGenerator::GenerateDot("after-postcfgo", f, f.GetMirModule()); } + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLoopAnalysis::id); + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); return false; } MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostCfgo, postcfgo) diff --git a/src/mapleall/maple_be/src/cg/cfi_generator.cpp b/src/mapleall/maple_be/src/cg/cfi_generator.cpp index fc13e08db167efb2c46192a969796e78be236d70..8dba511bca7d3b2640ccbcd28980be8d35aabdd9 100644 --- a/src/mapleall/maple_be/src/cg/cfi_generator.cpp +++ b/src/mapleall/maple_be/src/cg/cfi_generator.cpp @@ -42,11 +42,11 @@ void GenCfi::InsertCFIDefCfaOffset(BB &bb, Insn &insn, int32 &cfiOffset) { } void GenCfi::GenerateStartDirective(BB &bb) { - Insn &startprocInsn = cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_startproc); + Insn &startProcInsn = 
cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_startproc); if (bb.GetFirstInsn() != nullptr) { - (void)bb.InsertInsnBefore(*bb.GetFirstInsn(), startprocInsn); + (void)bb.InsertInsnBefore(*bb.GetFirstInsn(), startProcInsn); } else { - bb.AppendInsn(startprocInsn); + bb.AppendInsn(startProcInsn); } #if !defined(TARGARM32) @@ -58,7 +58,7 @@ void GenCfi::GenerateStartDirective(BB &bb) { Insn &personality = cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_personality_symbol).AddOpndChain( cgFunc.CreateCfiImmOperand(EHFunc::kTypeEncoding, k8BitSize)).AddOpndChain( cgFunc.CreateCfiStrOperand("DW.ref.__mpl_personality_v0")); - bb.InsertInsnAfter(startprocInsn, personality); + bb.InsertInsnAfter(startProcInsn, personality); } #endif } @@ -121,6 +121,9 @@ void GenCfi::Run() { } GenerateEndDirective(*(cgFunc.GetLastBB())); + if (cgFunc.GetLastBB()->IsUnreachable()) { + cgFunc.SetExitBBLost(true); + } } bool CgGenCfi::PhaseRun(maplebe::CGFunc &f) { diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp index ab07af8ce5f98d69399a20385e6137a51b3d9f5b..7cc8540fd72ad76b79add32b6e8490e2c9a51617 100644 --- a/src/mapleall/maple_be/src/cg/cg.cpp +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -153,11 +153,12 @@ void CG::AddStackGuardvar() const { chkGuard->SetTyIdx(GlobalTables::GetTypeTable().GetTypeTable()[PTY_u64]->GetTypeIndex()); GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkGuard); - MIRSymbol *chkFunc = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); - chkFunc->SetNameStrIdx(std::string("__stack_chk_fail")); - chkFunc->SetStorageClass(kScText); - chkFunc->SetSKind(kStFunc); - GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkFunc); + MIRFunction *func = GetMIRModule()->GetMIRBuilder()->GetOrCreateFunction("__stack_chk_fail", + GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex()); + MIRSymbol *chkFuncSym = func->GetFuncSymbol(); + chkFuncSym->SetAppearsInCode(true); + chkFuncSym->SetStorageClass(kScExtern); + 
GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkFuncSym); } #define DBG_TRACE_ENTER MplDtEnter @@ -218,8 +219,8 @@ static void AppendReferenceOffsets64(const BECommon &beCommon, MIRStructType &cu auto &fieldTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldType->GetNameStrIdx()); auto fieldTypeKind = fieldType->GetKind(); - auto fieldSize = beCommon.GetTypeSize(fieldTypeIdx); - auto fieldAlign = beCommon.GetTypeAlign(fieldTypeIdx); + auto fieldSize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTypeIdx)->GetSize(); + auto fieldAlign = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTypeIdx)->GetAlign(); int64 myOffset = static_cast(RoundUp(curOffset, fieldAlign)); int64 nextOffset = myOffset + static_cast(fieldSize); diff --git a/src/mapleall/maple_be/src/cg/cg_callgraph_reorder.cpp b/src/mapleall/maple_be/src/cg/cg_callgraph_reorder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ad89945dbbda966e9e219802736b354d0cb63e5c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_callgraph_reorder.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "cg_callgraph_reorder.h" + +#include +#include + +namespace maple { +struct Edge { + uint32 src; + uint64 weight; +}; + +class Cluster { + public: + Cluster(uint32 i, uint64 s, uint64 weight) + : leader(i), next(i), prev(i), size(s), weight(weight), mostLikelyEdge{i, 0} {} + + double getDensity() const { + if (size == 0) { + return 0; + } + return double(weight) / double(size); + } + + uint32 leader; + uint32 next; + uint32 prev; + uint64 size; + uint64 weight; + Edge mostLikelyEdge; +}; + +class FuncSection { + public: + std::string funcName; + uint64 weight; + uint64 size; +}; + +static std::vector clusters; +static std::vector funcs; + +// reading function profile of the following format: +// calleeName calleeWeight calleeSize callerName callerWeight callerSize edgeWeight +// ... +static void ReadProfile(const std::string &path) { + std::ifstream fs(path); + if (!fs) { + LogInfo::MapleLogger() << "WARN: failed to open " << path << '\n'; + return; + } + std::string line; + std::map funcName2Cluster; + auto getOrCreateNode = [&funcName2Cluster](const FuncSection &f) { + auto res = funcName2Cluster.insert({f.funcName, clusters.size()}); + if (res.second) { + clusters.emplace_back(clusters.size(), f.size, f.weight); + funcs.push_back(f); + } + return res.first->second; + }; + + while (std::getline(fs, line)) { + std::istringstream ss(line); + std::string calleeName, callerName; + uint64 calleeSize, callerSize, calleeWeight, callerWeight, edgeWeight; + ss >> calleeName >> calleeWeight >> calleeSize >> callerName >> callerWeight >> callerSize >> edgeWeight; + if (!ss) { + LogInfo::MapleLogger() << "WARN: unexpected format in Function Priority File" << '\n'; + return; + } + FuncSection callee{calleeName, calleeWeight, calleeSize}; + FuncSection caller{callerName, callerWeight, callerSize}; + uint32 src = getOrCreateNode(caller); + uint32 dst = getOrCreateNode(callee); + // recursive call + if (src == dst) { + continue; + } + auto &c = clusters[dst]; + 
if (c.mostLikelyEdge.weight < edgeWeight) { + c.mostLikelyEdge.src = src; + c.mostLikelyEdge.weight = edgeWeight; + } + } +} + +// union-find with path-halving +static uint32 GetLeader(uint32 src) { + uint32 v = src; + while (clusters[v].leader != v) { + v = clusters[v].leader; + } + clusters[src].leader = v; + return v; +} + +static void MergeClusters(Cluster &dst, uint32 dstIdx, Cluster &src, uint32 srcIdx) { + uint32 tail1 = dst.prev, tail2 = src.prev; + dst.prev = tail2; + clusters[tail2].next = dstIdx; + src.prev = tail1; + clusters[tail1].next = srcIdx; + dst.size += src.size; + dst.weight += src.weight; + src.size = 0; + src.weight = 0; +} + +std::map ReorderAccordingProfile(const std::string &path) { + ReadProfile(path); + constexpr uint32 kMaxDensityDegradation = 8; + constexpr uint64 kMaxClusterSize = 1024 * 1024; + constexpr uint32 kUnlikelyThreshold = 10; + std::vector sortedIdx(clusters.size()); + std::iota(sortedIdx.begin(), sortedIdx.end(), 0); + // sort the cluster's idx by density decreasing + std::stable_sort(sortedIdx.begin(), sortedIdx.end(), + [](uint32 a, uint32 b) { return clusters[a].getDensity() > clusters[b].getDensity(); }); + for (auto idx : sortedIdx) { + Cluster &c = clusters[idx]; + // skip if c is root of callgraph or the edge is not likely + if (c.mostLikelyEdge.src == idx || c.mostLikelyEdge.weight * kUnlikelyThreshold <= funcs[idx].weight) { + continue; + } + uint32 leader = GetLeader(c.mostLikelyEdge.src); + if (leader == idx) { + continue; + } + auto &dst = clusters[leader]; + if (c.size + dst.size > kMaxClusterSize) { + continue; + } + auto newDensity = double(dst.weight + c.weight) / double(dst.size + c.size); + // if the Cluster density degradate too much after merge, don't merge; + if (newDensity * kMaxDensityDegradation < c.getDensity()) { + continue; + } + c.leader = leader; + MergeClusters(dst, leader, c, idx); + } + auto iter = std::remove_if(sortedIdx.begin(), sortedIdx.end(), [](uint32 idx) { return clusters[idx].size 
<= 0; }); + sortedIdx.erase(iter, sortedIdx.end()); + + std::stable_sort(sortedIdx.begin(), sortedIdx.end(), + [](uint32 a, uint32 b) { return clusters[a].getDensity() > clusters[b].getDensity(); }); + std::map result; + uint32 order = 1; + for (auto idx : sortedIdx) { + for (uint32 i = idx;;) { + result[funcs[i].funcName] = order++; + i = clusters[i].next; + if (i == idx) { + break; + } + } + } + return result; +} + +} // namespace maple diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp index dd48324b5fe6b9116bf1d3f5144b997330972187..c2abe23b6d2d6a9a467091be0354ac2b36669c1c 100644 --- a/src/mapleall/maple_be/src/cg/cg_cfg.cpp +++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -18,12 +18,12 @@ #elif defined(TARGRISCV64) && TARGRISCV64 #include "riscv64_insn.h" #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 #include "arm32_insn.h" #endif #include "cg_option.h" #include "mpl_logging.h" -#if TARGX86_64 +#if defined(TARGX86_64) && TARGX86_64 #include "x64_cgfunc.h" #include "cg.h" #endif @@ -87,7 +87,7 @@ bool CanBBThrow(const BB &bb) { } namespace maplebe { -void CGCFG::BuildCFG() const { +void CGCFG::BuildCFG() { /* * Second Pass: * Link preds/succs in the BBs @@ -197,10 +197,10 @@ void CGCFG::BuildCFG() const { static inline uint32 CRC32Compute(uint32_t crc, uint32 val) { crc ^= 0xFFFFFFFFU; - for (int32 idx = 3; idx >=0 ; idx--) { + for (int32 idx = 3; idx >= 0; idx--) { uint8 byteVal = (val >> (static_cast(idx) * k8BitSize)) & 0xffu; - int TableIdx = (crc ^ byteVal) & 0xffu; - crc = CRCTable[TableIdx] ^ (crc >> k8BitSize); + uint32_t tableIdx = (crc ^ byteVal) & 0xffu; + crc = CRCTable[tableIdx] ^ (crc >> k8BitSize); } return crc ^ 0xFFFFFFFFU; } @@ -210,7 +210,7 @@ uint32 CGCFG::ComputeCFGHash() { FOR_ALL_BB(bb, cgFunc) { hash = CRC32Compute (hash, bb->GetId()); for (BB *sucBB : bb->GetSuccs()) { - hash = CRC32Compute (hash ,sucBB->GetId()); + hash = CRC32Compute (hash, sucBB->GetId()); } } return hash; @@ 
-222,7 +222,7 @@ void CGCFG::CheckCFG() { bool found = false; for (BB *sucPred : sucBB->GetPreds()) { if (sucPred == bb) { - if (found == false) { + if (!found) { found = true; } else { LogInfo::MapleLogger() << "dup pred " << sucPred->GetId() << " for sucBB " << sucBB->GetId() << "\n"; @@ -230,7 +230,7 @@ void CGCFG::CheckCFG() { } } } - if (found == false) { + if (!found) { LogInfo::MapleLogger() << "non pred for sucBB " << sucBB->GetId() << " for BB " << bb->GetId() << "\n"; CHECK_FATAL_FALSE("CG_CFG check failed !"); } @@ -241,7 +241,7 @@ void CGCFG::CheckCFG() { bool found = false; for (BB *predSucc : predBB->GetSuccs()) { if (predSucc == bb) { - if (found == false) { + if (!found) { found = true; } else { LogInfo::MapleLogger() << "dup succ " << predSucc->GetId() << " for predBB " << predBB->GetId() << "\n"; @@ -249,7 +249,7 @@ void CGCFG::CheckCFG() { } } } - if (found == false) { + if (!found) { LogInfo::MapleLogger() << "non succ for predBB " << predBB->GetId() << " for BB " << bb->GetId() << "\n"; CHECK_FATAL_FALSE("CG_CFG check failed !"); } @@ -260,7 +260,7 @@ void CGCFG::CheckCFG() { void CGCFG::CheckCFGFreq() { auto verifyBBFreq = [this](const BB *bb, uint32 succFreq) { uint32 res = bb->GetFrequency(); - if ((res != 0 && static_cast(abs(static_cast(res - succFreq))) / res > 1.0) || + if ((res != 0 && static_cast(abs(static_cast(res - succFreq))) / res > 1) || (res == 0 && res != succFreq)) { // Not included if (bb->GetSuccs().size() > 1 && bb->GetPreds().size() > 1) { @@ -325,6 +325,11 @@ bool CGCFG::BBJudge(const BB &first, const BB &second) const { if (first.GetKind() == BB::kBBReturn || second.GetKind() == BB::kBBReturn) { return false; } + // If the address of firstBB or secondBB is referenced by adrp_label insn, + // it can not be merged + if (first.IsAdrpLabel() || second.IsAdrpLabel()) { + return false; + } if (&first == &second) { return false; } @@ -378,21 +383,29 @@ void CGCFG::MergeBB(BB &merger, BB &mergee, CGFunc &func) { 
func.SetLastBB(*prevLast); } if (mergee.GetKind() == BB::kBBReturn) { - for (size_t i = 0; i < func.ExitBBsVecSize(); ++i) { - if (func.GetExitBB(i) == &mergee) { - func.EraseExitBBsVec(func.GetExitBBsVec().begin() + i); + auto retIt = func.GetExitBBsVec().begin(); + while (retIt != func.GetExitBBsVec().end()) { + if (*retIt == &mergee) { + (void)func.EraseExitBBsVec(retIt); + break; + } else { + ++retIt; } } func.PushBackExitBBsVec(merger); } - /* if mergee is infinite loop */ - BB *commonExit = func.GetCommonExitBB(); - const auto exitPredIt = std::find(commonExit->GetPredsBegin(), commonExit->GetPredsEnd(), &mergee); - if (exitPredIt != commonExit->GetPredsEnd()) { - commonExit->ErasePreds(exitPredIt); - commonExit->PushBackPreds(merger); + if (mergee.GetKind() == BB::kBBNoReturn) { + auto noRetIt = func.GetNoRetCallBBVec().begin(); + while (noRetIt != func.GetNoRetCallBBVec().end()) { + if (*noRetIt == &mergee) { + (void)func.EraseNoReturnCallBB(noRetIt); + break; + } else { + ++noRetIt; + } + } + func.PushBackNoReturnCallBBsVec(merger); } - if (mergee.GetKind() == BB::kBBRangeGoto) { func.AddEmitSt(merger.GetId(), *func.GetEmitSt(mergee.GetId())); func.DeleteEmitSt(mergee.GetId()); @@ -461,7 +474,7 @@ void CGCFG::FindAndMarkUnreachable(CGFunc &func) { /* Check if bb is the cleanupBB/switchTableBB/firstBB/lastBB of the function */ if (bb->IsCleanup() || InSwitchTable(bb->GetLabIdx(), func) || bb == func.GetFirstBB() || bb == func.GetLastBB()) { toBeAnalyzedBBs.push(bb); - } else if (bb->IsLabelTaken() == false) { + } else if (!(bb->IsLabelTaken())) { bb->SetUnreachable(true); } bb = bb->GetNext(); @@ -531,7 +544,7 @@ void CGCFG::FlushUnReachableStatusAndRemoveRelations(BB &bb, const CGFunc &func) it->GetEhPreds().empty() && !InSwitchTable(it->GetLabIdx(), *cgFunc) && !cgFunc->IsExitBB(*it) && - (it->IsLabelTaken() == false); + (!(it->IsLabelTaken())); if (!needFlush) { continue; } @@ -560,52 +573,55 @@ void CGCFG::FlushUnReachableStatusAndRemoveRelations(BB 
&bb, const CGFunc &func) } void CGCFG::RemoveBB(BB &curBB, bool isGotoIf) const { - BB *sucBB = CGCFG::GetTargetSuc(curBB, false, isGotoIf); - if (sucBB != nullptr) { - sucBB->RemovePreds(curBB); - } - BB *fallthruSuc = nullptr; - if (isGotoIf) { - for (BB *succ : curBB.GetSuccs()) { - if (succ == sucBB) { - continue; - } - fallthruSuc = succ; - break; - } - ASSERT(fallthruSuc == curBB.GetNext(), "fallthru succ should be its next bb."); - if (fallthruSuc != nullptr) { - fallthruSuc->RemovePreds(curBB); + if (!curBB.IsUnreachable()) { + BB *sucBB = CGCFG::GetTargetSuc(curBB, false, isGotoIf); + if (sucBB != nullptr) { + sucBB->RemovePreds(curBB); } - } - for (BB *preBB : curBB.GetPreds()) { - if (preBB->GetKind() == BB::kBBIgoto) { - sucBB->PushBackPreds(curBB); - return; + + BB *fallthruSuc = nullptr; + if (isGotoIf) { + for (BB *succ : curBB.GetSuccs()) { + if (succ == sucBB) { + continue; + } + fallthruSuc = succ; + break; + } + ASSERT(fallthruSuc == curBB.GetNext(), "fallthru succ should be its next bb."); + if (fallthruSuc != nullptr) { + fallthruSuc->RemovePreds(curBB); + } } - /* + for (BB *preBB : curBB.GetPreds()) { + if (preBB->GetKind() == BB::kBBIgoto) { + sucBB->PushBackPreds(curBB); + return; + } + /* * If curBB is the target of its predecessor, change * the jump target. 
*/ - if (&curBB == GetTargetSuc(*preBB, true, isGotoIf)) { - LabelIdx targetLabel; - if (curBB.GetNext()->GetLabIdx() == 0) { - targetLabel = insnVisitor->GetCGFunc()->CreateLabel(); - curBB.GetNext()->SetLabIdx(targetLabel); - } else { - targetLabel = curBB.GetNext()->GetLabIdx(); + if (&curBB == GetTargetSuc(*preBB, true, isGotoIf)) { + LabelIdx targetLabel; + if (curBB.GetNext()->GetLabIdx() == 0) { + targetLabel = insnVisitor->GetCGFunc()->CreateLabel(); + curBB.GetNext()->SetLabIdx(targetLabel); + } else { + targetLabel = curBB.GetNext()->GetLabIdx(); + } + insnVisitor->ModifyJumpTarget(targetLabel, *preBB); } - insnVisitor->ModifyJumpTarget(targetLabel, *preBB); - } - if (fallthruSuc != nullptr && !fallthruSuc->IsPredecessor(*preBB)) { - preBB->PushBackSuccs(*fallthruSuc); - fallthruSuc->PushBackPreds(*preBB); - } - if (sucBB != nullptr && !sucBB->IsPredecessor(*preBB)) { - preBB->PushBackSuccs(*sucBB); - sucBB->PushBackPreds(*preBB); + if (fallthruSuc != nullptr && !fallthruSuc->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*fallthruSuc); + fallthruSuc->PushBackPreds(*preBB); + } + if (sucBB != nullptr && !sucBB->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*sucBB); + sucBB->PushBackPreds(*preBB); + } + preBB->RemoveSuccs(curBB); } - preBB->RemoveSuccs(curBB); } for (BB *ehSucc : curBB.GetEhSuccs()) { @@ -621,7 +637,11 @@ void CGCFG::RemoveBB(BB &curBB, bool isGotoIf) const { } else { cgFunc->SetLastBB(*curBB.GetPrev()); } - curBB.GetPrev()->SetNext(curBB.GetNext()); + if (curBB.GetPrev() != nullptr) { + curBB.GetPrev()->SetNext(curBB.GetNext()); + } else { + cgFunc->SetFirstBB(*curBB.GetNext()); + } cgFunc->ClearBBInVec(curBB.GetId()); /* remove callsite */ EHFunc *ehFunc = cgFunc->GetEHFunc(); @@ -647,6 +667,23 @@ void CGCFG::RemoveBB(BB &curBB, bool isGotoIf) const { } } +void CGCFG::UpdateCommonExitBBInfo() { + BB *commonExitBB = cgFunc->GetCommonExitBB(); + ASSERT_NOT_NULL(commonExitBB); + commonExitBB->ClearPreds(); + for (BB *exitBB : 
cgFunc->GetExitBBsVec()) { + if (!exitBB->IsUnreachable()) { + commonExitBB->PushBackPreds(*exitBB); + } + } + for (BB *noRetBB : cgFunc->GetNoRetCallBBVec()) { + if (!noRetBB->IsUnreachable()) { + commonExitBB->PushBackPreds(*noRetBB); + } + } + WontExitAnalysis(); +} + void CGCFG::RetargetJump(BB &srcBB, BB &targetBB) const { insnVisitor->ModifyJumpTarget(srcBB, targetBB); } @@ -657,6 +694,7 @@ BB *CGCFG::GetTargetSuc(BB &curBB, bool branchOnly, bool isGotoIf) { case BB::kBBIntrinsic: case BB::kBBIf: { const Insn* origLastInsn = curBB.GetLastMachineInsn(); + ASSERT_NOT_NULL(origLastInsn); if (isGotoIf && (curBB.GetPrev() != nullptr) && (curBB.GetKind() == BB::kBBGoto || curBB.GetKind() == BB::kBBIf) && (curBB.GetPrev()->GetKind() == BB::kBBGoto || curBB.GetPrev()->GetKind() == BB::kBBIf)) { @@ -783,7 +821,7 @@ void CGCFG::UnreachCodeAnalysis() const { } else { (void)unreachBBs.insert(bb); } - if (bb->IsLabelTaken() == false) { + if (!(bb->IsLabelTaken())) { bb->SetUnreachable(true); } bb = bb->GetNext(); @@ -815,10 +853,20 @@ void CGCFG::UnreachCodeAnalysis() const { if (cgFunc->IsExitBB(*unreachBB) && !cgFunc->GetMirModule().IsCModule()) { unreachBB->SetUnreachable(false); } + for (auto exitBB = cgFunc->GetExitBBsVec().begin(); exitBB != cgFunc->GetExitBBsVec().end(); ++exitBB) { + if (*exitBB == unreachBB) { + if (!cgFunc->GetMirModule().IsCModule()) { + unreachBB->SetUnreachable(false); + } else { + cgFunc->GetExitBBsVec().erase(exitBB); + } + break; + } + } EHFunc *ehFunc = cgFunc->GetEHFunc(); /* if unreachBB InLSDA ,replace unreachBB's label with nextReachableBB before remove it. 
*/ if (ehFunc != nullptr && ehFunc->NeedFullLSDA() && - cgFunc->GetTheCFG()->InLSDA(unreachBB->GetLabIdx(), ehFunc)) { + maplebe::CGCFG::InLSDA(unreachBB->GetLabIdx(), ehFunc)) { /* find next reachable BB */ BB* nextReachableBB = nullptr; for (BB* curBB = unreachBB; curBB != nullptr; curBB = curBB->GetNext()) { @@ -838,7 +886,10 @@ void CGCFG::UnreachCodeAnalysis() const { } unreachBB->GetPrev()->SetNext(unreachBB->GetNext()); - unreachBB->GetNext()->SetPrev(unreachBB->GetPrev()); + cgFunc->GetCommonExitBB()->RemovePreds(*unreachBB); + if (unreachBB->GetNext()) { + unreachBB->GetNext()->SetPrev(unreachBB->GetPrev()); + } for (BB *sucBB : unreachBB->GetSuccs()) { sucBB->RemovePreds(*unreachBB); @@ -978,8 +1029,14 @@ BB *CGCFG::BreakCriticalEdge(BB &pred, BB &succ) const { } else { BB *exitBB = cgFunc->GetExitBBsVec().size() == 0 ? nullptr : cgFunc->GetExitBB(0); if (exitBB == nullptr || exitBB->IsUnreachable()) { - cgFunc->GetLastBB()->AppendBB(*newBB); - cgFunc->SetLastBB(*newBB); + if (cgFunc->GetLastBB()->IsUnreachable()) { + // nowhere to connect the newBB, drop it + cgFunc->ClearBBInVec(newBB->GetId()); + return nullptr; + } else { + cgFunc->GetLastBB()->AppendBB(*newBB); + cgFunc->SetLastBB(*newBB); + } } else { exitBB->AppendBB(*newBB); if (cgFunc->GetLastBB() == exitBB) { @@ -1003,7 +1060,7 @@ BB *CGCFG::BreakCriticalEdge(BB &pred, BB &succ) const { for (size_t i = 0; i < labelVec.size(); ++i) { if (labelVec[i] == succ.GetLabIdx()) { /* single edge for multi jump target, so have to replace all. 
*/ - pred.SetRangeGotoLabel(i, newLblIdx); + pred.SetRangeGotoLabel(static_cast(i), newLblIdx); } } cgFunc->UpdateEmitSt(pred, succ.GetLabIdx(), newLblIdx); diff --git a/src/mapleall/maple_be/src/cg/cg_critical_edge.cpp b/src/mapleall/maple_be/src/cg/cg_critical_edge.cpp index 37b2226e6567bc64353e4dadc77f9251dbae1f1e..ac18b618ada6c89ee97e0c187542a3ac2f781647 100644 --- a/src/mapleall/maple_be/src/cg/cg_critical_edge.cpp +++ b/src/mapleall/maple_be/src/cg/cg_critical_edge.cpp @@ -19,7 +19,9 @@ namespace maplebe { void CriticalEdge::SplitCriticalEdges() { for (auto it = criticalEdges.begin(); it != criticalEdges.end(); ++it) { BB *newBB = cgFunc->GetTheCFG()->BreakCriticalEdge(*((*it).first), *((*it).second)); - (void)newBBcreated.emplace(newBB->GetId()); + if (newBB) { + (void)newBBcreated.emplace(newBB->GetId()); + } } } diff --git a/src/mapleall/maple_be/src/cg/cg_dominance.cpp b/src/mapleall/maple_be/src/cg/cg_dominance.cpp index 85bf5512e6768d57b47796b7340393c5b5ea99c9..5780b2b4f3e2d54ecb9ae9a522e93cf489fa5467 100644 --- a/src/mapleall/maple_be/src/cg/cg_dominance.cpp +++ b/src/mapleall/maple_be/src/cg/cg_dominance.cpp @@ -477,7 +477,7 @@ void PostDomAnalysis::GeneratePdomTreeDot() { } pdomFile << " BB_" << bb->GetId(); pdomFile << "[label= \""; - if (bb == cgFunc.GetFirstBB()) { + if (bb == cgFunc.GetCommonEntryBB()) { pdomFile << "ENTRY\n"; } pdomFile << "BB_" << bb->GetId() << "\"];\n"; @@ -534,12 +534,17 @@ MAPLE_ANALYSIS_PHASE_REGISTER(CgDomAnalysis, domanalysis) bool CgPostDomAnalysis::PhaseRun(maplebe::CGFunc &f) { MemPool *pdomMemPool = GetPhaseMemPool(); + /* Currently, using the dummyBB which is created at the beginning of constructing CGFunc, as the commonEntryBB */ + f.GetCommonEntryBB()->PushBackSuccs(*f.GetFirstBB()); + f.GetFirstBB()->PushBackPreds(*f.GetCommonEntryBB()); pdomAnalysis = pdomMemPool->New(f, *pdomMemPool, *pdomMemPool, f.GetAllBBs(), - *f.GetFirstBB(), *f.GetCommonExitBB()); + *f.GetCommonEntryBB(), *f.GetCommonExitBB()); 
pdomAnalysis->Compute(); if (CG_DEBUG_FUNC(f)) { pdomAnalysis->Dump(); } + f.GetCommonEntryBB()->ClearSuccs(); + f.GetFirstBB()->ClearPreds(); return false; } MAPLE_ANALYSIS_PHASE_REGISTER(CgPostDomAnalysis, pdomanalysis) diff --git a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp index 68ef47ddcc1b0163db8ca51ba8ed431721ddb8e2..81deec69b25c3a14991fd533a0e75f698c14ec04 100644 --- a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp +++ b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp @@ -29,20 +29,24 @@ Insn &InsnBuilder::BuildInsn(MOperator opCode, const InsnDesc &idesc) { Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0) { const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); - return BuildInsn(opCode, tMd).AddOpndChain(o0); + Insn &result = BuildInsn(opCode, tMd).AddOpndChain(o0); + return result; } Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1) { const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); - return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1); + Insn &result = BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1); + return result; } Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2) { const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); - return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2); + Insn &result = BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2); + return result; } Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3) { const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); - return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3); + Insn &result = BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3); + return result; } Insn 
&InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4) { @@ -89,6 +93,10 @@ ImmOperand &OperandBuilder::CreateImm(uint32 size, int64 value, MemPool *mp) { return mp ? *mp->New(value, size, false) : *alloc.New(value, size, false); } +ImmOperand &OperandBuilder::CreateImm(uint32 size, int64 value, bool isSigned, MemPool *mp) { + return mp ? *mp->New(value, size, isSigned) : *alloc.New(value, size, isSigned); +} + ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp) { return mp ? *mp->New(symbol, offset, relocs, false) : *alloc.New(symbol, offset, relocs, false); @@ -125,6 +133,13 @@ MemOperand &OperandBuilder::CreateMem(uint32 size, RegOperand &baseOpnd, ImmOper return *alloc.New(size, baseOpnd, ofstOperand, symbol); } +BitShiftOperand &OperandBuilder::CreateBitShift(BitShiftOperand::ShiftOp op, uint32 amount, uint32 bitLen, MemPool *mp) { + if (mp != nullptr) { + return *mp->New(op, amount, bitLen); + } + return *alloc.New(op, amount, bitLen); +} + RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) { regno_t vRegNO = virtualReg.GetNextVregNO(type, size / k8BitSize); RegOperand &rp = mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); @@ -146,11 +161,11 @@ ListOperand &OperandBuilder::CreateList(MemPool *mp) { return mp ? *mp->New(alloc) : *alloc.New(alloc); } -FuncNameOperand &OperandBuilder::CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp){ +FuncNameOperand &OperandBuilder::CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp) { return mp ? *mp->New(symbol) : *alloc.New(symbol); } -LabelOperand &OperandBuilder::CreateLabel(const char *parent, LabelIdx idx, MemPool *mp){ +LabelOperand &OperandBuilder::CreateLabel(const char *parent, LabelIdx idx, MemPool *mp) { return mp ? 
*mp->New(parent, idx, *mp) : *alloc.New(parent, idx, *alloc.GetMemPool()); } diff --git a/src/mapleall/maple_be/src/cg/cg_mc_ssa_pre.cpp b/src/mapleall/maple_be/src/cg/cg_mc_ssa_pre.cpp index 0a71058ebab80931b368c1b2cf4120070cd3db1f..96d3e1e3ac7ce6d72fd45de05d63b07e6da19b09 100644 --- a/src/mapleall/maple_be/src/cg/cg_mc_ssa_pre.cpp +++ b/src/mapleall/maple_be/src/cg/cg_mc_ssa_pre.cpp @@ -71,7 +71,7 @@ void McSSAPre::ComputeMCWillBeAvail() const { // ================ Step 7: Max Flow / Min Cut ================= -bool McSSAPre::AmongMinCut(RGNode *nd, uint32 idx) const { +bool McSSAPre::AmongMinCut(const RGNode *nd, uint32 idx) const { for (Visit *visit : minCut) { if (visit->node == nd && visit->predIdx == idx) { return true; @@ -105,7 +105,7 @@ void McSSAPre::DumpRGToFile() { rgFile << "real" << pre->id << " -> " << "\"sink\nmaxflow " << maxFlowValue << "\";\n"; } MapleUnorderedMap::iterator it = occ2RGNodeMap.begin(); - for (; it != occ2RGNodeMap.end(); it++) { + for (; it != occ2RGNodeMap.end(); ++it) { RGNode *rgNode = it->second; for (uint32 i = 0; i < rgNode->pred.size(); i++) { RGNode *pre = rgNode->pred[i]; @@ -139,7 +139,7 @@ void McSSAPre::DumpRGToFile() { LogInfo::MapleLogger() << "++++ ssapre candidate " << workCand->workCandID << " dumped to " << fileName << "\n"; } -bool McSSAPre::IncludedEarlier(Visit **cut, Visit *curVisit, uint32 nextRouteIdx) { +bool McSSAPre::IncludedEarlier(Visit **cut, const Visit *curVisit, uint32 nextRouteIdx) const { uint32 i = nextRouteIdx; while (i != 0) { i--; @@ -151,7 +151,7 @@ bool McSSAPre::IncludedEarlier(Visit **cut, Visit *curVisit, uint32 nextRouteIdx } // remove this route's nodes from cutSet -void McSSAPre::RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route *route) { +void McSSAPre::RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route *route) const { for (uint32 i = 1; i < route->visits.size(); i++) { Visit &curVisit = route->visits[i]; std::unordered_multiset::iterator it = 
cutSet.find(curVisit.node->id); @@ -169,7 +169,7 @@ bool McSSAPre::SearchRelaxedMinCut(Visit **cut, std::unordered_multiset // determine starting value of visitIdx: start searching back from route end; // if any node is in cutSet, set visitIdx as that nodes's index in route; // otherwise, set visitIdx to 0 - uint32 visitIdx = curRoute->visits.size(); + size_t visitIdx = curRoute->visits.size(); do { visitIdx--; if (cutSet.count(curRoute->visits[visitIdx].node->id) != 0) { @@ -200,7 +200,7 @@ bool McSSAPre::SearchRelaxedMinCut(Visit **cut, std::unordered_multiset } success = (flowSoFar + visitCap <= relaxedMaxFlowValue); if (success && nextRouteIdx != (maxFlowRoutes.size() - 1)) { - success = SearchRelaxedMinCut(cut, cutSet, nextRouteIdx+1, flowSoFar + visitCap); + success = SearchRelaxedMinCut(cut, cutSet, nextRouteIdx + 1, flowSoFar + visitCap); } visitIdx++; } while (!success); @@ -216,7 +216,7 @@ bool McSSAPre::SearchMinCut(Visit **cut, std::unordered_multiset &cutSet // determine starting value of visitIdx: start searching back from route end; // if any node is in cutSet, set visitIdx as that nodes's index in route; // otherwise, set visitIdx to 0 - uint32 visitIdx = curRoute->visits.size(); + size_t visitIdx = curRoute->visits.size(); do { visitIdx--; if (cutSet.count(curRoute->visits[visitIdx].node->id) != 0) { @@ -225,7 +225,7 @@ bool McSSAPre::SearchMinCut(Visit **cut, std::unordered_multiset &cutSet } while (visitIdx != 1); // update cutSet with visited nodes lower than visitIdx if (visitIdx != 1) { - for (uint i = visitIdx - 1; i > 0; i--) { + for (uint i = visitIdx - 1; i > 0; --i) { cutSet.insert(curRoute->visits[i].node->id); } } @@ -243,7 +243,7 @@ bool McSSAPre::SearchMinCut(Visit **cut, std::unordered_multiset &cutSet if (visitIdx != 0) { cutSet.insert(curVisit->node->id); } - visitIdx++; + ++visitIdx; continue; } cut[nextRouteIdx] = curVisit; @@ -255,9 +255,9 @@ bool McSSAPre::SearchMinCut(Visit **cut, std::unordered_multiset &cutSet } success = 
(flowSoFar + visitCap <= maxFlowValue); if (success && nextRouteIdx != (maxFlowRoutes.size() - 1)) { - success = SearchMinCut(cut, cutSet, nextRouteIdx+1, flowSoFar + visitCap); + success = SearchMinCut(cut, cutSet, nextRouteIdx + 1, flowSoFar + visitCap); } - visitIdx++; + ++visitIdx; } while (!success); return true; } @@ -279,7 +279,7 @@ void McSSAPre::DetermineMinCut() { if (maxFlowRoutes.size() >= 20) { // apply arbitrary heuristics to reduce search time relaxedSearch = true; - relaxedMaxFlowValue = maxFlowValue * (maxFlowRoutes.size() / 10); + relaxedMaxFlowValue = static_cast(static_cast(maxFlowValue) * (maxFlowRoutes.size() / 10)); } bool success = !relaxedSearch && SearchMinCut(cut, cutSet, 0, 0); if (!success) { @@ -294,15 +294,15 @@ void McSSAPre::DetermineMinCut() { CHECK_FATAL(false, "McSSAPre::DetermineMinCut: failed to find min cut"); } // sort cut - std::sort(cut, cut+maxFlowRoutes.size(), [](const Visit *left, const Visit *right) { + std::sort(cut, cut + maxFlowRoutes.size(), [](const Visit *left, const Visit *right) { return (left->node != right->node) ? 
(left->node->id < right->node->id) : (left->predIdx < right->predIdx); }); // remove duplicates in the cut to form mincut - minCut.push_back(cut[0]); + minCut.emplace_back(cut[0]); size_t duplicatedVisits = 0; for (uint32 i = 1; i < maxFlowRoutes.size(); i++) { - if (cut[i] != cut[i-1]) { - minCut.push_back(cut[i]); + if (cut[i] != cut[i - 1]) { + minCut.emplace_back(cut[i]); } else { duplicatedVisits++; } @@ -321,18 +321,18 @@ void McSSAPre::DetermineMinCut() { } bool McSSAPre::VisitANode(RGNode *node, Route *route, std::vector &visitedNodes) { - ASSERT(node->pred.size() != 0 , "McSSAPre::VisitANode: no connection to source node"); + ASSERT(node->pred.size() != 0, "McSSAPre::VisitANode: no connection to source node"); // if any pred is the source and there's capacity to reach it, return success for (uint32 i = 0; i < node->pred.size(); i++) { if (node->pred[i] == source && node->inEdgesCap[i] > node->usedCap[i]) { // if there is another pred never taken that also reaches source, use that instead for (uint32 k = i + 1; k < node->pred.size(); k++) { if (node->pred[k] == source && node->usedCap[k] == 0 && node->inEdgesCap[k] > 0) { - route->visits.push_back(Visit(node, k)); + route->visits.emplace_back(Visit(node, k)); return true; } } - route->visits.push_back(Visit(node, i)); + route->visits.emplace_back(Visit(node, i)); return true; } } @@ -340,7 +340,7 @@ bool McSSAPre::VisitANode(RGNode *node, Route *route, std::vector &visited // pick a never-taken predecessor path first for (uint32 i = 0; i < node->pred.size(); i++) { if (node->usedCap[i] == 0 && node->inEdgesCap[i] > 0 && !visitedNodes[node->pred[i]->id]) { - route->visits.push_back(Visit(node, i)); + route->visits.emplace_back(Visit(node, i)); visitedNodes[node->pred[i]->id] = true; bool success = VisitANode(node->pred[i], route, visitedNodes); if (!success) { @@ -357,13 +357,13 @@ bool McSSAPre::VisitANode(RGNode *node, Route *route, std::vector &visited sortedPred[i] = i; } // put sortedPred[] in 
increasing order of capacities - std::sort(sortedPred, sortedPred+numPreds, [node](uint32 m, uint32 n) { + std::sort(sortedPred, sortedPred + numPreds, [node](uint32 m, uint32 n) { return node->inEdgesCap[m] < node->inEdgesCap[n]; }); // for this round, prefer predecessor with higher unused capacity for (uint32 i = 0; i < numPreds; i++) { uint32 j = sortedPred[i]; if (!visitedNodes[node->pred[j]->id] && node->inEdgesCap[j] > node->usedCap[j]) { - route->visits.push_back(Visit(node, j)); + route->visits.emplace_back(Visit(node, j)); visitedNodes[node->pred[j]->id] = true; bool success = VisitANode(node->pred[j], route, visitedNodes); if (!success) { @@ -385,7 +385,7 @@ bool McSSAPre::FindAnotherRoute() { // pick an untaken sink predecessor first for (uint32 i = 0; i < sink->pred.size(); i++) { if (sink->usedCap[i] == 0) { - route->visits.push_back(Visit(sink, i)); + route->visits.emplace_back(Visit(sink, i)); visitedNodes[sink->pred[i]->id] = true; success = VisitANode(sink->pred[i], route, visitedNodes); if (!success) { @@ -398,7 +398,7 @@ bool McSSAPre::FindAnotherRoute() { if (!success) { // now, pick any sink predecessor for (uint32 i = 0; i < sink->pred.size(); i++) { - route->visits.push_back(Visit(sink, i)); + route->visits.emplace_back(Visit(sink, i)); visitedNodes[sink->pred[i]->id] = true; success = VisitANode(sink->pred[i], route, visitedNodes); if (!success) { @@ -422,7 +422,7 @@ bool McSSAPre::FindAnotherRoute() { for (uint32 i = 0; i < route->visits.size(); i++) { route->visits[i].IncreUsedCapacity(minAvailCap); } - maxFlowRoutes.push_back(route); + maxFlowRoutes.emplace_back(route); return true; } @@ -468,15 +468,15 @@ void McSSAPre::AddSingleSink() { sink = preMp->New(&preAllocator, nextRGNodeId++, nullptr); size_t numToSink = 0; MapleUnorderedMap::iterator it = occ2RGNodeMap.begin(); - for (; it != occ2RGNodeMap.end(); it++) { + for (; it != occ2RGNodeMap.end(); ++it) { if (it->first->occTy != kAOccReal) { continue; } RGNode *use = it->second; // 
add edge from this use node to sink - sink->pred.push_back(use); - sink->inEdgesCap.push_back(INT64_MAX); - sink->usedCap.push_back(0); + sink->pred.emplace_back(use); + sink->inEdgesCap.emplace_back(INT64_MAX); + sink->usedCap.emplace_back(0); numToSink++; } ASSERT(numToSink != 0, "McSSAPre::AddSingleSink: found 0 edge to sink"); @@ -493,17 +493,17 @@ void McSSAPre::AddSingleSource() { // look for null operands MapleList::iterator it = phiOcc->cgbb->GetPredsBegin(); uint32 i; // index in phiOcc's phiOpnds - for (i = 0; i < phiOcc->phiOpnds.size(); i++, it++) { + for (i = 0; i < phiOcc->phiOpnds.size(); i++, ++it) { PhiOpndOcc *phiopndOcc = phiOcc->phiOpnds[i]; if (phiopndOcc->def != nullptr) { continue; } // add edge from source to this phi node RGNode *sucNode = occ2RGNodeMap[phiOcc]; - sucNode->pred.push_back(source); - sucNode->phiOpndIndices.push_back(i); - sucNode->inEdgesCap.push_back((*it)->GetProfFreq()+1); - sucNode->usedCap.push_back(0); + sucNode->pred.emplace_back(source); + sucNode->phiOpndIndices.emplace_back(i); + sucNode->inEdgesCap.emplace_back((*it)->GetProfFreq() + 1); + sucNode->usedCap.emplace_back(0); numSourceEdges++; } } @@ -546,9 +546,9 @@ void McSSAPre::GraphReduction() { occ2RGNodeMap[realOcc] = use; numRealOccs++; RGNode *def = occ2RGNodeMap[defOcc]; - use->pred.push_back(def); - use->inEdgesCap.push_back(realOcc->cgbb->GetProfFreq()+1); - use->usedCap.push_back(0); + use->pred.emplace_back(def); + use->inEdgesCap.emplace_back(realOcc->cgbb->GetProfFreq() + 1); + use->usedCap.emplace_back(0); numType2Edges++; } } @@ -564,19 +564,19 @@ void McSSAPre::GraphReduction() { ASSERT(occ2RGNodeMap.find(defOcc) != occ2RGNodeMap.end(), "McSSAPre::GraphReduction: def node not found"); RGNode *def = occ2RGNodeMap[defOcc]; RGNode *use = occ2RGNodeMap[defPhiOcc]; - use->pred.push_back(def); + use->pred.emplace_back(def); // find the pred bb (pointed to by it) that corresponds to phiopndOcc MapleList::iterator it = defPhiOcc->cgbb->GetPredsBegin(); 
uint32 i; // index in defPhiOcc's phiOpnds - for (i = 0; i < defPhiOcc->phiOpnds.size(); i++, it++) { + for (i = 0; i < defPhiOcc->phiOpnds.size(); i++, ++it) { if (defPhiOcc->phiOpnds[i] == phiopndOcc) { break; } } - use->phiOpndIndices.push_back(i); + use->phiOpndIndices.emplace_back(i); ASSERT(i != defPhiOcc->phiOpnds.size(), "McSSAPre::GraphReduction: cannot find corresponding phi opnd"); - use->inEdgesCap.push_back((*it)->GetProfFreq()+1); - use->usedCap.push_back(0); + use->inEdgesCap.emplace_back((*it)->GetProfFreq() + 1); + use->usedCap.emplace_back(0); numType1Edges++; } } @@ -601,8 +601,8 @@ void McSSAPre::SetPartialAnt(PhiOpndOcc *phiOpnd) const { return; } defPhiOcc->isPartialAnt = true; - for (PhiOpndOcc *phiOpnd : defPhiOcc->phiOpnds) { - SetPartialAnt(phiOpnd); + for (PhiOpndOcc *sPhiOpnd : defPhiOcc->phiOpnds) { + SetPartialAnt(sPhiOpnd); } } @@ -715,7 +715,7 @@ void McSSAPre::ApplyMCSSAPre() { void DoProfileGuidedSavePlacement(CGFunc *f, DomAnalysis *dom, SsaPreWorkCand *workCand) { MemPool *tempMP = memPoolCtrler.NewMemPool("cg_mc_ssa_pre", true); - McSSAPre cgssapre(f, dom, tempMP, workCand, false/*asEarlyAsPossible*/, false/*enabledDebug*/); + McSSAPre cgssapre(f, dom, tempMP, workCand, false /*asEarlyAsPossible*/, false /*enabledDebug*/); cgssapre.ApplyMCSSAPre(); diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp index 6c6f1ed6534c58024d00cfc52be15b8d0cbfb0e2..131b08d6a2b2142f63a7a11f71ee0a38b4bd26fe 100644 --- a/src/mapleall/maple_be/src/cg/cg_option.cpp +++ b/src/mapleall/maple_be/src/cg/cg_option.cpp @@ -63,11 +63,15 @@ uint32 CGOptions::jumpAlignPow = 5; uint32 CGOptions::funcAlignPow = 5; bool CGOptions::liteProfGen = false; bool CGOptions::liteProfUse = false; +bool CGOptions::liteProfVerify = false; std::string CGOptions::liteProfile = ""; std::string CGOptions::litePgoWhiteList = ""; std::string CGOptions::instrumentationOutPutPath = ""; std::string CGOptions::litePgoOutputFunction = 
""; std::string CGOptions::functionProrityFile = ""; +std::string CGOptions::functionReorderAlgorithm = ""; +std::string CGOptions::functionReorderProfile = ""; +std::string CGOptions::cpu = "cortex-a53"; #if TARGAARCH64 || TARGRISCV64 bool CGOptions::useBarriersForVolatile = false; #else @@ -76,6 +80,10 @@ bool CGOptions::useBarriersForVolatile = true; bool CGOptions::exclusiveEH = false; bool CGOptions::doEBO = false; bool CGOptions::doCGSSA = false; +bool CGOptions::doLayoutColdPath = false; +bool CGOptions::doGlobalSchedule = false; +bool CGOptions::doLocalSchedule = false; +bool CGOptions::doVerifySchedule = false; bool CGOptions::calleeEnsureParam = true; bool CGOptions::doIPARA = true; bool CGOptions::doCFGO = false; @@ -111,7 +119,7 @@ CGOptions::ABIType CGOptions::abiType = kABIHard; CGOptions::EmitFileType CGOptions::emitFileType = kAsm; bool CGOptions::genLongCalls = false; bool CGOptions::functionSections = false; -bool CGOptions::useFramePointer = false; +CGOptions::FramePointerType CGOptions::useFramePointer = kNoneFP; bool CGOptions::gcOnly = false; bool CGOptions::quiet = false; bool CGOptions::doPatchLongBranch = false; @@ -136,7 +144,8 @@ bool CGOptions::arm64ilp32 = false; bool CGOptions::noCommon = false; bool CGOptions::flavorLmbc = false; bool CGOptions::doAggrOpt = false; -CGOptions::VisibilityType CGOptions::visibilityType = kDefault; +CGOptions::VisibilityType CGOptions::visibilityType = kDefaultVisibility; +CGOptions::TLSModel CGOptions::tlsModel = kDefaultTLSModel; bool CGOptions::noplt = false; CGOptions &CGOptions::GetInstance() { @@ -189,9 +198,11 @@ bool CGOptions::SolveOptions(bool isDebug) { std::string printOpt; if (isDebug) { for (const auto &val : opt->GetRawValues()) { - printOpt += opt->GetName() + " " + val + " "; + if (opt->IsEnabledByUser()) { + printOpt += opt->GetName() + " " + val + " "; + LogInfo::MapleLogger() << "cg options: " << printOpt << '\n'; + } } - LogInfo::MapleLogger() << "cg options: " << printOpt << '\n'; } 
} @@ -204,9 +215,9 @@ bool CGOptions::SolveOptions(bool isDebug) { } if (opts::cg::fpie.IsEnabledByUser() || opts::cg::fPIE.IsEnabledByUser()) { - if (opts::cg::fPIE) { + if (opts::cg::fPIE && opts::cg::fPIE.IsEnabledByUser()) { SetPIEOptionHelper(kLargeMode); - } else if (opts::cg::fpie) { + } else if (opts::cg::fpie && opts::cg::fpie.IsEnabledByUser()) { SetPIEOptionHelper(kSmallMode); } else { SetPIEMode(kClose); @@ -216,12 +227,12 @@ bool CGOptions::SolveOptions(bool isDebug) { if (opts::cg::fpic.IsEnabledByUser() || opts::cg::fPIC.IsEnabledByUser()) { /* To avoid fpie mode being modified twice, need to ensure fpie is not opened. */ - if(!opts::cg::fpie && !opts::cg::fPIE) { - if (opts::cg::fPIC) { + if (!opts::cg::fpie && !opts::cg::fpie.IsEnabledByUser() && ! opts::cg::fPIE.IsEnabledByUser() &&!opts::cg::fPIE) { + if (opts::cg::fPIC && opts::cg::fPIC.IsEnabledByUser()) { SetPICOptionHelper(kLargeMode); SetPIEMode(kClose); ClearOption(CGOptions::kGenPie); - } else if (opts::cg::fpic) { + } else if (opts::cg::fpic && opts::cg::fpic.IsEnabledByUser()) { SetPICOptionHelper(kSmallMode); SetPIEMode(kClose); ClearOption(CGOptions::kGenPie); @@ -237,14 +248,21 @@ bool CGOptions::SolveOptions(bool isDebug) { } if (opts::cg::fnoSemanticInterposition.IsEnabledByUser()) { - if (opts::cg::fnoSemanticInterposition && ((GeneratePositionIndependentCode() && - !GeneratePositionIndependentExecutable()) || GeneratePositionIndependentExecutable())) { + if (opts::cg::fnoSemanticInterposition && IsShlib()) { EnableNoSemanticInterposition(); } else { DisableNoSemanticInterposition(); } } + if (opts::linkerTimeOpt.IsEnabledByUser() && IsShlib()) { + EnableNoSemanticInterposition(); + } + + if (opts::ftlsModel.IsEnabledByUser()) { + SetTLSModel(opts::ftlsModel); + } + if (opts::cg::verboseAsm.IsEnabledByUser()) { opts::cg::verboseAsm ? 
SetOption(CGOptions::kVerboseAsm) : ClearOption(CGOptions::kVerboseAsm); } @@ -533,8 +551,8 @@ bool CGOptions::SolveOptions(bool isDebug) { : ClearOption(CGOptions::kProEpilogueOpt); } - if (opts::cg::tailcall.IsEnabledByUser()) { - opts::cg::tailcall ? EnableTailCallOpt() : DisableTailCallOpt(); + if (opts::tailcall.IsEnabledByUser()) { + opts::tailcall ? EnableTailCallOpt() : DisableTailCallOpt(); } if (opts::cg::calleeregsPlacement.IsEnabledByUser()) { @@ -629,8 +647,14 @@ bool CGOptions::SolveOptions(bool isDebug) { opts::cg::functionSections ? EnableFunctionSections() : DisableFunctionSections(); } + if (opts::cg::omitLeafFramePointer.IsEnabledByUser()) { + opts::cg::omitLeafFramePointer ? SetFramePointer(kNonLeafFP) : SetFramePointer(kAllFP); + } + if (opts::cg::omitFramePointer.IsEnabledByUser()) { - opts::cg::omitFramePointer ? DisableFramePointer() : EnableFramePointer(); + opts::cg::omitFramePointer ? SetFramePointer(kNoneFP) : + ((!opts::cg::omitLeafFramePointer.IsEnabledByUser() || opts::cg::omitLeafFramePointer) ? + SetFramePointer(kNonLeafFP) : SetFramePointer(kAllFP)); } if (opts::gcOnly.IsEnabledByUser()) { @@ -658,6 +682,14 @@ bool CGOptions::SolveOptions(bool isDebug) { opts::cg::cgSsa ? EnableCGSSA() : DisableCGSSA(); } + if (opts::cg::layoutColdPath.IsEnabledByUser()) { + opts::cg::layoutColdPath ? EnableLayoutColdPath() : DisableLayoutColdPath(); + } + + if (opts::cg::globalSchedule.IsEnabledByUser()) { + opts::cg::globalSchedule ? EnableGlobalSchedule() : DisableGlobalSchedule(); + } + if (opts::cg::common.IsEnabledByUser()) { opts::cg::common ? EnableCommon() : DisableCommon(); } @@ -685,6 +717,9 @@ bool CGOptions::SolveOptions(bool isDebug) { if (opts::cg::litePgoGen.IsEnabledByUser()) { opts::cg::litePgoGen ? EnableLiteProfGen() : DisableLiteProfGen(); } + if (opts::cg::litePgoVerify.IsEnabledByUser()) { + opts::cg::litePgoVerify ? 
EnableLiteProfVerify() : DisableLiteProfVerify(); + } if (opts::cg::litePgoOutputFunc.IsEnabledByUser()) { EnableLiteProfGen(); @@ -715,10 +750,19 @@ bool CGOptions::SolveOptions(bool isDebug) { SetFunctionPriority(opts::cg::functionPriority); } + if (opts::functionReorderAlgorithm.IsEnabledByUser()) { + SetFunctionReorderAlgorithm(opts::functionReorderAlgorithm); + } + + if (opts::functionReorderProfile.IsEnabledByUser()) { + SetFunctionReorderProfile(opts::functionReorderProfile); + } + if (opts::fVisibility.IsEnabledByUser()) { SetVisibilityType(opts::fVisibility); } + SetOption(kWithSrc); /* override some options when loc, dwarf is generated */ if (WithLoc()) { SetOption(kWithSrc); @@ -796,6 +840,8 @@ void CGOptions::EnableO0() { optimizeLevel = kLevel0; doEBO = false; doCGSSA = false; + doGlobalSchedule = false; + doLocalSchedule = false; doCFGO = false; doICO = false; doPrePeephole = false; @@ -841,6 +887,8 @@ void CGOptions::EnableO2() { optimizeLevel = kLevel2; doEBO = true; doCGSSA = true; + doGlobalSchedule = true; + doLocalSchedule = true; doCFGO = true; doICO = true; doPrePeephole = true; @@ -857,7 +905,7 @@ void CGOptions::EnableO2() { SetOption(kUseUnwindTables); ClearOption(kUseStackProtectorStrong); ClearOption(kUseStackProtectorAll); -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 doPreLSRAOpt = false; doLocalRefSpill = false; doCalleeToSpill = false; @@ -884,6 +932,8 @@ void CGOptions::EnableLiteCG() { optimizeLevel = kLevelLiteCG; doEBO = false; doCGSSA = false; + doGlobalSchedule = false; + doLocalSchedule = false; doCFGO = false; doICO = false; doPrePeephole = false; diff --git a/src/mapleall/maple_be/src/cg/cg_options.cpp b/src/mapleall/maple_be/src/cg/cg_options.cpp index 7581e1d6232141a0acd342f84b5b2fd6c9f14703..cf40da6571ba33a85af3f26783bd511b081b19cf 100644 --- a/src/mapleall/maple_be/src/cg/cg_options.cpp +++ b/src/mapleall/maple_be/src/cg/cg_options.cpp @@ -13,54 +13,51 @@ * See the Mulan PSL v2 for more details. 
*/ -#include "driver_options.h" - -#include -#include #include +#include "driver_options.h" namespace opts::cg { maplecl::Option fpie({"-fpie", "--fpie"}, - " --fpie \tGenerate position-independent executable in small mode\n" - " --no-pie/-no-pie\n", + " --fpie \tGenerate position-independent executable in small mode\n" + " --no-pie/-fno-pie\n", {cgCategory, driverCategory, ldCategory}, - maplecl::DisableEvery({"--no-pie", "-no-pie"})); + maplecl::DisableEvery({"-fno-pie", "--no-pie"})); -maplecl::Option fPIE({"-fPIE", "--fPIE",}, - " --fPIE \tGenerate position-independent executable in large mode\n" - " --no-pie/-no-pie\n", +maplecl::Option fPIE({"-fPIE", "--fPIE"}, + " --fPIE \tGenerate position-independent executable in large mode\n" + " --no-pie/-fno-pie\n", {cgCategory, driverCategory, ldCategory}); maplecl::Option fpic({"-fpic", "--fpic"}, - " --fpic \tGenerate position-independent shared library in small mode\n" - " --no-pic/no-pic\n", + " --fpic \tGenerate position-independent shared library in small mode\n" + " --no-pic/-fno-pic\n", {cgCategory, driverCategory, ldCategory}, - maplecl::DisableEvery({"--no-pic", "-no-pic"})); + maplecl::DisableEvery({"-fno-pic", "--no-pic"})); maplecl::Option fPIC({"-fPIC", "--fPIC"}, - " --fPIC \tGenerate position-independent shared library in large mode\n" - " --no-pic/no-pic\n", + " --fPIC \tGenerate position-independent shared library in large mode\n" + " --no-pic/-fno-pic\n", {cgCategory, driverCategory, ldCategory}); -maplecl::Option fnoSemanticInterposition({"-fno-semantic-interposition", "--fno-semantic-interposition"}, +maplecl::Option fnoSemanticInterposition({"-fno-semantic-interposition"}, " --fno-semantic-interposition \tif interposition happens for " "functions, the overwriting function will have precisely the same " "semantics (and side effects)\n" - " --fsemantic-interposition\n", - {cgCategory, driverCategory, ldCategory}, - maplecl::DisableWith("--fsemantic-interposition")); + " -fsemantic-interposition\n", 
+ {cgCategory, driverCategory}, + maplecl::DisableWith("-fsemantic-interposition")); maplecl::Option verboseAsm({"--verbose-asm"}, " --verbose-asm \tAdd comments to asm output\n" " --no-verbose-asm\n", - {cgCategory}, + {driverCategory, cgCategory}, maplecl::DisableWith("--no-verbose-asm")); maplecl::Option verboseCg({"--verbose-cg"}, " --verbose-cg \tAdd comments to cg output\n" " --no-verbose-cg\n", - {cgCategory}, + {driverCategory, cgCategory}, maplecl::DisableWith("--no-verbose-cg")); maplecl::Option maplelinker({"--maplelinker"}, @@ -88,7 +85,7 @@ maplecl::Option replaceAsm({"--replaceasm"}, maplecl::DisableWith("--no-replaceasm")); maplecl::Option generalRegOnly({"--general-reg-only"}, - " --general-reg-only \tdisable floating-point or Advanced SIMD registers\n" + " --general-reg-only \tdisable floating-point or Advanced SIMD registers\n" " --no-general-reg-only\n", {cgCategory}, maplecl::DisableWith("--no-general-reg-only")); @@ -166,13 +163,13 @@ maplecl::Option lsraOptcallee({"--lsra-optcallee"}, maplecl::DisableWith("--no-lsra-optcallee")); maplecl::Option calleeregsPlacement({"--calleeregs-placement"}, - " --calleeregs-placement \tOptimize placement of callee-save registers\n" + " --calleeregs-placement \tOptimize placement of callee-save registers\n" " --no-calleeregs-placement\n", {cgCategory}, maplecl::DisableWith("--no-calleeregs-placement")); maplecl::Option ssapreSave({"--ssapre-save"}, - " --ssapre-save \tUse ssapre algorithm to save callee-save registers\n" + " --ssapre-save \tUse ssapre algorithm to save callee-save registers\n" " --no-ssapre-save\n", {cgCategory}, maplecl::DisableWith("--no-ssapre-save")); @@ -322,7 +319,7 @@ maplecl::Option gdwarf({"--gdwarf"}, {cgCategory}); maplecl::Option gsrc({"--gsrc"}, - " --gsrc \tUse original source file instead of mpl file for debugging\n", + " --gsrc \tUse original source file instead of mpl file for debugging\n", {cgCategory}); maplecl::Option gmixedsrc({"--gmixedsrc"}, @@ -359,7 +356,7 @@ 
maplecl::Option constFold({"--const-fold"}, maplecl::DisableWith("--no-const-fold")); maplecl::Option ehExclusiveList({"--eh-exclusive-list"}, - " --eh-exclusive-list \tFor generating gold files in unit testing\n" + " --eh-exclusive-list \tFor generating gold files in unit testing\n" " \t--eh-exclusive-list=list_file\n", {cgCategory}); @@ -398,7 +395,7 @@ maplecl::Option lsraOverlap({"--lsra-overlap"}, {cgCategory}); maplecl::Option remat({"--remat"}, - " --remat \tEnable rematerialization during register allocation\n" + " --remat \tEnable rematerialization during register allocation\n" " \t 0: no rematerialization (default)\n" " \t >= 1: rematerialize constants\n" " \t >= 2: rematerialize addresses\n" @@ -417,7 +414,7 @@ maplecl::Option dumpCfg({"--dump-cfg"}, maplecl::Option target({"--target"}, " --target=TARGETMACHINE \t generate code for TARGETMACHINE\n", {cgCategory}, - maplecl::optionalValue); + maplecl::kOptionalValue); maplecl::Option dumpPhases({"--dump-phases"}, " --dump-phases=PHASENAME,..." 
@@ -430,11 +427,11 @@ maplecl::Option skipPhases({"--skip-phases"}, {cgCategory, driverCategory}); maplecl::Option skipFrom({"--skip-from"}, - " --skip-from=PHASENAME \tSkip the rest phases from PHASENAME(included)\n", + " --skip-from=PHASENAME \tSkip the rest phases from PHASENAME(included)\n", {cgCategory}); maplecl::Option skipAfter({"--skip-after"}, - " --skip-after=PHASENAME \tSkip the rest phases after PHASENAME(excluded)\n", + " --skip-after=PHASENAME \tSkip the rest phases after PHASENAME(excluded)\n", {cgCategory}); maplecl::Option dumpFunc({"--dump-func"}, @@ -456,15 +453,15 @@ maplecl::Option useBarriersForVolatile({"--use-barriers-for-volatile"}, maplecl::DisableWith("--no-use-barriers-for-volatile")); maplecl::Option range({"--range"}, - " --range=NUM0,NUM1 \tOptimize only functions in the range [NUM0, NUM1]\n", + " --range=NUM0,NUM1 \tOptimize only functions in the range [NUM0, NUM1]\n", {cgCategory}); maplecl::Option fastAlloc({"--fast-alloc"}, - " --fast-alloc=[0/1] \tO2 RA fast mode, set to 1 to spill all registers\n", + " --fast-alloc=[0/1] \tO2 RA fast mode, set to 1 to spill all registers\n", {cgCategory}); maplecl::Option spillRange({"--spill_range"}, - " --spill_range=NUM0,NUM1 \tO2 RA spill registers in the range [NUM0, NUM1]\n", + " --spill_range=NUM0,NUM1 \tO2 RA spill registers in the range [NUM0, NUM1]\n", {cgCategory}); maplecl::Option dupBb({"--dup-bb"}, @@ -491,9 +488,9 @@ maplecl::Option cyclePatternList({"--cycle-pattern-list"}, {cgCategory}); maplecl::Option duplicateAsmList({"--duplicate_asm_list"}, - " --duplicate_asm_list \tDuplicate asm functions to delete plt call\n" + " --duplicate_asm_list \tDuplicate asm functions to delete plt call\n" " \t--duplicate_asm_list=list_file\n", - {cgCategory}); + {driverCategory, cgCategory}); maplecl::Option duplicateAsmList2({"--duplicate_asm_list2"}, " --duplicate_asm_list2" @@ -567,23 +564,24 @@ maplecl::Option functionSections({"--function-sections", "-ffunction-secti 
maplecl::DisableWith("--no-function-sections")); maplecl::Option omitFramePointer({"--omit-frame-pointer", "-fomit-frame-pointer"}, - " --omit-frame-pointer \t do not use frame pointer \n" + " --omit-frame-pointer \t do not use frame pointer for non-leaf func\n" " --no-omit-frame-pointer\n", {cgCategory, driverCategory}, maplecl::DisableEvery({"--no-omit-frame-pointer", "-fno-omit-frame-pointer"})); +maplecl::Option omitLeafFramePointer({"--omit-leaf-frame-pointer", "-momit-leaf-frame-pointer"}, + " --omit-leaf-frame-pointer \t do not use frame pointer for leaf func\n" + " --no-omit-leaf-frame-pointer\n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"--no-omit-leaf-frame-pointer", + "-mno-omit-leaf-frame-pointer"})); + maplecl::Option fastMath({"--fast-math"}, " --fast-math \tPerform fast math\n" " --no-fast-math\n", {cgCategory}, maplecl::DisableWith("--no-fast-math")); -maplecl::Option tailcall({"--tailcall", "-foptimize-sibling-calls"}, - " --tailcall/-foptimize-sibling-calls \tDo tail call optimization\n" - " --no-tailcall/-fno-optimize-sibling-calls\n", - {cgCategory, driverCategory}, - maplecl::DisableEvery({"-fno-optimize-sibling-calls", "--no-tailcall"})); - maplecl::Option alignAnalysis({"--align-analysis"}, " --align-analysis \tPerform alignanalysis\n" " --no-align-analysis\n", @@ -596,6 +594,18 @@ maplecl::Option cgSsa({"--cg-ssa"}, {cgCategory}, maplecl::DisableWith("--no-cg-ssa")); +maplecl::Option layoutColdPath({"--layout-cold-path"}, + " --layout-cold-path \tLayout cold path out of hot path\n" + " --no-layout-cold-path\n", + {cgCategory}, + maplecl::DisableWith("--no-layout-cold-path")); + +maplecl::Option globalSchedule({"--global-schedule"}, + " --global-schedule \tPerform global schedule\n" + " --no-global-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-global-schedule")); + maplecl::Option calleeEnsureParam({"--callee-ensure-param"}, " --callee-ensure-param \tCallee ensure valid vb of params\n" " --caller-ensure-param\n", 
@@ -625,34 +635,46 @@ maplecl::Option alignMaxBbSize({"--align-max-bb-size"}, {cgCategory}); maplecl::Option loopAlignPow({"--loop-align-pow"}, - " --loop-align-pow=NUM \tO2 loop bb align pow (NUM == 0, no loop-align)\n", + " --loop-align-pow=NUM \tO2 loop bb align pow (NUM == 0, no loop-align)\n", {cgCategory}); maplecl::Option jumpAlignPow({"--jump-align-pow"}, - " --jump-align-pow=NUM \tO2 jump bb align pow (NUM == 0, no jump-align)\n", + " --jump-align-pow=NUM \tO2 jump bb align pow (NUM == 0, no jump-align)\n", {cgCategory}); maplecl::Option funcAlignPow({"--func-align-pow"}, - " --func-align-pow=NUM \tO2 func bb align pow (NUM == 0, no func-align)\n", + " --func-align-pow=NUM \tO2 func bb align pow (NUM == 0, no func-align)\n", {cgCategory}); maplecl::Option litePgoGen({"--lite-pgo-gen"}, " --lite-pgo-gen \tinstrumentation CG bb and generate bb-cnt info\n" " --no-lite-pgo-gen\n", - {cgCategory}, + {driverCategory, cgCategory}, maplecl::DisableWith("--no-lite-pgo-gen")); maplecl::Option instrumentationFile ({"--instrumentation-file"}, - "--instrumentation-file=filepath \t instrumentation file output path\n", - {cgCategory}); + "--instrumentation-file=filepath \t instrumentation file output " + "path\n", + {driverCategory, cgCategory}); maplecl::Option litePgoWhiteList ({"--lite-pgo-white-list"}, - "--lite-pgo-white-list=filepath \t instrumentation function white list\n", - {cgCategory}); + "--lite-pgo-white-list=filepath \t instrumentation function white " + "list\n", + {driverCategory, cgCategory}); maplecl::Option litePgoOutputFunc ({"--lite-pgo-output-func"}, - "--lite-pgo-output-func=function name \t generate lite profile at the exit of the output function[default none]\n", - {cgCategory}); + "--lite-pgo-output-func=function name \t generate lite profile " + "at the exit of the output function[default none]\n", + {driverCategory, cgCategory}); + maplecl::Option litePgoFile({"--lite-pgo-file"}, " --lite-pgo-file=filepath \t lite pgo guide file\n", - 
{cgCategory}); + {driverCategory, cgCategory}); maplecl::Option functionPriority({"--function-priority"}, - " --function-priority=filepath \t when profile data is given, priority suffix is added to section name in order to improve code locality\n", - {cgCategory}); + " --function-priority=filepath \t when profile data is given," + "priority suffix is added to section name in order to " + "improve code locality\n", + {cgCategory}); +maplecl::Option litePgoVerify({"--lite-pgo-verify"}, + " --lite-pgo-verify \tverify lite-pgo data strictly, abort when " + "encountering mismatch data(default:skip)\n" + " --no-lite-pgo-verify\n", + {driverCategory, cgCategory}, + maplecl::DisableWith("--no-lite-pgo-verify")); } diff --git a/src/mapleall/maple_be/src/cg/cg_pgo_gen.cpp b/src/mapleall/maple_be/src/cg/cg_pgo_gen.cpp index c0b9184cdbe4f392e7e185c73b6d8f5fd9159d43..3b51e0609b6085bf2d50d25ad76d3b67d136cb25 100644 --- a/src/mapleall/maple_be/src/cg/cg_pgo_gen.cpp +++ b/src/mapleall/maple_be/src/cg/cg_pgo_gen.cpp @@ -20,7 +20,7 @@ namespace maplebe { uint64 CGProfGen::counterIdx = 0; -std::string AppendModSpecSuffix(MIRModule &m) { +std::string AppendModSpecSuffix(const MIRModule &m) { std::string specSuffix = "_"; specSuffix = specSuffix + std::to_string(DJBHash(m.GetEntryFuncName().c_str()) + m.GetNumFuncs()); return specSuffix; @@ -149,7 +149,7 @@ void CGProfGen::CreateProfFileSym(MIRModule &m, const std::string &outputPath, c auto *mirBuilder = m.GetMIRBuilder(); auto *charPtrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_a64)); const std::string finalName = outputPath + "mpl_lite_pgo.data"; - auto *modNameMirConst =m.GetMemPool()->New(finalName, *charPtrType); + auto *modNameMirConst = m.GetMemPool()->New(finalName, *charPtrType); auto *funcPtrSym = mirBuilder->GetOrCreateGlobalDecl(symName, *charPtrType); funcPtrSym->SetAttr(ATTR_weak); // weak symbol funcPtrSym->SetKonst(modNameMirConst); @@ -165,7 +165,7 @@ void 
CGProfGen::CreateChildTimeSym(maple::MIRModule &m, const std::string &symNa // if time is set. wait_fork is required to be set auto *u8Type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u8)); - auto *waitForkMirConst =m.GetMemPool()->New(0, *u8Type); + auto *waitForkMirConst = m.GetMemPool()->New(0, *u8Type); auto *waitForkSym = mirBuilder->GetOrCreateGlobalDecl("__mpl_pgo_wait_forks", *u8Type); waitForkSym->SetAttr(ATTR_weak); // weak symbol waitForkSym->SetKonst(waitForkMirConst); diff --git a/src/mapleall/maple_be/src/cg/cg_pgo_use.cpp b/src/mapleall/maple_be/src/cg/cg_pgo_use.cpp index d62f3bbf6df8d355c86843d31d588284b2c7d490..8b3b1ed18d4a200abc716e70cc677ab59624a42d 100644 --- a/src/mapleall/maple_be/src/cg/cg_pgo_use.cpp +++ b/src/mapleall/maple_be/src/cg/cg_pgo_use.cpp @@ -16,7 +16,13 @@ #include "cg_critical_edge.h" #include "loop.h" #include "optimize_common.h" +#include "chain_layout.h" namespace maplebe { +void RelinkBB(BB &prev, BB &next) { + prev.SetNext(&next); + next.SetPrev(&prev); +} + bool CGProfUse::ApplyPGOData() { instrumenter.PrepareInstrumentInfo(f->GetFirstBB(), f->GetCommonExitBB()); std::vector iBBs; @@ -31,8 +37,12 @@ bool CGProfUse::ApplyPGOData() { LogInfo::MapleLogger() << "find profile for " << f->GetName() << "Failed\n"; } CHECK_FATAL(bbInfo != nullptr, "Get profile Failed"); - if (!VerifyProfiledata(iBBs, *bbInfo)) { - CHECK_FATAL_FALSE("Verify lite profile data Failed!"); + if (!VerifyProfileData(iBBs, *bbInfo)) { + if (!CGOptions::DoLiteProfVerify()) { + LogInfo::MapleLogger() << "skip profile applying for " << f->GetName() << " due to out of date\n"; + } else { + CHECK_FATAL_FALSE("Verify lite profile data Failed!"); + } return false; } @@ -45,26 +55,26 @@ bool CGProfUse::ApplyPGOData() { ComputeEdgeFreq(); ApplyOnBB(); - InitFrequencyReversePostOrderBBList(); -#if 0 - f->GetTheCFG()->CheckCFGFreq(); -#endif return true; } -bool CGProfUse::VerifyProfiledata(const std::vector &iBBs, LiteProfile::BBInfo &bbInfo) { +bool 
CGProfUse::VerifyProfileData(const std::vector &iBBs, LiteProfile::BBInfo &bbInfo) { /* check bb size */ + bbInfo.verified.first = true; if (bbInfo.counter.size() != iBBs.size()) { LogInfo::MapleLogger() << f->GetName() << " counter doesn't match profile counter :" << bbInfo.counter.size() << " func real counter :" << iBBs.size() << '\n'; + bbInfo.verified.second = false; return false; } /* check cfg hash*/ if (bbInfo.funcHash != f->GetTheCFG()->ComputeCFGHash()) { LogInfo::MapleLogger() << f->GetName() << " CFG hash doesn't match profile cfghash :" << bbInfo.funcHash << " func cfghash :" << f->GetTheCFG()->ComputeCFGHash() << '\n'; + bbInfo.verified.second = false; return false; } + bbInfo.verified.second = true; return true; } @@ -226,8 +236,13 @@ void CGProfUse::LayoutBBwithProfile() { /* initialize */ laidOut.resize(f->GetAllBBs().size(), false); /* BB chain layout */ - BuildChainForFunc(); - BBChain *mainChain = bb2chain[f->GetFirstBB()->GetId()]; + ChainLayout chainLayout(*f, *mp, debugChainLayout, *domInfo); + // Init layout settings for CG + chainLayout.SetHasRealProfile(true); + chainLayout.SetConsiderBetterPred(true); + chainLayout.BuildChainForFunc(); + NodeChain *mainChain = chainLayout.GetNode2Chain()[f->GetFirstBB()->GetID()]; + for (auto bbId : bbSplit) { BB *cbb = f->GetBBFromID(bbId); CHECK_FATAL(cbb, "get bb failed"); @@ -235,13 +250,22 @@ void CGProfUse::LayoutBBwithProfile() { } std::vector coldSection; std::vector layoutID; + // clear next pointer for last non-split BB + for (auto it = mainChain->rbegin(); it != mainChain->rend(); ++it) { + auto *bb = static_cast(*it); + if (bbSplit.count(bb->GetID()) == 0) { + bb->SetNext(nullptr); + break; + } + } for (auto it = mainChain->begin(); it != mainChain->end(); ++it) { - if (!bbSplit.count((*it)->GetId())) { - if ((*it)->IsInColdSection()) { - coldSection.emplace_back(*it); + auto *bb = static_cast(*it); + if (!bbSplit.count(bb->GetId())) { + if (bb->IsInColdSection()) { + 
coldSection.emplace_back(bb); } else { - AddBBProf(**it); - layoutID.emplace_back((*it)->GetId()); + AddBBProf(*bb); + layoutID.emplace_back(bb->GetId()); } } } @@ -249,358 +273,33 @@ void CGProfUse::LayoutBBwithProfile() { AddBBProf(*coldSection[i]); layoutID.emplace_back(coldSection[i]->GetId()); } - if (debugChainLayout) { - LogInfo::MapleLogger() << "Finish forming layout : "; - for (auto it : layoutID) { - LogInfo::MapleLogger() << it << " "; - } - LogInfo::MapleLogger() << "\n"; - } -} - -void CGProfUse::InitBBChains() { - uint32 id = 0; - bb2chain.resize(f->GetAllBBs().size(), nullptr); - BB *commonEntry = f->GetFirstBB(); - for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { - // BBChain constructor will update bb2chain - // attention cleanup & unreachable - (void)mp->New(puAlloc, bb2chain, *curbb, id++); - } -} - -void CGProfUse::BuildChainForFunc() { - uint32 validBBNum = 0; - BB *commonEntry = f->GetFirstBB(); - // attention cleanup & unreachable - for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { - if (curbb->IsUnreachable()) { - ASSERT(false, "check unreachable bb"); - continue; - } - if (f->IsExitBB(*curbb)) { - if (curbb->GetPrev() && curbb->GetPrev()->GetKind() == BB::kBBGoto && - curbb->GetPreds().empty() && curbb->GetSuccs().empty()) { - continue; - } - } - ++validBBNum; - } - // --validBBNum; // exclude cleanup BB - LogInfo::MapleLogger() << "\n[Chain layout] " << f->GetName() << ", valid bb num: " << validBBNum << std::endl; - InitBBChains(); - BuildChainForLoops(); - // init ready chains for func - for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { - uint32 bbId = curbb->GetId(); - BBChain *chain = bb2chain[bbId]; - - if (chain->IsReadyToLayout(nullptr)) { - (void)readyToLayoutChains.insert(chain); - } - } - BBChain *entryChain = bb2chain[commonEntry->GetId()]; - DoBuildChain(*commonEntry, *entryChain, nullptr); - - /* merge clean up */ - if (f->GetCleanupBB()) { - 
BBChain *cleanup = bb2chain[f->GetCleanupBB()->GetId()]; - if (readyToLayoutChains.find(cleanup) == readyToLayoutChains.end()) { - LogInfo::MapleLogger() << "clean up bb is not in ready layout "; - } - CHECK_FATAL(cleanup->GetHeader() == f->GetCleanupBB(), "more than one cleanup"); - if (CGOptions::DoEnableHotColdSplit()) { - cleanup->GetHeader()->SetColdSection(); - } - entryChain->MergeFrom(cleanup); - } - /* merge symbol label in C which is not in control flow */ - std::vector labelBB; - { - for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { - if (curbb->IsUnreachable()) { - /* delete unreachable bb in cfgo */ - ASSERT(false, "check unreachable bb"); - CHECK_FATAL_FALSE("check unreachable bb"); - continue; - } - if (f->IsExitBB(*curbb)) { - if (curbb->GetPrev() && curbb->GetPrev()->GetKind() == BB::kBBGoto && - curbb->GetPreds().empty() && curbb->GetSuccs().empty()) { - continue; + // adjust the last BB if kind is fallthru or condtion BB + BB *lastBB = layoutBBs.empty() ? 
nullptr : layoutBBs.back(); + if (lastBB != nullptr && !lastBB->IsEmptyOrCommentOnly()) { + if (lastBB->GetKind() == BB::kBBFallthru) { + CHECK_FATAL(lastBB->GetSuccs().size() == 1, "it is fallthru"); + BB *targetBB = *lastBB->GetSuccs().begin(); + BB *newBB = f->GetTheCFG()->GetInsnModifier()->CreateGotoBBAfterCondBB(*lastBB, *targetBB, targetBB == lastBB); + RelinkBB(*lastBB, *newBB); + } else if (lastBB->GetKind() == BB::kBBIf) { + BB *targetBB = CGCFG::GetTargetSuc(*lastBB); + BB *ftBB = nullptr; + for (BB *sucBB : lastBB->GetSuccs()) { + if (sucBB != targetBB) { + ftBB = sucBB; } } - if (!entryChain->FindBB(*curbb)) { - if (curbb->GetPreds().empty() && CGCFG::InSwitchTable(curbb->GetLabIdx(), *f)) { - labelBB.push_back(curbb); - // last bb which is not in control flow - } else if (curbb->GetPreds().empty() && curbb->GetSuccs().empty() && f->GetLastBB() == curbb) { - labelBB.push_back(curbb); - } else { - LogInfo::MapleLogger() << "In function " << f->GetName() << " bb " << curbb->GetId() << " is no in chain\n"; - } - } - } - - for (auto bb : labelBB) { - BBChain *labelchain = bb2chain[bb->GetId()]; - if (readyToLayoutChains.find(labelchain) == readyToLayoutChains.end()) { - LogInfo::MapleLogger() << "label bb is not in ready layout "; - } - entryChain->MergeFrom(labelchain); - if (CGOptions::DoEnableHotColdSplit()) { - bb->SetColdSection(); - } - bb->SetNext(nullptr); - bb->SetPrev(nullptr); - } - } - - // To sure all of BBs have been laid out - CHECK_FATAL(entryChain->size() == validBBNum, "has any BB not been laid out?"); -} - -void CGProfUse::BuildChainForLoops() { - if (f->GetLoops().empty()) { - return; - } - auto &loops = f->GetLoops(); - // sort loops from inner most to outer most - std::stable_sort(loops.begin(), loops.end(), [](const CGFuncLoops *loop1, const CGFuncLoops *loop2) { - return loop1->GetLoopLevel() > loop2->GetLoopLevel(); - }); - auto *context = mp->New>(f->GetAllBBs().size(), false, puAlloc.Adapter()); - for (auto *loop : loops) { - 
BuildChainForLoop(*loop, context); - } -} - -void CGProfUse::BuildChainForLoop(CGFuncLoops &loop, MapleVector *context) { - // init loop context - std::fill(context->begin(), context->end(), false); - for (auto *bbMember : loop.GetLoopMembers()) { - CHECK_FATAL(bbMember->GetId() < context->size(), "index out of range"); - (*context)[bbMember->GetId()] = true; - } - // init ready chains for loop - for (auto *bbMember : loop.GetLoopMembers()) { - BBChain *chain = bb2chain[bbMember->GetId()]; - if (chain->IsReadyToLayout(context)) { - (void)readyToLayoutChains.insert(chain); - } - } - // find loop chain starting BB - BB *startBB = FindBestStartBBForLoop(loop, context); - if (startBB == nullptr) { - return; // all blocks in the loop have been laid out, just return - } - BBChain *startChain = bb2chain[startBB->GetId()]; - DoBuildChain(*startBB, *startChain, context); - readyToLayoutChains.clear(); -} - -// Multiple loops may share the same header, we try to find the best unplaced BB in the loop -BB *CGProfUse::FindBestStartBBForLoop(CGFuncLoops &loop, const MapleVector *context) { - auto *headerChain = bb2chain[loop.GetHeader()->GetId()]; - if (headerChain->size() == 1) { - return loop.GetHeader(); - } - // take inner loop chain tail BB as start BB - if (headerChain->size() > 1 && IsBBInCurrContext(*headerChain->GetTail(), context)) { - return headerChain->GetTail(); - } - for (auto *bbMember : loop.GetLoopMembers()) { - if (bb2chain[bbMember->GetId()]->size() == 1) { - return f->GetBBFromID(bbMember->GetId()); + BB *newBB = f->GetTheCFG()->GetInsnModifier()->CreateGotoBBAfterCondBB(*lastBB, *ftBB, targetBB == lastBB); + RelinkBB(*lastBB, *newBB); } } - return nullptr; -} - -bool CGProfUse::IsBBInCurrContext(const BB &bb, const MapleVector *context) const { - if (context == nullptr) { - return true; - } - return (*context)[bb.GetId()]; -} - -void CGProfUse::DoBuildChain(const BB &header, BBChain &chain, const MapleVector *context) { - 
CHECK_FATAL(bb2chain[header.GetId()] == &chain, "bb2chain mis-match"); - BB *bb = chain.GetTail(); - BB *bestSucc = GetBestSucc(*bb, chain, context, true); - while (bestSucc != nullptr) { - BBChain *succChain = bb2chain[bestSucc->GetId()]; - succChain->UpdateSuccChainBeforeMerged(chain, context, readyToLayoutChains); - chain.MergeFrom(succChain); - (void)readyToLayoutChains.erase(succChain); - bb = chain.GetTail(); - bestSucc = GetBestSucc(*bb, chain, context, true); - } - if (debugChainLayout) { - bool inLoop = context != nullptr; - LogInfo::MapleLogger() << "Finish forming " << (inLoop ? "loop" : "func") << " chain: "; - chain.Dump(); - } -} - -BB *CGProfUse::GetBestSucc(BB &bb, const BBChain &chain, const MapleVector *context, bool considerBetterPred) { - // (1) search in succ - CHECK_FATAL(bb2chain[bb.GetId()] == &chain, "bb2chain mis-match"); - uint64 bestEdgeFreq = 0; - BB *bestSucc = nullptr; - auto iterBB = bb.GetSuccsBegin(); - for (uint32 i = 0; i < bb.GetSuccs().size(); ++i, ++iterBB) { - CHECK_FATAL(iterBB != bb.GetSuccsEnd(), "check unexpect BB"); - BB *succ = *iterBB; - CHECK_FATAL(succ, "check Empty BB"); - if (!IsCandidateSucc(bb, *succ, context)) { - continue; - } - if (considerBetterPred && HasBetterLayoutPred(bb, *succ)) { - continue; - } - uint64 currEdgeFreq = bb.GetEdgeFreq(i); // attention: entryBB->succFreq[i] is always 0 - if (bb.GetId() == 0) { // special case for common entry BB - CHECK_FATAL(bb.GetSuccs().size() == 1, "common entry BB should not have more than 1 succ"); - bestSucc = succ; - break; - } - if (currEdgeFreq > bestEdgeFreq) { // find max edge freq - bestEdgeFreq = currEdgeFreq; - bestSucc = succ; - } - } - if (bestSucc != nullptr) { - if (debugChainLayout) { - LogInfo::MapleLogger() << "Select [range1 succ ]: "; - LogInfo::MapleLogger() << bb.GetId() << " -> " << bestSucc->GetId() << std::endl; - } - return bestSucc; - } - - // (2) search in readyToLayoutChains - uint32 bestFreq = 0; - for (auto it = 
readyToLayoutChains.begin(); it != readyToLayoutChains.end(); ++it) { - BBChain *readyChain = *it; - BB *header = readyChain->GetHeader(); - if (!IsCandidateSucc(bb, *header, context)) { - continue; - } - bool useBBFreq = false; - if (useBBFreq) { // use bb freq - if (header->GetFrequency() > bestFreq) { // find max bb freq - bestFreq = header->GetFrequency(); - bestSucc = header; - } - } else { // use edge freq - uint32 subBestFreq = 0; - for (auto *pred : header->GetPreds()) { - uint32 curFreq = static_cast(pred->GetEdgeFreq(*header)); - if (curFreq > subBestFreq) { - subBestFreq = curFreq; - } - } - if (subBestFreq > bestFreq) { - bestFreq = subBestFreq; - bestSucc = header; - } else if (subBestFreq == bestFreq && bestSucc != nullptr && - bb2chain[header->GetId()]->GetId() < bb2chain[bestSucc->GetId()]->GetId()) { - bestSucc = header; - } - } - } - if (bestSucc != nullptr) { - (void)readyToLayoutChains.erase(bb2chain[bestSucc->GetId()]); - if (debugChainLayout) { - LogInfo::MapleLogger() << "Select [range2 ready]: "; - LogInfo::MapleLogger() << bb.GetId() << " -> " << bestSucc->GetId() << std::endl; - } - return bestSucc; - } - - // (3) search left part in context by profile - for (auto freRpoEle : frequencyReversePostOrderBBList) { - if (freRpoEle.frequency > 0) { - BB *candBB = freRpoEle.bb; - if (IsBBInCurrContext(*candBB, context) && bb2chain[candBB->GetId()] != &chain) { - if (debugChainLayout) { - LogInfo::MapleLogger() << "Select [range3 frequency ]: "; - LogInfo::MapleLogger() << bb.GetId() << " -> " << candBB->GetId() << std::endl; - } - return candBB; - } - } else { - break; - } - } - - // (4) search left part in context by topological sequence - const auto &rpoVec = domInfo->GetReversePostOrder(); - bool searchedAgain = false; - for (uint32 i = rpoSearchPos; i < rpoVec.size(); ++i) { - BB *candBB = rpoVec[i]; - if (IsBBInCurrContext(*candBB, context) && bb2chain[candBB->GetId()] != &chain) { - rpoSearchPos = i; - if (debugChainLayout) { - 
LogInfo::MapleLogger() << "Select [range4 rpot ]: "; - LogInfo::MapleLogger() << bb.GetId() << " -> " << candBB->GetId() << std::endl; - } - if (CGOptions::DoEnableHotColdSplit()) { - candBB->SetColdSection(); - } - return candBB; - } - if (i == rpoVec.size() - 1 && !searchedAgain) { - i = 0; - searchedAgain = true; - } - } - return nullptr; -} - -void CGProfUse::InitFrequencyReversePostOrderBBList() { - const auto &rpoVec = domInfo->GetReversePostOrder(); - for (uint32 i = 0; i < rpoVec.size(); ++i) { - BB *cbb = rpoVec[i]; - BBOrderEle bbELe(cbb->GetFrequency(), i, cbb); - frequencyReversePostOrderBBList.emplace(bbELe); - } -} - -bool CGProfUse::HasBetterLayoutPred(const BB &bb, const BB &succ) const { - auto &predList = succ.GetPreds(); - // predList.size() may be 0 if bb is common entry BB - if (predList.size() <= 1) { - return false; - } - uint32 sumEdgeFreq = succ.GetFrequency(); - const double hotEdgeFreqPercent = 0.6; // should further fine tuning - uint64 hotEdgeFreq = static_cast(sumEdgeFreq * hotEdgeFreqPercent); - // if edge freq(bb->succ) contribute more than 60% to succ block freq, no better layout pred than bb - for (auto predIt = predList.begin(); predIt != predList.end(); ++predIt) { - if (*predIt == &bb) { - continue; - } - uint64 edgeFreq = (*predIt)->GetEdgeFreq(succ); - if (edgeFreq > (sumEdgeFreq - hotEdgeFreq)) { - return true; + LogInfo::MapleLogger() << "Finish forming layout : "; + for (auto it : layoutID) { + LogInfo::MapleLogger() << it << " "; } + LogInfo::MapleLogger() << "\n"; } - return false; -} - -bool CGProfUse::IsCandidateSucc(const BB &bb, const BB &succ, const MapleVector *context) { - if (!IsBBInCurrContext(succ, context)) { // succ must be in the current context (current loop or current func) - return false; - } - if (bb2chain[succ.GetId()] == bb2chain[bb.GetId()]) { // bb and succ should belong to different chains - return false; - } - if (succ.GetId() == 1) { // special case, exclude common exit BB - return false; - } - 
return true; } bool CgPgoUse::PhaseRun(maplebe::CGFunc &f) { @@ -610,8 +309,7 @@ bool CgPgoUse::PhaseRun(maplebe::CGFunc &f) { } LiteProfile::BBInfo *bbInfo = f.GetFunction().GetModule()->GetLiteProfile().GetFuncBBProf(f.GetName()); - - /* + /* * Currently, If all the counters of the function are 0, the bbInfo will not be recorded in pgo data. * skip this case. However, it cannot distinguish which is not genereated correct. Need to be improved */ if (!bbInfo) { @@ -641,6 +339,13 @@ bool CgPgoUse::PhaseRun(maplebe::CGFunc &f) { LogInfo::MapleLogger() << "Finial Layout : "; FOR_ALL_BB(bb, &f) { LogInfo::MapleLogger() << bb->GetId() << " "; + // Maintain head and tail BB, because emit phase still will access CFG + if (count == 0) { + f.SetFirstBB(*bb); + } + if (bb->GetNext() == nullptr) { + f.SetLastBB(*bb); + } count++; if (count > f.GetAllBBs().size()) { CHECK_FATAL(false, "infinte loop"); @@ -652,11 +357,6 @@ bool CgPgoUse::PhaseRun(maplebe::CGFunc &f) { MAPLE_TRANSFORM_PHASE_REGISTER(CgPgoUse, cgpgouse) -void RelinkBB(BB &prev, BB &next) { - prev.SetNext(&next); - next.SetPrev(&prev); -} - void CGProfUse::AddBBProf(BB &bb) { if (layoutBBs.empty()) { AddBB(bb); @@ -698,7 +398,7 @@ void CGProfUse::AddBBProf(BB &bb) { } CHECK_FATAL(ftBB, "find ft bb after ifBB failed"); if (&bb == targetBB) { - CHECK_FATAL(!bbSplit.count(ftBB->GetId()), "check split bb"); + CHECK_FATAL(bbSplit.count(ftBB->GetId()) == 0, "check split bb"); LabelIdx fallthruLabel = GetOrCreateBBLabIdx(*ftBB); f->GetTheCFG()->GetInsnModifier()->FlipIfBB(*curBB, fallthruLabel); } else if (&bb != ftBB) { @@ -731,7 +431,7 @@ void CGProfUse::ReTargetSuccBB(BB &bb, BB &fallthru) { bb.SetKind(BB::kBBGoto); } -void CGProfUse::ChangeToFallthruFromGoto(BB &bb) { +void CGProfUse::ChangeToFallthruFromGoto(BB &bb) const { CHECK_FATAL(bb.GetLastMachineInsn(), "Get last insn in GOTO bb failed"); bb.RemoveInsn(*bb.GetLastMachineInsn()); bb.SetKind(BB::kBBFallthru); diff --git 
a/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp index 9f5bd1af9e1984cdcb2e9f287e96c97b5e8495f0..3006c83c1d05aaeeaf1c78546c4d622486bcf477 100644 --- a/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp +++ b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp @@ -19,7 +19,6 @@ #include "args.h" #include "label_creation.h" #include "driver_options.h" -#include "expand128floats.h" #include "isel.h" #include "offset_adjust.h" #include "alignment.h" @@ -28,6 +27,7 @@ #include "reg_alloc.h" #include "target_info.h" #include "standardize.h" +#include "cg_callgraph_reorder.h" #if defined(TARGAARCH64) && TARGAARCH64 #include "aarch64_emitter.h" #include "aarch64_cg.h" @@ -67,6 +67,129 @@ void DumpMIRFunc(MIRFunction &func, const char *msg, bool printAlways = false, c } /* anonymous namespace */ +// a tricky implementation of accessing TLS symbol +// call to __tls_get_addr has been arranged at init_array of specific so/exec +// the TLSLD entry of the module will be recorded in a global accessable symbol +void PrepareForWarmupDynamicTLS(MIRModule &m) { + if (m.GetTdataVarOffset().empty() && m.GetTbssVarOffset().empty()) { + return; + } + auto *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_ptr)); + auto *anchorMirConst = m.GetMemPool()->New(0, *ptrType); + + ArgVector formals(m.GetMPAllocator().Adapter()); + MIRType *voidTy = GlobalTables::GetTypeTable().GetVoid(); + auto *tlsWarmup = m.GetMIRBuilder()->CreateFunction("__tls_address_warmup_" + m.GetTlsAnchorHashString(), + *voidTy, formals); + auto *warmupBody = tlsWarmup->GetCodeMempool()->New(); + MIRSymbol *tempAnchorSym = nullptr; + AddrofNode *tlsAddrNode = nullptr; + DassignNode *dassignNode = nullptr; + + if (!m.GetTdataVarOffset().empty()) { + auto *tdataAnchorSym = m.GetMIRBuilder()->GetOrCreateGlobalDecl("tdata_addr_" + m.GetTlsAnchorHashString(), + *ptrType); + tdataAnchorSym->SetKonst(anchorMirConst); + tempAnchorSym = 
m.GetMIRBuilder()->GetOrCreateGlobalDecl(".tdata_start_" + m.GetTlsAnchorHashString(), *ptrType); + tempAnchorSym->SetIsDeleted(); + tlsAddrNode = tlsWarmup->GetCodeMempool()->New(OP_addrof, PTY_ptr, tempAnchorSym->GetStIdx(), 0); + dassignNode = tlsWarmup->GetCodeMempool()->New(); + dassignNode->SetStIdx(tdataAnchorSym->GetStIdx()); + dassignNode->SetFieldID(0); + dassignNode->SetOpnd(tlsAddrNode, 0); + warmupBody->AddStatement(dassignNode); + } + + if (!m.GetTbssVarOffset().empty()) { + auto *tbssAnchorSym = m.GetMIRBuilder()->GetOrCreateGlobalDecl("tbss_addr_" + m.GetTlsAnchorHashString(), *ptrType); + tbssAnchorSym->SetKonst(anchorMirConst); + tempAnchorSym = m.GetMIRBuilder()->GetOrCreateGlobalDecl(".tbss_start_" + m.GetTlsAnchorHashString(), *ptrType); + tempAnchorSym->SetIsDeleted(); + tlsAddrNode = tlsWarmup->GetCodeMempool()->New(OP_addrof, PTY_ptr, tempAnchorSym->GetStIdx(), 0); + dassignNode = tlsWarmup->GetCodeMempool()->New(); + dassignNode->SetStIdx(tbssAnchorSym->GetStIdx()); + dassignNode->SetFieldID(0); + dassignNode->SetOpnd(tlsAddrNode, 0); + warmupBody->AddStatement(dassignNode); + } + + tlsWarmup->SetBody(warmupBody); + tlsWarmup->SetAttr(FUNCATTR_section); + tlsWarmup->GetFuncAttrs().SetPrefixSectionName(".init_array"); + m.AddFunction(tlsWarmup); +} + + +// calculate all local dynamic TLS offset from the anchor +void CalculateWarmupDynamicTLS(MIRModule &m) { + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + MIRType *mirType = nullptr; + uint64 tdataOffset = 0; + uint64 tbssOffset = 0; + MapleMap &tdataVarOffset = m.GetTdataVarOffset(); + MapleMap &tbssVarOffset = m.GetTbssVarOffset(); + + for (auto it = m.GetFunctionList().begin(); it != m.GetFunctionList().end(); ++it) { + MIRFunction *mirFunc = *it; + if (mirFunc->GetBody() == nullptr || mirFunc->GetSymTab() == nullptr) { + continue; + } + MIRSymbolTable *lSymTab = mirFunc->GetSymTab(); + size_t lsize = lSymTab->GetSymbolTableSize(); + for (size_t i = 0; i < lsize; i++) { 
+ const MIRSymbol *mirSymbol = lSymTab->GetSymbolFromStIdx(static_cast(i)); + mirType = mirSymbol->GetType(); + uint64 align = 0; + if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass || + mirType->GetKind() == kTypeArray || mirType->GetKind() == kTypeUnion) { + align = k8ByteSize; + } else { + align = mirType->GetAlign(); + } + if (mirSymbol == nullptr || mirSymbol->GetStorageClass() != kScPstatic) { + continue; + } + if (mirSymbol->IsThreadLocal()) { + if (!mirSymbol->IsConst()) { + tbssOffset = RoundUp(tbssOffset, align); + tbssVarOffset[mirSymbol] = tbssOffset; + tbssOffset += mirType->GetSize(); + } else { + tdataOffset = RoundUp(tdataOffset, align); + tdataVarOffset[mirSymbol] = tdataOffset; + tdataOffset += mirType->GetSize(); + } + } + } + } + + for (size_t i = 0; i < size; ++i) { + const MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast(i)); + if (mirSymbol == nullptr || mirSymbol->GetStorageClass() == kScExtern) { + continue; + } + mirType = mirSymbol->GetType(); + uint64 align = 0; + if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass || + mirType->GetKind() == kTypeArray || mirType->GetKind() == kTypeUnion) { + align = k8ByteSize; + } else { + align = mirType->GetAlign(); + } + if (mirSymbol->IsThreadLocal()) { + if (!mirSymbol->IsConst()) { + tbssOffset = RoundUp(tbssOffset, align); + tbssVarOffset[mirSymbol] = tbssOffset; + tbssOffset += mirType->GetSize(); + } else { + tdataOffset = RoundUp(tdataOffset, align); + tdataVarOffset[mirSymbol] = tdataOffset; + tdataOffset += mirType->GetSize(); + } + } + } +} + void CgFuncPM::GenerateOutPutFile(MIRModule &m) const { CHECK_FATAL(cg != nullptr, "cg is null"); CHECK_FATAL(cg->GetEmitter(), "emitter is null"); @@ -237,6 +360,21 @@ void CgFuncPM::SweepUnusedStaticSymbol(MIRModule &m) const { } void InitFunctionPriority(std::map &prioritylist) { + std::string reorderAlgo = CGOptions::GetFunctionReorderAlgorithm(); + if 
(!reorderAlgo.empty()) { + std::string reorderProfile = CGOptions::GetFunctionReorderProfile(); + if (reorderProfile.empty()) { + LogInfo::MapleLogger() << "WARN: function reorder need profile" + << "\n"; + } + if (reorderAlgo == "call-chain-clustering") { + prioritylist = ReorderAccordingProfile(reorderProfile); + } else { + LogInfo::MapleLogger() << "WARN: function reorder algorithm no support" + << "\n"; + } + return; + } if (CGOptions::GetFunctionPriority() != "") { std::string fileName = CGOptions::GetFunctionPriority(); std::ifstream in(fileName); @@ -264,7 +402,7 @@ void InitFunctionPriority(std::map &prioritylist) { } } -void MarkFunctionPriority(std::map &prioritylist, CGFunc &f) { +static void MarkFunctionPriority(std::map &prioritylist, CGFunc &f) { if (!prioritylist.empty()) { if (prioritylist.count(f.GetName()) != 0) { f.SetPriority(prioritylist[f.GetName()]); @@ -272,6 +410,28 @@ void MarkFunctionPriority(std::map &prioritylist, CGFunc &f } } +static std::optional> ReorderFunction(MIRModule &m, + const std::map &priorityList) { + if (!opts::linkerTimeOpt.IsEnabledByUser()) { + return std::nullopt; + } + if (priorityList.empty()) { + return std::nullopt; + } + MapleList reorderdFunctionList(m.GetMPAllocator().Adapter()); + std::vector hotFunctions(priorityList.size()); + for (auto f : m.GetFunctionList()) { + if (priorityList.find(f->GetName()) != priorityList.end()) { + CHECK_FATAL(hotFunctions.size() > priorityList.at(f->GetName()) - 1, "func priority out of range"); + hotFunctions[priorityList.at(f->GetName()) - 1] = f; + } else { + reorderdFunctionList.push_back(f); + } + } + std::copy(hotFunctions.begin(), hotFunctions.end(), std::back_inserter(reorderdFunctionList)); + return reorderdFunctionList; +} + /* =================== new phase manager =================== */ #ifdef RA_PERF_ANALYSIS #include "reg_alloc_lsra.h" @@ -292,6 +452,13 @@ bool CgFuncPM::PhaseRun(MIRModule &m) { std::map priorityList; InitFunctionPriority(priorityList); + auto 
reorderedFunctions = ReorderFunction(m, priorityList); + + if (opts::aggressiveTlsLocalDynamicOpt) { + m.SetTlsAnchorHashString(); + PrepareForWarmupDynamicTLS(m); + } + uint32 countFuncId = 0; unsigned long rangeNum = 0; @@ -300,10 +467,19 @@ bool CgFuncPM::PhaseRun(MIRModule &m) { auto admMempool = AllocateMemPoolInPhaseManager("cg phase manager's analysis data manager mempool"); auto *serialADM = GetManagerMemPool()->New(*(admMempool.get())); - for (auto it = m.GetFunctionList().begin(); it != m.GetFunctionList().end(); ++it) { + auto *funcList = &m.GetFunctionList(); + if (reorderedFunctions) { + funcList = &reorderedFunctions.value(); + } + for (auto it = funcList->begin(); it != funcList->end(); ++it) { ASSERT(serialADM->CheckAnalysisInfoEmpty(), "clean adm before function run"); MIRFunction *mirFunc = *it; if (mirFunc->GetBody() == nullptr) { + if (mirFunc->GetAttr(FUNCATTR_visibility_hidden)) { + (void)cg->GetEmitter()->Emit("\t.hidden\t").Emit(mirFunc->GetName()).Emit("\n"); + } else if (mirFunc->GetAttr(FUNCATTR_visibility_protected)) { + (void)cg->GetEmitter()->Emit("\t.protected\t").Emit(mirFunc->GetName()).Emit("\n"); + } continue; } @@ -325,12 +501,6 @@ bool CgFuncPM::PhaseRun(MIRModule &m) { /* LowerIR. 
*/ m.SetCurFunction(mirFunc); - if (opts::expand128Floats) { - DumpMIRFunc(*mirFunc, "************* before Expand128Floats **************"); - Expand128Floats expand128Floats(m); - expand128Floats.ProcessFunc(mirFunc); - } - if (cg->DoConstFold()) { DumpMIRFunc(*mirFunc, "************* before ConstantFold **************"); ConstantFold cf(m); @@ -414,12 +584,9 @@ void CgFuncPM::InitProfile(MIRModule &m) const { } } if (!CGOptions::GetLiteProfile().empty()) { - bool handleSucc = m.GetLiteProfile().HandleLitePGOFile(CGOptions::GetLiteProfile(), m.GetFileName()); - CHECK_FATAL(handleSucc, "Error: Open Lite PGO input file failed"); - if (!handleSucc) { - LogInfo::MapleLogger() << "WARN: Handle Lite PGO input file " << CGOptions::GetLiteProfile() << - "failed in mplcg\n"; - } + bool handleSucc = m.GetLiteProfile().HandleLitePGOFile(CGOptions::GetLiteProfile(), m); + CHECK_FATAL(handleSucc, "Error: Handle Lite PGO input file ", + CGOptions::GetLiteProfile().c_str(), "failed in mplcg"); } } @@ -463,7 +630,7 @@ void CgFuncPM::CreateCGAndBeCommon(MIRModule &m) { #if TARGAARCH64 if (!m.IsCModule()) { - CGOptions::EnableFramePointer(); + CGOptions::SetFramePointer(CGOptions::kAllFP); } #endif } diff --git a/src/mapleall/maple_be/src/cg/cg_phi_elimination.cpp b/src/mapleall/maple_be/src/cg/cg_phi_elimination.cpp index 738c45d3aae251ef8e8068005e1be8b61f9bcd28..d9a3d15a7fdb42bb0713b634cb17656fe5ad7a79 100644 --- a/src/mapleall/maple_be/src/cg/cg_phi_elimination.cpp +++ b/src/mapleall/maple_be/src/cg/cg_phi_elimination.cpp @@ -39,8 +39,12 @@ void PhiEliminate::TranslateTSSAToCSSA() { #endif PlaceMovInPredBB(fBBId, CreateMov(tempMovDest, *(phiOpndIt.second))); } - Insn &movInsn = CreateMov(destReg, tempMovDest); - bb->ReplaceInsn(*phiInsnIt.second, movInsn); + if (!destReg.IsOfCC()) { + Insn &movInsn = CreateMov(destReg, tempMovDest); + bb->ReplaceInsn(*phiInsnIt.second, movInsn); + } else { + bb->RemoveInsn(*phiInsnIt.second); + } } } diff --git 
a/src/mapleall/maple_be/src/cg/cg_profile_use.cpp b/src/mapleall/maple_be/src/cg/cg_profile_use.cpp index ba5943117fdcbb542b413e043267cc1d6ee6dacf..165fafc6dd8c477d742feceb7d725e708ce4d575 100644 --- a/src/mapleall/maple_be/src/cg/cg_profile_use.cpp +++ b/src/mapleall/maple_be/src/cg/cg_profile_use.cpp @@ -33,7 +33,7 @@ void CgProfUse::setupProf() { (static_cast(funcProf->GetStmtFreq(bb->GetLastStmt()->GetStmtID())) >= 0)) { bb->SetProfFreq(funcProf->GetStmtFreq(bb->GetLastStmt()->GetStmtID())); } else { -#if DEBUG +#if defined(DEBUG) && DEBUG if (!CGOptions::IsQuiet()) { LogInfo::MapleLogger() << "BB" << bb->GetId() << " : frequency undetermined\n"; } @@ -71,7 +71,7 @@ bool CGProfUse::PhaseRun(maplebe::CGFunc &f) { CgProfUse cgprofuse(f, *GetPhaseMemPool()); cgprofuse.setupProf(); -#if DEBUG +#if defined(DEBUG) && DEBUG if (CGOptions::FuncFilter(f.GetName())) { DotGenerator::GenerateDot("after-CGProfUse", f, f.GetMirModule(), false, f.GetName()); } diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp index a7bbf11eb76ecabc85d626cf06fb95cf646bef31..a083f876876ed9b479a82a627fd6648a3a2cb68f 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssa.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -140,7 +140,7 @@ void CGSSAInfo::RenameBB(BB &bb) { RenameVariablesForBB(bb.GetId()); /* stack pop up */ for (size_t i = 0; i < vRegStk.size(); ++i) { - if (vRegStk[i].empty() || !IsNewVersionPushed(i)) { + if (vRegStk[i].empty() || !IsNewVersionPushed(static_cast(i))) { continue; } // only need to pop top, because we only keep the newest version on the top diff --git a/src/mapleall/maple_be/src/cg/cg_ssa_pre.cpp b/src/mapleall/maple_be/src/cg/cg_ssa_pre.cpp index 17c1f50da3ea9e8a58f3deb06388354b48416308..420256480c90fd963d91a4b3c4ab3fa14f3e1e89 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssa_pre.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssa_pre.cpp @@ -382,7 +382,7 @@ void SSAPre::Rename() { // ================ Step 1: insert phis 
================ -// form pih occ based on the real occ in workCand->realOccs; result is +// form phi occ based on the real occ in workCand->realOccs; result is // stored in phiDfns void SSAPre::FormPhis() { for (Occ *occ : realOccs) { diff --git a/src/mapleall/maple_be/src/cg/cg_ssu_pre.cpp b/src/mapleall/maple_be/src/cg/cg_ssu_pre.cpp index 65e5529a8e4306e85d6b7aadffe8d784ba047e40..8394ec0b41d2ad4a4cbdbd955becdec92c202046 100644 --- a/src/mapleall/maple_be/src/cg/cg_ssu_pre.cpp +++ b/src/mapleall/maple_be/src/cg/cg_ssu_pre.cpp @@ -19,7 +19,7 @@ namespace maplebe { // ================ Step 6: Code Motion ================ void SSUPre::CodeMotion() { - // pass 1 only donig insertion + // pass 1 only doing insertion for (SOcc *occ : allOccs) { if (occ->occTy != kSOccLambdaRes) { continue; @@ -40,7 +40,9 @@ void SSUPre::CodeMotion() { workCand->restoreAtEpilog = true; break; } - workCand->restoreAtExitBBs.insert(realOcc->cgbb->GetId()); + if (realOcc->cgbb != cgFunc->GetCommonEntryBB()) { + workCand->restoreAtExitBBs.insert(realOcc->cgbb->GetId()); + } } } if (enabledDebug) { diff --git a/src/mapleall/maple_be/src/cg/cg_validbit_opt.cpp b/src/mapleall/maple_be/src/cg/cg_validbit_opt.cpp index 586f1135fafff2aec1edeb706254e2424e7363dc..cb665c4c87db68d8419e3e8c41861578f185470f 100644 --- a/src/mapleall/maple_be/src/cg/cg_validbit_opt.cpp +++ b/src/mapleall/maple_be/src/cg/cg_validbit_opt.cpp @@ -15,9 +15,12 @@ #include "cg_validbit_opt.h" #include "mempool.h" #include "aarch64_validbit_opt.h" +#include "aarch64_reg_coalesce.h" namespace maplebe { -InsnSet ValidBitPattern::GetAllUseInsn(const RegOperand &defReg) { + + +InsnSet ValidBitPattern::GetAllUseInsn(const RegOperand &defReg) const { InsnSet allUseInsn; if ((ssaInfo != nullptr) && defReg.IsSSAForm()) { VRegVersion *defVersion = ssaInfo->FindSSAVersion(defReg.GetRegisterNumber()); @@ -126,14 +129,8 @@ void ValidBitOpt::Run() { * Set validbit of regOpnd before optimization */ RectifyValidBitNum(); - FOR_ALL_BB(bb, 
cgFunc) { - FOR_BB_INSNS(insn, bb) { - if (!insn->IsMachineInstruction()) { - continue; - } - DoOpt(*bb, *insn); - } - } + DoOpt(); + cgDce->DoDce(); /* * Recover validbit of regOpnd after optimization */ @@ -143,14 +140,18 @@ void ValidBitOpt::Run() { bool CgValidBitOpt::PhaseRun(maplebe::CGFunc &f) { CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); CHECK_FATAL(ssaInfo != nullptr, "Get ssaInfo failed"); - auto *vbOpt = f.GetCG()->CreateValidBitOpt(*GetPhaseMemPool(), f, *ssaInfo); + LiveIntervalAnalysis *ll = GET_ANALYSIS(CGliveIntervalAnalysis, f); + CHECK_FATAL(ll != nullptr, "Get ll failed"); + auto *vbOpt = f.GetCG()->CreateValidBitOpt(*GetPhaseMemPool(), f, *ssaInfo, *ll); CHECK_FATAL(vbOpt != nullptr, "vbOpt instance create failed"); vbOpt->Run(); + ll->ClearBFS(); return true; } void CgValidBitOpt::GetAnalysisDependence(AnalysisDep &aDep) const { aDep.AddRequired(); + aDep.AddRequired(); aDep.AddPreserved(); } MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgValidBitOpt, cgvalidbitopt) diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp index 41095de945c9661bbd02c0533398e3ece61c4507..fe524fb9babf75dba3423562171020c7436d3d75 100644 --- a/src/mapleall/maple_be/src/cg/cgbb.cpp +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -179,7 +179,7 @@ void BB::InsertAtBeginning(BB &bb) { bb.firstInsn = bb.lastInsn = nullptr; } -void BB::InsertBeforeInsn(BB &fromBB, Insn &beforeInsn) { +void BB::InsertBeforeInsn(BB &fromBB, Insn &beforeInsn) const { if (fromBB.firstInsn == nullptr) { /* nothing to add */ return; } @@ -332,7 +332,7 @@ void BB::Dump() const { LogInfo::MapleLogger() << " taken"; } } - LogInfo::MapleLogger() << "> <" << id << "> "; + LogInfo::MapleLogger() << "> <" << GetID() << "> "; if (isCleanup) { LogInfo::MapleLogger() << "[is_cleanup] "; } @@ -341,11 +341,11 @@ void BB::Dump() const { } LogInfo::MapleLogger() << "succs "; for (auto *bb : succs) { - LogInfo::MapleLogger() << bb->id << " "; + LogInfo::MapleLogger() << 
bb->GetID() << " "; } LogInfo::MapleLogger() << "preds "; for (auto *bb : preds) { - LogInfo::MapleLogger() << bb->id << " "; + LogInfo::MapleLogger() << bb->GetID() << " "; } LogInfo::MapleLogger() << "frequency:" << frequency << "===\n"; diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp index 85d1c7e236eb97ffd312e91b4b497a9c6e43d781..b7bae9b9fb5dda314badabc10ab5f89944521f60 100644 --- a/src/mapleall/maple_be/src/cg/cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -40,9 +40,6 @@ Operand *HandleDread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { (void)parent; auto ®ReadNode = static_cast(expr); - if (regReadNode.GetRegIdx() == -kSregRetval0 || regReadNode.GetRegIdx() == -kSregRetval1) { - return &cgFunc.ProcessReturnReg(regReadNode.GetPrimType(), -(regReadNode.GetRegIdx())); - } return cgFunc.SelectRegread(regReadNode); } @@ -635,6 +632,10 @@ Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) // int case INTRN_C_ffs: return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "ffs"); + case INTRN_C_fmaxl: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_f128, "fmaxl"); + case INTRN_C_fminl: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_f128, "fminl"); // libc mem* and str* functions as intrinsicops case INTRN_C_memcmp: return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "memcmp"); @@ -980,6 +981,10 @@ Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) case INTRN_vector_mov_narrow_v4i32: case INTRN_vector_mov_narrow_v4u32: case INTRN_vector_mov_narrow_v8i16: case INTRN_vector_mov_narrow_v8u16: return HandleVectorMovNarrow(intrinsicopNode, cgFunc); + + case INTRN_C___tls_get_tbss_anchor: case INTRN_C___tls_get_tdata_anchor: + return cgFunc.SelectIntrinsicOpLoadTlsAnchor(intrinsicopNode, parent); + default: { 
if (!intrinsicDesc.IsVectorOp()) { CHECK_FATAL(false, "Unsupported intrinsicop."); @@ -1457,7 +1462,8 @@ CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, loops(allocator.Adapter()), lmbcParamVec(allocator.Adapter()), scpIdSet(allocator.Adapter()), - shortFuncName(cg.ExtractFuncName(mirFunc.GetName()) + "." + std::to_string(funcId), &memPool) { + shortFuncName(cg.ExtractFuncName(mirFunc.GetName()) + "." + std::to_string(funcId), &memPool), + adrpLabels(allocator.Adapter()) { mirModule.SetCurFunction(&func); SetMemlayout(*GetCG()->CreateMemLayout(memPool, beCommon, func, allocator)); GetMemlayout()->SetCurrFunction(*this); @@ -1576,6 +1582,14 @@ void CGFunc::RemoveUnreachableBB() { } } +void CGFunc::MarkAdrpLabelBB() { + for (auto lIdx : GetAdrpLabels()) { + BB *targetBB = GetBBFromLab2BBMap(lIdx); + ASSERT_NOT_NULL(targetBB); + targetBB->SetIsAdrpLabel(); + } +} + Insn &CGFunc::BuildLocInsn(int64 fileNum, int64 lineNum, int64 columnNum) { Operand *o0 = CreateDbgImmOperand(fileNum); Operand *o1 = CreateDbgImmOperand(lineNum); @@ -1706,7 +1720,7 @@ void CGFunc::CreateLmbcFormalParamInfo() { } primType = type->GetPrimType(); offset = stackOffset; - typeSize = static_cast(GetBecommon().GetTypeSize(tyIdx)); + typeSize = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize()); stackOffset += (typeSize + 7) & (-8); LmbcFormalParamInfo *info = GetMemoryPool()->New(primType, offset, typeSize); lmbcParamVec.push_back(info); @@ -2100,6 +2114,7 @@ void CGFunc::HandleFunction() { theCFG = memPool->New(*this); theCFG->MarkLabelTakenBB(); theCFG->BuildCFG(); + MarkAdrpLabelBB(); RemoveUnreachableBB(); AddCommonExitBB(); if (mirModule.GetSrcLang() != kSrcLangC) { @@ -2248,7 +2263,7 @@ void CGFunc::ClearLoopInfo() { } void CGFunc::DumpCFGToDot(const std::string &fileNamePrefix) { - std::ofstream file(fileNamePrefix + GetName()); + std::ofstream file(fileNamePrefix + GetName() + ".dot"); file << "digraph {" << std::endl; for (auto 
*bb : GetAllBBs()) { if (bb == nullptr) { @@ -2289,6 +2304,20 @@ void CGFunc::PatchLongBranch() { } } +// Cgirverify phase function: all insns will be verified before cgemit. +void CGFunc::VerifyAllInsn() { + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS(insn, bb) { + if(!VERIFY_INSN(insn)) { + LogInfo::MapleLogger() << "Illegal insn is:\n"; + insn->Dump(); + LogInfo::MapleLogger() << "Function name is:\n" << GetName() << "\n"; + CHECK_FATAL_FALSE("The problem is illegal insn, info is above."); + } + } + } +} + void CGFunc::UpdateAllRegisterVregMapping(MapleMap &newMap) { vregsToPregsMap.clear(); for (auto &it : std::as_const(newMap)) { @@ -2353,4 +2382,13 @@ bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) { return false; } MAPLE_TRANSFORM_PHASE_REGISTER(CgFixCFLocOsft, dbgfixcallframeoffsets) + +bool CgVerify::PhaseRun(maplebe::CGFunc &f) { + f.VerifyAllInsn(); + if (!f.GetCG()->GetCGOptions().DoEmitCode() || f.GetCG()->GetCGOptions().DoDumpCFG()) { + f.DumpCFG(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgVerify, cgirverify) } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/control_dep_analysis.cpp b/src/mapleall/maple_be/src/cg/control_dep_analysis.cpp index a154859f4d646f6582bf2ae0123a74443d04ad73..90385e4489fc518e74232c881e9f9a66b3d84f19 100644 --- a/src/mapleall/maple_be/src/cg/control_dep_analysis.cpp +++ b/src/mapleall/maple_be/src/cg/control_dep_analysis.cpp @@ -18,10 +18,18 @@ namespace maplebe { void ControlDepAnalysis::Run() { - pdom->GeneratePdomTreeDot(); - BuildCFGInfo(); - ConstructFCDG(); - ComputeRegions(); + if (CONTROL_DEP_ANALYSIS_DUMP) { + pdom->GeneratePdomTreeDot(); + } + if (cgFunc.IsAfterRegAlloc() || isSingleBB) { + // For local scheduling + ComputeSingleBBRegions(); + } else { + // For global scheduling based on regions + BuildCFGInfo(); + ConstructFCDG(); + ComputeRegions(false); + } } /* Augment CFG info */ @@ -47,7 +55,6 @@ void ControlDepAnalysis::BuildCFGInfo() { BB *destBB = cfgEdge->GetDestBB(); 
CHECK_FATAL(srcBB != nullptr, "get srcBB of cfgEdge failed"); if (srcBB == cgFunc.GetFirstBB()) { - CHECK_FATAL(srcBB->GetSuccsSize() == 1, "EntryBB should have only one succ"); cfgEdge->SetCondition(0); continue; } else if (srcBB == cgFunc.GetCommonExitBB()) { @@ -62,6 +69,7 @@ void ControlDepAnalysis::BuildCFGInfo() { cfgEdge->SetCondition(0); break; case BB::kBBIntrinsic: + ASSERT_NOT_NULL(srcBB->GetLastMachineInsn()); if (!srcBB->GetLastMachineInsn()->IsBranch()) { // set default cond number cfgEdge->SetCondition(0); @@ -140,7 +148,238 @@ void ControlDepAnalysis::ConstructFCDG() { * Traverse the post-dominator tree by means of a post-order to * assure that all children in the post-dominator tree are visited before their parent. */ -void ControlDepAnalysis::ComputeRegions() { +void ControlDepAnalysis::ComputeRegions(bool doCDRegion) { + if (doCDRegion) { + ComputeSameCDRegions(false); + } else { + ComputeGeneralNonLinearRegions(); + } +} + +/* + * This algorithm computes the general non-linear region, including: + * 1). A reducible loops which have a single-in edge + * 2). A fallthrough path not in any regions + * 3). A single-bb not in other regions as a region + */ +void ControlDepAnalysis::ComputeGeneralNonLinearRegions() { + /* If ebo phase was removed, must recalculate loop info */ + /* 1. Find all innermost loops */ + std::vector innermostLoops; + std::unordered_map visited; + MapleVector &loopInfos = cgFunc.GetLoops(); + for (auto loop : loopInfos) { + FindInnermostLoops(innermostLoops, visited, loop); + } + + /* 2. 
Find reducible loops as a region */ + for (auto innerLoop : innermostLoops) { + /* + * Avoid the case as following: + * | + * | + * ----- BB4 ---------- + * | / \ | + * | / \ | + * BB3 BB5 | + * / \ | + * / \ | + * BB6 BB10 - + * | + * | + * EXIT + * By the current loop analysis, {BB4, BB3, BB5, BB10} are in the same loop, which the headerBB is BB4 + * By the dom and pdom analysis, {BB4 dom BB5} and {BB5 pdom BB4}, BB4 and BB5 are EQUIVALENT, + * but they cannot schedule in parallel. + * + * The above case may cause loop-carried dependency instructions to be scheduled, and currently this + * dependency is not considered. + */ + if (innerLoop->GetBackedge().size() > 1) { + continue; + } + + bool isReducible = true; + BB *header = innerLoop->GetHeader(); + for (auto member : innerLoop->GetLoopMembers()) { + if (!dom->Dominate(*header, *member)) { + isReducible = false; + } + } + if (isReducible) { + auto *region = cdgMemPool.New(CDGRegionId(lastRegionId++), cdgAlloc); + CDGNode *headerNode = header->GetCDGNode(); + CHECK_FATAL(headerNode != nullptr, "get cdgNode from bb failed"); + region->SetRegionRoot(*headerNode); + for (auto member : innerLoop->GetLoopMembers()) { + CDGNode *memberNode = member->GetCDGNode(); + CHECK_FATAL(memberNode != nullptr, "get cdgNode from bb failed"); + memberNode->SetRegion(*region); + } + if (AddRegionNodesInTopologicalOrder(*region, *headerNode, innerLoop->GetLoopMembers())) { + fcdg->AddRegion(*region); + } + } + } + + /* 3. 
Find fallthrough path not in any regions as a region */ + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + std::vector regionMembers; + CDGNode *cdgNode = bb->GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + if (!cdgNode->IsVisitedInExtendedFind() && bb->GetSuccsSize() == 1 && cdgNode->GetRegion() == nullptr) { + // Nodes in the region are in the order of topology in this way + FindFallthroughPath(regionMembers, bb, true); + auto *region = cdgMemPool.New(CDGRegionId(lastRegionId++), cdgAlloc); + region->SetRegionRoot(*cdgNode); + for (auto memberNode : regionMembers) { + region->AddCDGNode(memberNode); + memberNode->SetRegion(*region); + } + fcdg->AddRegion(*region); + regionMembers.clear(); + } + } + + /* 4. Create region for the remaining BBs that are not in any region */ + CreateRegionForSingleBB(); +} + +void ControlDepAnalysis::FindFallthroughPath(std::vector ®ionMembers, BB *curBB, bool isRoot) { + CHECK_FATAL(curBB != nullptr, "invalid bb"); + CDGNode *curNode = curBB->GetCDGNode(); + CHECK_FATAL(curNode != nullptr, "get cdgNode from bb failed"); + if (curNode->IsVisitedInExtendedFind()) { + return; + } + curNode->SetVisitedInExtendedFind(); + if (isRoot) { + if (curBB->GetSuccsSize() == 1 && curNode->GetRegion() == nullptr) { + regionMembers.emplace_back(curNode); + } else { + return; + } + } else { + if (curBB->GetPreds().size() == 1 && curBB->GetSuccsSize() == 1 && curNode->GetRegion() == nullptr) { + regionMembers.emplace_back(curNode); + } else { + return; + } + } + FindFallthroughPath(regionMembers, *curBB->GetSuccsBegin(), false); +} + +void ControlDepAnalysis::CreateRegionForSingleBB() { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + CDGNode *cdgNode = bb->GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + if (cdgNode->GetRegion() == nullptr) { + auto *region = cdgMemPool.New(CDGRegionId(lastRegionId++), cdgAlloc); + 
region->AddCDGNode(cdgNode); + cdgNode->SetRegion(*region); + region->SetRegionRoot(*cdgNode); + fcdg->AddRegion(*region); + } + } +} + +/* + * Recursive search for innermost loop + */ +void ControlDepAnalysis::FindInnermostLoops(std::vector &innermostLoops, + std::unordered_map &visited, CGFuncLoops *loop) { + if (loop == nullptr) { + return; + } + auto it = visited.find(loop); + if (it != visited.end() && it->second) { + return; + } + visited.emplace(loop, true); + const MapleVector &innerLoops = loop->GetInnerLoops(); + if (innerLoops.empty()) { + innermostLoops.emplace_back(loop); + } else { + for (auto innerLoop : innerLoops) { + FindInnermostLoops(innermostLoops, visited, innerLoop); + } + } +} + +bool ControlDepAnalysis::AddRegionNodesInTopologicalOrder(CDGRegion ®ion, CDGNode &root, + const MapleVector &members) { + /* Init predSum for memberNode except for root in region */ + for (auto bb : members) { + CDGNode *cdgNode = bb->GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + if (cdgNode == &root) { + continue; + } + int32 predSumInRegion = 0; + for (auto predIt = bb->GetPredsBegin(); predIt != bb->GetPredsEnd(); ++predIt) { + CDGNode *predNode = (*predIt)->GetCDGNode(); + CHECK_FATAL(predNode != nullptr, "get CDGNode from bb failed"); + if (predNode->GetRegion() == ®ion) { + predSumInRegion++; + } + } + cdgNode->InitPredNodeSumInRegion(predSumInRegion); + cdgNode->SetVisitedInTopoSort(false); + } + + /* Topological sort */ + std::queue topoQueue; + topoQueue.push(&root); + while (!topoQueue.empty()) { + CDGNode *curNode = topoQueue.front(); + topoQueue.pop(); + region.AddCDGNode(curNode); + + for (auto bb : members) { + CDGNode *memberNode = bb->GetCDGNode(); + CHECK_FATAL(memberNode != nullptr, "get cdgNode from bb failed"); + if (memberNode == &root || memberNode->IsVisitedInTopoSort()) { + continue; + } + for (auto predIt = bb->GetPredsBegin(); predIt != bb->GetPredsEnd(); ++predIt) { + CDGNode *predNode = 
(*predIt)->GetCDGNode(); + CHECK_FATAL(predNode != nullptr, "get cdgNode from bb failed"); + if (predNode == curNode) { + memberNode->DecPredNodeSumInRegion(); + } + } + if (memberNode->IsAllPredInRegionProcessed()) { + topoQueue.push(memberNode); + memberNode->SetVisitedInTopoSort(true); + } + } + } + /* + * To avoid irreducible loops in reducible loops, need to modify the loop analysis algorithm in the future. + */ + if (region.GetRegionNodeSize() != members.size()) { + return false; + } + return true; +} + +/* + * This region computing algorithm is based on this paper: + * The Program Dependence Graph and Its Use in Optimization + * It traverses the post-dominator tree by means of a post-order to assure that + * all children in the post-dominator tree are visited before their parent. + * + * The region is non-linear too. + * If cdgNodes that do not have any control dependency are divided into a region, the region is multi-root, + * which is not supported in inter-block data dependence analysis + */ +void ControlDepAnalysis::ComputeSameCDRegions(bool considerNonDep) { // The default bbId starts from 1 std::vector visited(fcdg->GetFCDGNodeSize(), false); for (uint32 bbId = 1; bbId < fcdg->GetFCDGNodeSize(); ++bbId) { @@ -148,7 +387,9 @@ void ControlDepAnalysis::ComputeRegions() { ComputeRegionForCurNode(bbId, visited); } } - ComputeRegionForNonDepNodes(); + if (considerNonDep) { + ComputeRegionForNonDepNodes(); + } } /* Nodes that don't have any control dependency are divided into a region */ @@ -220,7 +461,7 @@ void ControlDepAnalysis::CreateAndDivideRegion(uint32 pBBId) { } /* Check whether the region corresponding to the control dependence set exists */ -CDGRegion *ControlDepAnalysis::FindExistRegion(CDGNode &node) { +CDGRegion *ControlDepAnalysis::FindExistRegion(CDGNode &node) const { MapleVector &allRegions = fcdg->GetAllRegions(); MapleVector &curCDs = node.GetAllInEdges(); // Nodes that don't have control dependencies are processed in a unified method at 
last @@ -316,6 +557,21 @@ CDGEdge *ControlDepAnalysis::BuildControlDependence(const BB &fromBB, const BB & return cdgEdge; } +CDGRegion *ControlDepAnalysis::CreateFCDGRegion(CDGNode &curNode) { + MapleVector cdEdges = curNode.GetAllInEdges(); + auto *region = cdgMemPool.New(CDGRegionId(lastRegionId++), cdgAlloc); + region->AddCDEdgeSet(cdEdges); + region->AddCDGNode(&curNode); + fcdg->AddRegion(*region); + curNode.SetRegion(*region); + return region; +} + +void ControlDepAnalysis::ComputeSingleBBRegions() { + CreateAllCDGNodes(); + CreateRegionForSingleBB(); +} + /* Create CDGNode for every BB */ void ControlDepAnalysis::CreateAllCDGNodes() { fcdg = cdgMemPool.New(cgFunc, cdgAlloc); @@ -338,14 +594,40 @@ void ControlDepAnalysis::CreateAllCDGNodes() { fcdg->AddFCDGNode(*exitNode); } -CDGRegion *ControlDepAnalysis::CreateFCDGRegion(CDGNode &curNode) { - MapleVector cdEdges = curNode.GetAllInEdges(); - auto *region = cdgMemPool.New(CDGRegionId(lastRegionId++), cdgAlloc); - region->AddCDEdgeSet(cdEdges); - region->AddCDGNode(&curNode); - fcdg->AddRegion(*region); - curNode.SetRegion(*region); - return region; +/* + * Find equivalent candidate nodes of current cdgNode: + * A and B are equivalent if and only if A dominates B and B post-dominates A + * + * And it must be behind the current cdgNode in the topology order + */ +void ControlDepAnalysis::GetEquivalentNodesInRegion(CDGRegion ®ion, CDGNode &cdgNode, + std::vector &equivalentNodes) const { + BB *curBB = cdgNode.GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + MapleVector &memberNodes = region.GetRegionNodes(); + bool isBehind = false; + for (auto member : memberNodes) { + if (member == &cdgNode) { + isBehind = true; + continue; + } + BB *memberBB = member->GetBB(); + CHECK_FATAL(memberBB != nullptr, "get bb from cdgNode failed"); + if (dom->Dominate(*curBB, *memberBB) && pdom->PostDominate(*memberBB, *curBB) && + isBehind) { + // To avoid the loop-carried instructions are scheduled + bool 
isInPartialCycle = false; + for (auto predBB : memberBB->GetPreds()) { + if (predBB->GetCDGNode()->GetRegion() == ®ion && predBB->GetSuccsSize() > 1) { + isInPartialCycle = true; + break; + } + } + if (!isInPartialCycle) { + equivalentNodes.emplace_back(member); + } + } + } } void ControlDepAnalysis::GenerateFCDGDot() const { @@ -433,7 +715,7 @@ void ControlDepAnalysis::GenerateCFGDot() const { /* Define the output file name */ std::string fileName; - (void)fileName.append("cfg_before_cdg_"); + (void)fileName.append("cfg_after_cdg_"); (void)fileName.append(cgFunc.GetName()); (void)fileName.append(".dot"); @@ -476,29 +758,157 @@ void ControlDepAnalysis::GenerateCFGDot() const { } cfgFile << " BB_" << srcBB->GetId() << " -> " << "BB_" << destBB->GetId(); cfgFile << " [label = \""; - cfgFile << cfgEdge->GetCondition() << "\"];\n"; + cfgFile << cfgEdge->GetCondition() << "\""; + if (cfgEdge->IsBackEdge()) { + cfgFile << ",color=darkorchid1"; + } + cfgFile << "];\n"; + } + cfgFile << "\n"; + + /* Dump region style using cluster in dot language */ + for (auto region : fcdg->GetAllRegions()) { + if (region == nullptr) { + continue; + } + CHECK_FATAL(region->GetRegionNodeSize() != 0, "invalid region"); + cfgFile << " subgraph cluster_" << region->GetRegionId() << " {\n"; + cfgFile << " color=red;\n"; + cfgFile << " label = \"region #" << region->GetRegionId() << "\";\n"; + for (auto cdgNode : region->GetRegionNodes()) { + BB *bb = cdgNode->GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from cdgNode failed"); + cfgFile << " BB_" << bb->GetId() << ";\n"; + } + cfgFile << "}\n\n"; + } + + cfgFile << "}\n"; + (void)cfgFile.flush(); + cfgFile.close(); + (void)std::cout.rdbuf(coutBuf); +} + +void ControlDepAnalysis::GenerateSimplifiedCFGDot() const { + std::streambuf *coutBuf = std::cout.rdbuf(); + std::ofstream cfgFile; + std::streambuf *fileBuf = cfgFile.rdbuf(); + (void)std::cout.rdbuf(fileBuf); + + /* Define the output file name */ + std::string fileName; + 
(void)fileName.append("cfg_simplify_"); + (void)fileName.append(cgFunc.GetName()); + (void)fileName.append(".dot"); + + cfgFile.open(fileName.c_str(), std::ios::trunc); + if (!cfgFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failed.\n"; + return; + } + + cfgFile << "digraph CFG_SIMPLE" << cgFunc.GetName() << " {\n\n"; + cfgFile << " node [shape=box];\n\n"; + + /* Dump nodes style */ + FOR_ALL_BB_CONST(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + cfgFile << " BB_" << bb->GetId(); + cfgFile << "[label= \""; + if (bb == cgFunc.GetFirstBB()) { + cfgFile << "ENTRY\n"; + } + cfgFile << bb->GetId() << "\"];\n"; } + BB *exitBB = cgFunc.GetCommonExitBB(); + cfgFile << " BB_" << exitBB->GetId(); + cfgFile << "[label= \"EXIT\n"; + cfgFile << exitBB->GetId() << "\"];\n"; cfgFile << "\n"; + /* Dump edges style */ + for (auto cfgEdge : cfgMST->GetAllEdges()) { + BB *srcBB = cfgEdge->GetSrcBB(); + BB *destBB = cfgEdge->GetDestBB(); + CHECK_FATAL(srcBB != nullptr && destBB != nullptr, "get wrong cfg-edge"); + if (srcBB == cgFunc.GetCommonExitBB()) { + continue; + } + cfgFile << " BB_" << srcBB->GetId() << " -> " << "BB_" << destBB->GetId(); + cfgFile << " [label = \""; + cfgFile << cfgEdge->GetCondition() << "\""; + if (cfgEdge->IsBackEdge()) { + cfgFile << ",color=darkorchid1"; + } + cfgFile << "];\n"; + } + cfgFile << "}\n"; (void)cfgFile.flush(); cfgFile.close(); (void)std::cout.rdbuf(coutBuf); } +void ControlDepAnalysis::GenerateCFGInRegionDot(CDGRegion ®ion) const { + std::streambuf *coutBuf = std::cout.rdbuf(); + std::ofstream cfgOfRFile; + std::streambuf *fileBuf = cfgOfRFile.rdbuf(); + (void)std::cout.rdbuf(fileBuf); + + /* Define the output file name */ + std::string fileName; + (void)fileName.append("cfg_region"); + (void)fileName.append(std::to_string(region.GetRegionId())); + (void)fileName.append("_"); + (void)fileName.append(cgFunc.GetName()); + (void)fileName.append(".dot"); + + 
cfgOfRFile.open(fileName.c_str(), std::ios::trunc); + if (!cfgOfRFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failed.\n"; + return; + } + + cfgOfRFile << "digraph CFG_REGION" << region.GetRegionId() << " {\n\n"; + cfgOfRFile << " node [shape=box];\n\n"; + + for (auto cdgNode : region.GetRegionNodes()) { + BB *bb = cdgNode->GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from cdgNode failed"); + + for (auto succ = bb->GetSuccsBegin(); succ != bb->GetSuccsEnd(); ++succ) { + CDGNode *node = (*succ)->GetCDGNode(); + CHECK_FATAL(node != nullptr, "get cdgNode from bb failed"); + if (node->GetRegion() == ®ion) { + cfgOfRFile << "\tbb_" << bb->GetId() << " -> " << "bb_" << (*succ)->GetId() << "\n"; + } + } + } + cfgOfRFile << "}\n"; + (void)cfgOfRFile.flush(); + cfgOfRFile.close(); + (void)std::cout.rdbuf(coutBuf); +} + void CgControlDepAnalysis::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); aDep.AddRequired(); + aDep.AddRequired(); } bool CgControlDepAnalysis::PhaseRun(maplebe::CGFunc &f) { MemPool *cdgMemPool = GetPhaseMemPool(); MemPool *tmpMemPool = ApplyTempMemPool(); CHECK_FATAL(cdgMemPool != nullptr && tmpMemPool != nullptr, "get memPool failed"); + DomAnalysis *domInfo = GET_ANALYSIS(CgDomAnalysis, f); + CHECK_FATAL(domInfo != nullptr, "get result of DomAnalysis failed"); PostDomAnalysis *pdomInfo = GET_ANALYSIS(CgPostDomAnalysis, f); CHECK_FATAL(pdomInfo != nullptr, "get result of PostDomAnalysis failed"); - auto *cfgmst = cdgMemPool->New, maplebe::BB>>(*cdgMemPool); - cda = cdgMemPool->New(f, *cdgMemPool, *tmpMemPool, - *pdomInfo, *cfgmst); + auto *cfgMST = cdgMemPool->New, maplebe::BB>>(*cdgMemPool); + cda = f.IsAfterRegAlloc() ? 
cdgMemPool->New(f, *cdgMemPool, "localschedule", true) : + cdgMemPool->New(f, *cdgMemPool, *tmpMemPool, *domInfo, *pdomInfo, cfgMST, "globalschedule"); cda->Run(); return true; } diff --git a/src/mapleall/maple_be/src/cg/data_dep_analysis.cpp b/src/mapleall/maple_be/src/cg/data_dep_analysis.cpp index 4eac04e6943ba069a5c12c4f6cb552289bf9b253..388de5c7a34692bbb5e3d2da0ba475d60f2c958c 100644 --- a/src/mapleall/maple_be/src/cg/data_dep_analysis.cpp +++ b/src/mapleall/maple_be/src/cg/data_dep_analysis.cpp @@ -22,7 +22,7 @@ void IntraDataDepAnalysis::Run(BB &bb, MapleVector &dataNodes) { if (bb.IsUnreachable()) { return; } - MemPool *localMp = memPoolCtrler.NewMemPool("dda for bb mempool", true); + MemPool *localMp = memPoolCtrler.NewMemPool("intra-block dda mempool", true); auto *localAlloc = new MapleAllocator(localMp); InitCurNodeInfo(*localMp, *localAlloc, bb, dataNodes); uint32 nodeSum = 1; @@ -45,6 +45,7 @@ void IntraDataDepAnalysis::Run(BB &bb, MapleVector &dataNodes) { ddb.BuildSpecialInsnDependency(*insn, dataNodes); /* Build Dependency for ambi insn if needed */ ddb.BuildAmbiInsnDependency(*insn); + ddb.BuildAsmInsnDependency(*insn); /* Update stack and heap dependency */ ddb.UpdateStackAndHeapDependency(*ddgNode, *insn, *locInsn); if (insn->IsFrameDef()) { @@ -54,7 +55,7 @@ void IntraDataDepAnalysis::Run(BB &bb, MapleVector &dataNodes) { uint32 separatorIndex = ddb.GetSeparatorIndex(); ddb.AddDependence(*dataNodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator); /* Update register use and register def */ - ddb.UpdateRegUseAndDef(*insn, *ddgNode, dataNodes); + ddb.UpdateRegUseAndDef(*insn, *ddgNode, *dataNodes[separatorIndex]); } AddEndSeparatorNode(bb, dataNodes); ddb.CopyAndClearComments(comments); @@ -67,9 +68,7 @@ void IntraDataDepAnalysis::InitCurNodeInfo(MemPool &tmpMp, MapleAllocator &tmpAl CDGNode *curCDGNode = bb.GetCDGNode(); CHECK_FATAL(curCDGNode != nullptr, "invalid cdgNode from bb"); ddb.SetCDGNode(curCDGNode); - // Need to move to 
target subclass - uint32 maxRegNum = (cgFunc.IsAfterRegAlloc() ? AArch64reg::kAllRegNum : cgFunc.GetMaxVReg()); - curCDGNode->InitDataDepInfo(tmpMp, tmpAlloc, maxRegNum); + ddb.InitCDGNodeDataInfo(tmpMp, tmpAlloc, *curCDGNode); /* Analysis live-in registers in catch BB */ ddb.AnalysisAmbiInsns(bb); /* Clear all dependence nodes and push the first separator node */ @@ -123,102 +122,230 @@ void IntraDataDepAnalysis::AddEndSeparatorNode(BB &bb, MapleVector &no } } -void InterDataDepAnalysis::Run(CDGRegion ®ion, MapleVector &dataNodes) { - uint32 nodeSum = 1; +void InterDataDepAnalysis::Run(CDGRegion ®ion) { + MemPool *regionMp = memPoolCtrler.NewMemPool("inter-block dda mempool", true); + auto *regionAlloc = new MapleAllocator(regionMp); + MapleVector comments(interAlloc.Adapter()); - // Visit CDGNodes in the region follow the topological order of CFG - ComputeTopologicalOrderInRegion(region); - // Init data dependence info for the entire region - GlobalInit(dataNodes); - ddb.SeparateDependenceGraph(dataNodes, nodeSum); - for (std::size_t idx = 0; idx < readyNodes.size(); ++idx) { - CDGNode *cdgNode = readyNodes[idx]; + CDGNode *root = region.GetRegionRoot(); + CHECK_FATAL(root != nullptr, "the root of region must be computed first"); + InitInfoInRegion(*regionMp, *regionAlloc, region); + + /* Visit CDGNodes in the region follow the topological order of CFG */ + for (auto cdgNode : region.GetRegionNodes()) { BB *curBB = cdgNode->GetBB(); CHECK_FATAL(curBB != nullptr, "get bb from CDGNode failed"); - // Init data dependence info for cur cdgNode - LocalInit(*curBB, *cdgNode, dataNodes, idx); + /* Init data dependence info for cur cdgNode */ + InitInfoInCDGNode(*regionMp, *regionAlloc, *curBB, *cdgNode); const Insn *locInsn = curBB->GetFirstLoc(); FOR_BB_INSNS(insn, curBB) { if (!insn->IsMachineInstruction()) { - ddb.ProcessNonMachineInsn(*insn, comments, dataNodes, locInsn); + ddb.ProcessNonMachineInsn(*insn, comments, cdgNode->GetAllDataNodes(), locInsn); continue; } - 
/* Add a pseudo node to separate dependence graph when appropriate */ - ddb.SeparateDependenceGraph(dataNodes, nodeSum); - /* Generate a DepNode */ - DepNode *ddgNode = ddb.GenerateDepNode(*insn, dataNodes, nodeSum, comments); - /* Build Dependency for may-throw insn */ + cdgNode->AccNodeSum(); + DepNode *ddgNode = ddb.GenerateDepNode(*insn, cdgNode->GetAllDataNodes(), cdgNode->GetNodeSum(), comments); ddb.BuildMayThrowInsnDependency(*insn); - /* Build Dependency for each operand of insn */ ddb.BuildOpndDependency(*insn); - /* Build Dependency for special insn */ - ddb.BuildSpecialInsnDependency(*insn, dataNodes); - /* Build Dependency for ambi insn if needed */ + BuildSpecialInsnDependency(*insn, *cdgNode, region, *regionAlloc); ddb.BuildAmbiInsnDependency(*insn); - /* Update stack and heap dependency */ + ddb.BuildAsmInsnDependency(*insn); ddb.UpdateStackAndHeapDependency(*ddgNode, *insn, *locInsn); if (insn->IsFrameDef()) { - ddb.SetLastFrameDefInsn(insn); + cdgNode->SetLastFrameDefInsn(insn); } - /* Separator exists */ - uint32 separatorIndex = ddb.GetSeparatorIndex(); - ddb.AddDependence(*dataNodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator); - /* Update register use and register def */ - ddb.UpdateRegUseAndDef(*insn, *ddgNode, dataNodes); + UpdateRegUseAndDef(*insn, *ddgNode, *cdgNode); } - ddb.CopyAndClearComments(comments); + cdgNode->CopyAndClearComments(comments); + UpdateReadyNodesInfo(*cdgNode, *root); } + ClearInfoInRegion(regionMp, regionAlloc, region); } -void InterDataDepAnalysis::GlobalInit(MapleVector &dataNodes) { - dataNodes.clear(); - // Need Check: where to record the pseudoSepNode? cdgNode? pseudoNode is of BB or of Region? 
- DepNode *pseudoSepNode = ddb.BuildSeparatorNode(); - (void)dataNodes.emplace_back(pseudoSepNode); - ddb.SetSeparatorIndex(0); +void InterDataDepAnalysis::InitInfoInRegion(MemPool ®ionMp, MapleAllocator ®ionAlloc, CDGRegion ®ion) { + ddb.SetCDGRegion(®ion); + for (auto cdgNode : region.GetRegionNodes()) { + cdgNode->InitTopoInRegionInfo(regionMp, regionAlloc); + } } -void InterDataDepAnalysis::LocalInit(BB &bb, CDGNode &cdgNode, MapleVector &dataNodes, std::size_t idx) { +void InterDataDepAnalysis::InitInfoInCDGNode(MemPool ®ionMp, MapleAllocator ®ionAlloc, BB &bb, CDGNode &cdgNode) { ddb.SetCDGNode(&cdgNode); - cdgNode.ClearDataDepInfo(); + ddb.InitCDGNodeDataInfo(regionMp, regionAlloc, cdgNode); /* Analysis live-in registers in catch BB */ ddb.AnalysisAmbiInsns(bb); +} + +void InterDataDepAnalysis::AddBeginSeparatorNode(CDGNode *rootNode) { + BB *rootBB = rootNode->GetBB(); + CHECK_FATAL(rootBB != nullptr, "get rootBB failed"); + /* The first separatorNode of the entire region */ + ddb.SetSeparatorIndex(0); + DepNode *pseudoSepNode = ddb.BuildSeparatorNode(); + rootNode->AddDataNode(pseudoSepNode); + rootNode->SetNodeSum(1); - if (!cgFunc.IsAfterRegAlloc() && idx == 0) { - /* assume first pseudo_dependence_separator insn of current region define live-in's registers for first bb */ - DepNode *pseudoSepNode = dataNodes[0]; + if (!cgFunc.IsAfterRegAlloc()) { Insn *pseudoSepInsn = pseudoSepNode->GetInsn(); - for (auto ®NO : bb.GetLiveInRegNO()) { - cdgNode.SetLatestDefInsn(regNO, pseudoSepInsn); + for (auto regNO : rootBB->GetLiveInRegNO()) { + rootNode->SetLatestDefInsn(regNO, pseudoSepInsn); pseudoSepNode->AddDefReg(regNO); pseudoSepNode->SetRegDefs(pseudoSepNode->GetDefRegnos().size(), nullptr); } } } -void InterDataDepAnalysis::ComputeTopologicalOrderInRegion(CDGRegion ®ion) { - MapleVector controlNodes = region.GetRegionNodes(); - InitRestNodes(controlNodes); - for (auto cdgNode : restNodes) { - // Check whether CFG preds of the CDGNode are in the cur region - 
BB *bb = cdgNode->GetBB(); - CHECK_FATAL(bb != nullptr, "get bb from CDGNode failed"); - bool hasNonPredsInRegion = true; - for (auto predIt = bb->GetPredsBegin(); predIt != bb->GetPredsEnd(); ++predIt) { - CDGNode *predNode = (*predIt)->GetCDGNode(); - CHECK_FATAL(predNode != nullptr, "get CDGNode from bb failed"); - if (predNode->GetRegion() == ®ion) { - hasNonPredsInRegion = false; - break; +void InterDataDepAnalysis::SeparateDependenceGraph(CDGRegion ®ion, CDGNode &cdgNode) { + uint32 &nodeSum = cdgNode.GetNodeSum(); + if ((nodeSum > 0) && (nodeSum % kMaxInsnNum == 0)) { + /* Add a pseudo node to separate dependence graph */ + DepNode *separateNode = ddb.BuildSeparatorNode(); + separateNode->SetIndex(nodeSum++); + cdgNode.AddDataNode(separateNode); + cdgNode.AddPseudoSepNodes(separateNode); + BuildDepsForNewSeparator(region, cdgNode, *separateNode); + + cdgNode.ClearDepDataVec(); + } +} + +void InterDataDepAnalysis::BuildDepsForNewSeparator(CDGRegion ®ion, CDGNode &cdgNode, DepNode &newSepNode) { + bool hasSepInNode = false; + MapleVector &dataNodes = cdgNode.GetAllDataNodes(); + for (auto i = static_cast(dataNodes.size() - 1); i >= 0; --i) { + if (dataNodes[i]->GetType() == kNodeTypeSeparator && dataNodes.size() != 1) { + hasSepInNode = true; + break; + } + ddb.AddDependence(*dataNodes[i], newSepNode, kDependenceTypeSeparator); + } + if (!hasSepInNode) { + for (auto nodeId : cdgNode.GetTopoPredInRegion()) { + CDGNode *predNode = region.GetCDGNodeById(nodeId); + CHECK_FATAL(predNode != nullptr, "get cdgNode from region by id failed"); + MapleVector &predDataNodes = predNode->GetAllDataNodes(); + for (std::size_t i = predDataNodes.size() - 1; i >= 0; --i) { + if (predDataNodes[i]->GetType() == kNodeTypeSeparator) { + break; + } + ddb.AddDependence(*predDataNodes[i], newSepNode, kDependenceTypeSeparator); + } + } + } +} + +void InterDataDepAnalysis::BuildDepsForPrevSeparator(CDGNode &cdgNode, DepNode &depNode, CDGRegion &curRegion) { + if (cdgNode.GetRegion() != 
&curRegion) { + return; + } + DepNode *prevSepNode = nullptr; + MapleVector &dataNodes = cdgNode.GetAllDataNodes(); + for (auto i = static_cast(dataNodes.size() - 1); i >= 0; --i) { + if (dataNodes[i]->GetType() == kNodeTypeSeparator) { + prevSepNode = dataNodes[i]; + break; + } + } + if (prevSepNode != nullptr) { + ddb.AddDependence(*prevSepNode, depNode, kDependenceTypeSeparator); + return; + } + BB *bb = cdgNode.GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from cdgNode failed"); + for (auto predIt = bb->GetPredsBegin(); predIt != bb->GetPredsEnd(); ++predIt) { + CDGNode *predNode = (*predIt)->GetCDGNode(); + CHECK_FATAL(predNode != nullptr, "get cdgNode from bb failed"); + BuildDepsForPrevSeparator(*predNode, depNode, curRegion); + } +} + +void InterDataDepAnalysis::BuildSpecialInsnDependency(Insn &insn, CDGNode &cdgNode, CDGRegion ®ion, + MapleAllocator &alloc) { + MapleVector dataNodes(alloc.Adapter()); + for (auto nodeId : cdgNode.GetTopoPredInRegion()) { + CDGNode *predNode = region.GetCDGNodeById(nodeId); + CHECK_FATAL(predNode != nullptr, "get cdgNode from region by id failed"); + for (auto depNode : predNode->GetAllDataNodes()) { + dataNodes.emplace_back(depNode); + } + } + for (auto depNode : cdgNode.GetAllDataNodes()) { + if (depNode != insn.GetDepNode()) { + dataNodes.emplace_back(depNode); + } + } + ddb.BuildSpecialInsnDependency(insn, dataNodes); +} + +void InterDataDepAnalysis::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, CDGNode &cdgNode) { + /* Update reg use */ + const auto &useRegnos = depNode.GetUseRegnos(); + bool beforeRA = !cgFunc.IsAfterRegAlloc(); + if (beforeRA) { + depNode.InitRegUsesSize(useRegnos.size()); + } + for (auto regNO : useRegnos) { + /* Update reg use for cur depInfo */ + cdgNode.AppendUseInsnChain(regNO, &insn, interMp, beforeRA); + } + + /* Update reg def */ + const auto &defRegnos = depNode.GetDefRegnos(); + size_t i = 0; + if (beforeRA) { + depNode.InitRegDefsSize(defRegnos.size()); + } + for (const auto regNO 
: defRegnos) { + /* Update reg def for cur depInfo */ + cdgNode.SetLatestDefInsn(regNO, &insn); + cdgNode.ClearUseInsnChain(regNO); + if (beforeRA) { + depNode.SetRegDefs(i, nullptr); + if (regNO >= R0 && regNO <= R3) { + depNode.SetHasPreg(true); + } else if (regNO == R8) { + depNode.SetHasNativeCallRegister(true); } } - if (hasNonPredsInRegion) { - AddReadyNode(cdgNode); + ++i; + } +} + +void InterDataDepAnalysis::UpdateReadyNodesInfo(CDGNode &cdgNode, const CDGNode &root) const { + BB *bb = cdgNode.GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from cdgNode failed"); + for (auto succIt = bb->GetSuccsBegin(); succIt != bb->GetSuccsEnd(); ++succIt) { + CDGNode *succNode = (*succIt)->GetCDGNode(); + CHECK_FATAL(succNode != nullptr, "get cdgNode from bb failed"); + if (succNode != &root && succNode->GetRegion() == cdgNode.GetRegion()) { + succNode->SetNodeSum(std::max(cdgNode.GetNodeSum(), succNode->GetNodeSum())); + /* Successor nodes in region record nodeIds that have been visited in topology order */ + for (const auto &nodeId : cdgNode.GetTopoPredInRegion()) { + succNode->InsertVisitedTopoPredInRegion(nodeId); + } + succNode->InsertVisitedTopoPredInRegion(cdgNode.GetNodeId()); } } } -void InterDataDepAnalysis::GenerateInterDDGDot(MapleVector &dataNodes) { +void InterDataDepAnalysis::AddEndSeparatorNode(CDGRegion ®ion, CDGNode &cdgNode) { + DepNode *separatorNode = ddb.BuildSeparatorNode(); + cdgNode.AddDataNode(separatorNode); + cdgNode.AddPseudoSepNodes(separatorNode); + BuildDepsForNewSeparator(region, cdgNode, *separatorNode); +} + +void InterDataDepAnalysis::ClearInfoInRegion(MemPool *regionMp, MapleAllocator *regionAlloc, CDGRegion ®ion) { + delete regionAlloc; + memPoolCtrler.DeleteMemPool(regionMp); + for (auto cdgNode : region.GetRegionNodes()) { + cdgNode->ClearDataDepInfo(); + cdgNode->ClearTopoInRegionInfo(); + } +} + +void InterDataDepAnalysis::GenerateDataDepGraphDotOfRegion(CDGRegion ®ion) { + bool hasExceedMaximum = (region.GetRegionNodes().size() > 
kMaxDumpRegionNodeNum); std::streambuf *coutBuf = std::cout.rdbuf(); std::ofstream iddgFile; std::streambuf *fileBuf = iddgFile.rdbuf(); @@ -228,6 +355,8 @@ void InterDataDepAnalysis::GenerateInterDDGDot(MapleVector &dataNodes) std::string fileName; (void)fileName.append("interDDG_"); (void)fileName.append(cgFunc.GetName()); + (void)fileName.append("_region"); + (void)fileName.append(std::to_string(region.GetRegionId())); (void)fileName.append(".dot"); char absPath[PATH_MAX]; @@ -237,32 +366,69 @@ void InterDataDepAnalysis::GenerateInterDDGDot(MapleVector &dataNodes) return; } iddgFile << "digraph InterDDG_" << cgFunc.GetName() << " {\n\n"; + if (hasExceedMaximum) { + iddgFile << "newrank = true;\n"; + } iddgFile << " node [shape=box];\n\n"; - /* Dump nodes style */ - for (auto node : dataNodes) { - MOperator mOp = node->GetInsn()->GetMachineOpcode(); - // Need move to target - const InsnDesc *md = &AArch64CG::kMd[mOp]; - iddgFile << " insn_" << node->GetInsn() << "["; - iddgFile << "label = \"" << node->GetInsn()->GetId() << ":\n"; - iddgFile << "{ " << md->name << "}\"];\n"; - } - iddgFile << "\n"; + for (auto cdgNode : region.GetRegionNodes()) { + /* Dump nodes style */ + for (auto depNode : cdgNode->GetAllDataNodes()) { + ddb.DumpNodeStyleInDot(iddgFile, *depNode); + } + iddgFile << "\n"; - /* Dump edges style */ - for (auto node : dataNodes) { - for (auto succ : node->GetSuccs()) { - iddgFile << " insn" << node->GetInsn() << " -> " << "insn" << succ->GetTo().GetInsn(); - iddgFile <<" ["; - if (succ->GetDepType() == kDependenceTypeTrue) { - iddgFile << "color=red,"; + /* Dump edges style */ + for (auto depNode : cdgNode->GetAllDataNodes()) { + for (auto succ : depNode->GetSuccs()) { + // Avoid overly complex data dependency graphs + if (hasExceedMaximum && succ->GetDepType() == kDependenceTypeSeparator) { + continue; + } + iddgFile << " insn_" << depNode->GetInsn() << " -> " << "insn_" << succ->GetTo().GetInsn(); + iddgFile <<" ["; + switch (succ->GetDepType()) 
{ + case kDependenceTypeTrue: + iddgFile << "color=red,"; + iddgFile << "label= \"" << succ->GetLatency() << "\""; + break; + case kDependenceTypeOutput: + iddgFile << "label= \"" << "output" << "\""; + break; + case kDependenceTypeAnti: + iddgFile << "label= \"" << "anti" << "\""; + break; + case kDependenceTypeControl: + iddgFile << "label= \"" << "control" << "\""; + break; + case kDependenceTypeMembar: + iddgFile << "label= \"" << "membar" << "\""; + break; + case kDependenceTypeThrow: + iddgFile << "label= \"" << "throw" << "\""; + break; + case kDependenceTypeSeparator: + iddgFile << "label= \"" << "separator" << "\""; + break; + default: + CHECK_FATAL(false, "invalid depType"); + } + iddgFile << "];\n"; } - iddgFile << "label= \"" << succ->GetLatency() << "\""; - iddgFile << "];\n"; } + iddgFile << "\n"; + + /* Dump BB cluster */ + BB *bb = cdgNode->GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from cdgNode failed"); + iddgFile << " subgraph cluster_" << bb->GetId() << " {\n"; + iddgFile << " color=blue;\n"; + iddgFile << " label = \"bb #" << bb->GetId() << "\";\n"; + for (auto depNode : cdgNode->GetAllDataNodes()) { + iddgFile << " insn_" << depNode->GetInsn() << ";\n"; + } + iddgFile << "}\n\n"; } - iddgFile << "\n"; iddgFile << "}\n"; (void)iddgFile.flush(); diff --git a/src/mapleall/maple_be/src/cg/data_dep_base.cpp b/src/mapleall/maple_be/src/cg/data_dep_base.cpp index 8ed355d25d5f880e43cb35784ad6c149be3bda30..40dd8365097a5f1a2a29222c963cbd262aa67219 100644 --- a/src/mapleall/maple_be/src/cg/data_dep_base.cpp +++ b/src/mapleall/maple_be/src/cg/data_dep_base.cpp @@ -111,7 +111,7 @@ void DataDepBase::BuildMayThrowInsnDependency(Insn &insn) { Insn *lastFrameDef = curCDGNode->GetLastFrameDefInsn(); if (lastFrameDef != nullptr) { AddDependence(*lastFrameDef->GetDepNode(), *insn.GetDepNode(), kDependenceTypeThrow); - } else if (!IsIntraBlockAnalysis()) { + } else if (!isIntra && curRegion->GetRegionRoot() != curCDGNode) { 
BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kLastFrameDef); } } @@ -147,8 +147,14 @@ void DataDepBase::BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) { /* Build control data dependence for branch/ret instructions */ void DataDepBase::BuildDepsControlAll(Insn &insn, const MapleVector &nodes) { DepNode *depNode = insn.GetDepNode(); - for (uint32 i = separatorIndex; i < depNode->GetIndex(); ++i) { - AddDependence(*nodes[i], *depNode, kDependenceTypeControl); + if (isIntra) { + for (uint32 i = separatorIndex; i < depNode->GetIndex(); ++i) { + AddDependence(*nodes[i], *depNode, kDependenceTypeControl); + } + } else { + for (auto dataNode : nodes) { + AddDependence(*dataNode, *depNode, kDependenceTypeControl); + } } } @@ -165,10 +171,10 @@ void DataDepBase::BuildDepsSeparator(DepNode &newSepNode, MapleVector /* Build data dependence of may throw instructions */ void DataDepBase::BuildDepsMayThrowInsn(Insn &insn) { - if (IsIntraBlockAnalysis()) { - MapleVector ambiInsns = curCDGNode->GetAmbiguousInsns(); + if (isIntra || curRegion->GetRegionNodeSize() == 1 || curRegion->GetRegionRoot() == curCDGNode) { + MapleVector &ambiInsns = curCDGNode->GetAmbiguousInsns(); AddDependence4InsnInVectorByType(ambiInsns, insn, kDependenceTypeThrow); - } else { + } else if (curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kAmbiguous); } } @@ -178,10 +184,10 @@ void DataDepBase::BuildDepsMayThrowInsn(Insn &insn) { * ambiguous instruction: instructions that can not across may throw instructions */ void DataDepBase::BuildDepsAmbiInsn(Insn &insn) { - if (IsIntraBlockAnalysis()) { - MapleVector mayThrows = curCDGNode->GetMayThrowInsns(); + if (isIntra || curRegion->GetRegionNodeSize() == 1 || curRegion->GetRegionRoot() == curCDGNode) { + MapleVector &mayThrows = curCDGNode->GetMayThrowInsns(); AddDependence4InsnInVectorByType(mayThrows, insn, 
kDependenceTypeThrow); - } else { + } else if (curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kMayThrows); } curCDGNode->AddAmbiguousInsn(&insn); @@ -196,15 +202,14 @@ void DataDepBase::BuildDepsDefReg(Insn &insn, regno_t regNO) { * 2. For building inter-block data dependence, require the data flow info of all BBs on the pred path in CFG */ /* Build anti dependence */ - // Build intra block data dependence - RegList *regList = curCDGNode->GetUseInsnChain(regNO); - while (regList != nullptr) { - CHECK_NULL_FATAL(regList->insn); - AddDependence(*regList->insn->GetDepNode(), *node, kDependenceTypeAnti); - regList = regList->next; - } - // Build inter block data dependence - if (!IsIntraBlockAnalysis()) { + if (isIntra || curRegion->GetRegionNodeSize() == 1 || curRegion->GetRegionRoot() == curCDGNode) { + RegList *regList = curCDGNode->GetUseInsnChain(regNO); + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + AddDependence(*regList->insn->GetDepNode(), *node, kDependenceTypeAnti); + regList = regList->next; + } + } else if (curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockDefUseDependency(*node, regNO, kDependenceTypeAnti, false); } @@ -213,7 +218,7 @@ void DataDepBase::BuildDepsDefReg(Insn &insn, regno_t regNO) { Insn *defInsn = curCDGNode->GetLatestDefInsn(regNO); if (defInsn != nullptr) { AddDependence(*defInsn->GetDepNode(), *node, kDependenceTypeOutput); - } else if (!IsIntraBlockAnalysis()) { + } else if (!isIntra && curRegion->GetRegionRoot() != curCDGNode) { // Build inter block data dependence BuildInterBlockDefUseDependency(*node, regNO, kDependenceTypeOutput, true); } @@ -228,7 +233,7 @@ void DataDepBase::BuildDepsUseReg(Insn &insn, regno_t regNO) { Insn *defInsn = curCDGNode->GetLatestDefInsn(regNO); if (defInsn != nullptr) { AddDependence(*defInsn->GetDepNode(), *node, kDependenceTypeTrue); - } else if (!IsIntraBlockAnalysis()) { + } else if 
(!isIntra && curRegion->GetRegionRoot() != curCDGNode) { // Build inter block data dependence BuildInterBlockDefUseDependency(*node, regNO, kDependenceTypeTrue, true); } @@ -241,27 +246,22 @@ void DataDepBase::UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, con } depNode.SetLocInsn(locInsn); curCDGNode->AddMayThrowInsn(&insn); - if (IsIntraBlockAnalysis()) { + if (isIntra || curRegion->GetRegionNodeSize() == 1 || curRegion->GetRegionRoot() == curCDGNode) { AddDependence4InsnInVectorByType(curCDGNode->GetStackDefInsns(), insn, kDependenceTypeThrow); AddDependence4InsnInVectorByType(curCDGNode->GetHeapDefInsns(), insn, kDependenceTypeThrow); - } else { + } else if (curRegion->GetRegionRoot() != curCDGNode) { BuildInterBlockSpecialDataInfoDependency(depNode, false, kDependenceTypeThrow, kStackDefs); BuildInterBlockSpecialDataInfoDependency(depNode, false, kDependenceTypeThrow, kHeapDefs); } } -void DataDepBase::BuildSeparatorNodeDependency(MapleVector &dataNodes, Insn &insn) { - AddDependence(*dataNodes[separatorIndex], *insn.GetDepNode(), kDependenceTypeSeparator); -} - /* For inter data dependence analysis */ void DataDepBase::BuildInterBlockDefUseDependency(DepNode &curDepNode, regno_t regNO, DepType depType, bool isDef) { - CHECK_FATAL(!IsIntraBlockAnalysis(), "must be inter block data dependence analysis"); + CHECK_FATAL(!isIntra, "must be inter block data dependence analysis"); + CHECK_FATAL(curRegion->GetRegionRoot() != curCDGNode, "for the root node, cross-BB search is not required"); BB *curBB = curCDGNode->GetBB(); CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); - CDGRegion *curRegion = curCDGNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); std::vector visited(curRegion->GetMaxBBIdInRegion(), false); if (isDef) { BuildPredPathDefDependencyDFS(*curBB, visited, curDepNode, regNO, depType); @@ -277,9 +277,9 @@ void DataDepBase::BuildPredPathDefDependencyDFS(BB &curBB, std::vector &vi } CDGNode 
*cdgNode = curBB.GetCDGNode(); CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); - CDGRegion *curRegion = cdgNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); - if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + CDGRegion *region = cdgNode->GetRegion(); + CHECK_FATAL(region != nullptr, "get region from cdgNode failed"); + if (region->GetRegionId() != curRegion->GetRegionId()) { return; } Insn *curDefInsn = cdgNode->GetLatestDefInsn(regNO); @@ -288,8 +288,15 @@ void DataDepBase::BuildPredPathDefDependencyDFS(BB &curBB, std::vector &vi AddDependence(*curDefInsn->GetDepNode(), depNode, depType); return; } + // Ignore back-edge + if (cdgNode == curRegion->GetRegionRoot()) { + return; + } for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { - BuildPredPathDefDependencyDFS(**predIt, visited, depNode, regNO, depType); + // Ignore back-edge of self-loop + if (*predIt != &curBB) { + BuildPredPathDefDependencyDFS(**predIt, visited, depNode, regNO, depType); + } } } @@ -300,9 +307,9 @@ void DataDepBase::BuildPredPathUseDependencyDFS(BB &curBB, std::vector &vi } CDGNode *cdgNode = curBB.GetCDGNode(); CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); - CDGRegion *curRegion = cdgNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); - if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + CDGRegion *region = cdgNode->GetRegion(); + CHECK_FATAL(region != nullptr, "get region from cdgNode failed"); + if (region->GetRegionId() != curRegion->GetRegionId()) { return; } visited[curBB.GetId()] = true; @@ -311,19 +318,26 @@ void DataDepBase::BuildPredPathUseDependencyDFS(BB &curBB, std::vector &vi Insn *useInsn = useChain->insn; CHECK_FATAL(useInsn != nullptr, "get useInsn failed"); AddDependence(*useInsn->GetDepNode(), depNode, depType); + useChain = useChain->next; + } + // Ignore back-edge + if (cdgNode == 
curRegion->GetRegionRoot()) { + return; } for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { - BuildPredPathDefDependencyDFS(**predIt, visited, depNode, regNO, depType); + // Ignore back-edge of self-loop + if (*predIt != &curBB) { + BuildPredPathUseDependencyDFS(**predIt, visited, depNode, regNO, depType); + } } } void DataDepBase::BuildInterBlockSpecialDataInfoDependency(DepNode &curDepNode, bool needCmp, DepType depType, DataDepBase::DataFlowInfoType infoType) { - CHECK_FATAL(!IsIntraBlockAnalysis(), "must be inter block data dependence analysis"); + CHECK_FATAL(!isIntra, "must be inter block data dependence analysis"); + CHECK_FATAL(curRegion->GetRegionRoot() != curCDGNode, "for the root node, cross-BB search is not required"); BB *curBB = curCDGNode->GetBB(); CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); - CDGRegion *curRegion = curCDGNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); std::vector visited(curRegion->GetMaxBBIdInRegion(), false); BuildPredPathSpecialDataInfoDependencyDFS(*curBB, visited, needCmp, curDepNode, depType, infoType); } @@ -336,9 +350,9 @@ void DataDepBase::BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vect } CDGNode *cdgNode = curBB.GetCDGNode(); CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); - CDGRegion *curRegion = cdgNode->GetRegion(); - CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); - if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + CDGRegion *region = cdgNode->GetRegion(); + CHECK_FATAL(region != nullptr, "get region from cdgNode failed"); + if (region != curCDGNode->GetRegion()) { return; } @@ -389,7 +403,7 @@ void DataDepBase::BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vect } case kHeapUses: { visited[curBB.GetId()] = true; - MapleVector heapUses = cdgNode->GetHeapUseInsns(); + MapleVector &heapUses = cdgNode->GetHeapUseInsns(); if (needCmp) { 
AddDependence4InsnInVectorByTypeAndCmp(heapUses, *depNode.GetInsn(), depType); } else { @@ -399,7 +413,7 @@ void DataDepBase::BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vect } case kHeapDefs: { visited[curBB.GetId()] = true; - MapleVector heapDefs = cdgNode->GetHeapDefInsns(); + MapleVector &heapDefs = cdgNode->GetHeapDefInsns(); if (needCmp) { AddDependence4InsnInVectorByTypeAndCmp(heapDefs, *depNode.GetInsn(), depType); } else { @@ -409,13 +423,13 @@ void DataDepBase::BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vect } case kMayThrows: { visited[curBB.GetId()] = true; - MapleVector mayThrows = cdgNode->GetMayThrowInsns(); + MapleVector &mayThrows = cdgNode->GetMayThrowInsns(); AddDependence4InsnInVectorByType(mayThrows, *depNode.GetInsn(), depType); break; } case kAmbiguous: { visited[curBB.GetId()] = true; - MapleVector ambiInsns = cdgNode->GetAmbiguousInsns(); + MapleVector &ambiInsns = cdgNode->GetAmbiguousInsns(); AddDependence4InsnInVectorByType(ambiInsns, *depNode.GetInsn(), depType); break; } @@ -424,8 +438,15 @@ void DataDepBase::BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vect break; } } + // Ignore back-edge + if (cdgNode == curRegion->GetRegionRoot()) { + return; + } for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { - BuildPredPathSpecialDataInfoDependencyDFS(**predIt, visited, needCmp, depNode, depType, infoType); + // Ignore back-edge of self-loop + if (*predIt != &curBB) { + BuildPredPathSpecialDataInfoDependencyDFS(**predIt, visited, needCmp, depNode, depType, infoType); + } } } @@ -531,20 +552,12 @@ void DataDepBase::RemoveSelfDeps(Insn &insn) { node->RemovePred(); } -/* Check if in intra-block data dependence analysis */ -bool DataDepBase::IsIntraBlockAnalysis() const { - if (curCDGNode->GetRegion() == nullptr || curCDGNode->GetRegion()->GetRegionNodes().size() == 1) { - return true; - } - return false; -} - /* Check if regNO is in ehInRegs. 
*/ bool DataDepBase::IfInAmbiRegs(regno_t regNO) const { if (!curCDGNode->HasAmbiRegs()) { return false; } - MapleSet ehInRegs = curCDGNode->GetEhInRegs(); + MapleSet &ehInRegs = curCDGNode->GetEhInRegs(); if (ehInRegs.find(regNO) != ehInRegs.end()) { return true; } diff --git a/src/mapleall/maple_be/src/cg/ebo.cpp b/src/mapleall/maple_be/src/cg/ebo.cpp index 26e4154137e2f13702d789f8a79cf8347839b55b..ac2d1c8592a80f9f192de907380ffbf531cf98e2 100644 --- a/src/mapleall/maple_be/src/cg/ebo.cpp +++ b/src/mapleall/maple_be/src/cg/ebo.cpp @@ -81,7 +81,7 @@ bool Ebo::IsFrameReg(Operand &opnd) const { } Operand *Ebo::GetZeroOpnd(uint32 size) const { -#if TARGAARCH64 || (defined(TARGRISCV64) && TARGRISCV64) +#if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGRISCV64) && TARGRISCV64) return size > k64BitSize ? nullptr : &cgFunc->GetZeroOpnd(size); #else return nullptr; @@ -878,7 +878,7 @@ void Ebo::RemoveInsn(InsnInfo &info) const { #endif } -/* Mark opnd is live between def bb and into bb. */ +/* Mark opnd is live between def bb and into bb. (parameter into & def cannot be marked as const) */ void Ebo::MarkOpndLiveIntoBB(const Operand &opnd, BB &into, BB &def) const { if (live == nullptr) { return; @@ -979,7 +979,7 @@ void Ebo::RemoveUnusedInsns(BB &bb, bool normal) { /* Copies to and from the same register are not needed. */ if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn)) { if (HasAssignedReg(*opnd) && HasAssignedReg(insn->GetOperand(kInsnSecondOpnd)) && - RegistersIdentical(*opnd, insn->GetOperand(kInsnSecondOpnd)) && + RegistersIdentical(*opnd, insn->GetOperand(kInsnSecondOpnd)) && insn->GetOperand(kInsnFirstOpnd).GetSize() == insn->GetOperand(kInsnSecondOpnd).GetSize()) { /* We may be able to get rid of the copy, but be sure that the operand is marked live into this block. 
*/ if ((insnInfo->origOpnd[kInsnSecondOpnd] != nullptr) && (&bb != insnInfo->origOpnd[kInsnSecondOpnd]->bb)) { diff --git a/src/mapleall/maple_be/src/cg/emit.cpp b/src/mapleall/maple_be/src/cg/emit.cpp index e66091f1081dd550961374a001c3b221c52b9f03..9ae2c8b1957aab372df0447c249ce6b82aa323ef 100644 --- a/src/mapleall/maple_be/src/cg/emit.cpp +++ b/src/mapleall/maple_be/src/cg/emit.cpp @@ -63,10 +63,11 @@ int32 GetPrimitiveTypeSize(const std::string &name) { } } DBGDieAttr *LFindAttribute(const MapleVector &vec, DwAt key) { - for (DBGDieAttr *at : vec) + for (DBGDieAttr *at : vec) { if (at->GetDwAt() == key) { return at; } + } return nullptr; } @@ -215,9 +216,21 @@ void Emitter::EmitFileInfo(const std::string &fileName) { } std::string irFile("\""); irFile.append(path).append("\""); - Emit(asmInfo->GetFile()); - Emit(irFile); - Emit("\n"); + if (cg->GetCGOptions().WithLoc()) { + Emit(asmInfo->GetFile()); + Emit(irFile); + Emit("\n"); + } else if (cg->GetCGOptions().WithSrc()) { + // insert the list of src files as gcc + for (auto it : cg->GetMIRModule()->GetSrcFileInfo()) { + if (cg->GetCGOptions().WithAsm()) { + Emit("\t// "); + } + Emit(asmInfo->GetFile()).Emit(" \""); + std::string kStr = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + Emit(kStr).Emit("\"\n"); + } + } /* save directory path in index 8 */ SetFileMapValue(0, path); @@ -249,10 +262,11 @@ void Emitter::EmitFileInfo(const std::string &fileName) { } } } + Emit("\n"); free(curDirName); EmitInlineAsmSection(); -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 Emit("\t.syntax unified\n"); /* * "The arm instruction set is a subset of @@ -391,26 +405,24 @@ void Emitter::EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label) { return; } case kAsmZero: { - uint64 size = Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + uint64 size = mirType->GetSize(); EmitNullConstant(size); return; } case kAsmComm: { std::string size; if (isFlexibleArray) { - size = 
std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + arraySize); + size = std::to_string(mirType->GetSize() + arraySize); } else { - size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + size = std::to_string(mirType->GetSize()); } (void)Emit(asmInfo->GetComm()).Emit(symName).Emit(", ").Emit(size).Emit(", "); -#if PECOFF +#if defined(PECOFF) && PECOFF #if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGARM32) && TARGARM32) || (defined(TARGARK) && TARGARK) ||\ (defined(TARGRISCV64) && TARGRISCV64) - std::string align = std::to_string( - static_cast(log2(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())))); + std::string align = std::to_string(static_cast(log2(mirType->GetAlign()))); #else - std::string align = std::to_string( - Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())); + std::string align = std::to_string(mirType->GetAlign()); #endif emit(align.c_str()); #else /* ELF */ @@ -421,14 +433,14 @@ void Emitter::EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label) { Emit(4096); } else if (((kind == kTypeStruct) || (kind == kTypeClass) || (kind == kTypeArray) || (kind == kTypeUnion)) && ((storage == kScGlobal) || (storage == kScPstatic) || (storage == kScFstatic))) { - int32 align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()); + uint32 align = mirType->GetAlign(); if (GetPointerSize() < align) { (void)Emit(std::to_string(align)); } else { (void)Emit(std::to_string(k8ByteSize)); } } else { - (void)Emit(std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()))); + (void)Emit(std::to_string(mirType->GetAlign())); } #endif Emit("\n"); @@ -444,10 +456,14 @@ void Emitter::EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label) { #if (defined(TARGX86) && TARGX86) || (defined(TARGX86_64) && TARGX86_64) return; #else - align = kAlignOfU8; + uint8 alignMin 
= 0; + if (mirSymbol.GetType()->GetAlign() > 0) { + alignMin = static_cast(log2(mirSymbol.GetType()->GetAlign())); + } + align = std::max(kAlignOfU8, alignMin); #endif } else { - align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirSymbol.GetType()->GetTypeIndex()); + align = static_cast(mirSymbol.GetType()->GetAlign()); #if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGARM32) && TARGARM32) || (defined(TARGARK) && TARGARK) ||\ (defined(TARGRISCV64) && TARGRISCV64) if (CGOptions::IsArm64ilp32() && mirSymbol.GetType()->GetPrimType() == PTY_a32) { @@ -478,10 +494,9 @@ void Emitter::EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label) { #else std::string size; if (isFlexibleArray) { - size = std::to_string( - Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + arraySize); + size = std::to_string(mirType->GetSize() + arraySize); } else { - size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + size = std::to_string(mirType->GetSize()); } Emit(size); #endif @@ -537,7 +552,7 @@ void Emitter::EmitCombineBfldValue(StructEmitInfo &structEmitInfo, bool finished auto width = static_cast(RoundUp(structEmitInfo.GetCombineBitFieldWidth(), charBitWidth)); if (structEmitInfo.GetCombineBitFieldWidth() < width) { structEmitInfo.SetCombineBitFieldValue(structEmitInfo.GetCombineBitFieldValue() << - (width - structEmitInfo.GetCombineBitFieldWidth())); + static_cast(width - structEmitInfo.GetCombineBitFieldWidth())); structEmitInfo.IncreaseCombineBitFieldWidth(static_cast( width - structEmitInfo.GetCombineBitFieldWidth())); } @@ -567,8 +582,9 @@ void Emitter::EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mir uint64 fieldOffset) { MIRType &mirType = mirConst.GetType(); if (fieldOffset > structEmitInfo.GetNextFieldOffset()) { - uint16 curFieldOffset = structEmitInfo.GetNextFieldOffset() - structEmitInfo.GetCombineBitFieldWidth(); - 
structEmitInfo.SetCombineBitFieldWidth(fieldOffset - curFieldOffset); + uint16 curFieldOffset = static_cast(structEmitInfo.GetNextFieldOffset() - + structEmitInfo.GetCombineBitFieldWidth()); + structEmitInfo.SetCombineBitFieldWidth(static_cast(fieldOffset - curFieldOffset)); EmitCombineBfldValue(structEmitInfo, true); ASSERT(structEmitInfo.GetNextFieldOffset() <= fieldOffset, "structEmitInfo's nextFieldOffset should be <= fieldOffset"); @@ -593,7 +609,7 @@ void Emitter::EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mir } else { structEmitInfo.SetCombineBitFieldValue((static_cast(fieldValue.GetExtValue()) << structEmitInfo.GetCombineBitFieldWidth()) + - structEmitInfo.GetCombineBitFieldValue()); + structEmitInfo.GetCombineBitFieldValue()); } structEmitInfo.IncreaseCombineBitFieldWidth(fieldSize); structEmitInfo.IncreaseNextFieldOffset(fieldSize); @@ -603,7 +619,7 @@ void Emitter::EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mir structEmitInfo.SetCombineBitFieldValue((~(~0ULL << structEmitInfo.GetCombineBitFieldWidth())) & structEmitInfo.GetCombineBitFieldValue()); } - if ((nextType == nullptr) || (kTypeBitField != nextType->GetKind())) { + if ((nextType == nullptr) || (nextType->GetKind() != kTypeBitField)) { /* emit structEmitInfo->combineBitFieldValue */ EmitCombineBfldValue(structEmitInfo, true); } @@ -654,8 +670,9 @@ void Emitter::EmitStr(const std::string& mplStr, bool emitAscii, bool emitNewlin buf[kThirdChar] = 0; Emit(buf); } else { - /* all others, print as number */ - int ret = snprintf_s(buf, sizeof(buf), k4BitSize, "\\%03o", static_cast(*str) & 0xFF); + // all others, print as number + int ret = snprintf_s(buf, sizeof(buf), k4BitSize, "\\%03o", + static_cast(static_cast(*str)) & 0xFF); if (ret < 0) { FATAL(kLncFatal, "snprintf_s failed"); } @@ -812,7 +829,7 @@ void Emitter::EmitScalarConstant(MIRConst &mirConst, bool newLine, bool flag32, } else { str = ".quad"; } - if (stIdx.IsGlobal() == false && 
symAddrSym->GetStorageClass() == kScPstatic) { + if (!stIdx.IsGlobal() && symAddrSym->GetStorageClass() == kScPstatic) { PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); (void)Emit("\t" + str + "\t" + symAddrSym->GetName() + std::to_string(pIdx)); } else { @@ -824,8 +841,8 @@ void Emitter::EmitScalarConstant(MIRConst &mirConst, bool newLine, bool flag32, if (symAddr.GetFieldID() > 1) { MIRStructType *structType = static_cast(symAddrSym->GetType()); ASSERT(structType != nullptr, "EmitScalarConstant: non-zero fieldID for non-structure"); - (void)Emit(" + ").Emit(Globals::GetInstance()->GetBECommon()->GetFieldOffset( - *structType, symAddr.GetFieldID()).first); + int32 offset = structType->GetFieldOffsetFromBaseAddr(symAddr.GetFieldID()).byteOffset; + (void)Emit(" + ").Emit(offset); } break; } @@ -979,9 +996,9 @@ void Emitter::EmitAddrofFuncConst(const MIRSymbol &mirSymbol, MIRConst &elemCons } #if TARGAARCH64 || TARGRISCV64 || TARGX86_64 - EmitAsmLabel(kAsmQuad); + EmitAsmLabel(kAsmQuad); #else - Emit("\t.word\t"); + Emit("\t.word\t"); #endif Emit(funcName); if ((stName.find(VTAB_PREFIX_STR) == 0) || (stName.find(ITAB_PREFIX_STR) == 0) || @@ -1316,9 +1333,10 @@ int64 Emitter::GetFieldOffsetValue(const std::string &className, const MIRIntCon ASSERT(it != strIdx2Type.end(), "Can not find type"); MIRType &ty = *it->second; MIRStructType &structType = static_cast(ty); - std::pair fieldOffsetPair = - Globals::GetInstance()->GetBECommon()->GetFieldOffset(structType, fieldIdx); - int64 fieldOffset = fieldOffsetPair.first * static_cast(charBitWidth) + fieldOffsetPair.second; + ASSERT_NOT_NULL(Globals::GetInstance()->GetBECommon()); + OffsetPair fieldOffsetPair = Globals::GetInstance()->GetBECommon()-> + GetJClassFieldOffset(structType, static_cast(fieldIdx)); + int64 fieldOffset = fieldOffsetPair.byteOffset * static_cast(charBitWidth) + fieldOffsetPair.bitOffset; return fieldOffset; } } @@ -1604,7 +1622,9 @@ void Emitter::EmitIntConst(const MIRSymbol 
&mirSymbol, MIRAggConst &aggConst, ui ASSERT(it != strIdx2Type.end(), "Can not find type"); MIRType *mirType = it->second; ASSERT_NOT_NULL(mirType); - objSize = Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + objSize = static_cast(mirType->GetKind() == kTypeClass ? + Globals::GetInstance()->GetBECommon()->GetClassTypeSize(mirType->GetTypeIndex()) : + mirType->GetSize()); } /* objSize should not exceed 16 bits */ CHECK_FATAL(objSize <= 0xffff, "Error:the objSize is too large"); @@ -1717,7 +1737,7 @@ void Emitter::EmitArrayConstant(MIRConst &mirConst) { strLiteral = true; } } - EmitScalarConstant(*elemConst, true, false, strLiteral == false); + EmitScalarConstant(*elemConst, true, false, !strLiteral); } else { EmitScalarConstant(*elemConst); } @@ -1740,13 +1760,13 @@ void Emitter::EmitArrayConstant(MIRConst &mirConst) { CHECK_FATAL(!arrayCt.GetConstVec().empty(), "container empty check"); } if (uNum > 0) { - uint64 unInSizeInByte = static_cast(iNum) * static_cast( - Globals::GetInstance()->GetBECommon()->GetTypeSize(arrayCt.GetConstVecItem(0)->GetType().GetTypeIndex())); + uint64 unInSizeInByte = static_cast(iNum) * static_cast(GlobalTables::GetTypeTable(). 
+ GetTypeFromTyIdx(arrayCt.GetConstVecItem(0)->GetType().GetTypeIndex())->GetSize()); if (unInSizeInByte != 0) { EmitNullConstant(unInSizeInByte); } } else { - uint64 size = Globals::GetInstance()->GetBECommon()->GetTypeSize(scalarIdx.GetIdx()) * dim; + uint64 size = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx.GetIdx())->GetSize() * dim; Emit("\t.zero\t").Emit(static_cast(size)).Emit("\n"); } } @@ -1764,14 +1784,14 @@ void Emitter::EmitVectorConstant(MIRConst &mirConst) { MIRConst *elemConst = vecCt.GetConstVecItem(i); if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { bool strLiteral = false; - EmitScalarConstant(*elemConst, true, false, strLiteral == false); + EmitScalarConstant(*elemConst, true, false, !strLiteral); } else { ASSERT(false, "should not run here"); } } size_t lanes = GetVecLanes(mirType.GetPrimType()); if (lanes > uNum) { - MIRIntConst zConst(0, vecCt.GetConstVecItem(0)->GetType()); + MIRIntConst zConst(static_cast(0), vecCt.GetConstVecItem(0)->GetType()); for (size_t i = uNum; i < lanes; i++) { EmitScalarConstant(zConst, true, false, false); } @@ -1799,7 +1819,7 @@ void Emitter::EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCount } BECommon *beCommon = Globals::GetInstance()->GetBECommon(); /* total size of emitted elements size. 
*/ - uint32 size = beCommon->GetTypeSize(structType.GetTypeIndex()); + size_t size = structType.GetSize(); uint32 fieldIdx = 1; if (structType.GetKind() == kTypeUnion) { fieldIdx = structCt.GetFieldIdItem(0); @@ -1823,16 +1843,16 @@ void Emitter::EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCount if (i != static_cast(num - 1)) { nextElemType = structType.GetElemType(i + 1); } - uint64 elemSize = beCommon->GetTypeSize(elemType->GetTypeIndex()); + uint64 elemSize = elemType->GetSize(); uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; if (elemType->GetKind() == kTypeBitField) { if (elemConst == nullptr) { MIRIntConst *zeroFill = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *elemType); elemConst = zeroFill; } - uint64 fieldOffset = static_cast(static_cast(beCommon->GetFieldOffset( - structType, fieldIdx).first)) * static_cast(charBitWidth) + static_cast( - static_cast(beCommon->GetFieldOffset(structType, static_cast(fieldIdx)).second)); + OffsetPair offsetPair = structType.GetFieldOffsetFromBaseAddr(static_cast(fieldIdx)); + uint64 fieldOffset = static_cast(offsetPair.byteOffset) * static_cast(charBitWidth) + + static_cast(offsetPair.bitOffset); EmitBitFieldConstant(*sEmitInfo, *elemConst, nextElemType, fieldOffset); } else { if (elemConst != nullptr) { @@ -1858,12 +1878,12 @@ void Emitter::EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCount sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); } - if (nextElemType != nullptr && kTypeBitField != nextElemType->GetKind()) { + if (nextElemType != nullptr && nextElemType->GetKind() != kTypeBitField) { ASSERT(i < static_cast(num - 1), "NYI"); - uint8 nextAlign = Globals::GetInstance()->GetBECommon()->GetTypeAlign(nextElemType->GetTypeIndex()); + uint8 nextAlign = static_cast(nextElemType->GetAlign()); auto fieldAttr = structType.GetFields()[i + 1].second.second; nextAlign = std::max(nextAlign, static_cast(fieldAttr.GetAlign())); - nextAlign = 
fieldAttr.IsPacked() ? 1 : std::min(nextAlign, structPack); + nextAlign = static_cast(fieldAttr.IsPacked() ? 1 : std::min(nextAlign, structPack)); ASSERT(nextAlign != 0, "expect non-zero"); /* append size, append 0 when align need. */ uint64 totalSize = sEmitInfo->GetTotalSize(); @@ -1930,7 +1950,7 @@ void Emitter::EmitBlockMarker(const std::string &markerName, const std::string & #else Emit("3\n" + markerName + ":\n"); #endif - (void)EmitAsmLabel(kAsmQuad); + EmitAsmLabel(kAsmQuad); if (withAddr) { Emit(addrName + "\n"); } else { @@ -2158,20 +2178,17 @@ void Emitter::MarkVtabOrItabEndFlag(const std::vector &mirSymbolVec) } void Emitter::EmitStringSectionAndAlign(bool isTermByZero) { - if (CGOptions::OptimizeForSize()) { - if (!isTermByZero) { - (void)Emit(asmInfo->GetSection()).Emit(".rodata.str,\"aMS\",@progbits,1").Emit("\n"); - } else { - (void)Emit(asmInfo->GetSection()).Emit(".rodata").Emit("\n"); - } - } else { - (void)Emit(asmInfo->GetSection()).Emit(".rodata").Emit("\n"); + if (CGOptions::OptimizeForSize() && !isTermByZero) { + (void)Emit(asmInfo->GetSection()).Emit(".rodata.str,\"aMS\",@progbits,1").Emit("\n"); + return; } + (void)Emit(asmInfo->GetSection()).Emit(".rodata").Emit("\n"); #if (defined(TARGX86) && TARGX86) || (defined(TARGX86_64) && TARGX86_64) Emit("\t.align 8\n"); #else Emit("\t.align 3\n"); #endif + return; } void Emitter::EmitStringPointers() { @@ -2232,6 +2249,16 @@ void Emitter::EmitLocalVariable(const CGFunc &cgFunc) { } emittedLocalSym.push_back(localName); + // temporary for LocalDynamicTLS,need to be refactor later + if (st->IsThreadLocal() && opts::aggressiveTlsLocalDynamicOpt) { + if (st->IsConst()) { + globalTlsDataVec.emplace_back(st); + } else { + globalTlsBssVec.emplace_back(st); + } + continue; + } + MIRType *ty = st->GetType(); MIRConst *ct = st->GetKonst(); if (ct == nullptr) { @@ -2259,12 +2286,12 @@ void Emitter::EmitLocalVariable(const CGFunc &cgFunc) { } EmitAsmLabel(*st, kAsmAlign); EmitAsmLabel(*st, kAsmLocal); - if 
(kTypeStruct == ty->GetKind() || kTypeUnion == ty->GetKind() || kTypeClass == ty->GetKind()) { + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion || ty->GetKind() == kTypeClass) { EmitAsmLabel(*st, kAsmSyname); EmitStructConstant(*ct); continue; } - if (kTypeArray != ty->GetKind()) { + if (ty->GetKind() != kTypeArray) { EmitAsmLabel(*st, kAsmSyname); EmitScalarConstant(*ct, true, false, true /* isIndirect */); continue; @@ -2345,9 +2372,7 @@ void Emitter::EmitGlobalVars(std::vector> &globalVar MIRType *mirType = endSym->GetType(); ASSERT_NOT_NULL(endSym); ASSERT_NOT_NULL(mirType); - const std::string kStaticVarEndAdd = - std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())) + "+" + - endSym->GetName(); + const std::string kStaticVarEndAdd = std::to_string(mirType->GetSize()) + "+" + endSym->GetName(); EmitBlockMarker("__MBlock_globalVars_cold_end", "", true, kStaticVarEndAdd); } @@ -2390,14 +2415,14 @@ void Emitter::EmitUninitializedSymbol(const MIRSymbol &mirSymbol) { } } +// if no visibility set individually, set it to be same as the -fvisibility value void SetVariableVisibility(MIRSymbol *mirSymbol) { - /* if no visibility set individually, set it to be same as the -fvisibility value */ if (mirSymbol->IsDefaultVisibility()) { switch (CGOptions::GetVisibilityType()) { - case CGOptions::kHidden: + case CGOptions::kHiddenVisibility: mirSymbol->SetAttr(ATTR_visibility_hidden); break; - case CGOptions::kProtected: + case CGOptions::kProtectedVisibility: mirSymbol->SetAttr(ATTR_visibility_protected); break; default: @@ -2561,16 +2586,20 @@ void Emitter::EmitGlobalVariable() { } if (GetCG()->GetMIRModule()->IsCModule() && mirSymbol->GetStorageClass() == kScExtern) { - /* emit initialized extern variable */ - if (mirSymbol->IsConst()) { + bool isInitialized = mirSymbol->IsConst(); + if (isInitialized) { + // emit .global directive for initialized extern variable EmitAsmLabel(*mirSymbol, kAsmGlbl); - 
SetVariableVisibility(mirSymbol); - if (mirSymbol->GetAttr(ATTR_visibility_hidden)) { - EmitAsmLabel(*mirSymbol, kAsmHidden); - } else if (mirSymbol->GetAttr(ATTR_visibility_protected)) { - EmitAsmLabel(*mirSymbol, kAsmProtected); - } - } else { + } + // emit visibility + SetVariableVisibility(mirSymbol); + if (mirSymbol->GetAttr(ATTR_visibility_hidden)) { + EmitAsmLabel(*mirSymbol, kAsmHidden); + } else if (mirSymbol->GetAttr(ATTR_visibility_protected)) { + EmitAsmLabel(*mirSymbol, kAsmProtected); + } + // if not initialized, continue after emitting visibility + if (!isInitialized) { continue; } } @@ -2589,6 +2618,19 @@ void Emitter::EmitGlobalVariable() { globalVarVec.emplace_back(std::make_pair(mirSymbol, false)); continue; } + if (mirSymbol->IsThreadLocal() && opts::aggressiveTlsLocalDynamicOpt) { + globalTlsBssVec.emplace_back(mirSymbol); + continue; + } + // emit visibility if symbol is global + if (mirSymbol->GetStorageClass() == kScGlobal) { + SetVariableVisibility(mirSymbol); + if (mirSymbol->GetAttr(ATTR_visibility_hidden)) { + EmitAsmLabel(*mirSymbol, kAsmHidden); + } else if (mirSymbol->GetAttr(ATTR_visibility_protected)) { + EmitAsmLabel(*mirSymbol, kAsmProtected); + } + } EmitUninitializedSymbol(*mirSymbol); continue; } @@ -2597,6 +2639,16 @@ void Emitter::EmitGlobalVariable() { if (mirSymbol->GetStorageClass() == kScGlobal || (mirSymbol->GetStorageClass() == kScExtern && GetCG()->GetMIRModule()->IsCModule()) || (mirSymbol->GetStorageClass() == kScFstatic && !mirSymbol->IsReadOnly())) { + // process TLS LocalDynamic First, need to refactor + if (mirSymbol->IsThreadLocal() && opts::aggressiveTlsLocalDynamicOpt) { + if (mirSymbol->sectionAttr != UStrIdx(0)) { + auto §ionName = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol->sectionAttr); + LogInfo::MapleLogger() << "sectionName is : " << sectionName << "\n"; + CHECK_FATAL(false, "Not support layout TLS symbol in different sections yet"); + } + globalTlsDataVec.emplace_back(mirSymbol); + 
continue; + } /* Emit section */ EmitAsmLabel(*mirSymbol, kAsmType); if (mirSymbol->IsReflectionStrTab()) { @@ -2750,6 +2802,7 @@ void Emitter::EmitGlobalVariable() { } } } /* end proccess all mirSymbols. */ + EmitTLSBlock(globalTlsDataVec, globalTlsBssVec); EmitStringPointers(); /* emit global var */ EmitGlobalVars(globalVarVec); @@ -3085,6 +3138,75 @@ void Emitter::EmitMethodFieldSequential(const MIRSymbol &mirSymbol, Emit(symbolName + "\n"); } +// tdata anchor (tdata not implement yet) +// tdata symbols +// tbss anchor +// tbss symbols +void Emitter::EmitTLSBlock(const std::vector &tdataVec, const std::vector &tbssVec) { + if (!tdataVec.empty()) { + Emit("\t.section\t.tdata,\"awT\",@progbits\n"); + Emit(asmInfo->GetAlign()).Emit(4).Emit("\n"); + InsertAnchor("tdata_start_" + GetCG()->GetMIRModule()->GetTlsAnchorHashString(), 0); + for (const auto tdataSym : tdataVec) { + if (tdataSym->GetAttr(ATTR_weak)) { + EmitAsmLabel(*tdataSym, kAsmWeak); + } else { + EmitAsmLabel(*tdataSym, kAsmGlbl); + } + EmitAsmLabel(*tdataSym, kAsmAlign); + EmitAsmLabel(*tdataSym, kAsmSyname); + MIRConst *mirConst = tdataSym->GetKonst(); + MIRType *mirType = tdataSym->GetType(); + if (IsPrimitiveVector(mirType->GetPrimType())) { + EmitVectorConstant(*mirConst); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + if (!CGOptions::IsArm64ilp32()) { + if (IsAddress(mirType->GetPrimType())) { + uint32 sizeinbits = GetPrimTypeBitSize(mirConst->GetType().GetPrimType()); + CHECK_FATAL(sizeinbits == k64BitSize, "EmitGlobalVariable: pointer must be of size 8"); + } + } + if (cg->GetMIRModule()->IsCModule()) { + EmitScalarConstant(*mirConst, true, false, true); + } else { + EmitScalarConstant(*mirConst); + } + } else if (mirType->GetKind() == kTypeArray) { + EmitArrayConstant(*mirConst); + } else if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass || + mirType->GetKind() == kTypeUnion) { + EmitStructConstant(*mirConst); + } else { + ASSERT(false, "NYI"); + } + 
EmitAsmLabel(*tdataSym, kAsmSize); + } + } + if (!tbssVec.empty()) { + Emit("\t.section\t.tbss,\"awT\",@nobits\n"); + Emit(asmInfo->GetAlign()).Emit(4).Emit("\n"); + InsertAnchor("tbss_start_" + GetCG()->GetMIRModule()->GetTlsAnchorHashString(), 0); + for (auto *tbssSym : tbssVec) { + if (tbssSym->GetAttr(ATTR_weak)) { + EmitAsmLabel(*tbssSym, kAsmWeak); + } else if (tbssSym->GetStorageClass() == kScGlobal) { + EmitAsmLabel(*tbssSym, kAsmGlbl); + } + EmitAsmLabel(*tbssSym, kAsmType); + EmitAsmLabel(*tbssSym, kAsmAlign); + EmitAsmLabel(*tbssSym, kAsmSyname); + EmitAsmLabel(*tbssSym, kAsmZero); + EmitAsmLabel(*tbssSym, kAsmSize); + } + } + return; +} + +void Emitter::InsertAnchor(std::string anchorName, int64 offset) { + Emit(asmInfo->GetSet()).Emit("\t.").Emit(anchorName).Emit(",."); + Emit(" + ").Emit(offset).Emit("\n"); +} + void Emitter::EmitDWRef(const std::string &name) { /* * .hidden DW.ref._ZTI3xxx @@ -3228,7 +3350,7 @@ void Emitter::EmitDIFormSpecification(unsigned int dwform) { } } -MIRFunction *Emitter::GetDwTagSubprogram(const MapleVector &attrvec, DebugInfo &di) { +MIRFunction *Emitter::GetDwTagSubprogram(const MapleVector &attrvec, DebugInfo &di) const { DBGDieAttr *name = LFindAttribute(attrvec, DW_AT_name); if (name == nullptr) { DBGDieAttr *spec = LFindAttribute(attrvec, DW_AT_specification); @@ -3696,7 +3818,7 @@ void Emitter::FillInClassByteSize(DBGDie *die, DBGDieAttr *byteSizeAttr) const { uint32 tid = typeAttr->GetId(); CHECK_FATAL(tid < Globals::GetInstance()->GetBECommon()->GetSizeOfTypeSizeTable(), "index out of range in Emitter::FillInClassByteSize"); - int64_t byteSize = static_cast(Globals::GetInstance()->GetBECommon()->GetTypeSize(tid)); + int64_t byteSize = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tid)->GetSize()); LUpdateAttrValue(byteSizeAttr, byteSize); } } @@ -3751,7 +3873,9 @@ void Emitter::SetupDBGInfo(DebugInfo *mirdi) { } prevSubstruct = fieldty->EmbeddedStructType(); FieldID fieldID = static_cast(i + 
embeddedIDs) + 1; - int offset = Globals::GetInstance()->GetBECommon()->GetFieldOffset(*sty, fieldID).first; + int32 offset = sty->GetKind() == kTypeClass ? + Globals::GetInstance()->GetBECommon()->GetJClassFieldOffset(*sty, fieldID).byteOffset : + sty->GetFieldOffsetFromBaseAddr(fieldID).byteOffset; GStrIdx fldName = sty->GetFieldsElemt(i).first; DBGDie *cdie = LFindChildDieWithName(die, DW_TAG_member, fldName); CHECK_FATAL(cdie != nullptr, "cdie is null in Emitter::SetupDBGInfo"); diff --git a/src/mapleall/maple_be/src/cg/global.cpp b/src/mapleall/maple_be/src/cg/global.cpp index 3801745db379d8eb1ff5c9c1a12b432f06c55f81..8ab2eaac06847777db747da5eb966714eb405b21 100644 --- a/src/mapleall/maple_be/src/cg/global.cpp +++ b/src/mapleall/maple_be/src/cg/global.cpp @@ -14,10 +14,10 @@ */ #if TARGAARCH64 #include "aarch64_global.h" -#elif TARGRISCV64 +#elif defined(TARGRISCV64) && TARGRISCV64 #include "riscv64_global.h" #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 #include "arm32_global.h" #endif #include "reaching.h" @@ -71,11 +71,12 @@ bool CgGlobalOpt::PhaseRun(maplebe::CGFunc &f) { return false; } reachingDef->SetAnalysisMode(kRDAllAnalysis); + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); GlobalOpt *globalOpt = nullptr; #if TARGAARCH64 || TARGRISCV64 globalOpt = GetPhaseAllocator()->New(f); #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 globalOpt = GetPhaseAllocator()->New(f); #endif globalOpt->Run(); diff --git a/src/mapleall/maple_be/src/cg/global_schedule.cpp b/src/mapleall/maple_be/src/cg/global_schedule.cpp index efab5310db4399defd033645f2d9dd25dc5202c1..5c65c409aa3488a0f4e936f179eea9302034eab0 100644 --- a/src/mapleall/maple_be/src/cg/global_schedule.cpp +++ b/src/mapleall/maple_be/src/cg/global_schedule.cpp @@ -1,5 +1,5 @@ /* -* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. 
* * OpenArkCompiler is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -22,17 +22,100 @@ namespace maplebe { void GlobalSchedule::Run() { FCDG *fcdg = cda.GetFCDG(); CHECK_FATAL(fcdg != nullptr, "control dependence analysis failed"); - cda.GenerateFCDGDot(); - cda.GenerateCFGDot(); - DotGenerator::GenerateDot("globalsched", cgFunc, cgFunc.GetMirModule(), - true, cgFunc.GetName()); + if (GLOBAL_SCHEDULE_DUMP) { + cda.GenerateSimplifiedCFGDot(); + cda.GenerateCFGDot(); + cda.GenerateFCDGDot(); + DotGenerator::GenerateDot("globalsched", cgFunc, cgFunc.GetMirModule(), true, cgFunc.GetName()); + } + InitInsnIdAndLocInsn(); for (auto region : fcdg->GetAllRegions()) { - if (region == nullptr) { + if (region == nullptr || !CheckCondition(*region)) { + continue; + } + interDDA.Run(*region); + if (GLOBAL_SCHEDULE_DUMP) { + cda.GenerateCFGInRegionDot(*region); + interDDA.GenerateDataDepGraphDotOfRegion(*region); + } + InitInRegion(*region); + if (CGOptions::DoVerifySchedule()) { + VerifyingSchedule(*region); + continue; + } + DoGlobalSchedule(*region); + } +} + +bool GlobalSchedule::CheckCondition(CDGRegion ®ion) { + uint32 insnSum = 0; + for (auto cdgNode : region.GetRegionNodes()) { + BB *bb = cdgNode->GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from cdgNode failed"); + FOR_BB_INSNS_CONST(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + insnSum++; + } + } + return insnSum <= kMaxInsnNum; +} + +/* + * The entry of global scheduling + */ +void GlobalSchedule::DoGlobalSchedule(CDGRegion ®ion) { + if (GLOBAL_SCHEDULE_DUMP) { + DumpRegionInfoBeforeSchedule(region); + } + listScheduler = schedMP.New(schedMP, cgFunc, false, "globalschedule"); + /* Process nodes in a region by the topology sequence */ + for (auto cdgNode : region.GetRegionNodes()) { + BB *bb = cdgNode->GetBB(); + ASSERT(bb != nullptr, "get bb from cdgNode failed"); + if (bb->IsAtomicBuiltInBB()) { + for (auto depNode : 
cdgNode->GetAllDataNodes()) { + for (auto succLink : depNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DecreaseValidPredsSize(); + } + } continue; } - idda.Run(*region, dataNodes); - idda.GenerateInterDDGDot(dataNodes); + + MemPool *cdgNodeMp = memPoolCtrler.NewMemPool("global-scheduler cdgNode memPool", true); + /* Collect candidate instructions of current cdgNode */ + InitInCDGNode(region, *cdgNode, cdgNodeMp); + + /* Execute list scheduling */ + listScheduler->SetCDGRegion(region); + listScheduler->SetCDGNode(*cdgNode); + listScheduler->DoListScheduling(); + + /* Reorder instructions in the current BB based on the scheduling result */ + FinishScheduling(*cdgNode); + + if (GLOBAL_SCHEDULE_DUMP) { + DumpCDGNodeInfoAfterSchedule(*cdgNode); + } + + ClearCDGNodeInfo(region, *cdgNode, cdgNodeMp); + } +} + +void GlobalSchedule::ClearCDGNodeInfo(CDGRegion ®ion, CDGNode &cdgNode, MemPool *cdgNodeMp) { + std::vector equivalentNodes; + cda.GetEquivalentNodesInRegion(region, cdgNode, equivalentNodes); + for (auto equivNode : equivalentNodes) { + for (auto depNode : equivNode->GetAllDataNodes()) { + ASSERT(depNode->GetState() != kScheduled, "update state of depNode failed in finishScheduling"); + depNode->SetState(kNormal); + } } + + memPoolCtrler.DeleteMemPool(cdgNodeMp); + commonSchedInfo = nullptr; } void CgGlobalSchedule::GetAnalysisDependence(maple::AnalysisDep &aDep) const { @@ -40,14 +123,13 @@ void CgGlobalSchedule::GetAnalysisDependence(maple::AnalysisDep &aDep) const { } bool CgGlobalSchedule::PhaseRun(maplebe::CGFunc &f) { - MemPool *gsMemPool = GetPhaseMemPool(); + MemPool *memPool = GetPhaseMemPool(); ControlDepAnalysis *cda = GET_ANALYSIS(CgControlDepAnalysis, f); MAD *mad = Globals::GetInstance()->GetMAD(); - // Need move to target - auto *ddb = gsMemPool->New(*gsMemPool, f, *mad); - auto *idda = gsMemPool->New(f, *gsMemPool, *ddb); - auto *globalSched = gsMemPool->New(*gsMemPool, f, *cda, *idda); - globalSched->Run(); + auto *ddb = 
memPool->New(*memPool, f, *mad, false); + auto *idda = memPool->New(f, *memPool, *ddb); + auto *globalScheduler = f.GetCG()->CreateGlobalSchedule(*memPool, f, *cda, *idda); + globalScheduler->Run(); return true; } MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgGlobalSchedule, globalschedule) diff --git a/src/mapleall/maple_be/src/cg/ico.cpp b/src/mapleall/maple_be/src/cg/ico.cpp index 078260aeb49139edc3cb2ea4af4f2c358ff92cfd..148b1b27d233abd6317f6a484a240479605149b9 100644 --- a/src/mapleall/maple_be/src/cg/ico.cpp +++ b/src/mapleall/maple_be/src/cg/ico.cpp @@ -18,7 +18,7 @@ #include "aarch64_ico.h" #include "aarch64_isa.h" #include "aarch64_insn.h" -#elif TARGRISCV64 +#elif defined(TARGRISCV64) && TARGRISCV64 #include "riscv64_ico.h" #include "riscv64_isa.h" #include "riscv64_insn.h" @@ -47,7 +47,7 @@ Insn *ICOPattern::FindLastCmpInsn(BB &bb) const { return nullptr; } -std::vector ICOPattern::GetLabelOpnds(Insn &insn) const { +std::vector ICOPattern::GetLabelOpnds(const Insn &insn) const { std::vector labelOpnds; for (uint32 i = 0; i < insn.GetOperandSize(); i++) { if (insn.GetOperand(i).IsLabelOpnd()) { @@ -67,13 +67,19 @@ bool CgIco::PhaseRun(maplebe::CGFunc &f) { #if TARGAARCH64 || TARGRISCV64 ico = memPool->New(f, *memPool); #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 ico = memPool->New(f, *memPool); #endif const std::string &funcClass = f.GetFunction().GetBaseClassName(); const std::string &funcName = f.GetFunction().GetBaseFuncName(); std::string name = funcClass + funcName; ico->Run(name); + if (ico->IsOptimized()) { + // This phase modifies the cfg, which affects the loop analysis result, + // so we need to run loop-analysis again. 
+ GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLoopAnalysis::id); + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + } if (ICO_DUMP_NEWPM) { DotGenerator::GenerateDot("ico-after", f, f.GetMirModule()); } diff --git a/src/mapleall/maple_be/src/cg/insn.cpp b/src/mapleall/maple_be/src/cg/insn.cpp index f2841aef3ccd47879915bd3c535eeb5cb41fb053..62adc07d763422b5512119c0d428d98a9b6631da 100644 --- a/src/mapleall/maple_be/src/cg/insn.cpp +++ b/src/mapleall/maple_be/src/cg/insn.cpp @@ -46,7 +46,7 @@ bool Insn::IsBasicOp() const { return md ? md->IsBasicOp() : false; } bool Insn::IsConversion() const { - return md? md->IsConversion() : false; + return md ? md->IsConversion() : false; } bool Insn::IsUnaryOp() const { return md ? md->IsUnaryOp() : false; @@ -108,11 +108,11 @@ bool Insn::IsLoadStorePair() const { bool Insn::IsLoadLabel() const { return md && md->IsLoad() && GetOperand(kInsnSecondOpnd).GetKind() == Operand::kOpdBBAddress; } -bool Insn::OpndIsDef(uint32 id) const { - return md ? md->GetOpndDes(id)->IsDef() : false; +bool Insn::OpndIsDef(uint32 opndId) const { + return md ? md->GetOpndDes(opndId)->IsDef() : false; } -bool Insn::OpndIsUse(uint32 id) const { - return md ? md->GetOpndDes(id)->IsUse() : false; +bool Insn::OpndIsUse(uint32 opndId) const { + return md ? 
md->GetOpndDes(opndId)->IsUse() : false; } bool Insn::IsClinit() const { return Globals::GetInstance()->GetTarget()->IsClinitInsn(mOp); @@ -133,6 +133,16 @@ Operand *Insn::GetMemOpnd() const { } return nullptr; } +uint32 Insn::GetMemOpndIdx() const { + uint32 opndIdx = kInsnMaxOpnd; + for (uint32 i = 0; i < static_cast(opnds.size()); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + return i; + } + } + return opndIdx; +} void Insn::SetMemOpnd(MemOperand *memOpnd) { for (uint32 i = 0; i < static_cast(opnds.size()); ++i) { Operand &opnd = GetOperand(i); @@ -179,7 +189,7 @@ std::set Insn::GetDefRegs() const { return defRegNOs; } -#if DEBUG +#if defined(DEBUG) && DEBUG void Insn::Check() const { if (!md) { CHECK_FATAL(false, " need machine description for target insn "); @@ -193,10 +203,11 @@ void Insn::Check() const { } #endif -Insn *Insn::Clone(const MemPool &memPool) const { +Insn *Insn::Clone(const MemPool /* &memPool */) const { CHECK_FATAL(false, "NIY"); return nullptr; } + Operand *Insn::GetCallTargetOperand() const { ASSERT(IsCall() || IsTailCall(), "should be call"); return &GetOperand(kInsnFirstOpnd); @@ -208,7 +219,6 @@ ListOperand *Insn::GetCallArgumentOperand() { return &static_cast(GetOperand(kInsnSecondOpnd)); } - void Insn::CommuteOperands(uint32 dIndex, uint32 sIndex) { Operand *tempCopy = opnds[sIndex]; opnds[sIndex] = opnds[dIndex]; diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp index 532545ca770c09295d73605fcb104b7a7503edfb..da28fa9c7b91ac2db3476a35717003424843deb5 100644 --- a/src/mapleall/maple_be/src/cg/isel.cpp +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -444,12 +444,12 @@ Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { return iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); } -Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { +Operand *HandleAlloca(const BaseNode /* &parent */, 
BaseNode &expr, MPISel &iSel) { return iSel.SelectAlloca(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); } -Operand *HandleCGArrayElemAdd(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { - return iSel.SelectCGArrayElemAdd(static_cast(expr), parent); +Operand *HandleCGArrayElemAdd(const BaseNode /* &parent */, const BaseNode &expr, MPISel &iSel) { + return iSel.SelectCGArrayElemAdd(static_cast(expr)); } void HandleAsm(StmtNode &stmt, MPISel &iSel) { @@ -467,16 +467,16 @@ Operand *HandleSelect(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { return iSel.SelectSelect(static_cast(expr), condOpnd, trueOpnd, falseOpnd, parent); } -Operand *HandleMin(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { +Operand *HandleMin(const BaseNode /* &parent */, BaseNode &expr, MPISel &iSel) { return iSel.SelectMin(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), - *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); + *iSel.HandleExpr(expr, *expr.Opnd(1))); } -Operand *HandleMax(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { +Operand *HandleMax(const BaseNode /* &parent */, BaseNode &expr, MPISel &iSel) { return iSel.SelectMax(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), - *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); + *iSel.HandleExpr(expr, *expr.Opnd(1))); } -Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { +Operand *HandleRetype(const BaseNode /* &parent */, BaseNode &expr, MPISel &iSel) { return iSel.SelectRetype(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); } @@ -551,7 +551,7 @@ Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { case INTRN_C_ctz64: return iSel.SelectCctz(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); default: - ASSERT(false, "NIY, unsupported intrinsicop."); + CHECK_FATAL(false, "NIY, unsupported intrinsicop."); return nullptr; } } @@ -739,12 +739,12 @@ MirTypeInfo MPISel::GetMirTypeInfoFormFieldIdAndMirType(FieldID 
fieldId, MIRType ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); MIRStructType *structType = static_cast(mirType); mirType = structType->GetFieldType(fieldId); - mirTypeInfo.offset = cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first; + mirTypeInfo.offset = structType->GetFieldOffsetFromBaseAddr(fieldId).byteOffset; } mirTypeInfo.primType = mirType->GetPrimType(); // aggSize for AggType if (mirTypeInfo.primType == maple::PTY_agg) { - mirTypeInfo.size = cgFunc->GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + mirTypeInfo.size = static_cast(mirType->GetSize()); } return mirTypeInfo; } @@ -784,7 +784,7 @@ void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) { return; } -void MPISel::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { +void MPISel::SelectDassignoff(const DassignoffNode &stmt, Operand &opnd0) { MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(stmt.stIdx); PrimType primType = stmt.GetPrimType(); uint32 bitSize = GetPrimTypeBitSize(primType); @@ -826,7 +826,7 @@ void MPISel::SelectIassignoff(const IassignoffNode &stmt) { SelectCopy(memOpnd, rhsReg, primType); } -ImmOperand *MPISel::SelectIntConst(MIRIntConst &intConst, PrimType primType) const { +ImmOperand *MPISel::SelectIntConst(const MIRIntConst &intConst, PrimType primType) const { return &cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), intConst.GetExtValue()); } @@ -987,7 +987,7 @@ void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bit * resOpnd = opnd0 & ((1 << bitSize) - 1) */ ImmOperand &imm = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, - (static_cast(1) << bitSize) - 1); + static_cast((1ULL << bitSize) - 1)); SelectBand(resOpnd, opnd0, imm, primType); } else { /* @@ -1351,7 +1351,7 @@ static inline uint64 CreateDepositBitsImm1(uint32 primBitSize, uint8 bitOffset, if (bitSize + bitOffset >= primBitSize) { val = 0; } else { - val <<= (bitSize + bitOffset); + 
val <<= static_cast(bitSize + bitOffset); } val |= (static_cast(1) << bitOffset) - 1; return val; @@ -1378,9 +1378,9 @@ Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, /* and */ SelectBand(resOpnd, opnd0, imm1Opnd, primType); if (opnd1.IsIntImmediate()) { - /* opnd1 is immediate, imm2 = (opnd1.val << bitOffset) & (~$imm1) */ + // opnd1 is immediate, imm2 = (opnd1.val << bitOffset) & (~$imm1) int64 imm2Val = static_cast((static_cast(static_cast(opnd1).GetValue()) << - bitOffset)) & (~imm1Val); + bitOffset) & (~imm1Val)); ImmOperand &imm2Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, imm2Val); /* or */ SelectBior(resOpnd, resOpnd, imm2Opnd, primType); @@ -1392,7 +1392,7 @@ Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, ImmOperand &countOpnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, bitOffset); SelectShift(tmpOpnd, tmpOpnd, countOpnd, OP_shl, primType, primType); /* and (~$imm1) */ - ImmOperand &nonImm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, (~imm1Val)); + ImmOperand &nonImm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, static_cast(~imm1Val)); SelectBand(tmpOpnd, tmpOpnd, nonImm1Opnd, primType); /* or */ SelectBior(resOpnd, resOpnd, tmpOpnd, primType); @@ -1432,7 +1432,7 @@ Operand *MPISel::SelectAlloca(UnaryNode &node, Operand &opnd0) { return &resOpnd; } -Operand *MPISel::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) { +Operand *MPISel::SelectCGArrayElemAdd(const BinaryNode &node) { BaseNode *opnd0 = node.Opnd(0); BaseNode *opnd1 = node.Opnd(1); ASSERT(opnd1->GetOpCode() == OP_constval, "NIY, opnd1->op should be OP_constval."); @@ -1575,7 +1575,7 @@ void MPISel::SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType) con cgFunc->GetCurBB()->AppendInsn(insn); } -Operand *MPISel::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { +Operand *MPISel::SelectMin(const BinaryNode &node, Operand &opnd0, 
Operand &opnd1) { PrimType primType = node.GetPrimType(); RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); @@ -1588,7 +1588,7 @@ void MPISel::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimTyp } -Operand *MPISel::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { +Operand *MPISel::SelectMax(const BinaryNode &node, Operand &opnd0, Operand &opnd1) { PrimType primType = node.GetPrimType(); RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); @@ -1600,7 +1600,7 @@ void MPISel::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimTyp SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType); } -Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { +Operand *MPISel::SelectRetype(const TypeCvtNode &node, Operand &opnd0) { PrimType fromType = node.Opnd(0)->GetPrimType(); PrimType toType = node.GetPrimType(); ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit widith doesn' match"); @@ -1615,13 +1615,13 @@ Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { } if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType)) { RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); + cgFunc->GetRegTyFromPrimTy(toType)); SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); return resOpnd; } if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), - cgFunc->GetRegTyFromPrimTy(toType)); + cgFunc->GetRegTyFromPrimTy(toType)); SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); return resOpnd; } diff --git a/src/mapleall/maple_be/src/cg/list_scheduler.cpp b/src/mapleall/maple_be/src/cg/list_scheduler.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..9bf90a25a3fcc121757c226d3df5851747653bfe --- /dev/null +++ b/src/mapleall/maple_be/src/cg/list_scheduler.cpp @@ -0,0 +1,626 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#include "list_scheduler.h" + +namespace maplebe { +void ListScheduler::DoListScheduling() { + Init(); + + MapleVector &candidates = commonSchedInfo->GetCandidates(); + /* Set the initial earliest time of all nodes to 0 */ + for (auto depNode : candidates) { + depNode->SetEStart(0); + } + if (doDelayHeuristics) { + /* Compute delay priority of all candidates */ + ComputeDelayPriority(); + } + + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << "## --- schedule bb_" << curCDGNode->GetBB()->GetId() << " ---\n\n"; + if (doDelayHeuristics) { + DumpDelay(); + } + LogInfo::MapleLogger() << " >> dependencies resolved: "; + } + + /* Push depNodes whose dependencies resolved into waitingQueue */ + auto candiIter = candidates.begin(); + while (candiIter != candidates.end()) { + DepNode *candiNode = *candiIter; + // dependencies resolved + if (candiNode->GetValidPredsSize() == 0) { + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << "insn_" << candiNode->GetInsn()->GetId() << ", "; + } + (void)waitingQueue.emplace_back(candiNode); + candiNode->SetState(kWaiting); + candiIter = commonSchedInfo->EraseIterFromCandidates(candiIter); + } else { + ++candiIter; + } + } + + ComputeEStart(currCycle); + + /* Iterate until the instructions in the current 
BB are scheduled */ + while (scheduledNodeNum < curCDGNode->GetInsnNum()) { + UpdateInfoBeforeSelectNode(); + + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << "\n\n '' current cycle: " << currCycle << "\n\n"; + DumpWaitingQueue(); + } + + /* Push depNodes whose resources are free from waitingQueue into readyList */ + auto waitingIter = waitingQueue.begin(); + while (waitingIter != waitingQueue.end()) { + DepNode *waitingNode = *waitingIter; + if (waitingNode->IsResourceFree() && waitingNode->GetEStart() <= currCycle) { + (void)readyList.emplace_back(waitingNode); + waitingNode->SetState(kReady); + waitingIter = EraseIterFromWaitingQueue(waitingIter); + } else { + ++waitingIter; + } + } + + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << " >> ReadyList before sort: {"; + DumpReadyList(); + } + + // If there are no ready insns, stall until one is ready + if (readyList.empty()) { + advancedCycle = 1; + continue; + } + CalculateMostUsedUnitKindCount(); + if (!doDelayHeuristics) { + /* Update LStart */ + ComputeLStart(); + if (LIST_SCHEDULE_DUMP && !doDelayHeuristics) { + DumpEStartLStartOfAllNodes(); + } + } + // Sort the readyList by priority from highest to lowest + SortReadyList(); + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << " >> ReadyList after sort: {"; + DumpReadyList(); + } + // Select the ready node with the highest priority + DepNode *schedNode = *readyList.begin(); + CHECK_FATAL(schedNode != nullptr, "select readyNode failed"); + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << " >> Select node: insn_" << schedNode->GetInsn()->GetId() << "\n\n"; + } + if (schedNode->GetInsn()->GetBB()->GetId() == curCDGNode->GetBB()->GetId()) { + scheduledNodeNum++; + } + UpdateInfoAfterSelectNode(*schedNode); + } + commonSchedInfo = nullptr; +} + +void ListScheduler::Init() { + ASSERT(region != nullptr, "invalid region"); + ASSERT(curCDGNode != nullptr, "invalid cdgNode"); + ASSERT(commonSchedInfo != nullptr, "invalid common scheduling info"); + 
+ mad = Globals::GetInstance()->GetMAD(); + + waitingQueue.clear(); + readyList.clear(); + + mad->ReleaseAllUnits(); + currCycle = 0; + advancedCycle = 0; + scheduledNodeNum = 0; +} + +void ListScheduler::UpdateInfoBeforeSelectNode() { + while (advancedCycle > 0) { + currCycle++; + mad->AdvanceCycle(); + advancedCycle--; + } + UpdateNodesInReadyList(); +} + +void ListScheduler::SortReadyList() { + // Use default rank rules + if (rankScheduleInsns == nullptr) { + if (doDelayHeuristics) { + std::sort(readyList.begin(), readyList.end(), DelayRankScheduleInsns); + } else { + std::sort(readyList.begin(), readyList.end(), CriticalPathRankScheduleInsns); + } + } else { + // Use custom rank rules + std::sort(readyList.begin(), readyList.end(), rankScheduleInsns); + } +} + +void ListScheduler::UpdateEStart(DepNode &schedNode) { + std::vector traversalList; + (void)traversalList.emplace_back(&schedNode); + + while (!traversalList.empty()) { + DepNode *curNode = traversalList.front(); + traversalList.erase(traversalList.begin()); + + for (auto succLink : curNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + ASSERT(succNode.GetState() != kScheduled, "invalid state of depNode"); + succNode.SetEStart(std::max(succNode.GetEStart(), schedNode.GetSchedCycle() + succLink->GetLatency())); + maxEStart = std::max(maxEStart, succNode.GetEStart()); + if (!succNode.GetSuccs().empty() && + std::find(traversalList.begin(), traversalList.end(), &succNode) == traversalList.end()) { + (void)traversalList.emplace_back(&succNode); + } + } + } +} + +void ListScheduler::UpdateInfoAfterSelectNode(DepNode &schedNode) { + schedNode.SetState(kScheduled); + schedNode.SetSchedCycle(currCycle); + schedNode.OccupyUnits(); + schedNode.SetEStart(currCycle); + commonSchedInfo->AddSchedResults(&schedNode); + EraseNodeFromReadyList(&schedNode); + UpdateAdvanceCycle(schedNode); + + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << " >> dependencies resolved: {"; + } + for (auto succLink : 
schedNode.GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DecreaseValidPredsSize(); + // Push depNodes whose dependencies resolved from candidates into waitingQueue + if (succNode.GetValidPredsSize() == 0 && succNode.GetState() == kCandidate) { + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << "insn_" << succNode.GetInsn()->GetId() << ", "; + } + (void)waitingQueue.emplace_back(&succNode); + commonSchedInfo->EraseNodeFromCandidates(&succNode); + succNode.SetState(kWaiting); + } + } + + UpdateEStart(schedNode); + + if (LIST_SCHEDULE_DUMP) { + LogInfo::MapleLogger() << "}\n\n"; + DumpScheduledResult(); + LogInfo::MapleLogger() << "'' issue insn_" << schedNode.GetInsn()->GetId() << " at cycle " << currCycle << "\n\n"; + } +} + +void ListScheduler::UpdateNodesInReadyList() { + auto readyIter = readyList.begin(); + while (readyIter != readyList.end()) { + DepNode *readyNode = *readyIter; + if (!readyNode->IsResourceFree() || readyNode->GetEStart() > currCycle) { + (void)waitingQueue.emplace_back(readyNode); + readyNode->SetState(kWaiting); + readyIter = EraseIterFromReadyList(readyIter); + } else { + ++readyIter; + } + } +} + +void ListScheduler::UpdateAdvanceCycle(const DepNode &schedNode) { + switch (schedNode.GetInsn()->GetLatencyType()) { + case kLtClinit: + advancedCycle = kClinitAdvanceCycle; + break; + case kLtAdrpLdr: + advancedCycle = kAdrpLdrAdvanceCycle; + break; + case kLtClinitTail: + advancedCycle = kClinitTailAdvanceCycle; + break; + default: + break; + } + + if (advancedCycle == 0 && mad->IsFullIssued()) { + advancedCycle = 1; + } +} + +/* + * Compute the delay of the depNode by postorder, and + * the delay of the leaf node is initially set to 0 or execTime + */ +void ListScheduler::ComputeDelayPriority() { + std::vector traversalList; + MapleVector &candidates = commonSchedInfo->GetCandidates(); + for (auto depNode : candidates) { + depNode->SetDelay(0); + // Leaf node + if (depNode->GetSuccs().empty()) { + 
(void)traversalList.emplace_back(depNode); + } + } + + // Compute delay from leaf node to root node + while (!traversalList.empty()) { + DepNode *depNode = traversalList.front(); + traversalList.erase(traversalList.begin()); + + for (const auto predLink : depNode->GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + // Consider the cumulative effect of nodes on the critical path + predNode.SetDelay(std::max(predLink->GetLatency() + depNode->GetDelay(), predNode.GetDelay())); + maxDelay = std::max(maxDelay, predNode.GetDelay()); + predNode.DecreaseValidSuccsSize(); + if (predNode.GetValidSuccsSize() == 0) { + (void)traversalList.emplace_back(&predNode); + } + } + } +} + +void ListScheduler::InitInfoBeforeCompEStart(uint32 cycle, std::vector &traversalList) { + for (CDGNode *cdgNode : region->GetRegionNodes()) { + for (auto *depNode : cdgNode->GetAllDataNodes()) { + depNode->SetTopoPredsSize(static_cast(depNode->GetPreds().size())); + if (depNode->GetState() != kScheduled) { + depNode->SetEStart(cycle); + } + if (depNode->GetTopoPredsSize() == 0) { + (void)traversalList.emplace_back(depNode); + } + } + } +} + +/* + * Compute the earliest start cycle of the instruction + */ +void ListScheduler::ComputeEStart(uint32 cycle) { + std::vector traversalList; + InitInfoBeforeCompEStart(cycle, traversalList); + + /* Compute the eStart of each depNode in the topology sequence */ + while (!traversalList.empty()) { + DepNode *depNode = traversalList.front(); + traversalList.erase(traversalList.begin()); + + for (const auto succLink : depNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DecreaseTopoPredsSize(); + + if (succNode.GetState() != kScheduled) { + succNode.SetEStart(std::max(depNode->GetEStart() + succLink->GetLatency(), succNode.GetEStart())); + } + maxEStart = std::max(succNode.GetEStart(), maxEStart); + + if (succNode.GetTopoPredsSize() == 0) { + (void)traversalList.emplace_back(&succNode); + } + } + } + if (maxEStart < cycle) { + 
maxEStart = cycle; + } +} + +void ListScheduler::InitInfoBeforeCompLStart(std::vector &traversalList) { + for (CDGNode *cdgNode : region->GetRegionNodes()) { + for (auto depNode : cdgNode->GetAllDataNodes()) { + depNode->SetLStart(maxEStart); + depNode->SetValidSuccsSize(static_cast(depNode->GetSuccs().size())); + if (depNode->GetSuccs().empty()) { + (void)traversalList.emplace_back(depNode); + } + } + } +} + +/* + * Compute the latest start cycle of the instruction + */ +void ListScheduler::ComputeLStart() { + maxLStart = maxEStart; + + MapleVector &candidates = commonSchedInfo->GetCandidates(); + if (candidates.empty() && waitingQueue.empty()) { + return; + } + + // Push leaf nodes into traversalList + std::vector traversalList; + InitInfoBeforeCompLStart(traversalList); + + /* Compute the lStart of all nodes in the topology sequence */ + while (!traversalList.empty()) { + DepNode *depNode = traversalList.front(); + traversalList.erase(traversalList.begin()); + + for (const auto predLink : depNode->GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + + if (predNode.GetState() != kScheduled) { + predNode.SetLStart(std::min(depNode->GetLStart() - predLink->GetLatency(), predNode.GetLStart())); + } + maxLStart = std::max(maxLStart, predNode.GetLStart()); + + predNode.DecreaseValidSuccsSize(); + if (predNode.GetValidSuccsSize() == 0) { + traversalList.emplace_back(&predNode); + } + } + } +} + +/* Calculate the most used unitKind index */ +void ListScheduler::CalculateMostUsedUnitKindCount() { + std::array unitKindCount = { 0 }; + for (auto node : readyList) { + CountUnitKind(*node, unitKindCount); + } + + uint32 maxCount = 0; + maxUnitIdx = 0; + for (uint32 i = 1; i < kUnitKindLast; ++i) { + if (maxCount < unitKindCount[i]) { + maxCount = unitKindCount[i]; + maxUnitIdx = i; + } + } +} + +/* The index of unitKindCount is unitKind, the element of unitKindCount is count of the unitKind */ +void ListScheduler::CountUnitKind(const DepNode &depNode, std::array 
&unitKindCount) const { + uint32 unitKind = depNode.GetUnitKind(); + auto index = static_cast(__builtin_ffs(static_cast(unitKind))); + while (index != 0) { + ASSERT(index < kUnitKindLast, "invalid unitKind index"); + ++unitKindCount[index]; + unitKind &= ~(1u << (index - 1u)); + index = static_cast(__builtin_ffs(static_cast(unitKind))); + } +} + +void ListScheduler::DumpWaitingQueue() const { + LogInfo::MapleLogger() << " >> waitingQueue: {"; + for (uint32 i = 0; i < waitingQueue.size(); ++i) { + Insn *waitInsn = waitingQueue[i]->GetInsn(); + ASSERT(waitInsn != nullptr, "get insn from depNode failed"); + LogInfo::MapleLogger() << "insn_" << waitInsn->GetId(); + if (i != waitingQueue.size() - 1) { + LogInfo::MapleLogger() << ", "; + } + } + LogInfo::MapleLogger() << "}\n\n"; +} + +void ListScheduler::DumpReadyList() const { + for (uint32 i = 0; i < readyList.size(); ++i) { + Insn *readyInsn = readyList[i]->GetInsn(); + ASSERT(readyInsn != nullptr, "get insn from depNode failed"); + LogInfo::MapleLogger() << "insn_" << readyInsn->GetId() << "(EStart: " << readyList[i]->GetEStart() << ")"; + if (i != readyList.size() - 1) { + LogInfo::MapleLogger() << ", "; + } + } + LogInfo::MapleLogger() << "}\n\n"; +} + +void ListScheduler::DumpScheduledResult() const { + LogInfo::MapleLogger() << " >> scheduledResult: {"; + for (uint32 i = 0; i < commonSchedInfo->GetSchedResultsSize(); ++i) { + Insn *schedInsn = commonSchedInfo->GetSchedResults()[i]->GetInsn(); + ASSERT(schedInsn != nullptr, "get insn from depNode failed"); + LogInfo::MapleLogger() << "insn_" << schedInsn->GetId(); + if (i != commonSchedInfo->GetSchedResultsSize() - 1) { + LogInfo::MapleLogger() << ", "; + } + } + LogInfo::MapleLogger() << "}\n\n"; +} + +void ListScheduler::DumpDelay() const { + BB *curBB = curCDGNode->GetBB(); + ASSERT(curBB != nullptr, "get bb from cdgNode failed"); + LogInfo::MapleLogger() << " >> Delay priority of readyList in bb_" << curBB->GetId() << "\n"; + LogInfo::MapleLogger() << " 
--------------------------------------------------------\n"; + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(8) << "insn" << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(4) << "bb" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << "predDepSize" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << "delay" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << "cost" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(15) << "reservation" << std::resetiosflags(std::ios::right) << + "\n"; + LogInfo::MapleLogger() << " --------------------------------------------------------\n"; + for (auto depNode : commonSchedInfo->GetCandidates()) { + Insn *insn = depNode->GetInsn(); + ASSERT(insn != nullptr, "get insn from depNode failed"); + uint32 predSize = depNode->GetValidPredsSize(); + uint32 delay = depNode->GetDelay(); + ASSERT_NOT_NULL(mad->FindReservation(*insn)); + int latency = mad->FindReservation(*insn)->GetLatency(); + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(8) << insn->GetId() << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(4) << curBB->GetId() << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << predSize << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << delay << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << latency << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(15); + DumpReservation(*depNode); + LogInfo::MapleLogger() << std::resetiosflags(std::ios::right) << "\n"; + } + LogInfo::MapleLogger() << " 
--------------------------------------------------------\n"; +} + +void ListScheduler::DumpEStartLStartOfAllNodes() { + BB *curBB = curCDGNode->GetBB(); + ASSERT(curBB != nullptr, "get bb from cdgNode failed"); + LogInfo::MapleLogger() << " >> max EStart: " << maxEStart << "\n\n"; + LogInfo::MapleLogger() << " >> CP priority of readyList in bb_" << curBB->GetId() << "\n"; + LogInfo::MapleLogger() << " --------------------------------------------------------------------------\n"; + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(8) << "insn" << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(4) << "bb" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << "state" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(12) << "predDepSize" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << "EStart" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << "LStart" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << "cost" << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(15) << "reservation" << std::resetiosflags(std::ios::right) << + "\n"; + LogInfo::MapleLogger() << " --------------------------------------------------------------------------\n"; + DumpDepNodeInfo(*curBB, commonSchedInfo->GetCandidates(), "candi"); + DumpDepNodeInfo(*curBB, waitingQueue, "wait"); + DumpDepNodeInfo(*curBB, readyList, "ready"); + LogInfo::MapleLogger() << " --------------------------------------------------------------------------\n\n"; +} + +void ListScheduler::DumpDepNodeInfo(const BB &curBB, MapleVector &nodes, const std::string state) const{ + for (auto depNode : nodes) { + Insn *insn = depNode->GetInsn(); + ASSERT(insn != nullptr, "get insn from depNode failed"); 
+ uint32 predSize = depNode->GetValidPredsSize(); + uint32 eStart = depNode->GetEStart(); + uint32 lStart = depNode->GetLStart(); + ASSERT_NOT_NULL(mad->FindReservation(*insn)); + int latency = mad->FindReservation(*insn)->GetLatency(); + LogInfo::MapleLogger() << " " << + std::setiosflags(std::ios::left) << std::setw(8) << insn->GetId() << std::resetiosflags(std::ios::left) << + std::setiosflags(std::ios::right) << std::setw(4) << curBB.GetId() << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << state << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(12) << predSize << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << eStart << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(10) << lStart << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(8) << latency << std::resetiosflags(std::ios::right) << + std::setiosflags(std::ios::right) << std::setw(15); + DumpReservation(*depNode); + LogInfo::MapleLogger() << std::resetiosflags(std::ios::right) << "\n"; + } +} + +void ListScheduler::DumpReservation(const DepNode &depNode) const { + for (uint32 i = 0; i < depNode.GetUnitNum(); ++i) { + UnitId unitId = depNode.GetUnitByIndex(i)->GetUnitId(); + switch (unitId) { + case kUnitIdSlot0: + LogInfo::MapleLogger() << "slot0"; + break; + case kUnitIdSlot1: + LogInfo::MapleLogger() << "slot1"; + break; + case kUnitIdAgen: + LogInfo::MapleLogger() << "agen"; + break; + case kUnitIdHazard: + LogInfo::MapleLogger() << "hazard"; + break; + case kUnitIdCrypto: + LogInfo::MapleLogger() << "crypto"; + break; + case kUnitIdMul: + LogInfo::MapleLogger() << "mul"; + break; + case kUnitIdDiv: + LogInfo::MapleLogger() << "div"; + break; + case kUnitIdBranch: + LogInfo::MapleLogger() << "branch"; + break; + case kUnitIdStAgu: + LogInfo::MapleLogger() << "stAgu"; + break; + case 
kUnitIdLdAgu: + LogInfo::MapleLogger() << "ldAgu"; + break; + case kUnitIdFpAluLo: + LogInfo::MapleLogger() << "fpAluLo"; + break; + case kUnitIdFpAluHi: + LogInfo::MapleLogger() << "fpAluHi"; + break; + case kUnitIdFpMulLo: + LogInfo::MapleLogger() << "fpMulLo"; + break; + case kUnitIdFpMulHi: + LogInfo::MapleLogger() << "fpMulHi"; + break; + case kUnitIdFpDivLo: + LogInfo::MapleLogger() << "fpDivLo"; + break; + case kUnitIdFpDivHi: + LogInfo::MapleLogger() << "fpDivHi"; + break; + case kUnitIdSlotS: + LogInfo::MapleLogger() << "slot0 | slot1"; + break; + case kUnitIdFpAluS: + LogInfo::MapleLogger() << "fpAluLo | fpAluHi"; + break; + case kUnitIdFpMulS: + LogInfo::MapleLogger() << "fpMulLo | fpMulHi"; + break; + case kUnitIdFpDivS: + LogInfo::MapleLogger() << "fpDivLo | fpDivHi"; + break; + case kUnitIdSlotD: + LogInfo::MapleLogger() << "slot0 & slot1"; + break; + case kUnitIdFpAluD: + LogInfo::MapleLogger() << "fpAluLo & fpAluHi"; + break; + case kUnitIdFpMulD: + LogInfo::MapleLogger() << "fpMulLo & fpMulHi"; + break; + case kUnitIdFpDivD: + LogInfo::MapleLogger() << "fpMulLo & fpMulHi"; + break; + case kUnitIdSlotSHazard: + LogInfo::MapleLogger() << "(slot0 | slot1) & hazard"; + break; + case kUnitIdSlotSMul: + LogInfo::MapleLogger() << "(slot0 | slot1) & mul"; + break; + case kUnitIdSlotSBranch: + LogInfo::MapleLogger() << "(slot0 | slot1) & branch"; + break; + case kUnitIdSlotSAgen: + LogInfo::MapleLogger() << "(slot0 | slot1) & agen"; + break; + case kUnitIdSlotDAgen: + LogInfo::MapleLogger() << "slot0 & slot1 & agen"; + break; + case kUnitIdSlot0LdAgu: + LogInfo::MapleLogger() << "slot0 & ldAgu"; + break; + case kUnitIdSlot0StAgu: + LogInfo::MapleLogger() << "slot0 & stAgu"; + break; + default: + LogInfo::MapleLogger() << "unknown"; + break; + } + if (i != depNode.GetUnitNum() - 1) { + LogInfo::MapleLogger() << ", "; + } + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/live.cpp b/src/mapleall/maple_be/src/cg/live.cpp index 
0d5724e35542300139da53ebb3b5d79894b166e2..19191057b85bf7e91de80e105fb2eb46d21cf938 100644 --- a/src/mapleall/maple_be/src/cg/live.cpp +++ b/src/mapleall/maple_be/src/cg/live.cpp @@ -106,6 +106,9 @@ void LiveAnalysis::BuildInOutforFunc() { ++iteration; hasChange = false; FOR_ALL_BB_REV(bb, cgFunc) { + if (!bb || bb->IsUnreachable() || !bb->GetLiveOut() || !bb->GetLiveIn()) { + continue; + } if (!GenerateLiveOut(*bb) && bb->GetInsertUse()) { continue; } @@ -201,7 +204,7 @@ void LiveAnalysis::GetBBDefUse(BB &bb) const { if (bb.GetKind() == BB::kBBReturn) { GenerateReturnBBDefUse(bb); } - if (bb.IsEmpty()) { + if (!bb.HasMachineInsn()) { return; } bb.DefResetAllBit(); @@ -243,11 +246,14 @@ void LiveAnalysis::GetBBDefUse(BB &bb) const { CollectLiveInfo(bb, opnd, isDef, isUse); } } + if (insn->GetSSAImpDefOpnd() != nullptr) { + CollectLiveInfo(bb, *insn->GetSSAImpDefOpnd(), true, false); + } } } -/* build use and def sets of each BB according to the type of regOpnd. */ -void LiveAnalysis::CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse) const { +/* build use and def sets of each BB according to the type of regOpnd. bb can not be marked as const. */ +void LiveAnalysis::CollectLiveInfo(BB &bb, const Operand &opnd, bool isDef, bool isUse) const { if (!opnd.IsRegister()) { return; } diff --git a/src/mapleall/maple_be/src/cg/local_schedule.cpp b/src/mapleall/maple_be/src/cg/local_schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a9fb76d106fef6382c129131af4975cfbb4ae12b --- /dev/null +++ b/src/mapleall/maple_be/src/cg/local_schedule.cpp @@ -0,0 +1,117 @@ +/* +* Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include "local_schedule.h" +#include "data_dep_base.h" +#include "aarch64_data_dep_base.h" +#include "cg.h" +#include "optimize_common.h" + + +namespace maplebe { +void LocalSchedule::Run() { + FCDG *fcdg = cda.GetFCDG(); + CHECK_FATAL(fcdg != nullptr, "control dependence analysis failed"); + if (LOCAL_SCHEDULE_DUMP) { + DotGenerator::GenerateDot("localsched", cgFunc, cgFunc.GetMirModule(), true, cgFunc.GetName()); + } + InitInsnIdAndLocInsn(); + for (auto region : fcdg->GetAllRegions()) { + if (region == nullptr || !CheckCondition(*region)) { + continue; + } + CDGNode *cdgNode = region->GetRegionRoot(); + BB *bb = cdgNode->GetBB(); + ASSERT(bb != nullptr, "get bb from cdgNode failed"); + if (bb->IsAtomicBuiltInBB()) { + continue; + } + interDDA.Run(*region); + InitInRegion(*region); + if (LOCAL_SCHEDULE_DUMP) { + DumpRegionInfoBeforeSchedule(*region); + } + DoLocalSchedule(*cdgNode); + } +} + +bool LocalSchedule::CheckCondition(CDGRegion ®ion) const { + CHECK_FATAL(region.GetRegionNodeSize() == 1 && region.GetRegionRoot() != nullptr, + "invalid region in local scheduling"); + uint32 insnSum = 0; + for (auto cdgNode : region.GetRegionNodes()) { + BB *bb = cdgNode->GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from cdgNode failed"); + FOR_BB_INSNS_CONST(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + insnSum++; + } + } + if (insnSum > kMaxInsnNum) { + return false; + } + return true; +} + +void LocalSchedule::DoLocalSchedule(CDGNode &cdgNode) { + listScheduler = schedMP.New(schedMP, cgFunc, false, "localschedule"); + InitInCDGNode(cdgNode); + listScheduler->SetCDGRegion(*cdgNode.GetRegion()); + 
listScheduler->SetCDGNode(cdgNode); + listScheduler->DoListScheduling(); + FinishScheduling(cdgNode); + if (LOCAL_SCHEDULE_DUMP) { + DumpCDGNodeInfoAfterSchedule(cdgNode); + } +} + +void LocalSchedule::InitInCDGNode(CDGNode &cdgNode) { + commonSchedInfo = schedMP.New(schedMP); + for (auto depNode : cdgNode.GetAllDataNodes()) { + commonSchedInfo->AddCandidates(depNode); + depNode->SetState(kCandidate); + } + listScheduler->SetCommonSchedInfo(*commonSchedInfo); + + uint32 insnNum = 0; + BB *curBB = cdgNode.GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + FOR_BB_INSNS_CONST(insn, curBB) { + if (insn->IsMachineInstruction()) { + insnNum++; + } + } + cdgNode.SetInsnNum(insnNum); + + if (LOCAL_SCHEDULE_DUMP) { + DumpCDGNodeInfoBeforeSchedule(cdgNode); + } +} + +bool CgLocalSchedule::PhaseRun(maplebe::CGFunc &f) { + MemPool *memPool = GetPhaseMemPool(); + auto *cda = memPool->New(f, *memPool, "localschedule", true); + cda->Run(); + MAD *mad = Globals::GetInstance()->GetMAD(); + auto *ddb = memPool->New(*memPool, f, *mad, false); + auto *idda = memPool->New(f, *memPool, *ddb); + auto *localScheduler = f.GetCG()->CreateLocalSchedule(*memPool, f, *cda, *idda); + localScheduler->Run(); + return true; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgLocalSchedule, localschedule) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/loop.cpp b/src/mapleall/maple_be/src/cg/loop.cpp index a895278712051f0874f83928930e7abb9bcdd2af..17397fa7ff0be73942273574f66f19bd5b2addd2 100644 --- a/src/mapleall/maple_be/src/cg/loop.cpp +++ b/src/mapleall/maple_be/src/cg/loop.cpp @@ -582,7 +582,7 @@ static void CopyLoopInfo(const LoopHierarchy &from, CGFuncLoops &to, CGFuncLoops } for (auto *bb : from.GetLoopMembers()) { to.AddLoopMembers(*bb); - bb->SetLoop(to); + bb->SetLoop(&to); } for (auto *bb : from.GetBackedge()) { to.AddBackedge(*bb); @@ -629,6 +629,11 @@ void LoopFinder::FormLoopHierarchy() { FOR_ALL_BB(bb, cgFunc) { bb->SetLevel(0); + // Loop analysis 
inserts info into common objects in the entire CG phase(CGFunc & BB ...), + // before re-analysis, the info must be cleared. + // It needs to be rectified in the future, infos after loop analysis are stored in the result, + // instead of the common object. + bb->SetLoop(nullptr); } bool changed; do { diff --git a/src/mapleall/maple_be/src/cg/optimize_common.cpp b/src/mapleall/maple_be/src/cg/optimize_common.cpp index 635f9e2bd701c9df42abfcf00921d6f56385d3a1..afd73052803f03d6975a3f85030c01aea6ad97dd 100644 --- a/src/mapleall/maple_be/src/cg/optimize_common.cpp +++ b/src/mapleall/maple_be/src/cg/optimize_common.cpp @@ -36,6 +36,7 @@ void Optimizer::Run(const std::string &funcName, bool checkOnly) { while (curBB != nullptr) { for (OptimizationPattern *p : singlePassPatterns) { if (p->Optimize(*curBB)) { + doOptWithSinglePassPatterns = true; flag = p->IsKeepPosition(); p->SetKeepPosition(false); break; @@ -50,6 +51,11 @@ void Optimizer::Run(const std::string &funcName, bool checkOnly) { } } + // Update commonExitBB info, especially in infinite loop case. + // But we can not get the commonExitBB by traversal CFG, so + // it needs to be handled separately. 
+ cgFunc->GetTheCFG()->UpdateCommonExitBBInfo(); + if (CGOptions::IsDumpOptimizeCommonLog()) { constexpr int arrSize = 80; char post[arrSize]; @@ -270,7 +276,7 @@ void DotGenerator::GenerateDot(const std::string &preFix, const CGFunc &cgFunc, std::streambuf *coutBuf = std::cout.rdbuf(); /* keep original cout buffer */ std::streambuf *buf = cfgFile.rdbuf(); std::cout.rdbuf(buf); - std::string fileName = GetFileName(mod, (preFix + "-" + fname)); + std::string fileName = GetFileName(mod, (preFix + "-" + fname + ".dot")); cfgFile.open(fileName, std::ios::trunc); CHECK_FATAL(cfgFile.is_open(), "Failed to open output file: %s", fileName.c_str()); diff --git a/src/mapleall/maple_be/src/cg/peep.cpp b/src/mapleall/maple_be/src/cg/peep.cpp index ca891ad2e20a97dc9d9c0db360fa305d3fd60929..3ff7e39813dc224636c6f8ff5cd2533bfbc7756f 100644 --- a/src/mapleall/maple_be/src/cg/peep.cpp +++ b/src/mapleall/maple_be/src/cg/peep.cpp @@ -23,7 +23,7 @@ #elif defined TARGX86_64 #include "x64_peep.h" #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 #include "arm32_peep.h" #endif @@ -164,7 +164,7 @@ bool CGPeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &in continue; } const InsnDesc *md = nextInsn->GetDesc(); - auto *regProp = (md->opndMD[i]); + auto *regProp = (md->opndMD[static_cast(i)]); bool isUse = regProp->IsUse(); /* if noUse Redefined, no need to check live-out. 
*/ return isUse; @@ -269,7 +269,7 @@ ReturnType CGPeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); for (int32 i = lastOpndId; i >= 0; --i) { Operand &opnd = insn->GetOperand(static_cast(i)); - auto *regProp = (md->opndMD[i]); + auto *regProp = (md->opndMD[static_cast(i)]); if (opnd.IsConditionCode()) { if (regOpnd.GetRegisterNumber() == kRFLAG) { bool isUse = regProp->IsUse(); @@ -378,7 +378,7 @@ bool PeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn continue; } const InsnDesc *md = nextInsn->GetDesc(); - auto *regProp = (md->opndMD[i]); + auto *regProp = (md->opndMD[static_cast(i)]); bool isUse = regProp->IsUse(); /* if noUse Redefined, no need to check live-out. */ return isUse; @@ -483,7 +483,7 @@ ReturnType PeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); for (int32 i = lastOpndId; i >= 0; --i) { Operand &opnd = insn->GetOperand(static_cast(i)); - auto *regProp = (md->opndMD[i]); + auto *regProp = (md->opndMD[static_cast(i)]); if (opnd.IsConditionCode()) { if (regOpnd.GetRegisterNumber() == kRFLAG) { bool isUse = regProp->IsUse(); @@ -601,10 +601,10 @@ int32 PeepOptimizer::index = 0; void PeepHoleOptimizer::Peephole0() const { auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); -#if TARGAARCH64 || TARGRISCV64 +#if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGRISCV64) && TARGRISCV64) peepOptimizer.Run(); #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 peepOptimizer.Run(); #endif } @@ -612,10 +612,10 @@ void PeepHoleOptimizer::Peephole0() const { void PeepHoleOptimizer::PrePeepholeOpt() const { auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); -#if TARGAARCH64 || TARGRISCV64 +#if (defined(TARGAARCH64) && TARGAARCH64) || 
(defined(TARGRISCV64) && TARGRISCV64) peepOptimizer.Run(); #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 peepOptimizer.Run(); #endif } @@ -623,10 +623,10 @@ void PeepHoleOptimizer::PrePeepholeOpt() const { void PeepHoleOptimizer::PrePeepholeOpt1() const { auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); -#if TARGAARCH64 || TARGRISCV64 +#if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGRISCV64) && TARGRISCV64) peepOptimizer.Run(); #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 peepOptimizer.Run(); #endif } diff --git a/src/mapleall/maple_be/src/cg/proepilog.cpp b/src/mapleall/maple_be/src/cg/proepilog.cpp index 51c502b4a7accd533df4102fcbb157635fc7938d..6ddd99c624e5652ec8991cda2d9dbe2f392e4ec3 100644 --- a/src/mapleall/maple_be/src/cg/proepilog.cpp +++ b/src/mapleall/maple_be/src/cg/proepilog.cpp @@ -13,15 +13,15 @@ * See the Mulan PSL v2 for more details. */ #include "proepilog.h" -#if TARGAARCH64 +#if defined(TARGAARCH64) && TARGAARCH64 #include "aarch64_proepilog.h" #elif defined(TARGRISCV64) && TARGRISCV64 #include "riscv64_proepilog.h" #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 #include "arm32_proepilog.h" #endif -#if TARGX86_64 +#if defined(TARGX86_64) && TARGX86_64 #include "x64_proepilog.h" #endif #include "cgfunc.h" @@ -32,13 +32,13 @@ using namespace maple; bool CgGenProEpiLog::PhaseRun(maplebe::CGFunc &f) { GenProEpilog *genPE = nullptr; -#if TARGAARCH64 || TARGRISCV64 +#if (defined(TARGAARCH64) && TARGAARCH64) || (defined(TARGRISCV64) && TARGRISCV64) genPE = GetPhaseAllocator()->New(f, *ApplyTempMemPool()); #endif -#if TARGARM32 +#if defined(TARGARM32) && TARGARM32 genPE = GetPhaseAllocator()->New(f); #endif -#if TARGX86_64 +#if defined(TARGX86_64) && TARGX86_64 genPE = GetPhaseAllocator()->New(f); #endif genPE->Run(); diff --git a/src/mapleall/maple_be/src/cg/ra_opt.cpp b/src/mapleall/maple_be/src/cg/ra_opt.cpp index 
4289f545a00471846bd561f40415a6729d758d84..7d0c2c2ffe2f69670542ae4bbf1b29add5eab9fb 100644 --- a/src/mapleall/maple_be/src/cg/ra_opt.cpp +++ b/src/mapleall/maple_be/src/cg/ra_opt.cpp @@ -28,7 +28,7 @@ bool CgRaOpt::PhaseRun(maplebe::CGFunc &f) { RaOpt *raOpt = nullptr; #if TARGAARCH64 raOpt = memPool->New(f, *memPool); -#elif || TARGRISCV64 +#elif defined(TARGRISCV64) || TARGRISCV64 raOpt = memPool->New(f, *memPool); #endif diff --git a/src/mapleall/maple_be/src/cg/reaching.cpp b/src/mapleall/maple_be/src/cg/reaching.cpp index 05fbed0736e029ea678c9926b4e3fa681a0af471..c6b21be609c51685efa7f4f7b915aebaf3ed0354 100644 --- a/src/mapleall/maple_be/src/cg/reaching.cpp +++ b/src/mapleall/maple_be/src/cg/reaching.cpp @@ -582,6 +582,10 @@ void ReachingDefinition::Initialize() { maxInsnNO = 0; FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + insn->SetId(maxInsnNO); + continue; + } insn->SetId(maxInsnNO); maxInsnNO += kInsnNoInterval; } diff --git a/src/mapleall/maple_be/src/cg/reg_alloc_basic.cpp b/src/mapleall/maple_be/src/cg/reg_alloc_basic.cpp index 31098beff6527156a4271f95afa4ca86e896b72b..93c8fbba5db1e0e462c0ffc165e8bbe36e4285f0 100644 --- a/src/mapleall/maple_be/src/cg/reg_alloc_basic.cpp +++ b/src/mapleall/maple_be/src/cg/reg_alloc_basic.cpp @@ -120,7 +120,7 @@ void DefaultO0RegAllocator::InitAvailReg() { } } -void DefaultO0RegAllocator::CheckLiveAndReleaseReg(regno_t preg, regno_t vreg , const Insn &cInsn) { +void DefaultO0RegAllocator::CheckLiveAndReleaseReg(regno_t preg, regno_t vreg, const Insn &cInsn) { /* record defined register number in this insn */ multiDefForInsn.emplace(preg); uint32 id = GetRegLivenessId(vreg); diff --git a/src/mapleall/maple_be/src/cg/reg_alloc_color_ra.cpp b/src/mapleall/maple_be/src/cg/reg_alloc_color_ra.cpp index d08bde085986d6825ee5bc5efbc7d92d16f483c4..1ce059e650bd0e34f553be1ef3af0d16627e3a9c 100644 --- a/src/mapleall/maple_be/src/cg/reg_alloc_color_ra.cpp +++ 
b/src/mapleall/maple_be/src/cg/reg_alloc_color_ra.cpp @@ -13,6 +13,7 @@ * See the Mulan PSL v2 for more details. */ #include "reg_alloc_color_ra.h" +#include #include "cg.h" #include "mir_lower.h" #include "securec.h" @@ -68,11 +69,11 @@ static inline PrimType GetPrimTypeFromRegTyAndRegSize(RegType regTy, uint32 regS return primType; } -void LiveUnit::PrintLiveUnit() const { - LogInfo::MapleLogger() << "[" << begin << "," << end << "]" +void LiveUnit::Dump() const { + LogInfo::MapleLogger() << "[" << begin << "," << end << ")" << ""; if (!hasCall) { - /* Too many calls, so only print when there is no call. */ + // Too many calls, so only print when there is no call. LogInfo::MapleLogger() << " nc"; } if (needReload) { @@ -83,146 +84,119 @@ void LiveUnit::PrintLiveUnit() const { } } -template -void GraphColorRegAllocator::ForEachBBArrElem(const uint64 *vec, Func functor) const { - for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { - for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { - if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { - functor(iBBArrElem * kU64 + bBBArrElem); - } +void AdjMatrix::Dump() const { + LogInfo::MapleLogger() << "Dump AdjMatrix\n"; + LogInfo::MapleLogger() << "matrix size = " << matrix.size() << ", bucket = " << bucket << "\n"; + for (uint32 i = 0; i < matrix.size(); ++i) { + auto edges = ConvertEdgeToVec(i); + if (edges.empty()) { + continue; } - } -} - -template -void GraphColorRegAllocator::ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const { - for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { - for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { - if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { - if (functor(iBBArrElem * kU64 + bBBArrElem)) { - return; - } - } + LogInfo::MapleLogger() << "R" << i << " edge : "; + for (auto edge : edges) { + LogInfo::MapleLogger() << edge << ","; } + LogInfo::MapleLogger() << "\n"; } } -template -void 
GraphColorRegAllocator::ForEachRegArrElem(const uint64 *vec, Func functor) const { - for (uint32 iBBArrElem = 0; iBBArrElem < regBuckets; ++iBBArrElem) { - for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { - if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { - functor(iBBArrElem * kU64 + bBBArrElem); - } +void ConfilctInfo::Dump() const { + LogInfo::MapleLogger() << "\tforbidden(" << numForbidden << "): "; + for (regno_t preg = 0; preg < forbidden.size(); ++preg) { + if (forbidden[preg]) { + LogInfo::MapleLogger() << preg << ","; } } -} - -void GraphColorRegAllocator::PrintLiveUnitMap(const LiveRange &lr) const { - LogInfo::MapleLogger() << "\n\tlu:"; - for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { - if (!IsBitArrElemSet(lr.GetBBMember(), i)) { - continue; + LogInfo::MapleLogger() << "\tpregveto(" << numPregveto << "): "; + for (regno_t preg = 0; preg < pregveto.size(); ++preg) { + if (pregveto[preg]) { + LogInfo::MapleLogger() << preg << ","; } - auto lu = lr.GetLuMap().find(i); - if (lu != lr.GetLuMap().end() && ((lu->second->GetDefNum() > 0) || (lu->second->GetUseNum() > 0))) { - LogInfo::MapleLogger() << "(" << i << " "; - lu->second->PrintLiveUnit(); - LogInfo::MapleLogger() << ")"; + } + LogInfo::MapleLogger() << "\tprefs: "; + for (auto regNO : prefs) { + LogInfo::MapleLogger() << regNO << ","; + } + LogInfo::MapleLogger() << "\tcalldef: "; + for (regno_t preg = 0; preg < callDef.size(); ++preg) { + if (callDef[preg]) { + LogInfo::MapleLogger() << preg << ","; } } + LogInfo::MapleLogger() << "\n\tinterfere(" << conflict.size() << "): "; + for (auto regNO : conflict) { + LogInfo::MapleLogger() << regNO << ","; + } LogInfo::MapleLogger() << "\n"; } -void GraphColorRegAllocator::PrintLiveRangeConflicts(const LiveRange &lr) const { - LogInfo::MapleLogger() << "\n\tinterfere(" << lr.GetNumBBConflicts() << "): "; - for (uint32 i = 0; i < regBuckets; ++i) { - uint64 chunk = lr.GetBBConflictElem(i); - for (uint64 bit = 0; bit < kU64; ++bit) { - if 
((chunk & (1ULL << bit)) > 0) { - regno_t newNO = i * kU64 + bit; - LogInfo::MapleLogger() << newNO << ","; - } +void LiveRange::DumpLiveUnitMap() const { + LogInfo::MapleLogger() << "\n\tlu:"; + auto dumpLuMapFunc = [this](uint32 bbID) { + auto lu = luMap.find(bbID); + if (lu != luMap.end() && ((lu->second->GetDefNum() > 0) || (lu->second->GetUseNum() > 0))) { + LogInfo::MapleLogger() << "(" << bbID << " "; + lu->second->Dump(); + LogInfo::MapleLogger() << ")"; } - } + }; + ForEachBBArrElem(bbMember, dumpLuMapFunc); LogInfo::MapleLogger() << "\n"; } -void GraphColorRegAllocator::PrintLiveBBBit(const LiveRange &lr) const { - LogInfo::MapleLogger() << "live_bb(" << lr.GetNumBBMembers() << "): "; - for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { - if (IsBitArrElemSet(lr.GetBBMember(), i)) { - LogInfo::MapleLogger() << i << " "; - } - } +void LiveRange::DumpLiveBB() const { + LogInfo::MapleLogger() << "live_bb(" << GetNumBBMembers() << "): "; + auto dumpLiveBBFunc = [](uint32 bbID) { + LogInfo::MapleLogger() << bbID << " "; + }; + ForEachBBArrElem(bbMember, dumpLiveBBFunc); LogInfo::MapleLogger() << "\n"; } -void GraphColorRegAllocator::PrintLiveRange(const LiveRange &lr, const std::string &str) const { +void LiveRange::Dump(const std::string &str) const { LogInfo::MapleLogger() << str << "\n"; - LogInfo::MapleLogger() << "R" << lr.GetRegNO(); - if (lr.GetRegType() == kRegTyInt) { + LogInfo::MapleLogger() << "R" << regNO; + if (regType == kRegTyInt) { LogInfo::MapleLogger() << "(I)"; - } else if (lr.GetRegType() == kRegTyFloat) { + } else if (regType == kRegTyFloat) { LogInfo::MapleLogger() << "(F)"; } else { LogInfo::MapleLogger() << "(U)"; } - if (lr.GetSpillSize() == k8BitSize) { - LogInfo::MapleLogger() << "S8"; - } else if (lr.GetSpillSize() == k16BitSize) { - LogInfo::MapleLogger() << "S16"; - } else if (lr.GetSpillSize() == k32BitSize) { - LogInfo::MapleLogger() << "S32"; - } else if (lr.GetSpillSize() == k64BitSize) { - LogInfo::MapleLogger() << "S64"; - } 
else { - LogInfo::MapleLogger() << "S0(nodef)"; - } - LogInfo::MapleLogger() << "\tnumCall " << lr.GetNumCall(); - if (lr.GetCrossCall()) { - LogInfo::MapleLogger() << "\tcrossCall "; - } - LogInfo::MapleLogger() << "\tpriority " << lr.GetPriority(); - LogInfo::MapleLogger() << "\tforbidden: "; - for (regno_t preg = regInfo->GetInvalidReg(); preg < regInfo->GetAllRegNum(); preg++) { - if (lr.GetForbidden(preg)) { - LogInfo::MapleLogger() << preg << ","; - } - } - LogInfo::MapleLogger() << "\tcalldef: "; - for (regno_t preg = regInfo->GetInvalidReg(); preg < regInfo->GetAllRegNum(); preg++) { - if (lr.GetCallDef(preg)) { - LogInfo::MapleLogger() << preg << ","; - } + LogInfo::MapleLogger() << "S" << spillSize; + if (assignedRegNO != 0) { + LogInfo::MapleLogger() << " assigned " << assignedRegNO; } - LogInfo::MapleLogger() << "\tpregveto: "; - for (regno_t preg = regInfo->GetInvalidReg(); preg < regInfo->GetAllRegNum(); preg++) { - if (lr.GetPregveto(preg)) { - LogInfo::MapleLogger() << preg << ","; - } + LogInfo::MapleLogger() << " numCall " << numCall; + if (crossCall) { + LogInfo::MapleLogger() << " crossCall "; } - if (lr.IsSpilled()) { - LogInfo::MapleLogger() << " spilled"; + LogInfo::MapleLogger() << " priority " << priority; + if (spilled) { + LogInfo::MapleLogger() << " spilled " << spillReg; } - if (lr.GetSplitLr()) { + if (splitLr) { LogInfo::MapleLogger() << " split"; } - LogInfo::MapleLogger() << "\top: " << kOpcodeInfo.GetName(lr.GetOp()); + LogInfo::MapleLogger() << " op: " << kOpcodeInfo.GetName(GetOp()); + if (IsLocalReg()) { + LogInfo::MapleLogger() << " local"; + } LogInfo::MapleLogger() << "\n"; - PrintLiveBBBit(lr); - PrintLiveRangeConflicts(lr); - PrintLiveUnitMap(lr); - if (lr.GetSplitLr()) { - PrintLiveRange(*lr.GetSplitLr(), "===>Split LR"); + DumpLiveBB(); + DumpLiveUnitMap(); + confilctInfo.Dump(); + + if (splitLr) { + splitLr->Dump("===>Split LR"); } } void GraphColorRegAllocator::PrintLiveRanges() const { LogInfo::MapleLogger() << 
"PrintLiveRanges: size = " << lrMap.size() << "\n"; - for (auto it : lrMap) { - PrintLiveRange(*it.second, ""); + for (const auto [_, lr] : lrMap) { + lr->Dump(""); } LogInfo::MapleLogger() << "\n"; } @@ -292,7 +266,7 @@ void GraphColorRegAllocator::CalculatePriority(LiveRange &lr) const { if (cgFunc->GetCG()->IsLmbc()) { lr.SetRematLevel(kRematOff); regno_t spSaveReg = cgFunc->GetSpSaveReg(); - if (spSaveReg && lr.GetRegNO() == spSaveReg) { + if (spSaveReg != 0 && lr.GetRegNO() == spSaveReg) { /* For lmbc, %fp and %sp are frame pointer and stack pointer respectively, unlike * non-lmbc where %fp and %sp can be of the same. * With alloca() potentially changing %sp, lmbc creates another register to act @@ -361,7 +335,7 @@ void GraphColorRegAllocator::CalculatePriority(LiveRange &lr) const { lr.SetNumDefs(numDefs); lr.SetNumUses(numUses); if (isSpSave) { - lr.SetPriority(MAXFLOAT); + lr.SetPriority(std::numeric_limits::max()); lr.SetIsSpSave(); return; } @@ -373,20 +347,20 @@ void GraphColorRegAllocator::CalculatePriority(LiveRange &lr) const { } } -void GraphColorRegAllocator::PrintBBs() const { - for (auto *bb : bfs->sortedBBs) { - LogInfo::MapleLogger() << "\n< === > "; - LogInfo::MapleLogger() << bb->GetId(); - LogInfo::MapleLogger() << " succs:"; - for (auto *succBB : bb->GetSuccs()) { - LogInfo::MapleLogger() << " " << succBB->GetId(); - } - LogInfo::MapleLogger() << " eh_succs:"; - for (auto *succBB : bb->GetEhSuccs()) { - LogInfo::MapleLogger() << " " << succBB->GetId(); +void GraphColorRegAllocator::CalculatePriority() const { + for (auto [_, lr] : std::as_const(lrMap)) { +#ifdef USE_LRA + if (doLRA && lr->IsLocalReg()) { + continue; } +#endif // USE_LRA + CalculatePriority(*lr); + } + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "After CalculatePriority\n"; + PrintLiveRanges(); } - LogInfo::MapleLogger() << "\n"; } void GraphColorRegAllocator::InitFreeRegPool() { @@ -469,16 +443,7 @@ void GraphColorRegAllocator::InitFreeRegPool() { */ LiveRange 
*GraphColorRegAllocator::NewLiveRange() { LiveRange *lr = memPool->New(regInfo->GetAllRegNum(), alloc); - - if (bbBuckets == 0) { - bbBuckets = (cgFunc->NumBBs() / kU64) + 1; - } - lr->SetBBBuckets(bbBuckets); - lr->InitBBMember(*memPool, bbBuckets); - lr->SetRegBuckets(regBuckets); - lr->InitBBConflict(*memPool, regBuckets); - lr->InitPregveto(); - lr->InitForbidden(); + lr->InitBBMember(cgFunc->NumBBs()); lr->SetRematerializer(cgFunc->GetCG()->CreateRematerializer(*memPool)); return lr; } @@ -507,34 +472,22 @@ bool GraphColorRegAllocator::CreateLiveRangeHandleLocal(regno_t regNO, const BB return false; } -LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, +LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, uint32 currId) { LiveRange *lr = GetLiveRange(regNO); if (lr == nullptr) { lr = NewLiveRange(); lr->SetID(currId); + } - LiveUnit *lu = memPool->New(); + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb.GetId()); + if (lu == nullptr) { + lu = memPool->New(); lr->SetElemToLuMap(bb.GetId(), *lu); lu->SetBegin(currId); lu->SetEnd(currId); - if (isDef) { - /* means no use after def for reg, chances for ebo opt */ - for (const auto &pregNO : pregLive) { - lr->InsertElemToPregveto(pregNO); - } - } - } else { - LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb.GetId()); - if (lu == nullptr) { - lu = memPool->New(); - lr->SetElemToLuMap(bb.GetId(), *lu); - lu->SetBegin(currId); - lu->SetEnd(currId); - } - if (lu->GetBegin() > currId) { - lu->SetBegin(currId); - } + } else if (lu->GetBegin() > currId) { + lu->SetBegin(currId); } if (CLANG) { @@ -546,8 +499,7 @@ LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regN break; case OP_addrof: case OP_dread: - lr->SetRematerializable(preg->GetOp(), preg->rematInfo.sym, - preg->fieldID, preg->addrUpper); + lr->SetRematerializable(preg->GetOp(), preg->rematInfo.sym, preg->fieldID, preg->addrUpper); break; case 
OP_undef: break; @@ -560,18 +512,16 @@ LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regN return lr; } -void GraphColorRegAllocator::CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, uint32 currId, bool updateCount) { - bool isNonLocal = CreateLiveRangeHandleLocal(regNO, bb, isDef); - - if (!isDef) { - --currId; - } - - LiveRange *lr = CreateLiveRangeAllocateAndUpdate(regNO, bb, isDef, currId); +void GraphColorRegAllocator::CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, + uint32 currId, bool updateCount) { + LiveRange *lr = CreateLiveRangeAllocateAndUpdate(regNO, bb, currId); lr->SetRegNO(regNO); - lr->SetIsNonLocal(isNonLocal); +#ifdef USE_LRA + if (doLRA) { + lr->SetIsNonLocal(CreateLiveRangeHandleLocal(regNO, bb, isDef)); + } +#endif // USE_LRA if (isDef) { - (void)vregLive.erase(regNO); #ifdef OPTIMIZE_FOR_PROLOG if (doOptProlog && updateCount) { if (lr->GetNumDefs() == 0) { @@ -579,9 +529,8 @@ void GraphColorRegAllocator::CreateLiveRange(regno_t regNO, const BB &bb, bool i } lr->IncNumDefs(); } -#endif /* OPTIMIZE_FOR_PROLOG */ +#endif // OPTIMIZE_FOR_PROLOG } else { - (void)vregLive.insert(regNO); #ifdef OPTIMIZE_FOR_PROLOG if (doOptProlog && updateCount) { if (lr->GetNumUses() == 0) { @@ -589,24 +538,14 @@ void GraphColorRegAllocator::CreateLiveRange(regno_t regNO, const BB &bb, bool i } lr->IncNumUses(); } -#endif /* OPTIMIZE_FOR_PROLOG */ - } - for (const auto &pregNO : pregLive) { - lr->InsertElemToPregveto(pregNO); +#endif // OPTIMIZE_FOR_PROLOG } - - /* only handle it in live_in and def point? */ - uint32 bbID = bb.GetId(); - lr->SetMemberBitArrElem(bbID); - + // only handle it in live_in and def point? 
+ lr->SetBBMember(bb.GetId()); lrMap[regNO] = lr; } -bool GraphColorRegAllocator::SetupLiveRangeByOpHandlePhysicalReg(const RegOperand ®Opnd, Insn &insn, - regno_t regNO, bool isDef) { - if (!regOpnd.IsPhysicalRegister()) { - return false; - } +void GraphColorRegAllocator::SetupLiveRangeByPhysicalReg(const Insn &insn, regno_t regNO, bool isDef) { LocalRaInfo *lraInfo = localRegVec[insn.GetBB()->GetId()]; if (lraInfo == nullptr) { lraInfo = memPool->New(alloc); @@ -614,61 +553,45 @@ bool GraphColorRegAllocator::SetupLiveRangeByOpHandlePhysicalReg(const RegOperan } if (isDef) { - if (FindNotIn(pregLive, regNO)) { - for (const auto &vRegNO : vregLive) { - if (regInfo->IsUnconcernedReg(vRegNO)) { - continue; - } - lrMap[vRegNO]->InsertElemToPregveto(regNO); - } - } - pregLive.erase(regNO); - if (lraInfo != nullptr) { - lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); - } + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); } else { - (void)pregLive.insert(regNO); - for (const auto &vregNO : vregLive) { - if (regInfo->IsUnconcernedReg(vregNO)) { - continue; - } - LiveRange *lr = lrMap[vregNO]; - lr->InsertElemToPregveto(regNO); - } - - if (lraInfo != nullptr) { - lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); - } + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); } - return true; } -/* - * add pregs to forbidden list of lr. If preg is in - * the live list, then it is forbidden for other vreg on the list. 
- */ -void GraphColorRegAllocator::SetupLiveRangeByOp(Operand &op, Insn &insn, bool isDef, uint32 &numUses) { - if (!op.IsRegister()) { - return; - } - auto ®Opnd = static_cast(op); +void GraphColorRegAllocator::SetupLiveRangeByRegOpnd(const Insn &insn, const RegOperand ®Opnd, + uint32 regSize, bool isDef) { uint32 regNO = regOpnd.GetRegisterNumber(); if (regInfo->IsUnconcernedReg(regOpnd)) { if (GetLiveRange(regNO) != nullptr) { ASSERT(false, "Unconcerned reg"); - lrMap.erase(regNO); + (void)lrMap.erase(regNO); } return; } - if (SetupLiveRangeByOpHandlePhysicalReg(regOpnd, insn, regNO, isDef)) { + + if (!regInfo->IsVirtualRegister(regNO)) { + SetupLiveRangeByPhysicalReg(insn, regNO, isDef); return; } CreateLiveRange(regNO, *insn.GetBB(), isDef, insn.GetId(), true); LiveRange *lr = GetLiveRange(regNO); - ASSERT(lr != nullptr, "lr should not be nullptr"); - lr->SetSpillSize(regOpnd.GetSize()); + CHECK_FATAL(lr != nullptr, "lr should not be nullptr"); + if (isDef) { + lr->SetMaxDefSize(std::max(regSize, lr->GetMaxDefSize())); + } else { + lr->SetMaxUseSize(std::max(regSize, lr->GetMaxUseSize())); + } + if (lr->GetMaxDefSize() == 0) { + lr->SetSpillSize(lr->GetMaxUseSize()); + } else if (lr->GetMaxUseSize() == 0) { + lr->SetSpillSize(lr->GetMaxDefSize()); + } else { + lr->SetSpillSize(std::min(lr->GetMaxDefSize(), lr->GetMaxUseSize())); + } + if (lr->GetRegType() == kRegTyUndef) { lr->SetRegType(regOpnd.GetRegisterType()); } @@ -678,7 +601,6 @@ void GraphColorRegAllocator::SetupLiveRangeByOp(Operand &op, Insn &insn, bool is } else { lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->IncUseNum(); lr->AddRef(insn.GetBB()->GetId(), insn.GetId(), kIsUse); - ++numUses; } #ifdef MOVE_COALESCE if (insn.IsIntRegisterMov()) { @@ -698,227 +620,176 @@ void GraphColorRegAllocator::SetupLiveRangeByOp(Operand &op, Insn &insn, bool is } } -/* handle live range for bb->live_out */ -void GraphColorRegAllocator::SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint) { - if 
(regInfo->IsUnconcernedReg(liveOut)) { - return; - } - if (regInfo->IsVirtualRegister(liveOut)) { - (void)vregLive.insert(liveOut); - CreateLiveRange(liveOut, bb, false, currPoint, false); - return; - } - - (void)pregLive.insert(liveOut); - for (const auto &vregNO : vregLive) { - LiveRange *lr = lrMap[vregNO]; - lr->InsertElemToPregveto(liveOut); - } - - /* See if phys reg is livein also. Then assume it span the entire bb. */ - if (!FindIn(bb.GetLiveInRegNO(), liveOut)) { - return; - } - LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; - if (lraInfo == nullptr) { - lraInfo = memPool->New(alloc); - localRegVec[bb.GetId()] = lraInfo; - } - /* Make it a large enough so no locals can be allocated. */ - lraInfo->SetUseCntElem(liveOut, kMaxUint16); -} +void GraphColorRegAllocator::ComputeLiveRangeByLiveOut(BB &bb, uint32 currPoint) { + for (auto liveOut : bb.GetLiveOutRegNO()) { + if (regInfo->IsUnconcernedReg(liveOut)) { + continue; + } + if (regInfo->IsVirtualRegister(liveOut)) { + (void)vregLive.insert(liveOut); + CreateLiveRange(liveOut, bb, false, currPoint, false); + continue; + } -void GraphColorRegAllocator::ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, - const Operand &opnd) const { - if (!opnd.IsRegister()) { - return; - } - auto ®Opnd = static_cast(opnd); - regno_t regNO = regOpnd.GetRegisterNumber(); - if (regInfo->IsUnconcernedReg(regNO)) { - return; - } - if (regOpnd.IsPhysicalRegister()) { - (void)pregs.insert(regNO); - } else { - (void)vregs.insert(regNO); + (void)pregLive.insert(liveOut); + // See if phys reg is livein also. Then assume it span the entire bb. + if (!FindIn(bb.GetLiveInRegNO(), liveOut)) { + continue; + } + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[bb.GetId()] = lraInfo; + } + // Make it a large enough so no locals can be allocated. 
+ lraInfo->SetUseCntElem(liveOut, kMaxUint16); } } -void GraphColorRegAllocator::SetOpndConflict(const Insn &insn, bool onlyDef) { - uint32 opndNum = insn.GetOperandSize(); - if (opndNum <= 1) { - return; - } +// collet reg opnd from insn +void GraphColorRegAllocator::CollectRegOpndInfo(const Insn &insn, + std::vector &defOpnds, + std::vector &useOpnds) { const InsnDesc *md = insn.GetDesc(); - std::unordered_set pregs; - std::unordered_set vregs; - - for (uint32 i = 0; i < opndNum; ++i) { - Operand &opnd = insn.GetOperand(i); - if (!onlyDef) { - if (opnd.IsList()) { - auto &listOpnd = static_cast(opnd); - for (auto &op : listOpnd.GetOperands()) { - ClassifyOperand(pregs, vregs, *op); - } - } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); - Operand *base = memOpnd.GetBaseRegister(); - Operand *offset = memOpnd.GetIndexRegister(); - if (base != nullptr) { - ClassifyOperand(pregs, vregs, *base); + for (uint32 i = 0; i < insn.GetOperandSize(); ++i) { + auto &opnd = insn.GetOperand(i); + const auto *opndDesc = md->GetOpndDes(i); + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regInfo->IsUnconcernedReg(regOpnd)) { + continue; + } + if (opndDesc->IsDef()) { + (void)defOpnds.emplace_back(®Opnd, opndDesc->GetSize()); + } + if (opndDesc->IsUse()) { + (void)useOpnds.emplace_back(®Opnd, opndDesc->GetSize()); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *offset = memOpnd.GetIndexRegister(); + if (base && !regInfo->IsUnconcernedReg(*base)) { + (void)useOpnds.emplace_back(base, base->GetSize()); + } + if (offset && !regInfo->IsUnconcernedReg(*offset)) { + (void)useOpnds.emplace_back(offset, offset->GetSize()); + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto *regOpnd : std::as_const(listOpnd.GetOperands())) { + if (regInfo->IsUnconcernedReg(*regOpnd)) { + continue; } - if (offset != nullptr) { - 
ClassifyOperand(pregs, vregs, *offset); + if (opndDesc->IsDef()) { + (void)defOpnds.emplace_back(regOpnd, regOpnd->GetSize()); } - } else if (opnd.IsRegister()) { - ClassifyOperand(pregs, vregs, opnd); - } - } else { - if (md->GetOpndDes(i)->IsRegDef()) { - ClassifyOperand(pregs, vregs, opnd); - } - if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); - Operand *base = memOpnd.GetBaseRegister(); - if (base != nullptr && !memOpnd.IsIntactIndexed()) { - ClassifyOperand(pregs, vregs, *base); + if (opndDesc->IsUse()) { + (void)useOpnds.emplace_back(regOpnd, regOpnd->GetSize()); } } } } + if (defOpnds.size() > 1 || useOpnds.size() >= regInfo->GetNormalUseOperandNum()) { + needExtraSpillReg = true; + } - if (vregs.empty()) { + if (insn.IsCall()) { + // def the return value, all living preg is defed as call + for (regno_t preg : pregLive) { + auto *phyReg = regInfo->GetOrCreatePhyRegOperand(preg, k64BitSize, + regInfo->IsGPRegister(preg) ? kRegTyInt : kRegTyFloat); + (void)defOpnds.emplace_back(phyReg, k64BitSize); + } + } +} + +void GraphColorRegAllocator::InsertRegLive(regno_t regNO) { + if (regInfo->IsUnconcernedReg(regNO)) { return; } - /* Set BBConflict and Pregveto */ - for (regno_t vregNO : vregs) { - for (regno_t conflictVregNO : vregs) { - if (conflictVregNO != vregNO) { - lrMap[vregNO]->SetConflictBitArrElem(conflictVregNO); - } - } - for (regno_t conflictPregNO : pregs) { - lrMap[vregNO]->InsertElemToPregveto(conflictPregNO); - } + if (regInfo->IsVirtualRegister(regNO)) { + (void)vregLive.insert(regNO); + } else { + (void)pregLive.insert(regNO); } } -void GraphColorRegAllocator::UpdateOpndConflict(const Insn &insn, bool multiDef) { - /* if IsSpecialIntrinsic or IsAtomicStore, set conflicts for all opnds */ - if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic()) { - SetOpndConflict(insn, false); +void GraphColorRegAllocator::RemoveRegLive(regno_t regNO) { + if (regInfo->IsUnconcernedReg(regNO)) { return; } - if (multiDef) { - 
SetOpndConflict(insn, true); + if (regInfo->IsVirtualRegister(regNO)) { + (void)vregLive.erase(regNO); + } else { + (void)pregLive.erase(regNO); } } -void GraphColorRegAllocator::ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef) { - uint32 numDefs = 0; - uint32 numUses = 0; - const InsnDesc *md = insn.GetDesc(); - uint32 opndNum = insn.GetOperandSize(); - for (uint32 i = 0; i < opndNum; ++i) { - if (insn.IsAsmInsn() && (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd)) { - for (auto &opnd : static_cast(insn.GetOperand(i)).GetOperands()) { - SetupLiveRangeByOp(*opnd, insn, true, numUses); - ++numDefs; - } - continue; +// regno will conflict with all live reg +void GraphColorRegAllocator::UpdateAdjMatrixByRegNO(regno_t regNO, AdjMatrix &adjMat) { + if (regInfo->IsVirtualRegister(regNO)) { + for (auto preg : pregLive) { + adjMat.AddEdge(regNO, preg); } - Operand &opnd = insn.GetOperand(i); - if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); - if (!memOpnd.IsIntactIndexed()) { - SetupLiveRangeByOp(opnd, insn, true, numUses); - ++numDefs; + for (auto conflictReg : vregLive) { + if (conflictReg == regNO) { + continue; } + adjMat.AddEdge(regNO, conflictReg); } - if (!md->GetOpndDes(i)->IsRegDef()) { - continue; + } else { + for (auto conflictReg : vregLive) { + adjMat.AddEdge(regNO, conflictReg); } - SetupLiveRangeByOp(opnd, insn, true, numUses); - ++numDefs; - } - ASSERT(numUses == 0, "should only be def opnd"); - if (numDefs > 1) { - multiDef = true; - needExtraSpillReg = true; } } -void GraphColorRegAllocator::ComputeLiveRangesForEachUseOperand(Insn &insn) { - uint32 numUses = 0; - const InsnDesc *md = insn.GetDesc(); - uint32 opndNum = insn.GetOperandSize(); - for (uint32 i = 0; i < opndNum; ++i) { - if (insn.IsAsmInsn() && i == kAsmInputListOpnd) { - for (auto &opnd : static_cast(insn.GetOperand(i)).GetOperands()) { - SetupLiveRangeByOp(*opnd, insn, false, numUses); - } - continue; - } - if (md->GetOpndDes(i)->IsRegDef() && 
!md->GetOpndDes(i)->IsRegUse()) { - continue; - } - Operand &opnd = insn.GetOperand(i); - if (opnd.IsList()) { - auto &listOpnd = static_cast(opnd); - for (auto &op : listOpnd.GetOperands()) { - SetupLiveRangeByOp(*op, insn, false, numUses); - } - } else if (opnd.IsMemoryAccessOperand()) { - auto &memOpnd = static_cast(opnd); - Operand *base = memOpnd.GetBaseRegister(); - Operand *offset = memOpnd.GetIndexRegister(); - if (base != nullptr) { - SetupLiveRangeByOp(*base, insn, false, numUses); - } - if (offset != nullptr) { - SetupLiveRangeByOp(*offset, insn, false, numUses); - } - } else { - SetupLiveRangeByOp(opnd, insn, false, numUses); +void GraphColorRegAllocator::UpdateAdjMatrix(const Insn &insn, + const std::vector &defOpnds, + const std::vector &useOpnds, + AdjMatrix &adjMat) { + // if IsAtomicStore or IsSpecialIntrinsic or IsAsm, set conflicts for all opnds + if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic() || insn.IsAsmInsn()) { + for (const auto [regOpnd, _] : useOpnds) { + InsertRegLive(regOpnd->GetRegisterNumber()); } } - if (numUses >= regInfo->GetNormalUseOperandNum()) { - needExtraSpillReg = true; + for (const auto [regOpnd, _] : defOpnds) { + InsertRegLive(regOpnd->GetRegisterNumber()); } -} -void GraphColorRegAllocator::ComputeLiveRangesUpdateIfInsnIsCall(const Insn &insn) { - if (!insn.IsCall()) { - return; + // Set conflict reg and pregvoto + for (const auto [regOpnd, _] : defOpnds) { + UpdateAdjMatrixByRegNO(regOpnd->GetRegisterNumber(), adjMat); } - /* def the return value */ - for (uint32 i = 0; i < regInfo->GetIntRetRegsNum(); ++i) { - pregLive.erase(regInfo->GetIntRetReg(i)); + + // update reglive + for (const auto [regOpnd, _] : defOpnds) { + RemoveRegLive(regOpnd->GetRegisterNumber()); } - for (uint32 i = 0; i < regInfo->GetFpRetRegsNum(); ++i) { - pregLive.erase(regInfo->GetFpRetReg(i)); +} + +void GraphColorRegAllocator::ComputeLiveRangeForDefOperands(const Insn &insn, + std::vector &defOpnds) { + for (auto [regOpnd, regSize] : 
defOpnds) { + SetupLiveRangeByRegOpnd(insn, *regOpnd, regSize, true); } +} - /* active the parametes */ - Operand &opnd1 = insn.GetOperand(1); - if (opnd1.IsList()) { - auto &srcOpnds = static_cast(opnd1); - for (auto ®Opnd : srcOpnds.GetOperands()) { - ASSERT(!regOpnd->IsVirtualRegister(), "not be a virtual register"); - auto physicalReg = regOpnd->GetRegisterNumber(); - (void)pregLive.insert(physicalReg); - } +void GraphColorRegAllocator::ComputeLiveRangeForUseOperands(const Insn &insn, + std::vector &useOpnds) { + for (auto [regOpnd, regSize] : useOpnds) { + SetupLiveRangeByRegOpnd(insn, *regOpnd, regSize, false); } } -void GraphColorRegAllocator::ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, uint32 currPoint) { - for (auto lin : bb.GetLiveInRegNO()) { - if (!regInfo->IsVirtualRegister(lin)) { +void GraphColorRegAllocator::ComputeLiveRangeByLiveIn(BB &bb, uint32 currPoint) { + for (auto liveIn : bb.GetLiveInRegNO()) { + if (!regInfo->IsVirtualRegister(liveIn)) { continue; } - LiveRange *lr = GetLiveRange(lin); + LiveRange *lr = GetLiveRange(liveIn); if (lr == nullptr) { continue; } @@ -934,36 +805,23 @@ void GraphColorRegAllocator::ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, ui } } -bool GraphColorRegAllocator::UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) const { - insn.SetId(currPoint); - if (insn.IsImmaterialInsn() || !insn.IsMachineInstruction()) { - --currPoint; - return true; +void GraphColorRegAllocator::UpdateAdjMatrixByLiveIn(BB &bb, AdjMatrix &adjMat) { + for (auto liveIn : bb.GetLiveInRegNO()) { + InsertRegLive(liveIn); + } + for (auto liveIn : bb.GetLiveInRegNO()) { + UpdateAdjMatrixByRegNO(liveIn, adjMat); } - return false; } void GraphColorRegAllocator::UpdateCallInfo(uint32 bbId, uint32 currPoint, const Insn &insn) { - auto *targetOpnd = insn.GetCallTargetOperand(); - CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); - if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { - 
FuncNameOperand *target = static_cast(targetOpnd); - const MIRSymbol *funcSt = target->GetFunctionSymbol(); - ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); - MIRFunction *func = funcSt->GetFunction(); - if (func != nullptr && func->IsReferedRegsValid()) { - for (auto preg : func->GetReferedRegs()) { - if (!regInfo->IsCalleeSavedReg(preg)) { - for (auto vregNO : vregLive) { - LiveRange *lr = lrMap[vregNO]; - lr->InsertElemToCallDef(preg); - } - } - } - } else { + if (CGOptions::DoIPARA()) { + std::set callerSaveRegs; + cgFunc->GetRealCallerSaveRegs(insn, callerSaveRegs); + for (auto preg : callerSaveRegs) { for (auto vregNO : vregLive) { LiveRange *lr = lrMap[vregNO]; - lr->SetCrossCall(); + lr->InsertElemToCallDef(preg); } } } else { @@ -984,8 +842,26 @@ void GraphColorRegAllocator::UpdateCallInfo(uint32 bbId, uint32 currPoint, const } } -void GraphColorRegAllocator::SetLrMustAssign(const RegOperand *regOpnd) { - regno_t regNO = regOpnd->GetRegisterNumber(); +void GraphColorRegAllocator::UpdateRegLive(const Insn &insn, + const std::vector &useOpnds) { + if (insn.IsCall()) { + // def the return value + for (uint32 i = 0; i < regInfo->GetIntRetRegsNum(); ++i) { + (void)pregLive.erase(regInfo->GetIntRetReg(i)); + } + for (uint32 i = 0; i < regInfo->GetFpRetRegsNum(); ++i) { + (void)pregLive.erase(regInfo->GetFpRetReg(i)); + } + } + + // all use opnds insert to live reg + for (auto [regOpnd, _] : useOpnds) { + InsertRegLive(regOpnd->GetRegisterNumber()); + } +} + +void GraphColorRegAllocator::SetLrMustAssign(const RegOperand ®Opnd) { + regno_t regNO = regOpnd.GetRegisterNumber(); LiveRange *lr = GetLiveRange(regNO); if (lr != nullptr) { lr->SetMustAssigned(); @@ -998,25 +874,70 @@ void GraphColorRegAllocator::SetupMustAssignedLiveRanges(const Insn &insn) { return; } if (insn.IsAsmInsn()) { - for (auto ®Opnd : static_cast(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) { - SetLrMustAssign(regOpnd); + auto &outputList = 
static_cast(insn.GetOperand(kAsmOutputListOpnd)); + for (const auto ®Opnd : outputList.GetOperands()) { + SetLrMustAssign(*regOpnd); } - for (auto ®Opnd : static_cast(insn.GetOperand(kAsmInputListOpnd)).GetOperands()) { - SetLrMustAssign(regOpnd); + auto &inputList = static_cast(insn.GetOperand(kAsmInputListOpnd)); + for (const auto ®Opnd : inputList.GetOperands()) { + SetLrMustAssign(*regOpnd); } return; } uint32 opndNum = insn.GetOperandSize(); for (uint32 i = 0; i < opndNum; ++i) { - Operand *opnd = &insn.GetOperand(i); - if (!opnd->IsRegister()) { + auto &opnd = insn.GetOperand(i); + if (!opnd.IsRegister()) { continue; } - auto regOpnd = static_cast(opnd); + auto ®Opnd = static_cast(opnd); SetLrMustAssign(regOpnd); } } +void GraphColorRegAllocator::AddConflictAndPregvetoToLr(const std::vector &conflict, + LiveRange &lr, bool isInt) { + for (auto conflictReg : conflict) { + if (regInfo->IsUnconcernedReg(conflictReg)) { + continue; + } + + if (regInfo->IsVirtualRegister(conflictReg)) { +#ifdef USE_LRA + if (doLRA && lr.IsLocalReg()) { + continue; + } +#endif // USE_LRA + auto *conflictLr = GetLiveRange(conflictReg); + CHECK_FATAL(conflictLr != nullptr, "null ptr check!"); +#ifdef USE_LRA + if (doLRA && conflictLr->IsLocalReg()) { + continue; + } +#endif // USE_LRA + if (lr.GetRegType() == conflictLr->GetRegType()) { + lr.InsertConflict(conflictReg); + } + } else if ((isInt && regInfo->IsGPRegister(conflictReg)) || + (!isInt && !regInfo->IsGPRegister(conflictReg))) { +#ifdef RESERVED_REGS + if (regInfo->IsReservedReg(conflictReg, doMultiPass)) { + continue; + } +#endif // RESERVED_REGS + lr.InsertElemToPregveto(conflictReg); + } + } +} + +void GraphColorRegAllocator::ConvertAdjMatrixToConflict(const AdjMatrix &adjMat) { + for (const auto [_, lr] : std::as_const(lrMap)) { + CHECK_FATAL(lr->GetRegType() != kRegTyUndef, "error reg type"); + AddConflictAndPregvetoToLr(adjMat.ConvertEdgeToVec(lr->GetRegNO()), *lr, + (lr->GetRegType() == kRegTyInt)); + } +} + /* * For 
each succ bb->GetSuccs(), if bb->liveout - succ->livein is not empty, the vreg(s) is * dead on this path (but alive on the other path as there is some use of it on the @@ -1024,30 +945,26 @@ void GraphColorRegAllocator::SetupMustAssignedLiveRanges(const Insn &insn) { * splits (lr split into lr1 & lr2 and lr2 will need to reload.) * Not for now though. */ -void GraphColorRegAllocator::ComputeLiveRanges() { +void GraphColorRegAllocator::ComputeLiveRangesAndConflict() { bbVec.clear(); bbVec.resize(cgFunc->NumBBs()); auto currPoint = static_cast(cgFunc->GetTotalNumberOfInstructions() + bfs->sortedBBs.size()); /* distinguish use/def */ - CHECK_FATAL(currPoint < (INT_MAX >> 2), "integer overflow check"); - currPoint = currPoint << 2; + CHECK_FATAL(currPoint < (INT_MAX >> kInsnStep), "integer overflow check"); + currPoint = currPoint << kInsnStep; + AdjMatrix adjMat(cgFunc->GetMaxVReg()); for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { BB *bb = bfs->sortedBBs[bbIdx - 1]; bbVec[bb->GetId()] = bb; - bb->SetLevel(bbIdx - 1); + bb->SetLevel(static_cast(bbIdx - 1)); pregLive.clear(); vregLive.clear(); - for (auto liveOut : bb->GetLiveOutRegNO()) { - SetupLiveRangeByRegNO(liveOut, *bb, currPoint); - } - --currPoint; - if (bb->GetLastInsn() != nullptr && bb->GetLastInsn()->IsMachineInstruction() && bb->GetLastInsn()->IsCall()) { - UpdateCallInfo(bb->GetId(), currPoint, *bb->GetLastInsn()); - } + ComputeLiveRangeByLiveOut(*bb, currPoint); + --currPoint; FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { #ifdef MOVE_COALESCE @@ -1060,54 +977,55 @@ void GraphColorRegAllocator::ComputeLiveRanges() { continue; } #endif - if (UpdateInsnCntAndSkipUseless(*insn, currPoint)) { - if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { - UpdateCallInfo(bb->GetId(), currPoint, *ninsn); - } + insn->SetId(currPoint); + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + --currPoint; continue; } - bool multiDef = false; - ComputeLiveRangesForEachDefOperand(*insn, 
multiDef); - ComputeLiveRangesForEachUseOperand(*insn); - - UpdateOpndConflict(*insn, multiDef); - SetupMustAssignedLiveRanges(*insn); - - if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { - UpdateCallInfo(bb->GetId(), currPoint - kInsnStep, *ninsn); + std::vector defOpnds; + std::vector useOpnds; + CollectRegOpndInfo(*insn, defOpnds, useOpnds); + ComputeLiveRangeForDefOperands(*insn, defOpnds); + ComputeLiveRangeForUseOperands(*insn, useOpnds); + UpdateAdjMatrix(*insn, defOpnds, useOpnds, adjMat); + if (insn->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint, *insn); } - - ComputeLiveRangesUpdateIfInsnIsCall(*insn); - /* distinguish use/def */ - currPoint -= 2; + UpdateRegLive(*insn, useOpnds); + SetupMustAssignedLiveRanges(*insn); + currPoint -= kInsnStep; } - ComputeLiveRangesUpdateLiveUnitInsnRange(*bb, currPoint); - /* move one more step for each BB */ + ComputeLiveRangeByLiveIn(*bb, currPoint); + UpdateAdjMatrixByLiveIn(*bb, adjMat); + // move one more step for each BB --currPoint; } + ConvertAdjMatrixToConflict(adjMat); if (GCRA_DUMP) { - LogInfo::MapleLogger() << "After ComputeLiveRanges\n"; + LogInfo::MapleLogger() << "After ComputeLiveRangesAndConflict\n"; + adjMat.Dump(); PrintLiveRanges(); #ifdef USE_LRA if (doLRA) { - PrintLocalRAInfo("After ComputeLiveRanges"); + PrintLocalRAInfo("After ComputeLiveRangesAndConflict"); } #endif /* USE_LRA */ } } -/* Create a common stack space for spilling with need_spill */ -MemOperand *GraphColorRegAllocator::CreateSpillMem(uint32 spillIdx, uint32 memSize, +// Create a common stack space for spilling with need_spill +MemOperand *GraphColorRegAllocator::CreateSpillMem(uint32 spillIdx, RegType regType, SpillMemCheck check) { + auto &spillMemOpnds = (regType == kRegTyInt) ? intSpillMemOpnds : fpSpillMemOpnds; if (spillIdx >= spillMemOpnds.size()) { return nullptr; } if (operandSpilled[spillIdx]) { /* For this insn, spill slot already used, need to find next available slot. 
*/ - uint32 i; + uint32 i = 0; for (i = spillIdx + 1; i < kSpillMemOpndNum; ++i) { if (!operandSpilled[i]) { break; @@ -1121,160 +1039,27 @@ MemOperand *GraphColorRegAllocator::CreateSpillMem(uint32 spillIdx, uint32 memSi } if (spillMemOpnds[spillIdx] == nullptr) { - regno_t reg = cgFunc->NewVReg(kRegTyInt, sizeof(int64)); - spillMemOpnds[spillIdx] = cgFunc->GetOrCreatSpillMem(reg, memSize); + regno_t reg = cgFunc->NewVReg(kRegTyInt, k64BitSize); + // spillPre or spillPost need maxSize for spill + // so, for int, spill 64-bit; for float, spill 128-bit + spillMemOpnds[spillIdx] = cgFunc->GetOrCreatSpillMem(reg, + (regType == kRegTyInt) ? k64BitSize : k128BitSize); } return spillMemOpnds[spillIdx]; } bool GraphColorRegAllocator::IsLocalReg(regno_t regNO) const { - LiveRange *lr = GetLiveRange(regNO); - if (lr == nullptr) { - LogInfo::MapleLogger() << "unexpected regNO" << regNO; - return true; - } - return IsLocalReg(*lr); -} - -bool GraphColorRegAllocator::IsLocalReg(const LiveRange &lr) const { - return !lr.GetSplitLr() && (lr.GetNumBBMembers() == 1) && !lr.IsNonLocal(); -} - -bool GraphColorRegAllocator::CheckOverlap(uint64 val, uint32 i, LiveRange &lr1, LiveRange &lr2) const { - regno_t lr1RegNO = lr1.GetRegNO(); - regno_t lr2RegNO = lr2.GetRegNO(); - for (uint32 x = 0; x < kU64; ++x) { - if ((val & (1ULL << x)) != 0) { - uint32 lastBitSet = i * kU64 + x; - /* - * begin and end should be in the bb info (LU) - * Need to rethink this if. - * Under some circumstance, lr->begin can occur after lr->end. 
- */ - auto lu1 = lr1.FindInLuMap(lastBitSet); - auto lu2 = lr2.FindInLuMap(lastBitSet); - if (lu1 != lr1.EndOfLuMap() && lu2 != lr2.EndOfLuMap() && - !((lu1->second->GetBegin() < lu2->second->GetBegin() && lu1->second->GetEnd() < lu2->second->GetBegin()) || - (lu2->second->GetBegin() < lu1->second->GetEnd() && lu2->second->GetEnd() < lu1->second->GetBegin()))) { - lr1.SetConflictBitArrElem(lr2RegNO); - lr2.SetConflictBitArrElem(lr1RegNO); - return true; - } - } - } - return false; -} - -void GraphColorRegAllocator::CheckInterference(LiveRange &lr1, LiveRange &lr2) const { - uint64 bitArr[bbBuckets]; - for (uint32 i = 0; i < bbBuckets; ++i) { - bitArr[i] = lr1.GetBBMember()[i] & lr2.GetBBMember()[i]; - } - - for (uint32 i = 0; i < bbBuckets; ++i) { - uint64 val = bitArr[i]; - if (val == 0) { - continue; - } - if (CheckOverlap(val, i, lr1, lr2)) { - break; - } - } -} - -void GraphColorRegAllocator::BuildInterferenceGraphSeparateIntFp(std::vector &intLrVec, - std::vector &fpLrVec) { - for (auto &it : std::as_const(lrMap)) { - LiveRange *lr = it.second; - if (lr->GetRegNO() == 0) { - continue; - } #ifdef USE_LRA - if (doLRA && IsLocalReg(*lr)) { - continue; - } -#endif /* USE_LRA */ - if (lr->GetRegType() == kRegTyInt) { - intLrVec.emplace_back(lr); - } else if (lr->GetRegType() == kRegTyFloat) { - fpLrVec.emplace_back(lr); - } else { - ASSERT(false, "Illegal regType in BuildInterferenceGraph"); - LogInfo::MapleLogger() << "error: Illegal regType in BuildInterferenceGraph\n"; - } - } -} - -/* - * Based on intersection of LRs. When two LRs interfere, add to each other's - * interference list. - */ -void GraphColorRegAllocator::BuildInterferenceGraph() { - std::vector intLrVec; - std::vector fpLrVec; - BuildInterferenceGraphSeparateIntFp(intLrVec, fpLrVec); - - /* - * Once number of BB becomes larger for big functions, the checking for interferences - * takes significant long time. 
Taking advantage of unique bucket is one of strategies - * to avoid unnecessary computation - */ - auto lrSize = intLrVec.size(); - std::vector uniqueBucketIdx(lrSize); - for (uint32 i = 0; i < lrSize; i++) { - uint32 count = 0; - uint32 uniqueIdx; - LiveRange *lr = intLrVec[i]; - for (uint32 j = 0; j < bbBuckets; ++j) { - if (lr->GetBBMember()[j] > 0) { - count++; - uniqueIdx = j; - } - } - if (count == 1) { - uniqueBucketIdx[i] = static_cast(uniqueIdx); - } else { - /* LR spans multiple buckets */ - ASSERT(count >= 1, "A live range can not be empty"); - uniqueBucketIdx[i] = -1; - } - } - - for (auto it1 = intLrVec.begin(); it1 != intLrVec.end(); ++it1) { - LiveRange *lr1 = *it1; - CalculatePriority(*lr1); - int32 lr1UniqueBucketIdx = uniqueBucketIdx[static_cast(std::distance(intLrVec.begin(), it1))]; - for (auto it2 = it1 + 1; it2 != intLrVec.end(); ++it2) { - LiveRange *lr2 = *it2; - if (lr1->GetRegNO() < lr2->GetRegNO()) { - int32 lr2UniqueBucketIdx = uniqueBucketIdx[static_cast(std::distance(intLrVec.begin(), it2))]; - if (lr1UniqueBucketIdx == -1 && lr2UniqueBucketIdx == -1) { - CheckInterference(*lr1, *lr2); - } else if (((lr1UniqueBucketIdx >= 0) && ((lr1->GetBBMember()[lr1UniqueBucketIdx] & - lr2->GetBBMember()[lr1UniqueBucketIdx])) > 0) || ((lr2UniqueBucketIdx >= 0) && - ((lr1->GetBBMember()[lr2UniqueBucketIdx] & lr2->GetBBMember()[lr2UniqueBucketIdx]) > 0))) { - CheckInterference(*lr1, *lr2); - } - } - } - } - - // Might need to do same as to intLrVec - for (auto it1 = fpLrVec.begin(); it1 != fpLrVec.end(); ++it1) { - LiveRange *lr1 = *it1; - CalculatePriority(*lr1); - for (auto it2 = it1 + 1; it2 != fpLrVec.end(); ++it2) { - LiveRange *lr2 = *it2; - if (lr1->GetRegNO() < lr2->GetRegNO()) { - CheckInterference(*lr1, *lr2); - } + if (doLRA) { + LiveRange *lr = GetLiveRange(regNO); + if (lr == nullptr) { + LogInfo::MapleLogger() << "unexpected regNO" << regNO; + return true; } + return lr->IsLocalReg(); } - - if (GCRA_DUMP) { - LogInfo::MapleLogger() << 
"After BuildInterferenceGraph\n"; - PrintLiveRanges(); - } +#endif // USE_LRA + return false; } void GraphColorRegAllocator::SetBBInfoGlobalAssigned(uint32 bbID, regno_t regNO) { @@ -1302,16 +1087,15 @@ bool GraphColorRegAllocator::HaveAvailableColor(const LiveRange &lr, uint32 num) * Compute a sorted list of constrained LRs based on priority cost. */ void GraphColorRegAllocator::Separate() { - for (auto &it : std::as_const(lrMap)) { - LiveRange *lr = it.second; + for (auto [_, lr] : std::as_const(lrMap)) { #ifdef USE_LRA - if (doLRA && IsLocalReg(*lr)) { + if (doLRA && lr->IsLocalReg()) { continue; } #endif /* USE_LRA */ #ifdef OPTIMIZE_FOR_PROLOG if (doOptProlog && ((lr->GetNumDefs() <= 1) && (lr->GetNumUses() <= 1) && (lr->GetNumCall() > 0)) && - (lr->GetFrequency() <= (cgFunc->GetFirstBB()->GetFrequency() << 1))) { + (lr->GetFrequency() <= static_cast(cgFunc->GetFirstBB()->GetFrequency() << 1))) { if (lr->GetRegType() == kRegTyInt) { intDelayed.emplace_back(lr); } else { @@ -1322,8 +1106,8 @@ void GraphColorRegAllocator::Separate() { #endif /* OPTIMIZE_FOR_PROLOG */ if (lr->GetRematLevel() != kRematOff) { unconstrained.emplace_back(lr); - } else if (HaveAvailableColor(*lr, lr->GetNumBBConflicts() + static_cast(lr->GetPregvetoSize()) + - static_cast(lr->GetForbiddenSize()))) { + } else if (HaveAvailableColor(*lr, lr->GetConflictSize() + lr->GetPregvetoSize() + + lr->GetForbiddenSize())) { if (lr->GetPrefs().size() > 0) { unconstrainedPref.emplace_back(lr); } else { @@ -1340,21 +1124,23 @@ void GraphColorRegAllocator::Separate() { } } if (GCRA_DUMP) { - LogInfo::MapleLogger() << "Unconstrained : "; - for (auto lr : unconstrainedPref) { + LogInfo::MapleLogger() << "UnconstrainedPref : "; + for (const auto lr : unconstrainedPref) { LogInfo::MapleLogger() << lr->GetRegNO() << " "; } - for (auto lr : unconstrained) { + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "Unconstrained : "; + for (const auto lr : unconstrained) { LogInfo::MapleLogger() << 
lr->GetRegNO() << " "; } LogInfo::MapleLogger() << "\n"; LogInfo::MapleLogger() << "Constrained : "; - for (auto lr : constrained) { + for (const auto lr : constrained) { LogInfo::MapleLogger() << lr->GetRegNO() << " "; } LogInfo::MapleLogger() << "\n"; LogInfo::MapleLogger() << "mustAssigned : "; - for (auto lr : mustAssigned) { + for (const auto lr : mustAssigned) { LogInfo::MapleLogger() << lr->GetRegNO() << " "; } LogInfo::MapleLogger() << "\n"; @@ -1378,24 +1164,22 @@ MapleVector::iterator GraphColorRegAllocator::GetHighPriorityLr(Mapl } void GraphColorRegAllocator::UpdateForbiddenForNeighbors(const LiveRange &lr) const { - auto updateForbidden = [&lr, this] (regno_t regNO) { + for (auto regNO : lr.GetConflict()) { LiveRange *newLr = GetLiveRange(regNO); - ASSERT(newLr != nullptr, "newLr should not be nullptr"); + CHECK_FATAL(newLr != nullptr, "newLr should not be nullptr"); if (!newLr->GetPregveto(lr.GetAssignedRegNO())) { newLr->InsertElemToForbidden(lr.GetAssignedRegNO()); } - }; - ForEachRegArrElem(lr.GetBBConflict(), updateForbidden); + } } void GraphColorRegAllocator::UpdatePregvetoForNeighbors(const LiveRange &lr) const { - auto updatePregveto = [&lr, this] (regno_t regNO) { + for (auto regNO : lr.GetConflict()) { LiveRange *newLr = GetLiveRange(regNO); - ASSERT(newLr != nullptr, "newLr should not be nullptr"); + CHECK_FATAL(newLr != nullptr, "newLr should not be nullptr"); newLr->InsertElemToPregveto(lr.GetAssignedRegNO()); newLr->EraseElemFromForbidden(lr.GetAssignedRegNO()); - }; - ForEachRegArrElem(lr.GetBBConflict(), updatePregveto); + } } /* @@ -1458,20 +1242,20 @@ regno_t GraphColorRegAllocator::FindColorForLr(const LiveRange &lr) const { #ifdef MOVE_COALESCE if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { for (const auto reg : lr.GetPrefs()) { - if ((FindIn(*currRegSet, reg) || FindIn(*nextRegSet, reg)) && !lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + if ((FindIn(*currRegSet, reg) || FindIn(*nextRegSet, reg)) && 
!lr.HaveConflict(reg)) { return reg; } } } #endif /* MOVE_COALESCE */ for (const auto reg : *currRegSet) { - if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + if (!lr.HaveConflict(reg)) { return reg; } } /* Failed to allocate in first choice. Try 2nd choice. */ for (const auto reg : *nextRegSet) { - if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + if (!lr.HaveConflict(reg)) { return reg; } } @@ -1491,14 +1275,14 @@ regno_t GraphColorRegAllocator::TryToAssignCallerSave(const LiveRange &lr) const #ifdef MOVE_COALESCE if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { for (const auto reg : lr.GetPrefs()) { - if ((FindIn(*currRegSet, reg)) && !lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + if ((FindIn(*currRegSet, reg)) && !lr.HaveConflict(reg) && !lr.GetCallDef(reg)) { return reg; } } } #endif /* MOVE_COALESCE */ for (const auto reg : *currRegSet) { - if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + if (!lr.HaveConflict(reg) && !lr.GetCallDef(reg)) { return reg; } } @@ -1565,7 +1349,7 @@ void GraphColorRegAllocator::PruneLrForSplit(LiveRange &lr, BB &bb, bool remove, return; } - bb.SetInternalFlag1(true); + bb.SetInternalFlag1(1); MapleMap::const_iterator lu = lr.FindInLuMap(bb.GetId()); uint32 defNum = 0; uint32 useNum = 0; @@ -1585,7 +1369,7 @@ void GraphColorRegAllocator::PruneLrForSplit(LiveRange &lr, BB &bb, bool remove, remove = false; } else { /* No ref in this bb. mark as potential remove. */ - bb.SetInternalFlag2(true); + bb.SetInternalFlag2(1); return; } } else { @@ -1665,7 +1449,7 @@ void GraphColorRegAllocator::ComputeBBForNewSplit(LiveRange &newLr, LiveRange &o } if (bb->GetLoop() != nullptr || FindNotIn(defInLoop, bb->GetLoop())) { /* defInLoop should be a subset of candidateInLoop. remove. 
*/ - newLr.UnsetMemberBitArrElem(bbID); + newLr.UnsetBBMember(bbID); } } }; @@ -1720,7 +1504,7 @@ void GraphColorRegAllocator::FindUseForSplit(LiveRange &lr, SplitBBInfo &bbInfo, } } - bb->SetInternalFlag1(true); + bb->SetInternalFlag1(1); MapleMap::const_iterator lu = lr.FindInLuMap(bb->GetId()); uint32 defNum = 0; uint32 useNum = 0; @@ -1734,7 +1518,7 @@ void GraphColorRegAllocator::FindUseForSplit(LiveRange &lr, SplitBBInfo &bbInfo, /* In removal mode, has not encountered a ref yet. */ if (defNum == 0 && useNum == 0) { /* No ref in this bb. mark as potential remove. */ - bb->SetInternalFlag2(true); + bb->SetInternalFlag2(1); if (bb->GetLoop() != nullptr) { /* bb in loop, need to make sure of loop carried dependency */ (void)candidateInLoop.insert(bb->GetLoop()); @@ -1804,10 +1588,10 @@ void GraphColorRegAllocator::ComputeBBForOldSplit(LiveRange &newLr, LiveRange &o continue; } for (auto pred : bb->GetPreds()) { - pred->SetInternalFlag1(true); + pred->SetInternalFlag1(1); } for (auto pred : bb->GetEhPreds()) { - pred->SetInternalFlag1(true); + pred->SetInternalFlag1(1); } bbInfo.SetCandidateBB(*bb); bbInfo.SetStartBB(*bb); @@ -1818,7 +1602,7 @@ void GraphColorRegAllocator::ComputeBBForOldSplit(LiveRange &newLr, LiveRange &o BB *bb = bbVec[bbID]; if (bb->GetInternalFlag2() != 0) { if (bb->GetLoop() != nullptr && FindNotIn(defInLoop, bb->GetLoop())) { - origLr.UnsetMemberBitArrElem(bbID); + origLr.UnsetBBMember(bbID); } } }; @@ -1839,7 +1623,7 @@ bool GraphColorRegAllocator::LrCanBeColored(const LiveRange &lr, const BB &bbAdd RegType type = lr.GetRegType(); std::unordered_set newConflict; - auto updateConflictFunc = [&bbAdded, &conflictRegs, &newConflict, &lr, this](regno_t regNO) { + for (auto regNO : lr.GetConflict()) { /* check the real conflict in current bb */ LiveRange *conflictLr = lrMap[regNO]; /* @@ -1848,13 +1632,13 @@ bool GraphColorRegAllocator::LrCanBeColored(const LiveRange &lr, const BB &bbAdd * assigned a color that is not in the conflictRegs, * 
then add it as a newConflict. */ - if (IsBitArrElemSet(conflictLr->GetBBMember(), bbAdded.GetId())) { + if (conflictLr->GetBBMember(bbAdded.GetId())) { regno_t confReg = conflictLr->GetAssignedRegNO(); if ((confReg > 0) && FindNotIn(conflictRegs, confReg) && !lr.GetPregveto(confReg)) { (void)newConflict.insert(confReg); } } else if (conflictLr->GetSplitLr() != nullptr && - IsBitArrElemSet(conflictLr->GetSplitLr()->GetBBMember(), bbAdded.GetId())) { + conflictLr->GetSplitLr()->GetBBMember(bbAdded.GetId())) { /* * The after split LR is split into pieces, and this ensures * the after split color is taken into consideration. @@ -1864,8 +1648,7 @@ bool GraphColorRegAllocator::LrCanBeColored(const LiveRange &lr, const BB &bbAdd (void)newConflict.insert(confReg); } } - }; - ForEachRegArrElem(lr.GetBBConflict(), updateConflictFunc); + } size_t numRegs = newConflict.size() + lr.GetPregvetoSize() + conflictRegs.size(); @@ -1891,12 +1674,12 @@ bool GraphColorRegAllocator::LrCanBeColored(const LiveRange &lr, const BB &bbAdd /* Support function for LR split. Move one BB from LR1 to LR2. */ void GraphColorRegAllocator::MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb) const { /* initialize backward traversal flag for the bb pruning phase */ - bb.SetInternalFlag1(false); + bb.SetInternalFlag1(0); /* initialize bb removal marker */ - bb.SetInternalFlag2(false); + bb.SetInternalFlag2(0); /* Insert BB into new LR */ uint32 bbID = bb.GetId(); - newLr.SetMemberBitArrElem(bbID); + newLr.SetBBMember(bbID); /* Move LU from old LR to new LR */ MapleMap::const_iterator luIt = oldLr.FindInLuMap(bb.GetId()); @@ -1906,7 +1689,7 @@ void GraphColorRegAllocator::MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB } /* Remove BB from old LR */ - oldLr.UnsetMemberBitArrElem(bbID); + oldLr.UnsetBBMember(bbID); } /* Is the set of loops inside the loop? 
*/ @@ -1923,7 +1706,8 @@ bool GraphColorRegAllocator::ContainsLoop(const CGFuncLoops &loop, return false; } -void GraphColorRegAllocator::GetAllLrMemberLoops(LiveRange &lr, std::set &loops) { +void GraphColorRegAllocator::GetAllLrMemberLoops(const LiveRange &lr, + std::set &loops) { auto getLrMemberFunc = [&loops, this](uint32 bbID) { BB *bb = bbVec[bbID]; CGFuncLoops *loop = bb->GetLoop(); @@ -1934,7 +1718,7 @@ void GraphColorRegAllocator::GetAllLrMemberLoops(LiveRange &lr, std::setGetBBMember(), bbID) || - (confLrVec->GetSplitLr() != nullptr && IsBitArrElemSet(confLrVec->GetSplitLr()->GetBBMember(), bbID))) { + if (confLrVec->GetBBMember(bbID) || + (confLrVec->GetSplitLr() != nullptr && confLrVec->GetSplitLr()->GetBBMember(bbID))) { /* * New LR getting the interference does not mean the * old LR can remove the interference. * Old LR's interference will be handled at the end of split. */ - newLr.SetConflictBitArrElem(regNO); + newLr.InsertConflict(regNO); } - }; - ForEachRegArrElem(lr.GetBBConflict(), lrFunc); + } }; ForEachBBArrElem(newLr.GetBBMember(), recomputeConflict); @@ -2027,7 +1810,7 @@ void GraphColorRegAllocator::SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr, continue; } for (auto bb : loop->GetLoopMembers()) { - if (!IsBitArrElemSet(newLr.GetBBMember(), bb->GetId())) { + if (!newLr.GetBBMember(bb->GetId())) { continue; } LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bb->GetId()); @@ -2079,19 +1862,18 @@ void GraphColorRegAllocator::SplitLrUpdateInterference(LiveRange &lr) { * Also recompute the forbidden info */ lr.ClearForbidden(); - auto updateInterfrence = [&lr, this](regno_t regNO) { + for (auto regNO : lr.GetConflict()) { LiveRange *confLrVec = lrMap[regNO]; - if (IsBBsetOverlap(lr.GetBBMember(), confLrVec->GetBBMember(), bbBuckets)) { + if (IsBBsetOverlap(lr.GetBBMember(), confLrVec->GetBBMember())) { /* interfere */ if ((confLrVec->GetAssignedRegNO() > 0) && !lr.GetPregveto(confLrVec->GetAssignedRegNO())) { 
lr.InsertElemToForbidden(confLrVec->GetAssignedRegNO()); } } else { /* no interference */ - lr.UnsetConflictBitArrElem(regNO); + lr.EraseConflict(regNO); } - }; - ForEachRegArrElem(lr.GetBBConflict(), updateInterfrence); + } } void GraphColorRegAllocator::SplitLrUpdateRegInfo(const LiveRange &origLr, LiveRange &newLr, @@ -2110,7 +1892,7 @@ void GraphColorRegAllocator::SplitLrUpdateRegInfo(const LiveRange &origLr, LiveR void GraphColorRegAllocator::SplitLrErrorCheckAndDebug(const LiveRange &origLr) const { if (origLr.GetNumBBMembers() == 0) { - ASSERT(origLr.GetNumBBConflicts() == 0, "Error: member and conflict not match"); + ASSERT(origLr.GetConflict().empty(), "Error: member and conflict not match"); } } @@ -2133,13 +1915,6 @@ void GraphColorRegAllocator::SplitLr(LiveRange &lr) { if (!SplitLrFindCandidateLr(lr, *newLr, conflictRegs)) { return; } -#ifdef REUSE_SPILLMEM - /* Copy the original conflict vector for spill reuse optimization */ - lr.SetOldConflict(memPool->NewArray(regBuckets)); - for (uint32 i = 0; i < regBuckets; ++i) { - lr.SetBBConflictElem(static_cast(i), lr.GetBBConflictElem(static_cast(i))); - } -#endif /* REUSE_SPILLMEM */ std::set newLoops; std::set origLoops; @@ -2311,7 +2086,7 @@ void GraphColorRegAllocator::UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAl if (usedIt != localRa.GetUseInfo().end() && !isDef) { /* reg use, decrement count */ ASSERT(usedIt->second > 0, "Incorrect local ra info"); - localRa.SetUseInfoElem(regNO, usedIt->second - 1); + localRa.SetUseInfoElem(regNO, static_cast(usedIt->second - 1)); if (regInfo->IsVirtualRegister(regNO) && localRa.IsInRegAssigned(regNO)) { localRa.IncUseInfoElem(localRa.GetRegAssignmentItem(regNO)); } @@ -2324,7 +2099,7 @@ void GraphColorRegAllocator::UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAl if (defIt != localRa.GetDefInfo().end() && isDef) { /* reg def, decrement count */ ASSERT(defIt->second > 0, "Incorrect local ra info"); - localRa.SetDefInfoElem(regNO, defIt->second - 1); + 
localRa.SetDefInfoElem(regNO, static_cast(defIt->second - 1)); if (regInfo->IsVirtualRegister(regNO) && localRa.IsInRegAssigned(regNO)) { localRa.IncDefInfoElem(localRa.GetRegAssignmentItem(regNO)); } @@ -2335,17 +2110,18 @@ void GraphColorRegAllocator::UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAl } void GraphColorRegAllocator::UpdateLocalRegConflict(regno_t regNO, - const LocalRegAllocator &localRa) { + const LocalRegAllocator &localRa) { LiveRange *lr = lrMap[regNO]; - if (lr->GetNumBBConflicts() == 0) { + if (lr->GetConflict().empty()) { return; } if (!localRa.IsInRegAssigned(regNO)) { return; } regno_t preg = localRa.GetRegAssignmentItem(regNO); - ForEachRegArrElem(lr->GetBBConflict(), - [&preg, this](regno_t regNO) { lrMap[regNO]->InsertElemToPregveto(preg); }); + for (auto conflictReg : lr->GetConflict()) { + lrMap[conflictReg]->InsertElemToPregveto(preg); + } } void GraphColorRegAllocator::HandleLocalRaDebug(regno_t regNO, const LocalRegAllocator &localRa, bool isInt) const { @@ -2385,6 +2161,12 @@ void GraphColorRegAllocator::HandleLocalReg(Operand &op, LocalRegAllocator &loca return; } +#ifdef RESERVED_REGS + if (regInfo->IsReservedReg(regNO, doMultiPass)) { + return; + } +#endif /* RESERVED_REGS */ + /* is this a local register ? 
*/ if (regInfo->IsVirtualRegister(regNO) && !IsLocalReg(regNO)) { return; @@ -2395,6 +2177,9 @@ void GraphColorRegAllocator::HandleLocalReg(Operand &op, LocalRegAllocator &loca } if (regOpnd.IsPhysicalRegister()) { + if (!regInfo->IsAvailableReg(regNO)) { + return; + } /* conflict with preg is record in lr->pregveto and BBAssignInfo->globalsAssigned */ UpdateLocalRegDefUseCount(regNO, localRa, isDef); /* See if it is needed by global RA */ @@ -2613,23 +2398,17 @@ void GraphColorRegAllocator::LocalRegisterAllocator(bool doAllocate) { } } -MemOperand *GraphColorRegAllocator::GetConsistentReuseMem(const uint64 *conflict, +MemOperand *GraphColorRegAllocator::GetConsistentReuseMem(const MapleSet &conflict, const std::set &usedMemOpnd, uint32 size, RegType regType) { std::set sconflict; - regno_t regNO; - for (uint32 i = 0; i < regBuckets; ++i) { - for (uint32 b = 0; b < kU64; ++b) { - if ((conflict[i] & (1ULL << b)) != 0) { - continue; - } - regNO = i * kU64 + b; - if (regNO >= numVregs) { - break; - } - if (GetLiveRange(regNO) != nullptr) { - (void)sconflict.insert(lrMap[regNO]); - } + + for (regno_t regNO = 0; regNO < numVregs; ++regNO) { + if (FindIn(conflict, regNO)) { + continue; + } + if (GetLiveRange(regNO) != nullptr) { + (void)sconflict.insert(lrMap[regNO]); } } @@ -2645,33 +2424,27 @@ MemOperand *GraphColorRegAllocator::GetConsistentReuseMem(const uint64 *conflict return nullptr; } -MemOperand *GraphColorRegAllocator::GetCommonReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, - uint32 size, RegType regType) { - regno_t regNO; - for (uint32 i = 0; i < regBuckets; ++i) { - for (uint32 b = 0; b < kU64; ++b) { - if ((conflict[i] & (1ULL << b)) != 0) { - continue; - } - regNO = i * kU64 + b; - if (regNO >= numVregs) { - break; - } - LiveRange *noConflictLr = GetLiveRange(regNO); - if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType || - noConflictLr->GetSpillSize() != size) { - continue; - } - if 
(usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) { - return noConflictLr->GetSpillMem(); - } +MemOperand *GraphColorRegAllocator::GetCommonReuseMem(const MapleSet &conflict, + const std::set &usedMemOpnd, + uint32 size, RegType regType) const { + for (regno_t regNO = 0; regNO < numVregs; ++regNO) { + if (FindIn(conflict, regNO)) { + continue; + } + LiveRange *noConflictLr = GetLiveRange(regNO); + if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType || + noConflictLr->GetSpillSize() != size) { + continue; + } + if (usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) { + return noConflictLr->GetSpillMem(); } } return nullptr; } /* See if any of the non-conflict LR is spilled and use its memOpnd. */ -MemOperand *GraphColorRegAllocator::GetReuseMem(const LiveRange &lr) { +MemOperand *GraphColorRegAllocator::GetReuseMem(const LiveRange &lr) const { if (cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { return nullptr; } @@ -2688,33 +2461,29 @@ MemOperand *GraphColorRegAllocator::GetReuseMem(const LiveRange &lr) { } std::set usedMemOpnd; - auto updateMemOpnd = [&usedMemOpnd, this](regno_t regNO) { - if (regNO >= numVregs) { - return; - } + for (auto regNO : lr.GetConflict()) { LiveRange *lrInner = GetLiveRange(regNO); if (lrInner && lrInner->GetSpillMem() != nullptr) { (void)usedMemOpnd.insert(lrInner->GetSpillMem()); } - }; - const uint64 *conflict = lr.GetBBConflict(); - ForEachRegArrElem(conflict, updateMemOpnd); + } + /* * This is to order the search so memOpnd given out is consistent. * When vreg#s do not change going through VtableImpl.mpl file * then this can be simplified. 
*/ #ifdef CONSISTENT_MEMOPND - return GetConsistentReuseMem(conflict, usedMemOpnd, lr.GetSpillSize(), lr.GetRegType()); + return GetConsistentReuseMem(lr.GetConflict(), usedMemOpnd, lr.GetSpillSize(), lr.GetRegType()); #else /* CONSISTENT_MEMOPND */ - return GetCommonReuseMem(conflict, usedMemOpnd, lr.GetSpillSize(), lr.GetRegType()); + return GetCommonReuseMem(lr.GetConflict(), usedMemOpnd, lr.GetSpillSize(), lr.GetRegType()); #endif /* CONSISTENT_MEMOPNDi */ } MemOperand *GraphColorRegAllocator::GetSpillMem(uint32 vregNO, uint32 spillSize, bool isDest, Insn &insn, regno_t regNO, bool &isOutOfRange) { MemOperand *memOpnd = cgFunc->GetOrCreatSpillMem(vregNO, spillSize); - if (cgFunc->GetCG()->IsLmbc() && cgFunc->GetSpSaveReg()) { + if (cgFunc->GetCG()->IsLmbc() && cgFunc->GetSpSaveReg() != 0) { LiveRange *lr = lrMap[cgFunc->GetSpSaveReg()]; RegOperand *baseReg = nullptr; if (lr == nullptr) { @@ -2751,14 +2520,16 @@ void GraphColorRegAllocator::SpillOperandForSpillPre(Insn &insn, const Operand & uint32 regNO = regOpnd.GetRegisterNumber(); LiveRange *lr = lrMap[regNO]; - MemOperand *spillMem = CreateSpillMem(spillIdx, lr->GetSpillSize(), kSpillMemPre); + MemOperand *spillMem = CreateSpillMem(spillIdx, regOpnd.GetRegisterType(), kSpillMemPre); ASSERT(spillMem != nullptr, "spillMem nullptr check"); - PrimType stype = GetPrimTypeFromRegTyAndRegSize(regOpnd.GetRegisterType(), regOpnd.GetSize()); + // for int, must str 64-bit; for float, must str 128-bit + uint32 strSize = (regOpnd.GetRegisterType() == kRegTyInt) ? 
k64BitSize : k128BitSize; + PrimType stype = GetPrimTypeFromRegTyAndRegSize(regOpnd.GetRegisterType(), strSize); bool isOutOfRange = false; spillMem = regInfo->AdjustMemOperandIfOffsetOutOfRange(spillMem, regOpnd.GetRegisterNumber(), false, insn, regInfo->GetReservedSpillReg(), isOutOfRange); - Insn &stInsn = *regInfo->BuildStrInsn(spillMem->GetSize(), stype, phyOpnd, *spillMem); + Insn &stInsn = *regInfo->BuildStrInsn(strSize, stype, phyOpnd, *spillMem); std::string comment = " SPILL for spill vreg: " + std::to_string(regNO) + " op:" + kOpcodeInfo.GetName(lr->GetOp()); stInsn.SetComment(comment); @@ -2800,10 +2571,12 @@ void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand return; } - MemOperand *spillMem = CreateSpillMem(spillIdx, lr->GetSpillSize(), kSpillMemPost); + MemOperand *spillMem = CreateSpillMem(spillIdx, regOpnd.GetRegisterType(), kSpillMemPost); ASSERT(spillMem != nullptr, "spillMem nullptr check"); - PrimType stype = GetPrimTypeFromRegTyAndRegSize(regOpnd.GetRegisterType(), regOpnd.GetSize()); + // for int, must ldr 64-bit; for float, must ldr 128-bit + uint32 ldrSize = (regOpnd.GetRegisterType() == kRegTyInt) ? 
k64BitSize : k128BitSize; + PrimType stype = GetPrimTypeFromRegTyAndRegSize(regOpnd.GetRegisterType(), ldrSize); bool isOutOfRange = false; Insn *nextInsn = insn.GetNextMachineInsn(); spillMem = regInfo->AdjustMemOperandIfOffsetOutOfRange(spillMem, regOpnd.GetRegisterNumber(), @@ -2812,12 +2585,12 @@ void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand " op:" + kOpcodeInfo.GetName(lr->GetOp()); if (isLastInsn) { for (auto tgtBB : insn.GetBB()->GetSuccs()) { - Insn *newLd = regInfo->BuildLdrInsn(spillMem->GetSize(), stype, phyOpnd, *spillMem); + Insn *newLd = regInfo->BuildLdrInsn(ldrSize, stype, phyOpnd, *spillMem); newLd->SetComment(comment); tgtBB->InsertInsnBegin(*newLd); } } else { - Insn *ldrInsn = regInfo->BuildLdrInsn(spillMem->GetSize(), stype, phyOpnd, *spillMem); + Insn *ldrInsn = regInfo->BuildLdrInsn(ldrSize, stype, phyOpnd, *spillMem); ldrInsn->SetComment(comment); if (isOutOfRange) { if (nextInsn == nullptr) { @@ -2896,7 +2669,7 @@ Insn *GraphColorRegAllocator::SpillOperand(Insn &insn, const Operand &opnd, bool } LiveRange *lr = lrMap[regNO]; bool isForCallerSave = lr->GetSplitLr() == nullptr && (lr->GetNumCall() > 0) && !isCalleeReg; - uint32 regSize = regOpnd.GetSize(); + uint32 regSize = lr->GetSpillSize(); PrimType stype = GetPrimTypeFromRegTyAndRegSize(lr->GetRegType(), regSize); bool isOutOfRange = false; if (isDef) { @@ -2993,21 +2766,20 @@ void GraphColorRegAllocator::CollectCannotUseReg(std::unordered_set &ca (void)cannotUseReg.insert(regNO); } } - auto updateCannotUse = [&insn, &cannotUseReg, this](regno_t regNO) { + for (auto regNO : lr.GetConflict()) { LiveRange *conflictLr = lrMap[regNO]; /* * conflictLr->GetAssignedRegNO() might be zero * caller save will be inserted so the assigned reg can be released actually */ - if ((conflictLr->GetAssignedRegNO() > 0) && IsBitArrElemSet(conflictLr->GetBBMember(), insn.GetBB()->GetId())) { + if ((conflictLr->GetAssignedRegNO() > 0) && 
conflictLr->GetBBMember(insn.GetBB()->GetId())) { if (!regInfo->IsCalleeSavedReg(conflictLr->GetAssignedRegNO()) && (conflictLr->GetNumCall() > 0) && !conflictLr->GetProcessed()) { - return; + continue; } (void)cannotUseReg.insert(conflictLr->GetAssignedRegNO()); } - }; - ForEachRegArrElem(lr.GetBBConflict(), updateCannotUse); + } #ifdef USE_LRA if (!doLRA) { return; @@ -3239,56 +3011,6 @@ bool GraphColorRegAllocator::FoundPrevBeforeCall(Insn &insn, LiveRange &lr, bool return insn.GetBB()->GetPreds().size() == 0 ? false : true; } -// find next def before next call ? and no next use -bool GraphColorRegAllocator::EncountNextRef(const BB &succ, LiveRange &lr, bool isDef, std::vector& visitedMap) { - if (lr.FindInLuMap(succ.GetId()) != lr.EndOfLuMap()) { - LiveUnit *lu = lr.GetLiveUnitFromLuMap(succ.GetId()); - bool findNextDef = false; - if ((lu->GetDefNum() > 0) || lu->HasCall()) { - MapleMap refs = lr.GetRefs(succ.GetId()); - for (auto it = refs.begin(); it != refs.end(); ++it) { - if ((it->second & kIsDef) != 0) { - findNextDef = true; - break; - } - if ((it->second & kIsCall) != 0) { - break; - } - if ((it->second & kIsUse) != 0) { - continue; - } - } - return findNextDef; - } - if (lu->HasCall()) { - return false; - } - } - visitedMap[succ.GetId()] = true; - bool found = true; - for (auto succBB: succ.GetSuccs()) { - if (!visitedMap[succBB->GetId()]) { - found = EncountNextRef(*succBB, lr, isDef, visitedMap) && found; - if (!found) { - return false; - } - } - } - return found; -} - -bool GraphColorRegAllocator::FoundNextBeforeCall(Insn &insn, LiveRange &lr, bool isDef) { - bool haveFind = true; - std::vector visitedMap(bbVec.size() + 1, false); - for (auto succ: insn.GetBB()->GetSuccs()) { - haveFind = EncountNextRef(*succ, lr, isDef, visitedMap) && haveFind; - if (!haveFind) { - return false; - } - } - return insn.GetBB()->GetSuccs().size() > 0; -} - bool GraphColorRegAllocator::HavePrevRefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const { LiveUnit *lu 
= lr.GetLiveUnitFromLuMap(insn.GetBB()->GetId()); bool findPrevRef = false; @@ -3388,7 +3110,7 @@ RegOperand *GraphColorRegAllocator::GetReplaceOpnd(Insn &insn, const Operand &op bool isSplitPart = false; bool needSpillLr = false; - if (lr->GetSplitLr() && IsBitArrElemSet(lr->GetSplitLr()->GetBBMember(), insn.GetBB()->GetId())) { + if (lr->GetSplitLr() && lr->GetSplitLr()->GetBBMember(insn.GetBB()->GetId())) { isSplitPart = true; } @@ -3396,7 +3118,7 @@ RegOperand *GraphColorRegAllocator::GetReplaceOpnd(Insn &insn, const Operand &op needSpillLr = GetSpillReg(insn, *lr, spillIdx, usedRegMask, isDef); } - regno_t regNO; + regno_t regNO = 0; if (isSplitPart) { regNO = lr->GetSplitLr()->GetAssignedRegNO(); } else { @@ -3474,7 +3196,7 @@ RegOperand *GraphColorRegAllocator::GetReplaceUseDefOpnd(Insn &insn, const Opera bool isSplitPart = false; bool needSpillLr = false; - if (lr->GetSplitLr() && IsBitArrElemSet(lr->GetSplitLr()->GetBBMember(), insn.GetBB()->GetId())) { + if (lr->GetSplitLr() && lr->GetSplitLr()->GetBBMember(insn.GetBB()->GetId())) { isSplitPart = true; } @@ -3529,7 +3251,7 @@ RegOperand *GraphColorRegAllocator::GetReplaceUseDefOpnd(Insn &insn, const Opera return &phyOpnd; } -void GraphColorRegAllocator::MarkUsedRegs(Operand &opnd, MapleBitVector &usedRegMask) { +void GraphColorRegAllocator::MarkUsedRegs(Operand &opnd, MapleBitVector &usedRegMask) const { auto ®Opnd = static_cast(opnd); uint32 vregNO = regOpnd.GetRegisterNumber(); LiveRange *lr = GetLiveRange(vregNO); @@ -3679,8 +3401,8 @@ void GraphColorRegAllocator::GenerateSpillFillRegs(const Insn &insn) { } } } else if (opnd->IsRegister()) { - bool isDef = md->GetOpndDes(static_cast(opndIdx))->IsRegDef(); - bool isUse = md->GetOpndDes(static_cast(opndIdx))->IsRegUse(); + bool isDef = md->GetOpndDes(opndIdx)->IsRegDef(); + bool isUse = md->GetOpndDes(opndIdx)->IsRegUse(); RegOperand *ropnd = static_cast(opnd); if (regInfo->IsUnconcernedReg(*ropnd)) { continue; @@ -3781,7 +3503,8 @@ RegOperand 
*GraphColorRegAllocator::CreateSpillFillCode(const RegOperand &opnd, RegType rtype = lr->GetRegType(); spreg = lr->GetSpillReg(); ASSERT(lr->GetSpillReg() != 0, "no reg in CreateSpillFillCode"); - RegOperand *regopnd = &cgFunc->GetOpndBuilder()->CreatePReg(spreg, opnd.GetSize(), rtype); + uint32 regSize = lr->GetSpillSize(); + RegOperand *regopnd = &cgFunc->GetOpndBuilder()->CreatePReg(spreg, regSize, rtype); if (lr->GetRematLevel() != kRematOff) { if (isdef) { @@ -3801,12 +3524,6 @@ RegOperand *GraphColorRegAllocator::CreateSpillFillCode(const RegOperand &opnd, Insn *nextInsn = insn.GetNextMachineInsn(); MemOperand *loadmem = GetSpillOrReuseMem(*lr, isOutOfRange, insn, isdef); - uint32 regSize = opnd.GetSize(); - if (cgFunc->IsExtendReg(vregno) && isdef) { - // ExtendReg is def32 and use64, so def point needs str 64bit - regSize = k64BitSize; - } - PrimType primType = GetPrimTypeFromRegTyAndRegSize(lr->GetRegType(), regSize); CHECK_FATAL(spillCnt < kSpillMemOpndNum, "spill count exceeded"); Insn *memInsn; @@ -4318,8 +4035,7 @@ void CallerSavePre::ComputeVarAndDfPhis() { void CallerSavePre::BuildWorkList() { size_t numBBs = dom->GetDtPreOrderSize(); std::vector callSaveLrs; - for (auto it: regAllocator->GetLrMap()) { - LiveRange *lr = it.second; + for (auto [_, lr] : regAllocator->GetLrMap()) { if (lr == nullptr || lr->IsSpilled()) { continue; } @@ -4350,17 +4066,11 @@ void CallerSavePre::BuildWorkList() { } if ((it->second & kIsCall) > 0) { Insn *callInsn = insnMap[it->first]; - auto *targetOpnd = callInsn->GetCallTargetOperand(); - if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { - FuncNameOperand *target = static_cast(targetOpnd); - const MIRSymbol *funcSt = target->GetFunctionSymbol(); - ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); - MIRFunction *mirFunc = funcSt->GetFunction(); - if (mirFunc != nullptr && mirFunc->IsReferedRegsValid()) { - auto regSet = mirFunc->GetReferedRegs(); - if 
(regSet.find(lr->GetAssignedRegNO()) == regSet.end()) { - continue; - } + if (CGOptions::DoIPARA()) { + std::set callerSaveRegs; + func->GetRealCallerSaveRegs(*callInsn, callerSaveRegs); + if (callerSaveRegs.find(lr->GetAssignedRegNO()) == callerSaveRegs.end()) { + continue; } } (void) CreateRealOcc(*callInsn, opnd, kOccStore); @@ -4650,8 +4360,7 @@ void GraphColorRegAllocator::FinalizeRegisters() { if (doMultiPass && hasSpill) { if (GCRA_DUMP) { LogInfo::MapleLogger() << "In this round, spill vregs : \n"; - for (auto &it: std::as_const(lrMap)) { - LiveRange *lr = it.second; + for (auto [_, lr]: std::as_const(lrMap)) { if (lr->IsSpilled()) { LogInfo::MapleLogger() << "R" << lr->GetRegNO() << " "; } @@ -4820,7 +4529,7 @@ bool GraphColorRegAllocator::AllocateRegisters() { */ regInfo->Fini(); -#if DEBUG +#if defined(DEBUG) && DEBUG int32 cnt = 0; FOR_ALL_BB(bb, cgFunc) { FOR_BB_INSNS(insn, bb) { @@ -4835,11 +4544,11 @@ bool GraphColorRegAllocator::AllocateRegisters() { bfs = &localBfs; bfs->ComputeBlockOrder(); - ComputeLiveRanges(); + ComputeLiveRangesAndConflict(); InitFreeRegPool(); - BuildInterferenceGraph(); + CalculatePriority(); Separate(); diff --git a/src/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp b/src/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp index 50b371bbb2c67c2b90820710f4416b9351d3a3ad..5b0a8e018773456c2ec345b8bde72414ef47555c 100644 --- a/src/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp +++ b/src/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp @@ -579,7 +579,7 @@ void LSRALinearScanRegAllocator::SetupLiveInterval(Operand &opnd, Insn &insn, bo */ void LSRALinearScanRegAllocator::LiveInterval::AddRange(uint32 from, uint32 to) { if (ranges.empty()) { - ranges.push_back(std::pair(from, to)); + ranges.emplace_back(std::pair(from, to)); } else { if (to < ranges.front().first) { (void)ranges.insert(ranges.cbegin(), std::pair(from, to)); @@ -881,7 +881,7 @@ void LSRALinearScanRegAllocator::ComputeLiveIntervalForEachOperand(Insn &insn) { */ for (int32 i = 
static_cast(opndNum - 1); i >= 0; --i) { Operand &opnd = insn.GetOperand(static_cast(i)); - const OpndDesc *opndDesc = md->GetOpndDes(i); + const OpndDesc *opndDesc = md->GetOpndDes(static_cast(i)); ASSERT(opndDesc != nullptr, "ptr null check."); if (opnd.IsList()) { auto &listOpnd = static_cast(opnd); @@ -980,11 +980,9 @@ void LSRALinearScanRegAllocator::ComputeLiveInterval() { static_cast(li->GetFirstDef() - li->GetLastUse())); } } - if (LSRA_DUMP) { PrintLiveIntervals(); } - } /* A physical register is freed at the end of the live interval. Return to pool. */ @@ -1514,7 +1512,7 @@ RegOperand *LSRALinearScanRegAllocator::AssignPhysRegs(Operand &opnd, const Insn } if (LSRA_DUMP) { - uint32 activeSz = active.size(); + size_t activeSz = active.size(); LogInfo::MapleLogger() << "\tAssignPhysRegs-active_sz " << activeSz << "\n"; } @@ -1733,7 +1731,7 @@ RegOperand *LSRALinearScanRegAllocator::HandleSpillForInsn(const Insn &insn, Ope return newOpnd; } -bool LSRALinearScanRegAllocator::OpndNeedAllocation(const Insn &insn, Operand &opnd, bool isDef, uint32 insnNum) { +bool LSRALinearScanRegAllocator::OpndNeedAllocation(Operand &opnd, bool isDef, uint32 insnNum) { if (!opnd.IsRegister()) { return false; } @@ -1933,7 +1931,7 @@ void LSRALinearScanRegAllocator::LiveIntervalAnalysis() { } /* 2 get interfere info, and analysis */ - uint32 interNum = active.size(); + size_t interNum = active.size(); if (LSRA_DUMP) { LogInfo::MapleLogger() << "In insn " << insn->GetId() << ", " << interNum << " overlap live intervals.\n"; LogInfo::MapleLogger() << "\n"; @@ -1997,7 +1995,7 @@ void LSRALinearScanRegAllocator::AssignPhysRegsForInsn(Insn &insn) { if (opnd.IsList()) { auto &listOpnd = static_cast(opnd); for (auto op : listOpnd.GetOperands()) { - if (!OpndNeedAllocation(insn, *op, isDef, insn.GetId())) { + if (!OpndNeedAllocation(*op, isDef, insn.GetId())) { continue; } if (isDef && !fastAlloc) { @@ -2030,7 +2028,7 @@ void LSRALinearScanRegAllocator::AssignPhysRegsForInsn(Insn &insn) 
{ Operand *offset = memOpnd.GetIndexRegister(); isDef = false; if (base != nullptr) { - if (OpndNeedAllocation(insn, *base, isDef, insn.GetId())) { + if (OpndNeedAllocation(*base, isDef, insn.GetId())) { newOpnd = AssignPhysRegs(*base, insn); if (newOpnd == nullptr) { SetOperandSpill(*base); @@ -2039,7 +2037,7 @@ void LSRALinearScanRegAllocator::AssignPhysRegsForInsn(Insn &insn) { } } if (offset != nullptr) { - if (!OpndNeedAllocation(insn, *offset, isDef, insn.GetId())) { + if (!OpndNeedAllocation(*offset, isDef, insn.GetId())) { continue; } newOpnd = AssignPhysRegs(*offset, insn); @@ -2048,7 +2046,7 @@ void LSRALinearScanRegAllocator::AssignPhysRegsForInsn(Insn &insn) { } } } else { - if (!OpndNeedAllocation(insn, opnd, isDef, insn.GetId())) { + if (!OpndNeedAllocation(opnd, isDef, insn.GetId())) { continue; } if (isDef && !fastAlloc) { @@ -2444,7 +2442,7 @@ bool LSRALinearScanRegAllocator::AllocateRegisters() { (spillCount + callerSaveSpillCount) << " SPILL\n"; LogInfo::MapleLogger() << "Total " << "(" << reloadCount << "+ " << callerSaveReloadCount << ") = " << (reloadCount + callerSaveReloadCount) << " RELOAD\n"; - uint32_t insertInsn = spillCount + callerSaveSpillCount + reloadCount + callerSaveReloadCount; + uint64 insertInsn = spillCount + callerSaveSpillCount + reloadCount + callerSaveReloadCount; float rate = (float(insertInsn) / float(insnNumBeforRA)); LogInfo::MapleLogger() <<"insn Num Befor RA:"<< insnNumBeforRA <<", insert " << insertInsn << " insns: " << ", insertInsn/insnNumBeforRA: "<< rate <<"\n"; diff --git a/src/mapleall/maple_be/src/cg/regsaves.cpp b/src/mapleall/maple_be/src/cg/regsaves.cpp index c3a7a98ceeef177e596e34277595b234811b70b4..9bbb3225875efc44b30dfb4a50d26603127ed0ba 100644 --- a/src/mapleall/maple_be/src/cg/regsaves.cpp +++ b/src/mapleall/maple_be/src/cg/regsaves.cpp @@ -57,7 +57,7 @@ bool CgRegSavesOpt::PhaseRun(maplebe::CGFunc &f) { RegSavesOpt *regSavesOpt = nullptr; #if TARGAARCH64 regSavesOpt = memPool->New(f, *memPool, *dom, 
*pdom); -#elif || TARGRISCV64 +#elif defined(TARGRISCV64) || TARGRISCV64 regSavesOpt = memPool->New(f, *memPool); #endif diff --git a/src/mapleall/maple_be/src/cg/rematerialize.cpp b/src/mapleall/maple_be/src/cg/rematerialize.cpp index ec72b5a18e8d8902053058be924efb3a4c62835b..b005c2162fa285d2da68c79a4ee6d33a9ff4159d 100644 --- a/src/mapleall/maple_be/src/cg/rematerialize.cpp +++ b/src/mapleall/maple_be/src/cg/rematerialize.cpp @@ -27,9 +27,8 @@ bool Rematerializer::IsRematerializableForAddrof(CGFunc &cgFunc, const LiveRange } /* cost too much to remat */ if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && - ((fieldID != 0) || - (cgFunc.GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > - k16ByteSize))) { + ((fieldID != 0) || (GlobalTables::GetTypeTable().GetTypeFromTyIdx( + symbol->GetType()->GetTypeIndex().GetIdx())->GetSize() > k16ByteSize))) { return false; } if (!addrUpper && CGOptions::IsPIC() && ((symbol->GetStorageClass() == kScGlobal) || @@ -70,7 +69,7 @@ bool Rematerializer::IsRematerializableForDread(CGFunc &cgFunc, RematLevel remat if (fieldID != 0) { ASSERT(symbol->GetType()->IsMIRStructType(), "non-zero fieldID for non-structure"); MIRStructType *structType = static_cast(symbol->GetType()); - offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + offset = structType->GetFieldOffsetFromBaseAddr(fieldID).byteOffset; } if (rematLev < kRematDreadGlobal && !symbol->IsLocal()) { return false; @@ -127,7 +126,7 @@ std::vector Rematerializer::Rematerialize(CGFunc &cgFunc, RegOperand ® ASSERT(symbol->GetType()->IsMIRStructType(), "non-zero fieldID for non-structure"); MIRStructType *structType = static_cast(symbol->GetType()); symType = structType->GetFieldType(fieldID)->GetPrimType(); - offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + offset = structType->GetFieldOffsetFromBaseAddr(fieldID).byteOffset; } return RematerializeForDread(cgFunc, regOp, offset, 
symType); } @@ -138,7 +137,7 @@ std::vector Rematerializer::Rematerialize(CGFunc &cgFunc, RegOperand ® ASSERT(symbol->GetType()->IsMIRStructType() || symbol->GetType()->IsMIRUnionType(), "non-zero fieldID for non-structure"); MIRStructType *structType = static_cast(symbol->GetType()); - offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + offset = structType->GetFieldOffsetFromBaseAddr(fieldID).byteOffset; } return RematerializeForAddrof(cgFunc, regOp, offset); } diff --git a/src/mapleall/maple_be/src/cg/schedule.cpp b/src/mapleall/maple_be/src/cg/schedule.cpp index 788c846cc7b841e7856e4ebb56452c04ec55f2e8..0fe5cbff3101c50306c50564ecd592dd44818f3d 100644 --- a/src/mapleall/maple_be/src/cg/schedule.cpp +++ b/src/mapleall/maple_be/src/cg/schedule.cpp @@ -405,7 +405,7 @@ void RegPressureSchedule::UpdateReadyList(const DepNode &node) { if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) { continue; } - succNode.DescreaseValidPredsSize(); + succNode.DecreaseValidPredsSize(); if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && (succNode.GetState() == kNormal)) { readyList.emplace_back(&succNode); succNode.SetState(kReady); @@ -430,7 +430,7 @@ void RegPressureSchedule::BruteUpdateReadyList(const DepNode &node, std::vector< if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) { continue; } - succNode.DescreaseValidPredsSize(); + succNode.DecreaseValidPredsSize(); if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && (succNode.GetState() == kNormal)) { readyList.emplace_back(&succNode); succNode.SetState(kReady); diff --git a/src/mapleall/maple_be/src/cg/tailcall.cpp b/src/mapleall/maple_be/src/cg/tailcall.cpp index ec457c7bf55a2ad91005003aa2b2850cae980506..8f55841357deeda47e5d9e5bcb34fee6a17e696e 100644 --- a/src/mapleall/maple_be/src/cg/tailcall.cpp +++ b/src/mapleall/maple_be/src/cg/tailcall.cpp @@ -19,32 +19,11 @@ namespace maplebe { using namespace 
maple; -/* tailcallopt cannot be used if stack address of this function is taken and passed, - not checking the passing for now, just taken */ -bool TailCallOpt::IsStackAddrTaken() { - FOR_ALL_BB(bb, &cgFunc) { - FOR_BB_INSNS_REV(insn, bb) { - if (!IsAddOrSubOp(insn->GetMachineOpcode())) { - continue; - } - for (uint32 i = 0; i < insn->GetOperandSize(); i++) { - if (insn->GetOperand(i).IsRegister()) { - RegOperand ® = static_cast(insn->GetOperand(i)); - if (OpndIsStackRelatedReg(reg)) { - return true; - } - } - } - } - } - return false; -} - /* * Remove redundant mov and mark optimizable bl/blr insn in the BB. * Return value: true to call this modified block again. */ -bool TailCallOpt::OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &exitBB) const { +bool TailCallOpt::OptimizeTailBB(BB &bb, MapleSet &callInsns) const { Insn *lastInsn = bb.GetLastInsn(); if (bb.NumInsn() == 1 && lastInsn->IsMachineInstruction() && !AArch64isa::IsPseudoInstruction(lastInsn->GetMachineOpcode()) && !InsnIsCallCand(*bb.GetLastInsn())) { @@ -72,7 +51,7 @@ bool TailCallOpt::OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &e } bb.RemoveInsn(*insn); continue; - } else if (InsnIsIndirectCall(*insn)) { + } else if (InsnIsIndirectCall(*insn) && insn->GetMayTailCall()) { if (insn->GetOperand(0).IsRegister()) { RegOperand ® = static_cast(insn->GetOperand(0)); if (OpndIsCalleeSaveReg(reg)) { @@ -81,15 +60,11 @@ bool TailCallOpt::OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &e } (void)callInsns.insert(insn); return false; - } else if (InsnIsCall(*insn)) { + } else if (InsnIsCall(*insn) && insn->GetMayTailCall()) { (void)callInsns.insert(insn); return false; } else if (InsnIsUncondJump(*insn)) { - LabelOperand &bLab = static_cast(insn->GetOperand(0)); - if (exitBB.GetLabIdx() == bLab.GetLabelIndex()) { - continue; - } - return false; + continue; } else { return false; } @@ -98,20 +73,20 @@ bool TailCallOpt::OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &e } /* Recursively 
invoke this function for all predecessors of exitBB */ -void TailCallOpt::TailCallBBOpt(BB &bb, MapleSet &callInsns, BB &exitBB) { +void TailCallOpt::TailCallBBOpt(BB &bb, MapleSet &callInsns, BB &exitBB) { /* callsite also in the return block as in "if () return; else foo();" call in the exit block */ - if (!bb.IsEmpty() && !OptimizeTailBB(bb, callInsns, exitBB)) { + if (!bb.IsEmpty() && !OptimizeTailBB(bb, callInsns)) { return; } for (auto tmpBB : bb.GetPreds()) { - if (tmpBB->GetSuccs().size() != 1 || !tmpBB->GetEhSuccs().empty() || + if (tmpBB->GetId() == bb.GetId() || tmpBB->GetSuccs().size() != 1 || !tmpBB->GetEhSuccs().empty() || (tmpBB->GetKind() != BB::kBBFallthru && tmpBB->GetKind() != BB::kBBGoto)) { continue; } - if (OptimizeTailBB(*tmpBB, callInsns, exitBB)) { + if (OptimizeTailBB(*tmpBB, callInsns)) { TailCallBBOpt(*tmpBB, callInsns, exitBB); } } @@ -159,7 +134,7 @@ bool TailCallOpt::DoTailCallOpt() { if (cgFunc.GetCleanupBB() != nullptr && cgFunc.GetCleanupBB()->GetPrev() != nullptr) { exitBB = cgFunc.GetCleanupBB()->GetPrev(); } else { - exitBB = cgFunc.GetLastBB()->GetPrev(); + exitBB = cgFunc.GetLastBB(); } } else { exitBB = cgFunc.GetExitBBsVec().front(); @@ -167,11 +142,11 @@ bool TailCallOpt::DoTailCallOpt() { uint32 i = 1; size_t optCount = 0; do { - MapleSet callInsns(tmpAlloc.Adapter()); + MapleSet callInsns(tmpAlloc.Adapter()); TailCallBBOpt(*exitBB, callInsns, *exitBB); if (callInsns.size() != 0) { optCount += callInsns.size(); - (void)exitBB2CallSitesMap.emplace(exitBB, callInsns); + exitBB2CallSitesMap.emplace(exitBB, callInsns); } if (i < exitBBSize) { exitBB = cgFunc.GetExitBBsVec()[i]; @@ -185,15 +160,9 @@ bool TailCallOpt::DoTailCallOpt() { return nCount == optCount; } -void TailCallOpt::ConvertToTailCalls(MapleSet &callInsnsMap) { +void TailCallOpt::ConvertToTailCalls(MapleSet &callInsnsMap) { BB *exitBB = GetCurTailcallExitBB(); - /* ExitBB is filled only by now. 
If exitBB has restore of SP indicating extra stack space has - been allocated, such as a function call with more than 8 args, argument with large aggr etc */ - int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); - if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { - return; - } FOR_BB_INSNS(insn, exitBB) { if (InsnIsAddWithRsp(*insn)) { return; @@ -222,10 +191,9 @@ void TailCallOpt::ConvertToTailCalls(MapleSet &callInsnsMap) { !CGCFG::InSwitchTable(sBB->GetLabIdx(), cgFunc)) { auto it = std::find(cgFunc.GetExitBBsVec().begin(), cgFunc.GetExitBBsVec().end(), sBB); CHECK_FATAL(it != cgFunc.GetExitBBsVec().end(), "find unuse exit failed"); - cgFunc.EraseExitBBsVec(it); + (void)cgFunc.EraseExitBBsVec(it); cgFunc.GetTheCFG()->RemoveBB(*sBB); } - break; } } } @@ -234,7 +202,7 @@ void TailCallOpt::TideExitBB() { cgFunc.GetTheCFG()->UnreachCodeAnalysis(); std::vector realRets; for (auto *exitBB : cgFunc.GetExitBBsVec()) { - if (!exitBB->GetPreds().empty()) { + if (!exitBB->GetPreds().empty() || exitBB == cgFunc.GetFirstBB()) { (void)realRets.emplace_back(exitBB); } } @@ -246,14 +214,14 @@ void TailCallOpt::TideExitBB() { void TailCallOpt::Run() { stackProtect = cgFunc.GetNeedStackProtect(); - if (CGOptions::DoTailCallOpt() && !IsStackAddrTaken() && !stackProtect) { + if (CGOptions::DoTailCallOpt()) { (void)DoTailCallOpt(); // return value == "no call instr/only or 1 tailcall" } if (cgFunc.GetMirModule().IsCModule() && !exitBB2CallSitesMap.empty()) { cgFunc.GetTheCFG()->InitInsnVisitor(cgFunc); for (auto pair : exitBB2CallSitesMap) { BB *curExitBB = pair.first; - MapleSet& callInsnsMap = pair.second; + MapleSet& callInsnsMap = pair.second; SetCurTailcallExitBB(curExitBB); ConvertToTailCalls(callInsnsMap); } diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp index 755fefaf4adda93c8e032615d4e0b36f4a542ef1..4bfcd268a89a7fb60fda51e54becace65265e4ce 100644 --- 
a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -44,7 +44,7 @@ MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, Fie ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); MIRStructType *structType = static_cast(mirType); symType = structType->GetFieldType(fieldId)->GetPrimType(); - fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + fieldOffset = static_cast(structType->GetFieldOffsetFromBaseAddr(fieldId).byteOffset); } uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); @@ -295,7 +295,7 @@ void X64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, uint32 bool X64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { if (symbol.GetStorageClass() == kScFormal && - cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + GlobalTables::GetTypeTable().GetTypeFromTyIdx(symbol.GetTyIdx().GetIdx())->GetSize() > k16ByteSize) { return true; } return false; @@ -1090,7 +1090,7 @@ Operand *X64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, Operand *X64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) { ASSERT(opcode == OP_div || opcode == OP_rem, "unsupported opcode"); - if(IsSignedInteger(primType) || IsUnsignedInteger(primType)) { + if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) { uint32 bitSize = GetPrimTypeBitSize(primType); /* copy dividend to eax */ RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitSize, diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_args.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_args.cpp index e3c287b3fb2b361db7841f556f57f34bb6ecc79e..a07b9e1da7ee9429b971bf434ebbd2a927949828 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_args.cpp +++ 
b/src/mapleall/maple_be/src/cg/x86_64/x64_args.cpp @@ -40,7 +40,7 @@ void X64MoveRegArgs::CollectRegisterArgs(std::map &argsList, MIRFunction *func = const_cast(x64CGFunc->GetBecommon().GetMIRModule().CurFunction()); if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) { TyIdx tyIdx = x64CGFunc->GetBecommon().GetFuncReturnType(*func); - if (x64CGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { + if (GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize() <= k16ByteSize) { start = 1; } } @@ -77,7 +77,7 @@ ArgInfo X64MoveRegArgs::GetArgInfo(std::map &argsList, ArgInfo argInfo; argInfo.reg = argsList[argIndex]; argInfo.mirTy = x64CGFunc->GetFunction().GetNthParamType(argIndex); - argInfo.symSize = x64CGFunc->GetBecommon().GetTypeSize(argInfo.mirTy->GetTypeIndex()); + argInfo.symSize = argInfo.mirTy->GetSize(); argInfo.memPairSecondRegSize = 0; argInfo.doMemPairOpt = false; argInfo.createTwoStores = false; @@ -266,7 +266,7 @@ void X64MoveRegArgs::MoveVRegisterArgs() { MIRFunction *func = const_cast(x64CGFunc->GetBecommon().GetMIRModule().CurFunction()); if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) { TyIdx idx = x64CGFunc->GetBecommon().GetFuncReturnType(*func); - if (x64CGFunc->GetBecommon().GetTypeSize(idx) <= k16BitSize) { + if (GlobalTables::GetTypeTable().GetTypeFromTyIdx(idx)->GetSize() <= k16BitSize) { start = 1; } } diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp index 015098686e69dd6409d0dfc6f76fb59ca43b7220..96ee659c07b4392a1f273ec0b47e05414a33ec8c 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp @@ -85,7 +85,7 @@ int32 Classification(const BECommon &be, MIRType &mirType, ArgumentClass classes * The size of each argument gets rounded up to eightbytes, * Therefore the stack will always be eightbyte aligned. 
*/ - uint64 sizeOfTy = RoundUp(be.GetTypeSize(mirType.GetTypeIndex()), k8ByteSize); + uint64 sizeOfTy = RoundUp(mirType.GetSize(), k8ByteSize); if (sizeOfTy == 0) { return 0; } diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp index b3a34fe6680b9ac055b1541c374949a51a2e2758..b7071d2b7066caef1f0f6fd05afd0ff73f893068 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp @@ -19,7 +19,7 @@ namespace maplebe { /* Initialize cfg optimization patterns */ void X64CFGOptimizer::InitOptimizePatterns() { - diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + // The SequentialJumpPattern is not supported until the interface is optimized. diffPassPatterns.emplace_back(memPool->New(*cgFunc)); diffPassPatterns.emplace_back(memPool->New(*cgFunc)); diffPassPatterns.emplace_back(memPool->New(*cgFunc)); diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp index d58b82d54848e2c8adf999d902bcb5dbb166cbc6..5152c079de835c6ae5cf6fb1632e7b5c86fd57b9 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp @@ -25,25 +25,28 @@ const InsnDesc X64CG::kMd[kMopLast] = { }; #undef DEFINE_MOP -std::array, kIntRegTypeNum> X64CG::intRegNames = { +std::array, X64CG::kIntRegTypeNum> X64CG::intRegNames = { std::array { "err", "al", "bl", "cl", "dl", "spl", "bpl", "sil", "dil", "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", - "r14b", "r15b", "err1", "errMaxRegNum" - }, std::array { + "r14b", "r15b", "err1", "errMaxRegNum"}, + std::array { "err", "ah", "bh", "ch", "dh", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", - "err10", "err11", "err12", "errMaxRegNum" - }, std::array { + "err10", "err11", "err12", "errMaxRegNum"}, + std::array { "err", "ax", "bx", "cx", "dx", "sp", "bp", "si", "di", "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", - 
"r14w", "r15w", "err1", "errMaxRegNum" - }, std::array { + "r14w", "r15w", "err1", "errMaxRegNum"}, + std::array { "err", "eax", "ebx", "ecx", "edx", "esp", "ebp", "esi", "edi", "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d", "err1", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", - "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "errMaxRegNum" - }, std::array { + "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "errMaxRegNum"}, + std::array { "err", "rax", "rbx", "rcx", "rdx", "rsp", "rbp", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "rip", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", - "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "errMaxRegNum" - }, + "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "errMaxRegNum"}, + std::array { + "err", "err", "err", "err", "err", "err", "err", "err", "err", "err", "err", "err", "err", "err", "err", + "err", "err", "err", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", + "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "errMaxRegNum"}, }; void X64CG::EnrollTargetPhases(maple::MaplePhaseManager *pm) const { diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp index cb7cf3fb6f09b5ce1d797e3081290144f7e8a0da..f33628525dfa33c8ac045b17b7ab4a0c872baf31 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp @@ -495,11 +495,6 @@ Operand *X64CGFunc::SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimT void X64CGFunc::GenerateYieldpoint(BB &bb) { CHECK_FATAL(false, "NIY"); } -Operand &X64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) { - CHECK_FATAL(false, "NIY"); - Operand *a; - return *a; -} Operand &X64CGFunc::GetOrCreateRflag() { CHECK_FATAL(false, "NIY"); Operand *a; @@ -548,7 +543,7 @@ RegOperand 
&X64CGFunc::GetOrCreateFramePointerRegOperand() { return *a; } RegOperand &X64CGFunc::GetOrCreateStackBaseRegOperand() { - return GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt); + return GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerBitSize(), kRegTyInt); } RegOperand &X64CGFunc::GetZeroOpnd(uint32 size) { CHECK_FATAL(false, "NIY"); @@ -576,9 +571,15 @@ void X64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn void X64CGFunc::CleanupDeadMov(bool dump) { CHECK_FATAL(false, "NIY"); } + void X64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) { - CHECK_FATAL(false, "NIY"); + for (uint32 i = x64::kRinvalid; i <= x64::kMaxRegNum; ++i) { + if (x64::IsCallerSaveReg(static_cast(i))) { + realCallerSave.insert(i); + } + } } + bool X64CGFunc::IsFrameReg(const RegOperand &opnd) const { CHECK_FATAL(false, "NIY"); return false; @@ -761,7 +762,7 @@ RegOperand *X64CGFunc::GetBaseReg(const maplebe::SymbolAlloc &symAlloc) { ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) || (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)), "NIY"); if (sgKind == kMsLocals || sgKind == kMsArgsRegPassed || sgKind == kMsArgsStkPassed) { - return &GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt); + return &GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerBitSize(), kRegTyInt); } else { CHECK_FATAL(false, "NIY sgKind"); } @@ -820,7 +821,7 @@ MemOperand *X64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 memSize) { } RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand(); - int32 offset = GetOrCreatSpillRegLocation(vrNum); + int32 offset = GetOrCreatSpillRegLocation(vrNum, memBitSize / kBitsPerByte); MemOperand *memOpnd = &GetOpndBuilder()->CreateMem(baseOpnd, offset, memBitSize); spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); return memOpnd; @@ -829,20 +830,18 @@ MemOperand *X64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 
memSize) { } } +RegOperand *X64CGFunc::SelectIntrinsicOpLoadTlsAnchor(const IntrinsicopNode& intrinsicopNode, + const BaseNode &parent) { + CHECK_FATAL_FALSE("Tls anchor not supported in x86_64 yet"); + return nullptr; +} + void X64OpndDumpVisitor::Visit(maplebe::RegOperand *v) { DumpOpndPrefix(); LogInfo::MapleLogger() << "reg "; DumpRegInfo(*v); DumpSize(*v); - const OpndDesc *regDesc = GetOpndDesc(); - LogInfo::MapleLogger() << " ["; - if (regDesc->IsRegDef()) { - LogInfo::MapleLogger() << "DEF "; - } - if (regDesc->IsRegUse()) { - LogInfo::MapleLogger() << "USE"; - } - LogInfo::MapleLogger() << "]"; + DumpOpndDesc(); DumpOpndSuffix(); } @@ -878,9 +877,10 @@ void X64OpndDumpVisitor::DumpRegInfo(maplebe::RegOperand &v) { if (v.GetRegisterNumber() > kBaseVirtualRegNO) { LogInfo::MapleLogger() << "V" << v.GetRegisterNumber(); } else { - bool r32 = (v.GetSize() == k32BitSize); + uint32 regType = (v.GetSize() == k32BitSize) ? X64CG::kR32List : X64CG::kR64List; LogInfo::MapleLogger() << "%" - << X64CG::intRegNames[(r32 ? X64CG::kR32List : X64CG::kR64List)][v.GetRegisterNumber()]; + << X64CG::intRegNames[regType][v.GetRegisterNumber()] + << " R" << v.GetRegisterNumber(); } } @@ -898,10 +898,12 @@ void X64OpndDumpVisitor::Visit(maplebe::ListOperand *v) { MapleList opndList = v->GetOperands(); for (auto it = opndList.begin(); it != opndList.end();) { - (*it)->Dump(); - LogInfo::MapleLogger() << (++it == opndList.end() ? "" : " ,"); + LogInfo::MapleLogger() << "reg "; + DumpRegInfo(*(*it)); + DumpSize(*(*it)); + LogInfo::MapleLogger() << (++it == opndList.end() ? 
"" : ", "); } - DumpSize(*v); + DumpOpndDesc(); DumpOpndSuffix(); } diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp index 6b2133baf7daa03733e2ab4793c68d499902048a..7fec34a27a30aa06e173a50f50c28d2b728f17f5 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp @@ -47,7 +47,7 @@ void X64Emitter::EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string } } -void X64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { +void X64OpndEmitVisitor::Visit(maplebe::RegOperand *v, uint32 regSize) { ASSERT(v->IsRegister(), "NIY"); /* check legality of register operand: reg no. should not be larger than 100 or equal to 0 */ ASSERT(v->IsPhysicalRegister(), "register is still virtual"); @@ -55,7 +55,7 @@ void X64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { /* Mapping with physical register after register allocation is done * try table-driven register mapping ? */ uint8 regType = -1; - switch (v->GetSize()) { + switch (regSize) { case k8BitSize: regType = v->IsHigh8Bit() ? 
X64CG::kR8HighList : X64CG::kR8LowList; break; @@ -68,6 +68,9 @@ void X64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { case k64BitSize: regType = X64CG::kR64List; break; + case k128BitSize: + regType = X64CG::kR128List; + break; default: CHECK_FATAL(false, "unkown reg size"); break; @@ -75,6 +78,10 @@ void X64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { emitter.Emit("%").Emit(X64CG::intRegNames[regType][v->GetRegisterNumber()]); } +void X64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { + Visit(v, opndProp->GetSize()); +} + void X64OpndEmitVisitor::Visit(maplebe::ImmOperand *v) { ASSERT(v->IsImmediate(), "NIY"); emitter.Emit("$"); @@ -115,12 +122,12 @@ void X64OpndEmitVisitor::Visit(maplebe::MemOperand *v) { /* Emit RBP or EBP only when index register doesn't exist */ if ((v->GetIndexRegister() != nullptr && v->GetBaseRegister()->GetRegisterNumber() != x64::RBP) || v->GetIndexRegister() == nullptr) { - Visit(v->GetBaseRegister()); + Visit(v->GetBaseRegister(), GetPointerBitSize()); } } if (v->GetIndexRegister() != nullptr) { emitter.Emit(", "); - Visit(v->GetIndexRegister()); + Visit(v->GetIndexRegister(), GetPointerBitSize()); emitter.Emit(", ").Emit(v->GetScaleOperand()->GetValue()); } emitter.Emit(")"); @@ -201,7 +208,7 @@ void DumpTargetASM(Emitter &emitter, Insn &insn) { for (int i = 0; i < size; i++) { Operand *opnd = &insn.GetOperand(i); - X64OpndEmitVisitor visitor(emitter); + X64OpndEmitVisitor visitor(emitter, curMd.GetOpndDes(i)); opnd->Accept(visitor); if (i != size - 1) { emitter.Emit(",\t"); diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp index 2bd58bdc9a39a902bc5a84de6fe2f712d454bdfd..ed8fac9485ef406c6260a0dc0474da49a7945078 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp @@ -82,8 +82,8 @@ uint32 X64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 } void 
X64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const { - align = be.GetTypeAlign(typeIdx); - size = static_cast(be.GetTypeSize(typeIdx)); + align = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx)->GetAlign(); + size = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx)->GetSize(); } void X64MemLayout::LayoutVarargParams() { @@ -97,7 +97,7 @@ void X64MemLayout::LayoutVarargParams() { if (i == 0) { if (be.HasFuncReturnType(*func)) { TyIdx tidx = be.GetFuncReturnType(*func); - if (be.GetTypeSize(tidx.GetIdx()) <= k16ByteSize) { + if (GlobalTables::GetTypeTable().GetTypeFromTyIdx(tidx.GetIdx())->GetSize() <= k16ByteSize) { continue; } } @@ -170,7 +170,8 @@ void X64MemLayout::LayoutFormalParams() { if (!sym->IsPreg()) { SetSizeAlignForTypeIdx(ptyIdx, size, align); symLoc->SetMemSegment(GetSegArgsRegPassed()); - if (ty->GetPrimType() == PTY_agg && be.GetTypeSize(ptyIdx) > k4ByteSize) { + if (ty->GetPrimType() == PTY_agg && + GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx)->GetSize() > k4ByteSize) { /* struct param aligned on 8 byte boundary unless it is small enough */ align = GetPointerSize(); } @@ -204,14 +205,14 @@ void X64MemLayout::LayoutLocalVariables() { symLoc->SetMemSegment(segLocals); MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); - uint32 align = be.GetTypeAlign(tyIdx); + uint32 align = ty->GetAlign(); if (ty->GetPrimType() == PTY_agg && align < k8BitSize) { segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8BitSize))); } else { segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); } symLoc->SetOffset(segLocals.GetSize()); - segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + segLocals.SetSize(segLocals.GetSize() + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->GetSize()); } } @@ -228,22 +229,11 @@ void X64MemLayout::AssignSpillLocationsToPseudoRegisters() { segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType))); 
symLoc->SetOffset(segLocals.GetSize()); MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType]; - segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex())); + segLocals.SetSize(segLocals.GetSize() + mirTy->GetSize()); spillLocTable[i] = symLoc; } } -SymbolAlloc *X64MemLayout::AssignLocationToSpillReg(regno_t vrNum) { - X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); - symLoc->SetMemSegment(segSpillReg); - uint32 regSize = GetPointerSize(); - segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), regSize)); - symLoc->SetOffset(segSpillReg.GetSize()); - segSpillReg.SetSize(segSpillReg.GetSize() + regSize); - SetSpillRegLocInfo(vrNum, *symLoc); - return symLoc; -} - void X64MemLayout::LayoutReturnRef(int32 &structCopySize, int32 &maxParmStackSize) { segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp index 170a4dc93e68d1b8edea23d2fe740b5498ab128e..975ef4de6f10c03289db6fe67f43819166258f38 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp @@ -173,4 +173,10 @@ BB *X64InsnVisitor::CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetF (void)isTargetFallthru; return nullptr; } + +void X64InsnVisitor::ModifyFathruBBToGotoBB(BB &bb, LabelIdx labelIdx) const { + ASSERT(false, "not implement in X86_64"); + (void)bb; + return; +} } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp index 664af1029e7bc2456b70156aa2b5757bc7165a8d..54214d210853f5db82891c9cf3278de3cda051e1 100644 --- a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -141,7 +141,7 @@ void 
X64Standardize::StdzFloatingNeg(Insn &insn) { uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? k32BitSize : k64BitSize; // mov dest -> tmpOperand0 - MOperator movOp = mOp == abstract::MOP_neg_f_32 ? x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; + MOperator movOp = (mOp == abstract::MOP_neg_f_32) ? x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; RegOperand *tmpOperand0 = &GetCgFunc()->GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); Insn &movInsn0 = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); Operand &dest = insn.GetOperand(kInsnFirstOpnd); @@ -170,6 +170,7 @@ void X64Standardize::StdzFloatingNeg(Insn &insn) { } // mov tmpOperand0 -> dest + movOp = (mOp == abstract::MOP_neg_f_32) ? x64::MOP_movd_r_fr : x64::MOP_movq_r_fr; Insn &movq = GetCgFunc()->GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); movq.AddOpndChain(*tmpOperand0).AddOpndChain(dest); insn.GetBB()->InsertInsnBefore(insn, movq); diff --git a/src/mapleall/maple_driver/include/compiler.h b/src/mapleall/maple_driver/include/compiler.h index cd44d9d7d75e06f21ac22817cad9a37c4fb0eb50..e06a8d43b1713b76e4fa3e9d092a37281d1ebd50 100644 --- a/src/mapleall/maple_driver/include/compiler.h +++ b/src/mapleall/maple_driver/include/compiler.h @@ -129,7 +129,7 @@ class Jbc2MplCompiler : public Compiler { public: explicit Jbc2MplCompiler(const std::string &name) : Compiler(name) {} - ~Jbc2MplCompiler() = default; + ~Jbc2MplCompiler() override = default; private: const std::string &GetBinName() const override; @@ -144,7 +144,7 @@ class ClangCompiler : public Compiler { public: explicit ClangCompiler(const std::string &name) : Compiler(name) {} - ~ClangCompiler() = default; + ~ClangCompiler() override = default; private: const std::string &GetBinName() const override; @@ -168,7 +168,7 @@ class Cpp2MplCompiler : public Compiler { public: explicit Cpp2MplCompiler(const std::string &name) : Compiler(name) {} - ~Cpp2MplCompiler() = default; + ~Cpp2MplCompiler() override = default; private: std::string 
GetBinPath(const MplOptions &mplOptions) const override; @@ -186,7 +186,7 @@ class Dex2MplCompiler : public Compiler { public: explicit Dex2MplCompiler(const std::string &name) : Compiler(name) {} - ~Dex2MplCompiler() = default; + ~Dex2MplCompiler() override = default; #ifdef INTERGRATE_DRIVER ErrorCode Compile(MplOptions &options, const Action &action, std::unique_ptr &theModule) override; @@ -211,7 +211,7 @@ class IpaCompiler : public Compiler { public: explicit IpaCompiler(const std::string &name) : Compiler(name) {} - ~IpaCompiler() = default; + ~IpaCompiler() override = default; private: const std::string &GetBinName() const override; @@ -223,7 +223,7 @@ class MapleCombCompiler : public Compiler { public: explicit MapleCombCompiler(const std::string &name) : Compiler(name) {} - ~MapleCombCompiler() = default; + ~MapleCombCompiler() override = default; ErrorCode Compile(MplOptions &options, const Action &action, std::unique_ptr &theModule) override; @@ -245,7 +245,7 @@ class MplcgCompiler : public Compiler { public: explicit MplcgCompiler(const std::string &name) : Compiler(name) {} - ~MplcgCompiler() = default; + ~MplcgCompiler() override = default; ErrorCode Compile(MplOptions &options, const Action &action, std::unique_ptr &theModule) override; void PrintMplcgCommand(const MplOptions &options, const Action &action, const MIRModule &md) const; @@ -263,7 +263,7 @@ class MplcgCompiler : public Compiler { class MapleCombCompilerWrp : public Compiler { public: explicit MapleCombCompilerWrp(const std::string &name) : Compiler(name) {} - ~MapleCombCompilerWrp() = default; + ~MapleCombCompilerWrp() override = default; std::string GetInputFileName(const MplOptions &options, const Action &action) const override; @@ -282,7 +282,7 @@ class AsCompiler : public Compiler { public: explicit AsCompiler(const std::string &name) : Compiler(name) {} - ~AsCompiler() = default; + ~AsCompiler() override = default; private: std::string GetBin(const MplOptions &mplOptions) const 
override; @@ -312,7 +312,7 @@ class LdCompiler : public Compiler { public: explicit LdCompiler(const std::string &name) : Compiler(name) {} - ~LdCompiler() = default; + ~LdCompiler() override = default; private: std::string GetBin(const MplOptions &mplOptions) const override; diff --git a/src/mapleall/maple_driver/include/driver_options.h b/src/mapleall/maple_driver/include/driver_options.h index d050ace03da37a5f4da6fe7ef9698afa2564b9e5..ea01c77e30c3ac08ac49e2d2680990efc7a12746 100644 --- a/src/mapleall/maple_driver/include/driver_options.h +++ b/src/mapleall/maple_driver/include/driver_options.h @@ -88,7 +88,6 @@ extern maplecl::Option fDataSections; extern maplecl::Option fRegStructReturn; extern maplecl::Option fTreeVectorize; extern maplecl::Option fNoStrictAliasing; -extern maplecl::Option fNoFatLtoObjects; extern maplecl::Option gcSections; extern maplecl::Option copyDtNeededEntries; extern maplecl::Option sOpt; @@ -100,7 +99,7 @@ extern maplecl::Option expand128Floats; extern maplecl::Option shared; extern maplecl::Option rdynamic; extern maplecl::Option dndebug; -extern maplecl::Option usesignedchar; +extern maplecl::Option useSignedChar; extern maplecl::Option suppressWarnings; extern maplecl::Option pthread; extern maplecl::Option passO2ToClang; @@ -108,6 +107,1769 @@ extern maplecl::Option defaultSafe; extern maplecl::Option onlyPreprocess; extern maplecl::Option noStdLib; extern maplecl::Option r; +extern maplecl::Option tailcall; +extern maplecl::Option fnoDiagnosticsShowCaret; +extern maplecl::Option wCCompat; +extern maplecl::Option wpaa; +extern maplecl::Option fm; +extern maplecl::Option dumpTime; +extern maplecl::Option aggressiveTlsLocalDynamicOpt; +extern maplecl::Option oDumpversion; +extern maplecl::Option oWnounusedcommandlineargument; +extern maplecl::Option oWnoconstantconversion; +extern maplecl::Option oWnounknownwarningoption; +extern maplecl::Option oW; +extern maplecl::Option oWabi; +extern maplecl::Option oWabiTag; +extern 
maplecl::Option oWaddrSpaceConvert; +extern maplecl::Option oWaddress; +extern maplecl::Option oWaggregateReturn; +extern maplecl::Option oWaggressiveLoopOptimizations; +extern maplecl::Option oWalignedNew; +extern maplecl::Option oWallocZero; +extern maplecl::Option oWalloca; +extern maplecl::Option oWarrayBounds; +extern maplecl::Option oWassignIntercept; +extern maplecl::Option oWattributes; +extern maplecl::Option oWboolCompare; +extern maplecl::Option oWboolOperation; +extern maplecl::Option oWbuiltinDeclarationMismatch; +extern maplecl::Option oWbuiltinMacroRedefined; +extern maplecl::Option oW11Compat; +extern maplecl::Option oW14Compat; +extern maplecl::Option oW1zCompat; +extern maplecl::Option oWc90C99Compat; +extern maplecl::Option oWc99C11Compat; +extern maplecl::Option oWcastAlign; +extern maplecl::Option oWcharSubscripts; +extern maplecl::Option oWchkp; +extern maplecl::Option oWclobbered; +extern maplecl::Option oWcomment; +extern maplecl::Option oWcomments; +extern maplecl::Option oWconditionallySupported; +extern maplecl::Option oWconversion; +extern maplecl::Option oWconversionNull; +extern maplecl::Option oWctorDtorPrivacy; +extern maplecl::Option oWdanglingElse; +extern maplecl::Option oWdeclarationAfterStatement; +extern maplecl::Option oWdeleteIncomplete; +extern maplecl::Option oWdeleteNonVirtualDtor; +extern maplecl::Option oWdeprecated; +extern maplecl::Option oWdeprecatedDeclarations; +extern maplecl::Option oWdisabledOptimization; +extern maplecl::Option oWdiscardedArrayQualifiers; +extern maplecl::Option oWdiscardedQualifiers; +extern maplecl::Option oWdivByZero; +extern maplecl::Option oWdoublePromotion; +extern maplecl::Option oWduplicateDeclSpecifier; +extern maplecl::Option oWduplicatedBranches; +extern maplecl::Option oWduplicatedCond; +extern maplecl::Option oWeak_reference_mismatches; +extern maplecl::Option oWeffc; +extern maplecl::Option oWemptyBody; +extern maplecl::Option oWendifLabels; +extern maplecl::Option oWenumCompare; 
+extern maplecl::Option oWerror; +extern maplecl::Option oWexpansionToDefined; +extern maplecl::Option oWfatalErrors; +extern maplecl::Option oWfloatConversion; +extern maplecl::Option oWformatContainsNul; +extern maplecl::Option oWformatExtraArgs; +extern maplecl::Option oWformatNonliteral; +extern maplecl::Option oWformatOverflow; +extern maplecl::Option oWformatSignedness; +extern maplecl::Option oWformatTruncation; +extern maplecl::Option oWformatY2k; +extern maplecl::Option oWformatZeroLength; +extern maplecl::Option oWframeAddress; +extern maplecl::Option oWframeLargerThan; +extern maplecl::Option oWfreeNonheapObject; +extern maplecl::Option oWignoredAttributes; +extern maplecl::Option oWimplicit; +extern maplecl::Option oWimplicitFunctionDeclaration; +extern maplecl::Option oWimplicitInt; +extern maplecl::Option oWincompatiblePointerTypes; +extern maplecl::Option oWinheritedVariadicCtor; +extern maplecl::Option oWinitSelf; +extern maplecl::Option oWinline; +extern maplecl::Option oWintConversion; +extern maplecl::Option oWintInBoolContext; +extern maplecl::Option oWintToPointerCast; +extern maplecl::Option oWinvalidMemoryModel; +extern maplecl::Option oWinvalidOffsetof; +extern maplecl::Option oWLiteralSuffix; +extern maplecl::Option oWLogicalNotParentheses; +extern maplecl::Option oWinvalidPch; +extern maplecl::Option oWjumpMissesInit; +extern maplecl::Option oWLogicalOp; +extern maplecl::Option oWLongLong; +extern maplecl::Option oWmain; +extern maplecl::Option oWmaybeUninitialized; +extern maplecl::Option oWmemsetEltSize; +extern maplecl::Option oWmemsetTransposedArgs; +extern maplecl::Option oWmisleadingIndentation; +extern maplecl::Option oWmissingBraces; +extern maplecl::Option oWmissingDeclarations; +extern maplecl::Option oWmissingFormatAttribute; +extern maplecl::Option oWmissingIncludeDirs; +extern maplecl::Option oWmissingParameterType; +extern maplecl::Option oWmissingPrototypes; +extern maplecl::Option oWmultichar; +extern maplecl::Option 
oWmultipleInheritance; +extern maplecl::Option oWnamespaces; +extern maplecl::Option oWnarrowing; +extern maplecl::Option oWnestedExterns; +extern maplecl::Option oWnoexcept; +extern maplecl::Option oWnoexceptType; +extern maplecl::Option oWnonTemplateFriend; +extern maplecl::Option oWnonVirtualDtor; +extern maplecl::Option oWnonnull; +extern maplecl::Option oWnonnullCompare; +extern maplecl::Option oWnormalized; +extern maplecl::Option oWnullDereference; +extern maplecl::Option oWodr; +extern maplecl::Option oWoldStyleCast; +extern maplecl::Option oWoldStyleDeclaration; +extern maplecl::Option oWoldStyleDefinition; +extern maplecl::Option oWopenmSimd; +extern maplecl::Option oWoverflow; +extern maplecl::Option oWoverlengthStrings; +extern maplecl::Option oWoverloadedVirtual; +extern maplecl::Option oWoverrideInit; +extern maplecl::Option oWoverrideInitSideEffects; +extern maplecl::Option oWpacked; +extern maplecl::Option oWpackedBitfieldCompat; +extern maplecl::Option oWpadded; +extern maplecl::Option oWparentheses; +extern maplecl::Option oWpedantic; +extern maplecl::Option oWpedanticMsFormat; +extern maplecl::Option oWplacementNew; +extern maplecl::Option oWpmfConversions; +extern maplecl::Option oWpointerCompare; +extern maplecl::Option oWpointerSign; +extern maplecl::Option oWpointerToIntCast; +extern maplecl::Option oWpragmas; +extern maplecl::Option oWprotocol; +extern maplecl::Option oWredundantDecls; +extern maplecl::Option oWregister; +extern maplecl::Option oWreorder; +extern maplecl::Option oWrestrict; +extern maplecl::Option oWreturnLocalAddr; +extern maplecl::Option oWreturnType; +extern maplecl::Option oWselector; +extern maplecl::Option oWsequencePoint; +extern maplecl::Option oWshadowIvar; +extern maplecl::Option oWshiftCountNegative; +extern maplecl::Option oWshiftCountOverflow; +extern maplecl::Option oWsignConversion; +extern maplecl::Option oWsignPromo; +extern maplecl::Option oWsizedDeallocation; +extern maplecl::Option oWsizeofArrayArgument; 
+extern maplecl::Option oWsizeofPointerMemaccess; +extern maplecl::Option oWstackProtector; +extern maplecl::Option oWstrictAliasing; +extern maplecl::Option oWstrictNullSentinel; +extern maplecl::Option oWstrictOverflow; +extern maplecl::Option oWstrictSelectorMatch; +extern maplecl::Option oWstringopOverflow; +extern maplecl::Option oWsubobjectLinkage; +extern maplecl::Option oWsuggestAttributeConst; +extern maplecl::Option oWsuggestAttributeFormat; +extern maplecl::Option oWsuggestAttributeNoreturn; +extern maplecl::Option oWsuggestAttributePure; +extern maplecl::Option oWsuggestFinalMethods; +extern maplecl::Option oWsuggestFinalTypes; +extern maplecl::Option oWswitch; +extern maplecl::Option oWswitchBool; +extern maplecl::Option oWswitchDefault; +extern maplecl::Option oWswitchEnum; +extern maplecl::Option oWswitchUnreachable; +extern maplecl::Option oWsyncNand; +extern maplecl::Option oWsystemHeaders; +extern maplecl::Option oWtautologicalCompare; +extern maplecl::Option oWtemplates; +extern maplecl::Option oWterminate; +extern maplecl::Option oWtraditional; +extern maplecl::Option oWtraditionalConversion; +extern maplecl::Option oWtrampolines; +extern maplecl::Option oWtrigraphs; +extern maplecl::Option oWundeclaredSelector; +extern maplecl::Option oWuninitialized; +extern maplecl::Option oWunknownPragmas; +extern maplecl::Option oWunsafeLoopOptimizations; +extern maplecl::Option oWunsuffixedFloatConstants; +extern maplecl::Option oWunused; +extern maplecl::Option oWunusedButSetParameter; +extern maplecl::Option oWunusedButSetVariable; +extern maplecl::Option oWunusedConstVariable; +extern maplecl::Option oWunusedFunction; +extern maplecl::Option oWunusedLabel; +extern maplecl::Option oWunusedLocalTypedefs; +extern maplecl::Option oWunusedResult; +extern maplecl::Option oWunusedValue; +extern maplecl::Option oWunusedVariable; +extern maplecl::Option oWuselessCast; +extern maplecl::Option oWvarargs; +extern maplecl::Option oWvariadicMacros; +extern 
maplecl::Option oWvectorOperationPerformance; +extern maplecl::Option oWvirtualInheritance; +extern maplecl::Option oWvirtualMoveAssign; +extern maplecl::Option oWvolatileRegisterVar; +extern maplecl::Option oWzeroAsNullPointerConstant; +extern maplecl::Option oWnoScalarStorageOrder; +extern maplecl::Option oStaticLibasan; +extern maplecl::Option oStaticLiblsan; +extern maplecl::Option oStaticLibtsan; +extern maplecl::Option oStaticLibubsan; +extern maplecl::Option oStaticLibmpx; +extern maplecl::Option oStaticLibmpxwrappers; +extern maplecl::Option oSymbolic; +extern maplecl::Option oFipaBitCp; +extern maplecl::Option oFipaVrp; +extern maplecl::Option oMindirectBranchRegister; +extern maplecl::Option oMlowPrecisionDiv; +extern maplecl::Option oMlowPrecisionSqrt; +extern maplecl::Option oM80387; +extern maplecl::Option oAllowable_client; +extern maplecl::Option oAll_load; +extern maplecl::Option oAnsi; +extern maplecl::Option oArch_errors_fatal; +extern maplecl::Option oAuxInfo; +extern maplecl::Option oBdynamic; +extern maplecl::Option oBind_at_load; +extern maplecl::Option oBstatic; +extern maplecl::Option oBundle; +extern maplecl::Option oC; +extern maplecl::Option oCC; +extern maplecl::Option oClient_name; +extern maplecl::Option oCompatibility_version; +extern maplecl::Option oCoverage; +extern maplecl::Option oCurrent_version; +extern maplecl::Option oDa; +extern maplecl::Option oDA; +extern maplecl::Option oDD; +extern maplecl::Option oDead_strip; +extern maplecl::Option oDependencyFile; +extern maplecl::Option oDH; +extern maplecl::Option oDI; +extern maplecl::Option oDM; +extern maplecl::Option oDN; +extern maplecl::Option oDp; +extern maplecl::Option oDP; +extern maplecl::Option oDU; +extern maplecl::Option oDumpfullversion; +extern maplecl::Option oDumpmachine; +extern maplecl::Option oDumpspecs; +extern maplecl::Option oDx; +extern maplecl::Option oDylib_file; +extern maplecl::Option oDylinker_install_name; +extern maplecl::Option oDynamic; +extern 
maplecl::Option oDynamiclib; +extern maplecl::Option oEB; +extern maplecl::Option oEL; +extern maplecl::Option oExported_symbols_list; +extern maplecl::Option oFaggressiveLoopOptimizations; +extern maplecl::Option oFchkpFlexibleStructTrailingArrays; +extern maplecl::Option oFchkpInstrumentCalls; +extern maplecl::Option oFchkpInstrumentMarkedOnly; +extern maplecl::Option oFchkpNarrowBounds; +extern maplecl::Option oFchkpNarrowToInnermostArray; +extern maplecl::Option oFchkpOptimize; +extern maplecl::Option oFchkpStoreBounds; +extern maplecl::Option oFchkpTreatZeroDynamicSizeAsInfinite; +extern maplecl::Option oFchkpUseFastStringFunctions; +extern maplecl::Option oFchkpUseNochkStringFunctions; +extern maplecl::Option oFchkpUseStaticBounds; +extern maplecl::Option oFchkpUseStaticConstBounds; +extern maplecl::Option oFchkpUseWrappers; +extern maplecl::Option oFcilkplus; +extern maplecl::Option oFcodeHoisting; +extern maplecl::Option oFcombineStackAdjustments; +extern maplecl::Option oFcompareDebugSecond; +extern maplecl::Option oFcompareDebug; +extern maplecl::Option oFcompareElim; +extern maplecl::Option oFconcepts; +extern maplecl::Option oFcondMismatch; +extern maplecl::Option oFconserveStack; +extern maplecl::Option oFcpropRegisters; +extern maplecl::Option oFcrossjumping; +extern maplecl::Option oFcseFollowJumps; +extern maplecl::Option oFcseSkipBlocks; +extern maplecl::Option oFcxFortranRules; +extern maplecl::Option oFcxLimitedRange; +extern maplecl::Option oFdbgCnt; +extern maplecl::Option oFdbgCntList; +extern maplecl::Option oFdce; +extern maplecl::Option oFdebugCpp; +extern maplecl::Option oFdebugPrefixMap; +extern maplecl::Option oFdebugTypesSection; +extern maplecl::Option oFdecloneCtorDtor; +extern maplecl::Option oFdeduceInitList; +extern maplecl::Option oFdelayedBranch; +extern maplecl::Option oFdeleteDeadExceptions; +extern maplecl::Option oFdeleteNullPointerChecks; +extern maplecl::Option oFdevirtualize; +extern maplecl::Option oFdevirtualizeAtLtrans; 
+extern maplecl::Option oFdevirtualizeSpeculatively; +extern maplecl::Option oFdiagnosticsGeneratePatch; +extern maplecl::Option oFdiagnosticsParseableFixits; +extern maplecl::Option oFdiagnosticsShowCaret; +extern maplecl::Option oFdiagnosticsShowOption; +extern maplecl::Option oFdirectivesOnly; +extern maplecl::Option oFdollarsInIdentifiers; +extern maplecl::Option oFdse; +extern maplecl::Option oFdumpAdaSpec; +extern maplecl::Option oFdumpClassHierarchy; +extern maplecl::Option oFdumpIpa; +extern maplecl::Option oFdumpNoaddr; +extern maplecl::Option oFdumpPasses; +extern maplecl::Option oFdumpRtlAlignments; +extern maplecl::Option oFdumpRtlAll; +extern maplecl::Option oFdumpRtlAsmcons; +extern maplecl::Option oFdumpRtlAuto_inc_dec; +extern maplecl::Option oFdumpRtlBarriers; +extern maplecl::Option oFdumpRtlBbpart; +extern maplecl::Option oFdumpRtlBbro; +extern maplecl::Option oFdumpRtlBtl2; +extern maplecl::Option oFdumpRtlBypass; +extern maplecl::Option oFdumpRtlCe1; +extern maplecl::Option oFdumpRtlCe2; +extern maplecl::Option oFdumpRtlCe3; +extern maplecl::Option oFdumpRtlCombine; +extern maplecl::Option oFdumpRtlCompgotos; +extern maplecl::Option oFdumpRtlCprop_hardreg; +extern maplecl::Option oFdumpRtlCsa; +extern maplecl::Option oFdumpRtlCse1; +extern maplecl::Option oFdumpRtlCse2; +extern maplecl::Option oFdumpRtlDbr; +extern maplecl::Option oFdumpRtlDce; +extern maplecl::Option oFdumpRtlDce1; +extern maplecl::Option oFdumpRtlDce2; +extern maplecl::Option oFdumpRtlDfinish; +extern maplecl::Option oFdumpRtlDfinit; +extern maplecl::Option oFdumpRtlEh; +extern maplecl::Option oFdumpRtlEh_ranges; +extern maplecl::Option oFdumpRtlExpand; +extern maplecl::Option oFdumpRtlFwprop1; +extern maplecl::Option oFdumpRtlFwprop2; +extern maplecl::Option oFdumpRtlGcse1; +extern maplecl::Option oFdumpRtlGcse2; +extern maplecl::Option oFdumpRtlInitRegs; +extern maplecl::Option oFdumpRtlInitvals; +extern maplecl::Option oFdumpRtlInto_cfglayout; +extern maplecl::Option 
oFdumpRtlIra; +extern maplecl::Option oFdumpRtlJump; +extern maplecl::Option oFdumpRtlLoop2; +extern maplecl::Option oFdumpRtlMach; +extern maplecl::Option oFdumpRtlMode_sw; +extern maplecl::Option oFdumpRtlOutof_cfglayout; +extern maplecl::Option oFdumpRtlPeephole2; +extern maplecl::Option oFdumpRtlPostreload; +extern maplecl::Option oFdumpRtlPro_and_epilogue; +extern maplecl::Option oFdumpRtlRee; +extern maplecl::Option oFdumpRtlRegclass; +extern maplecl::Option oFdumpRtlRnreg; +extern maplecl::Option oFdumpRtlSched1; +extern maplecl::Option oFdumpRtlSched2; +extern maplecl::Option oFdumpRtlSeqabstr; +extern maplecl::Option oFdumpRtlShorten; +extern maplecl::Option oFdumpRtlSibling; +extern maplecl::Option oFdumpRtlSms; +extern maplecl::Option oFdumpRtlSplit1; +extern maplecl::Option oFdumpRtlSplit2; +extern maplecl::Option oFdumpRtlSplit3; +extern maplecl::Option oFdumpRtlSplit4; +extern maplecl::Option oFdumpRtlSplit5; +extern maplecl::Option oFdumpRtlStack; +extern maplecl::Option oFdumpRtlSubreg1; +extern maplecl::Option oFdumpRtlSubreg2; +extern maplecl::Option oFdumpRtlSubregs_of_mode_finish; +extern maplecl::Option oFdumpRtlSubregs_of_mode_init; +extern maplecl::Option oFdumpRtlUnshare; +extern maplecl::Option oFdumpRtlVartrack; +extern maplecl::Option oFdumpRtlVregs; +extern maplecl::Option oFdumpRtlWeb; +extern maplecl::Option oFdumpStatistics; +extern maplecl::Option oFdumpTranslationUnit; +extern maplecl::Option oFdumpTree; +extern maplecl::Option oFdumpTreeAll; +extern maplecl::Option oFdumpUnnumbered; +extern maplecl::Option oFdumpUnnumberedLinks; +extern maplecl::Option oFdwarf2CfiAsm; +extern maplecl::Option oFearlyInlining; +extern maplecl::Option oFeliminateDwarf2Dups; +extern maplecl::Option oFeliminateUnusedDebugSymbols; +extern maplecl::Option oFeliminateUnusedDebugTypes; +extern maplecl::Option oFemitClassDebugAlways; +extern maplecl::Option oFemitStructDebugBaseonly; +extern maplecl::Option oFemitStructDebugReduced; +extern maplecl::Option 
oFexceptions; +extern maplecl::Option oFexpensiveOptimizations; +extern maplecl::Option oFextNumericLiterals; +extern maplecl::Option oFextendedIdentifiers; +extern maplecl::Option oFexternTlsInit; +extern maplecl::Option oFfastMath; +extern maplecl::Option oFfatLtoObjects; +extern maplecl::Option oFfiniteMathOnly; +extern maplecl::Option oFfixAndContinue; +extern maplecl::Option oFfloatStore; +extern maplecl::Option oFforScope; +extern maplecl::Option oFforwardPropagate; +extern maplecl::Option oFfreestanding; +extern maplecl::Option oFfriendInjection; +extern maplecl::Option oFgcse; +extern maplecl::Option oFgcseAfterReload; +extern maplecl::Option oFgcseLas; +extern maplecl::Option oFgcseLm; +extern maplecl::Option oFgcseSm; +extern maplecl::Option oFgimple; +extern maplecl::Option oFgnuRuntime; +extern maplecl::Option oFgnuTm; +extern maplecl::Option oFgnu89Inline; +extern maplecl::Option oFgraphiteIdentity; +extern maplecl::Option oFhoistAdjacentLoads; +extern maplecl::Option oFhosted; +extern maplecl::Option oFifConversion; +extern maplecl::Option oFifConversion2; +extern maplecl::Option oFilelist; +extern maplecl::Option oFindirectData; +extern maplecl::Option oFindirectInlining; +extern maplecl::Option oFinhibitSizeDirective; +extern maplecl::Option oFinlineFunctions; +extern maplecl::Option oFinlineFunctionsCalledOnce; +extern maplecl::Option oFinlineSmallFunctions; +extern maplecl::Option oFinstrumentFunctions; +extern maplecl::Option oFipaCp; +extern maplecl::Option oFipaCpClone; +extern maplecl::Option oFipaIcf; +extern maplecl::Option oFipaProfile; +extern maplecl::Option oFipaPta; +extern maplecl::Option oFipaPureConst; +extern maplecl::Option oFipaRa; +extern maplecl::Option oFipaReference; +extern maplecl::Option oFipaSra; +extern maplecl::Option oFiraHoistPressure; +extern maplecl::Option oFiraLoopPressure; +extern maplecl::Option oFisolateErroneousPathsAttribute; +extern maplecl::Option oFisolateErroneousPathsDereference; +extern maplecl::Option 
oFivopts; +extern maplecl::Option oFkeepInlineFunctions; +extern maplecl::Option oFkeepStaticConsts; +extern maplecl::Option oFkeepStaticFunctions; +extern maplecl::Option oFlat_namespace; +extern maplecl::Option oFlaxVectorConversions; +extern maplecl::Option oFleadingUnderscore; +extern maplecl::Option oFlocalIvars; +extern maplecl::Option oFloopBlock; +extern maplecl::Option oFloopInterchange; +extern maplecl::Option oFloopNestOptimize; +extern maplecl::Option oFloopParallelizeAll; +extern maplecl::Option oFloopStripMine; +extern maplecl::Option oFloopUnrollAndJam; +extern maplecl::Option oFlraRemat; +extern maplecl::Option oFltoOdrTypeMerging; +extern maplecl::Option oFltoReport; +extern maplecl::Option oFltoReportWpa; +extern maplecl::Option oFmemReport; +extern maplecl::Option oFmemReportWpa; +extern maplecl::Option oFmergeAllConstants; +extern maplecl::Option oFmergeConstants; +extern maplecl::Option oFmergeDebugStrings; +extern maplecl::Option oFmoduloSched; +extern maplecl::Option oFmoduloSchedAllowRegmoves; +extern maplecl::Option oFmoveLoopInvariants; +extern maplecl::Option oFmsExtensions; +extern maplecl::Option oFnewInheritingCtors; +extern maplecl::Option oFnewTtpMatching; +extern maplecl::Option oFnextRuntime; +extern maplecl::Option oFnoAccessControl; +extern maplecl::Option oFnoAsm; +extern maplecl::Option oFnoBranchCountReg; +extern maplecl::Option oFnoBuiltin; +extern maplecl::Option oFnoCanonicalSystemHeaders; +extern maplecl::Option oFCheckPointerBounds; +extern maplecl::Option oFChkpCheckIncompleteType; +extern maplecl::Option oFChkpCheckRead; +extern maplecl::Option oFChkpCheckWrite; +extern maplecl::Option oFChkpFirstFieldHasOwnBounds; +extern maplecl::Option oFDefaultInline; +extern maplecl::Option oFdefaultInteger8; +extern maplecl::Option oFdefaultReal8; +extern maplecl::Option oFDeferPop; +extern maplecl::Option oFElideConstructors; +extern maplecl::Option oFEnforceEhSpecs; +extern maplecl::Option oFFpIntBuiltinInexact; +extern 
maplecl::Option oFFunctionCse; +extern maplecl::Option oFGnuKeywords; +extern maplecl::Option oFGnuUnique; +extern maplecl::Option oFGuessBranchProbability; +extern maplecl::Option oFIdent; +extern maplecl::Option oFImplementInlines; +extern maplecl::Option oFImplicitInlineTemplates; +extern maplecl::Option oFImplicitTemplates; +extern maplecl::Option oFIraShareSaveSlots; +extern maplecl::Option oFIraShareSpillSlots; +extern maplecl::Option oFJumpTables; +extern maplecl::Option oFKeepInlineDllexport; +extern maplecl::Option oFLifetimeDse; +extern maplecl::Option oFMathErrno; +extern maplecl::Option oFNilReceivers; +extern maplecl::Option oFNonansiBuiltins; +extern maplecl::Option oFOperatorNames; +extern maplecl::Option oFOptionalDiags; +extern maplecl::Option oFPeephole; +extern maplecl::Option oFPeephole2; +extern maplecl::Option oFPrettyTemplates; +extern maplecl::Option oFPrintfReturnValue; +extern maplecl::Option oFRtti; +extern maplecl::Option oFnoSanitizeAll; +extern maplecl::Option oFSchedInterblock; +extern maplecl::Option oFSchedSpec; +extern maplecl::Option oFnoSetStackExecutable; +extern maplecl::Option oFShowColumn; +extern maplecl::Option oFSignedZeros; +extern maplecl::Option oFStackLimit; +extern maplecl::Option oFThreadsafeStatics; +extern maplecl::Option oFToplevelReorder; +extern maplecl::Option oFTrappingMath; +extern maplecl::Option oFUseCxaGetExceptionPtr; +extern maplecl::Option oFWeak; +extern maplecl::Option oFnoWritableRelocatedRdata; +extern maplecl::Option oFZeroInitializedInBss; +extern maplecl::Option oFnonCallExceptions; +extern maplecl::Option oFnothrowOpt; +extern maplecl::Option oFobjcCallCxxCdtors; +extern maplecl::Option oFobjcDirectDispatch; +extern maplecl::Option oFobjcExceptions; +extern maplecl::Option oFobjcGc; +extern maplecl::Option oFobjcNilcheck; +extern maplecl::Option oFopenacc; +extern maplecl::Option oFopenmp; +extern maplecl::Option oFopenmpSimd; +extern maplecl::Option oFoptInfo; +extern maplecl::Option 
oFoptimizeStrlen; +extern maplecl::Option oForce_cpusubtype_ALL; +extern maplecl::Option oForce_flat_namespace; +extern maplecl::Option oFpackStruct; +extern maplecl::Option oFpartialInlining; +extern maplecl::Option oFpccStructReturn; +extern maplecl::Option oFpchDeps; +extern maplecl::Option oFpchPreprocess; +extern maplecl::Option oFpeelLoops; +extern maplecl::Option oFpermissive; +extern maplecl::Option oFplan9Extensions; +extern maplecl::Option oFpostIpaMemReport; +extern maplecl::Option oFpreIpaMemReport; +extern maplecl::Option oFpredictiveCommoning; +extern maplecl::Option oFprefetchLoopArrays; +extern maplecl::Option oFpreprocessed; +extern maplecl::Option oFprofileArcs; +extern maplecl::Option oFprofileCorrection; +extern maplecl::Option oFprofileGenerate; +extern maplecl::Option oFprofileReorderFunctions; +extern maplecl::Option oFprofileReport; +extern maplecl::Option oFprofileUse; +extern maplecl::Option oFprofileValues; +extern maplecl::Option oFpu; +extern maplecl::Option oFreciprocalMath; +extern maplecl::Option oFrecordGccSwitches; +extern maplecl::Option oFree; +extern maplecl::Option oFrenameRegisters; +extern maplecl::Option oFreorderBlocks; +extern maplecl::Option oFreorderBlocksAndPartition; +extern maplecl::Option oFreorderFunctions; +extern maplecl::Option oFreplaceObjcClasses; +extern maplecl::Option oFrepo; +extern maplecl::Option oFreportBug; +extern maplecl::Option oFrerunCseAfterLoop; +extern maplecl::Option oFrescheduleModuloScheduledLoops; +extern maplecl::Option oFroundingMath; +extern maplecl::Option oFsanitizeAddressUseAfterScope; +extern maplecl::Option oFsanitizeCoverageTracePc; +extern maplecl::Option oFsanitizeUndefinedTrapOnError; +extern maplecl::Option oFschedCriticalPathHeuristic; +extern maplecl::Option oFschedDepCountHeuristic; +extern maplecl::Option oFschedGroupHeuristic; +extern maplecl::Option oFschedLastInsnHeuristic; +extern maplecl::Option oFschedPressure; +extern maplecl::Option oFschedRankHeuristic; +extern 
maplecl::Option oFschedSpecInsnHeuristic; +extern maplecl::Option oFschedSpecLoad; +extern maplecl::Option oFschedSpecLoadDangerous; +extern maplecl::Option oFschedStalledInsns; +extern maplecl::Option oFschedStalledInsnsDep; +extern maplecl::Option oFschedVerbose; +extern maplecl::Option oFsched2UseSuperblocks; +extern maplecl::Option oFscheduleFusion; +extern maplecl::Option oFscheduleInsns; +extern maplecl::Option oFscheduleInsns2; +extern maplecl::Option oFsectionAnchors; +extern maplecl::Option oFselSchedPipelining; +extern maplecl::Option oFselSchedPipeliningOuterLoops; +extern maplecl::Option oFselectiveScheduling; +extern maplecl::Option oFselectiveScheduling2; +extern maplecl::Option oFshortEnums; +extern maplecl::Option oFshortWchar; +extern maplecl::Option oFshrinkWrap; +extern maplecl::Option oFshrinkWrapSeparate; +extern maplecl::Option oFsignalingNans; +extern maplecl::Option oFsignedBitfields; +extern maplecl::Option oFsimdCostModel; +extern maplecl::Option oFsinglePrecisionConstant; +extern maplecl::Option oFsizedDeallocation; +extern maplecl::Option oFsplitIvsInUnroller; +extern maplecl::Option oFsplitLoops; +extern maplecl::Option oFsplitPaths; +extern maplecl::Option oFsplitStack; +extern maplecl::Option oFsplitWideTypes; +extern maplecl::Option oFssaBackprop; +extern maplecl::Option oFssaPhiopt; +extern maplecl::Option oFssoStruct; +extern maplecl::Option oFstackCheck; +extern maplecl::Option oFstackProtector; +extern maplecl::Option oFstackProtectorAll; +extern maplecl::Option oFstackProtectorExplicit; +extern maplecl::Option oFstackUsage; +extern maplecl::Option oFstats; +extern maplecl::Option oFstdargOpt; +extern maplecl::Option oFstoreMerging; +extern maplecl::Option oFstrictAliasing; +extern maplecl::Option oFstrictEnums; +extern maplecl::Option oFstrictOverflow; +extern maplecl::Option oFstrictVolatileBitfields; +extern maplecl::Option oFsyncLibcalls; +extern maplecl::Option oFsyntaxOnly; +extern maplecl::Option oFtestCoverage; +extern 
maplecl::Option oFthreadJumps; +extern maplecl::Option oFtimeReport; +extern maplecl::Option oFtimeReportDetails; +extern maplecl::Option oFtracer; +extern maplecl::Option oFtrampolines; +extern maplecl::Option oFtrapv; +extern maplecl::Option oFtreeBitCcp; +extern maplecl::Option oFtreeBuiltinCallDce; +extern maplecl::Option oFtreeCcp; +extern maplecl::Option oFtreeCh; +extern maplecl::Option oFtreeCoalesceVars; +extern maplecl::Option oFtreeCopyProp; +extern maplecl::Option oFtreeDce; +extern maplecl::Option oFtreeDominatorOpts; +extern maplecl::Option oFtreeDse; +extern maplecl::Option oFtreeForwprop; +extern maplecl::Option oFtreeFre; +extern maplecl::Option oFtreeLoopDistributePatterns; +extern maplecl::Option oFtreeLoopDistribution; +extern maplecl::Option oFtreeLoopIfConvert; +extern maplecl::Option oFtreeLoopIm; +extern maplecl::Option oFtreeLoopIvcanon; +extern maplecl::Option oFtreeLoopLinear; +extern maplecl::Option oFtreeLoopOptimize; +extern maplecl::Option oFtreeLoopVectorize; +extern maplecl::Option oFtreeParallelizeLoops; +extern maplecl::Option oFtreePartialPre; +extern maplecl::Option oFtreePhiprop; +extern maplecl::Option oFtreePre; +extern maplecl::Option oFtreePta; +extern maplecl::Option oFtreeReassoc; +extern maplecl::Option oFtreeSink; +extern maplecl::Option oFtreeSlpVectorize; +extern maplecl::Option oFtreeSlsr; +extern maplecl::Option oFtreeSra; +extern maplecl::Option oFtreeSwitchConversion; +extern maplecl::Option oFtreeTailMerge; +extern maplecl::Option oFtreeTer; +extern maplecl::Option oFtreeVrp; +extern maplecl::Option oFunconstrainedCommons; +extern maplecl::Option oFunitAtATime; +extern maplecl::Option oFunrollAllLoops; +extern maplecl::Option oFunrollLoops; +extern maplecl::Option oFunsafeMathOptimizations; +extern maplecl::Option oFunsignedBitfields; +extern maplecl::Option oFunswitchLoops; +extern maplecl::Option oFunwindTables; +extern maplecl::Option oFuseCxaAtexit; +extern maplecl::Option oFuseLdBfd; +extern maplecl::Option 
oFuseLdGold; +extern maplecl::Option oFuseLinkerPlugin; +extern maplecl::Option oFvarTracking; +extern maplecl::Option oFvarTrackingAssignments; +extern maplecl::Option oFvarTrackingAssignmentsToggle; +extern maplecl::Option oFvariableExpansionInUnroller; +extern maplecl::Option oFvectCostModel; +extern maplecl::Option oFverboseAsm; +extern maplecl::Option oFvisibilityInlinesHidden; +extern maplecl::Option oFvisibilityMsCompat; +extern maplecl::Option oFvpt; +extern maplecl::Option oFvtableVerify; +extern maplecl::Option oFvtvCounts; +extern maplecl::Option oFvtvDebug; +extern maplecl::Option oFweb; +extern maplecl::Option oFwholeProgram; +extern maplecl::Option oFworkingDirectory; +extern maplecl::Option oFwrapv; +extern maplecl::Option oFzeroLink; +extern maplecl::Option oGcoff; +extern maplecl::Option oGcolumnInfo; +extern maplecl::Option oGdwarf; +extern maplecl::Option oGenDecls; +extern maplecl::Option oGfull; +extern maplecl::Option oGgdb; +extern maplecl::Option oGgnuPubnames; +extern maplecl::Option oGnoColumnInfo; +extern maplecl::Option oGnoRecordGccSwitches; +extern maplecl::Option oGnoStrictDwarf; +extern maplecl::Option oGpubnames; +extern maplecl::Option oGrecordGccSwitches; +extern maplecl::Option oGsplitDwarf; +extern maplecl::Option oGstabs; +extern maplecl::Option oGstabsA; +extern maplecl::Option oGstrictDwarf; +extern maplecl::Option oGtoggle; +extern maplecl::Option oGused; +extern maplecl::Option oGvms; +extern maplecl::Option oGxcoff; +extern maplecl::Option oGxcoffA; +extern maplecl::Option oGz; +extern maplecl::Option oH; +extern maplecl::Option oHeaderpad_max_install_names; +extern maplecl::Option oI; +extern maplecl::Option oIdirafter; +extern maplecl::Option oImage_base; +extern maplecl::Option oInit; +extern maplecl::Option oInstall_name; +extern maplecl::Option oKeep_private_externs; +extern maplecl::Option oM; +extern maplecl::Option oM1; +extern maplecl::Option oM10; +extern maplecl::Option oM128bitLongDouble; +extern 
maplecl::Option oM16; +extern maplecl::Option oM16Bit; +extern maplecl::Option oM2; +extern maplecl::Option oM210; +extern maplecl::Option oM2a; +extern maplecl::Option oM2e; +extern maplecl::Option oM2aNofpu; +extern maplecl::Option oM2aSingle; +extern maplecl::Option oM2aSingleOnly; +extern maplecl::Option oM3; +extern maplecl::Option oM31; +extern maplecl::Option oM32; +extern maplecl::Option oM32Bit; +extern maplecl::Option oM32bitDoubles; +extern maplecl::Option oM32r; +extern maplecl::Option oM32r2; +extern maplecl::Option oM32rx; +extern maplecl::Option oM340; +extern maplecl::Option oM3dnow; +extern maplecl::Option oM3dnowa; +extern maplecl::Option oM3e; +extern maplecl::Option oM4; +extern maplecl::Option oM4100; +extern maplecl::Option oM4100Nofpu; +extern maplecl::Option oM4100Single; +extern maplecl::Option oM4100SingleOnly; +extern maplecl::Option oM4200; +extern maplecl::Option oM4200Nofpu; +extern maplecl::Option oM4200Single; +extern maplecl::Option oM4200SingleOnly; +extern maplecl::Option oM4300; +extern maplecl::Option oM4300Nofpu; +extern maplecl::Option oM4300Single; +extern maplecl::Option oM4300SingleOnly; +extern maplecl::Option oM4340; +extern maplecl::Option oM4500; +extern maplecl::Option oM4Nofpu; +extern maplecl::Option oM4Single; +extern maplecl::Option oM4SingleOnly; +extern maplecl::Option oM40; +extern maplecl::Option oM45; +extern maplecl::Option oM4a; +extern maplecl::Option oM4aNofpu; +extern maplecl::Option oM4aSingle; +extern maplecl::Option oM4aSingleOnly; +extern maplecl::Option oM4al; +extern maplecl::Option oM4byteFunctions; +extern maplecl::Option oM5200; +extern maplecl::Option oM5206e; +extern maplecl::Option oM528x; +extern maplecl::Option oM5307; +extern maplecl::Option oM5407; +extern maplecl::Option oM64; +extern maplecl::Option oM64bitDoubles; +extern maplecl::Option oM68000; +extern maplecl::Option oM68010; +extern maplecl::Option oM68020; +extern maplecl::Option oM6802040; +extern maplecl::Option oM6802060; 
+extern maplecl::Option oM68030; +extern maplecl::Option oM68040; +extern maplecl::Option oM68060; +extern maplecl::Option oM68881; +extern maplecl::Option oM8Bit; +extern maplecl::Option oM8bitIdiv; +extern maplecl::Option oM8byteAlign; +extern maplecl::Option oM96bitLongDouble; +extern maplecl::Option oMA6; +extern maplecl::Option oMA7; +extern maplecl::Option oMabicalls; +extern maplecl::Option oMabm; +extern maplecl::Option oMabortOnNoreturn; +extern maplecl::Option oMabs2008; +extern maplecl::Option oMabsLegacy; +extern maplecl::Option oMabsdata; +extern maplecl::Option oMabsdiff; +extern maplecl::Option oMabshi; +extern maplecl::Option oMac0; +extern maplecl::Option oMacc4; +extern maplecl::Option oMacc8; +extern maplecl::Option oMaccumulateArgs; +extern maplecl::Option oMaccumulateOutgoingArgs; +extern maplecl::Option oMaddressModeLong; +extern maplecl::Option oMaddressModeShort; +extern maplecl::Option oMaddressSpaceConversion; +extern maplecl::Option oMads; +extern maplecl::Option oMaes; +extern maplecl::Option oMaixStructReturn; +extern maplecl::Option oMaix32; +extern maplecl::Option oMaix64; +extern maplecl::Option oMalign300; +extern maplecl::Option oMalignCall; +extern maplecl::Option oMalignDouble; +extern maplecl::Option oMalignInt; +extern maplecl::Option oMalignLabels; +extern maplecl::Option oMalignLoops; +extern maplecl::Option oMalignNatural; +extern maplecl::Option oMalignPower; +extern maplecl::Option oMallOpts; +extern maplecl::Option oMallocCc; +extern maplecl::Option oMallowStringInsns; +extern maplecl::Option oMallregs; +extern maplecl::Option oMaltivec; +extern maplecl::Option oMaltivecBe; +extern maplecl::Option oMaltivecLe; +extern maplecl::Option oMam33; +extern maplecl::Option oMam332; +extern maplecl::Option oMam34; +extern maplecl::Option oMandroid; +extern maplecl::Option oMannotateAlign; +extern maplecl::Option oMapcs; +extern maplecl::Option oMapcsFrame; +extern maplecl::Option oMappRegs; +extern maplecl::Option oMARC600; 
+extern maplecl::Option oMARC601; +extern maplecl::Option oMARC700; +extern maplecl::Option oMarclinux; +extern maplecl::Option oMarclinux_prof; +extern maplecl::Option oMargonaut; +extern maplecl::Option oMarm; +extern maplecl::Option oMas100Syntax; +extern maplecl::Option oMasmHex; +extern maplecl::Option oMasmSyntaxUnified; +extern maplecl::Option oMasmDialect; +extern maplecl::Option oMatomic; +extern maplecl::Option oMatomicUpdates; +extern maplecl::Option oMautoLitpools; +extern maplecl::Option oMautoModifyReg; +extern maplecl::Option oMautoPic; +extern maplecl::Option oMaverage; +extern maplecl::Option oMavoidIndexedAddresses; +extern maplecl::Option oMavx; +extern maplecl::Option oMavx2; +extern maplecl::Option oMavx256SplitUnalignedLoad; +extern maplecl::Option oMavx256SplitUnalignedStore; +extern maplecl::Option oMavx512bw; +extern maplecl::Option oMavx512cd; +extern maplecl::Option oMavx512dq; +extern maplecl::Option oMavx512er; +extern maplecl::Option oMavx512f; +extern maplecl::Option oMavx512ifma; +extern maplecl::Option oMavx512pf; +extern maplecl::Option oMavx512vbmi; +extern maplecl::Option oMavx512vl; +extern maplecl::Option oMb; +extern maplecl::Option oMbackchain; +extern maplecl::Option oMbarrelShiftEnabled; +extern maplecl::Option oMbarrelShifter; +extern maplecl::Option oMbarrel_shifter; +extern maplecl::Option oMbaseAddresses; +extern maplecl::Option oMbbitPeephole; +extern maplecl::Option oMbcopy; +extern maplecl::Option oMbcopyBuiltin; +extern maplecl::Option oMbig; +extern maplecl::Option oMbigEndianData; +extern maplecl::Option oMbigSwitch; +extern maplecl::Option oMbigtable; +extern maplecl::Option oMbionic; +extern maplecl::Option oMbitAlign; +extern maplecl::Option oMbitOps; +extern maplecl::Option oMbitfield; +extern maplecl::Option oMbitops; +extern maplecl::Option oMbmi; +extern maplecl::Option oMbranchCheap; +extern maplecl::Option oMbranchExpensive; +extern maplecl::Option oMbranchHints; +extern maplecl::Option oMbranchLikely; 
+extern maplecl::Option oMbranchPredict; +extern maplecl::Option oMbssPlt; +extern maplecl::Option oMbuildConstants; +extern maplecl::Option oMbwx; +extern maplecl::Option oMbypassCache; +extern maplecl::Option oMc68000; +extern maplecl::Option oMc68020; +extern maplecl::Option oMcacheVolatile; +extern maplecl::Option oMcallEabi; +extern maplecl::Option oMcallAixdesc; +extern maplecl::Option oMcallFreebsd; +extern maplecl::Option oMcallLinux; +extern maplecl::Option oMcallOpenbsd; +extern maplecl::Option oMcallNetbsd; +extern maplecl::Option oMcallPrologues; +extern maplecl::Option oMcallSysv; +extern maplecl::Option oMcallSysvEabi; +extern maplecl::Option oMcallSysvNoeabi; +extern maplecl::Option oMcalleeSuperInterworking; +extern maplecl::Option oMcallerCopies; +extern maplecl::Option oMcallerSuperInterworking; +extern maplecl::Option oMcallgraphData; +extern maplecl::Option oMcaseVectorPcrel; +extern maplecl::Option oMcbcond; +extern maplecl::Option oMcbranchForceDelaySlot; +extern maplecl::Option oMccInit; +extern maplecl::Option oMcfv4e; +extern maplecl::Option oMcheckZeroDivision; +extern maplecl::Option oMcix; +extern maplecl::Option oMcld; +extern maplecl::Option oMclearHwcap; +extern maplecl::Option oMclflushopt; +extern maplecl::Option oMclip; +extern maplecl::Option oMclzero; +extern maplecl::Option oMcmov; +extern maplecl::Option oMcmove; +extern maplecl::Option oMcmpb; +extern maplecl::Option oMcmse; +extern maplecl::Option oMcodeDensity; +extern maplecl::Option oMcodeRegion; +extern maplecl::Option oMcompactBranchesAlways; +extern maplecl::Option oMcompactBranchesNever; +extern maplecl::Option oMcompactBranchesOptimal; +extern maplecl::Option oMcompactCasesi; +extern maplecl::Option oMcompatAlignParm; +extern maplecl::Option oMcondExec; +extern maplecl::Option oMcondMove; +extern maplecl::Option oMconsole; +extern maplecl::Option oMconstAlign; +extern maplecl::Option oMconst16; +extern maplecl::Option oMconstantGp; +extern maplecl::Option oMcop; 
+extern maplecl::Option oMcop32; +extern maplecl::Option oMcop64; +extern maplecl::Option oMcorea; +extern maplecl::Option oMcoreb; +extern maplecl::Option oMcpu32; +extern maplecl::Option oMcr16c; +extern maplecl::Option oMcr16cplus; +extern maplecl::Option oMcrc32; +extern maplecl::Option oMcrypto; +extern maplecl::Option oMcsyncAnomaly; +extern maplecl::Option oMctorDtor; +extern maplecl::Option oMcustomFpuCfg; +extern maplecl::Option oMcustomInsn; +extern maplecl::Option oMcx16; +extern maplecl::Option oMdalign; +extern maplecl::Option oMdataAlign; +extern maplecl::Option oMdataModel; +extern maplecl::Option oMdc; +extern maplecl::Option oMdebug; +extern maplecl::Option oMdebugMainPrefix; +extern maplecl::Option oMdecAsm; +extern maplecl::Option oMdirectMove; +extern maplecl::Option oMdisableCallt; +extern maplecl::Option oMdisableFpregs; +extern maplecl::Option oMdisableIndexing; +extern maplecl::Option oMdiv; +extern maplecl::Option oMdivRem; +extern maplecl::Option oMdivStrategy; +extern maplecl::Option oMdivideBreaks; +extern maplecl::Option oMdivideEnabled; +extern maplecl::Option oMdivideTraps; +extern maplecl::Option oMdll; +extern maplecl::Option oMdlmzb; +extern maplecl::Option oMdmx; +extern maplecl::Option oMdouble; +extern maplecl::Option oMdoubleFloat; +extern maplecl::Option oMdpfp; +extern maplecl::Option oMdpfpCompact; +extern maplecl::Option oMdpfpFast; +extern maplecl::Option oMdpfp_compact; +extern maplecl::Option oMdpfp_fast; +extern maplecl::Option oMdsp; +extern maplecl::Option oMdspPacka; +extern maplecl::Option oMdspr2; +extern maplecl::Option oMdsp_packa; +extern maplecl::Option oMdualNops; +extern maplecl::Option oMdumpTuneFeatures; +extern maplecl::Option oMdvbf; +extern maplecl::Option oMdwarf2Asm; +extern maplecl::Option oMdword; +extern maplecl::Option oMdynamicNoPic; +extern maplecl::Option oMea; +extern maplecl::Option oMEa; +extern maplecl::Option oMea32; +extern maplecl::Option oMea64; +extern maplecl::Option oMeabi; +extern 
maplecl::Option oMearlyCbranchsi; +extern maplecl::Option oMearlyStopBits; +extern maplecl::Option oMeb; +extern maplecl::Option oMel; +extern maplecl::Option oMelf; +extern maplecl::Option oMemb; +extern maplecl::Option oMembeddedData; +extern maplecl::Option oMep; +extern maplecl::Option oMepsilon; +extern maplecl::Option oMesa; +extern maplecl::Option oMetrax100; +extern maplecl::Option oMetrax4; +extern maplecl::Option oMeva; +extern maplecl::Option oMexpandAdddi; +extern maplecl::Option oMexplicitRelocs; +extern maplecl::Option oMexr; +extern maplecl::Option oMexternSdata; +extern maplecl::Option oMf16c; +extern maplecl::Option oMfastFp; +extern maplecl::Option oMfastIndirectCalls; +extern maplecl::Option oMfastSwDiv; +extern maplecl::Option oMfasterStructs; +extern maplecl::Option oMfdiv; +extern maplecl::Option oMfdpic; +extern maplecl::Option oMfentry; +extern maplecl::Option oMfix; +extern maplecl::Option oMfix24k; +extern maplecl::Option oMfixAndContinue; +extern maplecl::Option oMfixAt697f; +extern maplecl::Option oMfixCortexA53835769; +extern maplecl::Option oMfixCortexA53843419; +extern maplecl::Option oMfixCortexM3Ldrd; +extern maplecl::Option oMfixGr712rc; +extern maplecl::Option oMfixR10000; +extern maplecl::Option oMfixR4000; +extern maplecl::Option oMfixR4400; +extern maplecl::Option oMfixRm7000; +extern maplecl::Option oMfixSb1; +extern maplecl::Option oMfixUt699; +extern maplecl::Option oMfixUt700; +extern maplecl::Option oMfixVr4120; +extern maplecl::Option oMfixVr4130; +extern maplecl::Option oMfixedCc; +extern maplecl::Option oMflat; +extern maplecl::Option oMflipMips16; +extern maplecl::Option oMfloatAbi; +extern maplecl::Option oMfloatIeee; +extern maplecl::Option oMfloatVax; +extern maplecl::Option oMfloat128; +extern maplecl::Option oMfloat128Hardware; +extern maplecl::Option oMfloat32; +extern maplecl::Option oMfloat64; +extern maplecl::Option oMfma; +extern maplecl::Option oMfma4; +extern maplecl::Option oMfmaf; +extern maplecl::Option 
oMfmovd; +extern maplecl::Option oMforceNoPic; +extern maplecl::Option oMfpExceptions; +extern maplecl::Option oMfpMode; +extern maplecl::Option oMfp16Format; +extern maplecl::Option oMfp32; +extern maplecl::Option oMfp64; +extern maplecl::Option oMfpmath; +extern maplecl::Option oMfpr32; +extern maplecl::Option oMfpr64; +extern maplecl::Option oMfprnd; +extern maplecl::Option oMfpxx; +extern maplecl::Option oMfractConvertTruncate; +extern maplecl::Option oMframeHeaderOpt; +extern maplecl::Option oMfriz; +extern maplecl::Option oMfsca; +extern maplecl::Option oMfsgsbase; +extern maplecl::Option oMfsmuld; +extern maplecl::Option oMfsrra; +extern maplecl::Option oMfullRegs; +extern maplecl::Option oMfullToc; +extern maplecl::Option oMfusedMadd; +extern maplecl::Option oMfxsr; +extern maplecl::Option oMG; +extern maplecl::Option oMg10; +extern maplecl::Option oMg13; +extern maplecl::Option oMg14; +extern maplecl::Option oMgas; +extern maplecl::Option oMgccAbi; +extern maplecl::Option oMgenCellMicrocode; +extern maplecl::Option oMgeneralRegsOnly; +extern maplecl::Option oMghs; +extern maplecl::Option oMglibc; +extern maplecl::Option oMgnu; +extern maplecl::Option oMgnuAs; +extern maplecl::Option oMgnuAttribute; +extern maplecl::Option oMgnuLd; +extern maplecl::Option oMgomp; +extern maplecl::Option oMgotplt; +extern maplecl::Option oMgp32; +extern maplecl::Option oMgp64; +extern maplecl::Option oMgpopt; +extern maplecl::Option oMgpr32; +extern maplecl::Option oMgpr64; +extern maplecl::Option oMgprelRo; +extern maplecl::Option oMh; +extern maplecl::Option oMhal; +extern maplecl::Option oMhalfRegFile; +extern maplecl::Option oMhardDfp; +extern maplecl::Option oMhardFloat; +extern maplecl::Option oMhardQuadFloat; +extern maplecl::Option oMhardlit; +extern maplecl::Option oMhpLd; +extern maplecl::Option oMhtm; +extern maplecl::Option oMhwDiv; +extern maplecl::Option oMhwMul; +extern maplecl::Option oMhwMulx; +extern maplecl::Option oMiamcu; +extern maplecl::Option oMicplb; 
+extern maplecl::Option oMidSharedLibrary; +extern maplecl::Option oMieee; +extern maplecl::Option oMieeeConformant; +extern maplecl::Option oMieeeFp; +extern maplecl::Option oMieeeWithInexact; +extern maplecl::Option oMilp32; +extern maplecl::Option oMimadd; +extern maplecl::Option oMimpureText; +extern maplecl::Option oMincomingStackBoundary; +extern maplecl::Option oMindexedLoads; +extern maplecl::Option oMinlineAllStringops; +extern maplecl::Option oMinlineFloatDivideMaxThroughput; +extern maplecl::Option oMinlineFloatDivideMinLatency; +extern maplecl::Option oMinlineIc_invalidate; +extern maplecl::Option oMinlineIntDivideMaxThroughput; +extern maplecl::Option oMinlineIntDivideMinLatency; +extern maplecl::Option oMinlinePlt; +extern maplecl::Option oMinlineSqrtMaxThroughput; +extern maplecl::Option oMinlineSqrtMinLatency; +extern maplecl::Option oMinlineStringopsDynamically; +extern maplecl::Option oMinrt; +extern maplecl::Option oMintRegister; +extern maplecl::Option oMint16; +extern maplecl::Option oMint32; +extern maplecl::Option oMint8; +extern maplecl::Option oMinterlinkCompressed; +extern maplecl::Option oMinterlinkMips16; +extern maplecl::Option oMioVolatile; +extern maplecl::Option oMips1; +extern maplecl::Option oMips16; +extern maplecl::Option oMips2; +extern maplecl::Option oMips3; +extern maplecl::Option oMips32; +extern maplecl::Option oMips32r3; +extern maplecl::Option oMips32r5; +extern maplecl::Option oMips32r6; +extern maplecl::Option oMips3d; +extern maplecl::Option oMips4; +extern maplecl::Option oMips64; +extern maplecl::Option oMips64r2; +extern maplecl::Option oMips64r3; +extern maplecl::Option oMips64r5; +extern maplecl::Option oMips64r6; +extern maplecl::Option oMisize; +extern maplecl::Option oMissueRateNumber; +extern maplecl::Option oMivc2; +extern maplecl::Option oMjsr; +extern maplecl::Option oMjumpInDelay; +extern maplecl::Option oMkernel; +extern maplecl::Option oMknuthdiv; +extern maplecl::Option oMl; +extern maplecl::Option 
oMlarge; +extern maplecl::Option oMlargeData; +extern maplecl::Option oMlargeDataThreshold; +extern maplecl::Option oMlargeMem; +extern maplecl::Option oMlargeText; +extern maplecl::Option oMleadz; +extern maplecl::Option oMleafIdSharedLibrary; +extern maplecl::Option oMlibfuncs; +extern maplecl::Option oMlibraryPic; +extern maplecl::Option oMlinkedFp; +extern maplecl::Option oMlinkerOpt; +extern maplecl::Option oMlinux; +extern maplecl::Option oMlittle; +extern maplecl::Option oMlittleEndian; +extern maplecl::Option oMlittleEndianData; +extern maplecl::Option oMliw; +extern maplecl::Option oMll64; +extern maplecl::Option oMllsc; +extern maplecl::Option oMloadStorePairs; +extern maplecl::Option oMlocalSdata; +extern maplecl::Option oMlock; +extern maplecl::Option oMlongCalls; +extern maplecl::Option oMlongDouble128; +extern maplecl::Option oMlongDouble64; +extern maplecl::Option oMlongDouble80; +extern maplecl::Option oMlongJumpTableOffsets; +extern maplecl::Option oMlongJumps; +extern maplecl::Option oMlongLoadStore; +extern maplecl::Option oMlong32; +extern maplecl::Option oMlong64; +extern maplecl::Option oMlongcall; +extern maplecl::Option oMlongcalls; +extern maplecl::Option oMloop; +extern maplecl::Option oMlow64k; +extern maplecl::Option oMlowPrecisionRecipSqrt; +extern maplecl::Option oMlp64; +extern maplecl::Option oMlra; +extern maplecl::Option oMlraPriorityCompact; +extern maplecl::Option oMlraPriorityNoncompact; +extern maplecl::Option oMlraPriorityNone; +extern maplecl::Option oMlwp; +extern maplecl::Option oMlxc1Sxc1; +extern maplecl::Option oMlzcnt; +extern maplecl::Option oMM; +extern maplecl::Option oMm; +extern maplecl::Option oMmac; +extern maplecl::Option oMmac24; +extern maplecl::Option oMmacD16; +extern maplecl::Option oMmac_24; +extern maplecl::Option oMmac_d16; +extern maplecl::Option oMmad; +extern maplecl::Option oMmadd4; +extern maplecl::Option oMmainkernel; +extern maplecl::Option oMmalloc64; +extern maplecl::Option oMmax; +extern 
maplecl::Option oMmaxConstantSize; +extern maplecl::Option oMmaxStackFrame; +extern maplecl::Option oMmcountRaAddress; +extern maplecl::Option oMmcu; +extern maplecl::Option oMMD; +extern maplecl::Option oMmedia; +extern maplecl::Option oMmediumCalls; +extern maplecl::Option oMmemcpy; +extern maplecl::Option oMmemcpyStrategyStrategy; +extern maplecl::Option oMmemsetStrategyStrategy; +extern maplecl::Option oMmfcrf; +extern maplecl::Option oMmfpgpr; +extern maplecl::Option oMmicromips; +extern maplecl::Option oMminimalToc; +extern maplecl::Option oMminmax; +extern maplecl::Option oMmitigateRop; +extern maplecl::Option oMmixedCode; +extern maplecl::Option oMmmx; +extern maplecl::Option oMmodelLarge; +extern maplecl::Option oMmodelMedium; +extern maplecl::Option oMmodelSmall; +extern maplecl::Option oMmovbe; +extern maplecl::Option oMmpx; +extern maplecl::Option oMmpyOption; +extern maplecl::Option oMmsBitfields; +extern maplecl::Option oMmt; +extern maplecl::Option oMmul; +extern maplecl::Option oMmulBugWorkaround; +extern maplecl::Option oMmulx; +extern maplecl::Option oMmul32x16; +extern maplecl::Option oMmul64; +extern maplecl::Option oMmuladd; +extern maplecl::Option oMmulhw; +extern maplecl::Option oMmult; +extern maplecl::Option oMmultBug; +extern maplecl::Option oMmultcost; +extern maplecl::Option oMmultiCondExec; +extern maplecl::Option oMmulticore; +extern maplecl::Option oMmultiple; +extern maplecl::Option oMmusl; +extern maplecl::Option oMmvcle; +extern maplecl::Option oMmvme; +extern maplecl::Option oMmwaitx; +extern maplecl::Option oMn; +extern maplecl::Option oMnFlash; +extern maplecl::Option oMnan2008; +extern maplecl::Option oMnanLegacy; +extern maplecl::Option oMneonFor64bits; +extern maplecl::Option oMnestedCondExec; +extern maplecl::Option oMnhwloop; +extern maplecl::Option oMnoAlignStringops; +extern maplecl::Option oMnoBrcc; +extern maplecl::Option oMnoClearbss; +extern maplecl::Option oMnoCrt0; +extern maplecl::Option oMnoDefault; +extern 
maplecl::Option oMnoDpfpLrsr; +extern maplecl::Option oMnoEflags; +extern maplecl::Option oMnoFancyMath387; +extern maplecl::Option oMnoFloat; +extern maplecl::Option oMnoFpInToc; +extern maplecl::Option oMFpReg; +extern maplecl::Option oMnoFpRetIn387; +extern maplecl::Option oMnoInlineFloatDivide; +extern maplecl::Option oMnoInlineIntDivide; +extern maplecl::Option oMnoInlineSqrt; +extern maplecl::Option oMnoInterrupts; +extern maplecl::Option oMnoLsim; +extern maplecl::Option oMnoMillicode; +extern maplecl::Option oMnoMpy; +extern maplecl::Option oMnoOpts; +extern maplecl::Option oMnoPic; +extern maplecl::Option oMnoPostinc; +extern maplecl::Option oMnoPostmodify; +extern maplecl::Option oMnoRedZone; +extern maplecl::Option oMnoRoundNearest; +extern maplecl::Option oMnoSchedProlog; +extern maplecl::Option oMnoSideEffects; +extern maplecl::Option oMnoSoftCmpsf; +extern maplecl::Option oMnoSpaceRegs; +extern maplecl::Option oMSpe; +extern maplecl::Option oMnoSumInToc; +extern maplecl::Option oMnoVectDouble; +extern maplecl::Option oMnobitfield; +extern maplecl::Option oMnodiv; +extern maplecl::Option oMnoliw; +extern maplecl::Option oMnomacsave; +extern maplecl::Option oMnopFunDllimport; +extern maplecl::Option oMnopMcount; +extern maplecl::Option oMnops; +extern maplecl::Option oMnorm; +extern maplecl::Option oMnosetlb; +extern maplecl::Option oMnosplitLohi; +extern maplecl::Option oModdSpreg; +extern maplecl::Option oMomitLeafFramePointer; +extern maplecl::Option oMoneByteBool; +extern maplecl::Option oMoptimize; +extern maplecl::Option oMoptimizeMembar; +extern maplecl::Option oMP; +extern maplecl::Option oMpaRisc10; +extern maplecl::Option oMpaRisc11; +extern maplecl::Option oMpaRisc20; +extern maplecl::Option oMpack; +extern maplecl::Option oMpackedStack; +extern maplecl::Option oMpadstruct; +extern maplecl::Option oMpaired; +extern maplecl::Option oMpairedSingle; +extern maplecl::Option oMpcRelativeLiteralLoads; +extern maplecl::Option oMpc32; +extern 
maplecl::Option oMpc64; +extern maplecl::Option oMpc80; +extern maplecl::Option oMpclmul; +extern maplecl::Option oMpcrel; +extern maplecl::Option oMpdebug; +extern maplecl::Option oMpe; +extern maplecl::Option oMpeAlignedCommons; +extern maplecl::Option oMperfExt; +extern maplecl::Option oMpicDataIsTextRelative; +extern maplecl::Option oMpicRegister; +extern maplecl::Option oMpid; +extern maplecl::Option oMpku; +extern maplecl::Option oMpointerSizeSize; +extern maplecl::Option oMpointersToNestedFunctions; +extern maplecl::Option oMpokeFunctionName; +extern maplecl::Option oMpopc; +extern maplecl::Option oMpopcnt; +extern maplecl::Option oMpopcntb; +extern maplecl::Option oMpopcntd; +extern maplecl::Option oMportableRuntime; +extern maplecl::Option oMpower8Fusion; +extern maplecl::Option oMpower8Vector; +extern maplecl::Option oMpowerpcGfxopt; +extern maplecl::Option oMpowerpcGpopt; +extern maplecl::Option oMpowerpc64; +extern maplecl::Option oMpreferAvx128; +extern maplecl::Option oMpreferShortInsnRegs; +extern maplecl::Option oMprefergot; +extern maplecl::Option oMpreferredStackBoundary; +extern maplecl::Option oMprefetchwt1; +extern maplecl::Option oMpretendCmove; +extern maplecl::Option oMprintTuneInfo; +extern maplecl::Option oMprologFunction; +extern maplecl::Option oMprologueEpilogue; +extern maplecl::Option oMprototype; +extern maplecl::Option oMpureCode; +extern maplecl::Option oMpushArgs; +extern maplecl::Option oMQ; +extern maplecl::Option oMqClass; +extern maplecl::Option oMquadMemory; +extern maplecl::Option oMquadMemoryAtomic; +extern maplecl::Option oMr10kCacheBarrier; +extern maplecl::Option oMRcq; +extern maplecl::Option oMRcw; +extern maplecl::Option oMrdrnd; +extern maplecl::Option oMreadonlyInSdata; +extern maplecl::Option oMrecipPrecision; +extern maplecl::Option oMrecordMcount; +extern maplecl::Option oMreducedRegs; +extern maplecl::Option oMregisterNames; +extern maplecl::Option oMregnames; +extern maplecl::Option oMregparm; +extern 
maplecl::Option oMrelax; +extern maplecl::Option oMrelaxImmediate; +extern maplecl::Option oMrelaxPicCalls; +extern maplecl::Option oMrelocatable; +extern maplecl::Option oMrelocatableLib; +extern maplecl::Option oMrenesas; +extern maplecl::Option oMrepeat; +extern maplecl::Option oMrestrictIt; +extern maplecl::Option oMreturnPointerOnD0; +extern maplecl::Option oMrh850Abi; +extern maplecl::Option oMrl78; +extern maplecl::Option oMrmw; +extern maplecl::Option oMrtd; +extern maplecl::Option oMrtm; +extern maplecl::Option oMrtp; +extern maplecl::Option oMrtsc; +extern maplecl::Option oMs; +extern maplecl::Option oMs2600; +extern maplecl::Option oMsafeDma; +extern maplecl::Option oMsafeHints; +extern maplecl::Option oMsahf; +extern maplecl::Option oMsatur; +extern maplecl::Option oMsaveAccInInterrupts; +extern maplecl::Option oMsaveMducInInterrupts; +extern maplecl::Option oMsaveRestore; +extern maplecl::Option oMsaveTocIndirect; +extern maplecl::Option oMscc; +extern maplecl::Option oMschedArDataSpec; +extern maplecl::Option oMschedArInDataSpec; +extern maplecl::Option oMschedBrDataSpec; +extern maplecl::Option oMschedBrInDataSpec; +extern maplecl::Option oMschedControlSpec; +extern maplecl::Option oMschedCountSpecInCriticalPath; +extern maplecl::Option oMschedFpMemDepsZeroCost; +extern maplecl::Option oMschedInControlSpec; +extern maplecl::Option oMschedMaxMemoryInsns; +extern maplecl::Option oMschedMaxMemoryInsnsHardLimit; +extern maplecl::Option oMschedPreferNonControlSpecInsns; +extern maplecl::Option oMschedPreferNonDataSpecInsns; +extern maplecl::Option oMschedSpecLdc; +extern maplecl::Option oMschedStopBitsAfterEveryCycle; +extern maplecl::Option oMscore5; +extern maplecl::Option oMscore5u; +extern maplecl::Option oMscore7; +extern maplecl::Option oMscore7d; +extern maplecl::Option oMsdata; +extern maplecl::Option oMsdataAll; +extern maplecl::Option oMsdataData; +extern maplecl::Option oMsdataDefault; +extern maplecl::Option oMsdataEabi; +extern 
maplecl::Option oMsdataNone; +extern maplecl::Option oMsdataSdata; +extern maplecl::Option oMsdataSysv; +extern maplecl::Option oMsdataUse; +extern maplecl::Option oMsdram; +extern maplecl::Option oMsecurePlt; +extern maplecl::Option oMselSchedDontCheckControlSpec; +extern maplecl::Option oMsepData; +extern maplecl::Option oMserializeVolatile; +extern maplecl::Option oMsetlb; +extern maplecl::Option oMsha; +extern maplecl::Option oMshort; +extern maplecl::Option oMsignExtendEnabled; +extern maplecl::Option oMsim; +extern maplecl::Option oMsimd; +extern maplecl::Option oMsimnovec; +extern maplecl::Option oMsimpleFpu; +extern maplecl::Option oMsingleExit; +extern maplecl::Option oMsingleFloat; +extern maplecl::Option oMsinglePicBase; +extern maplecl::Option oMsio; +extern maplecl::Option oMskipRaxSetup; +extern maplecl::Option oMslowBytes; +extern maplecl::Option oMslowFlashData; +extern maplecl::Option oMsmall; +extern maplecl::Option oMsmallData; +extern maplecl::Option oMsmallDataLimit; +extern maplecl::Option oMsmallDivides; +extern maplecl::Option oMsmallExec; +extern maplecl::Option oMsmallMem; +extern maplecl::Option oMsmallModel; +extern maplecl::Option oMsmallText; +extern maplecl::Option oMsmall16; +extern maplecl::Option oMsmallc; +extern maplecl::Option oMsmartmips; +extern maplecl::Option oMsoftFloat; +extern maplecl::Option oMsoftQuadFloat; +extern maplecl::Option oMsoftStack; +extern maplecl::Option oMsp8; +extern maplecl::Option oMspace; +extern maplecl::Option oMspecldAnomaly; +extern maplecl::Option oMspfp; +extern maplecl::Option oMspfpCompact; +extern maplecl::Option oMspfpFast; +extern maplecl::Option oMspfp_compact; +extern maplecl::Option oMspfp_fast; +extern maplecl::Option oMsplitAddresses; +extern maplecl::Option oMsplitVecmoveEarly; +extern maplecl::Option oMsse; +extern maplecl::Option oMsse2; +extern maplecl::Option oMsse2avx; +extern maplecl::Option oMsse3; +extern maplecl::Option oMsse4; +extern maplecl::Option oMsse41; +extern 
maplecl::Option oMsse42; +extern maplecl::Option oMsse4a; +extern maplecl::Option oMsseregparm; +extern maplecl::Option oMssse3; +extern maplecl::Option oMstackAlign; +extern maplecl::Option oMstackBias; +extern maplecl::Option oMstackCheckL1; +extern maplecl::Option oMstackIncrement; +extern maplecl::Option oMstackOffset; +extern maplecl::Option oMstackrealign; +extern maplecl::Option oMstdStructReturn; +extern maplecl::Option oMstdmain; +extern maplecl::Option oMstrictAlign; +extern maplecl::Option oMstrictX; +extern maplecl::Option oMstring; +extern maplecl::Option oMstringopStrategyAlg; +extern maplecl::Option oMstructureSizeBoundary; +extern maplecl::Option oMsubxc; +extern maplecl::Option oMsvMode; +extern maplecl::Option oMsvr4StructReturn; +extern maplecl::Option oMswap; +extern maplecl::Option oMswape; +extern maplecl::Option oMsym32; +extern maplecl::Option oMsynci; +extern maplecl::Option oMsysCrt0; +extern maplecl::Option oMsysLib; +extern maplecl::Option oMtargetAlign; +extern maplecl::Option oMtas; +extern maplecl::Option oMtbm; +extern maplecl::Option oMtelephony; +extern maplecl::Option oMtextSectionLiterals; +extern maplecl::Option oMtf; +extern maplecl::Option oMthread; +extern maplecl::Option oMthreads; +extern maplecl::Option oMthumb; +extern maplecl::Option oMthumbInterwork; +extern maplecl::Option oMtinyStack; +extern maplecl::Option oMTLS; +extern maplecl::Option oMtlsDirectSegRefs; +extern maplecl::Option oMtlsMarkers; +extern maplecl::Option oMtoc; +extern maplecl::Option oMtomcatStats; +extern maplecl::Option oMtoplevelSymbols; +extern maplecl::Option oMtpcsFrame; +extern maplecl::Option oMtpcsLeafFrame; +extern maplecl::Option oMtpfTrace; +extern maplecl::Option oMtuneCtrlFeatureList; +extern maplecl::Option oMuclibc; +extern maplecl::Option oMuls; +extern maplecl::Option oMultcostNumber; +extern maplecl::Option oMultilibLibraryPic; +extern maplecl::Option oMmultiplyEnabled; +extern maplecl::Option oMultiply_defined; +extern 
maplecl::Option oMultiply_defined_unused; +extern maplecl::Option oMulti_module; +extern maplecl::Option oMunalignProbThreshold; +extern maplecl::Option oMunalignedAccess; +extern maplecl::Option oMunalignedDoubles; +extern maplecl::Option oMunicode; +extern maplecl::Option oMuniformSimt; +extern maplecl::Option oMuninitConstInRodata; +extern maplecl::Option oMunixAsm; +extern maplecl::Option oMupdate; +extern maplecl::Option oMupperRegs; +extern maplecl::Option oMupperRegsDf; +extern maplecl::Option oMupperRegsDi; +extern maplecl::Option oMupperRegsSf; +extern maplecl::Option oMuserEnabled; +extern maplecl::Option oMuserMode; +extern maplecl::Option oMusermode; +extern maplecl::Option oMv3push; +extern maplecl::Option oMv850; +extern maplecl::Option oMv850e; +extern maplecl::Option oMv850e1; +extern maplecl::Option oMv850e2; +extern maplecl::Option oMv850e2v3; +extern maplecl::Option oMv850e2v4; +extern maplecl::Option oMv850e3v5; +extern maplecl::Option oMv850es; +extern maplecl::Option oMv8plus; +extern maplecl::Option oMvect8RetInMem; +extern maplecl::Option oMvirt; +extern maplecl::Option oMvis; +extern maplecl::Option oMvis2; +extern maplecl::Option oMvis3; +extern maplecl::Option oMvis4; +extern maplecl::Option oMvis4b; +extern maplecl::Option oMvliwBranch; +extern maplecl::Option oMvmsReturnCodes; +extern maplecl::Option oMvolatileAsmStop; +extern maplecl::Option oMvolatileCache; +extern maplecl::Option oMvr4130Align; +extern maplecl::Option oMvrsave; +extern maplecl::Option oMvsx; +extern maplecl::Option oMvx; +extern maplecl::Option oMvxworks; +extern maplecl::Option oMvzeroupper; +extern maplecl::Option oMwarnCellMicrocode; +extern maplecl::Option oMwarnDynamicstack; +extern maplecl::Option oMwarnMcu; +extern maplecl::Option oMwarnMultipleFastInterrupts; +extern maplecl::Option oMwarnReloc; +extern maplecl::Option oMwideBitfields; +extern maplecl::Option oMwin32; +extern maplecl::Option oMwindows; +extern maplecl::Option oMwordRelocations; +extern 
maplecl::Option oMx32; +extern maplecl::Option oMxgot; +extern maplecl::Option oMxilinxFpu; +extern maplecl::Option oMxlBarrelShift; +extern maplecl::Option oMxlCompat; +extern maplecl::Option oMxlFloatConvert; +extern maplecl::Option oMxlFloatSqrt; +extern maplecl::Option oMxlGpOpt; +extern maplecl::Option oMxlMultiplyHigh; +extern maplecl::Option oMxlPatternCompare; +extern maplecl::Option oMxlReorder; +extern maplecl::Option oMxlSoftDiv; +extern maplecl::Option oMxlSoftMul; +extern maplecl::Option oMxlStackCheck; +extern maplecl::Option oMxop; +extern maplecl::Option oMxpa; +extern maplecl::Option oMxsave; +extern maplecl::Option oMxsavec; +extern maplecl::Option oMxsaveopt; +extern maplecl::Option oMxsaves; +extern maplecl::Option oMxy; +extern maplecl::Option oMyellowknife; +extern maplecl::Option oMzarch; +extern maplecl::Option oMzdcbranch; +extern maplecl::Option oMzeroExtend; +extern maplecl::Option oMzvector; +extern maplecl::Option oNo80387; +extern maplecl::Option oNoCanonicalPrefixes; +extern maplecl::Option oNoIntegratedCpp; +extern maplecl::Option oNoSysrootSuffix; +extern maplecl::Option oNoall_load; +extern maplecl::Option oNocpp; +extern maplecl::Option oNodefaultlibs; +extern maplecl::Option oNodevicelib; +extern maplecl::Option oNofixprebinding; +extern maplecl::Option oNofpu; +extern maplecl::Option oNolibdld; +extern maplecl::Option oNomultidefs; +extern maplecl::Option oNonStatic; +extern maplecl::Option oNoprebind; +extern maplecl::Option oNoseglinkedit; +extern maplecl::Option oNostartfiles; +extern maplecl::Option oNostdinc; +extern maplecl::Option oNo_dead_strip_inits_and_terms; +extern maplecl::Option oOfast; +extern maplecl::Option oOg; +extern maplecl::Option oP; +extern maplecl::Option oLargeP; +extern maplecl::Option oPagezero_size; +extern maplecl::Option oParam; +extern maplecl::Option oPassExitCodes; +extern maplecl::Option oPedantic; +extern maplecl::Option oPedanticErrors; +extern maplecl::Option oPg; +extern maplecl::Option 
oPlt; +extern maplecl::Option oPrebind; +extern maplecl::Option oPrebind_all_twolevel_modules; +extern maplecl::Option oPrintFileName; +extern maplecl::Option oPrintLibgccFileName; +extern maplecl::Option oPrintMultiDirectory; +extern maplecl::Option oPrintMultiLib; +extern maplecl::Option oPrintMultiOsDirectory; +extern maplecl::Option oPrintMultiarch; +extern maplecl::Option oPrintObjcRuntimeInfo; +extern maplecl::Option oPrintProgName; +extern maplecl::Option oPrintSearchDirs; +extern maplecl::Option oPrintSysroot; +extern maplecl::Option oPrintSysrootHeadersSuffix; +extern maplecl::Option oPrivate_bundle; +extern maplecl::Option oPthreads; +extern maplecl::Option oQ; +extern maplecl::Option oQn; +extern maplecl::Option oQy; +extern maplecl::Option oRead_only_relocs; +extern maplecl::Option oRemap; +extern maplecl::Option oSectalign; +extern maplecl::Option oSectcreate; +extern maplecl::Option oSectobjectsymbols; +extern maplecl::Option oSectorder; +extern maplecl::Option oSeg1addr; +extern maplecl::Option oSegaddr; +extern maplecl::Option oSeglinkedit; +extern maplecl::Option oSegprot; +extern maplecl::Option oSegs_read_only_addr; +extern maplecl::Option oSegs_read_write_addr; +extern maplecl::Option oSeg_addr_table; +extern maplecl::Option oSeg_addr_table_filename; +extern maplecl::Option oSharedLibgcc; +extern maplecl::Option oSim; +extern maplecl::Option oSim2; +extern maplecl::Option oSingle_module; +extern maplecl::Option oStaticLibgcc; +extern maplecl::Option oStaticLibstdc; +extern maplecl::Option oSub_library; +extern maplecl::Option oSub_umbrella; +extern maplecl::Option oTargetHelp; +extern maplecl::Option oThreads; +extern maplecl::Option oTnoAndroidCc; +extern maplecl::Option oTnoAndroidLd; +extern maplecl::Option oTraditional; +extern maplecl::Option oTraditionalCpp; +extern maplecl::Option oTrigraphs; +extern maplecl::Option oTwolevel_namespace; +extern maplecl::Option oUmbrella; +extern maplecl::Option oUndef; +extern maplecl::Option oUndefined; 
+extern maplecl::Option oUnexported_symbols_list; +extern maplecl::Option oWhatsloaded; +extern maplecl::Option oWhyload; +extern maplecl::Option oWLtoTypeMismatch; +extern maplecl::Option oWmisspelledIsr; +extern maplecl::Option oWrapper; +extern maplecl::Option oXbindLazy; +extern maplecl::Option oXbindNow; +extern maplecl::Option oStd03; +extern maplecl::Option oStd0x; +extern maplecl::Option oStd11; +extern maplecl::Option oStd14; +extern maplecl::Option oStd17; +extern maplecl::Option oStd1y; +extern maplecl::Option oStd1z; +extern maplecl::Option oStd98; +extern maplecl::Option oStd11p; +extern maplecl::Option oStdc1x; +extern maplecl::Option oStd89; +extern maplecl::Option oStd90; +extern maplecl::Option oStd99; +extern maplecl::Option oStd9x; +extern maplecl::Option oStd2003; +extern maplecl::Option oStd2008; +extern maplecl::Option oStd2008ts; +extern maplecl::Option oStdf95; +extern maplecl::Option oStdgnu; +extern maplecl::Option oStdgnu03p; +extern maplecl::Option oStdgnuoxp; +extern maplecl::Option oStdgnu11p; +extern maplecl::Option oStdgnu14p; +extern maplecl::Option oStdgnu17p; +extern maplecl::Option oStdgnu1yp; +extern maplecl::Option oStdgnu1zp; +extern maplecl::Option oStdgnu98p; +extern maplecl::Option oStdgnu11; +extern maplecl::Option oStdgnu1x; +extern maplecl::Option oStdgnu89; +extern maplecl::Option oStdgnu90; +extern maplecl::Option oStdgnu99; +extern maplecl::Option oStdgnu9x; +extern maplecl::Option oStd1990; +extern maplecl::Option oStd1994; +extern maplecl::Option oStd1999; +extern maplecl::Option oStd199x; +extern maplecl::Option oStd2011; +extern maplecl::Option oStdlegacy; +extern maplecl::Option oFChecking; +extern maplecl::Option oFtrackMacroExpansion; +extern maplecl::Option oFsanitizeRecover; +extern maplecl::Option oFobjcStd; +extern maplecl::Option oFobjcSjljExceptions; +extern maplecl::Option oFrandomSeed; +extern maplecl::Option tlsAggressiveOpt; +extern maplecl::Option staticLibmplpgo; +extern maplecl::Option 
oFnoExtendedIdentifiers; +extern maplecl::Option oFnoPchPreprocess; /* ##################### STRING Options ############################################################### */ @@ -138,16 +1900,173 @@ extern maplecl::Option output; extern maplecl::Option saveTempOpt; extern maplecl::Option target; extern maplecl::Option linkerTimeOptE; -extern maplecl::Option MT; -extern maplecl::Option MF; -extern maplecl::Option std; -extern maplecl::Option Wl; +extern maplecl::Option oMT; +extern maplecl::Option oMF; +extern maplecl::Option oWl; extern maplecl::Option fVisibility; extern maplecl::Option fStrongEvalOrderE; extern maplecl::Option march; extern maplecl::Option sysRoot; extern maplecl::Option specs; extern maplecl::Option folder; +extern maplecl::Option imacros; +extern maplecl::Option fdiagnosticsColor; +extern maplecl::Option mtlsSize; +extern maplecl::Option oWerrorE; +extern maplecl::Option oWnormalizedE; +extern maplecl::Option oWplacementNewE; +extern maplecl::Option oWstackUsage; +extern maplecl::Option oWstrictAliasingE; +extern maplecl::Option oFmaxErrors; +extern maplecl::Option oBundle_loader; +extern maplecl::Option oFabiCompatVersion; +extern maplecl::Option oFabiVersion; +extern maplecl::Option oFadaSpecParent; +extern maplecl::Option oFcompareDebugE; +extern maplecl::Option oFconstantStringClass; +extern maplecl::Option oFconstexprDepth; +extern maplecl::Option oFconstexprLoopLimit; +extern maplecl::Option oFdiagnosticsShowLocation; +extern maplecl::Option oFdisable; +extern maplecl::Option oFdumpFinalInsns; +extern maplecl::Option oFdumpGoSpec; +extern maplecl::Option oFdumpRtlPass; +extern maplecl::Option oFemitStructDebugDetailedE; +extern maplecl::Option oFenable; +extern maplecl::Option oFexcessPrecision; +extern maplecl::Option oFexecCharset; +extern maplecl::Option oFfpContract ; +extern maplecl::Option oFinputCharset; +extern maplecl::Option oFinstrumentFunctionsExcludeFileList; +extern maplecl::Option oFinstrumentFunctionsExcludeFunctionList; 
+extern maplecl::Option oFiraAlgorithmE; +extern maplecl::Option oFiraRegion; +extern maplecl::Option oFiraVerbose; +extern maplecl::Option oFivarVisibility; +extern maplecl::Option oFliveRangeShrinkage; +extern maplecl::Option oFltoCompressionLevel; +extern maplecl::Option oFltoPartition; +extern maplecl::Option oFmessageLength; +extern maplecl::Option oFCheckingE; +extern maplecl::Option oFobjcAbiVersion; +extern maplecl::Option oFopenaccDim; +extern maplecl::Option oFpermittedFltEvalMethods; +extern maplecl::Option oFplugin; +extern maplecl::Option oFpluginArg; +extern maplecl::Option oFprofileUpdate; +extern maplecl::Option oFrandomSeedE; +extern maplecl::Option oFreorderBlocksAlgorithm; +extern maplecl::Option oFsanitizeRecoverE; +extern maplecl::Option oFsanitizeSections; +extern maplecl::Option oFsanitize; +extern maplecl::Option oFstackCheckE; +extern maplecl::Option oFstackLimitRegister; +extern maplecl::Option oFstackLimitSymbol; +extern maplecl::Option oFstack_reuse; +extern maplecl::Option oFtabstop; +extern maplecl::Option oFtemplateBacktraceLimit; +extern maplecl::Option ftlsModel; +extern maplecl::Option oFtrackMacroExpansionE; +extern maplecl::Option oFwideExecCharset; +extern maplecl::Option oG; +extern maplecl::Option oIframework; +extern maplecl::Option oImultilib; +extern maplecl::Option oInclude; +extern maplecl::Option oIplugindir; +extern maplecl::Option oIprefix; +extern maplecl::Option oIquote; +extern maplecl::Option oIsysroot; +extern maplecl::Option oIwithprefix; +extern maplecl::Option oIwithprefixbefore; +extern maplecl::Option oMalignData; +extern maplecl::Option oMatomicModel; +extern maplecl::Option oMaxVectAlign; +extern maplecl::Option oMbased; +extern maplecl::Option oMblockMoveInlineLimit; +extern maplecl::Option oMbranchCost; +extern maplecl::Option oMc; +extern maplecl::Option oMcacheBlockSize; +extern maplecl::Option oMcacheSize; +extern maplecl::Option oMcmodel; +extern maplecl::Option oMcodeReadable; +extern maplecl::Option 
oMconfig; +extern maplecl::Option oMcpu; +extern maplecl::Option oMdataRegion; +extern maplecl::Option oMdivsi3_libfuncName; +extern maplecl::Option oMdualNopsE; +extern maplecl::Option oMemregsE; +extern maplecl::Option oMfixedRange; +extern maplecl::Option oMfloatGprs; +extern maplecl::Option oMflushFunc; +extern maplecl::Option oMflushTrap; +extern maplecl::Option oMfpRoundingMode; +extern maplecl::Option oMfpTrapMode; +extern maplecl::Option oMfpu; +extern maplecl::Option oMhintMaxDistance; +extern maplecl::Option oMhintMaxNops; +extern maplecl::Option oMhotpatch; +extern maplecl::Option oMhwmultE; +extern maplecl::Option oMinsertSchedNops; +extern maplecl::Option oMiselE; +extern maplecl::Option oMisel; +extern maplecl::Option oMisrVectorSize; +extern maplecl::Option oMmcuE; +extern maplecl::Option oMmemoryLatency; +extern maplecl::Option oMmemoryModel; +extern maplecl::Option oMoverrideE; +extern maplecl::Option oMprioritizeRestrictedInsns; +extern maplecl::Option oMrecip; +extern maplecl::Option oMrecipE; +extern maplecl::Option oMschedCostlyDep; +extern maplecl::Option oMschedule; +extern maplecl::Option oMsda; +extern maplecl::Option oMsharedLibraryId; +extern maplecl::Option oMsignReturnAddress; +extern maplecl::Option oMsiliconErrata; +extern maplecl::Option oMsiliconErrataWarn; +extern maplecl::Option oMsizeLevel; +extern maplecl::Option oMspe; +extern maplecl::Option oMstackGuard; +extern maplecl::Option oMstackProtectorGuard; +extern maplecl::Option oMstackProtectorGuardOffset; +extern maplecl::Option oMstackProtectorGuardReg; +extern maplecl::Option oMstackSize; +extern maplecl::Option oMtda; +extern maplecl::Option oMtiny; +extern maplecl::Option oMtlsDialect; +extern maplecl::Option oMtp; +extern maplecl::Option oMtpRegno; +extern maplecl::Option oMtrapPrecision; +extern maplecl::Option oMtune; +extern maplecl::Option oMultcost; +extern maplecl::Option oMunix; +extern maplecl::Option oMveclibabi; +extern maplecl::Option oMwarnFramesize; +extern 
maplecl::Option oMzda; +extern maplecl::Option oT; +extern maplecl::Option oTime; +extern maplecl::Option oWa; +extern maplecl::Option oWp; +extern maplecl::Option oXassembler; +extern maplecl::Option oXlinker; +extern maplecl::Option oXpreprocessor; +extern maplecl::Option oYm; +extern maplecl::Option oYP; +extern maplecl::Option oZ; +extern maplecl::Option oU; +extern maplecl::Option oFprofileDir; +extern maplecl::Option oFprofileUseE; +extern maplecl::Option oFoffloadAbi; +extern maplecl::Option oFoffload; +extern maplecl::Option oFinlineMatmulLimitE; +extern maplecl::Option oFinlineLimitE; +extern maplecl::Option oFinlineLimit; +extern maplecl::Option oFfixed; +extern maplecl::Option oFtemplateDepth; +extern maplecl::Option oFtemplateDepthE; +extern maplecl::Option functionReorderAlgorithm; +extern maplecl::Option functionReorderProfile; +extern maplecl::Option oA; #ifdef ENABLE_MAPLE_SAN extern maplecl::Option sanitizer; #endif diff --git a/src/mapleall/maple_driver/include/mpl_options.h b/src/mapleall/maple_driver/include/mpl_options.h index c3d2b950420eacd98e56e7235b91e085ac9cd2d8..55f2eea97818de56ebda04121abbdb61d549bc0b 100644 --- a/src/mapleall/maple_driver/include/mpl_options.h +++ b/src/mapleall/maple_driver/include/mpl_options.h @@ -30,26 +30,6 @@ #include "mir_module.h" namespace maple { -enum InputFileType { - kFileTypeNone, - kFileTypeClass, - kFileTypeJar, - kFileTypeAst, - kFileTypeCpp, - kFileTypeC, - kFileTypeDex, - kFileTypeMpl, - kFileTypeVtableImplMpl, - kFileTypeS, - kFileTypeObj, - kFileTypeBpl, - kFileTypeMeMpl, - kFileTypeMbc, - kFileTypeLmbc, - kFileTypeH, - kFileTypeI, -}; - enum OptimizationLevel { kO0, kO1, @@ -77,7 +57,7 @@ class InputInfo { public: explicit InputInfo(const std::string &inputFile) : inputFile(inputFile) { - inputFileType = GetInputFileType(inputFile); + inputFileType = FileUtils::GetFileType(inputFile); inputName = FileUtils::GetFileName(inputFile, true); inputFolder = FileUtils::GetFileFolder(inputFile); @@ -88,54 
+68,18 @@ class InputInfo { fullOutput = outputFolder + outputName; } - ~InputInfo() = default; - static InputFileType GetInputFileType(const std::string &inputFilePath) { - InputFileType fileType = InputFileType::kFileTypeNone; - std::string extensionName = FileUtils::GetFileExtension(inputFilePath); - if (extensionName == "class") { - fileType = InputFileType::kFileTypeClass; - } - else if (extensionName == "dex") { - fileType = InputFileType::kFileTypeDex; - } - else if (extensionName == "c") { - fileType = InputFileType::kFileTypeC; - } - else if (extensionName == "cpp") { - fileType = InputFileType::kFileTypeCpp; - } - else if (extensionName == "ast") { - fileType = InputFileType::kFileTypeAst; - } - else if (extensionName == "jar") { - fileType = InputFileType::kFileTypeJar; - } - else if (extensionName == "mpl" || extensionName == "bpl") { - if (inputFilePath.find("VtableImpl") == std::string::npos) { - if (inputFilePath.find(".me.mpl") != std::string::npos) { - fileType = InputFileType::kFileTypeMeMpl; - } else { - fileType = extensionName == "mpl" ? 
InputFileType::kFileTypeMpl : InputFileType::kFileTypeBpl; - } - } else { - fileType = InputFileType::kFileTypeVtableImplMpl; - } - } else if (extensionName == "s") { - fileType = InputFileType::kFileTypeS; - } else if (extensionName == "o") { - fileType = InputFileType::kFileTypeObj; - } else if (extensionName == "mbc") { - fileType = InputFileType::kFileTypeMbc; - } else if (extensionName == "lmbc") { - fileType = InputFileType::kFileTypeLmbc; - } else if (extensionName == "h") { - fileType = InputFileType::kFileTypeH; - } else if (extensionName == "i") { - fileType = InputFileType::kFileTypeI; - } + InputInfo(const std::string &inputFile, const InputFileType &inputFileType, const std::string &inputName, + const std::string &inputFolder, const std::string &outputFolder, const std::string &outputName, + const std::string &fullOutput) + : inputFile(inputFile), + inputFileType(inputFileType), + inputName(inputName), + inputFolder(inputFolder), + outputName(outputName), + outputFolder(outputFolder), + fullOutput(fullOutput) {} - return fileType; - } + ~InputInfo() = default; InputFileType GetInputFileType() const { return inputFileType; @@ -334,6 +278,11 @@ class MplOptions { return linkInputFiles; } + /* return hirInputFiles when -flto. 
*/ + const std::vector &GetHirInputFiles() const { + return hirInputFiles; + } + const std::string &GetExeFolder() const { return exeFolder; } @@ -370,6 +319,10 @@ class MplOptions { return rootActions; } + bool GetIsAllAst() const { + return isAllAst; + } + maplecl::OptionCategory *GetCategory(const std::string &tool) const; ErrorCode AppendCombOptions(MIRSrcLang srcLang); ErrorCode AppendMplcgOptions(MIRSrcLang srcLang); @@ -407,6 +360,7 @@ class MplOptions { std::vector inputFiles; std::vector linkInputFiles; + std::vector hirInputFiles; std::string exeFolder; RunMode runMode = RunMode::kUnkownRun; std::vector saveFiles = {}; @@ -420,6 +374,7 @@ class MplOptions { bool hasPrinted = false; bool generalRegOnly = false; + bool isAllAst = false; SafetyCheckMode npeCheckMode = SafetyCheckMode::kNoCheck; SafetyCheckMode boundaryCheckMode = SafetyCheckMode::kNoCheck; diff --git a/src/mapleall/maple_driver/include/safe_exe.h b/src/mapleall/maple_driver/include/safe_exe.h index 56105e065114344c12affd71257a0271e22d27b1..ee4bf28c87feb76d5df4fb2650eee2f141edc052 100644 --- a/src/mapleall/maple_driver/include/safe_exe.h +++ b/src/mapleall/maple_driver/include/safe_exe.h @@ -27,7 +27,6 @@ #endif #include -#include #include #include "error_code.h" #include "mpl_logging.h" @@ -59,6 +58,7 @@ class SafeExe { } // end of arguments sentinel is nullptr argv[vectorArgs.size()] = nullptr; + fflush(nullptr); pid_t pid = fork(); ErrorCode ret = kErrorNoError; if (pid == 0) { @@ -75,10 +75,11 @@ class SafeExe { // parent process int status = -1; waitpid(pid, &status, 0); - if (!WIFEXITED(static_cast(status))) { + auto exitStatus = static_cast(status); + if (!WIFEXITED(exitStatus)) { LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n'; ret = kErrorCompileFail; - } else if (WEXITSTATUS(status) != 0) { + } else if (WEXITSTATUS(exitStatus) != 0) { LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n'; ret = 
kErrorCompileFail; } @@ -97,35 +98,47 @@ class SafeExe { char **argv; Compilee compileeFlag = Compilee::unKnow; std::string ldLibPath = ""; - int index = cmd.find_last_of("-"); - if (index > 0 && cmd.substr(index) == "-gcc") { + size_t index = cmd.find_last_of("-"); + if (index > 0 && index < cmd.size() && cmd.substr(index) == "-gcc") { compileeFlag = Compilee::gcc; - } else if (cmd.find("hir2mpl", 0) != -1) { + for (auto &opt : options) { + if (opt.GetKey() == "-c") { + compileeFlag = Compilee::unKnow; + } + } + } else if (StringUtils::GetStrAfterLast(cmd, kFileSeperatorStr) == "hir2mpl" || + StringUtils::GetStrAfterLast(cmd, kFileSeperatorStr) == "clang") { compileeFlag = Compilee::hir2mpl; - ldLibPath += mplOptions.GetExeFolder().substr(0, mplOptions.GetExeFolder().length() - 4); - ldLibPath += "thirdparty/clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-18.04/lib"; + if (FileUtils::SafeGetenv(kMapleRoot) != "") { + ldLibPath += FileUtils::SafeGetenv(kMapleRoot) + "/build/tools/hpk/:"; + ldLibPath += FileUtils::SafeGetenv(kMapleRoot) + "/tools/clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-18.04/lib"; + } else { + ldLibPath += mplOptions.GetExeFolder().substr(0, mplOptions.GetExeFolder().length() - 4); + ldLibPath += "thirdparty/clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-18.04/lib"; + } } - std::tie(argv, argIndex) = GenerateUnixArguments(cmd, mplOptions, options, compileeFlag); + std::tie(argv, argIndex) = GenerateUnixArguments(cmd, options, compileeFlag); if (opts::debug) { LogInfo::MapleLogger() << "Run: " << cmd; for (auto &opt : options) { LogInfo::MapleLogger() << " " << opt.GetKey() << " " << opt.GetValue(); } if (compileeFlag == Compilee::gcc) { - for (auto &opt : mplOptions.GetLinkInputFiles()) { + for (auto &opt : maplecl::CommandLine::GetCommandLine().GetLinkOptions()) { LogInfo::MapleLogger() << " " << opt; } } LogInfo::MapleLogger() << "\n"; } + fflush(nullptr); pid_t pid = fork(); ErrorCode ret = kErrorNoError; if (pid == 0) { // child process fflush(nullptr); 
if (compileeFlag == Compilee::hir2mpl) { - std::string ld_path =":"; + std::string ld_path = ":"; if (FileUtils::SafeGetenv(kLdLibPath) != "") { ld_path += FileUtils::SafeGetenv(kLdLibPath); ldLibPath += ld_path; @@ -145,9 +158,10 @@ class SafeExe { // parent process int status = -1; waitpid(pid, &status, 0); - if (!WIFEXITED(static_cast(status))) { + auto exitStatus = static_cast(status); + if (!WIFEXITED(exitStatus)) { ret = kErrorCompileFail; - } else if (WEXITSTATUS(status) != 0) { + } else if (WEXITSTATUS(exitStatus) != 0) { ret = kErrorCompileFail; } @@ -156,6 +170,11 @@ class SafeExe { for (auto &opt : options) { LogInfo::MapleLogger() << opt.GetKey() << " " << opt.GetValue() << " "; } + if (compileeFlag == Compilee::gcc) { + for (auto &opt : maplecl::CommandLine::GetCommandLine().GetLinkOptions()) { + LogInfo::MapleLogger() << opt << " "; + } + } LogInfo::MapleLogger() << "\n"; } } @@ -284,16 +303,16 @@ class SafeExe { return tmpArgs; } - static std::tuple GenerateUnixArguments(const std::string &cmd, const MplOptions &mplOptions, - const std::vector &options, Compilee compileeFlag) { + static std::tuple GenerateUnixArguments(const std::string &cmd, + const std::vector &options, Compilee compileeFlag) { /* argSize=2, because we reserve 1st arg as exe binary, and another arg as last nullptr arg */ size_t argSize = 2; /* Calculate how many args are needed. 
* (* 2) is needed, because we have key and value arguments in each option */ - if (compileeFlag == Compilee::gcc && mplOptions.GetLinkInputFiles().size() > 0) { - argSize += mplOptions.GetLinkInputFiles().size(); + if (compileeFlag == Compilee::gcc && maplecl::CommandLine::GetCommandLine().GetLinkOptions().size() > 0) { + argSize += maplecl::CommandLine::GetCommandLine().GetLinkOptions().size(); } argSize += options.size() * 2; @@ -304,8 +323,8 @@ class SafeExe { // copy args auto cmdSize = cmd.size() + 1; // +1 for NUL terminal argv[0] = new char[cmdSize]; - strncpy_s(argv[0], cmdSize, cmd.c_str(), cmdSize); // c_str includes NUL terminal - + errno_t errSafe = strncpy_s(argv[0], cmdSize, cmd.c_str(), cmdSize); // c_str includes NUL terminal + CHECK_FATAL(errSafe == EOK, "strncpy_s failed"); /* Allocate and fill all arguments */ for (auto &opt : options) { auto key = opt.GetKey(); @@ -316,24 +335,27 @@ class SafeExe { if (keySize != 1) { argv[argIndex] = new char[keySize]; - strncpy_s(argv[argIndex], keySize, key.c_str(), keySize); + errSafe = strncpy_s(argv[argIndex], keySize, key.c_str(), keySize); + CHECK_FATAL(errSafe == EOK, "strncpy_s failed"); ++argIndex; } if (valSize != 1) { argv[argIndex] = new char[valSize]; - strncpy_s(argv[argIndex], valSize, val.c_str(), valSize); + errSafe = strncpy_s(argv[argIndex], valSize, val.c_str(), valSize); + CHECK_FATAL(errSafe == EOK, "strncpy_s failed"); ++argIndex; } } if (compileeFlag == Compilee::gcc) { - for (auto &opt : mplOptions.GetLinkInputFiles()) { + for (auto &opt : maplecl::CommandLine::GetCommandLine().GetLinkOptions()) { auto keySize = opt.size() + 1; if (keySize != 1) { argv[argIndex] = new char[keySize]; - strncpy_s(argv[argIndex], keySize, opt.c_str(), keySize); + errSafe = strncpy_s(argv[argIndex], keySize, opt.c_str(), keySize); + CHECK_FATAL(errSafe == EOK, "strncpy_s failed"); ++argIndex; } } diff --git a/src/mapleall/maple_driver/src/as_compiler.cpp b/src/mapleall/maple_driver/src/as_compiler.cpp 
index 1885554447143f2c90f350617fb8a6713fede56f..e71c393721b9492dcc1a0b754722ece6ed165b41 100644 --- a/src/mapleall/maple_driver/src/as_compiler.cpp +++ b/src/mapleall/maple_driver/src/as_compiler.cpp @@ -86,10 +86,10 @@ std::string AsCompiler::GetBin(const MplOptions &mplOptions [[maybe_unused]]) co #ifdef ANDROID return "prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/"; #else - if (FileUtils::SafeGetenv(kMapleRoot) != "") { - return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/aarch64-linux-gnu-gcc"; - } else if (FileUtils::SafeGetenv(kGccPath) != "") { + if (FileUtils::SafeGetenv(kGccPath) != "") { return FileUtils::SafeGetenv(kGccPath); + } else if (FileUtils::SafeGetenv(kMapleRoot) != "") { + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/aarch64-linux-gnu-gcc"; } return FileUtils::SafeGetPath("which aarch64-linux-gnu-gcc", "aarch64-linux-gnu-gcc"); #endif diff --git a/src/mapleall/maple_driver/src/clang_compiler.cpp b/src/mapleall/maple_driver/src/clang_compiler.cpp index bec012322307e322466eed719b61667f50408596..273716d88e9fe7d060c11c39c7241ffeaf5e5a2e 100644 --- a/src/mapleall/maple_driver/src/clang_compiler.cpp +++ b/src/mapleall/maple_driver/src/clang_compiler.cpp @@ -92,7 +92,7 @@ bool IsUseSafeOption() { opts::npeDynamicCheckAll.IsEnabledByUser() || opts::boundaryDynamicCheck.IsEnabledByUser() || opts::boundaryDynamicCheckSilent.IsEnabledByUser() || opts::safeRegionOption.IsEnabledByUser() || opts::enableArithCheck.IsEnabledByUser() || opts::defaultSafe.IsEnabledByUser()) { - flag= true; + flag = true; } return flag; } @@ -134,9 +134,16 @@ static uint32_t FillSpecialDefaulOpt(std::unique_ptr &opt, } /* Set last option as -o option */ - if (action.GetInputFileType() != InputFileType::kFileTypeH && !opts::onlyPreprocess.IsEnabledByUser()) { - opt[additionalLen - 1].SetKey("-o"); - opt[additionalLen - 1].SetValue(action.GetFullOutputName() + ".ast"); + if (action.GetInputFileType() != InputFileType::kFileTypeH && 
!opts::onlyPreprocess.IsEnabledByUser() && + !opts::oM.IsEnabledByUser() && !opts::oMM.IsEnabledByUser() && !opts::oMG.IsEnabledByUser() && + !opts::oMQ.IsEnabledByUser()) { + if (!opts::linkerTimeOpt.IsEnabledByUser() || !opts::compileWOLink.IsEnabledByUser()) { + opt[additionalLen - 1].SetKey("-o"); + opt[additionalLen - 1].SetValue(action.GetFullOutputName() + ".ast"); + } else { + opt[additionalLen - 1].SetKey("-o"); + opt[additionalLen - 1].SetValue("./" + action.GetOutputName() + ".o"); + } opt[additionalLen - 2].SetKey("-emit-ast"); // 2 is the array sequence number. opt[additionalLen - 2].SetValue(""); } diff --git a/src/mapleall/maple_driver/src/compiler.cpp b/src/mapleall/maple_driver/src/compiler.cpp index a9f785348b73343b4788986ffbb8de590696faeb..6f391590e6d71486ab3fd4394abfe7e648c4dbff 100644 --- a/src/mapleall/maple_driver/src/compiler.cpp +++ b/src/mapleall/maple_driver/src/compiler.cpp @@ -128,21 +128,18 @@ void Compiler::AppendExtraOptions(std::vector &finalOptions, const Mp } for (const auto &val : opt->GetRawValues()) { if (opt->GetEqualType() == maplecl::EqualType::kWithEqual) { - (void)finalOptions.emplace_back(opt->GetName() + "=" + val, ""); - } else { - if (opt->GetName() == "-Wl") { - (void)finalOptions.emplace_back(val, ""); + auto pos = opt->GetName().find('='); + if (pos != std::string::npos) { + (void)finalOptions.emplace_back(opt->GetName() + val, ""); } else { - (void)finalOptions.emplace_back(opt->GetName(), val); + (void)finalOptions.emplace_back(opt->GetName() + "=" + val, ""); } + } else { + (void)finalOptions.emplace_back(opt->GetName(), val); } if (isDebug) { - if (opt->GetName() == "-Wl") { - LogInfo::MapleLogger() << val << " "; - } else { - LogInfo::MapleLogger() << opt->GetName() << " " << val << " "; - } + LogInfo::MapleLogger() << opt->GetName() << " " << val << " "; } } } @@ -150,7 +147,7 @@ void Compiler::AppendExtraOptions(std::vector &finalOptions, const Mp /* output file can not be specified for several last actions. 
As exaple: * If last actions are assembly tool for 2 files (to get file1.o, file2.o), * we can not have one output name for them. */ - if (opts::output.IsEnabledByUser() && options.GetActions().size() == 1) { + if ((opts::output.IsEnabledByUser() && options.GetActions().size() == 1) || action.GetTool() == "ld") { /* Set output file for last compilation tool */ if (&action == options.GetActions()[0].get()) { /* the tool may not support "-o" for output option */ @@ -164,6 +161,16 @@ void Compiler::AppendExtraOptions(std::vector &finalOptions, const Mp } AppendOutputOption(finalOptions, opts::output.GetValue()); } + if (action.GetTool() == "ld") { + AppendOutputOption(finalOptions, opts::output.GetValue()); + } + } + + /* Append -Wl option and link files. */ + if (isDebug && toolName == "ld") { + for (auto &opt : maplecl::CommandLine::GetCommandLine().GetLinkOptions()) { + LogInfo::MapleLogger() << " " << opt; + } } if (isDebug) { diff --git a/src/mapleall/maple_driver/src/cpp2mpl_compiler.cpp b/src/mapleall/maple_driver/src/cpp2mpl_compiler.cpp index 2f3f9c0fcf0956ae3db910b07120e0abcb61d635..fc6fca3ca3dcd2a79ca447fed00c1690f7f3e704 100644 --- a/src/mapleall/maple_driver/src/cpp2mpl_compiler.cpp +++ b/src/mapleall/maple_driver/src/cpp2mpl_compiler.cpp @@ -19,10 +19,6 @@ namespace maple { std::string Cpp2MplCompiler::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const{ - if (FileUtils::SafeGetenv(kMapleRoot) != "") { - return FileUtils::SafeGetenv(kMapleRoot) + "/output/" + - FileUtils::SafeGetenv("MAPLE_BUILD_TYPE") + "/bin/"; - } return mplOptions.GetExeFolder(); } @@ -50,12 +46,21 @@ DefaultOption Cpp2MplCompiler::GetDefaultOptions(const MplOptions &options, uint32_t len = sizeof(kCpp2MplDefaultOptionsForAst) / sizeof(MplOption); // 1 for option -p uint32_t length = len + 1; + + if (options.GetIsAllAst()) { + length += options.GetHirInputFiles().size(); + length++; + } + if (IsUseBoundaryOption()) { length++; } if (IsUseNpeOption()) { length++; } + if 
(opts::linkerTimeOpt.IsEnabledByUser()) { + length++; + } DefaultOption defaultOptions = { std::make_unique(length), length }; for (uint32_t i = 0; i < len; ++i) { @@ -69,6 +74,16 @@ DefaultOption Cpp2MplCompiler::GetDefaultOptions(const MplOptions &options, options.GetExeFolder())); } + if (options.GetIsAllAst()) { + for (auto tmp : options.GetHirInputFiles()) { + defaultOptions.mplOptions[len].SetKey(tmp); + defaultOptions.mplOptions[len].SetValue(""); + len++; + } + defaultOptions.mplOptions[len].SetKey("-o"); + defaultOptions.mplOptions[len++].SetValue("tmp.mpl"); + } + defaultOptions.mplOptions[len].SetKey("--output"); defaultOptions.mplOptions[len++].SetValue(action.GetOutputFolder()); if (IsUseBoundaryOption()) { @@ -81,6 +96,11 @@ DefaultOption Cpp2MplCompiler::GetDefaultOptions(const MplOptions &options, defaultOptions.mplOptions[len].SetValue(""); len++; } + if (opts::linkerTimeOpt.IsEnabledByUser()) { + defaultOptions.mplOptions[len].SetKey("-wpaa"); + defaultOptions.mplOptions[len].SetValue(""); + len++; + } return defaultOptions; } diff --git a/src/mapleall/maple_driver/src/driver_options.cpp b/src/mapleall/maple_driver/src/driver_options.cpp index 7fbe9967c8ae4dcdd7fee404275e4fe1e7f45c15..7c3223ab59fb2bc66bf25f9d6a6374dddd4462f0 100644 --- a/src/mapleall/maple_driver/src/driver_options.cpp +++ b/src/mapleall/maple_driver/src/driver_options.cpp @@ -21,477 +21,528 @@ namespace opts { /* ##################### BOOL Options ############################################################### */ maplecl::Option version({"--version", "-v"}, - " --version [command] \tPrint version and exit.\n", - {driverCategory}); + " --version [command] \tPrint version and exit.\n", + {driverCategory}); maplecl::Option ignoreUnkOpt({"--ignore-unknown-options"}, - " --ignore-unknown-options \tIgnore unknown compilation options\n", - {driverCategory}); + " --ignore-unknown-options \tIgnore unknown compilation options\n", + {driverCategory}); maplecl::Option o0({"--O0", "-O0"}, - " 
-O0 \tNo optimization. (Default)\n", - {driverCategory}); + " -O0 \tNo optimization. (Default)\n", + {driverCategory}); maplecl::Option o1({"--O1", "-O1"}, - " -O1 \tDo some optimization.\n", - {driverCategory}); + " -O1 \tDo some optimization.\n", + {driverCategory}); maplecl::Option o2({"--O2", "-O2"}, - " -O2 \tDo more optimization.\n", - {driverCategory, hir2mplCategory}); + " -O2 \tDo more optimization.\n", + {driverCategory, hir2mplCategory}); maplecl::Option o3({"--O3", "-O3"}, - " -O3 \tDo more optimization.\n", - {driverCategory}); + " -O3 \tDo more optimization.\n", + {driverCategory}); maplecl::Option os({"--Os", "-Os"}, - " -Os \tOptimize for size, based on O2.\n", - {driverCategory}); + " -Os \tOptimize for size, based on O2.\n", + {driverCategory}); maplecl::Option verify({"--verify"}, - " --verify \tVerify mpl file\n", - {driverCategory, dex2mplCategory, mpl2mplCategory}); + " --verify \tVerify mpl file\n", + {driverCategory, dex2mplCategory, mpl2mplCategory}); maplecl::Option decoupleStatic({"--decouple-static", "-decouple-static"}, - " --decouple-static \tDecouple the static method and field\n" - " --no-decouple-static \tDon't decouple the static method and field\n", - {driverCategory, dex2mplCategory, meCategory, mpl2mplCategory}, - maplecl::DisableWith("--no-decouple-static")); + " --decouple-static \tDecouple the static method and field\n" + " --no-decouple-static \tDon't decouple the static method and field\n", + {driverCategory, dex2mplCategory, meCategory, mpl2mplCategory}, + maplecl::DisableWith("--no-decouple-static")); maplecl::Option gcOnly({"--gconly", "-gconly"}, - " --gconly \tMake gconly is enable\n" - " --no-gconly \tDon't make gconly is enable\n", - {driverCategory, dex2mplCategory, meCategory, - mpl2mplCategory, cgCategory}, - maplecl::DisableWith("--no-gconly")); + " --gconly \tMake gconly is enable\n" + " --no-gconly \tDon't make gconly is enable\n", + {driverCategory, dex2mplCategory, meCategory, mpl2mplCategory, cgCategory}, + 
maplecl::DisableWith("--no-gconly")); maplecl::Option timePhase({"-time-phases"}, - " -time-phases \tTiming phases and print percentages\n", - {driverCategory}); + " -time-phases \tTiming phases and print percentages\n", + {driverCategory}); maplecl::Option genMeMpl({"--genmempl"}, - " --genmempl \tGenerate me.mpl file\n", - {driverCategory}); + " --genmempl \tGenerate me.mpl file\n", + {driverCategory}); maplecl::Option compileWOLink({"-c"}, - " -c \tCompile the source files without linking\n", - {driverCategory}); + " -c \tCompile the source files without linking\n", + {driverCategory}); maplecl::Option genVtable({"--genVtableImpl"}, - " --genVtableImpl \tGenerate VtableImpl.mpl file\n", - {driverCategory}); + " --genVtableImpl \tGenerate VtableImpl.mpl file\n", + {driverCategory}); maplecl::Option verbose({"-verbose"}, - " -verbose \tPrint informations\n", - {driverCategory, jbc2mplCategory, hir2mplCategory, - meCategory, mpl2mplCategory, cgCategory}); + " -verbose \tPrint informations\n", + {driverCategory, jbc2mplCategory, hir2mplCategory, meCategory, mpl2mplCategory, cgCategory}); maplecl::Option debug({"--debug"}, - " --debug \tPrint debug info.\n", - {driverCategory}); + " --debug \tPrint debug info.\n", + {driverCategory}); maplecl::Option withDwarf({"-g"}, - " --debug \tPrint debug info.\n", - {driverCategory, hir2mplCategory}); + " --debug \tPrint debug info.\n", + {driverCategory, hir2mplCategory}); maplecl::Option withIpa({"--with-ipa"}, - " --with-ipa \tRun IPA when building\n" - " --no-with-ipa \n", - {driverCategory}, - maplecl::DisableWith("--no-with-ipa")); + " --with-ipa \tRun IPA when building\n" + " --no-with-ipa \n", + {driverCategory}, + maplecl::DisableWith("--no-with-ipa")); maplecl::Option npeNoCheck({"--no-npe-check"}, - " --no-npe-check \tDisable null pointer check (Default)\n", - {driverCategory}); + " --no-npe-check \tDisable null pointer check (Default)\n", + {driverCategory}); maplecl::Option npeStaticCheck({"--npe-check-static"}, - 
" --npe-check-static \tEnable null pointer static check only\n", - {driverCategory}); + " --npe-check-static \tEnable null pointer static check only\n", + {driverCategory}); maplecl::Option npeDynamicCheck({"--npe-check-dynamic", "-npe-check-dynamic"}, - " --npe-check-dynamic \tEnable null " - "pointer dynamic check with static warning\n", - {driverCategory, hir2mplCategory}); + " --npe-check-dynamic \tEnable null " + "pointer dynamic check with static warning\n", + {driverCategory, hir2mplCategory}); maplecl::Option npeDynamicCheckSilent({"--npe-check-dynamic-silent"}, - " --npe-check-dynamic-silent \tEnable null pointer dynamic " - "without static warning\n", - {driverCategory}); + " --npe-check-dynamic-silent \tEnable null pointer dynamic " + "without static warning\n", + {driverCategory}); maplecl::Option npeDynamicCheckAll({"--npe-check-dynamic-all"}, - " --npe-check-dynamic-all \tKeep dynamic check before dereference, " - "used with --npe-check-dynamic* options\n", - {driverCategory}); + " --npe-check-dynamic-all \tKeep dynamic check before dereference, " + "used with --npe-check-dynamic* options\n", + {driverCategory}); maplecl::Option boundaryNoCheck({"--no-boundary-check"}, - " --no-boundary-check \tDisable boundary check (Default)\n", - {driverCategory}); + " --no-boundary-check \tDisable boundary check (Default)\n", + {driverCategory}); maplecl::Option boundaryStaticCheck({"--boundary-check-static"}, - " --boundary-check-static \tEnable boundary static check\n", - {driverCategory}); + " --boundary-check-static \tEnable boundary static check\n", + {driverCategory}); maplecl::Option boundaryDynamicCheck({"--boundary-check-dynamic", "-boundary-check-dynamic"}, - " --boundary-check-dynamic \tEnable boundary dynamic check " - "with static warning\n", - {driverCategory, hir2mplCategory}); + " --boundary-check-dynamic \tEnable boundary dynamic check " + "with static warning\n", + {driverCategory, hir2mplCategory}); maplecl::Option 
boundaryDynamicCheckSilent({"--boundary-check-dynamic-silent"}, - " --boundary-check-dynamic-silent \tEnable boundary dynamic " - "check without static warning\n", - {driverCategory}); + " --boundary-check-dynamic-silent \tEnable boundary dynamic " + "check without static warning\n", + {driverCategory}); maplecl::Option safeRegionOption({"--safe-region", "-safe-region"}, - " --safe-region \tEnable safe region\n", - {driverCategory, hir2mplCategory}); + " --safe-region \tEnable safe region\n", + {driverCategory, hir2mplCategory}); maplecl::Option enableArithCheck({"--boundary-arith-check"}, - " --boundary-arith-check \tEnable pointer arithmetic check\n", - {driverCategory}); + " --boundary-arith-check \tEnable pointer arithmetic check\n", + {driverCategory}); maplecl::Option enableCallFflush({"--boudary-dynamic-call-fflush"}, - " --boudary-dynamic-call-fflush \tEnable call fflush function to flush " - "boundary-dynamic-check error message to the STDOUT\n", - {driverCategory}); + " --boudary-dynamic-call-fflush \tEnable call fflush function to flush " + "boundary-dynamic-check error message to the STDOUT\n", + {driverCategory}); maplecl::Option onlyCompile({"-S"}, "Only run preprocess and compilation steps", {driverCategory}); maplecl::Option printDriverPhases({"--print-driver-phases"}, - " --print-driver-phases \tPrint Driver Phases\n", - {driverCategory}); + " --print-driver-phases \tPrint Driver Phases\n", + {driverCategory}); maplecl::Option ldStatic({"-static", "--static"}, - " -static \tForce the linker to link a program statically\n", - {driverCategory, ldCategory}); + " -static \tForce the linker to link a program statically\n", + {driverCategory, ldCategory}); maplecl::Option maplePhase({"--maple-phase"}, - " --maple-phase \tRun maple phase only\n --no-maple-phase\n", - {driverCategory}, - maplecl::DisableWith("--maple-toolchain"), - maplecl::Init(true)); + " --maple-phase \tRun maple phase only\n --no-maple-phase\n", + {driverCategory}, + 
maplecl::DisableWith("--maple-toolchain"), + maplecl::Init(true)); maplecl::Option genMapleBC({"--genmaplebc"}, - " --genmaplebc \tGenerate .mbc file\n", - {driverCategory}); + " --genmaplebc \tGenerate .mbc file\n", + {driverCategory}); maplecl::Option genLMBC({"--genlmbc"}, - " --genlmbc \tGenerate .lmbc file\n", - {driverCategory, mpl2mplCategory}); + " --genlmbc \tGenerate .lmbc file\n", + {driverCategory, mpl2mplCategory}); maplecl::Option profileGen({"--profileGen"}, - " --profileGen \tGenerate profile data for static languages\n", - {driverCategory, meCategory, mpl2mplCategory, cgCategory}); + " --profileGen \tGenerate profile data for static languages\n", + {driverCategory, meCategory, mpl2mplCategory, cgCategory}); maplecl::Option profileUse({"--profileUse"}, - " --profileUse \tOptimize static languages with profile data\n", - {driverCategory, mpl2mplCategory}); + " --profileUse \tOptimize static languages with profile data\n", + {driverCategory, mpl2mplCategory}); maplecl::Option missingProfDataIsError({"--missing-profdata-is-error"}, - " --missing-profdata-is-error \tTreat missing profile data file as error\n" - " --no-missing-profdata-is-error \tOnly warn on missing profile data file\n", - {driverCategory}, - maplecl::DisableWith("--no-missing-profdata-is-error"), - maplecl::Init(true)); + " --missing-profdata-is-error \tTreat missing profile data file as error\n" + " --no-missing-profdata-is-error \tOnly warn on missing profile data file\n", + {driverCategory}, + maplecl::DisableWith("--no-missing-profdata-is-error"), + maplecl::Init(true)); + maplecl::Option stackProtectorStrong({"--stack-protector-strong", "-fstack-protector-strong"}, - " -fstack-protector-strong \tadd stack guard for some function\n", - {driverCategory, meCategory, cgCategory}); + " -fstack-protector-strong \tadd stack guard for some function\n", + {driverCategory, meCategory, cgCategory}); maplecl::Option stackProtectorAll({"--stack-protector-all"}, - " --stack-protector-all \tadd 
stack guard for all functions\n", - {driverCategory, meCategory, cgCategory}); + " --stack-protector-all \tadd stack guard for all functions\n", + {driverCategory, meCategory, cgCategory}); maplecl::Option inlineAsWeak({"-inline-as-weak", "--inline-as-weak"}, - " --inline-as-weak \tSet inline functions as weak symbols" - " as it's in C++\n", {driverCategory, hir2mplCategory}); + " --inline-as-weak \tSet inline functions as weak symbols" + " as it's in C++\n", {driverCategory, hir2mplCategory}); + maplecl::Option expand128Floats({"--expand128floats"}, - " --expand128floats \tEnable expand128floats pass\n", - {driverCategory}, - maplecl::DisableWith("--no-expand128floats"), - maplecl::hide, maplecl::Init(true)); + " --expand128floats \tEnable expand128floats pass\n", + {driverCategory}, + maplecl::DisableWith("--no-expand128floats"), + maplecl::hide, maplecl::Init(true)); maplecl::Option MD({"-MD"}, - " -MD \tWrite a depfile containing user and system headers\n", - {driverCategory, unSupCategory}); + " -MD \tWrite a depfile containing user and system headers\n", + {driverCategory, clangCategory}); maplecl::Option fNoPlt({"-fno-plt"}, - " -fno-plt \tDo not use the PLT to make function calls\n", - {driverCategory}); + " -fno-plt \tDo not use the PLT to make function calls\n", + {driverCategory}); maplecl::Option usePipe({"-pipe"}, - " -pipe \tUse pipes between commands, when possible\n", - {driverCategory, unSupCategory}); + " -pipe \tUse pipes between commands, when possible\n", + {driverCategory, unSupCategory}); maplecl::Option fDataSections({"-fdata-sections"}, - " -fdata-sections \tPlace each data in its own section (ELF Only)\n", - {driverCategory, unSupCategory}); + " -fdata-sections \tPlace each data in its own section (ELF Only)\n", + {driverCategory, unSupCategory}); maplecl::Option fRegStructReturn({"-freg-struct-return"}, - " -freg-struct-return \tOverride the default ABI to return small structs in registers\n", - {driverCategory}); + " -freg-struct-return 
\tOverride the default ABI to return small structs in registers\n", + {driverCategory}); maplecl::Option fTreeVectorize({"-ftree-vectorize"}, - " -ftree-vectorize \tEnable vectorization on trees\n", - {driverCategory}); - -maplecl::Option fNoFatLtoObjects({"-fno-fat-lto-objects"}, - " -fno-fat-lto-objects \tSpeeding up lto compilation\n", - {driverCategory, unSupCategory}); + " -ftree-vectorize \tEnable vectorization on trees\n", + {driverCategory}); maplecl::Option gcSections({"--gc-sections"}, - " -gc-sections \tDiscard all sections that are not accessed in the final elf\n", - {driverCategory, ldCategory}); + " -gc-sections \tDiscard all sections that are not accessed in the final elf\n", + {driverCategory, ldCategory}); maplecl::Option copyDtNeededEntries({"--copy-dt-needed-entries"}, - " --copy-dt-needed-entries \tGenerate a DT_NEEDED entry for each lib that is present in" - " the link command.\n", - {driverCategory, ldCategory}); + " --copy-dt-needed-entries \tGenerate a DT_NEEDED entry for each lib that is present in" + " the link command.\n", + {driverCategory, ldCategory}); maplecl::Option sOpt({"-s"}, - " -s \tRemove all symbol table and relocation information from the executable\n", - {driverCategory, ldCategory}); + " -s \tRemove all symbol table and relocation information from the executable\n", + {driverCategory, ldCategory}); maplecl::Option noStdinc({"-nostdinc"}, - " -s \tDo not search standard system include directories" - "(those specified with -isystem will still be used).\n", - {driverCategory, clangCategory}); + " -s \tDo not search standard system include directories" + "(those specified with -isystem will still be used).\n", + {driverCategory, clangCategory}); maplecl::Option pie({"-pie"}, - " -pie \tCreate a position independent executable.\n", - {driverCategory, ldCategory}); + " -pie \tCreate a position independent executable.\n", + {driverCategory, ldCategory}); maplecl::Option fStrongEvalOrder({"-fstrong-eval-order"}, - " 
-fstrong-eval-order \tFollow the C++17 evaluation order requirements" - "for assignment expressions, shift, member function calls, etc.\n", - {driverCategory, unSupCategory}); + " -fstrong-eval-order \tFollow the C++17 evaluation order requirements" + "for assignment expressions, shift, member function calls, etc.\n", + {driverCategory, unSupCategory}); maplecl::Option linkerTimeOpt({"-flto"}, - " -flto \tEnable LTO in 'full' mode\n", - {driverCategory, unSupCategory}); + " -flto \tEnable LTO in 'full' mode\n", + {driverCategory, cgCategory}); maplecl::Option shared({"-shared"}, - " -shared \tCreate a shared library.\n", - {driverCategory, ldCategory}); + " -shared \tCreate a shared library.\n", + {driverCategory, ldCategory}); maplecl::Option rdynamic({"-rdynamic"}, - " -rdynamic \tPass the flag `-export-dynamic' to the ELF linker," - "on targets that support it. This instructs the linker to add all symbols," - "not only used ones, to the dynamic symbol table.\n", - {driverCategory, ldCategory}); + " -rdynamic \tPass the flag `-export-dynamic' to the ELF linker," + "on targets that support it. 
This instructs the linker to add all symbols," + "not only used ones, to the dynamic symbol table.\n", + {driverCategory, ldCategory}); maplecl::Option dndebug({"-DNDEBUG"}, - " -DNDEBUG \t\n", - {driverCategory, ldCategory}); + " -DNDEBUG \t\n", + {driverCategory, ldCategory}); -maplecl::Option usesignedchar({"-fsigned-char", "-usesignedchar", "--usesignedchar"}, +maplecl::Option useSignedChar({"-fsigned-char", "-usesignedchar", "--usesignedchar"}, " -fsigned-char \tuse signed char\n", {driverCategory, clangCategory, hir2mplCategory}, maplecl::DisableWith("-funsigned-char")); maplecl::Option suppressWarnings({"-w"}, - " -w \tSuppress all warnings.\n", - {driverCategory, clangCategory, asCategory, ldCategory}); + " -w \tSuppress all warnings.\n", + {driverCategory, clangCategory, asCategory, ldCategory}); maplecl::Option pthread({"-pthread"}, - " -pthread \tDefine additional macros required for using" - "the POSIX threads library.\n", - {driverCategory, clangCategory, asCategory, ldCategory}); + " -pthread \tDefine additional macros required for using" + "the POSIX threads library.\n", + {driverCategory, clangCategory, asCategory, ldCategory}); maplecl::Option passO2ToClang({"-pO2ToCl"}, - " -pthread \ttmp for option -D_FORTIFY_SOURCE=1\n", - {clangCategory}); + " -pthread \ttmp for option -D_FORTIFY_SOURCE=1\n", + {clangCategory}); maplecl::Option defaultSafe({"-defaultSafe", "--defaultSafe"}, - " --defaultSafe : treat unmarked function or blocks as safe region by default", - {driverCategory, hir2mplCategory}); + " --defaultSafe : treat unmarked function or blocks as safe region by default", + {driverCategory, hir2mplCategory}); maplecl::Option onlyPreprocess({"-E"}, - " -E \tPreprocess only; do not compile, assemble or link.\n", - {driverCategory, clangCategory}); + " -E \tPreprocess only; do not compile, assemble or link.\n", + {driverCategory, clangCategory}); + +maplecl::Option tailcall({"--tailcall", "-foptimize-sibling-calls"}, + " 
--tailcall/-foptimize-sibling-calls \tDo tail call optimization\n" + " --no-tailcall/-fno-optimize-sibling-calls\n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"-fno-optimize-sibling-calls", "--no-tailcall"})); maplecl::Option noStdLib({"-nostdlib"}, - " -nostdlib \tDo not look for object files in standard path.\n", - {driverCategory, ldCategory}); + " -nostdlib \tDo not look for object files in standard path.\n", + {driverCategory, ldCategory}); maplecl::Option r({"-r"}, - " -r \tProduce a relocatable object as output. This is also" - " known as partial linking.\n", - {driverCategory, ldCategory}); + " -r \tProduce a relocatable object as output. This is also" + " known as partial linking.\n", + {driverCategory, ldCategory}); + +maplecl::Option wCCompat({"-Wc++-compat"}, + " -Wc++-compat \tWarn about C constructs that are not in the " + "common subset of C and C++ .\n", + {driverCategory, asCategory, ldCategory}); + +maplecl::Option wpaa({"-wpaa", "--wpaa"}, + " -dump-cfg funcname1,funcname2\n" \ + " -wpaa : enable whole program ailas analysis", + {driverCategory, hir2mplCategory}); + +maplecl::Option fm({"-fm", "--fm"}, + " static function merge will be enabled" \ + " only when wpaa is enabled at the same time", + {driverCategory, hir2mplCategory}); + +maplecl::Option dumpTime({"--dump-time", "-dump-time"}, + " -dump-time : dump time", + {driverCategory, hir2mplCategory}); + +maplecl::Option aggressiveTlsLocalDynamicOpt({"--tls-local-dynamic-opt"}, + " --tls-local-dynamic-opt \tdo aggressive tls local dynamic opt\n", + {driverCategory}); /* ##################### STRING Options ############################################################### */ + maplecl::Option help({"--help", "-h"}, - " --help \tPrint help\n", - {driverCategory}, - maplecl::optionalValue); + " --help \tPrint help\n", + {driverCategory}, + maplecl::kOptionalValue); maplecl::Option infile({"--infile"}, - " --infile file1,file2,file3 \tInput files.\n", - {driverCategory}); + " --infile 
file1,file2,file3 \tInput files.\n", + {driverCategory}); maplecl::Option mplt({"--mplt", "-mplt"}, - " --mplt=file1,file2,file3 \tImport mplt files.\n", - {driverCategory, dex2mplCategory, jbc2mplCategory}); + " --mplt=file1,file2,file3 \tImport mplt files.\n", + {driverCategory, dex2mplCategory, jbc2mplCategory}); maplecl::Option partO2({"--partO2"}, - " --partO2 \tSet func list for O2\n", - {driverCategory}); + " --partO2 \tSet func list for O2\n", + {driverCategory}); maplecl::List jbc2mplOpt({"--jbc2mpl-opt"}, - " --jbc2mpl-opt \tSet options for jbc2mpl\n", - {driverCategory}); + " --jbc2mpl-opt \tSet options for jbc2mpl\n", + {driverCategory}); maplecl::List hir2mplOpt({"--hir2mpl-opt"}, - " --hir2mpl-opt \tSet options for hir2mpl\n", - {driverCategory}); + " --hir2mpl-opt \tSet options for hir2mpl\n", + {driverCategory}); maplecl::List clangOpt({"--clang-opt"}, - " --clang-opt \tSet options for clang as AST generator\n", - {driverCategory}); + " --clang-opt \tSet options for clang as AST generator\n", + {driverCategory}); maplecl::List asOpt({"--as-opt"}, - " --as-opt \tSet options for as\n", - {driverCategory}); + " --as-opt \tSet options for as\n", + {driverCategory}); maplecl::List ldOpt({"--ld-opt"}, - " --ld-opt \tSet options for ld\n", - {driverCategory}); + " --ld-opt \tSet options for ld\n", + {driverCategory}); maplecl::List dex2mplOpt({"--dex2mpl-opt"}, - " --dex2mpl-opt \tSet options for dex2mpl\n", - {driverCategory}); + " --dex2mpl-opt \tSet options for dex2mpl\n", + {driverCategory}); maplecl::List mplipaOpt({"--mplipa-opt"}, - " --mplipa-opt \tSet options for mplipa\n", - {driverCategory}); + " --mplipa-opt \tSet options for mplipa\n", + {driverCategory}); maplecl::List mplcgOpt({"--mplcg-opt"}, - " --mplcg-opt \tSet options for mplcg\n", - {driverCategory}); + " --mplcg-opt \tSet options for mplcg\n", + {driverCategory}); maplecl::List meOpt({"--me-opt"}, - " --me-opt \tSet options for me\n", - {driverCategory}); + " --me-opt \tSet options 
for me\n", + {driverCategory}); maplecl::List mpl2mplOpt({"--mpl2mpl-opt"}, - " --mpl2mpl-opt \tSet options for mpl2mpl\n", - {driverCategory}); + " --mpl2mpl-opt \tSet options for mpl2mpl\n", + {driverCategory}); maplecl::Option profile({"--profile"}, - " --profile \tFor PGO optimization\n" - " \t--profile=list_file\n", - {driverCategory, dex2mplCategory, mpl2mplCategory, cgCategory}); + " --profile \tFor PGO optimization\n" + " \t--profile=list_file\n", + {driverCategory, dex2mplCategory, mpl2mplCategory, cgCategory}); maplecl::Option run({"--run"}, - " --run=cmd1:cmd2 \tThe name of executables that are going\n" - " \tto execute. IN SEQUENCE.\n" - " \tSeparated by \":\".Available exe names:\n" - " \tjbc2mpl, me, mpl2mpl, mplcg\n" - " \tInput file must match the tool can\n" - " \thandle\n", - {driverCategory}); + " --run=cmd1:cmd2 \tThe name of executables that are going\n" + " \tto execute. IN SEQUENCE.\n" + " \tSeparated by \":\".Available exe names:\n" + " \tjbc2mpl, me, mpl2mpl, mplcg\n" + " \tInput file must match the tool can\n" + " \thandle\n", + {driverCategory}); maplecl::Option optionOpt({"--option"}, - " --option=\"opt1:opt2\" \tOptions for each executable,\n" - " \tseparated by \":\".\n" - " \tThe sequence must match the sequence in\n" - " \t--run.\n", - {driverCategory}); + " --option=\"opt1:opt2\" \tOptions for each executable,\n" + " \tseparated by \":\".\n" + " \tThe sequence must match the sequence in\n" + " \t--run.\n", + {driverCategory}); maplecl::List ldLib({"-l"}, - " -l \tLinks with a library file\n", - {driverCategory, ldCategory}, - maplecl::joinedValue); + " -l \tLinks with a library file\n", + {driverCategory, ldCategory}, + maplecl::joinedValue); maplecl::List ldLibPath({"-L"}, - " -L \tAdd directory to library search path\n", - {driverCategory, ldCategory}, - maplecl::joinedValue); + " -L \tAdd directory to library search path\n", + {driverCategory, ldCategory}, + maplecl::joinedValue); maplecl::List enableMacro({"-D"}, - " -D = 
\tDefine to " - "(or 1 if omitted)\n", - {driverCategory, clangCategory}, - maplecl::joinedValue); + " -D = \tDefine to " + "(or 1 if omitted)\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); maplecl::List disableMacro({"-U"}, - " -U \tUndefine macro \n", - {driverCategory, clangCategory}, - maplecl::joinedValue); + " -U \tUndefine macro \n", + {driverCategory, clangCategory}, + maplecl::joinedValue); maplecl::List includeDir({"-I"}, - " -I \tAdd directory to include search path\n", - {driverCategory, clangCategory}, - maplecl::joinedValue); + " -I \tAdd directory to include search path\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); maplecl::List includeSystem({"-isystem"}, - " -isystem \tAdd directory to SYSTEM include search path\n", - {driverCategory, clangCategory}, - maplecl::joinedValue); + " -isystem \tAdd directory to SYSTEM include search path\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); maplecl::Option output({"-o"}, - " -o \tPlace the output into \n", - {driverCategory}, - maplecl::Init("a.out")); + " -o \tPlace the output into \n", + {driverCategory}, + maplecl::Init("a.out")); maplecl::Option saveTempOpt({"--save-temps"}, - " --save-temps \tDo not delete intermediate files.\n" - " \t--save-temps Save all intermediate files.\n" - " \t--save-temps=file1,file2,file3 Save the\n" - " \ttarget files.\n", - {driverCategory}, - maplecl::optionalValue); + " --save-temps \tDo not delete intermediate files.\n" + " \t--save-temps Save all intermediate files.\n" + " \t--save-temps=file1,file2,file3 Save the\n" + " \ttarget files.\n", + {driverCategory, clangCategory}, + maplecl::kOptionalValue); maplecl::Option target({"--target", "-target"}, - " --target= \tDescribe target platform\n" - " \t\t\t\tExample: --target=aarch64-gnu or --target=aarch64_be-gnuilp32\n", - {driverCategory, clangCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); - -maplecl::Option MT({"-MT"}, - " -MT \tSpecify name of main file 
output in depfile\n", - {driverCategory, unSupCategory}, maplecl::joinedValue); + " --target= \tDescribe target platform\n" + " \t\t\t\tExample: --target=aarch64-gnu or --target=aarch64_be-gnuilp32\n", + {driverCategory, clangCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); -maplecl::Option MF({"-MF"}, - " -MF \tWrite depfile output from -MD, -M to \n", - {driverCategory, clangCategory}, maplecl::joinedValue); +maplecl::Option oMT({"-MT"}, + " -MT \tSpecify name of main file output in depfile\n", + {driverCategory, clangCategory}, maplecl::joinedValue); +maplecl::Option oMF({"-MF"}, + " -MF \tWrite depfile output from -MD, -M to \n", + {driverCategory, clangCategory}, maplecl::joinedValue); -maplecl::Option std({"-std"}, - " -std \t\n", - {driverCategory, clangCategory, ldCategory, unSupCategory}); - -maplecl::Option Wl({"-Wl"}, - " -Wl, \tPass the comma separated arguments in to the linker\n", - {driverCategory, ldCategory}, maplecl::joinedValue); +maplecl::Option oWl({"-Wl"}, + " -Wl, \tPass the comma separated arguments in to the linker\n", + {driverCategory, ldCategory}, maplecl::joinedValue); maplecl::Option linkerTimeOptE({"-flto="}, - " -flto= \tSet LTO mode to either 'full' or 'thin'\n", - {driverCategory, unSupCategory}); + " -flto= \tSet LTO mode to either 'full' or 'thin'\n", + {driverCategory, unSupCategory}); maplecl::Option fVisibility({"-fvisibility"}, - " -fvisibility=[default|hidden|protected|internal] \tSet the default symbol visibility" - " for every global declaration unless overridden within the code\n", - {driverCategory}); + " -fvisibility=[default|hidden|protected|internal] \tSet the default symbol visibility" + " for every global declaration unless overridden within the code\n", + {driverCategory}); maplecl::Option fStrongEvalOrderE({"-fstrong-eval-order="}, - " -fstrong-eval-order \tFollow the C++17 evaluation order requirements" - "for assignment expressions, shift, member function calls, etc.\n", - {driverCategory, 
unSupCategory}); + " -fstrong-eval-order \tFollow the C++17 evaluation order requirements" + "for assignment expressions, shift, member function calls, etc.\n", + {driverCategory, unSupCategory}); -maplecl::Option march({"-march"}, - " -march= \tGenerate code for given CPU.\n", - {driverCategory, clangCategory, asCategory, ldCategory}); +maplecl::Option marchE({"-march="}, + " -march= \tGenerate code for given CPU.\n", + {driverCategory, clangCategory, asCategory, ldCategory, unSupCategory}); maplecl::Option sysRoot({"--sysroot"}, - " --sysroot \tSet the root directory of the target platform.\n" - " --sysroot= \tSet the root directory of the target platform.\n", - {driverCategory, clangCategory, ldCategory}); + " --sysroot \tSet the root directory of the target platform.\n" + " --sysroot= \tSet the root directory of the target platform.\n", + {driverCategory, clangCategory, ldCategory}); maplecl::Option specs({"-specs"}, - " -specs \tOverride built-in specs with the contents of .\n", - {driverCategory, ldCategory}); - -maplecl::Option folder({"-p"}, - " -p \tsave former folder when generating multiple output.\n", + " -specs \tOverride built-in specs with the contents of .\n", + {driverCategory, asCategory, ldCategory}); + +maplecl::Option folder({"-tmp-folder"}, + " -tmp-folder \tsave former folder when generating multiple output.\n", + {driverCategory}); + +maplecl::Option imacros({"-imacros", "--imacros"}, + " -imacros \tExactly like `-include', except that any output produced by" + " scanning FILE is thrown away.\n", + {driverCategory, clangCategory}); + +maplecl::Option fdiagnosticsColor({"-fdiagnostics-color"}, + " -fdiagnostics-color \tUse color in diagnostics. WHEN is 'never'," + " 'always', or 'auto'.\n", + {driverCategory, clangCategory, asCategory, ldCategory}); + +maplecl::Option mtlsSize({"-mtls-size"}, + " -mtls-size \tSpecify bit size of immediate TLS offsets. Valid values are 12, " + "24, 32, 48. 
This option requires binutils 2.26 or newer.\n", + {driverCategory, asCategory, ldCategory}); + +maplecl::Option ftlsModel({"-ftls-model"}, + " -ftls-model=[global-dynamic|local-dynamic|initial-exec|local-exec|warmup-dynamic] " + " \tAlter the thread-local storage model to be used.\n", {driverCategory}); + +maplecl::Option functionReorderAlgorithm({"--function-reorder-algorithm"}, + " --function-reorder-algorithm=[call-chain-clustering]" + " \t choose function reorder algorithm\n", + {driverCategory, cgCategory}); +maplecl::Option functionReorderProfile({"--function-reorder-profile"}, + " --function-reorder-profile=filepath" + " \t profile for function reorder\n", + {driverCategory, cgCategory}); + #ifdef ENABLE_MAPLE_SAN maplecl::Option sanitizer({"-fsanitize"}, " -fsanitize=address \tEnable AddressSanitizer.\n", @@ -500,137 +551,4581 @@ maplecl::Option sanitizer({"-fsanitize"}, /* ##################### DIGITAL Options ############################################################### */ maplecl::Option helpLevel({"--level"}, - " --level=NUM \tPrint the help info of specified level.\n" - " \tNUM=0: All options (Default)\n" - " \tNUM=1: Product options\n" - " \tNUM=2: Experimental options\n" - " \tNUM=3: Debug options\n", - {driverCategory}); + " --level=NUM \tPrint the help info of specified level.\n" + " \tNUM=0: All options (Default)\n" + " \tNUM=1: Product options\n" + " \tNUM=2: Experimental options\n" + " \tNUM=3: Debug options\n", + {driverCategory}); maplecl::Option funcInliceSize({"-func-inline-size", "--func-inline-size"}, - " -func-inline-size : set func inline size", - {driverCategory, hir2mplCategory}); - -/* ##################### Warnings Options ############################################################### */ - -maplecl::Option wUnusedMacro({"-Wunused-macros"}, - " -Wunused-macros \twarning: macro is not used\n", - {driverCategory, clangCategory}); - -maplecl::Option wBadFunctionCast({"-Wbad-function-cast"}, - " -Wbad-function-cast \twarning: " - 
"cast from function call of type A to non-matching type B\n", - {driverCategory, clangCategory}); - -maplecl::Option wStrictPrototypes({"-Wstrict-prototypes"}, - " -Wstrict-prototypes \twarning: " - "Warn if a function is declared or defined without specifying the argument types\n", - {driverCategory, clangCategory}); - -maplecl::Option wUndef({"-Wundef"}, - " -Wundef \twarning: " - "Warn if an undefined identifier is evaluated in an #if directive. " - "Such identifiers are replaced with zero\n", - {driverCategory, clangCategory}); - -maplecl::Option wCastQual({"-Wcast-qual"}, - " -Wcast-qual \twarning: " - "Warn whenever a pointer is cast so as to remove a type qualifier from the target type. " - "For example, warn if a const char * is cast to an ordinary char *\n", - {driverCategory, clangCategory}); - -maplecl::Option wMissingFieldInitializers({"-Wmissing-field-initializers"}, - " -Wmissing-field-initializers\twarning: " - "Warn if a structure’s initializer has some fields missing\n", - {driverCategory, clangCategory}, - maplecl::DisableWith("-Wno-missing-field-initializers")); - -maplecl::Option wUnusedParameter({"-Wunused-parameter"}, - " -Wunused-parameter \twarning: " - "Warn whenever a function parameter is unused aside from its declaration\n", - {driverCategory, clangCategory}, - maplecl::DisableWith("-Wno-unused-parameter")); - -maplecl::Option wAll({"-Wall"}, - " -Wall \tThis enables all the warnings about constructions " - "that some users consider questionable\n", - {driverCategory, clangCategory}); - -maplecl::Option wExtra({"-Wextra"}, - " -Wextra \tEnable some extra warning flags that are not enabled by -Wall\n", - {driverCategory, clangCategory}); - -maplecl::Option wWriteStrings({"-Wwrite-strings"}, - " -Wwrite-strings \tWhen compiling C, give string constants the type " - "const char[length] so that copying the address of one into " - "a non-const char * pointer produces a warning\n", - {driverCategory, clangCategory}); - -maplecl::Option 
wVla({"-Wvla"}, - " -Wvla \tWarn if a variable-length array is used in the code\n", - {driverCategory, clangCategory}); - -maplecl::Option wFormatSecurity({"-Wformat-security"}, - " -Wformat-security \tWwarn about uses of format " - "functions that represent possible security problems\n", - {driverCategory, clangCategory}); - -maplecl::Option wShadow({"-Wshadow"}, - " -Wshadow \tWarn whenever a local variable " - "or type declaration shadows another variable\n", - {driverCategory, clangCategory}); - -maplecl::Option wTypeLimits({"-Wtype-limits"}, - " -Wtype-limits \tWarn if a comparison is always true or always " - "false due to the limited range of the data type\n", - {driverCategory, clangCategory}); - -maplecl::Option wSignCompare({"-Wsign-compare"}, - " -Wsign-compare \tWarn when a comparison between signed and " - " unsigned values could produce an incorrect result when the signed value is converted " - "to unsigned\n", - {driverCategory, clangCategory}); - -maplecl::Option wShiftNegativeValue({"-Wshift-negative-value"}, - " -Wshift-negative-value \tWarn if left " - "shifting a negative value\n", - {driverCategory, clangCategory}); - -maplecl::Option wPointerArith({"-Wpointer-arith"}, - " -Wpointer-arith \tWarn about anything that depends on the " - "“size of” a function type or of void\n", - {driverCategory, clangCategory}); - -maplecl::Option wIgnoredQualifiers({"-Wignored-qualifiers"}, - " -Wignored-qualifiers \tWarn if the return type of a " - "function has a type qualifier such as const\n", - {driverCategory, clangCategory}); - -maplecl::Option wFormat({"-Wformat"}, - " -Wformat \tCheck calls to printf and scanf, etc., " - "to make sure that the arguments supplied have types appropriate " - "to the format string specified\n", - {driverCategory, clangCategory}); - -maplecl::Option wFloatEqual({"-Wfloat-equal"}, - " -Wfloat-equal \tWarn if floating-point values are used " - "in equality comparisons\n", - {driverCategory, clangCategory}); - -maplecl::Option 
wDateTime({"-Wdate-time"}, - " -Wdate-time \tWarn when macros __TIME__, __DATE__ or __TIMESTAMP__ " - "are encountered as they might prevent bit-wise-identical reproducible compilations\n", - {driverCategory, clangCategory}); - -maplecl::Option wImplicitFallthrough({"-Wimplicit-fallthrough"}, - " -Wimplicit-fallthrough \tWarn when a switch case falls through\n", - {driverCategory, clangCategory}); - -maplecl::Option wShiftOverflow({"-Wshift-overflow"}, - " -Wshift-overflow \tWarn about left shift overflows\n", - {driverCategory, clangCategory}, - maplecl::DisableWith("-Wno-shift-overflow")); - -/* #################################################################################################### */ + " -func-inline-size : set func inline size", + {driverCategory, hir2mplCategory}); + +/* ##################### maple Options ############################################################### */ + +maplecl::Option oFmaxErrors({"-fmax-errors"}, + " -fmax-errors \tLimits the maximum number of " + "error messages to n, If n is 0 (the default), there is no limit on the number of " + "error messages produced. 
If -Wfatal-errors is also specified, then -Wfatal-errors " + "takes precedence over this option.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oStaticLibasan({"-static-libasan"}, + " -static-libasan \tThe -static-libasan option directs the MAPLE driver" + " to link libasan statically, without necessarily linking other libraries " + "statically.\n", + {driverCategory, ldCategory}); + +maplecl::Option oStaticLiblsan({"-static-liblsan"}, + " -static-liblsan \tThe -static-liblsan option directs the MAPLE driver" + " to link liblsan statically, without necessarily linking other libraries " + "statically.\n", + {driverCategory, ldCategory}); + +maplecl::Option oStaticLibtsan({"-static-libtsan"}, + " -static-libtsan \tThe -static-libtsan option directs the MAPLE driver" + " to link libtsan statically, without necessarily linking other libraries " + "statically.\n", + {driverCategory, ldCategory}); + +maplecl::Option oStaticLibubsan({"-static-libubsan"}, + " -static-libubsan \tThe -static-libubsan option directs the MAPLE" + " driver to link libubsan statically, without necessarily linking other libraries " + "statically.\n", + {driverCategory, ldCategory}); + +maplecl::Option oStaticLibmpx({"-static-libmpx"}, + " -static-libmpx \tThe -static-libmpx option directs the MAPLE driver to " + "link libmpx statically, without necessarily linking other libraries statically.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oStaticLibmpxwrappers({"-static-libmpxwrappers"}, + " -static-libmpxwrappers \tThe -static-libmpxwrappers option directs the" + " MAPLE driver to link libmpxwrappers statically, without necessarily linking other" + " libraries statically.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSymbolic({"-symbolic"}, + " -symbolic \tWarn about any unresolved references (unless overridden " + "by the link editor option -Xlinker -z -Xlinker defs).\n", + {driverCategory, ldCategory}); + +maplecl::Option oFipaBitCp({"-fipa-bit-cp"}, + " 
-fipa-bit-cp \tWhen enabled, perform interprocedural bitwise constant" + " propagation. This flag is enabled by default at -O2. It requires that -fipa-cp " + "is enabled.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFipaVrp({"-fipa-vrp"}, + " -fipa-vrp \tWhen enabled, perform interprocedural propagation of value" + " ranges. This flag is enabled by default at -O2. It requires that -fipa-cp is " + "enabled.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMindirectBranchRegister({"-mindirect-branch-register"}, + " -mindirect-branch-register \tForce indirect call and jump via register.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlowPrecisionDiv({"-mlow-precision-div"}, + " -mlow-precision-div \tEnable the division approximation. " + "Enabling this reduces precision of division results to about 16 bits for " + "single precision and to 32 bits for double precision.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-low-precision-div")); + +maplecl::Option oMlowPrecisionSqrt({"-mlow-precision-sqrt"}, + " -mlow-precision-sqrt \tEnable the reciprocal square root approximation." + " Enabling this reduces precision of reciprocal square root results to about 16 bits " + "for single precision and to 32 bits for double precision.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-low-precision-sqrt")); + +maplecl::Option oM80387({"-m80387"}, + " -m80387 \tGenerate output containing 80387 instructions for " + "floating point.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-80387")); + +maplecl::Option oAllowable_client({"-allowable_client"}, + " -allowable_client \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oAll_load({"-all_load"}, + " -all_load \tLoads all members of static archive libraries.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oAnsi({"-ansi", "--ansi"}, + " -ansi \tIn C mode, this is equivalent to -std=c90. 
" + "In C++ mode, it is equivalent to -std=c++98.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oArch_errors_fatal({"-arch_errors_fatal"}, + " -arch_errors_fatal \tCause the errors having to do with files " + "that have the wrong architecture to be fatal.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oAuxInfo({"-aux-info"}, + " -aux-info \tEmit declaration information into .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oBdynamic({"-Bdynamic"}, + " -Bdynamic \tDefined for compatibility with Diab.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oBind_at_load({"-bind_at_load"}, + " -bind_at_load \tCauses the output file to be marked such that the dynamic" + " linker will bind all undefined references when the file is loaded or launched.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oBstatic({"-Bstatic"}, + " -Bstatic \tdefined for compatibility with Diab.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oBundle({"-bundle"}, + " -bundle \tProduce a Mach-o bundle format file. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oBundle_loader({"-bundle_loader"}, + " -bundle_loader \tThis option specifies the executable that will load " + "the build output file being linked.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oC({"-C"}, + " -C \tDo not discard comments. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oCC({"-CC"}, + " -CC \tDo not discard comments in macro expansions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oClient_name({"-client_name"}, + " -client_name \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oCompatibility_version({"-compatibility_version"}, + " -compatibility_version \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oCoverage({"-coverage"}, + " -coverage \tThe option is a synonym for -fprofile-arcs -ftest-coverage" + " (when compiling) and -lgcov (when linking). 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oCurrent_version({"-current_version"}, + " -current_version \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDa({"-da"}, + " -da \tProduce all the dumps listed above.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDA({"-dA"}, + " -dA \tAnnotate the assembler output with miscellaneous debugging " + "information.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDD({"-dD"}, + " -dD \tDump all macro definitions, at the end of preprocessing, " + "in addition to normal output.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDead_strip({"-dead_strip"}, + " -dead_strip \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDependencyFile({"-dependency-file"}, + " -dependency-file \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDH({"-dH"}, + " -dH \tProduce a core dump whenever an error occurs.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDI({"-dI"}, + " -dI \tOutput '#include' directives in addition to the result of" + " preprocessing.\n", + {driverCategory, clangCategory}); + +maplecl::Option oDM({"-dM"}, + " -dM \tInstead of the normal output, generate a list of '#define' " + "directives for all the macros defined during the execution of the preprocessor, " + "including predefined macros. \n", + {driverCategory, clangCategory}); + +maplecl::Option oDN({"-dN"}, + " -dN \t Like -dD, but emit only the macro names, not their expansions.\n", + {driverCategory, clangCategory}); + +maplecl::Option oDp({"-dp"}, + " -dp \tAnnotate the assembler output with a comment indicating which " + "pattern and alternative is used. The length of each instruction is also printed.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDP({"-dP"}, + " -dP \tDump the RTL in the assembler output as a comment before each" + " instruction. 
Also turns on -dp annotation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDU({"-dU"}, + " -dU \tLike -dD except that only macros that are expanded.\n", + {driverCategory, clangCategory}); + +maplecl::Option oDumpfullversion({"-dumpfullversion"}, + " -dumpfullversion \tPrint the full compiler version, always 3 numbers " + "separated by dots, major, minor and patchlevel version.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDumpmachine({"-dumpmachine"}, + " -dumpmachine \tPrint the compiler's target machine (for example, " + "'i686-pc-linux-gnu')—and don't do anything else.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDumpspecs({"-dumpspecs"}, + " -dumpspecs \tPrint the compiler's built-in specs—and don't do anything " + "else. (This is used when MAPLE itself is being built.)\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDumpversion({"-dumpversion"}, + " -dumpversion \tPrint the compiler version " + "(for example, 3.0, 6.3.0 or 7)—and don't do anything else.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDx({"-dx"}, + " -dx \tJust generate RTL for a function instead of compiling it." 
+ " Usually used with -fdump-rtl-expand.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDylib_file({"-dylib_file"}, + " -dylib_file \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDylinker_install_name({"-dylinker_install_name"}, + " -dylinker_install_name \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDynamic({"-dynamic"}, + " -dynamic \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oDynamiclib({"-dynamiclib"}, + " -dynamiclib \tWhen passed this option, GCC produces a dynamic library " + "instead of an executable when linking, using the Darwin libtool command.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oEB({"-EB"}, + " -EB \tCompile code for big-endian targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oEL({"-EL"}, + " -EL \tCompile code for little-endian targets. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oExported_symbols_list({"-exported_symbols_list"}, + " -exported_symbols_list \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFabiCompatVersion({"-fabi-compat-version="}, + " -fabi-compat-version= \tThe version of the C++ ABI in use.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFabiVersion({"-fabi-version="}, + " -fabi-version= \tUse version n of the C++ ABI. 
The default is " + "version 0.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFadaSpecParent({"-fada-spec-parent="}, + " -fada-spec-parent= \tIn conjunction with -fdump-ada-spec[-slim] above, " + "generate Ada specs as child units of parent unit.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFaggressiveLoopOptimizations({"-faggressive-loop-optimizations"}, + " -faggressive-loop-optimizations \tThis option tells the loop optimizer " + "to use language constraints to derive bounds for the number of iterations of a loop.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-aggressive-loop-optimizations")); + +maplecl::Option oFchkpFlexibleStructTrailingArrays({"-fchkp-flexible-struct-trailing-arrays"}, + " -fchkp-flexible-struct-trailing-arrays \tForces Pointer Bounds Checker " + "to treat all trailing arrays in structures as possibly flexible. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-flexible-struct-trailing-arrays")); + +maplecl::Option oFchkpInstrumentCalls({"-fchkp-instrument-calls"}, + " -fchkp-instrument-calls \tInstructs Pointer Bounds Checker to pass " + "pointer bounds to calls. Enabled by default.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-instrument-calls")); + +maplecl::Option oFchkpInstrumentMarkedOnly({"-fchkp-instrument-marked-only"}, + " -fchkp-instrument-marked-only \tInstructs Pointer Bounds Checker to " + "instrument only functions marked with the bnd_instrument attribute \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-instrument-marked-only")); + +maplecl::Option oFchkpNarrowBounds({"-fchkp-narrow-bounds"}, + " -fchkp-narrow-bounds \tControls bounds used by Pointer Bounds Checker" + " for pointers to object fields. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-narrow-bounds")); + +maplecl::Option oFchkpNarrowToInnermostArray({"-fchkp-narrow-to-innermost-array"}, + " -fchkp-narrow-to-innermost-array \tForces Pointer Bounds Checker to use " + "bounds of the innermost arrays in case of nested static array access.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-narrow-to-innermost-array")); + +maplecl::Option oFchkpOptimize({"-fchkp-optimize"}, + " -fchkp-optimize \tEnables Pointer Bounds Checker optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-optimize")); + +maplecl::Option oFchkpStoreBounds({"-fchkp-store-bounds"}, + " -fchkp-store-bounds \tInstructs Pointer Bounds Checker to generate bounds" + " stores for pointer writes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-store-bounds")); + +maplecl::Option oFchkpTreatZeroDynamicSizeAsInfinite({"-fchkp-treat-zero-dynamic-size-as-infinite"}, + " -fchkp-treat-zero-dynamic-size-as-infinite \tWith this option, objects " + "with incomplete type whose dynamically-obtained size is zero are treated as having " + "infinite size instead by Pointer Bounds Checker. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-treat-zero-dynamic-size-as-infinite")); + +maplecl::Option oFchkpUseFastStringFunctions({"-fchkp-use-fast-string-functions"}, + " -fchkp-use-fast-string-functions \tEnables use of *_nobnd versions of " + "string functions (not copying bounds) by Pointer Bounds Checker. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-use-fast-string-functions")); + +maplecl::Option oFchkpUseNochkStringFunctions({"-fchkp-use-nochk-string-functions"}, + " -fchkp-use-nochk-string-functions \tEnables use of *_nochk versions of " + "string functions (not checking bounds) by Pointer Bounds Checker. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-use-nochk-string-functions")); + +maplecl::Option oFchkpUseStaticBounds({"-fchkp-use-static-bounds"}, + " -fchkp-use-static-bounds \tAllow Pointer Bounds Checker to generate " + "static bounds holding bounds of static variables. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-use-static-bounds")); + +maplecl::Option oFchkpUseStaticConstBounds({"-fchkp-use-static-const-bounds"}, + " -fchkp-use-static-const-bounds \tUse statically-initialized bounds for " + "constant bounds instead of generating them each time they are required.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-use-static-const-bounds")); + +maplecl::Option oFchkpUseWrappers({"-fchkp-use-wrappers"}, + " -fchkp-use-wrappers \tAllows Pointer Bounds Checker to replace calls to " + "built-in functions with calls to wrapper functions. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-use-wrappers")); + +maplecl::Option oFcilkplus({"-fcilkplus"}, + " -fcilkplus \tEnable Cilk Plus.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-cilkplus")); + +maplecl::Option oFcodeHoisting({"-fcode-hoisting"}, + " -fcode-hoisting \tPerform code hoisting. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-code-hoisting")); + +maplecl::Option oFcombineStackAdjustments({"-fcombine-stack-adjustments"}, + " -fcombine-stack-adjustments \tTracks stack adjustments (pushes and pops)" + " and stack memory references and then tries to find ways to combine them.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-combine-stack-adjustments")); + +maplecl::Option oFcompareDebug({"-fcompare-debug"}, + " -fcompare-debug \tCompile with and without e.g. 
-gtoggle, and compare " + "the final-insns dump.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-compare-debug")); + +maplecl::Option oFcompareDebugE({"-fcompare-debug="}, + " -fcompare-debug= \tCompile with and without e.g. -gtoggle, and compare " + "the final-insns dump.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFcompareDebugSecond({"-fcompare-debug-second"}, + " -fcompare-debug-second \tRun only the second compilation of " + "-fcompare-debug.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFcompareElim({"-fcompare-elim"}, + " -fcompare-elim \tPerform comparison elimination after register " + "allocation has finished.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-compare-elim")); + +maplecl::Option oFconcepts({"-fconcepts"}, + " -fconcepts \tEnable support for C++ concepts.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-concepts")); + +maplecl::Option oFcondMismatch({"-fcond-mismatch"}, + " -fcond-mismatch \tAllow the arguments of the '?' operator to have " + "different types.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-cond-mismatch")); + +maplecl::Option oFconserveStack({"-fconserve-stack"}, + " -fconserve-stack \tDo not perform optimizations increasing noticeably" + " stack usage.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-conserve-stack")); + +maplecl::Option oFconstantStringClass({"-fconstant-string-class="}, + " -fconstant-string-class= \tUse class ofor constant strings." 
+ "no class name specified with %qs\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFconstexprDepth({"-fconstexpr-depth="}, + " -fconstexpr-depth= \tSpecify maximum constexpr recursion depth.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFconstexprLoopLimit({"-fconstexpr-loop-limit="}, + " -fconstexpr-loop-limit= \tSpecify maximum constexpr loop iteration " + "count.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFcpropRegisters({"-fcprop-registers"}, + " -fcprop-registers \tPerform a register copy-propagation optimization" + " pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-cprop-registers")); + +maplecl::Option oFcrossjumping({"-fcrossjumping"}, + " -fcrossjumping \tPerform cross-jumping transformation. This " + "transformation unifies equivalent code and saves code size. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-crossjumping")); + +maplecl::Option oFcseFollowJumps({"-fcse-follow-jumps"}, + " -fcse-follow-jumps \tWhen running CSE, follow jumps to their targets.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-cse-follow-jumps")); + +maplecl::Option oFcseSkipBlocks({"-fcse-skip-blocks"}, + " -fcse-skip-blocks \tDoes nothing. 
Preserved for backward " + "compatibility.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-cse-skip-blocks")); + +maplecl::Option oFcxFortranRules({"-fcx-fortran-rules"}, + " -fcx-fortran-rules \tComplex multiplication and division follow " + "Fortran rules.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-cx-fortran-rules")); + +maplecl::Option oFcxLimitedRange({"-fcx-limited-range"}, + " -fcx-limited-range \tOmit range reduction step when performing complex " + "division.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-cx-limited-range")); + +maplecl::Option oFdbgCnt({"-fdbg-cnt"}, + " -fdbg-cnt \tPlace data items into their own section.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dbg-cnt")); + +maplecl::Option oFdbgCntList({"-fdbg-cnt-list"}, + " -fdbg-cnt-list \tList all available debugging counters with their " + "limits and counts.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dbg-cnt-list")); + +maplecl::Option oFdce({"-fdce"}, + " -fdce \tUse the RTL dead code elimination pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dce")); + +maplecl::Option oFdebugCpp({"-fdebug-cpp"}, + " -fdebug-cpp \tEmit debug annotations during preprocessing.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-debug-cpp")); + +maplecl::Option oFdebugPrefixMap({"-fdebug-prefix-map"}, + " -fdebug-prefix-map \tMap one directory name to another in debug " + "information.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-debug-prefix-map")); + +maplecl::Option oFdebugTypesSection({"-fdebug-types-section"}, + " -fdebug-types-section \tOutput .debug_types section when using DWARF " + "v4 debuginfo.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-debug-types-section")); + +maplecl::Option oFdecloneCtorDtor({"-fdeclone-ctor-dtor"}, + " -fdeclone-ctor-dtor \tFactor complex constructors and destructors to " + 
"favor space over speed.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-declone-ctor-dtor")); + +maplecl::Option oFdeduceInitList({"-fdeduce-init-list"}, + " -fdeduce-init-list \tenable deduction of std::initializer_list for a " + "template type parameter from a brace-enclosed initializer-list.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-deduce-init-list")); + +maplecl::Option oFdelayedBranch({"-fdelayed-branch"}, + " -fdelayed-branch \tAttempt to fill delay slots of branch instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-delayed-branch")); + +maplecl::Option oFdeleteDeadExceptions({"-fdelete-dead-exceptions"}, + " -fdelete-dead-exceptions \tDelete dead instructions that may throw " + "exceptions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-delete-dead-exceptions")); + +maplecl::Option oFdeleteNullPointerChecks({"-fdelete-null-pointer-checks"}, + " -fdelete-null-pointer-checks \tDelete useless null pointer checks.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-delete-null-pointer-checks")); + +maplecl::Option oFdevirtualize({"-fdevirtualize"}, + " -fdevirtualize \tTry to convert virtual calls to direct ones.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-devirtualize")); + +maplecl::Option oFdevirtualizeAtLtrans({"-fdevirtualize-at-ltrans"}, + " -fdevirtualize-at-ltrans \tStream extra data to support more aggressive " + "devirtualization in LTO local transformation mode.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-devirtualize-at-ltrans")); + +maplecl::Option oFdevirtualizeSpeculatively({"-fdevirtualize-speculatively"}, + " -fdevirtualize-speculatively \tPerform speculative devirtualization.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-devirtualize-speculatively")); + +maplecl::Option oFdiagnosticsGeneratePatch({"-fdiagnostics-generate-patch"}, + " -fdiagnostics-generate-patch 
\tPrint fix-it hints to stderr in unified " + "diff format.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-diagnostics-generate-patch")); + +maplecl::Option oFdiagnosticsParseableFixits({"-fdiagnostics-parseable-fixits"}, + " -fdiagnostics-parseable-fixits \tPrint fixit hints in machine-readable " + "form.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-diagnostics-parseable-fixits")); + +maplecl::Option oFdiagnosticsShowCaret({"-fdiagnostics-show-caret"}, + " -fdiagnostics-show-caret \tShow the source line with a caret indicating " + "the column.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-diagnostics-show-caret")); + +maplecl::Option oFdiagnosticsShowLocation({"-fdiagnostics-show-location"}, + " -fdiagnostics-show-location \tHow often to emit source location at the " + "beginning of line-wrapped diagnostics.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdiagnosticsShowOption({"-fdiagnostics-show-option"}, + " -fdiagnostics-show-option \tAmend appropriate diagnostic messages with " + "the command line option that controls them.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-diagnostics-show-option")); + +maplecl::Option oFdirectivesOnly({"-fdirectives-only"}, + " -fdirectives-only \tPreprocess directives only.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-directives-only")); + +maplecl::Option oFdisable({"-fdisable-"}, + " -fdisable- \t-fdisable-[tree|rtl|ipa]-=range1+range2 " + "disables an optimization pass.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdollarsInIdentifiers({"-fdollars-in-identifiers"}, + " -fdollars-in-identifiers \tPermit '$' as an identifier character.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-fno-dollars-in-identifiers")); + +maplecl::Option oFdse({"-fdse"}, + " -fdse \tUse the RTL dead store elimination pass.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-dse")); + +maplecl::Option oFdumpAdaSpec({"-fdump-ada-spec"}, + " -fdump-ada-spec \tWrite all declarations as Ada code transitively.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdumpClassHierarchy({"-fdump-class-hierarchy"}, + " -fdump-class-hierarchy \tC++ only)\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-class-hierarchy")); + +maplecl::Option oFdumpFinalInsns({"-fdump-final-insns"}, + " -fdump-final-insns \tDump the final internal representation (RTL) to " + "file.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdumpGoSpec({"-fdump-go-spec="}, + " -fdump-go-spec \tWrite all declarations to file as Go code.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdumpIpa({"-fdump-ipa"}, + " -fdump-ipa \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdumpNoaddr({"-fdump-noaddr"}, + " -fdump-noaddr \tSuppress output of addresses in debugging dumps.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-noaddr")); + +maplecl::Option oFdumpPasses({"-fdump-passes"}, + " -fdump-passes \tDump optimization passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-passes")); + +maplecl::Option oFdumpRtlAlignments({"-fdump-rtl-alignments"}, + " -fdump-rtl-alignments \tDump after branch alignments have been " + "computed.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdumpRtlAll({"-fdump-rtl-all"}, + " -fdump-rtl-all \tProduce all the dumps listed above.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-all")); + +maplecl::Option oFdumpRtlAsmcons({"-fdump-rtl-asmcons"}, + " -fdump-rtl-asmcons \tDump after fixing rtl statements that have " + "unsatisfied in/out constraints.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-asmcons")); + +maplecl::Option oFdumpRtlAuto_inc_dec({"-fdump-rtl-auto_inc_dec"}, + " -fdump-rtl-auto_inc_dec \tDump after auto-inc-dec discovery. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-auto_inc_dec")); + +maplecl::Option oFdumpRtlBarriers({"-fdump-rtl-barriers"}, + " -fdump-rtl-barriers \tDump after cleaning up the barrier instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-barriers")); + +maplecl::Option oFdumpRtlBbpart({"-fdump-rtl-bbpart"}, + " -fdump-rtl-bbpart \tDump after partitioning hot and cold basic blocks.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-bbpart")); + +maplecl::Option oFdumpRtlBbro({"-fdump-rtl-bbro"}, + " -fdump-rtl-bbro \tDump after block reordering.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-bbro")); + +maplecl::Option oFdumpRtlBtl2({"-fdump-rtl-btl2"}, + " -fdump-rtl-btl2 \t-fdump-rtl-btl1 and -fdump-rtl-btl2 enable dumping " + "after the two branch target load optimization passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-btl2")); + +maplecl::Option oFdumpRtlBypass({"-fdump-rtl-bypass"}, + " -fdump-rtl-bypass \tDump after jump bypassing and control flow " + "optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-bypass")); + +maplecl::Option oFdumpRtlCe1({"-fdump-rtl-ce1"}, + " -fdump-rtl-ce1 \tEnable dumping after the three if conversion passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-ce1")); + +maplecl::Option oFdumpRtlCe2({"-fdump-rtl-ce2"}, + " -fdump-rtl-ce2 \tEnable dumping after the three if conversion passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-ce2")); + +maplecl::Option oFdumpRtlCe3({"-fdump-rtl-ce3"}, + " -fdump-rtl-ce3 \tEnable dumping after the three if conversion passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-ce3")); + +maplecl::Option oFdumpRtlCombine({"-fdump-rtl-combine"}, + " -fdump-rtl-combine \tDump after the RTL instruction combination 
pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-combine")); + +maplecl::Option oFdumpRtlCompgotos({"-fdump-rtl-compgotos"}, + " -fdump-rtl-compgotos \tDump after duplicating the computed gotos.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-compgotos")); + +maplecl::Option oFdumpRtlCprop_hardreg({"-fdump-rtl-cprop_hardreg"}, + " -fdump-rtl-cprop_hardreg \tDump after hard register copy propagation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-cprop_hardreg")); + +maplecl::Option oFdumpRtlCsa({"-fdump-rtl-csa"}, + " -fdump-rtl-csa \tDump after combining stack adjustments.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-csa")); + +maplecl::Option oFdumpRtlCse1({"-fdump-rtl-cse1"}, + " -fdump-rtl-cse1 \tEnable dumping after the two common subexpression " + "elimination passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-cse1")); + +maplecl::Option oFdumpRtlCse2({"-fdump-rtl-cse2"}, + " -fdump-rtl-cse2 \tEnable dumping after the two common subexpression " + "elimination passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-cse2")); + +maplecl::Option oFdumpRtlDbr({"-fdump-rtl-dbr"}, + " -fdump-rtl-dbr \tDump after delayed branch scheduling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-dbr")); + +maplecl::Option oFdumpRtlDce({"-fdump-rtl-dce"}, + " -fdump-rtl-dce \tDump after the standalone dead code elimination " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-dce")); + +maplecl::Option oFdumpRtlDce1({"-fdump-rtl-dce1"}, + " -fdump-rtl-dce1 \tenable dumping after the two dead store elimination " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-dce1")); + +maplecl::Option oFdumpRtlDce2({"-fdump-rtl-dce2"}, + " -fdump-rtl-dce2 \tenable dumping after the two dead store 
elimination " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-dce2")); + +maplecl::Option oFdumpRtlDfinish({"-fdump-rtl-dfinish"}, + " -fdump-rtl-dfinish \tThis dump is defined but always produce empty " + "files.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-dfinish")); + +maplecl::Option oFdumpRtlDfinit({"-fdump-rtl-dfinit"}, + " -fdump-rtl-dfinit \tThis dump is defined but always produce empty " + "files.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-dfinit")); + +maplecl::Option oFdumpRtlEh({"-fdump-rtl-eh"}, + " -fdump-rtl-eh \tDump after finalization of EH handling code.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-eh")); + +maplecl::Option oFdumpRtlEh_ranges({"-fdump-rtl-eh_ranges"}, + " -fdump-rtl-eh_ranges \tDump after conversion of EH handling range " + "regions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-eh_ranges")); + +maplecl::Option oFdumpRtlExpand({"-fdump-rtl-expand"}, + " -fdump-rtl-expand \tDump after RTL generation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-expand")); + +maplecl::Option oFdumpRtlFwprop1({"-fdump-rtl-fwprop1"}, + " -fdump-rtl-fwprop1 \tenable dumping after the two forward propagation " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-fwprop1")); + +maplecl::Option oFdumpRtlFwprop2({"-fdump-rtl-fwprop2"}, + " -fdump-rtl-fwprop2 \tenable dumping after the two forward propagation " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-fwprop2")); + +maplecl::Option oFdumpRtlGcse1({"-fdump-rtl-gcse1"}, + " -fdump-rtl-gcse1 \tenable dumping after global common subexpression " + "elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-gcse1")); + +maplecl::Option oFdumpRtlGcse2({"-fdump-rtl-gcse2"}, + " -fdump-rtl-gcse2 \tenable 
dumping after global common subexpression " + "elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-gcse2")); + +maplecl::Option oFdumpRtlInitRegs({"-fdump-rtl-init-regs"}, + " -fdump-rtl-init-regs \tDump after the initialization of the registers.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-init-regs")); + +maplecl::Option oFdumpRtlInitvals({"-fdump-rtl-initvals"}, + " -fdump-rtl-initvals \tDump after the computation of the initial " + "value sets.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-initvals")); + +maplecl::Option oFdumpRtlInto_cfglayout({"-fdump-rtl-into_cfglayout"}, + " -fdump-rtl-into_cfglayout \tDump after converting to cfglayout mode.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-into_cfglayout")); + +maplecl::Option oFdumpRtlIra({"-fdump-rtl-ira"}, + " -fdump-rtl-ira \tDump after iterated register allocation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-ira")); + +maplecl::Option oFdumpRtlJump({"-fdump-rtl-jump"}, + " -fdump-rtl-jump \tDump after the second jump optimization.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-jump")); + +maplecl::Option oFdumpRtlLoop2({"-fdump-rtl-loop2"}, + " -fdump-rtl-loop2 \tenables dumping after the rtl loop optimization " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-loop2")); + +maplecl::Option oFdumpRtlMach({"-fdump-rtl-mach"}, + " -fdump-rtl-mach \tDump after performing the machine dependent " + "reorganization pass, if that pass exists.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-mach")); + +maplecl::Option oFdumpRtlMode_sw({"-fdump-rtl-mode_sw"}, + " -fdump-rtl-mode_sw \tDump after removing redundant mode switches.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-mode_sw")); + +maplecl::Option 
oFdumpRtlOutof_cfglayout({"-fdump-rtl-outof_cfglayout"}, + " -fdump-rtl-outof_cfglayout \tDump after converting from cfglayout " + "mode.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-outof_cfglayout")); + +maplecl::Option oFdumpRtlPass({"-fdump-rtl-pass"}, + " -fdump-rtl-pass \tSays to make debugging dumps during compilation at " + "times specified by letters.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFdumpRtlPeephole2({"-fdump-rtl-peephole2"}, + " -fdump-rtl-peephole2 \tDump after the peephole pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-peephole2")); + +maplecl::Option oFdumpRtlPostreload({"-fdump-rtl-postreload"}, + " -fdump-rtl-postreload \tDump after post-reload optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-postreload")); + +maplecl::Option oFdumpRtlPro_and_epilogue({"-fdump-rtl-pro_and_epilogue"}, + " -fdump-rtl-pro_and_epilogue \tDump after generating the function " + "prologues and epilogues.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-pro_and_epilogue")); + +maplecl::Option oFdumpRtlRee({"-fdump-rtl-ree"}, + " -fdump-rtl-ree \tDump after sign/zero extension elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-ree")); + +maplecl::Option oFdumpRtlRegclass({"-fdump-rtl-regclass"}, + " -fdump-rtl-regclass \tThis dump is defined but always produce " + "empty files.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-regclass")); + +maplecl::Option oFdumpRtlRnreg({"-fdump-rtl-rnreg"}, + " -fdump-rtl-rnreg \tDump after register renumbering.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-rnreg")); + +maplecl::Option oFdumpRtlSched1({"-fdump-rtl-sched1"}, + " -fdump-rtl-sched1 \tenable dumping after the basic block scheduling " + "passes.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-dump-rtl-sched1")); + +maplecl::Option oFdumpRtlSched2({"-fdump-rtl-sched2"}, + " -fdump-rtl-sched2 \tenable dumping after the basic block scheduling " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-sched2")); + +maplecl::Option oFdumpRtlSeqabstr({"-fdump-rtl-seqabstr"}, + " -fdump-rtl-seqabstr \tDump after common sequence discovery.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-seqabstr")); + +maplecl::Option oFdumpRtlShorten({"-fdump-rtl-shorten"}, + " -fdump-rtl-shorten \tDump after shortening branches.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-shorten")); + +maplecl::Option oFdumpRtlSibling({"-fdump-rtl-sibling"}, + " -fdump-rtl-sibling \tDump after sibling call optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-sibling")); + +maplecl::Option oFdumpRtlSms({"-fdump-rtl-sms"}, + " -fdump-rtl-sms \tDump after modulo scheduling. 
This pass is only " + "run on some architectures.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-sms")); + +maplecl::Option oFdumpRtlSplit1({"-fdump-rtl-split1"}, + " -fdump-rtl-split1 \tThis option enable dumping after five rounds of " + "instruction splitting.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-split1")); + +maplecl::Option oFdumpRtlSplit2({"-fdump-rtl-split2"}, + " -fdump-rtl-split2 \tThis option enable dumping after five rounds of " + "instruction splitting.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-split2")); + +maplecl::Option oFdumpRtlSplit3({"-fdump-rtl-split3"}, + " -fdump-rtl-split3 \tThis option enable dumping after five rounds of " + "instruction splitting.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-split3")); + +maplecl::Option oFdumpRtlSplit4({"-fdump-rtl-split4"}, + " -fdump-rtl-split4 \tThis option enable dumping after five rounds of " + "instruction splitting.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-split4")); + +maplecl::Option oFdumpRtlSplit5({"-fdump-rtl-split5"}, + " -fdump-rtl-split5 \tThis option enable dumping after five rounds of " + "instruction splitting.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-split5")); + +maplecl::Option oFdumpRtlStack({"-fdump-rtl-stack"}, + " -fdump-rtl-stack \tDump after conversion from GCC's " + "'flat register file' registers to the x87's stack-like registers. 
" + "This pass is only run on x86 variants.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-stack")); + +maplecl::Option oFdumpRtlSubreg1({"-fdump-rtl-subreg1"}, + " -fdump-rtl-subreg1 \tenable dumping after the two subreg expansion " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-subreg1")); + +maplecl::Option oFdumpRtlSubreg2({"-fdump-rtl-subreg2"}, + " -fdump-rtl-subreg2 \tenable dumping after the two subreg expansion " + "passes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-subreg2")); + +maplecl::Option oFdumpRtlSubregs_of_mode_finish({"-fdump-rtl-subregs_of_mode_finish"}, + " -fdump-rtl-subregs_of_mode_finish \tThis dump is defined but always " + "produce empty files.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-subregs_of_mode_finish")); + +maplecl::Option oFdumpRtlSubregs_of_mode_init({"-fdump-rtl-subregs_of_mode_init"}, + " -fdump-rtl-subregs_of_mode_init \tThis dump is defined but always " + "produce empty files.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-subregs_of_mode_init")); + +maplecl::Option oFdumpRtlUnshare({"-fdump-rtl-unshare"}, + " -fdump-rtl-unshare \tDump after all rtl has been unshared.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-unshare")); + +maplecl::Option oFdumpRtlVartrack({"-fdump-rtl-vartrack"}, + " -fdump-rtl-vartrack \tDump after variable tracking.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-vartrack")); + +maplecl::Option oFdumpRtlVregs({"-fdump-rtl-vregs"}, + " -fdump-rtl-vregs \tDump after converting virtual registers to " + "hard registers.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-rtl-vregs")); + +maplecl::Option oFdumpRtlWeb({"-fdump-rtl-web"}, + " -fdump-rtl-web \tDump after live range splitting.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-dump-rtl-web")); + +maplecl::Option oFdumpStatistics({"-fdump-statistics"}, + " -fdump-statistics \tEnable and control dumping of pass statistics " + "in a separate file.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-statistics")); + +maplecl::Option oFdumpTranslationUnit({"-fdump-translation-unit"}, + " -fdump-translation-unit \tC++ only\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-translation-unit")); + +maplecl::Option oFdumpTree({"-fdump-tree"}, + " -fdump-tree \tControl the dumping at various stages of processing the " + "intermediate language tree to a file.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-tree")); + +maplecl::Option oFdumpTreeAll({"-fdump-tree-all"}, + " -fdump-tree-all \tControl the dumping at various stages of processing " + "the intermediate language tree to a file.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-tree-all")); + +maplecl::Option oFdumpUnnumbered({"-fdump-unnumbered"}, + " -fdump-unnumbered \tWhen doing debugging dumps, suppress instruction " + "numbers and address output. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-unnumbered")); + +maplecl::Option oFdumpUnnumberedLinks({"-fdump-unnumbered-links"}, + " -fdump-unnumbered-links \tWhen doing debugging dumps (see -d option " + "above), suppress instruction numbers for the links to the previous and next " + "instructions in a sequence.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dump-unnumbered-links")); + +maplecl::Option oFdwarf2CfiAsm({"-fdwarf2-cfi-asm"}, + " -fdwarf2-cfi-asm \tEnable CFI tables via GAS assembler directives.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-dwarf2-cfi-asm")); + +maplecl::Option oFearlyInlining({"-fearly-inlining"}, + " -fearly-inlining \tPerform early inlining.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-early-inlining")); + +maplecl::Option oFeliminateDwarf2Dups({"-feliminate-dwarf2-dups"}, + " -feliminate-dwarf2-dups \tPerform DWARF duplicate elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-eliminate-dwarf2-dups")); + +maplecl::Option oFeliminateUnusedDebugSymbols({"-feliminate-unused-debug-symbols"}, + " -feliminate-unused-debug-symbols \tPerform unused symbol elimination " + "in debug info.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-eliminate-unused-debug-symbols")); + +maplecl::Option oFeliminateUnusedDebugTypes({"-feliminate-unused-debug-types"}, + " -feliminate-unused-debug-types \tPerform unused type elimination in " + "debug info.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-eliminate-unused-debug-types")); + +maplecl::Option oFemitClassDebugAlways({"-femit-class-debug-always"}, + " -femit-class-debug-always \tDo not suppress C++ class debug " + "information.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-emit-class-debug-always")); + +maplecl::Option oFemitStructDebugBaseonly({"-femit-struct-debug-baseonly"}, + " -femit-struct-debug-baseonly 
\tAggressive reduced debug info for " + "structs.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-emit-struct-debug-baseonly")); + +maplecl::Option oFemitStructDebugDetailedE({"-femit-struct-debug-detailed="}, + " -femit-struct-debug-detailed \t-femit-struct-debug-detailed= o" + "Detailed reduced debug info for structs.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFemitStructDebugReduced({"-femit-struct-debug-reduced"}, + " -femit-struct-debug-reduced \tConservative reduced debug info for " + "structs.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-emit-struct-debug-reduced")); + +maplecl::Option oFenable({"-fenable-"}, + " -fenable- \t-fenable-[tree|rtl|ipa]-=range1+range2 enables an " + "optimization pass.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFexceptions({"-fexceptions"}, + " -fexceptions \tEnable exception handling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-exceptions")); + +maplecl::Option oFexcessPrecision({"-fexcess-precision="}, + " -fexcess-precision= \tSpecify handling of excess floating-point " + "precision.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFexecCharset({"-fexec-charset="}, + " -fexec-charset= \tConvert all strings and character constants to " + "character set .\n", + {driverCategory, clangCategory}); + +maplecl::Option oFexpensiveOptimizations({"-fexpensive-optimizations"}, + " -fexpensive-optimizations \tPerform a number of minor, expensive " + "optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-expensive-optimizations")); + +maplecl::Option oFextNumericLiterals({"-fext-numeric-literals"}, + " -fext-numeric-literals \tInterpret imaginary, fixed-point, or other " + "gnu number suffix as the corresponding number literal rather than a user-defined " + "number literal.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ext-numeric-literals")); + +maplecl::Option 
oFextendedIdentifiers({"-fextended-identifiers"}, + " -fextended-identifiers \tPermit universal character names (\\u and \\U) in identifiers.\n", + {driverCategory, clangCategory}); + +maplecl::Option oFnoExtendedIdentifiers({"-fno-extended-identifiers"}, + " -fno-extended-identifiers \tDon't ermit universal character names (\\u and \\U) in identifiers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFexternTlsInit({"-fextern-tls-init"}, + " -fextern-tls-init \tSupport dynamic initialization of thread-local " + "variables in a different translation unit.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-extern-tls-init")); + +maplecl::Option oFfastMath({"-ffast-math"}, + " -ffast-math \tThis option causes the preprocessor macro __FAST_MATH__ " + "to be defined.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-fast-math")); + +maplecl::Option oFfatLtoObjects({"-ffat-lto-objects"}, + " -ffat-lto-objects \tOutput lto objects containing both the intermediate " + "language and binary output.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-fat-lto-objects")); + +maplecl::Option oFfiniteMathOnly({"-ffinite-math-only"}, + " -ffinite-math-only \tAssume no NaNs or infinities are generated.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-finite-math-only")); + +maplecl::Option oFfixAndContinue({"-ffix-and-continue"}, + " -ffix-and-continue \tGenerate code suitable for fast turnaround " + "development, such as to allow GDB to dynamically load .o files into already-running " + "programs.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-fix-and-continue")); + +maplecl::Option oFfixed({"-ffixed-"}, + " -ffixed- \t-ffixed- Mark oas being unavailable to " + "the compiler.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFfloatStore({"-ffloat-store"}, + " -ffloat-store \ton't allocate floats and doubles in extended-precision " + "registers.\n", + {driverCategory, 
unSupCategory}, + maplecl::DisableWith("-fno-float-store")); + +maplecl::Option oFforScope({"-ffor-scope"}, + " -ffor-scope \tScope of for-init-statement variables is local to the " + "loop.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-for-scope")); + +maplecl::Option oFforwardPropagate({"-fforward-propagate"}, + " -fforward-propagate \tPerform a forward propagation pass on RTL.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFfpContract ({"-ffp-contract="}, + " -ffp-contract= \tPerform floating-point expression contraction.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFfreestanding({"-ffreestanding"}, + " -ffreestanding \tDo not assume that standard C libraries and 'main' " + "exist.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-freestanding")); + +maplecl::Option oFfriendInjection({"-ffriend-injection"}, + " -ffriend-injection \tInject friend functions into enclosing namespace.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-friend-injection")); + +maplecl::Option oFgcse({"-fgcse"}, + " -fgcse \tPerform global common subexpression elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gcse")); + +maplecl::Option oFgcseAfterReload({"-fgcse-after-reload"}, + " -fgcse-after-reload \t-fgcse-after-reload\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gcse-after-reload")); + +maplecl::Option oFgcseLas({"-fgcse-las"}, + " -fgcse-las \tPerform redundant load after store elimination in global " + "common subexpression elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gcse-las")); + +maplecl::Option oFgcseLm({"-fgcse-lm"}, + " -fgcse-lm \tPerform enhanced load motion during global common " + "subexpression elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gcse-lm")); + +maplecl::Option oFgcseSm({"-fgcse-sm"}, + " -fgcse-sm \tPerform store motion after global common 
subexpression " + "elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gcse-sm")); + +maplecl::Option oFgimple({"-fgimple"}, + " -fgimple \tEnable parsing GIMPLE.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gimple")); + +maplecl::Option oFgnuRuntime({"-fgnu-runtime"}, + " -fgnu-runtime \tGenerate code for GNU runtime environment.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gnu-runtime")); + +maplecl::Option oFgnuTm({"-fgnu-tm"}, + " -fgnu-tm \tEnable support for GNU transactional memory.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gnu-tm")); + +maplecl::Option oFgnu89Inline({"-fgnu89-inline"}, + " -fgnu89-inline \tUse traditional GNU semantics for inline functions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gnu89-inline")); + +maplecl::Option oFgraphiteIdentity({"-fgraphite-identity"}, + " -fgraphite-identity \tEnable Graphite Identity transformation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-graphite-identity")); + +maplecl::Option oFhoistAdjacentLoads({"-fhoist-adjacent-loads"}, + " -fhoist-adjacent-loads \tEnable hoisting adjacent loads to encourage " + "generating conditional move instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-hoist-adjacent-loads")); + +maplecl::Option oFhosted({"-fhosted"}, + " -fhosted \tAssume normal C execution environment.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-hosted")); + +maplecl::Option oFifConversion({"-fif-conversion"}, + " -fif-conversion \tPerform conversion of conditional jumps to " + "branchless equivalents.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-if-conversion")); + +maplecl::Option oFifConversion2({"-fif-conversion2"}, + " -fif-conversion2 \tPerform conversion of conditional " + "jumps to conditional execution.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-if-conversion2")); + +maplecl::Option oFilelist({"-filelist"}, + " -filelist \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFindirectData({"-findirect-data"}, + " -findirect-data \tGenerate code suitable for fast turnaround " + "development, such as to allow GDB to dynamically load .o files into " + "already-running programs\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-indirect-data")); + +maplecl::Option oFindirectInlining({"-findirect-inlining"}, + " -findirect-inlining \tPerform indirect inlining.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-indirect-inlining")); + +maplecl::Option oFinhibitSizeDirective({"-finhibit-size-directive"}, + " -finhibit-size-directive \tDo not generate .size directives.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-inhibit-size-directive")); + +maplecl::Option oFinlineFunctions({"-finline-functions"}, + " -finline-functions \tIntegrate functions not declared 'inline' " + "into their callers when profitable.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-inline-functions")); + +maplecl::Option oFinlineFunctionsCalledOnce({"-finline-functions-called-once"}, + " -finline-functions-called-once \tIntegrate functions only required " + "by their single caller.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-inline-functions-called-once")); + +maplecl::Option oFinlineLimit({"-finline-limit-"}, + " -finline-limit- \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFinlineLimitE({"-finline-limit="}, + " -finline-limit= \tLimit the size of inlined functions to .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFinlineMatmulLimitE({"-finline-matmul-limit="}, + " -finline-matmul-limit= \tecify the size of the largest matrix for " + "which matmul will be inlined.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFinlineSmallFunctions({"-finline-small-functions"}, + " 
-finline-small-functions \tIntegrate functions into their callers" + " when code size is known not to grow.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-inline-small-functions")); + +maplecl::Option oFinputCharset({"-finput-charset="}, + " -finput-charset= \tSpecify the default character set for source files.\n", + {driverCategory, clangCategory}); + +maplecl::Option oFinstrumentFunctions({"-finstrument-functions"}, + " -finstrument-functions \tInstrument function entry and exit with " + "profiling calls.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-instrument-functions")); + +maplecl::Option oFinstrumentFunctionsExcludeFileList({"-finstrument-functions-exclude-file-list="}, + " -finstrument-functions-exclude-file-list= \t Do not instrument functions " + "listed in files.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFinstrumentFunctionsExcludeFunctionList({"-finstrument-functions-exclude-function-list="}, + " -finstrument-functions-exclude-function-list= \tDo not instrument " + "listed functions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFipaCp({"-fipa-cp"}, + " -fipa-cp \tPerform interprocedural constant propagation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-cp")); + +maplecl::Option oFipaCpClone({"-fipa-cp-clone"}, + " -fipa-cp-clone \tPerform cloning to make Interprocedural constant " + "propagation stronger.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-cp-clone")); + +maplecl::Option oFipaIcf({"-fipa-icf"}, + " -fipa-icf \tPerform Identical Code Folding for functions and " + "read-only variables.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-icf")); + +maplecl::Option oFipaProfile({"-fipa-profile"}, + " -fipa-profile \tPerform interprocedural profile propagation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-profile")); + +maplecl::Option oFipaPta({"-fipa-pta"}, + " -fipa-pta 
\tPerform interprocedural points-to analysis.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-pta")); + +maplecl::Option oFipaPureConst({"-fipa-pure-const"}, + " -fipa-pure-const \tDiscover pure and const functions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-pure-const")); + +maplecl::Option oFipaRa({"-fipa-ra"}, + " -fipa-ra \tUse caller save register across calls if possible.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-ra")); + +maplecl::Option oFipaReference({"-fipa-reference"}, + " -fipa-reference \tDiscover readonly and non addressable static " + "variables.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-reference")); + +maplecl::Option oFipaSra({"-fipa-sra"}, + " -fipa-sra \tPerform interprocedural reduction of aggregates.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ipa-sra")); + +maplecl::Option oFiraAlgorithmE({"-fira-algorithm="}, + " -fira-algorithm= \tSet the used IRA algorithm.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFiraHoistPressure({"-fira-hoist-pressure"}, + " -fira-hoist-pressure \tUse IRA based register pressure calculation in " + "RTL hoist optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ira-hoist-pressure")); + +maplecl::Option oFiraLoopPressure({"-fira-loop-pressure"}, + " -fira-loop-pressure \tUse IRA based register pressure calculation in " + "RTL loop optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ira-loop-pressure")); + +maplecl::Option oFiraRegion({"-fira-region="}, + " -fira-region= \tSet regions for IRA.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFiraVerbose({"-fira-verbose="}, + " -fira-verbose= \t Control IRA's level of diagnostic messages.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFisolateErroneousPathsAttribute({"-fisolate-erroneous-paths-attribute"}, + " 
-fisolate-erroneous-paths-attribute \tDetect paths that trigger " + "erroneous or undefined behavior due to a null value being used in a way forbidden " + "by a returns_nonnull or nonnull attribute. Isolate those paths from the main control " + "flow and turn the statement with erroneous or undefined behavior into a trap.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-isolate-erroneous-paths-attribute")); + +maplecl::Option oFisolateErroneousPathsDereference({"-fisolate-erroneous-paths-dereference"}, + " -fisolate-erroneous-paths-dereference \tDetect paths that trigger " + "erroneous or undefined behavior due to dereferencing a null pointer. Isolate those " + "paths from the main control flow and turn the statement with erroneous or undefined " + "behavior into a trap.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-isolate-erroneous-paths-dereference")); + +maplecl::Option oFivarVisibility({"-fivar-visibility="}, + " -fivar-visibility= \tSet the default symbol visibility.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFivopts({"-fivopts"}, + " -fivopts \tOptimize induction variables on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ivopts")); + +maplecl::Option oFkeepInlineFunctions({"-fkeep-inline-functions"}, + " -fkeep-inline-functions \tGenerate code for functions even if they " + "are fully inlined.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-keep-inline-functions")); + +maplecl::Option oFkeepStaticConsts({"-fkeep-static-consts"}, + " -fkeep-static-consts \tEmit static const variables even if they are" + " not used.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-keep-static-consts")); + +maplecl::Option oFkeepStaticFunctions({"-fkeep-static-functions"}, + " -fkeep-static-functions \tGenerate code for static functions even " + "if they are never called.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-keep-static-functions")); + +maplecl::Option oFlat_namespace({"-flat_namespace"}, + " -flat_namespace \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-lat_namespace")); + +maplecl::Option oFlaxVectorConversions({"-flax-vector-conversions"}, + " -flax-vector-conversions \tAllow implicit conversions between vectors " + "with differing numbers of subparts and/or differing element types.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-lax-vector-conversions")); + +maplecl::Option oFleadingUnderscore({"-fleading-underscore"}, + " -fleading-underscore \tGive external symbols a leading underscore.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-leading-underscore")); + +maplecl::Option oFliveRangeShrinkage({"-flive-range-shrinkage"}, + " -flive-range-shrinkage \tRelief of register pressure through live range " + "shrinkage\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-live-range-shrinkage")); + +maplecl::Option oFlocalIvars({"-flocal-ivars"}, + " -flocal-ivars \tAllow access to instance variables as if they were local" + " declarations within instance method implementations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-local-ivars")); + +maplecl::Option oFloopBlock({"-floop-block"}, + " -floop-block \tEnable loop nest transforms. Same as " + "-floop-nest-optimize.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-loop-block")); + +maplecl::Option oFloopInterchange({"-floop-interchange"}, + " -floop-interchange \tEnable loop nest transforms. 
Same as " + "-floop-nest-optimize.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-loop-interchange")); + +maplecl::Option oFloopNestOptimize({"-floop-nest-optimize"}, + " -floop-nest-optimize \tEnable the loop nest optimizer.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-loop-nest-optimize")); + +maplecl::Option oFloopParallelizeAll({"-floop-parallelize-all"}, + " -floop-parallelize-all \tMark all loops as parallel.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-loop-parallelize-all")); + +maplecl::Option oFloopStripMine({"-floop-strip-mine"}, + " -floop-strip-mine \tEnable loop nest transforms. " + "Same as -floop-nest-optimize.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-loop-strip-mine")); + +maplecl::Option oFloopUnrollAndJam({"-floop-unroll-and-jam"}, + " -floop-unroll-and-jam \tEnable loop nest transforms. " + "Same as -floop-nest-optimize.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-loop-unroll-and-jam")); + +maplecl::Option oFlraRemat({"-flra-remat"}, + " -flra-remat \tDo CFG-sensitive rematerialization in LRA.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-lra-remat")); + +maplecl::Option oFltoCompressionLevel({"-flto-compression-level="}, + " -flto-compression-level= \tUse zlib compression level ofor IL.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFltoOdrTypeMerging({"-flto-odr-type-merging"}, + " -flto-odr-type-merging \tMerge C++ types using One Definition Rule.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-lto-odr-type-merging")); + +maplecl::Option oFltoPartition({"-flto-partition="}, + " -flto-partition= \tSpecify the algorithm to partition symbols and " + "vars at linktime.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFltoReport({"-flto-report"}, + " -flto-report \tReport various link-time optimization statistics.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-lto-report")); + +maplecl::Option oFltoReportWpa({"-flto-report-wpa"}, + " -flto-report-wpa \tReport various link-time optimization statistics " + "for WPA only.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-lto-report-wpa")); + +maplecl::Option oFmemReport({"-fmem-report"}, + " -fmem-report \tReport on permanent memory allocation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-mem-report")); + +maplecl::Option oFmemReportWpa({"-fmem-report-wpa"}, + " -fmem-report-wpa \tReport on permanent memory allocation in WPA only.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-mem-report-wpa")); + +maplecl::Option oFmergeAllConstants({"-fmerge-all-constants"}, + " -fmerge-all-constants \tAttempt to merge identical constants and " + "constantvariables.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-merge-all-constants")); + +maplecl::Option oFmergeConstants({"-fmerge-constants"}, + " -fmerge-constants \tAttempt to merge identical constants across " + "compilation units.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-merge-constants")); + +maplecl::Option oFmergeDebugStrings({"-fmerge-debug-strings"}, + " -fmerge-debug-strings \tAttempt to merge identical debug strings " + "across compilation units.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-merge-debug-strings")); + +maplecl::Option oFmessageLength({"-fmessage-length="}, + " -fmessage-length= \t-fmessage-length= o Limit diagnostics to " + " ocharacters per line. 
0 suppresses line-wrapping.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFmoduloSched({"-fmodulo-sched"}, + " -fmodulo-sched \tPerform SMS based modulo scheduling before the first " + "scheduling pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-modulo-sched")); + +maplecl::Option oFmoduloSchedAllowRegmoves({"-fmodulo-sched-allow-regmoves"}, + " -fmodulo-sched-allow-regmoves \tPerform SMS based modulo scheduling with " + "register moves allowed.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-modulo-sched-allow-regmoves")); + +maplecl::Option oFmoveLoopInvariants({"-fmove-loop-invariants"}, + " -fmove-loop-invariants \tMove loop invariant computations " + "out of loops.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-move-loop-invariants")); + +maplecl::Option oFmsExtensions({"-fms-extensions"}, + " -fms-extensions \tDon't warn about uses of Microsoft extensions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ms-extensions")); + +maplecl::Option oFnewInheritingCtors({"-fnew-inheriting-ctors"}, + " -fnew-inheriting-ctors \tImplement C++17 inheriting constructor " + "semantics.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-new-inheriting-ctors")); + +maplecl::Option oFnewTtpMatching({"-fnew-ttp-matching"}, + " -fnew-ttp-matching \tImplement resolution of DR 150 for matching of " + "template template arguments.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-new-ttp-matching")); + +maplecl::Option oFnextRuntime({"-fnext-runtime"}, + " -fnext-runtime \tGenerate code for NeXT (Apple Mac OS X) runtime " + "environment.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFnoAccessControl({"-fno-access-control"}, + " -fno-access-control \tTurn off all access checking.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFnoAsm({"-fno-asm"}, + " -fno-asm \tDo not recognize asm, inline or typeof as a keyword, " + "so 
that code can use these words as identifiers. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oFnoBranchCountReg({"-fno-branch-count-reg"}, + " -fno-branch-count-reg \tAvoid running a pass scanning for opportunities" + " to use “decrement and branch” instructions on a count register instead of generating " + "sequences of instructions that decrement a register, compare it against zero, and then" + " branch based upon the result.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFnoBuiltin({"-fno-builtin", "-fno-builtin-function"}, + " -fno-builtin \tDon't recognize built-in functions that do not begin " + "with '__builtin_' as prefix.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFnoCanonicalSystemHeaders({"-fno-canonical-system-headers"}, + " -fno-canonical-system-headers \tWhen preprocessing, do not shorten " + "system header paths with canonicalization.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFCheckPointerBounds({"-fcheck-pointer-bounds"}, + " -fcheck-pointer-bounds \tEnable Pointer Bounds Checker " + "instrumentation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-check-pointer-bounds")); + +maplecl::Option oFChecking({"-fchecking"}, + " -fchecking \tPerform internal consistency checkings.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-checking")); + +maplecl::Option oFCheckingE({"-fchecking="}, + " -fchecking= \tPerform internal consistency checkings.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFChkpCheckIncompleteType({"-fchkp-check-incomplete-type"}, + " -fchkp-check-incomplete-type \tGenerate pointer bounds checks for " + "variables with incomplete type.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-check-incomplete-type")); + +maplecl::Option oFChkpCheckRead({"-fchkp-check-read"}, + " -fchkp-check-read \tGenerate checks for all read accesses to memory.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-chkp-check-read")); + +maplecl::Option oFChkpCheckWrite({"-fchkp-check-write"}, + " -fchkp-check-write \tGenerate checks for all write accesses to memory.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-check-write")); + +maplecl::Option oFChkpFirstFieldHasOwnBounds({"-fchkp-first-field-has-own-bounds"}, + " -fchkp-first-field-has-own-bounds \tForces Pointer Bounds Checker to " + "use narrowed bounds for address of the first field in the structure. By default " + "pointer to the first field has the same bounds as pointer to the whole structure.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-chkp-first-field-has-own-bounds")); + +maplecl::Option oFDefaultInline({"-fdefault-inline"}, + " -fdefault-inline \tDoes nothing. Preserved for backward " + "compatibility.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-default-inline")); + +maplecl::Option oFdefaultInteger8({"-fdefault-integer-8"}, + " -fdefault-integer-8 \tSet the default integer kind to an 8 byte " + "wide type.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-default-integer-8")); + +maplecl::Option oFdefaultReal8({"-fdefault-real-8"}, + " -fdefault-real-8 \tSet the default real kind to an 8 byte wide type.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-default-real-8")); + +maplecl::Option oFDeferPop({"-fdefer-pop"}, + " -fdefer-pop \tDefer popping functions args from stack until later.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-defer-pop")); + +maplecl::Option oFElideConstructors({"-felide-constructors"}, + " -felide-constructors \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("fno-elide-constructors")); + +maplecl::Option oFEnforceEhSpecs({"-fenforce-eh-specs"}, + " -fenforce-eh-specs \tGenerate code to check exception " + "specifications.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-enforce-eh-specs")); + 
+maplecl::Option oFFpIntBuiltinInexact({"-ffp-int-builtin-inexact"}, + " -ffp-int-builtin-inexact \tAllow built-in functions ceil, floor, " + "round, trunc to raise 'inexact' exceptions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-fp-int-builtin-inexact")); + +maplecl::Option oFFunctionCse({"-ffunction-cse"}, + " -ffunction-cse \tAllow function addresses to be held in registers.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-function-cse")); + +maplecl::Option oFGnuKeywords({"-fgnu-keywords"}, + " -fgnu-keywords \tRecognize GNU-defined keywords.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gnu-keywords")); + +maplecl::Option oFGnuUnique({"-fgnu-unique"}, + " -fgnu-unique \tUse STB_GNU_UNIQUE if supported by the assembler.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-gnu-unique")); + +maplecl::Option oFGuessBranchProbability({"-fguess-branch-probability"}, + " -fguess-branch-probability \tEnable guessing of branch probabilities.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-guess-branch-probability")); + +maplecl::Option oFIdent({"-fident"}, + " -fident \tProcess #ident directives.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ident")); + +maplecl::Option oFImplementInlines({"-fimplement-inlines"}, + " -fimplement-inlines \tExport functions even if they can be inlined.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-implement-inlines")); + +maplecl::Option oFImplicitInlineTemplates({"-fimplicit-inline-templates"}, + " -fimplicit-inline-templates \tEmit implicit instantiations of inline " + "templates.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-implicit-inline-templates")); + +maplecl::Option oFImplicitTemplates({"-fimplicit-templates"}, + " -fimplicit-templates \tEmit implicit instantiations of templates.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("no-implicit-templates")); + +maplecl::Option oFIraShareSaveSlots({"-fira-share-save-slots"}, + " -fira-share-save-slots \tShare slots for saving different hard " + "registers.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ira-share-save-slots")); + +maplecl::Option oFIraShareSpillSlots({"-fira-share-spill-slots"}, + " -fira-share-spill-slots \tShare stack slots for spilled " + "pseudo-registers.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ira-share-spill-slots")); + +maplecl::Option oFJumpTables({"-fjump-tables"}, + " -fjump-tables \tUse jump tables for sufficiently large " + "switch statements.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-jump-tables")); + +maplecl::Option oFKeepInlineDllexport({"-fkeep-inline-dllexport"}, + " -fkeep-inline-dllexport \tDon't emit dllexported inline functions " + "unless needed.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-keep-inline-dllexport")); + +maplecl::Option oFLifetimeDse({"-flifetime-dse"}, + " -flifetime-dse \tTell DSE that the storage for a C++ object is " + "dead when the constructor starts and when the destructor finishes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-lifetime-dse")); + +maplecl::Option oFMathErrno({"-fmath-errno"}, + " -fmath-errno \tSet errno after built-in math functions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-math-errno")); + +maplecl::Option oFNilReceivers({"-fnil-receivers"}, + " -fnil-receivers \tAssume that receivers of Objective-C " + "messages may be nil.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-nil-receivers")); + +maplecl::Option oFNonansiBuiltins({"-fnonansi-builtins"}, + " -fnonansi-builtins \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-nonansi-builtins")); + +maplecl::Option oFOperatorNames({"-foperator-names"}, + " -foperator-names \tRecognize C++ keywords like 'compl' and 
'xor'.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-operator-names")); + +maplecl::Option oFOptionalDiags({"-foptional-diags"}, + " -foptional-diags \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-optional-diags")); + +maplecl::Option oFPeephole({"-fpeephole"}, + " -fpeephole \tEnable machine specific peephole optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-peephole")); + +maplecl::Option oFPeephole2({"-fpeephole2"}, + " -fpeephole2 \tEnable an RTL peephole pass before sched2.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-peephole2")); + +maplecl::Option oFPrettyTemplates({"-fpretty-templates"}, + " -fpretty-templates \tpretty-print template specializations as " + "the template signature followed by the arguments.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-pretty-templates")); + +maplecl::Option oFPrintfReturnValue({"-fprintf-return-value"}, + " -fprintf-return-value \tTreat known sprintf return values as " + "constants.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-printf-return-value")); + +maplecl::Option oFRtti({"-frtti"}, + " -frtti \tGenerate run time type descriptor information.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-rtti")); + +maplecl::Option oFnoSanitizeAll({"-fno-sanitize=all"}, + " -fno-sanitize=all \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFSchedInterblock({"-fsched-interblock"}, + " -fsched-interblock \tEnable scheduling across basic blocks.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-interblock")); + +maplecl::Option oFSchedSpec({"-fsched-spec"}, + " -fsched-spec \tAllow speculative motion of non-loads.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-spec")); + +maplecl::Option oFnoSetStackExecutable({"-fno-set-stack-executable"}, + " -fno-set-stack-executable \t\n", + {driverCategory, 
unSupCategory}); + +maplecl::Option oFShowColumn({"-fshow-column"}, + " -fshow-column \tShow column numbers in diagnostics, " + "when available. Default on.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-show-column")); + +maplecl::Option oFSignedZeros({"-fsigned-zeros"}, + " -fsigned-zeros \tDisable floating point optimizations that " + "ignore the IEEE signedness of zero.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-signed-zeros")); + +maplecl::Option oFStackLimit({"-fstack-limit"}, + " -fstack-limit \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stack-limit")); + +maplecl::Option oFThreadsafeStatics({"-fthreadsafe-statics"}, + " -fthreadsafe-statics \tDo not generate thread-safe code for " + "initializing local statics.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-threadsafe-statics")); + +maplecl::Option oFToplevelReorder({"-ftoplevel-reorder"}, + " -ftoplevel-reorder \tReorder top level functions, variables, and asms.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-toplevel-reorder")); + +maplecl::Option oFTrappingMath({"-ftrapping-math"}, + " -ftrapping-math \tAssume floating-point operations can trap.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-trapping-math")); + +maplecl::Option oFUseCxaGetExceptionPtr({"-fuse-cxa-get-exception-ptr"}, + " -fuse-cxa-get-exception-ptr \tUse __cxa_get_exception_ptr in " + "exception handling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-use-cxa-get-exception-ptr")); + +maplecl::Option oFWeak({"-fweak"}, + " -fweak \tEmit common-like symbols as weak symbols.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-weak")); + +maplecl::Option oFnoWritableRelocatedRdata({"-fno-writable-relocated-rdata"}, + " -fno-writable-relocated-rdata \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFZeroInitializedInBss({"-fzero-initialized-in-bss"}, + " 
-fzero-initialized-in-bss \tPut zero initialized data in the" + " bss section.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-zero-initialized-in-bss")); + +maplecl::Option oFnonCallExceptions({"-fnon-call-exceptions"}, + " -fnon-call-exceptions \tSupport synchronous non-call exceptions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-non-call-exceptions")); + +maplecl::Option oFnothrowOpt({"-fnothrow-opt"}, + " -fnothrow-opt \tTreat a throw() exception specification as noexcept to " + "improve code size.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-nothrow-opt")); + +maplecl::Option oFobjcAbiVersion({"-fobjc-abi-version="}, + " -fobjc-abi-version= \tSpecify which ABI to use for Objective-C " + "family code and meta-data generation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFobjcCallCxxCdtors({"-fobjc-call-cxx-cdtors"}, + " -fobjc-call-cxx-cdtors \tGenerate special Objective-C methods to " + "initialize/destroy non-POD C++ ivars\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-objc-call-cxx-cdtors")); + +maplecl::Option oFobjcDirectDispatch({"-fobjc-direct-dispatch"}, + " -fobjc-direct-dispatch \tAllow fast jumps to the message dispatcher.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-objc-direct-dispatch")); + +maplecl::Option oFobjcExceptions({"-fobjc-exceptions"}, + " -fobjc-exceptions \tEnable Objective-C exception and synchronization " + "syntax.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-objc-exceptions")); + +maplecl::Option oFobjcGc({"-fobjc-gc"}, + " -fobjc-gc \tEnable garbage collection (GC) in Objective-C/Objective-C++ " + "programs.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-objc-gc")); + +maplecl::Option oFobjcNilcheck({"-fobjc-nilcheck"}, + " -fobjc-nilcheck \tEnable inline checks for nil receivers with the NeXT " + "runtime and ABI version 2.\n", + {driverCategory, unSupCategory}, 
+ maplecl::DisableWith("-fno-objc-nilcheck")); + +maplecl::Option oFobjcSjljExceptions({"-fobjc-sjlj-exceptions"}, + " -fobjc-sjlj-exceptions \tEnable Objective-C setjmp exception " + "handling runtime.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-objc-sjlj-exceptions")); + +maplecl::Option oFobjcStd({"-fobjc-std=objc1"}, + " -fobjc-std \tConform to the Objective-C 1.0 language as " + "implemented in GCC 4.0.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFoffloadAbi({"-foffload-abi="}, + " -foffload-abi= \tSet the ABI to use in an offload compiler.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFoffload({"-foffload="}, + " -foffload= \tSpecify offloading targets and options for them.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFopenacc({"-fopenacc"}, + " -fopenacc \tEnable OpenACC.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-openacc")); + +maplecl::Option oFopenaccDim({"-fopenacc-dim="}, + " -fopenacc-dim= \tSpecify default OpenACC compute dimensions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFopenmp({"-fopenmp"}, + " -fopenmp \tEnable OpenMP (implies -frecursive in Fortran).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-openmp")); + +maplecl::Option oFopenmpSimd({"-fopenmp-simd"}, + " -fopenmp-simd \tEnable OpenMP's SIMD directives.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-openmp-simd")); + +maplecl::Option oFoptInfo({"-fopt-info"}, + " -fopt-info \tEnable all optimization info dumps on stderr.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-opt-info")); + +maplecl::Option oFoptimizeStrlen({"-foptimize-strlen"}, + " -foptimize-strlen \tEnable string length optimizations on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-optimize-strlen")); + +maplecl::Option oForce_cpusubtype_ALL({"-force_cpusubtype_ALL"}, + " -force_cpusubtype_ALL \tThis causes GCC's output 
file to have the " + "'ALL' subtype, instead of one controlled by the -mcpu or -march option.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oForce_flat_namespace({"-force_flat_namespace"}, + " -force_flat_namespace \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFpackStruct({"-fpack-struct"}, + " -fpack-struct \tPack structure members together without holes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-pack-struct")); + +maplecl::Option oFpartialInlining({"-fpartial-inlining"}, + " -fpartial-inlining \tPerform partial inlining.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-partial-inlining")); + +maplecl::Option oFpccStructReturn({"-fpcc-struct-return"}, + " -fpcc-struct-return \tReturn small aggregates in memory\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-pcc-struct-return")); + +maplecl::Option oFpchDeps({"-fpch-deps"}, + " -fpch-deps \tWhen using precompiled headers (see Precompiled Headers), " + "this flag causes the dependency-output flags to also list the files from the " + "precompiled header's dependencies.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-pch-deps")); + +maplecl::Option oFpchPreprocess({"-fpch-preprocess"}, + " -fpch-preprocess \tLook for and use PCH files even when preprocessing.\n", + {driverCategory, clangCategory}); + +maplecl::Option oFnoPchPreprocess({"-fno-pch-preprocess"}, + " -fno-pch-preprocess \tLook for and use PCH files even when preprocessing.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFpeelLoops({"-fpeel-loops"}, + " -fpeel-loops \tPerform loop peeling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-peel-loops")); + +maplecl::Option oFpermissive({"-fpermissive"}, + " -fpermissive \tDowngrade conformance errors to warnings.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-permissive")); + +maplecl::Option 
oFpermittedFltEvalMethods({"-fpermitted-flt-eval-methods="}, + " -fpermitted-flt-eval-methods= \tSpecify which values of FLT_EVAL_METHOD" + " are permitted.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFplan9Extensions({"-fplan9-extensions"}, + " -fplan9-extensions \tEnable Plan 9 language extensions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-plan9-extensions")); + +maplecl::Option oFplugin({"-fplugin="}, + " -fplugin= \tSpecify a plugin to load.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFpluginArg({"-fplugin-arg-"}, + " -fplugin-arg- \tSpecify argument = ofor plugin .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFpostIpaMemReport({"-fpost-ipa-mem-report"}, + " -fpost-ipa-mem-report \tReport on memory allocation before " + "interprocedural optimization.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-post-ipa-mem-report")); + +maplecl::Option oFpreIpaMemReport({"-fpre-ipa-mem-report"}, + " -fpre-ipa-mem-report \tReport on memory allocation before " + "interprocedural optimization.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-pre-ipa-mem-report")); + +maplecl::Option oFpredictiveCommoning({"-fpredictive-commoning"}, + " -fpredictive-commoning \tRun predictive commoning optimization.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-predictive-commoning")); + +maplecl::Option oFprefetchLoopArrays({"-fprefetch-loop-arrays"}, + " -fprefetch-loop-arrays \tGenerate prefetch instructions\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-prefetch-loop-arrays")); + +maplecl::Option oFpreprocessed({"-fpreprocessed"}, + " -fpreprocessed \tTreat the input file as already preprocessed.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-preprocessed")); + +maplecl::Option oFprofileArcs({"-fprofile-arcs"}, + " -fprofile-arcs \tInsert arc-based program profiling code.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-profile-arcs")); + +maplecl::Option oFprofileCorrection({"-fprofile-correction"}, + " -fprofile-correction \tEnable correction of flow inconsistent profile " + "data input.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-profile-correction")); + +maplecl::Option oFprofileDir({"-fprofile-dir="}, + " -fprofile-dir= \tSet the top-level directory for storing the profile " + "data. The default is 'pwd'.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFprofileGenerate({"-fprofile-generate"}, + " -fprofile-generate \tEnable common options for generating profile " + "info for profile feedback directed optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-profile-generate")); + +maplecl::Option oFprofileReorderFunctions({"-fprofile-reorder-functions"}, + " -fprofile-reorder-functions \tEnable function reordering that " + "improves code placement.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-profile-reorder-functions")); + +maplecl::Option oFprofileReport({"-fprofile-report"}, + " -fprofile-report \tReport on consistency of profile.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-profile-report")); + +maplecl::Option oFprofileUpdate({"-fprofile-update="}, + " -fprofile-update= \tSet the profile update method.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFprofileUse({"-fprofile-use"}, + " -fprofile-use \tEnable common options for performing profile feedback " + "directed optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-profile-use")); + +maplecl::Option oFprofileUseE({"-fprofile-use="}, + " -fprofile-use= \tEnable common options for performing profile feedback " + "directed optimizations, and set -fprofile-dir=.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFprofileValues({"-fprofile-values"}, + " -fprofile-values \tInsert code to profile values of expressions.\n", + {driverCategory, 
unSupCategory}, + maplecl::DisableWith("-fno-profile-values")); + +maplecl::Option oFpu({"-fpu"}, + " -fpu \tEnables (-fpu) or disables (-nofpu) the use of RX " + "floating-point hardware. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-nofpu")); + +maplecl::Option oFrandomSeed({"-frandom-seed"}, + " -frandom-seed \tMake compile reproducible using .\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-random-seed")); + +maplecl::Option oFrandomSeedE({"-frandom-seed="}, + " -frandom-seed= \tMake compile reproducible using .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFreciprocalMath({"-freciprocal-math"}, + " -freciprocal-math \tSame as -fassociative-math for expressions which " + "include division.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-reciprocal-math")); + +maplecl::Option oFrecordGccSwitches({"-frecord-gcc-switches"}, + " -frecord-gcc-switches \tRecord gcc command line switches in the " + "object file.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-record-gcc-switches")); + +maplecl::Option oFree({"-free"}, + " -free \tTurn on Redundant Extensions Elimination pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ree")); + +maplecl::Option oFrenameRegisters({"-frename-registers"}, + " -frename-registers \tPerform a register renaming optimization pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-rename-registers")); + +maplecl::Option oFreorderBlocks({"-freorder-blocks"}, + " -freorder-blocks \tReorder basic blocks to improve code placement.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-reorder-blocks")); + +maplecl::Option oFreorderBlocksAlgorithm({"-freorder-blocks-algorithm="}, + " -freorder-blocks-algorithm= \tSet the used basic block reordering " + "algorithm.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFreorderBlocksAndPartition({"-freorder-blocks-and-partition"}, + " 
-freorder-blocks-and-partition \tReorder basic blocks and partition into " + "hot and cold sections.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-reorder-blocks-and-partition")); + +maplecl::Option oFreorderFunctions({"-freorder-functions"}, + " -freorder-functions \tReorder functions to improve code placement.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-reorder-functions")); + +maplecl::Option oFreplaceObjcClasses({"-freplace-objc-classes"}, + " -freplace-objc-classes \tUsed in Fix-and-Continue mode to indicate that" + " object files may be swapped in at runtime.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-replace-objc-classes")); + +maplecl::Option oFrepo({"-frepo"}, + " -frepo \tEnable automatic template instantiation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-repo")); + +maplecl::Option oFreportBug({"-freport-bug"}, + " -freport-bug \tCollect and dump debug information into temporary file " + "if ICE in C/C++ compiler occurred.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-report-bug")); + +maplecl::Option oFrerunCseAfterLoop({"-frerun-cse-after-loop"}, + " -frerun-cse-after-loop \tAdd a common subexpression elimination pass " + "after loop optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-rerun-cse-after-loop")); + +maplecl::Option oFrescheduleModuloScheduledLoops({"-freschedule-modulo-scheduled-loops"}, + " -freschedule-modulo-scheduled-loops \tEnable/Disable the traditional " + "scheduling in loops that already passed modulo scheduling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-reschedule-modulo-scheduled-loops")); + +maplecl::Option oFroundingMath({"-frounding-math"}, + " -frounding-math \tDisable optimizations that assume default FP " + "rounding behavior.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-rounding-math")); + +maplecl::Option 
oFsanitizeAddressUseAfterScope({"-fsanitize-address-use-after-scope"}, + " -fsanitize-address-use-after-scope \tEnable sanitization of local " + "variables to detect use-after-scope bugs. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sanitize-address-use-after-scope")); + +maplecl::Option oFsanitizeCoverageTracePc({"-fsanitize-coverage=trace-pc"}, + " -fsanitize-coverage=trace-pc \tEnable coverage-guided fuzzing code i" + "nstrumentation. Inserts call to __sanitizer_cov_trace_pc into every basic block.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sanitize-coverage=trace-pc")); + +maplecl::Option oFsanitizeRecover({"-fsanitize-recover"}, + " -fsanitize-recover \tAfter diagnosing undefined behavior attempt to " + "continue execution.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sanitize-recover")); + +maplecl::Option oFsanitizeRecoverE({"-fsanitize-recover="}, + " -fsanitize-recover= \tAfter diagnosing undefined behavior attempt to " + "continue execution.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFsanitizeSections({"-fsanitize-sections="}, + " -fsanitize-sections= \tSanitize global variables in user-defined " + "sections.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFsanitizeUndefinedTrapOnError({"-fsanitize-undefined-trap-on-error"}, + " -fsanitize-undefined-trap-on-error \tUse trap instead of a library " + "function for undefined behavior sanitization.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sanitize-undefined-trap-on-error")); + +maplecl::Option oFsanitize({"-fsanitize"}, + " -fsanitize \tSelect what to sanitize.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sanitize")); + +maplecl::Option oFschedCriticalPathHeuristic({"-fsched-critical-path-heuristic"}, + " -fsched-critical-path-heuristic \tEnable the critical path heuristic " + "in the scheduler.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-sched-critical-path-heuristic")); + +maplecl::Option oFschedDepCountHeuristic({"-fsched-dep-count-heuristic"}, + " -fsched-dep-count-heuristic \tEnable the dependent count heuristic in " + "the scheduler.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-dep-count-heuristic")); + +maplecl::Option oFschedGroupHeuristic({"-fsched-group-heuristic"}, + " -fsched-group-heuristic \tEnable the group heuristic in the scheduler.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-group-heuristic")); + +maplecl::Option oFschedLastInsnHeuristic({"-fsched-last-insn-heuristic"}, + " -fsched-last-insn-heuristic \tEnable the last instruction heuristic " + "in the scheduler.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-last-insn-heuristic")); + +maplecl::Option oFschedPressure({"-fsched-pressure"}, + " -fsched-pressure \tEnable register pressure sensitive insn scheduling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-pressure")); + +maplecl::Option oFschedRankHeuristic({"-fsched-rank-heuristic"}, + " -fsched-rank-heuristic \tEnable the rank heuristic in the scheduler.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-rank-heuristic")); + +maplecl::Option oFschedSpecInsnHeuristic({"-fsched-spec-insn-heuristic"}, + " -fsched-spec-insn-heuristic \tEnable the speculative instruction " + "heuristic in the scheduler.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-spec-insn-heuristic")); + +maplecl::Option oFschedSpecLoad({"-fsched-spec-load"}, + " -fsched-spec-load \tAllow speculative motion of some loads.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-spec-load")); + +maplecl::Option oFschedSpecLoadDangerous({"-fsched-spec-load-dangerous"}, + " -fsched-spec-load-dangerous \tAllow speculative motion of more loads.\n", + {driverCategory, unSupCategory}, + 
maplecl::DisableWith("-fno-sched-spec-load-dangerous")); + +maplecl::Option oFschedStalledInsns({"-fsched-stalled-insns"}, + " -fsched-stalled-insns \tAllow premature scheduling of queued insns.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-stalled-insns")); + +maplecl::Option oFschedStalledInsnsDep({"-fsched-stalled-insns-dep"}, + " -fsched-stalled-insns-dep \tSet dependence distance checking in " + "premature scheduling of queued insns.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-stalled-insns-dep")); + +maplecl::Option oFschedVerbose({"-fsched-verbose"}, + " -fsched-verbose \tSet the verbosity level of the scheduler.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched-verbose")); + +maplecl::Option oFsched2UseSuperblocks({"-fsched2-use-superblocks"}, + " -fsched2-use-superblocks \tIf scheduling post reload\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sched2-use-superblocks")); + +maplecl::Option oFscheduleFusion({"-fschedule-fusion"}, + " -fschedule-fusion \tPerform a target dependent instruction fusion" + " optimization pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-schedule-fusion")); + +maplecl::Option oFscheduleInsns({"-fschedule-insns"}, + " -fschedule-insns \tReschedule instructions before register allocation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-schedule-insns")); + +maplecl::Option oFscheduleInsns2({"-fschedule-insns2"}, + " -fschedule-insns2 \tReschedule instructions after register allocation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-schedule-insns2")); + +maplecl::Option oFsectionAnchors({"-fsection-anchors"}, + " -fsection-anchors \tAccess data in the same section from shared " + "anchor points.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-section-anchors")); + +maplecl::Option oFselSchedPipelining({"-fsel-sched-pipelining"}, + " 
-fsel-sched-pipelining \tPerform software pipelining of inner " + "loops during selective scheduling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sel-sched-pipelining")); + +maplecl::Option oFselSchedPipeliningOuterLoops({"-fsel-sched-pipelining-outer-loops"}, + " -fsel-sched-pipelining-outer-loops \tPerform software pipelining of " + "outer loops during selective scheduling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sel-sched-pipelining-outer-loops")); + +maplecl::Option oFselectiveScheduling({"-fselective-scheduling"}, + " -fselective-scheduling \tSchedule instructions using selective " + "scheduling algorithm.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-selective-scheduling")); + +maplecl::Option oFselectiveScheduling2({"-fselective-scheduling2"}, + " -fselective-scheduling2 \tRun selective scheduling after reload.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-selective-scheduling2")); + +maplecl::Option oFshortEnums({"-fshort-enums"}, + " -fshort-enums \tUse the narrowest integer type possible for " + "enumeration types.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-short-enums")); + +maplecl::Option oFshortWchar({"-fshort-wchar"}, + " -fshort-wchar \tForce the underlying type for 'wchar_t' to " + "be 'unsigned short'.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-short-wchar")); + +maplecl::Option oFshrinkWrap({"-fshrink-wrap"}, + " -fshrink-wrap \tEmit function prologues only before parts of the " + "function that need it\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-shrink-wrap")); + +maplecl::Option oFshrinkWrapSeparate({"-fshrink-wrap-separate"}, + " -fshrink-wrap-separate \tShrink-wrap parts of the prologue and " + "epilogue separately.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-shrink-wrap-separate")); + +maplecl::Option oFsignalingNans({"-fsignaling-nans"}, + " 
-fsignaling-nans \tDisable optimizations observable by IEEE " + "signaling NaNs.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-signaling-nans")); + +maplecl::Option oFsignedBitfields({"-fsigned-bitfields"}, + " -fsigned-bitfields \tWhen 'signed' or 'unsigned' is not given " + "make the bitfield signed.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-signed-bitfields")); + +maplecl::Option oFsimdCostModel({"-fsimd-cost-model"}, + " -fsimd-cost-model \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-simd-cost-model")); + +maplecl::Option oFsinglePrecisionConstant({"-fsingle-precision-constant"}, + " -fsingle-precision-constant \tConvert floating point constants to " + "single precision constants.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-single-precision-constant")); + +maplecl::Option oFsizedDeallocation({"-fsized-deallocation"}, + " -fsized-deallocation \tEnable C++14 sized deallocation support.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sized-deallocation")); + +maplecl::Option oFsplitIvsInUnroller({"-fsplit-ivs-in-unroller"}, + " -fsplit-ivs-in-unroller \tSplit lifetimes of induction variables " + "when loops are unrolled.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-split-ivs-in-unroller")); + +maplecl::Option oFsplitLoops({"-fsplit-loops"}, + " -fsplit-loops \tPerform loop splitting.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-split-loops")); + +maplecl::Option oFsplitPaths({"-fsplit-paths"}, + " -fsplit-paths \tSplit paths leading to loop backedges.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-split-paths")); + +maplecl::Option oFsplitStack({"-fsplit-stack"}, + " -fsplit-stack \tGenerate discontiguous stack frames.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-split-stack")); + +maplecl::Option oFsplitWideTypes({"-fsplit-wide-types"}, + " -fsplit-wide-types 
\tSplit wide types into independent registers.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-split-wide-types")); + +maplecl::Option oFssaBackprop({"-fssa-backprop"}, + " -fssa-backprop \tEnable backward propagation of use properties at " + "the SSA level.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ssa-backprop")); + +maplecl::Option oFssaPhiopt({"-fssa-phiopt"}, + " -fssa-phiopt \tOptimize conditional patterns using SSA PHI nodes.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-ssa-phiopt")); + +maplecl::Option oFssoStruct({"-fsso-struct"}, + " -fsso-struct \tSet the default scalar storage order.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sso-struct")); + +maplecl::Option oFstackCheck({"-fstack-check"}, + " -fstack-check \tInsert stack checking code into the program. Same " + "as -fstack-check=specific.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stack-check")); + +maplecl::Option oFstackCheckE({"-fstack-check="}, + " -fstack-check= \t-fstack-check=[no|generic|specific] Insert stack " + "checking code into the program.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFstackLimitRegister({"-fstack-limit-register="}, + " -fstack-limit-register= \tTrap if the stack goes past \n", + {driverCategory, unSupCategory}); + +maplecl::Option oFstackLimitSymbol({"-fstack-limit-symbol="}, + " -fstack-limit-symbol= \tTrap if the stack goes past symbol .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFstackProtector({"-fstack-protector"}, + " -fstack-protector \tUse propolice as a stack protection method.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stack-protector")); + +maplecl::Option oFstackProtectorAll({"-fstack-protector-all"}, + " -fstack-protector-all \tUse a stack protection method for " + "every function.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stack-protector-all")); + 
+maplecl::Option oFstackProtectorExplicit({"-fstack-protector-explicit"}, + " -fstack-protector-explicit \tUse stack protection method only for" + " functions with the stack_protect attribute.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stack-protector-explicit")); + +maplecl::Option oFstackUsage({"-fstack-usage"}, + " -fstack-usage \tOutput stack usage information on a per-function " + "basis.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stack-usage")); + +maplecl::Option oFstack_reuse({"-fstack-reuse="}, + " -fstack_reuse= \tSet stack reuse level for local variables.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFstats({"-fstats"}, + " -fstats \tDisplay statistics accumulated during compilation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stats")); + +maplecl::Option oFstdargOpt({"-fstdarg-opt"}, + " -fstdarg-opt \tOptimize amount of stdarg registers saved to stack at " + "start of function.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-stdarg-opt")); + +maplecl::Option oFstoreMerging({"-fstore-merging"}, + " -fstore-merging \tMerge adjacent stores.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-store-merging")); + +maplecl::Option oFstrictAliasing({"-fstrict-aliasing"}, + " -fstrict-aliasing \tAssume strict aliasing rules apply.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFstrictEnums({"-fstrict-enums"}, + " -fstrict-enums \tAssume that values of enumeration type are always " + "within the minimum range of that type.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-strict-enums")); + +maplecl::Option oFstrictOverflow({"-fstrict-overflow"}, + " -fstrict-overflow \tTreat signed overflow as undefined.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-strict-overflow")); + +maplecl::Option oFstrictVolatileBitfields({"-fstrict-volatile-bitfields"}, + " -fstrict-volatile-bitfields \tForce 
bitfield accesses to match their " + "type width.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-strict-volatile-bitfields")); + +maplecl::Option oFsyncLibcalls({"-fsync-libcalls"}, + " -fsync-libcalls \tImplement __atomic operations via libcalls to " + "legacy __sync functions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-sync-libcalls")); + +maplecl::Option oFsyntaxOnly({"-fsyntax-only"}, + " -fsyntax-only \tCheck for syntax errors\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-syntax-only")); + +maplecl::Option oFtabstop({"-ftabstop="}, + " -ftabstop= \tSet the distance between tab stops.\n", + {driverCategory, clangCategory}); + +maplecl::Option oFtemplateBacktraceLimit({"-ftemplate-backtrace-limit="}, + " -ftemplate-backtrace-limit= \tSet the maximum number of template " + "instantiation notes for a single warning or error to n.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFtemplateDepth({"-ftemplate-depth-"}, + " -ftemplate-depth- \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFtemplateDepthE({"-ftemplate-depth="}, + " -ftemplate-depth= \tSpecify maximum template instantiation depth.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFtestCoverage({"-ftest-coverage"}, + " -ftest-coverage \tCreate data files needed by \"gcov\".\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-test-coverage")); + +maplecl::Option oFthreadJumps({"-fthread-jumps"}, + " -fthread-jumps \tPerform jump threading optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-thread-jumps")); + +maplecl::Option oFtimeReport({"-ftime-report"}, + " -ftime-report \tReport the time taken by each compiler pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-time-report")); + +maplecl::Option oFtimeReportDetails({"-ftime-report-details"}, + " -ftime-report-details \tRecord times taken by sub-phases separately.\n", + 
{driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-time-report-details")); + +maplecl::Option oFtracer({"-ftracer"}, + " -ftracer \tPerform superblock formation via tail duplication.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tracer")); + +maplecl::Option oFtrackMacroExpansion({"-ftrack-macro-expansion"}, + " -ftrack-macro-expansion \tTrack locations of tokens across " + "macro expansions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-track-macro-expansion")); + +maplecl::Option oFtrackMacroExpansionE({"-ftrack-macro-expansion="}, + " -ftrack-macro-expansion= \tTrack locations of tokens across " + "macro expansions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFtrampolines({"-ftrampolines"}, + " -ftrampolines \tFor targets that normally need trampolines for " + "nested functions\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-trampolines")); + +maplecl::Option oFtrapv({"-ftrapv"}, + " -ftrapv \tTrap for signed overflow in addition\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-trapv")); + +maplecl::Option oFtreeBitCcp({"-ftree-bit-ccp"}, + " -ftree-bit-ccp \tEnable SSA-BIT-CCP optimization on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-bit-ccp")); + +maplecl::Option oFtreeBuiltinCallDce({"-ftree-builtin-call-dce"}, + " -ftree-builtin-call-dce \tEnable conditional dead code elimination for" + " builtin calls.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-builtin-call-dce")); + +maplecl::Option oFtreeCcp({"-ftree-ccp"}, + " -ftree-ccp \tEnable SSA-CCP optimization on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-ccp")); + +maplecl::Option oFtreeCh({"-ftree-ch"}, + " -ftree-ch \tEnable loop header copying on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-ch")); + +maplecl::Option oFtreeCoalesceVars({"-ftree-coalesce-vars"}, + " 
-ftree-coalesce-vars \tEnable SSA coalescing of user variables.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-coalesce-vars")); + +maplecl::Option oFtreeCopyProp({"-ftree-copy-prop"}, + " -ftree-copy-prop \tEnable copy propagation on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-copy-prop")); + +maplecl::Option oFtreeDce({"-ftree-dce"}, + " -ftree-dce \tEnable SSA dead code elimination optimization on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-dce")); + +maplecl::Option oFtreeDominatorOpts({"-ftree-dominator-opts"}, + " -ftree-dominator-opts \tEnable dominator optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-dominator-opts")); + +maplecl::Option oFtreeDse({"-ftree-dse"}, + " -ftree-dse \tEnable dead store elimination.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-dse")); + +maplecl::Option oFtreeForwprop({"-ftree-forwprop"}, + " -ftree-forwprop \tEnable forward propagation on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-forwprop")); + +maplecl::Option oFtreeFre({"-ftree-fre"}, + " -ftree-fre \tEnable Full Redundancy Elimination (FRE) on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-fre")); + +maplecl::Option oFtreeLoopDistributePatterns({"-ftree-loop-distribute-patterns"}, + " -ftree-loop-distribute-patterns \tEnable loop distribution for " + "patterns transformed into a library call.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-distribute-patterns")); + +maplecl::Option oFtreeLoopDistribution({"-ftree-loop-distribution"}, + " -ftree-loop-distribution \tEnable loop distribution on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-distribution")); + +maplecl::Option oFtreeLoopIfConvert({"-ftree-loop-if-convert"}, + " -ftree-loop-if-convert \tConvert conditional jumps in 
innermost loops " + "to branchless equivalents.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-if-convert")); + +maplecl::Option oFtreeLoopIm({"-ftree-loop-im"}, + " -ftree-loop-im \tEnable loop invariant motion on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-im")); + +maplecl::Option oFtreeLoopIvcanon({"-ftree-loop-ivcanon"}, + " -ftree-loop-ivcanon \tCreate canonical induction variables in loops.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-ivcanon")); + +maplecl::Option oFtreeLoopLinear({"-ftree-loop-linear"}, + " -ftree-loop-linear \tEnable loop nest transforms. Same as " + "-floop-nest-optimize.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-linear")); + +maplecl::Option oFtreeLoopOptimize({"-ftree-loop-optimize"}, + " -ftree-loop-optimize \tEnable loop optimizations on tree level.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-optimize")); + +maplecl::Option oFtreeLoopVectorize({"-ftree-loop-vectorize"}, + " -ftree-loop-vectorize \tEnable loop vectorization on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-loop-vectorize")); + +maplecl::Option oFtreeParallelizeLoops({"-ftree-parallelize-loops"}, + " -ftree-parallelize-loops \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-parallelize-loops")); + +maplecl::Option oFtreePartialPre({"-ftree-partial-pre"}, + " -ftree-partial-pre \tIn SSA-PRE optimization on trees\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-partial-pre")); + +maplecl::Option oFtreePhiprop({"-ftree-phiprop"}, + " -ftree-phiprop \tEnable hoisting loads from conditional pointers.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-phiprop")); + +maplecl::Option oFtreePre({"-ftree-pre"}, + " -ftree-pre \tEnable SSA-PRE optimization on trees.\n", + {driverCategory, unSupCategory}, 
+ maplecl::DisableWith("-fno-tree-pre")); + +maplecl::Option oFtreePta({"-ftree-pta"}, + " -ftree-pta \tPerform function-local points-to analysis on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-pta")); + +maplecl::Option oFtreeReassoc({"-ftree-reassoc"}, + " -ftree-reassoc \tEnable reassociation on tree level.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-reassoc")); + +maplecl::Option oFtreeSink({"-ftree-sink"}, + " -ftree-sink \tEnable SSA code sinking on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-sink")); + +maplecl::Option oFtreeSlpVectorize({"-ftree-slp-vectorize"}, + " -ftree-slp-vectorize \tEnable basic block vectorization (SLP) " + "on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-slp-vectorize")); + +maplecl::Option oFtreeSlsr({"-ftree-slsr"}, + " -ftree-slsr \tPerform straight-line strength reduction.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-slsr")); + +maplecl::Option oFtreeSra({"-ftree-sra"}, + " -ftree-sra \tPerform scalar replacement of aggregates.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-sra")); + +maplecl::Option oFtreeSwitchConversion({"-ftree-switch-conversion"}, + " -ftree-switch-conversion \tPerform conversions of switch " + "initializations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-switch-conversion")); + +maplecl::Option oFtreeTailMerge({"-ftree-tail-merge"}, + " -ftree-tail-merge \tEnable tail merging on trees.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-tail-merge")); + +maplecl::Option oFtreeTer({"-ftree-ter"}, + " -ftree-ter \tReplace temporary expressions in the SSA->normal pass.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-tree-ter")); + +maplecl::Option oFtreeVrp({"-ftree-vrp"}, + " -ftree-vrp \tPerform Value Range Propagation on trees.\n", + {driverCategory, 
unSupCategory}, + maplecl::DisableWith("-fno-tree-vrp")); + +maplecl::Option oFunconstrainedCommons({"-funconstrained-commons"}, + " -funconstrained-commons \tAssume common declarations may be " + "overridden with ones with a larger trailing array.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unconstrained-commons")); + +maplecl::Option oFunitAtATime({"-funit-at-a-time"}, + " -funit-at-a-time \tCompile whole compilation unit at a time.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unit-at-a-time")); + +maplecl::Option oFunrollAllLoops({"-funroll-all-loops"}, + " -funroll-all-loops \tPerform loop unrolling for all loops.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unroll-all-loops")); + +maplecl::Option oFunrollLoops({"-funroll-loops"}, + " -funroll-loops \tPerform loop unrolling when iteration count is known.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unroll-loops")); + +maplecl::Option oFunsafeMathOptimizations({"-funsafe-math-optimizations"}, + " -funsafe-math-optimizations \tAllow math optimizations that may " + "violate IEEE or ISO standards.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unsafe-math-optimizations")); + +maplecl::Option oFunsignedBitfields({"-funsigned-bitfields"}, + " -funsigned-bitfields \tWhen \"signed\" or \"unsigned\" is not given " + "make the bitfield unsigned.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unsigned-bitfields")); + +maplecl::Option oFunswitchLoops({"-funswitch-loops"}, + " -funswitch-loops \tPerform loop unswitching.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unswitch-loops")); + +maplecl::Option oFunwindTables({"-funwind-tables"}, + " -funwind-tables \tJust generate unwind tables for exception handling.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-unwind-tables")); + +maplecl::Option oFuseCxaAtexit({"-fuse-cxa-atexit"}, + " 
-fuse-cxa-atexit \tUse __cxa_atexit to register destructors.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-use-cxa-atexit")); + +maplecl::Option oFuseLdBfd({"-fuse-ld=bfd"}, + " -fuse-ld=bfd \tUse the bfd linker instead of the default linker.\n", + {driverCategory, ldCategory}); + +maplecl::Option oFuseLdGold({"-fuse-ld=gold"}, + " -fuse-ld=gold \tUse the gold linker instead of the default linker.\n", + {driverCategory, ldCategory}); + +maplecl::Option oFuseLinkerPlugin({"-fuse-linker-plugin"}, + " -fuse-linker-plugin \tEnables the use of a linker plugin during " + "link-time optimization.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-use-linker-plugin")); + +maplecl::Option oFvarTracking({"-fvar-tracking"}, + " -fvar-tracking \tPerform variable tracking.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-var-tracking")); + +maplecl::Option oFvarTrackingAssignments({"-fvar-tracking-assignments"}, + " -fvar-tracking-assignments \tPerform variable tracking by " + "annotating assignments.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-var-tracking-assignments")); + +maplecl::Option oFvarTrackingAssignmentsToggle({"-fvar-tracking-assignments-toggle"}, + " -fvar-tracking-assignments-toggle \tToggle -fvar-tracking-assignments.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-var-tracking-assignments-toggle")); + +maplecl::Option oFvariableExpansionInUnroller({"-fvariable-expansion-in-unroller"}, + " -fvariable-expansion-in-unroller \tApply variable expansion when " + "loops are unrolled.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-variable-expansion-in-unroller")); + +maplecl::Option oFvectCostModel({"-fvect-cost-model"}, + " -fvect-cost-model \tEnables the dynamic vectorizer cost model. 
" + "Preserved for backward compatibility.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-vect-cost-model")); + +maplecl::Option oFverboseAsm({"-fverbose-asm"}, + " -fverbose-asm \tAdd extra commentary to assembler output.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-verbose-asm")); + +maplecl::Option oFvisibilityInlinesHidden({"-fvisibility-inlines-hidden"}, + " -fvisibility-inlines-hidden \tMarks all inlined functions and methods" + " as having hidden visibility.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-visibility-inlines-hidden")); + +maplecl::Option oFvisibilityMsCompat({"-fvisibility-ms-compat"}, + " -fvisibility-ms-compat \tChanges visibility to match Microsoft Visual" + " Studio by default.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-visibility-ms-compat")); + +maplecl::Option oFvpt({"-fvpt"}, + " -fvpt \tUse expression value profiles in optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-vpt")); + +maplecl::Option oFvtableVerify({"-fvtable-verify"}, + " -fvtable-verify \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-vtable-verify")); + +maplecl::Option oFvtvCounts({"-fvtv-counts"}, + " -fvtv-counts \tOutput vtable verification counters.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-vtv-counts")); + +maplecl::Option oFvtvDebug({"-fvtv-debug"}, + " -fvtv-debug \tOutput vtable verification pointer sets information.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-vtv-debug")); + +maplecl::Option oFweb({"-fweb"}, + " -fweb \tConstruct webs and split unrelated uses of single variable.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-web")); + +maplecl::Option oFwholeProgram({"-fwhole-program"}, + " -fwhole-program \tPerform whole program optimizations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-whole-program")); + +maplecl::Option 
oFwideExecCharset({"-fwide-exec-charset="}, + " -fwide-exec-charset= \tConvert all wide strings and character " + "constants to character set .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMwarnDynamicstack({"-mwarn-dynamicstack"}, + " -mwarn-dynamicstack \tEmit a warning if the function calls alloca or " + "uses dynamically-sized arrays.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMwarnFramesize({"-mwarn-framesize"}, + " -mwarn-framesize \tEmit a warning if the current function exceeds " + "the given frame size.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMwarnMcu({"-mwarn-mcu"}, + " -mwarn-mcu \tThis option enables or disables warnings about conflicts " + "between the MCU name specified by the -mmcu option and the ISA set by the -mcpu" + " option and/or the hardware multiply support set by the -mhwmult option.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-warn-mcu")); + +maplecl::Option oMwarnMultipleFastInterrupts({"-mwarn-multiple-fast-interrupts"}, + " -mwarn-multiple-fast-interrupts \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-warn-multiple-fast-interrupts")); + +maplecl::Option oMwarnReloc({"-mwarn-reloc"}, + " -mwarn-reloc \t-mwarn-reloc generates a warning instead.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-merror-reloc")); + +maplecl::Option oMwideBitfields({"-mwide-bitfields"}, + " -mwide-bitfields \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-wide-bitfields")); + +maplecl::Option oMwin32({"-mwin32"}, + " -mwin32 \tThis option is available for Cygwin and MinGW targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMwindows({"-mwindows"}, + " -mwindows \tThis option is available for MinGW targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMwordRelocations({"-mword-relocations"}, + " -mword-relocations \tOnly generate absolute relocations on " + "word-sized values (i.e. R_ARM_ABS32). 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMx32({"-mx32"}, + " -mx32 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxgot({"-mxgot"}, + " -mxgot \tLift (do not lift) the usual restrictions on the " + "size of the global offset table.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-xgot")); + +maplecl::Option oMxilinxFpu({"-mxilinx-fpu"}, + " -mxilinx-fpu \tPerform optimizations for the floating-point unit " + "on Xilinx PPC 405/440.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlBarrelShift({"-mxl-barrel-shift"}, + " -mxl-barrel-shift \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlCompat({"-mxl-compat"}, + " -mxl-compat \tProduce code that conforms more closely to IBM XL " + "compiler semantics when using AIX-compatible ABI. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-xl-compat")); + +maplecl::Option oMxlFloatConvert({"-mxl-float-convert"}, + " -mxl-float-convert \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlFloatSqrt({"-mxl-float-sqrt"}, + " -mxl-float-sqrt \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlGpOpt({"-mxl-gp-opt"}, + " -mxl-gp-opt \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlMultiplyHigh({"-mxl-multiply-high"}, + " -mxl-multiply-high \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlPatternCompare({"-mxl-pattern-compare"}, + " -mxl-pattern-compare \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlReorder({"-mxl-reorder"}, + " -mxl-reorder \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlSoftDiv({"-mxl-soft-div"}, + " -mxl-soft-div \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlSoftMul({"-mxl-soft-mul"}, + " -mxl-soft-mul \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxlStackCheck({"-mxl-stack-check"}, + " -mxl-stack-check \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option 
oMxop({"-mxop"}, + " -mxop \tThese switches enable the use of instructions in the mxop.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxpa({"-mxpa"}, + " -mxpa \tUse the MIPS eXtended Physical Address (XPA) instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-xpa")); + +maplecl::Option oMxsave({"-mxsave"}, + " -mxsave \tThese switches enable the use of instructions in " + "the mxsave.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxsavec({"-mxsavec"}, + " -mxsavec \tThese switches enable the use of instructions in " + "the mxsavec.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxsaveopt({"-mxsaveopt"}, + " -mxsaveopt \tThese switches enable the use of instructions in " + "the mxsaveopt.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxsaves({"-mxsaves"}, + " -mxsaves \tThese switches enable the use of instructions in " + "the mxsaves.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMxy({"-mxy"}, + " -mxy \tPassed down to the assembler to enable the XY memory " + "extension. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMyellowknife({"-myellowknife"}, + " -myellowknife \tOn embedded PowerPC systems, assume that the startup " + "module is called crt0.o and the standard C libraries are libyk.a and libc.a.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMzarch({"-mzarch"}, + " -mzarch \tWhen -mzarch is specified, generate code using the " + "instructions available on z/Architecture.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMzda({"-mzda"}, + " -mzda \tPut static or global variables whose size is n bytes or less " + "into the first 32 kilobytes of memory.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMzdcbranch({"-mzdcbranch"}, + " -mzdcbranch \tAssume (do not assume) that zero displacement conditional" + " branch instructions bt and bf are fast. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-zdcbranch")); + +maplecl::Option oMzeroExtend({"-mzero-extend"}, + " -mzero-extend \tWhen reading data from memory in sizes shorter than " + "64 bits, use zero-extending load instructions by default, rather than " + "sign-extending ones.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-zero-extend")); + +maplecl::Option oMzvector({"-mzvector"}, + " -mzvector \tThe -mzvector option enables vector language extensions and " + "builtins using instructions available with the vector extension facility introduced " + "with the IBM z13 machine generation. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-zvector")); + +maplecl::Option oNo80387({"-no-80387"}, + " -no-80387 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNoCanonicalPrefixes({"-no-canonical-prefixes"}, + " -no-canonical-prefixes \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNoIntegratedCpp({"-no-integrated-cpp"}, + " -no-integrated-cpp \tPerform preprocessing as a separate pass before compilation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNoSysrootSuffix({"-no-sysroot-suffix"}, + " -no-sysroot-suffix \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNoall_load({"-noall_load"}, + " -noall_load \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNocpp({"-nocpp"}, + " -nocpp \tDisable preprocessing.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNodefaultlibs({"-nodefaultlibs"}, + " -nodefaultlibs \tDo not use the standard system libraries when " + "linking. 
\n", + {driverCategory, ldCategory}); + +maplecl::Option oNodevicelib({"-nodevicelib"}, + " -nodevicelib \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNofixprebinding({"-nofixprebinding"}, + " -nofixprebinding \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNolibdld({"-nolibdld"}, + " -nolibdld \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNomultidefs({"-nomultidefs"}, + " -nomultidefs \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNonStatic({"-non-static"}, + " -non-static \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNoprebind({"-noprebind"}, + " -noprebind \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNoseglinkedit({"-noseglinkedit"}, + " -noseglinkedit \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNostartfiles({"-nostartfiles"}, + " -nostartfiles \tDo not use the standard system startup files " + "when linking. \n", + {driverCategory, ldCategory}); + +maplecl::Option oNostdinc({"-nostdinc++"}, + " -nostdinc++ \tDo not search standard system include directories for" + " C++.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNo_dead_strip_inits_and_terms({"-no_dead_strip_inits_and_terms"}, + " -no_dead_strip_inits_and_terms \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oOfast({"-Ofast"}, + " -Ofast \tOptimize for speed disregarding exact standards compliance.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oOg({"-Og"}, + " -Og \tOptimize for debugging experience rather than speed or size.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oP({"-p"}, + " -p \tEnable function profiling.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oLargeP({"-P"}, + " -P \tDo not generate #line directives.\n", + {driverCategory, clangCategory}); + +maplecl::Option oPagezero_size({"-pagezero_size"}, + " -pagezero_size \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oParam({"--param"}, + " --param = 
\tSet parameter to value. See below for a complete " + "list of parameters.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPassExitCodes({"-pass-exit-codes"}, + " -pass-exit-codes \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPedantic({"-pedantic"}, + " -pedantic \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPedanticErrors({"-pedantic-errors"}, + " -pedantic-errors \tLike -pedantic but issue them as errors.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPg({"-pg"}, + " -pg \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPlt({"-plt"}, + " -plt \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrebind({"-prebind"}, + " -prebind \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrebind_all_twolevel_modules({"-prebind_all_twolevel_modules"}, + " -prebind_all_twolevel_modules \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintFileName({"-print-file-name"}, + " -print-file-name \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintLibgccFileName({"-print-libgcc-file-name"}, + " -print-libgcc-file-name \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintMultiDirectory({"-print-multi-directory"}, + " -print-multi-directory \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintMultiLib({"-print-multi-lib"}, + " -print-multi-lib \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintMultiOsDirectory({"-print-multi-os-directory"}, + " -print-multi-os-directory \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintMultiarch({"-print-multiarch"}, + " -print-multiarch \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintObjcRuntimeInfo({"-print-objc-runtime-info"}, + " -print-objc-runtime-info \tGenerate C header of platform-specific " + "features.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintProgName({"-print-prog-name"}, + " -print-prog-name \t\n", + 
{driverCategory, unSupCategory}); + +maplecl::Option oPrintSearchDirs({"-print-search-dirs"}, + " -print-search-dirs \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintSysroot({"-print-sysroot"}, + " -print-sysroot \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrintSysrootHeadersSuffix({"-print-sysroot-headers-suffix"}, + " -print-sysroot-headers-suffix \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPrivate_bundle({"-private_bundle"}, + " -private_bundle \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oPthreads({"-pthreads"}, + " -pthreads \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oQ({"-Q"}, + " -Q \tMakes the compiler print out each function name as it is " + "compiled, and print some statistics about each pass when it finishes.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oQn({"-Qn"}, + " -Qn \tRefrain from adding .ident directives to the output " + "file (this is the default).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oQy({"-Qy"}, + " -Qy \tIdentify the versions of each tool used by the compiler, " + "in a .ident assembler directive in the output.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oRead_only_relocs({"-read_only_relocs"}, + " -read_only_relocs \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oRemap({"-remap"}, + " -remap \tRemap file names when including files.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSectalign({"-sectalign"}, + " -sectalign \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSectcreate({"-sectcreate"}, + " -sectcreate \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSectobjectsymbols({"-sectobjectsymbols"}, + " -sectobjectsymbols \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSectorder({"-sectorder"}, + " -sectorder \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSeg1addr({"-seg1addr"}, + " -seg1addr \t\n", + 
{driverCategory, unSupCategory}); + +maplecl::Option oSegaddr({"-segaddr"}, + " -segaddr \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSeglinkedit({"-seglinkedit"}, + " -seglinkedit \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSegprot({"-segprot"}, + " -segprot \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSegs_read_only_addr({"-segs_read_only_addr"}, + " -segs_read_only_addr \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSegs_read_write_addr({"-segs_read_write_addr"}, + " -segs_read_write_addr \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSeg_addr_table({"-seg_addr_table"}, + " -seg_addr_table \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSeg_addr_table_filename({"-seg_addr_table_filename"}, + " -seg_addr_table_filename \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSharedLibgcc({"-shared-libgcc"}, + " -shared-libgcc \tOn systems that provide libgcc as a shared library, " + "these options force the use of either the shared or static version, respectively.\n", + {driverCategory, ldCategory}); + +maplecl::Option oSim({"-sim"}, + " -sim \tThis option, recognized for the cris-axis-elf, arranges to " + "link with input-output functions from a simulator library\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSim2({"-sim2"}, + " -sim2 \tLike -sim, but pass linker options to locate initialized " + "data at 0x40000000 and zero-initialized data at 0x80000000.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSingle_module({"-single_module"}, + " -single_module \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oStaticLibgcc({"-static-libgcc"}, + " -static-libgcc \tOn systems that provide libgcc as a shared library," + " these options force the use of either the shared or static version, respectively.\n", + {driverCategory, ldCategory}); + +maplecl::Option oStaticLibstdc({"-static-libstdc++"}, + " -static-libstdc++ \tWhen the 
g++ program is used to link a C++ program," + " it normally automatically links against libstdc++. \n", + {driverCategory, ldCategory}); + +maplecl::Option oSub_library({"-sub_library"}, + " -sub_library \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oSub_umbrella({"-sub_umbrella"}, + " -sub_umbrella \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oT({"-T"}, + " -T \tUse script as the linker script. This option is supported " + "by most systems using the GNU linker\n", + {driverCategory, ldCategory}); + +maplecl::Option oTargetHelp({"-target-help"}, + " -target-help \tPrint (on the standard output) a description of " + "target-specific command-line options for each tool.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oThreads({"-threads"}, + " -threads \tAdd support for multithreading with the dce thread " + "library under HP-UX. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oTime({"-time"}, + " -time \tReport the CPU time taken by each subprocess in the " + "compilation sequence. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oTnoAndroidCc({"-tno-android-cc"}, + " -tno-android-cc \tDisable compilation effects of -mandroid, i.e., " + "do not enable -mbionic, -fPIC, -fno-exceptions and -fno-rtti by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oTnoAndroidLd({"-tno-android-ld"}, + " -tno-android-ld \tDisable linking effects of -mandroid, i.e., pass " + "standard Linux linking options to the linker.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oTraditional({"-traditional"}, + " -traditional \tEnable traditional preprocessing. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oTraditionalCpp({"-traditional-cpp"}, + " -traditional-cpp \tEnable traditional preprocessing.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oTrigraphs({"-trigraphs"}, + " -trigraphs \t-trigraphs Support ISO C trigraphs.\n", + {driverCategory, clangCategory}); + +maplecl::Option oTwolevel_namespace({"-twolevel_namespace"}, + " -twolevel_namespace \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oUmbrella({"-umbrella"}, + " -umbrella \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oUndef({"-undef"}, + " -undef \tDo not predefine system-specific and GCC-specific macros.\n", + {driverCategory, clangCategory}); + +maplecl::Option oUndefined({"-undefined"}, + " -undefined \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oUnexported_symbols_list({"-unexported_symbols_list"}, + " -unexported_symbols_list \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oWa({"-Wa"}, + " -Wa \tPass option as an option to the assembler. If option contains" + " commas, it is split into multiple options at the commas.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oWhatsloaded({"-whatsloaded"}, + " -whatsloaded \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oWhyload({"-whyload"}, + " -whyload \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oWLtoTypeMismatch({"-Wlto-type-mismatch"}, + " -Wno-lto-type-mismatch \tDuring the link-time optimization warn about" + " type mismatches in global declarations from different compilation units. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-Wno-lto-type-mismatch")); + +maplecl::Option oWmisspelledIsr({"-Wmisspelled-isr"}, + " -Wmisspelled-isr \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oWp({"-Wp"}, + " -Wp \tYou can use -Wp,option to bypass the compiler driver and pass " + "option directly through to the preprocessor. 
\n", + {driverCategory, clangCategory}, maplecl::joinedValue); + +maplecl::Option oWrapper({"-wrapper"}, + " -wrapper \tInvoke all subcommands under a wrapper program. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oXassembler({"-Xassembler"}, + " -Xassembler \tPass option as an option to the assembler. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oXbindLazy({"-Xbind-lazy"}, + " -Xbind-lazy \tEnable lazy binding of function calls. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oXbindNow({"-Xbind-now"}, + " -Xbind-now \tDisable lazy binding of function calls. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oXlinker({"-Xlinker"}, + " -Xlinker \tPass option as an option to the linker. \n", + {driverCategory, ldCategory}); + +maplecl::Option oXpreprocessor({"-Xpreprocessor"}, + " -Xpreprocessor \tPass option as an option to the preprocessor. \n", + {driverCategory, clangCategory}); + +maplecl::Option oYm({"-Ym"}, + " -Ym \tLook in the directory dir to find the M4 preprocessor. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oYP({"-YP"}, + " -YP \tSearch the directories dirs, and no others, for " + "libraries specified with -l.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oZ({"-z"}, + " -z \tPassed directly on to the linker along with the keyword keyword.\n", + {driverCategory, ldCategory}); + +maplecl::Option oU({"-u"}, + " -u \tPretend the symbol symbol is undefined, to force linking of " + "library modules to define it. 
\n", + {driverCategory, ldCategory}); + +maplecl::Option oStd03({"-std=c++03"}, + " -std=c++03 \ttConform to the ISO 1998 C++ standard revised by the 2003 technical " + "corrigendum.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd0x({"-std=c++0x"}, + " -std=c++0x \tDeprecated in favor of -std=c++11.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd11({"-std=c++11"}, + " -std=c++11 \tConform to the ISO 2011 C++ standard.\n", + {driverCategory, ldCategory, clangCategory}); + +maplecl::Option oStd14({"-std=c++14"}, + " -std=c++14 \tConform to the ISO 2014 C++ standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd17({"-std=c++17"}, + " -std=c++17 \ttConform to the ISO 2017 C++ standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd1y({"-std=c++1y"}, + " -std=c++1y \tDeprecated in favor of -std=c++14.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd1z({"-std=c++1z"}, + " -std=c++1z \tConform to the ISO 2017(?) 
C++ draft standard (experimental and " + "incomplete support).\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd98({"-std=c++98"}, + " -std=c++98 \tConform to the ISO 1998 C++ standard revised by the 2003 technical " + "corrigendum.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd11p({"-std=c11"}, + " -std=c11 \tConform to the ISO 2011 C standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdc1x({"-std=c1x"}, + " -std=c1x \tDeprecated in favor of -std=c11.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd89({"-std=c89"}, + " -std=c89 \tConform to the ISO 1990 C standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd90({"-std=c90"}, + " -std \tConform to the ISO 1998 C++ standard revised by the 2003 technical " + "corrigendum.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd99({"-std=c99"}, + " -std=c99 \tConform to the ISO 1999 C standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd9x({"-std=c9x"}, + " -std=c9x \tDeprecated in favor of -std=c99.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd2003({"-std=f2003"}, + " -std=f2003 \tConform to the ISO Fortran 2003 standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd2008({"-std=f2008"}, + " -std=f2008 \tConform to the ISO Fortran 2008 standard.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oStd2008ts({"-std=f2008ts"}, + " -std=f2008ts \tConform to the ISO Fortran 2008 standard including TS 29113.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oStdf95({"-std=f95"}, + " -std=f95 \tConform to the ISO Fortran 95 standard.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oStdgnu({"-std=gnu"}, + " -std=gnu \tConform to nothing in particular.\n", + {driverCategory, unSupCategory}); + +maplecl::Option 
oStdgnu03p({"-std=gnu++03"}, + " -std=gnu++03 \tConform to the ISO 1998 C++ standard revised by the 2003 technical " + "corrigendum with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnuoxp({"-std=gnu++0x"}, + " -std=gnu++0x \tDeprecated in favor of -std=gnu++11.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu11p({"-std=gnu++11"}, + " -std=gnu++11 \tConform to the ISO 2011 C++ standard with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu14p({"-std=gnu++14"}, + " -std=gnu++14 \tConform to the ISO 2014 C++ standard with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu17p({"-std=gnu++17"}, + " -std=gnu++17 \tConform to the ISO 2017 C++ standard with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu1yp({"-std=gnu++1y"}, + " -std=gnu++1y \tDeprecated in favor of -std=gnu++14.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu1zp({"-std=gnu++1z"}, + " -std=gnu++1z \tConform to the ISO 201z(7?) 
C++ draft standard with GNU extensions " + "(experimental and incomplete support).\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu98p({"-std=gnu++98"}, + " -std=gnu++98 \tConform to the ISO 1998 C++ standard revised by the 2003 technical " + "corrigendum with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu11({"-std=gnu11"}, + " -std=gnu11 \tConform to the ISO 2011 C standard with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu1x({"-std=gnu1x"}, + " -std=gnu1x \tDeprecated in favor of -std=gnu11.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu89({"-std=gnu89"}, + " -std=gnu89 \tConform to the ISO 1990 C standard with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu90({"-std=gnu90"}, + " -std=gnu90 \tConform to the ISO 1990 C standard with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu99({"-std=gnu99"}, + " -std=gnu99 \tConform to the ISO 1999 C standard with GNU extensions.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdgnu9x({"-std=gnu9x"}, + " -std=gnu9x \tDeprecated in favor of -std=gnu99.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd1990({"-std=iso9899:1990"}, + " -std=iso9899:1990 \tConform to the ISO 1990 C standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd1994({"-std=iso9899:199409"}, + " -std=iso9899:199409 \tConform to the ISO 1990 C standard as amended in 1994.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd1999({"-std=iso9899:1999"}, + " -std=iso9899:1999 \tConform to the ISO 1999 C standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd199x({"-std=iso9899:199x"}, + " -std=iso9899:199x \tDeprecated in favor of 
-std=iso9899:1999.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStd2011({"-std=iso9899:2011"}, + " -std=iso9899:2011\tConform to the ISO 2011 C standard.\n", + {driverCategory, clangCategory, unSupCategory}); + +maplecl::Option oStdlegacy({"-std=legacy"}, + " -std=legacy\tAccept extensions to support legacy code.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oFworkingDirectory({"-fworking-directory"}, + " -fworking-directory \tGenerate a #line directive pointing at the current working directory.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-working-directory")); + +maplecl::Option oFwrapv({"-fwrapv"}, + " -fwrapv \tAssume signed arithmetic overflow wraps around.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-fwrapv")); + +maplecl::Option oFzeroLink({"-fzero-link"}, + " -fzero-link \tGenerate lazy class lookup (via objc_getClass()) for use " + "inZero-Link mode.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-fno-zero-link")); + +maplecl::Option oG({"-G"}, + " -G \tOn embedded PowerPC systems, put global and static items less than" + " or equal to num bytes into the small data or BSS sections instead of the normal data " + "or BSS section. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGcoff({"-gcoff"}, + " -gcoff \tGenerate debug information in COFF format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGcolumnInfo({"-gcolumn-info"}, + " -gcolumn-info \tRecord DW_AT_decl_column and DW_AT_call_column " + "in DWARF.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGdwarf({"-gdwarf"}, + " -gdwarf \tGenerate debug information in default version of DWARF " + "format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGenDecls({"-gen-decls"}, + " -gen-decls \tDump declarations to a .decl file.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGfull({"-gfull"}, + " -gfull \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGgdb({"-ggdb"}, + " -ggdb \tGenerate debug information in default extended format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGgnuPubnames({"-ggnu-pubnames"}, + " -ggnu-pubnames \tGenerate DWARF pubnames and pubtypes sections with " + "GNU extensions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGnoColumnInfo({"-gno-column-info"}, + " -gno-column-info \tDon't record DW_AT_decl_column and DW_AT_call_column" + " in DWARF.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGnoRecordGccSwitches({"-gno-record-gcc-switches"}, + " -gno-record-gcc-switches \tDon't record gcc command line switches " + "in DWARF DW_AT_producer.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGnoStrictDwarf({"-gno-strict-dwarf"}, + " -gno-strict-dwarf \tEmit DWARF additions beyond selected version.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGpubnames({"-gpubnames"}, + " -gpubnames \tGenerate DWARF pubnames and pubtypes sections.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGrecordGccSwitches({"-grecord-gcc-switches"}, + " -grecord-gcc-switches \tRecord gcc command line switches in " + "DWARF DW_AT_producer.\n", + {driverCategory, unSupCategory}); + +maplecl::Option 
oGsplitDwarf({"-gsplit-dwarf"}, + " -gsplit-dwarf \tGenerate debug information in separate .dwo files.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGstabs({"-gstabs"}, + " -gstabs \tGenerate debug information in STABS format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGstabsA({"-gstabs+"}, + " -gstabs+ \tGenerate debug information in extended STABS format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGstrictDwarf({"-gstrict-dwarf"}, + " -gstrict-dwarf \tDon't emit DWARF additions beyond selected version.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGtoggle({"-gtoggle"}, + " -gtoggle \tToggle debug information generation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGused({"-gused"}, + " -gused \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGvms({"-gvms"}, + " -gvms \tGenerate debug information in VMS format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGxcoff({"-gxcoff"}, + " -gxcoff \tGenerate debug information in XCOFF format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGxcoffA({"-gxcoff+"}, + " -gxcoff+ \tGenerate debug information in extended XCOFF format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oGz({"-gz"}, + " -gz \tGenerate compressed debug sections.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oH({"-H"}, + " -H \tPrint the name of header files as they are used.\n", + {driverCategory, clangCategory}); + +maplecl::Option oHeaderpad_max_install_names({"-headerpad_max_install_names"}, + " -headerpad_max_install_names \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oI({"-I-"}, + " -I- \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIdirafter({"-idirafter"}, + " -idirafter \t-idirafter o Add oto the end of the system " + "include path.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIframework({"-iframework"}, + " -iframework \tLike -F except the directory is a treated 
as a system " + "directory. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oImage_base({"-image_base"}, + " -image_base \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oImultilib({"-imultilib"}, + " -imultilib \t-imultilib o Set oto be the multilib include" + " subdirectory.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oInclude({"-include"}, + " -include \t-include o Include the contents of o" + "before other files.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oInit({"-init"}, + " -init \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oInstall_name({"-install_name"}, + " -install_name \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIplugindir({"-iplugindir="}, + " -iplugindir= \t-iplugindir= o Set oto be the default plugin " + "directory.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIprefix({"-iprefix"}, + " -iprefix \t-iprefix o Specify oas a prefix for next " + "two options.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIquote({"-iquote"}, + " -iquote \t-iquote o Add oto the end of the quote include " + "path.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIsysroot({"-isysroot"}, + " -isysroot \t-isysroot o Set oto be the system root " + "directory.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIwithprefix({"-iwithprefix"}, + " -iwithprefix \t-iwithprefix oAdd oto the end of the system " + "include path.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oIwithprefixbefore({"-iwithprefixbefore"}, + " -iwithprefixbefore \t-iwithprefixbefore o Add oto the end " + "ofthe main include path.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oKeep_private_externs({"-keep_private_externs"}, + " -keep_private_externs \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM({"-M"}, + " -M \tGenerate make dependencies.\n", + {driverCategory, clangCategory}); + +maplecl::Option oM1({"-m1"}, + " -m1 \tGenerate 
code for the SH1.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM10({"-m10"}, + " -m10 \tGenerate code for a PDP-11/10.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM128bitLongDouble({"-m128bit-long-double"}, + " -m128bit-long-double \tControl the size of long double type.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM16({"-m16"}, + " -m16 \tGenerate code for a 16-bit.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM16Bit({"-m16-bit"}, + " -m16-bit \tArrange for stack frame, writable data and " + "constants to all be 16-bit.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-16-bit")); + +maplecl::Option oM2({"-m2"}, + " -m2 \tGenerate code for the SH2.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM210({"-m210"}, + " -m210 \tGenerate code for the 210 processor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM2a({"-m2a"}, + " -m2a \tGenerate code for the SH2a-FPU assuming the floating-point" + " unit is in double-precision mode by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM2e({"-m2e"}, + " -m2e \tGenerate code for the SH2e.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM2aNofpu({"-m2a-nofpu"}, + " -m2a-nofpu \tGenerate code for the SH2a without FPU, or for a" + " SH2a-FPU in such a way that the floating-point unit is not used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM2aSingle({"-m2a-single"}, + " -m2a-single \tGenerate code for the SH2a-FPU assuming the " + "floating-point unit is in single-precision mode by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM2aSingleOnly({"-m2a-single-only"}, + " -m2a-single-only \tGenerate code for the SH2a-FPU, in such a way" + " that no double-precision floating-point operations are used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM3({"-m3"}, + " -m3 \tGenerate code for the SH3.\n", + {driverCategory, unSupCategory}); + 
+maplecl::Option oM31({"-m31"}, + " -m31 \tWhen -m31 is specified, generate code compliant to" + " the GNU/Linux for S/390 ABI. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oM32({"-m32"}, + " -m32 \tGenerate code for 32-bit ABI.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM32Bit({"-m32-bit"}, + " -m32-bit \tArrange for stack frame, writable data and " + "constants to all be 32-bit.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM32bitDoubles({"-m32bit-doubles"}, + " -m32bit-doubles \tMake the double data type 32 bits " + "(-m32bit-doubles) in size.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM32r({"-m32r"}, + " -m32r \tGenerate code for the M32R.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM32r2({"-m32r2"}, + " -m32r2 \tGenerate code for the M32R/2.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM32rx({"-m32rx"}, + " -m32rx \tGenerate code for the M32R/X.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM340({"-m340"}, + " -m340 \tGenerate code for the 210 processor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM3dnow({"-m3dnow"}, + " -m3dnow \tThese switches enable the use of instructions " + "in the m3dnow.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM3dnowa({"-m3dnowa"}, + " -m3dnowa \tThese switches enable the use of instructions " + "in the m3dnowa.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM3e({"-m3e"}, + " -m3e \tGenerate code for the SH3e.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4({"-m4"}, + " -m4 \tGenerate code for the SH4.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4100({"-m4-100"}, + " -m4-100 \tGenerate code for SH4-100.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4100Nofpu({"-m4-100-nofpu"}, + " -m4-100-nofpu \tGenerate code for SH4-100 in such a way that the " + "floating-point unit is not used.\n", + {driverCategory, unSupCategory}); + 
+maplecl::Option oM4100Single({"-m4-100-single"}, + " -m4-100-single \tGenerate code for SH4-100 assuming the floating-point " + "unit is in single-precision mode by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4100SingleOnly({"-m4-100-single-only"}, + " -m4-100-single-only \tGenerate code for SH4-100 in such a way that no " + "double-precision floating-point operations are used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4200({"-m4-200"}, + " -m4-200 \tGenerate code for SH4-200.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4200Nofpu({"-m4-200-nofpu"}, + " -m4-200-nofpu \tGenerate code for SH4-200 without in such a way that" + " the floating-point unit is not used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4200Single({"-m4-200-single"}, + " -m4-200-single \tGenerate code for SH4-200 assuming the floating-point " + "unit is in single-precision mode by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4200SingleOnly({"-m4-200-single-only"}, + " -m4-200-single-only \tGenerate code for SH4-200 in such a way that no " + "double-precision floating-point operations are used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4300({"-m4-300"}, + " -m4-300 \tGenerate code for SH4-300.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4300Nofpu({"-m4-300-nofpu"}, + " -m4-300-nofpu \tGenerate code for SH4-300 without in such a way that" + " the floating-point unit is not used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4300Single({"-m4-300-single"}, + " -m4-300-single \tGenerate code for SH4-300 in such a way that no " + "double-precision floating-point operations are used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4300SingleOnly({"-m4-300-single-only"}, + " -m4-300-single-only \tGenerate code for SH4-300 in such a way that " + "no double-precision floating-point operations are used.\n", + {driverCategory, unSupCategory}); 
+ +maplecl::Option oM4340({"-m4-340"}, + " -m4-340 \tGenerate code for SH4-340 (no MMU, no FPU).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4500({"-m4-500"}, + " -m4-500 \tGenerate code for SH4-500 (no FPU). Passes " + "-isa=sh4-nofpu to the assembler.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4Nofpu({"-m4-nofpu"}, + " -m4-nofpu \tGenerate code for the SH4al-dsp, or for a " + "SH4a in such a way that the floating-point unit is not used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4Single({"-m4-single"}, + " -m4-single \tGenerate code for the SH4a assuming the floating-point " + "unit is in single-precision mode by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4SingleOnly({"-m4-single-only"}, + " -m4-single-only \tGenerate code for the SH4a, in such a way that " + "no double-precision floating-point operations are used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM40({"-m40"}, + " -m40 \tGenerate code for a PDP-11/40.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM45({"-m45"}, + " -m45 \tGenerate code for a PDP-11/45. 
This is the default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4a({"-m4a"}, + " -m4a \tGenerate code for the SH4a.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4aNofpu({"-m4a-nofpu"}, + " -m4a-nofpu \tGenerate code for the SH4al-dsp, or for a SH4a in such " + "a way that the floating-point unit is not used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4aSingle({"-m4a-single"}, + " -m4a-single \tGenerate code for the SH4a assuming the floating-point " + "unit is in single-precision mode by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4aSingleOnly({"-m4a-single-only"}, + " -m4a-single-only \tGenerate code for the SH4a, in such a way that no" + " double-precision floating-point operations are used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4al({"-m4al"}, + " -m4al \tSame as -m4a-nofpu, except that it implicitly passes -dsp" + " to the assembler. GCC doesn't generate any DSP instructions at the moment.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM4byteFunctions({"-m4byte-functions"}, + " -m4byte-functions \tForce all functions to be aligned to a 4-byte" + " boundary.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-4byte-functions")); + +maplecl::Option oM5200({"-m5200"}, + " -m5200 \tGenerate output for a 520X ColdFire CPU.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM5206e({"-m5206e"}, + " -m5206e \tGenerate output for a 5206e ColdFire CPU. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oM528x({"-m528x"}, + " -m528x \tGenerate output for a member of the ColdFire 528X family. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM5307({"-m5307"}, + " -m5307 \tGenerate output for a ColdFire 5307 CPU.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM5407({"-m5407"}, + " -m5407 \tGenerate output for a ColdFire 5407 CPU.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM64({"-m64"}, + " -m64 \tGenerate code for 64-bit ABI.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM64bitDoubles({"-m64bit-doubles"}, + " -m64bit-doubles \tMake the double data type be 64 bits (-m64bit-doubles)" + " in size.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM68000({"-m68000"}, + " -m68000 \tGenerate output for a 68000.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM68010({"-m68010"}, + " -m68010 \tGenerate output for a 68010.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM68020({"-m68020"}, + " -m68020 \tGenerate output for a 68020. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oM6802040({"-m68020-40"}, + " -m68020-40 \tGenerate output for a 68040, without using any of " + "the new instructions. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oM6802060({"-m68020-60"}, + " -m68020-60 \tGenerate output for a 68060, without using any of " + "the new instructions. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMTLS({"-mTLS"}, + " -mTLS \tAssume a large TLS segment when generating thread-local code.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mtls")); + +maplecl::Option oMtlsDialect({"-mtls-dialect"}, + " -mtls-dialect \tSpecify TLS dialect.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtlsDirectSegRefs({"-mtls-direct-seg-refs"}, + " -mtls-direct-seg-refs \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtlsMarkers({"-mtls-markers"}, + " -mtls-markers \tMark calls to __tls_get_addr with a relocation " + "specifying the function argument. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-tls-markers")); + +maplecl::Option oMtoc({"-mtoc"}, + " -mtoc \tOn System V.4 and embedded PowerPC systems do not assume " + "that register 2 contains a pointer to a global area pointing to the addresses " + "used in the program.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-toc")); + +maplecl::Option oMtomcatStats({"-mtomcat-stats"}, + " -mtomcat-stats \tCause gas to print out tomcat statistics.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtoplevelSymbols({"-mtoplevel-symbols"}, + " -mtoplevel-symbols \tPrepend (do not prepend) a ':' to all global " + "symbols, so the assembly code can be used with the PREFIX assembly directive.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-toplevel-symbols")); + +maplecl::Option oMtp({"-mtp"}, + " -mtp \tSpecify the access model for the thread local storage pointer. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtpRegno({"-mtp-regno"}, + " -mtp-regno \tSpecify thread pointer register number.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtpcsFrame({"-mtpcs-frame"}, + " -mtpcs-frame \tGenerate a stack frame that is compliant with the Thumb " + "Procedure Call Standard for all non-leaf functions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtpcsLeafFrame({"-mtpcs-leaf-frame"}, + " -mtpcs-leaf-frame \tGenerate a stack frame that is compliant with " + "the Thumb Procedure Call Standard for all leaf functions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtpfTrace({"-mtpf-trace"}, + " -mtpf-trace \tGenerate code that adds in TPF OS specific branches to " + "trace routines in the operating system. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-tpf-trace")); + +maplecl::Option oMtrapPrecision({"-mtrap-precision"}, + " -mtrap-precision \tIn the Alpha architecture, floating-point traps " + "are imprecise.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtune({"-mtune="}, + " -mtune= \tOptimize for CPU. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtuneCtrlFeatureList({"-mtune-ctrl=feature-list"}, + " -mtune-ctrl=feature-list \tThis option is used to do fine grain " + "control of x86 code generation features. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMuclibc({"-muclibc"}, + " -muclibc \tUse uClibc C library.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMuls({"-muls"}, + " -muls \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMultcost({"-multcost"}, + " -multcost \tReplaced by -mmultcost.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMultcostNumber({"-multcost=number"}, + " -multcost=number \tSet the cost to assume for a multiply insn.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMultilibLibraryPic({"-multilib-library-pic"}, + " -multilib-library-pic \tLink with the (library, not FD) pic libraries.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmultiplyEnabled({"-mmultiply-enabled"}, + " -multiply-enabled \tEnable multiply instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMultiply_defined({"-multiply_defined"}, + " -multiply_defined \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMultiply_defined_unused({"-multiply_defined_unused"}, + " -multiply_defined_unused \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMulti_module({"-multi_module"}, + " -multi_module \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMunalignProbThreshold({"-munalign-prob-threshold"}, + " -munalign-prob-threshold \tSet probability threshold for unaligning" + " branches. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMunalignedAccess({"-munaligned-access"}, + " -munaligned-access \tEnables (or disables) reading and writing of 16- " + "and 32- bit values from addresses that are not 16- or 32- bit aligned.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-unaligned-access")); + +maplecl::Option oMunalignedDoubles({"-munaligned-doubles"}, + " -munaligned-doubles \tAssume that doubles have 8-byte alignment. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-unaligned-doubles")); + +maplecl::Option oMunicode({"-municode"}, + " -municode \tThis option is available for MinGW-w64 targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMuniformSimt({"-muniform-simt"}, + " -muniform-simt \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMuninitConstInRodata({"-muninit-const-in-rodata"}, + " -muninit-const-in-rodata \tPut uninitialized const variables in the " + "read-only data section. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-uninit-const-in-rodata")); + +maplecl::Option oMunix({"-munix"}, + " -munix \tGenerate compiler predefines and select a startfile for " + "the specified UNIX standard. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMunixAsm({"-munix-asm"}, + " -munix-asm \tUse Unix assembler syntax. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMupdate({"-mupdate"}, + " -mupdate \tGenerate code that uses the load string instructions and " + "the store string word instructions to save multiple registers and do small " + "block moves. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-update")); + +maplecl::Option oMupperRegs({"-mupper-regs"}, + " -mupper-regs \tGenerate code that uses (does not use) the scalar " + "instructions that target all 64 registers in the vector/scalar floating point " + "register set, depending on the model of the machine.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-upper-regs")); + +maplecl::Option oMupperRegsDf({"-mupper-regs-df"}, + " -mupper-regs-df \tGenerate code that uses (does not use) the scalar " + "double precision instructions that target all 64 registers in the vector/scalar " + "floating point register set that were added in version 2.06 of the PowerPC ISA.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-upper-regs-df")); + +maplecl::Option oMupperRegsDi({"-mupper-regs-di"}, + " -mupper-regs-di \tGenerate code that uses (does not use) the scalar " + "instructions that target all 64 registers in the vector/scalar floating point " + "register set that were added in version 2.06 of the PowerPC ISA when processing " + "integers. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-upper-regs-di")); + +maplecl::Option oMupperRegsSf({"-mupper-regs-sf"}, + " -mupper-regs-sf \tGenerate code that uses (does not use) the scalar " + "single precision instructions that target all 64 registers in the vector/scalar " + "floating point register set that were added in version 2.07 of the PowerPC ISA.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-upper-regs-sf")); + +maplecl::Option oMuserEnabled({"-muser-enabled"}, + " -muser-enabled \tEnable user-defined instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMuserMode({"-muser-mode"}, + " -muser-mode \tDo not generate code that can only run in supervisor " + "mode.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-user-mode")); + +maplecl::Option oMusermode({"-musermode"}, + " -musermode \tDon't allow (allow) the compiler generating " + "privileged mode code.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-usermode")); + +maplecl::Option oMv3push({"-mv3push"}, + " -mv3push \tGenerate v3 push25/pop25 instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-v3push")); + +maplecl::Option oMv850({"-mv850"}, + " -mv850 \tSpecify that the target processor is the V850.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv850e({"-mv850e"}, + " -mv850e \tSpecify that the target processor is the V850E.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv850e1({"-mv850e1"}, + " -mv850e1 \tSpecify that the target processor is the V850E1. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv850e2({"-mv850e2"}, + " -mv850e2 \tSpecify that the target processor is the V850E2.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv850e2v3({"-mv850e2v3"}, + " -mv850e2v3 \tSpecify that the target processor is the V850E2V3.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv850e2v4({"-mv850e2v4"}, + " -mv850e2v4 \tSpecify that the target processor is the V850E3V5.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv850e3v5({"-mv850e3v5"}, + " -mv850e3v5 \tSpecify that the target processor is the V850E3V5.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv850es({"-mv850es"}, + " -mv850es \tSpecify that the target processor is the V850ES.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMv8plus({"-mv8plus"}, + " -mv8plus \tWith -mv8plus, Maple generates code for the SPARC-V8+ ABI.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-v8plus")); + +maplecl::Option oMveclibabi({"-mveclibabi"}, + " -mveclibabi \tSpecifies the ABI type to use for vectorizing " + "intrinsics using an external library. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMvect8RetInMem({"-mvect8-ret-in-mem"}, + " -mvect8-ret-in-mem \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMvirt({"-mvirt"}, + " -mvirt \tUse the MIPS Virtualization (VZ) instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-virt")); + +maplecl::Option oMvis({"-mvis"}, + " -mvis \tWith -mvis, Maple generates code that takes advantage of the " + "UltraSPARC Visual Instruction Set extensions. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vis")); + +maplecl::Option oMvis2({"-mvis2"}, + " -mvis2 \tWith -mvis2, Maple generates code that takes advantage of " + "version 2.0 of the UltraSPARC Visual Instruction Set extensions. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vis2")); + +maplecl::Option oMvis3({"-mvis3"}, + " -mvis3 \tWith -mvis3, Maple generates code that takes advantage of " + "version 3.0 of the UltraSPARC Visual Instruction Set extensions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vis3")); + +maplecl::Option oMvis4({"-mvis4"}, + " -mvis4 \tWith -mvis4, GCC generates code that takes advantage of " + "version 4.0 of the UltraSPARC Visual Instruction Set extensions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vis4")); + +maplecl::Option oMvis4b({"-mvis4b"}, + " -mvis4b \tWith -mvis4b, GCC generates code that takes advantage of " + "version 4.0 of the UltraSPARC Visual Instruction Set extensions, plus the additional " + "VIS instructions introduced in the Oracle SPARC Architecture 2017.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vis4b")); + +maplecl::Option oMvliwBranch({"-mvliw-branch"}, + " -mvliw-branch \tRun a pass to pack branches into VLIW instructions " + "(default).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vliw-branch")); + +maplecl::Option oMvmsReturnCodes({"-mvms-return-codes"}, + " -mvms-return-codes \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMvolatileAsmStop({"-mvolatile-asm-stop"}, + " -mvolatile-asm-stop \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-volatile-asm-stop")); + +maplecl::Option oMvolatileCache({"-mvolatile-cache"}, + " -mvolatile-cache \tUse ordinarily cached memory accesses for " + "volatile references. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-volatile-cache")); + +maplecl::Option oMvr4130Align({"-mvr4130-align"}, + " -mvr4130-align \tThe VR4130 pipeline is two-way superscalar, but " + "can only issue two instructions together if the first one is 8-byte aligned. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-vr4130-align")); + +maplecl::Option oMvrsave({"-mvrsave"}, + " -mvrsave \tGenerate VRSAVE instructions when generating AltiVec code.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vrsave")); + +maplecl::Option oMvsx({"-mvsx"}, + " -mvsx \tGenerate code that uses (does not use) vector/scalar (VSX)" + " instructions, and also enable the use of built-in functions that allow more direct " + "access to the VSX instruction set.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vsx")); + +maplecl::Option oMvx({"-mvx"}, + " -mvx \tGenerate code using the instructions available with the vector " + "extension facility introduced with the IBM z13 machine generation. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-vx")); + +maplecl::Option oMvxworks({"-mvxworks"}, + " -mvxworks \tOn System V.4 and embedded PowerPC systems, specify that " + "you are compiling for a VxWorks system.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMvzeroupper({"-mvzeroupper"}, + " -mvzeroupper \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMwarnCellMicrocode({"-mwarn-cell-microcode"}, + " -mwarn-cell-microcode \tWarn when a Cell microcode instruction is emitted. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oA({"-A"}, + " -A= \tAssert the to . 
Putting '-' before disables " + "the to assertion missing after %qs.\n", + {driverCategory, clangCategory}, maplecl::joinedValue); + +maplecl::Option oO({"-O"}, + " -O \tReduce code size and execution time, without performing any optimizations that " + "take a great deal of compilation time.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oNoPie({"-no-pie"}, + " -no-pie \tDon't create a position independent executable.\n", + {driverCategory, ldCategory}); + +maplecl::Option staticLibmplpgo({"--static-libmplpgo"}, + " --static-libmplpgo \tStatic link using libmplpgo\n", + {driverCategory, cgCategory}); + + /* #################################################################################################### */ } /* namespace opts */ diff --git a/src/mapleall/maple_driver/src/driver_runner.cpp b/src/mapleall/maple_driver/src/driver_runner.cpp index 59d5c8907b0a95070a8576fe7747ab27e605bd0f..9efd2b782e7718ecf8ed491560951208495478fb 100644 --- a/src/mapleall/maple_driver/src/driver_runner.cpp +++ b/src/mapleall/maple_driver/src/driver_runner.cpp @@ -214,9 +214,9 @@ ErrorCode DriverRunner::ParseInput() const { MIRParser parser(*theModule); ErrorCode ret = kErrorNoError; if (!fileParsed) { - if (inputFileType != kFileTypeBpl && - inputFileType != kFileTypeMbc && - inputFileType != kFileTypeLmbc) { + if (inputFileType != InputFileType::kFileTypeBpl && + inputFileType != InputFileType::kFileTypeMbc && + inputFileType != InputFileType::kFileTypeLmbc) { bool parsed = parser.ParseMIR(0, 0, false, true); if (!parsed) { ret = kErrorExit; @@ -245,9 +245,9 @@ ErrorCode DriverRunner::ParseInput() const { ErrorCode DriverRunner::ParseSrcLang(MIRSrcLang &srcLang) const { ErrorCode ret = kErrorNoError; - if (inputFileType != kFileTypeBpl && - inputFileType != kFileTypeMbc && - inputFileType != kFileTypeLmbc) { + if (inputFileType != InputFileType::kFileTypeBpl && + inputFileType != InputFileType::kFileTypeMbc && + inputFileType != InputFileType::kFileTypeLmbc) { MIRParser 
parser(*theModule); bool parsed = parser.ParseSrcLang(srcLang); if (!parsed) { @@ -325,7 +325,7 @@ void DriverRunner::ProcessCGPhase(const std::string &output, const std::string & theMIRModule = theModule; if (withDwarf && !theModule->IsWithDbgInfo()) { theMIRModule->GetDbgInfo()->BuildDebugInfo(); -#if DEBUG +#if defined(DEBUG) && DEBUG if (cgOptions) { cgOptions->SetOption(CGOptions::kVerboseAsm); } @@ -370,6 +370,7 @@ void DriverRunner::ProcessCGPhase(const std::string &output, const std::string & if (opts::debug) { LogInfo::MapleLogger() << "Mplcg consumed " << timer.ElapsedMilliseconds() << "ms" << '\n'; } + Globals::GetInstance()->ClearMAD(); } void DriverRunner::InitProfile() const { diff --git a/src/mapleall/maple_driver/src/ld_compiler.cpp b/src/mapleall/maple_driver/src/ld_compiler.cpp index 46541176077271d1e322a7ce6d85fc47ebfdff41..dce6a74c0a7daa0a55e35d0a3abff1c28935eef3 100644 --- a/src/mapleall/maple_driver/src/ld_compiler.cpp +++ b/src/mapleall/maple_driver/src/ld_compiler.cpp @@ -28,12 +28,8 @@ std::string LdCompilerBeILP32::GetBinPath(const MplOptions &mplOptions [[maybe_u kAarch64BeIlp32Gcc : kAarch64BeGcc; std::string gccToolPath = gccPath + gccTool; - if (!FileUtils::IsFileExists(gccToolPath)) { - LogInfo::MapleLogger(kLlErr) << kGccBePathEnv << " environment variable must be set as the path to " - << gccTool << "\n"; - CHECK_FATAL(false, "%s environment variable must be set as the path to %s\n", - kGccBePathEnv, gccTool.c_str()); - } + CHECK_FATAL(FileUtils::IsFileExists(gccToolPath), "%s environment variable must be set as the path to %s\n", + kGccBePathEnv, gccTool.c_str()); return gccPath; } @@ -60,12 +56,12 @@ std::string LdCompiler::GetBin(const MplOptions &mplOptions [[maybe_unused]]) co #ifdef ANDROID return "prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/"; #else - if (FileUtils::SafeGetenv(kMapleRoot) != "") { - return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/aarch64-linux-gnu-gcc"; - } else if 
(FileUtils::SafeGetenv(kGccPath) != "") { + if (FileUtils::SafeGetenv(kGccPath) != "") { std::string gccPath = FileUtils::SafeGetenv(kGccPath) + " -dumpversion"; FileUtils::CheckGCCVersion(gccPath.c_str()); return FileUtils::SafeGetenv(kGccPath); + } else if (FileUtils::SafeGetenv(kMapleRoot) != "") { + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/aarch64-linux-gnu-gcc"; } std::string gccPath = FileUtils::SafeGetPath("which aarch64-linux-gnu-gcc", "aarch64-linux-gnu-gcc") + " -dumpversion"; diff --git a/src/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp b/src/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp index 032a66593c9aa6f1d4096c61368a2c6653997e54..959219c0d41fc06bf803c507d21b98c2ca0a897a 100644 --- a/src/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp +++ b/src/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp @@ -26,10 +26,6 @@ const std::string &MapleCombCompilerWrp::GetBinName() const { } std::string MapleCombCompilerWrp::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const { - if (FileUtils::SafeGetenv(kMapleRoot) != "") { - return FileUtils::SafeGetenv(kMapleRoot) + "/output/" + - FileUtils::SafeGetenv("MAPLE_BUILD_TYPE") + "/bin/"; - } return mplOptions.GetExeFolder(); } @@ -46,7 +42,7 @@ DefaultOption MapleCombCompilerWrp::GetDefaultOptions(const MplOptions &options */ defaultOptions.mplOptions[0].SetKey("--maple-phase"); defaultOptions.mplOptions[0].SetValue(""); - defaultOptions.mplOptions[1].SetKey("-p"); + defaultOptions.mplOptions[1].SetKey("-tmp-folder"); defaultOptions.mplOptions[1].SetValue(opts::onlyCompile.IsEnabledByUser() ? 
action.GetInputFolder() : action.GetOutputFolder()); diff --git a/src/mapleall/maple_driver/src/mpl_options.cpp b/src/mapleall/maple_driver/src/mpl_options.cpp index 72aab00802837a18725227213060bed703e60ac4..69510cfbc07f872c3f660ef6379f28241ea970eb 100644 --- a/src/mapleall/maple_driver/src/mpl_options.cpp +++ b/src/mapleall/maple_driver/src/mpl_options.cpp @@ -63,6 +63,22 @@ const std::vector kMapleCompilers = { "jbc2mpl", "hir2mpl", ErrorCode MplOptions::Parse(int argc, char **argv) { (void)maplecl::CommandLine::GetCommandLine().Parse(argc, argv); exeFolder = FileUtils::GetFileFolder(FileUtils::GetExecutable()); + if (maplecl::CommandLine::GetCommandLine().GetUseLitePgoGen() && + !maplecl::CommandLine::GetCommandLine().GetHasPgoLib()) { + std::string libpgoName = opts::staticLibmplpgo.IsEnabledByUser() ? "libmplpgo.a" : "libmplpgo.so"; + std::string pgoLibPath = ""; + if (FileUtils::SafeGetenv(kMapleRoot) != "" && FileUtils::SafeGetenv(kGetOsVersion) != "") { + pgoLibPath = FileUtils::SafeGetenv(kMapleRoot) + "/libpgo/lib_" + + FileUtils::SafeGetenv(kGetOsVersion) + "/" + libpgoName; + } else { + std::string tmpFilePath = maple::StringUtils::GetStrBeforeLast(exeFolder, "/"); + pgoLibPath = maple::StringUtils::GetStrBeforeLast(tmpFilePath, "/") + "/lib/libc_pgo/" + libpgoName; + } + CHECK_FATAL(FileUtils::IsFileExists(pgoLibPath), "%s not exit.", pgoLibPath.c_str()); + std::string threadLibPath = "-lpthread"; + maplecl::CommandLine::GetCommandLine().GetLinkOptions().push_back(pgoLibPath); + maplecl::CommandLine::GetCommandLine().GetLinkOptions().push_back(threadLibPath); + } // We should recognize O0, O2 and run options firstly to decide the real options ErrorCode ret = HandleEarlyOptions(); @@ -103,7 +119,8 @@ ErrorCode MplOptions::Parse(int argc, char **argv) { } ErrorCode MplOptions::HandleOptions() { - if (opts::output.IsEnabledByUser() && GetActions().size() > 1) { + if (opts::output.IsEnabledByUser() && inputInfos.size() > 1 && + 
(opts::onlyCompile.IsEnabledByUser() || opts::compileWOLink.IsEnabledByUser())) { LogInfo::MapleLogger(kLlErr) << "Cannot specify -o with -c, -S when generating multiple output\n"; return kErrorInvalidParameter; } @@ -145,13 +162,9 @@ void MplOptions::HandleSafeOptions() { } ErrorCode MplOptions::HandleEarlyOptions() { - if (opts::version) { + if (opts::version || opts::oDumpversion) { LogInfo::MapleLogger() << kMapleDriverVersion << "\n"; - - /* exit, if only one "version" option is set. Else: continue compilation */ - if (driverCategory.GetEnabledOptions().size() == 1) { - return kErrorExitHelp; - } + exit(0); } if (opts::printDriverPhases) { @@ -277,14 +290,21 @@ std::unique_ptr MplOptions::DecideRunningPhasesByType(const InputInfo *c UpdateRunningExe(kBinNameClang); newAction = std::make_unique(kBinNameClang, inputInfo, currentAction); currentAction = std::move(newAction); - if (inputFileType == InputFileType::kFileTypeH || opts::onlyPreprocess.IsEnabledByUser()) { + if (inputFileType == InputFileType::kFileTypeH || opts::onlyPreprocess.IsEnabledByUser() || + (opts::linkerTimeOpt.IsEnabledByUser() && opts::compileWOLink.IsEnabledByUser()) || + opts::oM.IsEnabledByUser() || opts::oMM.IsEnabledByUser() || opts::oMG.IsEnabledByUser() || + opts::oMQ.IsEnabledByUser()) { return currentAction; } [[clang::fallthrough]]; + case InputFileType::kFileTypeOast: case InputFileType::kFileTypeAst: UpdateRunningExe(kBinNameCpp2mpl); newAction = std::make_unique(kBinNameCpp2mpl, inputInfo, currentAction); currentAction = std::move(newAction); + if (isAllAst) { + return currentAction; + } break; case InputFileType::kFileTypeJar: // fall-through @@ -365,9 +385,28 @@ ErrorCode MplOptions::DecideRunningPhases() { ErrorCode ret = kErrorNoError; std::vector> linkActions; std::unique_ptr lastAction; - bool isMultipleFiles = (inputInfos.size() > 1); + if (isAllAst) { + std::vector> tmpInputInfos; + for (auto &inputInfo : inputInfos) { + if (inputInfo->GetInputFileType() == 
InputFileType::kFileTypeObj) { + tmpInputInfos.push_back(std::move(inputInfo)); + } else { + hirInputFiles.push_back(inputInfo->GetInputFile()); + } + } + inputInfos.clear(); + inputInfos = std::move(tmpInputInfos); + tmpInputInfos.clear(); + auto lastOastInfo = hirInputFiles.back(); + hirInputFiles.pop_back(); + inputInfos.push_back(std::make_unique(lastOastInfo)); + inputInfos.push_back(std::make_unique(InputInfo(inputInfos.back()->GetOutputFolder() + "tmp.mpl", + InputFileType::kFileTypeMpl, "tmp.mpl", inputInfos.back()->GetOutputFolder(), + inputInfos.back()->GetOutputFolder(), "tmp", inputInfos.back()->GetOutputFolder() + "tmp"))); + } + for (auto &inputInfo : inputInfos) { CHECK_FATAL(inputInfo != nullptr, "InputInfo must be created!!"); @@ -513,7 +552,7 @@ void MplOptions::DumpActionTree(const Action &action, int indents) const { std::string MplOptions::GetCommonOptionsStr() const { std::string driverOptions; static const std::vector extraExclude = { - &opts::run, &opts::optionOpt, &opts::infile, &opts::mpl2mplOpt, &opts::meOpt,&opts::mplcgOpt, + &opts::run, &opts::optionOpt, &opts::infile, &opts::mpl2mplOpt, &opts::meOpt, &opts::mplcgOpt, &opts::o0, &opts::o1, &opts::o2, &opts::o3, &opts::os }; @@ -521,11 +560,7 @@ std::string MplOptions::GetCommonOptionsStr() const { if (!(std::find(std::begin(extraExclude), std::end(extraExclude), opt) != std::end(extraExclude))) { for (const auto &val : opt->GetRawValues()) { if (!val.empty()) { - if (opt->GetName() == "-Wl") { - driverOptions += val + " "; - } else { - driverOptions += opt->GetName() + " " + val + " "; - } + driverOptions += opt->GetName() + " " + val + " "; } else { driverOptions += opt->GetName() + " "; } @@ -573,14 +608,8 @@ ErrorCode MplOptions::CheckInputFiles() { /* Set input files directly: maple file1 file2 */ for (auto &arg : badArgs) { if (FileUtils::IsFileExists(arg.first)) { - int inedx = static_cast(arg.first.find_last_of(".")); - std::string tmp = arg.first.substr(inedx); - if (tmp == ".a" 
|| tmp == ".so") { - linkInputFiles.push_back(arg.first); - } else { - inputFiles.push_back(arg.first); - inputInfos.push_back(std::make_unique(arg.first)); - } + inputFiles.push_back(arg.first); + inputInfos.push_back(std::make_unique(arg.first)); } else { LogInfo::MapleLogger(kLlErr) << "Unknown option or non-existent input file: " << arg.first << "\n"; if (!opts::ignoreUnkOpt) { @@ -589,6 +618,22 @@ ErrorCode MplOptions::CheckInputFiles() { } } + bool isOast = false; + bool notAstOrElf = false; + for (auto &inputInfo : inputInfos) { + if (inputInfo->GetInputFileType() == InputFileType::kFileTypeOast) { + isOast = true; + } else if (inputInfo->GetInputFileType() != InputFileType::kFileTypeObj && + inputInfo->GetInputFileType() != InputFileType::kFileTypeAst) { + notAstOrElf = true; + } + } + if (isOast && notAstOrElf) { + LogInfo::MapleLogger(kLlErr) << "Only .o and obj files in Ir format can be compiled together." << "\n"; + return kErrorInvalidParameter; + } + isAllAst = isOast; + if (inputFiles.empty()) { return kErrorFileNotFound; } diff --git a/src/mapleall/maple_driver/src/mplcg_compiler.cpp b/src/mapleall/maple_driver/src/mplcg_compiler.cpp index 96420878ffcc1fc2cdee11b704c0a2c52d97c42f..148d80022f761210a40bfe3be4b747a97403334b 100644 --- a/src/mapleall/maple_driver/src/mplcg_compiler.cpp +++ b/src/mapleall/maple_driver/src/mplcg_compiler.cpp @@ -129,10 +129,6 @@ ErrorCode MplcgCompiler::MakeCGOptions(const MplOptions &options) const { } CGOptions &cgOption = CGOptions::GetInstance(); cgOption.SetOption(CGOptions::kDefaultOptions); -#if DEBUG - /* for convinence .loc is generated by default for debug maple compiler */ - cgOption.SetOption(CGOptions::kWithLoc); -#endif /* use maple flags to set cg flags */ if (opts::withDwarf) { cgOption.SetOption(CGOptions::kWithDwarf); @@ -204,9 +200,9 @@ ErrorCode MplcgCompiler::Compile(MplOptions &options, const Action &action, theModule->SetWithMe( std::find(options.GetRunningExes().begin(), 
options.GetRunningExes().end(), kBinNameMe) != options.GetRunningExes().end()); - if (action.GetInputFileType() != kFileTypeBpl && - action.GetInputFileType() != kFileTypeMbc && - action.GetInputFileType() != kFileTypeLmbc) { + if (action.GetInputFileType() != InputFileType::kFileTypeBpl && + action.GetInputFileType() != InputFileType::kFileTypeMbc && + action.GetInputFileType() != InputFileType::kFileTypeLmbc) { std::unique_ptr theParser; theParser.reset(new MIRParser(*theModule)); bool parsed = theParser->ParseMIR(0, cgOption.GetParserOption()); diff --git a/src/mapleall/maple_driver/src/triple.cpp b/src/mapleall/maple_driver/src/triple.cpp index 6dd90fc2517c5112130e0c199b70b2e07320a1f4..2c5d5354db435e5fa68eb9891f413182b70290c3 100644 --- a/src/mapleall/maple_driver/src/triple.cpp +++ b/src/mapleall/maple_driver/src/triple.cpp @@ -19,18 +19,5086 @@ namespace opts { maplecl::Option bigendian({"-Be", "--Be", "--BigEndian", "-be", "--be", "-mbig-endian"}, - " --BigEndian/-Be \tUsing BigEndian\n" - " --no-BigEndian \tUsing LittleEndian\n", - {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}, - maplecl::DisableWith("--no-BigEndian")); + " --BigEndian/-Be \tUsing BigEndian\n" + " --no-BigEndian \tUsing LittleEndian\n", + {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}, + maplecl::DisableWith("--no-BigEndian")); maplecl::Option ilp32({"--ilp32", "-ilp32", "--arm64-ilp32"}, - " --ilp32 \tarm64 with a 32-bit ABI instead of a 64bit ABI\n", - {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); + " --ilp32 \tarm64 with a 32-bit ABI instead of a 64bit ABI\n", + {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); maplecl::Option mabi({"-mabi"}, - " -mabi= \tSpecify integer and floating-point calling convention\n", - {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); + " -mabi= \tSpecify integer and floating-point calling convention\n", + {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); + 
+maplecl::Option oM68030({"-m68030"}, + " -m68030 \tGenerate output for a 68030. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oM68040({"-m68040"}, + " -m68040 \tGenerate output for a 68040.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM68060({"-m68060"}, + " -m68060 \tGenerate output for a 68060.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM68881({"-m68881"}, + " -m68881 \tGenerate floating-point instructions. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oM8Bit({"-m8-bit"}, + " -m8-bit \tArrange for stack frame, writable data and constants to " + "all 8-bit aligned\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM8bitIdiv({"-m8bit-idiv"}, + " -m8bit-idiv \tThis option generates a run-time check\n", + {driverCategory, unSupCategory}); + +maplecl::Option oM8byteAlign({"-m8byte-align"}, + " -m8byte-align \tEnables support for double and long long types to be " + "aligned on 8-byte boundaries.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-8byte-align")); + +maplecl::Option oM96bitLongDouble({"-m96bit-long-double"}, + " -m96bit-long-double \tControl the size of long double type\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMA6({"-mA6"}, + " -mA6 \tCompile for ARC600.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMA7({"-mA7"}, + " -mA7 \tCompile for ARC700.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMabicalls({"-mabicalls"}, + " -mabicalls \tGenerate (do not generate) code that is suitable for " + "SVR4-style dynamic objects. 
-mabicalls is the default for SVR4-based systems.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-abicalls")); + +maplecl::Option oMabm({"-mabm"}, + " -mabm \tThese switches enable the use of instructions in the mabm.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMabortOnNoreturn({"-mabort-on-noreturn"}, + " -mabort-on-noreturn \tGenerate a call to the function abort at " + "the end of a noreturn function.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMabs2008({"-mabs=2008"}, + " -mabs=2008 \tThe option selects the IEEE 754-2008 treatment\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMabsLegacy({"-mabs=legacy"}, + " -mabs=legacy \tThe legacy treatment is selected\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMabsdata({"-mabsdata"}, + " -mabsdata \tAssume that all data in static storage can be " + "accessed by LDS / STS instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMabsdiff({"-mabsdiff"}, + " -mabsdiff \tEnables the abs instruction, which is the absolute " + "difference between two registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMabshi({"-mabshi"}, + " -mabshi \tUse abshi2 pattern. 
This is the default.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-abshi")); + +maplecl::Option oMac0({"-mac0"}, + " -mac0 \tReturn floating-point results in ac0 " + "(fr0 in Unix assembler syntax).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-ac0")); + +maplecl::Option oMacc4({"-macc-4"}, + " -macc-4 \tUse only the first four media accumulator registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMacc8({"-macc-8"}, + " -macc-8 \tUse all eight media accumulator registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaccumulateArgs({"-maccumulate-args"}, + " -maccumulate-args \tAccumulate outgoing function arguments and " + "acquire/release the needed stack space for outgoing function arguments once in" + " function prologue/epilogue.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaccumulateOutgoingArgs({"-maccumulate-outgoing-args"}, + " -maccumulate-outgoing-args \tReserve space once for outgoing arguments " + "in the function prologue rather than around each call. Generally beneficial for " + "performance and size\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaddressModeLong({"-maddress-mode=long"}, + " -maddress-mode=long \tGenerate code for long address mode.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaddressModeShort({"-maddress-mode=short"}, + " -maddress-mode=short \tGenerate code for short address mode.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaddressSpaceConversion({"-maddress-space-conversion"}, + " -maddress-space-conversion \tAllow/disallow treating the __ea address " + "space as superset of the generic address space. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-address-space-conversion")); + +maplecl::Option oMads({"-mads"}, + " -mads \tOn embedded PowerPC systems, assume that the startup module " + "is called crt0.o and the standard C libraries are libads.a and libc.a.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaes({"-maes"}, + " -maes \tThese switches enable the use of instructions in the maes.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaixStructReturn({"-maix-struct-return"}, + " -maix-struct-return \tReturn all structures in memory " + "(as specified by the AIX ABI).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaix32({"-maix32"}, + " -maix32 \tEnable 64-bit AIX ABI and calling convention: 32-bit " + "pointers, 32-bit long type, and the infrastructure needed to support them.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaix64({"-maix64"}, + " -maix64 \tEnable 64-bit AIX ABI and calling convention: 64-bit " + "pointers, 64-bit long type, and the infrastructure needed to support them.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMalign300({"-malign-300"}, + " -malign-300 \tOn the H8/300H and H8S, use the same alignment " + "rules as for the H8/300\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMalignCall({"-malign-call"}, + " -malign-call \tDo alignment optimizations for call instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMalignData({"-malign-data"}, + " -malign-data \tControl how GCC aligns variables. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMalignDouble({"-malign-double"}, + " -malign-double \tControl whether GCC aligns double, long double, and " + "long long variables on a two-word boundary or a one-word boundary.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-align-double")); + +maplecl::Option oMalignInt({"-malign-int"}, + " -malign-int \tAligning variables on 32-bit boundaries produces code that" + " runs somewhat faster on processors with 32-bit busses at the expense of more " + "memory.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-align-int")); + +maplecl::Option oMalignLabels({"-malign-labels"}, + " -malign-labels \tTry to align labels to an 8-byte boundary by inserting" + " NOPs into the previous packet. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMalignLoops({"-malign-loops"}, + " -malign-loops \tAlign all loops to a 32-byte boundary.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-align-loops")); + +maplecl::Option oMalignNatural({"-malign-natural"}, + " -malign-natural \tThe option -malign-natural overrides the ABI-defined " + "alignment of larger types\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMalignPower({"-malign-power"}, + " -malign-power \tThe option -malign-power instructs Maple to follow " + "the ABI-specified alignment rules\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMallOpts({"-mall-opts"}, + " -mall-opts \tEnables all the optional instructions—average, multiply, " + "divide, bit operations, leading zero, absolute difference, min/max, clip, " + "and saturation.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-opts")); + +maplecl::Option oMallocCc({"-malloc-cc"}, + " -malloc-cc \tDynamically allocate condition code registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMallowStringInsns({"-mallow-string-insns"}, + " -mallow-string-insns \tEnables or disables the use of the 
string " + "manipulation instructions SMOVF, SCMPU, SMOVB, SMOVU, SUNTIL SWHILE and also the " + "RMPA instruction.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-allow-string-insns")); + +maplecl::Option oMallregs({"-mallregs"}, + " -mallregs \tAllow the compiler to use all of the available registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaltivec({"-maltivec"}, + " -maltivec \tGenerate code that uses (does not use) AltiVec instructions, " + "and also enable the use of built-in functions that allow more direct access to the " + "AltiVec instruction set. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-altivec")); + +maplecl::Option oMaltivecBe({"-maltivec=be"}, + " -maltivec=be \tGenerate AltiVec instructions using big-endian element " + "order, regardless of whether the target is big- or little-endian. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaltivecLe({"-maltivec=le"}, + " -maltivec=le \tGenerate AltiVec instructions using little-endian element" + " order, regardless of whether the target is big- or little-endian.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMam33({"-mam33"}, + " -mam33 \tGenerate code using features specific to the AM33 processor.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-am33")); + +maplecl::Option oMam332({"-mam33-2"}, + " -mam33-2 \tGenerate code using features specific to the " + "AM33/2.0 processor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMam34({"-mam34"}, + " -mam34 \tGenerate code using features specific to the AM34 processor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMandroid({"-mandroid"}, + " -mandroid \tCompile code compatible with Android platform.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMannotateAlign({"-mannotate-align"}, + " -mannotate-align \tExplain what alignment considerations lead to the " + "decision to make an instruction short or long.\n", + 
{driverCategory, unSupCategory}); + +maplecl::Option oMapcs({"-mapcs"}, + " -mapcs \tThis is a synonym for -mapcs-frame and is deprecated.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMapcsFrame({"-mapcs-frame"}, + " -mapcs-frame \tGenerate a stack frame that is compliant with the ARM " + "Procedure Call Standard for all functions, even if this is not strictly necessary " + "for correct execution of the code. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMappRegs({"-mapp-regs"}, + " -mapp-regs \tSpecify -mapp-regs to generate output using the global " + "registers 2 through 4, which the SPARC SVR4 ABI reserves for applications. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-app-regs")); + +maplecl::Option oMARC600({"-mARC600"}, + " -mARC600 \tCompile for ARC600.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMARC601({"-mARC601"}, + " -mARC601 \tCompile for ARC601.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMARC700({"-mARC700"}, + " -mARC700 \tCompile for ARC700.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMarclinux({"-marclinux"}, + " -marclinux \tPassed through to the linker, to specify use of the " + "arclinux emulation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMarclinux_prof({"-marclinux_prof"}, + " -marclinux_prof \tPassed through to the linker, to specify use of " + "the arclinux_prof emulation. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMargonaut({"-margonaut"}, + " -margonaut \tObsolete FPX.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMarm({"-marm"}, + " -marm \tSelect between generating code that executes in ARM and " + "Thumb states. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMas100Syntax({"-mas100-syntax"}, + " -mas100-syntax \tWhen generating assembler output use a syntax that " + "is compatible with Renesas’s AS100 assembler. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-as100-syntax")); + +maplecl::Option oMasmHex({"-masm-hex"}, + " -masm-hex \tForce assembly output to always use hex constants.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMasmSyntaxUnified({"-masm-syntax-unified"}, + " -masm-syntax-unified \tAssume inline assembler is using unified " + "asm syntax. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMasmDialect({"-masm=dialect"}, + " -masm=dialect \tOutput assembly instructions using selected dialect. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMatomic({"-matomic"}, + " -matomic \tThis enables use of the locked load/store conditional " + "extension to implement atomic memory built-in functions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMatomicModel({"-matomic-model"}, + " -matomic-model \tSets the model of atomic operations and additional " + "parameters as a comma separated list. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMatomicUpdates({"-matomic-updates"}, + " -matomic-updates \tThis option controls the version of libgcc that the " + "compiler links to an executable and selects whether atomic updates to the " + "software-managed cache of PPU-side variables are used. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-atomic-updates")); + +maplecl::Option oMautoLitpools({"-mauto-litpools"}, + " -mauto-litpools \tControl the treatment of literal pools.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-auto-litpools")); + +maplecl::Option oMautoModifyReg({"-mauto-modify-reg"}, + " -mauto-modify-reg \tEnable the use of pre/post modify with " + "register displacement.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMautoPic({"-mauto-pic"}, + " -mauto-pic \tGenerate code that is self-relocatable. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaverage({"-maverage"}, + " -maverage \tEnables the ave instruction, which computes the " + "average of two registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavoidIndexedAddresses({"-mavoid-indexed-addresses"}, + " -mavoid-indexed-addresses \tGenerate code that tries to avoid " + "(not avoid) the use of indexed load or store instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-avoid-indexed-addresses")); + +maplecl::Option oMavx({"-mavx"}, + " -mavx \tMaple depresses SSEx instructions when -mavx is used. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx2({"-mavx2"}, + " -mavx2 \tEnable the use of instructions in the mavx2.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx256SplitUnalignedLoad({"-mavx256-split-unaligned-load"}, + " -mavx256-split-unaligned-load \tSplit 32-byte AVX unaligned load .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx256SplitUnalignedStore({"-mavx256-split-unaligned-store"}, + " -mavx256-split-unaligned-store \tSplit 32-byte AVX unaligned store.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512bw({"-mavx512bw"}, + " -mavx512bw \tEnable the use of instructions in the mavx512bw.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512cd({"-mavx512cd"}, + " -mavx512cd \tEnable the use of instructions in the mavx512cd.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512dq({"-mavx512dq"}, + " -mavx512dq \tEnable the use of instructions in the mavx512dq.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512er({"-mavx512er"}, + " -mavx512er \tEnable the use of instructions in the mavx512er.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512f({"-mavx512f"}, + " -mavx512f \tEnable the use of instructions in the mavx512f.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512ifma({"-mavx512ifma"}, + " 
-mavx512ifma \tEnable the use of instructions in the mavx512ifma.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512pf({"-mavx512pf"}, + " -mavx512pf \tEnable the use of instructions in the mavx512pf.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512vbmi({"-mavx512vbmi"}, + " -mavx512vbmi \tEnable the use of instructions in the mavx512vbmi.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMavx512vl({"-mavx512vl"}, + " -mavx512vl \tEnable the use of instructions in the mavx512vl.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMaxVectAlign({"-max-vect-align"}, + " -max-vect-align \tThe maximum alignment for SIMD vector mode types.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMb({"-mb"}, + " -mb \tCompile code for the processor in big-endian mode.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbackchain({"-mbackchain"}, + " -mbackchain \tStore (do not store) the address of the caller's frame as" + " backchain pointer into the callee's stack frame.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-backchain")); + +maplecl::Option oMbarrelShiftEnabled({"-mbarrel-shift-enabled"}, + " -mbarrel-shift-enabled \tEnable barrel-shift instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbarrelShifter({"-mbarrel-shifter"}, + " -mbarrel-shifter \tGenerate instructions supported by barrel shifter. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbarrel_shifter({"-mbarrel_shifter"}, + " -mbarrel_shifter \tReplaced by -mbarrel-shifter.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbaseAddresses({"-mbase-addresses"}, + " -mbase-addresses \tGenerate code that uses base addresses. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-base-addresses")); + +maplecl::Option oMbased({"-mbased"}, + " -mbased \tVariables of size n bytes or smaller are placed in the .based " + "section by default\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbbitPeephole({"-mbbit-peephole"}, + " -mbbit-peephole \tEnable bbit peephole2.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbcopy({"-mbcopy"}, + " -mbcopy \tDo not use inline movmemhi patterns for copying memory.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbcopyBuiltin({"-mbcopy-builtin"}, + " -mbcopy-builtin \tUse inline movmemhi patterns for copying memory. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbig({"-mbig"}, + " -mbig \tCompile code for big-endian targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbigEndianData({"-mbig-endian-data"}, + " -mbig-endian-data \tStore data (but not code) in the big-endian " + "format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbigSwitch({"-mbig-switch"}, + " -mbig-switch \tGenerate code suitable for big switch tables.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbigtable({"-mbigtable"}, + " -mbigtable \tUse 32-bit offsets in switch tables. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbionic({"-mbionic"}, + " -mbionic \tUse Bionic C library.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbitAlign({"-mbit-align"}, + " -mbit-align \tOn System V.4 and embedded PowerPC systems do not force " + "structures and unions that contain bit-fields to be aligned to the base type of the " + "bit-field.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-bit-align")); + +maplecl::Option oMbitOps({"-mbit-ops"}, + " -mbit-ops \tGenerates sbit/cbit instructions for bit manipulations.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbitfield({"-mbitfield"}, + " -mbitfield \tDo use the bit-field instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-bitfield")); + +maplecl::Option oMbitops({"-mbitops"}, + " -mbitops \tEnables the bit operation instructions—bit test (btstm), " + "set (bsetm), clear (bclrm), invert (bnotm), and test-and-set (tas).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMblockMoveInlineLimit({"-mblock-move-inline-limit"}, + " -mblock-move-inline-limit \tInline all block moves (such as calls " + "to memcpy or structure copies) less than or equal to num bytes. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbmi({"-mbmi"}, + " -mbmi \tThese switches enable the use of instructions in the mbmi.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbranchCheap({"-mbranch-cheap"}, + " -mbranch-cheap \tDo not pretend that branches are expensive. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbranchCost({"-mbranch-cost"}, + " -mbranch-cost \tSet the cost of branches to roughly num “simple” " + "instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbranchExpensive({"-mbranch-expensive"}, + " -mbranch-expensive \tPretend that branches are expensive.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbranchHints({"-mbranch-hints"}, + " -mbranch-hints \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbranchLikely({"-mbranch-likely"}, + " -mbranch-likely \tEnable or disable use of Branch Likely instructions, " + "regardless of the default for the selected architecture.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-branch-likely")); + +maplecl::Option oMbranchPredict({"-mbranch-predict"}, + " -mbranch-predict \tUse the probable-branch instructions, when static " + "branch prediction indicates a probable branch.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-branch-predict")); + +maplecl::Option oMbssPlt({"-mbss-plt"}, + " -mbss-plt \tGenerate code that uses a BSS .plt section that ld.so " + "fills in, and requires .plt and .got sections that are both writable and executable.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbuildConstants({"-mbuild-constants"}, + " -mbuild-constants \tConstruct all integer constants using code, even " + "if it takes more instructions (the maximum is six).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMbwx({"-mbwx"}, + " -mbwx \tGenerate code to use the optional BWX instruction sets.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-bwx")); + +maplecl::Option oMbypassCache({"-mbypass-cache"}, + " -mbypass-cache \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-bypass-cache")); + +maplecl::Option oMc68000({"-mc68000"}, + " -mc68000 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option 
oMc68020({"-mc68020"}, + " -mc68020 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMc({"-mc"}, + " -mc \tSelects which section constant data is placed in. name may " + "be 'tiny', 'near', or 'far'.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcacheBlockSize({"-mcache-block-size"}, + " -mcache-block-size \tSpecify the size of each cache block, which must " + "be a power of 2 between 4 and 512.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcacheSize({"-mcache-size"}, + " -mcache-size \tThis option controls the version of libgcc that the " + "compiler links to an executable and selects a software-managed cache for accessing " + "variables in the __ea address space with a particular cache size.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcacheVolatile({"-mcache-volatile"}, + " -mcache-volatile \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-cache-volatile")); + +maplecl::Option oMcallEabi({"-mcall-eabi"}, + " -mcall-eabi \tSpecify both -mcall-sysv and -meabi options.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallAixdesc({"-mcall-aixdesc"}, + " -mcall-aixdesc \tOn System V.4 and embedded PowerPC systems " + "compile code for the AIX operating system.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallFreebsd({"-mcall-freebsd"}, + " -mcall-freebsd \tOn System V.4 and embedded PowerPC systems compile " + "code for the FreeBSD operating system.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallLinux({"-mcall-linux"}, + " -mcall-linux \tOn System V.4 and embedded PowerPC systems compile " + "code for the Linux-based GNU system.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallOpenbsd({"-mcall-openbsd"}, + " -mcall-openbsd \tOn System V.4 and embedded PowerPC systems compile " + "code for the OpenBSD operating system.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallNetbsd({"-mcall-netbsd"}, + " -mcall-netbsd 
\tOn System V.4 and embedded PowerPC systems compile" + " code for the NetBSD operating system.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallPrologues({"-mcall-prologues"}, + " -mcall-prologues \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallSysv({"-mcall-sysv"}, + " -mcall-sysv \tOn System V.4 and embedded PowerPC systems compile code " + "using calling conventions that adhere to the March 1995 draft of the System V " + "Application Binary Interface, PowerPC processor supplement. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallSysvEabi({"-mcall-sysv-eabi"}, + " -mcall-sysv-eabi \tSpecify both -mcall-sysv and -meabi options.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallSysvNoeabi({"-mcall-sysv-noeabi"}, + " -mcall-sysv-noeabi \tSpecify both -mcall-sysv and -mno-eabi options.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcalleeSuperInterworking({"-mcallee-super-interworking"}, + " -mcallee-super-interworking \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallerCopies({"-mcaller-copies"}, + " -mcaller-copies \tThe caller copies function arguments " + "passed by hidden reference.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallerSuperInterworking({"-mcaller-super-interworking"}, + " -mcaller-super-interworking \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcallgraphData({"-mcallgraph-data"}, + " -mcallgraph-data \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-callgraph-data")); + +maplecl::Option oMcaseVectorPcrel({"-mcase-vector-pcrel"}, + " -mcase-vector-pcrel \tUse PC-relative switch case tables to enable " + "case table shortening. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcbcond({"-mcbcond"}, + " -mcbcond \tWith -mcbcond, Maple generates code that takes advantage of " + "the UltraSPARC Compare-and-Branch-on-Condition instructions. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-cbcond")); + +maplecl::Option oMcbranchForceDelaySlot({"-mcbranch-force-delay-slot"}, + " -mcbranch-force-delay-slot \tForce the usage of delay slots for " + "conditional branches, which stuffs the delay slot with a nop if a suitable " + "instruction cannot be found. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMccInit({"-mcc-init"}, + " -mcc-init \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcfv4e({"-mcfv4e"}, + " -mcfv4e \tGenerate output for a ColdFire V4e family CPU " + "(e.g. 547x/548x).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcheckZeroDivision({"-mcheck-zero-division"}, + " -mcheck-zero-division \tTrap (do not trap) on integer division " + "by zero.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-check-zero-division")); + +maplecl::Option oMcix({"-mcix"}, + " -mcix \tGenerate code to use the optional CIX instruction sets.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-cix")); + +maplecl::Option oMcld({"-mcld"}, + " -mcld \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMclearHwcap({"-mclear-hwcap"}, + " -mclear-hwcap \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMclflushopt({"-mclflushopt"}, + " -mclflushopt \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMclip({"-mclip"}, + " -mclip \tEnables the clip instruction.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMclzero({"-mclzero"}, + " -mclzero \tThese switches enable the use of instructions in the m" + "clzero.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcmodel({"-mcmodel"}, + " -mcmodel \tSpecify the code model.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcmov({"-mcmov"}, + " -mcmov \tGenerate conditional move instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-cmov")); + +maplecl::Option oMcmove({"-mcmove"}, + " 
-mcmove \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcmpb({"-mcmpb"}, + " -mcmpb \tSpecify which instructions are available on " + "the processor you are using. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-cmpb")); + +maplecl::Option oMcmse({"-mcmse"}, + " -mcmse \tGenerate secure code as per the ARMv8-M Security Extensions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcodeDensity({"-mcode-density"}, + " -mcode-density \tEnable code density instructions for ARC EM. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcodeReadable({"-mcode-readable"}, + " -mcode-readable \tSpecify whether Maple may generate code that reads " + "from executable sections.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcodeRegion({"-mcode-region"}, + " -mcode-region \tthe compiler where to place functions and data that " + "do not have one of the lower, upper, either or section attributes.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcompactBranchesAlways({"-mcompact-branches=always"}, + " -mcompact-branches=always \tThe -mcompact-branches=always option " + "ensures that a compact branch instruction will be generated if available. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcompactBranchesNever({"-mcompact-branches=never"}, + " -mcompact-branches=never \tThe -mcompact-branches=never option ensures " + "that compact branch instructions will never be generated.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcompactBranchesOptimal({"-mcompact-branches=optimal"}, + " -mcompact-branches=optimal \tThe -mcompact-branches=optimal option " + "will cause a delay slot branch to be used if one is available in the current ISA " + "and the delay slot is successfully filled.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcompactCasesi({"-mcompact-casesi"}, + " -mcompact-casesi \tEnable compact casesi pattern.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcompatAlignParm({"-mcompat-align-parm"}, + " -mcompat-align-parm \tGenerate code to pass structure parameters with a" + " maximum alignment of 64 bits, for compatibility with older versions of maple.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcondExec({"-mcond-exec"}, + " -mcond-exec \tEnable the use of conditional execution (default).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-cond-exec")); + +maplecl::Option oMcondMove({"-mcond-move"}, + " -mcond-move \tEnable the use of conditional-move " + "instructions (default).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-cond-move")); + +maplecl::Option oMconfig({"-mconfig"}, + " -mconfig \tSelects one of the built-in core configurations. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMconsole({"-mconsole"}, + " -mconsole \tThis option specifies that a console application is to " + "be generated, by instructing the linker to set the PE header subsystem type required " + "for console applications.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMconstAlign({"-mconst-align"}, + " -mconst-align \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-const-align")); + +maplecl::Option oMconst16({"-mconst16"}, + " -mconst16 \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-const16")); + +maplecl::Option oMconstantGp({"-mconstant-gp"}, + " -mconstant-gp \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcop({"-mcop"}, + " -mcop \tEnables the coprocessor instructions. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcop32({"-mcop32"}, + " -mcop32 \tEnables the 32-bit coprocessor's instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcop64({"-mcop64"}, + " -mcop64 \tEnables the 64-bit coprocessor's instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcorea({"-mcorea"}, + " -mcorea \tBuild a standalone application for Core A of BF561 when " + "using the one-application-per-core programming model.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcoreb({"-mcoreb"}, + " -mcoreb \tBuild a standalone application for Core B of BF561 when " + "using the one-application-per-core programming model.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcpu({"-mcpu"}, + " -mcpu \tSpecify the name of the target processor, optionally suffixed " + "by one or more feature modifiers. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcpu32({"-mcpu32"}, + " -mcpu32 \tGenerate output for a CPU32. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcr16c({"-mcr16c"}, + " -mcr16c \tGenerate code for CR16C architecture. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcr16cplus({"-mcr16cplus"}, + " -mcr16cplus \tGenerate code for CR16C+ architecture. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcrc32({"-mcrc32"}, + " -mcrc32 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcrypto({"-mcrypto"}, + " -mcrypto \tEnable the use of the built-in functions that allow direct " + "access to the cryptographic instructions that were added in version 2.07 of " + "the PowerPC ISA.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-crypto")); + +maplecl::Option oMcsyncAnomaly({"-mcsync-anomaly"}, + " -mcsync-anomaly \tWhen enabled, the compiler ensures that the generated " + "code does not contain CSYNC or SSYNC instructions too soon after conditional" + " branches.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-csync-anomaly")); + +maplecl::Option oMctorDtor({"-mctor-dtor"}, + " -mctor-dtor \tEnable constructor/destructor feature.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcustomFpuCfg({"-mcustom-fpu-cfg"}, + " -mcustom-fpu-cfg \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMcustomInsn({"-mcustom-insn"}, + " -mcustom-insn \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-custom-insn")); + +maplecl::Option oMcx16({"-mcx16"}, + " -mcx16 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdalign({"-mdalign"}, + " -mdalign \tAlign doubles at 64-bit boundaries.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdataAlign({"-mdata-align"}, + " -mdata-align \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-data-align")); + +maplecl::Option oMdataModel({"-mdata-model"}, + " -mdata-model \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdataRegion({"-mdata-region"}, + " -mdata-region \ttell the compiler where to place functions and data " + "that do not have one of the lower, upper, either or section 
attributes.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdc({"-mdc"}, + " -mdc \tCauses constant variables to be placed in the .near section.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdebug({"-mdebug"}, + " -mdebug \tPrint additional debug information when compiling. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-debug")); + +maplecl::Option oMdebugMainPrefix({"-mdebug-main=prefix"}, + " -mdebug-main=prefix \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdecAsm({"-mdec-asm"}, + " -mdec-asm \tUse DEC assembler syntax. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdirectMove({"-mdirect-move"}, + " -mdirect-move \tGenerate code that uses the instructions to move " + "data between the general purpose registers and the vector/scalar (VSX) registers " + "that were added in version 2.07 of the PowerPC ISA.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-direct-move")); + +maplecl::Option oMdisableCallt({"-mdisable-callt"}, + " -mdisable-callt \tThis option suppresses generation of the " + "CALLT instruction for the v850e, v850e1, v850e2, v850e2v3 and v850e3v5 flavors " + "of the v850 architecture.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-disable-callt")); + +maplecl::Option oMdisableFpregs({"-mdisable-fpregs"}, + " -mdisable-fpregs \tPrevent floating-point registers from being " + "used in any manner.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdisableIndexing({"-mdisable-indexing"}, + " -mdisable-indexing \tPrevent the compiler from using indexing address " + "modes.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdiv({"-mdiv"}, + " -mdiv \tEnables the div and divu instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-div")); + +maplecl::Option oMdivRem({"-mdiv-rem"}, + " -mdiv-rem \tEnable div and rem instructions for ARCv2 cores.\n", + {driverCategory, unSupCategory}); + 
+maplecl::Option oMdivStrategy({"-mdiv=strategy"}, + " -mdiv=strategy \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdivideBreaks({"-mdivide-breaks"}, + " -mdivide-breaks \tMIPS systems check for division by zero by generating " + "either a conditional trap or a break instruction.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdivideEnabled({"-mdivide-enabled"}, + " -mdivide-enabled \tEnable divide and modulus instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdivideTraps({"-mdivide-traps"}, + " -mdivide-traps \tMIPS systems check for division by zero by generating " + "either a conditional trap or a break instruction.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdivsi3_libfuncName({"-mdivsi3_libfunc"}, + " -mdivsi3_libfunc \tSet the name of the library function " + "used for 32-bit signed division to name.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdll({"-mdll"}, + " -mdll \tThis option is available for Cygwin and MinGW targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdlmzb({"-mdlmzb"}, + " -mdlmzb \tGenerate code that uses the string-search 'dlmzb' " + "instruction on the IBM 405, 440, 464 and 476 processors. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-dlmzb")); + +maplecl::Option oMdmx({"-mdmx"}, + " -mdmx \tUse MIPS Digital Media Extension instructions. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mdmx")); + +maplecl::Option oMdouble({"-mdouble"}, + " -mdouble \tUse floating-point double instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-double")); + +maplecl::Option oMdoubleFloat({"-mdouble-float"}, + " -mdouble-float \tGenerate code for double-precision floating-point " + "operations.", + {driverCategory, unSupCategory}); + +maplecl::Option oMdpfp({"-mdpfp"}, + " -mdpfp \tGenerate double-precision FPX instructions, tuned for the " + "compact implementation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdpfpCompact({"-mdpfp-compact"}, + " -mdpfp-compact \tGenerate double-precision FPX instructions, tuned " + "for the compact implementation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdpfpFast({"-mdpfp-fast"}, + " -mdpfp-fast \tGenerate double-precision FPX instructions, tuned " + "for the fast implementation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdpfp_compact({"-mdpfp_compact"}, + " -mdpfp_compact \tReplaced by -mdpfp-compact.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdpfp_fast({"-mdpfp_fast"}, + " -mdpfp_fast \tReplaced by -mdpfp-fast.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdsp({"-mdsp"}, + " -mdsp \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-dsp")); + +maplecl::Option oMdspPacka({"-mdsp-packa"}, + " -mdsp-packa \tPassed down to the assembler to enable the DSP Pack A " + "extensions. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdspr2({"-mdspr2"}, + " -mdspr2 \tUse revision 2 of the MIPS DSP ASE.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-dspr2")); + +maplecl::Option oMdsp_packa({"-mdsp_packa"}, + " -mdsp_packa \tReplaced by -mdsp-packa.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdualNops({"-mdual-nops"}, + " -mdual-nops \tBy default, GCC inserts NOPs to increase dual issue when " + "it expects it to increase performance.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdualNopsE({"-mdual-nops="}, + " -mdual-nops= \tBy default, GCC inserts NOPs to increase dual issue when " + "it expects it to increase performance.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdumpTuneFeatures({"-mdump-tune-features"}, + " -mdump-tune-features \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdvbf({"-mdvbf"}, + " -mdvbf \tPassed down to the assembler to enable the dual Viterbi " + "butterfly extension.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMdwarf2Asm({"-mdwarf2-asm"}, + " -mdwarf2-asm \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-dwarf2-asm")); + +maplecl::Option oMdword({"-mdword"}, + " -mdword \tChange ABI to use double word insns.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-dword")); + +maplecl::Option oMdynamicNoPic({"-mdynamic-no-pic"}, + " -mdynamic-no-pic \tOn Darwin and Mac OS X systems, compile code so " + "that it is not relocatable, but that its external references are relocatable. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMea({"-mea"}, + " -mea \tGenerate extended arithmetic instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMEa({"-mEa"}, + " -mEa \tReplaced by -mea.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMea32({"-mea32"}, + " -mea32 \tCompile code assuming that pointers to the PPU address space " + "accessed via the __ea named address space qualifier are either 32 bits wide. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMea64({"-mea64"}, + " -mea64 \tCompile code assuming that pointers to the PPU address space " + "accessed via the __ea named address space qualifier are either 64 bits wide. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMeabi({"-meabi"}, + " -meabi \tOn System V.4 and embedded PowerPC systems adhere to the " + "Embedded Applications Binary Interface (EABI), which is a set of modifications to " + "the System V.4 specifications. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-eabi")); + +maplecl::Option oMearlyCbranchsi({"-mearly-cbranchsi"}, + " -mearly-cbranchsi \tEnable pre-reload use of the cbranchsi pattern.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMearlyStopBits({"-mearly-stop-bits"}, + " -mearly-stop-bits \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-early-stop-bits")); + +maplecl::Option oMeb({"-meb"}, + " -meb \tGenerate big-endian code.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMel({"-mel"}, + " -mel \tGenerate little-endian code.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMelf({"-melf"}, + " -melf \tGenerate an executable in the ELF format, rather than the " + "default 'mmo' format used by the mmix simulator.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMemb({"-memb"}, + " -memb \tOn embedded PowerPC systems, set the PPC_EMB bit in the ELF " + "flags header to indicate that 'eabi' extended relocations are 
used.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMembeddedData({"-membedded-data"}, + " -membedded-data \tAllocate variables to the read-only data section " + "first if possible, then next in the small data section if possible, otherwise " + "in data.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-embedded-data")); + +maplecl::Option oMemregsE({"-memregs="}, + " -memregs= \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMep({"-mep"}, + " -mep \tDo not optimize basic blocks that use the same index pointer 4 " + "or more times to copy pointer into the ep register, and use the shorter sld and sst " + "instructions. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-ep")); + +maplecl::Option oMepsilon({"-mepsilon"}, + " -mepsilon \tGenerate floating-point comparison instructions that " + "compare with respect to the rE epsilon register.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-epsilon")); + +maplecl::Option oMesa({"-mesa"}, + " -mesa \tWhen -mesa is specified, generate code using the " + "instructions available on ESA/390.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMetrax100({"-metrax100"}, + " -metrax100 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMetrax4({"-metrax4"}, + " -metrax4 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMeva({"-meva"}, + " -meva \tUse the MIPS Enhanced Virtual Addressing instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-eva")); + +maplecl::Option oMexpandAdddi({"-mexpand-adddi"}, + " -mexpand-adddi \tExpand adddi3 and subdi3 at RTL generation time " + "into add.f, adc etc.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMexplicitRelocs({"-mexplicit-relocs"}, + " -mexplicit-relocs \tUse assembler relocation operators when dealing " + "with symbolic addresses.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-explicit-relocs")); + 
+maplecl::Option oMexr({"-mexr"}, + " -mexr \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-exr")); + +maplecl::Option oMexternSdata({"-mextern-sdata"}, + " -mextern-sdata \tAssume (do not assume) that externally-defined data " + "is in a small data section if the size of that data is within the -G limit. " + "-mextern-sdata is the default for all configurations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-extern-sdata")); + +maplecl::Option oMf16c({"-mf16c"}, + " -mf16c \tThese switches enable the use of instructions in the mf16c.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfastFp({"-mfast-fp"}, + " -mfast-fp \tLink with the fast floating-point library. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfastIndirectCalls({"-mfast-indirect-calls"}, + " -mfast-indirect-calls \tGenerate code that assumes calls never " + "cross space boundaries.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfastSwDiv({"-mfast-sw-div"}, + " -mfast-sw-div \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fast-sw-div")); + +maplecl::Option oMfasterStructs({"-mfaster-structs"}, + " -mfaster-structs \tWith -mfaster-structs, the compiler assumes that " + "structures should have 8-byte alignment.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-faster-structs")); + +maplecl::Option oMfdiv({"-mfdiv"}, + " -mfdiv \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfdpic({"-mfdpic"}, + " -mfdpic \tSelect the FDPIC ABI, which uses function descriptors to " + "represent pointers to functions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfentry({"-mfentry"}, + " -mfentry \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfix({"-mfix"}, + " -mfix \tGenerate code to use the optional FIX instruction sets.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fix")); + +maplecl::Option oMfix24k({"-mfix-24k"}, + " 
-mfix-24k \tWork around the 24K E48 (lost data on stores during refill) " + "errata. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fix-24k")); + +maplecl::Option oMfixAndContinue({"-mfix-and-continue"}, + " -mfix-and-continue \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfixAt697f({"-mfix-at697f"}, + " -mfix-at697f \tEnable the documented workaround for the single erratum " + "of the Atmel AT697F processor (which corresponds to erratum #13 of the " + "AT697E processor).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfixCortexA53835769({"-mfix-cortex-a53-835769"}, + " -mfix-cortex-a53-835769 \tWorkaround for ARM Cortex-A53 Erratum " + "number 835769.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fix-cortex-a53-835769")); + +maplecl::Option oMfixCortexA53843419({"-mfix-cortex-a53-843419"}, + " -mfix-cortex-a53-843419 \tWorkaround for ARM Cortex-A53 Erratum " + "number 843419.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fix-cortex-a53-843419")); + +maplecl::Option oMfixCortexM3Ldrd({"-mfix-cortex-m3-ldrd"}, + " -mfix-cortex-m3-ldrd \tSome Cortex-M3 cores can cause data corruption " + "when ldrd instructions with overlapping destination and base registers are used. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfixGr712rc({"-mfix-gr712rc"}, + " -mfix-gr712rc \tEnable the documented workaround for the back-to-back " + "store errata of the GR712RC processor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfixR10000({"-mfix-r10000"}, + " -mfix-r10000 \tWork around certain R10000 errata\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fix-r10000")); + +maplecl::Option oMfixR4000({"-mfix-r4000"}, + " -mfix-r4000 \tWork around certain R4000 CPU errata\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fix-r4000")); + +maplecl::Option oMfixR4400({"-mfix-r4400"}, + " -mfix-r4400 \tWork around certain R4400 CPU errata\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fix-r4400")); + +maplecl::Option oMfixRm7000({"-mfix-rm7000"}, + " -mfix-rm7000 \tWork around the RM7000 dmult/dmultu errata.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-fix-rm7000")); + +maplecl::Option oMfixSb1({"-mfix-sb1"}, + " -mfix-sb1 \tWork around certain SB-1 CPU core errata.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-fix-sb1")); + +maplecl::Option oMfixUt699({"-mfix-ut699"}, + " -mfix-ut699 \tEnable the documented workarounds for the floating-point " + "errata and the data cache nullify errata of the UT699 processor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfixUt700({"-mfix-ut700"}, + " -mfix-ut700 \tEnable the documented workaround for the back-to-back " + "store errata of the UT699E/UT700 processor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfixVr4120({"-mfix-vr4120"}, + " -mfix-vr4120 \tWork around certain VR4120 errata\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-fix-vr4120")); + +maplecl::Option oMfixVr4130({"-mfix-vr4130"}, + " -mfix-vr4130 \tWork around the VR4130 mflo/mfhi errata.\n", + {driverCategory, unSupCategory}); + +maplecl::Option 
oMfixedCc({"-mfixed-cc"}, + " -mfixed-cc \tDo not try to dynamically allocate condition code " + "registers, only use icc0 and fcc0.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfixedRange({"-mfixed-range"}, + " -mfixed-range \tGenerate code treating the given register range " + "as fixed registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMflat({"-mflat"}, + " -mflat \tWith -mflat, the compiler does not generate save/restore " + "instructions and uses a “flat” or single register window model.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-flat")); + +maplecl::Option oMflipMips16({"-mflip-mips16"}, + " -mflip-mips16 \tGenerate MIPS16 code on alternating functions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfloatAbi({"-mfloat-abi"}, + " -mfloat-abi \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfloatGprs({"-mfloat-gprs"}, + " -mfloat-gprs \tThis switch enables the generation of floating-point " + "operations on the general-purpose registers for architectures that support it.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfloatIeee({"-mfloat-ieee"}, + " -mfloat-ieee \tGenerate code that does not use VAX F and G " + "floating-point arithmetic instead of IEEE single and double precision.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfloatVax({"-mfloat-vax"}, + " -mfloat-vax \tGenerate code that uses VAX F and G " + "floating-point arithmetic instead of IEEE single and double precision.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfloat128({"-mfloat128"}, + " -mfloat128 \tEisable the __float128 keyword for IEEE 128-bit floating " + "point and use either software emulation for IEEE 128-bit floating point or hardware " + "instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-float128")); + +maplecl::Option oMfloat128Hardware({"-mfloat128-hardware"}, + " -mfloat128-hardware \tEisable using ISA 3.0 hardware 
instructions to " + "support the __float128 data type.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-float128-hardware")); + +maplecl::Option oMfloat32({"-mfloat32"}, + " -mfloat32 \tUse 32-bit float.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-float32")); + +maplecl::Option oMfloat64({"-mfloat64"}, + " -mfloat64 \tUse 64-bit float.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-float64")); + +maplecl::Option oMflushFunc({"-mflush-func"}, + " -mflush-func \tSpecifies the function to call to flush the I and D " + "caches, or to not call any such function. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-flush-func")); + +maplecl::Option oMflushTrap({"-mflush-trap"}, + " -mflush-trap \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-flush-trap")); + +maplecl::Option oMfma({"-mfma"}, + " -mfma \tThese switches enable the use of instructions in the mfma.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fmaf")); + +maplecl::Option oMfma4({"-mfma4"}, + " -mfma4 \tThese switches enable the use of instructions in the mfma4.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfmaf({"-mfmaf"}, + " -mfmaf \tWith -mfmaf, Maple generates code that takes advantage of the " + "UltraSPARC Fused Multiply-Add Floating-point instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfmovd({"-mfmovd"}, + " -mfmovd \tEnable the use of the instruction fmovd. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMforceNoPic({"-mforce-no-pic"}, + " -mforce-no-pic \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfpExceptions({"-mfp-exceptions"}, + " -mfp-exceptions \tSpecifies whether FP exceptions are enabled. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-fp-exceptions")); + +maplecl::Option oMfpMode({"-mfp-mode"}, + " -mfp-mode \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfpRoundingMode({"-mfp-rounding-mode"}, + " -mfp-rounding-mode \tSelects the IEEE rounding mode.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfpTrapMode({"-mfp-trap-mode"}, + " -mfp-trap-mode \tThis option controls what floating-point " + "related traps are enabled.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfp16Format({"-mfp16-format"}, + " -mfp16-format \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfp32({"-mfp32"}, + " -mfp32 \tAssume that floating-point registers are 32 bits wide.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfp64({"-mfp64"}, + " -mfp64 \tAssume that floating-point registers are 64 bits wide.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfpmath({"-mfpmath"}, + " -mfpmath \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfpr32({"-mfpr-32"}, + " -mfpr-32 \tUse only the first 32 floating-point registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfpr64({"-mfpr-64"}, + " -mfpr-64 \tUse all 64 floating-point registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfprnd({"-mfprnd"}, + " -mfprnd \tSpecify which instructions are available on the " + "processor you are using. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fprnd")); + +maplecl::Option oMfpu({"-mfpu"}, + " -mfpu \tEnables support for specific floating-point hardware " + "extensions for ARCv2 cores.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fpu")); + +maplecl::Option oMfpxx({"-mfpxx"}, + " -mfpxx \tDo not assume the width of floating-point registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfractConvertTruncate({"-mfract-convert-truncate"}, + " -mfract-convert-truncate \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMframeHeaderOpt({"-mframe-header-opt"}, + " -mframe-header-opt \tEnable (disable) frame header optimization " + "in the o32 ABI. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-frame-header-opt")); + +maplecl::Option oMfriz({"-mfriz"}, + " -mfriz \tGenerate the friz instruction when the " + "-funsafe-math-optimizations option is used to optimize rounding of floating-point " + "values to 64-bit integer and back to floating point. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfsca({"-mfsca"}, + " -mfsca \tAllow or disallow the compiler to emit the fsca instruction " + "for sine and cosine approximations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fsca")); + +maplecl::Option oMfsgsbase({"-mfsgsbase"}, + " -mfsgsbase \tThese switches enable the use of instructions in the " + "mfsgsbase.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfsmuld({"-mfsmuld"}, + " -mfsmuld \tWith -mfsmuld, Maple generates code that takes advantage of " + "the Floating-point Multiply Single to Double (FsMULd) instruction. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fsmuld")); + +maplecl::Option oMfsrra({"-mfsrra"}, + " -mfsrra \tAllow or disallow the compiler to emit the fsrra instruction" + " for reciprocal square root approximations.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fsrra")); + +maplecl::Option oMfullRegs({"-mfull-regs"}, + " -mfull-regs \tUse full-set registers for register allocation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfullToc({"-mfull-toc"}, + " -mfull-toc \tModify generation of the TOC (Table Of Contents), " + "which is created for every executable file. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMfusedMadd({"-mfused-madd"}, + " -mfused-madd \tGenerate code that uses the floating-point multiply " + "and accumulate instructions. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fused-madd")); + +maplecl::Option oMfxsr({"-mfxsr"}, + " -mfxsr \tThese switches enable the use of instructions in the mfxsr.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMG({"-MG"}, + " -MG \tTreat missing header files as generated files.\n", + {driverCategory, clangCategory}); + +maplecl::Option oMg10({"-mg10"}, + " -mg10 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMg13({"-mg13"}, + " -mg13 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMg14({"-mg14"}, + " -mg14 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgas({"-mgas"}, + " -mgas \tEnable the use of assembler directives only GAS understands.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgccAbi({"-mgcc-abi"}, + " -mgcc-abi \tEnables support for the old GCC version of the V850 ABI.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgenCellMicrocode({"-mgen-cell-microcode"}, + " -mgen-cell-microcode \tGenerate Cell microcode instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgeneralRegsOnly({"-mgeneral-regs-only"}, 
+ " -mgeneral-regs-only \tGenerate code which uses only the general " + "registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMghs({"-mghs"}, + " -mghs \tEnables support for the RH850 version of the V850 ABI.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMglibc({"-mglibc"}, + " -mglibc \tUse GNU C library.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgnu({"-mgnu"}, + " -mgnu \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgnuAs({"-mgnu-as"}, + " -mgnu-as \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-gnu-as")); + +maplecl::Option oMgnuAttribute({"-mgnu-attribute"}, + " -mgnu-attribute \tEmit .gnu_attribute assembly directives to set " + "tag/value pairs in a .gnu.attributes section that specify ABI variations in " + "function parameters or return values.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-gnu-attribute")); + +maplecl::Option oMgnuLd({"-mgnu-ld"}, + " -mgnu-ld \tUse options specific to GNU ld.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-gnu-ld")); + +maplecl::Option oMgomp({"-mgomp"}, + " -mgomp \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgotplt({"-mgotplt"}, + " -mgotplt \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-gotplt")); + +maplecl::Option oMgp32({"-mgp32"}, + " -mgp32 \tAssume that general-purpose registers are 32 bits wide.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgp64({"-mgp64"}, + " -mgp64 \tAssume that general-purpose registers are 64 bits wide.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgpopt({"-mgpopt"}, + " -mgpopt \tUse GP-relative accesses for symbols that are known " + "to be in a small data section\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-gpopt")); + +maplecl::Option oMgpr32({"-mgpr-32"}, + " -mgpr-32 \tOnly use the first 32 general-purpose registers.\n", + {driverCategory, unSupCategory}); + 
+maplecl::Option oMgpr64({"-mgpr-64"}, + " -mgpr-64 \tUse all 64 general-purpose registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMgprelRo({"-mgprel-ro"}, + " -mgprel-ro \tEnable the use of GPREL relocations in the FDPIC ABI " + "for data that is known to be in read-only sections.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMh({"-mh"}, + " -mh \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhal({"-mhal"}, + " -mhal \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhalfRegFile({"-mhalf-reg-file"}, + " -mhalf-reg-file \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhardDfp({"-mhard-dfp"}, + " -mhard-dfp \tUse the hardware decimal-floating-point instructions " + "for decimal-floating-point operations. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-hard-dfp")); + +maplecl::Option oMhardFloat({"-mhard-float"}, + " -mhard-float \tUse the hardware floating-point instructions and " + "registers for floating-point operations.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhardQuadFloat({"-mhard-quad-float"}, + " -mhard-quad-float \tGenerate output containing quad-word (long double) " + "floating-point instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhardlit({"-mhardlit"}, + " -mhardlit \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-hardlit")); + +maplecl::Option oMhintMaxDistance({"-mhint-max-distance"}, + " -mhint-max-distance \tThe encoding of the branch hint instruction " + "limits the hint to be within 256 instructions of the branch it is affecting.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhintMaxNops({"-mhint-max-nops"}, + " -mhint-max-nops \tMaximum number of NOPs to insert for a branch hint.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhotpatch({"-mhotpatch"}, + " -mhotpatch \tIf the hotpatch option is enabled, a “hot-patching” " + "function prologue is generated 
for all functions in the compilation unit. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhpLd({"-mhp-ld"}, + " -mhp-ld \tUse options specific to HP ld.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMhtm({"-mhtm"}, + " -mhtm \tThe -mhtm option enables a set of builtins making use of " + "instructions available with the transactional execution facility introduced with " + "the IBM zEnterprise EC12 machine generation S/390 System z Built-in Functions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-htm")); + +maplecl::Option oMhwDiv({"-mhw-div"}, + " -mhw-div \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-hw-div")); + +maplecl::Option oMhwMul({"-mhw-mul"}, + " -mhw-mul \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-hw-mul")); + +maplecl::Option oMhwMulx({"-mhw-mulx"}, + " -mhw-mulx \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-hw-mulx")); + +maplecl::Option oMhwmultE({"-mhwmult="}, + " -mhwmult= \tDescribes the type of hardware multiply " + "supported by the target.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMiamcu({"-miamcu"}, + " -miamcu \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMicplb({"-micplb"}, + " -micplb \tAssume that ICPLBs are enabled at run time.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMidSharedLibrary({"-mid-shared-library"}, + " -mid-shared-library \tGenerate code that supports shared libraries " + "via the library ID method.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-id-shared-library")); + +maplecl::Option oMieee({"-mieee"}, + " -mieee \tThis option generates code fully IEEE-compliant code except" + " that the inexact-flag is not maintained (see below).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-ieee")); + +maplecl::Option oMieeeConformant({"-mieee-conformant"}, + " -mieee-conformant \tThis option marks the generated 
code as IEEE " + "conformant. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMieeeFp({"-mieee-fp"}, + " -mieee-fp \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-ieee-fp")); + +maplecl::Option oMieeeWithInexact({"-mieee-with-inexact"}, + " -mieee-with-inexact \tTurning on this option causes the generated " + "code to implement fully-compliant IEEE math. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMilp32({"-milp32"}, + " -milp32 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMimadd({"-mimadd"}, + " -mimadd \tEnable (disable) use of the madd and msub integer " + "instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-imadd")); + +maplecl::Option oMimpureText({"-mimpure-text"}, + " -mimpure-text \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMincomingStackBoundary({"-mincoming-stack-boundary"}, + " -mincoming-stack-boundary \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMindexedLoads({"-mindexed-loads"}, + " -mindexed-loads \tEnable the use of indexed loads. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineAllStringops({"-minline-all-stringops"}, + " -minline-all-stringops \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineFloatDivideMaxThroughput({"-minline-float-divide-max-throughput"}, + " -minline-float-divide-max-throughput \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineFloatDivideMinLatency({"-minline-float-divide-min-latency"}, + " -minline-float-divide-min-latency \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineIc_invalidate({"-minline-ic_invalidate"}, + " -minline-ic_invalidate \tInline code to invalidate instruction cache " + "entries after setting up nested function trampolines.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineIntDivideMaxThroughput({"-minline-int-divide-max-throughput"}, + " -minline-int-divide-max-throughput \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineIntDivideMinLatency({"-minline-int-divide-min-latency"}, + " -minline-int-divide-min-latency \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlinePlt({"-minline-plt"}, + " -minline-plt \tEnable inlining of PLT entries in function calls to " + "functions that are not known to bind locally.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineSqrtMaxThroughput({"-minline-sqrt-max-throughput"}, + " -minline-sqrt-max-throughput \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineSqrtMinLatency({"-minline-sqrt-min-latency"}, + " -minline-sqrt-min-latency \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinlineStringopsDynamically({"-minline-stringops-dynamically"}, + " -minline-stringops-dynamically \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinrt({"-minrt"}, + " -minrt \tEnable the use of a minimum runtime environment - no " + "static initializers or constructors.\n", + {driverCategory, unSupCategory}); + +maplecl::Option 
oMinsertSchedNops({"-minsert-sched-nops"}, + " -minsert-sched-nops \tThis option controls which NOP insertion " + "scheme is used during the second scheduling pass.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMintRegister({"-mint-register"}, + " -mint-register \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMint16({"-mint16"}, + " -mint16 \tUse 16-bit int.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-int16")); + +maplecl::Option oMint32({"-mint32"}, + " -mint32 \tUse 32-bit int.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-int32")); + +maplecl::Option oMint8({"-mint8"}, + " -mint8 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMinterlinkCompressed({"-minterlink-compressed"}, + " -minterlink-compressed \tRequire that code using the standard " + "(uncompressed) MIPS ISA be link-compatible with MIPS16 and microMIPS code, " + "and vice versa.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-interlink-compressed")); + +maplecl::Option oMinterlinkMips16({"-minterlink-mips16"}, + " -minterlink-mips16 \tPredate the microMIPS ASE and are retained for " + "backwards compatibility.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-interlink-mips16")); + +maplecl::Option oMioVolatile({"-mio-volatile"}, + " -mio-volatile \tTells the compiler that any variable marked with the " + "io attribute is to be considered volatile.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips1({"-mips1"}, + " -mips1 \tEquivalent to -march=mips1.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips16({"-mips16"}, + " -mips16 \tGenerate MIPS16 code.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mips16")); + +maplecl::Option oMips2({"-mips2"}, + " -mips2 \tEquivalent to -march=mips2.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips3({"-mips3"}, + " -mips3 \tEquivalent to -march=mips3.\n", + 
{driverCategory, unSupCategory}); + +maplecl::Option oMips32({"-mips32"}, + " -mips32 \tEquivalent to -march=mips32.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips32r3({"-mips32r3"}, + " -mips32r3 \tEquivalent to -march=mips32r3.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips32r5({"-mips32r5"}, + " -mips32r5 \tEquivalent to -march=mips32r5.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips32r6({"-mips32r6"}, + " -mips32r6 \tEquivalent to -march=mips32r6.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips3d({"-mips3d"}, + " -mips3d \tUse (do not use) the MIPS-3D ASE.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mips3d")); + +maplecl::Option oMips4({"-mips4"}, + " -mips4 \tEquivalent to -march=mips4.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips64({"-mips64"}, + " -mips64 \tEquivalent to -march=mips64.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips64r2({"-mips64r2"}, + " -mips64r2 \tEquivalent to -march=mips64r2.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips64r3({"-mips64r3"}, + " -mips64r3 \tEquivalent to -march=mips64r3.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips64r5({"-mips64r5"}, + " -mips64r5 \tEquivalent to -march=mips64r5.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMips64r6({"-mips64r6"}, + " -mips64r6 \tEquivalent to -march=mips64r6.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMiselE({"-misel="}, + " -misel= \tThis switch has been deprecated. 
" + "Use -misel and -mno-isel instead.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMisel({"-misel"}, + " -misel \tThis switch enables or disables the generation of " + "ISEL instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-isel")); + +maplecl::Option oMisize({"-misize"}, + " -misize \tAnnotate assembler instructions with estimated addresses.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMisrVectorSize({"-misr-vector-size"}, + " -misr-vector-size \tSpecify the size of each interrupt vector, which " + "must be 4 or 16.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMissueRateNumber({"-missue-rate=number"}, + " -missue-rate=number \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMivc2({"-mivc2"}, + " -mivc2 \tEnables IVC2 scheduling. IVC2 is a 64-bit VLIW coprocessor.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMjsr({"-mjsr"}, + " -mjsr \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-jsr")); + +maplecl::Option oMjumpInDelay({"-mjump-in-delay"}, + " -mjump-in-delay \tThis option is ignored and provided for compatibility " + "purposes only.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMkernel({"-mkernel"}, + " -mkernel \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMknuthdiv({"-mknuthdiv"}, + " -mknuthdiv \tMake the result of a division yielding a remainder " + "have the same sign as the divisor. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-knuthdiv")); + +maplecl::Option oMl({"-ml"}, + " -ml \tCauses variables to be assigned to the .far section by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlarge({"-mlarge"}, + " -mlarge \tUse large-model addressing (20-bit pointers, 32-bit size_t).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlargeData({"-mlarge-data"}, + " -mlarge-data \tWith this option the data area is limited to just " + "below 2GB.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlargeDataThreshold({"-mlarge-data-threshold"}, + " -mlarge-data-threshold \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlargeMem({"-mlarge-mem"}, + " -mlarge-mem \tWith -mlarge-mem code is generated that assumes a " + "full 32-bit address.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlargeText({"-mlarge-text"}, + " -mlarge-text \tWhen -msmall-data is used, the compiler can assume that " + "all local symbols share the same $gp value, and thus reduce the number of instructions" + " required for a function call from 4 to 1.\n", + {driverCategory, unSupCategory}); + +maplecl::Option wUnusedMacro({"-Wunused-macros"}, + " -Wunused-macros \twarning: macro is not used\n", + {driverCategory, clangCategory}); + +maplecl::Option wBadFunctionCast({"-Wbad-function-cast"}, + " -Wbad-function-cast \twarning: " + "cast from function call of type A to non-matching type B\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-bad-function-cast")); + +maplecl::Option wStrictPrototypes({"-Wstrict-prototypes"}, + " -Wstrict-prototypes \twarning: " + "Warn if a function is declared or defined without specifying the argument types\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-strict-prototypes")); + +maplecl::Option wUndef({"-Wundef"}, + " -Wundef \twarning: " + "Warn if an undefined identifier is evaluated in an #if directive. 
" + "Such identifiers are replaced with zero\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-undef")); + +maplecl::Option wCastQual({"-Wcast-qual"}, + " -Wcast-qual \twarning: " + "Warn whenever a pointer is cast so as to remove a type qualifier from the target type. " + "For example, warn if a const char * is cast to an ordinary char *\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-cast-qual")); + +maplecl::Option wMissingFieldInitializers({"-Wmissing-field-initializers"}, + " -Wmissing-field-initializers\twarning: " + "Warn if a structure's initializer has some fields missing\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-missing-field-initializers")); + +maplecl::Option wUnusedParameter({"-Wunused-parameter"}, + " -Wunused-parameter \twarning: " + "Warn whenever a function parameter is unused aside from its declaration\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-parameter")); + +maplecl::Option wAll({"-Wall"}, + " -Wall \tThis enables all the warnings about constructions " + "that some users consider questionable\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-all")); + +maplecl::Option wExtra({"-Wextra"}, + " -Wextra \tEnable some extra warning flags that are not enabled by -Wall\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-extra")); + +maplecl::Option wWriteStrings({"-Wwrite-strings"}, + " -Wwrite-strings \tWhen compiling C, give string constants the type " + "const char[length] so that copying the address of one into " + "a non-const char * pointer produces a warning\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-write-strings")); + +maplecl::Option wVla({"-Wvla"}, + " -Wvla \tWarn if a variable-length array is used in the code\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-vla")); + +maplecl::Option wFormatSecurity({"-Wformat-security"}, + " -Wformat-security \tWwarn about uses of 
format " + "functions that represent possible security problems\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-security")); + +maplecl::Option wShadow({"-Wshadow"}, + " -Wshadow \tWarn whenever a local variable " + "or type declaration shadows another variable\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-shadow")); + +maplecl::Option wTypeLimits({"-Wtype-limits"}, + " -Wtype-limits \tWarn if a comparison is always true or always " + "false due to the limited range of the data type\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-type-limits")); + +maplecl::Option wSignCompare({"-Wsign-compare"}, + " -Wsign-compare \tWarn when a comparison between signed and " + " unsigned values could produce an incorrect result when the signed value is converted " + "to unsigned\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sign-compare")); + +maplecl::Option wShiftNegativeValue({"-Wshift-negative-value"}, + " -Wshift-negative-value \tWarn if left " + "shifting a negative value\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-shift-negative-value")); + +maplecl::Option wPointerArith({"-Wpointer-arith"}, + " -Wpointer-arith \tWarn about anything that depends on the " + "“size of” a function type or of void\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-pointer-arith")); + +maplecl::Option wIgnoredQualifiers({"-Wignored-qualifiers"}, + " -Wignored-qualifiers \tWarn if the return type of a " + "function has a type qualifier such as const\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-ignored-qualifiers")); + +maplecl::Option wFormat({"-Wformat"}, + " -Wformat \tCheck calls to printf and scanf, etc., " + "to make sure that the arguments supplied have types appropriate " + "to the format string specified\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format")); + +maplecl::Option wFloatEqual({"-Wfloat-equal"}, + " 
-Wfloat-equal \tWarn if floating-point values are used " + "in equality comparisons\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-float-equal")); + +maplecl::Option wDateTime({"-Wdate-time"}, + " -Wdate-time \tWarn when macros __TIME__, __DATE__ or __TIMESTAMP__ " + "are encountered as they might prevent bit-wise-identical reproducible compilations\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-date-time")); + +maplecl::Option wImplicitFallthrough({"-Wimplicit-fallthrough"}, + " -Wimplicit-fallthrough \tWarn when a switch case falls through\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-implicit-fallthrough")); + +maplecl::Option wShiftOverflow({"-Wshift-overflow"}, + " -Wshift-overflow \tWarn about left shift overflows\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-shift-overflow")); + +maplecl::Option oWnounusedcommandlineargument({"-Wno-unused-command-line-argument"}, + " -Wno-unused-command-line-argument \tno unused command line argument\n", + {driverCategory, clangCategory}); + +maplecl::Option oWnoconstantconversion({"-Wno-constant-conversion"}, + " -Wno-constant-conversion \tno constant conversion\n", + {driverCategory, clangCategory}); + +maplecl::Option oWnounknownwarningoption({"-Wno-unknown-warning-option"}, + " -Wno-unknown-warning-option \tno unknown warning option\n", + {driverCategory, clangCategory}); + +maplecl::Option oW({"-W"}, + " -W \tThis switch is deprecated; use -Wextra instead.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWabi({"-Wabi"}, + " -Wabi \tWarn about things that will change when compiling with an " + "ABI-compliant compiler.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-abi")); + +maplecl::Option oWabiTag({"-Wabi-tag"}, + " -Wabi-tag \tWarn if a subobject has an abi_tag attribute that the " + "complete object type does not have.\n", + {driverCategory, clangCategory}); + +maplecl::Option 
oWaddrSpaceConvert({"-Waddr-space-convert"}, + " -Waddr-space-convert \tWarn about conversions between address spaces in" + " the case where the resulting address space is not contained in the incoming address " + "space.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWaddress({"-Waddress"}, + " -Waddress \tWarn about suspicious uses of memory addresses.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-address")); + +maplecl::Option oWaggregateReturn({"-Waggregate-return"}, + " -Waggregate-return \tWarn about returning structures\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-aggregate-return")); + +maplecl::Option oWaggressiveLoopOptimizations({"-Waggressive-loop-optimizations"}, + " -Waggressive-loop-optimizations \tWarn if a loop with constant number " + "of iterations triggers undefined behavior.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-aggressive-loop-optimizations")); + +maplecl::Option oWalignedNew({"-Waligned-new"}, + " -Waligned-new \tWarn about 'new' of type with extended alignment " + "without -faligned-new.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-aligned-new")); + +maplecl::Option oWallocZero({"-Walloc-zero"}, + " -Walloc-zero \t-Walloc-zero Warn for calls to allocation functions that" + " specify zero bytes.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-alloc-zero")); + +maplecl::Option oWalloca({"-Walloca"}, + " -Walloca \tWarn on any use of alloca.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-alloca")); + +maplecl::Option oWarrayBounds({"-Warray-bounds"}, + " -Warray-bounds \tWarn if an array is accessed out of bounds.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-array-bounds")); + +maplecl::Option oWassignIntercept({"-Wassign-intercept"}, + " -Wassign-intercept \tWarn whenever an Objective-C assignment is being" + " intercepted by the garbage collector.\n", + {driverCategory, 
clangCategory}, + maplecl::DisableWith("-Wno-assign-intercept")); + +maplecl::Option oWattributes({"-Wattributes"}, + " -Wattributes \tWarn about inappropriate attribute usage.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-attributes")); + +maplecl::Option oWboolCompare({"-Wbool-compare"}, + " -Wbool-compare \tWarn about boolean expression compared with an " + "integer value different from true/false.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-bool-compare")); + +maplecl::Option oWboolOperation({"-Wbool-operation"}, + " -Wbool-operation \tWarn about certain operations on boolean" + " expressions.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-bool-operation")); + +maplecl::Option oWbuiltinDeclarationMismatch({"-Wbuiltin-declaration-mismatch"}, + " -Wbuiltin-declaration-mismatch \tWarn when a built-in function is" + " declared with the wrong signature.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-builtin-declaration-mismatch")); + +maplecl::Option oWbuiltinMacroRedefined({"-Wbuiltin-macro-redefined"}, + " -Wbuiltin-macro-redefined \tWarn when a built-in preprocessor macro " + "is undefined or redefined.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-builtin-macro-redefined")); + +maplecl::Option oW11Compat({"-Wc++11-compat"}, + " -Wc++11-compat \tWarn about C++ constructs whose meaning differs between" + " ISO C++ 1998 and ISO C++ 2011.\n", + {driverCategory, clangCategory}); + +maplecl::Option oW14Compat({"-Wc++14-compat"}, + " -Wc++14-compat \tWarn about C++ constructs whose meaning differs between" + " ISO C++ 2011 and ISO C++ 2014.\n", + {driverCategory, clangCategory}); + +maplecl::Option oW1zCompat({"-Wc++1z-compat"}, + " -Wc++1z-compat \tWarn about C++ constructs whose meaning differs between" + " ISO C++ 2014 and (forthcoming) ISO C++ 201z(7?).\n", + {driverCategory, clangCategory}); + +maplecl::Option oWc90C99Compat({"-Wc90-c99-compat"}, + " 
-Wc90-c99-compat \tWarn about features not present in ISO C90\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-c90-c99-compat")); + +maplecl::Option oWc99C11Compat({"-Wc99-c11-compat"}, + " -Wc99-c11-compat \tWarn about features not present in ISO C99\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-c99-c11-compat")); + +maplecl::Option oWcastAlign({"-Wcast-align"}, + " -Wcast-align \tWarn about pointer casts which increase alignment.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-cast-align")); + +maplecl::Option oWcharSubscripts({"-Wchar-subscripts"}, + " -Wchar-subscripts \tWarn about subscripts whose type is \"char\".\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-char-subscripts")); + +maplecl::Option oWchkp({"-Wchkp"}, + " -Wchkp \tWarn about memory access errors found by Pointer Bounds " + "Checker.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWclobbered({"-Wclobbered"}, + " -Wclobbered \tWarn about variables that might be changed " + "by \"longjmp\" or \"vfork\".\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-clobbered")); + +maplecl::Option oWcomment({"-Wcomment"}, + " -Wcomment \tWarn about possibly nested block comments\n", + {driverCategory, clangCategory}); + +maplecl::Option oWcomments({"-Wcomments"}, + " -Wcomments \tSynonym for -Wcomment.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWconditionallySupported({"-Wconditionally-supported"}, + " -Wconditionally-supported \tWarn for conditionally-supported" + " constructs.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-conditionally-supported")); + +maplecl::Option oWconversion({"-Wconversion"}, + " -Wconversion \tWarn for implicit type conversions that may " + "change a value.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-conversion")); + +maplecl::Option oWconversionNull({"-Wconversion-null"}, + " -Wconversion-null \tWarn for 
converting NULL from/to" + " a non-pointer type.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-conversion-null")); + +maplecl::Option oWctorDtorPrivacy({"-Wctor-dtor-privacy"}, + " -Wctor-dtor-privacy \tWarn when all constructors and destructors are" + " private.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-ctor-dtor-privacy")); + +maplecl::Option oWdanglingElse({"-Wdangling-else"}, + " -Wdangling-else \tWarn about dangling else.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-dangling-else")); + +maplecl::Option oWdeclarationAfterStatement({"-Wdeclaration-after-statement"}, + " -Wdeclaration-after-statement \tWarn when a declaration is found after" + " a statement.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-declaration-after-statement")); + +maplecl::Option oWdeleteIncomplete({"-Wdelete-incomplete"}, + " -Wdelete-incomplete \tWarn when deleting a pointer to incomplete type.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-delete-incomplete")); + +maplecl::Option oWdeleteNonVirtualDtor({"-Wdelete-non-virtual-dtor"}, + " -Wdelete-non-virtual-dtor \tWarn about deleting polymorphic objects " + "with non-virtual destructors.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-delete-non-virtual-dtor")); + +maplecl::Option oWdeprecated({"-Wdeprecated"}, + " -Wdeprecated \tWarn if a deprecated compiler feature\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-deprecated")); + +maplecl::Option oWdeprecatedDeclarations({"-Wdeprecated-declarations"}, + " -Wdeprecated-declarations \tWarn about uses of " + "__attribute__((deprecated)) declarations.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-deprecated-declarations")); + +maplecl::Option oWdisabledOptimization({"-Wdisabled-optimization"}, + " -Wdisabled-optimization \tWarn when an optimization pass is disabled.\n", + {driverCategory, clangCategory}, + 
maplecl::DisableWith("-Wno-disabled-optimization")); + +maplecl::Option oWdiscardedArrayQualifiers({"-Wdiscarded-array-qualifiers"}, + " -Wdiscarded-array-qualifiers \tWarn if qualifiers on arrays which " + "are pointer targets are discarded.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-discarded-array-qualifiers")); + +maplecl::Option oWdiscardedQualifiers({"-Wdiscarded-qualifiers"}, + " -Wdiscarded-qualifiers \tWarn if type qualifiers on pointers are" + " discarded.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-discarded-qualifiers")); + +maplecl::Option oWdivByZero({"-Wdiv-by-zero"}, + " -Wdiv-by-zero \tWarn about compile-time integer division by zero.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-div-by-zero")); + +maplecl::Option oWdoublePromotion({"-Wdouble-promotion"}, + " -Wdouble-promotion \tWarn about implicit conversions from " + "\"float\" to \"double\".\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-double-promotion")); + +maplecl::Option oWduplicateDeclSpecifier({"-Wduplicate-decl-specifier"}, + " -Wduplicate-decl-specifier \tWarn when a declaration has " + "duplicate const\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-duplicate-decl-specifier")); + +maplecl::Option oWduplicatedBranches({"-Wduplicated-branches"}, + " -Wduplicated-branches \tWarn about duplicated branches in " + "if-else statements.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-duplicated-branches")); + +maplecl::Option oWduplicatedCond({"-Wduplicated-cond"}, + " -Wduplicated-cond \tWarn about duplicated conditions in an " + "if-else-if chain.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-duplicated-cond")); + +maplecl::Option oWeak_reference_mismatches({"-weak_reference_mismatches"}, + " -weak_reference_mismatches \tSpecifies what to do if a symbol import conflicts between file " + "(weak in one and not in another) the default is to 
treat the symbol as non-weak.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWeffc({"-Weffc++"}, + " -Weffc++ \tWarn about violations of Effective C++ style rules.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-effc++")); + +maplecl::Option oWemptyBody({"-Wempty-body"}, + " -Wempty-body \tWarn about an empty body in an if or else statement.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-empty-body")); + +maplecl::Option oWendifLabels({"-Wendif-labels"}, + " -Wendif-labels \tWarn about stray tokens after #else and #endif.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-endif-labels")); + +maplecl::Option oWenumCompare({"-Wenum-compare"}, + " -Wenum-compare \tWarn about comparison of different enum types.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-enum-compare")); + +maplecl::Option oWerror({"-Werror"}, + " -Werror \tTreat all warnings as errors.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-error")); + +maplecl::Option oWerrorE({"-Werror="}, + " -Werror= \tTreat specified warning as error.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-error=")); + +maplecl::Option oWexpansionToDefined({"-Wexpansion-to-defined"}, + " -Wexpansion-to-defined \tWarn if 'defined' is used outside #if.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWfatalErrors({"-Wfatal-errors"}, + " -Wfatal-errors \tExit on the first error occurred.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-fatal-errors")); + +maplecl::Option oWfloatConversion({"-Wfloat-conversion"}, + " -Wfloat-conversion \tWarn for implicit type conversions that cause " + "loss of floating point precision.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-float-conversion")); + +maplecl::Option oWformatContainsNul({"-Wformat-contains-nul"}, + " -Wformat-contains-nul \tWarn about format strings that contain NUL" + " bytes.\n", + 
{driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-contains-nul")); + +maplecl::Option oWformatExtraArgs({"-Wformat-extra-args"}, + " -Wformat-extra-args \tWarn if passing too many arguments to a function" + " for its format string.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-extra-args")); + +maplecl::Option oWformatNonliteral({"-Wformat-nonliteral"}, + " -Wformat-nonliteral \tWarn about format strings that are not literals.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-nonliteral")); + +maplecl::Option oWformatOverflow({"-Wformat-overflow"}, + " -Wformat-overflow \tWarn about function calls with format strings " + "that write past the end of the destination region. Same as -Wformat-overflow=1.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-overflow")); + +maplecl::Option oWformatSignedness({"-Wformat-signedness"}, + " -Wformat-signedness \tWarn about sign differences with format " + "functions.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-signedness")); + +maplecl::Option oWformatTruncation({"-Wformat-truncation"}, + " -Wformat-truncation \tWarn about calls to snprintf and similar " + "functions that truncate output. 
Same as -Wformat-truncation=1.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-truncation")); + +maplecl::Option oWformatY2k({"-Wformat-y2k"}, + " -Wformat-y2k \tWarn about strftime formats yielding 2-digit years.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-y2k")); + +maplecl::Option oWformatZeroLength({"-Wformat-zero-length"}, + " -Wformat-zero-length \tWarn about zero-length formats.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-format-zero-length")); + +maplecl::Option oWframeAddress({"-Wframe-address"}, + " -Wframe-address \tWarn when __builtin_frame_address or" + " __builtin_return_address is used unsafely.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-frame-address")); + +maplecl::Option oWframeLargerThan({"-Wframe-larger-than"}, + " -Wframe-larger-than \t\n", + {driverCategory, clangCategory}); + +maplecl::Option oWfreeNonheapObject({"-Wfree-nonheap-object"}, + " -Wfree-nonheap-object \tWarn when attempting to free a non-heap" + " object.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-free-nonheap-object")); + +maplecl::Option oWignoredAttributes({"-Wignored-attributes"}, + " -Wignored-attributes \tWarn whenever attributes are ignored.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-ignored-attributes")); + +maplecl::Option oWimplicit({"-Wimplicit"}, + " -Wimplicit \tWarn about implicit declarations.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-implicit")); + +maplecl::Option oWimplicitFunctionDeclaration({"-Wimplicit-function-declaration"}, + " -Wimplicit-function-declaration \tWarn about implicit function " + "declarations.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-implicit-function-declaration")); + +maplecl::Option oWimplicitInt({"-Wimplicit-int"}, + " -Wimplicit-int \tWarn when a declaration does not specify a type.\n", + {driverCategory, clangCategory}, + 
maplecl::DisableWith("-Wno-implicit-int")); + +maplecl::Option oWincompatiblePointerTypes({"-Wincompatible-pointer-types"}, + " -Wincompatible-pointer-types \tWarn when there is a conversion " + "between pointers that have incompatible types.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-incompatible-pointer-types")); + +maplecl::Option oWinheritedVariadicCtor({"-Winherited-variadic-ctor"}, + " -Winherited-variadic-ctor \tWarn about C++11 inheriting constructors " + "when the base has a variadic constructor.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-inherited-variadic-ctor")); + +maplecl::Option oWinitSelf({"-Winit-self"}, + " -Winit-self \tWarn about variables which are initialized to " + "themselves.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-init-self")); + +maplecl::Option oWinline({"-Winline"}, + " -Winline \tWarn when an inlined function cannot be inlined.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-inline")); + +maplecl::Option oWintConversion({"-Wint-conversion"}, + " -Wint-conversion \tWarn about incompatible integer to pointer and" + " pointer to integer conversions.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-int-conversion")); + +maplecl::Option oWintInBoolContext({"-Wint-in-bool-context"}, + " -Wint-in-bool-context \tWarn for suspicious integer expressions in" + " boolean context.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-int-in-bool-context")); + +maplecl::Option oWintToPointerCast({"-Wint-to-pointer-cast"}, + " -Wint-to-pointer-cast \tWarn when there is a cast to a pointer from" + " an integer of a different size.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-int-to-pointer-cast")); + +maplecl::Option oWinvalidMemoryModel({"-Winvalid-memory-model"}, + " -Winvalid-memory-model \tWarn when an atomic memory model parameter" + " is known to be outside the valid range.\n", + 
{driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-invalid-memory-model")); + +maplecl::Option oWinvalidOffsetof({"-Winvalid-offsetof"}, + " -Winvalid-offsetof \tWarn about invalid uses of " + "the \"offsetof\" macro.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-invalid-offsetof")); + +maplecl::Option oWLiteralSuffix({"-Wliteral-suffix"}, + " -Wliteral-suffix \tWarn when a string or character literal is " + "followed by a ud-suffix which does not begin with an underscore.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-literal-suffix")); + +maplecl::Option oWLogicalNotParentheses({"-Wlogical-not-parentheses"}, + " -Wlogical-not-parentheses \tWarn about logical not used on the left" + " hand side operand of a comparison.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-logical-not-parentheses")); + +maplecl::Option oWinvalidPch({"-Winvalid-pch"}, + " -Winvalid-pch \tWarn about PCH files that are found but not used.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-invalid-pch")); + +maplecl::Option oWjumpMissesInit({"-Wjump-misses-init"}, + " -Wjump-misses-init \tWarn when a jump misses a variable " + "initialization.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-jump-misses-init")); + +maplecl::Option oWLogicalOp({"-Wlogical-op"}, + " -Wlogical-op \tWarn about suspicious uses of logical " + "operators in expressions. 
\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-logical-op")); + +maplecl::Option oWLongLong({"-Wlong-long"}, + " -Wlong-long \tWarn if long long type is used.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-long-long")); + +maplecl::Option oWmain({"-Wmain"}, + " -Wmain \tWarn about suspicious declarations of \"main\".\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-main")); + +maplecl::Option oWmaybeUninitialized({"-Wmaybe-uninitialized"}, + " -Wmaybe-uninitialized \tWarn about maybe uninitialized automatic" + " variables.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-maybe-uninitialized")); + +maplecl::Option oWmemsetEltSize({"-Wmemset-elt-size"}, + " -Wmemset-elt-size \tWarn about suspicious calls to memset where " + "the third argument contains the number of elements not multiplied by the element " + "size.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-memset-elt-size")); + +maplecl::Option oWmemsetTransposedArgs({"-Wmemset-transposed-args"}, + " -Wmemset-transposed-args \tWarn about suspicious calls to memset where" + " the third argument is constant literal zero and the second is not.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-memset-transposed-args")); + +maplecl::Option oWmisleadingIndentation({"-Wmisleading-indentation"}, + " -Wmisleading-indentation \tWarn when the indentation of the code " + "does not reflect the block structure.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-misleading-indentation")); + +maplecl::Option oWmissingBraces({"-Wmissing-braces"}, + " -Wmissing-braces \tWarn about possibly missing braces around" + " initializers.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-missing-braces")); + +maplecl::Option oWmissingDeclarations({"-Wmissing-declarations"}, + " -Wmissing-declarations \tWarn about global functions without previous" + " declarations.\n", + 
{driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-missing-declarations")); + +maplecl::Option oWmissingFormatAttribute({"-Wmissing-format-attribute"}, + " -Wmissing-format-attribute \t\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-missing-format-attribute")); + +maplecl::Option oWmissingIncludeDirs({"-Wmissing-include-dirs"}, + " -Wmissing-include-dirs \tWarn about user-specified include " + "directories that do not exist.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-missing-include-dirs")); + +maplecl::Option oWmissingParameterType({"-Wmissing-parameter-type"}, + " -Wmissing-parameter-type \tWarn about function parameters declared" + " without a type specifier in K&R-style functions.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-missing-parameter-type")); + +maplecl::Option oWmissingPrototypes({"-Wmissing-prototypes"}, + " -Wmissing-prototypes \tWarn about global functions without prototypes.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-missing-prototypes")); + +maplecl::Option oWmultichar({"-Wmultichar"}, + " -Wmultichar \tWarn about use of multi-character character constants.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-multichar")); + +maplecl::Option oWmultipleInheritance({"-Wmultiple-inheritance"}, + " -Wmultiple-inheritance \tWarn on direct multiple inheritance.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWnamespaces({"-Wnamespaces"}, + " -Wnamespaces \tWarn on namespace definition.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWnarrowing({"-Wnarrowing"}, + " -Wnarrowing \tWarn about narrowing conversions within { } " + "that are ill-formed in C++11.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-narrowing")); + +maplecl::Option oWnestedExterns({"-Wnested-externs"}, + " -Wnested-externs \tWarn about \"extern\" declarations not at file" + " scope.\n", + {driverCategory, clangCategory}, 
+ maplecl::DisableWith("-Wno-nested-externs")); + +maplecl::Option oWnoexcept({"-Wnoexcept"}, + " -Wnoexcept \tWarn when a noexcept expression evaluates to false even" + " though the expression can't actually throw.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-noexcept")); + +maplecl::Option oWnoexceptType({"-Wnoexcept-type"}, + " -Wnoexcept-type \tWarn if C++1z noexcept function type will change " + "the mangled name of a symbol.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-noexcept-type")); + +maplecl::Option oWnonTemplateFriend({"-Wnon-template-friend"}, + " -Wnon-template-friend \tWarn when non-templatized friend functions" + " are declared within a template.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-non-template-friend")); + +maplecl::Option oWnonVirtualDtor({"-Wnon-virtual-dtor"}, + " -Wnon-virtual-dtor \tWarn about non-virtual destructors.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-non-virtual-dtor")); + +maplecl::Option oWnonnull({"-Wnonnull"}, + " -Wnonnull \tWarn about NULL being passed to argument slots marked as" + " requiring non-NULL.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-nonnull")); + +maplecl::Option oWnonnullCompare({"-Wnonnull-compare"}, + " -Wnonnull-compare \tWarn if comparing pointer parameter with nonnull" + " attribute with NULL.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-nonnull-compare")); + +maplecl::Option oWnormalized({"-Wnormalized"}, + " -Wnormalized \tWarn about non-normalized Unicode strings.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-normalized")); + +maplecl::Option oWnormalizedE({"-Wnormalized="}, + " -Wnormalized= \t-Wnormalized=[none|id|nfc|nfkc] Warn about " + "non-normalized Unicode strings.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWnullDereference({"-Wnull-dereference"}, + " -Wnull-dereference \tWarn if dereferencing a NULL pointer 
may lead " + "to erroneous or undefined behavior.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-null-dereference")); + +maplecl::Option oWodr({"-Wodr"}, + " -Wodr \tWarn about some C++ One Definition Rule violations during " + "link time optimization.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-odr")); + +maplecl::Option oWoldStyleCast({"-Wold-style-cast"}, + " -Wold-style-cast \tWarn if a C-style cast is used in a program.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-old-style-cast")); + +maplecl::Option oWoldStyleDeclaration({"-Wold-style-declaration"}, + " -Wold-style-declaration \tWarn for obsolescent usage in a declaration.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-old-style-declaration")); + +maplecl::Option oWoldStyleDefinition({"-Wold-style-definition"}, + " -Wold-style-definition \tWarn if an old-style parameter definition" + " is used.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-old-style-definition")); + +maplecl::Option oWopenmSimd({"-Wopenmp-simd"}, + " -Wopenmp-simd \tWarn if the vectorizer cost model overrides the OpenMP" + " or the Cilk Plus simd directive set by user. 
\n", + {driverCategory, clangCategory}); + +maplecl::Option oWoverflow({"-Woverflow"}, + " -Woverflow \tWarn about overflow in arithmetic expressions.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-overflow")); + +maplecl::Option oWoverlengthStrings({"-Woverlength-strings"}, + " -Woverlength-strings \tWarn if a string is longer than the maximum " + "portable length specified by the standard.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-overlength-strings")); + +maplecl::Option oWoverloadedVirtual({"-Woverloaded-virtual"}, + " -Woverloaded-virtual \tWarn about overloaded virtual function names.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-overloaded-virtual")); + +maplecl::Option oWoverrideInit({"-Woverride-init"}, + " -Woverride-init \tWarn about overriding initializers without side" + " effects.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-override-init")); + +maplecl::Option oWoverrideInitSideEffects({"-Woverride-init-side-effects"}, + " -Woverride-init-side-effects \tWarn about overriding initializers with" + " side effects.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-override-init-side-effects")); + +maplecl::Option oWpacked({"-Wpacked"}, + " -Wpacked \tWarn when the packed attribute has no effect on struct " + "layout.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-packed")); + +maplecl::Option oWpackedBitfieldCompat({"-Wpacked-bitfield-compat"}, + " -Wpacked-bitfield-compat \tWarn about packed bit-fields whose offset" + " changed in GCC 4.4.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-packed-bitfield-compat")); + +maplecl::Option oWpadded({"-Wpadded"}, + " -Wpadded \tWarn when padding is required to align structure members.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-padded")); + +maplecl::Option oWparentheses({"-Wparentheses"}, + " -Wparentheses \tWarn about possibly missing 
parentheses.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-parentheses")); + +maplecl::Option oWpedantic({"-Wpedantic"}, + " -Wpedantic \tIssue warnings needed for strict compliance to the " + "standard.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWpedanticMsFormat({"-Wpedantic-ms-format"}, + " -Wpedantic-ms-format \t\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-pedantic-ms-format")); + +maplecl::Option oWplacementNew({"-Wplacement-new"}, + " -Wplacement-new \tWarn for placement new expressions with undefined " + "behavior.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-placement-new")); + +maplecl::Option oWplacementNewE({"-Wplacement-new="}, + " -Wplacement-new= \tWarn for placement new expressions with undefined " + "behavior.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWpmfConversions({"-Wpmf-conversions"}, + " -Wpmf-conversions \tWarn when converting the type of pointers to " + "member functions.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-pmf-conversions")); + +maplecl::Option oWpointerCompare({"-Wpointer-compare"}, + " -Wpointer-compare \tWarn when a pointer is compared with a zero " + "character constant.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-pointer-compare")); + +maplecl::Option oWpointerSign({"-Wpointer-sign"}, + " -Wpointer-sign \tWarn when a pointer differs in signedness in an " + "assignment.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-pointer-sign")); + +maplecl::Option oWpointerToIntCast({"-Wpointer-to-int-cast"}, + " -Wpointer-to-int-cast \tWarn when a pointer is cast to an integer of " + "a different size.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-pointer-to-int-cast")); + +maplecl::Option oWpragmas({"-Wpragmas"}, + " -Wpragmas \tWarn about misuses of pragmas.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-pragmas")); + 
+maplecl::Option oWprotocol({"-Wprotocol"}, + " -Wprotocol \tWarn if inherited methods are unimplemented.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-protocol")); + +maplecl::Option oWredundantDecls({"-Wredundant-decls"}, + " -Wredundant-decls \tWarn about multiple declarations of the same" + " object.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-redundant-decls")); + +maplecl::Option oWregister({"-Wregister"}, + " -Wregister \tWarn about uses of register storage specifier.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-register")); + +maplecl::Option oWreorder({"-Wreorder"}, + " -Wreorder \tWarn when the compiler reorders code.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-reorder")); + +maplecl::Option oWrestrict({"-Wrestrict"}, + " -Wrestrict \tWarn when an argument passed to a restrict-qualified " + "parameter aliases with another argument.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-restrict")); + +maplecl::Option oWreturnLocalAddr({"-Wreturn-local-addr"}, + " -Wreturn-local-addr \tWarn about returning a pointer/reference to a" + " local or temporary variable.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-return-local-addr")); + +maplecl::Option oWreturnType({"-Wreturn-type"}, + " -Wreturn-type \tWarn whenever a function's return type defaults to" + " \"int\" (C)\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-return-type")); + +maplecl::Option oWselector({"-Wselector"}, + " -Wselector \tWarn if a selector has multiple methods.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-selector")); + +maplecl::Option oWsequencePoint({"-Wsequence-point"}, + " -Wsequence-point \tWarn about possible violations of sequence point " + "rules.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sequence-point")); + +maplecl::Option oWshadowIvar({"-Wshadow-ivar"}, + " -Wshadow-ivar \tWarn 
if a local declaration hides an instance " + "variable.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-shadow-ivar")); + +maplecl::Option oWshiftCountNegative({"-Wshift-count-negative"}, + " -Wshift-count-negative \tWarn if shift count is negative.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-shift-count-negative")); + +maplecl::Option oWshiftCountOverflow({"-Wshift-count-overflow"}, + " -Wshift-count-overflow \tWarn if shift count >= width of type.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-shift-count-overflow")); + +maplecl::Option oWsignConversion({"-Wsign-conversion"}, + " -Wsign-conversion \tWarn for implicit type conversions between signed " + "and unsigned integers.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sign-conversion")); + +maplecl::Option oWsignPromo({"-Wsign-promo"}, + " -Wsign-promo \tWarn when overload promotes from unsigned to signed.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sign-promo")); + +maplecl::Option oWsizedDeallocation({"-Wsized-deallocation"}, + " -Wsized-deallocation \tWarn about missing sized deallocation " + "functions.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sized-deallocation")); + +maplecl::Option oWsizeofArrayArgument({"-Wsizeof-array-argument"}, + " -Wsizeof-array-argument \tWarn when sizeof is applied on a parameter " + "declared as an array.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sizeof-array-argument")); + +maplecl::Option oWsizeofPointerMemaccess({"-Wsizeof-pointer-memaccess"}, + " -Wsizeof-pointer-memaccess \tWarn about suspicious length parameters to" + " certain string functions if the argument uses sizeof.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sizeof-pointer-memaccess")); + +maplecl::Option oWstackProtector({"-Wstack-protector"}, + " -Wstack-protector \tWarn when not issuing stack smashing protection " + "for some 
reason.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-stack-protector")); + +maplecl::Option oWstackUsage({"-Wstack-usage="}, + " -Wstack-usage= \t-Wstack-usage= Warn if stack usage might exceed .\n", + {driverCategory, unSupCategory}); + +maplecl::Option oWstrictAliasing({"-Wstrict-aliasing"}, + " -Wstrict-aliasing \tWarn about code which might break strict aliasing " + "rules.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-strict-aliasing")); + +maplecl::Option oWstrictAliasingE({"-Wstrict-aliasing="}, + " -Wstrict-aliasing= \tWarn about code which might break strict aliasing " + "rules.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWstrictNullSentinel({"-Wstrict-null-sentinel"}, + " -Wstrict-null-sentinel \tWarn about uncasted NULL used as sentinel.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-strict-null-sentinel")); + +maplecl::Option oWstrictOverflow({"-Wstrict-overflow"}, + " -Wstrict-overflow \tWarn about optimizations that assume that signed " + "overflow is undefined.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-strict-overflow")); + +maplecl::Option oWstrictSelectorMatch({"-Wstrict-selector-match"}, + " -Wstrict-selector-match \tWarn if type signatures of candidate methods " + "do not match exactly.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-strict-selector-match")); + +maplecl::Option oWstringopOverflow({"-Wstringop-overflow"}, + " -Wstringop-overflow \tWarn about buffer overflow in string manipulation" + " functions like memcpy and strcpy.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-stringop-overflow")); + +maplecl::Option oWsubobjectLinkage({"-Wsubobject-linkage"}, + " -Wsubobject-linkage \tWarn if a class type has a base or a field whose" + " type uses the anonymous namespace or depends on a type with no linkage.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-subobject-linkage")); 
+ +maplecl::Option oWsuggestAttributeConst({"-Wsuggest-attribute=const"}, + " -Wsuggest-attribute=const \tWarn about functions which might be " + "candidates for __attribute__((const)).\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-suggest-attribute=const")); + +maplecl::Option oWsuggestAttributeFormat({"-Wsuggest-attribute=format"}, + " -Wsuggest-attribute=format \tWarn about functions which might be " + "candidates for format attributes.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-suggest-attribute=format")); + +maplecl::Option oWsuggestAttributeNoreturn({"-Wsuggest-attribute=noreturn"}, + " -Wsuggest-attribute=noreturn \tWarn about functions which might be" + " candidates for __attribute__((noreturn)).\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-suggest-attribute=noreturn")); + +maplecl::Option oWsuggestAttributePure({"-Wsuggest-attribute=pure"}, + " -Wsuggest-attribute=pure \tWarn about functions which might be " + "candidates for __attribute__((pure)).\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-suggest-attribute=pure")); + +maplecl::Option oWsuggestFinalMethods({"-Wsuggest-final-methods"}, + " -Wsuggest-final-methods \tWarn about C++ virtual methods where adding" + " final keyword would improve code quality.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-suggest-final-methods")); + +maplecl::Option oWsuggestFinalTypes({"-Wsuggest-final-types"}, + " -Wsuggest-final-types \tWarn about C++ polymorphic types where adding" + " final keyword would improve code quality.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-suggest-final-types")); + +maplecl::Option oWswitch({"-Wswitch"}, + " -Wswitch \tWarn about enumerated switches\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-switch")); + +maplecl::Option oWswitchBool({"-Wswitch-bool"}, + " -Wswitch-bool \tWarn about switches with boolean controlling " + 
"expression.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-switch-bool")); + +maplecl::Option oWswitchDefault({"-Wswitch-default"}, + " -Wswitch-default \tWarn about enumerated switches missing a \"default:\"" + " statement.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-switch-default")); + +maplecl::Option oWswitchEnum({"-Wswitch-enum"}, + " -Wswitch-enum \tWarn about all enumerated switches missing a specific " + "case.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-switch-enum")); + +maplecl::Option oWswitchUnreachable({"-Wswitch-unreachable"}, + " -Wswitch-unreachable \tWarn about statements between switch's " + "controlling expression and the first case.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-switch-unreachable")); + +maplecl::Option oWsyncNand({"-Wsync-nand"}, + " -Wsync-nand \tWarn when __sync_fetch_and_nand and __sync_nand_and_fetch " + "built-in functions are used.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-sync-nand")); + +maplecl::Option oWsystemHeaders({"-Wsystem-headers"}, + " -Wsystem-headers \tDo not suppress warnings from system headers.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-system-headers")); + +maplecl::Option oWtautologicalCompare({"-Wtautological-compare"}, + " -Wtautological-compare \tWarn if a comparison always evaluates to true" + " or false.\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-tautological-compare")); + +maplecl::Option oWtemplates({"-Wtemplates"}, + " -Wtemplates \tWarn on primary template declaration.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWterminate({"-Wterminate"}, + " -Wterminate \tWarn if a throw expression will always result in a call " + "to terminate(). 
\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-terminate")); + +maplecl::Option oWtraditional({"-Wtraditional"}, + " -Wtraditional \tWarn about features not present in traditional C. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-traditional")); + +maplecl::Option oWtraditionalConversion({"-Wtraditional-conversion"}, + " -Wtraditional-conversion \tWarn of prototypes causing type conversions " + "different from what would happen in the absence of prototype. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-traditional-conversion")); + +maplecl::Option oWtrampolines({"-Wtrampolines"}, + " -Wtrampolines \tWarn whenever a trampoline is generated. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-trampolines")); + +maplecl::Option oWtrigraphs({"-Wtrigraphs"}, + " -Wtrigraphs \tWarn if trigraphs are encountered that might affect the " + "meaning of the program. \n", + {driverCategory, clangCategory}); + +maplecl::Option oWundeclaredSelector({"-Wundeclared-selector"}, + " -Wundeclared-selector \tWarn about @selector()s without previously " + "declared methods. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-undeclared-selector")); + +maplecl::Option oWuninitialized({"-Wuninitialized"}, + " -Wuninitialized \tWarn about uninitialized automatic variables. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-uninitialized")); + +maplecl::Option oWunknownPragmas({"-Wunknown-pragmas"}, + " -Wunknown-pragmas \tWarn about unrecognized pragmas. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unknown-pragmas")); + +maplecl::Option oWunsafeLoopOptimizations({"-Wunsafe-loop-optimizations"}, + " -Wunsafe-loop-optimizations \tWarn if the loop cannot be optimized due" + " to nontrivial assumptions. 
\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unsafe-loop-optimizations")); + +maplecl::Option oWunsuffixedFloatConstants({"-Wunsuffixed-float-constants"}, + " -Wunsuffixed-float-constants \tWarn about unsuffixed float constants. \n", + {driverCategory, clangCategory}); + +maplecl::Option oWunused({"-Wunused"}, + " -Wunused \tEnable all -Wunused- warnings. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused")); + +maplecl::Option oWunusedButSetParameter({"-Wunused-but-set-parameter"}, + " -Wunused-but-set-parameter \tWarn when a function parameter is only set" + ", otherwise unused. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-but-set-parameter")); + +maplecl::Option oWunusedButSetVariable({"-Wunused-but-set-variable"}, + " -Wunused-but-set-variable \tWarn when a variable is only set," + " otherwise unused. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-but-set-variable")); + +maplecl::Option oWunusedConstVariable({"-Wunused-const-variable"}, + " -Wunused-const-variable \tWarn when a const variable is unused. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-const-variable")); + +maplecl::Option oWunusedFunction({"-Wunused-function"}, + " -Wunused-function \tWarn when a function is unused. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-function")); + +maplecl::Option oWunusedLabel({"-Wunused-label"}, + " -Wunused-label \tWarn when a label is unused. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-label")); + +maplecl::Option oWunusedLocalTypedefs({"-Wunused-local-typedefs"}, + " -Wunused-local-typedefs \tWarn when typedefs locally defined in a" + " function are not used. 
\n", + {driverCategory, clangCategory}); + +maplecl::Option oWunusedResult({"-Wunused-result"}, + " -Wunused-result \tWarn if a caller of a function, marked with attribute " + "warn_unused_result, does not use its return value. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-result")); + +maplecl::Option oWunusedValue({"-Wunused-value"}, + " -Wunused-value \tWarn when an expression value is unused. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-value")); + +maplecl::Option oWunusedVariable({"-Wunused-variable"}, + " -Wunused-variable \tWarn when a variable is unused. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-unused-variable")); + +maplecl::Option oWuselessCast({"-Wuseless-cast"}, + " -Wuseless-cast \tWarn about useless casts. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-useless-cast")); + +maplecl::Option oWvarargs({"-Wvarargs"}, + " -Wvarargs \tWarn about questionable usage of the macros used to " + "retrieve variable arguments. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-varargs")); + +maplecl::Option oWvariadicMacros({"-Wvariadic-macros"}, + " -Wvariadic-macros \tWarn about using variadic macros. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-variadic-macros")); + +maplecl::Option oWvectorOperationPerformance({"-Wvector-operation-performance"}, + " -Wvector-operation-performance \tWarn when a vector operation is " + "compiled outside the SIMD. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-vector-operation-performance")); + +maplecl::Option oWvirtualInheritance({"-Wvirtual-inheritance"}, + " -Wvirtual-inheritance \tWarn on direct virtual inheritance.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWvirtualMoveAssign({"-Wvirtual-move-assign"}, + " -Wvirtual-move-assign \tWarn if a virtual base has a non-trivial move " + "assignment operator. 
\n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-virtual-move-assign")); + +maplecl::Option oWvolatileRegisterVar({"-Wvolatile-register-var"}, + " -Wvolatile-register-var \tWarn when a register variable is declared " + "volatile. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-volatile-register-var")); + +maplecl::Option oWzeroAsNullPointerConstant({"-Wzero-as-null-pointer-constant"}, + " -Wzero-as-null-pointer-constant \tWarn when a literal '0' is used as " + "null pointer. \n", + {driverCategory, clangCategory}, + maplecl::DisableWith("-Wno-zero-as-null-pointer-constant")); + +maplecl::Option oWnoScalarStorageOrder({"-Wno-scalar-storage-order"}, + " -Wno-scalar-storage-order \tDo not warn on suspicious constructs " + "involving reverse scalar storage order.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-Wscalar-storage-order")); + +maplecl::Option oWnoReservedIdMacro({"-Wno-reserved-id-macro"}, + " -Wno-reserved-id-macro \tDo not warn when macro name is a reserved identifier.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWnoGnuZeroVariadicMacroArguments({"-Wno-gnu-zero-variadic-macro-arguments"}, + " -Wno-gnu-zero-variadic-macro-arguments \tDo not warn when token pasting of ',' and __VA_ARGS__ " + "is a GNU extension.\n", + {driverCategory, clangCategory}); + +maplecl::Option oWnoGnuStatementExpression({"-Wno-gnu-statement-expression"}, + " -Wno-gnu-statement-expression \tDo not warn when use of GNU statement expression extension.\n", + {driverCategory, clangCategory}); + +maplecl::Option oMleadz({"-mleadz"}, + " -mleadz \tnables the leadz (leading zero) instruction.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMleafIdSharedLibrary({"-mleaf-id-shared-library"}, + " -mleaf-id-shared-library \tenerate code that supports shared libraries " + "via the library ID method, but assumes that this library or executable won't link " + "against any other ID shared libraries.\n", + 
{driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-leaf-id-shared-library")); + +maplecl::Option oMlibfuncs({"-mlibfuncs"}, + " -mlibfuncs \tSpecify that intrinsic library functions are being " + "compiled, passing all values in registers, no matter the size.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-libfuncs")); + +maplecl::Option oMlibraryPic({"-mlibrary-pic"}, + " -mlibrary-pic \tGenerate position-independent EABI code.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlinkedFp({"-mlinked-fp"}, + " -mlinked-fp \tFollow the EABI requirement of always creating a " + "frame pointer whenever a stack frame is allocated.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlinkerOpt({"-mlinker-opt"}, + " -mlinker-opt \tEnable the optimization pass in the HP-UX linker.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlinux({"-mlinux"}, + " -mlinux \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlittle({"-mlittle"}, + " -mlittle \tAssume target CPU is configured as little endian.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlittleEndian({"-mlittle-endian"}, + " -mlittle-endian \tAssume target CPU is configured as little endian.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlittleEndianData({"-mlittle-endian-data"}, + " -mlittle-endian-data \tStore data (but not code) in the big-endian " + "format.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMliw({"-mliw"}, + " -mliw \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMll64({"-mll64"}, + " -mll64 \tEnable double load/store operations for ARC HS cores.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMllsc({"-mllsc"}, + " -mllsc \tUse (do not use) 'll', 'sc', and 'sync' instructions to " + "implement atomic memory built-in functions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-llsc")); + +maplecl::Option oMloadStorePairs({"-mload-store-pairs"}, 
+ " -mload-store-pairs \tEnable (disable) an optimization that pairs " + "consecutive load or store instructions to enable load/store bonding. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-load-store-pairs")); + +maplecl::Option oMlocalSdata({"-mlocal-sdata"}, + " -mlocal-sdata \tExtend (do not extend) the -G behavior to local data " + "too, such as to static variables in C. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-local-sdata")); + +maplecl::Option oMlock({"-mlock"}, + " -mlock \tPassed down to the assembler to enable the locked " + "load/store conditional extension. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlongCalls({"-mlong-calls"}, + " -mlong-calls \tGenerate calls as register indirect calls, thus " + "providing access to the full 32-bit address range.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-long-calls")); + +maplecl::Option oMlongDouble128({"-mlong-double-128"}, + " -mlong-double-128 \tControl the size of long double type. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlongDouble64({"-mlong-double-64"}, + " -mlong-double-64 \tControl the size of long double type.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlongDouble80({"-mlong-double-80"}, + " -mlong-double-80 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlongJumpTableOffsets({"-mlong-jump-table-offsets"}, + " -mlong-jump-table-offsets \tUse 32-bit offsets in switch tables. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlongJumps({"-mlong-jumps"}, + " -mlong-jumps \tDisable (or re-enable) the generation of PC-relative " + "jump instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-long-jumps")); + +maplecl::Option oMlongLoadStore({"-mlong-load-store"}, + " -mlong-load-store \tGenerate 3-instruction load and store " + "sequences as sometimes required by the HP-UX 10 linker.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlong32({"-mlong32"}, + " -mlong32 \tForce long, int, and pointer types to be 32 bits wide.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlong64({"-mlong64"}, + " -mlong64 \tForce long types to be 64 bits wide. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlongcall({"-mlongcall"}, + " -mlongcall \tBy default assume that all calls are far away so that a " + "longer and more expensive calling sequence is required. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-longcall")); + +maplecl::Option oMlongcalls({"-mlongcalls"}, + " -mlongcalls \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-longcalls")); + +maplecl::Option oMloop({"-mloop"}, + " -mloop \tEnables the use of the e3v5 LOOP instruction. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlow64k({"-mlow-64k"}, + " -mlow-64k \tWhen enabled, the compiler is free to take advantage of " + "the knowledge that the entire program fits into the low 64k of memory.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-low-64k")); + +maplecl::Option oMlowPrecisionRecipSqrt({"-mlow-precision-recip-sqrt"}, + " -mlow-precision-recip-sqrt \tEnable the reciprocal square root " + "approximation. 
Enabling this reduces precision of reciprocal square root results " + "to about 16 bits for single precision and to 32 bits for double precision.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-low-precision-recip-sqrt")); + +maplecl::Option oMlp64({"-mlp64"}, + " -mlp64 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlra({"-mlra"}, + " -mlra \tEnable Local Register Allocation. By default the port uses " + "LRA. (i.e. -mno-lra).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-lra")); + +maplecl::Option oMlraPriorityCompact({"-mlra-priority-compact"}, + " -mlra-priority-compact \tIndicate target register priority for " + "r0..r3 / r12..r15.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlraPriorityNoncompact({"-mlra-priority-noncompact"}, + " -mlra-priority-noncompact \tReduce target register priority for " + "r0..r3 / r12..r15.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlraPriorityNone({"-mlra-priority-none"}, + " -mlra-priority-none \tDon't indicate any priority for target " + "registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlwp({"-mlwp"}, + " -mlwp \tThese switches enable the use of instructions in the mlwp.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlxc1Sxc1({"-mlxc1-sxc1"}, + " -mlxc1-sxc1 \tWhen applicable, enable (disable) the " + "generation of lwxc1, swxc1, ldxc1, sdxc1 instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMlzcnt({"-mlzcnt"}, + " -mlzcnt \these switches enable the use of instructions in the mlzcnt\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMM({"-MM"}, + " -MM \tLike -M but ignore system header files.\n", + {driverCategory, clangCategory}); + +maplecl::Option oMm({"-Mm"}, + " -Mm \tCauses variables to be assigned to the .near section by default.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmac({"-mmac"}, + " -mmac \tEnable the use of multiply-accumulate 
instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmac24({"-mmac-24"}, + " -mmac-24 \tPassed down to the assembler. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmacD16({"-mmac-d16"}, + " -mmac-d16 \tPassed down to the assembler.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmac_24({"-mmac_24"}, + " -mmac_24 \tReplaced by -mmac-24.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmac_d16({"-mmac_d16"}, + " -mmac_d16 \tReplaced by -mmac-d16.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmad({"-mmad"}, + " -mmad \tEnable (disable) use of the mad, madu and mul instructions, " + "as provided by the R4650 ISA.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mad")); + +maplecl::Option oMmadd4({"-mmadd4"}, + " -mmadd4 \tWhen applicable, enable (disable) the generation of " + "4-operand madd.s, madd.d and related instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmainkernel({"-mmainkernel"}, + " -mmainkernel \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmalloc64({"-mmalloc64"}, + " -mmalloc64 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmax({"-mmax"}, + " -mmax \tGenerate code to use MAX instruction sets.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-max")); + +maplecl::Option oMmaxConstantSize({"-mmax-constant-size"}, + " -mmax-constant-size \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmaxStackFrame({"-mmax-stack-frame"}, + " -mmax-stack-frame \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmcountRaAddress({"-mmcount-ra-address"}, + " -mmcount-ra-address \tEmit (do not emit) code that allows _mcount " + "to modify the calling function’s return address.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mcount-ra-address")); + +maplecl::Option oMmcu({"-mmcu"}, + " -mmcu \tUse the MIPS MCU ASE instructions.\n", + {driverCategory, 
unSupCategory}, + maplecl::DisableWith("--mno-mcu")); + +maplecl::Option oMmcuE({"-mmcu="}, + " -mmcu= \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMMD({"-MMD"}, + " -MMD \tLike -MD but ignore system header files.\n", + {driverCategory, clangCategory}); + +maplecl::Option oMmedia({"-mmedia"}, + " -mmedia \tUse media instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-media")); + +maplecl::Option oMmediumCalls({"-mmedium-calls"}, + " -mmedium-calls \tDon’t use less than 25-bit addressing range for calls," + " which is the offset available for an unconditional branch-and-link instruction. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmemcpy({"-mmemcpy"}, + " -mmemcpy \tForce (do not force) the use of memcpy for non-trivial " + "block moves.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-memcpy")); + +maplecl::Option oMmemcpyStrategyStrategy({"-mmemcpy-strategy=strategy"}, + " -mmemcpy-strategy=strategy \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmemoryLatency({"-mmemory-latency"}, + " -mmemory-latency \tSets the latency the scheduler should assume for " + "typical memory references as seen by the application.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmemoryModel({"-mmemory-model"}, + " -mmemory-model \tSet the memory model in force on the processor to " + "one of.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmemsetStrategyStrategy({"-mmemset-strategy=strategy"}, + " -mmemset-strategy=strategy \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmfcrf({"-mmfcrf"}, + " -mmfcrf \tSpecify which instructions are available on " + "the processor you are using. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mfcrf")); + +maplecl::Option oMmfpgpr({"-mmfpgpr"}, + " -mmfpgpr \tSpecify which instructions are available on the " + "processor you are using.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mfpgpr")); + +maplecl::Option oMmicromips({"-mmicromips"}, + " -mmicromips \tGenerate microMIPS code.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mmicromips")); + +maplecl::Option oMminimalToc({"-mminimal-toc"}, + " -mminimal-toc \tModify generation of the TOC (Table Of Contents), which " + "is created for every executable file. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMminmax({"-mminmax"}, + " -mminmax \tEnables the min and max instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmitigateRop({"-mmitigate-rop"}, + " -mmitigate-rop \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmixedCode({"-mmixed-code"}, + " -mmixed-code \tTweak register allocation to help 16-bit instruction" + " generation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmmx({"-mmmx"}, + " -mmmx \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmodelLarge({"-mmodel=large"}, + " -mmodel=large \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmodelMedium({"-mmodel=medium"}, + " -mmodel=medium \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmodelSmall({"-mmodel=small"}, + " -mmodel=small \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmovbe({"-mmovbe"}, + " -mmovbe \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmpx({"-mmpx"}, + " -mmpx \tThese switches enable the use of instructions in the mmpx.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmpyOption({"-mmpy-option"}, + " -mmpy-option \tCompile ARCv2 code with a multiplier design option. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmsBitfields({"-mms-bitfields"}, + " -mms-bitfields \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-ms-bitfields")); + +maplecl::Option oMmt({"-mmt"}, + " -mmt \tUse MT Multithreading instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mt")); + +maplecl::Option oMmul({"-mmul"}, + " -mmul \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmulBugWorkaround({"-mmul-bug-workaround"}, + " -mmul-bug-workaround \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mul-bug-workaround")); + +maplecl::Option oMmulx({"-mmul.x"}, + " -mmul.x \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmul32x16({"-mmul32x16"}, + " -mmul32x16 \tGenerate 32x16-bit multiply and multiply-accumulate " + "instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmul64({"-mmul64"}, + " -mmul64 \tGenerate mul64 and mulu64 instructions. " + "Only valid for -mcpu=ARC600.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmuladd({"-mmuladd"}, + " -mmuladd \tUse multiply and add/subtract instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-muladd")); + +maplecl::Option oMmulhw({"-mmulhw"}, + " -mmulhw \tGenerate code that uses the half-word multiply and " + "multiply-accumulate instructions on the IBM 405, 440, 464 and 476 processors.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mulhw")); + +maplecl::Option oMmult({"-mmult"}, + " -mmult \tEnables the multiplication and multiply-accumulate " + "instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmultBug({"-mmult-bug"}, + " -mmult-bug \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mult-bug")); + +maplecl::Option oMmultcost({"-mmultcost"}, + " -mmultcost \tCost to assume for a multiply instruction, " + "with '4' being equal to a normal instruction.\n", + 
{driverCategory, unSupCategory}); + +maplecl::Option oMmultiCondExec({"-mmulti-cond-exec"}, + " -mmulti-cond-exec \tEnable optimization of && and || in conditional " + "execution (default).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-multi-cond-exec")); + +maplecl::Option oMmulticore({"-mmulticore"}, + " -mmulticore \tBuild a standalone application for multicore " + "Blackfin processors. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmultiple({"-mmultiple"}, + " -mmultiple \tGenerate code that uses (does not use) the load multiple " + "word instructions and the store multiple word instructions. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-multiple")); + +maplecl::Option oMmusl({"-mmusl"}, + " -mmusl \tUse musl C library.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmvcle({"-mmvcle"}, + " -mmvcle \tGenerate code using the mvcle instruction to perform " + "block moves.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-mvcle")); + +maplecl::Option oMmvme({"-mmvme"}, + " -mmvme \tOn embedded PowerPC systems, assume that the startup module " + "is called crt0.o and the standard C libraries are libmvme.a and libc.a.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMmwaitx({"-mmwaitx"}, + " -mmwaitx \tThese switches enable the use of instructions in " + "the mmwaitx.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMn({"-mn"}, + " -mn \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnFlash({"-mn-flash"}, + " -mn-flash \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnan2008({"-mnan=2008"}, + " -mnan=2008 \tControl the encoding of the special not-a-number " + "(NaN) IEEE 754 floating-point data.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnanLegacy({"-mnan=legacy"}, + " -mnan=legacy \tControl the encoding of the special not-a-number " + "(NaN) IEEE 754 floating-point data.\n", + {driverCategory, 
unSupCategory}); + +maplecl::Option oMneonFor64bits({"-mneon-for-64bits"}, + " -mneon-for-64bits \tEnables using Neon to handle scalar 64-bits " + "operations.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnestedCondExec({"-mnested-cond-exec"}, + " -mnested-cond-exec \tEnable nested conditional execution " + "optimizations (default).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-nested-cond-exec")); + +maplecl::Option oMnhwloop({"-mnhwloop"}, + " -mnhwloop \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoAlignStringops({"-mno-align-stringops"}, + " -mno-align-stringops \tDo not align the destination of inlined " + "string operations.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoBrcc({"-mno-brcc"}, + " -mno-brcc \tThis option disables a target-specific pass in arc_reorg " + "to generate compare-and-branch (brcc) instructions. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoClearbss({"-mno-clearbss"}, + " -mno-clearbss \tThis option is deprecated. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoCrt0({"-mno-crt0"}, + " -mno-crt0 \tDo not link in the C run-time initialization object file.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoDefault({"-mno-default"}, + " -mno-default \tThis option instructs Maple to turn off all tunable " + "features.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoDpfpLrsr({"-mno-dpfp-lrsr"}, + " -mno-dpfp-lrsr \tDisable lr and sr instructions from using FPX " + "extension aux registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoEflags({"-mno-eflags"}, + " -mno-eflags \tDo not mark ABI switches in e_flags.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoFancyMath387({"-mno-fancy-math-387"}, + " -mno-fancy-math-387 \tSome 387 emulators do not support the sin, cos " + "and sqrt instructions for the 387. Specify this option to avoid generating those " + "instructions. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoFloat({"-mno-float"}, + " -mno-float \tEquivalent to -msoft-float, but additionally asserts that " + "the program being compiled does not perform any floating-point operations. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoFpInToc({"-mno-fp-in-toc"}, + " -mno-fp-in-toc \tModify generation of the TOC (Table Of Contents), " + "which is created for every executable file.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMFpReg({"-mfp-reg"}, + " -mfp-reg \tGenerate code that uses (does not use) the " + "floating-point register set. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-fp-regs")); + +maplecl::Option oMnoFpRetIn387({"-mno-fp-ret-in-387"}, + " -mno-fp-ret-in-387 \tDo not use the FPU registers for return values of " + "functions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoInlineFloatDivide({"-mno-inline-float-divide"}, + " -mno-inline-float-divide \tDo not generate inline code for divides of " + "floating-point values.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoInlineIntDivide({"-mno-inline-int-divide"}, + " -mno-inline-int-divide \tDo not generate inline code for divides of " + "integer values.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoInlineSqrt({"-mno-inline-sqrt"}, + " -mno-inline-sqrt \tDo not generate inline code for sqrt.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoInterrupts({"-mno-interrupts"}, + " -mno-interrupts \tGenerated code is not compatible with hardware " + "interrupts.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoLsim({"-mno-lsim"}, + " -mno-lsim \tAssume that runtime support has been provided and so there " + "is no need to include the simulator library (libsim.a) on the linker command line.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoMillicode({"-mno-millicode"}, + " -mno-millicode \tWhen optimizing for size (using 
-Os), prologues and " + "epilogues that have to save or restore a large number of registers are often " + "shortened by using call to a special function in libgcc\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoMpy({"-mno-mpy"}, + " -mno-mpy \tDo not generate mpy-family instructions for ARC700. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoOpts({"-mno-opts"}, + " -mno-opts \tDisables all the optional instructions enabled " + "by -mall-opts.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoPic({"-mno-pic"}, + " -mno-pic \tGenerate code that does not use a global pointer register. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoPostinc({"-mno-postinc"}, + " -mno-postinc \tCode generation tweaks that disable, respectively, " + "splitting of 32-bit loads, generation of post-increment addresses, and generation " + "of post-modify addresses.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoPostmodify({"-mno-postmodify"}, + " -mno-postmodify \tCode generation tweaks that disable, respectively, " + "splitting of 32-bit loads, generation of post-increment addresses, and generation " + "of post-modify addresses. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoRedZone({"-mno-red-zone"}, + " -mno-red-zone \tDo not use a so-called “red zone” for x86-64 code.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoRoundNearest({"-mno-round-nearest"}, + " -mno-round-nearest \tMake the scheduler assume that the rounding " + "mode has been set to truncating.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoSchedProlog({"-mno-sched-prolog"}, + " -mno-sched-prolog \tPrevent the reordering of instructions in the " + "function prologue, or the merging of those instruction with the instructions in the " + "function's body. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoSideEffects({"-mno-side-effects"}, + " -mno-side-effects \tDo not emit instructions with side effects in " + "addressing modes other than post-increment.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoSoftCmpsf({"-mno-soft-cmpsf"}, + " -mno-soft-cmpsf \tFor single-precision floating-point comparisons, emit" + " an fsub instruction and test the flags. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoSpaceRegs({"-mno-space-regs"}, + " -mno-space-regs \tGenerate code that assumes the target has no " + "space registers.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMSpe({"-mspe"}, + " -mspe \tThis switch enables the generation of SPE simd instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-mno-spe")); + +maplecl::Option oMnoSumInToc({"-mno-sum-in-toc"}, + " -mno-sum-in-toc \tModify generation of the TOC (Table Of Contents), " + "which is created for every executable file. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoVectDouble({"-mnovect-double"}, + " -mno-vect-double \tChange the preferred SIMD mode to SImode. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnobitfield({"-mnobitfield"}, + " -mnobitfield \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnodiv({"-mnodiv"}, + " -mnodiv \tDo not use div and mod instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnoliw({"-mnoliw"}, + " -mnoliw \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnomacsave({"-mnomacsave"}, + " -mnomacsave \tMark the MAC register as call-clobbered, even if " + "-mrenesas is given.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnopFunDllimport({"-mnop-fun-dllimport"}, + " -mnop-fun-dllimport \tThis option is available for Cygwin and " + "MinGW targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnopMcount({"-mnop-mcount"}, + " -mnop-mcount \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnops({"-mnops"}, + " -mnops \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnorm({"-mnorm"}, + " -mnorm \tGenerate norm instructions. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnosetlb({"-mnosetlb"}, + " -mnosetlb \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMnosplitLohi({"-mnosplit-lohi"}, + " -mnosplit-lohi \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oModdSpreg({"-modd-spreg"}, + " -modd-spreg \tEnable the use of odd-numbered single-precision " + "floating-point registers for the o32 ABI.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-odd-spreg")); + +maplecl::Option oMoneByteBool({"-mone-byte-bool"}, + " -mone-byte-bool \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMoptimize({"-moptimize"}, + " -moptimize \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMoptimizeMembar({"-moptimize-membar"}, + " -moptimize-membar \tThis switch removes redundant membar instructions " + "from the compiler-generated code. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-optimize-membar")); + +maplecl::Option oMoverrideE({"-moverride="}, + " -moverride= \tPower users only! Override CPU optimization parameters.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMP({"-MP"}, + " -MP \tGenerate phony targets for all headers.\n", + {driverCategory, clangCategory}); + +maplecl::Option oMpaRisc10({"-mpa-risc-1-0"}, + " -mpa-risc-1-0 \tSynonyms for -march=1.0 respectively.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpaRisc11({"-mpa-risc-1-1"}, + " -mpa-risc-1-1 \tSynonyms for -march=1.1 respectively.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpaRisc20({"-mpa-risc-2-0"}, + " -mpa-risc-2-0 \tSynonyms for -march=2.0 respectively.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpack({"-mpack"}, + " -mpack \tPack VLIW instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-pack")); + +maplecl::Option oMpackedStack({"-mpacked-stack"}, + " -mpacked-stack \tUse the packed stack layout. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-packed-stack")); + +maplecl::Option oMpadstruct({"-mpadstruct"}, + " -mpadstruct \tThis option is deprecated.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpaired({"-mpaired"}, + " -mpaired \tThis switch enables or disables the generation of PAIRED " + "simd instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-paired")); + +maplecl::Option oMpairedSingle({"-mpaired-single"}, + " -mpaired-single \tUse paired-single floating-point instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-paired-single")); + +maplecl::Option oMpcRelativeLiteralLoads({"-mpc-relative-literal-loads"}, + " -mpc-relative-literal-loads \tPC relative literal loads.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpc32({"-mpc32"}, + " -mpc32 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpc64({"-mpc64"}, + " -mpc64 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpc80({"-mpc80"}, + " -mpc80 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpclmul({"-mpclmul"}, + " -mpclmul \tThese switches enable the use of instructions in " + "the mpclmul.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpcrel({"-mpcrel"}, + " -mpcrel \tUse the pc-relative addressing mode of the 68000 directly, " + "instead of using a global offset table.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpdebug({"-mpdebug"}, + " -mpdebug \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpe({"-mpe"}, + " -mpe \tSupport IBM RS/6000 SP Parallel Environment (PE). 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpeAlignedCommons({"-mpe-aligned-commons"}, + " -mpe-aligned-commons \tThis option is available for Cygwin and " + "MinGW targets.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMperfExt({"-mperf-ext"}, + " -mperf-ext \tGenerate performance extension instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-perf-ext")); + +maplecl::Option oMpicDataIsTextRelative({"-mpic-data-is-text-relative"}, + " -mpic-data-is-text-relative \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpicRegister({"-mpic-register"}, + " -mpic-register \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpid({"-mpid"}, + " -mpid \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-pid")); + +maplecl::Option oMpku({"-mpku"}, + " -mpku \tThese switches enable the use of instructions in the mpku.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpointerSizeSize({"-mpointer-size=size"}, + " -mpointer-size=size \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpointersToNestedFunctions({"-mpointers-to-nested-functions"}, + " -mpointers-to-nested-functions \tGenerate (do not generate) code to load" + " up the static chain register (r11) when calling through a pointer on AIX and 64-bit " + "Linux systems where a function pointer points to a 3-word descriptor giving the " + "function address, TOC value to be loaded in register r2, and static chain value to " + "be loaded in register r11. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpokeFunctionName({"-mpoke-function-name"}, + " -mpoke-function-name \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpopc({"-mpopc"}, + " -mpopc \tWith -mpopc, Maple generates code that takes advantage of " + "the UltraSPARC Population Count instruction.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-popc")); + +maplecl::Option oMpopcnt({"-mpopcnt"}, + " -mpopcnt \tThese switches enable the use of instructions in the " + "mpopcnt.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpopcntb({"-mpopcntb"}, + " -mpopcntb \tSpecify which instructions are available on " + "the processor you are using. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-popcntb")); + +maplecl::Option oMpopcntd({"-mpopcntd"}, + " -mpopcntd \tSpecify which instructions are available on " + "the processor you are using. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-popcntd")); + +maplecl::Option oMportableRuntime({"-mportable-runtime"}, + " -mportable-runtime \tUse the portable calling conventions proposed by " + "HP for ELF systems.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpower8Fusion({"-mpower8-fusion"}, + " -mpower8-fusion \tGenerate code that keeps some integer operations " + "adjacent so that the instructions can be fused together on power8 and later " + "processors.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-power8-fusion")); + +maplecl::Option oMpower8Vector({"-mpower8-vector"}, + " -mpower8-vector \tGenerate code that uses the vector and scalar " + "instructions that were added in version 2.07 of the PowerPC ISA.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-power8-vector")); + +maplecl::Option oMpowerpcGfxopt({"-mpowerpc-gfxopt"}, + " -mpowerpc-gfxopt \tSpecify which instructions are available on the" + " processor you are using\n", + {driverCategory, 
unSupCategory}, + maplecl::DisableWith("--mno-powerpc-gfxopt")); + +maplecl::Option oMpowerpcGpopt({"-mpowerpc-gpopt"}, + " -mpowerpc-gpopt \tSpecify which instructions are available on the" + " processor you are using\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-powerpc-gpopt")); + +maplecl::Option oMpowerpc64({"-mpowerpc64"}, + " -mpowerpc64 \tSpecify which instructions are available on the" + " processor you are using\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-powerpc64")); + +maplecl::Option oMpreferAvx128({"-mprefer-avx128"}, + " -mprefer-avx128 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpreferShortInsnRegs({"-mprefer-short-insn-regs"}, + " -mprefer-short-insn-regs \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMprefergot({"-mprefergot"}, + " -mprefergot \tWhen generating position-independent code, " + "emit function calls using the Global Offset Table instead of the Procedure " + "Linkage Table.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpreferredStackBoundary({"-mpreferred-stack-boundary"}, + " -mpreferred-stack-boundary \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMprefetchwt1({"-mprefetchwt1"}, + " -mprefetchwt1 \tThese switches enable the use of instructions in " + "the mprefetchwt1.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpretendCmove({"-mpretend-cmove"}, + " -mpretend-cmove \tPrefer zero-displacement conditional branches " + "for conditional move instruction patterns.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMprintTuneInfo({"-mprint-tune-info"}, + " -mprint-tune-info \tPrint CPU tuning information as comment " + "in assembler file.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMprioritizeRestrictedInsns({"-mprioritize-restricted-insns"}, + " -mprioritize-restricted-insns \tThis option controls the priority that " + "is assigned to dispatch-slot restricted instructions during the 
second scheduling " + "pass. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMprologFunction({"-mprolog-function"}, + " -mprolog-function \tDo use external functions to save and restore " + "registers at the prologue and epilogue of a function.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-prolog-function")); + +maplecl::Option oMprologueEpilogue({"-mprologue-epilogue"}, + " -mprologue-epilogue \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-prologue-epilogue")); + +maplecl::Option oMprototype({"-mprototype"}, + " -mprototype \tOn System V.4 and embedded PowerPC systems assume " + "that all calls to variable argument functions are properly prototyped. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-prototype")); + +maplecl::Option oMpureCode({"-mpure-code"}, + " -mpure-code \tDo not allow constant data to be placed in " + "code sections.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMpushArgs({"-mpush-args"}, + " -mpush-args \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-push-args")); + +maplecl::Option oMQ({"-MQ"}, + " -MQ \t-MQ o Add a MAKE-quoted target.\n", + {driverCategory, clangCategory}, maplecl::joinedValue); + +maplecl::Option oMqClass({"-mq-class"}, + " -mq-class \tEnable 'q' instruction alternatives.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMquadMemory({"-mquad-memory"}, + " -mquad-memory \tGenerate code that uses the non-atomic quad word memory " + "instructions. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-quad-memory")); + +maplecl::Option oMquadMemoryAtomic({"-mquad-memory-atomic"}, + " -mquad-memory-atomic \tGenerate code that uses the " + "atomic quad word memory instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-quad-memory-atomic")); + +maplecl::Option oMr10kCacheBarrier({"-mr10k-cache-barrier"}, + " -mr10k-cache-barrier \tSpecify whether Maple should insert cache barriers" + " to avoid the side-effects of speculation on R10K processors.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMRcq({"-mRcq"}, + " -mRcq \tEnable 'Rcq' constraint handling. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMRcw({"-mRcw"}, + " -mRcw \tEnable 'Rcw' constraint handling. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrdrnd({"-mrdrnd"}, + " -mrdrnd \tThese switches enable the use of instructions in the mrdrnd.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMreadonlyInSdata({"-mreadonly-in-sdata"}, + " -mreadonly-in-sdata \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-readonly-in-sdata")); + +maplecl::Option oMrecip({"-mrecip"}, + " -mrecip \tThis option enables use of the reciprocal estimate and " + "reciprocal square root estimate instructions with additional Newton-Raphson steps " + "to increase precision instead of doing a divide or square root and divide for " + "floating-point arguments. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrecipPrecision({"-mrecip-precision"}, + " -mrecip-precision \tAssume (do not assume) that the reciprocal estimate " + "instructions provide higher-precision estimates than is mandated by the PowerPC" + " ABI. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrecipE({"-mrecip="}, + " -mrecip= \tThis option controls which reciprocal estimate instructions " + "may be used\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrecordMcount({"-mrecord-mcount"}, + " -mrecord-mcount \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMreducedRegs({"-mreduced-regs"}, + " -mreduced-regs \tUse reduced-set registers for register allocation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMregisterNames({"-mregister-names"}, + " -mregister-names \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-register-names")); + +maplecl::Option oMregnames({"-mregnames"}, + " -mregnames \tOn System V.4 and embedded PowerPC systems do emit " + "register names in the assembly language output using symbolic forms.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-regnames")); + +maplecl::Option oMregparm({"-mregparm"}, + " -mregparm \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrelax({"-mrelax"}, + " -mrelax \tGuide linker to relax instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-relax")); + +maplecl::Option oMrelaxImmediate({"-mrelax-immediate"}, + " -mrelax-immediate \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-relax-immediate")); + +maplecl::Option oMrelaxPicCalls({"-mrelax-pic-calls"}, + " -mrelax-pic-calls \tTry to turn PIC calls that are normally " + "dispatched via register $25 into direct calls.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrelocatable({"-mrelocatable"}, + " -mrelocatable \tGenerate code that allows a static executable to be " + "relocated to a different address at run time. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-relocatable")); + +maplecl::Option oMrelocatableLib({"-mrelocatable-lib"}, + " -mrelocatable-lib \tGenerates a .fixup section to allow static " + "executables to be relocated at run time, but -mrelocatable-lib does not use the " + "smaller stack alignment of -mrelocatable.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-relocatable-lib")); + +maplecl::Option oMrenesas({"-mrenesas"}, + " -mrenesas \tComply with the calling conventions defined by Renesas.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-renesas")); + +maplecl::Option oMrepeat({"-mrepeat"}, + " -mrepeat \tEnables the repeat and erepeat instructions, " + "used for low-overhead looping.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrestrictIt({"-mrestrict-it"}, + " -mrestrict-it \tRestricts generation of IT blocks to " + "conform to the rules of ARMv8.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMreturnPointerOnD0({"-mreturn-pointer-on-d0"}, + " -mreturn-pointer-on-d0 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrh850Abi({"-mrh850-abi"}, + " -mrh850-abi \tEnables support for the RH850 version of the V850 ABI.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrl78({"-mrl78"}, + " -mrl78 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrmw({"-mrmw"}, + " -mrmw \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrtd({"-mrtd"}, + " -mrtd \tUse a different function-calling convention, in which " + "functions that take a fixed number of arguments return with the rtd instruction, " + "which pops their arguments while returning. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-rtd")); + +maplecl::Option oMrtm({"-mrtm"}, + " -mrtm \tThese switches enable the use of instructions in the mrtm.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrtp({"-mrtp"}, + " -mrtp \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMrtsc({"-mrtsc"}, + " -mrtsc \tPassed down to the assembler to enable the 64-bit time-stamp " + "counter extension instruction. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMs({"-ms"}, + " -ms \tCauses all variables to default to the .tiny section.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMs2600({"-ms2600"}, + " -ms2600 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsafeDma({"-msafe-dma"}, + " -msafe-dma \ttell the compiler to treat the DMA instructions as " + "potentially affecting all memory.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("-munsafe-dma")); + +maplecl::Option oMsafeHints({"-msafe-hints"}, + " -msafe-hints \tWork around a hardware bug that causes the SPU to " + "stall indefinitely. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsahf({"-msahf"}, + " -msahf \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsatur({"-msatur"}, + " -msatur \tEnables the saturation instructions. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsaveAccInInterrupts({"-msave-acc-in-interrupts"}, + " -msave-acc-in-interrupts \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsaveMducInInterrupts({"-msave-mduc-in-interrupts"}, + " -msave-mduc-in-interrupts \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-save-mduc-in-interrupts")); + +maplecl::Option oMsaveRestore({"-msave-restore"}, + " -msave-restore \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsaveTocIndirect({"-msave-toc-indirect"}, + " -msave-toc-indirect \tGenerate code to save the TOC value in the " + "reserved stack location in the function prologue if the function calls through " + "a pointer on AIX and 64-bit Linux systems. I\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMscc({"-mscc"}, + " -mscc \tEnable the use of conditional set instructions (default).\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-scc")); + +maplecl::Option oMschedArDataSpec({"-msched-ar-data-spec"}, + " -msched-ar-data-spec \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-ar-data-spec")); + +maplecl::Option oMschedArInDataSpec({"-msched-ar-in-data-spec"}, + " -msched-ar-in-data-spec \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-ar-in-data-spec")); + +maplecl::Option oMschedBrDataSpec({"-msched-br-data-spec"}, + " -msched-br-data-spec \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-br-data-spec")); + +maplecl::Option oMschedBrInDataSpec({"-msched-br-in-data-spec"}, + " -msched-br-in-data-spec \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-br-in-data-spec")); + +maplecl::Option oMschedControlSpec({"-msched-control-spec"}, + " -msched-control-spec \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-control-spec")); + +maplecl::Option oMschedCostlyDep({"-msched-costly-dep"}, + " 
-msched-costly-dep \tThis option controls which dependences are " + "considered costly by the target during instruction scheduling.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMschedCountSpecInCriticalPath({"-msched-count-spec-in-critical-path"}, + " -msched-count-spec-in-critical-path \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-count-spec-in-critical-path")); + +maplecl::Option oMschedFpMemDepsZeroCost({"-msched-fp-mem-deps-zero-cost"}, + " -msched-fp-mem-deps-zero-cost \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMschedInControlSpec({"-msched-in-control-spec"}, + " -msched-in-control-spec \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-in-control-spec")); + +maplecl::Option oMschedMaxMemoryInsns({"-msched-max-memory-insns"}, + " -msched-max-memory-insns \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMschedMaxMemoryInsnsHardLimit({"-msched-max-memory-insns-hard-limit"}, + " -msched-max-memory-insns-hard-limit \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMschedPreferNonControlSpecInsns({"-msched-prefer-non-control-spec-insns"}, + " -msched-prefer-non-control-spec-insns \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-prefer-non-control-spec-insns")); + +maplecl::Option oMschedPreferNonDataSpecInsns({"-msched-prefer-non-data-spec-insns"}, + " -msched-prefer-non-data-spec-insns \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sched-prefer-non-data-spec-insns")); + +maplecl::Option oMschedSpecLdc({"-msched-spec-ldc"}, + " -msched-spec-ldc \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMschedStopBitsAfterEveryCycle({"-msched-stop-bits-after-every-cycle"}, + " -msched-stop-bits-after-every-cycle \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMschedule({"-mschedule"}, + " -mschedule \tSchedule code according to the constraints for " + "the machine type 
cpu-type. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMscore5({"-mscore5"}, + " -mscore5 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMscore5u({"-mscore5u"}, + " -mscore5u \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMscore7({"-mscore7"}, + " -mscore7 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMscore7d({"-mscore7d"}, + " -mscore7d \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsda({"-msda"}, + " -msda \tPut static or global variables whose size is n bytes or less " + "into the small data area that register gp points to.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdata({"-msdata"}, + " -msdata \tOn System V.4 and embedded PowerPC systems, if -meabi is " + "used, compile code the same as -msdata=eabi, otherwise compile code the same as " + "-msdata=sysv.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sdata")); + +maplecl::Option oMsdataAll({"-msdata=all"}, + " -msdata=all \tPut all data, not just small objects, into the sections " + "reserved for small data, and use addressing relative to the B14 register to " + "access them.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdataData({"-msdata=data"}, + " -msdata=data \tOn System V.4 and embedded PowerPC systems, " + "put small global data in the .sdata section.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdataDefault({"-msdata=default"}, + " -msdata=default \tOn System V.4 and embedded PowerPC systems, if -meabi " + "is used, compile code the same as -msdata=eabi, otherwise compile code the same as " + "-msdata=sysv.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdataEabi({"-msdata=eabi"}, + " -msdata=eabi \tOn System V.4 and embedded PowerPC systems, put small " + "initialized const global and static data in the .sdata2 section, which is pointed to " + "by register r2. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdataNone({"-msdata=none"}, + " -msdata=none \tOn embedded PowerPC systems, put all initialized global " + "and static data in the .data section, and all uninitialized data in the .bss" + " section.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdataSdata({"-msdata=sdata"}, + " -msdata=sdata \tPut small global and static data in the small data " + "area, but do not generate special code to reference them.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdataSysv({"-msdata=sysv"}, + " -msdata=sysv \tOn System V.4 and embedded PowerPC systems, put small " + "global and static data in the .sdata section, which is pointed to by register r13.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdataUse({"-msdata=use"}, + " -msdata=use \tPut small global and static data in the small data area, " + "and generate special instructions to reference them.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsdram({"-msdram"}, + " -msdram \tLink the SDRAM-based runtime instead of the default " + "ROM-based runtime.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsecurePlt({"-msecure-plt"}, + " -msecure-plt \tGenerate code that allows ld and ld.so to build " + "executables and shared libraries with non-executable .plt and .got sections. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMselSchedDontCheckControlSpec({"-msel-sched-dont-check-control-spec"}, + " -msel-sched-dont-check-control-spec \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsepData({"-msep-data"}, + " -msep-data \tGenerate code that allows the data segment to be " + "located in a different area of memory from the text segment.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sep-data")); + +maplecl::Option oMserializeVolatile({"-mserialize-volatile"}, + " -mserialize-volatile \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-serialize-volatile")); + +maplecl::Option oMsetlb({"-msetlb"}, + " -msetlb \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsha({"-msha"}, + " -msha \tThese switches enable the use of instructions in the msha.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsharedLibraryId({"-mshared-library-id"}, + " -mshared-library-id \tSpecifies the identification number of the " + "ID-based shared library being compiled.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMshort({"-mshort"}, + " -mshort \tConsider type int to be 16 bits wide, like short int.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-short")); + +maplecl::Option oMsignExtendEnabled({"-msign-extend-enabled"}, + " -msign-extend-enabled \tEnable sign extend instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsignReturnAddress({"-msign-return-address"}, + " -msign-return-address \tSelect return address signing scope.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsiliconErrata({"-msilicon-errata"}, + " -msilicon-errata \tThis option passes on a request to assembler to " + "enable the fixes for the named silicon errata.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsiliconErrataWarn({"-msilicon-errata-warn"}, + " -msilicon-errata-warn \tThis option passes on a request to the " + 
"assembler to enable warning messages when a silicon errata might need to be applied.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsim({"-msim"}, + " -msim \tLink the simulator run-time libraries.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sim")); + +maplecl::Option oMsimd({"-msimd"}, + " -msimd \tEnable generation of ARC SIMD instructions via target-specific " + "builtins. Only valid for -mcpu=ARC700.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsimnovec({"-msimnovec"}, + " -msimnovec \tLink the simulator runtime libraries, excluding " + "built-in support for reset and exception vectors and tables.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsimpleFpu({"-msimple-fpu"}, + " -msimple-fpu \tDo not generate sqrt and div instructions for " + "hardware floating-point unit.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsingleExit({"-msingle-exit"}, + " -msingle-exit \tForce generated code to have a single exit " + "point in each function.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-single-exit")); + +maplecl::Option oMsingleFloat({"-msingle-float"}, + " -msingle-float \tGenerate code for single-precision floating-point " + "operations. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsinglePicBase({"-msingle-pic-base"}, + " -msingle-pic-base \tTreat the register used for PIC addressing as " + "read-only, rather than loading it in the prologue for each function.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsio({"-msio"}, + " -msio \tGenerate the predefine, _SIO, for server IO. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsizeLevel({"-msize-level"}, + " -msize-level \tFine-tune size optimization with regards to " + "instruction lengths and alignment. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMskipRaxSetup({"-mskip-rax-setup"}, + " -mskip-rax-setup \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMslowBytes({"-mslow-bytes"}, + " -mslow-bytes \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-slow-bytes")); + +maplecl::Option oMslowFlashData({"-mslow-flash-data"}, + " -mslow-flash-data \tAssume loading data from flash is slower " + "than fetching instruction.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmall({"-msmall"}, + " -msmall \tUse small-model addressing (16-bit pointers, 16-bit size_t).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmallData({"-msmall-data"}, + " -msmall-data \tWhen -msmall-data is used, objects 8 bytes long or " + "smaller are placed in a small data area (the .sdata and .sbss sections) and are " + "accessed via 16-bit relocations off of the $gp register. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmallDataLimit({"-msmall-data-limit"}, + " -msmall-data-limit \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmallDivides({"-msmall-divides"}, + " -msmall-divides \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmallExec({"-msmall-exec"}, + " -msmall-exec \tGenerate code using the bras instruction to do " + "subroutine calls. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-small-exec")); + +maplecl::Option oMsmallMem({"-msmall-mem"}, + " -msmall-mem \tBy default, Maple generates code assuming that addresses " + "are never larger than 18 bits.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmallModel({"-msmall-model"}, + " -msmall-model \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmallText({"-msmall-text"}, + " -msmall-text \tWhen -msmall-text is used, the compiler assumes that " + "the code of the entire program (or shared library) fits in 4MB, and is thus reachable " + "with a branch instruction. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmall16({"-msmall16"}, + " -msmall16 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmallc({"-msmallc"}, + " -msmallc \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsmartmips({"-msmartmips"}, + " -msmartmips \tUse the MIPS SmartMIPS ASE.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-smartmips")); + +maplecl::Option oMsoftFloat({"-msoft-float"}, + " -msoft-float \tThis option ignored; it is provided for compatibility " + "purposes only. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-soft-float")); + +maplecl::Option oMsoftQuadFloat({"-msoft-quad-float"}, + " -msoft-quad-float \tGenerate output containing library calls for " + "quad-word (long double) floating-point instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsoftStack({"-msoft-stack"}, + " -msoft-stack \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsp8({"-msp8"}, + " -msp8 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMspace({"-mspace"}, + " -mspace \tTry to make the code as small as possible.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMspe({"-mspe="}, + " -mspe= \tThis option has been deprecated. 
Use -mspe and -mno-spe " + "instead.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMspecldAnomaly({"-mspecld-anomaly"}, + " -mspecld-anomaly \tWhen enabled, the compiler ensures that the " + "generated code does not contain speculative loads after jump instructions.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-specld-anomaly")); + +maplecl::Option oMspfp({"-mspfp"}, + " -mspfp \tGenerate single-precision FPX instructions, tuned " + "for the compact implementation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMspfpCompact({"-mspfp-compact"}, + " -mspfp-compact \tGenerate single-precision FPX instructions, " + "tuned for the compact implementation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMspfpFast({"-mspfp-fast"}, + " -mspfp-fast \tGenerate single-precision FPX instructions, " + "tuned for the fast implementation.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMspfp_compact({"-mspfp_compact"}, + " -mspfp_compact \tReplaced by -mspfp-compact.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMspfp_fast({"-mspfp_fast"}, + " -mspfp_fast \tReplaced by -mspfp-fast.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsplitAddresses({"-msplit-addresses"}, + " -msplit-addresses \tEnable (disable) use of the %hi() and %lo() " + "assembler relocation operators.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-split-addresses")); + +maplecl::Option oMsplitVecmoveEarly({"-msplit-vecmove-early"}, + " -msplit-vecmove-early \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsse({"-msse"}, + " -msse \tThese switches enable the use of instructions in the msse\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsse2({"-msse2"}, + " -msse2 \tThese switches enable the use of instructions in the msse2\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsse2avx({"-msse2avx"}, + " -msse2avx \t\n", + {driverCategory, unSupCategory}); + 
+maplecl::Option oMsse3({"-msse3"}, + " -msse3 \tThese switches enable the use of instructions in the msse3\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsse4({"-msse4"}, + " -msse4 \tThese switches enable the use of instructions in the msse4\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsse41({"-msse4.1"}, + " -msse4.1 \tThese switches enable the use of instructions in the " + "msse4.1\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsse42({"-msse4.2"}, + " -msse4.2 \tThese switches enable the use of instructions in the " + "msse4.2\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsse4a({"-msse4a"}, + " -msse4a \tThese switches enable the use of instructions in the msse4a\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsseregparm({"-msseregparm"}, + " -msseregparm \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMssse3({"-mssse3"}, + " -mssse3 \tThese switches enable the use of instructions in the mssse3\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackAlign({"-mstack-align"}, + " -mstack-align \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-stack-align")); + +maplecl::Option oMstackBias({"-mstack-bias"}, + " -mstack-bias \tWith -mstack-bias, GCC assumes that the stack pointer, " + "and frame pointer if present, are offset by -2047 which must be added back when making" + " stack frame references.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-stack-bias")); + +maplecl::Option oMstackCheckL1({"-mstack-check-l1"}, + " -mstack-check-l1 \tDo stack checking using information placed into L1 " + "scratchpad memory by the uClinux kernel.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackGuard({"-mstack-guard"}, + " -mstack-guard \tThe S/390 back end emits additional instructions in the " + "function prologue that trigger a trap if the stack size is stack-guard bytes above " + "the stack-size (remember that the 
stack on S/390 grows downward). \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackIncrement({"-mstack-increment"}, + " -mstack-increment \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackOffset({"-mstack-offset"}, + " -mstack-offset \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackProtectorGuard({"-mstack-protector-guard"}, + " -mstack-protector-guard \tGenerate stack protection code using canary at " + "guard.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackProtectorGuardOffset({"-mstack-protector-guard-offset"}, + " -mstack-protector-guard-offset \tWith the latter choice the options " + "-mstack-protector-guard-reg=reg and -mstack-protector-guard-offset=offset furthermore " + "specify which register to use as base register for reading the canary, and from what " + "offset from that base register. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackProtectorGuardReg({"-mstack-protector-guard-reg"}, + " -mstack-protector-guard-reg \tWith the latter choice the options " + "-mstack-protector-guard-reg=reg and -mstack-protector-guard-offset=offset furthermore " + "specify which register to use as base register for reading the canary, and from what " + "offset from that base register. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackSize({"-mstack-size"}, + " -mstack-size \tThe S/390 back end emits additional instructions in the " + "function prologue that trigger a trap if the stack size is stack-guard bytes above " + "the stack-size (remember that the stack on S/390 grows downward). 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstackrealign({"-mstackrealign"}, + " -mstackrealign \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstdStructReturn({"-mstd-struct-return"}, + " -mstd-struct-return \tWith -mstd-struct-return, the compiler generates " + "checking code in functions returning structures or unions to detect size mismatches " + "between the two sides of function calls, as per the 32-bit ABI.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-std-struct-return")); + +maplecl::Option oMstdmain({"-mstdmain"}, + " -mstdmain \tWith -mstdmain, Maple links your program against startup code" + " that assumes a C99-style interface to main, including a local copy of argv strings.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstrictAlign({"-mstrict-align"}, + " -mstrict-align \tDon't assume that unaligned accesses are handled " + "by the system.\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-strict-align")); + +maplecl::Option oMstrictX({"-mstrict-X"}, + " -mstrict-X \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstring({"-mstring"}, + " -mstring \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-string")); + +maplecl::Option oMstringopStrategyAlg({"-mstringop-strategy=alg"}, + " -mstringop-strategy=alg \tOverride the internal decision heuristic for " + "the particular algorithm to use for inlining string operations. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMstructureSizeBoundary({"-mstructure-size-boundary"}, + " -mstructure-size-boundary \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsubxc({"-msubxc"}, + " -msubxc \tWith -msubxc, Maple generates code that takes advantage of " + "the UltraSPARC Subtract-Extended-with-Carry instruction. 
\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-subxc")); + +maplecl::Option oMsvMode({"-msv-mode"}, + " -msv-mode \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsvr4StructReturn({"-msvr4-struct-return"}, + " -msvr4-struct-return \tReturn structures smaller than 8 bytes in " + "registers (as specified by the SVR4 ABI).\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMswap({"-mswap"}, + " -mswap \tGenerate swap instructions.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMswape({"-mswape"}, + " -mswape \tPassed down to the assembler to enable the swap byte " + "ordering extension instruction. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsym32({"-msym32"}, + " -msym32 \tAssume that all symbols have 32-bit values, " + "regardless of the selected ABI. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-sym32")); + +maplecl::Option oMsynci({"-msynci"}, + " -msynci \tEnable (disable) generation of synci instructions on " + "architectures that support it. \n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-synci")); + +maplecl::Option oMsysCrt0({"-msys-crt0"}, + " -msys-crt0 \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMsysLib({"-msys-lib"}, + " -msys-lib \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtargetAlign({"-mtarget-align"}, + " -mtarget-align \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-target-align")); + +maplecl::Option oMtas({"-mtas"}, + " -mtas \tGenerate the tas.b opcode for __atomic_test_and_set. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtbm({"-mtbm"}, + " -mtbm \tThese switches enable the use of instructions in the mtbm.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtda({"-mtda"}, + " -mtda \tPut static or global variables whose size is n bytes or less " + "into the tiny data area that register ep points to.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtelephony({"-mtelephony"}, + " -mtelephony \tPassed down to the assembler to enable dual- and " + "single-operand instructions for telephony. \n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtextSectionLiterals({"-mtext-section-literals"}, + " -mtext-section-literals \t\n", + {driverCategory, unSupCategory}, + maplecl::DisableWith("--mno-text-section-literals")); + +maplecl::Option oMtf({"-mtf"}, + " -mtf \tCauses all functions to default to the .far section.\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMthread({"-mthread"}, + " -mthread \tThis option is available for MinGW targets. 
\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMthreads({"-mthreads"}, + " -mthreads \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMthumb({"-mthumb"}, + " -mthumb \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMthumbInterwork({"-mthumb-interwork"}, + " -mthumb-interwork \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtinyStack({"-mtiny-stack"}, + " -mtiny-stack \t\n", + {driverCategory, unSupCategory}); + +maplecl::Option oMtiny({"-mtiny"}, + " -mtiny \tVariables that are n bytes or smaller are allocated to " + "the .tiny section.\n", + {driverCategory, unSupCategory}); } diff --git a/src/mapleall/maple_ipa/include/ipa_collect.h b/src/mapleall/maple_ipa/include/ipa_collect.h index 1c0e6c1eb046b2355882c5e5d6c86b6991273108..d12f2ed386285f6445aaf87c8e8fb0f94b57d107 100644 --- a/src/mapleall/maple_ipa/include/ipa_collect.h +++ b/src/mapleall/maple_ipa/include/ipa_collect.h @@ -62,12 +62,12 @@ class CollectIpaInfo { bool CollectBrImportantExpression(const MeStmt &meStmt, uint32 &index) const; void TransformStmtToIntegerSeries(MeStmt &meStmt); DefUsePositions &GetDefUsePositions(OriginalSt &ost, StmtInfoId position); - void CollectDefUsePosition(ScalarMeExpr &var, StmtInfoId position, + void CollectDefUsePosition(ScalarMeExpr &scalar, StmtInfoId position, std::unordered_set &cycleCheck); void CollectJumpInfo(MeStmt &meStmt); void SetLabel(size_t currStmtInfoId, LabelIdx label); StmtInfoId GetRealFirstStmtInfoId(BB &bb); - void TraverseMeExpr(MeExpr &meExpr, StmtInfoId position, + void TraverseMeExpr(MeExpr &meExpr, StmtInfoId stmtInfoId, std::unordered_set &cycleCheck); void TraverseMeStmt(MeStmt &meStmt); bool CollectSwitchImportantExpression(const MeStmt &meStmt, uint32 &index) const; @@ -94,7 +94,7 @@ class CollectIpaInfo { return ++currNewStmtIndex; } - uint GetTotalStmtInfoCount() { + uint GetTotalStmtInfoCount() const { return currNewStmtIndex; } diff --git 
a/src/mapleall/maple_ipa/include/ipa_phase_manager.h b/src/mapleall/maple_ipa/include/ipa_phase_manager.h index 9c3d61f68af5b9b76b33871cd6a87c220728b98a..25fd4ba8a5f6d3ffa68f65bb03ae7d91f876d129 100644 --- a/src/mapleall/maple_ipa/include/ipa_phase_manager.h +++ b/src/mapleall/maple_ipa/include/ipa_phase_manager.h @@ -30,7 +30,9 @@ class IpaSccPM : public SccPM { void Init(MIRModule &m); bool PhaseRun(MIRModule &m) override; PHASECONSTRUCTOR(IpaSccPM); - ~IpaSccPM() override {} + ~IpaSccPM() override { + ipaInfo = nullptr; + } std::string PhaseName() const override; CollectIpaInfo *GetResult() { return ipaInfo; diff --git a/src/mapleall/maple_ipa/include/ipa_side_effect.h b/src/mapleall/maple_ipa/include/ipa_side_effect.h index 079528d6acb9775b3c7880b2ccc857b88becc085..42a53a5e9a6ab726eeffec22beef02fabae367c1 100644 --- a/src/mapleall/maple_ipa/include/ipa_side_effect.h +++ b/src/mapleall/maple_ipa/include/ipa_side_effect.h @@ -28,6 +28,7 @@ class SideEffect { vstsValueAliasWithFormal.resize(std::min(meFunc->GetMirFunc()->GetFormalCount(), kMaxParamCount)); } ~SideEffect() { + callGraph = nullptr; alias = nullptr; dom = nullptr; meFunc = nullptr; @@ -54,6 +55,7 @@ class SideEffect { void SolveVarArgs(MeFunction &f) const; void CollectFormalOst(MeFunction &f); void CollectAllLevelOst(size_t vstIdx, std::set &result); + void FilterComplicatedPrametersForNoGlobalAccess(MeFunction &f); std::set> analysisLater; std::vector> vstsValueAliasWithFormal; diff --git a/src/mapleall/maple_ipa/include/old/ea_connection_graph.h b/src/mapleall/maple_ipa/include/old/ea_connection_graph.h index 6a4de2f388f11d13c0045bf655c03b8ef207b7ab..e1b346eb06711252f031086bb45b370cfaf57d3c 100644 --- a/src/mapleall/maple_ipa/include/old/ea_connection_graph.h +++ b/src/mapleall/maple_ipa/include/old/ea_connection_graph.h @@ -406,7 +406,7 @@ class EACGPointerNode : public EACGBaseNode { EACGPointerNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, 
int i, int indirectL) : EACGBaseNode(md, alloc, kPointerNode, ec, expr, initialEas, i), indirectLevel(indirectL) {}; - ~EACGPointerNode() = default; + ~EACGPointerNode() override = default; void SetLocation(Location *loc) { this->locInfo = loc; @@ -474,7 +474,7 @@ class EACGObjectNode : public EACGBaseNode { (void)pointsBy.insert(this); (void)pointsTo.insert(this); }; - ~EACGObjectNode() = default; + ~EACGObjectNode() override = default; bool IsPhantom() const { return isPhantom; }; @@ -559,7 +559,7 @@ class EACGRefNode : public EACGBaseNode { isStaticField(isS), sym(nullptr), version(0) {}; - ~EACGRefNode() = default; + ~EACGRefNode() override = default; bool IsStaticRef() const { return isStaticField; }; @@ -606,7 +606,7 @@ class EACGFieldNode : public EACGBaseNode { (void)belongsTo.insert(bt); }; - ~EACGFieldNode() = default; + ~EACGFieldNode() override = default; FieldID GetFieldID() const { return fieldID; @@ -665,7 +665,7 @@ class EACGActualNode : public EACGBaseNode { isPhantom(isPh), argIdx(aI), callSiteInfo(callSite) {}; - ~EACGActualNode() = default; + ~EACGActualNode() override = default; bool IsReturn() const { return isReturn; diff --git a/src/mapleall/maple_ipa/include/region_identify.h b/src/mapleall/maple_ipa/include/region_identify.h index 58ff3dd572441d227e2879c132d0b16cb044f429..c7eff637c389b227b04306791bb99abfef7cbbb7 100644 --- a/src/mapleall/maple_ipa/include/region_identify.h +++ b/src/mapleall/maple_ipa/include/region_identify.h @@ -37,7 +37,9 @@ class RegionCandidate { public: RegionCandidate(StmtInfoId startId, StmtInfoId endId, StmtInfo *start, StmtInfo *end, MIRFunction* function) : startId(startId), endId(endId), start(start), end(end), function(function), length(endId - startId + 1) {} - virtual ~RegionCandidate() = default; + virtual ~RegionCandidate() { + function = nullptr; + } void CollectRegionInputAndOutput(StmtInfo &stmtInfo, CollectIpaInfo &ipaInfo); const bool HasDefinitionOutofRegion(DefUsePositions &defUse) const; bool 
HasJumpOutOfRegion(StmtInfo &stmtInfo, bool isStart); @@ -153,7 +155,7 @@ class RegionCandidate { } } - bool IsOverlapWith(RegionCandidate &rhs) { + bool IsOverlapWith(RegionCandidate &rhs) const { return (startId >= rhs.GetStartId() && startId <= rhs.GetEndId()) || (rhs.GetStartId() >= startId && rhs.GetStartId() <= endId); } @@ -164,7 +166,7 @@ class RegionCandidate { } template - void TraverseRegion(Functor processor) { + void TraverseRegion(Functor processor) const { auto &stmtList = start->GetCurrBlock()->GetStmtNodes(); auto begin = StmtNodes::iterator(start->GetStmtNode()); for (auto it = begin; it != stmtList.end() && it->GetStmtInfoId() <= endId ; ++it) { @@ -210,7 +212,7 @@ class RegionGroup { return groups; } - uint32 GetGroupId() { + uint32 GetGroupId() const { return groupId; } @@ -228,14 +230,16 @@ class RegionGroup { private: std::vector groups; - uint32 groupId; + uint32 groupId = 0; int64 cost = 0; }; class RegionIdentify { public: explicit RegionIdentify(CollectIpaInfo *ipaInfo) : ipaInfo(ipaInfo) {} - virtual ~RegionIdentify() = default; + virtual ~RegionIdentify() { + ipaInfo = nullptr; + } void RegionInit(); std::vector &GetRegionGroups() { return regionGroups; diff --git a/src/mapleall/maple_ipa/include/stmt_identify.h b/src/mapleall/maple_ipa/include/stmt_identify.h index 7b7258682e12e3ce9c0480b5698a24319938db0e..9624dcdfeec35ded8dbb0b5dcb5408529026414c 100644 --- a/src/mapleall/maple_ipa/include/stmt_identify.h +++ b/src/mapleall/maple_ipa/include/stmt_identify.h @@ -55,7 +55,9 @@ class StmtInfo { CreateHashCandidate(); } } - virtual ~StmtInfo() = default; + virtual ~StmtInfo() { + stmt = nullptr; + } bool IsValid() { switch (hashCandidate[0]) { @@ -113,24 +115,24 @@ class StmtInfo { MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx()); hashCandidate.emplace_back(static_cast(type)->GetPointedType()->GetPrimType()); hashCandidate.emplace_back(ivar.GetFieldID()); - valid &= ivar.GetFieldID() == 0; + valid &= 
(ivar.GetFieldID() == 0); } if (meExpr.GetMeOp() == kMeOpVar) { auto &var = static_cast(meExpr); hashCandidate.emplace_back(var.GetFieldID()); - valid &= var.GetFieldID() == 0; + valid &= (var.GetFieldID() == 0); } if (meExpr.GetMeOp() == kMeOpAddrof) { auto &addr = static_cast(meExpr); hashCandidate.emplace_back(addr.GetFieldID()); - valid &= addr.GetFieldID() == 0; + valid &= (addr.GetFieldID() == 0); } if (meExpr.GetMeOp() == kMeOpOp) { auto &opExpr = static_cast(meExpr); hashCandidate.emplace_back(opExpr.GetFieldID()); hashCandidate.emplace_back(opExpr.GetBitsOffSet()); hashCandidate.emplace_back(opExpr.GetBitsSize()); - valid &= opExpr.GetFieldID() == 0; + valid &= (opExpr.GetFieldID() == 0); } for (auto i = 0; i < meExpr.GetNumOpnds(); ++i) { GetExprHashCandidate(*meExpr.GetOpnd(i)); diff --git a/src/mapleall/maple_ipa/src/ipa_side_effect.cpp b/src/mapleall/maple_ipa/src/ipa_side_effect.cpp index f609243d360710e5bbeb82729e96583b5294dc4c..3d6460c9b38cee97511c76fd8db91ad2d330f31a 100644 --- a/src/mapleall/maple_ipa/src/ipa_side_effect.cpp +++ b/src/mapleall/maple_ipa/src/ipa_side_effect.cpp @@ -97,15 +97,7 @@ void SideEffect::PropParamInfoFromCallee(const MeStmt &call, MIRFunction &callee void SideEffect::PropAllInfoFromCallee(const MeStmt &call, MIRFunction &callee) { const FuncDesc &desc = callee.GetFuncDesc(); - if (!desc.IsPure() && !desc.IsConst()) { - curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); - } - if (desc.IsPure()) { - curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure); - } - if (desc.IsConst()) { - curFuncDesc->SetFuncInfoNoBetterThan(FI::kConst); - } + curFuncDesc->SetFuncInfoNoBetterThan(desc.GetFuncInfo()); PropParamInfoFromCallee(call, callee); } @@ -138,6 +130,9 @@ void SideEffect::DealWithMayUse(MeStmt &stmt) { } void SideEffect::DealWithStmt(MeStmt &stmt) { + if (stmt.GetOp() == OP_asm) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + } for (size_t i = 0; i < stmt.NumMeStmtOpnds(); ++i) { DealWithOperand(stmt.GetOpnd(i)); } @@ 
-366,7 +361,7 @@ void SideEffect::AnalysisFormalOst() { } if (meExpr->GetMeOp() == kMeOpAddrof) { curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kUnknown); - curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kNoDirectGlobleAccess); continue; } CHECK_FATAL(meExpr->IsScalar(), "must be me scalar"); @@ -382,7 +377,7 @@ void SideEffect::AnalysisFormalOst() { } else { analysisLater.insert(std::make_pair(ost, formalIndex)); curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kWriteMemoryOnly); - curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kNoDirectGlobleAccess); } continue; } @@ -392,13 +387,39 @@ void SideEffect::AnalysisFormalOst() { curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure); } else { curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kUnknown); - curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kNoDirectGlobleAccess); } } } } } +inline static bool IsComplicatedType(const MIRType &type) { + return type.IsIncomplete() || type.GetPrimType() == PTY_agg; +} + +void SideEffect::FilterComplicatedPrametersForNoGlobalAccess(MeFunction &f) { + if (!curFuncDesc->NoDirectGlobleAccess()) { + return; + } + for (auto &formalDef : f.GetMirFunc()->GetFormalDefVec()) { + auto *formalSym = formalDef.formalSym; + auto *formalType = formalSym->GetType(); + if (IsComplicatedType(*formalType)) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + break; + } + if (!formalType->IsMIRPtrType()) { + continue; + } + auto *pointToType = static_cast(formalType)->GetPointedType(); + if (IsComplicatedType(*pointToType) || pointToType->IsMIRPtrType()) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + break; + } + } +} + bool SideEffect::Perform(MeFunction &f) { MIRFunction *func = f.GetMirFunc(); curFuncDesc = &func->GetFuncDesc(); @@ -416,6 +437,7 @@ bool SideEffect::Perform(MeFunction &f) { DealWithStmt(stmt); } } + 
FilterComplicatedPrametersForNoGlobalAccess(f); return !curFuncDesc->Equals(oldDesc); } diff --git a/src/mapleall/maple_ir/include/all_attributes.def b/src/mapleall/maple_ir/include/all_attributes.def index 6a5b61379913024e9a136cd40f8ddace14d9a1e9..e46585d1637671d2a6d856ca260f29be22b47ce7 100644 --- a/src/mapleall/maple_ir/include/all_attributes.def +++ b/src/mapleall/maple_ir/include/all_attributes.def @@ -92,6 +92,10 @@ ATTR(incomplete_array) ATTR(may_alias) ATTR(static_init_zero) + ATTR(local_exec) + ATTR(initial_exec) + ATTR(local_dynamic) + ATTR(global_dynamic) #endif #ifdef FUNC_ATTR ATTR(firstarg_return) @@ -99,6 +103,7 @@ #endif #ifdef STMT_ATTR ATTR(insaferegion) + ATTR(mayTailcall) #endif ATTR(oneelem_simd) ATTR(nonnull) diff --git a/src/mapleall/maple_ir/include/bin_mpl_export.h b/src/mapleall/maple_ir/include/bin_mpl_export.h index ddc962cbb51d2594dec31f66d30f7bd0725f06e6..77b6857622ff69c953df2f52e0de05a7bb7162ec 100644 --- a/src/mapleall/maple_ir/include/bin_mpl_export.h +++ b/src/mapleall/maple_ir/include/bin_mpl_export.h @@ -127,7 +127,7 @@ class BinaryMplExport { void OutputPragmaVec(const std::vector &pragmaVec); void OutputClassTypeData(const MIRClassType &type); void OutputSymbol(MIRSymbol *sym); - void OutputEnumeration(MIREnum *mirEnum); + void OutputEnumeration(const MIREnum *mirEnum); void OutputFunction(PUIdx puIdx); void OutputInterfaceTypeData(const MIRInterfaceType &type); void OutputSrcPos(const SrcPosition &pos); diff --git a/src/mapleall/maple_ir/include/bin_mpl_import.h b/src/mapleall/maple_ir/include/bin_mpl_import.h index bc3ba10fb8fbe9c3e4ae0330c8c438e3eee64e03..42d9ebd1bb291f4a1bb678c0272c9f5df06688ea 100644 --- a/src/mapleall/maple_ir/include/bin_mpl_import.h +++ b/src/mapleall/maple_ir/include/bin_mpl_import.h @@ -90,7 +90,7 @@ class BinaryMplImport { void Reset(); void SkipTotalSize(); void ImportFieldsOfStructType(FieldVector &fields, uint32 methodSize); - MIRType &InsertInTypeTables(MIRType &ptype); + MIRType 
&InsertInTypeTables(MIRType &type); void InsertInHashTable(MIRType &type); void SetupEHRootType(); void UpdateMethodSymbols(); @@ -128,7 +128,7 @@ class BinaryMplImport { void ImportFuncIdInfo(MIRFunction *func); void ImportEnumeration(); MIRSymbol *ImportLocalSymbol(MIRFunction *func); - PregIdx ImportPreg(MIRFunction *func); + PregIdx ImportPreg(const MIRFunction *func); LabelIdx ImportLabel(MIRFunction *func); void ImportFormalsStIdx(MIRFunction *func); void ImportAliasMap(MIRFunction *func); diff --git a/src/mapleall/maple_ir/include/cmpl.h b/src/mapleall/maple_ir/include/cmpl.h index 90a85c6736575c82a563b9d7aa9d36d71b35d746..c01f2f6af01b6266d349bef9577691da373f1dc5 100644 --- a/src/mapleall/maple_ir/include/cmpl.h +++ b/src/mapleall/maple_ir/include/cmpl.h @@ -96,7 +96,7 @@ struct MirModuleT { PUIdx mainFuncID; // the entry function; 0 if no main function uint32 numFuncs; // because puIdx 0 is reserved, numFuncs is also the highest puIdx MirFuncT **funcs; // list of all funcs in the module. 
-#if 1 // the js2mpl buld always set HAVE_MMAP to 1 // binmir file mmap info +#if defined(HAVE_MMAP) && HAVE_MMAP == 1 // the js2mpl buld always set HAVE_MMAP to 1 // binmir file mmap info int binMirImageFd; // file handle for mmap #endif // HAVE_MMAP void *binMirImageStart; // binimage memory start diff --git a/src/mapleall/maple_ir/include/debug_info.h b/src/mapleall/maple_ir/include/debug_info.h index f36613c17e26f7737178d31c1ed0065e9adc6b0c..aaefe343e02358dff431777bf336af9badb9e814 100644 --- a/src/mapleall/maple_ir/include/debug_info.h +++ b/src/mapleall/maple_ir/include/debug_info.h @@ -91,6 +91,13 @@ using DwAte = uint32; // for DW_ATE_* using DwForm = uint32; // for DW_FORM_* using DwCfa = uint32; // for DW_CFA_* +const char *GetDwTagName(unsigned n); +const char *GetDwFormName(unsigned n); +const char *GetDwAtName(unsigned n); +const char *GetDwOpName(unsigned n); +const char *GetDwAteName(unsigned n); +DwAte GetAteFromPTY(PrimType pty); + class DBGDieAttr; class DBGExpr { @@ -341,7 +348,7 @@ class DBGDie { virtual ~DBGDie() {} void AddSubVec(DBGDie *die); void AddAttr(DBGDieAttr *attr); - void AddAttr(DwAt at, DwForm form, uint64 val, bool keep = true); + void AddAttr(DwAt at, DwForm form, uint64 val, bool keepFlag = true); void AddSimpLocAttr(DwAt at, DwForm form, DwOp op, uint64 val); void AddGlobalLocAttr(DwAt at, DwForm form, uint64 val); void AddFrmBaseAttr(DwAt at, DwForm form); @@ -697,7 +704,7 @@ class DebugInfo { return parentDieStack.size(); } - void SetErrPos(uint32 lnum, uint32 cnum) { + void SetErrPos(uint32 lnum, uint32 cnum) const { compileMsg->SetErrPos(lnum, cnum); } @@ -782,7 +789,7 @@ class DebugInfo { DBGDie *GetLocalDie(GStrIdx strIdx); LabelIdx GetLabelIdx(GStrIdx strIdx); - LabelIdx GetLabelIdx(MIRFunction *func, GStrIdx strIdx); + LabelIdx GetLabelIdx(MIRFunction *func, const GStrIdx &strIdx) const; void SetLabelIdx(const GStrIdx &strIdx, LabelIdx labIdx); void SetLabelIdx(MIRFunction *func, const GStrIdx &strIdx, LabelIdx labIdx); 
void InsertBaseTypeMap(const std::string &inputName, const std::string &outpuName, PrimType type); @@ -814,12 +821,12 @@ class DebugInfo { DBGDie *CreateVarDie(MIRSymbol *sym); DBGDie *CreateVarDie(MIRSymbol *sym, const GStrIdx &strIdx); // use alt name DBGDie *CreateFormalParaDie(MIRFunction *func, uint32 idx, bool isDef); - DBGDie *CreateFieldDie(maple::FieldPair pair); + DBGDie *CreateFieldDie(const maple::FieldPair &pair); DBGDie *CreateBitfieldDie(const MIRBitFieldType *type, const GStrIdx &sidx, uint32 &prevBits); void CreateStructTypeFieldsDies(const MIRStructType *structType, DBGDie *die); void CreateStructTypeParentFieldsDies(const MIRStructType *structType, DBGDie *die); void CreateStructTypeMethodsDies(const MIRStructType *structType, DBGDie *die); - DBGDie *CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *structType, bool update = false); + DBGDie *CreateStructTypeDie(const GStrIdx &strIdx, const MIRStructType *structType, bool update = false); DBGDie *CreateClassTypeDie(const GStrIdx &strIdx, const MIRClassType *classType); DBGDie *CreateInterfaceTypeDie(const GStrIdx &strIdx, const MIRInterfaceType *interfaceType); DBGDie *CreatePointedFuncTypeDie(MIRFuncType *fType); @@ -833,21 +840,21 @@ class DebugInfo { DBGDie *GetOrCreateTypeDie(TyIdx tyidx); DBGDie *GetOrCreateTypeDie(MIRType *type); DBGDie *GetOrCreateTypeDieWithAttr(AttrKind attr, DBGDie *typeDie); - DBGDie *GetOrCreateTypeDieWithAttr(TypeAttrs attrs, DBGDie *typeDie); + DBGDie *GetOrCreateTypeDieWithAttr(const TypeAttrs &attrs, DBGDie *typeDie); DBGDie *GetOrCreatePointTypeDie(const MIRPtrType *ptrType); DBGDie *GetOrCreateArrayTypeDie(const MIRArrayType *arrayType); DBGDie *GetOrCreateStructTypeDie(const MIRType *type); DBGDie *GetOrCreateTypedefDie(GStrIdx stridx, TyIdx tyidx); DBGDie *GetOrCreateEnumTypeDie(uint32 idx); - DBGDie *GetOrCreateEnumTypeDie(MIREnum *mirEnum); + DBGDie *GetOrCreateEnumTypeDie(const MIREnum *mirEnum); DBGDie *GetOrCreateTypeByNameDie(const MIRType &type); 
- GStrIdx GetPrimTypeCName(PrimType pty); + GStrIdx GetPrimTypeCName(PrimType pty) const; void AddScopeDie(MIRScope *scope); DBGDie *GetAliasVarTypeDie(const MIRAliasVars &aliasVar, TyIdx tyidx); void HandleTypeAlias(MIRScope &scope); - void AddAliasDies(MIRScope &scope, bool isLocal); + void AddAliasDies(const MIRScope &scope, bool isLocal); void CollectScopePos(MIRFunction *func, MIRScope *scope); // Functions for calculating the size and offset of each DW_TAG_xxx and DW_AT_xxx diff --git a/src/mapleall/maple_ir/include/dwarf.h b/src/mapleall/maple_ir/include/dwarf.h index c446eb5b292f8e9dae045beb59a4a8c2d6f38e7b..175fcd3279c71cfc307a4f3dbeefdb3f829fa100 100644 --- a/src/mapleall/maple_ir/include/dwarf.h +++ b/src/mapleall/maple_ir/include/dwarf.h @@ -17,7 +17,7 @@ #define MAPLE_IR_INCLUDE_DWARF_H enum Tag : uint16_t { -#define DW_TAG(ID, NAME) DW_TAG_##NAME = ID, +#define DW_TAG(ID, NAME) DW_TAG_##NAME = (ID), #include "dwarf.def" DW_TAG_lo_user = 0x4080, DW_TAG_hi_user = 0xffff, @@ -25,27 +25,27 @@ enum Tag : uint16_t { }; enum Attribute : uint16_t { -#define DW_AT(ID, NAME) DW_AT_##NAME = ID, +#define DW_AT(ID, NAME) DW_AT_##NAME = (ID), #include "dwarf.def" DW_AT_lo_user = 0x2000, DW_AT_hi_user = 0x3fff, }; enum Form : uint16_t { -#define DW_FORM(ID, NAME) DW_FORM_##NAME = ID, +#define DW_FORM(ID, NAME) DW_FORM_##NAME = (ID), #include "dwarf.def" DW_FORM_lo_user = 0x1f00, }; enum LocationAtom { -#define DW_OP(ID, NAME) DW_OP_##NAME = ID, +#define DW_OP(ID, NAME) DW_OP_##NAME = (ID), #include "dwarf.def" DW_OP_lo_user = 0xe0, DW_OP_hi_user = 0xff, }; enum TypeKind : uint8_t { -#define DW_ATE(ID, NAME) DW_ATE_##NAME = ID, +#define DW_ATE(ID, NAME) DW_ATE_##NAME = (ID), #include "dwarf.def" DW_ATE_lo_user = 0x80, DW_ATE_hi_user = 0xff @@ -59,7 +59,7 @@ enum AccessAttribute { enum SourceLanguage { -#define DW_LANG(ID, NAME, LOWER_BOUND) DW_LANG_##NAME = ID, +#define DW_LANG(ID, NAME, LOWER_BOUND) DW_LANG_##NAME = (ID), #include "dwarf.def" DW_LANG_lo_user = 
0x8000, DW_LANG_hi_user = 0xffff diff --git a/src/mapleall/maple_ir/include/func_desc.h b/src/mapleall/maple_ir/include/func_desc.h index 3cea54fb68613884d45569e81d82b9c785f3e5cf..a5c33b5a110f16e326477e22acaa23229e3b1050 100644 --- a/src/mapleall/maple_ir/include/func_desc.h +++ b/src/mapleall/maple_ir/include/func_desc.h @@ -19,12 +19,13 @@ namespace maple { enum class FI { kUnknown = 0, + kNoDirectGlobleAccess, // no global memory access without parameters kPure, // means this function will not modify any global memory. kConst, // means this function will not read/modify any global memory. }; static std::string kFIStr[] = { - "kUnknown", "kPure", "kConst" + "kUnknown", "kNoDirectGlobleAccess", "kPure", "kConst" }; enum class RI { @@ -114,6 +115,10 @@ struct FuncDesc { return funcInfo == FI::kPure; } + bool NoDirectGlobleAccess() const { + return funcInfo == FI::kNoDirectGlobleAccess; + } + bool IsReturnNoAlias() const { return returnInfo == RI::kNoAlias; } @@ -164,6 +169,10 @@ struct FuncDesc { funcInfo = fi; } + const FI &GetFuncInfo() const { + return funcInfo; + } + void SetFuncInfoNoBetterThan(const FI fi) { auto oldValue = static_cast(funcInfo); auto newValue = static_cast(fi); diff --git a/src/mapleall/maple_ir/include/global_tables.h b/src/mapleall/maple_ir/include/global_tables.h index a7c3516ef4c8237df5caf23802ab5c8b73c8fa3c..f54d35025b88cfedf1a168613271649916ab467b 100644 --- a/src/mapleall/maple_ir/include/global_tables.h +++ b/src/mapleall/maple_ir/include/global_tables.h @@ -29,6 +29,7 @@ #include "mir_type.h" #include "mir_const.h" #include "mir_enum.h" +#include "int128_util.h" namespace maple { using TyIdxFieldAttrPair = std::pair; @@ -72,11 +73,30 @@ class IntConstKey { TyIdx tyIdx; }; +class Int128ConstKey { + friend class IntConstHash; + friend class IntConstCmp; + + public: + Int128ConstKey(const Int128ElemTy *v, TyIdx tyIdx) : tyIdx(tyIdx) { + Int128Util::CopyInt128(val, v); + } + virtual ~Int128ConstKey() {} + + private: + Int128Arr val = 
{0, 0}; + TyIdx tyIdx; +}; + class IntConstHash { public: std::size_t operator() (const IntConstKey &key) const { return std::hash{}(key.val) ^ (std::hash{}(static_cast(key.tyIdx)) << 1); } + std::size_t operator()(const Int128ConstKey &key) const { + return std::hash{}(key.val[0]) ^ (std::hash{}(key.val[1])) ^ + (std::hash{}(static_cast(key.tyIdx)) << 1); + } }; class IntConstCmp { @@ -84,6 +104,9 @@ class IntConstCmp { bool operator() (const IntConstKey &lkey, const IntConstKey &rkey) const { return lkey.val == rkey.val && lkey.tyIdx == rkey.tyIdx; } + bool operator()(const Int128ConstKey &lkey, const Int128ConstKey &rkey) const { + return lkey.val[0] == rkey.val[0] && lkey.val[1] == rkey.val[1]; + } }; class TypeTable { @@ -211,6 +234,16 @@ class TypeTable { return typeTable.at(PTY_u64); } + MIRType *GetInt128() const { + ASSERT(PTY_i128 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i128); + } + + MIRType *GetUInt128() const { + ASSERT(PTY_u128 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u128); + } + MIRType *GetPtr() const { ASSERT(PTY_ptr < typeTable.size(), "array index out of range"); return typeTable.at(PTY_ptr); @@ -413,6 +446,10 @@ class TypeTable { MIRArrayType *GetOrCreateArrayType(const MIRType &elem, uint32 size, const TypeAttrs &attrs = TypeAttrs()); MIRType *GetOrCreateFarrayType(const MIRType &elem); MIRType *GetOrCreateJarrayType(const MIRType &elem); + MIRType *GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::vector &vecType, + const std::vector &vecAttrs, + const FuncAttrs &funcAttrs, + const TypeAttrs &retAttrs = TypeAttrs()); MIRType *GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::vector &vecType, const std::vector &vecAttrs, bool isVarg = false, const TypeAttrs &retAttrs = TypeAttrs()); @@ -465,7 +502,7 @@ class TypeTable { void CreateMirTypeNodeAt(MIRType &pType, TyIdx tyIdxUsed, MIRModule *module, bool isObject, bool isIncomplete); MIRType 
*CreateAndUpdateMirTypeNode(MIRType &pType); - MIRType *GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields, const FieldVector &printFields, + MIRType *GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields, const FieldVector &parentFields, MIRModule &module, bool forStruct = true, const TypeAttrs &attrs = TypeAttrs()); MIRType *GetOrCreateClassOrInterface(const std::string &name, MIRModule &module, bool forClass); @@ -619,10 +656,10 @@ class FPConstTable { void PostInit(); MIRFloatConst *DoGetOrCreateFloatConst(float floatVal); MIRDoubleConst *DoGetOrCreateDoubleConst(double doubleVal); - MIRFloat128Const *DoGetOrCreateFloat128Const(const uint64_t*); + MIRFloat128Const *DoGetOrCreateFloat128Const(const uint64_t *v); MIRFloatConst *DoGetOrCreateFloatConstThreadSafe(float floatVal); MIRDoubleConst *DoGetOrCreateDoubleConstThreadSafe(double doubleVal); - MIRFloat128Const *DoGetOrCreateFloat128ConstThreadSafe(const uint64_t*); + MIRFloat128Const *DoGetOrCreateFloat128ConstThreadSafe(const uint64_t *v); std::shared_timed_mutex floatMtx; std::shared_timed_mutex doubleMtx; std::shared_timed_mutex ldoubleMtx; @@ -664,8 +701,11 @@ class IntConstTable { IntConstTable() = default; MIRIntConst *DoGetOrCreateIntConst(uint64 val, MIRType &type); MIRIntConst *DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type); + MIRIntConst *DoGetOrCreateInt128Const(const Int128ElemTy *pVal, MIRType &type); + MIRIntConst *DoGetOrCreateInt128ConstTreadSafe(const Int128ElemTy *pVal, MIRType &type); std::shared_timed_mutex mtx; std::unordered_map intConstTable; + std::unordered_map int128ConstTable; }; // STypeNameTable is only used to store class and interface types. 
diff --git a/src/mapleall/maple_ir/include/intrinsic_c.def b/src/mapleall/maple_ir/include/intrinsic_c.def index 2f7dc4c772b40ba6c803f80c3b1e200edb62d294..1ba0e73e514dac164c9360ae12f8eb9bcf3842cc 100644 --- a/src/mapleall/maple_ir/include/intrinsic_c.def +++ b/src/mapleall/maple_ir/include/intrinsic_c.def @@ -84,6 +84,8 @@ DEF_MIR_INTRINSIC(C_ffs,\ "ffs", 4, INTRNISPURE, kArgTyI32, kArgTyI32) DEF_MIR_INTRINSIC(C_fabsl,\ "fabsl", 1, INTRNISPURE, kArgTyF128, kArgTyF128) +DEF_MIR_INTRINSIC(C_fmaxl, "fmaxl", 1, INTRNISPURE, kArgTyF128, kArgTyF128, kArgTyF128) +DEF_MIR_INTRINSIC(C_fminl, "fminl", 1, INTRNISPURE, kArgTyF128, kArgTyF128, kArgTyF128) DEF_MIR_INTRINSIC(C_va_start,\ "va_start", 10,\ INTRNISPURE | INTRNISSPECIAL | INTRNWRITEFIRSTOPND | INTRNREADSECONDOPND, \ @@ -327,3 +329,9 @@ DEF_MIR_INTRINSIC(C___atomic_compare_exchange_n,\ "__atomic_compare_exchange_n", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyDynany, kArgTyU1, kArgTyI32, kArgTyI32) DEF_MIR_INTRINSIC(C___atomic_compare_exchange,\ "__atomic_compare_exchange", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyU1, kArgTyI32, kArgTyI32) + + +DEF_MIR_INTRINSIC(C___tls_get_tbss_anchor,\ + "__tls_get_tbss_anchor", 5, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyU64) +DEF_MIR_INTRINSIC(C___tls_get_tdata_anchor,\ + "__tls_get_tdata_anchor", 5, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyU64) \ No newline at end of file diff --git a/src/mapleall/maple_ir/include/intrinsic_vector.def b/src/mapleall/maple_ir/include/intrinsic_vector.def index e451ee412b8f9700c057d2de693f481435d8a49e..a89666126610c57f5e895b913f101ecce9607649 100644 --- a/src/mapleall/maple_ir/include/intrinsic_vector.def +++ b/src/mapleall/maple_ir/include/intrinsic_vector.def @@ -60,17 +60,17 @@ DEF_MIR_INTRINSIC(vector_abs_v2f64, "vector_abs_v2f64", // The result element is half the width of the operand element, and values are saturated to the result width. 
// The results are the same type as the operands. DEF_MIR_INTRINSIC(vector_mov_narrow_v8u16, "vector_mov_narrow_v8u16", 1, - INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV8U8, kArgTyV8U16) + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, kArgTyV8U16) DEF_MIR_INTRINSIC(vector_mov_narrow_v4u32, "vector_mov_narrow_v4u32", 1, - INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV4U16, kArgTyV4U32) + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, kArgTyV4U32) DEF_MIR_INTRINSIC(vector_mov_narrow_v2u64, "vector_mov_narrow_v2u64", 1, - INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV2U32, kArgTyV2U64) + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, kArgTyV2U64) DEF_MIR_INTRINSIC(vector_mov_narrow_v8i16, "vector_mov_narrow_v8i16", 1, - INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV8I8, kArgTyV8I16) + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, kArgTyV8I16) DEF_MIR_INTRINSIC(vector_mov_narrow_v4i32, "vector_mov_narrow_v4i32", 1, - INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV4I16, kArgTyV4I32) + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, kArgTyV4I32) DEF_MIR_INTRINSIC(vector_mov_narrow_v2i64, "vector_mov_narrow_v2i64", 1, - INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV2I32, kArgTyV2I64) + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, kArgTyV2I64) // vecTy vector_addl_low(vecTy src1, vecTy src2) // Add each element of the source vector to second source diff --git a/src/mapleall/maple_ir/include/intrinsics.h b/src/mapleall/maple_ir/include/intrinsics.h index 91e4249dbe6dabdc4fa8cd6b68f9d7d0faef0032..d0a89f448cc8ca1cc955d8a6de4802d2844397d9 100644 --- a/src/mapleall/maple_ir/include/intrinsics.h +++ b/src/mapleall/maple_ir/include/intrinsics.h @@ -18,7 +18,7 @@ #include "intrinsic_op.h" namespace maple { -enum IntrinProperty { +enum IntrinProperty : uint32 { kIntrnUndef, kIntrnIsJs, kIntrnIsJsUnary, @@ -53,7 +53,7 @@ enum 
IntrinProperty { kIntrnReadSixthOpnd, }; -enum IntrinArgType { +enum IntrinArgType : uint32 { kArgTyUndef, kArgTyVoid, kArgTyI8, diff --git a/src/mapleall/maple_ir/include/java_eh_lower.h b/src/mapleall/maple_ir/include/java_eh_lower.h index 57fcc92309edf69ab5cdd45aeb37a400d3d18965..8f7b912bdaea69848f74b1ccf02733c0e45c8af4 100644 --- a/src/mapleall/maple_ir/include/java_eh_lower.h +++ b/src/mapleall/maple_ir/include/java_eh_lower.h @@ -22,7 +22,7 @@ namespace maple { class JavaEHLowerer : public FuncOptimizeImpl { public: JavaEHLowerer(MIRModule &mod, KlassHierarchy *kh, bool dump) : FuncOptimizeImpl(mod, kh, dump) {} - ~JavaEHLowerer() = default; + ~JavaEHLowerer() override = default; FuncOptimizeImpl *Clone() override { return new JavaEHLowerer(*this); diff --git a/src/mapleall/maple_ir/include/lexer.h b/src/mapleall/maple_ir/include/lexer.h index 09dc31a929cc10d122036be8228a221a0801eaca..95f3e6e82d2b96fd950aa3e2738805b1ac4cf76b 100644 --- a/src/mapleall/maple_ir/include/lexer.h +++ b/src/mapleall/maple_ir/include/lexer.h @@ -20,6 +20,7 @@ #include "tokens.h" #include "mempool_allocator.h" #include "mir_module.h" +#include "mpl_int_val.h" namespace maple { class MIRParser; // circular dependency exists, no other choice @@ -29,6 +30,7 @@ class MIRLexer { public: MIRLexer(DebugInfo *debugInfo, MapleAllocator &alloc); ~MIRLexer() { + dbgInfo = nullptr; airFile = nullptr; if (airFileInternal.is_open()) { airFileInternal.close(); @@ -56,6 +58,10 @@ class MIRLexer { return name; } + IntVal GetTheInt128Val() const { + return theInt128Val; + } + uint64 GetTheIntVal() const { return theIntVal; } @@ -79,6 +85,7 @@ class MIRLexer { DebugInfo *dbgInfo = nullptr; // for storing the different types of constant values int64 theIntVal = 0; // also indicates preg number under TK_preg + IntVal theInt128Val; float theFloatVal = 0.0; double theDoubleVal = 0.0; uint64 theLongDoubleVal[2] {0x0ULL, 0x0ULL}; @@ -124,7 +131,7 @@ class MIRLexer { TokenKind 
GetTokenWithPrefixDoubleQuotation(); TokenKind GetTokenSpecial(); - void UpdateDbgMsg(uint32 lineNum); + void UpdateDbgMsg(uint32 dbgLineNum); char GetCharAt(uint32 idx) const { return line[idx]; diff --git a/src/mapleall/maple_ir/include/metadata_layout.h b/src/mapleall/maple_ir/include/metadata_layout.h index 86b975177a1a4d1174f28ac721e4783d70d274a7..9b3ffcb6dec1e330d1407d627a6eb1a87f5e7712 100644 --- a/src/mapleall/maple_ir/include/metadata_layout.h +++ b/src/mapleall/maple_ir/include/metadata_layout.h @@ -15,6 +15,7 @@ #ifndef METADATA_LAYOUT_H #define METADATA_LAYOUT_H #include +#include // metadata layout is shared between maple compiler and runtime, thus not in namespace maplert // some of the reference field of metadata is stored as relative offset @@ -345,7 +346,7 @@ struct ClassMetadata { // Note 1: here we don't do NULL-check and type-compatibility check // NOte 2: C should be of jclass/ClassMetata* type template -static inline void MRTSetMetadataShadow(M *meta, C cls) { +inline void MRTSetMetadataShadow(M *meta, C cls) { meta->shadow = static_cast(reinterpret_cast(cls)); } diff --git a/src/mapleall/maple_ir/include/mir_builder.h b/src/mapleall/maple_ir/include/mir_builder.h index 5fa1c63502ec240651f6a8a446c7d9816ba521e7..4918980162b34edd78b9e4125ee50f358394bb51 100644 --- a/src/mapleall/maple_ir/include/mir_builder.h +++ b/src/mapleall/maple_ir/include/mir_builder.h @@ -102,8 +102,8 @@ class MIRBuilder { MIRFunction *GetOrCreateFunction(const std::string &str, TyIdx retTyIdx); MIRFunction *GetFunctionFromSymbol(const MIRSymbol &funcSymbol) const; - MIRFunction *GetFunctionFromStidx(StIdx stIdx); - MIRFunction *GetFunctionFromName(const std::string &str); + MIRFunction *GetFunctionFromStidx(const StIdx &stIdx) const; + MIRFunction *GetFunctionFromName(const std::string &str) const; // For compiler-generated metadata struct void AddIntFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, int64 constValue) const; void 
AddAddrofFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, @@ -152,6 +152,7 @@ class MIRBuilder { // for creating Expression ConstvalNode *CreateConstval(MIRConst *mirConst); ConstvalNode *CreateIntConst(uint64 val, PrimType pty); + ConstvalNode *CreateInt128Const(const Int128ElemTy *value, PrimType pty); ConstvalNode *CreateFloatConst(float val); ConstvalNode *CreateDoubleConst(double val); ConstvalNode *CreateFloat128Const(const uint64 *val); @@ -339,7 +340,7 @@ class MIRBuilder { class MIRBuilderExt : public MIRBuilder { public: explicit MIRBuilderExt(MIRModule *module, pthread_mutex_t *mutex = nullptr); - virtual ~MIRBuilderExt() = default; + ~MIRBuilderExt() override = default; void SetCurrentFunction(MIRFunction &func) override { curFunction = &func; diff --git a/src/mapleall/maple_ir/include/mir_const.h b/src/mapleall/maple_ir/include/mir_const.h index 497861aee1a23be8e3a1bc76b074b3af783a31db..11463d3a190f849c2440810298bd002947274b06 100644 --- a/src/mapleall/maple_ir/include/mir_const.h +++ b/src/mapleall/maple_ir/include/mir_const.h @@ -98,7 +98,9 @@ class MIRIntConst : public MIRConst { MIRIntConst(uint64 val, MIRType &type) : MIRConst(type, kConstInt), value(val, type.GetPrimType()) {} - MIRIntConst(const IntVal &val, MIRType &type) : MIRConst(type, kConstInt), value(val) { + MIRIntConst(const uint64 *pVal, MIRType &type) : MIRConst(type, kConstInt), value(pVal, type.GetPrimType()) {} + + MIRIntConst(const IntVal &val, MIRType &type) : MIRConst(type, kConstInt), value(val, type.GetPrimType()) { [[maybe_unused]] PrimType pType = type.GetPrimType(); ASSERT(IsPrimitiveInteger(pType) && GetPrimTypeActualBitSize(pType) <= value.GetBitWidth(), "Constant is tried to be constructed with non-integral type or bit-width is not appropriate for it"); @@ -107,7 +109,7 @@ class MIRIntConst : public MIRConst { MIRIntConst(MIRType &type) : MIRConst(type, kConstInvalid) {} /// @return number of used bits in the value - uint8 
GetActualBitWidth() const; + uint16 GetActualBitWidth() const; void Trunc(uint8 width) { value.TruncInPlace(width); @@ -174,7 +176,7 @@ class MIRAddrofConst : public MIRConst { MIRAddrofConst(StIdx sy, FieldID fi, MIRType &ty, int32 ofst) : MIRConst(ty, kConstAddrof), stIdx(sy), fldID(fi), offset(ofst) {} - ~MIRAddrofConst() = default; + ~MIRAddrofConst() override = default; StIdx GetSymbolIndex() const { return stIdx; @@ -211,7 +213,7 @@ class MIRAddroffuncConst : public MIRConst { MIRAddroffuncConst(PUIdx idx, MIRType &ty) : MIRConst(ty, kConstAddrofFunc), puIdx(idx) {} - ~MIRAddroffuncConst() = default; + ~MIRAddroffuncConst() override = default; PUIdx GetValue() const { return puIdx; @@ -234,7 +236,7 @@ class MIRLblConst : public MIRConst { MIRLblConst(LabelIdx val, PUIdx pidx, MIRType &type) : MIRConst(type, kConstLblConst), value(val), puIdx(pidx) {} - ~MIRLblConst() = default; + ~MIRLblConst() override = default; void Dump(const MIRSymbolTable *localSymTab) const override; bool operator==(const MIRConst &rhs) const override; @@ -262,7 +264,7 @@ class MIRStrConst : public MIRConst { MIRStrConst(const std::string &str, MIRType &type); - ~MIRStrConst() = default; + ~MIRStrConst() override = default; void Dump(const MIRSymbolTable *localSymTab) const override; bool operator==(const MIRConst &rhs) const override; @@ -289,7 +291,7 @@ class MIRStr16Const : public MIRConst { MIRStr16Const(const U16StrIdx &val, MIRType &type) : MIRConst(type, kConstStr16Const), value(val) {} MIRStr16Const(const std::u16string &str, MIRType &type); - ~MIRStr16Const() = default; + ~MIRStr16Const() override = default; static PrimType GetPrimType() { return kPrimType; @@ -318,25 +320,25 @@ class MIRFloatConst : public MIRConst { value.floatValue = val; } - ~MIRFloatConst() = default; + ~MIRFloatConst() override = default; std::pair GetFloat128Value() const { // check special values - if (std::isinf(value.floatValue) && ((static_cast(value.intValue) & (1ull << 31)) >> 31) == 0x0) { + if 
(std::isinf(value.floatValue) && ((static_cast(value.intValue) & (1U << 31)) >> 31) == 0x0) { return {0x7fff000000000000, 0x0}; - } else if (std::isinf(value.floatValue) && ((static_cast(value.intValue) & (1ull << 31)) >> 31) == 0x1) { + } else if (std::isinf(value.floatValue) && ((static_cast(value.intValue) & (1U << 31)) >> 31) == 0x1) { return {0xffff000000000000, 0x0}; - } else if ((static_cast(value.intValue) ^ (0x1 << 31)) == 0x0) { + } else if ((static_cast(value.intValue) ^ (0x1 << 31)) == 0x0) { return {0x8000000000000000, 0x0}; - } else if ((static_cast(value.intValue) ^ 0x0) == 0x0) { + } else if ((static_cast(value.intValue) ^ 0x0) == 0x0) { return {0x0, 0x0}; } else if (std::isnan(value.floatValue)) { return {0x7fff800000000000, 0x0}; } - uint64 sign = (static_cast(value.intValue) & (1ull << 31)) >> 31; - uint64 exp = (static_cast(value.intValue) & (0x7f800000)) >> 23; - uint64 mantiss = static_cast(value.intValue) & (0x007fffff); + uint64 sign = (static_cast(value.intValue) & (1U << 31)) >> 31; + uint64 exp = (static_cast(value.intValue) & (0x7f800000)) >> 23; + uint64 mantiss = static_cast(value.intValue) & (0x007fffff); const int float_exp_offset = 0x7f; const int float_min_exp = -0x7e; @@ -355,7 +357,7 @@ class MIRFloatConst : public MIRConst { uint64 ldouble_exp = float_min_exp - (num_pos + 1) + ldouble_exp_offset; int num_ldouble_mantiss_bits = float_mantiss_bits - static_cast((num_pos + 1)); - uint64 ldouble_mantiss_mask = (1 << num_ldouble_mantiss_bits) - 1; + uint64 ldouble_mantiss_mask = (1 << static_cast(num_ldouble_mantiss_bits)) - 1; uint64 ldouble_mantiss = mantiss & ldouble_mantiss_mask; uint64 high_byte = (sign << 63 | (ldouble_exp << 48) | (ldouble_mantiss << (25 + num_pos + 1))); uint64 low_byte = 0; @@ -429,7 +431,7 @@ class MIRDoubleConst : public MIRConst { value.dValue = val; } - ~MIRDoubleConst() = default; + ~MIRDoubleConst() override = default; uint32 GetIntLow32() const { auto unsignVal = static_cast(value.intValue); @@ 
-459,14 +461,14 @@ class MIRDoubleConst : public MIRConst { uint64 exp = (static_cast(value.intValue) & (0x7ff0000000000000)) >> 52; uint64 mantiss = static_cast(value.intValue) & (0x000fffffffffffff); - const int double_exp_offset = 0x3ff; - const int double_min_exp = -0x3fe; + const int32 double_exp_offset = 0x3ff; + const int32 double_min_exp = -0x3fe; const int double_mantiss_bits = 52; - const int ldouble_exp_offset = 0x3fff; + const int32 ldouble_exp_offset = 0x3fff; if (exp > 0x0 && exp < 0x7ff) { - uint64 ldouble_exp = static_cast(static_cast(exp) - double_exp_offset + ldouble_exp_offset); + uint64 ldouble_exp = static_cast(static_cast(exp) - double_exp_offset + ldouble_exp_offset); uint64 ldouble_mantiss_first_bits = mantiss >> 4; uint64 ldouble_mantiss_second_bits = (mantiss & 0xf) << 60; @@ -477,10 +479,10 @@ class MIRDoubleConst : public MIRConst { int num_pos = 0; for (; ((mantiss >> (51 - num_pos)) & 0x1) != 1; ++num_pos) {}; - uint64 ldouble_exp = static_cast(double_min_exp - (num_pos + 1) + ldouble_exp_offset); + uint64 ldouble_exp = static_cast(double_min_exp - (num_pos + 1) + ldouble_exp_offset); int num_ldouble_mantiss_bits = double_mantiss_bits - (num_pos + 1); - uint64 ldouble_mantiss_mask = (1ull << num_ldouble_mantiss_bits) - 1; + uint64 ldouble_mantiss_mask = (1ULL << num_ldouble_mantiss_bits) - 1; uint64 ldouble_mantiss = mantiss & ldouble_mantiss_mask; uint64 ldouble_mantiss_high_bits = 0; if (4 - (num_pos + 1) > 0) { @@ -564,7 +566,7 @@ class MIRFloat128Const : public MIRConst { val[1] = val_[1]; } - ~MIRFloat128Const() = default; + ~MIRFloat128Const() override = default; const unsigned int *GetWordPtr() const { union ValPtrs { @@ -605,7 +607,7 @@ class MIRFloat128Const : public MIRConst { }; // if long double value is too huge to be represented in double, then return inf if (GetExponent() - ldouble_exp_offset > double_max_exp) { - return GetSign() ? 
-std::numeric_limits::infinity() : std::numeric_limits::infinity(); + return GetSign() != 0 ? -std::numeric_limits::infinity() : std::numeric_limits::infinity(); } // if long double value us too small to be represented in double, then return 0.0 else if (GetExponent() - ldouble_exp_offset < double_min_exp - double_mantissa_bits) { @@ -618,7 +620,8 @@ class MIRFloat128Const : public MIRConst { * and then with '|' add remain 4 bits to get full double mantiss */ uint64 double_mantiss = ((val[0] & 0x0000ffffffffffff) << 4) | (val[1] >> 60); - uint64 double_exp = static_cast(GetExponent() - ldouble_exp_offset + double_exp_offset); + uint64 double_exp = static_cast(static_cast(GetExponent() - + ldouble_exp_offset + double_exp_offset)); uint64 double_sign = GetSign(); union HexVal data; data.doubleHex = (double_sign << (k64BitSize - 1)) | (double_exp << double_mantissa_bits) | double_mantiss; @@ -684,8 +687,7 @@ class MIRFloat128Const : public MIRConst { return static_cast(val); } - void SetValue(long double val) const { - (void)val; + void SetValue(long double /* val */) const { CHECK_FATAL(false, "Cant't use This Interface with This Object"); } @@ -702,7 +704,7 @@ class MIRAggConst : public MIRConst { constVec(mod.GetMPAllocator().Adapter()), fieldIdVec(mod.GetMPAllocator().Adapter()) {} - ~MIRAggConst() = default; + ~MIRAggConst() override = default; MIRConst *GetAggConstElement(unsigned int fieldId) { for (size_t i = 0; i < fieldIdVec.size(); ++i) { @@ -813,7 +815,7 @@ class MIRStConst : public MIRConst { return res; } - ~MIRStConst() = default; + ~MIRStConst() override = default; private: MapleVector stVec; // symbols that in the st const @@ -828,4 +830,4 @@ bool IsDivSafe(const MIRIntConst& dividend, const MIRIntConst& divisor, PrimType #define LOAD_SAFE_CAST_FOR_MIR_CONST #include "ir_safe_cast_traits.def" -#endif // MAPLE_IR_INCLUDE_MIR_CONST_H +#endif // MAPLE_IR_INCLUDE_MIR_CONST_H \ No newline at end of file diff --git a/src/mapleall/maple_ir/include/mir_enum.h 
b/src/mapleall/maple_ir/include/mir_enum.h index a7ad83a7b832f3bda1fe83fb23ddc0a9889f7d85..03184c04e2365830c295dc76ffbc3b95a9f280e1 100644 --- a/src/mapleall/maple_ir/include/mir_enum.h +++ b/src/mapleall/maple_ir/include/mir_enum.h @@ -15,9 +15,9 @@ #ifndef MAPLEIR_INCLUDE_MIR_ENUMERATION_H #define MAPLEIR_INCLUDE_MIR_ENUMERATION_H +#include #include "cfg_primitive_types.h" #include "mpl_int_val.h" -#include namespace maple { @@ -29,16 +29,16 @@ class MIREnum { ~MIREnum() = default; void NewElement(GStrIdx sidx, IntVal value) { - elements.push_back(EnumElem(sidx, value)); + elements.emplace_back(EnumElem(sidx, value)); } void AddNextElement(GStrIdx sidx) { if (elements.empty()) { - elements.push_back(EnumElem(sidx, IntVal(static_cast(0), primType))); + elements.emplace_back(EnumElem(sidx, IntVal(static_cast(0), primType))); return; } IntVal newValue = elements.back().second + 1; - elements.push_back(EnumElem(sidx, newValue)); + elements.emplace_back(EnumElem(sidx, newValue)); } void SetPrimType(PrimType pt) { diff --git a/src/mapleall/maple_ir/include/mir_function.h b/src/mapleall/maple_ir/include/mir_function.h index 59a1a9337d1fc81ffbdec37ada432c9cf918bd38..b7dd4c490db7f473ef08074c42a27e2fdc10b9c1 100644 --- a/src/mapleall/maple_ir/include/mir_function.h +++ b/src/mapleall/maple_ir/include/mir_function.h @@ -194,7 +194,7 @@ class MIRFunction { void UpdateFuncTypeAndFormals(const std::vector &symbols, bool clearOldArgs = false); void UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, - bool clearOldArgs = false); + bool clearOldArgs = false, bool firstArgRet = false); LabelIdx GetOrCreateLableIdxFromName(const std::string &name); GStrIdx GetLabelStringIndex(LabelIdx labelIdx) const { CHECK_FATAL(labelTab != nullptr, "labelTab is nullptr"); @@ -437,8 +437,6 @@ class MIRFunction { void SetStructReturnedInRegs(); bool StructReturnedInRegs() const; - void SetReturnStruct(const MIRType *retType); - bool IsEmpty() const; bool 
IsClinit() const; uint32 GetInfo(GStrIdx strIdx) const; @@ -630,7 +628,7 @@ class MIRFunction { CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); return typeNameTab->GetTyIdxFromGStrIdx(idx); } - void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) { + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) const { CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); typeNameTab->SetGStrIdxToTyIdx(gStrIdx, tyIdx); } @@ -658,9 +656,7 @@ class MIRFunction { pregTab = module->GetMemPool()->New(&module->GetMPAllocator()); } } - MIRPreg *GetPregItem(PregIdx idx) { - return const_cast(const_cast(this)->GetPregItem(idx)); - } + const MIRPreg *GetPregItem(PregIdx idx) const { return pregTab->PregFromPregIdx(idx); } @@ -675,7 +671,7 @@ class MIRFunction { body = node; } - bool HasBody() { + bool HasBody() const { return body != nullptr; } @@ -1425,7 +1421,7 @@ class MIRFunction { // count; the bitvector's size is given by // BlockSize2BitvectorSize(frameSize) // removed. label table size - // lbl2stmt table, removed; + // lbl2stmt table, removed, // to hold unmangled class and function names MeFunction *meFunc = nullptr; EAConnectionGraph *eacg = nullptr; diff --git a/src/mapleall/maple_ir/include/mir_lower.h b/src/mapleall/maple_ir/include/mir_lower.h index 44f1713276eb0564ff2c57a77e92748980a646d9..f74a4cab651f7923ca5e1f4a5ed323539d401d0a 100644 --- a/src/mapleall/maple_ir/include/mir_lower.h +++ b/src/mapleall/maple_ir/include/mir_lower.h @@ -14,7 +14,6 @@ */ #ifndef MAPLE_IR_INCLUDE_MIR_LOWER_H #define MAPLE_IR_INCLUDE_MIR_LOWER_H -#include #include "mir_builder.h" #include "opcodes.h" @@ -148,7 +147,7 @@ class MIRLower { FuncProfInfo *GetFuncProfData() const { return mirFunc->GetFuncProfData(); } - void CopyStmtFrequency(StmtNode *newStmt, StmtNode *oldStmt) { + void CopyStmtFrequency(const StmtNode *newStmt, const StmtNode *oldStmt) { ASSERT(GetFuncProfData() != nullptr, "nullptr check"); if (newStmt == oldStmt) { return; diff --git 
a/src/mapleall/maple_ir/include/mir_module.h b/src/mapleall/maple_ir/include/mir_module.h index b5b024126c7b469be7efcb8aacf8df06e2fce6e1..54d92933290ef163bbcb53953fa16d1b2c321ce5 100644 --- a/src/mapleall/maple_ir/include/mir_module.h +++ b/src/mapleall/maple_ir/include/mir_module.h @@ -120,7 +120,7 @@ class ImpExpr { // blksize gives the size of the memory block in bytes; there are (blksize+3)/4 // words; 1 bit for each word, so the bit vector's length in bytes is // ((blksize+3)/4+7)/8 -static inline uint32 BlockSize2BitVectorSize(uint32 blkSize) { +inline uint32 BlockSize2BitVectorSize(uint32 blkSize) { uint32 bitVectorLen = ((blkSize + 3) / 4 + 7) / 8; return ((bitVectorLen + 3) >> 2) << 2; // round up to word boundary } @@ -694,6 +694,39 @@ class MIRModule { bool HasNotWarned(uint32 position, uint32 stmtOriginalID); + // now we use the full path name as the anchor name. Maybe a better way later. + void SetTlsAnchorHashString() { + std::string fName = GetFileName(); + std::replace(fName.begin(), fName.end(), '.', '$'); + std::replace(fName.begin(), fName.end(), '/', '$'); + std::replace(fName.begin(), fName.end(), '-', '$'); + tlsAnchorHashString = fName; + } + + std::string &GetTlsAnchorHashString() { + return tlsAnchorHashString; + } + + MapleMap &GetTdataVarOffset() { + return tdataVarOffset; + } + MapleMap &GetTbssVarOffset() { + return tbssVarOffset; + } + + MIRSymbol *GetTdataAnchor() { + return tdataAnchor; + } + MIRSymbol *GetTbssAnchor() { + return tbssAnchor; + } + void SetTdataAnchor(MIRSymbol *st) { + tdataAnchor = st; + } + void SetTbssAnchor(MIRSymbol *st) { + tbssAnchor = st; + } + private: void DumpTypeTreeToCxxHeaderFile(MIRType &ty, std::unordered_set &dumpedClasses) const; @@ -789,6 +822,12 @@ class MIRModule { std::map>> calleeParamAboutDouble; std::map>> calleeParamAboutFloat; std::map> funcImportantExpr; + + std::string tlsAnchorHashString = ""; + MapleMap tdataVarOffset; + MapleMap tbssVarOffset; + MIRSymbol *tdataAnchor; + MIRSymbol 
*tbssAnchor; }; #endif // MIR_FEATURE_FULL } // namespace maple diff --git a/src/mapleall/maple_ir/include/mir_nodes.h b/src/mapleall/maple_ir/include/mir_nodes.h index 1df878e4f6debf5f38640c1a10fe4aee8935d9f4..d3ff224d121bdbf92df18387453fe0af7c8c0fbb 100644 --- a/src/mapleall/maple_ir/include/mir_nodes.h +++ b/src/mapleall/maple_ir/include/mir_nodes.h @@ -101,7 +101,7 @@ class BaseNode : public BaseNodeT { numOpnds = numOpr; } - virtual ~BaseNode() = default; + ~BaseNode() override = default; virtual BaseNode *CloneTree(MapleAllocator &allocator) const { return allocator.GetMemPool()->New(*this); @@ -202,7 +202,7 @@ class UnaryNode : public BaseNode { UnaryNode(Opcode o, PrimType typ, BaseNode *expr) : BaseNode(o, typ, 1), uOpnd(expr) {} - virtual ~UnaryNode() override = default; + ~UnaryNode() override = default; void DumpOpnd(const MIRModule &mod, int32 indent) const; void DumpOpnd(int32 indent) const; @@ -254,7 +254,7 @@ class TypeCvtNode : public UnaryNode { TypeCvtNode(Opcode o, PrimType typ, PrimType fromtyp, BaseNode *expr) : UnaryNode(o, typ, expr), fromPrimType(fromtyp) {} - virtual ~TypeCvtNode() = default; + ~TypeCvtNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -293,7 +293,7 @@ class RetypeNode : public TypeCvtNode { RetypeNode(PrimType typ, PrimType fromtyp, TyIdx idx, BaseNode *expr) : TypeCvtNode(OP_retype, typ, fromtyp, expr), tyIdx(idx) {} - virtual ~RetypeNode() = default; + ~RetypeNode() override = default; void Dump(int32 indent) const override; bool Verify(VerifyResult &verifyResult) const override; @@ -311,6 +311,8 @@ class RetypeNode : public TypeCvtNode { tyIdx = tyIdxVal; } + bool IsSameContent(const BaseNode *node) const override; + private: bool VerifyPrimTypesAndOpnd() const; bool CheckFromJarray(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const; @@ -350,7 +352,7 @@ class ExtractbitsNode : public UnaryNode { ExtractbitsNode(Opcode o, PrimType typ, uint8 offset, 
uint8 size, BaseNode *expr) : UnaryNode(o, typ, expr), bitsOffset(offset), bitsSize(size) {} - virtual ~ExtractbitsNode() = default; + ~ExtractbitsNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -377,6 +379,8 @@ class ExtractbitsNode : public UnaryNode { bitsSize = size; } + bool IsSameContent(const BaseNode *node) const override; + private: uint8 bitsOffset = 0; uint8 bitsSize = 0; @@ -388,7 +392,7 @@ class GCMallocNode : public BaseNode { GCMallocNode(Opcode o, PrimType typ, TyIdx tIdx) : BaseNode(o, typ, 0), tyIdx(tIdx) {} - virtual ~GCMallocNode() = default; + ~GCMallocNode() override = default; void Dump(int32 indent) const override; @@ -424,7 +428,7 @@ class JarrayMallocNode : public UnaryNode { JarrayMallocNode(Opcode o, PrimType typ, TyIdx typeIdx, BaseNode *opnd) : UnaryNode(o, typ, opnd), tyIdx(typeIdx) {} - virtual ~JarrayMallocNode() = default; + ~JarrayMallocNode() override = default; void Dump(int32 indent) const override; @@ -458,7 +462,7 @@ class IreadNode : public UnaryNode { IreadNode(Opcode o, PrimType typ, TyIdx typeIdx, FieldID fid, BaseNode *expr) : UnaryNode(o, typ, expr), tyIdx(typeIdx), fieldID(fid) {} - virtual ~IreadNode() = default; + ~IreadNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -515,7 +519,7 @@ class IreadoffNode : public UnaryNode { IreadoffNode(PrimType ptyp, BaseNode *opnd, int32 ofst) : UnaryNode(OP_ireadoff, ptyp, opnd), offset(ofst) {} - virtual ~IreadoffNode() = default; + ~IreadoffNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -546,7 +550,7 @@ class IreadFPoffNode : public BaseNode { IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp, 0), offset(ofst) {} - virtual ~IreadFPoffNode() = default; + ~IreadFPoffNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -615,7 +619,7 @@ class BinaryNode : 
public BaseNode, public BinaryOpnds { SetBOpnd(r, 1); } - virtual ~BinaryNode() = default; + ~BinaryNode() override = default; using BaseNode::Dump; void Dump(int32 indent) const override; @@ -676,7 +680,7 @@ class CompareNode : public BinaryNode { CompareNode(Opcode o, PrimType typ, PrimType otype, BaseNode *l, BaseNode *r) : BinaryNode(o, typ, l, r), opndType(otype) {} - virtual ~CompareNode() = default; + ~CompareNode() override = default; using BinaryNode::Dump; void Dump(int32 indent) const override; @@ -697,6 +701,14 @@ class CompareNode : public BinaryNode { opndType = type; } + bool IsSameContent(const BaseNode *node) const override { + if (!BinaryNode::IsSameContent(node)) { + return false; + } + auto compareNode = static_cast(node); + return opndType == compareNode->GetOpndType(); + } + private: PrimType opndType = kPtyInvalid; // type of operands. }; @@ -710,7 +722,7 @@ class DepositbitsNode : public BinaryNode { DepositbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size, BaseNode *l, BaseNode *r) : BinaryNode(o, typ, l, r), bitsOffset(offset), bitsSize(size) {} - virtual ~DepositbitsNode() = default; + ~DepositbitsNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -738,6 +750,14 @@ class DepositbitsNode : public BinaryNode { bitsSize = size; } + bool IsSameContent(const BaseNode *node) const override { + if (!BinaryNode::IsSameContent(node)) { + return false; + } + auto depositebitsNode = static_cast(node); + return bitsOffset == depositebitsNode->GetBitsOffset() && bitsSize == depositebitsNode->GetBitsSize(); + } + private: uint8 bitsOffset = 0; uint8 bitsSize = 0; @@ -757,7 +777,7 @@ class ResolveFuncNode : public BinaryNode { ResolveFuncNode(Opcode o, PrimType typ, PUIdx pIdx, BaseNode *opnd0, BaseNode *opnd1) : BinaryNode(o, typ, opnd0, opnd1), puIdx(pIdx) {} - virtual ~ResolveFuncNode() = default; + ~ResolveFuncNode() override = default; void Dump(int32 indent) const override; @@ -784,6 +804,14 
@@ class ResolveFuncNode : public BinaryNode { puIdx = idx; } + bool IsSameContent(const BaseNode *node) const override { + if (!BinaryNode::IsSameContent(node)) { + return false; + } + auto resolveFuncNode = static_cast(node); + return puIdx == resolveFuncNode->GetPuIdx(); + } + private: PUIdx puIdx = 0; }; @@ -800,7 +828,7 @@ class TernaryNode : public BaseNode { topnd[2] = e2; } - virtual ~TernaryNode() = default; + ~TernaryNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -895,7 +923,7 @@ class NaryNode : public BaseNode, public NaryOpnds { NaryNode(NaryNode &node) = delete; NaryNode &operator=(const NaryNode &node) = delete; - virtual ~NaryNode() = default; + ~NaryNode() override = default; void Dump(int32 indent) const override; @@ -956,7 +984,7 @@ class IntrinsicopNode : public NaryNode { IntrinsicopNode(IntrinsicopNode &node) = delete; IntrinsicopNode &operator=(const IntrinsicopNode &node) = delete; - virtual ~IntrinsicopNode() = default; + ~IntrinsicopNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -1008,7 +1036,7 @@ class ConstvalNode : public BaseNode { explicit ConstvalNode(MIRConst *constv) : BaseNode(OP_constval), constVal(constv) {} ConstvalNode(PrimType typ, MIRConst *constv) : BaseNode(OP_constval, typ, 0), constVal(constv) {} - virtual ~ConstvalNode() = default; + ~ConstvalNode() override = default; void Dump(int32 indent) const override; ConstvalNode *CloneTree(MapleAllocator &allocator) const override { @@ -1041,7 +1069,7 @@ class ConststrNode : public BaseNode { ConststrNode(PrimType typ, UStrIdx i) : BaseNode(OP_conststr, typ, 0), strIdx(i) {} - virtual ~ConststrNode() = default; + ~ConststrNode() override = default; void Dump(int32 indent) const override; bool IsSameContent(const BaseNode *node) const override; @@ -1070,7 +1098,7 @@ class Conststr16Node : public BaseNode { Conststr16Node(PrimType typ, U16StrIdx i) : BaseNode(OP_conststr16, 
typ, 0), strIdx(i) {} - virtual ~Conststr16Node() = default; + ~Conststr16Node() override = default; void Dump(int32 indent) const override; bool IsSameContent(const BaseNode *node) const override; @@ -1099,7 +1127,7 @@ class SizeoftypeNode : public BaseNode { SizeoftypeNode(PrimType type, TyIdx t) : BaseNode(OP_sizeoftype, type, 0), tyIdx(t) {} - virtual ~SizeoftypeNode() = default; + ~SizeoftypeNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -1129,7 +1157,7 @@ class FieldsDistNode : public BaseNode { FieldsDistNode(PrimType typ, TyIdx t, FieldID f1, FieldID f2) : BaseNode(OP_fieldsdist, typ, 0), tyIdx(t), fieldID1(f1), fieldID2(f2) {} - virtual ~FieldsDistNode() = default; + ~FieldsDistNode() override = default; void Dump(int32 indent) const override; @@ -1190,11 +1218,11 @@ class ArrayNode : public NaryNode { ArrayNode(ArrayNode &node) = delete; ArrayNode &operator=(const ArrayNode &node) = delete; - virtual ~ArrayNode() = default; + ~ArrayNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; - bool IsSameBase(ArrayNode *arry); + bool IsSameBase(const ArrayNode *arry) const; size_t NumOpnds() const override { ASSERT(numOpnds == GetNopndSize(), "ArrayNode has wrong numOpnds field"); @@ -1254,7 +1282,7 @@ class AddrofNode : public BaseNode { AddrofNode(Opcode o, PrimType typ, StIdx sIdx, FieldID fid) : BaseNode(o, typ, 0), stIdx(sIdx), fieldID(fid) {} - virtual ~AddrofNode() = default; + ~AddrofNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -1303,7 +1331,7 @@ class DreadoffNode : public BaseNode { DreadoffNode(Opcode o, PrimType typ) : BaseNode(o, typ, 0), stIdx() {} - virtual ~DreadoffNode() = default; + ~DreadoffNode() override = default; void Dump(int32 indent) const override; @@ -1333,7 +1361,7 @@ class RegreadNode : public BaseNode { ptyp = primType; } - virtual ~RegreadNode() = default; + ~RegreadNode() override 
= default; void Dump(int32 indent) const override; bool Verify() const override; @@ -1361,7 +1389,7 @@ class AddroffuncNode : public BaseNode { AddroffuncNode(PrimType typ, PUIdx pIdx) : BaseNode(OP_addroffunc, typ, 0), puIdx(pIdx) {} - virtual ~AddroffuncNode() = default; + ~AddroffuncNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -1390,7 +1418,7 @@ class AddroflabelNode : public BaseNode { explicit AddroflabelNode(uint32 ofst) : BaseNode(OP_addroflabel), offset(ofst) {} - virtual ~AddroflabelNode() = default; + ~AddroflabelNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -1446,7 +1474,7 @@ class StmtNode : public BaseNode, public PtrListNodeBase { ++stmtIDNext; } - virtual ~StmtNode() = default; + ~StmtNode() override = default; using BaseNode::Dump; void DumpBase(int32 indent) const override; @@ -1544,19 +1572,27 @@ class StmtNode : public BaseNode, public PtrListNodeBase { this->stmtAttrs.AppendAttr(stmtAttr.GetTargetAttrFlag(STMTATTR_insaferegion)); } + void SetMayTailcall(bool flag = true) { + stmtAttrs.SetAttr(STMTATTR_mayTailcall, flag); + } + + bool GetMayTailCall() const { + return stmtAttrs.GetAttr(STMTATTR_mayTailcall); + } + const StmtAttrs &GetStmtAttrs() const { return stmtAttrs; } void SetStmtInfoId(size_t index) { - stmtInfoId = index; + stmtInfoId = static_cast(index); } const uint32 GetStmtInfoId() const { return stmtInfoId; } - bool operator==(const StmtNode &rhs) { + bool operator==(const StmtNode &rhs) const { return this == &rhs; } @@ -1582,7 +1618,7 @@ class IassignNode : public StmtNode { BaseNodeT::SetNumOpnds(kOperandNumBinary); } - virtual ~IassignNode() = default; + ~IassignNode() override = default; TyIdx GetTyIdx() const { return tyIdx; @@ -1680,7 +1716,7 @@ class GotoNode : public StmtNode { GotoNode(Opcode o, uint32 ofst) : StmtNode(o), offset(ofst) {} - virtual ~GotoNode() = default; + ~GotoNode() override = default; void 
Dump(int32 indent) const override; @@ -1710,7 +1746,7 @@ class JsTryNode : public StmtNode { JsTryNode(uint16 catchofst, uint16 finallyofset) : StmtNode(OP_jstry), catchOffset(catchofst), finallyOffset(finallyofset) {} - virtual ~JsTryNode() = default; + ~JsTryNode() override = default; void Dump(int32 indent) const override; @@ -1752,7 +1788,7 @@ class TryNode : public StmtNode { TryNode(TryNode &node) = delete; TryNode &operator=(const TryNode &node) = delete; - virtual ~TryNode() = default; + ~TryNode() override = default; using StmtNode::Dump; void Dump(int32 indent) const override; @@ -1824,7 +1860,7 @@ class CatchNode : public StmtNode { CatchNode(CatchNode &node) = delete; CatchNode &operator=(const CatchNode &node) = delete; - virtual ~CatchNode() = default; + ~CatchNode() override = default; using StmtNode::Dump; void Dump(int32 indent) const override; @@ -1876,7 +1912,7 @@ class CppCatchNode : public StmtNode { explicit CppCatchNode(const CppCatchNode &node) = delete; CppCatchNode &operator=(const CppCatchNode &node) = delete; - ~CppCatchNode() = default; + ~CppCatchNode() override = default; void Dump(int32 indent) const override; @@ -1919,7 +1955,7 @@ class SwitchNode : public StmtNode { SwitchNode(SwitchNode &node) = delete; SwitchNode &operator=(const SwitchNode &node) = delete; - virtual ~SwitchNode() = default; + ~SwitchNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2015,7 +2051,7 @@ class MultiwayNode : public StmtNode { MultiwayNode(MultiwayNode &node) = delete; MultiwayNode &operator=(const MultiwayNode &node) = delete; - virtual ~MultiwayNode() = default; + ~MultiwayNode() override = default; void Dump(int32 indent) const override; @@ -2069,7 +2105,7 @@ class UnaryStmtNode : public StmtNode { UnaryStmtNode(Opcode o, PrimType typ, BaseNode *opnd) : StmtNode(o, typ, 1), uOpnd(opnd) {} - virtual ~UnaryStmtNode() = default; + ~UnaryStmtNode() override = default; using StmtNode::Dump; void 
Dump(int32 indent) const override; @@ -2134,7 +2170,7 @@ class DassignNode : public UnaryStmtNode { DassignNode(BaseNode *opnd, StIdx idx, FieldID fieldID) : DassignNode(kPtyInvalid, opnd, idx, fieldID) {} - virtual ~DassignNode() = default; + ~DassignNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2202,7 +2238,7 @@ class DassignoffNode : public UnaryStmtNode { stIdx = lhsStIdx; offset = dOffset; } - virtual ~DassignoffNode() = default; + ~DassignoffNode() override = default; void Dump(int32 indent) const override; @@ -2253,7 +2289,7 @@ class RegassignNode : public UnaryStmtNode { RegassignNode(PrimType primType, PregIdx idx, BaseNode *opnd) : UnaryStmtNode(OP_regassign, primType, opnd), regIdx(idx) {} - virtual ~RegassignNode() = default; + ~RegassignNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2294,7 +2330,7 @@ class CondGotoNode : public UnaryStmtNode { BaseNodeT::SetNumOpnds(kOperandNumUnary); } - virtual ~CondGotoNode() = default; + ~CondGotoNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2360,7 +2396,7 @@ class RangeGotoNode : public UnaryStmtNode { RangeGotoNode(RangeGotoNode &node) = delete; RangeGotoNode &operator=(const RangeGotoNode &node) = delete; - virtual ~RangeGotoNode() = default; + ~RangeGotoNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2439,7 +2475,7 @@ class BlockNode : public StmtNode { const SrcPosition &inlinedPosition = SrcPosition()); BlockNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map &toFreqs, - std::unordered_map &fromFreqs, FreqType numer, uint64_t denom, + std::unordered_map &fromFreqs, FreqType numer, FreqType denom, uint32_t updateOp); bool IsEmpty() const { @@ -2503,10 +2539,11 @@ class BlockCallBack { if (data != nullptr) { data->Free(); } + callBack = nullptr; } void Invoke(const BlockNode 
&oldBlock, BlockNode &newBlock, - const StmtNode &oldStmt, StmtNode &newStmt) { + const StmtNode &oldStmt, StmtNode &newStmt) const { if (callBack != nullptr) { callBack(oldBlock, newBlock, oldStmt, newStmt, data); } @@ -2563,7 +2600,7 @@ class IfStmtNode : public UnaryStmtNode { numOpnds = kOperandNumTernary; } - virtual ~IfStmtNode() = default; + ~IfStmtNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2590,14 +2627,16 @@ class IfStmtNode : public UnaryStmtNode { FreqType oldFreq = fromFreqs[GetStmtID()]; FreqType newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * static_cast(numer / denom)) : oldFreq); toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? newFreq : 1; - if (updateOp & kUpdateOrigFreq) { + if ((updateOp & kUpdateOrigFreq) != 0) { FreqType left = ((oldFreq - newFreq) > 0 || oldFreq == 0) ? (oldFreq - newFreq) : 1; fromFreqs[GetStmtID()] = left; } } - node->thenPart = thenPart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + node->thenPart = thenPart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, static_cast(numer), + denom, updateOp); if (elsePart != nullptr) { - node->elsePart = elsePart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + node->elsePart = elsePart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, static_cast(numer), + denom, updateOp); } node->SetMeStmtID(GetMeStmtID()); return node; @@ -2652,7 +2691,7 @@ class WhileStmtNode : public UnaryStmtNode { BaseNodeT::SetNumOpnds(kOperandNumBinary); } - virtual ~WhileStmtNode() = default; + ~WhileStmtNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2675,13 +2714,13 @@ class WhileStmtNode : public UnaryStmtNode { FreqType newFreq = numer == 0 ? 0 : (denom > 0 ? static_cast(static_cast(oldFreq) * numer / denom) : oldFreq); toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? 
newFreq : 1; - if (updateOp & kUpdateOrigFreq) { + if ((updateOp & kUpdateOrigFreq) != 0) { FreqType left = (oldFreq - newFreq) > 0 ? (oldFreq - newFreq) : 1; fromFreqs[GetStmtID()] = left; } } node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); - node->body = body->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + node->body = body->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, static_cast(numer), denom, updateOp); return node; } @@ -2720,7 +2759,7 @@ class DoloopNode : public StmtNode { incrExpr(incrExp), doBody(doBody) {} - virtual ~DoloopNode() = default; + ~DoloopNode() override = default; void DumpDoVar(const MIRModule &mod) const; void Dump(int32 indent) const override; @@ -2764,7 +2803,8 @@ class DoloopNode : public StmtNode { node->SetStartExpr(startExpr->CloneTree(allocator)); node->SetContExpr(GetCondExpr()->CloneTree(allocator)); node->SetIncrExpr(GetIncrExpr()->CloneTree(allocator)); - node->SetDoBody(GetDoBody()->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + node->SetDoBody(GetDoBody()->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, static_cast(numer), + denom, updateOp)); return node; } @@ -2871,7 +2911,7 @@ class ForeachelemNode : public StmtNode { BaseNodeT::SetNumOpnds(kOperandNumUnary); } - virtual ~ForeachelemNode() = default; + ~ForeachelemNode() override = default; const StIdx &GetElemStIdx() const { return elemStIdx; @@ -2925,7 +2965,7 @@ class BinaryStmtNode : public StmtNode, public BinaryOpnds { public: explicit BinaryStmtNode(Opcode o) : StmtNode(o, kOperandNumBinary) {} - virtual ~BinaryStmtNode() = default; + ~BinaryStmtNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -2968,7 +3008,7 @@ class IassignoffNode : public BinaryStmtNode { SetBOpnd(srcOpnd, 1); } - virtual ~IassignoffNode() = default; + ~IassignoffNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -3005,7 +3045,7 @@ 
class IassignFPoffNode : public UnaryStmtNode { UnaryStmtNode::SetOpnd(src, 0); } - virtual ~IassignFPoffNode() = default; + ~IassignFPoffNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -3050,7 +3090,7 @@ class BlkassignoffNode : public BinaryStmtNode { SetBOpnd(dest, 0); SetBOpnd(src, 1); } - ~BlkassignoffNode() = default; + ~BlkassignoffNode() override = default; void Dump(int32 indent) const override; @@ -3106,7 +3146,7 @@ class NaryStmtNode : public StmtNode, public NaryOpnds { explicit NaryStmtNode(const NaryStmtNode &node) = delete; NaryStmtNode &operator=(const NaryStmtNode &node) = delete; - virtual ~NaryStmtNode() = default; + ~NaryStmtNode() override = default; void Dump(int32 indent) const override; bool Verify() const override; @@ -3164,6 +3204,10 @@ class SafetyCheckStmtNode { explicit SafetyCheckStmtNode(GStrIdx funcNameIdx) : funcNameIdx(funcNameIdx) {} SafetyCheckStmtNode(const SafetyCheckStmtNode &stmtNode) : funcNameIdx(stmtNode.GetFuncNameIdx()) {} SafetyCheckStmtNode &operator=(const SafetyCheckStmtNode &stmtNode) { + // self-assignment check + if (funcNameIdx == stmtNode.GetFuncNameIdx()) { + return *this; + } funcNameIdx = stmtNode.GetFuncNameIdx(); return *this; } @@ -3334,7 +3378,9 @@ class CallNode : public NaryStmtNode { CallNode(CallNode &node) = delete; CallNode &operator=(const CallNode &node) = delete; - virtual ~CallNode() = default; + ~CallNode() override { + enclosingBlk = nullptr; + } virtual void Dump(int32 indent, bool newline) const; bool Verify() const override; MIRType *GetCallReturnType() override; @@ -3451,7 +3497,7 @@ class IcallNode : public NaryStmtNode { IcallNode(IcallNode &node) = delete; IcallNode &operator=(const IcallNode &node) = delete; - virtual ~IcallNode() = default; + ~IcallNode() override = default; virtual void Dump(int32 indent, bool newline) const; bool Verify() const override; @@ -3535,7 +3581,7 @@ class IntrinsiccallNode : public NaryStmtNode { 
IntrinsiccallNode(IntrinsiccallNode &node) = delete; IntrinsiccallNode &operator=(const IntrinsiccallNode &node) = delete; - virtual ~IntrinsiccallNode() = default; + ~IntrinsiccallNode() override = default; virtual void Dump(int32 indent, bool newline) const; bool Verify() const override; @@ -3623,7 +3669,7 @@ class CallinstantNode : public CallNode { CallinstantNode(CallinstantNode &node) = delete; CallinstantNode &operator=(const CallinstantNode &node) = delete; - virtual ~CallinstantNode() = default; + ~CallinstantNode() override = default; void Dump(int32 indent, bool newline) const override; void Dump(int32 indent) const override { @@ -3656,7 +3702,7 @@ class LabelNode : public StmtNode { explicit LabelNode(LabelIdx idx) : StmtNode(OP_label), labelIdx(idx) {} - virtual ~LabelNode() = default; + ~LabelNode() override = default; void Dump(int32 indent) const override; @@ -3696,7 +3742,7 @@ class CommentNode : public StmtNode { CommentNode(CommentNode &node) = delete; CommentNode &operator=(const CommentNode &node) = delete; - virtual ~CommentNode() = default; + ~CommentNode() override = default; void Dump(int32 indent) const override; @@ -3757,7 +3803,7 @@ class AsmNode : public NaryStmtNode { gotoLabels(allocator.Adapter()), qualifiers(node.qualifiers) {} - virtual ~AsmNode() = default; + ~AsmNode() override = default; AsmNode *CloneTree(MapleAllocator &allocator) const override; diff --git a/src/mapleall/maple_ir/include/mir_parser.h b/src/mapleall/maple_ir/include/mir_parser.h index 5a92aaca84594b229125c98a256adf9aead0d389..80b05435cf87a91b860d26eeeda60c8ab2e9cb38 100644 --- a/src/mapleall/maple_ir/include/mir_parser.h +++ b/src/mapleall/maple_ir/include/mir_parser.h @@ -55,7 +55,7 @@ class MIRParser { bool ParseAliasStmt(StmtNodePtr &stmt); bool ParseTypeAlias(MIRScope &scope); uint8 *ParseWordsInfo(uint32 size); - bool ParseSwitchCase(int64&, LabelIdx&); + bool ParseSwitchCase(int64 &constVal, LabelIdx &lblIdx); bool ParseExprOneOperand(BaseNodePtr &expr); 
bool ParseExprTwoOperand(BaseNodePtr &opnd0, BaseNodePtr &opnd1); bool ParseExprNaryOperand(MapleVector &opndVec); @@ -77,7 +77,7 @@ class MIRParser { bool ParsePackAttrs(); bool ParseFieldAttrs(FieldAttrs &attrs); bool ParseFuncAttrs(FuncAttrs &attrs); - void SetAttrContent(FuncAttrs &attrs, FuncAttrKind x, const MIRLexer &lexer) const; + void SetAttrContent(FuncAttrs &attrs, FuncAttrKind x, const MIRLexer &mirLexer) const; bool CheckPrimAndDerivedType(TokenKind tokenKind, TyIdx &tyIdx); bool ParsePrimType(TyIdx &tyIdx); bool ParseFarrayType(TyIdx &arrayTyIdx); @@ -107,7 +107,7 @@ class MIRParser { bool ParseFunction(uint32 fileIdx = 0); bool ParseStorageClass(MIRSymbol &symbol) const; bool ParseDeclareVarInitValue(MIRSymbol &symbol); - bool ParseDeclareVar(MIRSymbol&); + bool ParseDeclareVar(MIRSymbol &symbol); bool ParseDeclareReg(MIRSymbol &symbol, const MIRFunction &func); bool ParseDeclareFormal(FormalDef &formalDef); bool ParsePrototypeRemaining(MIRFunction &func, std::vector &vecTyIdx, diff --git a/src/mapleall/maple_ir/include/mir_preg.h b/src/mapleall/maple_ir/include/mir_preg.h index 12d22eaa0bde5e6c0e58835b921ea58fae08ca6e..78bdf0d91bc96807aead72b5a2a2bb09f22fa9f4 100644 --- a/src/mapleall/maple_ir/include/mir_preg.h +++ b/src/mapleall/maple_ir/include/mir_preg.h @@ -18,6 +18,7 @@ #include #include "mir_module.h" #include "global_tables.h" +#include "opcodes.h" #endif // MIR_FEATURE_FULL namespace maple { @@ -32,7 +33,9 @@ enum SpecialReg : signed int { kSregMethodhdl = 5, kSregRetval0 = 6, kSregRetval1 = 7, - kSregLast = 8, + kSregRetval2 = 8, + kSregRetval3 = 9, + kSregLast = 10, }; #if MIR_FEATURE_FULL class MIRPreg { @@ -117,6 +120,8 @@ class MIRPregTable { specPregTable[kSregMethodhdl].SetPregNo(-kSregMethodhdl); specPregTable[kSregRetval0].SetPregNo(-kSregRetval0); specPregTable[kSregRetval1].SetPregNo(-kSregRetval1); + specPregTable[kSregRetval2].SetPregNo(-kSregRetval2); + specPregTable[kSregRetval3].SetPregNo(-kSregRetval3); for (uint32 i = 0; 
i < kSregLast; ++i) { specPregTable[i].SetPrimType(PTY_unknown); } diff --git a/src/mapleall/maple_ir/include/mir_scope.h b/src/mapleall/maple_ir/include/mir_scope.h index 7d404f42efa0d12ab44369aefbdab2866f6ddf5c..81008108265a7f13b6aedf1674fe608ff706c484 100644 --- a/src/mapleall/maple_ir/include/mir_scope.h +++ b/src/mapleall/maple_ir/include/mir_scope.h @@ -64,7 +64,9 @@ class MIRAlias { class MIRTypeAlias { public: explicit MIRTypeAlias(MIRModule *mod) : module(mod) {} - virtual ~MIRTypeAlias() = default; + virtual ~MIRTypeAlias() { + module = nullptr; + }; bool IsEmpty() const { return typeAliasMap.size() == 0; @@ -110,7 +112,7 @@ class MIRScope { bool IsSubScope(const MIRScope *scp) const; bool HasJoinScope(const MIRScope *scp1, const MIRScope *scp2) const; - bool HasSameRange(const MIRScope *s1, const MIRScope *s2) const; + bool HasSameRange(const MIRScope *scp1, const MIRScope *scp2) const; unsigned GetId() const { return id; @@ -139,7 +141,7 @@ class MIRScope { alias->SetAliasVarMap(idx, vars); } - MapleMap &GetAliasVarMap() { + MapleMap &GetAliasVarMap() const { return alias->GetAliasVarMap(); } @@ -158,7 +160,7 @@ class MIRScope { SrcPosition GetScopeEndPos(const SrcPosition &pos); bool AddScope(MIRScope *scope); - void SetTypeAliasMap(GStrIdx gStrIdx, TyIdx tyIdx) { + void SetTypeAliasMap(GStrIdx gStrIdx, TyIdx tyIdx) const { typeAlias->SetTypeAliasMap(gStrIdx, tyIdx); } diff --git a/src/mapleall/maple_ir/include/mir_symbol.h b/src/mapleall/maple_ir/include/mir_symbol.h index 089de74411095c409b5d453c522a35fffd1fe55e..ab02ad03733cb2c1fcbd83b39c8631194d8b4ea2 100644 --- a/src/mapleall/maple_ir/include/mir_symbol.h +++ b/src/mapleall/maple_ir/include/mir_symbol.h @@ -19,10 +19,10 @@ #include "mir_preg.h" #include "src_position.h" +namespace maple { constexpr int kScopeLocal = 2; // the default scope level for function variables constexpr int kScopeGlobal = 1; // the scope level for global variables -namespace maple { enum MIRSymKind { kStInvalid, kStVar, @@ 
-219,7 +219,7 @@ class MIRSymbol { bool IsTypeVolatile(int fieldID) const; - bool NeedGOT(bool isPIE) const; + bool NeedGOT(bool doPIE) const; bool IsThreadLocal() const { return typeAttrs.GetAttr(ATTR_tls_static) || typeAttrs.GetAttr(ATTR_tls_dynamic); @@ -270,17 +270,26 @@ class MIRSymbol { } bool IsReadOnly() const { - return kScFstatic == storageClass && kStConst == sKind; + return storageClass == kScFstatic && sKind == kStConst; } bool IsConst() const { return sKind == kStConst || (sKind == kStVar && value.konst != nullptr); } + bool IsHiddenVisibility() const { + return typeAttrs.GetAttr(ATTR_visibility_hidden); + } + bool IsDefaultVisibility() const { return !typeAttrs.GetAttr(ATTR_visibility_hidden) && !typeAttrs.GetAttr(ATTR_visibility_protected); } + bool IsDefaultTLSModel() const { + return IsThreadLocal() && !typeAttrs.GetAttr(ATTR_local_exec) && !typeAttrs.GetAttr(ATTR_initial_exec) && + !typeAttrs.GetAttr(ATTR_local_dynamic) && !typeAttrs.GetAttr(ATTR_global_dynamic); + } + MIRType *GetType() const; const std::string &GetName() const { @@ -386,6 +395,24 @@ class MIRSymbol { return GetName() == "__eh_index__"; } + // check symbol is a retVar + bool IsReturnVar() const { + if (storageClass != kScAuto) { + return false; + } + // the prefix of the retVar symbol is 'retVar_' + constexpr uint32 kRetVarPrefixLength = 7; + if (GetName().compare(0, kRetVarPrefixLength, "retVar_") != 0) { + return false; + } + // retVar symbol has only one underscope + constexpr uint32 kRetVarUnderscopeNum = 1; + if (std::count(GetName().begin(), GetName().end(), '_') != kRetVarUnderscopeNum) { + return false; + } + return true; + } + bool HasAddrOfValues() const; bool IsLiteral() const; bool IsLiteralPtr() const; diff --git a/src/mapleall/maple_ir/include/mir_symbol_builder.h b/src/mapleall/maple_ir/include/mir_symbol_builder.h index 866d2e2e3090f581b8d0bb3a2e714d3613b083ef..097d9d8ad7d6d4c4b64cb5c098b686051ae635a4 100644 --- 
a/src/mapleall/maple_ir/include/mir_symbol_builder.h +++ b/src/mapleall/maple_ir/include/mir_symbol_builder.h @@ -18,17 +18,7 @@ #include #include #include -#include "opcodes.h" -#include "prim_types.h" -#include "mir_type.h" -#include "mir_const.h" -#include "mir_symbol.h" -#include "mir_nodes.h" -#include "mir_module.h" -#include "mir_preg.h" -#include "mir_function.h" -#include "printing.h" -#include "intrinsic_op.h" +#include "mir_builder.h" #include "opcode_info.h" #include "global_tables.h" diff --git a/src/mapleall/maple_ir/include/mir_type.h b/src/mapleall/maple_ir/include/mir_type.h index 9e7b56e6d2f1ba32f3dbb39f59eefa261761b9e3..a442bdc618fc29f7fd92082d9ee788120a9f91bb 100644 --- a/src/mapleall/maple_ir/include/mir_type.h +++ b/src/mapleall/maple_ir/include/mir_type.h @@ -35,7 +35,12 @@ using TyIdxFieldAttrPair = std::pair; using FieldPair = std::pair; using FieldVector = std::vector; using MIRTypePtr = MIRType*; - +// if it is a bitfield, byteoffset gives the offset of the container for +// extracting the bitfield and bitoffset is with respect to the current byte +struct OffsetPair { + int32 byteOffset; + int32 bitOffset; +}; constexpr size_t kMaxArrayDim = 20; const std::string kJstrTypeName = "constStr"; constexpr uint32 kInvalidFieldNum = UINT32_MAX; @@ -53,6 +58,7 @@ extern const char *GetPrimTypeName(PrimType primType); extern const char *GetPrimTypeJavaName(PrimType primType); extern int64 MinValOfSignedInteger(PrimType primType); extern PrimType GetVecElemPrimType(PrimType primType); +// size in bits constexpr uint32 k0BitSize = 0; constexpr uint32 k1BitSize = 1; constexpr uint32 k2BitSize = 2; @@ -65,6 +71,19 @@ constexpr uint32 k10BitSize = 10; constexpr uint32 k16BitSize = 16; constexpr uint32 k32BitSize = 32; constexpr uint32 k64BitSize = 64; +// size in bytes +constexpr uint32 k0ByteSize = 0; +constexpr uint32 k1ByteSize = 1; +constexpr uint32 k2ByteSize = 2; +constexpr uint32 k3ByteSize = 3; +constexpr uint32 k4ByteSize = 4; +constexpr 
uint32 k5ByteSize = 5; +constexpr uint32 k8ByteSize = 8; +constexpr uint32 k9ByteSize = 9; +constexpr uint32 k10ByteSize = 10; +constexpr uint32 k16ByteSize = 16; +constexpr uint32 k32ByteSize = 32; +constexpr uint32 k64ByteSize = 64; inline const std::string kDbgLong = "long."; inline const std::string kDbgULong = "Ulong."; @@ -125,6 +144,10 @@ inline bool MustBeAddress(PrimType tp) { return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_a64 || tp == PTY_a32); } +inline bool IsInt128Ty(PrimType type) { + return type == PTY_u128 || type == PTY_i128; +} + inline bool IsPrimitivePureScalar(PrimType type) { PrimitiveType primitiveType(type); return primitiveType.IsInteger() && !primitiveType.IsAddress() && @@ -518,8 +541,12 @@ class StmtAttrs { StmtAttrs &operator=(const StmtAttrs &p) = default; ~StmtAttrs() = default; - void SetAttr(StmtAttrKind x) { - attrFlag |= (1u << static_cast(x)); + void SetAttr(StmtAttrKind x, bool flag = true) { + if (flag) { + attrFlag |= (1u << static_cast(x)); + } else { + attrFlag &= ~(1u << static_cast(x)); + } } bool GetAttr(StmtAttrKind x) const { @@ -1070,7 +1097,7 @@ class MIRArrayType : public MIRType { bool HasFields() const override; uint32 NumberOfFieldIDs() const override; MIRStructType *EmbeddedStructType() override; - size_t ElemNumber(); + size_t ElemNumber() const; private: TyIdx eTyIdx{ 0 }; @@ -1151,7 +1178,9 @@ class MIRStructType : public MIRType { MIRStructType(MIRTypeKind typeKind, GStrIdx strIdx) : MIRType(typeKind, PTY_agg, strIdx) {} - ~MIRStructType() override = default; + ~MIRStructType() override { + alias = nullptr; + } bool IsStructType() const override { return true; @@ -1523,6 +1552,8 @@ class MIRStructType : public MIRType { int64 GetBitOffsetFromBaseAddr(FieldID fieldID) const override; + OffsetPair GetFieldOffsetFromBaseAddr(FieldID fieldID) const; + bool HasPadding() const; void SetAlias(MIRAlias *mirAlias) { @@ -1532,6 +1563,8 @@ class MIRStructType : public MIRType { return alias; } + bool 
HasZeroWidthBitField() const; + protected: FieldVector fields{}; std::vector fieldInferredTyIdx{}; @@ -1559,8 +1592,10 @@ class MIRStructType : public MIRType { FieldPair TraverseToField(GStrIdx fieldStrIdx) const ; bool HasVolatileFieldInFields(const FieldVector &fieldsOfStruct) const; bool HasTypeParamInFields(const FieldVector &fieldsOfStruct) const; - int64 GetBitOffsetFromUnionBaseAddr(FieldID fieldID) const; - int64 GetBitOffsetFromStructBaseAddr(FieldID fieldID) const; + // compute the offset of the field given by fieldID within the struct type + OffsetPair GetFieldOffsetFromStructBaseAddr(FieldID fieldID) const; + // compute the offset of the field given by fieldID within the union type + OffsetPair GetFieldOffsetFromUnionBaseAddr(FieldID fieldID) const; MIRAlias *alias = nullptr; }; @@ -1925,8 +1960,19 @@ class MIRFuncType : public MIRType { explicit MIRFuncType(const GStrIdx &strIdx) : MIRType(kTypeFunction, PTY_ptr, strIdx) {} - MIRFuncType(const TyIdx &retTyIdx, const std::vector &vecTy, const std::vector &vecAt, + MIRFuncType(const TyIdx &retTyIdx, const std::vector &vecTy, + const std::vector &vecAt, const FuncAttrs &funcAttrsIn, const TypeAttrs &retAttrsIn) + : MIRType(kTypeFunction, PTY_ptr), + funcAttrs(funcAttrsIn), + retTyIdx(retTyIdx), + paramTypeList(vecTy), + paramAttrsList(vecAt), + retAttrs(retAttrsIn) {} + + // Deprecated + MIRFuncType(const TyIdx &retTyIdx, const std::vector &vecTy, + const std::vector &vecAt, const TypeAttrs &retAttrsIn) : MIRType(kTypeFunction, PTY_ptr), retTyIdx(retTyIdx), paramTypeList(vecTy), @@ -2004,6 +2050,7 @@ class MIRFuncType : public MIRType { return funcAttrs.GetAttr(FUNCATTR_varargs); } + // Deprecated, set this attribute during construction. void SetVarArgs() { funcAttrs.SetAttr(FUNCATTR_varargs); } @@ -2012,10 +2059,15 @@ class MIRFuncType : public MIRType { return funcAttrs.GetAttr(FUNCATTR_firstarg_return); } + // Deprecated, set this attribute during construction. 
void SetFirstArgReturn() { funcAttrs.SetAttr(FUNCATTR_firstarg_return); } + const FuncAttrs &GetFuncAttrs() const { + return funcAttrs; + } + const TypeAttrs &GetRetAttrs() const { return retAttrs; } @@ -2066,7 +2118,7 @@ class MIRTypeByName : public MIRType { size_t GetHashIndex() const override { constexpr uint8 idxShift = 2; uint8 nameIsLocalValue = nameIsLocal ? 1 : 0; - return ((static_cast(nameStrIdx) << idxShift) + nameIsLocalValue + (typeKind << kShiftNumOfTypeKind)) % + return ((static_cast(nameStrIdx) << idxShift) + nameIsLocalValue + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; } }; @@ -2197,6 +2249,14 @@ inline size_t GetTypeBitSize(const MIRType &type) { } MIRType *GetElemType(const MIRType &arrayType); + +#ifdef TARGAARCH64 +bool IsHomogeneousAggregates(const MIRType &ty, PrimType &primType, size_t &elemNum, + bool firstDepth = true); +#endif // TARGAARCH64 +bool IsParamStructCopyToMemory(const MIRType &ty); +bool IsReturnInMemory(const MIRType &ty); +void UpdateMIRFuncTypeFirstArgRet(); #endif // MIR_FEATURE_FULL } // namespace maple diff --git a/src/mapleall/maple_ir/include/mpl2mpl_options.h b/src/mapleall/maple_ir/include/mpl2mpl_options.h index edb3c95cd52c8798db52df82b259a5e118b44a38..e600dd30712b263f639b4ade1bdc2f2f9e394cd2 100644 --- a/src/mapleall/maple_ir/include/mpl2mpl_options.h +++ b/src/mapleall/maple_ir/include/mpl2mpl_options.h @@ -17,11 +17,6 @@ #define MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H #include "cl_option.h" -#include "cl_parser.h" - -#include -#include -#include namespace opts::mpl2mpl { diff --git a/src/mapleall/maple_ir/include/option.h b/src/mapleall/maple_ir/include/option.h index 8b21b83b2a150c71a874de58a85ea371c9fc05e9..1b9af761833837a4fa3b3f99966b8ec96df5667c 100644 --- a/src/mapleall/maple_ir/include/option.h +++ b/src/mapleall/maple_ir/include/option.h @@ -123,7 +123,7 @@ class Options { static bool mapleLinker; static bool dumpMuidFile; static bool emitVtableImpl; -#if MIR_JAVA +#if defined(MIR_JAVA) && MIR_JAVA 
static bool skipVirtualMethod; #endif // Ready to be deleted. @@ -192,6 +192,7 @@ class Options { static bool doOutline; static size_t outlineThreshold; static size_t outlineRegionMax; + static bool tailcall; private: void DecideMpl2MplRealLevel() const; diff --git a/src/mapleall/maple_ir/include/prim_types.h b/src/mapleall/maple_ir/include/prim_types.h index c5cb29bbc7a1665c38ed350d45457be19b408acd..d26fc9a9d83693a3282d22cff2b050221986658b 100644 --- a/src/mapleall/maple_ir/include/prim_types.h +++ b/src/mapleall/maple_ir/include/prim_types.h @@ -14,7 +14,6 @@ */ #ifndef MAPLE_IR_INCLUDE_PRIM_TYPES_H #define MAPLE_IR_INCLUDE_PRIM_TYPES_H -#include "types_def.h" #include "cfg_primitive_types.h" namespace maple { diff --git a/src/mapleall/maple_ir/include/src_position.h b/src/mapleall/maple_ir/include/src_position.h index 49804960bba4f8f24c545f6d98689949d61925d7..ca73641bd1b0d454208c6d8bca7c8bfddaf2ae37 100644 --- a/src/mapleall/maple_ir/include/src_position.h +++ b/src/mapleall/maple_ir/include/src_position.h @@ -15,6 +15,7 @@ #ifndef MAPLE_IR_INCLUDE_SRC_POSITION_H #define MAPLE_IR_INCLUDE_SRC_POSITION_H #include "mpl_logging.h" +#include "types_def.h" namespace maple { // to store source position information diff --git a/src/mapleall/maple_ir/include/verification.h b/src/mapleall/maple_ir/include/verification.h index 3c0c3eff813f5c1d566bbe389455942e8f623535..8300e362d7c8c3c5c4d5a2ac35bbd42bee80a78c 100644 --- a/src/mapleall/maple_ir/include/verification.h +++ b/src/mapleall/maple_ir/include/verification.h @@ -93,7 +93,7 @@ class VerificationPhaseResult : public AnalysisResult { public: VerificationPhaseResult(MemPool &mp, const VerifyResult &verifyResult) : AnalysisResult(&mp), verifyResult(verifyResult) {} - ~VerificationPhaseResult() = default; + ~VerificationPhaseResult() override = default; const ClassVerifyPragmas &GetDeferredClassesPragma() const { return verifyResult.GetDeferredClassesPragma(); diff --git a/src/mapleall/maple_ir/include/verify_mark.h 
b/src/mapleall/maple_ir/include/verify_mark.h index 6fb72a498e3f0cb0b53c52faf056f53e526d8c72..7d6f2f56e92cdc49b956718137c3c5c3b88ba4cd 100644 --- a/src/mapleall/maple_ir/include/verify_mark.h +++ b/src/mapleall/maple_ir/include/verify_mark.h @@ -14,8 +14,6 @@ */ #ifndef MAPLEALL_VERIFY_MARK_H #define MAPLEALL_VERIFY_MARK_H -#include "class_hierarchy.h" -#include "verify_pragma_info.h" namespace maple { #ifdef NOT_USED diff --git a/src/mapleall/maple_ir/include/verify_pragma_info.h b/src/mapleall/maple_ir/include/verify_pragma_info.h index 7fe43caf005d35f8f0457cc1e62b944ab2f7f9c8..6239459ab6694b208f71e8d471c897c3330be4bc 100644 --- a/src/mapleall/maple_ir/include/verify_pragma_info.h +++ b/src/mapleall/maple_ir/include/verify_pragma_info.h @@ -53,7 +53,7 @@ class ThrowVerifyErrorPragma : public VerifyPragmaInfo { explicit ThrowVerifyErrorPragma(std::string errorMessage) : VerifyPragmaInfo(), errorMessage(std::move(errorMessage)) {} - ~ThrowVerifyErrorPragma() = default; + ~ThrowVerifyErrorPragma() override = default; PragmaInfoType GetPragmaType() const override { return kThrowVerifyError; @@ -73,7 +73,7 @@ class AssignableCheckPragma : public VerifyPragmaInfo { : VerifyPragmaInfo(), fromType(std::move(fromType)), toType(std::move(toType)) {} - ~AssignableCheckPragma() = default; + ~AssignableCheckPragma() override = default; PragmaInfoType GetPragmaType() const override { return kAssignableCheck; @@ -99,7 +99,7 @@ class AssignableCheckPragma : public VerifyPragmaInfo { class ExtendFinalCheckPragma : public VerifyPragmaInfo { public: ExtendFinalCheckPragma() : VerifyPragmaInfo() {} - ~ExtendFinalCheckPragma() = default; + ~ExtendFinalCheckPragma() override = default; PragmaInfoType GetPragmaType() const override { return kExtendFinalCheck; @@ -109,7 +109,7 @@ class ExtendFinalCheckPragma : public VerifyPragmaInfo { class OverrideFinalCheckPragma : public VerifyPragmaInfo { public: OverrideFinalCheckPragma() : VerifyPragmaInfo() {} - ~OverrideFinalCheckPragma() = 
default; + ~OverrideFinalCheckPragma() override = default; PragmaInfoType GetPragmaType() const override { return kOverrideFinalCheck; diff --git a/src/mapleall/maple_ir/src/bin_func_import.cpp b/src/mapleall/maple_ir/src/bin_func_import.cpp index 46ee019132df51613d80c582805c7f6762667c07..f55334846397a17f635942e4d1d0a81a23ab9eed 100644 --- a/src/mapleall/maple_ir/src/bin_func_import.cpp +++ b/src/mapleall/maple_ir/src/bin_func_import.cpp @@ -89,7 +89,7 @@ MIRSymbol *BinaryMplImport::ImportLocalSymbol(MIRFunction *func) { return sym; } -PregIdx BinaryMplImport::ImportPreg(MIRFunction *func) { +PregIdx BinaryMplImport::ImportPreg(const MIRFunction *func) { int64 tag = ReadNum(); if (tag == 0) { return 0; @@ -401,7 +401,7 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { case OP_intrinsicopwithtype: { IntrinsicopNode *intrnNode = mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), OP_intrinsicopwithtype, typ); - intrnNode->SetIntrinsic((MIRIntrinsicID)ReadNum()); + intrnNode->SetIntrinsic(static_cast(ReadNum())); intrnNode->SetTyIdx(ImportType()); auto n = static_cast(ReadNum()); for (uint32 i = 0; i < n; ++i) { @@ -518,7 +518,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { } case OP_iassignoff: { IassignoffNode *s = func->GetCodeMemPool()->New(); - s->SetPrimType((PrimType)Read()); + s->SetPrimType(static_cast(Read())); s->SetOffset(static_cast(ReadNum())); s->SetOpnd(ImportExpression(func), kFirstOpnd); s->SetOpnd(ImportExpression(func), kSecondOpnd); @@ -664,7 +664,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { case OP_intrinsiccallassigned: case OP_xintrinsiccallassigned: { IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); - s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + s->SetIntrinsic(static_cast(ReadNum())); ImportReturnValues(func, &s->GetReturnVec()); numOpr = static_cast(ReadNum()); s->SetNumOpnds(numOpr); @@ -682,7 +682,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction 
*func) { } case OP_intrinsiccallwithtype: { IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); - s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + s->SetIntrinsic(static_cast(ReadNum())); s->SetTyIdx(ImportType()); numOpr = static_cast(ReadNum()); s->SetNumOpnds(numOpr); @@ -694,7 +694,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { } case OP_intrinsiccallwithtypeassigned: { IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); - s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + s->SetIntrinsic(static_cast(ReadNum())); s->SetTyIdx(ImportType()); ImportReturnValues(func, &s->GetReturnVec()); numOpr = static_cast(ReadNum()); diff --git a/src/mapleall/maple_ir/src/bin_mpl_export.cpp b/src/mapleall/maple_ir/src/bin_mpl_export.cpp index ee21bc26f6d0639a09dd92eef17e0c8cfa47deb7..044f98359e5b31d0058e482ffb9bf96e4856d9a1 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_export.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_export.cpp @@ -713,14 +713,14 @@ void BinaryMplExport::OutputFunction(PUIdx puIdx) { } // store Side Effect for each func if (func2SEMap) { - uint32 isSee = func->IsIpaSeen() == true ? 1 : 0; - uint32 isPure = func->IsPure() == true ? 1 : 0; - uint32 noDefArg = func->IsNoDefArgEffect() == true ? 1 : 0; - uint32 noDef = func->IsNoDefEffect() == true ? 1 : 0; - uint32 noRetGlobal = func->IsNoRetGlobal() == true ? 1 : 0; - uint32 noThr = func->IsNoThrowException() == true ? 1 : 0; - uint32 noRetArg = func->IsNoRetArg() == true ? 1 : 0; - uint32 noPriDef = func->IsNoPrivateDefEffect() == true ? 1 : 0; + uint32 isSee = func->IsIpaSeen() ? 1 : 0; + uint32 isPure = func->IsPure() ? 1 : 0; + uint32 noDefArg = func->IsNoDefArgEffect() ? 1 : 0; + uint32 noDef = func->IsNoDefEffect() ? 1 : 0; + uint32 noRetGlobal = func->IsNoRetGlobal() ? 1 : 0; + uint32 noThr = func->IsNoThrowException() ? 1 : 0; + uint32 noRetArg = func->IsNoRetArg() ? 1 : 0; + uint32 noPriDef = func->IsNoPrivateDefEffect() ? 
1 : 0; uint32 i = 0; uint8 se = noThr << i++; se |= noRetGlobal << i++; @@ -1123,7 +1123,7 @@ void BinaryMplExport::WriteEnumField(uint64 contentIdx) { OutputEnumeration(mirEnum); size++; } - Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(totalSizeIdx, static_cast(buf.size() - totalSizeIdx)); Fixup(outEnumSizeIdx, size); WriteNum(~kBinEnumStart); return; @@ -1330,7 +1330,7 @@ void BinaryMplExport::OutputType(const TyIdx &tyIdx) { } } -void BinaryMplExport::OutputEnumeration(MIREnum *mirEnum) { +void BinaryMplExport::OutputEnumeration(const MIREnum *mirEnum) { WriteNum(kBinEnumeration); Write(static_cast(mirEnum->GetPrimType())); OutputStr(mirEnum->GetNameIdx()); diff --git a/src/mapleall/maple_ir/src/bin_mpl_import.cpp b/src/mapleall/maple_ir/src/bin_mpl_import.cpp index 2f3a7a67edb007a7cf8b15290f231d20d19e980e..878cbf5d04831ff1471ff727d88b94ed50446cc0 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_import.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_import.cpp @@ -889,7 +889,7 @@ MIRType &BinaryMplImport::InsertInTypeTables(MIRType &type) { } else { // New definition wins type.SetTypeIndex(prevTyIdx); - CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().empty() == false, "container check"); + CHECK_FATAL(!GlobalTables::GetTypeTable().GetTypeTable().empty(), "container check"); GlobalTables::GetTypeTable().SetTypeWithTyIdx(prevTyIdx, *type.CopyMIRTypeNode()); resultTypePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prevTyIdx); if (!IsIncomplete(*resultTypePtr)) { @@ -1276,7 +1276,7 @@ void BinaryMplImport::ReadCgField() { MIRSymbol *tmpInSymbol = InSymbol(nullptr); CHECK_FATAL(tmpInSymbol != nullptr, "null ptr check"); PUIdx methodPuidx = tmpInSymbol->GetFunction()->GetPuidx(); - CHECK_FATAL(methodPuidx, "should not be 0"); + CHECK_FATAL(methodPuidx != 0, "should not be 0"); if (mod.GetMethod2TargetMap().find(methodPuidx) == mod.GetMethod2TargetMap().end()) { std::vector targetSetTmp; mod.AddMemToMethod2TargetMap(methodPuidx, targetSetTmp); @@ 
-1555,11 +1555,11 @@ bool BinaryMplImport::ImportForSrcLang(const std::string &fname, MIRSrcLang &src Reset(); ReadFileAt(fname, 0); int32 magic = ReadInt(); - if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) { + if (magic != kMpltMagicNumber && magic != (kMpltMagicNumber + 0x10)) { buf.clear(); return false; } - importingFromMplt = kMpltMagicNumber == magic; + importingFromMplt = magic == kMpltMagicNumber; int64 fieldID = ReadNum(); while (fieldID != kBinFinish) { switch (fieldID) { @@ -1583,11 +1583,11 @@ bool BinaryMplImport::Import(const std::string &fname, bool readSymbols, bool re Reset(); ReadFileAt(fname, 0); int32 magic = ReadInt(); - if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) { + if (magic != kMpltMagicNumber && magic != (kMpltMagicNumber + 0x10)) { buf.clear(); return false; } - importingFromMplt = kMpltMagicNumber == magic; + importingFromMplt = magic == kMpltMagicNumber; int64 fieldID = ReadNum(); if (readSe) { while (fieldID != kBinFinish) { diff --git a/src/mapleall/maple_ir/src/debug_info.cpp b/src/mapleall/maple_ir/src/debug_info.cpp index a0bc92d3edd58fd55e7ceffad77e7fcd0850ec1f..51600e99ff0aad0ed08e79d0b195f6b595909e8f 100644 --- a/src/mapleall/maple_ir/src/debug_info.cpp +++ b/src/mapleall/maple_ir/src/debug_info.cpp @@ -25,14 +25,6 @@ #include "triple.h" namespace maple { -extern const char *GetDwTagName(unsigned n); -extern const char *GetDwFormName(unsigned n); -extern const char *GetDwAtName(unsigned n); -extern const char *GetDwOpName(unsigned n); -extern const char *GetDwAteName(unsigned n); -extern const char *GetDwCfaName(unsigned n); -extern DwAte GetAteFromPTY(PrimType pty); - constexpr uint32 kIndx2 = 2; constexpr uint32 kStructDBGSize = 8888; @@ -242,7 +234,7 @@ void DebugInfo::Init() { InitBaseTypeMap(); } -GStrIdx DebugInfo::GetPrimTypeCName(PrimType pty) { +GStrIdx DebugInfo::GetPrimTypeCName(PrimType pty) const { GStrIdx strIdx = GStrIdx(0); switch (pty) { #define TYPECNAME(p, n) \ @@ 
-443,7 +435,7 @@ void DebugInfo::HandleTypeAlias(MIRScope &scope) { } } -void DebugInfo::AddAliasDies(MIRScope &scope, bool isLocal) { +void DebugInfo::AddAliasDies(const MIRScope &scope, bool isLocal) { MIRFunction *func = GetCurFunction(); const std::string &funcName = func == nullptr ? "" : func->GetName(); GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); @@ -553,7 +545,7 @@ void DebugInfo::Finish() { void DebugInfo::BuildDebugInfoEnums() { auto size = GlobalTables::GetEnumTable().enumTable.size(); for (size_t i = 0; i < size; ++i) { - DBGDie *die = GetOrCreateEnumTypeDie(i); + DBGDie *die = GetOrCreateEnumTypeDie(static_cast(i)); compUnit->AddSubVec(die); } } @@ -620,7 +612,7 @@ void DebugInfo::BuildDebugInfoFunctions() { // function decl if (stridxDieIdMap.find(func->GetNameStrIdx().GetIdx()) == stridxDieIdMap.end()) { DBGDie *funcDie = GetOrCreateFuncDeclDie(func); - if (!func->GetClassTyIdx().GetIdx() && func->GetBody()) { + if (func->GetClassTyIdx().GetIdx() == 0 && func->GetBody()) { compUnit->AddSubVec(funcDie); } } @@ -628,7 +620,7 @@ void DebugInfo::BuildDebugInfoFunctions() { unsigned idx = func->GetNameStrIdx().GetIdx(); if (func->GetBody() && funcDefStrIdxDieIdMap.find(idx) == funcDefStrIdxDieIdMap.end()) { DBGDie *funcDie = GetOrCreateFuncDefDie(func); - if (!func->GetClassTyIdx().GetIdx()) { + if (func->GetClassTyIdx().GetIdx() == 0) { compUnit->AddSubVec(funcDie); } } @@ -678,7 +670,7 @@ DBGDie *DebugInfo::GetGlobalDie(const GStrIdx &strIdx) { unsigned idx = strIdx.GetIdx(); auto it = globalStridxDieIdMap.find(idx); if (it != globalStridxDieIdMap.end()) { - return idDieMap[it->second]; + return idDieMap.at(it->second); } return nullptr; } @@ -704,8 +696,8 @@ void DebugInfo::SetLabelIdx(MIRFunction *func, const GStrIdx &strIdx, LabelIdx l (funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()] = labIdx; } -LabelIdx DebugInfo::GetLabelIdx(MIRFunction *func, GStrIdx strIdx) { - LabelIdx labidx = 
(funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()]; +LabelIdx DebugInfo::GetLabelIdx(MIRFunction *func, const GStrIdx &strIdx) const { + LabelIdx labidx = (funcLstrIdxLabIdxMap.at(func)).at(strIdx.GetIdx()); return labidx; } @@ -767,7 +759,7 @@ DBGDie *DebugInfo::GetOrCreateLabelDie(LabelIdx labid) { MIRFunction *func = GetCurFunction(); CHECK(labid < func->GetLabelTab()->GetLabelTableSize(), "index out of range in DebugInfo::GetOrCreateLabelDie"); GStrIdx strid = func->GetLabelTab()->GetSymbolFromStIdx(labid); - if ((funcLstrIdxDieIdMap[func]).size() && + if ((funcLstrIdxDieIdMap[func]).size() > 0 && (funcLstrIdxDieIdMap[func]).find(strid.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) { return GetLocalDie(strid); } @@ -802,8 +794,8 @@ DBGDie *DebugInfo::CreateVarDie(MIRSymbol *sym) { if (isLocal) { MIRFunction *func = GetCurFunction(); - if ((funcLstrIdxDieIdMap[func]).size() && - (funcLstrIdxDieIdMap[func]).find(strIdx.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) { + if (!funcLstrIdxDieIdMap[func].empty() && + funcLstrIdxDieIdMap[func].find(strIdx.GetIdx()) != funcLstrIdxDieIdMap[func].end()) { return GetLocalDie(strIdx); } } else { @@ -1103,7 +1095,7 @@ DBGDie *DebugInfo::GetOrCreateTypeDieWithAttr(AttrKind attr, DBGDie *typeDie) { return die; } -DBGDie *DebugInfo::GetOrCreateTypeDieWithAttr(TypeAttrs attrs, DBGDie *typeDie) { +DBGDie *DebugInfo::GetOrCreateTypeDieWithAttr(const TypeAttrs &attrs, DBGDie *typeDie) { if (attrs.GetAttr(ATTR_const)) { typeDie = GetOrCreateTypeDieWithAttr(ATTR_const, typeDie); } @@ -1202,12 +1194,12 @@ DBGDie *DebugInfo::GetOrCreateTypedefDie(GStrIdx stridx, TyIdx tyidx) { return die; } -DBGDie *DebugInfo::GetOrCreateEnumTypeDie(unsigned idx) { +DBGDie *DebugInfo::GetOrCreateEnumTypeDie(uint32 idx) { MIREnum *mirEnum = GlobalTables::GetEnumTable().enumTable[idx]; return GetOrCreateEnumTypeDie(mirEnum); } -DBGDie *DebugInfo::GetOrCreateEnumTypeDie(MIREnum *mirEnum) { +DBGDie *DebugInfo::GetOrCreateEnumTypeDie(const MIREnum *mirEnum) { 
uint32 sid = mirEnum->GetNameIdx().GetIdx(); auto it = stridxDieIdMap.find(sid); if (it != stridxDieIdMap.end()) { @@ -1231,7 +1223,7 @@ DBGDie *DebugInfo::GetOrCreateEnumTypeDie(MIREnum *mirEnum) { for (auto &elemIt : mirEnum->GetElements()) { DBGDie *elem = module->GetMemPool()->New(module, DW_TAG_enumerator); elem->AddAttr(DW_AT_name, DW_FORM_strp, elemIt.first.GetIdx()); - elem->AddAttr(DW_AT_const_value, DW_FORM_data8, elemIt.second.GetExtValue()); + elem->AddAttr(DW_AT_const_value, DW_FORM_data8, static_cast(elemIt.second.GetExtValue())); die->AddSubVec(elem); } @@ -1338,7 +1330,7 @@ DBGDie *DebugInfo::GetOrCreateArrayTypeDie(const MIRArrayType *arrayType) { return die; } -DBGDie *DebugInfo::CreateFieldDie(maple::FieldPair pair) { +DBGDie *DebugInfo::CreateFieldDie(const maple::FieldPair &pair) { DBGDie *die = module->GetMemPool()->New(module, DW_TAG_member); const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(pair.first); @@ -1483,7 +1475,7 @@ void DebugInfo::CreateStructTypeMethodsDies(const MIRStructType *structType, DBG } // shared between struct and union, also used as part by class and interface -DBGDie *DebugInfo::CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *structType, bool update) { +DBGDie *DebugInfo::CreateStructTypeDie(const GStrIdx &strIdx, const MIRStructType *structType, bool update) { DBGDie *die = nullptr; uint32 tid = structType->GetTypeIndex().GetIdx(); @@ -1638,7 +1630,7 @@ void DebugInfo::BuildAbbrev() { void DebugInfo::BuildDieTree() { for (auto it : idDieMap) { - if (!it.first) { + if (it.first == 0) { continue; } DBGDie *die = it.second; diff --git a/src/mapleall/maple_ir/src/debug_info_util.cpp b/src/mapleall/maple_ir/src/debug_info_util.cpp index 12396c41ee373c55e7d7b180011ad0efa2cdad45..043c1c39b6a0fb9c147321cbbd0988fd13503875 100644 --- a/src/mapleall/maple_ir/src/debug_info_util.cpp +++ b/src/mapleall/maple_ir/src/debug_info_util.cpp @@ -13,16 +13,10 @@ * See the MulanPSL - 2.0 for more details. 
*/ -#include #include "mir_builder.h" -#include "printing.h" -#include "maple_string.h" -#include "namemangler.h" #include "debug_info.h" #include "global_tables.h" #include "mir_type.h" -#include "securec.h" -#include "mpl_logging.h" namespace maple { #define TOSTR(s) #s diff --git a/src/mapleall/maple_ir/src/global_tables.cpp b/src/mapleall/maple_ir/src/global_tables.cpp index 668cb5f7242162f332b5cdb59c209db9629c0004..e045d78a4abd123737b64c62a992649165bb428f 100644 --- a/src/mapleall/maple_ir/src/global_tables.cpp +++ b/src/mapleall/maple_ir/src/global_tables.cpp @@ -220,16 +220,26 @@ MIRType *TypeTable::GetOrCreateJarrayType(const MIRType &elem) { return typeTable.at(tyIdx); } +MIRType *TypeTable::GetOrCreateFunctionType(const TyIdx &retTyIdx, + const std::vector &vecType, + const std::vector &vecAttrs, + const FuncAttrs &funcAttrs, + const TypeAttrs &retAttrs) { + MIRFuncType funcType(retTyIdx, vecType, vecAttrs, funcAttrs, retAttrs); + TyIdx tyIdx = GetOrCreateMIRType(&funcType); + ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFunctionType"); + return typeTable.at(tyIdx); +} + + MIRType *TypeTable::GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::vector &vecType, const std::vector &vecAttrs, bool isVarg, const TypeAttrs &retAttrs) { - MIRFuncType funcType(retTyIdx, vecType, vecAttrs, retAttrs); + FuncAttrs funcAttrs; if (isVarg) { - funcType.SetVarArgs(); + funcAttrs.SetAttr(FUNCATTR_varargs); } - TyIdx tyIdx = GetOrCreateMIRType(&funcType); - ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFunctionType"); - return typeTable.at(tyIdx); + return GetOrCreateFunctionType(retTyIdx, vecType, vecAttrs, funcAttrs, retAttrs); } MIRType *TypeTable::GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields, @@ -302,9 +312,17 @@ void FPConstTable::PostInit() { } MIRIntConst *IntConstTable::GetOrCreateIntConst(const IntVal &val, MIRType &type) { + PrimType pt = type.GetPrimType(); if 
(ThreadEnv::IsMeParallel()) { + if (IsInt128Ty(pt)) { + return DoGetOrCreateInt128ConstTreadSafe(val.GetRawData(), type); + } return DoGetOrCreateIntConstTreadSafe(static_cast(val.GetExtValue()), type); } + + if (IsInt128Ty(pt)) { + return DoGetOrCreateInt128Const(val.GetRawData(), type); + } return DoGetOrCreateIntConst(static_cast(val.GetExtValue()), type); } @@ -324,6 +342,15 @@ MIRIntConst *IntConstTable::DoGetOrCreateIntConst(uint64 val, MIRType &type) { return intConstTable[key]; } +MIRIntConst *IntConstTable::DoGetOrCreateInt128Const(const Int128ElemTy *pVal, MIRType &type) { + Int128ConstKey key(pVal, type.GetTypeIndex()); + if (int128ConstTable.find(key) != int128ConstTable.end()) { + return int128ConstTable[key]; + } + int128ConstTable[key] = new MIRIntConst(pVal, type); + return int128ConstTable[key]; +} + MIRIntConst *IntConstTable::DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type) { IntConstKey key(val, type.GetTypeIndex()); { @@ -337,6 +364,19 @@ MIRIntConst *IntConstTable::DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType & return intConstTable[key]; } +MIRIntConst *IntConstTable::DoGetOrCreateInt128ConstTreadSafe(const Int128ElemTy *pVal, MIRType &type) { + Int128ConstKey key(pVal, type.GetTypeIndex()); + { + std::shared_lock lock(mtx); + if (int128ConstTable.find(key) != int128ConstTable.end()) { + return int128ConstTable[key]; + } + } + std::unique_lock lock(mtx); + int128ConstTable[key] = new MIRIntConst(pVal, type); + return int128ConstTable[key]; +} + IntConstTable::~IntConstTable() { for (auto pair : intConstTable) { delete pair.second; @@ -437,9 +477,9 @@ MIRFloat128Const *FPConstTable::GetOrCreateFloat128Const(const uint64 *fvalPtr) return nanFloat128Const; } if (f128Const.IsInf()) { - return (f128Const.GetSign()) ? minusInfFloat128Const : infFloat128Const; + return (f128Const.GetSign() != 0) ? 
minusInfFloat128Const : infFloat128Const; } - if (f128Const.IsZero() && f128Const.GetSign()) { + if (f128Const.IsZero() && f128Const.GetSign() != 0) { return minusZeroFloat128Const; } if (ThreadEnv::IsMeParallel()) { @@ -497,7 +537,7 @@ FPConstTable::~FPConstTable() { for (const auto &doubleConst : doubleConstTable) { delete doubleConst.second; } - for(const auto &float128Const : float128ConstTable) { + for (const auto &float128Const : float128ConstTable) { delete float128Const.second; } } diff --git a/src/mapleall/maple_ir/src/lexer.cpp b/src/mapleall/maple_ir/src/lexer.cpp index 4016d59b34c4c826272a3bf853cad9f199b377ef..4e2a2daa4eabdb185ef4231fb2700baed5c080cf 100644 --- a/src/mapleall/maple_ir/src/lexer.cpp +++ b/src/mapleall/maple_ir/src/lexer.cpp @@ -14,13 +14,13 @@ */ #include "lexer.h" #include -#include #include #include "mpl_logging.h" #include "debug_info.h" #include "mir_module.h" #include "securec.h" #include "utils.h" +#include "int128_util.h" namespace maple { int32 HexCharToDigit(char c) { @@ -90,9 +90,9 @@ void MIRLexer::PrepareForFile(const std::string &filename) { kind = TK_invalid; } -void MIRLexer::UpdateDbgMsg(uint32 lineNum) { +void MIRLexer::UpdateDbgMsg(uint32 dbgLineNum) { if (dbgInfo) { - dbgInfo->UpdateMsg(lineNum, line.c_str()); + dbgInfo->UpdateMsg(dbgLineNum, line.c_str()); } } @@ -197,22 +197,23 @@ TokenKind MIRLexer::GetHexConst(uint32 valStart, bool negative) { name = line.substr(valStart, curIdx - valStart); return TK_invalid; } - uint64 tmp = static_cast(HexCharToDigit(c)); + IntVal tmp(static_cast(HexCharToDigit(c)), kInt128BitSize, negative); c = GetNextCurrentCharWithUpperCheck(); while (isxdigit(c)) { tmp = (tmp << 4) + static_cast(HexCharToDigit(c)); c = GetNextCurrentCharWithUpperCheck(); } - theIntVal = static_cast(static_cast(tmp)); if (negative) { - theIntVal = -theIntVal; + tmp = -tmp; } + theIntVal = tmp.Trunc(PTY_i64).GetExtValue(); theFloatVal = static_cast(theIntVal); theDoubleVal = static_cast(theIntVal); if 
(negative && theIntVal == 0) { theFloatVal = -theFloatVal; theDoubleVal = -theDoubleVal; } + theInt128Val.Assign(tmp); name = line.substr(valStart, curIdx - valStart); return TK_intconst; } @@ -223,14 +224,15 @@ TokenKind MIRLexer::GetLongHexConst(uint32 valStart, bool negative) { name = line.substr(valStart, curIdx - valStart); return TK_invalid; } - __int128 tmp = 0; + unsigned __int128 tmp = 0; + uint32 buf = 0; while (isxdigit(c)) { - tmp = static_cast(HexCharToDigit(c)); - tmp = (static_cast<__int128>(theLongDoubleVal[1] << 4)) + tmp; - theLongDoubleVal[1] = static_cast(tmp); - theLongDoubleVal[0] = (theLongDoubleVal[0] << 4) + (tmp >> 64); + buf = static_cast(HexCharToDigit(c)); + tmp = (tmp << 4) + buf; c = GetNextCurrentCharWithUpperCheck(); } + theLongDoubleVal[1] = static_cast(tmp); + theLongDoubleVal[0] = static_cast(tmp >> 64); theIntVal = static_cast(static_cast(theLongDoubleVal[1])); if (negative) { theIntVal = -theIntVal; @@ -246,16 +248,16 @@ TokenKind MIRLexer::GetLongHexConst(uint32 valStart, bool negative) { } TokenKind MIRLexer::GetIntConst(uint32 valStart, bool negative) { - auto negOrSelf = [negative](int64 val) { return negative ? ~val + 1 : val; }; - - theIntVal = static_cast(HexCharToDigit(GetCharAtWithUpperCheck(curIdx))); - - int64 radix = theIntVal == 0 ? 8 : 10; - - char c = GetNextCurrentCharWithUpperCheck(); - - for (theIntVal = negOrSelf(theIntVal); isdigit(c); c = GetNextCurrentCharWithUpperCheck()) { - theIntVal = (theIntVal * radix) + negOrSelf(static_cast(HexCharToDigit(c))); + char c = GetCharAtWithUpperCheck(curIdx); + if (!isxdigit(c)) { + name = line.substr(valStart, curIdx - valStart); + return TK_invalid; + } + uint64 radix = HexCharToDigit(c) == 0 ? 
8 : 10; + IntVal tmp(static_cast(0), kInt128BitSize, negative); + while (isdigit(c)) { + tmp = (tmp * radix) + static_cast(HexCharToDigit(c)); + c = GetNextCurrentCharWithUpperCheck(); } if (c == 'u' || c == 'U') { // skip 'u' or 'U' @@ -274,6 +276,12 @@ TokenKind MIRLexer::GetIntConst(uint32 valStart, bool negative) { name = line.substr(valStart, curIdx - valStart); + if (negative) { + tmp = -tmp; + } + + theInt128Val.Assign(tmp); + theIntVal = tmp.Trunc(PTY_u64).GetExtValue(); if (negative) { theFloatVal = static_cast(static_cast(theIntVal)); theDoubleVal = static_cast(static_cast(theIntVal)); @@ -487,8 +495,8 @@ TokenKind MIRLexer::GetTokenWithPrefixDoubleQuotation() { const uint32 hexLength = 2; uint8 c1 = Char2num(GetCharAtWithLowerCheck(curIdx + 1)); uint8 c2 = Char2num(GetCharAtWithLowerCheck(curIdx + 2)); - uint32 cNew = static_cast(c1 << hexShift) + c2; - line[curIdx - shift] = cNew; + uint8 cNew = static_cast(c1 << hexShift) + c2; + line[curIdx - shift] = static_cast(cNew); curIdx += hexLength; shift += hexLength; break; diff --git a/src/mapleall/maple_ir/src/mir_builder.cpp b/src/mapleall/maple_ir/src/mir_builder.cpp index 30f428553fd3f27a52de07bffa2304a91b46dae8..49414437609a1cc5c44262bee3900f72d81b96e7 100644 --- a/src/mapleall/maple_ir/src/mir_builder.cpp +++ b/src/mapleall/maple_ir/src/mir_builder.cpp @@ -136,7 +136,7 @@ bool MIRBuilder::TraverseToNamedFieldWithTypeAndMatchStyle(MIRStructType &struct TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first; MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); ASSERT(fieldType != nullptr, "fieldType is null"); - if (matchStyle && structType.GetFieldsElemt(fieldIdx).first == nameIdx) { + if (matchStyle != 0 && structType.GetFieldsElemt(fieldIdx).first == nameIdx) { if (typeIdx == 0u || fieldTyIdx == typeIdx || fieldType->IsOfSameType(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx))) { return true; @@ -240,13 +240,13 @@ MIRFunction 
*MIRBuilder::GetFunctionFromSymbol(const MIRSymbol &funcSymbol) cons return funcSymbol.GetFunction(); } -MIRFunction *MIRBuilder::GetFunctionFromName(const std::string &str) { +MIRFunction *MIRBuilder::GetFunctionFromName(const std::string &str) const { auto *funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(str)); return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr; } -MIRFunction *MIRBuilder::GetFunctionFromStidx(StIdx stIdx) { +MIRFunction *MIRBuilder::GetFunctionFromStidx(const StIdx &stIdx) const { auto *funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr; } @@ -276,8 +276,12 @@ MIRFunction *MIRBuilder::CreateFunction(const std::string &name, const MIRType & (void)fn->GetSymTab()->AddToStringSymbolMap(*formalDef.formalSym); } } + FuncAttrs funcAttrs; + if (isVarg) { + funcAttrs.SetAttr(FUNCATTR_varargs); + } funcSymbol->SetTyIdx(GlobalTables::GetTypeTable().GetOrCreateFunctionType( - returnType.GetTypeIndex(), funcVecType, funcVecAttrs, isVarg)->GetTypeIndex()); + returnType.GetTypeIndex(), funcVecType, funcVecAttrs, funcAttrs)->GetTypeIndex()); auto *funcType = static_cast(funcSymbol->GetType()); fn->SetMIRFuncType(funcType); funcSymbol->SetFunction(fn); @@ -498,6 +502,14 @@ ConstvalNode *MIRBuilder::CreateConstval(MIRConst *mirConst) { return GetCurrentFuncCodeMp()->New(mirConst->GetType().GetPrimType(), mirConst); } +ConstvalNode *MIRBuilder::CreateInt128Const(const Int128ElemTy *value, PrimType pty) { + ASSERT(IsInt128Ty(pty), "unxecpected prim type"); + IntVal intVal(value, pty); + auto *mirConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(intVal, *GlobalTables::GetTypeTable().GetPrimType(pty)); + return GetCurrentFuncCodeMp()->New(pty, mirConst); +} + ConstvalNode *MIRBuilder::CreateIntConst(uint64 val, PrimType pty) { auto *mirConst = 
GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *GlobalTables::GetTypeTable().GetPrimType(pty)); @@ -559,7 +571,7 @@ ConstvalNode *MIRBuilder::CreateAddroffuncConst(const BaseNode &node) { ConstvalNode *MIRBuilder::CreateStrConst(const BaseNode &node) { ASSERT(node.GetOpCode() == OP_conststr, "illegal op for conststr const"); UStrIdx strIdx = static_cast(node).GetStrIdx(); - CHECK_FATAL(PTY_u8 < GlobalTables::GetTypeTable().GetTypeTable().size(), + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().size() > PTY_u8, "index is out of range in MIRBuilder::CreateStrConst"); TyIdx tyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u8))->GetTypeIndex(); MIRPtrType ptrType(tyIdx); @@ -572,7 +584,7 @@ ConstvalNode *MIRBuilder::CreateStrConst(const BaseNode &node) { ConstvalNode *MIRBuilder::CreateStr16Const(const BaseNode &node) { ASSERT(node.GetOpCode() == OP_conststr16, "illegal op for conststr16 const"); U16StrIdx strIdx = static_cast(node).GetStrIdx(); - CHECK_FATAL(PTY_u16 < GlobalTables::GetTypeTable().GetTypeTable().size(), + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().size() > PTY_u16, "index out of range in MIRBuilder::CreateStr16Const"); TyIdx ptyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u16))->GetTypeIndex(); MIRPtrType ptrType(ptyIdx); diff --git a/src/mapleall/maple_ir/src/mir_const.cpp b/src/mapleall/maple_ir/src/mir_const.cpp index a708597e64460666c0f265662a4ce3211dc63de3..4fbb11f538cac963312e52d2dcb48d24bce51bda 100644 --- a/src/mapleall/maple_ir/src/mir_const.cpp +++ b/src/mapleall/maple_ir/src/mir_const.cpp @@ -34,21 +34,8 @@ bool MIRIntConst::operator==(const MIRConst &rhs) const { return ((&intConst.GetType() == &GetType()) && (intConst.value == value)); } -uint8 MIRIntConst::GetActualBitWidth() const { - if (value == 0) { - return 1; - } - - int64 val = GetExtValue(); - uint64 tmp = static_cast(val < 0 ? 
-(val + 1) : val); - - uint8 width = 0; - while (tmp != 0) { - ++width; - tmp = tmp >> 1u; - } - - return width; +uint16 MIRIntConst::GetActualBitWidth() const { + return value.CountSignificantBits(); } void MIRAddrofConst::Dump(const MIRSymbolTable *localSymTab) const { diff --git a/src/mapleall/maple_ir/src/mir_function.cpp b/src/mapleall/maple_ir/src/mir_function.cpp index 6ea0bc62013630794e901a62e9aca504664496c8..93f56d7b32cca478174b1d943bffdfa0ae45a313 100644 --- a/src/mapleall/maple_ir/src/mir_function.cpp +++ b/src/mapleall/maple_ir/src/mir_function.cpp @@ -123,9 +123,12 @@ void MIRFunction::UpdateFuncTypeAndFormals(const std::vector &symbol } void MIRFunction::UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, - bool clearOldArgs) { + bool clearOldArgs, bool firstArgRet) { auto *newFuncType = ReconstructFormals(symbols, clearOldArgs); newFuncType->SetRetTyIdx(retTyIdx); + if (firstArgRet) { + newFuncType->funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } auto newFuncTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(newFuncType); funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(newFuncTypeIdx)); delete newFuncType; @@ -157,21 +160,7 @@ void MIRFunction::SetReturnStruct() { } void MIRFunction::SetReturnStruct(const MIRType &retType) { if (retType.IsStructType()) { - flag |= kFuncPropRetStruct; - } -} -void MIRFunction::SetReturnStruct(const MIRType *retType) { - switch (retType->GetKind()) { - case kTypeUnion: - case kTypeStruct: - case kTypeStructIncomplete: - case kTypeClass: - case kTypeClassIncomplete: - case kTypeInterface: - case kTypeInterfaceIncomplete: - flag |= kFuncPropRetStruct; - break; - default:; + SetReturnStruct(); } } @@ -578,10 +567,10 @@ void MIRFunction::SetBaseClassFuncNames(GStrIdx strIdx) { } baseFuncSigStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcNameWithType); size_t newPos = name.find(delimiter, pos + width); - while (newPos < std::string::npos && 
(name[newPos - 1] == '_' && name[newPos - 2] != '_')) { + while (newPos != std::string::npos && (name[newPos - 1] == '_' && name[newPos - 2] != '_')) { newPos = name.find(delimiter, newPos + width); } - if (newPos != 0) { + if (newPos != std::string::npos && newPos > 0) { std::string funcName = name.substr(pos + width, newPos - pos - width); baseFuncStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); std::string signature = name.substr(newPos + width, name.length() - newPos - width); diff --git a/src/mapleall/maple_ir/src/mir_lower.cpp b/src/mapleall/maple_ir/src/mir_lower.cpp index 884004ff5b109a0611907b431b2710b42f09082e..eab5d7cca42c70a7c38d0e219fb1666c6d1ba8bd 100644 --- a/src/mapleall/maple_ir/src/mir_lower.cpp +++ b/src/mapleall/maple_ir/src/mir_lower.cpp @@ -455,7 +455,7 @@ BlockNode *MIRLower::LowerDoloopStmt(DoloopNode &doloop) { } blk->AppendStatementsFromBlock(*doloop.GetDoBody()); if (doloop.IsPreg()) { - PregIdx regIdx = (PregIdx)doloop.GetDoVarStIdx().FullIdx(); + PregIdx regIdx = static_cast(doloop.GetDoVarStIdx().FullIdx()); MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx); PrimType doVarPType = mirPreg->GetPrimType(); ASSERT(doVarPType != kPtyInvalid, "runtime check error"); @@ -560,15 +560,11 @@ BlockNode *MIRLower::LowerBlock(BlockNode &block) { case OP_icall: { if (mirModule.IsCModule()) { // convert to icallproto/icallprotoassigned - IcallNode *ic = static_cast(stmt); + IcallNode *ic = static_cast(stmt); ic->SetOpCode(stmt->GetOpCode() == OP_icall ? 
OP_icallproto : OP_icallprotoassigned); MIRFuncType *funcType = FuncTypeFromFuncPtrExpr(stmt->Opnd(0)); CHECK_FATAL(funcType != nullptr, "MIRLower::LowerBlock: cannot find prototype for icall"); ic->SetRetTyIdx(funcType->GetTypeIndex()); - MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); - if (retType->GetPrimType() == PTY_agg && retType->GetSize() > k16BitSize) { - funcType->funcAttrs.SetAttr(FUNCATTR_firstarg_return); - } } newBlock->AddStatement(stmt); break; @@ -814,7 +810,7 @@ BaseNode *MIRLower::LowerCArray(ArrayNode *array) { resNode = newValNode; } } - if (i > 0 && isConst == false) { + if (i > 0 && !isConst) { resNode = array->GetIndex(i); } diff --git a/src/mapleall/maple_ir/src/mir_module.cpp b/src/mapleall/maple_ir/src/mir_module.cpp index 3479b4dc7b73a93a3e4f3c7b0b8152352f3e8806..d734c8aeb3f2711ebe4d5c4f97a72557816d5584 100644 --- a/src/mapleall/maple_ir/src/mir_module.cpp +++ b/src/mapleall/maple_ir/src/mir_module.cpp @@ -50,7 +50,9 @@ MIRModule::MIRModule(const std::string &fn) puIdxFieldInitializedMap(std::less(), memPoolAllocator.Adapter()), inliningGlobals(memPoolAllocator.Adapter()), partO2FuncList(memPoolAllocator.Adapter()), - safetyWarningMap(memPoolAllocator.Adapter()) { + safetyWarningMap(memPoolAllocator.Adapter()), + tdataVarOffset(memPoolAllocator.Adapter()), + tbssVarOffset(memPoolAllocator.Adapter()) { GlobalTables::GetGsymTable().SetModule(this); typeNameTab = memPool->New(memPoolAllocator); mirBuilder = memPool->New(this); diff --git a/src/mapleall/maple_ir/src/mir_nodes.cpp b/src/mapleall/maple_ir/src/mir_nodes.cpp index 9105a57f5bc680aa6ec1ed0721e81b80fb2da4a0..f0f6ccc729690c4fa7ee8dd9fa6fadd6b63d7535 100644 --- a/src/mapleall/maple_ir/src/mir_nodes.cpp +++ b/src/mapleall/maple_ir/src/mir_nodes.cpp @@ -35,10 +35,10 @@ const int32 CondGotoNode::probAll = 10000; const char *GetIntrinsicName(MIRIntrinsicID intrn) { switch (intrn) { - default: #define DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, 
INTRN_CLASS, RETURN_TYPE, ...) \ - case INTRN_##STR: \ - return #STR; + case INTRN_##STR: \ + return #STR; + default: #include "intrinsics.def" #undef DEF_MIR_INTRINSIC } @@ -284,13 +284,13 @@ BlockNode *BlockNode::CloneTreeWithSrcPosition(const MIRModule &mod, const GStrI BlockNode *BlockNode::CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map& toFreqs, std::unordered_map& fromFreqs, - FreqType numer, uint64_t denom, uint32_t updateOp) { + FreqType numer, FreqType denom, uint32_t updateOp) { auto *nnode = allocator.GetMemPool()->New(); nnode->SetStmtID(stmtIDNext++); if (fromFreqs.count(GetStmtID()) > 0) { FreqType oldFreq = fromFreqs[GetStmtID()]; FreqType newFreq; - if (updateOp & kUpdateUnrollRemainderFreq) { + if ((updateOp & static_cast(kUpdateUnrollRemainderFreq)) != 0) { newFreq = denom > 0 ? (oldFreq * numer % static_cast(denom)) : oldFreq; } else { newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / static_cast(denom)) : oldFreq); @@ -298,7 +298,7 @@ BlockNode *BlockNode::CloneTreeWithFreqs(MapleAllocator &allocator, toFreqs[nnode->GetStmtID()] = (newFreq > 0 || (numer == 0)) ? newFreq : 1; if ((updateOp & kUpdateOrigFreq) != 0) { // upateOp & 1 : update from int64_t left = static_cast(((oldFreq - newFreq) > 0 || (oldFreq == 0)) ? 
(oldFreq - newFreq) : 1); - fromFreqs[GetStmtID()] = static_cast(left); + fromFreqs[GetStmtID()] = left; } } for (auto &stmt : stmtNodeList) { @@ -308,14 +308,17 @@ BlockNode *BlockNode::CloneTreeWithFreqs(MapleAllocator &allocator, (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); } else if (stmt.GetOpCode() == OP_if) { newStmt = static_cast( - (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, + static_cast(numer), denom, updateOp)); } else if (stmt.GetOpCode() == OP_while) { newStmt = static_cast( (static_cast(&stmt))->CloneTreeWithFreqs(allocator, - toFreqs, fromFreqs, numer, denom, updateOp)); + toFreqs, fromFreqs, static_cast(numer), + denom, updateOp)); } else if (stmt.GetOpCode() == OP_doloop) { newStmt = static_cast( - (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, + static_cast(numer), denom, updateOp)); } else { newStmt = static_cast(stmt.CloneTree(allocator)); if (fromFreqs.count(stmt.GetStmtID()) > 0) { @@ -327,10 +330,10 @@ BlockNode *BlockNode::CloneTreeWithFreqs(MapleAllocator &allocator, newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / static_cast(denom)) : oldFreq); } toFreqs[newStmt->GetStmtID()] = - (newFreq > 0 || oldFreq == 0 || numer == 0) ? static_cast(newFreq) : 1; + (newFreq > 0 || oldFreq == 0 || numer == 0) ? newFreq : 1; if ((updateOp & kUpdateOrigFreq) != 0) { - FreqType left = static_cast(((oldFreq - newFreq) > 0 || oldFreq == 0) ? (oldFreq - newFreq) : 1); - fromFreqs[stmt.GetStmtID()] = static_cast(left); + FreqType left = static_cast(((oldFreq - newFreq) > 0 || oldFreq == 0) ? 
(oldFreq - newFreq) : 1); + fromFreqs[stmt.GetStmtID()] = left; } } } @@ -600,7 +603,7 @@ void ArrayNode::Dump(int32 indent) const { NaryOpnds::Dump(indent); } -bool ArrayNode::IsSameBase(ArrayNode *arry) { +bool ArrayNode::IsSameBase(const ArrayNode *arry) const { ASSERT(arry != nullptr, "null ptr check"); if (arry == this) { return true; @@ -911,7 +914,7 @@ void TryNode::Dump(int32 indent) const { LogInfo::MapleLogger() << " {"; for (size_t i = 0; i < offsets.size(); ++i) { uint32 offset = offsets[i]; - LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName((LabelIdx)offset); + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(offset)); } LogInfo::MapleLogger() << " }\n"; } @@ -2687,6 +2690,34 @@ bool BinaryNode::IsSameContent(const BaseNode *node) const { } } +bool RetypeNode::IsSameContent(const BaseNode *node) const { + auto *retyeNode = dynamic_cast(node); + if (!retyeNode) { + return false; + } + if (retyeNode == this) { + return true; + } + if (retyeNode->tyIdx == tyIdx && TypeCvtNode::IsSameContent(node)) { + return true; + } + return false; +} + +bool ExtractbitsNode::IsSameContent(const BaseNode *node) const { + auto *extractNode = dynamic_cast(node); + if (!extractNode) { + return false; + } + if (extractNode == this) { + return true; + } + if (extractNode->bitsSize == bitsSize && extractNode->bitsOffset == bitsOffset && UnaryNode::IsSameContent(node)) { + return true; + } + return false; +} + bool ConstvalNode::IsSameContent(const BaseNode *node) const { auto *constvalNode = dynamic_cast(node); if (this == constvalNode) { diff --git a/src/mapleall/maple_ir/src/mir_parser.cpp b/src/mapleall/maple_ir/src/mir_parser.cpp index e04c635fa8126018e5e99052e8e389c8b776cd8d..83b56b2862c7eb60bc041e9c78bee0e9336e2408 100644 --- a/src/mapleall/maple_ir/src/mir_parser.cpp +++ b/src/mapleall/maple_ir/src/mir_parser.cpp @@ -992,7 +992,7 @@ bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool 
isAssigned) { : OP_xintrinsiccallassigned); auto *intrnCallNode = mod.CurFuncCodeMemPool()->New(mod, o); lexer.NextToken(); - if (o == (!isAssigned) ? OP_intrinsiccall : OP_intrinsiccallassigned) { + if (o == ((!isAssigned) ? OP_intrinsiccall : OP_intrinsiccallassigned)) { intrnCallNode->SetIntrinsic(GetIntrinsicID(lexer.GetTokenKind())); } else { intrnCallNode->SetIntrinsic(static_cast(lexer.GetTheIntVal())); @@ -1125,7 +1125,7 @@ bool MIRParser::ParseCallReturnPair(CallReturnPair &retpair) { TyIdx tyidx(0); // RegreadNode regreadexpr; bool ret = ParsePrimType(tyidx); - if (ret != true) { + if (!ret) { Error("call ParsePrimType failed in ParseCallReturns"); return false; } @@ -1831,7 +1831,7 @@ bool MIRParser::ParseLoc() { return true; } -bool MIRParser::ParseLocStmt(StmtNodePtr&) { +bool MIRParser::ParseLocStmt(StmtNodePtr &stmt) { return ParseLoc(); } @@ -3196,7 +3196,11 @@ bool MIRParser::ParseScalarValue(MIRConstPtr &stype, MIRType &type) { Error("constant value incompatible with integer type at "); return false; } - stype = GlobalTables::GetIntConstTable().GetOrCreateIntConst(lexer.GetTheIntVal(), type); + if (IsInt128Ty(ptp)) { + stype = GlobalTables::GetIntConstTable().GetOrCreateIntConst(lexer.GetTheInt128Val(), type); + } else { + stype = GlobalTables::GetIntConstTable().GetOrCreateIntConst(lexer.GetTheIntVal(), type); + } } else if (ptp == PTY_f32) { if (lexer.GetTokenKind() != TK_floatconst) { Error("constant value incompatible with single-precision float type at "); diff --git a/src/mapleall/maple_ir/src/mir_symbol.cpp b/src/mapleall/maple_ir/src/mir_symbol.cpp index 4c2ea9ad4b29c38a32d914a76694f3bd68b12b6f..479b74886f2e793a0188803c3ee02d0b107cd6f5 100644 --- a/src/mapleall/maple_ir/src/mir_symbol.cpp +++ b/src/mapleall/maple_ir/src/mir_symbol.cpp @@ -29,8 +29,9 @@ uint32 MIRSymbol::lastPrintedLineNum = 0; uint16 MIRSymbol::lastPrintedColumnNum = 0; bool MIRSymbol::NeedGOT(bool doPIE) const { - return (storageClass == kScExtern) || (!doPIE && 
((storageClass == kScGlobal) || - (sKind == kStFunc && !GetFunction()->GetFuncAlias()->IsStatic()))); + return (storageClass == kScExtern) || + (!doPIE && ((storageClass == kScGlobal) || (sKind == kStFunc && !GetFunction()->GetFuncAlias()->IsStatic()))) || + (sKind == kStFunc && !GetFunction()->IsStatic() && !GetFunction()->HasBody()); } bool MIRSymbol::IsTypeVolatile(int fieldID) const { diff --git a/src/mapleall/maple_ir/src/mir_type.cpp b/src/mapleall/maple_ir/src/mir_type.cpp index 342b4fa773f0b75ad76a316204842961def48c49..9b65f21a54d5a9d68d6628b364860c495a0bbf78 100644 --- a/src/mapleall/maple_ir/src/mir_type.cpp +++ b/src/mapleall/maple_ir/src/mir_type.cpp @@ -195,47 +195,47 @@ bool NeedCvtOrRetype(PrimType origin, PrimType compared) { } uint8 GetPointerSize() { -#if TARGX86 || TARGARM32 || TARGVM +#if (defined(TARGX86) && TARGX86) || (defined(TARGARM32) && TARGARM32) || (defined(TARGVM) && TARGVM) return 4; -#elif TARGX86_64 +#elif defined(TARGX86_64) && TARGX86_64 return 8; -#elif TARGAARCH64 +#elif defined(TARGAARCH64) && TARGAARCH64 ASSERT(Triple::GetTriple().GetEnvironment() != Triple::UnknownEnvironment, "Triple must be initialized before using"); uint8 size = (Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) ? 4 : 8; return size; #else - #error "Unsupported target" +#error "Unsupported target" #endif } uint8 GetP2Size() { -#if TARGX86 || TARGARM32 || TARGVM +#if (defined(TARGX86) && TARGX86) || (defined(TARGARM32) && TARGARM32) || (defined(TARGVM) && TARGVM) return 2; -#elif TARGX86_64 +#elif defined(TARGX86_64) && TARGX86_64 return 3; -#elif TARGAARCH64 +#elif defined(TARGAARCH64) && TARGAARCH64 ASSERT(Triple::GetTriple().GetEnvironment() != Triple::UnknownEnvironment, "Triple must be initialized before using"); uint8 size = (Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) ? 
2 : 3; return size; #else - #error "Unsupported target" +#error "Unsupported target" #endif } PrimType GetLoweredPtrType() { -#if TARGX86 || TARGARM32 || TARGVM +#if (defined(TARGX86) && TARGX86) || (defined(TARGARM32) && TARGARM32) || (defined(TARGVM) && TARGVM) return PTY_a32; -#elif TARGX86_64 +#elif defined(TARGX86_64) && TARGX86_64 return PTY_a64; -#elif TARGAARCH64 +#elif defined(TARGAARCH64) && TARGAARCH64 ASSERT(Triple::GetTriple().GetEnvironment() != Triple::UnknownEnvironment, "Triple must be initialized before using"); auto pty = (Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) ? PTY_a32 : PTY_a64; return pty; #else - #error "Unsupported target" +#error "Unsupported target" #endif } @@ -248,24 +248,24 @@ uint32 GetPrimTypeSize(PrimType primType) { switch (primType) { case PTY_void: case PTY_agg: - return k0BitSize; + return k0ByteSize; case PTY_ptr: case PTY_ref: return GetPointerSize(); case PTY_u1: case PTY_i8: case PTY_u8: - return k1BitSize; + return k1ByteSize; case PTY_i16: case PTY_u16: - return k2BitSize; + return k2ByteSize; case PTY_a32: case PTY_f32: case PTY_i32: case PTY_u32: case PTY_simplestr: case PTY_simpleobj: - return k4BitSize; + return k4ByteSize; case PTY_a64: case PTY_c64: case PTY_f64: @@ -280,7 +280,7 @@ uint32 GetPrimTypeSize(PrimType primType) { case PTY_v2f32: case PTY_v1i64: case PTY_v1u64: - return k8BitSize; + return k8ByteSize; case PTY_u128: case PTY_i128: case PTY_c128: @@ -295,7 +295,7 @@ uint32 GetPrimTypeSize(PrimType primType) { case PTY_v16u8: case PTY_v2f64: case PTY_v4f32: - return k16BitSize; + return k16ByteSize; #ifdef DYNAMICLANG case PTY_dynf32: case PTY_dyni32: @@ -304,13 +304,13 @@ uint32 GetPrimTypeSize(PrimType primType) { case PTY_dynundef: case PTY_dynnull: case PTY_dynbool: - return k8BitSize; + return k8ByteSize; case PTY_dynany: case PTY_dynf64: - return k8BitSize; + return k8ByteSize; #endif default: - return k0BitSize; + return k0ByteSize; } } @@ -323,17 +323,17 @@ uint32 
GetPrimTypeP2Size(PrimType primType) { case PTY_u1: case PTY_i8: case PTY_u8: - return k0BitSize; + return k0ByteSize; case PTY_i16: case PTY_u16: - return k1BitSize; + return k1ByteSize; case PTY_a32: case PTY_f32: case PTY_i32: case PTY_u32: case PTY_simplestr: case PTY_simpleobj: - return k2BitSize; + return k2ByteSize; case PTY_a64: case PTY_c64: case PTY_f64: @@ -346,7 +346,7 @@ uint32 GetPrimTypeP2Size(PrimType primType) { case PTY_v4u16: case PTY_v8u8: case PTY_v2f32: - return k3BitSize; + return k3ByteSize; case PTY_c128: case PTY_f128: case PTY_v2i64: @@ -359,7 +359,7 @@ uint32 GetPrimTypeP2Size(PrimType primType) { case PTY_v16u8: case PTY_v2f64: case PTY_v4f32: - return k4BitSize; + return k4ByteSize; #ifdef DYNAMICLANG case PTY_dynf32: case PTY_dyni32: @@ -370,11 +370,11 @@ uint32 GetPrimTypeP2Size(PrimType primType) { case PTY_dynbool: case PTY_dynany: case PTY_dynf64: - return k3BitSize; + return k3ByteSize; #endif default: ASSERT(false, "Power-of-2 size only applicable to sizes of 1, 2, 4, 8 or 16 bytes."); - return k10BitSize; + return k10ByteSize; } } @@ -1048,7 +1048,7 @@ bool MIRClassType::IsExceptionType() const { } FieldID MIRClassType::GetLastFieldID() const { - FieldID fieldID = fields.size(); + FieldID fieldID = static_cast(fields.size()); if (parentTyIdx != 0u) { const auto *parentClassType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(parentTyIdx)); @@ -1159,9 +1159,9 @@ static void DumpFields(FieldVector fields, int indent, bool otherFields = false) fa.DumpAttributes(); if (fa.GetAttr(FLDATTR_static) && fa.GetAttr(FLDATTR_final) && (fa.GetAttr(FLDATTR_public) || fa.GetAttr(FLDATTR_protected))) { - const char *fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fields[i].first).c_str(); MIRSymbol *fieldVar = - GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(fieldName)); + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName( + 
GlobalTables::GetStrTable().GetStringFromStrIdx(fields[i].first).c_str())); if (fieldVar != nullptr && fieldVar->GetKonst() != nullptr && fieldVar->GetKonst()->GetKind() == kConstStr16Const) { LogInfo::MapleLogger() << " = "; @@ -1360,23 +1360,27 @@ size_t MIRStructType::GetSize() const { uint32 MIRStructType::GetAlign() const { if (fields.size() == 0) { - return 0; + return 1; } uint32 maxAlign = 1; + uint32 maxZeroBitFieldAlign = 1; + auto structPack = GetTypeAttrs().GetPack(); for (size_t i = 0; i < fields.size(); ++i) { TyIdxFieldAttrPair tfap = GetTyidxFieldAttrPair(i); MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tfap.first); - uint32 algn = fieldType->GetAlign(); - if (fieldType->GetKind() == kTypeBitField) { - algn = GetPrimTypeSize(fieldType->GetPrimType()); - } else { - algn = std::max(algn, tfap.second.GetAlign()); - } - if (maxAlign < algn) { - maxAlign = algn; + auto attrAlign = tfap.second.GetAlign(); + auto originAlign = std::max(attrAlign, fieldType->GetAlign()); + uint32 fieldAlign = tfap.second.IsPacked() ? 
static_cast(1U) : std::min(originAlign, structPack); + CHECK_FATAL(fieldAlign != 0, "expect fieldAlign not equal 0"); + maxAlign = std::max(maxAlign, fieldAlign); + if (fieldType->IsMIRBitFieldType() && static_cast(fieldType)->GetFieldSize() == 0) { + maxZeroBitFieldAlign = std::max(maxZeroBitFieldAlign, GetPrimTypeSize(fieldType->GetPrimType())); } } - return std::min(maxAlign, GetTypeAttrs().GetPack()); + if (HasZeroWidthBitField()) { + return std::max(maxZeroBitFieldAlign, maxAlign); + } + return maxAlign; } void MIRStructType::DumpFieldsAndMethods(int indent, bool hasMethod) const { @@ -1630,9 +1634,9 @@ int64 MIRArrayType::GetBitOffsetFromArrayAddress(std::vector &indexArray) CHECK_FATAL(indexArray.size() == dim, "dimension mismatch!"); } int64 sum = 0; // element numbers before the specified element - uint32 numberOfElemInLowerDim = 1; + uint64 numberOfElemInLowerDim = 1; for (uint32 id = 1; id <= dim; ++id) { - sum += indexArray[dim - id] * numberOfElemInLowerDim; + sum += static_cast(static_cast(indexArray[dim - id]) * numberOfElemInLowerDim); numberOfElemInLowerDim *= sizeArray[dim - id]; } size_t elemsize = GetElemType()->GetSize(); @@ -1640,8 +1644,8 @@ int64 MIRArrayType::GetBitOffsetFromArrayAddress(std::vector &indexArray) return 0; } elemsize = RoundUp(elemsize, typeAttrs.GetAlign()); - constexpr int64 bitsPerByte = 8; - int64 offset = static_cast(sum) * elemsize * static_cast(bitsPerByte); + constexpr uint64 bitsPerByte = 8; + int64 offset = static_cast(static_cast(sum) * elemsize * bitsPerByte); if (GetElemType()->GetKind() == kTypeArray && indexArray.size() > dim) { std::vector subIndexArray(indexArray.begin() + dim, indexArray.end()); offset += static_cast(GetElemType())->GetBitOffsetFromArrayAddress(subIndexArray); @@ -1649,7 +1653,7 @@ int64 MIRArrayType::GetBitOffsetFromArrayAddress(std::vector &indexArray) return offset; } -size_t MIRArrayType::ElemNumber() { +size_t MIRArrayType::ElemNumber() const { size_t elemNum = 1; for (uint16 id = 0; 
id < dim; ++id) { elemNum *= sizeArray[id]; @@ -1918,7 +1922,7 @@ bool MIRGenericInstantType::EqualTo(const MIRType &type) const { // in the search, curfieldid is being decremented until it reaches 1 FieldPair MIRStructType::TraverseToFieldRef(FieldID &fieldID) const { - if (!fields.size()) { + if (fields.empty()) { return FieldPair(GStrIdx(0), TyIdxFieldAttrPair(TyIdx(0), FieldAttrs())); } @@ -1948,7 +1952,7 @@ FieldPair MIRStructType::TraverseToField(FieldID fieldID) const { return TraverseToFieldRef(fieldID); } // in parentfields - uint32 parentFieldIdx = -fieldID; + uint32 parentFieldIdx = static_cast(-fieldID); if (parentFields.empty() || parentFieldIdx > parentFields.size()) { return { GStrIdx(0), TyIdxFieldAttrPair(TyIdx(0), FieldAttrs()) }; } @@ -1965,6 +1969,22 @@ static bool TraverseToFieldInFields(const FieldVector &fields, const GStrIdx &fi return false; } +// On the ARM platform, when using both zero-sized bitfields and the pack attribute simultaneously, +// the size of a struct should be calculated according to the default alignment without the pack attribute. 
+bool MIRStructType::HasZeroWidthBitField() const { +#ifndef TARGAARCH64 + return false; +#endif + for (FieldPair field : fields) { + TyIdx fieldTyIdx = field.second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + if (fieldType->GetKind() == kTypeBitField && fieldType->GetSize() == 0) { + return true; + } + } + return false; +} + FieldPair MIRStructType::TraverseToField(GStrIdx fieldStrIdx) const { FieldPair fieldPair; if ((!fields.empty() && TraverseToFieldInFields(fields, fieldStrIdx, fieldPair)) || @@ -1996,22 +2016,32 @@ bool MIRStructType::HasVolatileField() const { return HasVolatileFieldInFields(fields) || HasVolatileFieldInFields(parentFields); } -int64 MIRStructType::GetBitOffsetFromUnionBaseAddr(FieldID fieldID) const { - CHECK_FATAL(fieldID <= static_cast(NumberOfFieldIDs()), "GetBitOffsetFromUnionBaseAddr: fieldID too large"); +int64 MIRStructType::GetBitOffsetFromBaseAddr(FieldID fieldID) const { + CHECK_FATAL(fieldID <= static_cast(NumberOfFieldIDs()), "GetBitOffsetFromBaseAddr: fieldID too large"); if (fieldID == 0) { return 0; } + constexpr int64 bitsPerByte = 8; // 8 bits per byte + OffsetPair offsetPair = GetFieldOffsetFromBaseAddr(fieldID); + int64 offset = static_cast(offsetPair.byteOffset) * bitsPerByte + static_cast(offsetPair.bitOffset); + return offset; +} + +OffsetPair MIRStructType::GetFieldOffsetFromUnionBaseAddr(FieldID fieldID) const { + CHECK_FATAL(fieldID <= static_cast(NumberOfFieldIDs()), "GetBitOffsetFromUnionBaseAddr: fieldID too large"); + if (fieldID == 0) { + return {0, 0}; + } FieldID curFieldID = 1; - FieldVector fieldPairs = GetFields(); // for unions, bitfields are treated as non-bitfields - for (FieldPair field : fieldPairs) { + for (FieldPair field : fields) { TyIdx fieldTyIdx = field.second.first; MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); if (curFieldID == fieldID) { // target field id is found // offset of field in union direcly(i.e. 
not embedded in other struct) is zero. - return 0; + return {0, 0}; } else { MIRStructType *subStructType = fieldType->EmbeddedStructType(); if (subStructType == nullptr) { @@ -2024,40 +2054,39 @@ int64 MIRStructType::GetBitOffsetFromUnionBaseAddr(FieldID fieldID) const { // 1 represents subStructType itself curFieldID += static_cast(subStructType->NumberOfFieldIDs()) + 1; } else { - return subStructType->GetBitOffsetFromBaseAddr(fieldID - curFieldID); + return subStructType->GetFieldOffsetFromBaseAddr(fieldID - curFieldID); } } } } - CHECK_FATAL(false, "GetBitOffsetFromUnionBaseAddr() fails to find field"); - return kOffsetUnknown; + CHECK_FATAL(false, "GetFieldOffsetFromUnionBaseAddr() fails to find field"); + return {0, 0}; } -int64 MIRStructType::GetBitOffsetFromStructBaseAddr(FieldID fieldID) const { +OffsetPair MIRStructType::GetFieldOffsetFromStructBaseAddr(FieldID fieldID) const{ CHECK_FATAL(fieldID <= static_cast(NumberOfFieldIDs()), "GetBitOffsetFromUnionBaseAddr: fieldID too large"); if (fieldID == 0) { - return 0; + return {0, 0}; } uint64 allocedSize = 0; // space need for all fields before currentField uint64 allocedBitSize = 0; FieldID curFieldID = 1; constexpr uint32 bitsPerByte = 8; // 8 bits per byte - FieldVector fieldPairs = GetFields(); // process the struct fields // There are 3 possible kinds of field in a MIRStructureType: // case 1 : bitfield (including zero-width bitfield); - // case 2 : primtive field; + // case 2 : primitive field; // case 3 : normal (empty/non-empty) structure(struct/union) field; - for (FieldPair field : fieldPairs) { - TyIdx fieldTyIdx = field.second.first; - auto fieldAttr = field.second.second; + for (uint32 j = 0; j < fields.size(); ++j) { + TyIdx fieldTyIdx = fields[j].second.first; + auto fieldAttr = fields[j].second.second; MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); uint32 fieldBitSize = static_cast(fieldType)->GetFieldSize(); size_t fieldTypeSize = fieldType->GetSize(); 
uint32 fieldTypeSizeBits = static_cast(fieldTypeSize) * bitsPerByte; - auto originAlign = fieldType->GetKind() == kTypeBitField ? GetPrimTypeSize(fieldType->GetPrimType()) - : std::max(fieldType->GetAlign(), fieldAttr.GetAlign()); + auto originAlign = fieldType->GetKind() == kTypeBitField ? GetPrimTypeSize(fieldType->GetPrimType()) : + std::max(fieldType->GetAlign(), fieldAttr.GetAlign()); uint32 fieldAlign = fieldAttr.IsPacked() ? 1 : std::min(GetTypeAttrs().GetPack(), originAlign); auto fieldAlignBits = fieldAlign * bitsPerByte; // case 1 : bitfield (including zero-width bitfield); @@ -2065,7 +2094,7 @@ int64 MIRStructType::GetBitOffsetFromStructBaseAddr(FieldID fieldID) const { fieldTypeSizeBits = static_cast(GetPrimTypeSize(fieldType->GetPrimType())) * bitsPerByte; // Is this field is crossing the align boundary of its base type? // for example: - // struct Expamle { + // struct Example { // int32 fld1 : 30; // int32 fld2 : 3; // 30 + 3 > 32(= int32 align), cross the align boundary, start from next int32 boundary // } @@ -2093,7 +2122,8 @@ int64 MIRStructType::GetBitOffsetFromStructBaseAddr(FieldID fieldID) const { // target field id is found if (curFieldID == fieldID) { - return static_cast(allocedBitSize); + return {static_cast((allocedBitSize / fieldAlignBits) * fieldAlign), + static_cast(allocedBitSize % fieldAlignBits)}; } else { ++curFieldID; } @@ -2102,7 +2132,11 @@ int64 MIRStructType::GetBitOffsetFromStructBaseAddr(FieldID fieldID) const { allocedSize = std::max(allocedSize, RoundUp(allocedBitSize, fieldAlignBits) / bitsPerByte); continue; } // case 1 end - + // If a non-bit zero-sized field is interspersed between bit field fields, compression is not performed. 
+ if (j > 0 && fieldTypeSize == 0 && + GlobalTables::GetTypeTable().GetTypeFromTyIdx(fields[j - 1].second.first)->GetKind() == kTypeBitField) { + allocedBitSize = RoundUp(allocedBitSize, fieldAlignBits); + } bool leftOverBits = false; uint64 offset = 0; // no bit field before current field @@ -2121,10 +2155,10 @@ int64 MIRStructType::GetBitOffsetFromStructBaseAddr(FieldID fieldID) const { } // target field id is found if (curFieldID == fieldID) { - return static_cast(offset * bitsPerByte); + return {static_cast(offset), 0}; } MIRStructType *subStructType = fieldType->EmbeddedStructType(); - // case 2 : primtive field; + // case 2 : primitive field; if (subStructType == nullptr) { ++curFieldID; } else { @@ -2134,8 +2168,8 @@ int64 MIRStructType::GetBitOffsetFromStructBaseAddr(FieldID fieldID) const { if ((curFieldID + static_cast(subStructType->NumberOfFieldIDs())) < fieldID) { curFieldID += static_cast(subStructType->NumberOfFieldIDs()) + 1; // 1 represents subStructType itself } else { - int64 result = subStructType->GetBitOffsetFromBaseAddr(fieldID - curFieldID); - return result + static_cast(offset * bitsPerByte); + OffsetPair result = subStructType->GetFieldOffsetFromBaseAddr(fieldID - curFieldID); + return {result.byteOffset + static_cast(offset), result.bitOffset}; } } @@ -2147,32 +2181,36 @@ int64 MIRStructType::GetBitOffsetFromStructBaseAddr(FieldID fieldID) const { allocedBitSize = allocedSize * bitsPerByte; } } - CHECK_FATAL(false, "GetBitOffsetFromStructBaseAddr() fails to find field"); - return kOffsetUnknown; + CHECK_FATAL(false, "GetFieldOffsetFromUnionBaseAddr() fails to find field"); + return {0, 0}; } -int64 MIRStructType::GetBitOffsetFromBaseAddr(FieldID fieldID) const { +// compute the offset of the field given by fieldID within the structure type +// structy; it returns the answer in the pair (byteoffset, bitoffset) such that +// if it is a bitfield, byteoffset gives the offset of the container for +// extracting the bitfield and bitoffset is 
with respect to the current byte +OffsetPair MIRStructType::GetFieldOffsetFromBaseAddr(FieldID fieldID) const { CHECK_FATAL(fieldID <= static_cast(NumberOfFieldIDs()), "GetBitOffsetFromBaseAddr: fieldID too large"); if (fieldID == 0) { - return 0; + return {0, 0}; } switch (GetKind()) { case kTypeClass: { // NYI: should know class layout, for different language, the result is different - return kOffsetUnknown; // Invalid offset + return {0, 0}; // Invalid offset } case kTypeUnion: { - return GetBitOffsetFromUnionBaseAddr(fieldID); + return GetFieldOffsetFromUnionBaseAddr(fieldID); } case kTypeStruct: { - return GetBitOffsetFromStructBaseAddr(fieldID); + return GetFieldOffsetFromStructBaseAddr(fieldID); } default: { CHECK_FATAL(false, "Wrong type kind for MIRStructType!"); } } CHECK_FATAL(false, "Should never reach here!"); - return kOffsetUnknown; + return {0, 0}; } // Whether the memory layout of struct has paddings @@ -2396,5 +2434,174 @@ MIRType *GetElemType(const MIRType &arrayType) { } return nullptr; } + +#ifdef TARGAARCH64 +static constexpr size_t kMaxHfaOrHvaElemNumber = 4; + +bool CheckHomogeneousAggregatesBaseTypeAndAlign(const MIRType &mirType, PrimType type) { + if (mirType.GetAlign() > GetPrimTypeSize(type)) { + return false; + } + if (type == PTY_f32 || type == PTY_f64 || type == PTY_f128 || IsPrimitiveVector(type)) { + return true; + } + return false; +} + +bool IsSameHomogeneousAggregatesBaseType(PrimType type, PrimType nextType) { + if ((type == PTY_f32 || type == PTY_f64 || type == PTY_f128) && type == nextType) { + return true; + } else if (IsPrimitiveVector(type) && IsPrimitiveVector(nextType) && + GetPrimTypeSize(type) == GetPrimTypeSize(nextType)) { + return true; + } + return false; +} + +bool IsUnionHomogeneousAggregates(const MIRStructType &ty, PrimType &primType, size_t &elemNum) { + primType = PTY_begin; + elemNum = 0; + for (const auto &field : ty.GetFields()) { + MIRType *filedType = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(field.second.first); + if (filedType->GetSize() == 0) { + continue; + } + PrimType filedPrimType = PTY_begin; + size_t filedElemNum = 0; + if (!IsHomogeneousAggregates(*filedType, filedPrimType, filedElemNum, false)) { + return false; + } + primType = (primType == PTY_begin) ? filedPrimType : primType; + if (!IsSameHomogeneousAggregatesBaseType(primType, filedPrimType)) { + return false; + } + elemNum = std::max(elemNum, filedElemNum); + } + return (elemNum != 0); +} + +bool IsStructHomogeneousAggregates(const MIRStructType &ty, PrimType &primType, size_t &elemNum) { + primType = PTY_begin; + elemNum = 0; + for (const auto &field : ty.GetFields()) { + MIRType *filedType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(field.second.first); + if (filedType->GetSize() == 0) { + continue; + } + PrimType filedPrimType = PTY_begin; + size_t filedElemNum = 0; + if (!IsHomogeneousAggregates(*filedType, filedPrimType, filedElemNum, false)) { + return false; + } + elemNum += filedElemNum; + primType = (primType == PTY_begin) ? 
filedPrimType : primType; + if (elemNum > kMaxHfaOrHvaElemNumber || + !IsSameHomogeneousAggregatesBaseType(primType, filedPrimType) || + !CheckHomogeneousAggregatesBaseTypeAndAlign(ty, filedPrimType)) { + return false; + } + } + return true; +} + +bool IsArrayHomogeneousAggregates(const MIRArrayType &ty, PrimType &primType, size_t &elemNum) { + primType = PTY_begin; + elemNum = 0; + MIRType *elemMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ty.GetElemTyIdx()); + if (elemMirType->GetKind() == kTypeArray || elemMirType->GetKind() == kTypeStruct) { + if (!IsHomogeneousAggregates(*elemMirType, primType, elemNum, false)) { + return false; + } + elemNum *= ty.ElemNumber(); + } else { + primType = elemMirType->GetPrimType(); + elemNum = ty.ElemNumber(); + } + if (elemNum > kMaxHfaOrHvaElemNumber || + !CheckHomogeneousAggregatesBaseTypeAndAlign(ty, primType)) { + return false; + } + return true; +} + +bool IsHomogeneousAggregates(const MIRType &ty, PrimType &primType, size_t &elemNum, + bool firstDepth) { + if (firstDepth && ty.GetKind() == kTypeUnion) { + return IsUnionHomogeneousAggregates(static_cast(ty), primType, elemNum); + } + if (firstDepth && ty.GetKind() != kTypeStruct) { + return false; + } + primType = PTY_begin; + elemNum = 0; + if (ty.GetKind() == kTypeStruct) { + auto &structType = static_cast(ty); + return IsStructHomogeneousAggregates(structType, primType, elemNum); + } else if (ty.GetKind() == kTypeUnion) { + auto &unionType = static_cast(ty); + return IsUnionHomogeneousAggregates(unionType, primType, elemNum); + } else if (ty.GetKind() == kTypeArray) { + auto &arrType = static_cast(ty); + return IsArrayHomogeneousAggregates(arrType, primType, elemNum); + } else { + primType = ty.GetPrimType(); + elemNum = 1; + if (!CheckHomogeneousAggregatesBaseTypeAndAlign(ty, primType)) { + return false; + } + } + return (elemNum != 0); +} + +bool IsParamStructCopyToMemory(const MIRType &ty) { + if (ty.GetPrimType() == PTY_agg) { + PrimType primType = 
PTY_begin; + size_t elemNum = 0; + return !IsHomogeneousAggregates(ty, primType, elemNum) && ty.GetSize() > k16BitSize; + } + return false; +} + +bool IsReturnInMemory(const MIRType &ty) { + if (ty.GetPrimType() == PTY_agg) { + PrimType primType = PTY_begin; + size_t elemNum = 0; + return !IsHomogeneousAggregates(ty, primType, elemNum) && ty.GetSize() > k16BitSize; + } + return false; +} +#else +bool IsParamStructCopyToMemory(const MIRType &ty) { + return ty.GetPrimType() == PTY_agg && ty.GetSize() > k16BitSize; +} + +bool IsReturnInMemory(const MIRType &ty) { + return ty.GetPrimType() == PTY_agg && ty.GetSize() > k16BitSize; +} +#endif // TARGAARCH64 + +void UpdateMIRFuncTypeFirstArgRet(MIRFuncType &funcType) { + auto *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType.GetRetTyIdx()); + if (IsReturnInMemory(*retType)) { + auto ¶mTypeList = funcType.GetParamTypeList(); + auto ¶mAttrList = funcType.GetParamAttrsList(); + auto *firstArg = GlobalTables::GetTypeTable().GetOrCreatePointerType(funcType.GetRetTyIdx(), + PTY_ptr, funcType.GetRetAttrs()); + (void)paramTypeList.insert(paramTypeList.begin(), firstArg->GetTypeIndex()); + (void)paramAttrList.insert(paramAttrList.begin(), TypeAttrs()); + funcType.SetFirstArgReturn(); + funcType.SetRetTyIdx(GlobalTables::GetTypeTable().GetPrimType(PTY_void)->GetTypeIndex()); + } +} + +// Traverse type table and update func type +void UpdateMIRFuncTypeFirstArgRet() { + for (auto *mirType : GlobalTables::GetTypeTable().GetTypeTable()) { + if (mirType && mirType->IsMIRFuncType()) { + UpdateMIRFuncTypeFirstArgRet(static_cast(*mirType)); + } + } +} + } // namespace maple #endif // MIR_FEATURE_FULL diff --git a/src/mapleall/maple_ir/src/mpl2mpl_options.cpp b/src/mapleall/maple_ir/src/mpl2mpl_options.cpp index 2a6921c216b07d7d9a28e7a48f1a3b78e3ea3e65..59fafd2e1041bc8f1cbedfd612427d6ef9c1cb3b 100644 --- a/src/mapleall/maple_ir/src/mpl2mpl_options.cpp +++ b/src/mapleall/maple_ir/src/mpl2mpl_options.cpp @@ -75,7 +75,7 @@ 
maplecl::Option inlineOpt({"--inline"}, " --inline \tEnable function inlining\n" " --no-inline \tDisable function inlining\n", {driverCategory, mpl2mplCategory}, - maplecl::DisableWith("--no-inline")); + maplecl::DisableEvery({"-fno-inline", "--no-inline"})); maplecl::Option ipaClone({"--ipa-clone"}, " --ipa-clone \tEnable ipa constant_prop and clone\n" @@ -343,22 +343,22 @@ maplecl::Option buildApp({"--build-app"}, " --build-app[=0,1,2] \tbuild the app dex" " 0:off, 1:method1, 2:method2, ignore:method1\n", {mpl2mplCategory}, - maplecl::optionalValue, maplecl::Init(1)); + maplecl::kOptionalValue, maplecl::Init(1)); maplecl::Option partialAot({"--partialAot"}, " --partialAot \tenerate the detailed information for the partialAot\n", {mpl2mplCategory}, - maplecl::optionalValue); + maplecl::kOptionalValue); maplecl::Option decoupleInit({"--decouple-init"}, " --decouple-init \tdecouple the constructor method\n", {mpl2mplCategory}, - maplecl::optionalValue, maplecl::Init(1)); + maplecl::kOptionalValue, maplecl::Init(1)); maplecl::Option sourceMuid({"--source-muid"}, " --source-muid="" \tWrite the source file muid into the mpl file\n", {mpl2mplCategory}, - maplecl::optionalValue); + maplecl::kOptionalValue); maplecl::Option deferredVisit({"--deferred-visit"}, " --deferred-visit \tGenerate deferred MCC call for undefined type\n" @@ -418,7 +418,7 @@ maplecl::Option appPackageName({"--app-package-name"}, " --app-package-name \tSet APP package name\n" " \t--app-package-name=package_name\n", {mpl2mplCategory}, - maplecl::optionalValue); + maplecl::kOptionalValue); maplecl::Option checkClInvocation({"--check_cl_invocation"}, " --check_cl_invocation \tFor classloader invocation checking\n" @@ -463,7 +463,7 @@ maplecl::Option genPGOReport({"--gen-pgo-report"}, maplecl::Option inlineCache({"--inlineCache"}, " --inlineCache \tbuild inlineCache 0,1,2,3\n", {mpl2mplCategory}, - maplecl::optionalValue, maplecl::Init(0)); + maplecl::kOptionalValue, maplecl::Init(0)); maplecl::Option 
noComment({"--no-comment"}, " --no-comment \tbuild inlineCache 0:off, 1:open\n", @@ -484,7 +484,7 @@ maplecl::Option sideEffect({"--side-effect"}, maplecl::Option sideEffectWhiteList({"--side-effect-white-list"}, " --side-effect-white-list \tIPA side-effect: using function white list\n" " --side-effect-no-white-list\n", - {mpl2mplCategory}, + {driverCategory, mpl2mplCategory}, maplecl::DisableWith("--side-effect-no-white-list")); maplecl::Option dumpIPA({"--dump-ipa"}, diff --git a/src/mapleall/maple_ir/src/mpl_verify.cpp b/src/mapleall/maple_ir/src/mpl_verify.cpp index aefc83d47dad1bf3cfa6dccfbb2015fc20829b8c..1392f6b4964ded14ed7936e1eb094f6948e8a89a 100644 --- a/src/mapleall/maple_ir/src/mpl_verify.cpp +++ b/src/mapleall/maple_ir/src/mpl_verify.cpp @@ -14,7 +14,6 @@ */ #include -#include #include "bin_mplt.h" #include "mir_function.h" @@ -58,7 +57,7 @@ int main(int argc, const char *argv[]) { const char *mirInfile = nullptr; for (int i = 1; i < argc; ++i) { - if (!strncmp(argv[i], "--dump", dumpNum)) { + if (strncmp(argv[i], "--dump", dumpNum) == 0) { dumpit = true; } else if (argv[i][0] != '-') { mirInfile = argv[i]; diff --git a/src/mapleall/maple_ir/src/option.cpp b/src/mapleall/maple_ir/src/option.cpp index 046ef56c04eaadc7b6dca48fe44a6e8621dd85ca..fc4448580e3a25491c919b9c2cc93d2209c9bb12 100644 --- a/src/mapleall/maple_ir/src/option.cpp +++ b/src/mapleall/maple_ir/src/option.cpp @@ -145,6 +145,7 @@ bool Options::wpaa = false; // whole program alias analysis bool Options::doOutline = false; size_t Options::outlineThreshold = 12800; size_t Options::outlineRegionMax = 512; +bool Options::tailcall = true; Options &Options::GetInstance() { static Options instance; @@ -371,6 +372,7 @@ bool Options::SolveOptions(bool isDebug) const { maplecl::CopyIfEnabled(sideEffectWhiteList, opts::mpl2mpl::sideEffectWhiteList); maplecl::CopyIfEnabled(dumpIPA, opts::mpl2mpl::dumpIPA); maplecl::CopyIfEnabled(wpaa, opts::mpl2mpl::wpaa); + maplecl::CopyIfEnabled(tailcall, 
opts::tailcall); return true; } diff --git a/src/mapleall/maple_ir/src/parser.cpp b/src/mapleall/maple_ir/src/parser.cpp index 11306501ed66155520e75814c08f2c9d9a83aca3..f7563379a8b868f9c6ae851b930596c60e673832 100644 --- a/src/mapleall/maple_ir/src/parser.cpp +++ b/src/mapleall/maple_ir/src/parser.cpp @@ -150,12 +150,14 @@ PrimType MIRParser::GetPrimitiveType(TokenKind tk) const { MIRIntrinsicID MIRParser::GetIntrinsicID(TokenKind tk) const { switch (tk) { - default: #define DEF_MIR_INTRINSIC(P, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ...) \ case TK_##P: \ return INTRN_##P; #include "intrinsics.def" #undef DEF_MIR_INTRINSIC + default: + ASSERT(false, "\n =====GetIntrinsicID failed===== \n"); + return INTRN_UNDEFINED; } } @@ -426,7 +428,7 @@ bool MIRParser::ParsePragmaElementForArray(MIRPragmaElement &elem) { } int64 size = static_cast(lexer.GetTheIntVal()); tk = lexer.NextToken(); - if (tk != TK_coma && size) { + if (tk != TK_coma && size != 0) { Error("parsing pragma error: expecting , but get "); return false; } @@ -470,13 +472,13 @@ bool MIRParser::ParsePragmaElementForAnnotation(MIRPragmaElement &elem) { Error("parsing pragma error: expecting int but get "); return false; } - int64 size = static_cast(lexer.GetTheIntVal()); + uint64 size = lexer.GetTheIntVal(); tk = lexer.NextToken(); - if (tk != TK_coma && size) { + if (tk != TK_coma && size > 0) { Error("parsing pragma error: expecting , but get "); return false; } - for (int64 i = 0; i < size; ++i) { + for (uint64 i = 0; i < size; ++i) { auto *e0 = mod.GetMemPool()->New(mod); tk = lexer.NextToken(); if (tk != TK_label) { @@ -1382,11 +1384,7 @@ bool MIRParser::ParseFuncType(TyIdx &tyIdx) { Error("bad attribute in function ret type at "); return false; } - MIRFuncType functype(retTyIdx, vecTyIdx, vecAttrs, retTypeAttrs); - functype.funcAttrs = fAttrs; - if (varargs) { - functype.SetVarArgs(); - } + MIRFuncType functype(retTyIdx, vecTyIdx, vecAttrs, fAttrs, retTypeAttrs); tyIdx = 
GlobalTables::GetTypeTable().GetOrCreateMIRType(&functype); return true; } @@ -1914,9 +1912,9 @@ bool MIRParser::ParseDeclareVar(MIRSymbol &symbol) { std::string symbolStrName = lexer.GetName(); GStrIdx symbolStrID = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(symbolStrName); symbol.SetNameStrIdx(symbolStrID); - tk = lexer.NextToken(); + (void)lexer.NextToken(); if (ParseStorageClass(symbol)) { - lexer.NextToken(); + (void)lexer.NextToken(); } // i32 TyIdx tyIdx(0); @@ -1932,31 +1930,31 @@ bool MIRParser::ParseDeclareVar(MIRSymbol &symbol) { symbol.SetTyIdx(tyIdx); /* parse section/register attribute from inline assembly */ if (lexer.GetTokenKind() == TK_section) { - lexer.NextToken(); + (void)lexer.NextToken(); if (lexer.GetTokenKind() != TK_lparen) { Error("expect ( for section attribute but get "); return false; } - lexer.NextToken(); + (void)lexer.NextToken(); if (lexer.GetTokenKind() != TK_string) { Error("expect string literal for section attribute but get "); return false; } UStrIdx literalStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); symbol.sectionAttr = literalStrIdx; - lexer.NextToken(); + (void)lexer.NextToken(); if (lexer.GetTokenKind() != TK_rparen) { Error("expect ) for section attribute but get "); return false; } - lexer.NextToken(); + (void)lexer.NextToken(); } else if (lexer.GetTokenKind() == TK_asmattr) { /* Specifying Registers for Local Variables */ - lexer.NextToken(); + (void)lexer.NextToken(); if (lexer.GetTokenKind() != TK_lparen) { Error("expect ( for register inline-asm attribute but get "); return false; } - lexer.NextToken(); + (void)lexer.NextToken(); if (lexer.GetTokenKind() != TK_string) { Error("expect string literal for section attribute but get "); return false; @@ -1964,12 +1962,12 @@ bool MIRParser::ParseDeclareVar(MIRSymbol &symbol) { UStrIdx literalStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); symbol.asmAttr = literalStrIdx; - lexer.NextToken(); 
+ (void)lexer.NextToken(); if (lexer.GetTokenKind() != TK_rparen) { Error("expect ) for section attribute but get "); return false; } - lexer.NextToken(); + (void)lexer.NextToken(); } if (!ParseVarTypeAttrs(symbol)) { Error("bad type attribute in variable declaration at "); @@ -2044,17 +2042,17 @@ bool MIRParser::ParsePrototype(MIRFunction &func, MIRSymbol &funcSymbol, TyIdx & } std::vector vecTy; // for storing the parameter types std::vector vecAt; // for storing the parameter type attributes + FuncAttrs funcAttrs; // for storing the func type attributes // this part for parsing the argument list and return type if (lexer.GetTokenKind() != TK_lparen) { Error("expect ( for func but get "); return false; } // parse parameters - bool varArgs = false; TokenKind pmTk = lexer.NextToken(); while (pmTk != TK_rparen) { if (pmTk == TK_dotdotdot) { - varArgs = true; + funcAttrs.SetAttr(FUNCATTR_varargs); func.SetVarArgs(); pmTk = lexer.NextToken(); if (pmTk != TK_rparen) { @@ -2090,8 +2088,11 @@ bool MIRParser::ParsePrototype(MIRFunction &func, MIRSymbol &funcSymbol, TyIdx & } MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); func.SetReturnStruct(*retType); + if (func.IsFirstArgReturn()) { + funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } MIRType *funcType = - GlobalTables::GetTypeTable().GetOrCreateFunctionType(tyIdx, vecTy, vecAt, varArgs); + GlobalTables::GetTypeTable().GetOrCreateFunctionType(tyIdx, vecTy, vecAt, funcAttrs); funcTyIdx = funcType->GetTypeIndex(); funcSymbol.SetTyIdx(funcTyIdx); func.SetMIRFuncType(static_cast(funcType)); @@ -2123,7 +2124,7 @@ bool MIRParser::ParseFunction(uint32 fileIdx) { return false; } if (funcSymbol != nullptr && funcSymbol->GetSKind() == kStFunc && - funcSymbol->IsNeedForwDecl() == true && !funcSymbol->GetFunction()->GetBody()) { + funcSymbol->IsNeedForwDecl() && !funcSymbol->GetFunction()->GetBody()) { SetSrcPos(funcSymbol->GetSrcPosition(), lexer.GetLineNum()); } if (funcSymbol != nullptr) { @@ -2606,7 
+2607,7 @@ bool MIRParser::ParseScope() { return status; } -bool MIRParser::ParseScopeStmt(StmtNodePtr&) { +bool MIRParser::ParseScopeStmt(StmtNodePtr &stmt) { return ParseScope(); } @@ -2697,7 +2698,7 @@ bool MIRParser::ParseAlias() { return true; } -bool MIRParser::ParseAliasStmt(StmtNodePtr&) { +bool MIRParser::ParseAliasStmt(StmtNodePtr &stmt) { return ParseAlias(); } @@ -2931,11 +2932,11 @@ bool MIRParser::ParseMIRForFunc() { return false; } // when parsing function in mplt_inline file, set fromMpltInline as true. - if ((this->options & kParseInlineFuncBody) && curFunc) { + if ((this->options & kParseInlineFuncBody) != 0 && curFunc) { curFunc->SetFromMpltInline(true); return true; } - if ((this->options & kParseOptFunc) && curFunc) { + if ((this->options & kParseOptFunc) != 0 && curFunc) { curFunc->SetAttr(FUNCATTR_optimized); mod.AddOptFuncs(curFunc); } @@ -3409,10 +3410,6 @@ bool MIRParser::ParseMPLT(std::ifstream &mpltFile, const std::string &importFile while (!atEof) { TokenKind tokenKind = lexer.GetTokenKind(); switch (tokenKind) { - default: { - Error("expect func or var but get "); - return false; - } case TK_eof: atEof = true; break; @@ -3434,6 +3431,10 @@ bool MIRParser::ParseMPLT(std::ifstream &mpltFile, const std::string &importFile } break; } + default: { + Error("expect func or var but get "); + return false; + } } } // restore old values to continue reading from the main input file @@ -3506,7 +3507,7 @@ bool MIRParser::ParsePrototypeRemaining(MIRFunction &func, std::vector &v } void MIRParser::EmitError(const std::string &fileName) { - if (!strlen(GetError().c_str())) { + if (strlen(GetError().c_str()) == 0) { return; } mod.GetDbgInfo()->EmitMsg(); @@ -3514,7 +3515,7 @@ void MIRParser::EmitError(const std::string &fileName) { } void MIRParser::EmitWarning(const std::string &fileName) const { - if (!strlen(GetWarning().c_str())) { + if (strlen(GetWarning().c_str()) == 0) { return; } WARN(kLncWarn, "%s \n%s\n", fileName.c_str(), 
GetWarning().c_str()); diff --git a/src/mapleall/maple_ir/src/printing.cpp b/src/mapleall/maple_ir/src/printing.cpp index c6d33aa4c45b86d605429324684f4bde40678b65..5220204e36a1c8b675e90a7c6bef39e2d0aa91bd 100644 --- a/src/mapleall/maple_ir/src/printing.cpp +++ b/src/mapleall/maple_ir/src/printing.cpp @@ -24,7 +24,7 @@ void PrintIndentation(int32 indent) { int64 indentAmount = static_cast(indent) * kIndentunit; do { LogInfo::MapleLogger() << kBlankString.substr(0, indentAmount); - indentAmount -= kBlankString.length(); + indentAmount -= static_cast(kBlankString.length()); } while (indentAmount > 0); } diff --git a/src/mapleall/maple_ir/src/verify_mark.cpp b/src/mapleall/maple_ir/src/verify_mark.cpp index fa7898615351c7eea2d5cba57d219eb6bbb4bd78..564262ce13a9f9e3a4a51189f3c238f8e5574b90 100644 --- a/src/mapleall/maple_ir/src/verify_mark.cpp +++ b/src/mapleall/maple_ir/src/verify_mark.cpp @@ -14,9 +14,6 @@ */ #include "verify_mark.h" #include "verification.h" -#include "verify_annotation.h" -#include "class_hierarchy.h" -#include "utils.h" namespace maple { #ifdef NOT_USED diff --git a/src/mapleall/maple_me/BUILD.gn b/src/mapleall/maple_me/BUILD.gn index 39f740975a0e4d71967605400e0e0a1a8ae023fa..e2fadff4952ea76990b5687d934e773ac9561184 100755 --- a/src/mapleall/maple_me/BUILD.gn +++ b/src/mapleall/maple_me/BUILD.gn @@ -79,6 +79,7 @@ src_libmplme = [ "src/me_toplevel_ssa.cpp", "src/me_ssa_tab.cpp", "src/me_ssa_update.cpp", + "src/me_sra.cpp", "src/me_stmt_fre.cpp", "src/me_stmt_pre.cpp", "src/me_store_pre.cpp", @@ -132,6 +133,7 @@ src_libmplme = [ "src/lmbc_memlayout.cpp", "src/lmbc_lower.cpp", "src/mc_ssa_pre.cpp", + "src/me_tailcall.cpp", ] src_libmplmewpo = [ diff --git a/src/mapleall/maple_me/CMakeLists.txt b/src/mapleall/maple_me/CMakeLists.txt index 5937205f214acfb34189a17a2830ea68dcfbd64c..7aff562836f02fc8680e5c889613cb2919863037 100755 --- a/src/mapleall/maple_me/CMakeLists.txt +++ b/src/mapleall/maple_me/CMakeLists.txt @@ -81,6 +81,7 @@ set(src_libmplme 
src/me_toplevel_ssa.cpp src/me_ssa_tab.cpp src/me_ssa_update.cpp + src/me_sra.cpp src/me_stmt_fre.cpp src/me_stmt_pre.cpp src/me_store_pre.cpp @@ -134,6 +135,7 @@ set(src_libmplme src/lmbc_memlayout.cpp src/lmbc_lower.cpp src/mc_ssa_pre.cpp + src/me_tailcall.cpp ) set(src_libmplmewpo diff --git a/src/mapleall/maple_me/include/alias_class.h b/src/mapleall/maple_me/include/alias_class.h index f769abd8e1e651a15234494e2bdcc408905f79f6..b21290305a997cfda1620a9175230e7ba8015708 100644 --- a/src/mapleall/maple_me/include/alias_class.h +++ b/src/mapleall/maple_me/include/alias_class.h @@ -129,9 +129,10 @@ class AliasClass : public AnalysisResult { globalsMayAffectedByClinitCheck(acAlloc.Adapter()), aggsToUnion(acAlloc.Adapter()), nadsOsts(acAlloc.Adapter()), - lhsWithUndefinedOffsets(acAlloc.Adapter()), + ostWithUndefinedOffsets(acAlloc.Adapter()), assignSetOfVst(acAlloc.Adapter()), aliasSetOfOst(acAlloc.Adapter()), + addrofVstNextLevNotAllDefsSeen(acAlloc.Adapter()), vstNextLevNotAllDefsSeen(acAlloc.Adapter()), ostNotAllDefsSeen(acAlloc.Adapter()), lessThrowAlias(lessThrowAliasParam), @@ -167,13 +168,18 @@ class AliasClass : public AnalysisResult { void ApplyUnionForPhi(const PhiNode &phi); void ApplyUnionForIntrinsicCall(const IntrinsiccallNode &intrinsicCall); void ApplyUnionForCopies(StmtNode &stmt); + void ApplyUnionForDirectAssign(const StmtNode &stmt); + void ApplyUnionForIndirectAssign(const StmtNode &stmt); + void ApplyUnionForCommonDirectCalls(StmtNode &stmt); + void ApplyUnionForJavaSpecialCalls(StmtNode &stmt); + void ApplyUnionForCallAssigned(const StmtNode &stmt); void ApplyUnionForFieldsInCopiedAgg(); void ApplyUnionForElementsInCopiedArray(); bool IsGlobalOstTypeUnsafe(const OriginalSt &ost) const; void PropagateTypeUnsafe(); void PropagateTypeUnsafeVertically(const VersionSt &vst) const; MIRType *GetAliasInfoRealType(const AliasInfo &ai, const BaseNode &expr); - bool IsAddrTypeConsistent(MIRType *typeA, MIRType *typeB) const; + bool 
IsAddrTypeConsistent(const MIRType *typeA, const MIRType *typeB) const; void SetTypeUnsafeForAddrofUnion(const VersionSt *vst) const; void SetTypeUnsafeForTypeConversion(const VersionSt *lhsVst, BaseNode *rhsExpr); void CreateAssignSets(); @@ -242,7 +248,7 @@ class AliasClass : public AnalysisResult { private: bool CallHasNoSideEffectOrPrivateDefEffect(const CallNode &stmt, FuncAttrKind attrKind) const; - const FuncDesc &GetFuncDescFromCallStmt(const CallNode &stmt) const; + const FuncDesc &GetFuncDescFromCallStmt(const CallNode &callstmt) const; bool CallHasNoPrivateDefEffect(StmtNode *stmt) const; void RecordAliasAnalysisInfo(const VersionSt &vst); VersionSt *FindOrCreateVstOfExtraLevOst( @@ -252,7 +258,7 @@ class AliasClass : public AnalysisResult { void SetPtrOpndNextLevNADS(const BaseNode &opnd, VersionSt *vst, bool hasNoPrivateDefEffect); void SetPtrOpndsNextLevNADS(unsigned int start, unsigned int end, MapleVector &opnds, bool hasNoPrivateDefEffect); - void SetAggPtrFieldsNextLevNADS(const OriginalSt &ost); + void SetAggPtrFieldsNextLevNADS(const VersionSt &vst); void SetPtrFieldsOfAggNextLevNADS(const BaseNode *opnd, const VersionSt *vst); void SetAggOpndPtrFieldsNextLevNADS(MapleVector &opnds); void ApplyUnionForDassignCopy(VersionSt &lhsVst, VersionSt *rhsVst, BaseNode &rhs); @@ -264,7 +270,7 @@ class AliasClass : public AnalysisResult { void CollectMayUseForNextLevel(const VersionSt &vst, OstPtrSet &mayUseOsts, const StmtNode &stmt, bool isFirstOpnd); void CollectMayUseForIntrnCallOpnd(const StmtNode &stmt, OstPtrSet &mayDefOsts, OstPtrSet &mayUseOsts); - void CollectMayDefUseForIthOpnd(const VersionSt &vst, OstPtrSet &mayUseOsts, + void CollectMayDefUseForIthOpnd(const VersionSt &vstOfIthOpnd, OstPtrSet &mayUseOsts, const StmtNode &stmt, bool isFirstOpnd); void CollectMayDefUseForCallOpnd(const StmtNode &stmt, OstPtrSet &mayDefOsts, OstPtrSet &mayUseOsts, @@ -283,7 +289,7 @@ class AliasClass : public AnalysisResult { void CollectMayDefForDassign(const 
StmtNode &stmt, OstPtrSet &mayDefOsts); void InsertMayDefNode(OstPtrSet &mayDefOsts, AccessSSANodes *ssaPart, StmtNode &stmt, BBId bbid); void InsertMayDefDassign(StmtNode &stmt, BBId bbid); - bool IsEquivalentField(TyIdx tyIdxA, FieldID fldA, TyIdx tyIdxB, FieldID fldB) const; + bool IsEquivalentField(TyIdx tyIdxA, FieldID fieldA, TyIdx tyIdxB, FieldID fieldB) const; bool IsAliasInfoEquivalentToExpr(const AliasInfo &ai, const BaseNode *expr); void CollectMayDefForIassign(StmtNode &stmt, OstPtrSet &mayDefOsts); void InsertMayDefNodeExcludeFinalOst(OstPtrSet &mayDefOsts, AccessSSANodes *ssaPart, @@ -293,7 +299,7 @@ class AliasClass : public AnalysisResult { void InsertMayUseNodeExcludeFinalOst(const OstPtrSet &mayUseOsts, AccessSSANodes *ssaPart); void InsertMayDefUseIntrncall(StmtNode &stmt, BBId bbid); void InsertMayDefUseClinitCheck(IntrinsiccallNode &stmt, BBId bbid); - void InsertMayDefUseAsm(StmtNode &stmt, const BBId bbid); + void InsertMayDefUseAsm(StmtNode &stmt, const BBId bbID); virtual BB *GetBB(BBId id) = 0; void ProcessIdsAliasWithRoot(const std::set &idsAliasWithRoot, std::vector &newGroups); int GetOffset(const Klass &super, const Klass &base) const; @@ -310,6 +316,22 @@ class AliasClass : public AnalysisResult { return GetAssignSet(vst.GetIndex()); } + bool IsAddrofVstNextLevNotAllDefSeen(size_t vstIdx) { + if (vstIdx >= addrofVstNextLevNotAllDefsSeen.size()) { + return false; + } + return addrofVstNextLevNotAllDefsSeen[vstIdx]; + } + + void SetAddrofVstNextLevNotAllDefsSeen(size_t vstIdx) { + if (vstIdx >= addrofVstNextLevNotAllDefsSeen.size()) { + size_t bufferSize = 5; + size_t incNum = vstIdx + bufferSize - addrofVstNextLevNotAllDefsSeen.size(); + addrofVstNextLevNotAllDefsSeen.insert(addrofVstNextLevNotAllDefsSeen.end(), incNum, false); + } + addrofVstNextLevNotAllDefsSeen[vstIdx] = true; + } + bool IsNextLevNotAllDefsSeen(size_t vstIdx) { if (vstIdx >= vstNextLevNotAllDefsSeen.size()) { return false; @@ -350,11 +372,12 @@ class AliasClass : 
public AnalysisResult { MapleSet globalsAffectedByCalls; // aliased at calls; needed only when wholeProgramScope is true MapleSet globalsMayAffectedByClinitCheck; - MapleMap aggsToUnion; // aggs are copied, their fields should be unioned + MapleUnorderedMultiMap aggsToUnion; // aggs are copied, their fields should be unioned MapleSet nadsOsts; - MapleVector lhsWithUndefinedOffsets; + MapleVector ostWithUndefinedOffsets; VstIdx2AssignSet assignSetOfVst; OstIdx2AliasSet aliasSetOfOst; + AliasAttrVec addrofVstNextLevNotAllDefsSeen; AliasAttrVec vstNextLevNotAllDefsSeen; AliasAttrVec ostNotAllDefsSeen; diff --git a/src/mapleall/maple_me/include/bb.h b/src/mapleall/maple_me/include/bb.h index a810a6643a0333f8b0dd28becd8889c4cdc16626..462fca7c27c32057f44adc52b1d9b8d54cd500e1 100644 --- a/src/mapleall/maple_me/include/bb.h +++ b/src/mapleall/maple_me/include/bb.h @@ -96,7 +96,7 @@ class BB : public BaseGraphNode { succ.pop_back(); } - virtual ~BB() = default; + ~BB() override = default; SCCNode *GetSCCNode() { return sccNode; } @@ -327,6 +327,18 @@ class BB : public BaseGraphNode { return kind == kBBReturn && !stmtNodeList.empty() && stmtNodeList.back().GetOpCode() == OP_return; } + // Whether the BB is the first BB of UNLIKELY path (inferred from __builtin_expect) and it has only 1 predecessor. + bool IsImmediateUnlikelyBB() const; + // Whether the BB is the first BB of LIKELY path (inferred from __builtin_expect) and it has only 1 predecessor. + bool IsImmediateLikelyBB() const; + + // The kind of current BB must be kBBCondGoto (denoted as condBB) + // Return the UNLIKELY successor of condBB (inferred from __builtin_expect) + // Return nullptr if there is no builtin_expect info. + BB *GetUnlikelySuccOfCondBB(); + // Same as `GetUnlikelySuccOfCondBB` but returns the LIKELY successor. 
+ BB *GetLikelySuccOfCondBB(); + void FindReachableBBs(std::vector &visitedBBs) const; void FindWillExitBBs(std::vector &visitedBBs) const; const PhiNode *PhiofVerStInserted(const VersionSt &versionSt) const; @@ -367,6 +379,10 @@ class BB : public BaseGraphNode { bbLabel = idx; } + FreqType GetNodeFrequency() const override { + return frequency; + } + FreqType GetFrequency() const { return frequency; } @@ -510,6 +526,14 @@ class BB : public BaseGraphNode { mePhiList.clear(); } + FreqType GetEdgeFrequency(const BaseGraphNode &node) const override { + return GetEdgeFreq(static_cast(&node)); + } + + FreqType GetEdgeFrequency(size_t idx) const override { + return GetEdgeFreq(idx); + } + FreqType GetEdgeFreq(const BB *bb) const { auto iter = std::find(succ.begin(), succ.end(), bb); CHECK_FATAL(iter != std::end(succ), "%d is not the successor of %d", bb->UintID(), this->UintID()); diff --git a/src/mapleall/maple_me/include/cast_opt.h b/src/mapleall/maple_me/include/cast_opt.h index 8f975ae9281601425130df1b4d9aa6c41095c996..880fe7e6e0c5ad631fc9756d4484667ab8477999 100644 --- a/src/mapleall/maple_me/include/cast_opt.h +++ b/src/mapleall/maple_me/include/cast_opt.h @@ -56,6 +56,9 @@ class CastInfo { bool IsInvalid() const { return kind == CAST_unknown; } + bool IsExtension() const { + return kind == CAST_sext || kind == CAST_zext; + } CastKind kind = CAST_unknown; // CastInfo is invalid if kind is CAST_unknown PrimType srcType = PTY_begin; PrimType dstType = PTY_end; @@ -65,7 +68,7 @@ class CastInfo { class MeExprCastInfo : public CastInfo { public: explicit MeExprCastInfo(MeExpr *expr) : CastInfo(expr) {} - ~MeExprCastInfo() = default; + ~MeExprCastInfo() override = default; Opcode GetOp() override { return expr->GetOp(); @@ -78,7 +81,6 @@ class MeExprCastInfo : public CastInfo { return static_cast(expr)->GetBitsSize(); default: CHECK_FATAL(false, "NYI"); - break; } } @@ -101,7 +103,6 @@ class MeExprCastInfo : public CastInfo { } default: CHECK_FATAL(false, "NYI"); - 
break; } } }; @@ -109,7 +110,7 @@ class MeExprCastInfo : public CastInfo { class BaseNodeCastInfo : public CastInfo { public: explicit BaseNodeCastInfo(BaseNode *expr) : CastInfo(expr) {} - ~BaseNodeCastInfo() = default; + ~BaseNodeCastInfo() override = default; Opcode GetOp() override { return expr->GetOpCode(); @@ -122,7 +123,6 @@ class BaseNodeCastInfo : public CastInfo { return static_cast(expr)->GetBitsSize(); default: CHECK_FATAL(false, "NYI"); - break; } } @@ -140,7 +140,7 @@ class BaseNodeCastInfo : public CastInfo { case OP_regread: { const auto *regread = static_cast(expr); PregIdx regIdx = regread->GetRegIdx(); - MIRPreg *preg = theMIRModule->CurFunction()->GetPregItem(regIdx); + const MIRPreg *preg = theMIRModule->CurFunction()->GetPregItem(regIdx); return preg->GetPrimType(); } case OP_iread: { @@ -156,7 +156,6 @@ class BaseNodeCastInfo : public CastInfo { } default: CHECK_FATAL(false, "NYI"); - break; } } }; diff --git a/src/mapleall/maple_me/include/copy_prop.h b/src/mapleall/maple_me/include/copy_prop.h index a607a09f9f2b1704706a61a93841735ba7a3f63a..552a1d4c41c6d32c44d6927a2b7d0e12eba43c33 100644 --- a/src/mapleall/maple_me/include/copy_prop.h +++ b/src/mapleall/maple_me/include/copy_prop.h @@ -26,7 +26,7 @@ class CopyProp : public Prop { MemPool &memPool, uint32 bbVecSize, const PropConfig &config) : Prop(irMap, dom, pdom, memPool, bbVecSize, config), func(meFunc), useInfo(ui), loopInfo(loops) {} - virtual ~CopyProp() = default; + ~CopyProp() override = default; void ReplaceSelfAssign(); private: diff --git a/src/mapleall/maple_me/include/demand_driven_alias_analysis.h b/src/mapleall/maple_me/include/demand_driven_alias_analysis.h index d0b70c329d5b082572a82fbff46504a9c863b590..3c4fc97898d265a44e036432b3e4832dbd06ae27 100644 --- a/src/mapleall/maple_me/include/demand_driven_alias_analysis.h +++ b/src/mapleall/maple_me/include/demand_driven_alias_analysis.h @@ -94,6 +94,12 @@ class PEGNode { attr[kAliasAttrEscaped] = attr[kAliasAttrEscaped] || 
other->attr[kAliasAttrEscaped]; } + void UpdateAttrWhenReachingGlobalNode(const PEGNode *other) { + attr[kAliasAttrNextLevNotAllDefsSeen] = attr[kAliasAttrNextLevNotAllDefsSeen] || other->attr[kAliasAttrGlobal]; + attr[kAliasAttrEscaped] = + attr[kAliasAttrEscaped] || other->attr[kAliasAttrEscaped] || other->attr[kAliasAttrGlobal]; + } + void SetMultiDefined() { multiDefed = true; } @@ -160,9 +166,9 @@ class PEGBuilder { void BuildPEG(); private: - void UpdateAttributes(); - PtrValueRecorder BuildPEGNodeOfDread(const AddrofSSANode *dread); - PtrValueRecorder BuildPEGNodeOfAddrof(const AddrofSSANode *dread); + void UpdateAttributes() const; + PtrValueRecorder BuildPEGNodeOfDread(const AddrofSSANode *dread) const; + PtrValueRecorder BuildPEGNodeOfAddrof(const AddrofSSANode *addrof); PtrValueRecorder BuildPEGNodeOfRegread(const RegreadSSANode *regread); PtrValueRecorder BuildPEGNodeOfIread(const IreadSSANode *iread); PtrValueRecorder BuildPEGNodeOfIaddrof(const IreadNode *iaddrof); diff --git a/src/mapleall/maple_me/include/hdse.h b/src/mapleall/maple_me/include/hdse.h index 0a34e20817d18aa8c2db005b5181a6f284fe2a5b..2b7f8df39976ccd6c2a597aa9db5162ad165275d 100644 --- a/src/mapleall/maple_me/include/hdse.h +++ b/src/mapleall/maple_me/include/hdse.h @@ -42,11 +42,8 @@ class HDSE { virtual ~HDSE() = default; - void DoHDSE(); + void DoHDSESafely(const MeFunction *f, AnalysisInfoHook &anaRes); void InvokeHDSEUpdateLive(); - bool NeedUNClean() const { - return needUNClean; - } void SetRemoveRedefine(bool val) { removeRedefine = val; } @@ -56,7 +53,7 @@ class HDSE { void SetUpdateFreq(bool update) { updateFreq = update; } - bool UpdateFreq() { + bool UpdateFreq() const { return updateFreq; } @@ -89,12 +86,14 @@ class HDSE { static const uint8 kExprTypeNotNull = 2; bool decoupleStatic = false; bool needUNClean = false; // used to record if there's unreachable BB + bool cfgChanged = false; bool removeRedefine = false; // used to control if run ResolveContinuousRedefine() bool 
updateFreq = false; MapleVector verstUseCounts; // index is vstIdx std::forward_list backSubsCands; // backward substitution candidates private: + void DoHDSE(); void DseInit(); void MarkSpecialStmtRequired(); void InitIrreducibleBrRequiredStmts(); diff --git a/src/mapleall/maple_me/include/irmap.h b/src/mapleall/maple_me/include/irmap.h index 6dd2fccd86d34ed78c529bd536adf2b0e17f26c3..104f8a710759a547df8161ea700e21baddaa9042 100644 --- a/src/mapleall/maple_me/include/irmap.h +++ b/src/mapleall/maple_me/include/irmap.h @@ -26,6 +26,21 @@ class IRMapBuild; // circular dependency exists, no other choice class IRMap : public AnalysisResult { friend IRMapBuild; public: + struct IreadPairInfo { + IreadPairInfo() {} + + void SetInfoOfIvar(MeExpr &baseArg, int64 offsetArg, size_t sizeArg) { + base = &baseArg; + bitOffset += offsetArg; + byteSize = sizeArg; + } + + IvarMeExpr *ivar = nullptr; + MeExpr *base = nullptr; + int64 bitOffset = 0; + size_t byteSize = 0; + }; + IRMap(SSATab &ssaTab, MemPool &memPool, uint32 hashTableSize) : AnalysisResult(&memPool), ssaTab(ssaTab), @@ -38,16 +53,16 @@ class IRMap : public AnalysisResult { vst2Decrefs(irMapAlloc.Adapter()), exprUseInfo(&memPool) {} - virtual ~IRMap() = default; + ~IRMap() override = default; virtual BB *GetBB(BBId id) = 0; virtual BB *GetBBForLabIdx(LabelIdx lidx, PUIdx pidx = 0) = 0; MeExpr *HashMeExpr(MeExpr &meExpr); IvarMeExpr *BuildLHSIvarFromIassMeStmt(IassignMeStmt &iassignMeStmt); IvarMeExpr *BuildLHSIvar(MeExpr &baseAddr, PrimType primType, const TyIdx &tyIdx, FieldID fieldID); IvarMeExpr *BuildLHSIvar(MeExpr &baseAddr, IassignMeStmt &iassignMeStmt, FieldID fieldID); - MeExpr *CreateAddrofMeExpr(MeExpr&); - MeExpr *CreateAddroffuncMeExpr(PUIdx PuIdx); - MeExpr *CreateAddrofMeExprFromSymbol(MIRSymbol& sym, PUIdx puIdx); + MeExpr *CreateAddrofMeExpr(MeExpr &expr); + MeExpr *CreateAddroffuncMeExpr(PUIdx puIdx); + MeExpr *CreateAddrofMeExprFromSymbol(MIRSymbol &st, PUIdx puIdx); MeExpr 
*CreateIaddrofMeExpr(FieldID fieldId, TyIdx tyIdx, MeExpr *base); MeExpr *CreateIvarMeExpr(MeExpr &expr, TyIdx tyIdx, MeExpr &base); NaryMeExpr *CreateNaryMeExpr(const NaryMeExpr &nMeExpr); @@ -59,18 +74,18 @@ class IRMap : public AnalysisResult { } RegMeExpr *CreateRegRefMeExpr(const MeExpr &meExpr); VarMeExpr *GetOrCreateZeroVersionVarMeExpr(OriginalSt &ost); - VarMeExpr *CreateNewVar(GStrIdx strIdx, PrimType primType, bool isGlobal); + VarMeExpr *CreateNewVar(GStrIdx strIdx, PrimType pType, bool isGlobal); VarMeExpr *CreateNewLocalRefVarTmp(GStrIdx strIdx, TyIdx tIdx); // for creating RegMeExpr - RegMeExpr *CreateRegMeExprVersion(OriginalSt&); + RegMeExpr *CreateRegMeExprVersion(OriginalSt &pregOSt); RegMeExpr *CreateRegMeExprVersion(const RegMeExpr ®x) { return CreateRegMeExprVersion(*regx.GetOst()); } ScalarMeExpr *CreateRegOrVarMeExprVersion(OStIdx ostIdx); - RegMeExpr *CreateRegMeExpr(PrimType); - RegMeExpr *CreateRegMeExpr(MIRType&); + RegMeExpr *CreateRegMeExpr(PrimType pType); + RegMeExpr *CreateRegMeExpr(MIRType &mirType); RegMeExpr *CreateRegMeExpr(const MeExpr &meexpr) { MIRType *mirType = meexpr.GetType(); if (mirType == nullptr || mirType->GetPrimType() == PTY_agg) { @@ -82,8 +97,8 @@ class IRMap : public AnalysisResult { return CreateRegMeExpr(*mirType); } - MeExpr *ReplaceMeExprExpr(MeExpr&, const MeExpr&, MeExpr&); - bool ReplaceMeExprStmt(MeStmt&, const MeExpr&, MeExpr&); + MeExpr *ReplaceMeExprExpr(MeExpr &origExpr, const MeExpr &meExpr, MeExpr &repExpr); + bool ReplaceMeExprStmt(MeStmt &meStmt, const MeExpr &meExpr, MeExpr &repexpr); MeExpr *GetMeExprByVerID(uint32 verid) const { return verst2MeExprTable[verid]; } @@ -97,10 +112,11 @@ class IRMap : public AnalysisResult { return meExpr; } - IassignMeStmt *CreateIassignMeStmt(TyIdx, IvarMeExpr&, MeExpr&, const MapleMap&); - AssignMeStmt *CreateAssignMeStmt(ScalarMeExpr&, MeExpr&, BB&); + IassignMeStmt *CreateIassignMeStmt(TyIdx tyIdx, IvarMeExpr &lhs, MeExpr &rhs, + const MapleMap &clist); + 
AssignMeStmt *CreateAssignMeStmt(ScalarMeExpr &lhs, MeExpr &rhs, BB &currBB); void InsertMeStmtBefore(BB&, MeStmt&, MeStmt&); - MePhiNode *CreateMePhi(ScalarMeExpr&); + MePhiNode *CreateMePhi(ScalarMeExpr &meExpr); void DumpBB(const BB &bb) { int i = 0; @@ -116,15 +132,15 @@ class IRMap : public AnalysisResult { virtual void SetCurFunction(const BB&) {} MeExpr *CreateIntConstMeExpr(const IntVal &value, PrimType pType); - MeExpr *CreateIntConstMeExpr(int64, PrimType); - MeExpr *CreateConstMeExpr(PrimType, MIRConst&); - MeExpr *CreateMeExprUnary(Opcode, PrimType, MeExpr&); - MeExpr *CreateMeExprBinary(Opcode, PrimType, MeExpr&, MeExpr&); - MeExpr *CreateMeExprCompare(Opcode, PrimType, PrimType, MeExpr&, MeExpr&); - MeExpr *CreateMeExprSelect(PrimType, MeExpr&, MeExpr&, MeExpr&); - MeExpr *CreateMeExprTypeCvt(PrimType, PrimType, MeExpr&); - MeExpr *CreateMeExprRetype(PrimType, TyIdx, MeExpr&); - MeExpr *CreateMeExprExt(Opcode, PrimType, uint32, MeExpr&); + MeExpr *CreateIntConstMeExpr(int64 value, PrimType pType); + MeExpr *CreateConstMeExpr(PrimType pType, MIRConst &mirConst); + MeExpr *CreateMeExprUnary(Opcode op, PrimType pType, MeExpr &expr0); + MeExpr *CreateMeExprBinary(Opcode op, PrimType pType, MeExpr &expr0, MeExpr &expr1); + MeExpr *CreateMeExprCompare(Opcode op, PrimType resptyp, PrimType opndptyp, MeExpr &opnd0, MeExpr &opnd1); + MeExpr *CreateMeExprSelect(PrimType pType, MeExpr &expr0, MeExpr &expr1, MeExpr &expr2); + MeExpr *CreateMeExprTypeCvt(PrimType pType, PrimType opndptyp, MeExpr &opnd0); + MeExpr *CreateMeExprRetype(PrimType pType, TyIdx tyIdx, MeExpr &opnd); + MeExpr *CreateMeExprExt(Opcode op, PrimType pType, uint32 bitsSize, MeExpr &opnd); UnaryMeStmt *CreateUnaryMeStmt(Opcode op, MeExpr *opnd); UnaryMeStmt *CreateUnaryMeStmt(Opcode op, MeExpr *opnd, BB *bb, const SrcPosition *src); RetMeStmt *CreateRetMeStmt(MeExpr *opnd); @@ -148,15 +164,15 @@ class IRMap : public AnalysisResult { MeExpr *SimplifyAddExpr(const OpMeExpr *addExpr); MeExpr 
*SimplifyMulExpr(const OpMeExpr *mulExpr); MeExpr *SimplifyCmpExpr(OpMeExpr *cmpExpr); - MeExpr *SimplifySelExpr(OpMeExpr *selExpr); + MeExpr *SimplifySelExpr(const OpMeExpr *selExpr); MeExpr *SimplifyOpMeExpr(OpMeExpr *opmeexpr); MeExpr *SimplifyOrMeExpr(OpMeExpr *opmeexpr); - MeExpr *SimplifyAshrMeExpr(OpMeExpr *opmeexpr); + MeExpr *SimplifyAshrMeExpr(const OpMeExpr *opmeexpr); MeExpr *SimplifyXorMeExpr(OpMeExpr *opmeexpr); MeExpr *SimplifyDepositbits(const OpMeExpr &opmeexpr); MeExpr *SimplifyExtractbits(const OpMeExpr &opmeexpr); MeExpr *SimplifyMeExpr(MeExpr *x); - void SimplifyCastForAssign(MeStmt *assignStmt); + void SimplifyCastForAssign(MeStmt *assignStmt) const; void SimplifyAssign(AssignMeStmt *assignStmt); MeExpr *SimplifyCast(MeExpr *expr); MeExpr* SimplifyIvarWithConstOffset(IvarMeExpr *ivar, bool lhsIvar); @@ -166,6 +182,14 @@ class IRMap : public AnalysisResult { MeExpr *SimplifyIvar(IvarMeExpr *ivar, bool lhsIvar); void UpdateIncDecAttr(MeStmt &meStmt); static MIRType *GetArrayElemType(const MeExpr &opnd); + bool DealWithIaddrofWhenGetInfoOfIvar(IreadPairInfo &info) const; + bool GetInfoOfIvar(MeExpr &expr, IreadPairInfo &info) const; + MeExpr *ReadContinuousMemory(const OpMeExpr &opMeExpr); + MeExpr *OptBandWithIread(MeExpr &opnd0, MeExpr &opnd1); + MeExpr *MergeAdjacentIread(MeExpr &opnd0, MeExpr &opnd1); + bool GetIreadsInfo(MeExpr &opnd0, MeExpr &opnd1, IreadPairInfo &info0, IreadPairInfo &info1) const; + MeExpr *CreateNewIvarForAdjacentIread( + MeExpr &base0, const IvarMeExpr &ivar0, const IvarMeExpr &ivar1, PrimType ivarPTy, int64 newOffset); template T *NewInPool(Arguments&&... args) { @@ -173,7 +197,7 @@ class IRMap : public AnalysisResult { } template - T *New(Arguments&&... args) { + T *New(Arguments&&... 
args) const { return irMapAlloc.GetMemPool()->New(std::forward(args)...); } @@ -286,9 +310,9 @@ class IRMap : public AnalysisResult { bool dumpStmtNum = false; BB *curBB = nullptr; // current maple_me::BB being visited - bool ReplaceMeExprStmtOpnd(uint32, MeStmt&, const MeExpr&, MeExpr&); - void PutToBucket(uint32, MeExpr&); - const BB *GetFalseBrBB(const CondGotoMeStmt&); + bool ReplaceMeExprStmtOpnd(uint32 opndID, MeStmt &meStmt, const MeExpr &meExpr, MeExpr &repExpr); + void PutToBucket(uint32 hashIdx, MeExpr &meExpr); + const BB *GetFalseBrBB(const CondGotoMeStmt &condgoto); MeExpr *ReplaceMeExprExpr(MeExpr &origExpr, MeExpr &newExpr, size_t opndsSize, const MeExpr &meExpr, MeExpr &repExpr); MeExpr *SimplifyCompareSameExpr(OpMeExpr *opmeexpr); bool IfMeExprIsU1Type(const MeExpr *expr) const; diff --git a/src/mapleall/maple_me/include/irmap_build.h b/src/mapleall/maple_me/include/irmap_build.h index 4d300b8c56762027bfc53f3ebe4870d5c3039a74..39915ded201d398065728ce2d9cc67f346d7f465 100644 --- a/src/mapleall/maple_me/include/irmap_build.h +++ b/src/mapleall/maple_me/include/irmap_build.h @@ -43,11 +43,11 @@ class IRMapBuild { MeExpr *BuildLHSVar(const VersionSt &vst, DassignMeStmt &defMeStmt); MeExpr *BuildLHSReg(const VersionSt &vst, AssignMeStmt &defMeStmt, const RegassignNode ®assign); - void BuildChiList(MeStmt&, TypeOfMayDefList&, MapleMap&); - void BuildMustDefList(MeStmt &meStmt, TypeOfMustDefList&, MapleVector&); - void BuildMuList(TypeOfMayUseList&, MapleMap&); - void BuildPhiMeNode(BB&); - void SetMeExprOpnds(MeExpr &meExpr, BaseNode &mirNode, bool atparm, bool noProp); + void BuildChiList(MeStmt &meStmt, TypeOfMayDefList &mayDefNodes, MapleMap &outList); + void BuildMustDefList(MeStmt &meStmt, TypeOfMustDefList &mustDefList, MapleVector &mustDefMeList); + void BuildMuList(TypeOfMayUseList &mayUseList, MapleMap &muList); + void BuildPhiMeNode(BB &bb); + void SetMeExprOpnds(MeExpr &meExpr, BaseNode &mirNode, bool atParm, bool noProp); std::unique_ptr 
BuildOpMeExpr(const BaseNode &mirNode) const { auto meExpr = std::make_unique(kInvalidExprID, mirNode.GetOpCode(), @@ -76,7 +76,7 @@ class IRMapBuild { std::unique_ptr BuildNaryMeExprForArray(const BaseNode &mirNode) const; std::unique_ptr BuildNaryMeExprForIntrinsicop(const BaseNode &mirNode) const; std::unique_ptr BuildNaryMeExprForIntrinsicWithType(const BaseNode &mirNode) const; - MeExpr *BuildExpr(BaseNode&, bool atParm, bool noProp); + MeExpr *BuildExpr(BaseNode &mirNode, bool atParm, bool noProp); static void InitMeExprBuildFactory(); MeStmt *BuildMeStmtWithNoSSAPart(StmtNode &stmt); @@ -92,7 +92,7 @@ class IRMapBuild { MeStmt *BuildThrowMeStmt(StmtNode &stmt, AccessSSANodes &ssaPart); MeStmt *BuildSyncMeStmt(StmtNode &stmt, AccessSSANodes &ssaPart); MeStmt *BuildAsmMeStmt(StmtNode &stmt, AccessSSANodes &ssaPart); - MeStmt *BuildMeStmt(StmtNode&); + MeStmt *BuildMeStmt(StmtNode &stmt); static void InitMeStmtFactory(); IRMap *irMap; diff --git a/src/mapleall/maple_me/include/lfo_loop_vec.h b/src/mapleall/maple_me/include/lfo_loop_vec.h index a23905db4ae28985cb5d6975cbac2ce4568a09be..aa84b82392455967f9e10c5ad06c34e3c5623e88 100644 --- a/src/mapleall/maple_me/include/lfo_loop_vec.h +++ b/src/mapleall/maple_me/include/lfo_loop_vec.h @@ -62,7 +62,8 @@ class LoopVecInfo { void ResetStmtRHSTypeSize() { currentRHSTypeSize = 0; } bool UpdateRHSTypeSize(PrimType ptype); // record rhs node typesize // used when profileUse is true - void UpdateDoloopProfData(MIRFunction *mirFunc, DoloopNode *doLoop, int32_t vecLanes, bool isRemainder = false); + void UpdateDoloopProfData(MIRFunction &mirFunc, const DoloopNode *doLoop, + int32_t vecLanes, bool isRemainder = false) const; uint32_t largestTypeSize; // largest size type in vectorizable stmtnodes uint32_t smallestTypeSize; // smallest size type in vectorizable stmtnodes uint32_t currentRHSTypeSize; // largest size of current stmt's RHS, this is temp value and update for each stmt @@ -130,21 +131,21 @@ class LoopVectorization 
{ void Perform(); void TransformLoop(); - void VectorizeDoLoop(DoloopNode *, LoopTransPlan*); - void VectorizeStmt(BaseNode *, LoopTransPlan *); - void VectorizeExpr(BaseNode *, LoopTransPlan *, MapleVector&, uint32_t); - MIRType *GenVecType(PrimType sPrimType, uint8_t lanes) const; + void VectorizeDoLoop(DoloopNode *doloop, LoopTransPlan *tp); + void VectorizeStmt(BaseNode *node, LoopTransPlan *tp); + void VectorizeExpr(BaseNode *node, LoopTransPlan *tp, MapleVector &vectorizedNode, uint32_t depth); + MIRType *GenVecType(PrimType sPrimType, uint8 lanes) const; IntrinsicopNode *GenDupScalarExpr(BaseNode *scalar, PrimType vecPrimType); - bool ExprVectorizable(DoloopInfo *doloopInfo, LoopVecInfo*, BaseNode *x); - bool Vectorizable(DoloopInfo *doloopInfo, LoopVecInfo*, BlockNode *block); - void widenDoloop(DoloopNode *doloop, LoopTransPlan *); - DoloopNode *PrepareDoloop(DoloopNode *, LoopTransPlan *); - DoloopNode *GenEpilog(DoloopNode *) const; + bool ExprVectorizable(DoloopInfo *doloopInfo, LoopVecInfo *vecInfo, BaseNode *x); + bool Vectorizable(DoloopInfo *doloopInfo, LoopVecInfo *vecInfo, BlockNode *block); + void WidenDoloop(DoloopNode *doloop, LoopTransPlan *tp); + DoloopNode *PrepareDoloop(DoloopNode *doloop, LoopTransPlan *tp); + DoloopNode *GenEpilog(DoloopNode *doloop) const; const MemPool *GetLocalMp() const { return localMP; } const MapleMap *GetVecPlans() const { return &vecPlans; } std::string PhaseName() const { return "lfoloopvec"; } - bool CanConvert(uint32_t, uint32_t) const; - bool CanAdjustRhsConstType(PrimType, ConstvalNode *); + bool CanConvert(uint32_t lshtypeSize, uint32_t rhstypeSize) const; + bool CanAdjustRhsConstType(PrimType targetType, ConstvalNode *rhs); bool IsReductionOp(Opcode op) const; bool CanWidenOpcode(const BaseNode *target, PrimType opndType) const; IntrinsicopNode *GenSumVecStmt(BaseNode *vecTemp, PrimType vecPrimType); @@ -168,7 +169,7 @@ class LoopVectorization { RegreadNode *GenVectorReductionVar(StmtNode *stmt, 
LoopTransPlan *tp); bool IassignIsReduction(IassignNode *iassign, LoopVecInfo* vecInfo); RegreadNode *GetorNewVectorReductionVar(StmtNode *stmt, LoopTransPlan *tp); - MIRType *VectorizeIassignLhs(IassignNode *iassign, LoopTransPlan *tp); + MIRType *VectorizeIassignLhs(IassignNode &iassign, const LoopTransPlan &tp) const; void VectorizeReductionStmt(StmtNode *stmt, LoopTransPlan *tp); void GenConstVar(LoopVecInfo *vecInfo, uint8_t vecLanes); diff --git a/src/mapleall/maple_me/include/lmbc_lower.h b/src/mapleall/maple_me/include/lmbc_lower.h index 95d57b53e49bf1b144b7912c6aa325382db10190..460d9cf168e48a422b77d43ee84a962bea04d6f8 100644 --- a/src/mapleall/maple_me/include/lmbc_lower.h +++ b/src/mapleall/maple_me/include/lmbc_lower.h @@ -27,22 +27,22 @@ class LMBCLowerer { : mirModule(mod), func(f), becommon(becmmn), mirBuilder(mod->GetMIRBuilder()), globmemlayout(gmemlayout), memlayout(lmemlayout) {} - PregIdx GetSpecialRegFromSt(const MIRSymbol *); - BaseNode *LowerAddrof(AddrofNode *); - BaseNode *LowerDread(const AddrofNode *); - BaseNode *LowerDreadoff(DreadoffNode *); + PregIdx GetSpecialRegFromSt(const MIRSymbol *sym); + BaseNode *LowerAddrof(const AddrofNode *expr); + BaseNode *LowerDread(const AddrofNode *expr); + BaseNode *LowerDreadoff(DreadoffNode *dreadoff); BaseNode *LowerIread(const IreadNode &expr); - BaseNode *LowerIaddrof(IreadNode *); + BaseNode *LowerIaddrof(const IreadNode *expr); BaseNode *LowerExpr(BaseNode *expr); - void LowerAggDassign(const DassignNode *, MIRType *lhsty, int32 offset, BlockNode *); - void LowerDassign(DassignNode *, BlockNode *); - void LowerDassignoff(DassignoffNode *, BlockNode *); - void LowerIassign(IassignNode *, BlockNode *); - void LowerAggIassign(IassignNode *, MIRType *type, int32 offset, BlockNode *); + void LowerAggDassign(const DassignNode &dsnode, const MIRType *lhsty, int32 offset, BlockNode *newblk); + void LowerDassign(DassignNode *dsnode, BlockNode *newblk); + void LowerDassignoff(DassignoffNode *dsnode, 
BlockNode *newblk); + void LowerIassign(IassignNode *iassign, BlockNode *newblk); + void LowerAggIassign(const IassignNode &iassign, const MIRType *lhsty, int32 offset, BlockNode &newblk) const; void LowerReturn(NaryStmtNode &retNode, BlockNode &newblk); void LowerCall(NaryStmtNode *stmt, BlockNode *newblk); - BlockNode *LowerBlock(BlockNode *); - void FixPrototype4FirstArgReturn(IcallNode *icall); + BlockNode *LowerBlock(BlockNode *block); + void FixPrototype4FirstArgReturn(const IcallNode *icall) const; void LowerFunction(); MIRModule *mirModule; diff --git a/src/mapleall/maple_me/include/lmbc_memlayout.h b/src/mapleall/maple_me/include/lmbc_memlayout.h index fc7b532f695aba7adf4c76b97573781aa8493150..ec3374e528ea42d62636efd34494326b2df86bc8 100644 --- a/src/mapleall/maple_me/include/lmbc_memlayout.h +++ b/src/mapleall/maple_me/include/lmbc_memlayout.h @@ -24,7 +24,7 @@ namespace maple { -typedef enum { +enum MemSegmentKind { MS_unknown, MS_upformal, // for the incoming parameters that are passed on the caller's stack MS_formal, // for the incoming parameters that are passed in registers @@ -33,13 +33,13 @@ typedef enum { MS_FPbased, // addressed via offset from the frame pointer MS_GPbased, // addressed via offset from the global pointer MS_largeStructActual, // for storing large struct actuals passed by value for ARM CPU -} MemSegmentKind; +}; class MemSegment; // describes where a symbol is allocated struct SymbolAlloc { - MemSegment *mem_segment = nullptr; + MemSegment *memSegment = nullptr; int32 offset = 0; }; // class SymbolAlloc diff --git a/src/mapleall/maple_me/include/mc_ssa_pre.h b/src/mapleall/maple_me/include/mc_ssa_pre.h index f77fbc02e6527c7d188d1a7e18cb71b3e8ec7756..53c251bc4cf88187d1a080f83ee3ac7e20253cde 100644 --- a/src/mapleall/maple_me/include/mc_ssa_pre.h +++ b/src/mapleall/maple_me/include/mc_ssa_pre.h @@ -43,9 +43,15 @@ class Visit { friend class McSSAPre; private: Visit(RGNode *nd, uint32 idx) : node(nd), predIdx(idx) {} - FreqType 
AvailableCapacity() const { return node->inEdgesCap[predIdx] - node->usedCap[predIdx]; } - void IncreUsedCapacity(FreqType val) { node->usedCap[predIdx] += val; } - bool operator==(const Visit *rhs) const { return node == rhs->node && predIdx == rhs->predIdx; } + FreqType AvailableCapacity() const { + return node->inEdgesCap[predIdx] - node->usedCap[predIdx]; + } + void IncreUsedCapacity(FreqType val) { + node->usedCap[predIdx] += val; + } + bool operator==(const Visit *rhs) const { + return node == rhs->node && predIdx == rhs->predIdx; + } RGNode *node; uint32 predIdx; // the index in node's pred @@ -55,7 +61,7 @@ class Visit { class Route { friend class McSSAPre; public: - Route(MapleAllocator *alloc) : visits(alloc->Adapter()) {} + explicit Route(MapleAllocator *alloc) : visits(alloc->Adapter()) {} private: MapleVector visits; FreqType flowValue = 0; @@ -69,23 +75,26 @@ class McSSAPre : public SSAPre { occ2RGNodeMap(ssaPreAllocator.Adapter()), maxFlowRoutes(ssaPreAllocator.Adapter()), minCut(ssaPreAllocator.Adapter()) {} - virtual ~McSSAPre() = default; + ~McSSAPre() override = default; void ApplyMCSSAPRE(); - void SetPreUseProfileLimit(uint32 n) { preUseProfileLimit = n; } + void SetPreUseProfileLimit(uint32 n) { + preUseProfileLimit = n; + } private: // step 8 willbeavail void ResetMCWillBeAvail(MePhiOcc *phiOcc) const; void ComputeMCWillBeAvail() const; // step 7 max flow/min cut - bool AmongMinCut(RGNode *, uint32 idx) const; + bool AmongMinCut(const RGNode *nd, uint32 idx) const; void DumpRGToFile(); // dump reduced graph to dot file - bool IncludedEarlier(Visit **cut, Visit *curVisit, uint32 nextRouteIdx); - void RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route *route); - bool SearchRelaxedMinCut(Visit **cut, std::unordered_multiset &cutSet, uint32 nextRouteIdx, FreqType flowSoFar); + bool IncludedEarlier(Visit **cut, const Visit &curVisit, uint32 nextRouteIdx) const; + void RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route 
&route) const; + bool SearchRelaxedMinCut(Visit **cut, std::unordered_multiset &cutSet, uint32 nextRouteIdx, + FreqType flowSoFar); bool SearchMinCut(Visit **cut, std::unordered_multiset &cutSet, uint32 nextRouteIdx, FreqType flowSoFar); void DetermineMinCut(); - bool VisitANode(RGNode *node, Route *route, std::vector &visitedNodes); + bool VisitANode(RGNode &node, Route *route, std::vector &visitedNodes); bool FindAnotherRoute(); void FindMaxFlow(); // step 6 single sink @@ -95,7 +104,7 @@ class McSSAPre : public SSAPre { // step 4 graph reduction void GraphReduction(); // step 3 data flow methods - void SetPartialAnt(MePhiOpndOcc *phiOpnd) const; + void SetPartialAnt(MePhiOpndOcc &phiOpnd) const; void ComputePartialAnt() const; void ResetFullAvail(MePhiOcc *occ) const; void ComputeFullAvail() const; diff --git a/src/mapleall/maple_me/include/me_abco.h b/src/mapleall/maple_me/include/me_abco.h index fd7c710b6710e8720bc49d3e8887f12b787b8fe0..23bf7367540a6e98287f35145fd5e0d569accf4a 100644 --- a/src/mapleall/maple_me/include/me_abco.h +++ b/src/mapleall/maple_me/include/me_abco.h @@ -105,7 +105,7 @@ class MeABC { ESSABaseNode *GetOrCreateRHSNode(MeExpr &expr); void BuildPhiInGraph(MePhiNode &phi); void BuildSoloPiInGraph(const PiassignMeStmt &piMeStmt); - bool PiExecuteBeforeCurrentCheck(const PiassignMeStmt &piMeStmt); + bool PiExecuteBeforeCurrentCheck(const PiassignMeStmt &piMeStmt) const; void AddEdgePair(ESSABaseNode &from, ESSABaseNode &to, int64 value, EdgeType type); bool BuildArrayCheckInGraph(MeStmt &meStmt); bool BuildBrMeStmtInGraph(MeStmt &meStmt); diff --git a/src/mapleall/maple_me/include/me_alias_class.h b/src/mapleall/maple_me/include/me_alias_class.h index 9f16175de358d6653fb6b590eec933ea6ef77dc2..605579931470d33e3bbbf2bccf8d7fa0a133fa25 100644 --- a/src/mapleall/maple_me/include/me_alias_class.h +++ b/src/mapleall/maple_me/include/me_alias_class.h @@ -27,7 +27,7 @@ class MeAliasClass : public AliasClass { : AliasClass(memPool, mod, ssaTab, 
lessAliasAtThrow, ignoreIPA, setCalleeHasSideEffect, kh), func(func), cfg(func.GetCfg()), localMemPool(&localMemPool), enabledDebug(debug) {} - virtual ~MeAliasClass() = default; + ~MeAliasClass() override = default; void DoAliasAnalysis(); diff --git a/src/mapleall/maple_me/include/me_analyze_rc.h b/src/mapleall/maple_me/include/me_analyze_rc.h index 8fbbad7b582910371db4c671efbd4c7a70a6a622..508a18fcc9bc137febf330324f9e3be6db683878 100644 --- a/src/mapleall/maple_me/include/me_analyze_rc.h +++ b/src/mapleall/maple_me/include/me_analyze_rc.h @@ -66,7 +66,7 @@ class AnalyzeRC { void CreateCleanupIntrinsics(); void RenameRefPtrs(BB *bb); void OptimizeRC(); - void RemoveUnneededCleanups(); + void RemoveUnneededCleanups() const; void RenameUses(MeStmt &meStmt); RCItem *FindOrCreateRCItem(OriginalSt &ost); OriginalSt *GetOriginalSt(const MeExpr &refLHS) const; diff --git a/src/mapleall/maple_me/include/me_bb_analyze.h b/src/mapleall/maple_me/include/me_bb_analyze.h index 26f1d3e2e385ae9d444af13c358abc4c49c1115f..3173f3adde6035314045fe9e9b7574a6dd2e3612 100644 --- a/src/mapleall/maple_me/include/me_bb_analyze.h +++ b/src/mapleall/maple_me/include/me_bb_analyze.h @@ -26,7 +26,7 @@ class BBAnalyze : public AnalysisResult { public: BBAnalyze(MemPool &memPool, const MeFunction &f) : AnalysisResult(&memPool), meBBAlloc(&memPool), cfg(f.GetCfg()) {} - virtual ~BBAnalyze() = default; + ~BBAnalyze() override = default; void SetHotAndColdBBCountThreshold(); bool CheckBBHot(const BBId bbId) const; diff --git a/src/mapleall/maple_me/include/me_bb_layout.h b/src/mapleall/maple_me/include/me_bb_layout.h index da5d463b81c058c51b9001bfce98f2cb35fd959c..9902dd7376336eb32119716335620734747de1bc 100644 --- a/src/mapleall/maple_me/include/me_bb_layout.h +++ b/src/mapleall/maple_me/include/me_bb_layout.h @@ -21,137 +21,6 @@ #include "me_loop_analysis.h" namespace maple { -class BBChain { - public: - using iterator = MapleVector::iterator; - using const_iterator = 
MapleVector::const_iterator; - BBChain(MapleAllocator &alloc, MapleVector &bb2chain, BB *bb, uint32 inputId) - : id(inputId), bbVec(1, bb, alloc.Adapter()), bb2chain(bb2chain) { - bb2chain[bb->GetBBId()] = this; - } - - iterator begin() { - return bbVec.begin(); - } - const_iterator begin() const { - return bbVec.begin(); - } - iterator end() { - return bbVec.end(); - } - const_iterator end() const { - return bbVec.end(); - } - - bool empty() const { - return bbVec.empty(); - } - - size_t size() const { - return bbVec.size(); - } - - uint32 GetId() const { - return id; - } - - BB *GetHeader() { - CHECK_FATAL(!bbVec.empty(), "cannot get header from a empty bb chain"); - return bbVec.front(); - } - BB *GetTail() { - CHECK_FATAL(!bbVec.empty(), "cannot get tail from a empty bb chain"); - return bbVec.back(); - } - - // update unlaidPredCnt if needed. The chain is ready to layout only if unlaidPredCnt == 0 - bool IsReadyToLayout(const MapleVector *context) { - MayRecalculateUnlaidPredCnt(context); - return (unlaidPredCnt == 0); - } - - // Merge src chain to this one - void MergeFrom(BBChain *srcChain) { - CHECK_FATAL(this != srcChain, "merge same chain?"); - ASSERT_NOT_NULL(srcChain); - if (srcChain->empty()) { - return; - } - for (BB *bb : *srcChain) { - bbVec.push_back(bb); - bb2chain[bb->GetBBId()] = this; - } - srcChain->bbVec.clear(); - srcChain->unlaidPredCnt = 0; - srcChain->isCacheValid = false; - isCacheValid = false; // is this necessary? 
- } - - void UpdateSuccChainBeforeMerged(const BBChain &destChain, const MapleVector *context, - MapleSet &readyToLayoutChains) { - for (BB *bb : bbVec) { - for (BB *succ : bb->GetSucc()) { - if (context != nullptr && !(*context)[succ->GetBBId()]) { - continue; - } - if (bb2chain[succ->GetBBId()] == this || bb2chain[succ->GetBBId()] == &destChain) { - continue; - } - BBChain *succChain = bb2chain[succ->GetBBId()]; - succChain->MayRecalculateUnlaidPredCnt(context); - if (succChain->unlaidPredCnt != 0) { - --succChain->unlaidPredCnt; - } - if (succChain->unlaidPredCnt == 0) { - readyToLayoutChains.insert(succChain); - } - } - } - } - - void Dump() const { - LogInfo::MapleLogger() << "bb chain with " << bbVec.size() << " blocks: "; - for (BB *bb : bbVec) { - LogInfo::MapleLogger() << bb->GetBBId() << " "; - } - LogInfo::MapleLogger() << std::endl; - } - - void DumpOneLine() const { - for (BB *bb : bbVec) { - LogInfo::MapleLogger() << bb->GetBBId() << " "; - } - } - - private: - void MayRecalculateUnlaidPredCnt(const MapleVector *context) { - if (isCacheValid) { - return; // If cache is trustable, no need to recalculate - } - unlaidPredCnt = 0; - for (BB *bb : bbVec) { - for (BB *pred : bb->GetPred()) { - // exclude blocks out of context - if (context != nullptr && !(*context)[pred->GetBBId()]) { - continue; - } - // exclude blocks within the same chain - if (bb2chain[pred->GetBBId()] == this) { - continue; - } - ++unlaidPredCnt; - } - } - isCacheValid = true; - } - - uint32 id = 0; - MapleVector bbVec; - MapleVector &bb2chain; - uint32 unlaidPredCnt = 0; // how many predecessors are not laid out - bool isCacheValid = false; // whether unlaidPredCnt is trustable -}; - class BBLayout { public: BBLayout(MemPool &memPool, MeFunction &f, bool enabledDebug, MaplePhase *phase) @@ -161,8 +30,6 @@ class BBLayout { startTryBBVec(func.GetCfg()->GetAllBBs().size(), false, layoutAlloc.Adapter()), bbVisited(func.GetCfg()->GetAllBBs().size(), false, layoutAlloc.Adapter()), 
allEdges(layoutAlloc.Adapter()), - bb2chain(layoutAlloc.Adapter()), - readyToLayoutChains(layoutAlloc.Adapter()), laidOut(func.GetCfg()->GetAllBBs().size(), false, layoutAlloc.Adapter()), enabledDebug(enabledDebug), profValid(func.IsIRProfValid()), @@ -250,16 +117,6 @@ class BBLayout { void UpdateNewBBWithAttrTry(const BB &bb, BB &fallthru) const; void SetAttrTryForTheCanBeMovedBB(BB &bb, BB &canBeMovedBB) const; void RebuildFreq(); - bool IsBBInCurrContext(const BB &bb, const MapleVector *context) const; - void InitBBChains(); - void BuildChainForFunc(); - void BuildChainForLoops(); - void BuildChainForLoop(LoopDesc *loop, MapleVector *context); - BB *FindBestStartBBForLoop(LoopDesc *loop, const MapleVector *context); - void DoBuildChain(const BB &header, BBChain &chain, const MapleVector *context); - BB *GetBestSucc(BB &bb, const BBChain &chain, const MapleVector *context, bool considerBetterPredForSucc); - bool IsCandidateSucc(const BB &bb, const BB &succ, const MapleVector *context); - bool HasBetterLayoutPred(const BB &bb, BB &succ); MeFunction &func; MapleAllocator layoutAlloc; @@ -267,12 +124,8 @@ class BBLayout { MapleVector startTryBBVec; // record the try BB to fix the try&endtry map MapleVector bbVisited; // mark the bb as visited when accessed MapleVector allEdges; - MapleVector bb2chain; // mapping bb id to the chain that bb belongs to - MapleSet readyToLayoutChains; IdentifyLoops *meLoop = nullptr; Dominance *dom = nullptr; - uint32 rpoSearchPos = 0; // reverse post order search beginning position - bool debugChainLayout = false; bool needDealWithTryBB = false; BBId curBBId { 0 }; // to index into func.bb_vec_ to return the next BB bool bbCreated = false; // new create bb will change mefunction::bb_vec_ and diff --git a/src/mapleall/maple_me/include/me_cfg.h b/src/mapleall/maple_me/include/me_cfg.h index 9e57184ac213a3f584e293410b2625205a78182e..4ab1d5cbb5f32786d026426962e9dbbbe7b7f6b3 100644 --- a/src/mapleall/maple_me/include/me_cfg.h +++ 
b/src/mapleall/maple_me/include/me_cfg.h @@ -51,7 +51,7 @@ class MeCFG : public AnalysisResult { } } - ~MeCFG() = default; + ~MeCFG() override = default; bool IfReplaceWithAssertNonNull(const BB &bb) const; void ReplaceWithAssertnonnull(); @@ -289,6 +289,7 @@ class MeCFG : public AnalysisResult { void SetBBTryBBMap(BB *currBB, BB *tryBB) { endTryBB2TryBB[currBB] = tryBB; } + void SetTryBBByOtherEndTryBB(BB *endTryBB, BB *otherTryBB) { endTryBB2TryBB[endTryBB] = endTryBB2TryBB[otherTryBB]; } @@ -300,11 +301,11 @@ class MeCFG : public AnalysisResult { } void BBTopologicalSort(SCCOfBBs &scc); void BuildSCC(); - void UpdateBranchTarget(BB &currBB, const BB &oldTarget, BB &newTarget, MeFunction &meFunc); + void UpdateBranchTarget(BB &currBB, const BB &oldTarget, BB &newTarget, MeFunction &meFunc) const; void SwapBBId(BB &bb1, BB &bb2); void ConstructBBFreqFromStmtFreq(); void ConstructStmtFreq(); - void ConstructEdgeFreqFromBBFreq(); + void ConstructEdgeFreqFromBBFreq() const; void UpdateEdgeFreqWithBBFreq(); int VerifyBBFreq(bool checkFatal = false); void SetUpdateCFGFreq(bool b) { diff --git a/src/mapleall/maple_me/include/me_cfg_opt.h b/src/mapleall/maple_me/include/me_cfg_opt.h index 4eca13fe7688e0e97ad1feb9b4d95bd90e284bc9..8e133fa54de5cb36c1f960ad7d3a0e0da4cf9919 100644 --- a/src/mapleall/maple_me/include/me_cfg_opt.h +++ b/src/mapleall/maple_me/include/me_cfg_opt.h @@ -23,7 +23,7 @@ class MeCfgOpt { explicit MeCfgOpt(MeIRMap *irMap) : meIrMap(irMap) {} ~MeCfgOpt() = default; - bool Run(MeCFG &cfg); + bool Run(MeCFG &cfg) const; private: bool PreCheck(const MeCFG &cfg) const; diff --git a/src/mapleall/maple_me/include/me_check_cast.h b/src/mapleall/maple_me/include/me_check_cast.h index bb53a586ff729e37f6d56d3b88066e3fb4391725..30ccafa4c884bdf3e215765d8f6fb8dab392fb9e 100644 --- a/src/mapleall/maple_me/include/me_check_cast.h +++ b/src/mapleall/maple_me/include/me_check_cast.h @@ -50,7 +50,7 @@ class CheckCast { } } private: - void RemoveRedundantCheckCast(MeStmt 
&stmt, BB &bb); + void RemoveRedundantCheckCast(MeStmt &stmt, BB &bb) const; bool ProvedByAnnotationInfo(const IntrinsiccallMeStmt &callNode); void TryToResolveCall(MeStmt &meStmt); bool TryToResolveVar(VarMeExpr &var, MIRStructType *callStruct = nullptr, bool checkFirst = false); @@ -64,12 +64,12 @@ class CheckCast { void TryToResolveFuncGeneric(MIRFunction &callee, const CallMeStmt &callMeStmt, size_t thisIdx); void AddClassInheritanceInfo(MIRType &mirType); bool NeedChangeVarType(MIRStructType *varStruct, MIRStructType *callStruct); - bool ExactlyMatch(MIRStructType &varStruct, MIRStructType &callStruct); + bool ExactlyMatch(MIRStructType &varStruct, MIRStructType &callStruct) const; AnnotationType *CloneNewAnnotationType(AnnotationType *at, MIRStructType *callStruct); void AddNextNode(GenericNode &from, GenericNode &to) const; - bool RetIsGenericRelative(MIRFunction &callee); + bool RetIsGenericRelative(MIRFunction &callee) const; void DumpGenericGraph(); - void DumpGenericNode(GenericNode &node, std::ostream &out); + void DumpGenericNode(GenericNode &node, std::ostream &out) const; bool ProvedBySSI(const IntrinsiccallMeStmt &callNode); ProveRes TraverseBackProve(MeExpr &expr, MIRType &targetType, std::set &visitedPhi); diff --git a/src/mapleall/maple_me/include/me_cond_based.h b/src/mapleall/maple_me/include/me_cond_based.h index bbdfce29f6b35beb8f039a85adce19663df9a063..370b068fd9e6d926ed678d8a39d96a1e771f3124 100644 --- a/src/mapleall/maple_me/include/me_cond_based.h +++ b/src/mapleall/maple_me/include/me_cond_based.h @@ -25,19 +25,19 @@ class MeCondBased { MeCondBased(MeFunction &func, Dominance &dom, Dominance &pdom) : func(&func), dominance(&dom), postDominance(&pdom) {} ~MeCondBased() = default; - bool NullValueFromTestCond(const VarMeExpr&, const BB&, bool) const; - bool IsNotNullValue(const VarMeExpr&, const UnaryMeStmt&, const BB&) const; + bool NullValueFromTestCond(const VarMeExpr &varMeExpr, const BB &bb, bool expectedEq0) const; + bool 
IsNotNullValue(const VarMeExpr &varMeExpr, const UnaryMeStmt &assertMeStmt, const BB &bb) const; const MeFunction *GetFunc() const { return func; } private: - bool NullValueFromOneTestCond(const VarMeExpr&, const BB&, const BB&, bool) const; - bool PointerWasDereferencedBefore(const VarMeExpr&, const UnaryMeStmt&, const BB&) const; - bool PointerWasDereferencedRightAfter(const VarMeExpr&, const UnaryMeStmt&) const; - bool IsIreadWithTheBase(const VarMeExpr&, const MeExpr&) const; - bool StmtHasDereferencedBase(const MeStmt&, const VarMeExpr&) const; + bool NullValueFromOneTestCond(const VarMeExpr &varMeExpr, const BB &cdBB, const BB &bb, bool expectedEq0) const; + bool PointerWasDereferencedBefore(const VarMeExpr &var, const UnaryMeStmt &assertMeStmt, const BB &bb) const; + bool PointerWasDereferencedRightAfter(const VarMeExpr &var, const UnaryMeStmt &assertMeStmt) const; + bool IsIreadWithTheBase(const VarMeExpr &var, const MeExpr &meExpr) const; + bool StmtHasDereferencedBase(const MeStmt &stmt, const VarMeExpr &var) const; MeFunction *func; Dominance *dominance; diff --git a/src/mapleall/maple_me/include/me_dominance.h b/src/mapleall/maple_me/include/me_dominance.h index 6592d50e8cd2c08f6605130f5e76a8a113de1ea0..876a79fd8c983873287c472ea83dd0b92f49f4b8 100644 --- a/src/mapleall/maple_me/include/me_dominance.h +++ b/src/mapleall/maple_me/include/me_dominance.h @@ -27,10 +27,9 @@ MAPLE_FUNC_PHASE_DECLARE_BEGIN(MEDominance, MeFunction) Dominance *GetPdomResult() { return pdom; } - private: +OVERRIDE_DEPENDENCE Dominance *dom = nullptr; Dominance *pdom = nullptr; -OVERRIDE_DEPENDENCE MAPLE_MODULE_PHASE_DECLARE_END } // namespace maple #endif // MAPLE_ME_INCLUDE_ME_DOMINANCE_H diff --git a/src/mapleall/maple_me/include/me_fsaa.h b/src/mapleall/maple_me/include/me_fsaa.h index a6343b3c6f24e64e14bdaddbc8b2f7eafaaeb740..f23b8ad3118de276ef37dbaa91426de8512c69a6 100644 --- a/src/mapleall/maple_me/include/me_fsaa.h +++ b/src/mapleall/maple_me/include/me_fsaa.h @@ -32,7 
+32,7 @@ class FSAA { bool needUpdateSSA = false; private: - bool IfChiSameAsRHS(const VersionSt &chiOpnd, const VersionSt &rhsVst, const BaseNode &rhsOrigSrc); + bool IfChiSameAsRHS(const VersionSt &chiOpnd, const VersionSt &rhsVst, const BaseNode &rhsOrigSrc) const; void RemoveMayDefIfSameAsRHS(const IassignNode &stmt); void RemoveMayDefByIreadRHS(const IreadSSANode &rhs, TypeOfMayDefList &mayDefNodes); void RemoveMayDefByDreadRHS(const AddrofSSANode &rhs, TypeOfMayDefList &mayDefNodes); diff --git a/src/mapleall/maple_me/include/me_function.h b/src/mapleall/maple_me/include/me_function.h index 4fe826e3f70040a22fd99e3d49165e2f7d722051..5a143638f4575cd58bfd92bf15db95eca1271c78 100644 --- a/src/mapleall/maple_me/include/me_function.h +++ b/src/mapleall/maple_me/include/me_function.h @@ -31,7 +31,7 @@ namespace maple { class MeCFG; // circular dependency exists, no other choice class MeIRMap; // circular dependency exists, no other choice -#if DEBUG +#if defined(DEBUG) && DEBUG extern MIRModule *globalMIRModule; extern MeFunction *globalFunc; extern MeIRMap *globalIRMap; @@ -138,7 +138,7 @@ bool FilterNullPtr(Iterator it, Iterator endIt) { return it == endIt || *it != nullptr; } -enum MeFuncHint { +enum MeFuncHint : uint32 { kReserved = 0x00, // reserved kPlacementRCed = 0x01, // method processed by placementrc kAnalyzeRCed = 0x02, // method processed by analyzerc diff --git a/src/mapleall/maple_me/include/me_gc_lowering.h b/src/mapleall/maple_me/include/me_gc_lowering.h index a7bdbf4bb50f267580ce8f70d1a41fe851b44ea1..97922b79eab9b261096bee23ce235f8251a44ab1 100644 --- a/src/mapleall/maple_me/include/me_gc_lowering.h +++ b/src/mapleall/maple_me/include/me_gc_lowering.h @@ -42,9 +42,9 @@ class GCLowering { void HandleAssignMeStmt(MeStmt &stmt); void HandleVarAssignMeStmt(MeStmt &stmt); void HandleIvarAssignMeStmt(MeStmt &stmt); - MeExpr *GetBase(IvarMeExpr &ivar); - MIRIntrinsicID SelectWriteBarrier(const MeStmt &stmt); - MIRIntrinsicID PrepareVolatileCall(const 
MeStmt &stmt, MIRIntrinsicID intrnId); + MeExpr *GetBase(IvarMeExpr &ivar) const; + MIRIntrinsicID SelectWriteBarrier(const MeStmt &stmt) const; + MIRIntrinsicID PrepareVolatileCall(const MeStmt &stmt, MIRIntrinsicID intrnId) const; void HandleWriteReferent(IassignMeStmt &stmt); void CheckRefs(); void ParseCheckFlag(); diff --git a/src/mapleall/maple_me/include/me_gc_write_barrier_opt.h b/src/mapleall/maple_me/include/me_gc_write_barrier_opt.h index ab6e658c8d220b7ce71bce3cd40bb574a44db5ce..9e66d53af685ce60c50e16d2e0796a86dcbcba71 100644 --- a/src/mapleall/maple_me/include/me_gc_write_barrier_opt.h +++ b/src/mapleall/maple_me/include/me_gc_write_barrier_opt.h @@ -43,9 +43,9 @@ class GCWriteBarrierOpt { OStIdx GetOStIdx(MeExpr &meExpr); bool IsCall(const MeStmt &stmt) const; bool HasYieldPoint(const MeStmt &start, const MeStmt &end); - bool HasCallAfterStmt(const MeStmt &stmt); - bool HasCallBeforeStmt(const MeStmt &stmt); - bool HasCallBetweenStmt(const MeStmt &start, const MeStmt &end); + bool HasCallAfterStmt(const MeStmt &stmt) const; + bool HasCallBeforeStmt(const MeStmt &stmt) const; + bool HasCallBetweenStmt(const MeStmt &start, const MeStmt &end) const; bool IsBackEdgeDest(const BB &bb); bool HasCallInBB(const BB &bb); diff --git a/src/mapleall/maple_me/include/me_hdse.h b/src/mapleall/maple_me/include/me_hdse.h index 59fb2426568c7dbd67f661b0e30a0037e0ced369..79193239a0660225b3ea84221340865e2c91b108 100644 --- a/src/mapleall/maple_me/include/me_hdse.h +++ b/src/mapleall/maple_me/include/me_hdse.h @@ -31,7 +31,7 @@ class MeHDSE : public HDSE { dom, pdom, map, aliasClass, enabledDebug, MeOption::decoupleStatic), func(f) {} - virtual ~MeHDSE() = default; + ~MeHDSE() override = default; void BackwardSubstitution(); std::string PhaseName() const { return "hdse"; diff --git a/src/mapleall/maple_me/include/me_inequality_graph.h b/src/mapleall/maple_me/include/me_inequality_graph.h index 
34f92e646779c94529eb83b57f386c3bb94f7296..8df35687b3e34938686a9570204580bbe179cb73 100644 --- a/src/mapleall/maple_me/include/me_inequality_graph.h +++ b/src/mapleall/maple_me/include/me_inequality_graph.h @@ -245,19 +245,19 @@ class ESSABaseNode { class ESSAVarNode : public ESSABaseNode { public: ESSAVarNode(int64 i, MeExpr &e) : ESSABaseNode(i, &e, kVarNode) {} - ~ESSAVarNode() = default; + ~ESSAVarNode() override = default; }; class ESSAConstNode : public ESSABaseNode { public: ESSAConstNode(int64 i, int64 v) : ESSABaseNode(i, nullptr, kConstNode), value(v) {} - ~ESSAConstNode() = default; + ~ESSAConstNode() override = default; int64 GetValue() const { return value; } - virtual std::string GetExprID() const override { + std::string GetExprID() const override { return std::to_string(GetValue()) + " Const"; } @@ -268,13 +268,13 @@ class ESSAConstNode : public ESSABaseNode { class ESSAArrayNode : public ESSABaseNode { public: ESSAArrayNode(int64 i, MeExpr &e) : ESSABaseNode(i, &e, kArrayNode) {} - ~ESSAArrayNode() = default; + ~ESSAArrayNode() override = default; }; class ESSAPhiNode : public ESSABaseNode { public: ESSAPhiNode(int64 i, MeExpr &e) : ESSABaseNode(i, &e, kPhiNode) {} - ~ESSAPhiNode() = default; + ~ESSAPhiNode() override = default; const std::vector &GetPhiOpnds() const { return phiOpnds; @@ -318,8 +318,8 @@ class InequalityGraph { ESSAPhiNode *GetOrCreatePhiNode(MePhiNode &phiNode); ESSAArrayNode *GetOrCreateArrayNode(MeExpr &meExpr); InequalEdge *AddEdge(ESSABaseNode &from, ESSABaseNode &to, int64 value, EdgeType type) const; - void AddPhiEdge(ESSABaseNode &from, ESSABaseNode &to, EdgeType type); - void AddEdge(ESSABaseNode &from, ESSABaseNode &to, MeExpr &value, bool positive, EdgeType type); + void AddPhiEdge(ESSABaseNode &from, ESSABaseNode &to, EdgeType type) const; + void AddEdge(ESSABaseNode &from, ESSABaseNode &to, MeExpr &value, bool positive, EdgeType type) const; void ConnectTrivalEdge(); void DumpDotFile(DumpType dumpType) const; 
ESSABaseNode &GetNode(const MeExpr &meExpr); diff --git a/src/mapleall/maple_me/include/me_ir.h b/src/mapleall/maple_me/include/me_ir.h index caae3a15376a611a4ca484dd81b8bff45c751e5b..01f382433ea8213e5236b0f74a70fa9b0a740382 100644 --- a/src/mapleall/maple_me/include/me_ir.h +++ b/src/mapleall/maple_me/include/me_ir.h @@ -140,7 +140,7 @@ class MeExpr { bool ContainsVolatile() const; - bool IsTheSameWorkcand(const MeExpr&) const; + bool IsTheSameWorkcand(const MeExpr &expr) const; virtual void SetDefByStmt(MeStmt&) {} virtual MeExpr *GetOpnd(size_t) const { @@ -166,7 +166,7 @@ class MeExpr { return !kOpcodeInfo.NotPure(op); } - virtual bool IsSameVariableValue(const VarMeExpr&) const; + virtual bool IsSameVariableValue(const VarMeExpr &expr) const; MeExpr *ResolveMeExprValue(); bool CouldThrowException() const; bool IsAllOpndsIdentical(const MeExpr &meExpr) const; @@ -221,14 +221,14 @@ class ScalarMeExpr : public MeExpr { def.defStmt = nullptr; } - ~ScalarMeExpr() = default; + ~ScalarMeExpr() override = default; bool IsIdentical(MeExpr&) const { CHECK_FATAL(false, "ScalarMeExpr::IsIdentical() should not be called"); return true; } - bool IsUseSameSymbol(const MeExpr&) const override; + bool IsUseSameSymbol(const MeExpr &expr) const override; void SetDefByStmt(MeStmt &defStmt) override { defBy = kDefByStmt; @@ -345,9 +345,9 @@ class ScalarMeExpr : public MeExpr { MeStmt *GetDefByMeStmt() const; BB *GetDefByBBMeStmt(const MeCFG &cfg, MeStmtPtr &defMeStmt) const; - void Dump(const IRMap*, int32 indent = 0) const override; - BaseNode &EmitExpr(MapleAllocator&) override; - bool IsSameVariableValue(const VarMeExpr&) const override; + void Dump(const IRMap *irMap, int32 indent = 0) const override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; + bool IsSameVariableValue(const VarMeExpr &expr) const override; ScalarMeExpr *FindDefByStmt(std::set &visited); bool IsZeroVersion() const; @@ -371,16 +371,16 @@ class VarMeExpr final : public ScalarMeExpr { VarMeExpr(int32 
exprid, OriginalSt *ost, size_t vidx, PrimType ptyp) : ScalarMeExpr(exprid, ost, vidx, kMeOpVar, OP_dread, ptyp) {} - ~VarMeExpr() = default; + ~VarMeExpr() override = default; - void Dump(const IRMap*, int32 indent = 0) const override; - BaseNode &EmitExpr(MapleAllocator&) override; + void Dump(const IRMap *irMap, int32 indent = 0) const override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; bool IsValidVerIdx() const; bool IsVolatile() const override; // indicate if the variable is local variable but not a function formal variable - bool IsPureLocal(const MIRFunction&) const; - bool IsSameVariableValue(const VarMeExpr&) const override; + bool IsPureLocal(const MIRFunction &irFunc) const; + bool IsSameVariableValue(const VarMeExpr &expr) const override; VarMeExpr &ResolveVarMeValue(); bool PointsToStringLiteral(); @@ -534,7 +534,7 @@ class ConstMeExpr : public MeExpr { ConstMeExpr(int32 exprID, MIRConst *constValParam, PrimType t) : MeExpr(exprID, kMeOpConst, OP_constval, t, 0), constVal(constValParam) {} - ~ConstMeExpr() = default; + ~ConstMeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; BaseNode &EmitExpr(MapleAllocator &alloc) override; @@ -560,14 +560,20 @@ class ConstMeExpr : public MeExpr { return constVal; } - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; uint32 GetHashIndex() const override { CHECK_FATAL(constVal != nullptr, "constVal is null"); if (constVal->GetKind() == kConstInt) { auto *intConst = safe_cast(constVal); CHECK_NULL_FATAL(intConst); - return static_cast(intConst->GetExtValue()); + const IntVal &intValConst = intConst->GetValue(); + if (!intValConst.IsOneSignificantWord()) { + const uint64 *val = intValConst.GetRawData(); + return std::hash{}(val[0]) ^ std::hash{}(val[1]); + } else { + return static_cast(intValConst.GetExtValue()); + } } if (constVal->GetKind() == kConstFloatConst) { auto *floatConst = 
safe_cast(constVal); @@ -602,11 +608,11 @@ class ConststrMeExpr : public MeExpr { ConststrMeExpr(int32 exprID, UStrIdx idx, PrimType t) : MeExpr(exprID, kMeOpConststr, OP_conststr, t, 0), strIdx(idx) {} - ~ConststrMeExpr() = default; + ~ConststrMeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; - BaseNode &EmitExpr(MapleAllocator&) override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; UStrIdx GetStrIdx() const { return strIdx; @@ -626,11 +632,11 @@ class Conststr16MeExpr : public MeExpr { Conststr16MeExpr(int32 exprID, U16StrIdx idx, PrimType t) : MeExpr(exprID, kMeOpConststr16, OP_conststr16, t, 0), strIdx(idx) {} - ~Conststr16MeExpr() = default; + ~Conststr16MeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; - BaseNode &EmitExpr(MapleAllocator&) override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; U16StrIdx GetStrIdx() { return strIdx; @@ -650,11 +656,11 @@ class SizeoftypeMeExpr : public MeExpr { SizeoftypeMeExpr(int32 exprid, PrimType t, TyIdx idx) : MeExpr(exprid, kMeOpSizeoftype, OP_sizeoftype, t, 0), tyIdx(idx) {} - ~SizeoftypeMeExpr() = default; + ~SizeoftypeMeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; - BaseNode &EmitExpr(MapleAllocator&) override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; TyIdx GetTyIdx() const { return tyIdx; @@ -674,10 +680,10 @@ class FieldsDistMeExpr : public MeExpr { FieldsDistMeExpr(int32 exprid, PrimType t, TyIdx idx, FieldID f1, FieldID f2) : MeExpr(exprid, kMeOpFieldsDist, 
OP_fieldsdist, t, 0), tyIdx(idx), fieldID1(f1), fieldID2(f2) {} - ~FieldsDistMeExpr() = default; + ~FieldsDistMeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; - BaseNode &EmitExpr(MapleAllocator&) override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; TyIdx GetTyIdx() const { return tyIdx; @@ -709,12 +715,12 @@ class AddrofMeExpr : public MeExpr { AddrofMeExpr(int32 exprid, PrimType t, OriginalSt *ost) : MeExpr(exprid, kMeOpAddrof, OP_addrof, t, 0), ost(ost) {} - ~AddrofMeExpr() = default; + ~AddrofMeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; - bool IsUseSameSymbol(const MeExpr&) const override; + bool IsUseSameSymbol(const MeExpr &expr) const override; BaseNode &EmitExpr(MapleAllocator &alloc) override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; OStIdx GetOstIdx() const { return ost->GetIndex(); @@ -744,11 +750,11 @@ class AddroffuncMeExpr : public MeExpr { AddroffuncMeExpr(int32 exprID, PUIdx puIdx) : MeExpr(exprID, kMeOpAddroffunc, OP_addroffunc, PTY_ptr, 0), puIdx(puIdx) {} - ~AddroffuncMeExpr() = default; + ~AddroffuncMeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; BaseNode &EmitExpr(MapleAllocator &alloc) override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; PUIdx GetPuIdx() const { return puIdx; @@ -777,7 +783,7 @@ class AddroflabelMeExpr : public MeExpr { ~AddroflabelMeExpr() override {} - void Dump(const IRMap *, int32 indent = 0) const override; + void Dump(const IRMap *irMap, int32 indent = 0) const override; bool IsIdentical(const MeExpr *meexpr) const { if (meexpr->GetOp() != GetOp()) { return false; 
@@ -788,7 +794,7 @@ class AddroflabelMeExpr : public MeExpr { } return true; } - BaseNode &EmitExpr(MapleAllocator&) override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; uint32 GetHashIndex() const override { @@ -802,10 +808,10 @@ class GcmallocMeExpr : public MeExpr { GcmallocMeExpr(int32 exprid, Opcode o, PrimType t, TyIdx tyid) : MeExpr(exprid, kMeOpGcmalloc, o, t, 0), tyIdx(tyid) {} - ~GcmallocMeExpr() = default; + ~GcmallocMeExpr() override = default; void Dump(const IRMap*, int32 indent = 0) const override; - BaseNode &EmitExpr(MapleAllocator&) override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; TyIdx GetTyIdx() const { return tyIdx; @@ -834,7 +840,7 @@ class OpMeExpr : public MeExpr { // binary OpMeExpr(int32 exprID, Opcode o, PrimType t, MeExpr *opnd0, MeExpr *opnd1, bool order = true) : MeExpr(exprID, kMeOpOp, o, t, 2), tyIdx(TyIdx(0)) { - if (order == true) { + if (order) { SetOpndCheck(0, opnd0); SetOpndCheck(1, opnd1); } else { @@ -855,18 +861,18 @@ class OpMeExpr : public MeExpr { fieldID(opMeExpr.fieldID), hasAddressValue(opMeExpr.hasAddressValue) {} - ~OpMeExpr() = default; + ~OpMeExpr() override = default; OpMeExpr(const OpMeExpr&) = delete; OpMeExpr &operator=(const OpMeExpr&) = delete; - bool IsIdentical(const OpMeExpr &meexpr) const; + bool IsIdentical(const OpMeExpr &meExpr) const; bool IsAllOpndsIdentical(const OpMeExpr &meExpr) const; bool IsCompareIdentical(const OpMeExpr &meExpr) const; - void Dump(const IRMap*, int32 indent = 0) const override; - bool IsUseSameSymbol(const MeExpr&) const override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; - BaseNode &EmitExpr(MapleAllocator&) override; + void Dump(const IRMap *irMap, int32 indent = 0) const override; + bool IsUseSameSymbol(const MeExpr &expr) const override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; + BaseNode &EmitExpr(MapleAllocator 
&alloc) override; uint8 GetDepth() const override { return depth; } @@ -1005,17 +1011,17 @@ class IvarMeExpr : public MeExpr { IvarMeExpr() = delete; // Disable default ctor - ~IvarMeExpr() = default; + ~IvarMeExpr() override = default; - void Dump(const IRMap*, int32 indent = 0) const override; + void Dump(const IRMap *irMap, int32 indent = 0) const override; uint8 GetDepth() const override { return base->GetDepth() + 1; } - BaseNode &EmitExpr(MapleAllocator&) override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; bool IsVolatile() const override; bool IsFinal(); bool IsRCWeak() const; - bool IsUseSameSymbol(const MeExpr&) const override; + bool IsUseSameSymbol(const MeExpr &expr) const override; bool IsIdentical(IvarMeExpr &expr, bool inConstructor) const; bool IsMuListIdentical(IvarMeExpr &expr) const; MeExpr *GetIdenticalExpr(MeExpr &expr, bool inConstructor) const override; @@ -1134,6 +1140,10 @@ class IvarMeExpr : public MeExpr { } } + void Push2MuList(ScalarMeExpr &expr) { + muList.push_back(&expr); + } + uint32 GetMuCount() const { return static_cast(muList.size()); } @@ -1195,16 +1205,16 @@ class NaryMeExpr : public MeExpr { } } - ~NaryMeExpr() = default; + ~NaryMeExpr() override = default; - void Dump(const IRMap*, int32 indent = 0) const override; + void Dump(const IRMap *irMap, int32 indent = 0) const override; uint8 GetDepth() const override { return depth; } - bool IsIdentical(const NaryMeExpr&) const; - bool IsUseSameSymbol(const MeExpr&) const override; - BaseNode &EmitExpr(MapleAllocator&) override; - MeExpr *GetIdenticalExpr(MeExpr &expr, bool) const override; + bool IsIdentical(const NaryMeExpr& meExpr) const; + bool IsUseSameSymbol(const MeExpr &expr) const override; + BaseNode &EmitExpr(MapleAllocator &alloc) override; + MeExpr *GetIdenticalExpr(MeExpr &expr, bool isConstructor) const override; MeExpr *GetOpnd(size_t idx) const override { ASSERT(idx < opnds.size(), "NaryMeExpr operand out of bounds"); return opnds[idx]; @@ -1306,7 
+1316,7 @@ class MeStmt { isLive = value; } - virtual void Dump(const IRMap*) const; + virtual void Dump(const IRMap *irMap) const; MeStmt *GetNextMeStmt() const; virtual size_t NumMeStmtOpnds() const { return 0; @@ -1372,7 +1382,7 @@ class MeStmt { stmtAttrs = meStmt.stmtAttrs; } - bool IsTheSameWorkcand(const MeStmt&) const; + bool IsTheSameWorkcand(const MeStmt& mestmt) const; virtual bool NeedDecref() const { return false; } @@ -1524,6 +1534,14 @@ class MeStmt { this->stmtAttrs.AppendAttr(stmtAttr.GetTargetAttrFlag(STMTATTR_insaferegion)); } + void SetMayTailcall() { + stmtAttrs.SetAttr(STMTATTR_mayTailcall); + } + + bool GetMayTailCall() const { + return stmtAttrs.GetAttr(STMTATTR_mayTailcall); + } + const StmtAttrs &GetStmtAttr() const { return stmtAttrs; } @@ -1671,7 +1689,7 @@ class PiassignMeStmt : public MeStmt { explicit PiassignMeStmt(MapleAllocator*) : MeStmt(OP_piassign) { } - ~PiassignMeStmt() = default; + ~PiassignMeStmt() override = default; void SetLHS(VarMeExpr &l) { lhs = &l; @@ -1705,7 +1723,7 @@ class PiassignMeStmt : public MeStmt { return isToken; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; private: VarMeExpr *rhs = nullptr; @@ -1720,7 +1738,7 @@ class AssignMeStmt : public MeStmt { AssignMeStmt(Opcode op, ScalarMeExpr *theLhs, MeExpr *rhsVal) : MeStmt(op), rhs(rhsVal), lhs(theLhs) {} - ~AssignMeStmt() = default; + ~AssignMeStmt() override = default; size_t NumMeStmtOpnds() const override { return kOperandNumUnary; @@ -1734,13 +1752,13 @@ class AssignMeStmt : public MeStmt { rhs = val; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; bool NeedIncref() const override { return needIncref; } - void SetNeedIncref(bool value = true) override { + void SetNeedIncref(bool value) override { needIncref = value; } @@ -1815,7 +1833,7 @@ class DassignMeStmt : public AssignMeStmt { : AssignMeStmt(dass->GetOp(), dass->GetLHS(), dass->GetRHS()), chiList(std::less(), 
alloc->Adapter()) {} - ~DassignMeStmt() = default; + ~DassignMeStmt() override = default; const MapleMap *GetChiList() const override { return &chiList; @@ -1845,7 +1863,7 @@ class DassignMeStmt : public AssignMeStmt { wasMayDassign = value; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; ScalarMeExpr *GetVarLHS() const override { return static_cast(lhs); @@ -1893,7 +1911,7 @@ class MaydassignMeStmt : public MeStmt { fieldID(maydass.GetFieldID()), chiList(std::less(), alloc->Adapter()), needDecref(maydass.NeedDecref()), needIncref(maydass.NeedIncref()) {} - ~MaydassignMeStmt() = default; + ~MaydassignMeStmt() override = default; size_t NumMeStmtOpnds() const override { return kOperandNumUnary; @@ -1935,7 +1953,7 @@ class MaydassignMeStmt : public MeStmt { return needIncref; } - void SetNeedIncref(bool val = true) override { + void SetNeedIncref(bool val) override { needIncref = val; } @@ -1967,7 +1985,7 @@ class MaydassignMeStmt : public MeStmt { fieldID = fieldIDVal; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; ScalarMeExpr *GetLHS() const override { return chiList.find(mayDSSym->GetIndex())->second->GetLHS(); } @@ -2024,7 +2042,7 @@ class IassignMeStmt : public MeStmt { l.SetDefStmt(this); } - ~IassignMeStmt() = default; + ~IassignMeStmt() override = default; TyIdx GetTyIdx() const { return tyIdx; @@ -2079,7 +2097,7 @@ class IassignMeStmt : public MeStmt { return needIncref; } - void SetNeedIncref(bool val = true) override { + void SetNeedIncref(bool val) override { needIncref = val; } @@ -2091,7 +2109,7 @@ class IassignMeStmt : public MeStmt { needIncref = false; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; MeExpr *GetRHS() const override { return rhs; } @@ -2159,7 +2177,7 @@ class NaryMeStmt : public MeStmt { } } - virtual ~NaryMeStmt() = default; + ~NaryMeStmt() override = default; size_t NumMeStmtOpnds() const override { 
return opnds.size(); @@ -2202,8 +2220,8 @@ class NaryMeStmt : public MeStmt { (void)opnds.insert(begin, expr); } - void DumpOpnds(const IRMap*) const; - void Dump(const IRMap*) const override; + void DumpOpnds(const IRMap *irMap) const; + void Dump(const IRMap *irMap) const override; MapleMap *GetMuList() override { return nullptr; } @@ -2286,7 +2304,7 @@ class CallMeStmt : public NaryMeStmt, public MuChiMePart, public AssignedPart { AssignedPart(alloc, this, cstmt->mustDefList), puIdx(cstmt->GetPUIdx()) {} - virtual ~CallMeStmt() = default; + ~CallMeStmt() override = default; PUIdx GetPUIdx() const { return puIdx; @@ -2302,7 +2320,7 @@ class CallMeStmt : public NaryMeStmt, public MuChiMePart, public AssignedPart { return stmtID; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; MapleMap *GetMuList() override { return &muList; } @@ -2315,7 +2333,7 @@ class CallMeStmt : public NaryMeStmt, public MuChiMePart, public AssignedPart { return &chiList; } - void SetChiListAndUpdateBase(MapleMap &list) { + void SetChiListAndUpdateBase(const MapleMap &list) { chiList = list; for (auto &chiNode : chiList) { chiNode.second->SetBase(this); @@ -2330,7 +2348,7 @@ class CallMeStmt : public NaryMeStmt, public MuChiMePart, public AssignedPart { return &mustDefList; } - void SetMustDefListAndUpdateBase(MapleVector &list) { + void SetMustDefListAndUpdateBase(const MapleVector &list) { mustDefList = list; for (auto &mustDef : mustDefList) { mustDef.SetBase(this); @@ -2397,7 +2415,7 @@ class CallMeStmt : public NaryMeStmt, public MuChiMePart, public AssignedPart { MIRFunction &GetTargetFunction(); StmtNode &EmitStmt(MapleAllocator &alloc) override; - void SetCallReturn(ScalarMeExpr&); + void SetCallReturn(ScalarMeExpr& curexpr); private: PUIdx puIdx = 0; @@ -2432,9 +2450,9 @@ class IcallMeStmt : public NaryMeStmt, public MuChiMePart, public AssignedPart { retTyIdx(idx), stmtID(id) {} - virtual ~IcallMeStmt() = default; + ~IcallMeStmt() override 
= default; - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; MapleMap *GetMuList() override { return &muList; } @@ -2556,9 +2574,9 @@ class IntrinsiccallMeStmt : public NaryMeStmt, public MuChiMePart, public Assign tyIdx(idx), retPType(type) {} - virtual ~IntrinsiccallMeStmt() = default; + ~IntrinsiccallMeStmt() override = default; - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; MapleMap *GetMuList() override { return &muList; } @@ -2687,8 +2705,8 @@ class AsmMeStmt : public NaryMeStmt, public MuChiMePart, public AssignedPart { gotoLabels = stt->gotoLabels; } - virtual ~AsmMeStmt() = default; - void Dump(const IRMap*) const override; + ~AsmMeStmt() override = default; + void Dump(const IRMap *irMap) const override; MapleMap *GetMuList() override { return &muList; } @@ -2721,9 +2739,9 @@ class RetMeStmt : public NaryMeStmt { RetMeStmt(MapleAllocator *alloc, const StmtNode *stt) : NaryMeStmt(alloc, stt), muList(std::less(), alloc->Adapter()) {} - ~RetMeStmt() = default; + ~RetMeStmt() override = default; - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; MapleMap *GetMuList() override { return &muList; } @@ -2741,7 +2759,9 @@ class UnaryMeStmt : public MeStmt { explicit UnaryMeStmt(const UnaryMeStmt *umestmt) : MeStmt(umestmt->GetOp()), opnd(umestmt->opnd) {} - virtual ~UnaryMeStmt() = default; + ~UnaryMeStmt() override = default; + + UnaryMeStmt(const UnaryMeStmt &other) = default; size_t NumMeStmtOpnds() const override { return kOperandNumUnary; @@ -2763,22 +2783,27 @@ class UnaryMeStmt : public MeStmt { opnd = val; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; StmtNode &EmitStmt(MapleAllocator &alloc) override; private: MeExpr *opnd = nullptr; + UnaryMeStmt &operator=(const UnaryMeStmt &other) = default; }; class SafetyCallCheckMeStmt { public: SafetyCallCheckMeStmt(GStrIdx funcNameIdx, size_t 
paramIndex, GStrIdx stmtFuncNameIdx) : funcNameIdx(funcNameIdx), paramIndex(paramIndex), stmtFuncNameIdx(stmtFuncNameIdx) {} - explicit SafetyCallCheckMeStmt(const SafetyCallCheckMeStmt& stmt) + + // for this copy constructor, no need to add explicit + SafetyCallCheckMeStmt(const SafetyCallCheckMeStmt& stmt) : funcNameIdx(stmt.GetFuncNameIdx()), paramIndex(stmt.GetParamIndex()), stmtFuncNameIdx(stmt.GetStmtFuncNameIdx()) {} + SafetyCallCheckMeStmt& operator=(const SafetyCallCheckMeStmt&) = default; + virtual ~SafetyCallCheckMeStmt() = default; const std::string& GetFuncName() const { @@ -2822,8 +2847,16 @@ class SafetyCheckMeStmt { protected: explicit SafetyCheckMeStmt(GStrIdx funcNameIdx) : funcNameIdx(funcNameIdx) {} - explicit SafetyCheckMeStmt(const SafetyCheckMeStmt& stmt) + + SafetyCheckMeStmt(const SafetyCheckMeStmt& stmt) : funcNameIdx(stmt.GetFuncNameIdx()) {} + + SafetyCheckMeStmt& operator=(const SafetyCheckMeStmt& stmt) { + if (this != &stmt) { + funcNameIdx = stmt.GetFuncNameIdx(); + } + return *this; + } SafetyCheckMeStmt() {} private: @@ -2836,10 +2869,8 @@ class AssertNonnullMeStmt : public UnaryMeStmt, public SafetyCheckMeStmt { : UnaryMeStmt(stt), SafetyCheckMeStmt(stt->GetFuncNameIdx()) {} explicit AssertNonnullMeStmt(const UnaryStmtNode *stt) : UnaryMeStmt(stt), SafetyCheckMeStmt() {} - explicit AssertNonnullMeStmt(const AssertNonnullMeStmt &stt) - : UnaryMeStmt(&stt), SafetyCheckMeStmt(static_cast(stt)) {} - ~AssertNonnullMeStmt() = default; - StmtNode &EmitStmt(MapleAllocator &alloc); + ~AssertNonnullMeStmt() override = default; + StmtNode &EmitStmt(MapleAllocator &alloc) override; }; class CallAssertNonnullMeStmt : public UnaryMeStmt, public SafetyCallCheckMeStmt { @@ -2853,8 +2884,8 @@ class CallAssertNonnullMeStmt : public UnaryMeStmt, public SafetyCallCheckMeStmt : UnaryMeStmt(stt), SafetyCallCheckMeStmt(static_cast(stt)) {} explicit CallAssertNonnullMeStmt(const CallAssertNonnullMeStmt *stt) : UnaryMeStmt(*stt), 
SafetyCallCheckMeStmt(static_cast(*stt)) {} - ~CallAssertNonnullMeStmt() = default; - StmtNode &EmitStmt(MapleAllocator &alloc); + ~CallAssertNonnullMeStmt() override = default; + StmtNode &EmitStmt(MapleAllocator &alloc) override; }; class AssertBoundaryMeStmt : public NaryMeStmt, public SafetyCheckMeStmt { @@ -2864,7 +2895,7 @@ class AssertBoundaryMeStmt : public NaryMeStmt, public SafetyCheckMeStmt { AssertBoundaryMeStmt(MapleAllocator *alloc, const AssertBoundaryMeStmt &stt) : NaryMeStmt(alloc, static_cast(&stt)), SafetyCheckMeStmt(static_cast(stt)) {} - ~AssertBoundaryMeStmt() = default; + ~AssertBoundaryMeStmt() override = default; StmtNode &EmitStmt(MapleAllocator &alloc) override; }; @@ -2876,7 +2907,7 @@ class CallAssertBoundaryMeStmt : public NaryMeStmt, public SafetyCallCheckMeStmt CallAssertBoundaryMeStmt(MapleAllocator *alloc, const CallAssertBoundaryMeStmt &stt) : NaryMeStmt(alloc, static_cast(&stt)), SafetyCallCheckMeStmt(static_cast(stt)) {} - ~CallAssertBoundaryMeStmt() = default; + ~CallAssertBoundaryMeStmt() override = default; StmtNode &EmitStmt(MapleAllocator &alloc) override; }; @@ -2884,9 +2915,11 @@ class GotoMeStmt : public MeStmt { public: explicit GotoMeStmt(const StmtNode *stt) : MeStmt(stt), offset(static_cast(stt)->GetOffset()) {} explicit GotoMeStmt(const GotoMeStmt &condGoto) : MeStmt(MeStmt(condGoto.GetOp())), offset(condGoto.GetOffset()) {} + GotoMeStmt& operator=(const GotoMeStmt &condGoto) = default; + explicit GotoMeStmt(uint32 o) : MeStmt(OP_goto), offset(o) {} - ~GotoMeStmt() = default; + ~GotoMeStmt() override = default; uint32 GetOffset() const { return offset; @@ -2896,7 +2929,7 @@ class GotoMeStmt : public MeStmt { offset = o; } - StmtNode &EmitStmt(MapleAllocator &alloc); + StmtNode &EmitStmt(MapleAllocator &alloc) override; private: uint32 offset; // the label @@ -2915,7 +2948,7 @@ class CondGotoMeStmt : public UnaryMeStmt { CondGotoMeStmt(const UnaryMeStmt &unaryMeStmt, uint32 o) : UnaryMeStmt(&unaryMeStmt), offset(o) 
{} - ~CondGotoMeStmt() = default; + ~CondGotoMeStmt() override = default; uint32 GetOffset() const { return offset; @@ -2949,7 +2982,7 @@ class CondGotoMeStmt : public UnaryMeStmt { } } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; StmtNode &EmitStmt(MapleAllocator &alloc) override; private: @@ -2964,9 +2997,9 @@ class JsTryMeStmt : public MeStmt { catchOffset(static_cast(stt)->GetCatchOffset()), finallyOffset(static_cast(stt)->GetFinallyOffset()) {} - ~JsTryMeStmt() = default; + ~JsTryMeStmt() override = default; - StmtNode &EmitStmt(MapleAllocator &alloc); + StmtNode &EmitStmt(MapleAllocator &alloc) override; private: uint16 catchOffset; @@ -2977,7 +3010,7 @@ class TryMeStmt : public MeStmt { public: TryMeStmt(MapleAllocator *alloc, const StmtNode *stt) : MeStmt(stt), offsets(alloc->Adapter()) {} - ~TryMeStmt() = default; + ~TryMeStmt() override = default; void OffsetsPushBack(LabelIdx curr) { offsets.push_back(curr); @@ -2987,7 +3020,7 @@ class TryMeStmt : public MeStmt { return offsets; } - StmtNode &EmitStmt(MapleAllocator &alloc); + StmtNode &EmitStmt(MapleAllocator &alloc) override; private: MapleVector offsets; @@ -3001,9 +3034,9 @@ class CatchMeStmt : public MeStmt { } } - ~CatchMeStmt() = default; + ~CatchMeStmt() override = default; - StmtNode &EmitStmt(MapleAllocator &alloc); + StmtNode &EmitStmt(MapleAllocator &alloc) override; const MapleVector &GetExceptionTyIdxVec() const { return exceptionTyIdxVec; } @@ -3019,8 +3052,8 @@ class CppCatchMeStmt : public MeStmt { (void)alloc; } - ~CppCatchMeStmt() = default; - StmtNode &EmitStmt(MapleAllocator &alloc); + ~CppCatchMeStmt() override = default; + StmtNode &EmitStmt(MapleAllocator &alloc) override; }; class SwitchMeStmt : public UnaryMeStmt { @@ -3032,7 +3065,7 @@ class SwitchMeStmt : public UnaryMeStmt { switchTable = static_cast(stt)->GetSwitchTable(); } - ~SwitchMeStmt() = default; + ~SwitchMeStmt() override = default; LabelIdx GetDefaultLabel() const { 
return defaultLabel; @@ -3054,7 +3087,7 @@ class SwitchMeStmt : public UnaryMeStmt { return switchTable; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; StmtNode &EmitStmt(MapleAllocator &alloc) override; private: @@ -3068,9 +3101,9 @@ class CommentMeStmt : public MeStmt { comment = static_cast(stt)->GetComment(); } - ~CommentMeStmt() = default; + ~CommentMeStmt() override = default; - StmtNode &EmitStmt(MapleAllocator &alloc); + StmtNode &EmitStmt(MapleAllocator &alloc) override; const MapleString& GetComment() { return comment; } const MapleString& GetComment() const { return comment; } private: @@ -3082,7 +3115,7 @@ class WithMuMeStmt : public MeStmt { WithMuMeStmt(MapleAllocator *alloc, const StmtNode *stt) : MeStmt(stt), muList(std::less(), alloc->Adapter()) {} - virtual ~WithMuMeStmt() = default; + ~WithMuMeStmt() override = default; MapleMap *GetMuList() override { return &muList; @@ -3101,9 +3134,9 @@ class GosubMeStmt : public WithMuMeStmt { GosubMeStmt(MapleAllocator *alloc, const StmtNode *stt) : WithMuMeStmt(alloc, stt), offset(static_cast(stt)->GetOffset()) {} - ~GosubMeStmt() = default; + ~GosubMeStmt() override = default; - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; StmtNode &EmitStmt(MapleAllocator &alloc) override; private: @@ -3114,7 +3147,7 @@ class ThrowMeStmt : public WithMuMeStmt { public: ThrowMeStmt(MapleAllocator *alloc, const StmtNode *stt) : WithMuMeStmt(alloc, stt) {} - ~ThrowMeStmt() = default; + ~ThrowMeStmt() override = default; size_t NumMeStmtOpnds() const override { return kOperandNumUnary; @@ -3136,7 +3169,7 @@ class ThrowMeStmt : public WithMuMeStmt { opnd = val; } - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; StmtNode &EmitStmt(MapleAllocator &alloc) override; private: @@ -3147,9 +3180,9 @@ class SyncMeStmt : public NaryMeStmt, public MuChiMePart { public: SyncMeStmt(MapleAllocator *alloc, const 
StmtNode *stt) : NaryMeStmt(alloc, stt), MuChiMePart(alloc) {} - ~SyncMeStmt() = default; + ~SyncMeStmt() override = default; - void Dump(const IRMap*) const override; + void Dump(const IRMap *irMap) const override; MapleMap *GetMuList() override { return &muList; } diff --git a/src/mapleall/maple_me/include/me_irmap.h b/src/mapleall/maple_me/include/me_irmap.h index ffd3da8c9cd30b9a5cac66aea502206d4e835a18..a2bc6f510cc4a0cd780bd533ce58d2a520527c76 100644 --- a/src/mapleall/maple_me/include/me_irmap.h +++ b/src/mapleall/maple_me/include/me_irmap.h @@ -29,7 +29,7 @@ class MeIRMap : public IRMap { SetDumpStmtNum(MeOption::stmtNum); } - ~MeIRMap() = default; + ~MeIRMap() override = default; BB *GetBB(BBId id) override { return cfg->GetBBFromID(id); diff --git a/src/mapleall/maple_me/include/me_jump_threading.h b/src/mapleall/maple_me/include/me_jump_threading.h index ebd95a4b2f7902d7994784fcbc2b61581a76c110..035181ddd4d56cae5e3143941612ff7669aa8fc7 100644 --- a/src/mapleall/maple_me/include/me_jump_threading.h +++ b/src/mapleall/maple_me/include/me_jump_threading.h @@ -36,7 +36,9 @@ class JumpThreading { stmtCostAnalyzer(costAnalyzer), cands(candsTem) { path = std::make_unique>(); } - ~JumpThreading() = default; + ~JumpThreading() { + loops = nullptr; + } void Execute(); @@ -57,7 +59,7 @@ class JumpThreading { bool FindSubPathFromUseToDefOfExpr(BB &defBB, BB &useBB, std::vector &subPath, std::set &visited); bool CanJumpThreading(BB &bb, MeExpr &opnd0, MeExpr *opnd1 = nullptr); bool CanJumpThreadingWithSwitch(BB &bb, ValueRange *vrOfOpnd0); - bool CanJumpThreadingWithCondGoto(BB &bb, MeExpr *opnd1, ValueRange *vrOfOpnd0); + bool CanJumpThreadingWithCondGoto(BB &bb, MeExpr *opnd, ValueRange *vrOfOpnd0); void FindPathWhenDefPointInCurrBB(BB &defBB, BB &predBB, MeExpr &opnd0, MeExpr *opnd1 = nullptr); void FindPathWhenDefPointIsNotInCurrBB(BB &defBB, BB &useBB, MeExpr &opnd0, MeExpr *opnd1 = nullptr); void FindPathWhenDefByIsNotStmtAndPhi(BB &defBB, BB &predBB, 
CompareOpnds &cmpOpnds); diff --git a/src/mapleall/maple_me/include/me_loop_analysis.h b/src/mapleall/maple_me/include/me_loop_analysis.h index 7f45f814a79e880bd85223117977f449a801a5f7..7e7fbf7350db509b26f3f974bf49c4224a2537fd 100644 --- a/src/mapleall/maple_me/include/me_loop_analysis.h +++ b/src/mapleall/maple_me/include/me_loop_analysis.h @@ -131,7 +131,7 @@ class IdentifyLoops : public AnalysisResult { meLoops(meLoopAlloc.Adapter()), bbLoopParent(func.GetCfg()->GetAllBBs().size(), nullptr, meLoopAlloc.Adapter()) {} - virtual ~IdentifyLoops() = default; + ~IdentifyLoops() override = default; const MapleVector &GetMeLoops() const { return meLoops; @@ -150,10 +150,10 @@ class IdentifyLoops : public AnalysisResult { LoopDesc *CreateLoopDesc(BB &hd, BB &tail); void SetLoopParent4BB(const BB &bb, LoopDesc &loopDesc); - void SetExitBB(LoopDesc &loop); + void SetExitBB(LoopDesc &loop) const; void ProcessBB(BB *bb); void Dump() const; - void ProcessPreheaderAndLatch(LoopDesc &loop); + void ProcessPreheaderAndLatch(LoopDesc &loop) const; private: MemPool *meLoopMemPool; diff --git a/src/mapleall/maple_me/include/me_loop_inversion.h b/src/mapleall/maple_me/include/me_loop_inversion.h index 984b819e5d1b762399b665b100e5baff508230c5..33fa500859db8278a18bd58b48751550aa359e8b 100644 --- a/src/mapleall/maple_me/include/me_loop_inversion.h +++ b/src/mapleall/maple_me/include/me_loop_inversion.h @@ -31,7 +31,7 @@ class MeLoopInversion { private: using Key = std::pair; - void Convert(MeFunction &func, BB &bb, BB &pred, MapleMap &swapSuccs); + void Convert(MeFunction &func, BB &bb, BB &pred, MapleMap &swapSuccs) const; bool NeedConvert(MeFunction *func, BB &bb, BB &pred, MapleAllocator &localAlloc, MapleMap &swapSuccs) const; diff --git a/src/mapleall/maple_me/include/me_loop_unrolling.h b/src/mapleall/maple_me/include/me_loop_unrolling.h index 647a618ac2686911111567a16114dd3d4d1a696f..cd9ed2d18c40b38b9c638ce2c6b72445a91fb028 100644 --- 
a/src/mapleall/maple_me/include/me_loop_unrolling.h +++ b/src/mapleall/maple_me/include/me_loop_unrolling.h @@ -59,7 +59,7 @@ class LoopUnrolling { } private: - bool SplitCondGotoBB(); + bool SplitCondGotoBB() const; VarMeExpr *CreateIndVarOrTripCountWithName(const std::string &name); void RemoveCondGoto(); diff --git a/src/mapleall/maple_me/include/me_merge_stmts.h b/src/mapleall/maple_me/include/me_merge_stmts.h index 4807eadca6cf753a4708751158e1c5e5f7e97edd..bdac85bea3f7aa553abba550e749ac8fab2b527c 100644 --- a/src/mapleall/maple_me/include/me_merge_stmts.h +++ b/src/mapleall/maple_me/include/me_merge_stmts.h @@ -29,19 +29,19 @@ class MergeStmts { void MergeMeStmts(); private: - int32 GetStructFieldBitSize(const MIRStructType *structType, FieldID fieldID); - void mergeIassigns(vOffsetStmt& iassignCandidates); - void mergeDassigns(vOffsetStmt& dassignCandidates); - int32 GetPointedTypeBitSize(TyIdx ptrTypeIdx); - IassignMeStmt *genSimdIassign(int32 offset, IvarMeExpr iVar1, IvarMeExpr iVar2, + int32 GetStructFieldBitSize(const MIRStructType *structType, FieldID fieldID) const; + void MergeIassigns(vOffsetStmt& iassignCandidates); + void MergeDassigns(vOffsetStmt& dassignCandidates); + int32 GetPointedTypeBitSize(TyIdx ptrTypeIdx) const; + IassignMeStmt *GenSimdIassign(int32 offset, IvarMeExpr iVar1, IvarMeExpr iVar2, const MapleMap &stmtChi, TyIdx ptrTypeIdx); - IassignMeStmt *genSimdIassign(int32 offset, IvarMeExpr iVar, MeExpr &valMeExpr, + IassignMeStmt *GenSimdIassign(int32 offset, IvarMeExpr iVar, MeExpr &valMeExpr, const MapleMap &stmtChi, TyIdx ptrTypeIdx); void GenShortSet(MeExpr *dstMeExpr, uint32 offset, const MIRType *uXTgtMirType, RegMeExpr *srcRegMeExpr, IntrinsiccallMeStmt* memsetCallStmt, const MapleMap &memsetCallStmtChi); - void simdMemcpy(IntrinsiccallMeStmt* memcpyCallStmt); - void simdMemset(IntrinsiccallMeStmt* memsetCallStmt); + void SimdMemcpy(IntrinsiccallMeStmt* memcpyCallStmt); + void SimdMemset(IntrinsiccallMeStmt* memsetCallStmt); 
MeFunction &func; }; diff --git a/src/mapleall/maple_me/include/me_obj_size.h b/src/mapleall/maple_me/include/me_obj_size.h index 083a2c5229cafb954c6696c52e9a5f9233e07878..116df732560e46369951286fba805e01d2314773 100644 --- a/src/mapleall/maple_me/include/me_obj_size.h +++ b/src/mapleall/maple_me/include/me_obj_size.h @@ -51,8 +51,10 @@ class OBJSize { size_t DealWithSprintfAndVsprintf(const CallMeStmt &callMeStmt, const MIRFunction &calleeFunc); size_t DealWithArray(const MeExpr &opnd, int64 type) const; size_t DealWithAddrof(const MeExpr &opnd, bool getSizeOfWholeVar) const; - size_t DealWithDread(const MeExpr &opnd, int64 type, bool getMaxSizeOfObjs) const; + size_t DealWithDread(MeExpr &opnd, int64 type, bool getMaxSizeOfObjs) const; size_t DealWithIaddrof(const MeExpr &opnd, int64 type, bool getSizeOfWholeVar) const; + bool DealWithOpnd(const MeExpr &opnd, std::set &visitedPhi) const; + bool PhiOpndIsDefPointOfOtherPhi(MeExpr& expr, std::set& visitedPhi) const; MeFunction &func; MeIRMap &irMap; diff --git a/src/mapleall/maple_me/include/me_option.h b/src/mapleall/maple_me/include/me_option.h index 044f1e57baea9c78ea42357a8d5b4c86f3cc4639..c5f2f6be7dcd7190f25c726f3e48598b00e782de 100644 --- a/src/mapleall/maple_me/include/me_option.h +++ b/src/mapleall/maple_me/include/me_option.h @@ -188,6 +188,7 @@ class MeOption { static bool enableLFO; static uint8 rematLevel; static bool layoutWithPredict; + static bool layoutColdPath; static bool unifyRets; static bool dumpCfgOfPhases; static bool epreUseProfile; @@ -201,7 +202,7 @@ class MeOption { static SafetyCheckMode boundaryCheckMode; static bool safeRegionMode; // safety check option end -#if MIR_JAVA +#if defined(MIR_JAVA) && MIR_JAVA static std::string acquireFuncName; static std::string releaseFuncName; static unsigned int warningLevel; diff --git a/src/mapleall/maple_me/include/me_options.h b/src/mapleall/maple_me/include/me_options.h index 
d9085b896037fd2feb325537d036732b9b155608..633d608ec4032cddef3c1bfe69805f08ecdae316 100644 --- a/src/mapleall/maple_me/include/me_options.h +++ b/src/mapleall/maple_me/include/me_options.h @@ -58,7 +58,7 @@ extern maplecl::Option dumpAfter; extern maplecl::Option realcheckcast; extern maplecl::Option eprelimit; extern maplecl::Option eprepulimit; -extern maplecl::Option epreuseprofilelimit; +extern maplecl::Option epreUseProfileLimit; extern maplecl::Option stmtprepulimit; extern maplecl::Option lprelimit; extern maplecl::Option lprepulimit; @@ -126,6 +126,7 @@ extern maplecl::Option sinkPUlimit; extern maplecl::Option loopvec; extern maplecl::Option seqvec; extern maplecl::Option layoutwithpredict; +extern maplecl::Option layoutColdPath; extern maplecl::Option veclooplimit; extern maplecl::Option ivoptslimit; extern maplecl::Option acquireFunc; diff --git a/src/mapleall/maple_me/include/me_phase_manager.h b/src/mapleall/maple_me/include/me_phase_manager.h index 75cb9fd9b4fb5f7432c4f27fb8ec9e1d4c375b5b..1b9ecd88ab316ced27d7815870b99992753028a5 100644 --- a/src/mapleall/maple_me/include/me_phase_manager.h +++ b/src/mapleall/maple_me/include/me_phase_manager.h @@ -48,6 +48,7 @@ #include "me_ssa_lpre.h" #include "me_ssa_epre.h" #include "me_gvn.h" +#include "me_sra.h" #include "me_stmt_pre.h" #include "me_store_pre.h" #include "me_cond_based_rc.h" @@ -57,7 +58,6 @@ #include "me_subsum_rc.h" #include "me_predict.h" #include "me_side_effect.h" -#include "do_ipa_escape_analysis.h" #include "me_gc_lowering.h" #include "me_gc_write_barrier_opt.h" #include "preg_renamer.h" @@ -94,6 +94,7 @@ #include "lfo_unroll.h" #include "me_safety_warning.h" #include "me_sink.h" +#include "me_tailcall.h" namespace maple { using MeFuncOptTy = MapleFunctionPhase; diff --git a/src/mapleall/maple_me/include/me_placement_rc.h b/src/mapleall/maple_me/include/me_placement_rc.h index 08d843c7f344a70191004db0a086c3fa73383afe..4326fae4618a48454073f7e06e8a3a8286e53d8d 100644 --- 
a/src/mapleall/maple_me/include/me_placement_rc.h +++ b/src/mapleall/maple_me/include/me_placement_rc.h @@ -24,7 +24,7 @@ class PlacementRC : public MeSSUPre { placementRCTemp(nullptr), bbHasReal(f.GetCfg()->GetAllBBs().size(), false, spreAllocator.Adapter()) {} - virtual ~PlacementRC() = default; + ~PlacementRC() override = default; private: // Step 6 methods @@ -61,7 +61,7 @@ class PlacementRC : public MeSSUPre { void LookForRealOccOfStores(MeStmt &stmt, BB &bb); void LookForUseOccOfLocalRefVars(MeStmt &stmt); void TraverseStatementsBackwards(BB &bb); - void AddCleanupArg(); + void AddCleanupArg() const; MeStmt *GetDefStmt(BB &bb); void PerCandInit() override { diff --git a/src/mapleall/maple_me/include/me_predict.h b/src/mapleall/maple_me/include/me_predict.h index 422b6ccd3efe2d3b500fff372fbf9b5cf2fe40e6..ebbb5028488456e98ce7a3143232a20d2f21d6a0 100644 --- a/src/mapleall/maple_me/include/me_predict.h +++ b/src/mapleall/maple_me/include/me_predict.h @@ -88,7 +88,7 @@ class MePrediction : public AnalysisResult { backEdges(tmpAlloc.Adapter()), predictDebug(false) {} - virtual ~MePrediction() = default; + ~MePrediction() override = default; Edge *FindEdge(const BB &src, const BB &dest) const; bool IsBackEdge(const Edge &edge) const; Predictor ReturnPrediction(const MeExpr *meExpr, Prediction &prediction) const; @@ -99,7 +99,7 @@ class MePrediction : public AnalysisResult { void BBLevelPredictions(); void Init(); bool PredictedByLoopHeuristic(const BB &bb) const; - void SortLoops(); + void SortLoops() const; void PredictLoops(); void PredictByOpcode(BB *bb); void EstimateBBProb(BB &bb); diff --git a/src/mapleall/maple_me/include/me_profile_gen.h b/src/mapleall/maple_me/include/me_profile_gen.h index 207d080fa35bbf7f4695cb3424ff5d1169ad57b2..7cc96cb08982b0b98f19c90890f689759cec7ba5 100644 --- a/src/mapleall/maple_me/include/me_profile_gen.h +++ b/src/mapleall/maple_me/include/me_profile_gen.h @@ -36,7 +36,7 @@ class MeProfGen : public PGOInstrument { private: void 
Init(); void InstrumentBB(BB &bb); - void SaveProfile(); + void SaveProfile() const; MeFunction *func; MeIRMap *hMap; static uint64 counterIdx; diff --git a/src/mapleall/maple_me/include/me_profile_use.h b/src/mapleall/maple_me/include/me_profile_use.h index 66e6fb3754a3a3cfd2400c33c03c61b07f2e5f55..7bce3ac51e5f74f2b37c198747e53c92cd9c76ff 100644 --- a/src/mapleall/maple_me/include/me_profile_use.h +++ b/src/mapleall/maple_me/include/me_profile_use.h @@ -138,15 +138,15 @@ class MeProfUse : public PGOInstrument { return succCalcuAllEdgeFreq; } bool MapleProfRun(); - void CheckSumFail(const uint64 hash, const uint32 expectedCheckSum, const std::string &tag); + void CheckSumFail(const uint64 hash, const uint32 expectedCheckSum, const std::string &tag) const; private: bool IsAllZero(Profile::BBInfo &result) const; - void SetEdgeCount(BBUseEdge &edge, FreqType value); - void SetEdgeCount(MapleVector &edges, FreqType value); + void SetEdgeCount(BBUseEdge &edge, FreqType value) const; + void SetEdgeCount(MapleVector &edges, FreqType value) const; void ComputeEdgeFreq(); void InitBBEdgeInfo(); - void ComputeBBFreq(BBUseInfo &bbInfo, bool &changed); - FuncProfInfo *GetFuncData(); + void ComputeBBFreq(BBUseInfo &bbInfo, bool &changed) const; + FuncProfInfo *GetFuncData() const; FreqType SumEdgesCount(const MapleVector &edges) const; BBUseInfo *GetBBUseInfo(const BB &bb) const; diff --git a/src/mapleall/maple_me/include/me_prop.h b/src/mapleall/maple_me/include/me_prop.h index 45ed6ab6ae092bb160cce0cdc90ac555b7ed3f33..98370be8b47a6741f23f8a2ce33a692cb50308e3 100644 --- a/src/mapleall/maple_me/include/me_prop.h +++ b/src/mapleall/maple_me/include/me_prop.h @@ -26,7 +26,7 @@ class MeProp : public Prop { : Prop(irMap, dom, pdom, memPool, irMap.GetFunc().GetCfg()->GetAllBBs().size(), config, limit), func(&irMap.GetFunc()) {} - virtual ~MeProp() = default; + ~MeProp() override = default; private: MeFunction *func; diff --git a/src/mapleall/maple_me/include/me_rc_lowering.h 
b/src/mapleall/maple_me/include/me_rc_lowering.h index e319b2fc484e5a2faef82264ef9022749da3ff2f..8c91eaa169739d4ab92d20f33ac0d7a3249f14bd 100644 --- a/src/mapleall/maple_me/include/me_rc_lowering.h +++ b/src/mapleall/maple_me/include/me_rc_lowering.h @@ -55,7 +55,7 @@ class RCLowering { void TraverseAllStmts(BB &bb); void RestoreVersionRecords(std::map &savedStackSize); void UnmarkNotNeedDecRefOpnds(); - void EpreFixup(BB &bb); + void EpreFixup(BB &bb) const; void BBLower(BB &bb); void CreateCleanupIntrinsics(); void HandleArguments(); @@ -65,16 +65,16 @@ class RCLowering { void ReplaceDecResetWithDec(MeStmt &prevStmt, MeStmt &stmt); void CompactAdjacentDecReset(MeStmt &prevStmt, MeStmt &stmt); // create new symbol from name and return its ost - OriginalSt *RetrieveOSt(const std::string &name, bool isLocalrefvar) const; + OriginalSt *RetrieveOSt(const std::string &name, bool isLocalRefVar) const; // create new symbol from temp name and return its VarMeExpr // new symbols are stored in a set - VarMeExpr *CreateNewTmpVarMeExpr(bool isLocalrefvar); + VarMeExpr *CreateNewTmpVarMeExpr(bool isLocalRefVar); VarMeExpr *CreateVarMeExprFromSym(MIRSymbol &sym) const; // return true if the rhs is simple so we can adjust RC count before assignments - bool RCFirst(MeExpr &rhs); + bool RCFirst(MeExpr &rhs) const; IntrinsiccallMeStmt *GetVarRHSHandleStmt(const MeStmt &stmt); IntrinsiccallMeStmt *GetIvarRHSHandleStmt(const MeStmt &stmt); - MIRIntrinsicID PrepareVolatileCall(const MeStmt &stmt, MIRIntrinsicID index = INTRN_UNDEFINED); + MIRIntrinsicID PrepareVolatileCall(const MeStmt &stmt, MIRIntrinsicID intrnId = INTRN_UNDEFINED) const; IntrinsiccallMeStmt *CreateRCIntrinsic(MIRIntrinsicID intrnID, const MeStmt &stmt, std::vector &opnds, bool assigned = false); MeExpr *HandleIncRefAndDecRefStmt(MeStmt &stmt); @@ -103,7 +103,7 @@ class RCLowering { void HandlePerManent(MeStmt &stmt); bool HasCallOrBranch(const MeStmt &from, const MeStmt &to) const; MIRIntrinsicID 
SelectWriteBarrier(const MeStmt &stmt); - MIRType *GetArrayNodeType(const VarMeExpr &var); + MIRType *GetArrayNodeType(const VarMeExpr &var) const; void CheckArrayStore(IntrinsiccallMeStmt &writeRefCall); void FastLowerThrowStmt(MeStmt &stmt, MapleMap &exceptionAllocsites); void FastLowerRetStmt(MeStmt &stmt); diff --git a/src/mapleall/maple_me/include/me_rename2preg.h b/src/mapleall/maple_me/include/me_rename2preg.h index 6c894b0639b09a8905df7ee340fc23d46df79e87..58d21079afba805a6f0232d01e65d7a7d7c5f2d3 100644 --- a/src/mapleall/maple_me/include/me_rename2preg.h +++ b/src/mapleall/maple_me/include/me_rename2preg.h @@ -48,19 +48,19 @@ class SSARename2Preg { return aliasclass->GetAliasSet(ost->GetIndex()); } - void Rename2PregStmt(MeStmt *); - void Rename2PregExpr(MeStmt *, MeExpr *); - void Rename2PregLeafRHS(MeStmt *, const VarMeExpr *); + void Rename2PregStmt(MeStmt *stmt); + void Rename2PregExpr(MeStmt *mestmt, MeExpr *meexpr); + void Rename2PregLeafRHS(MeStmt *mestmt, const VarMeExpr *varmeexpr); void Rename2PregLeafLHS(MeStmt &mestmt, const VarMeExpr &varmeexpr); - RegMeExpr *CreatePregForVar(const VarMeExpr *varMeExpr); + RegMeExpr *CreatePregForVar(const VarMeExpr &varMeExpr); RegMeExpr *FindOrCreatePregForVarPhiOpnd(const VarMeExpr *varMeExpr); - bool Rename2PregPhi(MePhiNode *, MapleMap &); - void UpdateRegPhi(MePhiNode *, MePhiNode *, const VarMeExpr *); - void Rename2PregCallReturn(MapleVector &); + bool Rename2PregPhi(MePhiNode &mevarphinode, MapleMap ®PhiList); + void UpdateRegPhi(MePhiNode &mevarphinode, MePhiNode ®phinode, const VarMeExpr *lhs); + void Rename2PregCallReturn(MapleVector &mustdeflist); bool VarMeExprIsRenameCandidate(const VarMeExpr &varMeExpr) const; - RegMeExpr *RenameVar(const VarMeExpr *); + RegMeExpr *RenameVar(const VarMeExpr *varMeExpr); void UpdateMirFunctionFormal(); - void SetupParmUsed(const VarMeExpr *); + void SetupParmUsed(const VarMeExpr *varmeexpr); void Init(); void CollectUsedOst(const MeExpr *meExpr); void 
CollectDefUseInfoOfOst(); diff --git a/src/mapleall/maple_me/include/me_safety_warning.h b/src/mapleall/maple_me/include/me_safety_warning.h index 8e5f55273709176f86ce5d2f8098d075fcc5542f..a2c44ba63f14b2420a1c970e57ad91d74e35cd41 100644 --- a/src/mapleall/maple_me/include/me_safety_warning.h +++ b/src/mapleall/maple_me/include/me_safety_warning.h @@ -38,13 +38,13 @@ class MESafetyWarning : public MapleFunctionPhase { return createMP->New(createMP); } - bool PhaseRun(MeFunction &f) override; + bool PhaseRun(MeFunction &meFunction) override; std::string PhaseName() const override; private: void GetAnalysisDependence(maple::AnalysisDep &aDep) const override; bool IsStaticModeForOp(Opcode op) const; - SafetyWarningHandler *FindHandler(Opcode op); + SafetyWarningHandler *FindHandler(Opcode op) const; SafetyWarningHandlers *realNpeHandleMap; SafetyWarningHandlers *realBoundaryHandleMap; diff --git a/src/mapleall/maple_me/include/me_scalar_analysis.h b/src/mapleall/maple_me/include/me_scalar_analysis.h index e04463738ba933ea0b058123e5cdce39ad0aaf29..1f345742a97f6e07d9f2123a3366fc2abb58d927 100644 --- a/src/mapleall/maple_me/include/me_scalar_analysis.h +++ b/src/mapleall/maple_me/include/me_scalar_analysis.h @@ -75,19 +75,19 @@ class CRNode { class CRUnKnownNode : public CRNode { public: explicit CRUnKnownNode(MeExpr *e) : CRNode(e, kCRUnKnown) {} - ~CRUnKnownNode() = default; + ~CRUnKnownNode() override = default; }; class CRConstNode : public CRNode { public: CRConstNode(MeExpr *e, int64 v) : CRNode(e, kCRConstNode), value(v) {} - ~CRConstNode() = default; + ~CRConstNode() override = default; int64 GetConstValue() const { return value; } - size_t SizeOfOperand() const { + size_t SizeOfOperand() const override { return 1; } @@ -98,9 +98,9 @@ class CRConstNode : public CRNode { class CRVarNode : public CRNode { public: explicit CRVarNode(MeExpr *e) : CRNode(e, kCRVarNode) {} - ~CRVarNode() = default; + ~CRVarNode() override = default; - size_t SizeOfOperand() const { + 
size_t SizeOfOperand() const override { return 1; } }; @@ -116,7 +116,7 @@ struct CRNodeComparator { class CRAddNode : public CRNode { public: explicit CRAddNode(MeExpr *e) : CRNode(e, kCRAddNode) {} - ~CRAddNode() = default; + ~CRAddNode() override = default; void SetOpnds(const std::vector &o) { opnds = o; @@ -149,7 +149,7 @@ class CRAddNode : public CRNode { class CRMulNode : public CRNode { public: explicit CRMulNode(MeExpr *e) : CRNode(e, kCRMulNode) {} - ~CRMulNode() = default; + ~CRMulNode() override = default; const CRNode *GetOpnd(size_t i) const { return opnds.at(i); @@ -186,7 +186,7 @@ class CRMulNode : public CRNode { class CRDivNode : public CRNode { public: CRDivNode(MeExpr *e, CRNode &l, CRNode &r) : CRNode(e, kCRDivNode), lhs(&l), rhs(&r) {} - ~CRDivNode() = default; + ~CRDivNode() override = default; const CRNode *GetLHS() const { return lhs; @@ -212,7 +212,7 @@ class CRDivNode : public CRNode { class CR : public CRNode { public: explicit CR(MeExpr *e) : CRNode(e, kCRNode) {} - ~CR() = default; + ~CR() override = default; void SetOpnds(const std::vector &o) { opnds = o; @@ -325,16 +325,16 @@ class LoopScalarAnalysisResult { CRNode *GetOrCreateCRDivNode(MeExpr *expr, CRNode &lhsCRNode, CRNode &rhsCRNode); CRNode *ComputeCRNodeWithOperator(MeExpr &expr, CRNode &lhsCRNode, CRNode &rhsCRNode, Opcode op); CRNode *CreateSimpleCRForPhi(const MePhiNode &phiNode, VarMeExpr &startExpr, const VarMeExpr &backEdgeExpr); - CRNode *CreateCRForPhi(MePhiNode &phiNode); + CRNode *CreateCRForPhi(const MePhiNode &phiNode); CRNode *GetOrCreateCRNode(MeExpr &expr); CRNode *DealWithMeOpOp(MeExpr &currOpMeExpr, MeExpr &expr); TripCountType ComputeTripCount(const MeFunction &func, uint64 &tripCountResult, CRNode *&conditionCRNode, CR *&itCR); - void PutTheAddrExprAtTheFirstOfVector(std::vector &crNodeOperands, const MeExpr &addrExpr); - CRNode &SortCROperand(CRNode &crNode, MeExpr &expr); + void PutTheAddrExprAtTheFirstOfVector(std::vector &crNodeOperands, const MeExpr 
&addrExpr) const; + CRNode &SortCROperand(CRNode &crNode, MeExpr &addrExpr); void SortOperatorCRNode(std::vector &crNodeOperands, MeExpr &addrExpr); bool NormalizationWithByteCount(std::vector &crNodeVector, uint8 byteSize); - uint8 GetByteSize(std::vector &crNodeVector); - PrimType GetPrimType(std::vector &crNodeVector); + uint8 GetByteSize(std::vector &crNodeVector) const; + PrimType GetPrimType(std::vector &crNodeVector) const; private: MeIRMap *irMap; diff --git a/src/mapleall/maple_me/include/me_side_effect.h b/src/mapleall/maple_me/include/me_side_effect.h index 252aecb341b048584cc5886be83ed2d3fc6228b0..7f68797e836fe58602f9b7bb79e0b6e62886056f 100644 --- a/src/mapleall/maple_me/include/me_side_effect.h +++ b/src/mapleall/maple_me/include/me_side_effect.h @@ -102,14 +102,14 @@ class IpaSideEffect { void GetEffectFromAllCallees(MIRFunction &baseFunc); bool AnalyzeReturnAllocObjVst(MeExpr&, const std::vector&); bool MatchPuidxAndSetSideEffects(PUIdx idx); - void ReadSummary(); + void ReadSummary() const; void SetEffectsTrue(); - void CopySccSideEffectToAllFunctions(SCCNode &scc, uint8 seMask); + void CopySccSideEffectToAllFunctions(SCCNode &scc, uint8 seMask) const; void GetEffectFromCallee(MIRFunction &callee, const MIRFunction &caller); void DumpFuncInfo(const std::string &msg, const std::string &name); - uint32 GetOrSetSCCNodeId(MIRFunction &func); + uint32 GetOrSetSCCNodeId(MIRFunction &mirfunc); bool IsCallingIntoSCC(uint32 sccID) const; - void UpdateExternalFuncSideEffects(MIRFunction &externCaller); + void UpdateExternalFuncSideEffects(MIRFunction &func); bool AnalyzeDefExpr(VersionSt &baseVar, std::vector &varVector); bool MEAnalyzeDefExpr(MeExpr &baseExprMe, std::vector &varVector); bool UpdateSideEffectWithStmt(MeStmt &meStmt, diff --git a/src/mapleall/maple_me/include/me_slp.h b/src/mapleall/maple_me/include/me_slp.h index 1002fdc39808b349359da4d920c34a40db98d64c..fc0255db71dc95ed9df49c6fd790c9832c84b11d 100644 --- 
a/src/mapleall/maple_me/include/me_slp.h +++ b/src/mapleall/maple_me/include/me_slp.h @@ -62,7 +62,7 @@ class MemBasePtr { // Make sure `addends` is sorted before calling operator== bool operator==(const MemBasePtr &rhs) const { - if (static_cast(IsFromIvar()) ^ static_cast(rhs.IsFromIvar())) { + if ((static_cast(IsFromIvar()) ^ static_cast(rhs.IsFromIvar())) != 0) { return false; } if (IsFromIvar()) { diff --git a/src/mapleall/maple_me/include/me_sra.h b/src/mapleall/maple_me/include/me_sra.h new file mode 100644 index 0000000000000000000000000000000000000000..f16f917afd6c1a6d4f8d541b41011441160186c9 --- /dev/null +++ b/src/mapleall/maple_me/include/me_sra.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLE_ME_INCLUDE_ME_SRA_H +#define MAPLE_ME_INCLUDE_ME_SRA_H +#include "me_function.h" +#include "maple_phase.h" +namespace maple { +MAPLE_FUNC_PHASE_DECLARE(MESRA, MeFunction) +} // namespace maple +#endif // MAPLE_ME_INCLUDE_ME_SRA_H diff --git a/src/mapleall/maple_me/include/me_ssa.h b/src/mapleall/maple_me/include/me_ssa.h index 442fb7e18df34fe8652f02d0c0746264b0322e0e..f70c2d6a57f36f2393c27d487b074d7e44a5e6e3 100644 --- a/src/mapleall/maple_me/include/me_ssa.h +++ b/src/mapleall/maple_me/include/me_ssa.h @@ -33,7 +33,7 @@ class MeSSA : public SSA, public AnalysisResult { AnalysisResult(&memPool), func(&func), eDebug(enabledDebug) {} - ~MeSSA() = default; + ~MeSSA() override = default; void VerifySSA() const; void InsertIdentifyAssignments(IdentifyLoops *identloops); diff --git a/src/mapleall/maple_me/include/me_ssa_devirtual.h b/src/mapleall/maple_me/include/me_ssa_devirtual.h index e5407e7d5c1eeee7afab52ae438c9753a11b5303..2b50ac6e40b567de61bd84cbf1f4240ecb901c80 100644 --- a/src/mapleall/maple_me/include/me_ssa_devirtual.h +++ b/src/mapleall/maple_me/include/me_ssa_devirtual.h @@ -30,7 +30,7 @@ class MeSSADevirtual : public SSADevirtual { : SSADevirtual(memPool, mod, irMap, kh, dom, func.GetCfg()->GetAllBBs().size(), clone, skipReturnTypeOpt), func(&func) {} - ~MeSSADevirtual() = default; + ~MeSSADevirtual() override = default; protected: BB *GetBB(BBId id) const override { diff --git a/src/mapleall/maple_me/include/me_ssa_epre.h b/src/mapleall/maple_me/include/me_ssa_epre.h index 04f5dab2c86e7a82b4750e894637cb27858995e4..9640e62c0a9bdf4f19fea1ce6563ad0db93740ea 100644 --- a/src/mapleall/maple_me/include/me_ssa_epre.h +++ b/src/mapleall/maple_me/include/me_ssa_epre.h @@ -35,7 +35,7 @@ class MeSSAEPre : public SSAEPre { epreLocalRefVar(epreLocalRefVar), klassHierarchy(kh) {} - virtual ~MeSSAEPre() = default; + ~MeSSAEPre() override = default; bool ScreenPhiBB(BBId) const override { return true; } diff --git 
a/src/mapleall/maple_me/include/me_ssa_lpre.h b/src/mapleall/maple_me/include/me_ssa_lpre.h index 3228d5a586b6708df606448d739b2ab9c4362d07..eccb73a3a7fd9e10ff2ff8d1f1c1c37485f8f3aa 100644 --- a/src/mapleall/maple_me/include/me_ssa_lpre.h +++ b/src/mapleall/maple_me/include/me_ssa_lpre.h @@ -32,7 +32,7 @@ class MeSSALPre : public SSAPre { loopHeadBBs(ssaPreAllocator.Adapter()), candsForSSAUpdate() {} - virtual ~MeSSALPre() = default; + ~MeSSALPre() override = default; void FindLoopHeadBBs(const IdentifyLoops &identLoops); std::map>> &GetCandsForSSAUpdate() { @@ -43,10 +43,10 @@ class MeSSALPre : public SSAPre { MeSSAUpdate::InsertOstToSSACands(ostIdx, bb, &candsForSSAUpdate); } private: - void GenerateSaveRealOcc(MeRealOcc&) override; - MeExpr *GetTruncExpr(const VarMeExpr &theLHS, MeExpr &savedRHS); - void GenerateReloadRealOcc(MeRealOcc&) override; - MeExpr *PhiOpndFromRes(MeRealOcc&, size_t) const override; + void GenerateSaveRealOcc(MeRealOcc &realOcc) override; + MeExpr *GetTruncExpr(const VarMeExpr &theLHS, MeExpr &savedRHS) const; + void GenerateReloadRealOcc(MeRealOcc &realOcc) override; + MeExpr *PhiOpndFromRes(MeRealOcc &realZ, size_t j) const override; void ComputeVarAndDfPhis() override; bool ScreenPhiBB(BBId) const override { return true; diff --git a/src/mapleall/maple_me/include/me_ssa_update.h b/src/mapleall/maple_me/include/me_ssa_update.h index b46fdc54b337515e8774d42bc7997b9119d4fe2a..8c415f5f171c80de2e2e43b8147c1f11dfae23a2 100644 --- a/src/mapleall/maple_me/include/me_ssa_update.h +++ b/src/mapleall/maple_me/include/me_ssa_update.h @@ -59,7 +59,7 @@ class VersionStacks { class VectorVersionStacks : public VersionStacks { public: VectorVersionStacks() = default; - virtual ~VectorVersionStacks() = default; + ~VectorVersionStacks() override = default; std::stack *GetRenameStack(OStIdx idx) override; void InsertZeroVersion2RenameStack(SSATab &ssaTab, IRMap &irMap) override; @@ -79,7 +79,7 @@ class MapVersionStacks : public VersionStacks { public: 
MapVersionStacks() : renameWithMapStacks(std::less()) {} - virtual ~MapVersionStacks() = default; + ~MapVersionStacks() override = default; std::stack *GetRenameStack(OStIdx idx) override; void InsertZeroVersion2RenameStack(SSATab &ssaTab, IRMap &irMap) override; @@ -113,10 +113,10 @@ class MeSSAUpdate { private: void InsertPhis(); - void RenamePhi(const BB &bb); + void RenamePhi(const BB &bb) const; MeExpr *RenameExpr(MeExpr &meExpr, bool &changed); void RenameStmts(BB &bb); - void RenamePhiOpndsInSucc(const BB &bb); + void RenamePhiOpndsInSucc(const BB &bb) const; void RenameBB(BB &bb); MeFunction &func; IRMap &irMap; diff --git a/src/mapleall/maple_me/include/me_ssi.h b/src/mapleall/maple_me/include/me_ssi.h index aa83b62ade45cd35704bf1a7a9986b40952ecec2..951b0487695c497aff738787a0e3310d45a98f06 100644 --- a/src/mapleall/maple_me/include/me_ssi.h +++ b/src/mapleall/maple_me/include/me_ssi.h @@ -194,13 +194,13 @@ class MeSSI { } MIRType *GetInferredType(MeExpr *expr); private: - NaryMeExpr *GetInstanceOfType(MeExpr &e); + NaryMeExpr *GetInstanceOfType(MeExpr &e) const; void AddPiForABCOpt(BB &bb); void AddNullPointerInfoForVar() const; - uint8_t AnalysisBranch(MeStmt &meStmt); + uint8_t AnalysisBranch(MeStmt &meStmt) const; void RemoveExtraNodes(); void InsertPiNodes(); - bool ExistedPhiNode(BB &bb, const VarMeExpr &rhs); + bool ExistedPhiNode(BB &bb, const VarMeExpr &rhs) const; void InsertPhiNodes(); void Rename(); void RenameStartPiBr(DefPoint &newDefPoint); @@ -209,9 +209,9 @@ class MeSSI { void ReplacePiPhiInSuccs(BB &bb, VarMeExpr &newVar); bool ReplaceStmt(MeStmt &meStmt, VarMeExpr &newVar, VarMeExpr &oldVar); void ReplaceBB(BB &bb, BB &parentBB, DefPoint &newDefPoint); - bool ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs); + bool ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs) const; void CreatePhi(VarMeExpr &rhs, BB &dfBB); - VarMeExpr *CreateNewPiExpr(const MeExpr &opnd); + VarMeExpr *CreateNewPiExpr(const MeExpr &opnd) const; void 
CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, BB &bb, MeStmt &generatedBy, bool isToken); void CreateNewPiStmt(VarMeExpr *lhs, MeExpr &rhs, MeStmt &generatedBy); MeExpr *ReplaceMeExprExpr(MeExpr &origExpr, MeExpr &oldVar, MeExpr &repExpr); diff --git a/src/mapleall/maple_me/include/me_ssu_pre.h b/src/mapleall/maple_me/include/me_ssu_pre.h index 1ffdcaf4711b7cbe7cbdde9484e6fedf62e85b5d..1af0797b3842c02476cdc582923e4916ab730d32 100644 --- a/src/mapleall/maple_me/include/me_ssu_pre.h +++ b/src/mapleall/maple_me/include/me_ssu_pre.h @@ -36,7 +36,7 @@ class SOcc { virtual ~SOcc() = default; virtual void Dump() const = 0; - bool IsPostDominate(Dominance *pdom, const SOcc *occ) const { + bool IsPostDominate(const Dominance *pdom, const SOcc *occ) const { CHECK_NULL_FATAL(occ); CHECK_NULL_FATAL(pdom); return pdom->Dominate(mirBB, occ->mirBB); @@ -91,7 +91,7 @@ class SRealOcc : public SOcc { : SOcc(kSOccReal, *s.GetBB()), meStmt(&s), vMeExpr(&v), realFromDef(false), redundant(true) {} SRealOcc(BB &bb, VarMeExpr &v) : SOcc(kSOccReal, bb), meStmt(nullptr), vMeExpr(&v), realFromDef(false), redundant(true) {} - virtual ~SRealOcc() = default; + ~SRealOcc() override = default; void Dump() const override { LogInfo::MapleLogger() << "RealOcc at bb" << GetBB().GetBBId(); if (realFromDef) { @@ -138,8 +138,8 @@ class SLambdaResOcc : public SOcc { explicit SLambdaResOcc(BB &bb) : SOcc(kSOccLambdaRes, bb), useLambdaOcc(nullptr), hasRealUse(false), insertHere(false) {} - virtual ~SLambdaResOcc() = default; - void Dump() const { + ~SLambdaResOcc() override = default; + void Dump() const override { LogInfo::MapleLogger() << "LambdaResOcc at bb" << GetBB().GetBBId() << " classId" << GetClassId(); } @@ -178,12 +178,12 @@ class SLambdaOcc : public SOcc { SLambdaOcc(BB &bb, MapleAllocator &alloc) : SOcc(kSOccLambda, bb), isUpsafe(true), isCanBeAnt(true), isEarlier(true), lambdaRes(alloc.Adapter()) {} - virtual ~SLambdaOcc() = default; + ~SLambdaOcc() override = default; bool WillBeAnt() const { 
return isCanBeAnt && !isEarlier; } - void Dump() const { + void Dump() const override { LogInfo::MapleLogger() << "LambdaOcc at bb" << GetBB().GetBBId() << " classId" << GetClassId() << " Lambda["; for (size_t i = 0; i < lambdaRes.size(); i++) { lambdaRes[i]->Dump(); @@ -233,8 +233,8 @@ class SEntryOcc : public SOcc { public: explicit SEntryOcc(BB &bb) : SOcc(kSOccEntry, bb) {} - virtual ~SEntryOcc() = default; - void Dump() const { + ~SEntryOcc() override = default; + void Dump() const override { LogInfo::MapleLogger() << "EntryOcc at bb" << GetBB().GetBBId(); } }; @@ -243,8 +243,8 @@ class SUseOcc : public SOcc { public: explicit SUseOcc(BB &bb) : SOcc(kSOccUse, bb) {} - virtual ~SUseOcc() = default; - void Dump() const { + ~SUseOcc() override = default; + void Dump() const override { LogInfo::MapleLogger() << "UseOcc at bb" << GetBB().GetBBId(); } }; @@ -253,7 +253,7 @@ class SPhiOcc : public SOcc { public: SPhiOcc(BB &bb, MePhiNode &p, VarMeExpr &v) : SOcc(kSOccPhi, bb), phi(&p), vMeExpr(&v) {}; - virtual ~SPhiOcc() = default; + ~SPhiOcc() override = default; MePhiNode *GetPhiNode() { return phi; @@ -271,7 +271,7 @@ class SPhiOcc : public SOcc { return vMeExpr; } - void Dump() const { + void Dump() const override { LogInfo::MapleLogger() << "PhiOcc at bb" << GetBB().GetBBId(); } diff --git a/src/mapleall/maple_me/include/me_stack_protect.h b/src/mapleall/maple_me/include/me_stack_protect.h index a6b94635f3391fb29233d330aa5d7aacf9075511..7fc32b46a963970621bcc9754ff855be50cbd232 100644 --- a/src/mapleall/maple_me/include/me_stack_protect.h +++ b/src/mapleall/maple_me/include/me_stack_protect.h @@ -26,12 +26,12 @@ class MeStackProtect { explicit MeStackProtect(MeFunction &func) : f(&func) {} ~MeStackProtect() = default; void CheckAddrofStack(); - bool MayWriteStack(); + bool MayWriteStack() const; private: bool IsMeStmtSafe(const MeStmt &stmt) const; - bool IsCallSafe(const MeStmt &stmt, bool isIcall, const FuncDesc *funcDesc = nullptr) const; bool 
IsIntrnCallSafe(const MeStmt &stmt) const; + bool IsCallSafe(const MeStmt &stmt, bool isIcall, const FuncDesc *funcDesc = nullptr) const; bool IsStackSymbol(const OriginalSt &ost) const; bool IsAddressOfStackVar(const MeExpr &expr) const; bool IsWriteFromSourceSafe(const MeStmt &stmt, uint64 numOfBytesToWrite) const; diff --git a/src/mapleall/maple_me/include/me_stmt_pre.h b/src/mapleall/maple_me/include/me_stmt_pre.h index 574a35d88463532264851abfd943cf998157eea7..f59216dc69d10315e8391551361c68883c014ae5 100644 --- a/src/mapleall/maple_me/include/me_stmt_pre.h +++ b/src/mapleall/maple_me/include/me_stmt_pre.h @@ -31,7 +31,7 @@ class MeStmtPre : public SSAEPre { versionStackVec(ssaTab->GetOriginalStTable().GetOriginalStVector().size(), nullptr, ssaPreAllocator.Adapter()), useOccurMap(std::less(), ssaPreAllocator.Adapter()) {} - virtual ~MeStmtPre() = default; + ~MeStmtPre() override = default; bool ScreenPhiBB(BBId) const override { return true; } @@ -53,7 +53,7 @@ class MeStmtPre : public SSAEPre { void Finalize1() override; void Finalize2() override {}; // fully available (replaces downsafety, canbeavail and later under SSAFRE) - void ResetFullyAvail(MePhiOcc &occ); + void ResetFullyAvail(MePhiOcc &occ) const; void ComputeFullyAvail(); // rename phase bool AllVarsSameVersion(const MeRealOcc &realOcc1, const MeRealOcc &realOcc2) const override; diff --git a/src/mapleall/maple_me/include/me_store_pre.h b/src/mapleall/maple_me/include/me_store_pre.h index cc66d03af5dec94db0722448bc9870dd9b85d774..60165cc31f09fd635fd272d271f2274c2360b828 100644 --- a/src/mapleall/maple_me/include/me_store_pre.h +++ b/src/mapleall/maple_me/include/me_store_pre.h @@ -27,7 +27,7 @@ class MeStorePre : public MeSSUPre { bbCurTempMap(spreAllocator.Adapter()), candsForSSAUpdate() {} - virtual ~MeStorePre() = default; + ~MeStorePre() override = default; std::map>> &CandsForSSAUpdate() { return candsForSSAUpdate; diff --git a/src/mapleall/maple_me/include/me_subsum_rc.h 
b/src/mapleall/maple_me/include/me_subsum_rc.h index 01a26e31f5b582f2597246b67a6220667373b84e..a0d42658f636219c2e227ef1e328a3cb39c86350 100644 --- a/src/mapleall/maple_me/include/me_subsum_rc.h +++ b/src/mapleall/maple_me/include/me_subsum_rc.h @@ -37,7 +37,7 @@ class SubsumRC : public MeSSUPre { bbVisited(f.GetCfg()->GetAllBBs().size(), false, spreAllocator.Adapter()), verstCantSubsum(f.GetIRMap()->GetVerst2MeExprTable().size(), false, spreAllocator.Adapter()) {} - virtual ~SubsumRC() = default; + ~SubsumRC() override = default; void RunSSUPre(); protected: diff --git a/src/mapleall/maple_me/include/me_tailcall.h b/src/mapleall/maple_me/include/me_tailcall.h new file mode 100644 index 0000000000000000000000000000000000000000..d2bad416a22bdfb539e27e4a9e979b92759fa225 --- /dev/null +++ b/src/mapleall/maple_me/include/me_tailcall.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLE_ME_INCLUDE_ME_TAILCALL_H +#define MAPLE_ME_INCLUDE_ME_TAILCALL_H +#include "maple_phase.h" +#include "me_function.h" +#include "mempool_allocator.h" +namespace maple { + +// This file will mark the callstmts as maytailcall of current function if they have no +// risk of stack address escaped. We do flow sensitive escape analysis, propagating the +// escape point from BB to its successor. 
This is a necessary but not sufficent condition +// for doing tailcall, we will do further analysis on the back-end +class TailcallOpt : public AnalysisResult { + public: + ~TailcallOpt() override = default; + TailcallOpt(MeFunction &f, MemPool &mempool); + + void Walk(); + void WalkTroughBB(BB &bb); + + private: + MeFunction &func; + MapleAllocator tailcallAlloc; + MapleVector callCands; + MapleVector escapedPoints; +}; + +MAPLE_FUNC_PHASE_DECLARE(METailcall, MeFunction) + +} // namespace maple +#endif diff --git a/src/mapleall/maple_me/include/me_toplevel_ssa.h b/src/mapleall/maple_me/include/me_toplevel_ssa.h index 877b5229fa3097c3e33cfcd2b1ccec70361513ce..b13f0d9f6b178be965fc79aee2619a75bb64443a 100644 --- a/src/mapleall/maple_me/include/me_toplevel_ssa.h +++ b/src/mapleall/maple_me/include/me_toplevel_ssa.h @@ -33,7 +33,7 @@ class MeTopLevelSSA : public SSA, public AnalysisResult { func(&f), vstUseInfo(&memPool) {} - ~MeTopLevelSSA() = default; + ~MeTopLevelSSA() override = default; void CollectUseInfo(); VstUseInfo *GetVstUseInfo() { diff --git a/src/mapleall/maple_me/include/me_value_range_prop.h b/src/mapleall/maple_me/include/me_value_range_prop.h index 4c274072d7a5c224d39a17ef48cd4552c27388a3..048d90678e99bcf195d707bbd946286240325310 100644 --- a/src/mapleall/maple_me/include/me_value_range_prop.h +++ b/src/mapleall/maple_me/include/me_value_range_prop.h @@ -72,10 +72,10 @@ class SafetyCheckWithBoundaryError : public SafetyCheck { ValueRangePropagation &vrp; }; -int64 GetMinNumber(PrimType pType); +int64 GetMinNumber(PrimType primType); int64 GetMaxNumber(PrimType primType); -bool IsNeededPrimType(PrimType pType); -int64 GetRealValue(int64 value, PrimType pType); +bool IsNeededPrimType(PrimType prim); +int64 GetRealValue(int64 value, PrimType primType); bool IsPrimTypeUint64(PrimType pType); class Bound { @@ -128,7 +128,7 @@ class Bound { bool IsGreaterThanOrEqualToMin(PrimType pType) const { CHECK_FATAL(IsNeededPrimType(pType), "must not be here"); - 
return IsPrimTypeUint64(pType) ? static_cast(constant) >= static_cast(GetMinNumber(pType)): + return IsPrimTypeUint64(pType) ? static_cast(constant) >= static_cast(GetMinNumber(pType)) : GetRealValue(constant, pType) >= GetRealValue(GetMinNumber(pType), pType); } @@ -146,7 +146,7 @@ class Bound { bool IsGreaterThanOrEqualTo(const Bound rightBound, PrimType pType) const { CHECK_FATAL(IsNeededPrimType(pType), "must not be here"); - return IsPrimTypeUint64(pType) ? static_cast(constant) >= static_cast(rightBound.GetConstant()): + return IsPrimTypeUint64(pType) ? static_cast(constant) >= static_cast(rightBound.GetConstant()) : GetRealValue(constant, pType) >= GetRealValue(rightBound.GetConstant(), pType); } @@ -164,9 +164,9 @@ class Bound { return true; } if (IsPrimTypeUint64(fromType)) { - return static_cast(constant) == GetRealValue(constant, toType); + return constant == GetRealValue(constant, toType); } else if (IsPrimTypeUint64(toType)) { - return static_cast(constant) == GetRealValue(constant, fromType); + return constant == GetRealValue(constant, fromType); } else { return GetRealValue(constant, fromType) == GetRealValue(constant, toType); } @@ -424,6 +424,9 @@ class ValueRange { CHECK_FATAL(false, "can not be here"); } } + bool IsKEqualAndConstantRange() const { + return rangeType == kEqual && range.bound.GetVar() == nullptr; + } bool IsZeroInRange() const { return IsConstantLowerAndUpper() && GetUpper().GetConstant() >= 0 && @@ -551,7 +554,7 @@ class ValueRangePropagation { void JudgeTheConsistencyOfDefPointsOfBoundaryCheck( BB &bb, MeExpr &expr, std::set &visitedLHS, std::vector &stmts, bool &crossPhiNode); bool TheValueOfOpndIsInvaliedInABCO(const BB &bb, const MeStmt *meStmt, MeExpr &boundOpnd, bool updateCaches = true); - ValueRange *FindValueRange(const BB &bb, MeExpr &expr, uint32 &numberOfRecursions, + ValueRange *FindValueRange(const BB &bb, MeExpr &expr, uint32 &numberOfRecursionsArg, std::unordered_set &foundExprs, uint32 maxThreshold); bool 
BrStmtInRange(const BB &bb, const ValueRange &leftRange, const ValueRange &rightRange, Opcode op, PrimType opndType, bool judgeNotInRange = false); @@ -723,7 +726,8 @@ class ValueRangePropagation { return false; } - void JudgeEqual(MeExpr &expr, ValueRange &vrOfLHS, ValueRange &vrOfRHS, std::unique_ptr &valueRangePtr); + void JudgeEqual(MeExpr &expr, ValueRange &vrOfLHS, ValueRange &vrOfRHS, + std::unique_ptr &valueRangePtr) const; // The pairOfExprs map collects the exprs which have the same valueRange in bbs, // the pair of expr and preExpr is element of pairOfExprs. @@ -737,19 +741,19 @@ class ValueRangePropagation { void DealWithBrStmtWithOneOpnd(BB &bb, const CondGotoMeStmt &stmt, MeExpr &opnd, Opcode op); bool OverflowOrUnderflow(PrimType pType, int64 lhs, int64 rhs) const; void DealWithAssign(BB &bb, const MeStmt &stmt); - bool IsConstant(const BB &bb, MeExpr &expr, int64 &constant, bool canNotBeNotEqual = true); + bool IsConstant(const BB &bb, MeExpr &expr, int64 &value, bool canNotBeNotEqual = true); std::unique_ptr CreateValueRangeForPhi( LoopDesc &loop, const BB &bb, ScalarMeExpr &init, ScalarMeExpr &backedge, const ScalarMeExpr &lhsOfPhi); - bool AddOrSubWithConstant(PrimType pType, Opcode op, int64 lhsConstant, int64 rhsConstant, int64 &res) const; + bool AddOrSubWithConstant(PrimType primType, Opcode op, int64 lhsConstant, int64 rhsConstant, int64 &res) const; std::unique_ptr NegValueRange(const BB &bb, MeExpr &opnd, uint32 &numberOfRecursions, std::unordered_set &foundExprs); - bool AddOrSubWithBound(Bound oldBound, Bound &resBound, int64 rhsConstant, Opcode op); - std::unique_ptr AddOrSubWithValueRange(Opcode op, ValueRange &valueRange, int64 rhsConstant); + bool AddOrSubWithBound(Bound oldBound, Bound &resBound, int64 rhsConstant, Opcode op) const; + std::unique_ptr AddOrSubWithValueRange(Opcode op, ValueRange &valueRange, int64 rhsConstant) const; std::unique_ptr AddOrSubWithValueRange( - Opcode op, ValueRange &valueRangeLeft, ValueRange 
&valueRangeRight); + Opcode op, ValueRange &valueRangeLeft, ValueRange &valueRangeRight) const; std::unique_ptr DealWithAddOrSub(const BB &bb, const OpMeExpr &opMeExpr); - bool CanComputeLoopIndVar(const MeExpr &phiLHS, MeExpr &expr, int64 &constant); + bool CanComputeLoopIndVar(const MeExpr &phiLHS, MeExpr &expr, int64 &constant) const; std::unique_ptr RemWithValueRange(const BB &bb, const OpMeExpr &opMeExpr, int64 rhsConstant); std::unique_ptr RemWithRhsValueRange(const OpMeExpr &opMeExpr, int64 rhsConstant) const; std::unique_ptr DealWithRem(const BB &bb, const MeExpr &lhsVar, const OpMeExpr &opMeExpr); @@ -779,13 +783,13 @@ class ValueRangePropagation { ValueRange &rightRange, const BB &trueBranch, const BB &falseBranch); void DealWithCondGoto(BB &bb, Opcode op, ValueRange *leftRange, ValueRange &rightRange, const CondGotoMeStmt &brMeStmt); - bool CreateNewBoundWhenAddOrSub(Opcode op, Bound bound, int64 rhsConstant, Bound &res); - std::unique_ptr CopyValueRange(ValueRange &valueRange, PrimType pType = PTY_begin); + bool CreateNewBoundWhenAddOrSub(Opcode op, Bound bound, int64 rhsConstant, Bound &res) const; + std::unique_ptr CopyValueRange(ValueRange &valueRange, PrimType primType = PTY_begin) const; bool LowerInRange(const BB &bb, Bound lowerTemp, Bound lower, bool lowerIsZero); bool UpperInRange(const BB &bb, Bound upperTemp, Bound upper, bool upperIsArrayLength); void PrepareForSSAUpdateWhenPredBBIsRemoved(const BB &pred, BB &bb, ScalarMeExpr *updateSSAExceptTheScalarExpr, std::map> &ssaupdateCandsForCondExpr); - void InsertOstOfPhi2Cands(BB &bb, size_t i, ScalarMeExpr *updateSSAExceptTheScalarExpr, + void InsertOstOfPhi2Cands(BB &bb, size_t i, const ScalarMeExpr *updateSSAExceptTheScalarExpr, std::map> &ssaupdateCandsForCondExpr, bool setPhiIsDead = false); void InsertOstOfPhi2Cands(BB &bb, size_t i); void AnalysisUnreachableBBOrEdge(BB &bb, BB &unreachableBB, BB &succBB); @@ -820,11 +824,11 @@ class ValueRangePropagation { size_t GetRealPredSize(const BB 
&bb) const; bool RemoveTheEdgeOfPredBB(BB &pred, BB &bb, BB &trueBranch, ScalarMeExpr *updateSSAExceptTheScalarExpr, std::map> &ssaupdateCandsForCondExpr); - void DealWithCondGotoWhenRightRangeIsNotExist(BB &bb, const MeExpr &opnd0, MeExpr &opnd1, - Opcode opOfBrStmt, Opcode conditionalOp, ValueRange *valueRangeOfLeft); + void DealWithCondGotoWhenRightRangeIsNotExist( + BB &bb, const OpMeExpr &opMeExpr, Opcode opOfBrStmt, ValueRange *valueRangeOfLeft); MeExpr *GetDefOfBase(const IvarMeExpr &ivar) const; std::unique_ptr DealWithMeOp(const BB &bb, const MeStmt &stmt); - void ReplaceOpndByDef(const BB &bb, MeExpr &currOpnd, MeExpr *&predOpnd, MePhiNode *&phi, bool &thePhiIsInBB); + void ReplaceOpndByDef(const BB &bb, MeExpr &currOpnd, MeExpr *&predOpnd, MePhiNode *&phi, bool &thePhiIsInBB) const; bool AnalysisValueRangeInPredsOfCondGotoBB(BB &bb, MeExpr *opnd0, MeExpr &currOpnd, ValueRange *rightRange, BB &falseBranch, BB &trueBranch, PrimType opndType, Opcode op, BB &condGoto); void CreateLabelForTargetBB(BB &pred, BB &newBB); @@ -849,12 +853,12 @@ class ValueRangePropagation { void CollectIndexOpndWithBoundInLoop( LoopDesc &loop, BB &bb, MeStmt &meStmt, MeExpr &opnd, std::map &index2NewExpr); bool CompareConstantOfIndexAndLength( - const MeStmt &meStmt, const ValueRange &valueRangeOfIndex, ValueRange &valueRangeOfLengthPtr, Opcode op); + const MeStmt &meStmt, const ValueRange &valueRangeOfIndex, ValueRange &valueRangeOfLengthPtr, Opcode op) const; bool CompareIndexWithUpper(const BB &bb, const MeStmt &meStmt, const ValueRange &valueRangeOfIndex, ValueRange &valueRangeOfLengthPtr, Opcode op, const MeExpr *indexOpnd = nullptr); bool DealWithAssertLtOrLe(BB &bb, MeStmt &meStmt, CRNode &indexCR, CRNode &boundCR, Opcode op); void DealWithCVT(const BB &bb, MeStmt &stmt, MeExpr *operand, size_t i, bool dealWithStmt = false); - std::unique_ptr ZeroIsInRange(const ValueRange &valueRange); + std::unique_ptr ZeroIsInRange(const ValueRange &valueRange) const; void 
DealWithNeg(const BB &bb, const OpMeExpr &opMeExpr); bool DealWithCVT(const BB &bb, MeStmt &stmt, OpMeExpr &opMeExpr); bool IfTheLowerOrUpperOfLeftRangeEqualToTheRightRange( @@ -869,7 +873,7 @@ class ValueRangePropagation { void DeleteAssertNonNull(); void DeleteBoundaryCheck(); bool MustBeFallthruOrGoto(const BB &defBB, const BB &bb) const; - std::unique_ptr AntiValueRange(ValueRange &valueRange); + std::unique_ptr AntiValueRange(ValueRange &valueRange) const; void DeleteUnreachableBBs(BB &curBB, BB &falseBranch, BB &trueBranch); void PropValueRangeFromCondGotoToTrueAndFalseBranch( const MeExpr &opnd0, ValueRange &rightRange, const BB &falseBranch, const BB &trueBranch); @@ -891,7 +895,7 @@ class ValueRangePropagation { std::vector> &valueRangeOfInitExprs, size_t indexOfInitExpr); std::unique_ptr MakeMonotonicIncreaseOrDecreaseValueRangeForPhi(int64 stride, Bound &initBound) const; bool MergeVrOrInitAndBackedge(MePhiNode &mePhiNode, ValueRange &vrOfInitExpr, - ValueRange &valueRange, Bound &resBound); + ValueRange &valueRange, Bound &resBound) const; void ReplaceUsePoints(MePhiNode *phi); void CreateVRWithBitsSize(const BB &bb, const OpMeExpr &opMeExpr); MeExpr &GetVersionOfOpndInPred(const BB &pred, const BB &bb, MeExpr &expr, const BB &condGoto); @@ -899,20 +903,19 @@ class ValueRangePropagation { Opcode GetOpAfterSwapThePositionsOfTwoOperands(Opcode op) const; bool IsSubOpndOfExpr(const MeExpr &expr, const MeExpr &subExpr) const; void UpdateProfile(BB &pred, BB &bb, const BB &targetBB) const; - bool TheValueRangeOfOpndAndSubOpndAreEqual(const MeExpr &opnd) const; void CalculateVROfSubOpnd(BBId bbID, const MeExpr &opnd, ValueRange &valueRange); void CreateValueRangeForSubOpnd(const MeExpr &opnd, const BB &trueBranch, const BB &falseBranch, ValueRange &resTrueBranchVR, ValueRange &resFalseBranchVR); ValueRange *DealWithNegWhenFindValueRange(const BB &bb, const MeExpr &expr, - uint32 &numberOfRecursions, std::unordered_set &foundExprs, uint32 maxThreshold); + 
uint32 &numberOfRecursions, std::unordered_set &foundExprs); void MergeNotEqualRanges(const MeExpr &opnd, const ValueRange *leftRange, ValueRange &rightRange, const BB &trueBranch); bool DealWithMulNode(const BB &bb, CRNode &opndOfCRNode, std::unique_ptr &resValueRange, PrimType pTypeOfArray); template - bool IsOverflowAfterMul(T lhs, T rhs, PrimType pty); - bool HasDefPointInPred(const BB &begin, const BB &end, const ScalarMeExpr &opnd); + bool IsOverflowAfterMul(T lhs, T rhs, PrimType pty) const; + bool HasDefPointInPred(const BB &begin, const BB &end, const ScalarMeExpr &opnd) const; bool CanIgnoreTheDefPoint(const MeStmt &stmt, const BB &end, const ScalarMeExpr &expr) const; MeFunction &func; diff --git a/src/mapleall/maple_me/include/occur.h b/src/mapleall/maple_me/include/occur.h index 0e7f440913c3cbdcb8bdb23130b20f88caa1c45a..f33fc230834c7c9e89e530c97de1128549f2d089 100644 --- a/src/mapleall/maple_me/include/occur.h +++ b/src/mapleall/maple_me/include/occur.h @@ -41,7 +41,7 @@ class MeOccur { MeOccur(OccType ty, int cId, BB &bb, MeOccur *df) : occTy(ty), classID(cId), mirBB(&bb), def(df) {} virtual ~MeOccur() = default; virtual void Dump(const IRMap &irMap) const; - void DumpOccur(IRMap &irMap); + void DumpOccur(IRMap &irMap) const; bool IsDominate(Dominance &dom, MeOccur &occ); const BB *GetBB() const { return mirBB; @@ -109,7 +109,7 @@ class MeRealOcc : public MeOccur { } } - ~MeRealOcc() = default; + ~MeRealOcc() override = default; void Dump(const IRMap &irMap) const override; const MeStmt *GetMeStmt() const { return meStmt; @@ -224,7 +224,7 @@ class MeInsertedOcc : public MeOccur { MeInsertedOcc(MeExpr *expr, MeStmt *stmt, BB &bb) : MeOccur(kOccInserted, 0, bb, nullptr), meExpr(expr), meStmt(stmt), savedExpr(nullptr) {} - ~MeInsertedOcc() = default; + ~MeInsertedOcc() override = default; void Dump(const IRMap &irMap) const override; const MeStmt *GetMeStmt() const { return meStmt; @@ -283,7 +283,7 @@ class MePhiOpndOcc : public MeOccur { 
currentExpr.meStmt = nullptr; } - ~MePhiOpndOcc() = default; + ~MePhiOpndOcc() override = default; void Dump(const IRMap &irMap) const override; bool IsProcessed() const { return isProcessed; @@ -392,7 +392,7 @@ class MePhiOcc : public MeOccur { regPhi(nullptr), varPhi(nullptr) {} - virtual ~MePhiOcc() = default; + ~MePhiOcc() override = default; bool IsWillBeAvail() const { return isCanBeAvail && !isLater; } @@ -516,7 +516,7 @@ class MePhiOcc : public MeOccur { } bool IsOpndDefByRealOrInserted() const; - void Dump(const IRMap &irMap) const; + void Dump(const IRMap &irMap) const override; private: bool isDownSafe; // default is true @@ -687,7 +687,7 @@ class PreStmtWorkCand : public PreWorkCand { PreStmtWorkCand(MapleAllocator &alloc, MeStmt &meStmt, PUIdx pIdx) : PreWorkCand(alloc, nullptr, pIdx), theMeStmt(&meStmt), lhsIsFinal(false) {} - virtual ~PreStmtWorkCand() = default; + ~PreStmtWorkCand() override = default; void DumpCand(IRMap &irMap) const override { theMeStmt->Dump(&irMap); diff --git a/src/mapleall/maple_me/include/orig_symbol.h b/src/mapleall/maple_me/include/orig_symbol.h index 384f095d1142b2fc5c0d0f47c14d82c600f6ac9e..c52fcc3dd8a094ad40e3c995b2e89bd155a7c38f 100644 --- a/src/mapleall/maple_me/include/orig_symbol.h +++ b/src/mapleall/maple_me/include/orig_symbol.h @@ -480,13 +480,13 @@ class OriginalStTable { MIRType *GetTypeFromBaseAddressAndFieldId(TyIdx tyIdx, FieldID fieldId, bool isFieldArrayType) const; OriginalSt *FindOrCreateExtraLevOriginalSt( - const VersionSt *vst, TyIdx tyIdx, FieldID fieldId, + const VersionSt &vst, TyIdx tyIdx, FieldID fieldId, const OffsetType &offset = OffsetType(kOffsetUnknown), bool isFieldArrayType = false); OriginalSt *FindExtraLevOriginalSt( - const MapleVector &nextLevelOsts, const TyIdx &tyIdxOfPtr, const MIRType *type, FieldID fieldId, + const MapleVector &nextLevelOsts, const TyIdx &tyIdxOfPtr, const MIRType *type, FieldID fld, const OffsetType &offset = OffsetType(kOffsetUnknown)) const; OriginalSt 
*FindExtraLevOriginalSt( - const VersionSt *vst, const TyIdx &tyIdxOfPtr, const MIRType *typeOfOst, FieldID fieldId, + const VersionSt *vst, const TyIdx &tyIdxOfPtr, const MIRType *typeOfOst, FieldID fld, const OffsetType &offset = OffsetType(kOffsetUnknown)) const; OriginalSt *FindOrCreateAddrofSymbolOriginalSt(OriginalSt *ost); MapleVector *GetNextLevelOstsOfVst(size_t vstIdx) const; diff --git a/src/mapleall/maple_me/include/pme_emit.h b/src/mapleall/maple_me/include/pme_emit.h index bfd4e03b7ace7cf0853244efebe4e8078c16d764..4082057c8fac37831c86c0c326c9a89cf8df7d15 100644 --- a/src/mapleall/maple_me/include/pme_emit.h +++ b/src/mapleall/maple_me/include/pme_emit.h @@ -82,17 +82,17 @@ class PreMeEmitter : public AnalysisResult { MapleMap *GetPreMeExprExtensionMap() { return &preMeExprExtensionMap; } FuncProfInfo *GetFuncProfData() { return mirFunc->GetFuncProfData(); } void SetIpaInfo(CollectIpaInfo *info) { ipaInfo = info; } - void UpdateStmtInfo(const MeStmt &meStmt, StmtNode &stmt, BlockNode &currBlock, FreqType frequency); - void UpdateStmtInfoForLabelNode(LabelNode &label, BB &bb); + void UpdateStmtInfo(const MeStmt &meStmt, StmtNode &stmt, BlockNode &currBlock, FreqType frequency) const; + void UpdateStmtInfoForLabelNode(LabelNode &label, BB &bb) const; private: - ArrayNode *ConvertToArray(BaseNode *x, TyIdx ptrTyIdx); - BaseNode *EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent); - StmtNode* EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent); - void EmitBB(BB *bb, BlockNode *curBlk); - DoloopNode *EmitPreMeDoloop(BB *meWhileBB, BlockNode *curBlk, PreMeWhileInfo *whileInfo); - WhileStmtNode *EmitPreMeWhile(BB *meWhileBB, BlockNode *curBlk); + ArrayNode *ConvertToArray(BaseNode &x, TyIdx ptrTyIdx); + BaseNode *EmitPreMeExpr(MeExpr &meExpr, BaseNode *parent); + StmtNode* EmitPreMeStmt(MeStmt &meStmt, BaseNode *parent); + void EmitBB(BB &bb, BlockNode &curBlk); + DoloopNode *EmitPreMeDoloop(BB &meWhileBB, BlockNode &curBlk, PreMeWhileInfo &whileInfo); + 
WhileStmtNode *EmitPreMeWhile(BB &meWhileBB, BlockNode &curBlk); uint32 Raise2PreMeWhile(uint32 curJ, BlockNode *curBlk); - uint32 Raise2PreMeIf(uint32 curJ, BlockNode *curBlk); + uint32 Raise2PreMeIf(uint32 curJ, BlockNode &curBlk); MeIRMap *meirmap; PreMeFunction *preMeFunc; diff --git a/src/mapleall/maple_me/include/pme_mir_extension.h b/src/mapleall/maple_me/include/pme_mir_extension.h index 4b80f60f189002354293626cccce55461727d6af..922da2051897cd84560bb9ae3f7ac44b6ba0a838 100644 --- a/src/mapleall/maple_me/include/pme_mir_extension.h +++ b/src/mapleall/maple_me/include/pme_mir_extension.h @@ -27,9 +27,9 @@ class PreMeMIRExtension { MeStmt *mestmt; }; - explicit PreMeMIRExtension (BaseNode *p) : parent(p), meexpr(nullptr) {} - PreMeMIRExtension (BaseNode *p, MeExpr *expr) : parent(p), meexpr(expr) {} - PreMeMIRExtension (BaseNode *p, MeStmt *stmt) : parent(p), mestmt(stmt) {} + explicit PreMeMIRExtension(BaseNode *p) : parent(p), meexpr(nullptr) {} + PreMeMIRExtension(BaseNode *p, MeExpr *expr) : parent(p), meexpr(expr) {} + PreMeMIRExtension(BaseNode *p, MeStmt *stmt) : parent(p), mestmt(stmt) {} virtual ~PreMeMIRExtension() = default; BaseNode *GetParent() { return parent; } MeExpr *GetMeExpr() { return meexpr; } diff --git a/src/mapleall/maple_me/include/pme_mir_lower.h b/src/mapleall/maple_me/include/pme_mir_lower.h index 4f16553c67bfefe81a5fbaaaa9d5a6402cea4a6b..d9eab2959698677fd8ec00cf13d4b77d483d9270 100644 --- a/src/mapleall/maple_me/include/pme_mir_lower.h +++ b/src/mapleall/maple_me/include/pme_mir_lower.h @@ -27,7 +27,7 @@ class PreMeMIRLower : public MIRLower { : MIRLower(mod, f->GetMirFunc()), func(f), preMeFunc(f->GetPreMeFunc()) {} - virtual ~PreMeMIRLower() = default; + ~PreMeMIRLower() override = default; BlockNode *LowerWhileStmt(WhileStmtNode &whileStmt) override; BlockNode *LowerIfStmt(IfStmtNode &ifstmt, bool recursive = true) override; diff --git a/src/mapleall/maple_me/include/seqvec.h b/src/mapleall/maple_me/include/seqvec.h index 
dd9e9f5d35c3c13cc87dc69ed9077dfc181af152..04804980cf75d4ffee6844d919a8d323e82e0785 100644 --- a/src/mapleall/maple_me/include/seqvec.h +++ b/src/mapleall/maple_me/include/seqvec.h @@ -37,19 +37,19 @@ class SeqVectorize { void Perform(); void VisitNode(StmtNode *stmt); void CollectStores(IassignNode *iassign); - void DumpCandidates(const MeExpr *base, const StoreList *storelist) const; + void DumpCandidates(const MeExpr &base, const StoreList &storelist) const; void CheckAndTransform(); - bool IsOpExprConsecutiveMem(MeExpr *off1, MeExpr *off2, int32_t diff) const; + bool IsOpExprConsecutiveMem(const MeExpr &off1, const MeExpr &off2, int32_t diff) const; bool CanSeqVec(const IassignNode *s1, const IassignNode *s2, bool reverse = false); bool CanSeqVecRhs(MeExpr *rhs1, MeExpr *rhs2); - void LegalityCheckAndTransform(const StoreList *storelist); + void LegalityCheckAndTransform(const StoreList &storelist); bool HasVecType(PrimType sPrimType, uint8 lanes) const; MIRType* GenVecType(PrimType sPrimType, uint8 lanes) const; RegassignNode *GenDupScalarStmt(BaseNode *scalar, PrimType vecPrimType); - bool SameIntConstValue(const MeExpr *e1, const MeExpr *e2) const; - bool CanAdjustRhsType(PrimType targetType, const ConstvalNode *rhs) const; + bool SameIntConstValue(const MeExpr &e1, const MeExpr &e2) const; + bool CanAdjustRhsType(PrimType targetType, const ConstvalNode &rhs) const; void MergeIassigns(MapleVector &cands); - bool IsIvarExprConsecutiveMem(IvarMeExpr *ivar1, IvarMeExpr *ivar2, PrimType ptrType); + bool IsIvarExprConsecutiveMem(IvarMeExpr *ivar1, IvarMeExpr *ivar2, PrimType ptrType) const; static uint32_t seqVecStores; // iassignnode in same level block MemPool *localMP; diff --git a/src/mapleall/maple_me/include/ssa.h b/src/mapleall/maple_me/include/ssa.h index d50b9c8620cc1878d02739bc4067a718831130f1..e99e6946870e8b89b6a71901d0d649f9be1193ae 100644 --- a/src/mapleall/maple_me/include/ssa.h +++ b/src/mapleall/maple_me/include/ssa.h @@ -99,7 +99,7 @@ class SSA { 
virtual ~SSA() = default; virtual void InsertPhiNode(); - void RenameAllBBs(const MeCFG *cfg); + void RenameAllBBs(const MeCFG &cfg); void UpdateDom(Dominance *dm) { dom = dm; diff --git a/src/mapleall/maple_me/include/ssa_devirtual.h b/src/mapleall/maple_me/include/ssa_devirtual.h index 7cc296fb24f20efe2e786cce07954d0b2ad4098e..ea2020c643bcd414e2f2fc6d1a4d30985fc1569d 100644 --- a/src/mapleall/maple_me/include/ssa_devirtual.h +++ b/src/mapleall/maple_me/include/ssa_devirtual.h @@ -24,7 +24,7 @@ namespace maple { class SSADevirtual { public: static bool debug; - SSADevirtual(MemPool &memPool, MIRModule &currMod, IRMap &irMap, KlassHierarchy &currKh, + SSADevirtual(MemPool &memPool, const MIRModule &currMod, const IRMap &irMap, KlassHierarchy &currKh, Dominance &currDom, size_t bbVecSize, bool skipReturnTypeOpt) : devirtualAlloc(&memPool), mod(&currMod), @@ -42,7 +42,7 @@ class SSADevirtual { optedInterfaceCalls(0), nullCheckCount(0), skipReturnTypeOpt(skipReturnTypeOpt) {} - SSADevirtual(MemPool &memPool, MIRModule &currMod, IRMap &irMap, KlassHierarchy &currKh, + SSADevirtual(MemPool &memPool, const MIRModule &currMod, IRMap &irMap, KlassHierarchy &currKh, Dominance &currDom, size_t bbVecSize, Clone &currClone, bool skipReturnTypeOpt) : SSADevirtual(memPool, currMod, irMap, currKh, currDom, bbVecSize, skipReturnTypeOpt) { clone = &currClone; @@ -77,8 +77,8 @@ class SSADevirtual { private: MapleAllocator devirtualAlloc; - MIRModule *mod; - IRMap *irMap; + const MIRModule *mod; + const IRMap *irMap; KlassHierarchy *kh; Dominance *dom; MapleVector bbVisited; // needed because dominator tree is a DAG in wpo diff --git a/src/mapleall/maple_me/include/ssa_epre.h b/src/mapleall/maple_me/include/ssa_epre.h index 2101208feaab6b8844073b7e9e20fbe76baa5871..62f1e4b9d67ac2ed05b9a1c9f42cab1dd34a2b0e 100644 --- a/src/mapleall/maple_me/include/ssa_epre.h +++ b/src/mapleall/maple_me/include/ssa_epre.h @@ -35,7 +35,7 @@ class SSAEPre : public McSSAPre { void ComputeVarAndDfPhis() 
override; void BuildWorkListExpr(MeStmt &meStmt, int32 seqStmt, MeExpr &meExpr, bool isRebuild, MeExpr *tempVar, bool isRootExpr, bool insertSorted) override; - void BuildWorkListIvarLHSOcc(MeStmt &meStmt, int32 seqStmt, bool isReBuild, MeExpr *tempVar) override; + void BuildWorkListIvarLHSOcc(MeStmt &meStmt, int32 seqStmt, bool isRebuild, MeExpr *tempVar) override; void CollectVarForMeExpr(MeExpr &meExpr, std::vector &varVec) const override; void CollectVarForCand(MeRealOcc &realOcc, std::vector &varVec) const override; bool LeafIsVolatile(const MeExpr *x) const { @@ -64,9 +64,9 @@ class SSAEPre : public McSSAPre { return static_cast(ResolveAllInjuringDefs(static_cast(x))); } void SubstituteOpnd(MeExpr *x, MeExpr *oldopnd, MeExpr *newopnd) override; - bool OpndInDefOcc(const MeExpr *opnd, MeOccur *defocc, uint32 i) const; + bool OpndInDefOcc(const MeExpr &opnd, MeOccur &defocc, uint32 i) const; void SRSetNeedRepair(MeOccur *useocc, std::set *needRepairInjuringDefs) override; - MeExpr *InsertRepairStmt(MeExpr *temp, int64 increAmt, MeStmt *injuringDef) const; + MeExpr *InsertRepairStmt(MeExpr &temp, int64 increAmt, MeStmt &injuringDef) const; MeExpr *SRRepairOpndInjuries(MeExpr *curopnd, MeOccur *defocc, int32 i, MeExpr *tempAtDef, std::set *needRepairInjuringDefs, std::set *repairedInjuringDefs); @@ -77,7 +77,7 @@ class SSAEPre : public McSSAPre { bool epreIncludeRef; bool enableLHSIvar; // here starts methods related to linear function test replacement - ScalarMeExpr *FindScalarVersion(ScalarMeExpr *scalar, MeStmt *stmt) const; + ScalarMeExpr *FindScalarVersion(ScalarMeExpr &scalar, MeStmt *stmt) const; OpMeExpr *FormLFTRCompare(MeRealOcc *compOcc, MeExpr *regorvar) override; void CreateCompOcc(MeStmt *meStmt, int seqStmt, OpMeExpr *compare, bool isRebuilt) override; }; diff --git a/src/mapleall/maple_me/include/ssa_pre.h b/src/mapleall/maple_me/include/ssa_pre.h index 0004230e34a5174d8e215deb64af29e210958e7e..b06e3b74c9701780ec62bec377ba3c5bf08abd2c 100644 --- 
a/src/mapleall/maple_me/include/ssa_pre.h +++ b/src/mapleall/maple_me/include/ssa_pre.h @@ -163,7 +163,7 @@ class SSAPre { } return phiOcc->IsMCWillBeAvail(); } - bool OKToInsert(MePhiOpndOcc *phiOpnd); + bool OKToInsert(MePhiOpndOcc &phiOpnd) const; virtual void Finalize1(); void SetSave(MeOccur &defX); void SetReplacement(MePhiOcc &occ, MeOccur &repDef); diff --git a/src/mapleall/maple_me/include/ssa_tab.h b/src/mapleall/maple_me/include/ssa_tab.h index 78f10f563c7cad8fc0443fe9f578532ce150cc31..e54323c864d0c2f2f896dab6ed68ff9cf4a741c6 100644 --- a/src/mapleall/maple_me/include/ssa_tab.h +++ b/src/mapleall/maple_me/include/ssa_tab.h @@ -77,7 +77,7 @@ class SSATab : public AnalysisResult { // |-----offset------|---|---|-----| // prevLevOst tyIdx field OriginalSt *FindOrCreateExtraLevOst( - const VersionSt *ptrVst, const TyIdx tyIdx, FieldID field, const OffsetType &offset, + const VersionSt &ptrVst, const TyIdx tyIdx, FieldID field, const OffsetType &offset, bool isFieldArrayType = false) { auto *nextLevOst = originalStTable.FindOrCreateExtraLevOriginalSt(ptrVst, tyIdx, field, offset, isFieldArrayType); versionStTable.CreateZeroVersionSt(nextLevOst); diff --git a/src/mapleall/maple_me/include/type_based_alias_analysis.h b/src/mapleall/maple_me/include/type_based_alias_analysis.h index bb5d4b4190fffd8eb8603a413083db100a33d62e..cd9a72f74f4df45ebb53503b9c41799102ab8138 100644 --- a/src/mapleall/maple_me/include/type_based_alias_analysis.h +++ b/src/mapleall/maple_me/include/type_based_alias_analysis.h @@ -21,8 +21,8 @@ namespace maple { class TypeBasedAliasAnalysis { public: static bool MayAlias(const OriginalSt *ostA, const OriginalSt *ostB); - static bool FilterAliasElemOfRHSForIassign(const OriginalSt *aliasElemOst, const OriginalSt *lhsOst, - const OriginalSt *rhsOst); + static bool FilterAliasElemOfRHSForIassign(const OriginalSt &aliasElemOst, const OriginalSt &lhsOst, + const OriginalSt &rhsOst); static bool MayAliasTBAAForC(const OriginalSt *ostA, const 
OriginalSt *ostB); static void ClearOstTypeUnsafeInfo(); static std::vector &GetPtrTypeUnsafe() { @@ -55,7 +55,7 @@ class TypeBasedAliasAnalysis { } return ptrValueTypeUnsafe[prevLevVstIdx]; } - static bool IsFieldTypeOfAggType(MIRType *aggType, MIRType *checkedType); + static bool IsFieldTypeOfAggType(MIRType *aggType, MIRType &checkedType); private: static std::vector ptrValueTypeUnsafe; // index is OStIdx }; diff --git a/src/mapleall/maple_me/include/union_find.h b/src/mapleall/maple_me/include/union_find.h index 3e8db4f9ea20b5e46178c65da668bdf395793a7d..713473a37e97896abdf1162c556c2f9f33740163 100644 --- a/src/mapleall/maple_me/include/union_find.h +++ b/src/mapleall/maple_me/include/union_find.h @@ -38,7 +38,7 @@ class UnionFind { ~UnionFind() { // for the root id's, the sum of their size should be population size -#if DEBUG +#if defined(DEBUG) && DEBUG size_t sum = 0; for (size_t i = 0; i < num; ++i) if (rootIDs[i] == i) { diff --git a/src/mapleall/maple_me/include/vst_use_info.h b/src/mapleall/maple_me/include/vst_use_info.h index 254b9ff9016045be7627d65e9b5f4ee8bea4c436..46a78c520a017a2cee86ef6ccc2f8abd34ca7acb 100644 --- a/src/mapleall/maple_me/include/vst_use_info.h +++ b/src/mapleall/maple_me/include/vst_use_info.h @@ -108,7 +108,7 @@ class VstUseInfo final { return *useSites; } - VstUseSiteList *GetUseSitesOf(const VersionSt &vst) { + VstUseSiteList *GetUseSitesOf(const VersionSt &vst) const { return (*useSites)[vst.GetIndex()]; } diff --git a/src/mapleall/maple_me/src/alias_class.cpp b/src/mapleall/maple_me/src/alias_class.cpp index 9dd7bec1accc9b26d9c227a03b7616bb976e6d27..339fc3e72a21007e7b2bde169dc4795c83a176be 100644 --- a/src/mapleall/maple_me/src/alias_class.cpp +++ b/src/mapleall/maple_me/src/alias_class.cpp @@ -179,6 +179,9 @@ void AliasClass::RecordAliasAnalysisInfo(const VersionSt &vst) { } } + if (ost.GetPointerVstIdx() && IsAddrofVstNextLevNotAllDefSeen(ost.GetPointerVstIdx())) { + SetNextLevNotAllDefsSeen(vst.GetIndex()); + } if 
((ost.IsFormal() && !IsRestrictPointer(&ost) && ost.GetIndirectLev() >= 0) || ost.GetIndirectLev() > 0) { SetNextLevNotAllDefsSeen(vst.GetIndex()); } @@ -252,7 +255,7 @@ static void UpdateFieldIdAndPtrType(const MIRType &baseType, FieldID baseFieldId return; } MIRType *baseMemType = static_cast(baseType).GetPointedType(); - if (baseMemType->GetKind() != kTypeStruct || !TypeBasedAliasAnalysis::IsFieldTypeOfAggType(baseMemType, memType)) { + if (baseMemType->GetKind() != kTypeStruct || !TypeBasedAliasAnalysis::IsFieldTypeOfAggType(baseMemType, *memType)) { return; } auto *structType = static_cast(baseMemType); @@ -298,7 +301,7 @@ VersionSt *AliasClass::FindOrCreateVstOfExtraLevOst( FieldID baseFieldId = aliasInfoOfBaseAddress.fieldID; UpdateFieldIdAndPtrType(*baseType, baseFieldId, offset, newTyIdx, fieldId); - auto *nextLevOst = ssaTab.FindOrCreateExtraLevOst(vstOfBaseAddress, newTyIdx, fieldId, offset, isNextLevelArrayType); + auto *nextLevOst = ssaTab.FindOrCreateExtraLevOst(*vstOfBaseAddress, newTyIdx, fieldId, offset, isNextLevelArrayType); ASSERT(nextLevOst != nullptr, "failed in creating next-level-ost"); auto *zeroVersionOfNextLevOst = ssaTab.GetVerSt(nextLevOst->GetZeroVersionIndex()); RecordAliasAnalysisInfo(*zeroVersionOfNextLevOst); @@ -468,13 +471,21 @@ AliasInfo AliasClass::CreateAliasInfoExpr(BaseNode &expr) { void AliasClass::SetNotAllDefsSeenForMustDefs(const StmtNode &callas) { MapleVector &mustDefs = ssaTab.GetStmtsSSAPart().GetMustDefNodesOf(callas); for (auto &mustDef : mustDefs) { - RecordAliasAnalysisInfo(*mustDef.GetResult()); - SetNextLevNotAllDefsSeen(mustDef.GetResult()->GetIndex()); + auto *vst = mustDef.GetResult(); + if (!vst) { + continue; + } + if (vst->GetOst()->GetType()->GetPrimType() == PTY_agg) { + auto &prevLevVst = FindOrCreateVstOfAddrofOSt(*vst->GetOst()); + SetAddrofVstNextLevNotAllDefsSeen(prevLevVst.GetIndex()); + } + RecordAliasAnalysisInfo(*vst); + SetNextLevNotAllDefsSeen(vst->GetIndex()); } } void 
AliasClass::ApplyUnionForElementsInCopiedArray() { - for (auto *ost : lhsWithUndefinedOffsets) { + for (auto *ost : ostWithUndefinedOffsets) { auto *prevLevOfLHSOst = ssaTab.GetVerSt(ost->GetPointerVstIdx()); if (!prevLevOfLHSOst) { continue; @@ -503,41 +514,45 @@ void AliasClass::ApplyUnionForFieldsInCopiedAgg() { preLevOfRHSOst = &FindOrCreateVstOfAddrofOSt(*rhsost); } - MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsost->GetTyIdx()); - MIRStructType *mirStructType = static_cast(mirType); + MIRStructType *mirStructType = static_cast(lhsost->GetType()); FieldID numFieldIDs = static_cast(mirStructType->NumberOfFieldIDs()); - auto tyIdxOfPrevLevOst = preLevOfLHSOst->GetOst()->GetTyIdx(); + auto tyIdxOfLhsPrevLevOst = lhsost->GetPrevLevelPointerType()->GetTypeIndex(); + auto tyIdxOfRhsPrevLevOst = rhsost->GetPrevLevelPointerType()->GetTypeIndex(); for (FieldID fieldID = 1; fieldID <= numFieldIDs; fieldID++) { MIRType *fieldType = mirStructType->GetFieldType(fieldID); if (!IsPotentialAddress(fieldType->GetPrimType())) { continue; } - OffsetType offset(mirStructType->GetBitOffsetFromBaseAddr(fieldID)); - auto fieldOstLHS = ssaTab.GetOriginalStTable().FindExtraLevOriginalSt( - preLevOfLHSOst, tyIdxOfPrevLevOst, fieldType, fieldID, offset); - auto fieldOstRHS = ssaTab.GetOriginalStTable().FindExtraLevOriginalSt( - preLevOfRHSOst, tyIdxOfPrevLevOst, fieldType, fieldID, offset); + auto offset = OffsetType(mirStructType->GetBitOffsetFromBaseAddr(fieldID)); + auto lhsFieldOffset = offset + lhsost->GetOffset(); + auto rhsFieldOffset = offset + rhsost->GetOffset(); + auto lhsFieldID = fieldID + lhsost->GetFieldID(); + auto rhsFieldID = fieldID + rhsost->GetFieldID(); + auto *fieldOstLHS = ssaTab.GetOriginalStTable().FindExtraLevOriginalSt( + preLevOfLHSOst, tyIdxOfLhsPrevLevOst, fieldType, lhsFieldID, lhsFieldOffset); + auto *fieldOstRHS = ssaTab.GetOriginalStTable().FindExtraLevOriginalSt( + preLevOfRHSOst, tyIdxOfRhsPrevLevOst, fieldType, rhsFieldID, 
rhsFieldOffset); + if (fieldOstLHS == nullptr && fieldOstRHS == nullptr) { continue; } if (fieldOstLHS == nullptr) { - auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(lhsost->GetTyIdx()); fieldOstLHS = ssaTab.GetOriginalStTable().FindOrCreateExtraLevOriginalSt( - preLevOfLHSOst, ptrType->GetTypeIndex(), fieldID, offset); + *preLevOfLHSOst, tyIdxOfLhsPrevLevOst, lhsFieldID, lhsFieldOffset); } if (fieldOstRHS == nullptr) { - auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(rhsost->GetTyIdx()); fieldOstRHS = ssaTab.GetOriginalStTable().FindOrCreateExtraLevOriginalSt( - preLevOfRHSOst, ptrType->GetTypeIndex(), fieldID, offset); + *preLevOfRHSOst, tyIdxOfRhsPrevLevOst, rhsFieldID, rhsFieldOffset); } - auto *zeroVersionStOfFieldOstLHS = ssaTab.GetVersionStTable().GetOrCreateZeroVersionSt(*fieldOstLHS); - RecordAliasAnalysisInfo(*zeroVersionStOfFieldOstLHS); - auto *zeroVersionStOfFieldOstRHS = ssaTab.GetVersionStTable().GetOrCreateZeroVersionSt(*fieldOstRHS); RecordAliasAnalysisInfo(*zeroVersionStOfFieldOstRHS); - + RecordAliasAnalysisInfo(*zeroVersionStOfFieldOstLHS); + if (IsNextLevNotAllDefsSeen(fieldOstLHS->GetZeroVersionIndex())) { + ASSERT_NOT_NULL(fieldOstRHS); + SetNextLevNotAllDefsSeen(fieldOstRHS->GetZeroVersionIndex()); + } CHECK_FATAL(fieldOstLHS, "fieldOstLHS is nullptr!"); CHECK_FATAL(fieldOstRHS, "fieldOstRHS is nullptr!"); unionFind.Union(fieldOstLHS->GetZeroVersionIndex(), fieldOstRHS->GetZeroVersionIndex()); @@ -592,6 +607,7 @@ void AliasClass::ApplyUnionForDassignCopy(VersionSt &lhsVst, VersionSt *rhsVst, return; } + auto *lhsOst = lhsVst.GetOst(); auto *rhsOst = rhsVst->GetOst(); if (rhsOst->GetIndirectLev() < 0) { for (auto *ost : *ssaTab.GetOriginalStTable().GetNextLevelOstsOfVst(rhsVst)) { @@ -599,23 +615,28 @@ void AliasClass::ApplyUnionForDassignCopy(VersionSt &lhsVst, VersionSt *rhsVst, } } - if (mirModule.IsCModule()) { - auto *lhsOst = lhsVst.GetOst(); + auto collectAggsForLaterUnion = [lhsOst, rhsOst, 
this] () { + if (!mirModule.IsCModule()) { + return; + } + // ost with invlid offset should be union with all other osts with same prev level + if (lhsOst->GetOffset().IsInvalid()) { + ostWithUndefinedOffsets.push_back(lhsOst); + } + if (rhsOst->GetOffset().IsInvalid()) { + ostWithUndefinedOffsets.push_back(rhsOst); + } + // collect osts assigned with struct/union to prop their field properties TyIdx lhsTyIdx = lhsOst->GetTyIdx(); TyIdx rhsTyIdx = rhsOst->GetTyIdx(); MIRType *rhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsTyIdx); - if (lhsTyIdx == rhsTyIdx && - (rhsType->GetKind() == kTypeStruct || rhsType->GetKind() == kTypeUnion)) { - if (lhsOst->GetIndex() < rhsOst->GetIndex()) { - aggsToUnion[lhsOst] = rhsOst; - } else { - aggsToUnion[rhsOst] = lhsOst; - } - } - if (lhsOst->GetOffset().IsInvalid()) { - lhsWithUndefinedOffsets.push_back(lhsOst); + auto rhsTypeKind = rhsType->GetKind(); + if (lhsTyIdx != rhsTyIdx || (rhsTypeKind != kTypeStruct && rhsTypeKind != kTypeUnion)) { + return; } - } + aggsToUnion.insert(std::make_pair(lhsOst, rhsOst)); + }; + collectAggsForLaterUnion(); if (rhsOst->GetIndirectLev() > 0 || IsNotAllDefsSeen(rhsOst->GetIndex())) { SetNextLevNotAllDefsSeen(lhsVst.GetIndex()); @@ -623,8 +644,7 @@ void AliasClass::ApplyUnionForDassignCopy(VersionSt &lhsVst, VersionSt *rhsVst, } PrimType rhsPtyp = rhsOst->GetType()->GetPrimType(); if (!(IsPrimitiveInteger(rhsPtyp) && GetPrimTypeSize(rhsPtyp) == GetPrimTypeSize(PTY_ptr)) || - kOpcodeInfo.NotPure(rhs.GetOpCode()) || - HasMallocOpnd(&rhs) || + kOpcodeInfo.NotPure(rhs.GetOpCode()) || HasMallocOpnd(&rhs) || (rhs.GetOpCode() == OP_addrof && IsReadOnlyOst(*rhsOst))) { return; } @@ -669,7 +689,11 @@ void AliasClass::SetPtrOpndsNextLevNADS(unsigned int start, unsigned int end, } } -void AliasClass::SetAggPtrFieldsNextLevNADS(const OriginalSt &ost) { +void AliasClass::SetAggPtrFieldsNextLevNADS(const VersionSt &vst) { + auto &ost = *(vst.GetOst()); + if 
(IsNextLevNotAllDefsSeen(vst.GetIndex()) && ost.GetIndirectLev() > 0) { + return; + } MIRTypeKind typeKind = ost.GetType()->GetKind(); if (typeKind == kTypeStruct || typeKind == kTypeUnion || typeKind == kTypeStructIncomplete) { auto *structType = static_cast(ost.GetType()); @@ -727,12 +751,10 @@ void AliasClass::SetAggOpndPtrFieldsNextLevNADS(MapleVector &opnds) { continue; } AliasInfo aInfo = CreateAliasInfoExpr(*opnd); - if (aInfo.vst == nullptr || - (IsNextLevNotAllDefsSeen(aInfo.vst->GetIndex()) && - aInfo.vst->GetOst()->GetIndirectLev() > 0)) { + if (!aInfo.vst) { continue; } - SetAggPtrFieldsNextLevNADS(*aInfo.vst->GetOst()); + SetAggPtrFieldsNextLevNADS(*aInfo.vst); } } @@ -740,12 +762,10 @@ void AliasClass::SetPtrFieldsOfAggNextLevNADS(const BaseNode *opnd, const Versio if (opnd->GetPrimType() != PTY_agg) { return; } - if (vst == nullptr || - (IsNextLevNotAllDefsSeen(vst->GetIndex()) && - vst->GetOst()->GetIndirectLev() > 0)) { + if (!vst) { return; } - SetAggPtrFieldsNextLevNADS(*vst->GetOst()); + SetAggPtrFieldsNextLevNADS(*vst); } // Iteratively propagate type unsafe info to next level. @@ -838,7 +858,7 @@ void AliasClass::PropagateTypeUnsafe() { } // type may be potential address type like u64/ptr etc. 
-bool AliasClass::IsAddrTypeConsistent(MIRType *typeA, MIRType *typeB) const { +bool AliasClass::IsAddrTypeConsistent(const MIRType *typeA, const MIRType *typeB) const { if (typeA == nullptr || typeB == nullptr) { return false; } @@ -849,8 +869,8 @@ bool AliasClass::IsAddrTypeConsistent(MIRType *typeA, MIRType *typeB) const { return false; } // <* [N] elemType> and <* elemType> are consistent - MIRType *pointedTypeA = static_cast(typeA)->GetPointedType(); - MIRType *pointedTypeB = static_cast(typeB)->GetPointedType(); + MIRType *pointedTypeA = static_cast(typeA)->GetPointedType(); + MIRType *pointedTypeB = static_cast(typeB)->GetPointedType(); if (pointedTypeA->IsMIRArrayType()) { pointedTypeA = static_cast(pointedTypeA)->GetElemType(); } @@ -974,38 +994,127 @@ void AliasClass::ApplyUnionForIntrinsicCall(const IntrinsiccallNode &intrinsicCa } } +void AliasClass::ApplyUnionForDirectAssign(const StmtNode &stmt) { + // RHS + ASSERT_NOT_NULL(stmt.Opnd(0)); + AliasInfo rhsAinfo = CreateAliasInfoExpr(*stmt.Opnd(0)); + // LHS + auto *lhsVst = ssaTab.GetStmtsSSAPart().GetAssignedVarOf(stmt); + OriginalSt *lhsOst = lhsVst->GetOst(); + if (lhsOst->GetFieldID() != 0) { + (void)FindOrCreateVstOfAddrofOSt(*lhsOst); + } + RecordAliasAnalysisInfo(*lhsVst); + ApplyUnionForDassignCopy(*lhsVst, rhsAinfo.vst, *stmt.Opnd(0)); + SetTypeUnsafeForAddrofUnion(rhsAinfo.vst); +} + +void AliasClass::ApplyUnionForIndirectAssign(const StmtNode &stmt) { + auto &iassignNode = static_cast(stmt); + AliasInfo rhsAinfo = CreateAliasInfoExpr(*iassignNode.Opnd(1)); + bool isNextLevelArrayType = iassignNode.GetLHSType()->GetKind() == kTypeArray; + auto *lhsVst = FindOrCreateVstOfExtraLevOst( + *iassignNode.Opnd(0), iassignNode.GetTyIdx(), iassignNode.GetFieldID(), false, isNextLevelArrayType); + if (lhsVst != nullptr) { + ApplyUnionForDassignCopy(*lhsVst, rhsAinfo.vst, *iassignNode.Opnd(1)); + } + SetTypeUnsafeForAddrofUnion(rhsAinfo.vst); + if (iassignNode.IsExpandedFromArrayOfCharFunc()) { + 
TypeBasedAliasAnalysis::SetVstValueTypeUnsafe(lhsVst->GetOst()->GetPointerVstIdx()); + } +} + +void AliasClass::ApplyUnionForCommonDirectCalls(StmtNode &stmt) { + const FuncDesc &desc = GetFuncDescFromCallStmt(static_cast(stmt)); + bool hasnoprivatedefeffect = CallHasNoPrivateDefEffect(&stmt); + for (uint32 i = 0; i < stmt.NumOpnds(); ++i) { + const AliasInfo &ainfo = CreateAliasInfoExpr(*stmt.Opnd(i)); + // no need to solve args that are not used or readSelfOnly. + if (desc.IsArgUnused(i)) { + continue; + } + if (desc.IsReturnNoAlias() && desc.IsArgReadSelfOnly(i)) { + continue; + } + if (desc.IsReturnNoAlias() && desc.IsArgReadMemoryOnly(i) && + ainfo.vst != nullptr && ssaTab.GetNextLevelOsts(*ainfo.vst) != nullptr) { + // Arg reads memory, we should set mayUse(*arg) here. + // If it has next level, memory alias of its nextLev will be inserted to MayUse later. + // If it has no next level, no elements will be inserted thru this arg. + continue; + } + SetPtrOpndNextLevNADS(*stmt.Opnd(i), ainfo.vst, hasnoprivatedefeffect); + SetPtrFieldsOfAggNextLevNADS(stmt.Opnd(i), ainfo.vst); + if (!desc.NoDirectGlobleAccess()) { + continue; + } + auto *vst = ainfo.vst; + if (!vst) { + continue; + } + auto *mirType = vst->GetOst()->GetType(); + if (mirType->IsMIRPtrType()) { + (void)FindOrCreateVstOfExtraLevOst(*stmt.Opnd(i), mirType->GetTypeIndex(), 0, true, false); + } + } +} + +void AliasClass::ApplyUnionForJavaSpecialCalls(StmtNode &stmt) { + const FuncDesc &desc = GetFuncDescFromCallStmt(static_cast(stmt)); + bool hasnoprivatedefeffect = CallHasNoPrivateDefEffect(&stmt); + for (uint32 i = 0; i < stmt.NumOpnds(); ++i) { + const AliasInfo &ainfo = CreateAliasInfoExpr(*stmt.Opnd(i)); + if (ainfo.vst == nullptr) { + continue; + } + if (i == 0) { + continue; + } + // no need to solve args that are not used. 
+ if (desc.IsArgUnused(i)) { + continue; + } + if (hasnoprivatedefeffect && ainfo.vst->GetOst()->IsPrivate()) { + continue; + } + if (!IsPotentialAddress(stmt.Opnd(i)->GetPrimType())) { + continue; + } + if (stmt.Opnd(i)->GetOpCode() == OP_addrof && IsReadOnlyOst(*ainfo.vst->GetOst())) { + continue; + } + SetNextLevNotAllDefsSeen(ainfo.vst->GetIndex()); + } +} + +void AliasClass::ApplyUnionForCallAssigned(const StmtNode &stmt) { + if (!kOpcodeInfo.IsCallAssigned(stmt.GetOpCode())) { + return; + } + if (stmt.GetOpCode() == OP_callassigned) { + auto &callStmt = static_cast(stmt); + auto *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callStmt.GetPUIdx()); + if (mirFunc != nullptr && mirFunc->GetFuncDesc().IsReturnNoAlias()) { + MapleVector &mustDefs = ssaTab.GetStmtsSSAPart().GetMustDefNodesOf(callStmt); + for (auto &mustDef : mustDefs) { + RecordAliasAnalysisInfo(*mustDef.GetResult()); + } + return; + } + } + SetNotAllDefsSeenForMustDefs(stmt); +} + void AliasClass::ApplyUnionForCopies(StmtNode &stmt) { switch (stmt.GetOpCode()) { case OP_maydassign: case OP_dassign: case OP_regassign: { - // RHS - ASSERT_NOT_NULL(stmt.Opnd(0)); - AliasInfo rhsAinfo = CreateAliasInfoExpr(*stmt.Opnd(0)); - // LHS - auto *lhsVst = ssaTab.GetStmtsSSAPart().GetAssignedVarOf(stmt); - OriginalSt *lhsOst = lhsVst->GetOst(); - if (lhsOst->GetFieldID() != 0) { - (void)FindOrCreateVstOfAddrofOSt(*lhsOst); - } - RecordAliasAnalysisInfo(*lhsVst); - ApplyUnionForDassignCopy(*lhsVst, rhsAinfo.vst, *stmt.Opnd(0)); - SetTypeUnsafeForAddrofUnion(rhsAinfo.vst); + ApplyUnionForDirectAssign(stmt); return; } case OP_iassign: { - auto &iassignNode = static_cast(stmt); - AliasInfo rhsAinfo = CreateAliasInfoExpr(*iassignNode.Opnd(1)); - bool isNextLevelArrayType = iassignNode.GetLHSType()->GetKind() == kTypeArray; - auto *lhsVst = FindOrCreateVstOfExtraLevOst( - *iassignNode.Opnd(0), iassignNode.GetTyIdx(), iassignNode.GetFieldID(), false, isNextLevelArrayType); - if (lhsVst != nullptr) { 
- ApplyUnionForDassignCopy(*lhsVst, rhsAinfo.vst, *iassignNode.Opnd(1)); - } - SetTypeUnsafeForAddrofUnion(rhsAinfo.vst); - if (iassignNode.IsExpandedFromArrayOfCharFunc()) { - TypeBasedAliasAnalysis::SetVstValueTypeUnsafe(lhsVst->GetOst()->GetPointerVstIdx()); - } + ApplyUnionForIndirectAssign(stmt); return; } case OP_throw: { @@ -1015,27 +1124,7 @@ void AliasClass::ApplyUnionForCopies(StmtNode &stmt) { } case OP_call: case OP_callassigned: { - const FuncDesc &desc = GetFuncDescFromCallStmt(static_cast(stmt)); - bool hasnoprivatedefeffect = CallHasNoPrivateDefEffect(&stmt); - for (uint32 i = 0; i < stmt.NumOpnds(); ++i) { - const AliasInfo &ainfo = CreateAliasInfoExpr(*stmt.Opnd(i)); - // no need to solve args that are not used or readSelfOnly. - if (desc.IsArgUnused(i)) { - continue; - } - if (desc.IsReturnNoAlias() && desc.IsArgReadSelfOnly(i)) { - continue; - } - if (desc.IsReturnNoAlias() && desc.IsArgReadMemoryOnly(i) && - ainfo.vst != nullptr && ssaTab.GetNextLevelOsts(*ainfo.vst) != nullptr) { - // Arg reads memory, we should set mayUse(*arg) here. - // If it has next level, memory alias of its nextLev will be inserted to MayUse later. - // If it has no next level, no elements will be inserted thru this arg. - continue; - } - SetPtrOpndNextLevNADS(*stmt.Opnd(i), ainfo.vst, hasnoprivatedefeffect); - SetPtrFieldsOfAggNextLevNADS(stmt.Opnd(i), ainfo.vst); - } + ApplyUnionForCommonDirectCalls(stmt); break; } case OP_virtualcall: @@ -1052,31 +1141,7 @@ void AliasClass::ApplyUnionForCopies(StmtNode &stmt) { case OP_interfaceicallassigned: case OP_customcallassigned: case OP_polymorphiccallassigned: { - const FuncDesc &desc = GetFuncDescFromCallStmt(static_cast(stmt)); - bool hasnoprivatedefeffect = CallHasNoPrivateDefEffect(&stmt); - for (uint32 i = 0; i < stmt.NumOpnds(); ++i) { - const AliasInfo &ainfo = CreateAliasInfoExpr(*stmt.Opnd(i)); - if (ainfo.vst == nullptr) { - continue; - } - if (i == 0) { - continue; - } - // no need to solve args that are not used. 
- if (desc.IsArgUnused(i)) { - continue; - } - if (hasnoprivatedefeffect && ainfo.vst->GetOst()->IsPrivate()) { - continue; - } - if (!IsPotentialAddress(stmt.Opnd(i)->GetPrimType())) { - continue; - } - if (stmt.Opnd(i)->GetOpCode() == OP_addrof && IsReadOnlyOst(*ainfo.vst->GetOst())) { - continue; - } - SetNextLevNotAllDefsSeen(ainfo.vst->GetIndex()); - } + ApplyUnionForJavaSpecialCalls(stmt); break; } case OP_asm: @@ -1106,20 +1171,7 @@ void AliasClass::ApplyUnionForCopies(StmtNode &stmt) { break; } } - if (kOpcodeInfo.IsCallAssigned(stmt.GetOpCode())) { - if (stmt.GetOpCode() == OP_callassigned) { - auto &callStmt = static_cast(stmt); - auto *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callStmt.GetPUIdx()); - if (mirFunc != nullptr && mirFunc->GetFuncDesc().IsReturnNoAlias()) { - MapleVector &mustDefs = ssaTab.GetStmtsSSAPart().GetMustDefNodesOf(callStmt); - for (auto &mustDef : mustDefs) { - RecordAliasAnalysisInfo(*mustDef.GetResult()); - } - return; - } - } - SetNotAllDefsSeenForMustDefs(stmt); - } + ApplyUnionForCallAssigned(stmt); } void AliasClass::ApplyUnionForPhi(const PhiNode &phi) { @@ -2246,7 +2298,7 @@ void AliasClass::CollectMayDefForIassign(StmtNode &stmt, OstPtrSet &mayDefOsts) if (!MayAliasBasicAA(ostOfLhs, aliasedOst)) { continue; } - if (TypeBasedAliasAnalysis::FilterAliasElemOfRHSForIassign(aliasedOst, ostOfLhs, rhsOst)) { + if (TypeBasedAliasAnalysis::FilterAliasElemOfRHSForIassign(*aliasedOst, *ostOfLhs, *rhsOst)) { continue; } } @@ -2325,7 +2377,7 @@ void AliasClass::InsertMayDefUseSyncOps(StmtNode &stmt, BBId bbid) { AccessSSANodes *theSSAPart = ssaTab.GetStmtsSSAPart().SSAPartOf(stmt); for (uint32 ostIdx : aliasSet) { OriginalSt *aliasOst = ssaTab.GetOriginalStFromID(OStIdx(ostIdx)); - if (!aliasOst->IsFinal()) { + if (aliasOst && !aliasOst->IsFinal()) { VersionSt *vst0 = ssaTab.GetVerSt(aliasOst->GetZeroVersionIndex()); CHECK_FATAL(theSSAPart, "theSSAPart is nullptr!"); theSSAPart->InsertMayUseNode(MayUseNode(vst0)); 
@@ -2459,7 +2511,7 @@ void AliasClass::CollectMayUseForIntrnCallOpnd(const StmtNode &stmt, bool writeOpnd = intrinDesc->WriteNthOpnd(opndId); if (mayDefUseOsts.size() == 0 && writeOpnd) { // create next-level ost as it not seen before - auto nextLevOst = ssaTab.FindOrCreateExtraLevOst(vst, vst->GetOst()->GetTyIdx(), 0, OffsetType(0)); + auto nextLevOst = ssaTab.FindOrCreateExtraLevOst(*vst, vst->GetOst()->GetTyIdx(), 0, OffsetType(0)); CHECK_FATAL(nextLevOst != nullptr, "Failed to create next-level ost"); auto *zeroVersionOfNextLevOst = ssaTab.GetVerSt(nextLevOst->GetZeroVersionIndex()); RecordAliasAnalysisInfo(*zeroVersionOfNextLevOst); @@ -2550,18 +2602,22 @@ void AliasClass::InsertMayDefUseCall(StmtNode &stmt, BBId bbid, bool isDirectCal const FuncDesc *desc = nullptr; if (isDirectCall) { desc = &GetFuncDescFromCallStmt(static_cast(stmt)); - hasSideEffect = !desc->IsPure() && !desc->IsConst(); + hasSideEffect = desc->GetFuncInfo() < FI::kNoDirectGlobleAccess; } auto *ssaPart = ssaTab.GetStmtsSSAPart().SSAPartOf(stmt); OstPtrSet mayDefOstsA; OstPtrSet mayUseOstsA; OstPtrSet mustNotDefOsts; OstPtrSet mustNotUseOsts; + bool mayUseNads = desc == nullptr || (!desc->NoDirectGlobleAccess() && !desc->IsConst()); + bool mayDefNads = hasSideEffect; // 1. collect mayDefs and mayUses caused by callee-opnds CollectMayDefUseForCallOpnd(stmt, mayDefOstsA, mayUseOstsA, mustNotDefOsts, mustNotUseOsts); // 2. collect mayDefs and mayUses caused by not_all_def_seen_ae - OstPtrSetSub(nadsOsts, mustNotUseOsts, mayUseOstsA); - if (hasSideEffect) { + if (mayUseNads) { + OstPtrSetSub(nadsOsts, mustNotUseOsts, mayUseOstsA); + } + if (mayDefNads) { OstPtrSetSub(nadsOsts, mustNotDefOsts, mayDefOstsA); } // insert mayuse node caused by opnd and not_all_def_seen_ae. @@ -2572,11 +2628,11 @@ void AliasClass::InsertMayDefUseCall(StmtNode &stmt, BBId bbid, bool isDirectCal // 3. 
insert mayDefs and mayUses caused by globalsAffectedByCalls OstPtrSet mayDefUseOfGOsts; CollectMayUseFromGlobalsAffectedByCalls(mayDefUseOfGOsts); - if (desc == nullptr || !desc->IsConst()) { + if (mayUseNads) { InsertMayUseNode(mayDefUseOfGOsts, ssaPart); } // insert may def node, if the callee has side-effect. - if (hasSideEffect) { + if (mayDefNads) { InsertMayDefNodeExcludeFinalOst(mayDefUseOfGOsts, ssaPart, stmt, bbid); } if (kOpcodeInfo.IsCallAssigned(stmt.GetOpCode())) { diff --git a/src/mapleall/maple_me/src/bb.cpp b/src/mapleall/maple_me/src/bb.cpp index 6b89b9445fe760b3584aa60137498e7b714d9e4e..4e496561c2d5dda9caf34de3d657855ea341e398 100644 --- a/src/mapleall/maple_me/src/bb.cpp +++ b/src/mapleall/maple_me/src/bb.cpp @@ -17,6 +17,7 @@ #include "me_ssa.h" #include "mempool_allocator.h" #include "ver_symbol.h" +#include "mir_lower.h" namespace maple { std::string BB::StrAttribute() const { @@ -135,7 +136,7 @@ bool BB::InsertPhi(MapleAllocator *alloc, VersionSt *versionSt) { auto status = phiList.emplace(std::make_pair(versionSt->GetOst()->GetIndex(), phiNode)); if (status.second) { status.first->second.GetPhiOpnds().resize(pred.size()); - for (int idx = 0; idx < pred.size(); ++idx) { + for (size_t idx = 0; idx < pred.size(); ++idx) { status.first->second.SetPhiOpnd(idx, *versionSt); } } @@ -345,6 +346,59 @@ void BB::MoveAllSuccToPred(BB *newPred, BB *commonExit) { } } +bool BB::IsImmediateUnlikelyBB() const { + if (pred.size() != 1 || pred[0]->GetKind() != kBBCondGoto) { + return false; + } + auto *condBB = pred[0]; + auto *unlikelySucc = condBB->GetUnlikelySuccOfCondBB(); + return unlikelySucc == this; +} + +bool BB::IsImmediateLikelyBB() const { + if (pred.size() != 1 || pred[0]->GetKind() != kBBCondGoto) { + return false; + } + auto *condBB = pred[0]; + auto *likelySucc = condBB->GetLikelySuccOfCondBB(); + return likelySucc == this; +} + +BB *BB::GetUnlikelySuccOfCondBB() { + CHECK_FATAL(kind == kBBCondGoto, "expect a condBB"); + // The probability of 
jumping to targetBB + int32 branchProb = -1; + if (!meStmtList.empty()) { // for MEIR BB + auto *condMeStmt = static_cast(GetLastMe()); + branchProb = condMeStmt->GetBranchProb(); + } else if (!stmtNodeList.empty()) { // for MapleIR BB + auto &condStmt = static_cast(GetLast()); + branchProb = condStmt.GetBranchProb(); + } else { + CHECK_FATAL_FALSE("a condBB should be never empty"); + } + auto *fallthruBB = GetSucc(0); // The first successor of condBB is always the fallthrough BB + auto *targetBB = GetSucc(1); // The second successor of condBB is always the target BB + if (branchProb == kProbUnlikely) { + return targetBB; + } + if (branchProb == kProbLikely) { + return fallthruBB; + } + return nullptr; +} + +BB *BB::GetLikelySuccOfCondBB() { + auto *unlikelySucc = GetUnlikelySuccOfCondBB(); + if (unlikelySucc == nullptr) { + return nullptr; + } + auto *fallthruBB = GetSucc(0); // The first successor of condBB is always the fallthrough BB + auto *targetBB = GetSucc(1); // The second successor of condBB is always the target BB + auto *likelySucc = (unlikelySucc == targetBB) ? 
fallthruBB : targetBB; + return likelySucc; +} + void BB::FindReachableBBs(std::vector &visitedBBs) const { CHECK_FATAL(GetBBId() < visitedBBs.size(), "out of range in BB::FindReachableBBs"); if (visitedBBs[GetBBId()]) { @@ -420,6 +474,7 @@ void BB::InsertMeStmtBefore(const MeStmt *meStmt, MeStmt *inStmt) { } void BB::InsertMeStmtAfter(const MeStmt *meStmt, MeStmt *inStmt) { + CHECK_FATAL(inStmt != nullptr, "null ptr check"); meStmtList.insertAfter(meStmt, inStmt); inStmt->SetBB(this); } @@ -471,8 +526,11 @@ void BB::DumpMePhiList(const IRMap *irMap) { for (const auto &phi : mePhiList) { phi.second->Dump(irMap); int dumpVsyNum = DumpOptions::GetDumpVsyNum(); - if (dumpVsyNum > 0 && ++count >= dumpVsyNum) { - break; + if (dumpVsyNum > 0) { + ++count; + if (count >= dumpVsyNum) { + break; + } } ASSERT(count >= 0, "mePhiList too large"); } @@ -494,7 +552,9 @@ void BB::UpdateEdgeFreqs(bool updateBBFreqOfSucc) { } for (size_t i = 0; i < len; ++i) { FreqType sfreq = GetSuccFreq()[i]; - FreqType scalefreq = (succFreqs == 0 ? (frequency / len) : (sfreq * frequency / succFreqs)); + ASSERT(frequency != -1, "frequency != -1"); + FreqType scalefreq = (succFreqs == 0 ? 
+ static_cast(static_cast(frequency) / len) : (sfreq * frequency / succFreqs)); SetSuccFreq(static_cast(i), scalefreq); // update succ frequency with new difference if needed if (updateBBFreqOfSucc) { diff --git a/src/mapleall/maple_me/src/cast_opt.cpp b/src/mapleall/maple_me/src/cast_opt.cpp index 78594e20c7166b8cdc564825d9b6a2ea2dc74e50..b191d6520c8eb24f223f45fe75ce7c89d853b000 100644 --- a/src/mapleall/maple_me/src/cast_opt.cpp +++ b/src/mapleall/maple_me/src/cast_opt.cpp @@ -16,6 +16,7 @@ #include "irmap.h" #include "mir_builder.h" #include "constantfold.h" +#include "mir_type.h" namespace maple { // For controlling cast elimination @@ -400,6 +401,7 @@ MeExpr *MeCastOpt::SimplifyCastSingle(IRMap &irMap, const MeExprCastInfo &castIn } if (varExpr->GetDefBy() == kDefByStmt && !varExpr->IsVolatile()) { MeStmt *defStmt = varExpr->GetDefByMeStmt(); + ASSERT_NOT_NULL(defStmt); if (defStmt->GetOp() == OP_dassign && IsCompareOp(static_cast(defStmt)->GetRHS()->GetOp())) { // zext/sext + dread non-u1 %var (%var is defined by compare op) ==> dread non-u1 %var return opnd; @@ -441,13 +443,17 @@ MeExpr *MeCastOpt::SimplifyCastPair(IRMap &irMap, const MeExprCastInfo &firstCas // To improved: do more powerful optimization for firstCastImplicit bool isFirstCastImplicit = !IsExplicitCastOp(firstCastExpr->GetOp()); if (isFirstCastImplicit) { - // Wrong example: zext u32 u8 (iread u32 <* u16>) =[x]=> iread u32 <* u16> + // Wrong examples: + // zext u32 u8 (iread u32 <* u16>) =[x]=> iread u32 <* u16> // srcType may be modified, we should use origSrcType - if (resultCastKind != CAST_unknown && dstType == midType1 && - GetPrimTypeActualBitSize(midType2) >= GetPrimTypeActualBitSize(origSrcType)) { - return firstCastExpr; - } else { + const auto outerFromTypeLowerThanInner = GetPrimTypeActualBitSize(midType2) < GetPrimTypeActualBitSize(origSrcType); + // sext u32 i8 (iread u32 <* u8>) =[x]=> iread u32 <* u8> + const auto extsWithSignDiffer = firstCastInfo.IsExtension() && 
secondCastInfo.IsExtension() && + IsPrimitiveUnsigned(midType2) != IsPrimitiveUnsigned(origSrcType); + if (resultCastKind == CAST_unknown || dstType != midType1 || outerFromTypeLowerThanInner || extsWithSignDiffer) { return nullptr; + } else { + return firstCastExpr; } } diff --git a/src/mapleall/maple_me/src/copy_prop.cpp b/src/mapleall/maple_me/src/copy_prop.cpp index 706ec31a5f2d2936479d9eed233f956886aed980..5f9934c28f9c3f4bab7977d4dace05271740f404 100644 --- a/src/mapleall/maple_me/src/copy_prop.cpp +++ b/src/mapleall/maple_me/src/copy_prop.cpp @@ -14,6 +14,7 @@ */ #include "copy_prop.h" #include "me_cfg.h" +#include "me_hdse.h" namespace maple { static constexpr uint kMaxDepth = 5; @@ -22,59 +23,6 @@ static bool PropagatableByCopyProp(const MeExpr *newExpr) { return newExpr->GetMeOp() == kMeOpReg || newExpr->GetMeOp() == kMeOpConst; } -static bool PropagatableBaseOfIvar(const IvarMeExpr *ivar, const MeExpr *newExpr) { - if (PropagatableByCopyProp(newExpr)) { - return true; - } - if ((ivar->GetFieldID() != 0 && ivar->GetFieldID() != 1) || ivar->GetOffset() != 0) { - return false; - } - -#if TARGX86_64 || TARGX86 || TARGVM || TARGARM32 - return false; -#endif - - if (newExpr->GetOp() == OP_add) { - auto opndA = newExpr->GetOpnd(0); - if (!PropagatableByCopyProp(opndA)) { - return false; - } - - auto opndB = newExpr->GetOpnd(1); - if (PropagatableByCopyProp(opndB)) { - return true; - } - - if (opndB->GetOp() == OP_cvt || opndB->GetOp() == OP_retype) { - if (PropagatableByCopyProp(opndB->GetOpnd(0))) { - return true; - } - } else if (opndB->GetOp() == OP_mul) { - auto opndC = opndB->GetOpnd(0); - if (!PropagatableByCopyProp(opndC)) { - return false; - } - - auto opndD = opndB->GetOpnd(1); - if (opndD->GetMeOp() != kMeOpConst) { - return false; - } - auto constVal = static_cast(opndD)->GetConstVal(); - if (constVal->GetKind() != kConstInt) { - return false; - } - int64 val = static_cast(constVal)->GetExtValue(); - if (val == 0 || val == GetPrimTypeSize(PTY_ptr)) { 
- return true; - } - } - } - if (newExpr->GetOp() == OP_iaddrof) { - return PropagatableByCopyProp(newExpr->GetOpnd(0)); - } - return false; -} - static bool PropagatableOpndOfOperator(const MeExpr *meExpr, Opcode op, size_t opndId) { if (PropagatableByCopyProp(meExpr)) { return true; @@ -303,8 +251,7 @@ MeExpr &CopyProp::PropMeExpr(MeExpr &meExpr, bool &isproped, bool atParm) { return meExpr; } - if (!(base->GetMeOp() == kMeOpVar || base->GetMeOp() == kMeOpReg) || - PropagatableBaseOfIvar(ivarMeExpr, propedExpr)) { + if (!(base->GetMeOp() == kMeOpVar || base->GetMeOp() == kMeOpReg) || PropagatableByCopyProp(propedExpr)) { isproped = true; IvarMeExpr newMeExpr(&irMap.GetIRMapAlloc(), -1, *ivarMeExpr); newMeExpr.SetBase(propedExpr); @@ -484,6 +431,7 @@ void MECopyProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const { aDep.AddRequired(); aDep.AddRequired(); aDep.AddRequired(); + aDep.AddRequired(); aDep.SetPreservedAll(); } @@ -508,6 +456,13 @@ bool MECopyProp::PhaseRun(maple::MeFunction &f) { copyProp.ReplaceSelfAssign(); copyProp.TraversalBB(*f.GetCfg()->GetCommonEntryBB()); useInfo.InvalidUseInfo(); + // run hdse to remove unused stmts + auto *aliasClass0 = GET_ANALYSIS(MEAliasClass, f); + MeHDSE hdse(f, *dom, *pdom, *f.GetIRMap(), aliasClass0, DEBUGFUNC_NEWPM(f)); + if (!MeOption::quiet) { + LogInfo::MapleLogger() << " == " << PhaseName() << " invokes [ " << hdse.PhaseName() << " ] ==\n"; + } + hdse.DoHDSESafely(&f, *GetAnalysisInfoHook()); if (DEBUGFUNC_NEWPM(f)) { LogInfo::MapleLogger() << "\n============== After Copy Propagation =============" << '\n'; f.Dump(false); diff --git a/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp b/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp index 2118f9e7afe57c6825651dad6eff495e0e1e6946..df7fc82997fd4c76bd4c24aaabb0d6f6c54657df 100644 --- a/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp +++ b/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp @@ -154,7 +154,7 @@ void 
ProgramExprGraph::Dump() const { } } -PEGBuilder::PtrValueRecorder PEGBuilder::BuildPEGNodeOfDread(const AddrofSSANode *dread) { +PEGBuilder::PtrValueRecorder PEGBuilder::BuildPEGNodeOfDread(const AddrofSSANode *dread) const { auto *dreadNode = static_cast(dread); auto *vst = dreadNode->GetSSAVar(); auto *ost = vst->GetOst(); @@ -198,7 +198,7 @@ PEGBuilder::PtrValueRecorder PEGBuilder::BuildPEGNodeOfIread(const IreadSSANode offset = typeHasBeenCasted ? OffsetType::InvalidOffset() : offset; auto *vstOfBase = ptrNode.pegNode->vst; - auto *mayUsedOst = ssaTab->FindOrCreateExtraLevOst(vstOfBase, iread->GetTyIdx(), iread->GetFieldID(), offset); + auto *mayUsedOst = ssaTab->FindOrCreateExtraLevOst(*vstOfBase, iread->GetTyIdx(), iread->GetFieldID(), offset); auto *zeroVersionOfMayUsedOst = ssaTab->GetVersionStTable().GetZeroVersionSt(mayUsedOst); // build prevLev-nextLev relationship auto *pegNodeOfMayUsedOSt = peg->GetOrCreateNodeOf(zeroVersionOfMayUsedOst); @@ -404,6 +404,9 @@ void PEGBuilder::AddAssignEdge(const StmtNode *stmt, PEGNode *lhsNode, PEGNode * auto lhsOst = lhsNode->vst->GetOst(); auto rhsOst = rhsNode->vst->GetOst(); bool rhsIsAddress = MaybeAddress(rhsOst->GetTyIdx()); + if (lhsNode->attr[kAliasAttrGlobal] && rhsIsAddress) { + rhsNode->attr[kAliasAttrNextLevNotAllDefsSeen] = true; + } if (lhsIsAddress) { // formal has init value at function entry, // redefining makes formal be multi-defined. @@ -443,16 +446,19 @@ void PEGBuilder::AddAssignEdge(const StmtNode *stmt, PEGNode *lhsNode, PEGNode * continue; } OffsetType bitOffset(structType->GetBitOffsetFromBaseAddr(fieldId)); - + auto lhsFieldOffset = bitOffset + lhsOst->GetOffset(); + auto rhsFieldOffset = bitOffset + rhsOst->GetOffset(); + auto lhsMemberFieldId = fieldId + lhsOst->GetFieldID(); + auto rhsMemberFieldId = fieldId + rhsOst->GetFieldID(); const auto *nextLevOstsOfLHS = ssaTab->GetNextLevelOsts(*preLevOfLHSOst); auto fieldOstLHS = (nextLevOstsOfLHS == nullptr) ? 
nullptr : ssaTab->GetOriginalStTable().FindExtraLevOriginalSt( - *nextLevOstsOfLHS, tyIdxOfPrevLevOst, fieldType, fieldId, bitOffset); + *nextLevOstsOfLHS, tyIdxOfPrevLevOst, fieldType, lhsMemberFieldId, lhsFieldOffset); const auto *nextLevOstsOfRHS = ssaTab->GetNextLevelOsts(*preLevOfRHSOst); auto fieldOstRHS = (nextLevOstsOfRHS == nullptr) ? nullptr : ssaTab->GetOriginalStTable().FindExtraLevOriginalSt( - *nextLevOstsOfRHS, tyIdxOfPrevLevOst, fieldType, fieldId, bitOffset); + *nextLevOstsOfRHS, tyIdxOfPrevLevOst, fieldType, rhsMemberFieldId, rhsFieldOffset); if (fieldOstLHS == nullptr && fieldOstRHS == nullptr) { continue; } @@ -460,16 +466,14 @@ void PEGBuilder::AddAssignEdge(const StmtNode *stmt, PEGNode *lhsNode, PEGNode * // the OriginalSt of at least one side has appearance in code if (fieldOstLHS == nullptr) { auto *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(lhsOst->GetTyIdx()); - fieldOstLHS = ssaTab->GetOriginalStTable().FindOrCreateExtraLevOriginalSt(preLevOfLHSOst, - ptrType->GetTypeIndex(), - fieldId, bitOffset); + fieldOstLHS = ssaTab->GetOriginalStTable().FindOrCreateExtraLevOriginalSt( + *preLevOfLHSOst, ptrType->GetTypeIndex(), lhsMemberFieldId, lhsFieldOffset); } if (fieldOstRHS == nullptr) { auto *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(rhsOst->GetTyIdx()); - fieldOstRHS = ssaTab->GetOriginalStTable().FindOrCreateExtraLevOriginalSt(preLevOfRHSOst, - ptrType->GetTypeIndex(), - fieldId, bitOffset); + fieldOstRHS = ssaTab->GetOriginalStTable().FindOrCreateExtraLevOriginalSt( + *preLevOfRHSOst, ptrType->GetTypeIndex(), rhsMemberFieldId, rhsFieldOffset); } auto *zeroVersionOfFieldOstLHS = ssaTab->GetVersionStTable().GetOrCreateZeroVersionSt(*fieldOstLHS); @@ -543,8 +547,11 @@ void PEGBuilder::BuildPEGNodeInIassign(const IassignNode *iassign) { } auto *vstOfBase = baseAddrValNode.pegNode->vst; + auto fieldID = iassign->GetFieldID(); + auto *mirStructType = 
static_cast(GetTypeFromTyIdx(iassign->GetTyIdx())).GetPointedType(); + auto offset = OffsetType(mirStructType->GetBitOffsetFromBaseAddr(fieldID)) + baseAddrValNode.offset; OriginalSt *defedOst = - ssaTab->FindOrCreateExtraLevOst(vstOfBase, iassign->GetTyIdx(), iassign->GetFieldID(), baseAddrValNode.offset); + ssaTab->FindOrCreateExtraLevOst(*vstOfBase, iassign->GetTyIdx(), fieldID, offset); CHECK_FATAL(defedOst, "defedOst is nullptr"); auto zeroVersionSt = ssaTab->GetVerSt(defedOst->GetZeroVersionIndex()); PEGNode *lhsNode = peg->GetOrCreateNodeOf(zeroVersionSt); @@ -732,7 +739,7 @@ void PropGlobalAndFormalAttr(PEGNode *node, AliasAttribute attr, int32 indirectL } } -void PEGBuilder::UpdateAttributes() { +void PEGBuilder::UpdateAttributes() const { // multi-assign of ost results in uncertainty of value of the ost. For example: // L1: int *ptr = array; // L2: def/use *(ptr + 1) = 1; @@ -915,6 +922,7 @@ void DemandDrivenAliasAnalysis::Propagate(WorkListType &workList, PEGNode *to, c if (newNode.first) { (void)workList.emplace_back(WorkListItem(to, reachItem.src, reachItem.state, offset)); to->CopyAttrFromValueAliasedNode(reachItem.src); + reachItem.src->UpdateAttrWhenReachingGlobalNode(to); if (enableDebug) { LogInfo::MapleLogger() << "===New candidate: "; reachItem.src->vst->Dump(); diff --git a/src/mapleall/maple_me/src/hdse.cpp b/src/mapleall/maple_me/src/hdse.cpp index 28c5e5e49a40a7a8e1201918eed7741c9a0004cc..53097b0e4d6ff7e0cfe13b6b89294985aa1c8481 100644 --- a/src/mapleall/maple_me/src/hdse.cpp +++ b/src/mapleall/maple_me/src/hdse.cpp @@ -23,6 +23,8 @@ #include "ssa_mir_nodes.h" #include "utils.h" #include "ver_symbol.h" +#include "maple_phase.h" +#include "me_phase_manager.h" namespace maple { using namespace utils; @@ -260,6 +262,7 @@ void HDSE::RemoveNotRequiredStmtsInBB(BB &bb) { ASSERT(bb.GetFrequency() >= succ0Freq, "sanity check"); bb.GetSucc(0)->SetFrequency(bb.GetSucc(0)->GetFrequency() + (bb.GetFrequency() - succ0Freq)); } + cfgChanged = true; } // A 
ivar contained in stmt if (stmt2NotNullExpr.find(mestmt) != stmt2NotNullExpr.end()) { @@ -302,6 +305,7 @@ void HDSE::RemoveNotRequiredStmtsInBB(BB &bb) { bb.GetSucc().pop_back(); bb.SetKind(kBBFallthru); bb.RemoveMeStmt(mestmt); + cfgChanged = true; } else { // change to unconditional branch BB *succbb = bb.GetSucc().front(); @@ -316,6 +320,7 @@ void HDSE::RemoveNotRequiredStmtsInBB(BB &bb) { bb.SetKind(kBBGoto); GotoMeStmt *gotomestmt = irMap.New(condbr->GetOffset()); bb.ReplaceMeStmt(condbr, gotomestmt); + cfgChanged = true; } if (UpdateFreq()) { bb.GetSuccFreq().resize(1); @@ -953,4 +958,21 @@ void HDSE::DoHDSE() { } RemoveNotRequiredStmts(); } + +void HDSE::DoHDSESafely(const MeFunction *f, AnalysisInfoHook &anaRes) { + DoHDSE(); + if (!f) { + return; + } + if (needUNClean) { + (void)f->GetCfg()->UnreachCodeAnalysis(true); + f->GetCfg()->WontExitAnalysis(); + anaRes.ForceEraseAnalysisPhase(f->GetUniqueID(), &MEDominance::id); + return; + } + if (cfgChanged) { + f->GetCfg()->WontExitAnalysis(); + anaRes.ForceEraseAnalysisPhase(f->GetUniqueID(), &MEDominance::id); + } +} } // namespace maple diff --git a/src/mapleall/maple_me/src/irmap.cpp b/src/mapleall/maple_me/src/irmap.cpp index d7e4b600605a1bf02a6ddf77456fa29c50c2bce9..bce772f402e9c95ded1279f6eb9685e31206251b 100644 --- a/src/mapleall/maple_me/src/irmap.cpp +++ b/src/mapleall/maple_me/src/irmap.cpp @@ -50,7 +50,7 @@ MeExpr *IRMap::SimplifyCast(MeExpr *expr) { } // Try to remove redundant intTrunc for dassgin and iassign -void IRMap::SimplifyCastForAssign(MeStmt *assignStmt) { +void IRMap::SimplifyCastForAssign(MeStmt *assignStmt) const { MeCastOpt::SimplifyCastForAssign(assignStmt); } @@ -838,7 +838,8 @@ MeExpr *IRMap::CreateIntConstMeExpr(const IntVal &value, PrimType pType) { MeExpr *IRMap::CreateIntConstMeExpr(int64 value, PrimType pType) { auto *intConst = - GlobalTables::GetIntConstTable().GetOrCreateIntConst(value, *GlobalTables::GetTypeTable().GetPrimType(pType)); + 
GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(value), + *GlobalTables::GetTypeTable().GetPrimType(pType)); return CreateConstMeExpr(pType, *intConst); } @@ -1132,7 +1133,7 @@ MeExpr *IRMap::FoldConstExprBinary(PrimType primType, Opcode op, ConstMeExpr &op if ((op == OP_div || op == OP_rem) && !IsDivSafe(*constA, *constB, primType)) { return nullptr; } - MIRConst *resconst = ConstantFold::FoldIntConstBinaryMIRConst(op, primType, constA, constB); + MIRConst *resconst = ConstantFold::FoldIntConstBinaryMIRConst(op, primType, *constA, *constB); return CreateConstMeExpr(primType, *resconst); } @@ -1246,7 +1247,7 @@ MeExpr *IRMap::SimplifyLshrExpr(const OpMeExpr *shrExpr) { } else if (bitOneCount == 0) { return CreateIntConstMeExpr(0, shrExpr->GetPrimType()); } else { - if (bitOneCount + static_cast(shrOffset) > GetPrimTypeBitSize(shrExpr->GetPrimType())) { + if (bitOneCount + shrOffset > static_cast(GetPrimTypeBitSize(shrExpr->GetPrimType()))) { return CreateIntConstMeExpr(0, shrExpr->GetPrimType()); } auto *ret = CreateMeExprUnary(OP_extractbits, GetUnsignedPrimType(shrExpr->GetPrimType()), *opnd1); @@ -1430,8 +1431,10 @@ MeExpr *IRMap::SimplifySubExpr(const OpMeExpr *subExpr) { // addrof a64 %a c0 - addrof a64 %a c1 == offset between field_c0 and field_c1 if (opnd0->GetOp() == OP_addrof && opnd1->GetOp() == OP_addrof) { auto ost0 = ssaTab.GetOriginalStFromID(static_cast(opnd0)->GetOstIdx()); + CHECK_NULL_FATAL(ost0); auto prevLevelOfOst0 = ost0->GetPrevLevelOst(); auto ost1 = ssaTab.GetOriginalStFromID(static_cast(opnd1)->GetOstIdx()); + CHECK_NULL_FATAL(ost1); auto prevLevelOfOst1 = ost1->GetPrevLevelOst(); bool isPrevLevelOfOstSame = prevLevelOfOst0 != nullptr && prevLevelOfOst1 == prevLevelOfOst0; bool isOffsetValid = !ost0->GetOffset().IsInvalid() && !ost1->GetOffset().IsInvalid(); @@ -1858,6 +1861,9 @@ MeExpr *IRMap::SimplifyCmpExpr(OpMeExpr *cmpExpr) { } // fold constval cmp if (opnd0->GetMeOp() == kMeOpConst && opnd1->GetMeOp() == kMeOpConst) 
{ + if (cmpExpr->GetPrimType() == PTY_f128 || cmpExpr->GetOpndType() == PTY_f128) { + return nullptr; + } maple::ConstantFold cf(mirModule); MIRConst *opnd0const = static_cast(opnd0)->GetConstVal(); MIRConst *opnd1const = static_cast(opnd1)->GetConstVal(); @@ -2044,7 +2050,7 @@ MeExpr *IRMap::SimplifyCmpExpr(OpMeExpr *cmpExpr) { return nullptr; } -MeExpr *IRMap::SimplifySelExpr(OpMeExpr *selExpr) { +MeExpr *IRMap::SimplifySelExpr(const OpMeExpr *selExpr) { if (selExpr->GetOp() != OP_select) { return nullptr; } @@ -2145,7 +2151,7 @@ std::optional &CollectBitparts(MeExpr *expr, std::map(andMask) & bit) == 0) { result->provenance[i] = BitPart::kUnset; } } @@ -2224,6 +2230,236 @@ static bool bitMapIsValidForReverse(uint32 from, uint32 to, uint8 bitwidth) { return from == bitwidth - to - 1; } +// Calculate the number of continuous binary bits as 1 and other bits must as 0. +static bool CountContinuousOnes(uint64 value, size_t &count) { + while (value != 0) { + if ((value & 0x1) == 0) { + break; + } + ++count; + value >>= 1; + } + return (value == 0); +} + +bool IRMap::DealWithIaddrofWhenGetInfoOfIvar(IreadPairInfo &info) const { + // opnd[0] = IVAR mx434 u32 TYIDX:551<* u32> (field)0 + // base = OP iaddrof a64 kPtyInvalid (field)15 mx433 + // opnd[0] = REGINDX:15 a64 %15 mx388 + // - MU: {VAR %retVar_4031{offset:0}<0>[idx:10](vstIdx:22)->{15/288}[idx:28] (field)15 mx163} + auto iaddrofMeExpr = static_cast(info.ivar->GetBase()); + if (!iaddrofMeExpr->GetOpnd(0)->IsLeaf()) { + return false; + } + auto baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iaddrofMeExpr->GetTyIdx()); + ASSERT(baseType->IsMIRPtrType(), "must be ptr type"); + auto pointedTy = static_cast(baseType)->GetPointedType(); + info.SetInfoOfIvar(*iaddrofMeExpr->GetOpnd(0), pointedTy->GetBitOffsetFromBaseAddr( + iaddrofMeExpr->GetFieldID()) + iaddrofMeExpr->GetBitsOffSet(), info.ivar->GetType()->GetSize()); + return true; +} + +bool IRMap::GetInfoOfIvar(MeExpr &expr, IreadPairInfo &info) const { + 
if (!IsPrimitiveInteger(expr.GetPrimType())) { + return false; + } + if (expr.GetMeOp() == kMeOpIvar) { + info.ivar = static_cast(&expr); + } else if (expr.GetOp() == OP_band) { + auto opnd0 = expr.GetOpnd(0); + if (opnd0->GetMeOp() != kMeOpIvar) { + return false; + } + info.ivar = static_cast(opnd0); + auto opnd1 = expr.GetOpnd(1); + if (opnd1->GetMeOp() != kMeOpConst || !IsPrimitiveInteger(opnd1->GetPrimType())) { + return false; + } + } else { + return false; + } + if (info.ivar->GetType()->IsMIRBitFieldType()) { + return false; + } + // convert byte size to bit size. + info.bitOffset = info.ivar->GetOffset() * static_cast(k8BitSize); + if (info.ivar->GetBase()->GetOp() == OP_add) { + auto opnd0 = info.ivar->GetBase()->GetOpnd(0); + auto opnd1 = info.ivar->GetBase()->GetOpnd(1); + if (opnd1->GetMeOp() != kMeOpConst) { + return false; + } + auto constMeExpr = static_cast(opnd1); + if (!IsPrimitiveInteger(constMeExpr->GetPrimType())) { + return false; + } + info.SetInfoOfIvar(*opnd0, constMeExpr->GetExtIntValue() * k8BitSize, + info.ivar->GetType()->GetSize()); + return true; + } + if (info.ivar->GetBase()->GetOp() == OP_iaddrof) { + return DealWithIaddrofWhenGetInfoOfIvar(info); + } + if (!info.ivar->GetBase()->IsLeaf()) { + return false; + } + auto baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(info.ivar->GetTyIdx()); + ASSERT(baseType->IsMIRPtrType(), "type of iread must be ptr type"); + auto pointedTy = static_cast(baseType)->GetPointedType(); + info.SetInfoOfIvar(*info.ivar->GetBase(), pointedTy->GetBitOffsetFromBaseAddr(info.ivar->GetFieldID()), + info.ivar->GetType()->GetSize()); + return true; +} + +MeExpr *IRMap::CreateNewIvarForAdjacentIread(MeExpr &base0, const IvarMeExpr &ivar0, const IvarMeExpr &ivar1, + PrimType ivarPTy, int64 newOffset) { + auto basePTy = base0.GetPrimType(); + auto *newBase = (newOffset / static_cast(k8BitSize) == 0) ? 
&base0 : CreateMeExprBinary( + OP_add, basePTy, base0, *CreateIntConstMeExpr(newOffset / static_cast(k8BitSize), basePTy)); + IvarMeExpr newIvar(&irMapAlloc, kInvalidExprID, ivarPTy, GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetPrimType(ivarPTy))->GetTypeIndex(), 0, OP_iread); + newIvar.SetBase(newBase); + for (auto mu : ivar0.GetMuList()) { + if (mu == nullptr) { + continue; + } + newIvar.Push2MuList(*mu); + } + for (auto mu : ivar1.GetMuList()) { + if (mu == nullptr) { + continue; + } + newIvar.Push2MuList(*mu); + } + if (newIvar.GetMuList().size() > 1) { + (void)newIvar.GetMuList().erase(newIvar.GetMuList().begin()); + } + return HashMeExpr(newIvar); +} + +bool IRMap::GetIreadsInfo(MeExpr &opnd0, MeExpr &opnd1, IreadPairInfo &info0, IreadPairInfo &info1) const { + if (!GetInfoOfIvar(opnd0, info0)) { + return false; + } + if (!GetInfoOfIvar(opnd1, info1)) { + return false; + } + // The bases of two ireads must be the same + if (info0.base != info1.base) { + return false; + } + // Do not opt with volatile attribute. + if (info0.ivar->GetVolatileFromBaseSymbol() || info1.ivar->GetVolatileFromBaseSymbol()) { + return false; + } + // Two fields must be adjacent and there are no additional bits between two fields. + if (info0.bitOffset + static_cast(info0.byteSize) * static_cast(k8BitSize) != info1.bitOffset) { + return false; + } + // The offset of the first filed relative to base must be an integer multiple of 8 bits. 
+ if (info0.bitOffset % static_cast(k8BitSize) != 0) { + return false; + } + return true; +} + +MeExpr *IRMap::OptBandWithIread(MeExpr &opnd0, MeExpr &opnd1) { + IreadPairInfo info0; + IreadPairInfo info1; + if (!GetIreadsInfo(opnd0, opnd1, info0, info1)) { + return nullptr; + } + if (info0.byteSize + info1.byteSize <= k8ByteSize) { + return nullptr; + } + if (opnd1.GetOp() != OP_band) { + return nullptr; + } + if (opnd1.GetOpnd(1)->GetMeOp() != kMeOpConst) { + return nullptr; + } + size_t bitSizeOfBand = 0; + if (!CountContinuousOnes(static_cast(opnd1.GetOpnd(1))->GetZXTIntValue(), bitSizeOfBand)) { + return nullptr; + } + auto distance = info1.byteSize * static_cast(k8BitSize) - bitSizeOfBand; + if (distance % k8BitSize != 0) { + return nullptr; + } + if (info0.byteSize * k8BitSize < distance) { + return nullptr; + } + auto newOffset = info1.bitOffset - static_cast(distance); + return CreateNewIvarForAdjacentIread(*info0.base, *info0.ivar, *info1.ivar, opnd1.GetPrimType(), newOffset); +} + +MeExpr *IRMap::MergeAdjacentIread(MeExpr &opnd0, MeExpr &opnd1) { + IreadPairInfo info0; + IreadPairInfo info1; + if (!GetIreadsInfo(opnd0, opnd1, info0, info1)) { + return nullptr; + } + PrimType resPTy = PTY_begin; + size_t resBytes = 0; + if (info0.byteSize + info1.byteSize <= k1ByteSize) { + resPTy = PTY_u8; + resBytes = k1ByteSize; + } else if (info0.byteSize + info1.byteSize <= k2ByteSize) { + resPTy = PTY_u16; + resBytes = k2ByteSize; + } else if (info0.byteSize + info1.byteSize <= k4ByteSize) { + resPTy = PTY_u32; + resBytes = k4ByteSize; + } else if (info0.byteSize + info1.byteSize <= k8ByteSize) { + resPTy = PTY_u64; + resBytes = k8ByteSize; + } else { + return nullptr; + } + auto *resIvar = CreateNewIvarForAdjacentIread(*info0.base, *info0.ivar, *info1.ivar, resPTy, info0.bitOffset); + // If the number of bits in two files is not an integer multiple of 8 bytes, + // the remaining bits need to be cleared to zero. 
+ if (info0.byteSize + info1.byteSize == resBytes) { + return resIvar; + } + auto offset = (info0.byteSize + info1.byteSize) * k8BitSize; + return CreateMeExprBinary( + OP_band, resPTy, *resIvar, *CreateIntConstMeExpr(static_cast((uint64(1) << offset)) - 1, resPTy)); +} + +// dassign %_3799__c_2702_158_SECOND_6 0 (bior i32 ( +// iread u32 <* <$HpfFlowKey>> 6 (dread a64 %_3799__key_SECOND_6), +// shl i32 ( +// iread u32 <* <$HpfFlowKey>> 7 (dread a64 %_3799__key_SECOND_6), +// constval i32 16))) +// dassign %_3799__c_2702_158_SECOND_6 0 ( +// iread u32 <* u32> 0 (add a64 (dread a64 %_3799__key_SECOND_6, constval a64 8))) +MeExpr *IRMap::ReadContinuousMemory(const OpMeExpr &opMeExpr) { + MeExpr *opnd0 = opMeExpr.GetOpnd(0); + MeExpr *opnd1 = opMeExpr.GetOpnd(1); + Opcode opcode0 = opnd0->GetOp(); + Opcode opcode1 = opnd1->GetOp(); + if (opcode0 != OP_iread && opcode1 != OP_shl) { + return nullptr; + } + MeExpr *shlOpnd0 = opnd1->GetOpnd(0); + MeExpr *shlOpnd1 = opnd1->GetOpnd(1); + if (shlOpnd0->GetOp() != OP_iread) { + return nullptr; + } + if (shlOpnd1->GetMeOp() != kMeOpConst) { + return nullptr; + } + auto iread0 = static_cast(opnd0); + auto iread1 = static_cast(shlOpnd0); + auto shlConst = static_cast(shlOpnd1)->GetZXTIntValue(); + // The number of bits shifted to the left must be the same as the number of bits in the first field. 
+ if (iread0->GetType()->GetSize() * k8BitSize != shlConst) { + return nullptr; + } + return MergeAdjacentIread(*iread0, *iread1); +} + // match OR bit operations for bytewise reverse, replace with intrinsic rev MeExpr *IRMap::SimplifyOrMeExpr(OpMeExpr *opmeexpr) { Opcode opcode = opmeexpr->GetOp(); @@ -2255,8 +2491,8 @@ MeExpr *IRMap::SimplifyOrMeExpr(OpMeExpr *opmeexpr) { if (expr1->GetMeOp() != kMeOpConst) { return nullptr; } - auto c1 = static_cast(opnd1)->GetExtIntValue(); - auto c2 = static_cast(expr1)->GetExtIntValue(); + auto c1 = static_cast(opnd1)->GetZXTIntValue(); + auto c2 = static_cast(expr1)->GetZXTIntValue(); if ((c1 & c2) == 0) { auto newOpnd0 = CreateMeExprBinary(OP_bior, opmeexpr->GetPrimType(), *opnd0->GetOpnd(0), *opnd1); auto res = CreateMeExprBinary(OP_bxor, opnd0->GetPrimType(), *newOpnd0, *expr1); @@ -2351,7 +2587,7 @@ static bool IsSignBitZero(MeExpr *opnd, uint64 signBit, uint64 shiftAmt) { return false; } auto andValue = static_cast(opnd1)->GetExtIntValue(); - knownZeroBits |= ~andValue; + knownZeroBits |= ~(static_cast(andValue)); break; } default: @@ -2447,7 +2683,7 @@ MeExpr *IRMap::SimplifyExtractbits(const OpMeExpr &opmeexpr) { return nullptr; } -MeExpr *IRMap::SimplifyAshrMeExpr(OpMeExpr *opmeexpr) { +MeExpr *IRMap::SimplifyAshrMeExpr(const OpMeExpr *opmeexpr) { Opcode opcode = opmeexpr->GetOp(); if (opcode != OP_ashr) { return nullptr; @@ -2459,11 +2695,11 @@ MeExpr *IRMap::SimplifyAshrMeExpr(OpMeExpr *opmeexpr) { } auto shiftAmt = static_cast(opnd1)->GetExtIntValue(); auto bitWidth = GetPrimTypeBitSize(opmeexpr->GetPrimType()); - if (shiftAmt >= bitWidth) { + if (static_cast(shiftAmt) >= bitWidth) { return nullptr; } - uint64 signBit = 1 << (bitWidth - shiftAmt - 1); + uint64 signBit = 1ULL << (static_cast(bitWidth) - shiftAmt - 1); bool isSignBitZero = IsSignBitZero(opnd0, signBit, static_cast(shiftAmt)); // sign bit is known to be zero, we can replace ashr with lshr if (isSignBitZero) { @@ -2530,7 +2766,7 @@ MeExpr 
*IRMap::SimplifyOpMeExpr(OpMeExpr *opmeexpr) { auto foldConst = [this](MeExpr *opnd0, MeExpr *opnd1, Opcode op, PrimType ptyp) { MIRIntConst *opnd0const = static_cast(static_cast(opnd0)->GetConstVal()); MIRIntConst *opnd1const = static_cast(static_cast(opnd1)->GetConstVal()); - MIRConst *resconst = ConstantFold::FoldIntConstBinaryMIRConst(op, ptyp, opnd0const, opnd1const); + MIRConst *resconst = ConstantFold::FoldIntConstBinaryMIRConst(op, ptyp, *opnd0const, *opnd1const); return CreateConstMeExpr(ptyp, *resconst); }; switch (opop) { @@ -2678,7 +2914,7 @@ MeExpr *IRMap::SimplifyOpMeExpr(OpMeExpr *opmeexpr) { return nullptr; } MIRConst *resconst = ConstantFold::FoldIntConstBinaryMIRConst(opmeexpr->GetOp(), - opmeexpr->GetPrimType(), opnd0const, opnd1const); + opmeexpr->GetPrimType(), *opnd0const, *opnd1const); return CreateConstMeExpr(opmeexpr->GetPrimType(), *resconst); } case OP_depositbits: { diff --git a/src/mapleall/maple_me/src/irmap_build.cpp b/src/mapleall/maple_me/src/irmap_build.cpp index 13abd32c2df7368390fbe4b15852d5b8c52c07eb..cff76247e2946d2c98284205148d0acf063ded2e 100644 --- a/src/mapleall/maple_me/src/irmap_build.cpp +++ b/src/mapleall/maple_me/src/irmap_build.cpp @@ -121,6 +121,7 @@ void IRMapBuild::BuildMustDefList(MeStmt &meStmt, TypeOfMustDefList &mustDefList void IRMapBuild::BuildPhiMeNode(BB &bb) { for (auto &phi : bb.GetPhiList()) { const OriginalSt *oSt = ssaTab.GetOriginalStFromID(phi.first); + CHECK_NULL_FATAL(oSt); VersionSt *vSt = phi.second.GetResult(); auto *phiMeNode = irMap->NewInPool(); diff --git a/src/mapleall/maple_me/src/irmap_emit.cpp b/src/mapleall/maple_me/src/irmap_emit.cpp index 9dcee51bd5aa68df4e1dbe839717e1ed6b345c26..e9328d51a1a6a041d1a63b3b78551df06d846ab1 100644 --- a/src/mapleall/maple_me/src/irmap_emit.cpp +++ b/src/mapleall/maple_me/src/irmap_emit.cpp @@ -690,6 +690,9 @@ void BB::EmitBB(BlockNode &curblk, bool needAnotherPass) { stmt->SetSrcPos(meStmt.GetSrcPosition()); stmt->SetOriginalID(meStmt.GetOriginalId()); 
stmt->CopySafeRegionAttr(meStmt.GetStmtAttr()); + if (meStmt.GetMayTailCall()) { + stmt->SetMayTailcall(); + } curblk.AddStatement(stmt); if (bbFirstStmt == nullptr) { bbFirstStmt = stmt; diff --git a/src/mapleall/maple_me/src/lfo_iv_canon.cpp b/src/mapleall/maple_me/src/lfo_iv_canon.cpp index e9e211b7173a336a3c01ac80ebab7aec941b6ce6..69fa30e1bd8cc7a9bce077153285b01d59232ed4 100644 --- a/src/mapleall/maple_me/src/lfo_iv_canon.cpp +++ b/src/mapleall/maple_me/src/lfo_iv_canon.cpp @@ -281,7 +281,11 @@ bool IVCanon::CheckPostIncDecFixUp(CondGotoMeStmt *condbr) { // find the phi for ivOst BB *bb = condbr->GetBB(); MapleMap &mePhiList = bb->GetMePhiList(); - MePhiNode *ivPhiNode = mePhiList[ivOst->GetIndex()]; + auto itOfIVOst = mePhiList.find(ivOst->GetIndex()); + if (itOfIVOst == mePhiList.end()) { + return false; + } + MePhiNode *ivPhiNode = itOfIVOst->second; if (ivPhiNode == nullptr) { return false; } @@ -350,6 +354,9 @@ void IVCanon::ComputeTripCount() { do { trialsCount++; testExpr = static_cast(condbr->GetOpnd()); + if (!IsPrimitiveInteger(testExpr->GetOpndType())) { + return; + } // check left operand ScalarMeExpr *iv = testExpr->GetOpnd(0)->IsScalar() ? static_cast(testExpr->GetOpnd(0)) : nullptr; @@ -438,6 +445,16 @@ void IVCanon::ComputeTripCount() { } } + // check first test + OpMeExpr firstTest(*static_cast(condbr->GetOpnd()), kInvalidExprID); + firstTest.SetOpnd(0, ivdesc->initExpr); + auto *testx = irMap->HashMeExpr(firstTest); + auto *simplified = irMap->SimplifyMeExpr(testx); + if (simplified != nullptr && simplified->IsZero()) { + return; + } + testx = simplified ? 
simplified : testx; + // form the trip count expression MeExpr *testExprLHS = testExpr->GetOpnd(0); MeExpr *testExprRHS = testExpr->GetOpnd(1); @@ -493,7 +510,12 @@ void IVCanon::ComputeTripCount() { OpMeExpr maxExpr(-1, OP_max, divPrimType, 2); maxExpr.SetOpnd(0, tripCount); maxExpr.SetOpnd(1, irMap->CreateIntConstMeExpr(0, divPrimType)); - tripCount = irMap->HashMeExpr(maxExpr); + auto *hashedMax = irMap->HashMeExpr(maxExpr); + OpMeExpr selectExpr(-1, OP_select, divPrimType, kOperandNumTernary); + selectExpr.SetOpnd(kFirstOpnd, testx); + selectExpr.SetOpnd(kSecondOpnd, hashedMax); + selectExpr.SetOpnd(kThirdOpnd, irMap->CreateIntConstMeExpr(0, divPrimType)); + tripCount = irMap->HashMeExpr(selectExpr); } } } diff --git a/src/mapleall/maple_me/src/lfo_loop_vec.cpp b/src/mapleall/maple_me/src/lfo_loop_vec.cpp index 210351927ab0d7c4d07e8c1bde3d155155a94ec5..2abb4e66c3860f1d72c6ac8dbd4fdf7127b32762 100644 --- a/src/mapleall/maple_me/src/lfo_loop_vec.cpp +++ b/src/mapleall/maple_me/src/lfo_loop_vec.cpp @@ -24,8 +24,9 @@ constexpr uint32_t maxVecSize = 128; namespace maple { -void LoopVecInfo::UpdateDoloopProfData(MIRFunction *mirFunc, DoloopNode *doLoop, int32_t vecLanes, bool isRemainder) { - auto *profData = mirFunc->GetFuncProfData(); +void LoopVecInfo::UpdateDoloopProfData(MIRFunction &mirFunc, const DoloopNode *doLoop, int32_t vecLanes, + bool isRemainder) const { + auto *profData = mirFunc.GetFuncProfData(); if (!profData) { return; } @@ -248,7 +249,7 @@ bool LoopTransPlan::Generate(const DoloopNode *doLoop, const DoloopInfo* li, boo int64 tripCount = (upvalue - lowvalue) / (incrConst->GetExtValue()); if (static_cast(tripCount) < vecLanes) { tripCount = (tripCount / 4 * 4); // get closest 2^n - if (tripCount * vecInfo->smallestTypeSize < maplebe::k64BitSize) { + if (static_cast(tripCount) * vecInfo->smallestTypeSize < maplebe::k64BitSize) { if (enableDebug) { LogInfo::MapleLogger() << "NOT VECTORIZABLE because of doLoop trip count is small \n"; } @@ -319,6 
+320,7 @@ MIRType* LoopVectorization::GenVecType(PrimType sPrimType, uint8 lanes) const { } break; } + case PTY_u1: case PTY_u8: { if (lanes == 16) { vecType = GlobalTables::GetTypeTable().GetV16UInt8(); @@ -1221,7 +1223,7 @@ static bool IsCompareNode(Opcode op) { return false; } -void LoopVectorization::VectorizeExpr(BaseNode *node, LoopTransPlan *tp, MapleVector& vectorizedNode, +void LoopVectorization::VectorizeExpr(BaseNode *node, LoopTransPlan *tp, MapleVector &vectorizedNode, uint32_t depth) { switch (node->GetOpCode()) { case OP_iread: { @@ -1321,7 +1323,7 @@ void LoopVectorization::VectorizeExpr(BaseNode *node, LoopTransPlan *tp, MapleVe // opnd2 is uniform scalar and type is different from opnd1 // widen opnd1 with same element type as opnd2 BaseNode *newopnd1 = vecopnd1[0]; - int opnd2ElemPrimTypeSize = GetPrimTypeSize(GetVecElemPrimType(opnd2PrimType)); + uint32 opnd2ElemPrimTypeSize = GetPrimTypeSize(GetVecElemPrimType(opnd2PrimType)); while (GetPrimTypeSize(GetVecElemPrimType(opnd1PrimType)) < opnd2ElemPrimTypeSize) { newopnd1 = GenVectorWidenOpnd(newopnd1, opnd1PrimType, false); opnd1PrimType = newopnd1->GetPrimType(); @@ -1465,15 +1467,15 @@ void LoopVectorization::VectorizeExpr(BaseNode *node, LoopTransPlan *tp, MapleVe } // set lhs type to vector type and return lhs pointto type -MIRType *LoopVectorization::VectorizeIassignLhs(IassignNode *iassign, LoopTransPlan *tp) { - MIRType &mirType = GetTypeFromTyIdx(iassign->GetTyIdx()); +MIRType *LoopVectorization::VectorizeIassignLhs(IassignNode &iassign, const LoopTransPlan &tp) const { + MIRType &mirType = GetTypeFromTyIdx(iassign.GetTyIdx()); CHECK_FATAL(mirType.GetKind() == kTypePointer, "iassign must have pointer type"); MIRPtrType *ptrType = static_cast(&mirType); - MIRType *lhsvecType = GenVecType(ptrType->GetPointedType()->GetPrimType(), tp->vecFactor); + MIRType *lhsvecType = GenVecType(ptrType->GetPointedType()->GetPrimType(), tp.vecFactor); ASSERT(lhsvecType != nullptr, "vector type should not 
be null"); - tp->vecInfo->currentLHSTypeSize = GetPrimTypeSize(GetVecElemPrimType(lhsvecType->GetPrimType())); + tp.vecInfo->currentLHSTypeSize = GetPrimTypeSize(GetVecElemPrimType(lhsvecType->GetPrimType())); MIRType *pvecType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*lhsvecType, PTY_ptr); - iassign->SetTyIdx(pvecType->GetTypeIndex()); + iassign.SetTyIdx(pvecType->GetTypeIndex()); return lhsvecType; } @@ -1551,7 +1553,7 @@ void LoopVectorization::VectorizeStmt(BaseNode *node, LoopTransPlan *tp) { if (tp->vecInfo->reductionStmts.find(iassign) != tp->vecInfo->reductionStmts.end()) { VectorizeReductionStmt(static_cast(node), tp); } else { - MIRType *lhsptvecType = VectorizeIassignLhs(iassign, tp); + MIRType *lhsptvecType = VectorizeIassignLhs(*iassign, *tp); BaseNode *rhs = iassign->GetRHS(); BaseNode *newrhs; if (tp->vecInfo->uniformVecNodes.find(rhs) != tp->vecInfo->uniformVecNodes.end()) { @@ -1599,7 +1601,7 @@ void LoopVectorization::VectorizeStmt(BaseNode *node, LoopTransPlan *tp) { // update init/stride/upper nodes of doloop // now hack code to widen const stride with value "vecFactor * original stride" -void LoopVectorization::widenDoloop(DoloopNode *doloop, LoopTransPlan *tp) { +void LoopVectorization::WidenDoloop(DoloopNode *doloop, LoopTransPlan *tp) { if (tp->vBound) { if (tp->vBound->incrNode) { doloop->SetIncrExpr(tp->vBound->incrNode); @@ -1625,7 +1627,7 @@ void LoopVectorization::widenDoloop(DoloopNode *doloop, LoopTransPlan *tp) { void LoopVectorization::VectorizeDoLoop(DoloopNode *doloop, LoopTransPlan *tp) { // LogInfo::MapleLogger() << "\n**** dump doloopnode ****\n"; // step 1: handle loop low/upper/stride - widenDoloop(doloop, tp); + WidenDoloop(doloop, tp); // step 2: insert dup stmt before doloop if ((!tp->vecInfo->uniformNodes.empty()) || diff --git a/src/mapleall/maple_me/src/lfo_unroll.cpp b/src/mapleall/maple_me/src/lfo_unroll.cpp index 4847e37115e8f06464682361ab9d48459cc354c3..d132ebe446a491669362b01c6552e27ce144afdc 100644 
--- a/src/mapleall/maple_me/src/lfo_unroll.cpp +++ b/src/mapleall/maple_me/src/lfo_unroll.cpp @@ -59,7 +59,7 @@ BlockNode *LfoUnrollOneLoop::DoFullUnroll(size_t tripCount) { auto &stmtFreqs = profData->GetStmtFreqs(); uint32 updateOp = (kKeepOrigFreq | kUpdateUnrolledFreq); unrolledBlk = doloop->GetDoBody()->CloneTreeWithFreqs(mirModule->GetCurFuncCodeMPAllocator(), - stmtFreqs, stmtFreqs, 1, unrollTimes, updateOp); + stmtFreqs, stmtFreqs, 1, static_cast(unrollTimes), updateOp); } else { unrolledBlk = doloop->GetDoBody()->CloneTreeWithSrcPosition(*mirModule); } @@ -73,12 +73,11 @@ BlockNode *LfoUnrollOneLoop::DoFullUnroll(size_t tripCount) { preMeFunc->meFunc->GetMirFunc()->GetFuncProfData()) { auto &stmtFreqs = preMeFunc->meFunc->GetMirFunc()->GetFuncProfData()->GetStmtFreqs(); nextIterBlk = doloop->GetDoBody()->CloneTreeWithFreqs(mirModule->GetCurFuncCodeMPAllocator(), - stmtFreqs, stmtFreqs, 1, unrollTimes, - (kKeepOrigFreq | kUpdateUnrolledFreq)); + stmtFreqs, stmtFreqs, 1, static_cast(unrollTimes), (kKeepOrigFreq | kUpdateUnrolledFreq)); } else { nextIterBlk = doloop->GetDoBody()->CloneTreeWithSrcPosition(*mirModule); } - BaseNode *adjExpr = mirBuilder->CreateIntConst(stepAmount * i, ivPrimType); + BaseNode *adjExpr = mirBuilder->CreateIntConst(static_cast(stepAmount * i), ivPrimType); BaseNode *repExpr = codeMP->New(OP_add, ivPrimType, doloop->GetStartExpr(), adjExpr); ReplaceIV(nextIterBlk, repExpr); unrolledBlk->InsertBlockAfter(*nextIterBlk, unrolledBlk->GetLast()); @@ -102,7 +101,7 @@ BlockNode *LfoUnrollOneLoop::DoUnroll(size_t times, size_t tripCount) { auto &stmtFreqs = preMeFunc->meFunc->GetMirFunc()->GetFuncProfData()->GetStmtFreqs(); unrolledBlk = doloop->GetDoBody()->CloneTreeWithFreqs(mirModule->GetCurFuncCodeMPAllocator(), - stmtFreqs, stmtFreqs, 1, times, + stmtFreqs, stmtFreqs, 1, static_cast(times), (kKeepOrigFreq | kUpdateUnrollRemainderFreq)); } else { unrolledBlk = doloop->GetDoBody()->CloneTreeWithSrcPosition(*mirModule); @@ -114,7 +113,7 
@@ BlockNode *LfoUnrollOneLoop::DoUnroll(size_t times, size_t tripCount) { preMeFunc->meFunc->GetMirFunc()->GetFuncProfData()) { auto &stmtFreqs = preMeFunc->meFunc->GetMirFunc()->GetFuncProfData()->GetStmtFreqs(); remDoloop = doloop->CloneTreeWithFreqs(mirModule->GetCurFuncCodeMPAllocator(), - stmtFreqs, stmtFreqs, 1/*numor*/, times/*denom*/, + stmtFreqs, stmtFreqs, 1 /* numor */, times /* denom */, (kKeepOrigFreq | kUpdateUnrollRemainderFreq)); } else { remDoloop = doloop->CloneTree(*preEmit->GetCodeMPAlloc()); @@ -122,7 +121,7 @@ BlockNode *LfoUnrollOneLoop::DoUnroll(size_t times, size_t tripCount) { // generate remDoloop's termination BaseNode *terminationRHS = codeMP->New(OP_add, ivPrimType, doloop->GetStartExpr()->CloneTree(*preEmit->GetCodeMPAlloc()), - mirBuilder->CreateIntConst(static_cast(remainderTripCount), ivPrimType)); + mirBuilder->CreateIntConst(remainderTripCount, ivPrimType)); remDoloop->SetContExpr(codeMP->New(OP_lt, PTY_i32, ivPrimType, CloneIVNode(), terminationRHS)); unrolledBlk = codeMP->New(); unrolledBlk->AddStatement(remDoloop); @@ -141,7 +140,7 @@ BlockNode *LfoUnrollOneLoop::DoUnroll(size_t times, size_t tripCount) { mirBuilder->CreateIntConst(1, ivPrimType)); } tripsExpr = codeMP->New(OP_rem, ivPrimType, tripsExpr, - mirBuilder->CreateIntConst(static_cast(times), ivPrimType)); + mirBuilder->CreateIntConst(times, ivPrimType)); BaseNode *remLoopEndExpr = codeMP->New(OP_add, ivPrimType, startExpr->CloneTree(*preEmit->GetCodeMPAlloc()), tripsExpr); // store in a preg @@ -182,12 +181,12 @@ BlockNode *LfoUnrollOneLoop::DoUnroll(size_t times, size_t tripCount) { if (Options::profileUse && preMeFunc->meFunc && preMeFunc->meFunc->GetMirFunc() && preMeFunc->meFunc->GetMirFunc()->GetFuncProfData()) { auto &stmtFreqs = preMeFunc->meFunc->GetMirFunc()->GetFuncProfData()->GetStmtFreqs(); - nextIterBlk = doloop->GetDoBody()->CloneTreeWithFreqs(mirModule->GetCurFuncCodeMPAllocator(), - stmtFreqs, stmtFreqs, 1/*numor*/, times/*denom*/, (kKeepOrigFreq | 
kUpdateUnrolledFreq)); + nextIterBlk = doloop->GetDoBody()->CloneTreeWithFreqs(mirModule->GetCurFuncCodeMPAllocator(), stmtFreqs, + stmtFreqs, 1 /* numor */, static_cast(times) /* denom */, (kKeepOrigFreq | kUpdateUnrolledFreq)); } else { nextIterBlk = doloop->GetDoBody()->CloneTreeWithSrcPosition(*mirModule); } - BaseNode *adjExpr = mirBuilder->CreateIntConst(stepAmount * i, ivPrimType); + BaseNode *adjExpr = mirBuilder->CreateIntConst(static_cast(stepAmount * i), ivPrimType); BaseNode *repExpr = codeMP->New(OP_add, ivPrimType, CloneIVNode(), adjExpr); ReplaceIV(nextIterBlk, repExpr); unrolledDoloop->GetDoBody()->InsertBlockAfter(*nextIterBlk, unrolledDoloop->GetDoBody()->GetLast()); @@ -197,7 +196,7 @@ BlockNode *LfoUnrollOneLoop::DoUnroll(size_t times, size_t tripCount) { if (tripCount != 0) { if (remainderTripCount != 0) { BaseNode *newStartExpr = codeMP->New(OP_add, ivPrimType, unrolledDoloop->GetStartExpr(), - mirBuilder->CreateIntConst(static_cast(remainderTripCount), ivPrimType)); + mirBuilder->CreateIntConst(remainderTripCount, ivPrimType)); unrolledDoloop->SetStartExpr(newStartExpr); } } else { @@ -206,7 +205,7 @@ BlockNode *LfoUnrollOneLoop::DoUnroll(size_t times, size_t tripCount) { } // update incrExpr ConstvalNode *stepNode = static_cast(unrolledDoloop->GetIncrExpr()); - uint64 origIncr = static_cast(stepNode->GetConstVal())->GetExtValue(); + uint64 origIncr = static_cast(static_cast(stepNode->GetConstVal())->GetExtValue()); unrolledDoloop->SetIncrExpr(mirBuilder->CreateIntConst(origIncr * times, ivPrimType)); unrolledBlk->AddStatement(unrolledDoloop); return unrolledBlk; diff --git a/src/mapleall/maple_me/src/lmbc_lower.cpp b/src/mapleall/maple_me/src/lmbc_lower.cpp index 217f7d2274643d1d3d7f91acc3ef8ca2903fee2c..120b75b43d2a89a8605b18f2a6a8ed9de0f00d83 100644 --- a/src/mapleall/maple_me/src/lmbc_lower.cpp +++ b/src/mapleall/maple_me/src/lmbc_lower.cpp @@ -27,7 +27,7 @@ PregIdx LMBCLowerer::GetSpecialRegFromSt(const MIRSymbol *sym) { 
CHECK(sym->GetStIndex() < memlayout->sym_alloc_table.size(), "index out of range in LMBCLowerer::GetSpecialRegFromSt"); SymbolAlloc *symalloc = &memlayout->sym_alloc_table[sym->GetStIndex()]; - if (symalloc->mem_segment->kind == MS_FPbased) { + if (symalloc->memSegment->kind == MS_FPbased) { specreg = -kSregFp; } else { CHECK_FATAL(false, "LMBCLowerer::LowerDread: bad memory layout for local variable"); @@ -41,14 +41,14 @@ PregIdx LMBCLowerer::GetSpecialRegFromSt(const MIRSymbol *sym) { return specreg; } -BaseNode *LMBCLowerer::LowerAddrof(AddrofNode *expr) { +BaseNode *LMBCLowerer::LowerAddrof(const AddrofNode *expr) { MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(expr->GetStIdx()); ASSERT_NOT_NULL(symbol); symbol->ResetIsDeleted(); int32 offset = 0; if (expr->GetFieldID() != 0) { MIRStructType *structty = static_cast(symbol->GetType()); - offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + offset = structty->GetFieldOffsetFromBaseAddr(expr->GetFieldID()).byteOffset; } PrimType symty = (expr->GetPrimType() == PTY_simplestr || expr->GetPrimType() == PTY_simpleobj) ? 
expr->GetPrimType() : GetLoweredPtrType(); @@ -73,7 +73,7 @@ BaseNode *LMBCLowerer::LowerDread(const AddrofNode *expr) { MIRStructType *structty = static_cast(symbol->GetType()); FieldPair thepair = structty->TraverseToField(expr->GetFieldID()); symty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first)->GetPrimType(); - offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + offset = structty->GetFieldOffsetFromBaseAddr(expr->GetFieldID()).byteOffset; } if (!symbol->LMBCAllocateOffSpecialReg()) { BaseNode *base = mirBuilder->CreateExprDreadoff(OP_addrofoff, GetLoweredPtrType(), *symbol, 0); @@ -149,7 +149,7 @@ BaseNode *LMBCLowerer::LowerIread(const IreadNode &expr) { MIRType *type = ptrType->GetPointedType(); if (expr.GetFieldID() != 0) { MIRStructType *structty = static_cast(type); - offset = becommon->GetFieldOffset(*structty, expr.GetFieldID()).first; + offset = structty->GetFieldOffsetFromBaseAddr(expr.GetFieldID()).byteOffset; type = structty->GetFieldType(expr.GetFieldID()); } BaseNode *ireadoff = mirBuilder->CreateExprIreadoff(type->GetPrimType(), offset, expr.Opnd(0)); @@ -159,14 +159,14 @@ BaseNode *LMBCLowerer::LowerIread(const IreadNode &expr) { return mirBuilder->CreateExprTypeCvt(OP_cvt, expr.GetPrimType(), GetRegPrimType(ireadoff->GetPrimType()), *ireadoff); } -BaseNode *LMBCLowerer::LowerIaddrof(IaddrofNode *expr) { +BaseNode *LMBCLowerer::LowerIaddrof(const IaddrofNode *expr) { int32 offset = 0; if (expr->GetFieldID() != 0) { MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr->GetTyIdx()); MIRStructType *structty = static_cast( GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(type)->GetPointedTyIdx())); - offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + offset = structty->GetFieldOffsetFromBaseAddr(expr->GetFieldID()).byteOffset; } if (offset == 0) { return expr->Opnd(0); @@ -204,9 +204,9 @@ BaseNode *LMBCLowerer::LowerExpr(BaseNode *expr) { } // lower using 
OP_blkassignoff -void LMBCLowerer::LowerAggDassign(const DassignNode *dsnode, MIRType *lhsty, +void LMBCLowerer::LowerAggDassign(const DassignNode &dsnode, const MIRType *lhsty, int32 offset, BlockNode *newblk) { - BaseNode *rhs = dsnode->Opnd(0); + BaseNode *rhs = dsnode.Opnd(0); CHECK_FATAL(rhs->GetOpCode() == OP_dread || rhs->GetOpCode() == OP_iread, "LowerAggDassign: rhs inconsistent"); // change rhs to address of rhs @@ -218,7 +218,7 @@ void LMBCLowerer::LowerAggDassign(const DassignNode *dsnode, MIRType *lhsty, rhs->SetPrimType(GetLoweredPtrType()); // generate lhs address expression BaseNode *lhs = nullptr; - MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dsnode->GetStIdx()); + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dsnode.GetStIdx()); ASSERT_NOT_NULL(symbol); symbol->ResetIsDeleted(); if (!symbol->LMBCAllocateOffSpecialReg()) { @@ -247,7 +247,7 @@ void LMBCLowerer::LowerDassign(DassignNode *dsnode, BlockNode *newblk) { ASSERT_NOT_NULL(structty); FieldPair thepair = structty->TraverseToField(dsnode->GetFieldID()); symty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); - offset = becommon->GetFieldOffset(*structty, dsnode->GetFieldID()).first; + offset = structty->GetFieldOffsetFromBaseAddr(dsnode->GetFieldID()).byteOffset; } BaseNode *rhs = LowerExpr(dsnode->Opnd(0)); if (rhs->GetPrimType() != PTY_agg || rhs->GetOpCode() == OP_regread) { @@ -280,7 +280,7 @@ void LMBCLowerer::LowerDassign(DassignNode *dsnode, BlockNode *newblk) { newblk->AddStatement(iassignoff); } } else { - LowerAggDassign(dsnode, symty, offset, newblk); + LowerAggDassign(*dsnode, symty, offset, newblk); } } @@ -311,9 +311,9 @@ void LMBCLowerer::LowerDassignoff(DassignoffNode *dsnode, BlockNode *newblk) { } } // lower using OP_blkassignoff -void LMBCLowerer::LowerAggIassign(IassignNode *iassign, MIRType *lhsty, - int32 offset, BlockNode *newblk) { - BaseNode *rhs = iassign->rhs; +void LMBCLowerer::LowerAggIassign(const IassignNode &iassign, const MIRType 
*lhsty, + int32 offset, BlockNode &newblk) const { + BaseNode *rhs = iassign.rhs; CHECK_FATAL(rhs->GetOpCode() == OP_dread || rhs->GetOpCode() == OP_iread || rhs->GetOpCode() == OP_ireadoff || rhs->GetOpCode() == OP_ireadfpoff, "LowerAggIassign: rhs inconsistent"); @@ -341,9 +341,9 @@ void LMBCLowerer::LowerAggIassign(IassignNode *iassign, MIRType *lhsty, BlkassignoffNode *bass = mirModule->CurFuncCodeMemPool()->New(offset, lhsty->GetSize()); bass->SetAlign(lhsty->GetAlign()); - bass->SetBOpnd(iassign->addrExpr, 0); + bass->SetBOpnd(iassign.addrExpr, 0); bass->SetBOpnd(rhs, 1); - newblk->AddStatement(bass); + newblk.AddStatement(bass); } void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { @@ -355,7 +355,7 @@ void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { if (iassign->GetFieldID() != 0) { MIRStructType *structty = static_cast( GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx())); - offset = becommon->GetFieldOffset(*structty, iassign->GetFieldID()).first; + offset = structty->GetFieldOffsetFromBaseAddr(iassign->GetFieldID()).byteOffset; TyIdx ftyidx = structty->TraverseToField(iassign->GetFieldID()).second.first; type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftyidx); } else { @@ -372,7 +372,7 @@ void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { iassign->rhs); newblk->AddStatement(iassignoff); } else { - LowerAggIassign(iassign, type, offset, newblk); + LowerAggIassign(*iassign, type, offset, *newblk); } } @@ -412,7 +412,7 @@ void LMBCLowerer::LowerCall(NaryStmtNode *stmt, BlockNode *newblk) { if (stmt->GetOpCode() == OP_icallproto) { IcallNode *icallproto = static_cast(stmt); funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto->GetRetTyIdx())); - paramInPrototype = (i - 1) < funcType->GetParamTypeList().size(); + paramInPrototype = (i == 0) ? 
false : (i - 1) < funcType->GetParamTypeList().size(); } else { CallNode *callNode = static_cast(stmt); MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); @@ -451,7 +451,7 @@ void LMBCLowerer::LowerCall(NaryStmtNode *stmt, BlockNode *newblk) { newblk->AddStatement(stmt); } -void LMBCLowerer::FixPrototype4FirstArgReturn(IcallNode *icall) { +void LMBCLowerer::FixPrototype4FirstArgReturn(const IcallNode *icall) const { MIRFuncType *ftype = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(icall->GetRetTyIdx())); if (!ftype->FirstArgReturn()) { return; diff --git a/src/mapleall/maple_me/src/lmbc_memlayout.cpp b/src/mapleall/maple_me/src/lmbc_memlayout.cpp index b202c8f813d4155ca6dae536a50c6abf9cf65a77..fef44408ac4cb8b639fb6e77776b422625b0bbc8 100644 --- a/src/mapleall/maple_me/src/lmbc_memlayout.cpp +++ b/src/mapleall/maple_me/src/lmbc_memlayout.cpp @@ -31,8 +31,8 @@ void LMBCMemLayout::LayoutStackFrame(void) { } // allocate the local variables - uint32 symtabsize = func->GetSymTab()->GetSymbolTableSize(); - for (uint32 i = 0; i < symtabsize; i++) { + size_t symtabsize = func->GetSymTab()->GetSymbolTableSize(); + for (size_t i = 0; i < symtabsize; i++) { MIRSymbol *sym = func->GetSymTab()->GetSymbolFromStIdx(i); if (!sym) { continue; @@ -42,30 +42,30 @@ void LMBCMemLayout::LayoutStackFrame(void) { } if (sym->GetStorageClass() == kScPstatic && sym->LMBCAllocateOffSpecialReg()) { uint32 stindex = sym->GetStIndex(); - sym_alloc_table[stindex].mem_segment = seg_GPbased; - seg_GPbased->size = maplebe::RoundUp(seg_GPbased->size, sym->GetType()->GetAlign()); + sym_alloc_table[stindex].memSegment = seg_GPbased; + seg_GPbased->size = static_cast(maplebe::RoundUp(seg_GPbased->size, sym->GetType()->GetAlign())); sym_alloc_table[stindex].offset = seg_GPbased->size; - seg_GPbased->size += sym->GetType()->GetSize(); + seg_GPbased->size += static_cast(sym->GetType()->GetSize()); } if (sym->GetStorageClass() != kScAuto) { 
continue; } uint32 stindex = sym->GetStIndex(); - sym_alloc_table[stindex].mem_segment = &seg_FPbased; - seg_FPbased.size -= sym->GetType()->GetSize(); - seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, sym->GetType()->GetAlign()); + sym_alloc_table[stindex].memSegment = &seg_FPbased; + seg_FPbased.size -= static_cast(sym->GetType()->GetSize()); + seg_FPbased.size = static_cast(maplebe::RoundDown(seg_FPbased.size, sym->GetType()->GetAlign())); sym_alloc_table[stindex].offset = seg_FPbased.size; } } GlobalMemLayout::GlobalMemLayout(MIRModule *mod, MapleAllocator *mallocator) : seg_GPbased(MS_GPbased), sym_alloc_table(mallocator->Adapter()), mirModule(mod) { - uint32 symtabsize = GlobalTables::GetGsymTable().GetSymbolTableSize(); + size_t symtabsize = GlobalTables::GetGsymTable().GetSymbolTableSize(); sym_alloc_table.resize(symtabsize); MIRSymbol *sym = nullptr; // allocate the global variables ordered based on alignments for (uint32 curalign = 8; curalign != 0; curalign >>= 1) { - for (uint32 i = 0; i < symtabsize; i++) { + for (size_t i = 0; i < symtabsize; i++) { sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); if (!sym) { continue; @@ -80,13 +80,13 @@ GlobalMemLayout::GlobalMemLayout(MIRModule *mod, MapleAllocator *mallocator) continue; } uint32 stindex = sym->GetStIndex(); - sym_alloc_table[stindex].mem_segment = &seg_GPbased; - seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, sym->GetType()->GetAlign()); + sym_alloc_table[stindex].memSegment = &seg_GPbased; + seg_GPbased.size = static_cast(maplebe::RoundUp(seg_GPbased.size, sym->GetType()->GetAlign())); sym_alloc_table[stindex].offset = seg_GPbased.size; - seg_GPbased.size += sym->GetType()->GetSize(); + seg_GPbased.size += static_cast(sym->GetType()->GetSize()); } } - seg_GPbased.size = maplebe::RoundUp(seg_GPbased.size, GetPrimTypeSize(PTY_ptr)); + seg_GPbased.size = static_cast(maplebe::RoundUp(seg_GPbased.size, GetPrimTypeSize(PTY_ptr))); mirModule->SetGlobalMemSize(seg_GPbased.size); } 
diff --git a/src/mapleall/maple_me/src/mc_ssa_pre.cpp b/src/mapleall/maple_me/src/mc_ssa_pre.cpp index 0eea372060162de82ff3eb619819fb718cb945e5..9d4fcd456a131cb488deec933057f61d28454683 100644 --- a/src/mapleall/maple_me/src/mc_ssa_pre.cpp +++ b/src/mapleall/maple_me/src/mc_ssa_pre.cpp @@ -15,9 +15,9 @@ #include #include #include -#include "mc_ssa_pre.h" #include "dominance.h" #include "mir_builder.h" +#include "mc_ssa_pre.h" // Implementation of the MC-SSAPRE algorithm based on the PLDI 2011 paper: // An SSA-based Algorithm for Optimal Speculative Code Motion Under an Execution Profile @@ -31,21 +31,21 @@ namespace maple { // ================ Step 8: WillBeAvail ================= -void McSSAPre::ResetMCWillBeAvail(MePhiOcc *occ) const { - if (!occ->IsMCWillBeAvail()) { +void McSSAPre::ResetMCWillBeAvail(MePhiOcc *phiOcc) const { + if (!phiOcc->IsMCWillBeAvail()) { return; } - occ->SetIsMCWillBeAvail(false); + phiOcc->SetIsMCWillBeAvail(false); for (auto it = phiOccs.begin(); it != phiOccs.end(); ++it) { - MePhiOcc *phiOcc = *it; - if (!phiOcc->IsMCWillBeAvail()) { + MePhiOcc *aPhiOcc = *it; + if (!aPhiOcc->IsMCWillBeAvail()) { continue; } - for (MePhiOpndOcc *phiOpnd : phiOcc->GetPhiOpnds()) { - if (phiOpnd->GetDef() != nullptr && phiOpnd->GetDef() == occ) { - // phiOpnd is a use of occ + for (MePhiOpndOcc *phiOpnd : aPhiOcc->GetPhiOpnds()) { + if (phiOpnd->GetDef() != nullptr && phiOpnd->GetDef() == aPhiOcc) { + // phiOpnd is a use of phiOcc if (!phiOpnd->HasRealUse() && !phiOpnd->IsMCInsert()) { - ResetMCWillBeAvail(phiOcc); + ResetMCWillBeAvail(aPhiOcc); break; } } @@ -82,7 +82,7 @@ void McSSAPre::ComputeMCWillBeAvail() const { // ================ Step 7: Max Flow / Min Cut ================= -bool McSSAPre::AmongMinCut(RGNode *nd, uint32 idx) const { +bool McSSAPre::AmongMinCut(const RGNode *nd, uint32 idx) const { for (Visit *visit : minCut) { if (visit->node == nd && visit->predIdx == idx) { return true; @@ -150,11 +150,11 @@ void McSSAPre::DumpRGToFile() { 
mirModule->GetOut() << "++++ ssapre candidate " << workCand->GetIndex() << " dumped to " << fileName << "\n"; } -bool McSSAPre::IncludedEarlier(Visit **cut, Visit *curVisit, uint32 nextRouteIdx) { +bool McSSAPre::IncludedEarlier(Visit **cut, const Visit &curVisit, uint32 nextRouteIdx) const { uint32 i = nextRouteIdx; while (i != 0) { i--; - if (cut[i]->node == curVisit->node && cut[i]->predIdx == curVisit->predIdx) { + if (cut[i]->node == curVisit.node && cut[i]->predIdx == curVisit.predIdx) { return true; } } @@ -162,9 +162,9 @@ bool McSSAPre::IncludedEarlier(Visit **cut, Visit *curVisit, uint32 nextRouteIdx } // remove this route's nodes from cutSet -void McSSAPre::RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route *route) { - for (uint32 i = 1; i < route->visits.size(); i++) { - Visit &curVisit = route->visits[i]; +void McSSAPre::RemoveRouteNodesFromCutSet(std::unordered_multiset &cutSet, Route &route) const { + for (uint32 i = 1; i < route.visits.size(); i++) { + Visit &curVisit = route.visits[i]; std::unordered_multiset::iterator it = cutSet.find(curVisit.node->id); ASSERT(it != cutSet.end(), "cutSet maintenance error"); cutSet.erase(it); @@ -180,7 +180,7 @@ bool McSSAPre::SearchRelaxedMinCut(Visit **cut, std::unordered_multiset // determine starting value of visitIdx: start searching back from route end; // if any node is in cutSet, set visitIdx as that nodes's index in route; // otherwise, set visitIdx to 0 - uint32 visitIdx = curRoute->visits.size(); + size_t visitIdx = curRoute->visits.size(); do { visitIdx--; if (cutSet.count(curRoute->visits[visitIdx].node->id) != 0) { @@ -189,7 +189,7 @@ bool McSSAPre::SearchRelaxedMinCut(Visit **cut, std::unordered_multiset } while (visitIdx != 1); // update cutSet with visited nodes lower than visitIdx if (visitIdx != 1) { - for (uint i = visitIdx - 1; i > 0; i--) { + for (size_t i = visitIdx - 1; i > 0; i--) { cutSet.insert(curRoute->visits[i].node->id); } } @@ -197,7 +197,7 @@ bool 
McSSAPre::SearchRelaxedMinCut(Visit **cut, std::unordered_multiset bool success = false; do { if (visitIdx == curRoute->visits.size()) { - RemoveRouteNodesFromCutSet(cutSet, curRoute); + RemoveRouteNodesFromCutSet(cutSet, *curRoute); return false; } curVisit = &curRoute->visits[visitIdx]; @@ -206,12 +206,12 @@ bool McSSAPre::SearchRelaxedMinCut(Visit **cut, std::unordered_multiset if (visitIdx != 0) { cutSet.insert(curVisit->node->id); } - if (IncludedEarlier(cut, curVisit, nextRouteIdx)) { + if (IncludedEarlier(cut, *curVisit, nextRouteIdx)) { visitCap = 0; } success = (flowSoFar + visitCap <= relaxedMaxFlowValue); if (success && nextRouteIdx != (maxFlowRoutes.size() - 1)) { - success = SearchRelaxedMinCut(cut, cutSet, nextRouteIdx+1, flowSoFar + visitCap); + success = SearchRelaxedMinCut(cut, cutSet, nextRouteIdx + 1, flowSoFar + visitCap); } visitIdx++; } while (!success); @@ -244,7 +244,7 @@ bool McSSAPre::SearchMinCut(Visit **cut, std::unordered_multiset &cutSet bool success = false; do { if (visitIdx == curRoute->visits.size()) { - RemoveRouteNodesFromCutSet(cutSet, curRoute); + RemoveRouteNodesFromCutSet(cutSet, *curRoute); return false; } curVisit = &curRoute->visits[visitIdx]; @@ -261,12 +261,12 @@ bool McSSAPre::SearchMinCut(Visit **cut, std::unordered_multiset &cutSet if (visitIdx != 0) { cutSet.insert(curVisit->node->id); } - if (IncludedEarlier(cut, curVisit, nextRouteIdx)) { + if (IncludedEarlier(cut, *curVisit, nextRouteIdx)) { visitCap = 0; } success = (flowSoFar + visitCap <= maxFlowValue); if (success && nextRouteIdx != (maxFlowRoutes.size() - 1)) { - success = SearchMinCut(cut, cutSet, nextRouteIdx+1, flowSoFar + visitCap); + success = SearchMinCut(cut, cutSet, nextRouteIdx + 1, flowSoFar + visitCap); } visitIdx++; } while (!success); @@ -290,7 +290,7 @@ void McSSAPre::DetermineMinCut() { if (maxFlowRoutes.size() >= 20) { // apply arbitrary heuristics to reduce search time relaxedSearch = true; - relaxedMaxFlowValue = maxFlowValue * 
(maxFlowRoutes.size() / 10); + relaxedMaxFlowValue = maxFlowValue * static_cast(maxFlowRoutes.size() / 10); } bool success = !relaxedSearch && SearchMinCut(cut, cutSet, 0, 0); if (!success) { @@ -305,14 +305,14 @@ void McSSAPre::DetermineMinCut() { CHECK_FATAL(false, "McSSAPre::DetermineMinCut: failed to find min cut"); } // sort cut - std::sort(cut, cut+maxFlowRoutes.size(), [](const Visit *left, const Visit *right) { + std::sort(cut, cut + maxFlowRoutes.size(), [](const Visit *left, const Visit *right) { return (left->node != right->node) ? (left->node->id < right->node->id) : (left->predIdx < right->predIdx); }); // remove duplicates in the cut to form mincut minCut.push_back(cut[0]); size_t duplicatedVisits = 0; for (uint32 i = 1; i < maxFlowRoutes.size(); i++) { - if (cut[i] != cut[i-1]) { + if (cut[i] != cut[i - 1]) { minCut.push_back(cut[i]); } else { duplicatedVisits++; @@ -331,29 +331,29 @@ void McSSAPre::DetermineMinCut() { } } -bool McSSAPre::VisitANode(RGNode *node, Route *route, std::vector &visitedNodes) { - ASSERT(node->pred.size() != 0, "McSSAPre::VisitANode: no connection to source node"); +bool McSSAPre::VisitANode(RGNode &node, Route *route, std::vector &visitedNodes) { + ASSERT(node.pred.size() != 0, "McSSAPre::VisitANode: no connection to source node"); // if any pred is the source and there's capacity to reach it, return success - for (uint32 i = 0; i < node->pred.size(); i++) { - if (node->pred[i] == source && node->inEdgesCap[i] > node->usedCap[i]) { + for (uint32 i = 0; i < node.pred.size(); i++) { + if (node.pred[i] == source && node.inEdgesCap[i] > node.usedCap[i]) { // if there is another pred never taken that also reaches source, use that instead - for (uint32 k = i + 1; k < node->pred.size(); k++) { - if (node->pred[k] == source && node->usedCap[k] == 0 && node->inEdgesCap[k] > 0) { - route->visits.emplace_back(Visit(node, k)); + for (uint32 k = i + 1; k < node.pred.size(); k++) { + if (node.pred[k] == source && node.usedCap[k] == 0 && 
node.inEdgesCap[k] > 0) { + route->visits.emplace_back(Visit(&node, k)); return true; } } - route->visits.push_back(Visit(node, i)); + route->visits.emplace_back(Visit(&node, i)); return true; } } // pick an never-taken predecessor path first - for (uint32 i = 0; i < node->pred.size(); i++) { - if (node->usedCap[i] == 0 && node->inEdgesCap[i] > 0 && !visitedNodes[node->pred[i]->id]) { - route->visits.push_back(Visit(node, i)); - visitedNodes[node->pred[i]->id] = true; - bool success = VisitANode(node->pred[i], route, visitedNodes); + for (uint32 i = 0; i < node.pred.size(); i++) { + if (node.usedCap[i] == 0 && node.inEdgesCap[i] > 0 && !visitedNodes[node.pred[i]->id]) { + route->visits.emplace_back(Visit(&node, i)); + visitedNodes[node.pred[i]->id] = true; + bool success = VisitANode(*node.pred[i], route, visitedNodes); if (!success) { route->visits.pop_back(); } else { @@ -362,21 +362,21 @@ bool McSSAPre::VisitANode(RGNode *node, Route *route, std::vector &visited } } - size_t numPreds = node->pred.size(); + size_t numPreds = node.pred.size(); uint32 sortedPred[numPreds]; for (uint32 i = 0; i < numPreds; i++) { sortedPred[i] = i; } // put sortedPred[] in increasing order of capacities - std::sort(sortedPred, sortedPred+numPreds, [node](uint32 m, uint32 n) { - return node->inEdgesCap[m] < node->inEdgesCap[n]; }); + std::sort(sortedPred, sortedPred + numPreds, [node](uint32 m, uint32 n) { + return node.inEdgesCap[m] < node.inEdgesCap[n]; }); // for this round, prefer predecessor with higher unused capacity for (uint32 i = 0; i < numPreds; i++) { uint32 j = sortedPred[i]; - if (!visitedNodes[node->pred[j]->id] && node->inEdgesCap[j] > node->usedCap[j]) { - route->visits.push_back(Visit(node, j)); - visitedNodes[node->pred[j]->id] = true; - bool success = VisitANode(node->pred[j], route, visitedNodes); + if (!visitedNodes[node.pred[j]->id] && node.inEdgesCap[j] > node.usedCap[j]) { + route->visits.emplace_back(Visit(&node, j)); + visitedNodes[node.pred[j]->id] = true; 
+ bool success = VisitANode(*node.pred[j], route, visitedNodes); if (!success) { route->visits.pop_back(); } else { @@ -396,9 +396,9 @@ bool McSSAPre::FindAnotherRoute() { // pick an untaken sink predecessor first for (uint32 i = 0; i < sink->pred.size(); i++) { if (sink->usedCap[i] == 0) { - route->visits.push_back(Visit(sink, i)); + route->visits.emplace_back(Visit(sink, i)); visitedNodes[sink->pred[i]->id] = true; - success = VisitANode(sink->pred[i], route, visitedNodes); + success = VisitANode(*sink->pred[i], route, visitedNodes); if (!success) { route->visits.pop_back(); } else { @@ -409,9 +409,9 @@ bool McSSAPre::FindAnotherRoute() { if (!success) { // now, pick any sink predecessor for (uint32 i = 0; i < sink->pred.size(); i++) { - route->visits.push_back(Visit(sink, i)); + route->visits.emplace_back(Visit(sink, i)); visitedNodes[sink->pred[i]->id] = true; - success = VisitANode(sink->pred[i], route, visitedNodes); + success = VisitANode(*sink->pred[i], route, visitedNodes); if (!success) { route->visits.pop_back(); } else { @@ -423,9 +423,9 @@ bool McSSAPre::FindAnotherRoute() { return false; } // find bottleneck capacity along route - uint64 minAvailCap = route->visits[0].AvailableCapacity(); + FreqType minAvailCap = route->visits[0].AvailableCapacity(); for (size_t i = 1; i < route->visits.size(); i++) { - uint64 curAvailCap = route->visits[i].AvailableCapacity(); + FreqType curAvailCap = route->visits[i].AvailableCapacity(); minAvailCap = std::min(minAvailCap, curAvailCap); } route->flowValue = minAvailCap; @@ -513,7 +513,7 @@ void McSSAPre::AddSingleSource() { RGNode *sucNode = occ2RGNodeMap[phiOcc]; sucNode->pred.push_back(source); sucNode->phiOpndIndices.push_back(i); - sucNode->inEdgesCap.push_back(phiOcc->GetBB()->GetPred(i)->GetFrequency()+1); + sucNode->inEdgesCap.push_back(phiOcc->GetBB()->GetPred(i)->GetFrequency() + 1); sucNode->usedCap.push_back(0); numSourceEdges++; } @@ -539,7 +539,7 @@ void McSSAPre::GraphReduction() { for (MePhiOcc 
*phiOcc : phiOccs) { if (phiOcc->IsPartialAnt() && !phiOcc->IsFullyAvail()) { RGNode *newRGNode = perCandMemPool->New(&perCandAllocator, nextRGNodeId++, phiOcc); - occ2RGNodeMap.insert(std::pair(phiOcc, newRGNode)); + occ2RGNodeMap.emplace(std::pair(phiOcc, newRGNode)); numPhis++; } } @@ -559,7 +559,7 @@ void McSSAPre::GraphReduction() { numRealOccs++; RGNode *def = occ2RGNodeMap[defOcc]; use->pred.push_back(def); - use->inEdgesCap.push_back(realOcc->GetBB()->GetFrequency()+1); + use->inEdgesCap.push_back(realOcc->GetBB()->GetFrequency() + 1); use->usedCap.push_back(0); numType2Edges++; } @@ -586,7 +586,7 @@ void McSSAPre::GraphReduction() { } use->phiOpndIndices.push_back(i); ASSERT(i != defPhiOcc->GetPhiOpnds().size(), "McSSAPre::GraphReduction: cannot find corresponding phi opnd"); - use->inEdgesCap.push_back(defPhiOcc->GetBB()->GetPred(i)->GetFrequency()+1); + use->inEdgesCap.push_back(defPhiOcc->GetBB()->GetPred(i)->GetFrequency() + 1); use->usedCap.push_back(0); numType1Edges++; } @@ -603,8 +603,8 @@ void McSSAPre::GraphReduction() { // ================ Step 3: Data Flow Computations ================= // set partial anticipation -void McSSAPre::SetPartialAnt(MePhiOpndOcc *phiOpnd) const { - MeOccur *defOcc = phiOpnd->GetDef(); +void McSSAPre::SetPartialAnt(MePhiOpndOcc &phiOpnd) const { + MeOccur *defOcc = phiOpnd.GetDef(); if (defOcc == nullptr || defOcc->GetOccType() != kOccPhiocc) { return; } @@ -614,7 +614,7 @@ void McSSAPre::SetPartialAnt(MePhiOpndOcc *phiOpnd) const { } defPhiOcc->SetIsPartialAnt(true); for (MePhiOpndOcc *mePhiOpnd : defPhiOcc->GetPhiOpnds()) { - SetPartialAnt(mePhiOpnd); + SetPartialAnt(*mePhiOpnd); } } @@ -625,7 +625,7 @@ void McSSAPre::ComputePartialAnt() const { if (phiOcc->IsPartialAnt()) { // propagate partialAnt along use-def edges for (MePhiOpndOcc *phiOpnd : phiOcc->GetPhiOpnds()) { - SetPartialAnt(phiOpnd); + SetPartialAnt(*phiOpnd); } } } diff --git a/src/mapleall/maple_me/src/me_abco.cpp 
b/src/mapleall/maple_me/src/me_abco.cpp index 4929d481d3081d4d164e06fa78b8d74b923f7e39..6190f4dd5e4e6a80e063fb7953a40810a8efec27 100644 --- a/src/mapleall/maple_me/src/me_abco.cpp +++ b/src/mapleall/maple_me/src/me_abco.cpp @@ -125,7 +125,7 @@ void MeABC::BuildSoloPiInGraph(const PiassignMeStmt &piMeStmt) { (void)inequalityGraph->AddEdge(*piLHSNode, *piRHSNode, 0, EdgeType::kLower); } -bool MeABC::PiExecuteBeforeCurrentCheck(const PiassignMeStmt &piMeStmt) { +bool MeABC::PiExecuteBeforeCurrentCheck(const PiassignMeStmt &piMeStmt) const { BB *currentCheckBB = currentCheck->GetBB(); const BB *piBB = piMeStmt.GetBB(); if (currentCheckBB != piBB) { @@ -767,7 +767,7 @@ void MeABC::AddUseDef(MeExpr &meExpr) { } void MeABC::CollectCareInsns() { - for (auto pair : arrayChecks) { + for (const auto &pair : std::as_const(arrayChecks)) { MeStmt *meStmt = pair.first; if (IsCallAssigned(meStmt->GetOp())) { arrayNewChecks[meStmt] = nullptr; @@ -787,7 +787,7 @@ bool MeABC::ProveGreaterZ(const MeExpr &weight) { } void MeABC::ReSolveEdge() { - for (auto pair : unresolveEdge) { + for (const auto &pair : std::as_const(unresolveEdge)) { MeExpr *weight = pair.second; if (!inequalityGraph->HasNode(*weight)) { continue; @@ -968,8 +968,8 @@ void MeABC::InitNewStartPoint(MeStmt &meStmt, MeExpr &opnd1, MeExpr &opnd2, bool (void)inequalityGraph->GetOrCreateConstNode(static_cast(&opnd2)->GetExtIntValue()); } BB *curBB = meStmt.GetBB(); - if (curBB->GetPiList().size()) { - for (auto pair : curBB->GetPiList()) { + if (curBB->GetPiList().size() != 0) { + for (const auto &pair : std::as_const(curBB->GetPiList())) { CHECK_FATAL(pair.second.size() >= 1, "must be"); PiassignMeStmt *pi = pair.second[0]; AddUseDef(*pi->GetLHS()); @@ -1063,7 +1063,7 @@ void MeABC::ExecuteABCO() { if (CollectABC()) { ssi->ConvertToSSI(); CollectCareInsns(); - for (auto pair : arrayNewChecks) { + for (const auto &pair : std::as_const(arrayNewChecks)) { if (pair.first->GetOp() == OP_callassigned) { auto *callNode = 
static_cast(pair.first); ProcessCallParameters(*callNode); diff --git a/src/mapleall/maple_me/src/me_analyze_rc.cpp b/src/mapleall/maple_me/src/me_analyze_rc.cpp index dd40aba17c760cd1335d640cc010a4ea0573d144..ce538c3ac46f7735e02c951b8296fbc7086e8ea1 100644 --- a/src/mapleall/maple_me/src/me_analyze_rc.cpp +++ b/src/mapleall/maple_me/src/me_analyze_rc.cpp @@ -89,8 +89,8 @@ void RCItem::Dump() { } RCItem *AnalyzeRC::FindOrCreateRCItem(OriginalSt &ost) { - auto mapIt = rcItemsMap.find(ost.GetIndex()); - if (mapIt != rcItemsMap.end()) { + const auto mapIt = std::as_const(rcItemsMap).find(ost.GetIndex()); + if (mapIt != rcItemsMap.cend()) { return mapIt->second; } RCItem *rcItem = analyzeRCMp->New(ost, analyzeRCAllocator); @@ -390,7 +390,7 @@ bool AnalyzeRC::NeedDecRef(const VarMeExpr &var) const { // among the arguments in the intrinsiccall to INTRN_CLEANUP_LOCALREFVARS, those // that are zero version are not live, and can be deleted; if the number of // arguments left are > `kCleanupLocalRefVarsLimit`, delete the intrinsiccall. 
-void AnalyzeRC::RemoveUnneededCleanups() { +void AnalyzeRC::RemoveUnneededCleanups() const { for (BB *bb : cfg->GetCommonExitBB()->GetPred()) { auto &meStmts = bb->GetMeStmts(); if (meStmts.empty() || meStmts.back().GetOp() != OP_return) { @@ -425,7 +425,7 @@ void AnalyzeRC::RemoveUnneededCleanups() { } void AnalyzeRC::Run() { - if (func.GetHints() & kPlacementRCed) { + if ((func.GetHints() & kPlacementRCed) != 0) { skipLocalRefVars = true; } else { func.SetHints(func.GetHints() | kAnalyzeRCed); diff --git a/src/mapleall/maple_me/src/me_bb_layout.cpp b/src/mapleall/maple_me/src/me_bb_layout.cpp index 6f8dc28b0336357492a08b39234a1f219adfb64d..b9651d0696fdfc4b9b1c3c43a48d9b24d2b4f4b2 100644 --- a/src/mapleall/maple_me/src/me_bb_layout.cpp +++ b/src/mapleall/maple_me/src/me_bb_layout.cpp @@ -21,6 +21,8 @@ #include "me_irmap.h" #include "me_option.h" #include "me_predict.h" +#include "mir_lower.h" +#include "chain_layout.h" // This BB layout strategy strictly obeys source ordering when inside try blocks. // This Optimization will reorder the bb layout. it start from the first bb of func. 
@@ -63,264 +65,6 @@ static void CreateGoto(BB &bb, MeFunction &func, BB &fallthru) { bb.SetKind(kBBGoto); } -bool BBLayout::IsBBInCurrContext(const BB &bb, const MapleVector *context) const { - if (context == nullptr) { - return true; - } - return (*context)[bb.GetBBId()]; -} - -// Create chains for each BB -void BBLayout::InitBBChains() { - uint32 id = 0; - bb2chain.resize(cfg->NumBBs(), nullptr); - for (auto it = cfg->valid_begin(); it != cfg->valid_end(); ++it) { - // BBChain constructor will update bb2chain - (void)layoutAlloc.GetMemPool()->New(layoutAlloc, bb2chain, *it, id++); - } -} - -void BBLayout::BuildChainForFunc() { - debugChainLayout = enabledDebug; - uint32 validBBNum = 0; - for (auto it = cfg->valid_begin(); it != cfg->valid_end(); ++it) { - ++validBBNum; - } - --validBBNum; // exclude common entry BB - if (debugChainLayout) { - LogInfo::MapleLogger() << "\n[Chain layout] " << func.GetName() << ", valid bb num: " << validBBNum << std::endl; - } - InitBBChains(); - BuildChainForLoops(); - // init ready chains for func - for (auto it = cfg->valid_begin(); it != cfg->valid_end(); ++it) { - BB *bb = *it; - BBId bbId = bb->GetBBId(); - BBChain *chain = bb2chain[bbId]; - if (chain->IsReadyToLayout(nullptr)) { - readyToLayoutChains.insert(chain); - } - } - BB *entryBB = func.GetCfg()->GetCommonEntryBB(); - BBChain *entryChain = bb2chain[entryBB->GetBBId()]; - DoBuildChain(*entryBB, *entryChain, nullptr); - // To sure all of BBs have been laid out - CHECK_FATAL(entryChain->size() == validBBNum, "has any BB not been laid out?"); -} - -void BBLayout::BuildChainForLoops() { - if (meLoop == nullptr || meLoop->GetMeLoops().empty()) { - return; - } - auto &loops = meLoop->GetMeLoops(); - // sort loops from inner most to outer most - // need use the same sort rules as prediction? 
- std::stable_sort(loops.begin(), loops.end(), - [](const LoopDesc *loop1, const LoopDesc *loop2) { return loop1->nestDepth > loop2->nestDepth; }); - // build chain for loops one by one - auto *context = layoutAlloc.GetMemPool()->New>(cfg->NumBBs(), false, layoutAlloc.Adapter()); - for (auto *loop : loops) { - BuildChainForLoop(loop, context); - } -} - -void BBLayout::BuildChainForLoop(LoopDesc *loop, MapleVector *context) { - // init loop context - std::fill(context->begin(), context->end(), false); - for (BBId bbId : loop->loopBBs) { - (*context)[bbId] = true; - } - // init ready chains for loop - for (BBId bbId : loop->loopBBs) { - BBChain *chain = bb2chain[bbId]; - if (chain->IsReadyToLayout(context)) { - readyToLayoutChains.insert(chain); - } - } - // find loop chain starting BB - BB *startBB = FindBestStartBBForLoop(loop, context); - if (startBB == nullptr) { - return; // all blocks in the loop have been laid out, just return - } - BBChain *startChain = bb2chain[startBB->GetBBId()]; - DoBuildChain(*startBB, *startChain, context); - readyToLayoutChains.clear(); -} - -// Multiple loops may share the same header, we try to find the best unplaced BB in the loop -// This function can be improved -BB *BBLayout::FindBestStartBBForLoop(LoopDesc *loop, const MapleVector *context) { - // If the loop header has not been placed, take it as start BB of the loop chain - auto *headerChain = bb2chain[loop->head->GetBBId()]; - if (headerChain->size() == 1) { - return loop->head; - } - // take inner loop chain tail BB as start BB - if (headerChain->size() > 1 && IsBBInCurrContext(*headerChain->GetTail(), context)) { - return headerChain->GetTail(); - } - for (BBId bbId : loop->loopBBs) { - if (bb2chain[bbId]->size() == 1) { - return cfg->GetBBFromID(bbId); - } - } - return nullptr; -} - -void BBLayout::DoBuildChain(const BB &header, BBChain &chain, const MapleVector *context) { - CHECK_FATAL(bb2chain[header.GetBBId()] == &chain, "bb2chain mis-match"); - BB *bb = 
chain.GetTail(); - BB *bestSucc = GetBestSucc(*bb, chain, context, false); - while (bestSucc != nullptr) { - BBChain *succChain = bb2chain[bestSucc->GetBBId()]; - succChain->UpdateSuccChainBeforeMerged(chain, context, readyToLayoutChains); - chain.MergeFrom(succChain); - readyToLayoutChains.erase(succChain); - bb = chain.GetTail(); - bestSucc = GetBestSucc(*bb, chain, context, false); - } - if (debugChainLayout) { - bool inLoop = context != nullptr; - LogInfo::MapleLogger() << "Finish forming " << (inLoop ? "loop" : "func") << " chain: "; - chain.Dump(); - } -} - -bool BBLayout::IsCandidateSucc(const BB &bb, const BB &succ, const MapleVector *context) { - if (!IsBBInCurrContext(succ, context)) { // succ must be in the current context (current loop or current func) - return false; - } - if (succ.GetKind() == kBBNoReturn) { - return false; // noreturn BB is unlikely taken - } - if (bb2chain[succ.GetBBId()] == bb2chain[bb.GetBBId()]) { // bb and succ should belong to different chains - return false; - } - if (succ.GetBBId() == 1) { // special case, exclude common exit BB - return false; - } - return true; -} - -// Whether succ has a better layout pred than bb -bool BBLayout::HasBetterLayoutPred(const BB &bb, BB &succ) { - auto &predList = succ.GetPred(); - // predList.size() may be 0 if bb is common entry BB - if (predList.size() <= 1) { - return false; - } - FreqType sumEdgeFreq = succ.GetFrequency(); - const double hotEdgeFreqPercent = 0.8; // should further fine tuning - FreqType hotEdgeFreq = sumEdgeFreq * static_cast(hotEdgeFreqPercent); - // if edge freq(bb->succ) contribute more than 80% to succ block freq, no better layout pred than bb - for (uint32 i = 0; i < predList.size(); ++i) { - if (predList[i] == &bb) { - continue; - } - FreqType edgeFreq = predList[i]->GetEdgeFreq(&succ); - if (edgeFreq > (sumEdgeFreq - hotEdgeFreq)) { - return true; - } - } - return false; -} - -// considerBetterPredForSucc: whether consider better layout pred for succ, we found 
better -// performance when this argument is disabled -BB *BBLayout::GetBestSucc(BB &bb, const BBChain &chain, const MapleVector *context, - bool considerBetterPredForSucc) { - // (1) search in succ - CHECK_FATAL(bb2chain[bb.GetBBId()] == &chain, "bb2chain mis-match"); - FreqType bestEdgeFreq = 0; - BB *bestSucc = nullptr; - for (uint32 i = 0; i < bb.GetSucc().size(); ++i) { - BB *succ = bb.GetSucc(i); - if (!IsCandidateSucc(bb, *succ, context)) { - continue; - } - if (considerBetterPredForSucc && HasBetterLayoutPred(bb, *succ)) { - continue; - } - FreqType currEdgeFreq = bb.GetEdgeFreq(i); // attention: entryBB->succFreq[i] is always 0 - if (bb.GetBBId() == 0) { // special case for common entry BB - CHECK_FATAL(bb.GetSucc().size() == 1, "common entry BB should not have more than 1 succ"); - bestSucc = succ; - break; - } - if (currEdgeFreq > bestEdgeFreq) { // find max edge freq - bestEdgeFreq = currEdgeFreq; - bestSucc = succ; - } - } - if (bestSucc != nullptr) { - if (debugChainLayout) { - LogInfo::MapleLogger() << "Select [range1 succ ]: "; - LogInfo::MapleLogger() << bb.GetBBId() << " -> " << bestSucc->GetBBId() << std::endl; - } - return bestSucc; - } - - // (2) search in readyToLayoutChains - FreqType bestFreq = 0; - for (auto it = readyToLayoutChains.begin(); it != readyToLayoutChains.end(); ++it) { - BBChain *readyChain = *it; - BB *header = readyChain->GetHeader(); - if (!IsCandidateSucc(bb, *header, context)) { - continue; - } - bool useBBFreq = false; - if (useBBFreq) { // use bb freq - if (header->GetFrequency() > bestFreq) { // find max bb freq - bestFreq = header->GetFrequency(); - bestSucc = header; - } - } else { // use edge freq - FreqType subBestFreq = 0; - for (auto *pred : header->GetPred()) { - FreqType curFreq = pred->GetEdgeFreq(header); - if (curFreq > subBestFreq) { - subBestFreq = curFreq; - } - } - if (subBestFreq > bestFreq) { - bestFreq = subBestFreq; - bestSucc = header; - } else if (subBestFreq == bestFreq && bestSucc != nullptr && - 
bb2chain[header->GetBBId()]->GetId() < bb2chain[bestSucc->GetBBId()]->GetId()) { - bestSucc = header; - } - } - } - if (bestSucc != nullptr) { - readyToLayoutChains.erase(bb2chain[bestSucc->GetBBId()]); - if (debugChainLayout) { - LogInfo::MapleLogger() << "Select [range2 ready]: "; - LogInfo::MapleLogger() << bb.GetBBId() << " -> " << bestSucc->GetBBId() << std::endl; - } - return bestSucc; - } - - // (3) search left part in context by topological sequence - const auto &rpoVec = dom->GetReversePostOrder(); - bool searchedAgain = false; - for (uint32 i = rpoSearchPos; i < rpoVec.size(); ++i) { - auto *candBB = cfg->GetBBFromID(BBId(rpoVec[i]->GetID())); - if (IsBBInCurrContext(*candBB, context) && bb2chain[candBB->GetBBId()] != &chain) { - rpoSearchPos = i; - if (debugChainLayout) { - LogInfo::MapleLogger() << "Select [range3 rpot ]: "; - LogInfo::MapleLogger() << bb.GetBBId() << " -> " << candBB->GetBBId() << std::endl; - } - return candBB; - } - if (i == rpoVec.size() - 1 && !searchedAgain) { - i = 0; - searchedAgain = true; - } - } - return nullptr; -} - // return true if bb is empty and its kind is fallthru. bool BBLayout::BBEmptyAndFallthru(const BB &bb) { if (bb.GetAttributes(kBBAttrIsTryEnd)) { @@ -593,7 +337,10 @@ void BBLayout::OptimizeBranchTarget(BB &bb) { } } do { - ASSERT(!bb.GetSucc().empty(), "container check"); + if (bb.GetSucc().empty()) { + // If the pred of bb is brTargetBB and remove target bb after opt, can not do opt continue. + return; + } // condgoto's succ layout: [0] fallthru succ, [1] target succ, [2-...] eh succ/wontexit succ // goto's succ layout: [0] target succ, [1-...] eh succ/wontexit succ BB *brTargetBB = bb.GetKind() == kBBCondGoto ? 
bb.GetSucc(1) : bb.GetSucc(0); @@ -1033,6 +780,7 @@ void BBLayout::OptimiseCFG() { } } (void)cfg->UnreachCodeAnalysis(false); + cfg->WontExitAnalysis(); } void BBLayout::SetAttrTryForTheCanBeMovedBB(BB &bb, BB &canBeMovedBB) const { @@ -1312,13 +1060,15 @@ void BBLayout::LayoutWithProf(bool useChainLayout) { BB *bb = cfg->GetFirstBB(); if (useChainLayout) { // chain BB layout - BuildChainForFunc(); - BBChain *mainChain = bb2chain[bb->GetBBId()]; + ChainLayout chainLayout(func, *layoutAlloc.GetMemPool(), enabledDebug, *meLoop, *dom); + chainLayout.BuildChainForFunc(); + NodeChain *mainChain = chainLayout.GetNode2Chain()[bb->GetID()]; + for (auto it = mainChain->begin(); it != mainChain->end(); ++it) { if (it == mainChain->begin()) { continue; // skip common entry BB } - AddBBProf(**it); + AddBBProf(*static_cast(*it)); } } else { // PH BB layout BuildEdges(); diff --git a/src/mapleall/maple_me/src/me_cfg.cpp b/src/mapleall/maple_me/src/me_cfg.cpp index 5a1ac4b2254d5a5f39aaf03b6168d26b4d42d747..78a45be369107e4f2119b65ef1b465e4398fe2f4 100644 --- a/src/mapleall/maple_me/src/me_cfg.cpp +++ b/src/mapleall/maple_me/src/me_cfg.cpp @@ -481,8 +481,8 @@ void MeCFG::FixMirCFG() { StmtNode *stmt = bb->GetTheOnlyStmtNode(); if (stmt != nullptr) { // simplify the cfg removing all succs of this bb - for (size_t si = 0; si < bb->GetSucc().size(); ++si) { - BB *sucBB = bb->GetSucc(si); + for (int64 si = 0; si < static_cast(bb->GetSucc().size()); ++si) { + BB *sucBB = bb->GetSucc(static_cast(si)); if (sucBB->GetAttributes(kBBAttrIsCatch)) { sucBB->RemovePred(*bb); --si; @@ -503,8 +503,8 @@ void MeCFG::FixMirCFG() { newBBIt, std::bind(FilterNullPtr::const_iterator>, std::placeholders::_1, end())); eIt = valid_end(); // redirect all succs of new bb to bb - for (size_t si = 0; si < newBB.GetSucc().size(); ++si) { - BB *sucBB = newBB.GetSucc(si); + for (int64 si = 0; si < static_cast(newBB.GetSucc().size()); ++si) { + BB *sucBB = newBB.GetSucc(static_cast(si)); if 
(sucBB->GetAttributes(kBBAttrIsCatch)) { sucBB->ReplacePred(&newBB, bb); --si; @@ -1797,7 +1797,7 @@ void MeCFG::BuildSCC() { } // After currBB's succ is changed, we can update currBB's target -void MeCFG::UpdateBranchTarget(BB &currBB, const BB &oldTarget, BB &newTarget, MeFunction &meFunc) { +void MeCFG::UpdateBranchTarget(BB &currBB, const BB &oldTarget, BB &newTarget, MeFunction &meFunc) const { bool forMeIR = meFunc.GetIRMap() != nullptr; // update statement offset if succ is goto target if (currBB.IsGoto()) { @@ -1909,7 +1909,7 @@ inline void ConstructEdgeFreqForBBWith2Succs(BB &bb) { // set bb succ frequency from bb freq // no critical edge is expected -void MeCFG::ConstructEdgeFreqFromBBFreq() { +void MeCFG::ConstructEdgeFreqFromBBFreq() const { // set succfreqs auto eIt = valid_end(); for (auto bIt = valid_begin(); bIt != eIt; ++bIt) { @@ -1941,7 +1941,9 @@ void MeCFG::ConstructBBFreqFromStmtFreq() { } auto eIt = valid_end(); for (auto bIt = valid_begin(); bIt != eIt; ++bIt) { - if ((*bIt)->IsEmpty()) continue; + if ((*bIt)->IsEmpty()) { + continue; + } StmtNode &first = (*bIt)->GetFirst(); StmtNode &last = (*bIt)->GetLast(); if (funcData->stmtFreqs.count(first.GetStmtID()) > 0) { diff --git a/src/mapleall/maple_me/src/me_cfg_opt.cpp b/src/mapleall/maple_me/src/me_cfg_opt.cpp index 5040d858303cf225ceada08581d9efa5774ac4b1..89bd625ac939b4bfa5aadc465acd16d84bf19010 100644 --- a/src/mapleall/maple_me/src/me_cfg_opt.cpp +++ b/src/mapleall/maple_me/src/me_cfg_opt.cpp @@ -235,7 +235,7 @@ bool MeCfgOpt::PreCheck(const MeCFG &cfg) const { return true; } -bool MeCfgOpt::Run(MeCFG &cfg) { +bool MeCfgOpt::Run(MeCFG &cfg) const { if (!PreCheck(cfg)) { return false; } diff --git a/src/mapleall/maple_me/src/me_check_cast.cpp b/src/mapleall/maple_me/src/me_check_cast.cpp index 712b29e1f3749c03f06fdf7d26299b3f5148d5c4..f1f71a95a57816a5759594a33037ecdf2d344ccc 100644 --- a/src/mapleall/maple_me/src/me_check_cast.cpp +++ b/src/mapleall/maple_me/src/me_check_cast.cpp @@ 
-54,7 +54,7 @@ std::string GetLabel(AnnotationType &aType) { return lable; } -void CheckCast::DumpGenericNode(GenericNode &node, std::ostream &out) { +void CheckCast::DumpGenericNode(GenericNode &node, std::ostream &out) const { std::string lable = GetLabel(*node.aType); out << node.aType->GetId() << " [label=\"" << lable << "\"];\n"; if (node.next != nullptr) { @@ -69,7 +69,7 @@ void CheckCast::DumpGenericGraph() { CHECK_FATAL(fileBufPtr, "open file : %s failed!", outFile.c_str()); std::ostream dotFile(&fileBuf); dotFile << "digraph InequalityGraph {\n"; - for (auto pair : created) { + for (const auto &pair : std::as_const(created)) { GenericNode *node = pair.second; DumpGenericNode(*node, dotFile); } @@ -105,7 +105,7 @@ void CheckCast::BuildGenericGraph(AnnotationType *annoType) { switch (annoType->GetKind()) { case kGenericType: { GenericType *gType = static_cast(annoType); - for (auto pair : gType->GetGenericMap()) { + for (const auto &pair : std::as_const(gType->GetGenericMap())) { GenericDeclare *gDeclare = pair.first; AnnotationType *realType = pair.second; GenericNode *gDeclareNode = GetOrCreateGenericNode(gDeclare); @@ -401,7 +401,7 @@ void CheckCast::AddClassInheritanceInfo(MIRType &mirType) { } // varStruct is parent, callStruct is child -bool CheckCast::ExactlyMatch(MIRStructType &varStruct, MIRStructType &callStruct) { +bool CheckCast::ExactlyMatch(MIRStructType &varStruct, MIRStructType &callStruct) const { if (varStruct.GetGenericDeclare().size() == 0 || callStruct.GetGenericDeclare().size() == 0) { return false; } @@ -457,7 +457,7 @@ AnnotationType *CheckCast::CloneNewAnnotationType(AnnotationType *at, MIRStructT return newGT; } -bool CheckCast::RetIsGenericRelative(MIRFunction &callee) { +bool CheckCast::RetIsGenericRelative(MIRFunction &callee) const { if (callee.GetFuncGenericRet() == nullptr) { return false; } @@ -567,7 +567,7 @@ bool CheckCast::ProvedByAnnotationInfo(const IntrinsiccallMeStmt &callNode) { return result; } -void 
CheckCast::RemoveRedundantCheckCast(MeStmt &stmt, BB &bb) { +void CheckCast::RemoveRedundantCheckCast(MeStmt &stmt, BB &bb) const { if (stmt.GetOp() == OP_intrinsiccallwithtypeassigned) { auto *callAssign = static_cast(&stmt); ScalarMeExpr *lhs = callAssign->GetAssignedLHS(); diff --git a/src/mapleall/maple_me/src/me_delegate_rc.cpp b/src/mapleall/maple_me/src/me_delegate_rc.cpp index 9319119451f147f94705211333b4cbc10e92bce5..ac528e629c484ae8dda5744a797073e27660e15e 100644 --- a/src/mapleall/maple_me/src/me_delegate_rc.cpp +++ b/src/mapleall/maple_me/src/me_delegate_rc.cpp @@ -62,9 +62,10 @@ const std::set canThrowIntrinsicsList { maple::INTRN_JAVA_THROW_ARITHMETIC, maple::INTRN_JAVA_THROW_CLASSCAST, }; -const std::set whitelistFunc { +const std::set kWhiteListFunc { #include "rcwhitelist.def" }; +const size_t kMinIdxInVec = 2; } namespace maple { @@ -313,7 +314,7 @@ RegMeExpr *DelegateRC::RHSTempDelegated(MeExpr &rhs, const MeStmt &useStmt) { return nullptr; } // The index number in originalStVector is bigger than two. - if ((func.GetHints() & kPlacementRCed) && ssaTab.GetVersionsIndicesSize(ost->GetIndex()) > 2) { + if ((func.GetHints() & kPlacementRCed) != 0 && ssaTab.GetVersionsIndicesSize(ost->GetIndex()) > kMinIdxInVec) { return nullptr; } if (rhsVar.GetDefBy() == kDefByMustDef) { @@ -439,7 +440,7 @@ void DelegateRC::DelegateRCTemp(MeStmt &stmt) { } case OP_return: { std::string funcName = func.GetMirFunc()->GetName(); - if (whitelistFunc.find(funcName) != whitelistFunc.end()) { + if (kWhiteListFunc.find(funcName) != kWhiteListFunc.end()) { break; } auto &retStmt = static_cast(stmt); @@ -549,7 +550,7 @@ bool DelegateRC::CanOmitRC4LHSVar(const MeStmt &stmt, bool &onlyWithDecref) cons const OriginalSt *ost = theLhs->GetOst(); if (!ost->IsLocal() || ost->IsFormal() || // avoid multi-version vars because of it is hard to find the decrefreset. 
- ((func.GetHints() & kPlacementRCed) && ssaTab.GetVersionsIndicesSize(ost->GetIndex()) > 2)) { + ((func.GetHints() & kPlacementRCed) != 0 && ssaTab.GetVersionsIndicesSize(ost->GetIndex()) > kMinIdxInVec)) { return false; } if (ost->GetMIRSymbol()->IsInstrumented()) { @@ -601,7 +602,8 @@ bool DelegateRC::CanOmitRC4LHSVar(const MeStmt &stmt, bool &onlyWithDecref) cons const OriginalSt *ost = theLhs->GetOst(); if (!ost->IsLocal() || ost->IsFormal() || // avoid multi-version vars because of it is hard to find the decrefreset. - ((func.GetHints() & kPlacementRCed) && ssaTab.GetVersionsIndicesSize(ost->GetIndex()) > 2)) { + ((func.GetHints() & kPlacementRCed) != 0 && + ssaTab.GetVersionsIndicesSize(ost->GetIndex()) > kMinIdxInVec)) { return false; } if (verStCantDelegate[theLhs->GetVstIdx()]) { @@ -678,8 +680,8 @@ void DelegateRC::RenameDelegatedRefVarUses(MeStmt &meStmt, MeExpr &meExpr) { } if (meExpr.GetMeOp() == kMeOpVar) { auto &varMeExpr = static_cast(meExpr); - auto it = refVar2RegMap.find(&varMeExpr); - if (it != refVar2RegMap.end()) { + const auto it = std::as_const(refVar2RegMap).find(&varMeExpr); + if (it != refVar2RegMap.cend()) { (void)irMap.ReplaceMeExprStmt(meStmt, varMeExpr, *it->second); } } @@ -803,7 +805,7 @@ void DelegateRC::CleanUpDeadLocalRefVar(const std::set &liveLocalrefvars } intrin->EraseOpnds(intrin->GetOpnds().begin() + nextPos, intrin->GetOpnds().end()); } - if (func.GetHints() & kPlacementRCed) { // delete decref if opnd not in livelocalrefvars + if ((func.GetHints() & kPlacementRCed) != 0) { // delete decref if opnd not in livelocalrefvars auto eIt = cfg->valid_end(); for (auto bIt = cfg->valid_begin(); bIt != eIt; ++bIt) { auto *bb = *bIt; diff --git a/src/mapleall/maple_me/src/me_emit.cpp b/src/mapleall/maple_me/src/me_emit.cpp index 2379d4f4ba51fca4d5a97b5c8d6cae5f6a609510..cbce32763163ddc44bef30fee1283d5ca69a16da 100644 --- a/src/mapleall/maple_me/src/me_emit.cpp +++ b/src/mapleall/maple_me/src/me_emit.cpp @@ -52,7 +52,7 @@ static void 
ResetDependentedSymbolLive(MIRConst *mirConst) { void ResetDependentedSymbolLive(MIRFunction *func) { for (size_t k = 1; k < func->GetSymTab()->GetSymbolTableSize(); ++k) { - MIRSymbol *sym = func->GetSymTab()->GetSymbolFromStIdx(k); + MIRSymbol *sym = func->GetSymTab()->GetSymbolFromStIdx(static_cast(k)); CHECK_FATAL(sym, "sym is nullptr!"); if (!sym->IsConst()) { continue; diff --git a/src/mapleall/maple_me/src/me_fsaa.cpp b/src/mapleall/maple_me/src/me_fsaa.cpp index 538970c3d93c649e9c2859a7d36fc6867fd0efe1..c6182758e7c33a7be83ceeaf928f23cd40eb10f3 100644 --- a/src/mapleall/maple_me/src/me_fsaa.cpp +++ b/src/mapleall/maple_me/src/me_fsaa.cpp @@ -169,10 +169,7 @@ bool AccessSameMemory(const IreadSSANode &iread, const MayDefNode &maydef) { // if the pointer represented by vst is found to have a unique pointer value, // return the BB of the definition BB *FSAA::FindUniquePointerValueDefBB(VersionSt *vst) { - if (vst->IsInitVersion()) { - return nullptr; - } - if (vst->GetDefType() != VersionSt::kAssign) { + if (vst->IsInitVersion() || vst->GetDefType() != VersionSt::kAssign) { return nullptr; } UnaryStmtNode *ass = static_cast(vst->GetAssignNode()); @@ -223,7 +220,7 @@ void FSAA::EraseMayDefItem(TypeOfMayDefList &mayDefNodes, MapleMaprunRenameOnly = true; ssa->UpdateDom(dom); // dom info may be set invalid in dse when cfg is modified - ssa->RenameAllBBs(cfg); + ssa->RenameAllBBs(*cfg); ssa->VerifySSA(); if (DEBUGFUNC_NEWPM(f)) { diff --git a/src/mapleall/maple_me/src/me_func_opt.cpp b/src/mapleall/maple_me/src/me_func_opt.cpp index 81bdf22acfd0a723193b0db87b6cbb7fc2f83031..bf6e4055d7755dc4fc83b2d838b4c7322e1f3310 100644 --- a/src/mapleall/maple_me/src/me_func_opt.cpp +++ b/src/mapleall/maple_me/src/me_func_opt.cpp @@ -13,7 +13,6 @@ * See the Mulan PSL v2 for more details. 
*/ #include "me_func_opt.h" -#include "thread_env.h" namespace maple { #ifdef NOT_USED diff --git a/src/mapleall/maple_me/src/me_gc_lowering.cpp b/src/mapleall/maple_me/src/me_gc_lowering.cpp index 216c405c09d63f2f83c20ef2c0b8afe7df116fa1..c6cd76589a6d00d1e499b0fa30c3ca168d3b42dc 100644 --- a/src/mapleall/maple_me/src/me_gc_lowering.cpp +++ b/src/mapleall/maple_me/src/me_gc_lowering.cpp @@ -86,7 +86,7 @@ void GCLowering::HandleVarAssignMeStmt(MeStmt &stmt) { stmt.GetBB()->ReplaceMeStmt(&stmt, writeRefCall); } -MIRIntrinsicID GCLowering::SelectWriteBarrier(const MeStmt &stmt) { +MIRIntrinsicID GCLowering::SelectWriteBarrier(const MeStmt &stmt) const { MeExpr *lhs = nullptr; if (stmt.GetOp() == OP_dassign) { lhs = stmt.GetLHS(); @@ -110,7 +110,7 @@ static void CheckRemove(MeStmt *stmt, Opcode op) { } } -MIRIntrinsicID GCLowering::PrepareVolatileCall(const MeStmt &stmt, MIRIntrinsicID intrnId) { +MIRIntrinsicID GCLowering::PrepareVolatileCall(const MeStmt &stmt, MIRIntrinsicID intrnId) const { CheckRemove(stmt.GetPrev(), OP_membarrelease); CheckRemove(stmt.GetNext(), OP_membarstoreload); return intrnId; @@ -127,7 +127,7 @@ void GCLowering::HandleIvarAssignMeStmt(MeStmt &stmt) { stmt.GetBB()->ReplaceMeStmt(&stmt, writeRefCall); } -MeExpr *GCLowering::GetBase(IvarMeExpr &ivar) { +MeExpr *GCLowering::GetBase(IvarMeExpr &ivar) const { MeExpr *base = ivar.GetBase(); CHECK_NULL_FATAL(base); if (!Options::buildApp || ivar.GetFieldID() != 0 || base->GetMeOp() != kMeOpReg) { diff --git a/src/mapleall/maple_me/src/me_gc_write_barrier_opt.cpp b/src/mapleall/maple_me/src/me_gc_write_barrier_opt.cpp index 93b236dbc0c2ccc99dac89cc28e0775ca06cf32b..e01db51b4c5920ab5cea6c042054866bb0f53deb 100644 --- a/src/mapleall/maple_me/src/me_gc_write_barrier_opt.cpp +++ b/src/mapleall/maple_me/src/me_gc_write_barrier_opt.cpp @@ -46,7 +46,7 @@ void GCWriteBarrierOpt::Prepare() { void GCWriteBarrierOpt::GCLower(BB &bb, std::map> &writeBarrierMap) { // to record stack size std::map savedStacksize; 
- for (const auto &item : writeBarrierMap) { + for (const auto &item : std::as_const(writeBarrierMap)) { savedStacksize[item.first] = item.second.size(); } for (auto &stmt : bb.GetMeStmts()) { @@ -179,17 +179,17 @@ bool GCWriteBarrierOpt::HasYieldPoint(const MeStmt &start, const MeStmt &end) { return false; } -bool GCWriteBarrierOpt::HasCallAfterStmt(const MeStmt &stmt) { +bool GCWriteBarrierOpt::HasCallAfterStmt(const MeStmt &stmt) const { const MeStmt &lastMeStmt = stmt.GetBB()->GetMeStmts().back(); return HasCallBetweenStmt(stmt, lastMeStmt); } -bool GCWriteBarrierOpt::HasCallBeforeStmt(const MeStmt &stmt) { +bool GCWriteBarrierOpt::HasCallBeforeStmt(const MeStmt &stmt) const { const MeStmt &firstMeStmt = stmt.GetBB()->GetMeStmts().front(); return HasCallBetweenStmt(firstMeStmt, stmt); } -bool GCWriteBarrierOpt::HasCallBetweenStmt(const MeStmt &start, const MeStmt &end) { +bool GCWriteBarrierOpt::HasCallBetweenStmt(const MeStmt &start, const MeStmt &end) const { CHECK_FATAL(start.GetBB() == end.GetBB(), "NYI."); for (const MeStmt *meStmt = &start; meStmt != &end; meStmt = meStmt->GetNext()) { if (IsCall(*meStmt)) { diff --git a/src/mapleall/maple_me/src/me_gvn.cpp b/src/mapleall/maple_me/src/me_gvn.cpp index 9e5f3f9c2db9dcdc2bb13edda174bc635b7479cb..4b64b56cea1b22abb1c2008cc0f8579fbac8b318 100644 --- a/src/mapleall/maple_me/src/me_gvn.cpp +++ b/src/mapleall/maple_me/src/me_gvn.cpp @@ -27,7 +27,6 @@ #include "dominance.h" #include "global_tables.h" #include "irmap.h" -#include "itab_util.h" #include "maple_phase.h" #include "me_cfg.h" #include "me_dominance.h" @@ -255,7 +254,7 @@ class VnExpr { class PhiVnExpr : public VnExpr { public: explicit PhiVnExpr(const BB &bb) : VnExpr(VnKind::kVnPhi), defBB(bb) {} - virtual ~PhiVnExpr() = default; + ~PhiVnExpr() override = default; PhiVnExpr(size_t vnExprID, const PhiVnExpr &phiVnExpr) : VnExpr(VnKind::kVnPhi, vnExprID), defBB(phiVnExpr.defBB) { opnds.insert(opnds.end(), phiVnExpr.opnds.begin(), phiVnExpr.opnds.end()); 
@@ -324,7 +323,10 @@ class IvarVnExpr : public VnExpr { mu(ivarVnExpr.mu), defStmtRank(ivarVnExpr.defStmtRank) {} - virtual ~IvarVnExpr() = default; + ~IvarVnExpr() override { + baseAddr = nullptr; + mu = nullptr; + } void SetBaseAddr(const CongruenceClass *baseVn) { baseAddr = baseVn; @@ -382,7 +384,7 @@ class NaryVnExpr : public VnExpr { opnds.insert(opnds.end(), naryVnExpr.opnds.begin(), naryVnExpr.opnds.end()); } - virtual ~NaryVnExpr() = default; + ~NaryVnExpr() override = default; void AddOpnd(const CongruenceClass &opnd) { opnds.emplace_back(&opnd); @@ -442,7 +444,7 @@ class OpVnExpr : public VnExpr { opnds.insert(opnds.end(), opVnExpr.opnds.begin(), opVnExpr.opnds.end()); } - virtual ~OpVnExpr() = default; + ~OpVnExpr() override = default; void AddOpnd(const CongruenceClass &opnd) { opnds.emplace_back(&opnd); @@ -640,7 +642,7 @@ class GVN { void MarkEntryBBReachable(); // mark succ of bb reachable according to last stmt of bb void MarkSuccBBReachable(const BB &bb); - SCCNode *GetSCCofBB(BB *bb) const; + SCCNode *GetSCCofBB(const BB *bb) const; // collect def-use info for SCC void CollectUseInfoInSCC(const SCCNode &scc); @@ -663,7 +665,7 @@ class GVN { // if its subexpr has vn created before, it will just use the vn and not create a new one. 
CongruenceClass *CreateVnForMeExprIteratively(MeExpr &expr); // try to get vn from expr2vn first, if not found, create a new one - CongruenceClass *GetOrCreateVnForMeExpr(MeExpr &rhs); + CongruenceClass *GetOrCreateVnForMeExpr(MeExpr &expr); CongruenceClass *GetOrCreateVnForPhi(const MePhiNode &phi, const BB &bb); void SetVnForExpr(MeExpr &expr, CongruenceClass &vn); @@ -687,7 +689,7 @@ class GVN { void FullRedundantElimination(); - void AddToVnVector(CongruenceClass *vn); + void AddToVnVector(const CongruenceClass &vn); void DumpVnVector() const; void DumpVnExprs() const; void DumpSCC(const SCCNode &scc) const; @@ -761,11 +763,11 @@ void GVN::RankStmtAndPhi() { for (auto *scc : sccTopologicalVec) { std::vector rpo(scc->GetNodes().begin(), scc->GetNodes().end()); const auto &bbId2RpoId = dom.GetReversePostOrderId(); - std::sort(rpo.begin(), rpo.end(), [&bbId2RpoId](BB *forward, BB *backward) { + std::sort(rpo.begin(), rpo.end(), [&bbId2RpoId](const BB *forward, const BB *backward) { return bbId2RpoId[forward->GetBBId()] < bbId2RpoId[backward->GetBBId()]; }); for (auto *bb : rpo) { - for (auto &phi : bb->GetMePhiList()) { + for (const auto &phi : std::as_const(bb->GetMePhiList())) { rank[phi.second] = ++kRankNum; // start from 1 } for (auto &stmt : bb->GetMeStmts()) { @@ -858,7 +860,7 @@ void GVN::MarkSuccBBReachable(const BB &bb) { } // NEEDFIX: Add GetSCCNode to BB -SCCNode *GVN::GetSCCofBB(BB *bb) const { +SCCNode *GVN::GetSCCofBB(const BB *bb) const { (void)bb; return nullptr; } @@ -883,7 +885,7 @@ CongruenceClass *GVN::GetOrCreateVnForHashedVnExpr(const VnExpr &hashedVnExpr) { CongruenceClass *newVn = tmpMP.New(); DEBUG_LOG() << "Create new vn GetID() << "> for vnexpr \n"; vnExpr2Vn[vnExprID] = newVn; - AddToVnVector(newVn); + AddToVnVector(*newVn); return newVn; } @@ -898,7 +900,7 @@ CongruenceClass *GVN::CreateVnForMeExpr(MeExpr &expr) { CongruenceClass *vn = tmpMP.New(expr); DEBUG_LOG() << "Create new vn GetID() << "> for expr \n"; SetVnForExpr(expr, *vn); - 
AddToVnVector(vn); + AddToVnVector(*vn); return vn; } @@ -1144,7 +1146,7 @@ void GVN::MarkExprNeedUpdated(const MeExpr &expr) { BB *GVN::GetFirstReachableBB(const SCCNode &scc) { std::vector rpo(scc.GetNodes().begin(), scc.GetNodes().end()); const auto &bbId2RpoId = dom.GetReversePostOrderId(); - std::sort(rpo.begin(), rpo.end(), [&bbId2RpoId](BB *forward, BB *backward) { + std::sort(rpo.begin(), rpo.end(), [&bbId2RpoId](const BB *forward, const BB *backward) { return bbId2RpoId[forward->GetBBId()] < bbId2RpoId[backward->GetBBId()]; }); auto it = std::find_if(rpo.begin(), rpo.end(), [this](const BB *bb) { @@ -1165,7 +1167,7 @@ void GVN::TouchPhisStmtsInBB(BB &bb, std::set &visited, const std::s if (!empty) { DEBUG_LOG() << "Touch stmts/phis in BB" << bb.GetBBId().GetIdx() << "\n"; } - for (auto &phi : bb.GetMePhiList()) { + for (const auto &phi : std::as_const(bb.GetMePhiList())) { if (phi.second->GetIsLive()) { touch.emplace(phi.second); needTouchSucc = false; @@ -1221,7 +1223,7 @@ void GVN::GenVnForSingleBB(BB &bb) { return; } // 1. 
process phis - for (auto &phiItem : bb.GetMePhiList()) { + for (const auto &phiItem : std::as_const(bb.GetMePhiList())) { if (!phiItem.second->GetIsLive()) { continue; } @@ -1284,7 +1286,7 @@ void GVN::GenVnForSingleBB(BB &bb) { } // 2.4 process chilist of stmt if (stmt.GetChiList() != nullptr) { - for (auto &chiItem : *stmt.GetChiList()) { + for (const auto &chiItem : std::as_const(*stmt.GetChiList())) { MeExpr *lhs = chiItem.second->GetLHS(); CongruenceClass *lhsVn = GetVnOfMeExpr(*lhs); if (!lhs->IsVolatile() || lhsVn == nullptr) { @@ -1370,7 +1372,7 @@ void GVN::GenVnForLhsValue(MeStmt &stmt, const std::set &touchedBB) } // process chilist of stmt if (stmt.GetChiList() != nullptr) { - for (auto &chiItem : *stmt.GetChiList()) { + for (const auto &chiItem : std::as_const(*stmt.GetChiList())) { MeExpr *chiLHS = chiItem.second->GetLHS(); if (GetVnOfMeExpr(*chiLHS) == nullptr) { (void) CreateVnForMeExpr(*chiLHS); @@ -1457,11 +1459,11 @@ void GVN::GenVnForSCCIteratively(const SCCNode &scc) { DEBUG_LOG() << "-- End Generating GVN for SCC " << scc.GetID() << "\n"; } -void GVN::AddToVnVector(CongruenceClass *vn) { - if (vnVector.size() <= vn->GetID()) { - vnVector.resize(vn->GetID() + 1, nullptr); +void GVN::AddToVnVector(const CongruenceClass &vn) { + if (vnVector.size() <= vn.GetID()) { + vnVector.resize(vn.GetID() + 1, nullptr); } - vnVector[vn->GetID()] = vn; + vnVector[vn.GetID()] = &vn; } void GVN::DumpVnVector() const { @@ -1640,10 +1642,10 @@ void GVN::DoPhiFRE(MePhiNode &phi, MeStmt *firstStmt) { } // we can not replace phi use, skip count it auto *useSites = useInfo.GetUseSitesOfExpr(recordExpr); - auto it = std::find_if(useSites->begin(), useSites->end(), [this, &phi](const UseItem &use) { + const auto it = std::find_if(useSites->cbegin(), useSites->cend(), [this, &phi](const UseItem &use) { return use.IsUseByStmt() && this->dom.Dominate(*phi.GetDefBB(), *use.GetUseBB()); }); - if (it == useSites->end()) { + if (it == useSites->cend()) { continue; } 
realExprCnt++; @@ -1723,7 +1725,7 @@ void GVN::FullRedundantElimination() { } // DoPhiFRE may generate new stmts at begin of BB MeStmt *stmt = bb->GetFirstMe(); - for (auto &phi : bb->GetMePhiList()) { + for (const auto &phi : std::as_const(bb->GetMePhiList())) { if (phi.second->GetIsLive()) { DoPhiFRE(*phi.second, stmt); } @@ -1798,13 +1800,7 @@ bool MEGVN::PhaseRun(maple::MeFunction &f) { auto *aliasClass = FORCE_GET(MEAliasClass); MeHDSE hdse(f, *dom, *pdom, *f.GetIRMap(), aliasClass, DEBUGFUNC_NEWPM(f)); hdse.hdseKeepRef = MeOption::dseKeepRef; - hdse.DoHDSE(); - if (hdse.NeedUNClean()) { - bool cfgChange = f.GetCfg()->UnreachCodeAnalysis(true); - if (cfgChange) { - FORCE_INVALID(MEDominance, f); - } - } + hdse.DoHDSESafely(&f, *GetAnalysisInfoHook()); return false; } } // namespace maple diff --git a/src/mapleall/maple_me/src/me_hdse.cpp b/src/mapleall/maple_me/src/me_hdse.cpp index a0de4a84975a72755c4995eadd3075cd46be1051..5bb353930655ecc0ad125339c9894099fda0f505 100644 --- a/src/mapleall/maple_me/src/me_hdse.cpp +++ b/src/mapleall/maple_me/src/me_hdse.cpp @@ -172,7 +172,6 @@ void MEHdse::GetAnalysisDependence(maple::AnalysisDep &aDep) const { aDep.AddRequired(); aDep.AddRequired(); aDep.AddRequired(); - aDep.PreservedAllExcept(); aDep.PreservedAllExcept(); } @@ -209,11 +208,15 @@ bool MEHdse::PhaseRun(maple::MeFunction &f) { if (f.hdseRuns > 2) { hdse.SetRemoveRedefine(true); } - hdse.DoHDSE(); + bool isCModule = f.GetMIRModule().IsCModule(); + hdse.DoHDSESafely(isCModule ? 
&f : nullptr, *GetAnalysisInfoHook()); hdse.BackwardSubstitution(); - MakeEmptyTrysUnreachable(f); - (void)f.GetCfg()->UnreachCodeAnalysis(true); - f.GetCfg()->WontExitAnalysis(); + if (!isCModule) { + MakeEmptyTrysUnreachable(f); + (void)f.GetCfg()->UnreachCodeAnalysis(true); + f.GetCfg()->WontExitAnalysis(); + FORCE_INVALID(MEDominance, f); + } // update frequency if (hdse.UpdateFreq()) { if (f.GetCfg()->DumpIRProfileFile()) { diff --git a/src/mapleall/maple_me/src/me_inequality_graph.cpp b/src/mapleall/maple_me/src/me_inequality_graph.cpp index 7b035c410767db56fa7321272098b10341004e95..824cea258d57b3b52a752a940713389d331f74e1 100644 --- a/src/mapleall/maple_me/src/me_inequality_graph.cpp +++ b/src/mapleall/maple_me/src/me_inequality_graph.cpp @@ -97,7 +97,7 @@ InequalEdge *InequalityGraph::AddEdge(ESSABaseNode &from, ESSABaseNode &to, int6 return ePtr; } -void InequalityGraph::AddPhiEdge(ESSABaseNode &from, ESSABaseNode &to, EdgeType type) { +void InequalityGraph::AddPhiEdge(ESSABaseNode &from, ESSABaseNode &to, EdgeType type) const { std::unique_ptr edge = std::make_unique(0, type); CHECK_FATAL(edge != nullptr, "new failed"); if (type == EdgeType::kUpper) { @@ -113,7 +113,7 @@ void InequalityGraph::AddPhiEdge(ESSABaseNode &from, ESSABaseNode &to, EdgeType } } -void InequalityGraph::AddEdge(ESSABaseNode &from, ESSABaseNode &to, MeExpr &value, bool positive, EdgeType type) { +void InequalityGraph::AddEdge(ESSABaseNode &from, ESSABaseNode &to, MeExpr &value, bool positive, EdgeType type) const { InequalEdge tmpEdge = InequalEdge(value, positive, type); if (HasEdge(from, to, tmpEdge)) { return; diff --git a/src/mapleall/maple_me/src/me_ir.cpp b/src/mapleall/maple_me/src/me_ir.cpp index 71e31983190f86f5c51bb3a3313f330a522f3c12..50e03b57a39b9d4dd021e2e1eecdf1c54b670b4b 100644 --- a/src/mapleall/maple_me/src/me_ir.cpp +++ b/src/mapleall/maple_me/src/me_ir.cpp @@ -623,7 +623,7 @@ bool OpMeExpr::IsUseSameSymbol(const MeExpr &expr) const { } auto &opMeExpr = 
static_cast(expr); for (uint32 i = 0; i < kOperandNumTernary; ++i) { - if (opnds[i]) { + if (opnds[i] != nullptr) { if (!opMeExpr.opnds[i]) { return false; } @@ -631,7 +631,7 @@ bool OpMeExpr::IsUseSameSymbol(const MeExpr &expr) const { return false; } } else { - if (opMeExpr.opnds[i]) { + if (opMeExpr.opnds[i] != nullptr) { return false; } } @@ -639,8 +639,7 @@ bool OpMeExpr::IsUseSameSymbol(const MeExpr &expr) const { return true; } -MeExpr *OpMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *OpMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { if (!kOpcodeInfo.NotPure(GetOp())) { auto *opExpr = static_cast(&expr); @@ -780,8 +779,7 @@ int64 ConstMeExpr::GetSXTIntValue() const { return safe_cast(constVal)->GetSXTValue(); } -MeExpr *ConstMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *ConstMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *constExpr = static_cast(&expr); while (constExpr != nullptr) { @@ -795,8 +793,7 @@ MeExpr *ConstMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { return nullptr; } -MeExpr *ConststrMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *ConststrMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *constStrExpr = static_cast(&expr); while (constStrExpr != nullptr) { @@ -809,8 +806,7 @@ MeExpr *ConststrMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const return nullptr; } -MeExpr *Conststr16MeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *Conststr16MeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *constStr16Expr = static_cast(&expr); while (constStr16Expr != nullptr) { @@ -823,8 +819,7 @@ MeExpr *Conststr16MeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) con return nullptr; } -MeExpr 
*SizeoftypeMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *SizeoftypeMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *sizeoftypeExpr = static_cast(&expr); while (sizeoftypeExpr != nullptr) { @@ -837,8 +832,7 @@ MeExpr *SizeoftypeMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) con return nullptr; } -MeExpr *FieldsDistMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *FieldsDistMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *fieldsDistExpr = static_cast(&expr); while (fieldsDistExpr != nullptr) { @@ -852,8 +846,7 @@ MeExpr *FieldsDistMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) con return nullptr; } -MeExpr *AddrofMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *AddrofMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *addrofExpr = static_cast(&expr); while (addrofExpr != nullptr) { @@ -868,8 +861,7 @@ MeExpr *AddrofMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { return nullptr; } -MeExpr *NaryMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *NaryMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *naryExpr = static_cast(&expr); while (naryExpr != nullptr) { @@ -884,8 +876,7 @@ MeExpr *NaryMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { return nullptr; } -MeExpr *AddroffuncMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *AddroffuncMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *addroffuncExpr = static_cast(&expr); while (addroffuncExpr != nullptr) { @@ -898,8 +889,7 @@ MeExpr *AddroffuncMeExpr::GetIdenticalExpr(MeExpr &expr, bool isConstructor) con return nullptr; } -MeExpr *AddroflabelMeExpr::GetIdenticalExpr(MeExpr 
&expr, bool isConstructor) const { - (void)isConstructor; +MeExpr *AddroflabelMeExpr::GetIdenticalExpr(MeExpr &expr, bool /* isConstructor */) const { auto *addroflabelExpr = static_cast(&expr); while (addroflabelExpr != nullptr) { @@ -947,7 +937,7 @@ void MePhiNode::Dump(const IRMap *irMap) const { LogInfo::MapleLogger() << '\n'; } -void VarMeExpr::Dump(const IRMap *irMap, int32) const { +void VarMeExpr::Dump(const IRMap *irMap, int32 /* indent */) const { CHECK_NULL_FATAL(irMap); LogInfo::MapleLogger() << "VAR "; GetOst()->Dump(); @@ -958,7 +948,7 @@ void VarMeExpr::Dump(const IRMap *irMap, int32) const { } } -void RegMeExpr::Dump(const IRMap *irMap, int32) const { +void RegMeExpr::Dump(const IRMap *irMap, int32 /* indent */) const { CHECK_NULL_FATAL(irMap); LogInfo::MapleLogger() << "REGINDX:" << GetRegIdx(); LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()); @@ -968,19 +958,19 @@ void RegMeExpr::Dump(const IRMap *irMap, int32) const { LogInfo::MapleLogger() << " mx" << GetExprID(); } -void AddroffuncMeExpr::Dump(const IRMap*, int32) const { +void AddroffuncMeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << "ADDROFFUNC:"; LogInfo::MapleLogger() << GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx)->GetName(); LogInfo::MapleLogger() << " mx" << GetExprID(); } -void AddroflabelMeExpr::Dump(const IRMap *irMap, int32) const { +void AddroflabelMeExpr::Dump(const IRMap *irMap, int32 /* indent */) const { LogInfo::MapleLogger() << "ADDROFLABEL:"; LogInfo::MapleLogger() << " @" << irMap->GetMIRModule().CurFunction()->GetLabelName(labelIdx); LogInfo::MapleLogger() << " mx" << GetExprID(); } -void GcmallocMeExpr::Dump(const IRMap*, int32) const { +void GcmallocMeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOp()).name << " " << GetPrimTypeName(GetPrimType()); LogInfo::MapleLogger() << " "; GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); @@ 
-988,7 +978,7 @@ void GcmallocMeExpr::Dump(const IRMap*, int32) const { LogInfo::MapleLogger() << " "; } -void ConstMeExpr::Dump(const IRMap*, int32) const { +void ConstMeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << "CONST "; LogInfo::MapleLogger() << GetPrimTypeName(constVal->GetType().GetPrimType()) << " "; CHECK_FATAL(constVal != nullptr, "constVal is null"); @@ -996,21 +986,21 @@ void ConstMeExpr::Dump(const IRMap*, int32) const { LogInfo::MapleLogger() << " mx" << GetExprID(); } -void ConststrMeExpr::Dump(const IRMap*, int32) const { +void ConststrMeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << "CONSTSTR"; LogInfo::MapleLogger() << " "; LogInfo::MapleLogger() << strIdx; LogInfo::MapleLogger() << " mx" << GetExprID(); } -void Conststr16MeExpr::Dump(const IRMap*, int32 indent) const { +void Conststr16MeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << "CONSTSTR16"; LogInfo::MapleLogger() << " "; LogInfo::MapleLogger() << strIdx; LogInfo::MapleLogger() << " mx" << GetExprID(); } -void SizeoftypeMeExpr::Dump(const IRMap*, int32) const { +void SizeoftypeMeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOp()).name << " " << GetPrimTypeName(GetPrimType()); LogInfo::MapleLogger() << " TYIDX:" << tyIdx; MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); @@ -1018,7 +1008,7 @@ void SizeoftypeMeExpr::Dump(const IRMap*, int32) const { LogInfo::MapleLogger() << " mx" << GetExprID(); } -void FieldsDistMeExpr::Dump(const IRMap*, int32) const { +void FieldsDistMeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOp()).name << " " << GetPrimTypeName(GetPrimType()); LogInfo::MapleLogger() << " TYIDX:" << tyIdx; MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); @@ -1028,7 +1018,7 @@ void FieldsDistMeExpr::Dump(const IRMap*, 
int32) const { LogInfo::MapleLogger() << " mx" << GetExprID(); } -void AddrofMeExpr::Dump(const IRMap*, int32) const { +void AddrofMeExpr::Dump(const IRMap*, int32 /* indent */) const { LogInfo::MapleLogger() << "ADDROF:"; GetOst()->Dump(); LogInfo::MapleLogger() << " (field)" << GetFieldID(); @@ -1047,7 +1037,7 @@ void OpMeExpr::Dump(const IRMap *irMap, int32 indent) const { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "opnd[0] = "; opnds[0]->Dump(irMap, indent + 1); - if (opnds[1]) { + if (opnds[1] != nullptr) { LogInfo::MapleLogger() << '\n'; } else { return; @@ -1055,7 +1045,7 @@ void OpMeExpr::Dump(const IRMap *irMap, int32 indent) const { PrintIndentation(indent + 1); LogInfo::MapleLogger() << "opnd[1] = "; opnds[1]->Dump(irMap, indent + 1); - if (opnds[2]) { + if (opnds[2] != nullptr) { LogInfo::MapleLogger() << '\n'; } else { return; @@ -1147,7 +1137,7 @@ MeExpr *MaydassignMeStmt::GetLHSRef(bool excludeLocalRefVar) { return lhs; } -MeExpr *IassignMeStmt::GetLHSRef(bool) { +MeExpr *IassignMeStmt::GetLHSRef(bool /* excludeLocalRefVar */) { CHECK_FATAL(lhsVar != nullptr, "lhsVar is null"); MIRType *baseType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsVar->GetTyIdx()); ASSERT(baseType != nullptr, "null ptr check"); @@ -1334,9 +1324,12 @@ void DumpMuList(const IRMap *irMap, const MapleMap &muLis } else { LogInfo::MapleLogger() << ", "; } - if (DumpOptions::GetDumpVsyNum() > 0 && ++count >= DumpOptions::GetDumpVsyNum()) { - LogInfo::MapleLogger() << " ... "; - break; + if (DumpOptions::GetDumpVsyNum() > 0) { + (void)++count; + if (count >= DumpOptions::GetDumpVsyNum()) { + LogInfo::MapleLogger() << " ... "; + break; + } } } LogInfo::MapleLogger() << " }\n"; @@ -1356,9 +1349,12 @@ void DumpChiList(const IRMap *irMap, const MapleMap &chiList } else { LogInfo::MapleLogger() << ", "; } - if (DumpOptions::GetDumpVsyNum() > 0 && count++ >= DumpOptions::GetDumpVsyNum()) { - LogInfo::MapleLogger() << " ... 
"; - break; + if (DumpOptions::GetDumpVsyNum() > 0) { + (void)++count; + if (count >= DumpOptions::GetDumpVsyNum()) { + LogInfo::MapleLogger() << " ... "; + break; + } } } LogInfo::MapleLogger() << " }\n"; diff --git a/src/mapleall/maple_me/src/me_ivopts.cpp b/src/mapleall/maple_me/src/me_ivopts.cpp index e702995529a91d35252c4a88616f485ea53a8218..3985dd1bd641bab8c548b99326b0d69b82ddf542 100644 --- a/src/mapleall/maple_me/src/me_ivopts.cpp +++ b/src/mapleall/maple_me/src/me_ivopts.cpp @@ -63,8 +63,7 @@ class IVUse { public: friend class IVOptData; friend class IVOptimizer; - IVUse(MeStmt *s, IV *i) - : stmt(s), iv(i) {} + IVUse(MeStmt *s, IV *i) : stmt(s), iv(i) {} private: MeStmt *stmt; // the stmts that the use belongs to @@ -181,7 +180,7 @@ class IVOptimizer { ~IVOptimizer() = default; void Run(); - void DumpIV(const IV &iv, int32 indent = 0); + void DumpIV(const IV &iv, int32 indent = 0) const; void DumpGroup(const IVGroup &group); void DumpCand(const IVCand &cand); void DumpSet(const CandSet &set); @@ -195,6 +194,8 @@ class IVOptimizer { bool CreateIVFromMul(OpMeExpr &op, MeStmt &stmt); bool CreateIVFromSub(OpMeExpr &op, MeStmt &stmt); bool CreateIVFromCvt(OpMeExpr &op, MeStmt &stmt); + bool CanCreateIV(const OpMeExpr &op, const IV &iv) const; + bool CreateIVFromUnsignedCvt(const OpMeExpr &op, const IV &iv) const; bool CreateIVFromIaddrof(OpMeExpr &op, MeStmt &stmt); OpMeExpr *TryCvtCmp(OpMeExpr &op, MeStmt &stmt); bool FindGeneralIVInExpr(MeStmt &stmt, MeExpr &expr, bool useInAddress = false); @@ -210,8 +211,9 @@ class IVOptimizer { void CreateIVCandidate(); // step4: estimate the costs of candidates in every use and its own costs void ComputeCandCost(); - int64 ComputeRatioOfStep(MeExpr &candStep, MeExpr &groupStep); - MeExpr *ComputeExtraExprOfBase(MeExpr &candBase, MeExpr &groupBase, uint64 ratio, bool &replaced, bool analysis); + int64 ComputeRatioOfStep(MeExpr &candStep, MeExpr &groupStep) const; + MeExpr *ComputeExtraExprOfBase(MeExpr &candBase, MeExpr 
&groupBase, uint64 ratio, + bool &replaced, bool analysis) const; uint32 ComputeCandCostForGroup(const IVCand &cand, IVGroup &group); void ComputeGroupCost(); uint32 ComputeSetCost(CandSet &set); @@ -219,14 +221,14 @@ class IVOptimizer { void InitSet(bool originFirst); void TryOptimize(bool originFirst); void FindCandSet(); - void TryReplaceWithCand(CandSet &set, IVCand &cand, std::unordered_map &changehange); + void TryReplaceWithCand(CandSet &set, IVCand &cand, std::unordered_map &change); bool OptimizeSet(); // step6: replace ivs with selected candidates bool IsReplaceSameOst(const MeExpr *parent, ScalarMeExpr *target); MeStmt *GetIncPos(); MeExpr *GetInvariant(MeExpr *expr); - MeExpr *ReplaceCompareOpnd(const OpMeExpr &cmp, MeExpr *compared, MeExpr *replace); - bool PrepareCompareUse(int64 &ratio, IVUse *use, IVCand *cand, MeStmt *incPos, + MeExpr *ReplaceCompareOpnd(const OpMeExpr &cmp, const MeExpr *compared, MeExpr *replace) const; + bool PrepareCompareUse(int64 &ratio, IVUse *use, IVCand *cand, const MeStmt *incPos, MeExpr *&extraExpr, MeExpr *&replace); MeExpr *GenerateRealReplace(int64 ratio, MeExpr *extraExpr, MeExpr *replace, PrimType realUseType, bool replaceCompare); @@ -248,7 +250,7 @@ class IVOptimizer { std::unordered_map invariables; // used to record the newly added invariables }; -void IVOptimizer::DumpIV(const IV &iv, int32 indent) { +void IVOptimizer::DumpIV(const IV &iv, int32 indent) const { constexpr int32 kIndent8 = 8; PrintIndentation(indent); LogInfo::MapleLogger() << "IV:\n"; @@ -501,7 +503,7 @@ bool IVOptimizer::FindBasicIVs() { bool find = false; auto *loopHeader = data->currLoop->head; // basic iv always appears in head's phi list - for (auto &phi : loopHeader->GetMePhiList()) { + for (const auto &phi : std::as_const(loopHeader->GetMePhiList())) { if (!phi.second->GetIsLive()) { continue; } @@ -664,14 +666,61 @@ bool IVOptimizer::CreateIVFromSub(OpMeExpr &op, MeStmt &stmt) { return false; } -bool IVOptimizer::CreateIVFromCvt(OpMeExpr 
&op, MeStmt &stmt) { - auto *iv = data->GetIV(*op.GetOpnd(0)); - ASSERT_NOT_NULL(iv); +bool IVOptimizer::CreateIVFromUnsignedCvt(const OpMeExpr &op, const IV &iv) const { + if ((data->realIterNum == static_cast(-1)) || + // Primtype above 8 bytes is not supported. + (GetPrimTypeSize(op.GetPrimType()) > kEightByte) || + (GetPrimTypeSize(iv.base->GetPrimType()) > kEightByte) || + (GetPrimTypeSize(iv.step->GetPrimType()) > kEightByte)) { + return false; + } + // Ensure that the steps of each iteration are consistent. + // For example: + // base: uint64_max - 1 + // step: 1 + // tripCount: 10 + // The step of first iteration is 1 and the step of second iteration is -uint64_max. + if (iv.base->GetMeOp() != kMeOpConst || iv.step->GetMeOp() != kMeOpConst) { + return false; + } + auto base = static_cast(iv.base)->GetIntValue().TruncOrExtend(op.GetOpndType()); + auto unsignedStep = static_cast(iv.step)->GetIntValue().TruncOrExtend(op.GetOpndType()); + if (unsignedStep == 0) { + return false; + } + auto signedStep = unsignedStep.TruncOrExtend(GetSignedPrimType(op.GetOpndType())); + // finalValue: base + step * tripcount + auto finalValue = base + unsignedStep * data->realIterNum; + auto res = (finalValue < base) ? (base - finalValue) : (finalValue - base); + // Compare the calculated trip count with the actual trip count. + // If it is not equal, it means that overflow occurs during iteration. 
+ IntVal zeroVersion(static_cast(0), op.GetOpndType()); + if ((finalValue < base) && + ((signedStep > zeroVersion && (res / unsignedStep) == static_cast(data->realIterNum)) || + (signedStep < zeroVersion && (res / (-signedStep)) == static_cast(data->realIterNum)))) { + } else if ((finalValue > base) && ((res / unsignedStep) == static_cast(data->realIterNum))) { + } else { + return false; + } + return true; +} + +bool IVOptimizer::CanCreateIV(const OpMeExpr &op, const IV &iv) const { if (!IsPrimitiveInteger(op.GetPrimType())) { - data->CreateGroup(stmt, *iv, kUseGeneral, &op); return false; } - if (IsUnsignedInteger(op.GetOpndType()) && GetPrimTypeSize(op.GetOpndType()) < GetPrimTypeSize(op.GetPrimType())) { + if (IsUnsignedInteger(op.GetOpndType()) && + GetPrimTypeSize(op.GetOpndType()) < GetPrimTypeSize(op.GetPrimType()) && + !CreateIVFromUnsignedCvt(op, iv)) { + return false; + } + return true; +} + +bool IVOptimizer::CreateIVFromCvt(OpMeExpr &op, MeStmt &stmt) { + auto *iv = data->GetIV(*op.GetOpnd(0)); + ASSERT_NOT_NULL(iv); + if (!CanCreateIV(op, *iv)) { data->CreateGroup(stmt, *iv, kUseGeneral, &op); return false; } @@ -703,7 +752,9 @@ bool IVOptimizer::CreateIVFromIaddrof(OpMeExpr &op, MeStmt &stmt) { auto *inc = irMap->CreateIntConstMeExpr(offset, op.GetPrimType()); auto *initValue = irMap->CreateMeExprBinary(OP_add, op.GetPrimType(), *iv->base, *inc); auto *simplified = irMap->SimplifyMeExpr(initValue); - if (simplified != nullptr) { initValue = simplified; } + if (simplified != nullptr) { + initValue = simplified; + } data->CreateIV(&op, initValue, iv->step, false); return true; } @@ -961,7 +1012,7 @@ bool IVOptimizer::LHSEscape(const ScalarMeExpr *lhs) { return false; } // find in all uses to check if the lhs escape the loop - for (auto &useSite : *useList) { + for (const auto &useSite : std::as_const(*useList)) { if (useSite.IsUseByStmt()) { auto *useBB = useSite.GetStmt()->GetBB(); if (data->currLoop->loopBBs.count(useBB->GetBBId()) == 0) { @@ -1027,7 
+1078,7 @@ void IVOptimizer::FindGeneralIVInStmt(MeStmt &stmt) { auto *opnd = stmt.GetOpnd(i); auto *opted = OptimizeInvariable(opnd); if (opted != opnd) { - irMap->ReplaceMeExprStmt(stmt, *opnd, *opted); + (void)irMap->ReplaceMeExprStmt(stmt, *opnd, *opted); opnd = opted; } bool isUsedInAddr = (stmt.GetOp() == OP_iassign || stmt.GetOp() == OP_iassignoff) && i == 0; @@ -1069,7 +1120,7 @@ void IVOptimizer::TraversalLoopBB(BB &bb, std::vector &bbVisited) { bbVisited[bb.GetBBId()] = true; if (&bb != data->currLoop->head) { - for (auto &phiMap : bb.GetMePhiList()) { + for (const auto &phiMap : std::as_const(bb.GetMePhiList())) { FindGeneralIVInPhi(*phiMap.second); } } @@ -1314,7 +1365,7 @@ void IVOptimizer::CreateIVCandidate() { } // create candidate from common offset - for (auto &it : offsetCount) { + for (const auto &it : std::as_const(offsetCount)) { if (it.second.size() > 1) { tmp = irMap->CreateRegMeExpr(it.first->GetPrimType()); auto *iv = data->GetIV(*it.first); @@ -1344,7 +1395,7 @@ static uint32 ComputeExprCost(MeExpr &expr, const MeExpr *parent = nullptr) { constexpr uint32 cvtCost = 4; constexpr uint32 addCost = 4; constexpr uint32 addressCost = 5; - constexpr uint32 mulCost = 5; + constexpr uint32 mulCost = 4; constexpr uint32 symbolCost = 9; constexpr uint32 defaultCost = 16; @@ -1392,7 +1443,7 @@ static uint32 ComputeExprCost(MeExpr &expr, const MeExpr *parent = nullptr) { } } -static uint32 ComputeAddressCost(MeExpr *expr, int64 ratio, bool hasField) { +static uint32 ComputeAddressCost(const MeExpr *expr, int64 ratio, bool hasField) { bool ratioCombine = ratio == 1 || ratio == 2 || ratio == 4 || ratio == 8; uint32 cost = 0; if (expr != nullptr) { @@ -1452,7 +1503,7 @@ void FindScalarFactor(MeExpr &expr, OpMeExpr *parentCvt, std::unordered_map(expr).GetExtIntValue(); auto it = record.find(kInvalidExprID); if (it == record.end()) { - record.emplace(kInvalidExprID, ScalarPeel(&expr, multiplier * constVal, PTY_unknown)); + 
(void)record.emplace(kInvalidExprID, ScalarPeel(&expr, multiplier * constVal, PTY_unknown)); } else { it->second.multiplier += (multiplier * constVal); } @@ -1463,7 +1514,7 @@ void FindScalarFactor(MeExpr &expr, OpMeExpr *parentCvt, std::unordered_mapGetOpndType() : expr.GetPrimType(); auto it = record.find(expr.GetExprID()); if (it == record.end()) { - record.emplace(expr.GetExprID(), ScalarPeel(&expr, multiplier, expandType)); + (void)record.emplace(expr.GetExprID(), ScalarPeel(&expr, multiplier, expandType)); } else { it->second.multiplier += multiplier; } @@ -1482,7 +1533,7 @@ void FindScalarFactor(MeExpr &expr, OpMeExpr *parentCvt, std::unordered_mapGetMeOp() != kMeOpConst && opnd1->GetMeOp() != kMeOpConst) { PrimType expandType = parentCvt ? parentCvt->GetOpndType() : expr.GetPrimType(); - record.emplace(expr.GetExprID(), ScalarPeel(&expr, multiplier, expandType)); + (void)record.emplace(expr.GetExprID(), ScalarPeel(&expr, multiplier, expandType)); return; } if (opnd0->GetMeOp() == kMeOpConst) { @@ -1496,7 +1547,7 @@ void FindScalarFactor(MeExpr &expr, OpMeExpr *parentCvt, std::unordered_mapGetOpndType() : expr.GetPrimType(); - record.emplace(expr.GetExprID(), ScalarPeel(&expr, multiplier, expandType)); + (void)record.emplace(expr.GetExprID(), ScalarPeel(&expr, multiplier, expandType)); return; } parentCvt = &op; @@ -1510,7 +1561,7 @@ void FindScalarFactor(MeExpr &expr, OpMeExpr *parentCvt, std::unordered_map candMap; std::unordered_map groupMap; FindScalarFactor(candBase, nullptr, candMap, 1, false, analysis); FindScalarFactor(groupBase, nullptr, groupMap, 1, false, analysis); MeExpr *extraExpr = nullptr; - int64 candConst = 0; + uint64 candConst = 0; int64 groupConst = 0; for (auto &itGroup : groupMap) { auto itCand = candMap.find(itGroup.first); if (itGroup.first == kInvalidExprID) { - candConst = itCand == candMap.end() ? 0 : itCand->second.multiplier * ratio; + candConst = itCand == candMap.end() ? 
+ 0 : static_cast(itCand->second.multiplier) * ratio; groupConst = itGroup.second.multiplier; continue; } @@ -1598,7 +1650,8 @@ MeExpr *IVOptimizer::ComputeExtraExprOfBase(MeExpr &candBase, MeExpr &groupBase, extraExpr = extraExpr == nullptr ? expr : irMap->CreateMeExprBinary(OP_add, groupBase.GetPrimType(), *extraExpr, *expr); } else { - int64 newMultiplier = itGroup.second.multiplier - (itCand->second.multiplier * ratio); + int64 newMultiplier = static_cast(static_cast(itGroup.second.multiplier) - + (static_cast(itCand->second.multiplier) * ratio)); if (newMultiplier == 0) { continue; } @@ -1622,15 +1675,15 @@ MeExpr *IVOptimizer::ComputeExtraExprOfBase(MeExpr &candBase, MeExpr &groupBase, for (auto &itCand : candMap) { auto itGroup = groupMap.find(itCand.first); if (itCand.first == kInvalidExprID) { - candConst = itCand.second.multiplier * ratio; + candConst = static_cast(itCand.second.multiplier) * ratio; groupConst = itGroup == groupMap.end() ? 0 : itGroup->second.multiplier; continue; } bool addCvt = (itGroup == groupMap.end() || itGroup->second.expandType != itCand.second.expandType); if (itGroup == groupMap.end() || addCvt) { - if ((itCand.second.expr->GetPrimType() == PTY_ptr || - itCand.second.expr->GetPrimType() == PTY_a64 || - itCand.second.expr->GetPrimType() == PTY_a32) && itCand.second.expr->GetOp() != OP_cvt) { + if (analysis && itCand.second.expr->GetOp() != OP_cvt && + (itCand.second.expr->GetPrimType() == PTY_ptr || itCand.second.expr->GetPrimType() == PTY_a64 || + itCand.second.expr->GetPrimType() == PTY_a32)) { // it's not good to use one obj to form others replaced = false; return nullptr; @@ -1651,10 +1704,14 @@ MeExpr *IVOptimizer::ComputeExtraExprOfBase(MeExpr &candBase, MeExpr &groupBase, : irMap->CreateMeExprBinary(OP_add, ptyp, *extraExpr, *expr); } } - if (static_cast(groupConst) - static_cast(candConst) == 0) { + IntVal gConst(groupConst, groupBase.GetPrimType()); + IntVal cConst(candConst, candBase.GetPrimType()); + IntVal 
subVal(static_cast(gConst.GetExtValue()) - static_cast(cConst.GetExtValue()), + groupBase.GetPrimType()); + if (subVal == 0) { return extraExpr; } - auto *constExpr = irMap->CreateIntConstMeExpr(static_cast(groupConst) - static_cast(candConst), ptyp); + auto *constExpr = irMap->CreateIntConstMeExpr(subVal.GetSXTValue(), ptyp); extraExpr = extraExpr == nullptr ? constExpr : irMap->CreateMeExprBinary(OP_add, ptyp, *extraExpr, *constExpr); return extraExpr; @@ -1730,7 +1787,7 @@ uint32 IVOptimizer::ComputeCandCostForGroup(const IVCand &cand, IVGroup &group) if (!replaced) { return kInfinityCost; } - uint32 mulCost = 5; + uint32 mulCost = 4; uint8 extraConstCost = 4; if (group.type == kUseGeneral) { if (extraExpr == nullptr) { @@ -2069,8 +2126,8 @@ MeStmt *IVOptimizer::GetIncPos() { while (incPos != nullptr && (incPos->GetOp() == OP_comment || incPos->GetOp() == OP_goto)) { incPos = incPos->GetPrev(); } - bool headQuicklyExit = data->currLoop->inloopBB2exitBBs.find(data->currLoop->head->GetBBId()) != - data->currLoop->inloopBB2exitBBs.end(); + bool headQuicklyExit = std::as_const(data->currLoop->inloopBB2exitBBs).find(data->currLoop->head->GetBBId()) != + data->currLoop->inloopBB2exitBBs.cend(); if (headQuicklyExit) { auto *headFirst = data->currLoop->head->GetFirstMe(); while (headFirst != nullptr && headFirst->GetOp() == OP_comment) { @@ -2085,8 +2142,8 @@ MeStmt *IVOptimizer::GetIncPos() { auto *pred = latchBB->GetPred(0); MeStmt *lastMe = nullptr; while (pred != nullptr) { - bool predIsExit = data->currLoop->inloopBB2exitBBs.find(pred->GetBBId()) != - data->currLoop->inloopBB2exitBBs.end(); + bool predIsExit = std::as_const(data->currLoop->inloopBB2exitBBs).find(pred->GetBBId()) != + data->currLoop->inloopBB2exitBBs.cend(); if (pred->GetSucc().size() > 1 && !predIsExit) { break; } @@ -2131,7 +2188,7 @@ MeExpr *IVOptimizer::GetInvariant(MeExpr *expr) { return expr; } -MeExpr *IVOptimizer::ReplaceCompareOpnd(const OpMeExpr &cmp, MeExpr *compared, MeExpr *replace) { 
+MeExpr *IVOptimizer::ReplaceCompareOpnd(const OpMeExpr &cmp, const MeExpr *compared, MeExpr *replace) const { OpMeExpr newOpExpr(cmp, kInvalidExprID); for (size_t i = 0; i < newOpExpr.GetNumOpnds(); i++) { if (newOpExpr.GetOpnd(i) == compared) { @@ -2141,13 +2198,16 @@ MeExpr *IVOptimizer::ReplaceCompareOpnd(const OpMeExpr &cmp, MeExpr *compared, M return irMap->HashMeExpr(newOpExpr); } -bool IVOptimizer::PrepareCompareUse(int64 &ratio, IVUse *use, IVCand *cand, MeStmt *incPos, +bool IVOptimizer::PrepareCompareUse(int64 &ratio, IVUse *use, IVCand *cand, const MeStmt *incPos, MeExpr *&extraExpr, MeExpr *&replace) { bool replaced = true; bool replaceCompare = false; MeExpr *simplified = nullptr; - if (IsSignedInteger(static_cast(use->expr)->GetOpndType())) { - static_cast(use->expr)->SetOpndType(GetSignedPrimType(cand->iv->expr->GetPrimType())); + auto opndType = static_cast(use->expr)->GetOpndType(); + if (IsSignedInteger(opndType)) { + PrimType oldType = opndType; + opndType = GetSignedPrimType(cand->iv->expr->GetPrimType()); + use->iv->base = irMap->CreateMeExprTypeCvt(opndType, oldType, *use->iv->base); } ratio = ComputeRatioOfStep(*cand->iv->step, *use->iv->step); if (ratio == 0) { @@ -2155,6 +2215,7 @@ bool IVOptimizer::PrepareCompareUse(int64 &ratio, IVUse *use, IVCand *cand, MeSt // swap comparison if ratio is negative if (ratio < 0) { OpMeExpr newOpExpr(static_cast(*use->expr), kInvalidExprID); + newOpExpr.SetOpndType(opndType); auto op = newOpExpr.GetOp(); CHECK_FATAL(IsCompareHasReverseOp(op), "should be known op!"); auto newOp = GetSwapCmpOp(op); @@ -2222,16 +2283,18 @@ bool IVOptimizer::PrepareCompareUse(int64 &ratio, IVUse *use, IVCand *cand, MeSt } extraExpr = irMap->CreateMeExprBinary(OP_sub, extraExpr->GetPrimType(), *comparedExpr, *extraExpr); - if (extraExpr->GetPrimType() != static_cast(use->expr)->GetOpndType()) { - extraExpr = irMap->CreateMeExprTypeCvt(static_cast(use->expr)->GetOpndType(), - extraExpr->GetPrimType(), *extraExpr); + if 
(extraExpr->GetPrimType() != opndType) { + extraExpr = irMap->CreateMeExprTypeCvt(opndType, extraExpr->GetPrimType(), *extraExpr); } simplified = irMap->SimplifyMeExpr(extraExpr); - if (simplified != nullptr) { extraExpr = simplified; } + if (simplified != nullptr) { + extraExpr = simplified; + } extraExpr = GetInvariant(extraExpr); if (ratio == -1) { // swap comparison OpMeExpr newOpExpr(static_cast(*use->expr), kInvalidExprID); + newOpExpr.SetOpndType(opndType); auto op = newOpExpr.GetOp(); CHECK_FATAL(IsCompareHasReverseOp(op), "should be known op!"); auto newOp = GetSwapCmpOp(op); @@ -2243,21 +2306,26 @@ bool IVOptimizer::PrepareCompareUse(int64 &ratio, IVUse *use, IVCand *cand, MeSt *irMap->CreateIntConstMeExpr(-1, extraExpr->GetPrimType())); ratio = 1; simplified = irMap->SimplifyMeExpr(extraExpr); - if (simplified != nullptr) { extraExpr = simplified; } + if (simplified != nullptr) { + extraExpr = simplified; + } } extraExpr = GetInvariant(extraExpr); - auto *newCmp = ReplaceCompareOpnd(static_cast(*use->expr), use->comparedExpr, extraExpr); + OpMeExpr newOpExpr(static_cast(*use->expr), kInvalidExprID); + newOpExpr.SetOpndType(opndType); + auto *newCmp = ReplaceCompareOpnd(newOpExpr, use->comparedExpr, extraExpr); (void)irMap->ReplaceMeExprStmt(*use->stmt, *use->expr, *newCmp); use->expr = newCmp; use->comparedExpr = extraExpr; extraExpr = nullptr; } } - if (use->comparedExpr->GetPrimType() != static_cast(use->expr)->GetOpndType()) { - auto *cvt = irMap->CreateMeExprTypeCvt(static_cast(use->expr)->GetOpndType(), - use->comparedExpr->GetPrimType(), *use->comparedExpr); + if (use->comparedExpr->GetPrimType() != opndType) { + auto *cvt = irMap->CreateMeExprTypeCvt(opndType, use->comparedExpr->GetPrimType(), *use->comparedExpr); cvt = GetInvariant(cvt); - auto *newCmp = ReplaceCompareOpnd(static_cast(*use->expr), use->comparedExpr, cvt); + OpMeExpr newOpExpr(static_cast(*use->expr), kInvalidExprID); + newOpExpr.SetOpndType(opndType); + auto *newCmp = 
ReplaceCompareOpnd(newOpExpr, use->comparedExpr, cvt); (void)irMap->ReplaceMeExprStmt(*use->stmt, *use->expr, *newCmp); use->expr = newCmp; use->comparedExpr = cvt; @@ -2288,7 +2356,9 @@ MeExpr *IVOptimizer::GenerateRealReplace(int64 ratio, MeExpr *extraExpr, MeExpr auto *tmpReplace = irMap->CreateMeExprBinary(OP_add, replace->GetPrimType(), *regExtra->GetDefStmt()->GetRHS(), *replace); simplified = irMap->SimplifyMeExpr(tmpReplace); - if (simplified != nullptr) { tmpReplace = simplified; } + if (simplified != nullptr) { + tmpReplace = simplified; + } if (tmpReplace->GetDepth() <= regExtra->GetDefStmt()->GetRHS()->GetDepth()) { return tmpReplace; } @@ -2296,7 +2366,9 @@ MeExpr *IVOptimizer::GenerateRealReplace(int64 ratio, MeExpr *extraExpr, MeExpr } replace = irMap->CreateMeExprBinary(OP_add, replace->GetPrimType(), *extraExpr, *replace); simplified = irMap->SimplifyMeExpr(replace); - if (simplified != nullptr) { replace = simplified; } + if (simplified != nullptr) { + replace = simplified; + } } return replace; } @@ -2334,7 +2406,8 @@ void IVOptimizer::UseReplace() { } else { bool replaced = true; ratio = ComputeRatioOfStep(*cand->iv->step, *use->iv->step); - extraExpr = ComputeExtraExprOfBase(*cand->iv->base, *use->iv->base, static_cast(ratio), replaced, false); + extraExpr = ComputeExtraExprOfBase(*cand->iv->base, *use->iv->base, + static_cast(ratio), replaced, false); if (incPos != nullptr && incPos->IsCondBr() && use->stmt == incPos) { if (extraExpr == nullptr || (extraExpr->IsLeaf() && extraExpr->GetMeOp() != kMeOpConst)) { auto *tmpExpr = irMap->CreateRegMeExpr(use->expr->GetPrimType()); @@ -2573,7 +2646,7 @@ void IVOptimizer::Run() { continue; } // remove redundant phi - for (auto &phi : loop->head->GetMePhiList()) { + for (const auto &phi : std::as_const(loop->head->GetMePhiList())) { for (uint32 k = 0; k < phi.second->GetOpnds().size(); ++k) { auto *phiOpnd = phi.second->GetOpnd(k); if (phiOpnd == phi.second->GetLHS()) { @@ -2646,18 +2719,16 @@ bool 
MEIVOpts::PhaseRun(maple::MeFunction &f) { f.Dump(); } ivOptimizer.Run(); - if (ivOptimizer.LoopOptimized()) { - // run hdse to remove unused exprs - auto *aliasClass0 = FORCE_GET(MEAliasClass); - MeHDSE hdse0(f, *dom, *pdom, *f.GetIRMap(), aliasClass0, DEBUGFUNC_NEWPM(f)); - if (!MeOption::quiet) { - LogInfo::MapleLogger() << " == " << PhaseName() << " invokes [ " << hdse0.PhaseName() << " ] ==\n"; - } - hdse0.DoHDSE(); - if (hdse0.NeedUNClean()) { - f.GetCfg()->UnreachCodeAnalysis(true); - } + if (!ivOptimizer.LoopOptimized()) { + return false; + } + // run hdse to remove unused exprs + auto *aliasClass0 = FORCE_GET(MEAliasClass); + MeHDSE doHdse(f, *dom, *pdom, *f.GetIRMap(), aliasClass0, DEBUGFUNC_NEWPM(f)); + if (!MeOption::quiet) { + LogInfo::MapleLogger() << " == " << PhaseName() << " invokes [ " << doHdse.PhaseName() << " ] ==\n"; } + doHdse.DoHDSESafely(&f, *GetAnalysisInfoHook()); return true; } } // namespace maple diff --git a/src/mapleall/maple_me/src/me_jump_threading.cpp b/src/mapleall/maple_me/src/me_jump_threading.cpp index 40d571b72c16f9cfef5a04642cdc004683f14944..28769738d189fe0f0536348b410c81e3355c1962 100644 --- a/src/mapleall/maple_me/src/me_jump_threading.cpp +++ b/src/mapleall/maple_me/src/me_jump_threading.cpp @@ -67,7 +67,7 @@ void JumpThreading::Execute() { // Insert the ost of phi opnds to their def bbs. 
void JumpThreading::InsertOstOfPhi2Cands(BB &bb, size_t i) { - for (auto &it : bb.GetMePhiList()) { + for (const auto &it : std::as_const(bb).GetMePhiList()) { if (i >= it.second->GetOpnds().size()) { break; } diff --git a/src/mapleall/maple_me/src/me_loop_analysis.cpp b/src/mapleall/maple_me/src/me_loop_analysis.cpp index e9c3c9e28883d32b9118d67ac5568010bbe6bc2f..de74652be79ee0c8b111970a50248cc9df8883aa 100644 --- a/src/mapleall/maple_me/src/me_loop_analysis.cpp +++ b/src/mapleall/maple_me/src/me_loop_analysis.cpp @@ -31,7 +31,7 @@ bool LoopDesc::CheckBasicIV(MeExpr *solve, ScalarMeExpr *phiLhs, bool onlyStepOn return false; } int meet = 0; - if (!DoCheckBasicIV(solve, phiLhs, meet) || meet != 1) { + if (!DoCheckBasicIV(solve, phiLhs, meet, false, onlyStepOne) || meet != 1) { return false; } return true; @@ -92,15 +92,15 @@ bool LoopDesc::DoCheckBasicIV(MeExpr *solve, ScalarMeExpr *phiLhs, int &meet, bo return false; } -bool IsStepOneIV(MeExpr *expr, const LoopDesc &loop) { - if (!expr->IsScalar()) { +bool IsStepOneIV(MeExpr &expr, const LoopDesc &loop) { + if (!expr.IsScalar()) { return false; } - auto *scalarExpr = static_cast(expr); + auto &scalarExpr = static_cast(expr); auto *loopHeader = loop.head; auto &phiList = loopHeader->GetMePhiList(); - auto it = phiList.find(scalarExpr->GetOstIdx()); - if (it == phiList.end()) { + const auto it = std::as_const(phiList).find(scalarExpr.GetOstIdx()); + if (it == phiList.cend()) { return false; } auto *phi = it->second; @@ -113,22 +113,22 @@ bool IsStepOneIV(MeExpr *expr, const LoopDesc &loop) { } // loop must be a Canonical Loop -static bool IsExprIntConstOrDefOutOfLoop(MeExpr *expr, const LoopDesc &loop) { - if (expr->GetOp() == OP_constval && static_cast(expr)->GetConstVal()->GetKind() == kConstInt) { +static bool IsExprIntConstOrDefOutOfLoop(MeExpr &expr, const LoopDesc &loop) { + if (expr.GetOp() == OP_constval && static_cast(expr).GetConstVal()->GetKind() == kConstInt) { return true; } - if (!expr->IsScalar()) { + 
if (!expr.IsScalar()) { return false; } - auto *scalarExpr = static_cast(expr); - auto *defStmt = scalarExpr->GetDefByMeStmt(); + auto &scalarExpr = static_cast(expr); + auto *defStmt = scalarExpr.GetDefByMeStmt(); auto &loopBBs = loop.loopBBs; if (defStmt != nullptr && std::find(loopBBs.begin(), loopBBs.end(), defStmt->GetBB()->GetID()) == loop.loopBBs.end()) { return true; } - if (scalarExpr->GetDefBy() == kDefByPhi) { - auto *phi = &scalarExpr->GetDefPhi(); + if (scalarExpr.GetDefBy() == kDefByPhi) { + auto *phi = &scalarExpr.GetDefPhi(); auto *defBB = phi->GetDefBB(); // defPhi is not in the loop if (std::find(loopBBs.begin(), loopBBs.end(), defBB->GetID()) == loop.loopBBs.end()) { @@ -166,10 +166,10 @@ bool LoopDesc::IsFiniteLoop() const { } auto *opnd0 = condExpr->GetOpnd(0); auto *opnd1 = condExpr->GetOpnd(1); - bool isIV0 = IsStepOneIV(opnd0, *this); - bool isIV1 = IsStepOneIV(opnd1, *this); - bool isConstOrDefOutOfLoop0 = IsExprIntConstOrDefOutOfLoop(opnd0, *this); - bool isConstOrDefOutOfLoop1 = IsExprIntConstOrDefOutOfLoop(opnd1, *this); + bool isIV0 = IsStepOneIV(*opnd0, *this); + bool isIV1 = IsStepOneIV(*opnd1, *this); + bool isConstOrDefOutOfLoop0 = IsExprIntConstOrDefOutOfLoop(*opnd0, *this); + bool isConstOrDefOutOfLoop1 = IsExprIntConstOrDefOutOfLoop(*opnd1, *this); if (isConstOrDefOutOfLoop0 && isConstOrDefOutOfLoop1) { return true; } @@ -196,7 +196,7 @@ void IdentifyLoops::SetLoopParent4BB(const BB &bb, LoopDesc &loopDesc) { bbLoopParent[bb.GetBBId()] = &loopDesc; } -void IdentifyLoops::SetExitBB(LoopDesc &loop) { +void IdentifyLoops::SetExitBB(LoopDesc &loop) const { for (auto bbId : loop.loopBBs) { auto *bb = cfg->GetBBFromID(bbId); for (auto *succ : bb->GetSucc()) { @@ -223,7 +223,7 @@ void IdentifyLoops::ProcessBB(BB *bb) { LoopDesc *loop = CreateLoopDesc(*bb, *pred); // check try...catch auto found = std::find_if(bb->GetPred().begin(), bb->GetPred().end(), - [](BB *pre) { return pre->GetAttributes(kBBAttrIsTry); }); + [](const BB *pre) { 
return pre->GetAttributes(kBBAttrIsTry); }); if (found != bb->GetPred().end()) { loop->SetHasTryBB(true); } @@ -284,7 +284,7 @@ void IdentifyLoops::Dump() const { } } -void IdentifyLoops::ProcessPreheaderAndLatch(LoopDesc &loop) { +void IdentifyLoops::ProcessPreheaderAndLatch(LoopDesc &loop) const { // If predsize of head is one, it means that one is entry bb. if (loop.head->GetPred().size() == 1) { CHECK_FATAL(cfg->GetCommonEntryBB()->GetSucc(0) == loop.head, "succ of entry bb must be head"); diff --git a/src/mapleall/maple_me/src/me_loop_canon.cpp b/src/mapleall/maple_me/src/me_loop_canon.cpp index e05c083aa9127d988575a62bd6607f0de41e08a8..59197a832d9f1cfe7c5d0da2854abec4c3af0704 100644 --- a/src/mapleall/maple_me/src/me_loop_canon.cpp +++ b/src/mapleall/maple_me/src/me_loop_canon.cpp @@ -154,7 +154,7 @@ void MeLoopCanon::SplitPreds(const std::vector &splitList, BB *splittedBB, auto *phi = phiIter->second; auto *phiOpnd0 = phi->GetOpnd(0); auto foundDiff = std::find_if(phi->GetOpnds().begin(), phi->GetOpnds().end(), - [phiOpnd0](ScalarMeExpr *opnd) { return opnd != phiOpnd0; }); + [phiOpnd0](const ScalarMeExpr *opnd) { return opnd != phiOpnd0; }); if (foundDiff == phi->GetOpnds().end()) { auto &opnds = splittedBB->GetMePhiList()[phiIter->first]->GetOpnds(); // mergedBB is always the last pred of splittedBB diff --git a/src/mapleall/maple_me/src/me_loop_inversion.cpp b/src/mapleall/maple_me/src/me_loop_inversion.cpp index e7f2f67cbed5046b7b1a5c8612ac284888fcdbac..d957eff305764124dca1fc16ab94e520bf9d2d74 100644 --- a/src/mapleall/maple_me/src/me_loop_inversion.cpp +++ b/src/mapleall/maple_me/src/me_loop_inversion.cpp @@ -119,9 +119,9 @@ bool MeLoopInversion::NeedConvert(MeFunction *func, BB &bb, BB &pred, MapleAlloc return true; } -void MeLoopInversion::Convert(MeFunction &func, BB &bb, BB &pred, MapleMap &swapSuccs) { +void MeLoopInversion::Convert(MeFunction &func, BB &bb, BB &pred, MapleMap &swapSuccs) const { // if bb->fallthru is in loopbody, latchBB need 
convert condgoto and make original target as its fallthru - bool swapSuccOfLatch = (swapSuccs.find(std::make_pair(&bb, &pred)) != swapSuccs.cend()); + bool swapSuccOfLatch = (std::as_const(swapSuccs).find(std::make_pair(&bb, &pred)) != swapSuccs.cend()); if (isDebugFunc) { LogInfo::MapleLogger() << "***loop convert: backedge bb->id " << bb.GetBBId() << " pred->id " << pred.GetBBId(); if (swapSuccOfLatch) { @@ -136,7 +136,7 @@ void MeLoopInversion::Convert(MeFunction &func, BB &bb, BB &pred, MapleMapSetAttributes(kBBAttrIsInLoop); // latchBB is inloop // update newBB frequency : copy predBB succFreq as latch frequency if (func.GetCfg()->UpdateCFGFreq()) { - int idx = pred.GetSuccIndex(bb); + int64 idx = pred.GetSuccIndex(bb); ASSERT(idx >= 0 && idx < pred.GetSucc().size(), "sanity check"); FreqType freq = pred.GetEdgeFreq(static_cast(idx)); latchBB->SetFrequency(freq); diff --git a/src/mapleall/maple_me/src/me_loop_unrolling.cpp b/src/mapleall/maple_me/src/me_loop_unrolling.cpp index 5cf24103263b2fc74c424bd834839be6fdeb8983..da436c2662a79c2f81eae7ce55070b08ebf566f1 100644 --- a/src/mapleall/maple_me/src/me_loop_unrolling.cpp +++ b/src/mapleall/maple_me/src/me_loop_unrolling.cpp @@ -458,7 +458,7 @@ void LoopUnrolling::RemoveCondGoto() { cfg->DeleteBasicBlock(*loop->latch); } -bool LoopUnrolling::SplitCondGotoBB() { +bool LoopUnrolling::SplitCondGotoBB() const { auto *exitBB = func->GetCfg()->GetBBFromID(loop->inloopBB2exitBBs.begin()->first); auto *exitedBB = *(loop->inloopBB2exitBBs.cbegin()->second->cbegin()); MeStmt *lastStmt = exitBB->GetLastMe(); @@ -530,10 +530,8 @@ LoopUnrolling::ReturnKindOfFullyUnroll LoopUnrolling::LoopFullyUnroll(uint64 tri return kCanNotSplitCondGoto; } replicatedLoopNum = tripCount; - for (int64 i = 0; i < tripCount; ++i) { - if (i > 0) { - needUpdateInitLoopFreq = false; - } + for (uint64 i = 0; i < tripCount; ++i) { + needUpdateInitLoopFreq = false; CopyAndInsertBB(false); } RemoveCondGoto(); diff --git 
a/src/mapleall/maple_me/src/me_merge_stmts.cpp b/src/mapleall/maple_me/src/me_merge_stmts.cpp index 764beff024b572ddda9bbeb4090c732026a907c6..0abcfecc25326004381c8a09cfa9d9273af1b4fa 100644 --- a/src/mapleall/maple_me/src/me_merge_stmts.cpp +++ b/src/mapleall/maple_me/src/me_merge_stmts.cpp @@ -17,7 +17,7 @@ #include "mpl_options.h" namespace maple { -int32 MergeStmts::GetStructFieldBitSize(const MIRStructType *structType, FieldID fieldID) { +int32 MergeStmts::GetStructFieldBitSize(const MIRStructType *structType, FieldID fieldID) const { TyIdx fieldTypeIdx = structType->GetFieldTyIdx(fieldID); MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTypeIdx); uint32 fieldBitSize; @@ -29,14 +29,14 @@ int32 MergeStmts::GetStructFieldBitSize(const MIRStructType *structType, FieldID return fieldBitSize; } -int32 MergeStmts::GetPointedTypeBitSize(TyIdx ptrTypeIdx) { +int32 MergeStmts::GetPointedTypeBitSize(TyIdx ptrTypeIdx) const { MIRPtrType *ptrMirType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTypeIdx)); MIRType *pointedMirType = ptrMirType->GetPointedType(); return static_cast(pointedMirType->GetSize() * 8); } // Candidate stmts LHS must cover contiguous memory and RHS expr must be const -void MergeStmts::mergeIassigns(vOffsetStmt& iassignCandidates) { +void MergeStmts::MergeIassigns(vOffsetStmt& iassignCandidates) { if (iassignCandidates.empty() || iassignCandidates.size() == 1) { return; } @@ -57,7 +57,7 @@ void MergeStmts::mergeIassigns(vOffsetStmt& iassignCandidates) { size_t endIdx = 0; int32 startBitOffset = iassignCandidates[startCandidate].first; - if ((startBitOffset & 0x7) != 0) { + if ((static_cast(startBitOffset) & 0x7) != 0) { startCandidate++; continue; } @@ -118,7 +118,7 @@ void MergeStmts::mergeIassigns(vOffsetStmt& iassignCandidates) { uint64 fieldVal = static_cast(rhsLastIassignMeStmt->GetExtIntValue()); uint64 combinedVal = (fieldVal << (64 - fieldBitSize)) >> (64 - fieldBitSize); - auto combineValue = [&](int 
stmtIdx) { + auto combineValue = [&](size_t stmtIdx) { fieldID = static_cast(iassignCandidates[stmtIdx].second)->GetLHSVal()->GetFieldID(); if (fieldID == 0) { TyIdx lhsPtrTypeIdx = static_cast(iassignCandidates[stmtIdx].second)->GetLHSVal()->GetTyIdx(); @@ -127,18 +127,18 @@ void MergeStmts::mergeIassigns(vOffsetStmt& iassignCandidates) { fieldBitSize = GetStructFieldBitSize(lhsStructType, fieldID); } fieldVal = static_cast(static_cast( - static_cast(iassignCandidates[stmtIdx].second)->GetOpnd(1))->GetExtIntValue()); + static_cast(iassignCandidates[stmtIdx].second)->GetOpnd(1U))->GetExtIntValue()); fieldVal = (fieldVal << (64 - fieldBitSize)) >> (64 - fieldBitSize); - combinedVal = (combinedVal << fieldBitSize) | fieldVal; + combinedVal = (combinedVal << static_cast(fieldBitSize)) | fieldVal; }; if (isBigEndian) { - for (int stmtIdx = static_cast(startCandidate) + 1; stmtIdx <= static_cast(endIdx); ++stmtIdx) { + for (size_t stmtIdx = startCandidate + 1; stmtIdx <= endIdx; ++stmtIdx) { combineValue(stmtIdx); } } else { - for (int stmtIdx = static_cast(endIdx) - 1; stmtIdx >= static_cast(startCandidate); --stmtIdx) { - combineValue(stmtIdx); + for (int64 stmtIdx = static_cast(endIdx) - 1; stmtIdx >= static_cast(startCandidate); --stmtIdx) { + combineValue(static_cast(stmtIdx)); } } @@ -165,7 +165,7 @@ void MergeStmts::mergeIassigns(vOffsetStmt& iassignCandidates) { } // Candidate stmts LHS must cover contiguous memory and RHS expr must be const -void MergeStmts::mergeDassigns(vOffsetStmt& dassignCandidates) { +void MergeStmts::MergeDassigns(vOffsetStmt& dassignCandidates) { if (dassignCandidates.empty() || dassignCandidates.size() == 1) { return; } @@ -231,7 +231,7 @@ void MergeStmts::mergeDassigns(vOffsetStmt& dassignCandidates) { uint64 combinedVal = (fieldValIdx << (64 - fieldBitSizeEndIdx)) >> (64 - fieldBitSizeEndIdx); - auto combineValue = [&](int stmtIdx) { + auto combineValue = [&dassignCandidates, &lhsStructTypeStart, &combinedVal, this](size_t stmtIdx) { 
OriginalSt *lhsOrigStStmtIdx = static_cast(dassignCandidates[stmtIdx].second)->GetVarLHS()->GetOst(); FieldID fieldIDStmtIdx = lhsOrigStStmtIdx->GetFieldID(); @@ -247,8 +247,8 @@ void MergeStmts::mergeDassigns(vOffsetStmt& dassignCandidates) { combineValue(stmtIdx); } } else { - for (int stmtIdx = static_cast(endIdx) - 1; stmtIdx >= static_cast(startCandidate); --stmtIdx) { - combineValue(stmtIdx); + for (int64 stmtIdx = static_cast(endIdx) - 1; stmtIdx >= static_cast(startCandidate); --stmtIdx) { + combineValue(static_cast(stmtIdx)); } } @@ -275,7 +275,7 @@ void MergeStmts::mergeDassigns(vOffsetStmt& dassignCandidates) { } } -IassignMeStmt *MergeStmts::genSimdIassign(int32 offset, IvarMeExpr iVar1, IvarMeExpr iVar2, +IassignMeStmt *MergeStmts::GenSimdIassign(int32 offset, IvarMeExpr iVar1, IvarMeExpr iVar2, const MapleMap &stmtChi, TyIdx ptrTypeIdx) { MeIRMap *irMap = func.GetIRMap(); iVar1.SetOffset(offset); @@ -286,7 +286,7 @@ IassignMeStmt *MergeStmts::genSimdIassign(int32 offset, IvarMeExpr iVar1, IvarMe return xIassignStmt; } -IassignMeStmt *MergeStmts::genSimdIassign(int32 offset, IvarMeExpr iVar, MeExpr &valMeExpr, +IassignMeStmt *MergeStmts::GenSimdIassign(int32 offset, IvarMeExpr iVar, MeExpr &valMeExpr, const MapleMap &stmtChi, TyIdx ptrTypeIdx) { MeIRMap *irMap = func.GetIRMap(); iVar.SetOffset(offset); @@ -303,7 +303,7 @@ void MergeStmts::GenShortSet(MeExpr *dstMeExpr, uint32 offset, const MIRType *uX IvarMeExpr iVarBase(&func.GetIRMap()->GetIRMapAlloc(), kInvalidExprID, uXTgtMirType->GetPrimType(), uXTgtPtrType->GetTypeIndex(), 0); iVarBase.SetBase(dstMeExpr); - IassignMeStmt *xIassignStmt = genSimdIassign(offset, iVarBase, *srcRegMeExpr, memsetCallStmtChi, + IassignMeStmt *xIassignStmt = GenSimdIassign(offset, iVarBase, *srcRegMeExpr, memsetCallStmtChi, uXTgtPtrType->GetTypeIndex()); memsetCallStmt->GetBB()->InsertMeStmtBefore(memsetCallStmt, xIassignStmt); xIassignStmt->CopyInfo(*memsetCallStmt); @@ -321,7 +321,7 @@ bool EnableSIMD(const int 
&length, bool needToBeMultiplyOfByte) { } } -void MergeStmts::simdMemcpy(IntrinsiccallMeStmt* memcpyCallStmt) { +void MergeStmts::SimdMemcpy(IntrinsiccallMeStmt* memcpyCallStmt) { ASSERT(memcpyCallStmt->GetIntrinsic() == INTRN_C_memcpy, "The stmt is NOT intrinsic memcpy"); ConstMeExpr *lengthExpr = static_cast(memcpyCallStmt->GetOpnd(2)); @@ -374,7 +374,7 @@ void MergeStmts::simdMemcpy(IntrinsiccallMeStmt* memcpyCallStmt) { tmpIvar2.SetBase(srcMeExpr); for (int32 i = 0; i < numOf16Byte; i++) { - IassignMeStmt *xIassignStmt = genSimdIassign(16 * i, tmpIvar1, tmpIvar2, *memcpyCallStmtChi, + IassignMeStmt *xIassignStmt = GenSimdIassign(16 * i, tmpIvar1, tmpIvar2, *memcpyCallStmtChi, v16uint8PtrType->GetTypeIndex()); memcpyCallStmt->GetBB()->InsertMeStmtBefore(memcpyCallStmt, xIassignStmt); xIassignStmt->CopyInfo(*memcpyCallStmt); @@ -387,7 +387,7 @@ void MergeStmts::simdMemcpy(IntrinsiccallMeStmt* memcpyCallStmt) { tmpIvar3.SetBase(dstMeExpr); IvarMeExpr tmpIvar4(&func.GetIRMap()->GetIRMapAlloc(), kInvalidExprID, PTY_v8u8, v8uint8PtrType->GetTypeIndex(), 0); tmpIvar4.SetBase(srcMeExpr); - IassignMeStmt *xIassignStmt = genSimdIassign(offset8Byte, tmpIvar3, tmpIvar4, *memcpyCallStmtChi, + IassignMeStmt *xIassignStmt = GenSimdIassign(offset8Byte, tmpIvar3, tmpIvar4, *memcpyCallStmtChi, v8uint8PtrType->GetTypeIndex()); memcpyCallStmt->GetBB()->InsertMeStmtBefore(memcpyCallStmt, xIassignStmt); xIassignStmt->CopyInfo(*memcpyCallStmt); @@ -400,7 +400,7 @@ void MergeStmts::simdMemcpy(IntrinsiccallMeStmt* memcpyCallStmt) { } } -void MergeStmts::simdMemset(IntrinsiccallMeStmt *memsetCallStmt) { +void MergeStmts::SimdMemset(IntrinsiccallMeStmt *memsetCallStmt) { ASSERT(memsetCallStmt->GetIntrinsic() == INTRN_C_memset, "The stmt is NOT intrinsic memset"); ConstMeExpr *numExpr = static_cast(memsetCallStmt->GetOpnd(2)); @@ -451,7 +451,7 @@ void MergeStmts::simdMemset(IntrinsiccallMeStmt *memsetCallStmt) { dupRegAssignMeStmt->CopyInfo(*memsetCallStmt); for (int32 i = 0; i < 
numOf16Byte; i++) { - IassignMeStmt *xIassignStmt = genSimdIassign(16 * i, tmpIvar, *dupRegMeExpr, *memsetCallStmtChi, + IassignMeStmt *xIassignStmt = GenSimdIassign(16 * i, tmpIvar, *dupRegMeExpr, *memsetCallStmtChi, v16u8PtrType->GetTypeIndex()); memsetCallStmt->GetBB()->InsertMeStmtBefore(memsetCallStmt, xIassignStmt); xIassignStmt->CopyInfo(*memsetCallStmt); @@ -591,10 +591,10 @@ void MergeStmts::MergeMeStmts() { MIRIntrinsicID intrinsicCallID = intrinsicCallStmt->GetIntrinsic(); if (intrinsicCallID == INTRN_C_memcpy) { candidateStmts.push(nullptr); - simdMemcpy(intrinsicCallStmt); + SimdMemcpy(intrinsicCallStmt); } else if (intrinsicCallID == INTRN_C_memset) { candidateStmts.push(nullptr); - simdMemset(intrinsicCallStmt); + SimdMemset(intrinsicCallStmt); } else { // More to come } @@ -644,7 +644,7 @@ void MergeStmts::MergeMeStmts() { candidateStmts.pop(); } iassignCandidates.insert(iassignCandidates.begin(), uniqueCheck.begin(), uniqueCheck.end()); - mergeIassigns(iassignCandidates); + MergeIassigns(iassignCandidates); break; } case OP_dassign: { @@ -661,7 +661,7 @@ void MergeStmts::MergeMeStmts() { candidateStmts.pop(); } dassignCandidates.insert(dassignCandidates.begin(), uniqueCheck.begin(), uniqueCheck.end()); - mergeDassigns(dassignCandidates); + MergeDassigns(dassignCandidates); break; } default: { diff --git a/src/mapleall/maple_me/src/me_obj_size.cpp b/src/mapleall/maple_me/src/me_obj_size.cpp index f81923d3b6ee3ba0f3f26c1dd18f2869b2de1944..017d8ff490d55991c77b1a057ddc96b40b4ed6cc 100644 --- a/src/mapleall/maple_me/src/me_obj_size.cpp +++ b/src/mapleall/maple_me/src/me_obj_size.cpp @@ -356,12 +356,64 @@ size_t OBJSize::DealWithIaddrof(const MeExpr &opnd, int64 type, bool getSizeOfWh static_cast(typeOfBase->GetBitOffsetFromBaseAddr(fieldIDOfIaddrof) / kBitsPerByte); } -size_t OBJSize::DealWithDread(const MeExpr &opnd, int64 type, bool getMaxSizeOfObjs) const { +bool OBJSize::DealWithOpnd(const MeExpr &opnd, std::set &visitedPhi) const { + for (uint8 i = 
0; i < opnd.GetNumOpnds(); ++i) { + if (PhiOpndIsDefPointOfOtherPhi(*opnd.GetOpnd(i), visitedPhi)) { + return true; + } + } + return false; +} + +// If phi opnd is def point of other phi, return true. Such as meir: +// VAR:%pu8Buf{offset:0}<0>[idx:16] mx94 = MEPHI{mx93,mx361} +// VAR:%pu8Buf{offset:0}<0>[idx:16] mx93 = MEPHI{mx53,mx94} +bool OBJSize::PhiOpndIsDefPointOfOtherPhi(MeExpr &expr, std::set &visitedPhi) const { + if (!expr.IsScalar()) { + return false; + } + auto *var = static_cast(&expr); + if (var->GetDefBy() == kDefByStmt) { + auto defStmt = var->GetDefStmt(); + for (size_t i = 0; i < defStmt->NumMeStmtOpnds(); ++i) { + if (DealWithOpnd(*defStmt->GetOpnd(i), visitedPhi)) { + return true; + } + } + } + if (var->GetDefBy() == kDefByChi) { + auto *rhs = var->GetDefChi().GetRHS(); + if (rhs == nullptr) { + return false; + } + return PhiOpndIsDefPointOfOtherPhi(*rhs, visitedPhi); + } + if (var->GetDefBy() == kDefByPhi) { + MePhiNode *phi = &(var->GetDefPhi()); + if (visitedPhi.find(phi) != visitedPhi.end()) { + return true; + } + (void)visitedPhi.insert(phi); + std::set res; + for (auto *phiOpnd : phi->GetOpnds()) { + if (PhiOpndIsDefPointOfOtherPhi(*phiOpnd, visitedPhi)) { + return true; + } + } + } + return false; +} + +size_t OBJSize::DealWithDread(MeExpr &opnd, int64 type, bool getMaxSizeOfObjs) const { if (!opnd.IsScalar()) { return kInvalidDestSize; } - auto &scalarMeExpr = static_cast(opnd); + auto &scalarMeExpr = static_cast(opnd); if (scalarMeExpr.GetDefBy() == kDefByPhi) { + std::set visitedPhi; + if (PhiOpndIsDefPointOfOtherPhi(scalarMeExpr, visitedPhi)) { + return kInvalidDestSize; + } auto &phi = scalarMeExpr.GetDefPhi(); size_t size = getMaxSizeOfObjs ? 
0 : std::numeric_limits::max(); for (size_t i = 0; i < phi.GetOpnds().size(); ++i) { diff --git a/src/mapleall/maple_me/src/me_option.cpp b/src/mapleall/maple_me/src/me_option.cpp index 00fe8cd116cf61d90dedf257fb134884f1e8c630..f4679951f071239db5868e6c7d64b1c2f82e8cb9 100644 --- a/src/mapleall/maple_me/src/me_option.cpp +++ b/src/mapleall/maple_me/src/me_option.cpp @@ -130,6 +130,7 @@ bool MeOption::seqVec = true; bool MeOption::enableLFO = true; uint8 MeOption::rematLevel = 2; bool MeOption::layoutWithPredict = true; // optimize output layout using branch prediction +bool MeOption::layoutColdPath = false; // layout cold blocks (such as unlikely) out of hot path SafetyCheckMode MeOption::npeCheckMode = SafetyCheckMode::kNoCheck; bool MeOption::isNpeCheckAll = false; SafetyCheckMode MeOption::boundaryCheckMode = SafetyCheckMode::kNoCheck; @@ -310,7 +311,7 @@ bool MeOption::SolveOptions(bool isDebug) { maplecl::CopyIfEnabled(warnNativeFunc, opts::me::warnemptynative); maplecl::CopyIfEnabled(epreLimit, opts::me::eprelimit); maplecl::CopyIfEnabled(eprePULimit, opts::me::eprepulimit); - maplecl::CopyIfEnabled(epreUseProfileLimit, opts::me::epreuseprofilelimit); + maplecl::CopyIfEnabled(epreUseProfileLimit, opts::me::epreUseProfileLimit); maplecl::CopyIfEnabled(stmtprePULimit, opts::me::stmtprepulimit); maplecl::CopyIfEnabled(lpreLimit, opts::me::lprelimit); maplecl::CopyIfEnabled(lprePULimit, opts::me::lprepulimit); @@ -425,6 +426,7 @@ bool MeOption::SolveOptions(bool isDebug) { maplecl::CopyIfEnabled(enableLFO, opts::me::lfo); maplecl::CopyIfEnabled(rematLevel, opts::me::remat); maplecl::CopyIfEnabled(layoutWithPredict, opts::me::layoutwithpredict); + maplecl::CopyIfEnabled(layoutColdPath, opts::me::layoutColdPath); maplecl::CopyIfEnabled(vecLoopLimit, opts::me::veclooplimit); maplecl::CopyIfEnabled(ivoptsLimit, opts::me::ivoptslimit); maplecl::CopyIfEnabled(unifyRets, opts::me::unifyrets); diff --git a/src/mapleall/maple_me/src/me_options.cpp 
b/src/mapleall/maple_me/src/me_options.cpp index d932fb32d735f3e02126a5780b554df7ee7356df..3c673a83723f2fb43a427042d57989cfea3b7beb 100644 --- a/src/mapleall/maple_me/src/me_options.cpp +++ b/src/mapleall/maple_me/src/me_options.cpp @@ -12,12 +12,8 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ - -#include "driver_options.h" - -#include -#include #include +#include "driver_options.h" namespace opts::me { @@ -52,7 +48,8 @@ maplecl::Option range({"--range"}, {meCategory}); maplecl::Option pgoRange({"--pgorange"}, - " --pglrange \tUse profile-guided optimizations only for funcid in the range [NUM0, NUM1]\n" + " --pglrange \tUse profile-guided optimizations only for funcid " + "in the range [NUM0, NUM1]\n" " \t--pgorange=NUM0,NUM1\n", {meCategory}); @@ -214,8 +211,9 @@ maplecl::Option eprepulimit({"--eprepulimit"}, " \t--eprepulimit=NUM\n", {meCategory}); -maplecl::Option epreuseprofilelimit({"--epreuseprofilelimit"}, - " --epreuseprofilelimit \tMake EPRE take advantage of profile data only for the first NUM expressions\n" +maplecl::Option epreUseProfileLimit({"--epreuseprofilelimit"}, + " --epreuseprofilelimit \tMake EPRE take advantage of profile data only " + "for the first NUM expressions\n" " \t--epreuseprofilelimit=NUM\n", {meCategory}); @@ -651,6 +649,14 @@ maplecl::Option layoutwithpredict({"--layoutwithpredict"}, {meCategory}, maplecl::DisableWith("--no-layoutwithpredict")); +maplecl::Option layoutColdPath({"--layout-cold-path"}, + " --layout-cold-path" + " \tEnable layouting cold blocks (such as unlikely) out of hot path\n" + " --no-layout-cold-path" + " \tDisable layouting cold blocks (such as unlikely) out of hot path\n", + {meCategory}, + maplecl::DisableWith("--no-layout-cold-path")); + maplecl::Option veclooplimit({"--veclooplimit"}, " --veclooplimit \tApply vectorize loops only up to NUM \n" " \t--veclooplimit=NUM\n", diff --git a/src/mapleall/maple_me/src/me_phase_manager.cpp 
b/src/mapleall/maple_me/src/me_phase_manager.cpp index d349a58591da6977e78ba3e2d3f8e01571de2306..f7c7975d7b0a7e305474e3895ac9f464b9683a29 100644 --- a/src/mapleall/maple_me/src/me_phase_manager.cpp +++ b/src/mapleall/maple_me/src/me_phase_manager.cpp @@ -234,6 +234,7 @@ MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(METopLevelSSA, toplevelssa) MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(MEBBLayout, bblayout) MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(MEIRMapBuild, irmapbuild) MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(MEPredict, predict) +MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(METailcall, tailcall) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEAnalyzeRC, analyzerc) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEStorePre, storepre) @@ -251,6 +252,7 @@ MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MECondBasedNPC, condbasednpc) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEPregRename, pregrename) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEStmtPre, stmtpre) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEGVN, gvn) +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MESRA, sra) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MECfgOpt, cfgopt) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEAutoVectorization, autovec) MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MELfoUnroll, lfounroll) diff --git a/src/mapleall/maple_me/src/me_placement_rc.cpp b/src/mapleall/maple_me/src/me_placement_rc.cpp index f1f8423b735b6e9525f16cc58e7e6a10bc335a3c..801a551e8852ecdf13492fee6433253c0910b96e 100644 --- a/src/mapleall/maple_me/src/me_placement_rc.cpp +++ b/src/mapleall/maple_me/src/me_placement_rc.cpp @@ -16,7 +16,7 @@ #include "me_irmap_build.h" namespace { -const std::set whiteListFunc { +const std::set kWhiteListFunc { #include "rcwhitelist.def" }; } @@ -121,7 +121,7 @@ void PlacementRC::HandleThrowOperand(SRealOcc &realOcc, ThrowMeStmt &thwStmt) { } } -void PlacementRC::AddCleanupArg() { +void PlacementRC::AddCleanupArg() const { auto cfg = func->GetCfg(); for (BB *bb : cfg->GetCommonExitBB()->GetPred()) { auto &meStmts = bb->GetMeStmts(); @@ -890,7 +890,7 @@ void 
MEPlacementRC::GetAnalysisDependence(maple::AnalysisDep &aDep) const { bool MEPlacementRC::PhaseRun(maple::MeFunction &f) { std::string funcName = f.GetName(); - if (whiteListFunc.find(funcName) != whiteListFunc.end() || f.GetMirFunc()->GetAttr(FUNCATTR_rclocalunowned)) { + if (kWhiteListFunc.find(funcName) != kWhiteListFunc.end() || f.GetMirFunc()->GetAttr(FUNCATTR_rclocalunowned)) { return false; } auto *cfg = GET_ANALYSIS(MEMeCfg, f); diff --git a/src/mapleall/maple_me/src/me_predict.cpp b/src/mapleall/maple_me/src/me_predict.cpp index 783782818d88dda5d58f15bc5dde11cee61aa97c..14e847a61e3b5e60c44a7b63b83e374fd31ce8fd 100644 --- a/src/mapleall/maple_me/src/me_predict.cpp +++ b/src/mapleall/maple_me/src/me_predict.cpp @@ -286,7 +286,7 @@ bool MePrediction::PredictedByLoopHeuristic(const BB &bb) const { } // Sort loops first so that hanle innermost loop first in PropFreqInLoops. -void MePrediction::SortLoops() { +void MePrediction::SortLoops() const { const auto &bbId2rpoId = dom->GetReversePostOrderId(); std::stable_sort(meLoop->GetMeLoops().begin(), meLoop->GetMeLoops().end(), [&bbId2rpoId](const LoopDesc *loop1, const LoopDesc *loop2) { @@ -590,11 +590,11 @@ void MePrediction::CombinePredForBB(const BB &bb) { } // If we have only one successor which is unknown, we can compute missing probablity. 
if (nunknown == 1) { - int32 prob = kProbAlways; + uint32 prob = static_cast(kProbAlways); Edge *missing = nullptr; for (Edge *edge = edges[bb.GetBBId()]; edge != nullptr; edge = edge->next) { if (edge->probability > 0) { - prob -= static_cast(edge->probability); + prob -= edge->probability; } else if (missing == nullptr) { missing = edge; } else { @@ -602,7 +602,7 @@ void MePrediction::CombinePredForBB(const BB &bb) { } } CHECK_FATAL(missing != nullptr, "null ptr check"); - missing->probability = static_cast(prob); + missing->probability = prob; return; } EdgePrediction *preds = bbPredictions[bb.GetBBId()]; diff --git a/src/mapleall/maple_me/src/me_profile_gen.cpp b/src/mapleall/maple_me/src/me_profile_gen.cpp index e3d5184983059ae88efd37754dcc4b39a96e42cb..cf792ed7b8d944aafa6d3ff7a400de40a2d267b5 100644 --- a/src/mapleall/maple_me/src/me_profile_gen.cpp +++ b/src/mapleall/maple_me/src/me_profile_gen.cpp @@ -86,7 +86,7 @@ void MeProfGen::InstrumentBB(BB &bb) { } } -void MeProfGen::SaveProfile() { +void MeProfGen::SaveProfile() const { if (!Options::profileTest) { return; } diff --git a/src/mapleall/maple_me/src/me_profile_use.cpp b/src/mapleall/maple_me/src/me_profile_use.cpp index b4915dbcb72cf8fedf50e98072cd7f05d14c9046..55cba37a2f73d5296258c30bdb3af3cf39413955 100644 --- a/src/mapleall/maple_me/src/me_profile_use.cpp +++ b/src/mapleall/maple_me/src/me_profile_use.cpp @@ -79,7 +79,7 @@ void MeProfUse::InitBBEdgeInfo() { } // If all input edges or output edges determined, caculate BB freq -void MeProfUse::ComputeBBFreq(BBUseInfo &bbInfo, bool &changed) { +void MeProfUse::ComputeBBFreq(BBUseInfo &bbInfo, bool &changed) const { FreqType count = 0; if (!bbInfo.GetStatus()) { if (bbInfo.GetUnknownOutEdges() == 0) { @@ -149,7 +149,7 @@ void MeProfUse::ComputeEdgeFreq() { * this used to set the edge count for the unknown edge * ensure only one unkown edge in the edges */ -void MeProfUse::SetEdgeCount(MapleVector &edges, FreqType value) { +void 
MeProfUse::SetEdgeCount(MapleVector &edges, FreqType value) const { for (const auto &e : edges) { if (!e->GetStatus()) { e->SetCount(value); @@ -163,7 +163,7 @@ void MeProfUse::SetEdgeCount(MapleVector &edges, FreqType value) { CHECK(false, "can't find unkown edge"); } -void MeProfUse::SetEdgeCount(BBUseEdge &edge, FreqType value) { +void MeProfUse::SetEdgeCount(BBUseEdge &edge, FreqType value) const { // edge counter already valid skip if (edge.GetStatus()) { return; @@ -296,7 +296,7 @@ bool MeProfUse::Run() { return true; } -FuncProfInfo *MeProfUse::GetFuncData() { +FuncProfInfo *MeProfUse::GetFuncData() const { MplProfileData *profData = func->GetMIRModule().GetMapleProfile(); if (!profData) { return nullptr; @@ -305,7 +305,7 @@ FuncProfInfo *MeProfUse::GetFuncData() { return funcData; } -void MeProfUse::CheckSumFail(const uint64 hash, const uint32 expectedCheckSum, const std::string &tag) { +void MeProfUse::CheckSumFail(const uint64 hash, const uint32 expectedCheckSum, const std::string &tag) const { uint32 curCheckSum = static_cast((hash >> 32) ^ (hash & 0xffffffff)); CHECK_FATAL(curCheckSum == expectedCheckSum, "%s() %s checksum %u doesn't match the expected %u; aborting\n", func->GetName().c_str(), tag.c_str(), curCheckSum, expectedCheckSum); diff --git a/src/mapleall/maple_me/src/me_prop.cpp b/src/mapleall/maple_me/src/me_prop.cpp index be42905946cd62e6c847614d07c87d4cacda4cca..2ebd8e0faed854046927e1a839faa32f3612c5da 100644 --- a/src/mapleall/maple_me/src/me_prop.cpp +++ b/src/mapleall/maple_me/src/me_prop.cpp @@ -25,7 +25,7 @@ // encounters a variable reference, it uses its SSA representation to look up // its assigned value and try to do the substitution. 
namespace { -const std::set propWhiteList { +const std::set kPropWhiteList { #define PROPILOAD(funcname) #funcname, #include "propiloadlist.def" #undef PROPILOAD @@ -59,7 +59,7 @@ bool MEMeProp::PhaseRun(maple::MeFunction &f) { MIRSymbol *fnSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(f.GetMirFunc()->GetStIdx().Idx()); CHECK_FATAL(fnSt, "fnSt is nullptr"); const std::string &funcName = fnSt->GetName(); - propIloadRef = propWhiteList.find(funcName) != propWhiteList.end(); + propIloadRef = kPropWhiteList.find(funcName) != kPropWhiteList.end(); if (DEBUGFUNC_NEWPM(f)) { if (propIloadRef) { LogInfo::MapleLogger() << "propiloadref enabled because function is in white list"; diff --git a/src/mapleall/maple_me/src/me_rc_lowering.cpp b/src/mapleall/maple_me/src/me_rc_lowering.cpp index bd83f21825036227da5e714e3163f7a6ba7a0468..288ae948e6b4193bc682c6134b0f45c9ee935eef 100644 --- a/src/mapleall/maple_me/src/me_rc_lowering.cpp +++ b/src/mapleall/maple_me/src/me_rc_lowering.cpp @@ -22,7 +22,7 @@ // based on previous analyze results. 
RC intrinsic will later be lowered // in Code Generation namespace { -const std::set whiteListFunc { +const std::set kWhiteListFunc { #include "rcwhitelist.def" }; } @@ -252,7 +252,7 @@ IntrinsiccallMeStmt *RCLowering::CreateRCIntrinsic(MIRIntrinsicID intrnID, const return intrn; } -MIRIntrinsicID RCLowering::PrepareVolatileCall(const MeStmt &stmt, MIRIntrinsicID intrnId) { +MIRIntrinsicID RCLowering::PrepareVolatileCall(const MeStmt &stmt, MIRIntrinsicID intrnId) const { bool isLoad = (intrnId == INTRN_MCCLoadRefSVol || intrnId == INTRN_MCCLoadWeakVol || intrnId == INTRN_MCCLoadRefVol); if (isLoad) { CheckRemove(stmt.GetNext(), OP_membaracquire); @@ -414,7 +414,7 @@ void RCLowering::HandleRetOfCallAssignedMeStmt(MeStmt &stmt, MeExpr &pendingDec) } } -bool RCLowering::RCFirst(MeExpr &rhs) { +bool RCLowering::RCFirst(MeExpr &rhs) const { // null, local var/reg read if (rhs.GetMeOp() == kMeOpConst) { return static_cast(rhs).IsZero(); @@ -489,7 +489,7 @@ void RCLowering::HandleAssignMeStmtVarLHS(MeStmt &stmt, MeExpr *pendingDec) { assignedPtrSym.insert(lsym); } -MIRType *RCLowering::GetArrayNodeType(const VarMeExpr &var) { +MIRType *RCLowering::GetArrayNodeType(const VarMeExpr &var) const { const MIRSymbol *arrayElemSym = var.GetOst()->GetMIRSymbol(); MIRType *baseType = arrayElemSym->GetType(); MIRType *arrayElemType = nullptr; @@ -516,7 +516,7 @@ void RCLowering::CheckArrayStore(IntrinsiccallMeStmt &writeRefCall) { return; } MIRIntrinsicID intrnID = writeRefCall.GetIntrinsic(); - if (!((INTRN_MCCWriteVolNoInc <= intrnID) && (INTRN_MCCWrite >= intrnID))) { + if (!((intrnID >= INTRN_MCCWriteVolNoInc) && (intrnID <= INTRN_MCCWrite))) { return; } if (writeRefCall.GetOpnd(1)->GetOp() != OP_iaddrof) { @@ -891,7 +891,7 @@ IntrinsiccallMeStmt *FindCleanupIntrinsic(BB &bb) { // as a result, LoadRefField was not generated. 
This function fixes up the issue // by checking rhs to see if it is regread and then try to // propagate incref from current stmt back to regassign from iread which had no incref -void RCLowering::EpreFixup(BB &bb) { +void RCLowering::EpreFixup(BB &bb) const { for (auto &stmt : bb.GetMeStmts()) { // remove decref as mpl2mpl will insert based on ref assign if (!stmt.NeedIncref()) { @@ -937,7 +937,7 @@ void RCLowering::HandleReturnVar(RetMeStmt &ret) { // must be regreadAtReturn // checking localrefvar because some objects are meta HandleReturnRegread(ret); - } else if (!((func.GetHints() & kPlacementRCed) && sym != nullptr && sym->GetStorageClass() == kScFormal && + } else if (!((func.GetHints() & kPlacementRCed) != 0 && sym != nullptr && sym->GetStorageClass() == kScFormal && assignedPtrSym.count(sym) > 0)) { // if returning formal, incref unless placementRC is used and formal is NOT reassigned HandleReturnFormal(ret); @@ -1122,7 +1122,7 @@ void RCLowering::HandleReturnStmt() { void RCLowering::HandleArguments() { // placementRC would have already addressed formals - if (func.GetHints() & kPlacementRCed) { + if ((func.GetHints() & kPlacementRCed) != 0) { return; } // handle arguments, if the formal gets modified @@ -1466,7 +1466,7 @@ void RCLowering::FastLowerThrowStmt(MeStmt &stmt, MapleMap &exc if (throwVal->GetMeOp() == kMeOpVar) { auto *var = static_cast(throwVal); auto iter = exceptionAllocsites.find(var->GetVstIdx()); - if (iter != exceptionAllocsites.cend()) { + if (iter != exceptionAllocsites.end()) { exceptionAllocsites.erase(iter); } } @@ -1656,7 +1656,7 @@ bool MERCLowering::PhaseRun(maple::MeFunction &f) { } MIRFunction *mirFunction = f.GetMirFunc(); MeCFG *cfg = f.GetCfg(); - if (whiteListFunc.find(mirFunction->GetName()) != whiteListFunc.end() || + if (kWhiteListFunc.find(mirFunction->GetName()) != kWhiteListFunc.end() || mirFunction->GetAttr(FUNCATTR_rclocalunowned)) { auto eIt = cfg->valid_end(); for (auto bIt = cfg->valid_begin(); bIt != eIt; 
++bIt) { diff --git a/src/mapleall/maple_me/src/me_rename2preg.cpp b/src/mapleall/maple_me/src/me_rename2preg.cpp index 9c6ca06b5192ac811d30617edb9ad6b831ab2d25..6210496d08d6fc7fa3d431c7a46ceb576974224b 100644 --- a/src/mapleall/maple_me/src/me_rename2preg.cpp +++ b/src/mapleall/maple_me/src/me_rename2preg.cpp @@ -83,22 +83,22 @@ bool SSARename2Preg::VarMeExprIsRenameCandidate(const VarMeExpr &varMeExpr) cons return true; } -RegMeExpr *SSARename2Preg::CreatePregForVar(const VarMeExpr *varMeExpr) { - auto primType = varMeExpr->GetPrimType(); +RegMeExpr *SSARename2Preg::CreatePregForVar(const VarMeExpr &varMeExpr) { + auto primType = varMeExpr.GetPrimType(); RegMeExpr *curtemp = nullptr; if (primType != PTY_ref) { curtemp = meirmap->CreateRegMeExpr(primType); } else { - curtemp = meirmap->CreateRegMeExpr(*varMeExpr->GetType()); + curtemp = meirmap->CreateRegMeExpr(*varMeExpr.GetType()); } OriginalSt *pregOst = curtemp->GetOst(); - if (varMeExpr->IsZeroVersion()) { + if (varMeExpr.IsZeroVersion()) { pregOst->SetZeroVersionIndex(curtemp->GetVstIdx()); } - const OriginalSt *ost = varMeExpr->GetOst(); + const OriginalSt *ost = varMeExpr.GetOst(); pregOst->SetIsFormal(ost->IsFormal()); sym2reg_map[ost->GetIndex()] = pregOst; - (void)vstidx2reg_map.emplace(std::make_pair(varMeExpr->GetExprID(), curtemp)); + (void)vstidx2reg_map.emplace(std::make_pair(varMeExpr.GetExprID(), curtemp)); // set fields in MIRPreg to support rematerialization MIRPreg *preg = pregOst->GetMIRPreg(); preg->SetOp(OP_dread); @@ -152,10 +152,10 @@ RegMeExpr *SSARename2Preg::RenameVar(const VarMeExpr *varMeExpr) { auto *ost = varMeExpr->GetOst(); CHECK_FATAL(ost != nullptr, "null ptr check"); - auto varOst2RegOstIt = sym2reg_map.find(ost->GetIndex()); + auto varOst2RegOstIt = std::as_const(sym2reg_map).find(ost->GetIndex()); RegMeExpr *regForVarMeExpr = nullptr; - if (varOst2RegOstIt == sym2reg_map.end()) { - regForVarMeExpr = CreatePregForVar(varMeExpr); + if (varOst2RegOstIt == sym2reg_map.cend()) { 
+ regForVarMeExpr = CreatePregForVar(*varMeExpr); } else { OriginalSt *pregOst = varOst2RegOstIt->second; CHECK_FATAL(pregOst != nullptr, "null ptr check"); @@ -201,31 +201,31 @@ RegMeExpr *SSARename2Preg::FindOrCreatePregForVarPhiOpnd(const VarMeExpr *varMeE } // update regphinode operands -void SSARename2Preg::UpdateRegPhi(MePhiNode *mevarphinode, MePhiNode *regphinode, +void SSARename2Preg::UpdateRegPhi(MePhiNode &mevarphinode, MePhiNode ®phinode, const VarMeExpr *lhs) { // update phi's opnds - for (uint32 i = 0; i < mevarphinode->GetOpnds().size(); i++) { - auto *opndexpr = mevarphinode->GetOpnds()[i]; + for (uint32 i = 0; i < mevarphinode.GetOpnds().size(); i++) { + auto *opndexpr = mevarphinode.GetOpnds()[i]; ASSERT(opndexpr->GetOst()->GetIndex() == lhs->GetOst()->GetIndex(), "phi is not correct"); CHECK_FATAL(opndexpr->GetMeOp() == kMeOpVar, "opnd of Var-PhiNode must be VarMeExpr"); RegMeExpr *opndtemp = FindOrCreatePregForVarPhiOpnd(static_cast(opndexpr)); - regphinode->GetOpnds().push_back(opndtemp); + regphinode.GetOpnds().push_back(opndtemp); } (void)lhs; } -bool SSARename2Preg::Rename2PregPhi(MePhiNode *mevarphinode, MapleMap ®PhiList) { - VarMeExpr *lhs = static_cast(mevarphinode->GetLHS()); +bool SSARename2Preg::Rename2PregPhi(MePhiNode &mevarphinode, MapleMap ®PhiList) { + VarMeExpr *lhs = static_cast(mevarphinode.GetLHS()); SetupParmUsed(lhs); RegMeExpr *lhsreg = RenameVar(lhs); if (lhsreg == nullptr) { return false; } MePhiNode *regphinode = meirmap->CreateMePhi(*lhsreg); - regphinode->SetDefBB(mevarphinode->GetDefBB()); - UpdateRegPhi(mevarphinode, regphinode, lhs); - regphinode->SetIsLive(mevarphinode->GetIsLive()); - mevarphinode->SetIsLive(false); + regphinode->SetDefBB(mevarphinode.GetDefBB()); + UpdateRegPhi(mevarphinode, *regphinode, lhs); + regphinode->SetIsLive(mevarphinode.GetIsLive()); + mevarphinode.SetIsLive(false); (void) regPhiList.insert(std::make_pair(lhsreg->GetOst()->GetIndex(), regphinode)); return true; @@ -237,11 +237,13 @@ 
void SSARename2Preg::Rename2PregLeafRHS(MeStmt *mestmt, const VarMeExpr *varmeex if (varreg != nullptr) { if (varreg->GetPrimType() != varmeexpr->GetPrimType()) { varreg = meirmap->CreateMeExprTypeCvt(varmeexpr->GetPrimType(), varreg->GetPrimType(), *varreg); - } else if (static_cast(varreg)->IsZeroVersion() && GetPrimTypeSize(varreg->GetPrimType()) < 4) { + } else if (static_cast(varreg)->IsZeroVersion() && + GetPrimTypeSize(varreg->GetPrimType()) < k4BitSize) { // if reading garbage, need to truncate the garbage value Opcode extOp = IsSignedInteger(varreg->GetPrimType()) ? OP_sext : OP_zext; varreg = meirmap->CreateMeExprUnary(extOp, GetRegPrimType(varreg->GetPrimType()), *varreg); - static_cast(varreg)->SetBitsSize(static_cast(GetPrimTypeSize(varmeexpr->GetPrimType()) * 8)); + static_cast(varreg)->SetBitsSize( + static_cast(GetPrimTypeSize(varmeexpr->GetPrimType()) * k8BitSize)); } (void)meirmap->ReplaceMeExprStmt(*mestmt, *varmeexpr, *varreg); } @@ -493,7 +495,7 @@ void SSARename2Preg::RunSelf() { ++phiListIt; continue; } - if (!Rename2PregPhi(phiListIt->second, regPhiList)) { + if (!Rename2PregPhi(*(phiListIt->second), regPhiList)) { ++phiListIt; continue; } diff --git a/src/mapleall/maple_me/src/me_safety_warning.cpp b/src/mapleall/maple_me/src/me_safety_warning.cpp index f79fab14f2e8ad4d12e838be2ddd6475c7c717a6..fc5e64b4e49f0541f21ba18eb87fef80d5cc129b 100644 --- a/src/mapleall/maple_me/src/me_safety_warning.cpp +++ b/src/mapleall/maple_me/src/me_safety_warning.cpp @@ -267,12 +267,10 @@ bool MESafetyWarning::IsStaticModeForOp(Opcode op) const { return MeOption::boundaryCheckMode == SafetyCheckMode::kStaticCheck; default: CHECK_FATAL(false, "NEVER REACH"); - break; } - return false; } -SafetyWarningHandler *MESafetyWarning::FindHandler(Opcode op) { +SafetyWarningHandler *MESafetyWarning::FindHandler(Opcode op) const { auto handler = realNpeHandleMap->find(op); if (handler != realNpeHandleMap->end()) { return &handler->second; diff --git 
a/src/mapleall/maple_me/src/me_scalar_analysis.cpp b/src/mapleall/maple_me/src/me_scalar_analysis.cpp index 34cc4004621b6f7ca98e8c22cba1fc810bc5fd8d..25b69ec40ccbf1706fb78c6b65f6fbf3f2125ebe 100644 --- a/src/mapleall/maple_me/src/me_scalar_analysis.cpp +++ b/src/mapleall/maple_me/src/me_scalar_analysis.cpp @@ -83,7 +83,7 @@ static bool IsConstantMultipliedByVariable(const CRMulNode &crMulNode) { } // Get the byte size of array for simpify the crNodes. -uint8 LoopScalarAnalysisResult::GetByteSize(std::vector &crNodeVector) { +uint8 LoopScalarAnalysisResult::GetByteSize(std::vector &crNodeVector) const { for (size_t i = 0; i < crNodeVector.size(); ++i) { if (crNodeVector[i]->GetCRType() != kCRMulNode) { continue; @@ -98,7 +98,7 @@ uint8 LoopScalarAnalysisResult::GetByteSize(std::vector &crNodeVector) } // Get the prim type of index or bound, if not found, return PTY_i64. -PrimType LoopScalarAnalysisResult::GetPrimType(std::vector &crNodeVector) { +PrimType LoopScalarAnalysisResult::GetPrimType(std::vector &crNodeVector) const { for (size_t i = 0; i < crNodeVector.size(); ++i) { if (crNodeVector[i]->GetCRType() == kCRVarNode) { return crNodeVector[i]->GetExpr()->GetPrimType(); @@ -124,7 +124,7 @@ bool LoopScalarAnalysisResult::NormalizationWithByteCount(std::vector & if (value % static_cast(byteSize) != 0) { return false; } - value = value / byteSize; + value = value / static_cast(byteSize); crNodeVector[i] = GetOrCreateCRConstNode(nullptr, value); continue; } @@ -134,10 +134,10 @@ bool LoopScalarAnalysisResult::NormalizationWithByteCount(std::vector & return false; } auto value = static_cast(crMulNode->GetOpnd(0))->GetConstValue(); - if (value % byteSize != 0) { + if (value % static_cast(byteSize) != 0) { return false; } - value = value / byteSize; + value = value / static_cast(byteSize); if (value == 1) { crNodeVector[i] = crMulNode->GetOpnd(1); } else { @@ -395,8 +395,8 @@ CRNode *LoopScalarAnalysisResult::GetOrCreateCRConstNode(MeExpr *expr, int64 val 
allCRNodes.insert(std::move(constNode)); return constPtr; } - auto it = expr2CR.find(expr); - if (it != expr2CR.end()) { + const auto it = std::as_const(expr2CR).find(expr); + if (it != expr2CR.cend()) { return it->second; } @@ -408,8 +408,8 @@ CRNode *LoopScalarAnalysisResult::GetOrCreateCRConstNode(MeExpr *expr, int64 val } CRNode *LoopScalarAnalysisResult::GetOrCreateCRVarNode(MeExpr &expr) { - auto it = expr2CR.find(&expr); - if (it != expr2CR.end()) { + const auto it = std::as_const(expr2CR).find(&expr); + if (it != expr2CR.cend()) { return it->second; } @@ -447,8 +447,8 @@ CRNode* LoopScalarAnalysisResult::GetOrCreateCRAddNode(MeExpr *expr, const std:: crPtr->SetOpnds(crAddNodes); return crPtr; } - auto it = expr2CR.find(expr); - if (it != expr2CR.end()) { + const auto it = std::as_const(expr2CR).find(expr); + if (it != expr2CR.cend()) { return it->second; } std::unique_ptr crAdd = std::make_unique(expr); @@ -478,8 +478,8 @@ CRNode *LoopScalarAnalysisResult::GetOrCreateCR(MeExpr &expr, CRNode &start, CRN } CRNode *LoopScalarAnalysisResult::GetOrCreateCR(MeExpr *expr, const std::vector &crNodes) { - auto it = expr2CR.find(expr); - if (it != expr2CR.end()) { + const auto it = std::as_const(expr2CR).find(expr); + if (it != expr2CR.cend()) { return it->second; } std::unique_ptr cr = std::make_unique(expr); @@ -630,8 +630,8 @@ CRNode *LoopScalarAnalysisResult::GetCRMulNode(MeExpr *expr, std::vectorGetCRType() == kCRMulNode) { (void)crMulOpnds.insert(crMulOpnds.cend(), - static_cast(crMulOpnds[mulCRIndex])->GetOpnds().cbegin(), - static_cast(crMulOpnds[mulCRIndex])->GetOpnds().cend()); + static_cast(crMulOpnds[mulCRIndex])->GetOpnds().cbegin(), + static_cast(crMulOpnds[mulCRIndex])->GetOpnds().cend()); (void)crMulOpnds.erase(crMulOpnds.cbegin() + static_cast(mulCRIndex)); } return GetCRMulNode(expr, crMulOpnds); @@ -689,9 +689,9 @@ CRNode *LoopScalarAnalysisResult::GetCRMulNode(MeExpr *expr, std::vector(divCRIndex)); (void)crMulOpnds.erase(crMulOpnds.cbegin()); 
(void)crMulOpnds.insert(crMulOpnds.cend(), - GetOrCreateCRDivNode(nullptr, - *divCRNode->GetLHS(), - *GetOrCreateCRConstNode(nullptr, res))); + GetOrCreateCRDivNode(nullptr, + *divCRNode->GetLHS(), + *GetOrCreateCRConstNode(nullptr, res))); if (crMulOpnds.size() == 1) { return crMulOpnds[0]; } @@ -781,7 +781,6 @@ CRNode *LoopScalarAnalysisResult::ChangeNegative2MulCRNode(CRNode &crNode) { return GetCRMulNode(nullptr, crMulNodes); } - CR *LoopScalarAnalysisResult::AddCRWithCR(CR &lhsCR, CR &rhsCR) { std::unique_ptr cr = std::make_unique(nullptr); CR *crPtr = cr.get(); @@ -965,7 +964,7 @@ CRNode *LoopScalarAnalysisResult::CreateSimpleCRForPhi(const MePhiNode &phiNode, return GetOrCreateCR(*phiNode.GetLHS(), *start, *stride); } -CRNode *LoopScalarAnalysisResult::CreateCRForPhi(MePhiNode &phiNode) { +CRNode *LoopScalarAnalysisResult::CreateCRForPhi(const MePhiNode &phiNode) { if (loop == nullptr) { return nullptr; } @@ -1231,8 +1230,12 @@ uint64 LoopScalarAnalysisResult::ComputeTripCountWithSimpleConstCR(Opcode op, bo uint64 remainder = static_cast((value - start) % stride); switch (op) { case OP_ge: { - if (isSigned && start < value) { return 0; } - if (!isSigned && static_cast(start) < static_cast(value)) { return 0; } + if (isSigned && start < value) { + return 0; + } + if (!isSigned && static_cast(start) < static_cast(value)) { + return 0; + } // consider if there's overflow if (stride > 0) { if (isSigned || // undefined overflow @@ -1246,8 +1249,12 @@ uint64 LoopScalarAnalysisResult::ComputeTripCountWithSimpleConstCR(Opcode op, bo return static_cast(times + 1); } case OP_gt: { - if (isSigned && start <= value) { return 0; } - if (!isSigned && static_cast(start) <= static_cast(value)) { return 0; } + if (isSigned && start <= value) { + return 0; + } + if (!isSigned && static_cast(start) <= static_cast(value)) { + return 0; + } // consider if there's overflow if (stride > 0) { if (isSigned || // undefined overflow @@ -1260,8 +1267,12 @@ uint64 
LoopScalarAnalysisResult::ComputeTripCountWithSimpleConstCR(Opcode op, bo return static_cast(times + static_cast(remainder != 0)); } case OP_le: { - if (isSigned && start > value) { return 0; } - if (!isSigned && static_cast(start) > static_cast(value)) { return 0; } + if (isSigned && start > value) { + return 0; + } + if (!isSigned && static_cast(start) > static_cast(value)) { + return 0; + } // consider if there's underflow if (stride < 0) { if (isSigned || // undefined underflow @@ -1275,8 +1286,12 @@ uint64 LoopScalarAnalysisResult::ComputeTripCountWithSimpleConstCR(Opcode op, bo return static_cast(times + 1); } case OP_lt: { - if (isSigned && start >= value) { return 0; } - if (!isSigned && static_cast(start) >= static_cast(value)) { return 0; } + if (isSigned && start >= value) { + return 0; + } + if (!isSigned && static_cast(start) >= static_cast(value)) { + return 0; + } // consider if there's underflow if (stride < 0) { if (isSigned || // undefined underflow @@ -1286,21 +1301,31 @@ uint64 LoopScalarAnalysisResult::ComputeTripCountWithSimpleConstCR(Opcode op, bo // change to "i >= 0" to compute return ComputeTripCountWithSimpleConstCR(OP_ge, false, 0, start, stride); } - return static_cast(times + (remainder != 0)); + return static_cast(times + static_cast(remainder != 0)); } case OP_eq: { - if (start != value) { return 0; } + if (start != value) { + return 0; + } return 1; } case OP_ne: { - if (start == value) { return 0; } - if (remainder != 0) { return kInvalidTripCount; } // infinite loop + if (start == value) { + return 0; + } + if (remainder != 0) { + return kInvalidTripCount; + } // infinite loop if (stride < 0) { // consider if there's underflow - if (isSigned && start < value) { return kInvalidTripCount; } // undefined underflow + if (isSigned && start < value) { + return kInvalidTripCount; + } // undefined underflow } else { // consider if there's overflow - if (isSigned && start > value) { return kInvalidTripCount; } // undefined overflow + if 
(isSigned && start > value) { + return kInvalidTripCount; + } // undefined overflow } return static_cast(times); } @@ -1316,7 +1341,7 @@ void LoopScalarAnalysisResult::SortOperatorCRNode(std::vector &crNodeOp } void LoopScalarAnalysisResult::PutTheAddrExprAtTheFirstOfVector( - std::vector &crNodeOperands, const MeExpr &addrExpr) { + std::vector &crNodeOperands, const MeExpr &addrExpr) const { if (crNodeOperands.size() >= 1 && crNodeOperands[0]->GetExpr() == &addrExpr) { return; } diff --git a/src/mapleall/maple_me/src/me_side_effect.cpp b/src/mapleall/maple_me/src/me_side_effect.cpp index 7e579af30d2a7854296a7cdfab03f4e694fb66a4..8c14024e6bff6a8fc3863af066998158f80dd383 100644 --- a/src/mapleall/maple_me/src/me_side_effect.cpp +++ b/src/mapleall/maple_me/src/me_side_effect.cpp @@ -74,7 +74,7 @@ bool IpaSideEffect::IsIgnoreMethod(const MIRFunction &func) { if (func.IsAbstract()) { return true; } - Klass *klass = callGraph.GetKlassh()->GetKlassFromFunc(&func); + Klass *klass = callGraph.GetKlassh().GetKlassFromFunc(&func); if (klass == nullptr) { // An array, must have method, but has all effects SetEffectsTrue(); @@ -84,7 +84,7 @@ bool IpaSideEffect::IsIgnoreMethod(const MIRFunction &func) { return std::find(methods.begin(), methods.end(), &func) == methods.end(); } -void IpaSideEffect::CopySccSideEffectToAllFunctions(SCCNode &scc, uint8 seMask) { +void IpaSideEffect::CopySccSideEffectToAllFunctions(SCCNode &scc, uint8 seMask) const { // For all members of the SCC, copy the sum of the side effect of SCC to each member func. 
for (auto &sccIt : scc.GetNodes()) { CGNode *cgNode = sccIt; @@ -97,16 +97,16 @@ void IpaSideEffect::CopySccSideEffectToAllFunctions(SCCNode &scc, uint8 CHECK_FATAL(func->IsNoDefEffect() || (seMask & kHasDef) == kHasDef, "Must be true."); CHECK_FATAL(func->IsNoThrowException() || (seMask & kHasThrow) == kHasThrow, "Must be true."); CHECK_FATAL(func->IsNoPrivateDefEffect() || (seMask & kHasPrivateDef) == kHasPrivateDef, "Must be true."); - if (seMask & kNotPure) { + if ((seMask & kNotPure) != 0) { func->UnsetPure(); } - if (seMask & kHasDef) { + if ((seMask & kHasDef) != 0) { func->UnsetNoDefEffect(); } - if (seMask & kHasThrow) { + if ((seMask & kHasThrow) != 0) { func->UnsetNoThrowException(); } - if (seMask & kHasPrivateDef) { + if ((seMask & kHasPrivateDef) != 0) { func->UnsetNoPrivateDefEffect(); } } @@ -116,8 +116,8 @@ void IpaSideEffect::GetEffectFromCallee(MIRFunction &callee, const MIRFunction & uint32 calleeScc = GetOrSetSCCNodeId(callee); if (IsCallingIntoSCC(calleeScc)) { // Call graph ensures that all methods in SCC are visited before a call into the SCC. 
- auto it = sccSe.find(calleeScc); - CHECK_FATAL(it != sccSe.end(), "Sideeffect of scc must have been set."); + const auto it = std::as_const(sccSe).find(calleeScc); + CHECK_FATAL(it != sccSe.cend(), "Sideeffect of scc must have been set."); uint8 mask = it->second; hasDefArg = hasDefArg || (mask & kHasDefArg); @@ -332,8 +332,8 @@ bool IpaSideEffect::MatchPuidxAndSetSideEffects(PUIdx idx) { if (idx == 0) { return false; } - auto mrtIt = mrtPuIdx.find(idx); - if (mrtIt == mrtPuIdx.end()) { + const auto mrtIt = std::as_const(mrtPuIdx).find(idx); + if (mrtIt == mrtPuIdx.cend()) { return false; } uint8 mrtSe = mrtIt->second; @@ -358,7 +358,7 @@ bool IpaSideEffect::IsPureFromSummary(const MIRFunction &func) const { } -void IpaSideEffect::ReadSummary() { +void IpaSideEffect::ReadSummary() const { if (mrtListSz != 0) { return; } @@ -452,7 +452,7 @@ void IpaSideEffect::UpdateExternalFuncSideEffects(MIRFunction &func) { auto callerIt = callGraph.GetNodesMap().find(&func); CHECK_FATAL(callerIt != callGraph.GetNodesMap().end(), "CGNode not found."); CGNode *cgNode = callerIt->second; - for (auto &callSite : cgNode->GetCallee()) { + for (auto &callSite : std::as_const(cgNode->GetCallee())) { // IPASEEN == true, body == NULL; // IPASEEN == true, body != NULL; // IPASEEN == false, body != NULL, ignore @@ -793,7 +793,7 @@ bool IpaSideEffect::UpdateSideEffectWithStmt(MeStmt &meStmt, bool defArg = false; bool returnGlobal = false; bool returnArg = false; - for (auto &callSite : callerNode->GetCallee()) { + for (auto &callSite : std::as_const(callerNode->GetCallee())) { if (callSite.first->GetID() == callMeStmt.GetStmtID()) { for (auto *calleeNode : *callSite.second) { MIRFunction *calleeFunc = calleeNode->GetMIRFunction(); @@ -884,7 +884,7 @@ bool IpaSideEffect::UpdateSideEffectWithStmt(MeStmt &meStmt, CGNode *callerNode = callGraph.GetCGNode(meFunc.GetMirFunc()); CHECK_FATAL(callerNode != nullptr, "Must not be null"); bool defArg = false; - for (Callsite callSite : 
callerNode->GetCallee()) { + for (auto &callSite : std::as_const(callerNode->GetCallee())) { if (callSite.first->GetID() == callMeStmt.GetStmtID()) { for (auto *calleeNode : *callSite.second) { MIRFunction *calleeFunc = calleeNode->GetMIRFunction(); @@ -1062,8 +1062,8 @@ void IpaSideEffect::DoAnalysis() { uint8 mask = 0; sccId = GetOrSetSCCNodeId(*func); if (sccId != 0) { - auto itTmp = sccSe.find(sccId); - if (itTmp != sccSe.end()) { + const auto itTmp = std::as_const(sccSe).find(sccId); + if (itTmp != sccSe.cend()) { mask = itTmp->second; } } diff --git a/src/mapleall/maple_me/src/me_sink.cpp b/src/mapleall/maple_me/src/me_sink.cpp index e8ebde18763cf87b035ba8a749becd549a1f37be..a57fa0a90f834a5844547bd55dea1c86b9bd67be 100644 --- a/src/mapleall/maple_me/src/me_sink.cpp +++ b/src/mapleall/maple_me/src/me_sink.cpp @@ -83,10 +83,11 @@ class MeSink { void ProcessPhiList(BB *bb); void SinkStmtsInBB(BB *bb); - const BB *BestSinkBB(const BB *fromBB, const BB *toBB); - std::pair CalCandSinkBBForUseSites(const ScalarMeExpr *scalar, const UseSitesType &useList); + const BB *BestSinkBB(const BB *fromBB, const BB *toBB) const; + std::pair CalCandSinkBBForUseSites(const ScalarMeExpr *scalar, const UseSitesType &useList) const; const BB *CalSinkSiteOfScalarDefStmt(const ScalarMeExpr *scalar); void CalSinkSites(); + void Run(); private: @@ -265,17 +266,17 @@ bool MeSink::MergeAssignStmtWithCallAssign(AssignMeStmt *assign, MeStmt *callAss } lhs->SetDefBy(kDefByMustDef); - lhs->SetDefMustDef(const_cast(mustDefNode)); - const_cast(mustDefNode).SetLHS(lhs); + lhs->SetDefMustDef(static_cast(mustDefNode)); + static_cast(mustDefNode).SetLHS(lhs); // merge chiList of copyStmt and CallAssignStmt auto *chiList = assign->GetChiList(); if (chiList != nullptr) { auto *chiListOfCall = assign->GetChiList(); - for (auto &ost2chi : std::as_const(*chiList)) { + for (auto &ost2chi : *chiList) { auto it = chiListOfCall->find(ost2chi.first); if (it == chiListOfCall->end()) { 
(void)chiListOfCall->emplace(ost2chi.first, ost2chi.second); - ost2chi.second->SetBase(const_cast(callAssignStmt)); + ost2chi.second->SetBase(static_cast(callAssignStmt)); } else { it->second->SetLHS(ost2chi.second->GetLHS()); ost2chi.second->GetLHS()->SetDefChi(*it->second); @@ -1001,16 +1002,14 @@ static void CollectUsedScalar(MeExpr *expr, ScalarVec &scalarVec) { } break; } - default: { - for (size_t opndId = 0; opndId < expr->GetNumOpnds(); ++opndId) { - auto opnd = expr->GetOpnd(opndId); - if (opnd == nullptr) { - continue; - } - CollectUsedScalar(opnd, scalarVec); - } - break; + default: break; + } + for (size_t opndId = 0; opndId < expr->GetNumOpnds(); ++opndId) { + auto opnd = expr->GetOpnd(opndId); + if (opnd == nullptr) { + continue; } + CollectUsedScalar(opnd, scalarVec); } } @@ -1052,7 +1051,7 @@ static bool BBIsEmptyOrContainsSingleGoto(const BB *bb) { // we should not sink stmt from non-loop BB into loop BB or from outter loop into inner loop. // if toBB is in a different loop with fromBB, return a dominator of toBB which is in the same loop with fromBB. 
-const BB *MeSink::BestSinkBB(const BB *fromBB, const BB *toBB) { +const BB *MeSink::BestSinkBB(const BB *fromBB, const BB *toBB) const { CHECK_FATAL(domTree->Dominate(*fromBB, *toBB), "fromBB must dom toBB"); if (fromBB == toBB) { return toBB; @@ -1086,7 +1085,8 @@ void MeSink::RecordStmtSinkToBottomOfTargetBB(MeStmt *defStmt, const BB *targetB defStmtsSinkToBottom[targetBB->GetBBId()]->push_front(defStmt); } -std::pair MeSink::CalCandSinkBBForUseSites(const ScalarMeExpr *scalar, const UseSitesType &useList) { +std::pair MeSink::CalCandSinkBBForUseSites(const ScalarMeExpr *scalar, + const UseSitesType &useList) const { if (useList.empty()) { return {nullptr, false}; } @@ -1185,6 +1185,15 @@ const BB *MeSink::CalSinkSiteOfScalarDefStmt(const ScalarMeExpr *scalar) { if (candSinkBB == defBB) { return nullptr; } + // also try to sink to necessary succ BB + if (defBB->GetSucc().size() > 1) { + for (auto *succ : defBB->GetSucc()) { + if (succ != candSinkBB && domTree->Dominate(*succ, *candSinkBB)) { + RecordStmtSinkToHeaderOfTargetBB(defStmt, succ); + break; + } + } + } RecordStmtSinkToHeaderOfTargetBB(defStmt, candSinkBB); } return candSinkBB; @@ -1434,9 +1443,9 @@ void MeSink::Run() { } void MEMeSink::GetAnalysisDependence(maple::AnalysisDep &aDep) const { - aDep.AddRequired(); - aDep.AddRequired(); - aDep.SetPreservedAll(); + aDep.AddRequired(); + aDep.AddRequired(); + aDep.SetPreservedAll(); } bool MEMeSink::PhaseRun(maple::MeFunction &f) { diff --git a/src/mapleall/maple_me/src/me_slp.cpp b/src/mapleall/maple_me/src/me_slp.cpp index 792681a5083ad93bcc72d14e6fe49504876d779a..7f41e9e129698cec453315a994fc5e891cd299ac 100644 --- a/src/mapleall/maple_me/src/me_slp.cpp +++ b/src/mapleall/maple_me/src/me_slp.cpp @@ -18,6 +18,7 @@ #include "me_irmap.h" #include "me_dominance.h" #include "common_utils.h" +#include "aarch64/aarch64_imm_valid.h" #define SLP_DEBUG(X) \ do { if (debug) { LogInfo::MapleLogger() << "[SLP] "; (X); } } while (false) @@ -44,11 +45,6 @@ const 
std::vector supportedIntrns = {INTRN_C_rev_4, INTRN_C_rev_ std::vector *localSymOffsetTab = nullptr; } // anonymous namespace -namespace maple { -using namespace maplebe; -#include "immvalid.def" -} - namespace maple { // A wrapper class of meExpr with its defStmt, this can avoid repeated searches for use-def chains constexpr int8 k8Bit = 8; @@ -76,7 +72,7 @@ class ExprWithDef { } private: - void FindDef(ScalarMeExpr *scalarExpr, BB *bb) { + void FindDef(const ScalarMeExpr *scalarExpr, const BB *bb) { if (scalarExpr->GetDefBy() != kDefByStmt) { return; } @@ -137,14 +133,15 @@ void MemoryHelper::ExtractAddendOffset(const MeExpr &expr, bool isNeg, MemLoc &m CHECK_FATAL(type->GetKind() == kTypePointer, "must be"); auto *pointedType = static_cast(type)->GetPointedType(); CHECK_FATAL(pointedType->GetKind() == kTypeStruct || pointedType->GetKind() == kTypeUnion, "must be"); - auto bitOffset = static_cast(pointedType)->GetBitOffsetFromBaseAddr(fieldId); + auto bitOffset = static_cast(static_cast(pointedType)-> + GetBitOffsetFromBaseAddr(fieldId)); memLoc.offset += (bitOffset / k8Bit); } ExtractAddendOffset(*expr.GetOpnd(0), isNeg, memLoc); break; } case OP_constval: { - auto val = static_cast(expr).GetExtIntValue(); + auto val = static_cast(static_cast(expr).GetExtIntValue()); memLoc.offset += (isNeg ? 
-val : val); break; } @@ -210,7 +207,7 @@ MemLoc *MemoryHelper::GetMemLoc(VarMeExpr &var) { memLoc.offset = ost->GetOffset().val / k8Bit; memLoc.type = ost->GetType(); memLoc.base = alloc.GetMemPool()->New(); - memLoc.base->SetBaseOstIdx(prevLevOst->GetIndex().get()); + memLoc.base->SetBaseOstIdx(static_cast(prevLevOst->GetIndex().get())); // unique memLoc base UniqueMemLocBase(memLoc); @@ -254,7 +251,7 @@ MemLoc *MemoryHelper::GetMemLoc(IvarMeExpr &ivar) { // pointedType is either kTypeStruct or kTypeUnion ASSERT(pointedType->GetKind() == kTypeStruct || pointedType->GetKind() == kTypeUnion, "must be"); auto *structType = static_cast(pointedType); - auto bitOffset = structType->GetBitOffsetFromBaseAddr(ivar.GetFieldID()); + auto bitOffset = static_cast(structType->GetBitOffsetFromBaseAddr(ivar.GetFieldID())); extraOffset += (bitOffset / k8Bit); } extraOffset += ivar.GetOffset(); @@ -322,18 +319,18 @@ bool MemoryHelper::MustHaveNoOverlap(const MemLoc &mem1, const MemLoc &mem2) { } int32 offset1 = mem1.offset; int32 offset2 = mem2.offset; - uint32 size1 = mem1.type->GetSize(); - uint32 size2 = mem2.type->GetSize(); + uint32 size1 = static_cast(mem1.type->GetSize()); + uint32 size2 = static_cast(mem2.type->GetSize()); // overlapping case one: // mem1: |------| // mem2: |------| - if (offset1 <= offset2 && (offset2 - offset1) < size1) { + if (offset1 <= offset2 && static_cast(offset2 - offset1) < size1) { return false; } // overlapping case two: // mem1: |------| // mem2: |------| - if (offset2 < offset1 && (offset1 - offset2) < size2) { + if (offset2 < offset1 && static_cast(offset1 - offset2) < size2) { return false; } return true; @@ -366,7 +363,7 @@ bool MemoryHelper::IsAllIvarConsecutive(const std::vector &ivarVec, boo return true; } -int32 GetLocalSymApproximateOffset(MIRSymbol *sym, MeFunction &func) { +int32 GetLocalSymApproximateOffset(const MIRSymbol *sym, MeFunction &func) { if (localSymOffsetTab == nullptr) { localSymOffsetTab = new std::vector(); } @@ 
-375,7 +372,7 @@ int32 GetLocalSymApproximateOffset(MIRSymbol *sym, MeFunction &func) { if (symIdx < offsetTab.size()) { return offsetTab[symIdx]; } - for (uint32 i = offsetTab.size(); i <= symIdx; ++i) { + for (uint32 i = static_cast(offsetTab.size()); i <= symIdx; ++i) { if (i == 0) { offsetTab.push_back(0); continue; @@ -387,7 +384,7 @@ int32 GetLocalSymApproximateOffset(MIRSymbol *sym, MeFunction &func) { } // align 8 bytes auto sizeAfterAlign = (lastSym->GetType()->GetSize() + 7) & -8; - offsetTab.push_back(offsetTab[i - 1] + sizeAfterAlign); + offsetTab.push_back(offsetTab[i - 1] + static_cast(sizeAfterAlign)); } return offsetTab[symIdx]; } @@ -418,14 +415,15 @@ std::optional EstimateStackOffsetOfMemLoc(MemLoc *memLoc, MeFunction &fun return {}; } +#if defined(TARGAARCH64) && TARGAARCH64 // This is target-dependent function for armv8, maybe we need abstract it TargetInfo bool IsIntStpLdpOffsetValid(uint32 typeBitSize, int32 offset) { switch (typeBitSize) { case 32: { - return StrLdr32PairImmValid(offset); + return maplebe::StrLdr32PairImmValid(offset); } case 64: { - return StrLdr64PairImmValid(offset); + return maplebe::StrLdr64PairImmValid(offset); } default: { CHECK_FATAL(false, "should not be here"); @@ -434,24 +432,26 @@ bool IsIntStpLdpOffsetValid(uint32 typeBitSize, int32 offset) { } return false; } +#endif +#if defined(TARGAARCH64) && TARGAARCH64 // This is target-dependent function for armv8, maybe we need abstract it TargetInfo bool IsVecStrLdrOffsetValid(uint32 typeBitSize, int32 offset) { switch (typeBitSize) { case 8: { - return StrLdr8ImmValid(offset); + return maplebe::StrLdr8ImmValid(offset); } case 16: { - return StrLdr16ImmValid(offset); + return maplebe::StrLdr16ImmValid(offset); } case 32: { - return StrLdr32ImmValid(offset); + return maplebe::StrLdr32ImmValid(offset); } case 64: { - return StrLdr64ImmValid(offset); + return maplebe::StrLdr64ImmValid(offset); } case 128: { - return StrLdr128ImmValid(offset); + return 
maplebe::StrLdr128ImmValid(offset); } default: { CHECK_FATAL(false, "should not be here"); @@ -460,6 +460,7 @@ bool IsVecStrLdrOffsetValid(uint32 typeBitSize, int32 offset) { } return false; } +#endif // ------------------- // // Assign Stmt Split // @@ -699,7 +700,7 @@ ScalarMeExpr* DoFindPreVersionByAlias(ScalarMeExpr &aliasPreVersion, ScalarMeExp ScalarMeExpr *nextExpr = nullptr; auto *chiList = defStmt->GetChiList(); if (chiList != nullptr) { - auto it = chiList->find(targetOstIdx); + const auto it = std::as_const(chiList)->find(targetOstIdx); if (it != chiList->end()) { return it->second->GetRHS(); } @@ -864,7 +865,7 @@ class TreeNode { auto *minMemLoc = GetMinMemLoc(); auto typeSize = minMemLoc->type->GetSize(); for (auto *memLoc : memLocs) { - order.push_back((memLoc->offset - minMemLoc->offset) / typeSize); + order.push_back(static_cast((memLoc->offset - minMemLoc->offset)) / typeSize); } } else if (firstStmt->GetRHS() != nullptr && firstStmt->GetRHS()->GetMeOp() == kMeOpIvar) { // Init load memLoc @@ -876,7 +877,7 @@ class TreeNode { auto *minMemLoc = GetMinMemLoc(); auto typeSize = minMemLoc->type->GetSize(); for (auto *memLoc : memLocs) { - order.push_back((memLoc->offset - minMemLoc->offset) / typeSize); + order.push_back(static_cast((memLoc->offset - minMemLoc->offset)) / typeSize); } } SetParent(parentNode); @@ -1235,7 +1236,7 @@ class SLPTree { } void AddTreeNode(TreeNode *treeNode) { - treeNode->SetId(treeNodeVec.size()); + treeNode->SetId(static_cast(treeNodeVec.size())); treeNodeVec.push_back(treeNode); } @@ -1313,7 +1314,7 @@ PrimType TreeNode::GetType() const { } uint32 TreeNode::GetBundleSize() const { - return GetLane() * GetPrimTypeBitSize(tree.GetType()); + return static_cast(GetLane() * GetPrimTypeBitSize(tree.GetType())); } // Only valid for load/store treeNode for now @@ -1330,8 +1331,12 @@ int32 TreeNode::GetScalarCost() const { offset = stackOffset.value(); } } +#if defined(TARGAARCH64) && TARGAARCH64 // If stp/ldp for stack memory, we 
think that an extra insn is always needed for preparing offset bool offsetValid = IsIntStpLdpOffsetValid(GetPrimTypeBitSize(GetType()), offset); +#else + bool offsetValid = false; +#endif if (!offsetValid) { cost += 1; // invalid offset, an extra instruction is needed to prepare offset const if (IsStore()) { @@ -1356,10 +1361,14 @@ int32 TreeNode::GetVectorCost() const { ASSERT(IsLoad() || IsStore(), "must be"); ASSERT(CanVectorized(), "must be"); int32 cost = 1; +#if defined(TARGAARCH64) && TARGAARCH64 int32 offset = GetMinMemLoc()->offset; if (!IsVecStrLdrOffsetValid(GetBundleSize(), offset)) { cost += 1; } +#else + cost += 1; +#endif return cost; } @@ -1387,6 +1396,7 @@ int32 TreeNode::GetExternalUserCost() const { return externalUserCost; } +#if defined(TARGAARCH64) && TARGAARCH64 // Wether the constval is in the range of insn's imm field static bool IsConstvalInRangeOfInsnImm(ConstMeExpr *constExpr, Opcode op, uint32 typeBitSize) { if (constExpr->GetConstVal()->GetKind() != kConstInt) { @@ -1396,14 +1406,14 @@ static bool IsConstvalInRangeOfInsnImm(ConstMeExpr *constExpr, Opcode op, uint32 switch (op) { case OP_add: case OP_sub: - return Imm12BitValid(val); + return maplebe::Imm12BitValid(val); case OP_band: case OP_bior: case OP_bxor: { if (typeBitSize == 32) { - return Imm12BitMaskValid(val); + return maplebe::Imm12BitMaskValid(val); } else if (typeBitSize == 64) { - return Imm13BitMaskValid(val); + return maplebe::Imm13BitMaskValid(val); } break; } @@ -1412,6 +1422,7 @@ static bool IsConstvalInRangeOfInsnImm(ConstMeExpr *constExpr, Opcode op, uint32 } return false; } +#endif int32 TreeNode::GetCost() const { bool isLoadOrStore = IsLoad() || IsStore(); @@ -1422,20 +1433,22 @@ int32 TreeNode::GetCost() const { return kHugeCost; } if (!CanVectorized()) { - return GetLane(); + return static_cast(GetLane()); } if (op == OP_constval) { if (SameExpr()) { +#if defined(TARGAARCH64) && TARGAARCH64 auto typeBitSize = GetPrimTypeBitSize(GetType()); if 
(IsConstvalInRangeOfInsnImm(static_cast(exprs[0]), GetParent()->GetOp(), typeBitSize)) { return 2; // no extra constval assign insn is needed, for example, sub x1, x1, #1 } +#endif return 1; } else { - return GetLane(); + return static_cast(GetLane()); } } - int32 cost = -(GetLane() - 1); + int32 cost = -(static_cast(GetLane() - 1)); if (op == OP_ashr) { // aarch64 only vector shl(register), lowering vector shr will introduce an extra vector neg instruction. cost += 1; @@ -1474,7 +1487,7 @@ int32 SLPTree::GetCost() const { return cost; } -void PrintTreeNode(TreeNode *node, MeFunction &func, bool inDot) { +void PrintTreeNode(const TreeNode *node, MeFunction &func, bool inDot) { if (inDot) { LogInfo::MapleLogger() << "Node" << node->GetId() << " [shape=record" << (node->GetCost() > 0 ? ",style=bold,color=red" : "") << ",label=\"{"; @@ -1534,7 +1547,7 @@ struct BlockScheduling { SetOrderId(stmt2, tmp); } - bool IsRegionEmpty(MeStmt *beginStmt, MeStmt *endStmt) const { + bool IsRegionEmpty(const MeStmt *beginStmt, const MeStmt *endStmt) const { return beginStmt == endStmt; } @@ -1553,39 +1566,39 @@ struct BlockScheduling { } } - bool IsStmtInRegion(MeStmt *stmt, MeStmt *beginStmt, MeStmt *endStmt) { + bool IsStmtInRegion(MeStmt &stmt, MeStmt *beginStmt, MeStmt *endStmt) const { if (IsRegionEmpty(beginStmt, endStmt)) { return false; } - if (stmt->GetBB() != bb) { + if (stmt.GetBB() != bb) { return false; } - auto id = GetOrderId(stmt); + auto id = GetOrderId(&stmt); if (id >= GetOrderId(beginStmt) && id <= GetOrderId(endStmt)) { return true; } return false; } - MeStmt *FindAnyStmtInRegion(const std::vector &meStmtVec, MeStmt *beginStmt, MeStmt *endStmt) { + MeStmt *FindAnyStmtInRegion(const std::vector &meStmtVec, MeStmt *beginStmt, MeStmt *endStmt) const { for (auto *stmt : meStmtVec) { - if (IsStmtInRegion(stmt, beginStmt, endStmt)) { + if (IsStmtInRegion(*stmt, beginStmt, endStmt)) { return stmt; } } return nullptr; } - bool IsAnyStmtInRegion(const std::vector 
&meStmtVec, MeStmt *beginStmt, MeStmt *endStmt) { + bool IsAnyStmtInRegion(const std::vector &meStmtVec, MeStmt *beginStmt, MeStmt *endStmt) const { for (auto *stmt : meStmtVec) { - if (IsStmtInRegion(stmt, beginStmt, endStmt)) { + if (IsStmtInRegion(*stmt, beginStmt, endStmt)) { return true; } } return false; } - bool IsOstUsedByStmt(OriginalSt *ost, MeStmt *stmt); + bool IsOstUsedByStmt(OriginalSt *ost, MeStmt *stmt) const; void ScheduleStmtBefore(MeStmt *stmt, MeStmt *anchor, bool needRectifyChiList = false) { CHECK_FATAL(stmt->GetBB() == anchor->GetBB(), "must belong to same BB"); @@ -1597,7 +1610,7 @@ struct BlockScheduling { } // should use signed id for (int32 id = stmtOrderId - 1; id >= anchorOrderId; --id) { - SwapStmts(stmt, stmtVec[id]); + SwapStmts(stmt, stmtVec[static_cast(id)]); } if (debug) { VerifyScheduleResult(GetOrderId(stmt), GetOrderId(anchor)); @@ -1610,7 +1623,7 @@ struct BlockScheduling { void VerifyScheduleResult(uint32 firstOrderId, uint32 lastOrderId); // Collect use info from `begin` to `end` (not include `end`) in current BB // These use info are needed by dependency analysis and stmt scheduling - void RebuildUseInfo(MeStmt *begin, MeStmt *end, MapleAllocator &alloc); + void RebuildUseInfo(MeStmt *begin, const MeStmt *end, MapleAllocator &alloc); void ExtendUseInfo(); }; @@ -1666,7 +1679,7 @@ void GetDependencyStmts(MeStmt *stmt, std::vector &depsOfOpnd, std::vec } auto *chiList = stmt->GetChiList(); if (chiList != nullptr) { - for (auto &chiNodePair : *chiList) { + for (auto &chiNodePair : std::as_const(*chiList)) { auto *chiNode = chiNodePair.second; auto *defStmt = chiNode->GetRHS()->GetDefByMeStmt(); if (defStmt != nullptr) { @@ -1677,7 +1690,7 @@ void GetDependencyStmts(MeStmt *stmt, std::vector &depsOfOpnd, std::vec auto *muList = stmt->GetMuList(); if (muList != nullptr) { - for (auto &muPair : *muList) { + for (auto &muPair : std::as_const(*muList)) { auto *mu = muPair.second; auto *defStmt = mu->GetDefByMeStmt(); if (defStmt != 
nullptr) { @@ -1687,24 +1700,24 @@ void GetDependencyStmts(MeStmt *stmt, std::vector &depsOfOpnd, std::vec } } -void GetOstsUsed(MeExpr *expr, std::unordered_set &ostsUsed) { - Opcode op = expr->GetOp(); +void GetOstsUsed(const MeExpr &expr, std::unordered_set &ostsUsed) { + Opcode op = expr.GetOp(); if (op == OP_regread) { // we don't need to consider defPhi, we only focus on BB level use-def - auto *ost = static_cast(expr)->GetOst(); + auto *ost = static_cast(expr).GetOst(); ostsUsed.insert(ost); } - for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { - auto *opnd = expr->GetOpnd(i); - GetOstsUsed(opnd, ostsUsed); + for (size_t i = 0; i < expr.GetNumOpnds(); ++i) { + auto *opnd = expr.GetOpnd(i); + GetOstsUsed(*opnd, ostsUsed); } } -void GetOstsUsed(MeStmt *stmt, std::unordered_set &ostsUsed) { +void GetOstsUsed(const MeStmt &stmt, std::unordered_set &ostsUsed) { // We only consider stmt opnd, skip chiList, because there is always no chi for preg - for (size_t i = 0; i < stmt->NumMeStmtOpnds(); ++i) { - auto *opnd = stmt->GetOpnd(i); - GetOstsUsed(opnd, ostsUsed); + for (size_t i = 0; i < stmt.NumMeStmtOpnds(); ++i) { + auto *opnd = stmt.GetOpnd(i); + GetOstsUsed(*opnd, ostsUsed); } } @@ -1713,7 +1726,7 @@ void GetUseStmtsOfChiRhs(const MeExprUseInfo &useInfo, MeStmt *stmt, std::vector if (chiList == nullptr) { return; } - for (auto &chiNodePair : *chiList) { + for (auto &chiNodePair : std::as_const(*chiList)) { auto *chiNode = chiNodePair.second; auto *chiRhs = chiNode->GetRHS(); // Get use stmt of chiRhs @@ -1755,7 +1768,7 @@ void BlockScheduling::VerifyScheduleResult(uint32 firstOrderId, uint32 lastOrder } // `end` may be nullptr -void BlockScheduling::RebuildUseInfo(MeStmt *begin, MeStmt *end, MapleAllocator &alloc) { +void BlockScheduling::RebuildUseInfo(MeStmt *begin, const MeStmt *end, MapleAllocator &alloc) { CHECK_NULL_FATAL(begin); CHECK_FATAL(begin->GetBB() == bb, "begin stmt must belong to current BB"); exprUseInfo = alloc.New(alloc.GetMemPool()); @@ 
-1780,9 +1793,9 @@ void BlockScheduling::ExtendUseInfo() { extendUseInfo = true; } -bool BlockScheduling::IsOstUsedByStmt(OriginalSt *ost, MeStmt *stmt) { +bool BlockScheduling::IsOstUsedByStmt(OriginalSt *ost, MeStmt *stmt) const { std::unordered_set ostsUsed; - GetOstsUsed(stmt, ostsUsed); + GetOstsUsed(*stmt, ostsUsed); return ostsUsed.find(ost) != ostsUsed.end(); } @@ -2174,7 +2187,7 @@ class SLPVectorizer { bool DoVectorizeSlicedStores(StoreVec &storeVec, uint32 begin, uint32 end, bool onlySchedule = false); bool TryScheduleTogehter(const std::vector &stmts); - bool BuildTree(std::vector &stmts); + void BuildTree(std::vector &stmts); TreeNode *BuildTreeRec(std::vector &exprVec, uint32 depth, TreeNode *parentNode); bool VectorizeTreeNode(TreeNode *treeNode); @@ -2298,7 +2311,7 @@ void SLPVectorizer::VectorizeStores(StoreVec &storeVec) { continue; } // num > 2, try to vectorize storeSlice - VectorizeCompatibleStores(storeVec, i, j); + VectorizeCompatibleStores(storeVec, static_cast(i), static_cast(j)); i = j; } } @@ -2433,10 +2446,7 @@ bool SLPVectorizer::DoVectorizeSlicedStores(StoreVec &storeVec, uint32 begin, ui for (uint32 i = begin; i < end; ++i) { stmts.push_back(storeVec[i]->stmt); } - bool res = BuildTree(stmts); - if (!res) { - CHECK_FATAL(false, "should not be here"); - } + BuildTree(stmts); if (onlySchedule) { SLP_DEBUG(os << "onlySchedule save result" << std::endl); CodeMotionSaveSchedulingResult(); @@ -2746,8 +2756,9 @@ bool SLPVectorizer::TryScheduleTogehter(const std::vector &stmts) { std::sort(sortedStmts.begin(), sortedStmts.end(), [](MeStmt *a, MeStmt *b) { return GetOrderId(a) < GetOrderId(b); }); - for (int32 i = sortedStmts.size() - 2; i >= 0; --i) { - bool res = blockScheduling->TryScheduleTwoStmtsTogether(sortedStmts[i], sortedStmts[i + 1]); + for (int32 i = static_cast(sortedStmts.size()) - 2; i >= 0; --i) { // i should be int + bool res = blockScheduling->TryScheduleTwoStmtsTogether(sortedStmts[static_cast(i)], + 
sortedStmts[static_cast(i + 1)]); if (!res) { return false; // Schedule fail } @@ -2757,14 +2768,14 @@ bool SLPVectorizer::TryScheduleTogehter(const std::vector &stmts) { // Building SLP tree // input: sorted stmts by memLoc offset -bool SLPVectorizer::BuildTree(std::vector &stmts) { +void SLPVectorizer::BuildTree(std::vector &stmts) { tree = tmpAlloc->New(*tmpAlloc, memoryHelper, func, *blockScheduling); SLP_DEBUG(os << "Build tree node for " << GetOpName(stmts[0]->GetOp()) << std::endl); CHECK_FATAL(stmts.size() >= k2BitSize, "must be"); if (!TryScheduleTogehter(stmts)) { SLP_FAILURE_DEBUG(os << "Scheduling failure" << std::endl); - return true; + return; } SLP_DEBUG(os << "Scheduling OK, building tree node..." << std::endl); auto *rootNode = tree->CreateTreeNodeByStmts(stmts, nullptr); @@ -2783,7 +2794,6 @@ bool SLPVectorizer::BuildTree(std::vector &stmts) { os << "===== Print tree in text foramt =====" << std::endl; PrintSLPTree(rootNode, func, false); } - return true; } TreeNode *SLPVectorizer::BuildTreeRec(std::vector &exprVec, uint32 depth, TreeNode *parentNode) { @@ -2820,7 +2830,7 @@ TreeNode *SLPVectorizer::BuildTreeRec(std::vector &exprVec, uint32 if (currOp == OP_intrinsicop) { auto intrnId = static_cast(firstRealExpr)->GetIntrinsic(); if (std::find(supportedIntrns.begin(), supportedIntrns.end(), intrnId) == supportedIntrns.end()) { - SLP_GATHER_DEBUG(os << "unsupported intrinsicop: " << intrnId << std::endl); + SLP_GATHER_DEBUG(os << "unsupported intrinsicop: " << static_cast(intrnId) << std::endl); return tree->CreateTreeNodeByExprs(exprVec, parentNode, false); } } @@ -2961,7 +2971,7 @@ MeExpr *BuildExprAfterVectorSetElement(MeFunction &func, RegMeExpr *vecReg, // Example: // constants: [ 12, 35, 78, 89 ], elemSize: 8bit // output: (bin) 01011001 01001110 00100011 00001100 -uint64 ConstructConstants(std::vector &constants, uint32 elemSize) { +uint64 ConstructConstants(std::vector &constants, uint32 elemSize) { uint64 res = 0; uint32 shift = 0; uint32 
maskShift = 64 - elemSize; @@ -2980,7 +2990,7 @@ bool SLPVectorizer::DoVectTreeNodeConstval(TreeNode *treeNode) { return false; } PrimType elemType = tree->GetType(); - auto *vecType = GenMergedType(tree->GetType(), treeNode->GetLane()); + auto *vecType = GenMergedType(tree->GetType(), static_cast(treeNode->GetLane())); CHECK_NULL_FATAL(vecType); bool useScalarType = tree->CanTreeUseScalarTypeForConstvalIassign(); ScalarMeExpr *lhsReg = nullptr; @@ -2996,12 +3006,13 @@ bool SLPVectorizer::DoVectTreeNodeConstval(TreeNode *treeNode) { } vecType = GetScalarUnsignedTypeBySize(GetPrimTypeBitSize(vecType->GetPrimType())); lhsReg = irMap.CreateRegMeExpr(*vecType); - std::vector constants(treeNode->GetExprs().size()); - std::transform(treeNode->GetExprs().begin(), treeNode->GetExprs().end(), constants.begin(), [](MeExpr *expr) { - return static_cast(expr)->GetExtIntValue(); + std::vector constants(treeNode->GetExprs().size()); + std::transform(treeNode->GetExprs().cbegin(), treeNode->GetExprs().cend(), constants.begin(), + [](const MeExpr *expr) { + return static_cast(static_cast(expr)->GetExtIntValue()); }); uint64 mergeConstval = ConstructConstants(constants, GetPrimTypeBitSize(elemType)); - rhs = irMap.CreateIntConstMeExpr(mergeConstval, vecType->GetPrimType()); + rhs = irMap.CreateIntConstMeExpr(static_cast(mergeConstval), vecType->GetPrimType()); } else if (treeNode->SameExpr()) { if (!mergeToVecType) { SLP_DEBUG(os << "vector_from_scalar is not supproted for scalar mergedType" << std::endl); @@ -3040,12 +3051,12 @@ bool SLPVectorizer::DoVectTreeNodeConstval(TreeNode *treeNode) { return true; } -void SetMuListForVectorIvar(IvarMeExpr &ivar, TreeNode *treeNode, IRMap &irMap) { - auto &order = treeNode->GetOrder(); +void SetMuListForVectorIvar(IvarMeExpr &ivar, const TreeNode &treeNode) { + auto &order = treeNode.GetOrder(); ivar.GetMuList().resize(order.size(), nullptr); for (size_t i = 0; i < order.size(); ++i) { uint32 orderIdx = order[i]; - auto *mu = 
static_cast(treeNode->GetMemLocs()[i]->Emit())->GetUniqueMu(); + auto *mu = static_cast(treeNode.GetMemLocs()[i]->Emit())->GetUniqueMu(); ivar.SetMuItem(orderIdx, mu); } } @@ -3064,7 +3075,7 @@ bool SLPVectorizer::DoVectTreeNodeIvar(TreeNode *treeNode) { } return false; } - auto *vecType = GenMergedType(tree->GetType(), treeNode->GetLane()); + auto *vecType = GenMergedType(tree->GetType(), static_cast(treeNode->GetLane())); CHECK_NULL_FATAL(vecType); auto *minMem = treeNode->GetMinMemLoc(); auto *vecPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*vecType); @@ -3079,7 +3090,7 @@ bool SLPVectorizer::DoVectTreeNodeIvar(TreeNode *treeNode) { newIvar.SetTyIdx(vecPtrType->GetTypeIndex()); newIvar.SetPtyp(vecType->GetPrimType()); newIvar.SetBase(newBase); - SetMuListForVectorIvar(newIvar, treeNode, irMap); + SetMuListForVectorIvar(newIvar, *treeNode); addrExpr = irMap.HashMeExpr(newIvar); CHECK_FATAL(addrExpr->GetMeOp() == kMeOpIvar, "only iread memLoc is supported for now"); @@ -3095,7 +3106,7 @@ bool SLPVectorizer::DoVectTreeNodeIvar(TreeNode *treeNode) { bool SLPVectorizer::DoVectTreeNodeBinary(TreeNode *treeNode) { CHECK_FATAL(treeNode->GetChildren().size() == 2, "must be"); - auto *vecType = GenMergedType(tree->GetType(), treeNode->GetLane()); + auto *vecType = GenMergedType(tree->GetType(), static_cast(treeNode->GetLane())); CHECK_NULL_FATAL(vecType); // Get the first vector operand auto &childOutStmts0 = treeNode->GetChildren()[0]->GetOutStmts(); @@ -3170,18 +3181,18 @@ bool SLPVectorizer::DoVectTreeNodeNaryReverse(TreeNode *treeNode, MIRIntrinsicID uint32 elementBitSize = 0; GetRevInfoFromScalarRevIntrnId(intrnId, rangeBitSize, elementBitSize); uint32 opndBitSize = GetPrimTypeBitSize(treeNode->GetType()); - uint32 vecOpndBitSize = opndBitSize * treeNode->GetLane(); + uint32 vecOpndBitSize = static_cast(opndBitSize * treeNode->GetLane()); if (vecOpndBitSize != 64 && vecOpndBitSize != 128) { SLP_FAILURE_DEBUG( os << "only v64 and v128 are allowed as 
vector reverse operand, but get " << vecOpndBitSize << std::endl); return false; } - auto *vecType = GenMergedType(tree->GetType(), treeNode->GetLane()); + auto *vecType = GenMergedType(tree->GetType(), static_cast(treeNode->GetLane())); CHECK_NULL_FATAL(vecType); uint32 newNumLane = vecOpndBitSize / elementBitSize; auto isSign = !PrimitiveType(treeNode->GetType()).IsUnsigned(); auto newElementType = GetIntegerPrimTypeBySizeAndSign(elementBitSize, isSign); - auto revVecType = GenMergedType(newElementType, newNumLane); + auto revVecType = GenMergedType(newElementType, static_cast(newNumLane)); MIRIntrinsicID vecIntrnId; switch (rangeBitSize) { case 16: @@ -3222,7 +3233,7 @@ bool SLPVectorizer::DoVectTreeNodeIassign(TreeNode *treeNode) { CHECK_NULL_FATAL(minMem); // Use root node type for covering the following case: // [(mx943), 4] : u8 = constval : u32 - auto *vecType = GenMergedType(tree->GetType(), treeNode->GetLane()); + auto *vecType = GenMergedType(tree->GetType(), static_cast(treeNode->GetLane())); CHECK_NULL_FATAL(vecType); if (tree->CanTreeUseScalarTypeForConstvalIassign()) { vecType = GetScalarUnsignedTypeBySize(GetPrimTypeBitSize(vecType->GetPrimType())); @@ -3297,7 +3308,7 @@ bool SLPVectorizer::DoVectTreeNodeGatherNeeded(TreeNode &treeNode) { return false; } auto elemType = tree->GetType(); - auto *vecType = GenMergedType(elemType, treeNode.GetLane()); + auto *vecType = GenMergedType(elemType, static_cast(treeNode.GetLane())); CHECK_NULL_FATAL(vecType); auto *vecReg = irMap.CreateRegMeExpr(*vecType); auto exprNum = treeNode.GetExprs().size(); @@ -3378,7 +3389,6 @@ bool SLPVectorizer::VectorizeTreeNode(TreeNode *treeNode) { return false; } } - return false; } bool SLPVectorizer::VectorizeSLPTree() { diff --git a/src/mapleall/maple_me/src/me_sra.cpp b/src/mapleall/maple_me/src/me_sra.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0b446ed6ad84dc5ff83c632e246ae297e2fa810f --- /dev/null +++ b/src/mapleall/maple_me/src/me_sra.cpp @@ 
-0,0 +1,606 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "me_sra.h" +#include "me_phase_manager.h" + +namespace maple { +namespace { +bool kDebug = false; +#define DEBUG_SRA() \ + if (kDebug) LogInfo::MapleLogger() + +constexpr size_t kSRASizeLimit = 32; +} + +struct AggUse { + bool mayAliased = false; + FieldID fid = -1; + BB *bb = nullptr; + StmtNode *parent = nullptr; + BaseNode *access = nullptr; +}; + +struct AggGroup { + bool isAddressToken = false; + bool isRet = false; + bool isOffsetRead = false; + bool wholeSplit = true; + MIRSymbol *symbol = nullptr; + std::vector> uses; + std::set replaceFields; +}; + +/// Implementation of Scalar Replacement of Aggregates. +/// This phase analyzes the local aggregates and tries to replace them +/// with scalar ones. +/// +/// step 1. It scans the whole function and collects all the uses of local aggregates, +/// including agg copies, agg inits, agg field reads/writes. +/// step 2. Analyze all the uses, determine whether the whole agg can be split or +/// just rewrite the necessary parts. +/// step 3. Leave all the dead stmts or propgatable exprs to followed phases +/// (like hdse, epre, .etc) to optimize. 
+class SRA { + public: + explicit SRA(MeFunction &f) : func(f), builder(*f.GetMIRModule().GetMIRBuilder()) {} + ~SRA() { + curBB = nullptr; + } + + void Run(); + private: + MIRSymbol *GetLocalSym(StIdx idx); + void CollectCandidates(); + void AddUse(StIdx idx, FieldID id, BaseNode &access, StmtNode *parent); + BaseNode *ScanNodes(BaseNode &node, StmtNode *parent); + void ScanFunc(); + void RemoveUnsplittable(); + void DetermineSplitRange(); + template + void SplitAggCopy(AssignType &assignNode, MIRStructType &structureType); + void SplitDassignAggCopy(DassignNode &dassign); + void SplitIassignAggCopy(IassignNode &iassign); + void DoWholeSplit(AggGroup &group); + void DoPartialSplit(AggGroup &group); + void DoReplace(); + + MeFunction &func; + MIRBuilder &builder; + BB *curBB = nullptr; + std::unordered_map> groups; + std::set> removed; +}; + +MIRSymbol *SRA::GetLocalSym(StIdx idx) { + if (idx.IsGlobal()) { + return nullptr; + } + return func.GetMirFunc()->GetSymbolTabItem(idx.Idx()); +} + +static MIRStructType *GetReadedStructureType(const DreadNode &dread, const MIRFunction &func) { + const auto &rhsStIdx = dread.GetStIdx(); + auto rhsSymbol = func.GetLocalOrGlobalSymbol(rhsStIdx); + ASSERT_NOT_NULL(rhsSymbol); + auto rhsAggType = rhsSymbol->GetType(); + auto rhsFieldID = dread.GetFieldID(); + if (rhsFieldID != 0) { + CHECK_FATAL(rhsAggType->IsStructType(), "only struct has non-zero fieldID"); + rhsAggType = static_cast(rhsAggType)->GetFieldType(rhsFieldID); + } + if (!rhsAggType->IsStructType()) { + return nullptr; + } + return static_cast(rhsAggType); +} + +static MIRStructType *GetReadedStructureType(const IreadNode &iread, const MIRFunction &func) { + (void)func; + auto rhsPtrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx()); + CHECK_FATAL(rhsPtrType->IsMIRPtrType(), "must be pointer type"); + auto rhsAggType = static_cast(rhsPtrType)->GetPointedType(); + auto rhsFieldID = iread.GetFieldID(); + if (rhsFieldID != 0) { + 
CHECK_FATAL(rhsAggType->IsStructType(), "only struct has non-zero fieldID"); + rhsAggType = static_cast(rhsAggType)->GetFieldType(rhsFieldID); + } + if (!rhsAggType->IsStructType()) { + return nullptr; + } + return static_cast(rhsAggType); +} + +template +void SRA::SplitAggCopy(AssignType &assignNode, MIRStructType &structureType) { + auto *readNode = static_cast(assignNode.GetRHS()); + auto rhsFieldID = readNode->GetFieldID(); + auto *rhsAggType = GetReadedStructureType(*readNode, *func.GetMirFunc()); + if (&structureType != rhsAggType) { + return; + } + + FieldID id = 1; + while (id <= static_cast(structureType.NumberOfFieldIDs())) { + MIRType *fieldType = structureType.GetFieldType(id); + if (fieldType->GetSize() == 0) { + id++; + continue; // field size is zero for empty struct/union; + } + if (fieldType->GetKind() == kTypeBitField && static_cast(fieldType)->GetFieldSize() == 0) { + id++; + continue; // bitfield size is zero + } + if (fieldType->IsMIRStructType()) { + id++; + continue; + } + auto *newAssign = assignNode.CloneTree(func.GetMirFunc()->GetCodeMemPoolAllocator()); + newAssign->SetFieldID(assignNode.GetFieldID() + id); + auto *newRHS = static_cast(newAssign->GetRHS()); + newRHS->SetFieldID(rhsFieldID + id); + newRHS->SetPrimType(fieldType->GetPrimType()); + curBB->GetStmtNodes().insertAfter(&assignNode, newAssign); + newAssign->SetExpandFromArrayOfCharFunc(assignNode.IsExpandedFromArrayOfCharFunc()); + if (fieldType->IsMIRUnionType()) { + id += static_cast(fieldType->NumberOfFieldIDs()); + } + id++; + } + (void)removed.emplace(std::make_pair(&assignNode, curBB)); +} + +void SRA::SplitDassignAggCopy(DassignNode &dassign) { + auto *rhs = dassign.GetRHS(); + auto stIdx = dassign.GetStIdx(); + auto *symbol = stIdx.IsGlobal() ? 
GlobalTables::GetGlobalTables().GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) : + func.GetMirFunc()->GetSymbolTabItem(stIdx.Idx()); + CHECK_NULL_FATAL(symbol); + if (dassign.GetFieldID() != 0) { + auto *fieldType = static_cast(symbol->GetType())->GetFieldType(dassign.GetFieldID()); + if (fieldType->IsMIRUnionType()) { + return; + } + } else if (symbol->GetType()->IsMIRUnionType()) { + return; + } + auto *lhsType = static_cast(symbol->GetType())->GetFieldType(dassign.GetFieldID()); + if (!lhsType->IsMIRStructType()) { + return; + } + auto *lhsAggType = static_cast(lhsType); + + if (rhs->GetOpCode() == OP_dread) { + return SplitAggCopy(dassign, *lhsAggType); + } else if (rhs->GetOpCode() == OP_iread) { + return SplitAggCopy(dassign, *lhsAggType); + } +} + +static MIRStructType *GetIassignedStructType(const IassignNode &iassign) { + auto ptrTyIdx = iassign.GetTyIdx(); + auto *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyIdx); + CHECK_FATAL(ptrType->IsMIRPtrType(), "must be pointer type"); + auto aggTyIdx = static_cast(ptrType)->GetPointedTyIdxWithFieldID(iassign.GetFieldID()); + auto *lhsAggType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(aggTyIdx); + if (!lhsAggType->IsStructType()) { + return nullptr; + } + if (lhsAggType->GetKind() == kTypeUnion) { + return nullptr; + } + return static_cast(lhsAggType); +} + +void SRA::SplitIassignAggCopy(IassignNode &iassign) { + auto rhs = iassign.GetRHS(); + auto *lhsAggType = GetIassignedStructType(iassign); + if (!lhsAggType) { + return; + } + + if (rhs->GetOpCode() == OP_dread) { + return SplitAggCopy(iassign, *lhsAggType); + } else if (rhs->GetOpCode() == OP_iread) { + return SplitAggCopy(iassign, *lhsAggType); + } +} + +void SRA::DoWholeSplit(AggGroup &group) { + auto *type = static_cast(group.symbol->GetType()); + for (auto &use : group.uses) { + if (!type->GetFieldType(use->fid)->IsMIRStructType()) { + continue; + } + curBB = use->bb; + auto *stmt = use->parent; + if (removed.find(std::make_pair(stmt, 
curBB)) != removed.end()) { + continue; + } + if (stmt->GetOpCode() == OP_dassign) { + SplitDassignAggCopy(static_cast(*stmt)); + } else if (stmt->GetOpCode() == OP_iassign) { + SplitIassignAggCopy(static_cast(*stmt)); + } + } +} + +void SRA::DoPartialSplit(AggGroup &group) { + DEBUG_SRA() << "\nDo not split BIG aggregates"; + auto *symbol = group.symbol; + auto *type = static_cast(symbol->GetType()); + std::vector newLocal(type->NumberOfFieldIDs() + 1, StIdx(0)); + for (auto id : group.replaceFields) { + auto name = symbol->GetName() + "@" + std::to_string(id) + "@SRA"; + auto *fieldType = type->GetFieldType(id); + auto *fieldSym = + builder.CreateSymbol(fieldType->GetTypeIndex(), name, kStVar, kScAuto, func.GetMirFunc(), kScopeLocal); + DEBUG_SRA() << "Create a local symbol for %" << symbol->GetName() << + " field " << std::to_string(id) << ": " << fieldSym->GetName() << std::endl; + newLocal[static_cast(id)] = fieldSym->GetStIdx(); + } + for (auto &use : group.uses) { + if (!type->GetFieldType(use->fid)->IsMIRStructType()) { + if (use->access->GetOpCode() == OP_dassign) { + static_cast(use->access)->SetStIdx(newLocal[static_cast(use->fid)]); + static_cast(use->access)->SetFieldID(0); + } else if (use->access->GetOpCode() == OP_dread) { + static_cast(use->access)->SetStIdx(newLocal[static_cast(use->fid)]); + static_cast(use->access)->SetFieldID(0); + } else { + CHECK_FATAL_FALSE("SRA: Check access op!"); + } + continue; + } + + curBB = use->bb; + auto offset1 = type->GetBitOffsetFromBaseAddr(use->fid); + auto fieldType1 = type->GetFieldType(use->fid); + auto size1 = fieldType1->IsMIRBitFieldType() ? static_cast(fieldType1)->GetFieldSize() : + fieldType1->GetSize() * CHAR_BIT; + for (auto id : group.replaceFields) { + if (use->fid >= id) { + continue; + } + auto offset2 = type->GetBitOffsetFromBaseAddr(id); + auto fieldType2 = type->GetFieldType(id); + auto size2 = fieldType2->IsMIRBitFieldType() ? 
static_cast(fieldType2)->GetFieldSize() : + fieldType2->GetSize() * CHAR_BIT; + if (std::max(offset1, offset2) >= + std::min(offset1 + static_cast(size1), offset2 + static_cast(size2))) { + continue; + } + + if (use->access->GetOpCode() == OP_dassign) { + auto *rhs = static_cast(use->access)->GetRHS(); + auto *newRhs = rhs->CloneTree(func.GetMirFunc()->GetCodeMemPoolAllocator()); + if (rhs->GetOpCode() == OP_dread) { + static_cast(newRhs)->SetFieldID(static_cast(newRhs)->GetFieldID() + id - use->fid); + } else if (rhs->GetOpCode() == OP_iread) { + static_cast(newRhs)->SetFieldID(static_cast(newRhs)->GetFieldID() + id - use->fid); + } else { + CHECK_FATAL_FALSE("SRA: Check access rhs op!"); + } + newRhs->SetPrimType(fieldType2->GetPrimType()); + auto *newAssign = builder.CreateStmtDassign(newLocal[static_cast(id)], 0, newRhs); + curBB->GetStmtNodes().insertAfter(static_cast(use->access), newAssign); + continue; + } + + if (use->access->GetOpCode() == OP_dread) { + auto *localSym = func.GetMirFunc()->GetSymbolTabItem(newLocal[static_cast(id)].Idx()); + auto *localRead = builder.CreateDread(*localSym, fieldType2->GetPrimType()); + auto *newAssign = builder.CreateStmtDassign(symbol->GetStIdx(), id, localRead); + curBB->InsertStmtBefore(use->parent, newAssign); + continue; + } + CHECK_FATAL_FALSE("SRA: Check access op!"); + } + } +} + +void SRA::DoReplace() { + for (auto &it : groups) { + auto *group = it.second.get(); + if (group->wholeSplit) { + DoWholeSplit(*group); + continue; + } + DoPartialSplit(*group); + } + for (auto &pair : removed) { + pair.second->RemoveStmtNode(pair.first); + } +} + +void SRA::DetermineSplitRange() { + for (auto &it : groups) { + auto *type = static_cast(it.first->GetType()); + if (type->GetSize() <= kSRASizeLimit) { + continue; + } + FieldID id = 1; + while (id <= static_cast(type->NumberOfFieldIDs())) { + auto *fieldType = type->GetFieldType(id); + if (!fieldType->IsMIRUnionType()) { + id++; + continue; + } + for (auto &use : 
it.second->uses) { + if (use->fid >= id && use->fid <= id + static_cast(fieldType->NumberOfFieldIDs())) { + use->mayAliased = true; + } + } + id = id + static_cast(fieldType->NumberOfFieldIDs()) + 1; + } + for (auto &use : it.second->uses) { + if (!use->mayAliased && IsPrimitiveScalar(type->GetFieldType(use->fid)->GetPrimType())) { + (void)it.second->replaceFields.emplace(use->fid); + } + } + // if split part is less than 3 / 4, do not apply whole split + if (type->GetFieldsSize() * 3 / 4 >= it.second->replaceFields.size()) { + it.second->wholeSplit = false; + } + } +} + +void SRA::AddUse(StIdx idx, FieldID id, BaseNode &access, StmtNode *parent) { + auto *symbol = GetLocalSym(idx); + auto found = groups.find(symbol); + if (found != groups.end()) { + auto use = std::make_unique(); + use->bb = curBB; + use->parent = parent; + use->access = &access; + use->fid = id; + (void)found->second->uses.emplace_back(std::move(use)); + } +} + +BaseNode *SRA::ScanNodes(BaseNode &node, StmtNode *parent) { + switch (node.GetOpCode()) { + case OP_addrof: { + auto &addrof = static_cast(node); + auto *symbol = GetLocalSym(addrof.GetStIdx()); + auto found = groups.find(symbol); + if (found != groups.end()) { + found->second->isAddressToken = true; + } + break; + } + case OP_dread: { + auto &dread = static_cast(node); + AddUse(dread.GetStIdx(), dread.GetFieldID(), node, parent); + break; + } + case OP_dreadoff: { + auto &dreadoff = static_cast(node); + auto *symbol = GetLocalSym(dreadoff.stIdx); + auto found = groups.find(symbol); + if (found != groups.end()) { + found->second->isOffsetRead = true; + } + break; + } + case OP_dassign: { + auto &dassign = static_cast(node); + AddUse(dassign.GetStIdx(), dassign.GetFieldID(), node, parent); + break; + } + case OP_dassignoff: { + auto &dassignoff = static_cast(node); + auto *symbol = GetLocalSym(dassignoff.stIdx); + auto found = groups.find(symbol); + if (found != groups.end()) { + found->second->isOffsetRead = true; + } + break; + } + case 
OP_iread: { + auto &iread = static_cast(node); + auto *base = iread.Opnd(0); + if (base->GetOpCode() != OP_addrof) { + break; + } + auto *addrof = static_cast(base); + auto *symbol = GetLocalSym(addrof->GetStIdx()); + auto found = groups.find(symbol); + if (found == groups.end()) { + break; + } + auto *ptrtype = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); + auto symbolTyIdx = addrof->GetFieldID() == 0 ? symbol->GetTyIdx() : + static_cast(symbol->GetType())->GetFieldTyIdx(addrof->GetFieldID()); + if (symbolTyIdx != ptrtype->GetPointedTyIdx()) { + break; + } + // use dread to replace this iread + auto *dread = builder.CreateDread(*symbol, iread.GetPrimType()); + dread->SetFieldID(addrof->GetFieldID() + iread.GetFieldID()); + AddUse(dread->GetStIdx(), dread->GetFieldID(), *dread, parent); + return dread; + } + case OP_iassign: { + auto &iassign = static_cast(node); + auto *base = iassign.Opnd(0); + if (base->GetOpCode() != OP_addrof) { + break; + } + auto *addrof = static_cast(base); + auto *symbol = GetLocalSym(addrof->GetStIdx()); + auto found = groups.find(symbol); + if (found == groups.end()) { + break; + } + auto *ptrtype = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign.GetTyIdx())); + auto symbolTyIdx = addrof->GetFieldID() == 0 ? 
symbol->GetTyIdx() : + static_cast(symbol->GetType())->GetFieldTyIdx(addrof->GetFieldID()); + if (symbolTyIdx != ptrtype->GetPointedTyIdx()) { + break; + } + // use dassign to replace this iassign + auto *dassign = + builder.CreateStmtDassign(*symbol, addrof->GetFieldID() + iassign.GetFieldID(), iassign.GetRHS()); + curBB->InsertStmtBefore(&iassign, dassign); + curBB->RemoveStmtNode(&iassign); + AddUse(dassign->GetStIdx(), dassign->GetFieldID(), *dassign, dassign); + auto *replace = ScanNodes(*dassign->GetRHS(), dassign); + if (replace) { + dassign->SetRHS(replace); + } + return nullptr; + } + default: { + if (!kOpcodeInfo.IsCall(node.GetOpCode())) { + break; + } + auto &call = static_cast(node); + for (size_t i = 0; i < call.GetCallReturnVector()->size(); ++i) { + auto retPair = call.GetReturnPair(i); + if (retPair.second.IsReg()) { + continue; + } + auto *symbol = GetLocalSym(retPair.first); + auto found = groups.find(symbol); + if (found != groups.end()) { + found->second->isRet = true; + } + } + break; + } + } + for (size_t i = 0; i < node.NumOpnds(); i++) { + auto *replace = ScanNodes(*node.Opnd(i), parent); + if (replace) { + node.SetOpnd(replace, i); + } + } + return nullptr; +} + +void SRA::ScanFunc() { + for (BB *bb : func.GetCfg()->GetAllBBs()) { + if (bb == nullptr) { + continue; + } + curBB = bb; + StmtNode *stmt = to_ptr(bb->GetStmtNodes().begin()); + while (stmt) { + auto *next = stmt->GetNext(); + (void)ScanNodes(*stmt, stmt); + stmt = next; + } + } +} + +void SRA::RemoveUnsplittable() { + for (auto it = groups.begin(); it != groups.end();) { + auto *symbol = it->first; + auto *group = it->second.get(); + + if (group->isAddressToken) { + DEBUG_SRA() << "Symbol %" << symbol->GetName() << " rejected by SRA, because it is [addressed].\n"; + it = groups.erase(it); + continue; + } + + if (group->isRet) { + DEBUG_SRA() << "Symbol %" << symbol->GetName() << " rejected by SRA, because it is a [return value].\n"; + it = groups.erase(it); + continue; + } + + 
if (group->isOffsetRead) { + DEBUG_SRA() << "Symbol %" << symbol->GetName() << " rejected by SRA, because some parts are [read by offset].\n"; + it = groups.erase(it); + continue; + } + + bool notCopyUse = false; + size_t useCount = 0; + for (auto &use : group->uses) { + auto *fieldTy = static_cast(symbol->GetType())->GetFieldType(use->fid); + if (IsPrimitiveScalar(fieldTy->GetPrimType())) { + useCount++; + continue; + } + if (use->parent->GetOpCode() != OP_dassign && use->parent->GetOpCode() != OP_iassign) { + notCopyUse = true; + break; + } + } + if (notCopyUse) { + DEBUG_SRA() << "Symbol %" << it->first->GetName() << " rejected by SRA, because it is [not copy use].\n"; + it = groups.erase(it); + continue; + } + if (useCount == 0) { + DEBUG_SRA() << "Symbol %" << it->first->GetName() << " rejected by SRA, because "; + DEBUG_SRA() << "[no scalar part] need to be replaced.\n"; + it = groups.erase(it); + continue; + } + ++it; + } +} + +void SRA::CollectCandidates() { + for (size_t i = 0; i < func.GetMirFunc()->GetSymbolTabSize(); i++) { + auto *symbol = func.GetMirFunc()->GetSymbolTabItem(static_cast(i)); + if (!symbol || symbol->IsFormal() || symbol->IsVolatile() || symbol->IsPUStatic()) { + continue; + } + auto *type = symbol->GetType(); + if (type->GetKind() != kTypeStruct) { + continue; + } + auto group = std::make_unique(); + group->symbol = symbol; + (void)groups.emplace(symbol, std::move(group)); + } +} + +void SRA::Run() { + if (func.IsEmpty()) { + return; + } + CollectCandidates(); + if (groups.empty()) { + // no candidate found, exit + return; + } + + ScanFunc(); + RemoveUnsplittable(); + if (groups.empty()) { + // no candidate found, exit + return; + } + + DetermineSplitRange(); + DoReplace(); +} + +void MESRA::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.SetPreservedAll(); +} + +bool MESRA::PhaseRun(maple::MeFunction &f) { + kDebug = DEBUGFUNC_NEWPM(f); + SRA sra(f); + sra.Run(); + return false; +} +} // namespace maple diff --git 
a/src/mapleall/maple_me/src/me_ssa.cpp b/src/mapleall/maple_me/src/me_ssa.cpp index 515eb0654ca5a80a0baac33e91bf1844c878aaed..37a0285672260f919c9194e6d86fb86d391170f9 100644 --- a/src/mapleall/maple_me/src/me_ssa.cpp +++ b/src/mapleall/maple_me/src/me_ssa.cpp @@ -103,8 +103,8 @@ void MeSSA::InsertIdentifyAssignments(IdentifyLoops *identloops) { if (aloop->exitBB == nullptr) { continue; } - auto it = preMeFunc->label2WhileInfo.find(headbb->GetBBLabel()); - if (it == preMeFunc->label2WhileInfo.end()) { + const auto it = std::as_const(preMeFunc->label2WhileInfo).find(headbb->GetBBLabel()); + if (it == preMeFunc->label2WhileInfo.cend()) { continue; } if (headbb->GetPred().size() != 2) { @@ -180,7 +180,7 @@ bool MESSA::PhaseRun(maple::MeFunction &f) { CHECK_FATAL(identloops != nullptr, "identloops has problem"); ssa->InsertIdentifyAssignments(identloops); } - ssa->RenameAllBBs(cfg); + ssa->RenameAllBBs(*cfg); ssa->VerifySSA(); if (DEBUGFUNC_NEWPM(f)) { ssaTab->GetVersionStTable().Dump(&ssaTab->GetModule()); diff --git a/src/mapleall/maple_me/src/me_ssa_epre.cpp b/src/mapleall/maple_me/src/me_ssa_epre.cpp index dc285e63b1f25a2d7fba5064ccb0e7e7a4ae3044..ad9dcd2aace445f4d5de0e166e3e234e91b9d6af 100644 --- a/src/mapleall/maple_me/src/me_ssa_epre.cpp +++ b/src/mapleall/maple_me/src/me_ssa_epre.cpp @@ -22,7 +22,7 @@ #include "me_stack_protect.h" namespace { -const std::set propWhiteList { +const std::set kPropWhiteList { #define PROPILOAD(funcName) #funcName, #include "propiloadlist.def" #undef PROPILOAD @@ -98,7 +98,7 @@ bool MESSAEPre::PhaseRun(maple::MeFunction &f) { (eprePULimitSpecified && puCount != MeOption::eprePULimit) ? 
UINT32_MAX : MeOption::epreLimit; MemPool *ssaPreMemPool = ApplyTempMemPool(); bool epreIncludeRef = MeOption::epreIncludeRef; - if (!MeOption::gcOnly && propWhiteList.find(f.GetName()) != propWhiteList.end()) { + if (!MeOption::gcOnly && kPropWhiteList.find(f.GetName()) != kPropWhiteList.end()) { epreIncludeRef = false; } MeSSAEPre ssaPre(f, *irMap, *dom, *pdom, kh, *ssaPreMemPool, *ApplyTempMemPool(), epreLimitUsed, epreIncludeRef, @@ -123,7 +123,7 @@ bool MESSAEPre::PhaseRun(maple::MeFunction &f) { ssaPre.doLFTR = true; } } - if (f.GetHints() & kPlacementRCed) { + if ((f.GetHints() & kPlacementRCed) != 0) { ssaPre.SetPlacementRC(true); } if (eprePULimitSpecified && puCount == MeOption::eprePULimit && epreLimitUsed != UINT32_MAX) { @@ -142,7 +142,7 @@ bool MESSAEPre::PhaseRun(maple::MeFunction &f) { MeSSAUpdate ssaUpdate(f, *f.GetMeSSATab(), *dom, ssaPre.GetCandsForSSAUpdate()); ssaUpdate.Run(); } - if ((f.GetHints() & kPlacementRCed) && ssaPre.GetAddedNewLocalRefVars()) { + if ((f.GetHints() & kPlacementRCed) != 0 && ssaPre.GetAddedNewLocalRefVars()) { PlacementRC placeRC(f, *dom, *pdom, *ssaPreMemPool, DEBUGFUNC_NEWPM(f)); placeRC.preKind = MeSSUPre::kSecondDecrefPre; placeRC.ApplySSUPre(); @@ -154,10 +154,7 @@ bool MESSAEPre::PhaseRun(maple::MeFunction &f) { LogInfo::MapleLogger() << " == " << PhaseName() << " invokes [ " << hdse.PhaseName() << " ] ==\n"; } hdse.hdseKeepRef = MeOption::dseKeepRef; - hdse.DoHDSE(); - if (hdse.NeedUNClean()) { - f.GetCfg()->UnreachCodeAnalysis(true); - } + hdse.DoHDSESafely(&f, *GetAnalysisInfoHook()); } ++puCount; return true; diff --git a/src/mapleall/maple_me/src/me_ssa_lpre.cpp b/src/mapleall/maple_me/src/me_ssa_lpre.cpp index e89c0cf7b14be532b06b38ed290327cceb6e49dc..c1e97a2965c86352300a172e759ff60cd6417d1e 100644 --- a/src/mapleall/maple_me/src/me_ssa_lpre.cpp +++ b/src/mapleall/maple_me/src/me_ssa_lpre.cpp @@ -121,7 +121,7 @@ void MeSSALPre::GenerateSaveRealOcc(MeRealOcc &realOcc) { realOcc.SetSavedExpr(*regOrVar); } 
-MeExpr *MeSSALPre::GetTruncExpr(const VarMeExpr &theLHS, MeExpr &savedRHS) { +MeExpr *MeSSALPre::GetTruncExpr(const VarMeExpr &theLHS, MeExpr &savedRHS) const { MIRType *lhsType = theLHS.GetType(); if (theLHS.GetType()->GetKind() != kTypeBitField) { if (GetPrimTypeSize(theLHS.GetPrimType()) < GetPrimTypeSize(savedRHS.GetPrimType())) { @@ -256,10 +256,7 @@ void MeSSALPre::BuildWorkListLHSOcc(MeStmt &meStmt, int32 seqStmt) { (void)assignedFormals.insert(ost->GetIndex()); } CHECK_NULL_FATAL(meStmt.GetRHS()); - if (ost->IsVolatile()) { - return; - } - if (lhs->GetPrimType() == PTY_agg) { + if (ost->IsVolatile() || lhs->GetPrimType() == PTY_agg) { return; } CreateRealOcc(meStmt, seqStmt, *lhs, false, true); @@ -346,7 +343,7 @@ void MeSSALPre::BuildWorkListExpr(MeStmt &meStmt, int32 seqStmt, MeExpr &meExpr, if (sym->GetAsmAttr() != 0) { break; } - if (sym->IsInstrumented() && !(func->GetHints() & kPlacementRCed)) { + if (sym->IsInstrumented() && (func->GetHints() & kPlacementRCed) == 0) { // not doing because its SSA form is not complete break; } diff --git a/src/mapleall/maple_me/src/me_ssa_update.cpp b/src/mapleall/maple_me/src/me_ssa_update.cpp index 4f99bb513fe06c048f7c3d6db345053b6dd89fe3..fec78b272d57d01c61fca5f945415e0e480de964 100644 --- a/src/mapleall/maple_me/src/me_ssa_update.cpp +++ b/src/mapleall/maple_me/src/me_ssa_update.cpp @@ -26,8 +26,8 @@ std::stack *VectorVersionStacks::GetRenameStack(OStIdx idx) { } std::stack *MapVersionStacks::GetRenameStack(OStIdx idx) { - auto it = renameWithMapStacks.find(idx); - if (it == renameWithMapStacks.end()) { + auto it = std::as_const(renameWithMapStacks).find(idx); + if (it == renameWithMapStacks.cend()) { return nullptr; } return it->second.get(); @@ -68,13 +68,11 @@ void MapVersionStacks::InitRenameStack(OStIdx idx) { void VectorVersionStacks::RecordCurrentStackSize(std::vector> &origStackSize) { origStackSize.resize(renameWithVectorStacks.size()); - uint32 stackId = 0; for (size_t i = 0; i < 
renameWithVectorStacks.size(); ++i) { if (renameWithVectorStacks.at(i) == nullptr) { continue; } origStackSize[i] = std::make_pair(renameWithVectorStacks.at(i)->size(), OStIdx(i)); - ++stackId; } } @@ -151,7 +149,7 @@ void MeSSAUpdate::InsertPhis() { } } -void MeSSAUpdate::RenamePhi(const BB &bb) { +void MeSSAUpdate::RenamePhi(const BB &bb) const { if (bb.GetMePhiList().empty()) { return; } @@ -304,7 +302,7 @@ void MeSSAUpdate::RenameStmts(BB &bb) { } } -void MeSSAUpdate::RenamePhiOpndsInSucc(const BB &bb) { +void MeSSAUpdate::RenamePhiOpndsInSucc(const BB &bb) const { for (BB *succ : bb.GetSucc()) { if (succ->GetMePhiList().empty()) { continue; @@ -349,7 +347,7 @@ void MeSSAUpdate::InsertOstToSSACands(OStIdx ostIdx, const BB &defBB, if (ssaCands == nullptr) { return; } - auto it = ssaCands->find(ostIdx); + const auto it = std::as_const(ssaCands)->find(ostIdx); if (it == ssaCands->end()) { std::unique_ptr> bbSet = std::make_unique>(std::less()); bbSet->insert(defBB.GetBBId()); diff --git a/src/mapleall/maple_me/src/me_ssi.cpp b/src/mapleall/maple_me/src/me_ssi.cpp index e32c8f896f0c1999177a753471fe9f4624a80de4..9b88d1faa6aff4a4ca39f3ec9dd61a9890f28a06 100644 --- a/src/mapleall/maple_me/src/me_ssi.cpp +++ b/src/mapleall/maple_me/src/me_ssi.cpp @@ -19,7 +19,7 @@ bool MeSSI::isDebug = false; constexpr int kNumOpnds = 2; constexpr int kPiStmtUpperBound = 2; -VarMeExpr *MeSSI::CreateNewPiExpr(const MeExpr &opnd) { +VarMeExpr *MeSSI::CreateNewPiExpr(const MeExpr &opnd)const { if (opnd.GetMeOp() == kMeOpConst) { return nullptr; } @@ -92,7 +92,7 @@ void MeSSI::AddPiForABCOpt(BB &bb) { } } -NaryMeExpr *MeSSI::GetInstanceOfType(MeExpr &e) { +NaryMeExpr *MeSSI::GetInstanceOfType(MeExpr &e) const { CHECK_FATAL(e.GetMeOp() == kMeOpVar, "must b"); VarMeExpr *var = static_cast(&e); if (var->GetPrimType() != PTY_u1 || var->GetDefBy() != kDefByStmt) { @@ -116,7 +116,7 @@ NaryMeExpr *MeSSI::GetInstanceOfType(MeExpr &e) { return callNode; } -uint8_t MeSSI::AnalysisBranch(MeStmt 
&meStmt) { +uint8_t MeSSI::AnalysisBranch(MeStmt &meStmt) const { CHECK_FATAL(meStmt.IsCondBr(), "must be"); auto *brMeStmt = static_cast(&meStmt); MeExpr *meCmp = brMeStmt->GetOpnd(); @@ -214,17 +214,17 @@ void MeSSI::InsertPiNodes() { } } -bool MeSSI::ExistedPhiNode(BB &bb, const VarMeExpr &rhs) { +bool MeSSI::ExistedPhiNode(BB &bb, const VarMeExpr &rhs) const { return bb.GetMePhiList().find(rhs.GetOstIdx()) != bb.GetMePhiList().end(); } -bool MeSSI::ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs) { +bool MeSSI::ExistedPiNode(BB &bb, BB &parentBB, const VarMeExpr &rhs) const { MapleMap> &piList = bb.GetPiList(); - auto it = piList.find(&parentBB); - if (it == piList.end()) { + auto it = std::as_const(piList).find(&parentBB); + if (it == piList.cend()) { return false; } - std::vector &piStmts = it->second; + const std::vector &piStmts = it->second; CHECK_FATAL(!piStmts.empty(), "should not be empty"); CHECK_FATAL(piStmts.size() <= kPiStmtUpperBound, "must be"); PiassignMeStmt *pi1 = piStmts.at(0); @@ -382,8 +382,8 @@ void MeSSI::ReplacePiPhiInSuccs(BB &bb, VarMeExpr &newVar) { } CHECK_FATAL(index < succBB->GetPred().size(), "must be"); MapleMap &phiList = succBB->GetMePhiList(); - auto it2 = phiList.find(newVar.GetOstIdx()); - if (it2 != phiList.end()) { + auto it2 = std::as_const(phiList).find(newVar.GetOstIdx()); + if (it2 != phiList.cend()) { MePhiNode *phi = it2->second; ScalarMeExpr *oldVar = phi->GetOpnd(index); phi->SetOpnd(index, &newVar); @@ -624,7 +624,7 @@ void MeSSI::RemoveExtraNodes() { for (DefPoint *defP : newDefPoints) { defP->RemoveFromBB(); } - for (auto pair : modifiedStmt) { + for (auto &pair : std::as_const(modifiedStmt)) { MeStmt *meStmt = pair.first.first; MeExpr *newVar = nullptr; if ((meStmt->GetOp() == OP_iassign) && (pair.first.second == 0)) { @@ -636,7 +636,7 @@ void MeSSI::RemoveExtraNodes() { bool replaced = ReplaceStmtWithNewVar(*meStmt, *newVar, *oldVar, false); CHECK_FATAL(replaced, "must be"); } - for (auto pair : 
modifiedPhi) { + for (auto &pair : std::as_const(modifiedPhi)) { MePhiNode *phi = pair.first; for (size_t i = 0; i < pair.second.size(); ++i) { size_t index = i; diff --git a/src/mapleall/maple_me/src/me_stack_protect.cpp b/src/mapleall/maple_me/src/me_stack_protect.cpp index 69848b3230485eb06e550709c9885d466d972a45..ea3296ec883fdab9cd56b80809f7ff4387440e0e 100644 --- a/src/mapleall/maple_me/src/me_stack_protect.cpp +++ b/src/mapleall/maple_me/src/me_stack_protect.cpp @@ -320,7 +320,7 @@ void MeStackProtect::CheckAddrofStack() { } } -bool MeStackProtect::MayWriteStack() { +bool MeStackProtect::MayWriteStack() const { auto *cfg = f->GetCfg(); for (BB *bb: cfg->GetAllBBs()) { if (bb == nullptr || bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB()) { diff --git a/src/mapleall/maple_me/src/me_stmt_fre.cpp b/src/mapleall/maple_me/src/me_stmt_fre.cpp index 576e3bd2a6c09d24e465e44df4a2f7a971b9b9e4..33c81c549cc0eb577cd29348dafc2cdfd5f31fe3 100644 --- a/src/mapleall/maple_me/src/me_stmt_fre.cpp +++ b/src/mapleall/maple_me/src/me_stmt_fre.cpp @@ -16,7 +16,7 @@ #include "me_stmt_pre.h" namespace maple { -void MeStmtPre::ResetFullyAvail(MePhiOcc &occ) { +void MeStmtPre::ResetFullyAvail(MePhiOcc &occ) const { occ.SetIsCanBeAvail(false); // reset those phiocc nodes that have oc as one of its operands for (auto it = phiOccs.begin(); it != phiOccs.end(); ++it) { diff --git a/src/mapleall/maple_me/src/me_stmt_pre.cpp b/src/mapleall/maple_me/src/me_stmt_pre.cpp index 12a704cd5d717d84594dbec5b6aaad85398257d6..da2898cf3a0b81ba88f8dc1af71c11c9a638d08c 100644 --- a/src/mapleall/maple_me/src/me_stmt_pre.cpp +++ b/src/mapleall/maple_me/src/me_stmt_pre.cpp @@ -178,7 +178,7 @@ void MeStmtPre::Finalize1() { auto *phiOpnd = static_cast(occ); MePhiOcc *phiOcc = phiOpnd->GetDefPhiOcc(); if (phiOcc->IsWillBeAvail()) { - if (OKToInsert(phiOpnd)) { + if (OKToInsert(*phiOpnd)) { // insert the current expression at the end of the block containing phiOpnd if 
(phiOpnd->GetBB()->GetSucc().size() > 1) { CHECK_FATAL(!workCand->Redo2HandleCritEdges(), "Finalize1: insertion at critical edge; aborting"); @@ -593,8 +593,8 @@ void MeStmtPre::CreateSortedOccs() { stmtWkCand->GetTheMeStmt()->GetVarLHS() != nullptr && !stmtWkCand->LHSIsFinal()) { VarMeExpr *lhsVar = static_cast(stmtWkCand->GetTheMeStmt()->GetVarLHS()); OStIdx ostIdx = lhsVar->GetOstIdx(); - auto uMapIt = useOccurMap.find(ostIdx); - CHECK_FATAL(uMapIt != useOccurMap.end(), "MeStmtPre::CreateSortedOccs: missing entry in useOccurMap"); + auto uMapIt = std::as_const(useOccurMap).find(ostIdx); + CHECK_FATAL(uMapIt != useOccurMap.cend(), "MeStmtPre::CreateSortedOccs: missing entry in useOccurMap"); useDfns = uMapIt->second; } else { // create empty MapleSet to be pointed to by use_dfns @@ -768,8 +768,8 @@ void MeStmtPre::CreateSortedOccs() { void MeStmtPre::ConstructUseOccurMapExpr(uint32 bbDfn, const MeExpr &meExpr) { if (meExpr.GetMeOp() == kMeOpVar) { OStIdx ostIdx = static_cast(&meExpr)->GetOstIdx(); - auto mapIt = useOccurMap.find(ostIdx); - if (mapIt == useOccurMap.end()) { + auto mapIt = std::as_const(useOccurMap).find(ostIdx); + if (mapIt == useOccurMap.cend()) { return; } MapleSet *bbDfnSet = mapIt->second; diff --git a/src/mapleall/maple_me/src/me_store_pre.cpp b/src/mapleall/maple_me/src/me_store_pre.cpp index b7f0b241a32d7caee88e1634911cb13a798f398c..82ba5675c8051a8ef775c6f28044675d5d54dfcd 100644 --- a/src/mapleall/maple_me/src/me_store_pre.cpp +++ b/src/mapleall/maple_me/src/me_store_pre.cpp @@ -101,8 +101,8 @@ RegMeExpr *MeStorePre::EnsureRHSInCurTemp(BB &bb) { } } // check if there is def by phi - auto phiIt = bb.GetMePhiList().find(workCand->GetOst()->GetIndex()); - if (phiIt != bb.GetMePhiList().end()) { + const auto phiIt = std::as_const(bb.GetMePhiList()).find(workCand->GetOst()->GetIndex()); + if (phiIt != bb.GetMePhiList().cend()) { if (enabledDebug) { LogInfo::MapleLogger() << "EnsureRHSInCurTemp: found def-by-phi at BB" << bb.GetBBId() << '\n'; } 
@@ -209,8 +209,8 @@ void MeStorePre::CreateRealOcc(const OStIdx &ostIdx, MeStmt &meStmt) { } SpreWorkCand *wkCand = nullptr; - auto mapIt = workCandMap.find(ostIdx); - if (mapIt != workCandMap.end()) { + const auto mapIt = std::as_const(workCandMap).find(ostIdx); + if (mapIt != workCandMap.cend()) { wkCand = mapIt->second; } else { OriginalSt *ost = ssaTab->GetSymbolOriginalStFromID(ostIdx); @@ -243,8 +243,8 @@ void MeStorePre::CreateRealOcc(const OStIdx &ostIdx, MeStmt &meStmt) { // create a new use occurrence for symbol oidx in given bb void MeStorePre::CreateUseOcc(const OStIdx &ostIdx, BB &bb) { SpreWorkCand *wkCand = nullptr; - auto mapIt = workCandMap.find(ostIdx); - if (mapIt == workCandMap.end()) { + const auto mapIt = std::as_const(workCandMap).find(ostIdx); + if (mapIt == workCandMap.cend()) { OriginalSt *ost = ssaTab->GetSymbolOriginalStFromID(ostIdx); CHECK_FATAL(ost, "ost is nullptr!"); wkCand = spreMp->New(spreAllocator, *ost); diff --git a/src/mapleall/maple_me/src/me_subsum_rc.cpp b/src/mapleall/maple_me/src/me_subsum_rc.cpp index 1debe908f1e11c8e52e8c13672ffc4d7bf52e9eb..a6a3f46e6634ae6fd01e76fda44e1b44cce13547 100644 --- a/src/mapleall/maple_me/src/me_subsum_rc.cpp +++ b/src/mapleall/maple_me/src/me_subsum_rc.cpp @@ -21,7 +21,7 @@ void SubsumRC::SetCantSubsum() { if (bb == nullptr) { continue; } - for (auto it : bb->GetMePhiList()) { + for (auto &it : std::as_const(bb->GetMePhiList())) { const OriginalSt *ost = ssaTab->GetOriginalStFromID(it.first); CHECK_FATAL(ost, "ost is nullptr!"); if (!ost->IsSymbolOst() || ost->GetIndirectLev() != 0) { @@ -163,8 +163,8 @@ void SubsumRC::ReplaceExpr(BB &bb, const MeExpr &var, MeExpr ®) { // create realoccurence decRef of varX, meStmt is the work candidate. 
void SubsumRC::CreateRealOcc(VarMeExpr &varX, DassignMeStmt &meStmt, MeStmt &decRef) { SpreWorkCand *wkCand = nullptr; - auto mapIt = candMap.find(&meStmt); - if (mapIt != candMap.end()) { + const auto mapIt = std::as_const(candMap).find(&meStmt); + if (mapIt != candMap.cend()) { wkCand = mapIt->second; } else { OriginalSt *ost = ssaTab->GetSymbolOriginalStFromID(varX.GetOstIdx()); @@ -295,7 +295,7 @@ void SubsumRC::RunSSUPre() { LogInfo::MapleLogger() << "------ worklist initial size " << candMap.size() << '\n'; } size_t candNum = 0; - for (std::pair wkCandPair : candMap) { + for (auto &wkCandPair : std::as_const(candMap)) { workCand = wkCandPair.second; MeStmt *stmt = wkCandPair.first; ASSERT(stmt->GetOp() == OP_dassign, "work_cand should be a dassign stmt"); @@ -343,7 +343,7 @@ void MESubsumRC::GetAnalysisDependence(maple::AnalysisDep &aDep) const { } bool MESubsumRC::PhaseRun(maple::MeFunction &f) { - if (!(f.GetHints() & kPlacementRCed)) { + if ((f.GetHints() & kPlacementRCed) == 0) { return false; } if (!MeOption::subsumRC) { diff --git a/src/mapleall/maple_me/src/me_tailcall.cpp b/src/mapleall/maple_me/src/me_tailcall.cpp new file mode 100644 index 0000000000000000000000000000000000000000..19142b59c58532ab10ff07e6d7682254257e60d3 --- /dev/null +++ b/src/mapleall/maple_me/src/me_tailcall.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "me_tailcall.h" + +#include "me_cfg.h" + +namespace maple { + +static constexpr int kUnvisited = 0; +static constexpr int kUnEscaped = 1; +static constexpr int kEscaped = 2; + +TailcallOpt::TailcallOpt(MeFunction &f, MemPool &mempool) + : AnalysisResult(&mempool), + func(f), tailcallAlloc(&mempool), + callCands(tailcallAlloc.Adapter()), + escapedPoints(f.GetCfg()->NumBBs(), kUnvisited, tailcallAlloc.Adapter()) {} + +void TailcallOpt::Walk() { + auto cfg = func.GetCfg(); + auto entryBB = cfg->GetFirstBB(); + std::vector worklist{entryBB}; + + // all of the BBs are marked as unvisited at the begining, while found address token in + // one stmt, marked it's parent BB escaped. + while (!worklist.empty()) { + auto currentBB = worklist.back(); + worklist.pop_back(); + const auto &escaped = escapedPoints[currentBB->GetBBId()]; + if (escaped == kUnvisited) { + WalkTroughBB(*currentBB); + } + + for (auto bb : currentBB->GetSucc()) { + auto &escapedNext = escapedPoints[bb->GetBBId()]; + // all unvisited succ BBs are pushed into worklist. 
And if current BB is escaped, we + // push the unescaped succs and prop them; + if (escapedNext < escaped) { + if (escaped == kEscaped) { + escapedNext = kEscaped; + } + worklist.push_back(bb); + } + } + } + + for (auto call : callCands) { + // if the parent BB of call has stack address escaped, don't mark it as tailcall + if (escapedPoints[call->GetBB()->GetBBId()] == kEscaped) { + continue; + } + call->SetMayTailcall(); + } +} + +void TailcallOpt::WalkTroughBB(BB &bb) { + for (auto &stmt : bb.GetMeStmts()) { + for (size_t opndId = 0; opndId < stmt.NumMeStmtOpnds(); ++opndId) { + auto opnd = stmt.GetOpnd(opndId); + // stack memory segment would be token by alloca as well + if (opnd->GetOp() == OP_alloca) { + escapedPoints[bb.GetBBId()] = kEscaped; + return; + } + if (opnd->GetOp() == OP_addrof) { + auto addrExpr = static_cast(opnd); + auto symbolStorageClass = addrExpr->GetOst()->GetMIRSymbol()->GetStorageClass(); + if (symbolStorageClass == kScAuto || symbolStorageClass == kScFormal) { + escapedPoints[bb.GetBBId()] = kEscaped; + return; + } + } + } + if (kOpcodeInfo.IsCall(stmt.GetOp()) || kOpcodeInfo.IsCallAssigned(stmt.GetOp()) || + kOpcodeInfo.IsICall(stmt.GetOp())) { + callCands.push_back(&stmt); + } + } + // no stack address token, mark bb as unescaped + escapedPoints[bb.GetBBId()] = kUnEscaped; +} + +void METailcall::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + (void)aDep; +} + +bool METailcall::PhaseRun(MeFunction &f) { + auto opt = GetPhaseAllocator()->New(f, *GetPhaseMemPool()); + opt->Walk(); + return true; +} + +} // namespace maple diff --git a/src/mapleall/maple_me/src/me_toplevel_ssa.cpp b/src/mapleall/maple_me/src/me_toplevel_ssa.cpp index 438997555a5fcf3643a8ca7adcc623cbd0d8dac6..f658c18e979526757f72c7906ccee49bf1322fa8 100644 --- a/src/mapleall/maple_me/src/me_toplevel_ssa.cpp +++ b/src/mapleall/maple_me/src/me_toplevel_ssa.cpp @@ -45,7 +45,7 @@ bool METopLevelSSA::PhaseRun(maple::MeFunction &f) { cfg->DumpToFile("ssalocal-"); } 
ssa->InsertPhiNode(); - ssa->RenameAllBBs(cfg); + ssa->RenameAllBBs(*cfg); if (DEBUGFUNC_NEWPM(f)) { ssaTab->GetVersionStTable().Dump(&ssaTab->GetModule()); } diff --git a/src/mapleall/maple_me/src/me_value_range_prop.cpp b/src/mapleall/maple_me/src/me_value_range_prop.cpp index 9c0570203df3b4d8bcce37ed9efe68a2e2cb750e..26ad4831f4fa1c07dbea5019b0090affa4e63915 100644 --- a/src/mapleall/maple_me/src/me_value_range_prop.cpp +++ b/src/mapleall/maple_me/src/me_value_range_prop.cpp @@ -297,7 +297,15 @@ bool ValueRangePropagation::DealWithSwitchWhenOpndIsConstant(BB &bb, BB *default auto *caseBB = func.GetCfg()->GetLabelBBAt(pair.second); CHECK_FATAL(caseBB, "caseBB is nullptr!"); // The value ranges of opnd and case are equal. - if (valueRange->GetBound().IsEqual(Bound(nullptr, pair.first, PTY_i64), valueRange->GetBound().GetPrimType())) { + if (valueRange->GetBound().GetPrimType() == PTY_u1) { + // If the opnd of the switch is of type bool, there is no need to convert the primtype of the case, + // and the values are directly compared for equality. 
+ if (valueRange->GetBound().GetConstant() == pair.first) { + findBB = true; + continue; + } + } else if (valueRange->GetBound().IsEqual(Bound(nullptr, pair.first, PTY_i64), + switchMeStmt.GetOpnd()->GetPrimType())) { findBB = true; continue; } @@ -310,6 +318,9 @@ bool ValueRangePropagation::DealWithSwitchWhenOpndIsConstant(BB &bb, BB *default bb.RemoveLastMeStmt(); bb.SetKind(kBBFallthru); isCFGChange = true; + if (bb.GetAttributes() == 0 && bb.GetSucc().size() != 1) { + ASSERT(false, "must only has one succ bb"); + } return true; } @@ -633,7 +644,7 @@ MeExpr *GetCmpExprFromVR(const ValueRange *vr, MeExpr &expr, MeIRMap *irmap) { // a: valueRange(0, constant) || valueRange(constant, 0) // ==> // a: valueRange(1, constant) || valueRange(constant, -1) -std::unique_ptr ValueRangePropagation::ZeroIsInRange(const ValueRange &valueRange) { +std::unique_ptr ValueRangePropagation::ZeroIsInRange(const ValueRange &valueRange) const { if (valueRange.GetRangeType() == kLowerAndUpper && valueRange.GetLower().GetVar() == nullptr && valueRange.GetUpper().GetVar() == nullptr && valueRange.GetLower().GetConstant() < valueRange.GetUpper().GetConstant()) { @@ -708,7 +719,7 @@ bool SafetyCheckWithBoundaryError::HandleAssertltOrAssertle( } bool ValueRangePropagation::CompareConstantOfIndexAndLength( - const MeStmt &meStmt, const ValueRange &valueRangeOfIndex, ValueRange &valueRangeOfLengthPtr, Opcode op) { + const MeStmt &meStmt, const ValueRange &valueRangeOfIndex, ValueRange &valueRangeOfLengthPtr, Opcode op) const { if (safetyCheckBoundary->HandleAssertltOrAssertle(meStmt, op, valueRangeOfIndex.GetUpper().GetConstant(), valueRangeOfLengthPtr.GetBound().GetConstant())) { return true; @@ -1082,14 +1093,14 @@ void ValueRangePropagation::GetValueRangeOfCRNode( } template -bool ValueRangePropagation::IsOverflowAfterMul(T lhs, T rhs, PrimType pty) { +bool ValueRangePropagation::IsOverflowAfterMul(T lhs, T rhs, PrimType pty) const { if (!IsNeededPrimType(pty)) { return true; } if (lhs == 
0 || rhs == 0) { return false; } - if (lhs >= 0 && rhs >= 0) { + if (lhs > 0 && rhs > 0) { return (GetMaxNumber(pty) / lhs) < rhs; } if (lhs < 0 && rhs < 0) { @@ -1800,7 +1811,7 @@ void ValueRangePropagation::UpdateTryAttribute(BB &bb) { // Insert the ost of phi opnds to their def bbs. void ValueRangePropagation::InsertOstOfPhi2Cands( - BB &bb, size_t i, ScalarMeExpr *updateSSAExceptTheScalarExpr, + BB &bb, size_t i, const ScalarMeExpr *updateSSAExceptTheScalarExpr, std::map> &ssaupdateCandsForCondExpr, bool setPhiIsDead) { for (auto &it : bb.GetMePhiList()) { if (setPhiIsDead) { @@ -1961,8 +1972,8 @@ bool ValueRangePropagation::AddOrSubWithConstant( } if (IsPrimTypeUint64(primType)) { res = (op == OP_add) ? - (static_cast(lhsConstant) + static_cast(rhsConstant)) : - (static_cast(lhsConstant) - static_cast(rhsConstant)); + static_cast((static_cast(lhsConstant) + static_cast(rhsConstant))) : + static_cast((static_cast(lhsConstant) - static_cast(rhsConstant))); } else { if (op == OP_add) { if ((rhsConstant > 0 && lhsConstant > GetMaxNumber(primType) - rhsConstant) || @@ -1984,7 +1995,7 @@ bool ValueRangePropagation::AddOrSubWithConstant( } // Create new bound when old bound add or sub with a constant. -bool ValueRangePropagation::CreateNewBoundWhenAddOrSub(Opcode op, Bound bound, int64 rhsConstant, Bound &res) { +bool ValueRangePropagation::CreateNewBoundWhenAddOrSub(Opcode op, Bound bound, int64 rhsConstant, Bound &res) const { int64 constant = 0; if (AddOrSubWithConstant(bound.GetPrimType(), op, bound.GetConstant(), rhsConstant, constant)) { res = Bound(bound.GetVar(), constant, bound.GetPrimType()); @@ -2024,7 +2035,7 @@ bool ValueRangePropagation::IsConstant(const BB &bb, MeExpr &expr, int64 &value, // Create new valueRange when old valueRange add or sub with a valuerange. 
std::unique_ptr ValueRangePropagation::AddOrSubWithValueRange( - Opcode op, ValueRange &valueRangeLeft, ValueRange &valueRangeRight) { + Opcode op, ValueRange &valueRangeLeft, ValueRange &valueRangeRight) const { if (valueRangeLeft.GetRangeType() == kNotEqual || valueRangeRight.GetRangeType() == kNotEqual || valueRangeLeft.GetRangeType() == kOnlyHasLowerBound || valueRangeRight.GetRangeType() == kOnlyHasLowerBound || valueRangeLeft.GetRangeType() == kOnlyHasUpperBound || valueRangeRight.GetRangeType() == kOnlyHasUpperBound) { @@ -2069,7 +2080,7 @@ std::unique_ptr ValueRangePropagation::AddOrSubWithValueRange( valueRangeLeft.IsAccurate() || valueRangeRight.IsAccurate()); } -bool ValueRangePropagation::AddOrSubWithBound(Bound oldBound, Bound &resBound, int64 rhsConstant, Opcode op) { +bool ValueRangePropagation::AddOrSubWithBound(Bound oldBound, Bound &resBound, int64 rhsConstant, Opcode op) const { int64 res = 0; if (!AddOrSubWithConstant(oldBound.GetPrimType(), op, oldBound.GetConstant(), rhsConstant, res)) { return false; @@ -2080,7 +2091,7 @@ bool ValueRangePropagation::AddOrSubWithBound(Bound oldBound, Bound &resBound, i // Create new valueRange when old valueRange add or sub with a constant. std::unique_ptr ValueRangePropagation::AddOrSubWithValueRange( - Opcode op, ValueRange &valueRange, int64 rhsConstant) { + Opcode op, ValueRange &valueRange, int64 rhsConstant) const { if (valueRange.GetRangeType() == kLowerAndUpper) { if (valueRange.IsConstantLowerAndUpper() && valueRange.GetLower().IsGreaterThan(valueRange.GetUpper(), valueRange.GetPrimType())) { @@ -2187,7 +2198,6 @@ std::unique_ptr ValueRangePropagation::RemWithValueRange(const BB &b Bound upper = Bound(nullptr, upperRes, opMeExpr.GetPrimType()); return std::make_unique(lower, upper, kLowerAndUpper); } - return nullptr; } // Create valueRange when deal with OP_rem. 
@@ -2319,7 +2329,7 @@ int64 GetRealValue(int64 value, PrimType primType) { } } -std::unique_ptr ValueRangePropagation::CopyValueRange(ValueRange &valueRange, PrimType primType) { +std::unique_ptr ValueRangePropagation::CopyValueRange(ValueRange &valueRange, PrimType primType) const { if (primType != PTY_begin && (!valueRange.GetLower().IsEqualAfterCVT(valueRange.GetPrimType(), primType) || !valueRange.GetUpper().IsEqualAfterCVT(valueRange.GetPrimType(), primType))) { // When the valueRange changes after conversion according to the parameter primType, return nullptr. @@ -2428,7 +2438,7 @@ void ValueRangePropagation::DealWithAssign(BB &bb, const MeStmt &stmt) { // i1 = phi(i0, i2), // i2 = i1 + 1, // stride is 1. -bool ValueRangePropagation::CanComputeLoopIndVar(const MeExpr &phiLHS, MeExpr &expr, int64 &constant) { +bool ValueRangePropagation::CanComputeLoopIndVar(const MeExpr &phiLHS, MeExpr &expr, int64 &constant) const { auto *curExpr = &expr; while (true) { if (!curExpr->IsScalar()) { @@ -2448,7 +2458,7 @@ bool ValueRangePropagation::CanComputeLoopIndVar(const MeExpr &phiLHS, MeExpr &e ConstMeExpr *rhsExpr = static_cast(opMeExpr.GetOpnd(1)); int64 res = 0; auto rhsConst = rhsExpr->GetExtIntValue(); - if (rhsExpr->GetPrimType() == PTY_u64 && static_cast(rhsConst) > GetMaxNumber(PTY_i64)) { + if (rhsExpr->GetPrimType() == PTY_u64 && rhsConst > GetMaxNumber(PTY_i64)) { return false; } if (AddOrSubWithConstant(opMeExpr.GetPrimType(), defStmt->GetRHS()->GetOp(), constant, rhsConst, res)) { @@ -2699,7 +2709,7 @@ std::unique_ptr ValueRangePropagation::MergeValueRangeOfPhiOperands( } bool ValueRangePropagation::MergeVrOrInitAndBackedge(MePhiNode &mePhiNode, ValueRange &vrOfInitExpr, - ValueRange &valueRange, Bound &resBound) { + ValueRange &valueRange, Bound &resBound) const { bool isOnlyHasLowerBound = vrOfInitExpr.GetRangeType() == kOnlyHasLowerBound; auto pType = mePhiNode.GetLHS()->GetPrimType(); auto upperBound = isOnlyHasLowerBound ? 
valueRange.GetBound() : vrOfInitExpr.GetBound(); @@ -2824,13 +2834,6 @@ void ValueRangePropagation::MergeValueRangeOfPhiOperands(const LoopDesc &loop, c } } -bool ValueRangePropagation::TheValueRangeOfOpndAndSubOpndAreEqual(const MeExpr &opnd) const { - // opnd[0] = OP zext i32 kPtyInvalid mx590 - // opnd[0] = REGINDX:15 u8 %15 mx589 - return (opnd.GetOp() == OP_zext && static_cast(opnd).GetBitsOffSet() == 0 && - IsPrimitiveUnsigned(opnd.GetOpnd(0)->GetPrimType())); -} - void ValueRangePropagation::CalculateVROfSubOpnd(BBId bbID, const MeExpr &opnd, ValueRange &valueRange) { // Deal with the case like: // opnd[0] = OP sub u32 u32 mx1 @@ -2871,14 +2874,8 @@ bool ValueRangePropagation::Insert2Caches( } if (onlyRecordValueRangeInTempCache.top()) { - if (opnd != nullptr && TheValueRangeOfOpndAndSubOpndAreEqual(*opnd)) { - (void)tempCaches[bbID].insert(std::make_pair(opnd->GetOpnd(0)->GetExprID(), CopyValueRange(*valueRange))); - } tempCaches[bbID][exprID] = std::move(valueRange); } else { - if (opnd != nullptr && TheValueRangeOfOpndAndSubOpndAreEqual(*opnd)) { - caches.at(bbID)[opnd->GetOpnd(0)->GetExprID()] = CopyValueRange(*valueRange); - } caches.at(bbID)[exprID] = std::move(valueRange); } if (opnd != nullptr) { @@ -2889,7 +2886,7 @@ bool ValueRangePropagation::Insert2Caches( // The rangeType of vrOfRHS is kEqual and the rangeType of vrOfLHS is kEqual, kNotEqual or kLowerAndUpper void ValueRangePropagation::JudgeEqual(MeExpr &expr, ValueRange &vrOfLHS, ValueRange &vrOfRHS, - std::unique_ptr &valueRangePtr) { + std::unique_ptr &valueRangePtr) const { if (vrOfRHS.GetRangeType() != kEqual) { return; } @@ -2957,7 +2954,7 @@ std::unique_ptr ValueRangePropagation::NegValueRange( } ValueRange *ValueRangePropagation::DealWithNegWhenFindValueRange(const BB &bb, const MeExpr &expr, - uint32 &numberOfRecursions, std::unordered_set &foundExprs, uint32 maxThreshold) { + uint32 &numberOfRecursions, std::unordered_set &foundExprs) { auto *opnd = expr.GetOpnd(0); if 
(!foundExprs.insert(opnd->GetExprID()).second) { return nullptr; @@ -2975,7 +2972,7 @@ ValueRange *ValueRangePropagation::FindValueRangeWithCompareOp(const BB &bb, MeE uint32 &numberOfRecursions, std::unordered_set &foundExprs, uint32 maxThreshold) { auto op = expr.GetOp(); if (op == OP_neg) { - return DealWithNegWhenFindValueRange(bb, expr, numberOfRecursions, foundExprs, maxThreshold); + return DealWithNegWhenFindValueRange(bb, expr, numberOfRecursions, foundExprs); } if (!IsCompareHasReverseOp(op) || expr.GetNumOpnds() != kNumOperands) { return nullptr; @@ -3749,7 +3746,7 @@ bool ValueRangePropagation::OnlyHaveOneCondGotoPredBB(const BB &bb, const BB &co } void ValueRangePropagation::UpdateProfile(BB &pred, BB &bb, const BB &targetBB) const { - if (bb.GetKind() != kBBCondGoto || bb.IsMeStmtEmpty() || !bb.GetLastMe()->IsCondBr()) { + if (bb.GetKind() != kBBCondGoto || bb.IsMeStmtEmpty() || !bb.GetLastMe() || !bb.GetLastMe()->IsCondBr()) { return; } auto *condGotoStmt = static_cast(bb.GetLastMe()); @@ -3780,7 +3777,6 @@ void ValueRangePropagation::UpdateProfile(BB &pred, BB &bb, const BB &targetBB) return; } auto *targetCondGotoStmt = static_cast(predCondGoto->GetLastMe()); - ASSERT_NOT_NULL(targetCondGotoStmt); if (targetCondGotoStmt->GetBranchProb() == kProbUnlikely || targetCondGotoStmt->GetBranchProb() == kProbLikely) { return; } @@ -4290,7 +4286,7 @@ bool ValueRangePropagation::MustBeFallthruOrGoto(const BB &defBB, const BB &bb) return false; } -std::unique_ptr ValueRangePropagation::AntiValueRange(ValueRange &valueRange) { +std::unique_ptr ValueRangePropagation::AntiValueRange(ValueRange &valueRange) const { RangeType oldType = valueRange.GetRangeType(); if (oldType != kEqual && oldType != kNotEqual) { return nullptr; @@ -4300,27 +4296,6 @@ std::unique_ptr ValueRangePropagation::AntiValueRange(ValueRange &va valueRange.GetBound().GetPrimType()), newType); } -void ValueRangePropagation::DeleteUnreachableBBs(BB &curBB, BB &falseBranch, BB &trueBranch) { - 
size_t sizeOfUnreachables = 0; - for (auto &pred : curBB.GetPred()) { - if (unreachableBBs.find(pred) != unreachableBBs.end()) { - sizeOfUnreachables++; - } - } - if (curBB.GetPred().size() - sizeOfUnreachables == 0) { - // If the preds of curBB which is condgoto and analysised is empty, delete the curBB. - Insert2UnreachableBBs(curBB); - // If the preds of falseBranch is empty, delete the falseBranch. - if (falseBranch.GetPred().size() == 1 && falseBranch.GetPred(0) == &curBB) { - Insert2UnreachableBBs(falseBranch); - } - // If the preds of trueBranch is empty, delete the trueBranch. - if (trueBranch.GetPred().size() == 1 && trueBranch.GetPred(0) == &curBB) { - Insert2UnreachableBBs(trueBranch); - } - } -} - void ValueRangePropagation::PropValueRangeFromCondGotoToTrueAndFalseBranch( const MeExpr &opnd0, ValueRange &rightRange, const BB &falseBranch, const BB &trueBranch) { std::unique_ptr trueBranchValueRange; @@ -4337,7 +4312,7 @@ void ValueRangePropagation::PropValueRangeFromCondGotoToTrueAndFalseBranch( // phiOpnds: (b, c), phi rhs of opnd in this bb // predOpnd or phiOpnds is uesd to find the valuerange in the pred of bb void ValueRangePropagation::ReplaceOpndByDef(const BB &bb, MeExpr &currOpnd, MeExpr *&predOpnd, - MePhiNode *&phi, bool &thePhiIsInBB) { + MePhiNode *&phi, bool &thePhiIsInBB) const { /* If currOpnd is not defined in bb, set opnd to currOpnd */ predOpnd = &currOpnd; /* find the rhs of opnd */ @@ -4403,7 +4378,7 @@ MeExpr &ValueRangePropagation::GetVersionOfOpndInPred(const BB &pred, const BB & // a2 = a1 + 1 ignore def point a2 when judge the way has def point from begin to end // | // bb2 -// if (a2 < 1) +// if a2 < 1 bool ValueRangePropagation::CanIgnoreTheDefPoint(const MeStmt &stmt, const BB &end, const ScalarMeExpr &expr) const { if (end.GetKind() != kBBCondGoto) { return false; @@ -4440,7 +4415,7 @@ bool ValueRangePropagation::CanIgnoreTheDefPoint(const MeStmt &stmt, const BB &e // succ0(a4 = phi(a0,a3)) // | // end if (a4 - 1 > 
constant) -bool ValueRangePropagation::HasDefPointInPred(const BB &begin, const BB &end, const ScalarMeExpr &opnd) { +bool ValueRangePropagation::HasDefPointInPred(const BB &begin, const BB &end, const ScalarMeExpr &opnd) const { auto *tempBB = &begin; while (tempBB != &end) { if (tempBB->GetSucc().size() != 1) { @@ -4490,7 +4465,9 @@ std::unique_ptr ValueRangePropagation::GetValueRangeOfLHS(const BB & } auto vrpOfOpnd0 = FindValueRangeInCurrBBOrDominateBBs(pred, versionOpnd0InPred); auto vrpOfOpnd1 = FindValueRangeInCurrBBOrDominateBBs(pred, versionOpnd1InPred); - if (vrpOfOpnd0 == nullptr || vrpOfOpnd1 == nullptr) { + if (vrpOfOpnd0 == nullptr || vrpOfOpnd1 == nullptr || + // If two ranges are not a certain constant value, it cannot be determined whether they are equal. + !vrpOfOpnd0->IsKEqualAndConstantRange() || !vrpOfOpnd1->IsKEqualAndConstantRange()) { return nullptr; } if (vrpOfOpnd0->IsEqual(vrpOfOpnd1.get())) { @@ -4656,7 +4633,8 @@ bool ValueRangePropagation::AnalysisValueRangeInPredsOfCondGotoBB( } if (ConditionEdgeCanBeDeleted(*pred, bb, valueRangeInPred, dummyRightRange, falseBranch, trueBranch, opndType, op, updateSSAExceptTheScalarExpr, ssaupdateCandsForCondExpr)) { - if (updateSSAExceptTheScalarExpr != nullptr && phi != nullptr) { + if (updateSSAExceptTheScalarExpr != nullptr && updateSSAExceptTheScalarExpr->GetOst()->IsLocal() && + phi != nullptr) { if (updateSSAExceptTheScalarExpr->GetDefBy() == kDefByStmt) { // PredOpnd is only used by condGoto stmt and phi, if the condGoto stmt can be deleted, need not update ssa // of predOpnd and the def point of predOpnd can be deleted. 
@@ -5004,8 +4982,11 @@ MeExpr *ValueRangePropagation::GetDefOfBase(const IvarMeExpr &ivar) const { } void ValueRangePropagation::DealWithCondGotoWhenRightRangeIsNotExist( - BB &bb, const MeExpr &opnd0, MeExpr &opnd1, Opcode opOfBrStmt, Opcode conditionalOp, ValueRange *valueRangeOfLeft) { - PrimType prim = opnd1.GetPrimType(); + BB &bb, const OpMeExpr &opMeExpr, Opcode opOfBrStmt, ValueRange *valueRangeOfLeft) { + auto opnd0 = opMeExpr.GetOpnd(0); + auto opnd1 = opMeExpr.GetOpnd(1); + auto conditionalOp = opMeExpr.GetOp(); + PrimType prim = opMeExpr.GetOpndType(); if (!IsNeededPrimType(prim)) { return; } @@ -5014,73 +4995,73 @@ void ValueRangePropagation::DealWithCondGotoWhenRightRangeIsNotExist( GetTrueAndFalseBranch(opOfBrStmt, bb, trueBranch, falseBranch); switch (conditionalOp) { case OP_ne: { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, prim), kNotEqual)); - (void)Insert2Caches(falseBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, prim), kEqual)); + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, prim), kNotEqual)); + (void)Insert2Caches(falseBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, prim), kEqual)); break; } case OP_eq: { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, prim), kEqual)); - (void)Insert2Caches(falseBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, prim), kNotEqual)); + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, prim), kEqual)); + (void)Insert2Caches(falseBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, prim), kNotEqual)); break; } case OP_le: { if (valueRangeOfLeft != nullptr && valueRangeOfLeft->GetRangeType() == kOnlyHasLowerBound) { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), + (void)Insert2Caches(trueBranch->GetBBId(), 
opnd0->GetExprID(), std::make_unique(valueRangeOfLeft->GetLower(), - Bound(&opnd1, prim), kLowerAndUpper, dealWithPhi)); + Bound(opnd1, prim), kLowerAndUpper, dealWithPhi)); } else { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), std::make_unique(Bound(GetMinNumber(prim), prim), - Bound(&opnd1, prim), kLowerAndUpper)); + Bound(opnd1, prim), kLowerAndUpper)); } - (void)Insert2Caches(falseBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, 1, prim), + (void)Insert2Caches(falseBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, 1, prim), Bound(GetMaxNumber(prim), prim), kLowerAndUpper)); break; } case OP_lt: { if (valueRangeOfLeft != nullptr && valueRangeOfLeft->GetRangeType() == kOnlyHasLowerBound) { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), std::make_unique(valueRangeOfLeft->GetLower(), - Bound(&opnd1, -1, prim), kLowerAndUpper, dealWithPhi)); + Bound(opnd1, -1, prim), kLowerAndUpper, dealWithPhi)); } else { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), std::make_unique(Bound(GetMinNumber(prim), prim), - Bound(&opnd1, -1, prim), kLowerAndUpper)); + Bound(opnd1, -1, prim), kLowerAndUpper)); } - (void)Insert2Caches(falseBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, prim), Bound(GetMaxNumber(prim), prim), kLowerAndUpper)); + (void)Insert2Caches(falseBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, prim), Bound(GetMaxNumber(prim), prim), kLowerAndUpper)); break; } case OP_ge: { if (valueRangeOfLeft != nullptr && valueRangeOfLeft->GetRangeType() == kOnlyHasUpperBound) { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), std::make_unique( - Bound(&opnd1, prim), valueRangeOfLeft->GetBound(), kLowerAndUpper, dealWithPhi)); + 
(void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), std::make_unique( + Bound(opnd1, prim), valueRangeOfLeft->GetBound(), kLowerAndUpper, dealWithPhi)); } else { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, prim), + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, prim), Bound(GetMaxNumber(prim), prim), kLowerAndUpper)); } - (void)Insert2Caches(falseBranch->GetBBId(), opnd0.GetExprID(), + (void)Insert2Caches(falseBranch->GetBBId(), opnd0->GetExprID(), std::make_unique(Bound(GetMinNumber(prim), prim), - Bound(&opnd1, -1, prim), kLowerAndUpper)); + Bound(opnd1, -1, prim), kLowerAndUpper)); break; } case OP_gt: { if (valueRangeOfLeft != nullptr && valueRangeOfLeft->GetRangeType() == kOnlyHasUpperBound) { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), std::make_unique( - Bound(&opnd1, 1, prim), valueRangeOfLeft->GetBound(), kLowerAndUpper, dealWithPhi)); + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), std::make_unique( + Bound(opnd1, 1, prim), valueRangeOfLeft->GetBound(), kLowerAndUpper, dealWithPhi)); } else { - (void)Insert2Caches(trueBranch->GetBBId(), opnd0.GetExprID(), - std::make_unique(Bound(&opnd1, 1, prim), Bound(GetMaxNumber(prim), prim), kLowerAndUpper)); + (void)Insert2Caches(trueBranch->GetBBId(), opnd0->GetExprID(), + std::make_unique(Bound(opnd1, 1, prim), Bound(GetMaxNumber(prim), prim), kLowerAndUpper)); } - (void)Insert2Caches(falseBranch->GetBBId(), opnd0.GetExprID(), + (void)Insert2Caches(falseBranch->GetBBId(), opnd0->GetExprID(), std::make_unique(Bound(GetMinNumber(prim), prim), - Bound(&opnd1, prim), kLowerAndUpper)); + Bound(opnd1, prim), kLowerAndUpper)); break; } default: @@ -5330,16 +5311,17 @@ bool ValueRangePropagation::AnalysisUnreachableForEqOrNe(BB &bb, const CondGotoM // second brfalse mx1 eq(ne/gt/le/ge/lt) mx2 ==> remove falseBranch or trueBranch bool 
ValueRangePropagation::DealWithVariableRange(BB &bb, const CondGotoMeStmt &brMeStmt, const ValueRange &leftRange) { + auto primType = static_cast(brMeStmt.GetOpnd())->GetOpndType(); MeExpr *opnd1 = static_cast(brMeStmt.GetOpnd())->GetOpnd(1); // Example1: deal with like if (mx1 > mx2), when the valuerange of mx1 is [mx2+1, max(mx2_type)] // Example2: deal with like if (mx1 >= mx2), when the valuerange of mx1 is [mx2, max(mx2_type)] if (leftRange.GetLower().GetVar() == opnd1 && leftRange.GetUpper().GetVar() == nullptr && - leftRange.UpperIsMax(opnd1->GetPrimType())) { + leftRange.UpperIsMax(primType)) { return AnalysisUnreachableForGeOrGt(bb, brMeStmt, leftRange); // Example1: deal with like if (mx1 < mx2), when the valuerange of mx1 is [min(mx2_type), mx2-1] // Example2: deal with like if (mx1 <= mx2), when the valuerange of mx1 is [min(mx2_type), mx2] } else if (leftRange.GetLower().GetVar() == nullptr && leftRange.GetUpper().GetVar() == opnd1 && - leftRange.LowerIsMin(opnd1->GetPrimType())) { + leftRange.LowerIsMin(primType)) { return AnalysisUnreachableForLeOrLt(bb, brMeStmt, leftRange); // Example: deal with like if (mx1 == mx2), when the valuerange of mx1 is [mx2, mx2] // Example: deal with like if (mx1 != mx2), when the valuerange of mx1 is [mx2, mx2] @@ -5381,7 +5363,7 @@ void ValueRangePropagation::DealWithCondGoto(BB &bb, MeStmt &stmt) { } if (rightRange == nullptr) { if (!ConditionEdgeCanBeDeleted(bb, *opnd0, rightRange, opMeExpr->GetOpndType(), opMeExpr->GetOp())) { - DealWithCondGotoWhenRightRangeIsNotExist(bb, *opnd0, *opnd1, brMeStmt.GetOp(), opMeExpr->GetOp(), leftRange); + DealWithCondGotoWhenRightRangeIsNotExist(bb, *opMeExpr, brMeStmt.GetOp(), leftRange); } return; } @@ -5494,13 +5476,15 @@ bool MEValueRangePropagation::PhaseRun(maple::MeFunction &f) { (void)f.GetCfg()->UnreachCodeAnalysis(true); f.GetCfg()->WontExitAnalysis(); // split critical edges - (void)MeSplitCEdge(false).SplitCriticalEdgeForMeFunc(f); + bool split = 
MeSplitCEdge(false).SplitCriticalEdgeForMeFunc(f); + if (split || valueRangePropagation.IsCFGChange()) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &MEDominance::id); + dom = FORCE_EXEC(MEDominance)->GetDomResult(); + } if (valueRangePropagation.IsCFGChange()) { if (ValueRangePropagation::isDebug) { f.GetCfg()->DumpToFile("valuerange-after" + std::to_string(f.vrpRuns)); } - GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &MEDominance::id); - dom = FORCE_EXEC(MEDominance)->GetDomResult(); if (valueRangePropagation.NeedUpdateSSA()) { MeSSAUpdate ssaUpdate(f, *f.GetMeSSATab(), *dom, cands); MPLTimer timer; diff --git a/src/mapleall/maple_me/src/me_verify.cpp b/src/mapleall/maple_me/src/me_verify.cpp index e504209307761a45a5e1031480da77f9923b61b2..ba732812b819071138d7d46df56e62b66eadf689 100644 --- a/src/mapleall/maple_me/src/me_verify.cpp +++ b/src/mapleall/maple_me/src/me_verify.cpp @@ -82,7 +82,6 @@ void GetAttrOfType(const MIRType &type, std::string &str) { auto arryType = static_cast(type); for (uint32 i = 0; i < arryType.GetDim(); ++i) { str += "_" + std::to_string(arryType.GetSizeArrayItem(i)); - break; } GetAttrOfType(*arryType.GetElemType(), str); break; diff --git a/src/mapleall/maple_me/src/meconstprop.cpp b/src/mapleall/maple_me/src/meconstprop.cpp index a0bba911187ba5a8225f14cdf059026f6bb0b323..2321174c2261c0d6ed986740c011987b5f4b4340 100644 --- a/src/mapleall/maple_me/src/meconstprop.cpp +++ b/src/mapleall/maple_me/src/meconstprop.cpp @@ -13,14 +13,6 @@ * See the Mulan PSL v2 for more details. 
*/ #include "meconstprop.h" -#include -#include -#include "clone.h" -#include "constantfold.h" -#include "mir_nodes.h" -#include "me_function.h" -#include "ssa_mir_nodes.h" -#include "mir_builder.h" namespace maple { void MeConstProp::IntraConstProp() const {} diff --git a/src/mapleall/maple_me/src/occur.cpp b/src/mapleall/maple_me/src/occur.cpp index 648fba11fbd514dca7a762bb185256f1ea1f278a..0375976f038fafec82aa2c02966ee63aa4b32048 100644 --- a/src/mapleall/maple_me/src/occur.cpp +++ b/src/mapleall/maple_me/src/occur.cpp @@ -31,7 +31,7 @@ constexpr uint32_t kOffsetNaryMeStmtOpnd = 2; } namespace maple { -void MeOccur::DumpOccur(IRMap &irMap) { +void MeOccur::DumpOccur(IRMap &irMap) const { MIRModule *mod = &irMap.GetSSATab().GetModule(); mod->GetOut() << "MeOccur "; Dump(irMap); diff --git a/src/mapleall/maple_me/src/optimizeCFG.cpp b/src/mapleall/maple_me/src/optimizeCFG.cpp index 5ebff35320077ef24b79b8c072e24b7abcd585c7..7ba847d4b0ff34a5642271f6b171236a86d9aff5 100644 --- a/src/mapleall/maple_me/src/optimizeCFG.cpp +++ b/src/mapleall/maple_me/src/optimizeCFG.cpp @@ -1,4 +1,3 @@ - /* * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. * @@ -14,6 +13,7 @@ * See the Mulan PSL v2 for more details. 
*/ #include "optimizeCFG.h" +#include #include "bb.h" #include "factory.h" @@ -455,8 +455,8 @@ MeExpr *FindCond2SelRHSFromPhiNode(BB *condBB, const BB *ftOrGtBB, BB *jointBB, int predIdx = GetRealPredIdx(*jointBB, *condBB); ASSERT(predIdx != -1, "[FUNC: %s]ftBB is not a pred of jointBB", funcName.c_str()); auto &phiList = jointBB->GetMePhiList(); - auto it = phiList.find(ostIdx); - if (it == phiList.end()) { + auto it = std::as_const(phiList).find(ostIdx); + if (it == phiList.cend()) { return nullptr; } MePhiNode *phi = it->second; @@ -476,40 +476,40 @@ MeExpr *FindCond2SelRHSFromPhiNode(BB *condBB, const BB *ftOrGtBB, BB *jointBB, // expr has deref nullptr or div/rem zero, return expr; // if it is not sure whether the expr will throw exception, return nullptr -void MustThrowExceptionExpr(MeExpr *expr, std::set &exceptionExpr, bool &isDivOrRemException) { +void MustThrowExceptionExpr(MeExpr &expr, std::set &exceptionExpr, bool &isDivOrRemException) { if (isDivOrRemException) { return; } - if (expr->GetMeOp() == kMeOpIvar) { + if (expr.GetMeOp() == kMeOpIvar) { // deref nullptr - if (static_cast(expr)->GetBase()->IsZero()) { - exceptionExpr.emplace(static_cast(expr)); + if (static_cast(&expr)->GetBase()->IsZero()) { + exceptionExpr.emplace(static_cast(&expr)); return; } - } else if ((expr->GetOp() == OP_div || expr->GetOp() == OP_rem) && expr->GetOpnd(1)->IsIntZero()) { + } else if ((expr.GetOp() == OP_div || expr.GetOp() == OP_rem) && expr.GetOpnd(1)->IsIntZero()) { // for float or double zero, this is legal. 
- exceptionExpr.emplace(expr); + exceptionExpr.emplace(&expr); isDivOrRemException = true; return; - } else if (expr->GetOp() == OP_select) { - MustThrowExceptionExpr(expr->GetOpnd(0), exceptionExpr, isDivOrRemException); + } else if (expr.GetOp() == OP_select) { + MustThrowExceptionExpr(*expr.GetOpnd(0), exceptionExpr, isDivOrRemException); // for select, if only one result will cause error, we are not sure whether // the actual result of this select expr will cause error std::set trueExpr; - MustThrowExceptionExpr(expr->GetOpnd(1), trueExpr, isDivOrRemException); + MustThrowExceptionExpr(*expr.GetOpnd(1), trueExpr, isDivOrRemException); if (trueExpr.empty()) { return; } std::set falseExpr; - MustThrowExceptionExpr(expr->GetOpnd(2), falseExpr, isDivOrRemException); + MustThrowExceptionExpr(*expr.GetOpnd(2), falseExpr, isDivOrRemException); if (falseExpr.empty()) { return; } - exceptionExpr.emplace(expr); + exceptionExpr.emplace(&expr); return; } - for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { - MustThrowExceptionExpr(expr->GetOpnd(i), exceptionExpr, isDivOrRemException); + for (size_t i = 0; i < expr.GetNumOpnds(); ++i) { + MustThrowExceptionExpr(*expr.GetOpnd(i), exceptionExpr, isDivOrRemException); } } @@ -522,7 +522,7 @@ MeStmt *GetNoReturnStmt(BB *bb) { std::set exceptionExpr; for (size_t i = 0; i < stmt->NumMeStmtOpnds(); ++i) { bool isDivOrRemException = false; - MustThrowExceptionExpr(stmt->GetOpnd(i), exceptionExpr, isDivOrRemException); + MustThrowExceptionExpr(*stmt->GetOpnd(i), exceptionExpr, isDivOrRemException); if (!exceptionExpr.empty()) { return stmt; } @@ -730,6 +730,20 @@ bool HasFallthruPred(const BB &bb) { // For BB Level optimization class OptimizeBB { public: + struct ContinuousCondBrInfo { + ContinuousCondBrInfo() {} + ContinuousCondBrInfo(CondGotoMeStmt &s1, CondGotoMeStmt &s2, OpMeExpr &expr1, OpMeExpr &expr2) : + stmt1(&s1), stmt2(&s2), opMeExpr1(&expr1), opMeExpr2(&expr2) {} + + ContinuousCondBrInfo(const ContinuousCondBrInfo&) = 
default; + maple::OptimizeBB::ContinuousCondBrInfo& operator=(const ContinuousCondBrInfo&) = default; + + CondGotoMeStmt *stmt1 = nullptr; + CondGotoMeStmt *stmt2 = nullptr; + OpMeExpr *opMeExpr1 = nullptr; + OpMeExpr *opMeExpr2 = nullptr; + }; + OptimizeBB(BB *bb, MeFunction &func, std::map>> *candidates) : currBB(bb), f(func), @@ -741,7 +755,7 @@ class OptimizeBB { // optimize each currBB until no change occur bool OptBBIteratively(); // initial factory to create corresponding optimizer for currBB according to BBKind. - void InitBBOptFactory(); + void InitBBOptFactory() const; private: BB *currBB = nullptr; // BB we currently perform optimization on @@ -769,7 +783,7 @@ class OptimizeBB { // Optimize once time on bb, some common cfg opt and peephole cfg opt will be performed on currBB bool OptBBOnce(); - // elminate BB that is unreachable: + // elminate BB that is unreachable // 1.BB has no pred(expect then entry block) // 2.BB has itself as pred bool EliminateDeadBB(); @@ -794,7 +808,7 @@ class OptimizeBB { bool OptimizeSwitchBB(); // for sub-pattern in OptimizeCondBB - MeExpr *TryToSimplifyCombinedCond(const MeExpr &expr); + MeExpr *TryToSimplifyCombinedCond(const MeExpr &expr) const; bool FoldBranchToCommonDest(BB *pred, BB *succ); bool FoldBranchToCommonDest(); bool SkipRedundantCond(); @@ -805,9 +819,14 @@ class OptimizeBB { BB *MergeSuccIntoPred(BB *pred, BB *succ); bool CondBranchToSelect(); bool FoldCondBranch(); - bool IsProfitableForCond2Sel(MeExpr *condExpr, MeExpr *trueExpr, MeExpr *falseExpr); + bool IsProfitableForCond2Sel(MeExpr &condExpr, MeExpr &trueExpr, MeExpr &falseExpr) const; + bool CanFoldCondBranch(BB &predBB, BB &succBB, ContinuousCondBrInfo &brInfo) const; + bool FoldCondBranchWithAdjacentIread(BB &predBB, BB &succBB, ContinuousCondBrInfo &brInfo, + MeExpr &foldExpr1, MeExpr &foldExpr2) const; + bool FoldCondBranchWithSuccAdjacentIread(); + bool FoldCondBranchWithPredAdjacentIread(); // for OptimizeUncondBB - bool MergeGotoBBToPred(BB 
*gotoBB, BB *pred); + bool MergeGotoBBToPred(BB *succ, BB *pred); // after moving pred from curr to curr's successor (i.e. succ), update the phiList of curr and succ // a phiOpnd will be removed from curr's philist, and a phiOpnd will be inserted to succ's philist // note: when replace pred's succ (i.e. curr) with succ, please DO NOT remove phiOpnd immediately, @@ -816,20 +835,20 @@ class OptimizeBB { // for OptimizeCondBB2UnCond bool BranchBB2UncondBB(BB &bb); // Get first return BB - BB *GetFirstReturnBB(); + BB *GetFirstReturnBB() const; bool EliminateRedundantPhiList(BB *bb); // Check if state of currBB is error. bool CheckCurrBB(); // Insert ost of philist in bb to cand, and set ost start from newBB(newBB will be bb itself if not specified) void UpdateSSACandForBBPhiList(BB *bb, const BB *newBB = nullptr); - void UpdateSSACandForOst(const OStIdx &ostIdx, const BB *bb); + void UpdateSSACandForOst(const OStIdx &ostIdx, const BB *bb) const; // replace oldBBID in cands with newBBID void UpdateBBIdInSSACand(const BBId &oldBBID, const BBId &newBBID); void DeleteBB(BB *bb); - bool IsEmptyBB(BB &bb) { + bool IsEmptyBB(BB &bb) const { return isMeIR ? 
IsMeEmptyBB(bb) : IsMplEmptyBB(bb); } @@ -894,7 +913,7 @@ void OptimizeBB::UpdateBBIdInSSACand(const BBId &oldBBID, const BBId &newBBID) { } } -void OptimizeBB::UpdateSSACandForOst(const OStIdx &ostIdx, const BB *bb) { +void OptimizeBB::UpdateSSACandForOst(const OStIdx &ostIdx, const BB *bb) const { MeSSAUpdate::InsertOstToSSACands(ostIdx, *bb, cands); } @@ -906,16 +925,33 @@ void OptimizeBB::UpdateSSACandForBBPhiList(BB *bb, const BB *newBB) { newBB = bb; } std::set &ostSet = candsOstInBB[newBB->GetBBId()]; - for (auto phi : bb->GetMePhiList()) { + for (auto &phi : std::as_const(bb->GetMePhiList())) { OStIdx ostIdx = phi.first; UpdateSSACandForOst(ostIdx, newBB); ostSet.emplace(ostIdx); } + for (auto &stmt : newBB->GetMeStmts()) { + if (stmt.GetLHS() != nullptr) { + UpdateSSACandForOst(stmt.GetLHS()->GetOstIdx(), newBB); + ostSet.emplace(stmt.GetLHS()->GetOstIdx()); + } + if (stmt.GetChiList() != nullptr) { + for (auto &chiNode : std::as_const(*stmt.GetChiList())) { + UpdateSSACandForOst(chiNode.first, newBB); + ostSet.emplace(chiNode.first); + } + } + if (stmt.GetMustDefList() != nullptr) { + for (auto &mustDefNode : std::as_const(*stmt.GetMustDefList())) { + UpdateSSACandForOst(mustDefNode.GetLHS()->GetOstIdx(), newBB); + } + } + } if (bb != newBB) { auto it = candsOstInBB.find(bb->GetBBId()); if (it != candsOstInBB.end()) { // ost in bb should be updated, make it updated with newBB - for (auto ostIdx : it->second) { + for (auto &ostIdx : std::as_const(it->second)) { UpdateSSACandForOst(ostIdx, newBB); ostSet.emplace(ostIdx); } @@ -1132,7 +1168,7 @@ bool OptimizeBB::OptimizeCondBB2UnCond() { } // return first return bb -BB *OptimizeBB::GetFirstReturnBB() { +BB *OptimizeBB::GetFirstReturnBB() const { for (auto *bb : cfg->GetAllBBs()) { if (bb == nullptr) { continue; @@ -1175,7 +1211,7 @@ bool OptimizeBB::RemoveSuccFromNoReturnBB() { std::set exceptionExprSet; bool divOrRemException = false; for (size_t i = 0; i < exceptionStmt->NumMeStmtOpnds(); ++i) { - 
MustThrowExceptionExpr(exceptionStmt->GetOpnd(i), exceptionExprSet, divOrRemException); + MustThrowExceptionExpr(*exceptionStmt->GetOpnd(i), exceptionExprSet, divOrRemException); } // if exceptionStmt not a callsite of exit func, we replace it with a exception-throwing expr. if (f.GetMIRModule().IsCModule() && divOrRemException) { @@ -1334,18 +1370,18 @@ bool OptimizeBB::MergeDistinctBBPair() { return everChanged; } -bool OptimizeBB::IsProfitableForCond2Sel(MeExpr *condExpr, MeExpr *trueExpr, MeExpr *falseExpr) { - if (trueExpr == falseExpr) { +bool OptimizeBB::IsProfitableForCond2Sel(MeExpr &condExpr, MeExpr &trueExpr, MeExpr &falseExpr) const { + if (&trueExpr == &falseExpr) { return true; } /* Select for Float128 is not possible */ - if (condExpr->GetPrimType() == PTY_f128 || falseExpr->GetPrimType() == PTY_f128) { + if (condExpr.GetPrimType() == PTY_f128 || falseExpr.GetPrimType() == PTY_f128) { return false; } - ASSERT(IsSafeExpr(trueExpr), "[FUNC: %s]Please check for safety first", funcName.c_str()); - ASSERT(IsSafeExpr(falseExpr), "[FUNC: %s]Please check for safety first", funcName.c_str()); + ASSERT(IsSafeExpr(&trueExpr), "[FUNC: %s]Please check for safety first", funcName.c_str()); + ASSERT(IsSafeExpr(&falseExpr), "[FUNC: %s]Please check for safety first", funcName.c_str()); // try to simplify select expr - MeExpr *selExpr = irmap->CreateMeExprSelect(trueExpr->GetPrimType(), *condExpr, *trueExpr, *falseExpr); + MeExpr *selExpr = irmap->CreateMeExprSelect(trueExpr.GetPrimType(), condExpr, trueExpr, falseExpr); MeExpr *simplifiedSel = irmap->SimplifyMeExpr(selExpr); if (simplifiedSel != selExpr) { return true; // can be simplified @@ -1354,12 +1390,12 @@ bool OptimizeBB::IsProfitableForCond2Sel(MeExpr *condExpr, MeExpr *trueExpr, MeE // We can check for every opnd of opndExpr, and calculate their cost according to cg's insn // but optimization in mplbe may change the insn and the result is not correct after that. 
// Therefore, to make this easier, only reg/const/var are allowed here - MeExprOp trueOp = trueExpr->GetMeOp(); - MeExprOp falseOp = falseExpr->GetMeOp(); - if (trueOp == kMeOpVar && !DoesExprContainSubExpr(condExpr, trueExpr)) { + MeExprOp trueOp = trueExpr.GetMeOp(); + MeExprOp falseOp = falseExpr.GetMeOp(); + if (trueOp == kMeOpVar && !DoesExprContainSubExpr(&condExpr, &trueExpr)) { return false; } - if (falseOp == kMeOpVar && !DoesExprContainSubExpr(condExpr, falseExpr)) { + if (falseOp == kMeOpVar && !DoesExprContainSubExpr(&condExpr, &falseExpr)) { return false; } if ((trueOp != kMeOpConst && trueOp != kMeOpReg && trueOp != kMeOpVar) || @@ -1367,7 +1403,7 @@ bool OptimizeBB::IsProfitableForCond2Sel(MeExpr *condExpr, MeExpr *trueExpr, MeE return false; } // big integer - if (GetNonSimpleImm(trueExpr) != 0 || GetNonSimpleImm(falseExpr) != 0) { + if (GetNonSimpleImm(&trueExpr) != 0 || GetNonSimpleImm(&falseExpr) != 0) { return false; } return true; @@ -1382,6 +1418,9 @@ bool OptimizeBB::IsProfitableForCond2Sel(MeExpr *condExpr, MeExpr *trueExpr, MeE // jointBB jointBB bool OptimizeBB::CondBranchToSelect() { CHECK_CURR_BB(); + if (currBB->GetKind() != kBBCondGoto) { + return false; + } BB *ftBB = FindFirstRealSucc(currBB->GetSucc(0)); // fallthruBB BB *gtBB = FindFirstRealSucc(currBB->GetSucc(1)); // gotoBB if (ftBB == gtBB) { @@ -1426,7 +1465,7 @@ bool OptimizeBB::CondBranchToSelect() { ftLHS = static_cast(ftStmt->GetLHS()); ftRHS = ftStmt->GetRHS(); if (ftStmt->GetChiList() != nullptr) { - for (auto &chiNode : *ftStmt->GetChiList()) { + for (auto &chiNode : std::as_const(*ftStmt->GetChiList())) { chiListCands.emplace(chiNode.second->GetRHS()); } } @@ -1437,7 +1476,7 @@ bool OptimizeBB::CondBranchToSelect() { gtLHS = static_cast(gtStmt->GetLHS()); gtRHS = gtStmt->GetRHS(); if (gtStmt->GetChiList() != nullptr) { - for (auto &chiNode : *gtStmt->GetChiList()) { + for (auto &chiNode : std::as_const(*gtStmt->GetChiList())) { 
chiListCands.emplace(chiNode.second->GetRHS()); } } @@ -1483,7 +1522,7 @@ bool OptimizeBB::CondBranchToSelect() { MeExpr *trueExpr = (condStmt->GetOp() == OP_brtrue) ? gtRHS : ftRHS; MeExpr *falseExpr = (trueExpr == gtRHS) ? ftRHS : gtRHS; MeExpr *condExpr = condStmt->GetOpnd(0); - if (!IsProfitableForCond2Sel(condExpr, trueExpr, falseExpr)) { + if (!IsProfitableForCond2Sel(*condExpr, *trueExpr, *falseExpr)) { DEBUG_LOG() << "Abort cond2sel for BB" << LOG_BBID(currBB) << ", because cond2sel is not profitable\n"; return false; } @@ -1596,7 +1635,6 @@ bool OptimizeBB::FoldCondBranch() { } auto stmt1 = static_cast(currBB->GetLastMe()); auto stmt2 = static_cast(succBB->GetFirstMe()); - ASSERT_NOT_NULL(stmt1); if (stmt1->GetOp() != stmt2->GetOp()) { return false; } @@ -1608,8 +1646,11 @@ bool OptimizeBB::FoldCondBranch() { isAnd = true; } - if (!isAnd && (foldExpr = FoldCmpOfBitOps(*irmap, *stmt1->GetOpnd(), *stmt2->GetOpnd())) != nullptr) { - break; + if (!isAnd) { + foldExpr = FoldCmpOfBitOps(*irmap, *stmt1->GetOpnd(), *stmt2->GetOpnd()); + if (foldExpr != nullptr) { + break; + } } if ((foldExpr = ConstantFold::FoldCmpExpr(*irmap, *stmt1->GetOpnd(), *stmt2->GetOpnd(), isAnd)) != nullptr) { @@ -1637,6 +1678,120 @@ bool OptimizeBB::FoldCondBranch() { return false; } +bool OptimizeBB::CanFoldCondBranch(BB &predBB, BB &succBB, ContinuousCondBrInfo &brInfo) const { + if (predBB.GetKind() != kBBCondGoto || succBB.GetKind() != kBBCondGoto) { + return false; + } + auto realBrOfPred = FindFirstRealSucc(predBB.GetSucc(1)); + auto realBrOfSucc = FindFirstRealSucc(succBB.GetSucc(1)); + if (realBrOfPred != realBrOfSucc) { + return false; + } + if (!HasOnlyMeCondGotoStmt(predBB) && !HasOnlyMeCondGotoStmt(succBB)) { + return false; + } + auto stmt1 = static_cast(predBB.GetLastMe()); + auto stmt2 = static_cast(succBB.GetFirstMe()); + if (stmt1->GetOp() != stmt2->GetOp()) { + return false; + } + if (stmt1->GetOp() != OP_brfalse) { + return false; + } + OpMeExpr *opMeExpr1 = 
static_cast(stmt1->GetOpnd()); + OpMeExpr *opMeExpr2 = static_cast(stmt2->GetOpnd()); + if (opMeExpr1->GetNumOpnds() != opMeExpr2->GetNumOpnds()) { + return false; + } + if (opMeExpr1->GetOp() != opMeExpr2->GetOp()) { + return false; + } + if (opMeExpr1->GetNumOpnds() != kOperandNumBinary) { + return false; + } + brInfo = ContinuousCondBrInfo(*stmt1, *stmt2, *opMeExpr1, *opMeExpr2); + return true; +} + +bool OptimizeBB::FoldCondBranchWithAdjacentIread(BB &predBB, BB &succBB, ContinuousCondBrInfo &brInfo, + MeExpr &foldExpr1, MeExpr &foldExpr2) const { + MeExpr *newOpMeExpr = irmap->CreateMeExprCompare(brInfo.opMeExpr1->GetOp(), brInfo.opMeExpr1->GetPrimType(), + foldExpr1.GetPrimType(), foldExpr1, foldExpr2); + brInfo.stmt1->SetOpnd(0, newOpMeExpr); + brInfo.stmt1->SetBranchProb(brInfo.stmt2->GetBranchProb()); + succBB.RemoveLastMeStmt(); + succBB.SetKind(kBBFallthru); + if (cfg->UpdateCFGFreq()) { + FreqType freqToMove = succBB.GetSuccFreq()[1]; + predBB.SetSuccFreq(0, predBB.GetSuccFreq()[0] - freqToMove); + succBB.SetFrequency(succBB.GetFrequency() - freqToMove); + predBB.SetSuccFreq(1, predBB.GetSuccFreq()[1] + freqToMove); + predBB.GetSucc(1)->SetFrequency(predBB.GetSucc(1)->GetFrequency() + freqToMove); + } + BB *succOfSuccBB = succBB.GetSucc(1); + succBB.RemoveBBFromSucc(*succOfSuccBB); + succOfSuccBB->RemoveBBFromPred(succBB, true); + return true; +} + +// fold 2 sequential condbranch if they are semantically logical and/or +// cond1 cond1 +// | \ |\ +// cond2 \ -> | \ +// | \ \ fallth br +// fallth \| +// | br +bool OptimizeBB::FoldCondBranchWithSuccAdjacentIread() { + CHECK_CURR_BB(); + auto succBB = currBB->GetSucc(0); + ContinuousCondBrInfo brInfo; + if (!CanFoldCondBranch(*currBB, *currBB->GetSucc(0), brInfo)) { + return false; + } + auto foldExpr1 = irmap->MergeAdjacentIread(*brInfo.opMeExpr1->GetOpnd(0), *brInfo.opMeExpr2->GetOpnd(0)); + auto foldExpr2 = irmap->MergeAdjacentIread(*brInfo.opMeExpr1->GetOpnd(1), *brInfo.opMeExpr2->GetOpnd(1)); + if 
(foldExpr1 == nullptr || foldExpr2 == nullptr || foldExpr1->GetPrimType() != foldExpr2->GetPrimType()) { + return false; + } + FoldCondBranchWithAdjacentIread(*currBB, *succBB, brInfo, *foldExpr1, *foldExpr2); + return true; +} + +// fold 2 sequential condbranch if they are semantically logical and/or +// cond1 cond2 +// | \ |\ +// cond2 \ -> | \ +// | \ \ fallth br +// fallth \| +// | br +bool OptimizeBB::FoldCondBranchWithPredAdjacentIread() { + CHECK_CURR_BB(); + if (currBB->GetPred().size() != 1) { + return false; + } + auto predBB = currBB->GetPred(0); + ContinuousCondBrInfo brInfo; + if (!CanFoldCondBranch(*predBB, *currBB, brInfo)) { + return false; + } + auto foldExpr1 = irmap->MergeAdjacentIread(*brInfo.opMeExpr1->GetOpnd(0), *brInfo.opMeExpr2->GetOpnd(0)); + auto foldExpr2 = irmap->MergeAdjacentIread(*brInfo.opMeExpr1->GetOpnd(1), *brInfo.opMeExpr2->GetOpnd(1)); + if (foldExpr1 == nullptr || foldExpr2 == nullptr || foldExpr1->GetPrimType() != foldExpr2->GetPrimType()) { + auto optBand1 = irmap->OptBandWithIread(*brInfo.opMeExpr1->GetOpnd(0), *brInfo.opMeExpr2->GetOpnd(0)); + auto optBand2 = irmap->OptBandWithIread(*brInfo.opMeExpr1->GetOpnd(1), *brInfo.opMeExpr2->GetOpnd(1)); + if (optBand1 == nullptr || optBand2 == nullptr|| optBand1->GetPrimType() != optBand2->GetPrimType()) { + return false; + } + MeExpr *newOpMeExpr = irmap->CreateMeExprCompare(brInfo.opMeExpr1->GetOp(), brInfo.opMeExpr1->GetPrimType(), + optBand1->GetPrimType(), *optBand1, *optBand2); + brInfo.stmt2->SetOpnd(0, newOpMeExpr); + return true; + } + FoldCondBranchWithAdjacentIread(*predBB, *currBB, brInfo, *foldExpr1, *foldExpr2); + currBB = predBB; + return true; +} + bool IsExprSameLexicalally(MeExpr *expr1, MeExpr *expr2) { if (expr1 == expr2) { return true; @@ -1822,7 +1977,6 @@ BranchResult InferSuccCondBrFromPredCond(const MeExpr *predCond, const MeExpr *s // succ ... 
// / \ // ftBB gtBB -// // If succ's cond can be inferred from pred's cond, pred can skip succ and branches to one of succ's successors directly // Here we deal with two cases: // 1. pred's cond is the same as succ's @@ -1938,7 +2092,10 @@ bool OptimizeBB::SkipRedundantCond(BB &pred, BB &succ) { << LOG_BBID(&pred) << "->...->BB" << LOG_BBID(&succ) << "(skipped)" << " => BB" << LOG_BBID(&pred) << "->BB" << LOG_BBID(newBB) << "(new)->BB" << LOG_BBID(newTarget) << "\n"; - if (pred.GetSucc(1) == newBB) { + if (pred.GetSucc().size() == 1) { + pred.RemoveLastMeStmt(); + pred.SetKind(kBBFallthru); + } else if (pred.GetSucc(1) == newBB) { cfg->UpdateBranchTarget(pred, succ, *newBB, f); } newTarget->AddPred(*newBB); @@ -1947,7 +2104,7 @@ bool OptimizeBB::SkipRedundantCond(BB &pred, BB &succ) { if (cfg->UpdateCFGFreq()) { int idx = pred.GetSuccIndex(*newBB); ASSERT(idx >= 0 && idx < pred.GetSucc().size(), "sanity check"); - FreqType freq = pred.GetEdgeFreq(idx); + FreqType freq = pred.GetEdgeFreq(static_cast(static_cast(idx))); newBB->SetFrequency(freq); newBB->PushBackSuccFreq(freq); // update frequency of succ because one of its pred is removed @@ -1959,7 +2116,7 @@ bool OptimizeBB::SkipRedundantCond(BB &pred, BB &succ) { BB *affectedBB = (tfBranch == kBrTrue) ? stfSucc.first : stfSucc.second; idx = succ.GetSuccIndex(*affectedBB); ASSERT(idx >= 0 && idx < succ.GetSucc().size(), "sanity check"); - FreqType oldedgeFreq = succ.GetSuccFreq()[static_cast(idx)]; + FreqType oldedgeFreq = succ.GetSuccFreq()[static_cast(static_cast(idx))]; if (oldedgeFreq >= freq) { succ.SetSuccFreq(idx, oldedgeFreq - freq); } else { @@ -2018,19 +2175,25 @@ bool OptimizeBB::OptimizeCondBB() { SetBBRunAgain(); return true; } + if (FoldCondBranchWithSuccAdjacentIread()) { + SetBBRunAgain(); + return true; + } + if (FoldCondBranchWithPredAdjacentIread()) { + SetBBRunAgain(); + return true; + } return change; } // after moving pred from curr to curr's successor (i.e. 
succ), update the phiList of curr and succ // a phiOpnd will be removed from curr's philist, and another phiOpnd will be inserted to succ's philist -// // ... pred ... pred // \ / \ \ / \ // curr ... ==> curr / ... // / \ ... / \ / ... // / \ / / \/ / // ... succ ... succ -// // parameter predIdxForCurr is the index of pred in the predVector of curr // note: // 1.when replace pred's succ (i.e. curr) with succ, please DO NOT remove phiOpnd immediately, @@ -2043,7 +2206,7 @@ void OptimizeBB::UpdatePhiForMovingPred(int predIdxForCurr, const BB *pred, BB * if (succPhiList.empty()) { // succ has only one pred(i.e. curr) before // we copy curr's philist to succ, but not with all phiOpnd - for (auto &phiNode : currPhilist) { + for (auto &phiNode : std::as_const(currPhilist)) { auto *phiMeNode = irmap->NewInPool(); phiMeNode->SetDefBB(succ); succPhiList.emplace(phiNode.first, phiMeNode); @@ -2062,12 +2225,12 @@ void OptimizeBB::UpdatePhiForMovingPred(int predIdxForCurr, const BB *pred, BB * // succ has other pred besides curr for (auto &phi : succPhiList) { OStIdx ostIdx = phi.first; - auto it = currPhilist.find(ostIdx); + auto it = std::as_const(currPhilist).find(ostIdx); ASSERT(predPredIdx != -1, "[FUNC: %s]pred BB%d is not a predecessor of succ BB%d yet", funcName.c_str(), LOG_BBID(pred), LOG_BBID(succ)); auto &phiOpnds = phi.second->GetOpnds(); - if (it != currPhilist.end()) { + if (it != currPhilist.cend()) { // curr has phiNode for this ost, we copy pred's corresponding phiOpnd in curr to succ phiOpnds.insert(phiOpnds.begin() + predPredIdx, it->second->GetOpnd(static_cast(predIdxForCurr))); } else { @@ -2079,7 +2242,7 @@ void OptimizeBB::UpdatePhiForMovingPred(int predIdxForCurr, const BB *pred, BB * } } // search philist in curr for phinode that is not in succ yet - for (auto &phi : currPhilist) { + for (auto &phi : std::as_const(currPhilist)) { OStIdx ostIdx = phi.first; auto resPair = succPhiList.emplace(ostIdx, nullptr); if (!resPair.second) { @@ -2309,7 +2472,7 
@@ bool OptimizeBB::OptimizeSwitchBB() { return true; } -MeExpr *OptimizeBB::TryToSimplifyCombinedCond(const MeExpr &expr) { +MeExpr *OptimizeBB::TryToSimplifyCombinedCond(const MeExpr &expr) const { Opcode op = expr.GetOp(); if (op != OP_land && op != OP_lior) { return nullptr; @@ -2336,7 +2499,7 @@ MeExpr *OptimizeBB::TryToSimplifyCombinedCond(const MeExpr &expr) { return resExpr; } -// pattern is like: +// pattern is like // pred(condBB) // / \ // / succ(condBB) @@ -2382,7 +2545,7 @@ bool OptimizeBB::FoldBranchToCommonDest(BB *pred, BB *succ) { auto stfBrPair = GetTrueFalseBrPair(succ); // succ's true and false branches Opcode combinedCondOp = OP_undef; bool invertSuccCond = false; // invert second condition, e.g. (cond1 && !cond2) - // all cases are listed as follow: + // all cases are listed as follow // | case | predBB -> common | succBB -> common | invertSuccCond | or/and | // | ---- | ---------------- | ---------------- | -------------- | ------ | // | 1 | true | true | | or | @@ -2444,7 +2607,7 @@ bool OptimizeBB::FoldBranchToCommonDest(BB *pred, BB *succ) { return true; } -// pattern is like: +// pattern is like // curr(condBB) // / \ // / succ(condBB) @@ -2476,7 +2639,7 @@ bool OptimizeBB::OptBBOnce() { currBB->UpdateEdgeFreqs(false); } bool everChanged = false; - // eliminate dead BB : + // eliminate dead BB // 1.BB has no pred(expect then entry block) // 2.BB has only itself as pred if (EliminateDeadBB()) { @@ -2508,7 +2671,7 @@ bool OptimizeBB::OptBBOnce() { return everChanged; } -void OptimizeBB::InitBBOptFactory() { +void OptimizeBB::InitBBOptFactory() const { RegisterFactoryFunction(kBBCondGoto, &OptimizeBB::OptimizeCondBB); RegisterFactoryFunction(kBBGoto, &OptimizeBB::OptimizeUncondBB); RegisterFactoryFunction(kBBFallthru, &OptimizeBB::OptimizeFallthruBB); @@ -2634,8 +2797,11 @@ bool MEOptimizeCFGNoSSA::PhaseRun(MeFunction &f) { debug = DEBUGFUNC_NEWPM(f); phaseName = PhaseName(); bool change = OptimizeMeFuncCFG(f, nullptr); - if (change && 
f.GetCfg()->DumpIRProfileFile()) { - f.GetCfg()->DumpToFile("after-OptimizeCFGNOSSA", false, f.GetCfg()->UpdateCFGFreq()); + if (change) { + FORCE_INVALID(MEDominance, f); + if (f.GetCfg()->DumpIRProfileFile()) { + f.GetCfg()->DumpToFile("after-OptimizeCFGNOSSA", false, f.GetCfg()->UpdateCFGFreq()); + } } return change; } diff --git a/src/mapleall/maple_me/src/orig_symbol.cpp b/src/mapleall/maple_me/src/orig_symbol.cpp index ae41a4df70f31ca9f71a37d6ccbdeb706974c2ba..10433aa4df053026a928fbd2e804864658376dbe 100644 --- a/src/mapleall/maple_me/src/orig_symbol.cpp +++ b/src/mapleall/maple_me/src/orig_symbol.cpp @@ -267,17 +267,17 @@ MIRType *OriginalStTable::GetTypeFromBaseAddressAndFieldId(TyIdx tyIdx, FieldID } OriginalSt *OriginalStTable::FindOrCreateExtraLevOriginalSt( - const VersionSt *vst, TyIdx tyIdx, FieldID fieldId, const OffsetType &offset, bool isFieldArrayType) { - if (!vst->GetOst()->IsSymbolOst() && !vst->GetOst()->IsPregOst()) { + const VersionSt &vst, TyIdx tyIdx, FieldID fieldId, const OffsetType &offset, bool isFieldArrayType) { + if (!vst.GetOst()->IsSymbolOst() && !vst.GetOst()->IsPregOst()) { return nullptr; } - auto *ost = vst->GetOst(); + auto *ost = vst.GetOst(); tyIdx = (tyIdx == 0u) ? 
ost->GetTyIdx() : tyIdx; MIRType *typeOfExtraLevOst = GetTypeFromBaseAddressAndFieldId(tyIdx, fieldId, isFieldArrayType); FieldID fieldIDInOst = fieldId; - OriginalSt *nextLevOst = FindExtraLevOriginalSt(vst, tyIdx, typeOfExtraLevOst, fieldIDInOst, offset); + OriginalSt *nextLevOst = FindExtraLevOriginalSt(&vst, tyIdx, typeOfExtraLevOst, fieldIDInOst, offset); if (nextLevOst != nullptr) { return nextLevOst; } @@ -294,7 +294,7 @@ OriginalSt *OriginalStTable::FindOrCreateExtraLevOriginalSt( originalStVector.push_back(nextLevOst); CHECK_FATAL(ost->GetIndirectLev() < INT8_MAX, "boundary check"); nextLevOst->SetIndirectLev(ost->GetIndirectLev() + 1); - nextLevOst->SetPointerVst(vst); + nextLevOst->SetPointerVst(&vst); nextLevOst->SetOffset(offset); nextLevOst->SetPointerTyIdx(tyIdx); nextLevOst->SetAddressTaken(true); @@ -314,7 +314,7 @@ OriginalSt *OriginalStTable::FindOrCreateExtraLevOriginalSt( if (GlobalTables::GetTypeTable().GetTypeFromTyIdx(ost->GetTyIdx())->PointsToConstString()) { nextLevOst->SetIsFinal(true); } - AddNextLevelOstOfVst(vst, nextLevOst); + AddNextLevelOstOfVst(&vst, nextLevOst); return nextLevOst; } diff --git a/src/mapleall/maple_me/src/pme_emit.cpp b/src/mapleall/maple_me/src/pme_emit.cpp index 043383452849084d7035216ad0416f21578817fd..509cd0f1fe65cb9b8b4203f49755c5fdf886db3c 100644 --- a/src/mapleall/maple_me/src/pme_emit.cpp +++ b/src/mapleall/maple_me/src/pme_emit.cpp @@ -24,20 +24,20 @@ namespace maple { // convert x to use OP_array if possible; return nullptr if unsuccessful; // ptrTyIdx is the high level pointer type of x -ArrayNode *PreMeEmitter::ConvertToArray(BaseNode *x, TyIdx ptrTyIdx) { - if (x->GetOpCode() != OP_add) { +ArrayNode *PreMeEmitter::ConvertToArray(BaseNode &x, TyIdx ptrTyIdx) { + if (x.GetOpCode() != OP_add) { return nullptr; } MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyIdx); if (mirType->GetKind() != kTypePointer) { return nullptr; } - BaseNode *opnd0 = x->Opnd(0); + BaseNode *opnd0 = 
x.Opnd(0); ASSERT_NOT_NULL(GetMexpr(opnd0)); if (!GetMexpr(opnd0)->HasAddressValue()) { return nullptr; } - BaseNode *opnd1 = x->Opnd(1); + BaseNode *opnd1 = x.Opnd(1); MIRType *elemType = static_cast(mirType)->GetPointedType(); size_t elemSize = elemType->GetSize(); BaseNode *indexOpnd = opnd1; @@ -66,34 +66,34 @@ ArrayNode *PreMeEmitter::ConvertToArray(BaseNode *x, TyIdx ptrTyIdx) { // form the pointer to array type MIRArrayType *arryType = GlobalTables::GetTypeTable().GetOrCreateArrayType(*elemType, 0); MIRType *ptArryType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*arryType); - ArrayNode *arryNode = codeMP->New(*codeMPAlloc, x->GetPrimType(), ptArryType->GetTypeIndex()); + ArrayNode *arryNode = codeMP->New(*codeMPAlloc, x.GetPrimType(), ptArryType->GetTypeIndex()); arryNode->SetBoundsCheck(false); arryNode->GetNopnd().push_back(opnd0); arryNode->GetNopnd().push_back(indexOpnd); // The number of operands of arryNode is set to 2 arryNode->SetNumOpnds(2); - preMeExprExtensionMap[arryNode] = preMeExprExtensionMap[x]; + preMeExprExtensionMap[arryNode] = preMeExprExtensionMap[&x]; // update opnds' parent info if it has - if (preMeExprExtensionMap[opnd0]) { + if (preMeExprExtensionMap[opnd0] != nullptr) { preMeExprExtensionMap[opnd0]->SetParent(arryNode); } - if (preMeExprExtensionMap[indexOpnd]) { + if (preMeExprExtensionMap[indexOpnd] != nullptr) { preMeExprExtensionMap[indexOpnd]->SetParent(arryNode); } return arryNode; } -BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { - PreMeMIRExtension *pmeExt = preMeMP->New(parent, meExpr); - switch (meExpr->GetOp()) { +BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr &meExpr, BaseNode *parent) { + PreMeMIRExtension *pmeExt = preMeMP->New(parent, &meExpr); + switch (meExpr.GetOp()) { case OP_constval: { - MIRConst *constval = static_cast(meExpr)->GetConstVal(); + MIRConst *constval = static_cast(&meExpr)->GetConstVal(); ConstvalNode *lcvlNode = codeMP->New(constval->GetType().GetPrimType(), 
constval); preMeExprExtensionMap[lcvlNode] = pmeExt; return lcvlNode; } case OP_dread: { - VarMeExpr *varmeexpr = static_cast(meExpr); + VarMeExpr *varmeexpr = static_cast(&meExpr); MIRSymbol *sym = varmeexpr->GetOst()->GetMIRSymbol(); if (sym->IsLocal()) { sym->ResetIsDeleted(); @@ -112,11 +112,11 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { case OP_cmpl: case OP_cmpg: case OP_lt: { - OpMeExpr *cmpexpr = static_cast(meExpr); + OpMeExpr *cmpexpr = static_cast(&meExpr); CompareNode *cmpNode = - codeMP->New(meExpr->GetOp(), cmpexpr->GetPrimType(), cmpexpr->GetOpndType(), nullptr, nullptr); - BaseNode *opnd0 = EmitPreMeExpr(cmpexpr->GetOpnd(0), cmpNode); - BaseNode *opnd1 = EmitPreMeExpr(cmpexpr->GetOpnd(1), cmpNode); + codeMP->New(meExpr.GetOp(), cmpexpr->GetPrimType(), cmpexpr->GetOpndType(), nullptr, nullptr); + BaseNode *opnd0 = EmitPreMeExpr(*cmpexpr->GetOpnd(0), cmpNode); + BaseNode *opnd1 = EmitPreMeExpr(*cmpexpr->GetOpnd(1), cmpNode); cmpNode->SetBOpnd(opnd0, 0); cmpNode->SetBOpnd(opnd1, 1); cmpNode->SetOpndType(cmpNode->GetOpndType()); @@ -124,15 +124,15 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { return cmpNode; } case OP_array: { - NaryMeExpr *arrExpr = static_cast(meExpr); + NaryMeExpr *arrExpr = static_cast(&meExpr); ArrayNode *arrNode = codeMP->New(*codeMPAlloc, arrExpr->GetPrimType(), arrExpr->GetTyIdx()); arrNode->SetBoundsCheck(arrExpr->GetBoundCheck()); for (uint32 i = 0; i < arrExpr->GetNumOpnds(); i++) { - BaseNode *opnd = EmitPreMeExpr(arrExpr->GetOpnd(i), arrNode); + BaseNode *opnd = EmitPreMeExpr(*arrExpr->GetOpnd(i), arrNode); arrNode->GetNopnd().push_back(opnd); } - arrNode->SetNumOpnds(meExpr->GetNumOpnds()); + arrNode->SetNumOpnds(meExpr.GetNumOpnds()); preMeExprExtensionMap[arrNode] = pmeExt; return arrNode; } @@ -153,23 +153,23 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { case OP_shl: case OP_sub: case OP_add: { - OpMeExpr *opExpr = 
static_cast(meExpr); - BinaryNode *binNode = codeMP->New(meExpr->GetOp(), meExpr->GetPrimType()); - binNode->SetBOpnd(EmitPreMeExpr(opExpr->GetOpnd(0), binNode), 0); - binNode->SetBOpnd(EmitPreMeExpr(opExpr->GetOpnd(1), binNode), 1); + OpMeExpr *opExpr = static_cast(&meExpr); + BinaryNode *binNode = codeMP->New(meExpr.GetOp(), meExpr.GetPrimType()); + binNode->SetBOpnd(EmitPreMeExpr(*opExpr->GetOpnd(0), binNode), 0); + binNode->SetBOpnd(EmitPreMeExpr(*opExpr->GetOpnd(1), binNode), 1); preMeExprExtensionMap[binNode] = pmeExt; return binNode; } case OP_iread: { - IvarMeExpr *ivarExpr = static_cast(meExpr); - IreadNode *irdNode = codeMP->New(meExpr->GetOp(), meExpr->GetPrimType()); + IvarMeExpr *ivarExpr = static_cast(&meExpr); + IreadNode *irdNode = codeMP->New(meExpr.GetOp(), meExpr.GetPrimType()); ASSERT(ivarExpr->GetOffset() == 0, "offset in iread should be 0"); - irdNode->SetOpnd(EmitPreMeExpr(ivarExpr->GetBase(), irdNode), 0); + irdNode->SetOpnd(EmitPreMeExpr(*ivarExpr->GetBase(), irdNode), 0); irdNode->SetTyIdx(ivarExpr->GetTyIdx()); irdNode->SetFieldID(ivarExpr->GetFieldID()); preMeExprExtensionMap[irdNode] = pmeExt; if (irdNode->Opnd(0)->GetOpCode() != OP_array) { - ArrayNode *arryNode = ConvertToArray(irdNode->Opnd(0), irdNode->GetTyIdx()); + ArrayNode *arryNode = ConvertToArray(*(irdNode->Opnd(0)), irdNode->GetTyIdx()); if (arryNode != nullptr) { irdNode->SetOpnd(arryNode, 0); } @@ -177,11 +177,11 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { return irdNode; } case OP_ireadoff: { - IvarMeExpr *ivarExpr = static_cast(meExpr); - IreadNode *irdNode = codeMP->New(OP_iread, meExpr->GetPrimType()); + IvarMeExpr *ivarExpr = static_cast(&meExpr); + IreadNode *irdNode = codeMP->New(OP_iread, meExpr.GetPrimType()); MeExpr *baseexpr = ivarExpr->GetBase(); if (ivarExpr->GetOffset() == 0) { - irdNode->SetOpnd(EmitPreMeExpr(baseexpr, irdNode), 0); + irdNode->SetOpnd(EmitPreMeExpr(*baseexpr, irdNode), 0); } else { MIRType *mirType = 
GlobalTables::GetTypeTable().GetInt32(); MIRIntConst *mirConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( @@ -190,7 +190,7 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { PreMeMIRExtension *pmeExt2 = preMeMP->New(irdNode, baseexpr); preMeExprExtensionMap[constValNode] = pmeExt2; BinaryNode *newAddrNode = - codeMP->New(OP_add, baseexpr->GetPrimType(), EmitPreMeExpr(baseexpr, irdNode), constValNode); + codeMP->New(OP_add, baseexpr->GetPrimType(), EmitPreMeExpr(*baseexpr, irdNode), constValNode); preMeExprExtensionMap[newAddrNode] = pmeExt2; irdNode->SetOpnd(newAddrNode, 0); } @@ -200,7 +200,7 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { return irdNode; } case OP_addrof: { - AddrofMeExpr *addrMeexpr = static_cast(meExpr); + AddrofMeExpr *addrMeexpr = static_cast(&meExpr); OriginalSt *ost = addrMeexpr->GetOst(); MIRSymbol *sym = ost->GetMIRSymbol(); AddrofNode *addrofNode = @@ -209,14 +209,14 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { return addrofNode; } case OP_addroflabel: { - AddroflabelMeExpr *addroflabelexpr = static_cast(meExpr); + AddroflabelMeExpr *addroflabelexpr = static_cast(&meExpr); AddroflabelNode *addroflabel = codeMP->New(addroflabelexpr->labelIdx); - addroflabel->SetPrimType(meExpr->GetPrimType()); + addroflabel->SetPrimType(meExpr.GetPrimType()); preMeExprExtensionMap[addroflabel] = pmeExt; return addroflabel; } case OP_addroffunc: { - AddroffuncMeExpr *addrMeexpr = static_cast(meExpr); + AddroffuncMeExpr *addrMeexpr = static_cast(&meExpr); AddroffuncNode *addrfunNode = codeMP->New(addrMeexpr->GetPrimType(), addrMeexpr->GetPuIdx()); preMeExprExtensionMap[addrfunNode] = pmeExt; return addrfunNode; @@ -224,19 +224,19 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { case OP_gcmalloc: case OP_gcpermalloc: case OP_stackmalloc: { - GcmallocMeExpr *gcMeexpr = static_cast(meExpr); + GcmallocMeExpr *gcMeexpr = 
static_cast(&meExpr); GCMallocNode *gcMnode = - codeMP->New(meExpr->GetOp(), meExpr->GetPrimType(), gcMeexpr->GetTyIdx()); + codeMP->New(meExpr.GetOp(), meExpr.GetPrimType(), gcMeexpr->GetTyIdx()); gcMnode->SetTyIdx(gcMeexpr->GetTyIdx()); preMeExprExtensionMap[gcMnode] = pmeExt; return gcMnode; } case OP_retype: { - OpMeExpr *opMeexpr = static_cast(meExpr); - RetypeNode *retypeNode = codeMP->New(meExpr->GetPrimType()); + OpMeExpr *opMeexpr = static_cast(&meExpr); + RetypeNode *retypeNode = codeMP->New(meExpr.GetPrimType()); retypeNode->SetFromType(opMeexpr->GetOpndType()); retypeNode->SetTyIdx(opMeexpr->GetTyIdx()); - retypeNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(0), retypeNode), 0); + retypeNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(0), retypeNode), 0); preMeExprExtensionMap[retypeNode] = pmeExt; return retypeNode; } @@ -244,36 +244,36 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { case OP_cvt: case OP_floor: case OP_trunc: { - OpMeExpr *opMeexpr = static_cast(meExpr); - TypeCvtNode *tycvtNode = codeMP->New(meExpr->GetOp(), meExpr->GetPrimType()); + OpMeExpr *opMeexpr = static_cast(&meExpr); + TypeCvtNode *tycvtNode = codeMP->New(meExpr.GetOp(), meExpr.GetPrimType()); tycvtNode->SetFromType(opMeexpr->GetOpndType()); - tycvtNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(0), tycvtNode), 0); + tycvtNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(0), tycvtNode), 0); preMeExprExtensionMap[tycvtNode] = pmeExt; return tycvtNode; } case OP_sext: case OP_zext: case OP_extractbits: { - OpMeExpr *opMeexpr = static_cast(meExpr); - ExtractbitsNode *extNode = codeMP->New(meExpr->GetOp(), meExpr->GetPrimType()); - extNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(0), extNode), 0); + OpMeExpr *opMeexpr = static_cast(&meExpr); + ExtractbitsNode *extNode = codeMP->New(meExpr.GetOp(), meExpr.GetPrimType()); + extNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(0), extNode), 0); extNode->SetBitsOffset(opMeexpr->GetBitsOffSet()); 
extNode->SetBitsSize(opMeexpr->GetBitsSize()); preMeExprExtensionMap[extNode] = pmeExt; return extNode; } case OP_depositbits: { - OpMeExpr *opMeexpr = static_cast(meExpr); - DepositbitsNode *depNode = codeMP->New(meExpr->GetOp(), meExpr->GetPrimType()); - depNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(0), depNode), 0); - depNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(1), depNode), 1); + OpMeExpr *opMeexpr = static_cast(&meExpr); + DepositbitsNode *depNode = codeMP->New(meExpr.GetOp(), meExpr.GetPrimType()); + depNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(0), depNode), 0); + depNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(1), depNode), 1); depNode->SetBitsOffset(opMeexpr->GetBitsOffSet()); depNode->SetBitsSize(opMeexpr->GetBitsSize()); preMeExprExtensionMap[depNode] = pmeExt; return depNode; } case OP_regread: { - RegMeExpr *regMeexpr = static_cast(meExpr); + RegMeExpr *regMeexpr = static_cast(&meExpr); RegreadNode *regNode = codeMP->New(); regNode->SetPrimType(regMeexpr->GetPrimType()); regNode->SetRegIdx(regMeexpr->GetRegIdx()); @@ -281,14 +281,14 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { return regNode; } case OP_sizeoftype: { - SizeoftypeMeExpr *sizeofMeexpr = static_cast(meExpr); + SizeoftypeMeExpr *sizeofMeexpr = static_cast(&meExpr); SizeoftypeNode *sizeofTynode = codeMP->New(sizeofMeexpr->GetPrimType(), sizeofMeexpr->GetTyIdx()); preMeExprExtensionMap[sizeofTynode] = pmeExt; return sizeofTynode; } case OP_fieldsdist: { - FieldsDistMeExpr *fdMeexpr = static_cast(meExpr); + FieldsDistMeExpr *fdMeexpr = static_cast(&meExpr); FieldsDistNode *fieldsNode = codeMP->New(fdMeexpr->GetPrimType(), fdMeexpr->GetTyIdx(), fdMeexpr->GetFieldID1(), fdMeexpr->GetFieldID2()); @@ -296,14 +296,14 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { return fieldsNode; } case OP_conststr: { - ConststrMeExpr *constrMeexpr = static_cast(meExpr); + ConststrMeExpr *constrMeexpr = static_cast(&meExpr); ConststrNode 
*constrNode = codeMP->New(constrMeexpr->GetPrimType(), constrMeexpr->GetStrIdx()); preMeExprExtensionMap[constrNode] = pmeExt; return constrNode; } case OP_conststr16: { - Conststr16MeExpr *constr16Meexpr = static_cast(meExpr); + Conststr16MeExpr *constr16Meexpr = static_cast(&meExpr); Conststr16Node *constr16Node = codeMP->New(constr16Meexpr->GetPrimType(), constr16Meexpr->GetStrIdx()); preMeExprExtensionMap[constr16Node] = pmeExt; @@ -317,38 +317,38 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { case OP_sqrt: case OP_alloca: case OP_malloc: { - OpMeExpr *opMeexpr = static_cast(meExpr); - UnaryNode *unNode = codeMP->New(meExpr->GetOp(), meExpr->GetPrimType()); - unNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(0), unNode), 0); + OpMeExpr *opMeexpr = static_cast(&meExpr); + UnaryNode *unNode = codeMP->New(meExpr.GetOp(), meExpr.GetPrimType()); + unNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(0), unNode), 0); preMeExprExtensionMap[unNode] = pmeExt; return unNode; } case OP_iaddrof: { - OpMeExpr *opMeexpr = static_cast(meExpr); - IreadNode *ireadNode = codeMP->New(meExpr->GetOp(), meExpr->GetPrimType()); - ireadNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(0), ireadNode), 0); + OpMeExpr *opMeexpr = static_cast(&meExpr); + IreadNode *ireadNode = codeMP->New(meExpr.GetOp(), meExpr.GetPrimType()); + ireadNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(0), ireadNode), 0); ireadNode->SetTyIdx(opMeexpr->GetTyIdx()); ireadNode->SetFieldID(opMeexpr->GetFieldID()); preMeExprExtensionMap[ireadNode] = pmeExt; return ireadNode; } case OP_select: { - OpMeExpr *opMeexpr = static_cast(meExpr); - TernaryNode *tNode = codeMP->New(OP_select, meExpr->GetPrimType()); - tNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(0), tNode), 0); - tNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(1), tNode), 1); - tNode->SetOpnd(EmitPreMeExpr(opMeexpr->GetOpnd(2), tNode), 2); + OpMeExpr *opMeexpr = static_cast(&meExpr); + TernaryNode *tNode = codeMP->New(OP_select, 
meExpr.GetPrimType()); + tNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(0), tNode), 0); + tNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(1), tNode), 1); + tNode->SetOpnd(EmitPreMeExpr(*opMeexpr->GetOpnd(2), tNode), 2); preMeExprExtensionMap[tNode] = pmeExt; return tNode; } case OP_intrinsicop: case OP_intrinsicopwithtype: { - NaryMeExpr *nMeexpr = static_cast(meExpr); + NaryMeExpr *nMeexpr = static_cast(&meExpr); IntrinsicopNode *intrnNode = - codeMP->New(*codeMPAlloc, meExpr->GetOp(), meExpr->GetPrimType(), nMeexpr->GetTyIdx()); + codeMP->New(*codeMPAlloc, meExpr.GetOp(), meExpr.GetPrimType(), nMeexpr->GetTyIdx()); intrnNode->SetIntrinsic(nMeexpr->GetIntrinsic()); for (uint32 i = 0; i < nMeexpr->GetNumOpnds(); i++) { - BaseNode *opnd = EmitPreMeExpr(nMeexpr->GetOpnd(i), intrnNode); + BaseNode *opnd = EmitPreMeExpr(*nMeexpr->GetOpnd(i), intrnNode); intrnNode->GetNopnd().push_back(opnd); } intrnNode->SetNumOpnds(nMeexpr->GetNumOpnds()); @@ -360,11 +360,11 @@ BaseNode *PreMeEmitter::EmitPreMeExpr(MeExpr *meExpr, BaseNode *parent) { } } -StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { - PreMeMIRExtension *pmeExt = preMeMP->New(parent, meStmt); - switch (meStmt->GetOp()) { +StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt &meStmt, BaseNode *parent) { + PreMeMIRExtension *pmeExt = preMeMP->New(parent, &meStmt); + switch (meStmt.GetOp()) { case OP_dassign: { - DassignMeStmt *dsmestmt = static_cast(meStmt); + DassignMeStmt *dsmestmt = static_cast(&meStmt); if (dsmestmt->GetRHS()->GetMeOp() == kMeOpVar && static_cast(dsmestmt->GetRHS())->GetOst() == dsmestmt->GetLHS()->GetOst()) { return nullptr; // identity assignment introduced by LFO @@ -373,33 +373,33 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { MIRSymbol *sym = dsmestmt->GetLHS()->GetOst()->GetMIRSymbol(); dass->SetStIdx(sym->GetStIdx()); dass->SetFieldID(static_cast(dsmestmt->GetLHS())->GetOst()->GetFieldID()); - dass->SetOpnd(EmitPreMeExpr(dsmestmt->GetRHS(), 
dass), 0); + dass->SetOpnd(EmitPreMeExpr(*dsmestmt->GetRHS(), dass), 0); dass->SetSrcPos(dsmestmt->GetSrcPosition()); - dass->CopySafeRegionAttr(meStmt->GetStmtAttr()); - dass->SetOriginalID(meStmt->GetOriginalId()); + dass->CopySafeRegionAttr(meStmt.GetStmtAttr()); + dass->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[dass->GetStmtID()] = pmeExt; return dass; } case OP_regassign: { - AssignMeStmt *asMestmt = static_cast(meStmt); + AssignMeStmt *asMestmt = static_cast(&meStmt); RegassignNode *rssnode = codeMP->New(); rssnode->SetPrimType(asMestmt->GetLHS()->GetPrimType()); rssnode->SetRegIdx(asMestmt->GetLHS()->GetRegIdx()); - rssnode->SetOpnd(EmitPreMeExpr(asMestmt->GetRHS(), rssnode), 0); + rssnode->SetOpnd(EmitPreMeExpr(*asMestmt->GetRHS(), rssnode), 0); rssnode->SetSrcPos(asMestmt->GetSrcPosition()); - rssnode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - rssnode->SetOriginalID(meStmt->GetOriginalId()); + rssnode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + rssnode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[rssnode->GetStmtID()] = pmeExt; return rssnode; } case OP_iassign: { - IassignMeStmt *iass = static_cast(meStmt); + IassignMeStmt *iass = static_cast(&meStmt); IvarMeExpr *lhsVar = iass->GetLHSVal(); IassignNode *iassignNode = codeMP->New(); iassignNode->SetTyIdx(iass->GetTyIdx()); iassignNode->SetFieldID(lhsVar->GetFieldID()); if (lhsVar->GetOffset() == 0) { - iassignNode->SetAddrExpr(EmitPreMeExpr(lhsVar->GetBase(), iassignNode)); + iassignNode->SetAddrExpr(EmitPreMeExpr(*lhsVar->GetBase(), iassignNode)); } else { auto *mirType = GlobalTables::GetTypeTable().GetInt32(); auto *mirConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( @@ -407,38 +407,38 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { auto *constValNode = codeMP->New(mirType->GetPrimType(), mirConst); auto *newAddrNode = codeMP->New(OP_add, lhsVar->GetBase()->GetPrimType(), - EmitPreMeExpr(lhsVar->GetBase(), iassignNode), 
constValNode); + EmitPreMeExpr(*lhsVar->GetBase(), iassignNode), constValNode); iassignNode->SetAddrExpr(newAddrNode); } if (iassignNode->Opnd(0)->GetOpCode() != OP_array) { - ArrayNode *arryNode = ConvertToArray(iassignNode->Opnd(0), iassignNode->GetTyIdx()); + ArrayNode *arryNode = ConvertToArray(*(iassignNode->Opnd(0)), iassignNode->GetTyIdx()); if (arryNode != nullptr) { iassignNode->SetAddrExpr(arryNode); } } - iassignNode->rhs = EmitPreMeExpr(iass->GetRHS(), iassignNode); + iassignNode->rhs = EmitPreMeExpr(*iass->GetRHS(), iassignNode); iassignNode->SetSrcPos(iass->GetSrcPosition()); - iassignNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - iassignNode->SetOriginalID(meStmt->GetOriginalId()); + iassignNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + iassignNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[iassignNode->GetStmtID()] = pmeExt; iassignNode->SetExpandFromArrayOfCharFunc(iass->IsExpandedFromArrayOfCharFunc()); return iassignNode; } case OP_return: { - RetMeStmt *retMestmt = static_cast(meStmt); + RetMeStmt *retMestmt = static_cast(&meStmt); NaryStmtNode *retNode = codeMP->New(*codeMPAlloc, OP_return); for (uint32 i = 0; i < retMestmt->GetOpnds().size(); i++) { - retNode->GetNopnd().push_back(EmitPreMeExpr(retMestmt->GetOpnd(i), retNode)); + retNode->GetNopnd().push_back(EmitPreMeExpr(*retMestmt->GetOpnd(i), retNode)); } retNode->SetNumOpnds(static_cast(retMestmt->GetOpnds().size())); retNode->SetSrcPos(retMestmt->GetSrcPosition()); - retNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - retNode->SetOriginalID(meStmt->GetOriginalId()); + retNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + retNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[retNode->GetStmtID()] = pmeExt; return retNode; } case OP_goto: { - GotoMeStmt *gotoStmt = static_cast(meStmt); + GotoMeStmt *gotoStmt = static_cast(&meStmt); if (preMeFunc->WhileLabelCreatedByPreMe(gotoStmt->GetOffset())) { return nullptr; } @@ -448,28 +448,28 @@ StmtNode* 
PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { GotoNode *gto = codeMP->New(OP_goto); gto->SetOffset(gotoStmt->GetOffset()); gto->SetSrcPos(gotoStmt->GetSrcPosition()); - gto->CopySafeRegionAttr(meStmt->GetStmtAttr()); - gto->SetOriginalID(meStmt->GetOriginalId()); + gto->CopySafeRegionAttr(meStmt.GetStmtAttr()); + gto->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[gto->GetStmtID()] = pmeExt; return gto; } case OP_igoto: { - UnaryMeStmt *igotoMeStmt = static_cast(meStmt); + UnaryMeStmt *igotoMeStmt = static_cast(&meStmt); UnaryStmtNode *igto = codeMP->New(OP_igoto); - igto->SetOpnd(EmitPreMeExpr(igotoMeStmt->GetOpnd(), igto), 0); + igto->SetOpnd(EmitPreMeExpr(*igotoMeStmt->GetOpnd(), igto), 0); igto->SetSrcPos(igotoMeStmt->GetSrcPosition()); - igto->CopySafeRegionAttr(meStmt->GetStmtAttr()); - igto->SetOriginalID(meStmt->GetOriginalId()); + igto->CopySafeRegionAttr(meStmt.GetStmtAttr()); + igto->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[igto->GetStmtID()] = pmeExt; return igto; } case OP_comment: { - CommentMeStmt *cmtmeNode = static_cast(meStmt); + CommentMeStmt *cmtmeNode = static_cast(&meStmt); CommentNode *cmtNode = codeMP->New(*codeMPAlloc); cmtNode->SetComment(cmtmeNode->GetComment()); cmtNode->SetSrcPos(cmtmeNode->GetSrcPosition()); - cmtNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - cmtNode->SetOriginalID(meStmt->GetOriginalId()); + cmtNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + cmtNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[cmtNode->GetStmtID()] = pmeExt; return cmtNode; } @@ -489,18 +489,18 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { case OP_customcallassigned: case OP_polymorphiccall: case OP_polymorphiccallassigned: { - CallMeStmt *callMeStmt = static_cast(meStmt); - CallNode *callnode = codeMP->New(*codeMPAlloc, meStmt->GetOp()); + CallMeStmt *callMeStmt = static_cast(&meStmt); + CallNode *callnode = codeMP->New(*codeMPAlloc, 
meStmt.GetOp()); callnode->SetPUIdx(callMeStmt->GetPUIdx()); callnode->SetTyIdx(callMeStmt->GetTyIdx()); callnode->SetNumOpnds(static_cast(callMeStmt->GetOpnds().size())); callnode->SetSrcPos(callMeStmt->GetSrcPosition()); - meStmt->EmitCallReturnVector(callnode->GetReturnVec()); + meStmt.EmitCallReturnVector(callnode->GetReturnVec()); for (uint32 i = 0; i < callMeStmt->GetOpnds().size(); i++) { - callnode->GetNopnd().push_back(EmitPreMeExpr(callMeStmt->GetOpnd(i), callnode)); + callnode->GetNopnd().push_back(EmitPreMeExpr(*callMeStmt->GetOpnd(i), callnode)); } - callnode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - callnode->SetOriginalID(meStmt->GetOriginalId()); + callnode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + callnode->SetOriginalID(meStmt.GetOriginalId()); callnode->SetMeStmtID(callMeStmt->GetMeStmtId()); preMeStmtExtensionMap[callnode->GetStmtID()] = pmeExt; callnode->SetEnclosingBlock(static_cast(parent)); @@ -510,15 +510,15 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { case OP_icallassigned: case OP_icallproto: case OP_icallprotoassigned: { - IcallMeStmt *icallMeStmt = static_cast(meStmt); + IcallMeStmt *icallMeStmt = static_cast(&meStmt); IcallNode *icallnode = codeMP->New(*codeMPAlloc, OP_icallprotoassigned, icallMeStmt->GetRetTyIdx()); for (uint32 i = 0; i < icallMeStmt->GetOpnds().size(); i++) { - icallnode->GetNopnd().push_back(EmitPreMeExpr(icallMeStmt->GetOpnd(i), icallnode)); + icallnode->GetNopnd().push_back(EmitPreMeExpr(*icallMeStmt->GetOpnd(i), icallnode)); } icallnode->SetNumOpnds(static_cast(icallMeStmt->GetOpnds().size())); - icallnode->SetSrcPos(meStmt->GetSrcPosition()); - meStmt->EmitCallReturnVector(icallnode->GetReturnVec()); + icallnode->SetSrcPos(meStmt.GetSrcPosition()); + meStmt.EmitCallReturnVector(icallnode->GetReturnVec()); icallnode->SetRetTyIdx(TyIdx(PTY_void)); for (uint32 j = 0; j < icallnode->GetReturnVec().size(); j++) { CallReturnPair retpair = icallnode->GetReturnVec()[j]; @@ -534,11 
+534,11 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { icallnode->SetRetTyIdx(TyIdx(preg->GetPrimType())); } } - if (meStmt->GetOp() == OP_icallproto || meStmt->GetOp() == OP_icallprotoassigned) { + if (meStmt.GetOp() == OP_icallproto || meStmt.GetOp() == OP_icallprotoassigned) { icallnode->SetRetTyIdx(icallMeStmt->GetRetTyIdx()); } - icallnode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - icallnode->SetOriginalID(meStmt->GetOriginalId()); + icallnode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + icallnode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[icallnode->GetStmtID()] = pmeExt; return icallnode; } @@ -548,40 +548,40 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { case OP_xintrinsiccallassigned: case OP_intrinsiccallwithtype: case OP_intrinsiccallwithtypeassigned: { - IntrinsiccallMeStmt *callMeStmt = static_cast(meStmt); + IntrinsiccallMeStmt *callMeStmt = static_cast(&meStmt); IntrinsiccallNode *callnode = - codeMP->New(*codeMPAlloc, meStmt->GetOp(), callMeStmt->GetIntrinsic()); + codeMP->New(*codeMPAlloc, meStmt.GetOp(), callMeStmt->GetIntrinsic()); callnode->SetIntrinsic(callMeStmt->GetIntrinsic()); callnode->SetTyIdx(callMeStmt->GetTyIdx()); for (uint32 i = 0; i < callMeStmt->GetOpnds().size(); i++) { - callnode->GetNopnd().push_back(EmitPreMeExpr(callMeStmt->GetOpnd(i), callnode)); + callnode->GetNopnd().push_back(EmitPreMeExpr(*callMeStmt->GetOpnd(i), callnode)); } callnode->SetNumOpnds(static_cast(callnode->GetNopndSize())); - callnode->SetSrcPos(meStmt->GetSrcPosition()); - if (kOpcodeInfo.IsCallAssigned(meStmt->GetOp())) { - meStmt->EmitCallReturnVector(callnode->GetReturnVec()); + callnode->SetSrcPos(meStmt.GetSrcPosition()); + if (kOpcodeInfo.IsCallAssigned(meStmt.GetOp())) { + meStmt.EmitCallReturnVector(callnode->GetReturnVec()); } - callnode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - callnode->SetOriginalID(meStmt->GetOriginalId()); + 
callnode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + callnode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[callnode->GetStmtID()] = pmeExt; return callnode; } case OP_asm: { - AsmMeStmt *asmMeStmt = static_cast(meStmt); + AsmMeStmt *asmMeStmt = static_cast(&meStmt); AsmNode *asmNode = codeMP->New(codeMPAlloc); for (size_t i = 0; i < asmMeStmt->NumMeStmtOpnds(); ++i) { - asmNode->GetNopnd().push_back(EmitPreMeExpr(asmMeStmt->GetOpnd(i), asmNode)); + asmNode->GetNopnd().push_back(EmitPreMeExpr(*asmMeStmt->GetOpnd(i), asmNode)); } asmNode->SetNumOpnds(static_cast(asmNode->GetNopndSize())); - asmNode->SetSrcPos(meStmt->GetSrcPosition()); - meStmt->EmitCallReturnVector(*asmNode->GetCallReturnVector()); + asmNode->SetSrcPos(meStmt.GetSrcPosition()); + meStmt.EmitCallReturnVector(*asmNode->GetCallReturnVector()); asmNode->asmString = asmMeStmt->asmString; asmNode->inputConstraints = asmMeStmt->inputConstraints; asmNode->outputConstraints = asmMeStmt->outputConstraints; asmNode->clobberList = asmMeStmt->clobberList; asmNode->gotoLabels = asmMeStmt->gotoLabels; - asmNode->SetOriginalID(meStmt->GetOriginalId()); - asmNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); + asmNode->SetOriginalID(meStmt.GetOriginalId()); + asmNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); return asmNode; } case OP_jscatch: @@ -592,142 +592,142 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { case OP_membarrelease: case OP_membarstorestore: case OP_membarstoreload: { - StmtNode *stmtNode = codeMP->New(meStmt->GetOp()); - stmtNode->SetSrcPos(meStmt->GetSrcPosition()); - stmtNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - stmtNode->SetOriginalID(meStmt->GetOriginalId()); + StmtNode *stmtNode = codeMP->New(meStmt.GetOp()); + stmtNode->SetSrcPos(meStmt.GetSrcPosition()); + stmtNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + stmtNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[stmtNode->GetStmtID()] = pmeExt; return stmtNode; } case 
OP_retsub: { - StmtNode *usesStmtNode = codeMP->New(meStmt->GetOp()); - usesStmtNode->SetSrcPos(meStmt->GetSrcPosition()); - usesStmtNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - usesStmtNode->SetOriginalID(meStmt->GetOriginalId()); + StmtNode *usesStmtNode = codeMP->New(meStmt.GetOp()); + usesStmtNode->SetSrcPos(meStmt.GetSrcPosition()); + usesStmtNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + usesStmtNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[usesStmtNode->GetStmtID()] = pmeExt; return usesStmtNode; } case OP_brfalse: case OP_brtrue: { - CondGotoNode *CondNode = codeMP->New(meStmt->GetOp()); - CondGotoMeStmt *condMeStmt = static_cast(meStmt); - CondNode->SetBranchProb(condMeStmt->GetBranchProb()); - CondNode->SetOffset(condMeStmt->GetOffset()); - CondNode->SetSrcPos(meStmt->GetSrcPosition()); - CondNode->SetOpnd(EmitPreMeExpr(condMeStmt->GetOpnd(), CondNode), 0); - CondNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - CondNode->SetOriginalID(meStmt->GetOriginalId()); - CondNode->SetMeStmtID(meStmt->GetMeStmtId()); - preMeStmtExtensionMap[CondNode->GetStmtID()] = pmeExt; - return CondNode; + CondGotoNode *condNode = codeMP->New(meStmt.GetOp()); + CondGotoMeStmt *condMeStmt = static_cast(&meStmt); + condNode->SetBranchProb(condMeStmt->GetBranchProb()); + condNode->SetOffset(condMeStmt->GetOffset()); + condNode->SetSrcPos(meStmt.GetSrcPosition()); + condNode->SetOpnd(EmitPreMeExpr(*condMeStmt->GetOpnd(), condNode), 0); + condNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + condNode->SetOriginalID(meStmt.GetOriginalId()); + condNode->SetMeStmtID(meStmt.GetMeStmtId()); + preMeStmtExtensionMap[condNode->GetStmtID()] = pmeExt; + return condNode; } case OP_cpptry: case OP_try: { TryNode *jvTryNode = codeMP->New(*codeMPAlloc); - TryMeStmt *tryMeStmt = static_cast(meStmt); + TryMeStmt *tryMeStmt = static_cast(&meStmt); size_t offsetsSize = tryMeStmt->GetOffsets().size(); jvTryNode->ResizeOffsets(offsetsSize); for (size_t i = 0; i < 
offsetsSize; i++) { jvTryNode->SetOffset(tryMeStmt->GetOffsets()[i], i); } jvTryNode->SetSrcPos(tryMeStmt->GetSrcPosition()); - jvTryNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - jvTryNode->SetOriginalID(meStmt->GetOriginalId()); + jvTryNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + jvTryNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[jvTryNode->GetStmtID()] = pmeExt; return jvTryNode; } case OP_cppcatch: { CppCatchNode *cppCatchNode = codeMP->New(); - CppCatchMeStmt *catchMestmt = static_cast(meStmt); + CppCatchMeStmt *catchMestmt = static_cast(&meStmt); cppCatchNode->exceptionTyIdx = catchMestmt->exceptionTyIdx; cppCatchNode->SetSrcPos(catchMestmt->GetSrcPosition()); - cppCatchNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - cppCatchNode->SetOriginalID(meStmt->GetOriginalId()); + cppCatchNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + cppCatchNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[cppCatchNode->GetStmtID()] = pmeExt; return cppCatchNode; } case OP_catch: { CatchNode *jvCatchNode = codeMP->New(*codeMPAlloc); - CatchMeStmt *catchMestmt = static_cast(meStmt); + CatchMeStmt *catchMestmt = static_cast(&meStmt); jvCatchNode->SetExceptionTyIdxVec(catchMestmt->GetExceptionTyIdxVec()); jvCatchNode->SetSrcPos(catchMestmt->GetSrcPosition()); - jvCatchNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - jvCatchNode->SetOriginalID(meStmt->GetOriginalId()); + jvCatchNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + jvCatchNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[jvCatchNode->GetStmtID()] = pmeExt; return jvCatchNode; } case OP_throw: { - UnaryStmtNode *throwStmtNode = codeMP->New(meStmt->GetOp()); - ThrowMeStmt *throwMeStmt = static_cast(meStmt); - throwStmtNode->SetOpnd(EmitPreMeExpr(throwMeStmt->GetOpnd(), throwStmtNode), 0); + UnaryStmtNode *throwStmtNode = codeMP->New(meStmt.GetOp()); + ThrowMeStmt *throwMeStmt = static_cast(&meStmt); + 
throwStmtNode->SetOpnd(EmitPreMeExpr(*throwMeStmt->GetOpnd(), throwStmtNode), 0); throwStmtNode->SetSrcPos(throwMeStmt->GetSrcPosition()); - throwStmtNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - throwStmtNode->SetOriginalID(meStmt->GetOriginalId()); + throwStmtNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + throwStmtNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[throwStmtNode->GetStmtID()] = pmeExt; return throwStmtNode; } case OP_callassertnonnull: { - CallAssertNonnullMeStmt *assertNullStmt = static_cast(meStmt); - CallAssertNonnullStmtNode *assertNullNode = codeMP->New(meStmt->GetOp(), + CallAssertNonnullMeStmt *assertNullStmt = static_cast(&meStmt); + CallAssertNonnullStmtNode *assertNullNode = codeMP->New(meStmt.GetOp(), assertNullStmt->GetFuncNameIdx(), assertNullStmt->GetParamIndex(), assertNullStmt->GetStmtFuncNameIdx()); - assertNullNode->SetSrcPos(meStmt->GetSrcPosition()); - assertNullNode->SetOpnd(EmitPreMeExpr(assertNullStmt->GetOpnd(), assertNullNode), 0); + assertNullNode->SetSrcPos(meStmt.GetSrcPosition()); + assertNullNode->SetOpnd(EmitPreMeExpr(*assertNullStmt->GetOpnd(), assertNullNode), 0); assertNullNode->SetNumOpnds(1); - assertNullNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - assertNullNode->SetOriginalID(meStmt->GetOriginalId()); + assertNullNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + assertNullNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[assertNullNode->GetStmtID()] = pmeExt; return assertNullNode; } case OP_callassertle: { - CallAssertBoundaryMeStmt *assertBoundaryStmt = static_cast(meStmt); + CallAssertBoundaryMeStmt *assertBoundaryStmt = static_cast(&meStmt); CallAssertBoundaryStmtNode *assertBoundaryNode = codeMP->New( - *codeMPAlloc, meStmt->GetOp(), assertBoundaryStmt->GetFuncNameIdx(), assertBoundaryStmt->GetParamIndex(), + *codeMPAlloc, meStmt.GetOp(), assertBoundaryStmt->GetFuncNameIdx(), assertBoundaryStmt->GetParamIndex(), assertBoundaryStmt->GetStmtFuncNameIdx()); - 
assertBoundaryNode->SetSrcPos(meStmt->GetSrcPosition()); + assertBoundaryNode->SetSrcPos(meStmt.GetSrcPosition()); for (uint32 i = 0; i < assertBoundaryStmt->GetOpnds().size(); i++) { - assertBoundaryNode->GetNopnd().push_back(EmitPreMeExpr(assertBoundaryStmt->GetOpnd(i), assertBoundaryNode)); + assertBoundaryNode->GetNopnd().push_back(EmitPreMeExpr(*assertBoundaryStmt->GetOpnd(i), assertBoundaryNode)); } assertBoundaryNode->SetNumOpnds(static_cast(assertBoundaryNode->GetNopndSize())); - assertBoundaryNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - assertBoundaryNode->SetOriginalID(meStmt->GetOriginalId()); + assertBoundaryNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + assertBoundaryNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[assertBoundaryNode->GetStmtID()] = pmeExt; return assertBoundaryNode; } case OP_eval: case OP_free: { - UnaryStmtNode *unaryStmtNode = codeMP->New(meStmt->GetOp()); - UnaryMeStmt *uMeStmt = static_cast(meStmt); - unaryStmtNode->SetOpnd(EmitPreMeExpr(uMeStmt->GetOpnd(), unaryStmtNode), 0); + UnaryStmtNode *unaryStmtNode = codeMP->New(meStmt.GetOp()); + UnaryMeStmt *uMeStmt = static_cast(&meStmt); + unaryStmtNode->SetOpnd(EmitPreMeExpr(*uMeStmt->GetOpnd(), unaryStmtNode), 0); unaryStmtNode->SetSrcPos(uMeStmt->GetSrcPosition()); - unaryStmtNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - unaryStmtNode->SetOriginalID(meStmt->GetOriginalId()); + unaryStmtNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + unaryStmtNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[unaryStmtNode->GetStmtID()] = pmeExt; return unaryStmtNode; } case OP_switch: { SwitchNode *switchNode = codeMP->New(*codeMPAlloc); - SwitchMeStmt *meSwitch = static_cast(meStmt); - switchNode->SetSwitchOpnd(EmitPreMeExpr(meSwitch->GetOpnd(), switchNode)); + SwitchMeStmt *meSwitch = static_cast(&meStmt); + switchNode->SetSwitchOpnd(EmitPreMeExpr(*meSwitch->GetOpnd(), switchNode)); switchNode->SetDefaultLabel(meSwitch->GetDefaultLabel()); 
switchNode->SetSwitchTable(meSwitch->GetSwitchTable()); switchNode->SetSrcPos(meSwitch->GetSrcPosition()); - switchNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - switchNode->SetOriginalID(meStmt->GetOriginalId()); - switchNode->SetMeStmtID(meStmt->GetMeStmtId()); + switchNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + switchNode->SetOriginalID(meStmt.GetOriginalId()); + switchNode->SetMeStmtID(meStmt.GetMeStmtId()); preMeStmtExtensionMap[switchNode->GetStmtID()] = pmeExt; return switchNode; } case OP_assertnonnull: case OP_assignassertnonnull: case OP_returnassertnonnull: { - AssertNonnullMeStmt *assertNullStmt = static_cast(meStmt); + AssertNonnullMeStmt *assertNullStmt = static_cast(&meStmt); AssertNonnullStmtNode *assertNullNode = codeMP->New( - meStmt->GetOp(), assertNullStmt->GetFuncNameIdx()); - assertNullNode->SetSrcPos(meStmt->GetSrcPosition()); - assertNullNode->SetOpnd(EmitPreMeExpr(assertNullStmt->GetOpnd(), assertNullNode), 0); + meStmt.GetOp(), assertNullStmt->GetFuncNameIdx()); + assertNullNode->SetSrcPos(meStmt.GetSrcPosition()); + assertNullNode->SetOpnd(EmitPreMeExpr(*assertNullStmt->GetOpnd(), assertNullNode), 0); assertNullNode->SetNumOpnds(1); - assertNullNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - assertNullNode->SetOriginalID(meStmt->GetOriginalId()); + assertNullNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + assertNullNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[assertNullNode->GetStmtID()] = pmeExt; return assertNullNode; } @@ -737,27 +737,27 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { case OP_assertlt: case OP_assignassertle: case OP_returnassertle: { - AssertBoundaryMeStmt *assertBoundaryStmt = static_cast(meStmt); + AssertBoundaryMeStmt *assertBoundaryStmt = static_cast(&meStmt); AssertBoundaryStmtNode *assertBoundaryNode = codeMP->New( - *codeMPAlloc, meStmt->GetOp(), assertBoundaryStmt->GetFuncNameIdx()); - assertBoundaryNode->SetSrcPos(meStmt->GetSrcPosition()); + 
*codeMPAlloc, meStmt.GetOp(), assertBoundaryStmt->GetFuncNameIdx()); + assertBoundaryNode->SetSrcPos(meStmt.GetSrcPosition()); for (uint32 i = 0; i < assertBoundaryStmt->GetOpnds().size(); i++) { - assertBoundaryNode->GetNopnd().push_back(EmitPreMeExpr(assertBoundaryStmt->GetOpnd(i), assertBoundaryNode)); + assertBoundaryNode->GetNopnd().push_back(EmitPreMeExpr(*assertBoundaryStmt->GetOpnd(i), assertBoundaryNode)); } assertBoundaryNode->SetNumOpnds(static_cast(assertBoundaryNode->GetNopndSize())); - assertBoundaryNode->CopySafeRegionAttr(meStmt->GetStmtAttr()); - assertBoundaryNode->SetOriginalID(meStmt->GetOriginalId()); + assertBoundaryNode->CopySafeRegionAttr(meStmt.GetStmtAttr()); + assertBoundaryNode->SetOriginalID(meStmt.GetOriginalId()); preMeStmtExtensionMap[assertBoundaryNode->GetStmtID()] = pmeExt; return assertBoundaryNode; } case OP_syncenter: case OP_syncexit: { - auto naryMeStmt = static_cast(meStmt); - auto syncStmt = codeMP->New(*codeMPAlloc, meStmt->GetOp()); + auto naryMeStmt = static_cast(&meStmt); + auto syncStmt = codeMP->New(*codeMPAlloc, meStmt.GetOp()); for (uint32 i = 0; i < naryMeStmt->GetOpnds().size(); i++) { - syncStmt->GetNopnd().push_back(EmitPreMeExpr(naryMeStmt->GetOpnd(i), syncStmt)); + syncStmt->GetNopnd().push_back(EmitPreMeExpr(*naryMeStmt->GetOpnd(i), syncStmt)); } - syncStmt->SetNumOpnds(syncStmt->GetNopndSize()); + syncStmt->SetNumOpnds(static_cast(syncStmt->GetNopndSize())); return syncStmt; } default: @@ -765,14 +765,15 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *meStmt, BaseNode *parent) { } } -void PreMeEmitter::UpdateStmtInfoForLabelNode(LabelNode &label, BB &bb) { +void PreMeEmitter::UpdateStmtInfoForLabelNode(LabelNode &label, BB &bb) const { if (ipaInfo == nullptr) { return; } label.SetStmtInfoId(ipaInfo->GetRealFirstStmtInfoId(bb)); } -void PreMeEmitter::UpdateStmtInfo(const MeStmt &meStmt, StmtNode &stmt, BlockNode &currBlock, FreqType frequency) { +void PreMeEmitter::UpdateStmtInfo(const MeStmt &meStmt, 
StmtNode &stmt, + BlockNode &currBlock, FreqType frequency) const { if (ipaInfo == nullptr || meStmt.GetStmtInfoId() == kInvalidIndex) { return; } @@ -783,70 +784,69 @@ void PreMeEmitter::UpdateStmtInfo(const MeStmt &meStmt, StmtNode &stmt, BlockNod stmt.SetStmtInfoId(meStmt.GetStmtInfoId()); } -void PreMeEmitter::EmitBB(BB *bb, BlockNode *curBlk) { - CHECK_FATAL(curBlk != nullptr, "null ptr check"); - bool bbIsEmpty = bb->GetMeStmts().empty(); +void PreMeEmitter::EmitBB(BB &bb, BlockNode &curBlk) { + bool bbIsEmpty = bb.GetMeStmts().empty(); // emit head. label - LabelIdx labidx = bb->GetBBLabel(); + LabelIdx labidx = bb.GetBBLabel(); if (labidx != 0 && !preMeFunc->WhileLabelCreatedByPreMe(labidx) && !preMeFunc->IfLabelCreatedByPreMe(labidx)) { // not a empty bb LabelNode *lbnode = codeMP->New(); - UpdateStmtInfoForLabelNode(*lbnode, *bb); + UpdateStmtInfoForLabelNode(*lbnode, bb); lbnode->SetLabelIdx(labidx); - curBlk->AddStatement(lbnode); - PreMeMIRExtension *pmeExt = preMeMP->New(curBlk); + curBlk.AddStatement(lbnode); + PreMeMIRExtension *pmeExt = preMeMP->New(&curBlk); preMeStmtExtensionMap[lbnode->GetStmtID()] = pmeExt; if (GetFuncProfData()) { - GetFuncProfData()->SetStmtFreq(lbnode->GetStmtID(), bb->GetFrequency()); + GetFuncProfData()->SetStmtFreq(lbnode->GetStmtID(), bb.GetFrequency()); } } - for (auto& mestmt : bb->GetMeStmts()) { - StmtNode *stmt = EmitPreMeStmt(&mestmt, curBlk); + for (auto& mestmt : bb.GetMeStmts()) { + StmtNode *stmt = EmitPreMeStmt(mestmt, &curBlk); if (!stmt) { // can be null i.e, a goto to a label that was created by lno lower continue; } - UpdateStmtInfo(mestmt, *stmt, *curBlk, bb->GetFrequency()); - curBlk->AddStatement(stmt); + UpdateStmtInfo(mestmt, *stmt, curBlk, bb.GetFrequency()); + curBlk.AddStatement(stmt); // add for first stmt in bb in curblk if (GetFuncProfData() != nullptr) { - GetFuncProfData()->SetStmtFreq(stmt->GetStmtID(), bb->GetFrequency()); + GetFuncProfData()->SetStmtFreq(stmt->GetStmtID(), 
bb.GetFrequency()); } } - if (bb->GetAttributes(kBBAttrIsTryEnd)) { + if (bb.GetAttributes(kBBAttrIsTryEnd)) { /* generate op_endtry */ StmtNode *endtry = codeMP->New(OP_endtry); - curBlk->AddStatement(endtry); - PreMeMIRExtension *pmeExt = preMeMP->New(curBlk); + curBlk.AddStatement(endtry); + PreMeMIRExtension *pmeExt = preMeMP->New(&curBlk); preMeStmtExtensionMap[endtry->GetStmtID()] = pmeExt; } // add stmtnode to last if (GetFuncProfData()) { if (bbIsEmpty) { if (!MeOption::quiet) { - LogInfo::MapleLogger() << " bb " << bb->GetBBId() << ": no stmt used to add frequency; added comment node\n"; + LogInfo::MapleLogger() << " bb " << bb.GetBBId() << ": no stmt used to add frequency; added comment node\n"; } CommentNode *commentNode = codeMP->New(*(mirFunc->GetModule())); commentNode->SetComment("freqStmt"+std::to_string(commentNode->GetStmtID())); - GetFuncProfData()->SetStmtFreq(commentNode->GetStmtID(), bb->GetFrequency()); - curBlk->AddStatement(commentNode); + GetFuncProfData()->SetStmtFreq(commentNode->GetStmtID(), bb.GetFrequency()); + curBlk.AddStatement(commentNode); } } } -DoloopNode *PreMeEmitter::EmitPreMeDoloop(BB *meWhileBB, BlockNode *curBlk, PreMeWhileInfo *whileInfo) { - MeStmt *lastmestmt = meWhileBB->GetLastMe(); +DoloopNode *PreMeEmitter::EmitPreMeDoloop(BB &meWhileBB, BlockNode &curBlk, PreMeWhileInfo &whileInfo) { + MeStmt *lastmestmt = meWhileBB.GetLastMe(); ASSERT_NOT_NULL(lastmestmt); CHECK_FATAL(lastmestmt->GetPrev() == nullptr || dynamic_cast(lastmestmt->GetPrev()) == nullptr, "EmitPreMeDoLoop: there are other statements at while header bb"); DoloopNode *Doloopnode = codeMP->New(); - PreMeMIRExtension *pmeExt = preMeMP->New(curBlk); + PreMeMIRExtension *pmeExt = preMeMP->New(&curBlk); pmeExt->mestmt = lastmestmt; preMeStmtExtensionMap[Doloopnode->GetStmtID()] = pmeExt; - Doloopnode->SetDoVarStIdx(whileInfo->ivOst->GetMIRSymbol()->GetStIdx()); + Doloopnode->SetDoVarStIdx(whileInfo.ivOst->GetMIRSymbol()->GetStIdx()); CondGotoMeStmt 
*condGotostmt = static_cast(lastmestmt); - Doloopnode->SetStartExpr(EmitPreMeExpr(whileInfo->initExpr, Doloopnode)); - Doloopnode->SetContExpr(EmitPreMeExpr(condGotostmt->GetOpnd(), Doloopnode)); + Doloopnode->SetStartExpr(EmitPreMeExpr(*whileInfo.initExpr, Doloopnode)); + Doloopnode->SetContExpr(EmitPreMeExpr(*condGotostmt->GetOpnd(), Doloopnode)); CompareNode *compare = static_cast(Doloopnode->GetCondExpr()); if (compare->Opnd(0)->GetOpCode() == OP_cvt && compare->Opnd(0)->Opnd(0)->GetOpCode() == OP_cvt) { PrimType resPrimType = compare->Opnd(0)->GetPrimType(); @@ -862,38 +862,38 @@ DoloopNode *PreMeEmitter::EmitPreMeDoloop(BB *meWhileBB, BlockNode *curBlk, PreM PreMeMIRExtension *doloopExt = preMeMP->New(Doloopnode); preMeStmtExtensionMap[dobodyNode->GetStmtID()] = doloopExt; MIRIntConst *intConst = - mirFunc->GetModule()->GetMemPool()->New(whileInfo->stepValue, *whileInfo->ivOst->GetType()); + mirFunc->GetModule()->GetMemPool()->New(whileInfo.stepValue, *whileInfo.ivOst->GetType()); ConstvalNode *constnode = codeMP->New(intConst->GetType().GetPrimType(), intConst); preMeExprExtensionMap[constnode] = doloopExt; Doloopnode->SetIncrExpr(constnode); Doloopnode->SetIsPreg(false); - curBlk->AddStatement(Doloopnode); + curBlk.AddStatement(Doloopnode); // add stmtfreq if (GetFuncProfData()) { - GetFuncProfData()->SetStmtFreq(Doloopnode->GetStmtID(), meWhileBB->GetFrequency()); + GetFuncProfData()->SetStmtFreq(Doloopnode->GetStmtID(), meWhileBB.GetFrequency()); } return Doloopnode; } -WhileStmtNode *PreMeEmitter::EmitPreMeWhile(BB *meWhileBB, BlockNode *curBlk) { - MeStmt *lastmestmt = meWhileBB->GetLastMe(); +WhileStmtNode *PreMeEmitter::EmitPreMeWhile(BB &meWhileBB, BlockNode &curBlk) { + MeStmt *lastmestmt = meWhileBB.GetLastMe(); ASSERT_NOT_NULL(lastmestmt); CHECK_FATAL(lastmestmt->GetPrev() == nullptr || dynamic_cast(lastmestmt->GetPrev()) == nullptr, "EmitPreMeWhile: there are other statements at while header bb"); WhileStmtNode *Whilestmt = codeMP->New(OP_while); 
- PreMeMIRExtension *pmeExt = preMeMP->New(curBlk); + PreMeMIRExtension *pmeExt = preMeMP->New(&curBlk); preMeStmtExtensionMap[Whilestmt->GetStmtID()] = pmeExt; CondGotoMeStmt *condGotostmt = static_cast(lastmestmt); - Whilestmt->SetOpnd(EmitPreMeExpr(condGotostmt->GetOpnd(), Whilestmt), 0); + Whilestmt->SetOpnd(EmitPreMeExpr(*condGotostmt->GetOpnd(), Whilestmt), 0); BlockNode *whilebodyNode = codeMP->New(); PreMeMIRExtension *whilenodeExt = preMeMP->New(Whilestmt); preMeStmtExtensionMap[whilebodyNode->GetStmtID()] = whilenodeExt; Whilestmt->SetBody(whilebodyNode); // add stmtfreq if (GetFuncProfData()) { - GetFuncProfData()->SetStmtFreq(Whilestmt->GetStmtID(), meWhileBB->GetFrequency()); + GetFuncProfData()->SetStmtFreq(Whilestmt->GetStmtID(), meWhileBB.GetFrequency()); } - curBlk->AddStatement(Whilestmt); + curBlk.AddStatement(Whilestmt); return Whilestmt; } @@ -914,11 +914,11 @@ uint32 PreMeEmitter::Raise2PreMeWhile(uint32 curJ, BlockNode *curBlk) { StmtNode *loop = nullptr; ++curJ; if (whileInfo->canConvertDoloop) { // emit doloop - auto *doloopNode = EmitPreMeDoloop(curbb, curBlk, whileInfo); + auto *doloopNode = EmitPreMeDoloop(*curbb, *curBlk, *whileInfo); loop = doloopNode; dobody = doloopNode->GetDoBody(); } else { // emit while loop - auto *whileNode = EmitPreMeWhile(curbb, curBlk); + auto *whileNode = EmitPreMeWhile(*curbb, *curBlk); loop = whileNode; dobody = whileNode->GetBody(); } @@ -942,12 +942,12 @@ uint32 PreMeEmitter::Raise2PreMeWhile(uint32 curJ, BlockNode *curBlk) { // set dobody freq if (GetFuncProfData()) { FreqType freq = (endlblbb == suc0) ? 
suc1->GetFrequency() : suc0->GetFrequency(); - GetFuncProfData()->SetStmtFreq(dobody->GetStmtID(), static_cast(freq)); + GetFuncProfData()->SetStmtFreq(dobody->GetStmtID(), freq); } return curJ; } -uint32 PreMeEmitter::Raise2PreMeIf(uint32 curJ, BlockNode *curBlk) { +uint32 PreMeEmitter::Raise2PreMeIf(uint32 curJ, BlockNode &curBlk) { MapleVector &bbvec = cfg->GetAllBBs(); BB *curbb = bbvec[curJ]; bool setFirstFreq = (GetFuncProfData() != nullptr); @@ -957,19 +957,19 @@ uint32 PreMeEmitter::Raise2PreMeIf(uint32 curJ, BlockNode *curBlk) { LabelNode *lbnode = mirFunc->GetCodeMempool()->New(); UpdateStmtInfoForLabelNode(*lbnode, *curbb); lbnode->SetLabelIdx(labidx); - curBlk->AddStatement(lbnode); - PreMeMIRExtension *pmeExt = preMeMP->New(curBlk); + curBlk.AddStatement(lbnode); + PreMeMIRExtension *pmeExt = preMeMP->New(&curBlk); preMeStmtExtensionMap[lbnode->GetStmtID()] = pmeExt; } MeStmt *mestmt = curbb->GetFirstMe(); while (mestmt->GetOp() != OP_brfalse && mestmt->GetOp() != OP_brtrue) { - StmtNode *stmt = EmitPreMeStmt(mestmt, curBlk); + StmtNode *stmt = EmitPreMeStmt(*mestmt, &curBlk); if (stmt == nullptr) { mestmt = mestmt->GetNext(); continue; } - UpdateStmtInfo(*mestmt, *stmt, *curBlk, curbb->GetFrequency()); - curBlk->AddStatement(stmt); + UpdateStmtInfo(*mestmt, *stmt, curBlk, curbb->GetFrequency()); + curBlk.AddStatement(stmt); if (GetFuncProfData() && (setFirstFreq || (stmt->GetOpCode() == OP_call) || IsCallAssigned(stmt->GetOpCode()))) { // add frequency of first/call stmt of curbb @@ -985,9 +985,9 @@ uint32 PreMeEmitter::Raise2PreMeIf(uint32 curJ, BlockNode *curBlk) { PreMeIfInfo *ifInfo = preMeFunc->label2IfInfo[condgoto->GetOffset()]; CHECK_FATAL(ifInfo->endLabel != 0, "Raise2PreMeIf: endLabel not found"); IfStmtNode *ifStmtNode = mirFunc->GetCodeMempool()->New(); - PreMeMIRExtension *pmeExt = preMeMP->New(curBlk); + PreMeMIRExtension *pmeExt = preMeMP->New(&curBlk); preMeStmtExtensionMap[ifStmtNode->GetStmtID()] = pmeExt; - BaseNode *condnode = 
EmitPreMeExpr(condgoto->GetOpnd(), ifStmtNode); + BaseNode *condnode = EmitPreMeExpr(*condgoto->GetOpnd(), ifStmtNode); if (condgoto->IsBranchProbValid() && (condgoto->GetBranchProb() == kProbLikely || condgoto->GetBranchProb() == kProbUnlikely)) { IntrinsicopNode *expectNode = codeMP->New(*mirFunc->GetModule(), OP_intrinsicop, PTY_i64); @@ -999,11 +999,12 @@ uint32 PreMeEmitter::Raise2PreMeIf(uint32 curJ, BlockNode *curBlk) { // | brtrue (1) | expectTrue (1) | expectFalse (0) | // | brfalse(0) | expectFalse(0) | expectTrue (1) | // XNOR - uint32 val = !(static_cast(mestmt->GetOp() == OP_brtrue) ^ (condgoto->GetBranchProb() == kProbLikely)); + uint32 val = !(static_cast(mestmt->GetOp() == OP_brtrue) ^ + (static_cast((condgoto->GetBranchProb() == kProbLikely)))); MIRIntConst *constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *type); ConstvalNode *constNode = codeMP->New(constVal->GetType().GetPrimType(), constVal); expectNode->GetNopnd().push_back(constNode); - expectNode->SetNumOpnds(expectNode->GetNopnd().size()); + expectNode->SetNumOpnds(static_cast(expectNode->GetNopnd().size())); MIRIntConst *constZeroVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *type); ConstvalNode *constZeroNode = codeMP->New(constVal->GetType().GetPrimType(), constZeroVal); CompareNode *cmpNode = @@ -1012,8 +1013,8 @@ uint32 PreMeEmitter::Raise2PreMeIf(uint32 curJ, BlockNode *curBlk) { } ifStmtNode->SetOpnd(condnode, 0); ifStmtNode->SetMeStmtID(condgoto->GetMeStmtId()); - UpdateStmtInfo(*mestmt, *ifStmtNode, *curBlk, curbb->GetFrequency()); - curBlk->AddStatement(ifStmtNode); + UpdateStmtInfo(*mestmt, *ifStmtNode, curBlk, curbb->GetFrequency()); + curBlk.AddStatement(ifStmtNode); if (GetFuncProfData()) { // set ifstmt freq GetFuncProfData()->SetStmtFreq(ifStmtNode->GetStmtID(), curbb->GetFrequency()); @@ -1083,7 +1084,8 @@ uint32 PreMeEmitter::EmitPreMeBB(uint32 curJ, BlockNode *curBlk) { return curJ + 1; } if (mebb->GetBBLabel() != 0) { - 
MapleMap::const_iterator it = preMeFunc->label2WhileInfo.find(mebb->GetBBLabel()); + MapleMap::const_iterator it = + std::as_const(preMeFunc->label2WhileInfo).find(mebb->GetBBLabel()); if (it != preMeFunc->label2WhileInfo.end()) { if (mebb->GetSucc().size() == 2) { curJ = Raise2PreMeWhile(curJ, curBlk); @@ -1097,13 +1099,14 @@ uint32 PreMeEmitter::EmitPreMeBB(uint32 curJ, BlockNode *curBlk) { (mebb->GetLastMe()->GetOp() == OP_brfalse || mebb->GetLastMe()->GetOp() == OP_brtrue)) { CondGotoMeStmt *condgoto = static_cast(mebb->GetLastMe()); - MapleMap::const_iterator it = preMeFunc->label2IfInfo.find(condgoto->GetOffset()); + MapleMap::const_iterator it = + std::as_const(preMeFunc->label2IfInfo).find(condgoto->GetOffset()); if (it != preMeFunc->label2IfInfo.end()) { - curJ = Raise2PreMeIf(curJ, curBlk); + curJ = Raise2PreMeIf(curJ, *curBlk); return curJ; } } - EmitBB(mebb, curBlk); + EmitBB(*mebb, *curBlk); return ++curJ; } diff --git a/src/mapleall/maple_me/src/pme_mir_lower.cpp b/src/mapleall/maple_me/src/pme_mir_lower.cpp index 77a9a6d7510476042e43a8e086535864277fb5fe..423fd3f56ed2b466f83873cea60b1e2b001f3887 100644 --- a/src/mapleall/maple_me/src/pme_mir_lower.cpp +++ b/src/mapleall/maple_me/src/pme_mir_lower.cpp @@ -222,14 +222,14 @@ BlockNode *PreMeMIRLower::LowerIfStmt(IfStmtNode &ifstmt, bool recursive) { preMeFunc->label2IfInfo.insert(std::make_pair(elselabelidx, ifInfo)); } - bool fallthru_from_then = true; + bool fallthruFromThen = true; if (!thenempty) { blk->AppendStatementsFromBlock(*ifstmt.GetThenPart()); - fallthru_from_then = !OpCodeNoFallThrough(ifstmt.GetThenPart()->GetLast()->GetOpCode()); + fallthruFromThen = !OpCodeNoFallThrough(ifstmt.GetThenPart()->GetLast()->GetOpCode()); } LabelIdx endlabelidx = 0; - if (fallthru_from_then) { + if (fallthruFromThen) { GotoNode *gotostmt = mirModule.CurFuncCodeMemPool()->New(OP_goto); endlabelidx = mirFunc->GetLabelTab()->CreateLabelWithPrefix('e'); mirFunc->GetLabelTab()->AddToStringLabelMap(endlabelidx); @@ 
-268,7 +268,7 @@ BlockNode *PreMeMIRLower::LowerIfStmt(IfStmtNode &ifstmt, bool recursive) { blk->AppendStatementsFromBlock(*ifstmt.GetElsePart()); } - if (fallthru_from_then) { + if (fallthruFromThen) { labstmt = mirModule.CurFuncCodeMemPool()->New(); labstmt->SetLabelIdx(endlabelidx); if (!elseempty) { diff --git a/src/mapleall/maple_me/src/prop.cpp b/src/mapleall/maple_me/src/prop.cpp index a5c3c24f574219d96a2f4178935ac5e3aaee7e0b..5fc64efb6176c417f83478043b4b4cb4222b850f 100644 --- a/src/mapleall/maple_me/src/prop.cpp +++ b/src/mapleall/maple_me/src/prop.cpp @@ -223,10 +223,10 @@ bool Prop::IsFunctionOfCurVersion(ScalarMeExpr *scalar, const ScalarMeExpr *cur) return InvertibleOccurrences(scalar, ass->GetRHS()) == 1; } -static void Calc(const MeExpr *x, uint32 &count) { +static void Calc(const MeExpr &x, uint32 &count) { count++; - for (uint32 i = 0; i < x->GetNumOpnds(); i++) { - Calc(x->GetOpnd(i), count); + for (uint32 i = 0; i < x.GetNumOpnds(); i++) { + Calc(*x.GetOpnd(i), count); } } @@ -239,7 +239,7 @@ static void Calc(const MeExpr *x, uint32 &count) { Propagatability Prop::Propagatable(MeExpr *x, BB *fromBB, bool atParm, bool checkInverse, ScalarMeExpr *propagatingScalar) { uint32 count = 0; - Calc(x, count); + Calc(*x, count); if (count > kTreeNodeLimit) { return kPropNo; } @@ -731,12 +731,9 @@ MeExpr &Prop::PropVar(VarMeExpr &varMeExpr, bool atParm, bool checkPhi) { DassignMeStmt *defStmt = static_cast(varMeExpr.GetDefStmt()); ASSERT(defStmt != nullptr, "dynamic cast result is nullptr"); MeExpr *rhs = defStmt->GetRHS(); - if (st->GetType() && st->GetType()->GetKind() == kTypePointer) { - if (static_cast(st->GetType())->IsFunctionPtr()) { - if (rhs->GetMeOp() != kMeOpAddroffunc) { + if (st->GetType() && st->GetType()->GetKind() == kTypePointer && + static_cast(st->GetType())->IsFunctionPtr() && rhs->GetMeOp() != kMeOpAddroffunc) { return varMeExpr; - } - } } uint32 treeLevelLimitUsed = kPropTreeLevel; if (varMeExpr.GetOst()->storesIVInitValue) { diff 
--git a/src/mapleall/maple_me/src/seqvec.cpp b/src/mapleall/maple_me/src/seqvec.cpp index 9653a4684cc753cb90381987262594d80e6e513c..067877a3e167e341f2e06eccfb1f36e5a220fe01 100644 --- a/src/mapleall/maple_me/src/seqvec.cpp +++ b/src/mapleall/maple_me/src/seqvec.cpp @@ -227,8 +227,8 @@ MIRType* SeqVectorize::GenVecType(PrimType sPrimType, uint8 lanes) const { return vecType; } -bool SeqVectorize::CanAdjustRhsType(PrimType targetType, const ConstvalNode *rhs) const { - const MIRIntConst *intConst = static_cast(rhs->GetConstVal()); +bool SeqVectorize::CanAdjustRhsType(PrimType targetType, const ConstvalNode &rhs) const { + const MIRIntConst *intConst = static_cast(rhs.GetConstVal()); int64 v = intConst->GetExtValue(); bool res = false; switch (targetType) { @@ -268,11 +268,11 @@ bool SeqVectorize::CanAdjustRhsType(PrimType targetType, const ConstvalNode *rhs return res; } -void SeqVectorize::DumpCandidates(const MeExpr *base, const StoreList *storelist) const { +void SeqVectorize::DumpCandidates(const MeExpr &base, const StoreList &storelist) const { LogInfo::MapleLogger() << "Dump base node \t"; - base->Dump(meIRMap, 0); - for (uint32_t i = 0; i < (*storelist).size(); i++) { - (*storelist)[i]->Dump(0); + base.Dump(meIRMap, 0); + for (uint32_t i = 0; i < storelist.size(); i++) { + storelist[i]->Dump(0); } return; } @@ -293,7 +293,7 @@ void SeqVectorize::CollectStores(IassignNode *iassign) { // check lhs and rhs type if (iassign->GetRHS()->IsConstval() && (stmtpt != iassign->GetRHS()->GetPrimType()) && - (!CanAdjustRhsType(stmtpt, static_cast(iassign->GetRHS())))) { + (!CanAdjustRhsType(stmtpt, *(static_cast(iassign->GetRHS()))))) { return; } // compare base address with store list @@ -322,13 +322,13 @@ void SeqVectorize::CollectStores(IassignNode *iassign) { stores[base] = storelist; } -bool SeqVectorize::SameIntConstValue(const MeExpr *e1, const MeExpr *e2) const { - if (e1->GetOp() == maple::OP_constval && e2->GetOp() == maple::OP_constval && - 
IsPrimitiveInteger(e1->GetPrimType()) && - IsPrimitiveInteger(e2->GetPrimType())) { - const MIRConst *const1 = (static_cast(e1))->GetConstVal(); +bool SeqVectorize::SameIntConstValue(const MeExpr &e1, const MeExpr &e2) const { + if (e1.GetOp() == maple::OP_constval && e2.GetOp() == maple::OP_constval && + IsPrimitiveInteger(e1.GetPrimType()) && + IsPrimitiveInteger(e2.GetPrimType())) { + const MIRConst *const1 = (static_cast(&e1))->GetConstVal(); const MIRIntConst *intc1 = static_cast(const1); - const MIRConst *const2 = (static_cast(e2))->GetConstVal(); + const MIRConst *const2 = (static_cast(&e2))->GetConstVal(); const MIRIntConst *intc2 = static_cast(const2); return (intc1->GetExtValue() == intc2->GetExtValue()); } @@ -337,7 +337,7 @@ bool SeqVectorize::SameIntConstValue(const MeExpr *e1, const MeExpr *e2) const { bool SeqVectorize::CanSeqVecRhs(MeExpr *rhs1, MeExpr *rhs2) { // case 1: rhs1 and rhs2 are constval and same value - if ((rhs1 == rhs2) || SameIntConstValue(rhs1, rhs2)) { + if ((rhs1 == rhs2) || SameIntConstValue(*rhs1, *rhs2)) { if (IsRhsConst() || IsRhsStatusUnset()) { SetRhsConst(); return true; @@ -372,33 +372,33 @@ bool SeqVectorize::CanSeqVecRhs(MeExpr *rhs1, MeExpr *rhs2) { return false; } -bool SeqVectorize::IsOpExprConsecutiveMem(MeExpr *off1, MeExpr *off2, int32_t diff) const { - if (off1->GetOp() == off2->GetOp() && - off1->GetOp() == OP_add) { - if (off1->GetOpnd(0) == off2->GetOpnd(0) && - (off1->GetOpnd(1)->GetOp() == OP_constval) && - (off2->GetOpnd(1)->GetOp() == OP_constval)) { - MIRConst *constoff1 = static_cast(off1->GetOpnd(1))->GetConstVal(); - MIRIntConst *intoff1 = static_cast(constoff1); - MIRConst *constoff2 = static_cast(off2->GetOpnd(1))->GetConstVal(); - MIRIntConst *intoff2 = static_cast(constoff2); +bool SeqVectorize::IsOpExprConsecutiveMem(const MeExpr &off1, const MeExpr &off2, int32_t diff) const { + if (off1.GetOp() == off2.GetOp() && + off1.GetOp() == OP_add) { + if (off1.GetOpnd(0) == off2.GetOpnd(0) && + 
(off1.GetOpnd(1)->GetOp() == OP_constval) && + (off2.GetOpnd(1)->GetOp() == OP_constval)) { + const MIRConst *constoff1 = static_cast(off1.GetOpnd(1))->GetConstVal(); + const MIRIntConst *intoff1 = static_cast(constoff1); + const MIRConst *constoff2 = static_cast(off2.GetOpnd(1))->GetConstVal(); + const MIRIntConst *intoff2 = static_cast(constoff2); if (intoff2->GetExtValue() - intoff1->GetExtValue() == diff) { return true; } } - } else if (off1->GetOp() == OP_mul && off2->GetOp() == OP_add) { - if (off1 == off2->GetOpnd(0) && off2->GetOpnd(1)->GetOp() == OP_constval) { - MIRConst *constoff2 = static_cast(off2->GetOpnd(1))->GetConstVal(); + } else if (off1.GetOp() == OP_mul && off2.GetOp() == OP_add) { + if ((&off1) == off2.GetOpnd(0) && off2.GetOpnd(1)->GetOp() == OP_constval) { + MIRConst *constoff2 = static_cast(off2.GetOpnd(1))->GetConstVal(); MIRIntConst *intoff2 = static_cast(constoff2); if (intoff2->GetValue() == diff) { return true; } } - } else if (off1->GetOp() == off2->GetOp() && off1->GetOp() == OP_constval) { - MIRConst *const1 = static_cast(off1)->GetConstVal(); - MIRIntConst *intc1 = static_cast(const1); - MIRConst *const2 = static_cast(off2)->GetConstVal(); - MIRIntConst *intc2 = static_cast(const2); + } else if (off1.GetOp() == off2.GetOp() && off1.GetOp() == OP_constval) { + const MIRConst *const1 = static_cast(&off1)->GetConstVal(); + const MIRIntConst *intc1 = static_cast(const1); + const MIRConst *const2 = static_cast(&off2)->GetConstVal(); + const MIRIntConst *intc2 = static_cast(const2); if (intc2->GetExtValue() - intc1->GetExtValue() == diff) { return true; } @@ -406,7 +406,7 @@ bool SeqVectorize::IsOpExprConsecutiveMem(MeExpr *off1, MeExpr *off2, int32_t di return false; } -bool SeqVectorize::IsIvarExprConsecutiveMem(IvarMeExpr *ivar1, IvarMeExpr *ivar2, PrimType ptrType) { +bool SeqVectorize::IsIvarExprConsecutiveMem(IvarMeExpr *ivar1, IvarMeExpr *ivar2, PrimType ptrType) const { MeExpr *base1 = ivar1->GetBase(); MeExpr *base2 = 
ivar2->GetBase(); uint32_t base1NumOpnds = base1->GetNumOpnds(); @@ -439,7 +439,7 @@ bool SeqVectorize::IsIvarExprConsecutiveMem(IvarMeExpr *ivar1, IvarMeExpr *ivar2 // check lhs: highest dimension offset is consecutive MeExpr *off1 = base1->GetOpnd(base1NumOpnds - 1); MeExpr *off2 = base2->GetOpnd(base2NumOpnds - 1); - if (!IsOpExprConsecutiveMem(off1, off2, 1)) { + if (!IsOpExprConsecutiveMem(*off1, *off2, 1)) { return false; } } else { @@ -608,18 +608,18 @@ void SeqVectorize::MergeIassigns(MapleVector &cands) { SeqVectorize::seqVecStores++; } -void SeqVectorize::LegalityCheckAndTransform(const StoreList *storelist) { +void SeqVectorize::LegalityCheckAndTransform(const StoreList &storelist) { MapleVector cands(localAlloc.Adapter()); - size_t len = storelist->size(); + size_t len = storelist.size(); bool needReverse = true; cands.clear(); ResetRhsStatus(); // reset rhs is const flag for (size_t i = 0; i < len; ++i) { - IassignNode *store1 = (*storelist)[i]; + IassignNode *store1 = storelist[i]; MIRPtrType *ptrType = static_cast(&GetTypeFromTyIdx(store1->GetTyIdx())); cands.push_back(store1); for (size_t j = i + 1; j < len; ++j) { - IassignNode *store2 = (*storelist)[j]; + IassignNode *store2 = storelist[j]; if (CanSeqVec(cands.back(), store2, false)) { cands.push_back(store2); } @@ -637,11 +637,11 @@ void SeqVectorize::LegalityCheckAndTransform(const StoreList *storelist) { } ResetRhsStatus(); // reset rhs is const flag for (int i = static_cast(len) - 1; i >= 0; --i) { - IassignNode *store1 = (*storelist)[i]; + IassignNode *store1 = storelist[i]; MIRPtrType *ptrType = static_cast(&GetTypeFromTyIdx(store1->GetTyIdx())); cands.push_back(store1); for (int j = i - 1; j >= 0; --j) { - IassignNode *store2 = (*storelist)[j]; + IassignNode *store2 = storelist[j]; if (CanSeqVec(cands.back(), store2, true)) { cands.push_back(store2); } @@ -663,9 +663,9 @@ void SeqVectorize::CheckAndTransform() { StoreListMap::const_iterator mapit = stores.cbegin(); for (; mapit != 
stores.end(); ++mapit) { if (enableDebug) { - DumpCandidates(mapit->first, mapit->second); + DumpCandidates(*(mapit->first), *(mapit->second)); } - LegalityCheckAndTransform(mapit->second); + LegalityCheckAndTransform(*(mapit->second)); } // clear list diff --git a/src/mapleall/maple_me/src/ssa.cpp b/src/mapleall/maple_me/src/ssa.cpp index b1a4152fb12c6777fb262e2bf58b2ba6cb46b83e..989e24d6517f04110d86f914238617f79f97b5f4 100644 --- a/src/mapleall/maple_me/src/ssa.cpp +++ b/src/mapleall/maple_me/src/ssa.cpp @@ -258,15 +258,15 @@ void SSA::RenamePhiUseInSucc(const BB &bb) const { } } -void SSA::RenameAllBBs(const MeCFG *cfg) { +void SSA::RenameAllBBs(const MeCFG &cfg) { // renameMP is a tmp mempool, will be release after rename. auto renameMP = std::make_unique(memPoolCtrler, "ssa-rename-mempool"); MapleAllocator renameAlloc(renameMP.get()); InitRenameStack(ssaTab->GetOriginalStTable(), ssaTab->GetVersionStTable(), renameAlloc); // recurse down dominator tree in pre-order traversal - auto *children = &dom->GetDomChildren(cfg->GetCommonEntryBB()->GetID()); + auto *children = &dom->GetDomChildren(cfg.GetCommonEntryBB()->GetID()); for (auto child : *children) { - RenameBB(*cfg->GetBBFromID(BBId(child))); + RenameBB(*cfg.GetBBFromID(BBId(child))); } vstStacks = nullptr; } diff --git a/src/mapleall/maple_me/src/ssa_devirtual.cpp b/src/mapleall/maple_me/src/ssa_devirtual.cpp index eb11e6c90e537a6eb2d3fc05d9f8c910999fa63d..03454c2b11a5ccaf7dc77e44cf467272b76aace4 100644 --- a/src/mapleall/maple_me/src/ssa_devirtual.cpp +++ b/src/mapleall/maple_me/src/ssa_devirtual.cpp @@ -67,22 +67,21 @@ static bool IsFinalMethod(const MIRFunction *mirFunc) { TyIdx SSADevirtual::GetInferredTyIdx(MeExpr &expr) const { if (expr.GetMeOp() == kMeOpVar) { auto *varMeExpr = static_cast(&expr); - if (varMeExpr->GetInferredTyIdx() == 0u) { - // If varMeExpr->inferredTyIdx has not been set, we can double check - // if it is coming from a static final field - const OriginalSt *ost = 
varMeExpr->GetOst(); - const MIRSymbol *mirSym = ost->GetMIRSymbol(); - if (mirSym->IsStatic() && mirSym->IsFinal() && mirSym->GetInferredTyIdx() != kInitTyIdx && - mirSym->GetInferredTyIdx() != kNoneTyIdx) { - varMeExpr->SetInferredTyIdx(mirSym->GetInferredTyIdx()); - } - if (mirSym->GetType()->GetKind() == kTypePointer) { - MIRType *pointedType = (static_cast(mirSym->GetType()))->GetPointedType(); - if (pointedType->GetKind() == kTypeClass) { - if ((static_cast(pointedType))->IsFinal()) { - varMeExpr->SetInferredTyIdx(pointedType->GetTypeIndex()); - } - } + if (varMeExpr->GetInferredTyIdx() != 0u) { + return varMeExpr->GetInferredTyIdx(); + } + // If varMeExpr->inferredTyIdx has not been set, we can double check + // if it is coming from a static final field + const OriginalSt *ost = varMeExpr->GetOst(); + const MIRSymbol *mirSym = ost->GetMIRSymbol(); + if (mirSym->IsStatic() && mirSym->IsFinal() && mirSym->GetInferredTyIdx() != kInitTyIdx && + mirSym->GetInferredTyIdx() != kNoneTyIdx) { + varMeExpr->SetInferredTyIdx(mirSym->GetInferredTyIdx()); + } + if (mirSym->GetType()->GetKind() == kTypePointer) { + MIRType *pointedType = (static_cast(mirSym->GetType()))->GetPointedType(); + if (pointedType->GetKind() == kTypeClass && (static_cast(pointedType))->IsFinal()) { + varMeExpr->SetInferredTyIdx(pointedType->GetTypeIndex()); } } return varMeExpr->GetInferredTyIdx(); @@ -363,7 +362,7 @@ void SSADevirtual::VisitVarPhiNode(MePhiNode &varPhi) { } VarMeExpr *lhsVar = static_cast(varPhi.GetLHS()); - auto mapit = inferredTypeCandidatesMap.find(lhsVar->GetExprID()); + const auto &mapit = std::as_const(inferredTypeCandidatesMap).find(lhsVar->GetExprID()); if (mapit == inferredTypeCandidatesMap.cend()) { auto tyIdxCandidates = devirtualAlloc.GetMemPool()->New>(devirtualAlloc.Adapter()); inferredTypeCandidatesMap[lhsVar->GetExprID()] = tyIdxCandidates; @@ -372,20 +371,19 @@ void SSADevirtual::VisitVarPhiNode(MePhiNode &varPhi) { for (size_t i = 0; i < opnds.size(); ++i) { 
VarMeExpr *opnd = static_cast(opnds[i]); PropVarInferredType(*opnd); - if (opnd->GetInferredTyIdx() != 0u) { - size_t j = 0; - for (; j < inferredTypeCandidates.size(); j++) { - if (inferredTypeCandidates.at(j) == opnd->GetInferredTyIdx()) { - break; - } - } - if (j == inferredTypeCandidates.size()) { - inferredTypeCandidates.push_back(opnd->GetInferredTyIdx()); - } - } else { + if (opnd->GetInferredTyIdx() == 0u) { inferredTypeCandidates.clear(); break; } + size_t j = 0; + for (; j < inferredTypeCandidates.size(); j++) { + if (inferredTypeCandidates.at(j) == opnd->GetInferredTyIdx()) { + break; + } + } + if (j == inferredTypeCandidates.size()) { + inferredTypeCandidates.push_back(opnd->GetInferredTyIdx()); + } } } diff --git a/src/mapleall/maple_me/src/ssa_epre.cpp b/src/mapleall/maple_me/src/ssa_epre.cpp index 553e74548e46662bdc8eb4e3fbedee3fb3a23070..8c47398a23a2bc3346342493657d7803d56306e7 100644 --- a/src/mapleall/maple_me/src/ssa_epre.cpp +++ b/src/mapleall/maple_me/src/ssa_epre.cpp @@ -68,10 +68,7 @@ void SSAEPre::GenerateSaveLHSRealocc(MeRealOcc &realOcc, ScalarMeExpr ®OrVar) savedRHS = irMap->CreateMeExprTypeCvt(lhsPrimType, savedRHS->GetPrimType(), *savedRHS); } else { Opcode extOp = IsSignedInteger(lhsPrimType) ? OP_sext : OP_zext; - PrimType newPrimType = PTY_u32; - if (IsSignedInteger(lhsPrimType)) { - newPrimType = PTY_i32; - } + PrimType newPrimType = IsSignedInteger(lhsPrimType) ? 
PTY_i32 : PTY_u32; OpMeExpr opmeexpr(-1, extOp, newPrimType, 1); opmeexpr.SetBitsSize(static_cast(GetPrimTypeSize(lhsPrimType) * 8)); opmeexpr.SetOpnd(0, savedRHS); diff --git a/src/mapleall/maple_me/src/ssa_epre_for_lftr.cpp b/src/mapleall/maple_me/src/ssa_epre_for_lftr.cpp index 7e10036a9e34d3379d3101f1e34ba33cbd44856a..e6a97800e56f2174e0d895b76c719797a4c5005d 100644 --- a/src/mapleall/maple_me/src/ssa_epre_for_lftr.cpp +++ b/src/mapleall/maple_me/src/ssa_epre_for_lftr.cpp @@ -20,9 +20,9 @@ namespace maple { // Find the SSA version of scalar at stmt by search backward for its def. // When reaching the beginning of BB, continue with parent BB in the dominator // tree. It is assumed that scalarOst has no alias, so chi lists are skipped. -ScalarMeExpr *SSAEPre::FindScalarVersion(ScalarMeExpr *scalar, MeStmt *stmt) const { - if (scalar->GetOst()->NumSSAVersions() == 1) { - return scalar; +ScalarMeExpr *SSAEPre::FindScalarVersion(ScalarMeExpr &scalar, MeStmt *stmt) const { + if (scalar.GetOst()->NumSSAVersions() == 1) { + return &scalar; } BB *bb = stmt->GetBB(); stmt = stmt->GetPrev(); @@ -33,14 +33,14 @@ ScalarMeExpr *SSAEPre::FindScalarVersion(ScalarMeExpr *scalar, MeStmt *stmt) con AssignMeStmt *asStmt = dynamic_cast(stmt); if (asStmt != nullptr) { lhs = asStmt->GetLHS(); - if (lhs->GetOst() == scalar->GetOst()) { + if (lhs->GetOst() == scalar.GetOst()) { return lhs; } } else { CallMeStmt *callStmt = dynamic_cast(stmt); if (callStmt != nullptr) { lhs = callStmt->GetAssignedLHS(); - if (lhs != nullptr && lhs->GetOst() == scalar->GetOst()) { + if (lhs != nullptr && lhs->GetOst() == scalar.GetOst()) { return lhs; } } @@ -49,7 +49,7 @@ ScalarMeExpr *SSAEPre::FindScalarVersion(ScalarMeExpr *scalar, MeStmt *stmt) con } // check if there is phi MapleMap &mePhiList = bb->GetMePhiList(); - auto it = mePhiList.find(scalar->GetOst()->GetIndex()); + auto it = std::as_const(mePhiList).find(scalar.GetOst()->GetIndex()); if (it != mePhiList.cend()) { return it->second->GetLHS(); 
} @@ -59,7 +59,6 @@ ScalarMeExpr *SSAEPre::FindScalarVersion(ScalarMeExpr *scalar, MeStmt *stmt) con stmt = to_ptr(bb->GetMeStmts().rbegin()); } while (true); CHECK_FATAL(false, "FindScalarVersion: fail to find SSA version for scalar"); - return nullptr; } // one side of compare is an operand x in workCand->GetTheMeExpr() with current @@ -120,7 +119,7 @@ OpMeExpr *SSAEPre::FormLFTRCompare(MeRealOcc *compOcc, MeExpr *regorvar) { if (scalarOpnd == nullptr) { newSide.SetOpnd(1 - i, x->GetOpnd(1 - i)); } else { - scalarOpnd = FindScalarVersion(scalarOpnd, compOcc->GetMeStmt()); + scalarOpnd = FindScalarVersion(*scalarOpnd, compOcc->GetMeStmt()); newSide.SetOpnd(1 - i, scalarOpnd); } break; diff --git a/src/mapleall/maple_me/src/ssa_epre_for_sr.cpp b/src/mapleall/maple_me/src/ssa_epre_for_sr.cpp index 15e8f30fee958650eb81442ba3cd323be58f254b..b13411f43c603a77451cc715cba8b7b40f73d4ff 100644 --- a/src/mapleall/maple_me/src/ssa_epre_for_sr.cpp +++ b/src/mapleall/maple_me/src/ssa_epre_for_sr.cpp @@ -49,13 +49,13 @@ ScalarMeExpr* SSAEPre::ResolveAllInjuringDefs(ScalarMeExpr *regx) const { return answer; } -bool SSAEPre::OpndInDefOcc(const MeExpr *opnd, MeOccur *defocc, uint32 i) const { - if (defocc->GetOccType() == kOccReal) { - MeRealOcc *defrealocc = static_cast(defocc); +bool SSAEPre::OpndInDefOcc(const MeExpr &opnd, MeOccur &defocc, uint32 i) const { + if (defocc.GetOccType() == kOccReal) { + MeRealOcc *defrealocc = static_cast(&defocc); MeExpr *defexpr = defrealocc->GetMeExpr(); - return opnd == defexpr->GetOpnd(i); + return (&opnd) == defexpr->GetOpnd(i); } else { // kOccPhi - return DefVarDominateOcc(opnd, *defocc); + return DefVarDominateOcc(&opnd, defocc); } } @@ -92,7 +92,7 @@ void SSAEPre::SRSetNeedRepair(MeOccur *useocc, std::set *needRepairInj if (curopnd->GetMeOp() != kMeOpVar && curopnd->GetMeOp() != kMeOpReg) { continue; } - if (!OpndInDefOcc(curopnd, defocc, i)) { + if (!OpndInDefOcc(*curopnd, *defocc, i)) { ScalarMeExpr *varx = static_cast(curopnd); 
needRepairInjuringDefs->insert(varx->GetDefStmt()); } @@ -111,28 +111,28 @@ static int64 GetIncreAmtAndRhsScalar(MeExpr *x, ScalarMeExpr *&rhsScalar) { return (opexpr->GetOp() == OP_sub) ? -amt : amt; } -MeExpr* SSAEPre::InsertRepairStmt(MeExpr *temp, int64 increAmt, MeStmt *injuringDef) const { +MeExpr* SSAEPre::InsertRepairStmt(MeExpr &temp, int64 increAmt, MeStmt &injuringDef) const { MeExpr *rhs = nullptr; if (increAmt >= 0) { - rhs = irMap->CreateMeExprBinary(OP_add, temp->GetPrimType(), *temp, - *irMap->CreateIntConstMeExpr(increAmt, temp->GetPrimType())); + rhs = irMap->CreateMeExprBinary(OP_add, temp.GetPrimType(), temp, + *irMap->CreateIntConstMeExpr(increAmt, temp.GetPrimType())); } else { - rhs = irMap->CreateMeExprBinary(OP_sub, temp->GetPrimType(), *temp, - *irMap->CreateIntConstMeExpr(-increAmt, temp->GetPrimType())); + rhs = irMap->CreateMeExprBinary(OP_sub, temp.GetPrimType(), temp, + *irMap->CreateIntConstMeExpr(-increAmt, temp.GetPrimType())); } - BB *bb = injuringDef->GetBB(); + BB *bb = injuringDef.GetBB(); MeStmt *newstmt = nullptr; - if (temp->GetMeOp() == kMeOpReg) { - RegMeExpr *newreg = irMap->CreateRegMeExprVersion(*static_cast(temp)); + if (temp.GetMeOp() == kMeOpReg) { + RegMeExpr *newreg = irMap->CreateRegMeExprVersion(*static_cast(&temp)); newstmt = irMap->CreateAssignMeStmt(*newreg, *rhs, *bb); static_cast(newstmt)->isIncDecStmt = true; - bb->InsertMeStmtAfter(injuringDef, newstmt); + bb->InsertMeStmtAfter(&injuringDef, newstmt); return newreg; } else { - VarMeExpr *newvar = irMap->CreateVarMeExprVersion(*static_cast(temp)); + VarMeExpr *newvar = irMap->CreateVarMeExprVersion(*static_cast(&temp)); newstmt = irMap->CreateAssignMeStmt(*newvar, *rhs, *bb); static_cast(newstmt)->isIncDecStmt = true; - bb->InsertMeStmtAfter(injuringDef, newstmt); + bb->InsertMeStmtAfter(&injuringDef, newstmt); return newvar; } } @@ -147,7 +147,6 @@ static MeExpr *FindLaterRepairedTemp(const MeExpr *temp, const MeStmt *injuringD ass = 
static_cast(ass->GetNext()); } CHECK_FATAL(false, "FindLaterRepairedTemp: failed to find repair statement"); - return nullptr; } MeExpr* SSAEPre::SRRepairOpndInjuries(MeExpr *curopnd, MeOccur *defocc, int32 i, @@ -159,36 +158,35 @@ MeExpr* SSAEPre::SRRepairOpndInjuries(MeExpr *curopnd, MeOccur *defocc, int32 i, AssignMeStmt *ass = static_cast(scalarx->GetDefStmt()); CHECK_FATAL(ass->isIncDecStmt, "SRRepairOpndInjuries: not an inc/dec statement"); MeStmt *latestInjuringDef = ass; - if (repairedInjuringDefs->count(ass) == 0) { - repairedInjuringDefs->insert(ass); - bool done = false; - int64 increAmt = 0; - ScalarMeExpr *rhsScalar = nullptr; - do { - increAmt += GetIncreAmtAndRhsScalar(ass->GetRHS(), rhsScalar); - if (OpndInDefOcc(rhsScalar, defocc, static_cast(i))) { - done = true; - } else { - scalarx = rhsScalar; - ass = static_cast(scalarx->GetDefStmt()); - CHECK_FATAL(ass->isIncDecStmt, "SRRepairOpndInjuries: not an inc/dec statement"); - done = needRepairInjuringDefs->count(ass) == 1; - if (done) { - if (repairedInjuringDefs->count(ass) == 0) { - repairedTemp = SRRepairOpndInjuries(scalarx, defocc, i, tempAtDef, needRepairInjuringDefs, - repairedInjuringDefs); - } - repairedTemp = FindLaterRepairedTemp(repairedTemp, ass); - } - } - } while (!done); - // generate the increment statement at latestInjuringDef - repairedTemp = InsertRepairStmt(repairedTemp, increAmt * workCand->GetTheMeExpr()->SRMultiplier(scalarx->GetOst()), - latestInjuringDef); - } else { - // find the last repair increment statement + if (repairedInjuringDefs->count(ass) != 0) { repairedTemp = FindLaterRepairedTemp(repairedTemp, latestInjuringDef); + return repairedTemp; } + repairedInjuringDefs->insert(ass); + bool done = false; + int64 increAmt = 0; + ScalarMeExpr *rhsScalar = nullptr; + do { + increAmt += GetIncreAmtAndRhsScalar(ass->GetRHS(), rhsScalar); + if (OpndInDefOcc(*rhsScalar, *defocc, static_cast(i))) { + done = true; + } else { + scalarx = rhsScalar; + ass = 
static_cast(scalarx->GetDefStmt()); + CHECK_FATAL(ass->isIncDecStmt, "SRRepairOpndInjuries: not an inc/dec statement"); + done = needRepairInjuringDefs->count(ass) == 1; + if (done) { + if (repairedInjuringDefs->count(ass) == 0) { + repairedTemp = SRRepairOpndInjuries(scalarx, defocc, i, tempAtDef, needRepairInjuringDefs, + repairedInjuringDefs); + } + repairedTemp = FindLaterRepairedTemp(repairedTemp, ass); + } + } + } while (!done); + // generate the increment statement at latestInjuringDef + repairedTemp = InsertRepairStmt(*repairedTemp, increAmt * workCand->GetTheMeExpr()->SRMultiplier(scalarx->GetOst()), + *latestInjuringDef); return repairedTemp; } @@ -240,7 +238,7 @@ MeExpr* SSAEPre::SRRepairInjuries(MeOccur *useocc, continue; } } - if (!OpndInDefOcc(curopnd, defocc, i)) { + if (!OpndInDefOcc(*curopnd, *defocc, i)) { repairedTemp = SRRepairOpndInjuries(curopnd, defocc, i, repairedTemp, needRepairInjuringDefs, repairedInjuringDefs); } diff --git a/src/mapleall/maple_me/src/ssa_pre.cpp b/src/mapleall/maple_me/src/ssa_pre.cpp index 2b45f73d57d14cf366b7700840432d81db6ac346..da39bee80ea9c3439ad9a647fcf98bdbad36802d 100644 --- a/src/mapleall/maple_me/src/ssa_pre.cpp +++ b/src/mapleall/maple_me/src/ssa_pre.cpp @@ -168,45 +168,7 @@ void SSAPre::UpdateInsertedPhiOccOpnd() { if (!WillBeAvail(phiOcc) || phiOcc->IsRemoved()) { continue; } - if (phiOcc->GetRegPhi()) { - MePhiNode *phiReg = phiOcc->GetRegPhi(); - const MapleVector &phiopnds = phiOcc->GetPhiOpnds(); - for (uint32 i = 0; i < phiopnds.size(); i++) { - RegMeExpr *regOpnd = static_cast(phiopnds[i]->phiOpnd4Temp); - if (regOpnd == nullptr) { - // create a zero version - CHECK_FATAL(curTemp != nullptr, "curTemp can't be null in SSAPre::UpdateInsertedPhiOccOpnd"); - regOpnd = irMap->CreateRegMeExprVersion(static_cast(*curTemp)); - } - phiReg->GetOpnds().push_back(regOpnd); - } - (void)phiOcc->GetBB()->GetMePhiList().insert(std::make_pair(phiReg->GetOpnd(0)->GetOstIdx(), phiReg)); - if (workCand->NeedLocalRefVar() 
&& phiOcc->GetVarPhi() != nullptr) { - MePhiNode *phiVar = phiOcc->GetVarPhi(); - const MapleVector &phiOpnds = phiOcc->GetPhiOpnds(); - for (uint32 i = 0; i < phiOpnds.size(); i++) { - RegMeExpr *regOpnd = static_cast(phiOpnds[i]->phiOpnd4Temp); - VarMeExpr *localRefVarOpnd = nullptr; - if (regOpnd == nullptr) { - // create a zero version - CHECK_FATAL(curLocalRefVar != nullptr, "null ptr check"); - OriginalSt *ost = curLocalRefVar->GetOst(); - localRefVarOpnd = irMap->GetOrCreateZeroVersionVarMeExpr(*ost); - } else { - auto mapIt = temp2LocalRefVarMap.find(regOpnd); - if (mapIt == temp2LocalRefVarMap.end()) { - CHECK_FATAL(curLocalRefVar != nullptr, "null ptr check"); - OriginalSt *ost = curLocalRefVar->GetOst(); - localRefVarOpnd = irMap->GetOrCreateZeroVersionVarMeExpr(*ost); - } else { - localRefVarOpnd = mapIt->second; - } - } - phiVar->GetOpnds().push_back(localRefVarOpnd); - } - (void)phiOcc->GetBB()->GetMePhiList().insert(std::make_pair(phiVar->GetOpnd(0)->GetOstIdx(), phiVar)); - } - } else { + if (!phiOcc->GetRegPhi()) { MePhiNode *phiVar = phiOcc->GetVarPhi(); const MapleVector &phiopnds = phiOcc->GetPhiOpnds(); for (uint32 i = 0; i < phiopnds.size(); i++) { @@ -218,7 +180,46 @@ void SSAPre::UpdateInsertedPhiOccOpnd() { phiVar->GetOpnds().push_back(varOpnd); } (void)phiOcc->GetBB()->GetMePhiList().insert(std::make_pair(phiVar->GetOpnd(0)->GetOstIdx(), phiVar)); + continue; + } + MePhiNode *phiReg = phiOcc->GetRegPhi(); + const MapleVector &phiopnds = phiOcc->GetPhiOpnds(); + for (uint32 i = 0; i < phiopnds.size(); i++) { + RegMeExpr *regOpnd = static_cast(phiopnds[i]->phiOpnd4Temp); + if (regOpnd == nullptr) { + // create a zero version + CHECK_FATAL(curTemp != nullptr, "curTemp can't be null in SSAPre::UpdateInsertedPhiOccOpnd"); + regOpnd = irMap->CreateRegMeExprVersion(static_cast(*curTemp)); + } + phiReg->GetOpnds().push_back(regOpnd); + } + (void)phiOcc->GetBB()->GetMePhiList().insert(std::make_pair(phiReg->GetOpnd(0)->GetOstIdx(), phiReg)); + if 
(!workCand->NeedLocalRefVar() || phiOcc->GetVarPhi() == nullptr) { + continue; + } + MePhiNode *phiVar = phiOcc->GetVarPhi(); + const MapleVector &phiOpnds = phiOcc->GetPhiOpnds(); + for (uint32 i = 0; i < phiOpnds.size(); i++) { + RegMeExpr *regOpnd = static_cast(phiOpnds[i]->phiOpnd4Temp); + VarMeExpr *localRefVarOpnd = nullptr; + if (regOpnd == nullptr) { + // create a zero version + CHECK_FATAL(curLocalRefVar != nullptr, "null ptr check"); + OriginalSt *ost = curLocalRefVar->GetOst(); + localRefVarOpnd = irMap->GetOrCreateZeroVersionVarMeExpr(*ost); + } else { + auto mapIt = std::as_const(temp2LocalRefVarMap).find(regOpnd); + if (mapIt == temp2LocalRefVarMap.end()) { + CHECK_FATAL(curLocalRefVar != nullptr, "null ptr check"); + OriginalSt *ost = curLocalRefVar->GetOst(); + localRefVarOpnd = irMap->GetOrCreateZeroVersionVarMeExpr(*ost); + } else { + localRefVarOpnd = mapIt->second; + } + } + phiVar->GetOpnds().push_back(localRefVarOpnd); } + (void)phiOcc->GetBB()->GetMePhiList().insert(std::make_pair(phiVar->GetOpnd(0)->GetOstIdx(), phiVar)); } } @@ -367,15 +368,15 @@ void SSAPre::CodeMotion() { // ================ Step 5: Finalize ================= -// return true if either: +// return true if either // operand is nullptr (def is null), or // hasRealUse is false and defined by a PHI not will be avail -bool SSAPre::OKToInsert(MePhiOpndOcc *phiOpnd) { - if (phiOpnd->GetDef() == nullptr) { +bool SSAPre::OKToInsert(MePhiOpndOcc &phiOpnd) const { + if (phiOpnd.GetDef() == nullptr) { return true; } - if (!phiOpnd->HasRealUse()) { - MeOccur *defOcc = phiOpnd->GetDef(); + if (!phiOpnd.HasRealUse()) { + MeOccur *defOcc = phiOpnd.GetDef(); if (defOcc->GetOccType() == kOccPhiocc && !WillBeAvail(static_cast(defOcc))) { return true; @@ -439,7 +440,7 @@ void SSAPre::Finalize1() { if (!WillBeAvail(phiOcc)) { break; } - if (OKToInsert(phiOpnd)) { + if (OKToInsert(*phiOpnd)) { // insert the current expression at the end of the block containing phiOpnd if 
(phiOpnd->GetBB()->GetSucc().size() > 1) { CHECK_FATAL(!workCand->Redo2HandleCritEdges(), "Finalize1: insertion at critical edge, aborting"); @@ -791,26 +792,27 @@ void SSAPre::ComputeDS() const { } } } - if (GetSSAPreDebug()) { - mirModule->GetOut() << "========ssapre candidate " << workCand->GetIndex() - << " after DownSafety===================\n"; - for (auto it = phiOccs.begin(); it != phiOccs.end(); ++it) { - MePhiOcc *phiOcc = *it; - phiOcc->Dump(*irMap); - if (phiOcc->SpeculativeDownSafe()) { - mirModule->GetOut() << " spec_downsafe /"; - } - if (phiOcc->IsDownSafe()) { - mirModule->GetOut() << " is downsafe\n"; - for (MePhiOpndOcc *phiOpnd : phiOcc->GetPhiOpnds()) { - if (!phiOpnd->IsProcessed()) { - phiOpnd->Dump(*irMap); - mirModule->GetOut() << " has not been processed by Rename2\n"; - } + if (!GetSSAPreDebug()) { + return; + } + mirModule->GetOut() << "========ssapre candidate " << workCand->GetIndex() + << " after DownSafety===================\n"; + for (auto it = phiOccs.begin(); it != phiOccs.end(); ++it) { + MePhiOcc *phiOcc = *it; + phiOcc->Dump(*irMap); + if (phiOcc->SpeculativeDownSafe()) { + mirModule->GetOut() << " spec_downsafe /"; + } + if (phiOcc->IsDownSafe()) { + mirModule->GetOut() << " is downsafe\n"; + for (MePhiOpndOcc *phiOpnd : phiOcc->GetPhiOpnds()) { + if (!phiOpnd->IsProcessed()) { + phiOpnd->Dump(*irMap); + mirModule->GetOut() << " has not been processed by Rename2\n"; } - } else { - mirModule->GetOut() << " is not downsafe\n"; } + } else { + mirModule->GetOut() << " is not downsafe\n"; } } } diff --git a/src/mapleall/maple_me/src/ssa_pre_for_hoist.cpp b/src/mapleall/maple_me/src/ssa_pre_for_hoist.cpp index e7b544afbbaebf3cf5ed0225f81ad9b4a400f570..99fd6ca48e5d722779d122df3bd9612bb1a19a0c 100644 --- a/src/mapleall/maple_me/src/ssa_pre_for_hoist.cpp +++ b/src/mapleall/maple_me/src/ssa_pre_for_hoist.cpp @@ -178,7 +178,7 @@ class ExprHoist { void UpdateSuccCount(HoistSummary *hs, uint32 whichSucc, MeExpr *expr, MeOccur *occ); void 
AddToHoistWorklist(HoistSummary *hs); - MeOccur *GetHoistedOcc(HoistSummary *hs, MeExpr *expr, MeOccur *defOcc); + MeOccur *GetHoistedOcc(HoistSummary &hs, MeExpr *expr, MeOccur *defOcc); void HoistExpr(const MapleVector &allOccs, int32 candId); int32 GetHoistedCount() const { return hoistedCount; @@ -238,23 +238,23 @@ static MeExpr *GetRealExpr(MeOccur &occ) { } } -MeOccur *ExprHoist::GetHoistedOcc(HoistSummary *hs, MeExpr *expr, MeOccur *defOcc) { - ASSERT(hs->FullyAnticipated(), "GetHoistedOcc: cd is not fully anticipated."); - ASSERT(hs->candId == curCandId, "GetHoistedOcc: wrong cand."); - if (hs->candId == curCandId && hs->hoistedOcc) { - return hs->hoistedOcc; +MeOccur *ExprHoist::GetHoistedOcc(HoistSummary &hs, MeExpr *expr, MeOccur *defOcc) { + ASSERT(hs.FullyAnticipated(), "GetHoistedOcc: cd is not fully anticipated."); + ASSERT(hs.candId == curCandId, "GetHoistedOcc: wrong cand."); + if (hs.candId == curCandId && hs.hoistedOcc) { + return hs.hoistedOcc; } MeOccur *hoistedOcc = nullptr; // loop up the cd chain - if (hs->cdHS && - hs->cdHS->candId == curCandId && - (hs->cdHS->occ == nullptr || (GetRealExpr(*hs->cdHS->occ) == expr)) && - hs->cdHS->FullyAnticipated() && - hs->DefoccAllowHoist(defOcc)) { - hoistedOcc = GetHoistedOcc(hs->cdHS, expr, defOcc); + if (hs.cdHS && + hs.cdHS->candId == curCandId && + (hs.cdHS->occ == nullptr || (GetRealExpr(*hs.cdHS->occ) == expr)) && + hs.cdHS->FullyAnticipated() && + hs.DefoccAllowHoist(defOcc)) { + hoistedOcc = GetHoistedOcc(*hs.cdHS, expr, defOcc); } else { // already at cd chain's root if (defOcc && - fDom->Dominate(*defOcc->GetBB(), *hs->bb) && + fDom->Dominate(*defOcc->GetBB(), *hs.bb) && (defOcc->GetOccType() == kOccReal || (defOcc->GetOccType() == kOccPhiocc && static_cast(defOcc)->IsWillBeAvail()))) { // use defOcc @@ -262,10 +262,10 @@ MeOccur *ExprHoist::GetHoistedOcc(HoistSummary *hs, MeExpr *expr, MeOccur *defOc } else { // insert a new one ASSERT(expr->GetExprID() != kInvalidExprID, "GetHoistedOcc: 
check expr hashed."); auto *fakeStmt = parent->irMap->CreateAssignMeStmt(*parent->irMap->CreateRegMeExpr(expr->GetPrimType()), - *expr, *hs->bb); - hs->bb->InsertMeStmtLastBr(fakeStmt); + *expr, *hs.bb); + hs.bb->InsertMeStmtLastBr(fakeStmt); auto seqStmt = 0; - for (auto &stmt : hs->bb->GetMeStmts()) { + for (auto &stmt : hs.bb->GetMeStmts()) { ++seqStmt; if (&stmt == fakeStmt) { break; @@ -285,14 +285,10 @@ MeOccur *ExprHoist::GetHoistedOcc(HoistSummary *hs, MeExpr *expr, MeOccur *defOc // keep dt_preorder for (auto iter = parent->allOccs.begin(); iter != parent->allOccs.end(); ++iter) { auto *occ = *iter; - if (fDom->GetDtDfnItem(occ->GetBB()->GetBBId()) < - fDom->GetDtDfnItem(newRealocc->GetBB()->GetBBId())) { - continue; - } - if (fDom->Dominate(*occ->GetBB(), *newRealocc->GetBB())) { - continue; - } - if (!fDom->Dominate(*newRealocc->GetBB(), *occ->GetBB())) { + if ((fDom->GetDtDfnItem(occ->GetBB()->GetBBId()) < + fDom->GetDtDfnItem(newRealocc->GetBB()->GetBBId())) || + (fDom->Dominate(*occ->GetBB(), *newRealocc->GetBB())) || + (!fDom->Dominate(*newRealocc->GetBB(), *occ->GetBB()))) { continue; } (void)parent->allOccs.insert(iter, newRealocc); @@ -300,7 +296,7 @@ MeOccur *ExprHoist::GetHoistedOcc(HoistSummary *hs, MeExpr *expr, MeOccur *defOc } newRealocc->SetIsHoisted(true); if (defOcc && - fDom->Dominate(*defOcc->GetBB(), *hs->bb) && + fDom->Dominate(*defOcc->GetBB(), *hs.bb) && defOcc->GetOccType() == kOccPhiocc) { newRealocc->SetClassID(defOcc->GetClassID()); newRealocc->SetDef(defOcc); @@ -311,7 +307,7 @@ MeOccur *ExprHoist::GetHoistedOcc(HoistSummary *hs, MeExpr *expr, MeOccur *defOc hoistedOcc = newRealocc; } } - hs->hoistedOcc = hoistedOcc; + hs.hoistedOcc = hoistedOcc; return hoistedOcc; } @@ -349,7 +345,7 @@ void ExprHoist::HoistExpr(const MapleVector &allOccs, int32 candId) { } auto *phiOpndocc = static_cast(occ); auto *phiOcc = phiOpndocc->GetDefPhiOcc(); - if (phiOcc->IsWillBeAvail() && parent->OKToInsert(phiOpndocc)) { + if 
(phiOcc->IsWillBeAvail() && parent->OKToInsert(*phiOpndocc)) { if (hs->cdHS && // need a cd to hoist hs->occ == nullptr && // if not null, hs has been inserted hs->cdHS->occ != nullptr && // make sure there's at least one realocc at cd @@ -380,7 +376,7 @@ void ExprHoist::HoistExpr(const MapleVector &allOccs, int32 candId) { continue; } } - auto *hoistedOcc = GetHoistedOcc(hs->cdHS, realocc->GetMeExpr(), realocc->GetDef()); + auto *hoistedOcc = GetHoistedOcc(*hs->cdHS, realocc->GetMeExpr(), realocc->GetDef()); auto *hoistedOccDef = (hoistedOcc->GetOccType() == kOccReal && hoistedOcc->GetDef()) ? hoistedOcc->GetDef() : hoistedOcc; if (hoistedOccDef->GetClassID() != realocc->GetClassID()) { @@ -416,7 +412,7 @@ void ExprHoist::HoistExpr(const MapleVector &allOccs, int32 candId) { continue; } } - auto *hoistedOcc = GetHoistedOcc(hs->cdHS, phiopndocc->GetCurrentMeExpr(), nullptr); + auto *hoistedOcc = GetHoistedOcc(*hs->cdHS, phiopndocc->GetCurrentMeExpr(), nullptr); auto *hoistedOccDef = (hoistedOcc->GetOccType() == kOccReal && hoistedOcc->GetDef()) ? 
hoistedOcc->GetDef() : hoistedOcc; phiopndocc->SetDef(hoistedOccDef); diff --git a/src/mapleall/maple_me/src/ssa_tab.cpp b/src/mapleall/maple_me/src/ssa_tab.cpp index cd143c4513310bce5e559ffdcc2364fcca4eb9dc..ce784a7892357113612a8113a780f48d47af0dc3 100644 --- a/src/mapleall/maple_me/src/ssa_tab.cpp +++ b/src/mapleall/maple_me/src/ssa_tab.cpp @@ -129,18 +129,19 @@ void SSATab::CreateSSAStmt(StmtNode &stmt, const BB *curbb) { theSSAPart->InsertMayDefNode(MayDefNode(theSSAPart->GetSSAVar(), &dNode)); } // set ost->isPtrWithIncDec - if (ost->GetType()->IsMIRPtrType()) { - if (dNode.GetRHS()->GetOpCode() == OP_add || dNode.GetRHS()->GetOpCode() == OP_sub) { - BinaryNode *rhs = static_cast(dNode.GetRHS()); - if (rhs->Opnd(0)->GetOpCode() == OP_dread && rhs->Opnd(1)->GetOpCode() == OP_constval) { - AddrofSSANode *dread = static_cast(rhs->Opnd(0)); - MIRSymbol *st2 = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); - CHECK_FATAL(st2 != nullptr, "null ptr check"); - OriginalSt *ost2 = FindOrCreateSymbolOriginalSt(*st2, mirModule.CurFunction()->GetPuidx(), - dread->GetFieldID()); - if (ost == ost2) { - ost->isPtrWithIncDec = true; - } + if (!ost->GetType()->IsMIRPtrType()) { + return; + } + if (dNode.GetRHS()->GetOpCode() == OP_add || dNode.GetRHS()->GetOpCode() == OP_sub) { + BinaryNode *rhs = static_cast(dNode.GetRHS()); + if (rhs->Opnd(0)->GetOpCode() == OP_dread && rhs->Opnd(1)->GetOpCode() == OP_constval) { + AddrofSSANode *dread = static_cast(rhs->Opnd(0)); + MIRSymbol *st2 = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + CHECK_FATAL(st2 != nullptr, "null ptr check"); + OriginalSt *ost2 = FindOrCreateSymbolOriginalSt(*st2, mirModule.CurFunction()->GetPuidx(), + dread->GetFieldID()); + if (ost == ost2) { + ost->isPtrWithIncDec = true; } } } @@ -155,14 +156,15 @@ void SSATab::CreateSSAStmt(StmtNode &stmt, const BB *curbb) { VersionSt *vst = versionStTable.GetZeroVersionSt(ost); stmtsSSAPart.SetSSAPartOf(stmt, vst); // set 
ost->isPtrWithIncDec - if (ost->GetType()->IsMIRPtrType()) { - if (regNode.GetRHS()->GetOpCode() == OP_add || regNode.GetRHS()->GetOpCode() == OP_sub) { - BinaryNode *rhs = static_cast(regNode.GetRHS()); - if (rhs->Opnd(0)->GetOpCode() == OP_regread && rhs->Opnd(1)->GetOpCode() == OP_constval) { - RegreadSSANode *regread = static_cast(rhs->Opnd(0)); - if (regNode.GetRegIdx() == regread->GetRegIdx()) { - ost->isPtrWithIncDec = true; - } + if (!ost->GetType()->IsMIRPtrType()) { + return; + } + if (regNode.GetRHS()->GetOpCode() == OP_add || regNode.GetRHS()->GetOpCode() == OP_sub) { + BinaryNode *rhs = static_cast(regNode.GetRHS()); + if (rhs->Opnd(0)->GetOpCode() == OP_regread && rhs->Opnd(1)->GetOpCode() == OP_constval) { + RegreadSSANode *regread = static_cast(rhs->Opnd(0)); + if (regNode.GetRegIdx() == regread->GetRegIdx()) { + ost->isPtrWithIncDec = true; } } } diff --git a/src/mapleall/maple_me/src/type_based_alias_analysis.cpp b/src/mapleall/maple_me/src/type_based_alias_analysis.cpp index 686b1b2e6999b16d23e91b1c07c64176633c9bc4..2934089ec649c4e8fc90672f2ed65a886a58cbab 100644 --- a/src/mapleall/maple_me/src/type_based_alias_analysis.cpp +++ b/src/mapleall/maple_me/src/type_based_alias_analysis.cpp @@ -37,7 +37,7 @@ MIRType *GetFieldType(MIRStructType *strucType, FieldID fieldId) { fieldsType[0] = strucType; size_t i = 1; while (i < fieldNum) { - MIRType *fieldType = strucType->GetFieldType(i); + MIRType *fieldType = strucType->GetFieldType(static_cast(i)); MIRStructType *structFieldType = fieldType->EmbeddedStructType(); if (structFieldType != nullptr) { (void)GetFieldType(structFieldType, 0); // build sub-struct @@ -152,8 +152,8 @@ bool IsFieldTypeOfArrayType(ArrayType *arrayType, MIRType *checkedType) { class FieldTypeComparator { public: - FieldTypeComparator(MIRType *type) : checkedType(type) {} - bool operator() (MIRType *fieldType) { + explicit FieldTypeComparator(MIRType *type) : checkedType(type) {} + bool operator() (MIRType *fieldType) const { if 
(fieldType == checkedType) { return true; } @@ -344,12 +344,12 @@ bool IsPointerInterconvertible(const MIRPtrType &ptrTypeA, const MIRPtrType &ptr // bit-field, then to the unit in which it resides), and vice versa. std::set initialMemType; GetInitialMemType(*pointedTypeA, initialMemType); - if (initialMemType.count(pointedTypeB)) { + if (initialMemType.count(pointedTypeB) != 0) { return true; } initialMemType.clear(); GetInitialMemType(*pointedTypeB, initialMemType); - if (initialMemType.count(pointedTypeA)) { + if (initialMemType.count(pointedTypeA) != 0) { return true; } return false; @@ -381,8 +381,8 @@ bool IsTypeCompatible(MIRType *typeA, MIRType *typeB) { } return false; } - if (TypeBasedAliasAnalysis::IsFieldTypeOfAggType(typeA, typeB) || - TypeBasedAliasAnalysis::IsFieldTypeOfAggType(typeB, typeA)) { + if (TypeBasedAliasAnalysis::IsFieldTypeOfAggType(typeA, *typeB) || + TypeBasedAliasAnalysis::IsFieldTypeOfAggType(typeB, *typeA)) { return true; } return false; @@ -417,18 +417,18 @@ static void GetPossibleFieldID(MIRType *aggType, MIRType *checkedType, std::vect MIRType *fieldType = fieldsTypeVec[i]; if (fieldType == checkedType) { - fieldIDs.emplace_back(i); + (void)fieldIDs.emplace_back(i); continue; } if (IsPrimitiveScalar(checkedType->GetPrimType()) && fieldType->GetPrimType() == checkedType->GetPrimType()) { - fieldIDs.emplace_back(i); + (void)fieldIDs.emplace_back(i); continue; } if (fieldType->IsMIRArrayType()) { if (static_cast(fieldType)->GetElemType() == checkedType || (checkedType->IsMIRArrayType() && IsArrayTypeCompatible(fieldType, checkedType))) { - fieldIDs.emplace_back(i); + (void)fieldIDs.emplace_back(i); } } } @@ -555,7 +555,7 @@ bool TypeWithSameSizeEmbedded(MIRType *aggType, MIRType *checkedType) { // when aggTypeB is embedded in aggTypeA, check if ostA alias with ostB. 
bool MayAliasForAggTypeNest(MIRType *aggTypeA, const OriginalSt *ostA, MIRType *aggTypeB, const OriginalSt *ostB) { MIRType *typeA = ostA->GetType(); - if (TypeBasedAliasAnalysis::IsFieldTypeOfAggType(typeA, aggTypeB)) { // aggTypeB is field type of typeA + if (TypeBasedAliasAnalysis::IsFieldTypeOfAggType(typeA, *aggTypeB)) { // aggTypeB is field type of typeA return true; } FieldID fieldIdB = ostB->GetFieldID(); @@ -595,7 +595,7 @@ bool MayAliasOstAndType(const OriginalSt *ost, MIRType *checkedType) { } if (sizeA < sizeB) { // fieldNumA <= fieldNumB is also true implicitly. // check if aggType can be embedded in checkedType - return TypeBasedAliasAnalysis::IsFieldTypeOfAggType(checkedType, aggType); + return TypeBasedAliasAnalysis::IsFieldTypeOfAggType(checkedType, *aggType); } else if (sizeA == sizeB) { if (fieldNumA == fieldNumB) { // <[1] <$struct>> and <$struct> has same size and fieldNum if (aggType->GetKind() == kTypeArray) { @@ -610,8 +610,9 @@ bool MayAliasOstAndType(const OriginalSt *ost, MIRType *checkedType) { } } else { // sizeA > sizeB (fieldNumA >= fieldNumB is also true implicitly) // check if checkedType can be embedded in aggType, and overlap with ost - if ((ostType->GetPrimType() == PTY_agg && TypeBasedAliasAnalysis::IsFieldTypeOfAggType(ostType, checkedType)) || - (checkedType->GetPrimType() == PTY_agg && TypeBasedAliasAnalysis::IsFieldTypeOfAggType(checkedType, ostType))) { + if ((ostType->GetPrimType() == PTY_agg && TypeBasedAliasAnalysis::IsFieldTypeOfAggType(ostType, *checkedType)) || + (checkedType->GetPrimType() == PTY_agg && TypeBasedAliasAnalysis::IsFieldTypeOfAggType(checkedType, + *ostType))) { return true; } MIRStructType *structType = aggType->EmbeddedStructType(); @@ -684,7 +685,8 @@ static bool MayMemoryOverlap( auto getValidOffsetValue = [](const OriginalSt &ost, const MIRType *aggType) { auto fieldId = ost.GetFieldID(); auto &offset = ost.GetOffset(); - if (!aggType || (offset.val < static_cast(GetTypeBitSize(*aggType)) && 
(!offset.IsInvalid() || fieldId))) { + if (!aggType || (offset.val < static_cast(GetTypeBitSize(*aggType)) && + (!offset.IsInvalid() || fieldId != 0))) { return static_cast(offset.val); } return aggType->GetBitOffsetFromBaseAddr(fieldId); @@ -776,33 +778,32 @@ static bool MayAliasForVirtualOstOfVoidPtr( return IsMemoryOverlap(offsetA, GetTypeBitSize(*ostA.GetType()), offsetB, GetTypeBitSize(*ostB.GetType())); } -bool TypeBasedAliasAnalysis::IsFieldTypeOfAggType(MIRType *aggType, MIRType *checkedType) { +bool TypeBasedAliasAnalysis::IsFieldTypeOfAggType(MIRType *aggType, MIRType &checkedType) { ASSERT_NOT_NULL(aggType); - if (aggType == checkedType) { + if (aggType == &checkedType) { return true; } if (aggType->GetPrimType() != PTY_agg) { return false; } - ASSERT_NOT_NULL(checkedType); - if (aggType->GetSize() < checkedType->GetSize() || aggType->NumberOfFieldIDs() < checkedType->NumberOfFieldIDs()) { + if (aggType->GetSize() < checkedType.GetSize() || aggType->NumberOfFieldIDs() < checkedType.NumberOfFieldIDs()) { return false; } if (compatibleTypeCache.find(aggType) != compatibleTypeCache.end()) { auto &fieldsTypeMap = compatibleTypeCache[aggType]; - if (fieldsTypeMap.find(checkedType) != fieldsTypeMap.end()) { - return fieldsTypeMap[checkedType]; + if (fieldsTypeMap.find(&checkedType) != fieldsTypeMap.end()) { + return fieldsTypeMap[&checkedType]; } } bool res = false; if (aggType->IsStructType()) { - res = IsFieldTypeOfStructType(static_cast(aggType), checkedType); + res = IsFieldTypeOfStructType(static_cast(aggType), &checkedType); } else if (aggType->GetKind() == kTypeArray) { - res = IsFieldTypeOfArrayType(static_cast(aggType), checkedType); + res = IsFieldTypeOfArrayType(static_cast(aggType), &checkedType); } else if (aggType->GetKind() == kTypeFArray) { - res = IsFieldTypeOfArrayType(static_cast(aggType), checkedType); + res = IsFieldTypeOfArrayType(static_cast(aggType), &checkedType); } - (void)compatibleTypeCache[aggType].emplace(checkedType, res); + 
(void)compatibleTypeCache[aggType].emplace(&checkedType, res); return res; } @@ -872,10 +873,10 @@ bool TypeBasedAliasAnalysis::MayAliasTBAAForC(const OriginalSt *ostA, const Orig if (MayAliasForVirtualOstOfVoidPtr(*ostA, *aggTypeA, *ostB, *aggTypeB)) { return true; } - if (IsFieldTypeOfAggType(aggTypeA, aggTypeB)) { // aggTypeB is embedded in aggTypeA + if (IsFieldTypeOfAggType(aggTypeA, *aggTypeB)) { // aggTypeB is embedded in aggTypeA return MayAliasForAggTypeNest(aggTypeA, ostA, aggTypeB, ostB); } - if (IsFieldTypeOfAggType(aggTypeB, aggTypeA)) { + if (IsFieldTypeOfAggType(aggTypeB, *aggTypeA)) { return MayAliasForAggTypeNest(aggTypeB, ostB, aggTypeA, ostA); } return false; @@ -883,7 +884,7 @@ bool TypeBasedAliasAnalysis::MayAliasTBAAForC(const OriginalSt *ostA, const Orig // return true if can filter this aliasElemOst, otherwise return false; bool TypeBasedAliasAnalysis::FilterAliasElemOfRHSForIassign( - const OriginalSt *aliasElemOst, const OriginalSt *lhsOst, const OriginalSt *rhsOst) { + const OriginalSt &aliasElemOst, const OriginalSt &lhsOst, const OriginalSt &rhsOst) { if (!MeOption::tbaa) { return false; } @@ -892,6 +893,6 @@ bool TypeBasedAliasAnalysis::FilterAliasElemOfRHSForIassign( // if rhs is not alias with rhs, their memories completely not overlap. // Rhs may def itself if they overlap, but its value is the same as before. 
// So we skip inserting maydef for ost the same as rhs here - return (aliasElemOst == rhsOst && rhsOst->GetTyIdx() == lhsOst->GetTyIdx()); + return (&aliasElemOst == &rhsOst && rhsOst.GetTyIdx() == lhsOst.GetTyIdx()); } } // namespace maple diff --git a/src/mapleall/maple_pgo/include/cfg_mst.h b/src/mapleall/maple_pgo/include/cfg_mst.h index fda24156cca9608de4353b5043c2c59cf253fe7a..ea1ce851c25b470ad0cc9902ce9bc944489d1b08 100644 --- a/src/mapleall/maple_pgo/include/cfg_mst.h +++ b/src/mapleall/maple_pgo/include/cfg_mst.h @@ -23,7 +23,9 @@ template class CFGMST { public: explicit CFGMST(MemPool &mp) : mp(&mp), alloc(&mp), allEdges(alloc.Adapter()), bbGroups(alloc.Adapter()) {} - virtual ~CFGMST() = default; + virtual ~CFGMST() { + mp = nullptr; + } void ComputeMST(BB *commonEntry, BB *commonExit); void BuildEdges(BB *commonEntry, BB *commonExit); void SortEdges(); diff --git a/src/mapleall/maple_pgo/include/instrument.h b/src/mapleall/maple_pgo/include/instrument.h index 743d067dd4569942767a8c3bcaedea14c6df9085..f2170b4d7b7b517edba0c73ed4f119709122e664 100644 --- a/src/mapleall/maple_pgo/include/instrument.h +++ b/src/mapleall/maple_pgo/include/instrument.h @@ -117,7 +117,7 @@ class PGOInstrumentTemplate { public: explicit PGOInstrumentTemplate(MemPool &mp) : mst(mp) {} - void GetInstrumentBBs(std::vector &bbs, IRBB *commonEnty) const; + void GetInstrumentBBs(std::vector &bbs, IRBB *commonEntry) const; void PrepareInstrumentInfo(IRBB *commonEntry, IRBB* commmonExit) { mst.ComputeMST(commonEntry, commmonExit); } @@ -204,6 +204,8 @@ class BBUseInfo { BBUseEdge *GetOnlyUnknownInEdges(); + void Dump(); + private: bool valid = false; uint64 countValue = 0; diff --git a/src/mapleall/maple_pgo/include/litepgo.h b/src/mapleall/maple_pgo/include/litepgo.h index 99accb300ccca426cbe80620aa2fb653890fd8be..4ade720589009280a9f01ba18cf7fd54e9c0d6f2 100644 --- a/src/mapleall/maple_pgo/include/litepgo.h +++ b/src/mapleall/maple_pgo/include/litepgo.h @@ -1,18 +1,34 @@ +/* + * 
Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ #ifndef OPENARKCOMPILER_LITEPGO_H #define OPENARKCOMPILER_LITEPGO_H -#include "types_def.h" #include #include #include +#include "types_def.h" namespace maple { class MIRLexer; +class MIRModule; class LiteProfile { public: struct BBInfo { uint32 funcHash = 0; std::vector counter; + std::pair verified = {false, false}; BBInfo() = default; BBInfo(uint64 hash, std::vector &&counter) : funcHash(hash), counter(counter) {} @@ -21,14 +37,14 @@ class LiteProfile { ~BBInfo() = default; }; // default get all kind profile - bool HandleLitePGOFile(const std::string &fileName, const std::string &moduleName); + bool HandleLitePGOFile(const std::string &fileName, MIRModule &m); bool HandleLitePgoWhiteList(const std::string &fileName) const; BBInfo *GetFuncBBProf(const std::string &funcName); - bool isExtremelyCold(const std::string &funcName) { + bool IsExtremelyCold(const std::string &funcName) { return extremelyColdFuncs.count(funcName); } static bool IsInWhiteList(const std::string &funcName) { - return whiteList.empty() ? true : whiteList.count(funcName); + return whiteList.empty() ? 
true : (whiteList.count(funcName) != 0); } static uint32 GetBBNoThreshold() { return bbNoThreshold; @@ -46,4 +62,4 @@ class LiteProfile { void ParseCounters(MIRLexer &fdLexer, const std::string &funcName, uint32 cfghash); }; } -#endif //OPENARKCOMPILER_LITEPGO_H +#endif // OPENARKCOMPILER_LITEPGO_H diff --git a/src/mapleall/maple_pgo/pgo_lib/CMakeLists.txt b/src/mapleall/maple_pgo/pgo_lib/CMakeLists.txt index 7c84e3e42df6d837c6380a94bff33e1885ffa7e1..dfaa00d0940736d5c179798492b5c1fd8aea08a8 100644 --- a/src/mapleall/maple_pgo/pgo_lib/CMakeLists.txt +++ b/src/mapleall/maple_pgo/pgo_lib/CMakeLists.txt @@ -4,7 +4,17 @@ project(mplpgo_c C) set(CMAKE_C_STANDARD 99) set(MAPLE_ROOT $ENV{MAPLE_ROOT}) +set(SYS_NAME $ENV{BiShengC_GET_OS_VERSION}) set(GCC_LINARO_PATH ${MAPLE_ROOT}/tools/gcc-linaro-7.5.0) set(CMAKE_C_COMPILER ${GCC_LINARO_PATH}/bin/aarch64-linux-gnu-gcc) add_library(mplpgo SHARED mplpgo.c mplpgo.h common_util.h) +add_library(mplpgo_static STATIC mplpgo.c mplpgo.h common_util.h) + +SET_TARGET_PROPERTIES(mplpgo_static PROPERTIES OUTPUT_NAME mplpgo) + +SET_TARGET_PROPERTIES(mplpgo_static PROPERTIES CLEAN_DIRECT_OUTPUT 1) +SET_TARGET_PROPERTIES(mplpgo PROPERTIES CLEAN_DIRECT_OUTPUT 1) + +# set output directory +SET(LIBRARY_OUTPUT_PATH ${MAPLE_ROOT}/libpgo/lib_${SYS_NAME}) diff --git a/src/mapleall/maple_pgo/pgo_lib/common_util.h b/src/mapleall/maple_pgo/pgo_lib/common_util.h index 5c7a4155fd2e5ceed3269d5c2b5a848ba5caf67f..33da5b6dcd0456458364246b5486d1a0946c7bf9 100644 --- a/src/mapleall/maple_pgo/pgo_lib/common_util.h +++ b/src/mapleall/maple_pgo/pgo_lib/common_util.h @@ -1,3 +1,17 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ #ifndef MPLPGO_C_COMMON_UTIL_H #define MPLPGO_C_COMMON_UTIL_H #include @@ -15,33 +29,36 @@ typedef unsigned long long int uint64_t; typedef long int int64_t; -#define O_RDONLY 00 -#define O_WRONLY 01 -#define O_RDWR 02 +#define O_RDONLY 00 +#define O_WRONLY 01 +#define O_RDWR 02 #ifndef O_CREAT -#define O_CREAT 0100 /* Not fcntl. */ +#define O_CREAT 0100 // Not fcntl. #endif #ifndef O_APPEND -# define O_APPEND 02000 +# define O_APPEND 02000 +#endif +#ifndef O_TRUNC +#define O_TRUNC 01000 #endif -#define MAP_SHARED 0x01 /* Share changes. */ -#define MAP_PRIVATE 0x02 /* Changes are private. */ -#define MAP_FIXED 0x10 /* Interpret addr exactly */ -#define MAP_ANONYMOUS 0x20 /* Don't use a file. */ +#define MAP_SHARED 0x01 // Share changes. +#define MAP_PRIVATE 0x02 // Changes are private. +#define MAP_FIXED 0x10 // Interpret addr exactly +#define MAP_ANONYMOUS 0x20 // Don't use a file. #define MAP_ANON MAP_ANONYMOUS -# define SEEK_SET 0 /* Seek from beginning of file. */ -# define SEEK_CUR 1 /* Seek from current position. */ -# define SEEK_END 2 /* Seek from end of file. */ -#define PROT_READ 0x1 /* Page can be read. */ -#define PROT_WRITE 0x2 /* Page can be written. */ -#define PROT_EXEC 0x4 /* Page can be executed. */ -#define PROT_NONE 0x0 /* Page can not be accessed. */ -/* Flags to `msync'. */ -#define MS_ASYNC 1 /* Sync memory asynchronously. */ -#define MS_SYNC 4 /* Synchronous memory sync. */ -#define MS_INVALIDATE 2 /* Invalidate the caches. */ +# define SEEK_SET 0 // Seek from beginning of file. +# define SEEK_CUR 1 // Seek from current position. +# define SEEK_END 2 // Seek from end of file. 
+#define PROT_READ 0x1 // Page can be read. +#define PROT_WRITE 0x2 // Page can be written. +#define PROT_EXEC 0x4 // Page can be executed. +#define PROT_NONE 0x0 // Page can not be accessed. +// Flags to `msync'. +#define MS_ASYNC 1 // Sync memory asynchronously. +#define MS_SYNC 4 // Synchronous memory sync. +#define MS_INVALIDATE 2 // Invalidate the caches. /* implement in arm v8 */ uint64_t __nanosleep(const struct timespec *req, struct timespec *rem) { @@ -167,11 +184,11 @@ void *__mmap(uint64_t addr, uint64_t size, uint64_t prot, uint64_t flags, void current_time_to_buf(char* buf) { time_t timer; - struct tm* tm_info; + struct tm tm_info; timer = time(NULL); - tm_info = localtime(&timer); - strftime(buf, TIMEBUFSIZE, "%Y-%m-%d %H:%M:%S", tm_info); + (void)localtime_r(&timer, &tm_info); + strftime(buf, TIMEBUFSIZE, "%Y-%m-%d %H:%M:%S", &tm_info); } #define SAVE_ALL \ diff --git a/src/mapleall/maple_pgo/pgo_lib/mplpgo.c b/src/mapleall/maple_pgo/pgo_lib/mplpgo.c index 75a835266e47f4cc05f9d342dd54afe9a19675b4..4ff0cca59e5648eaa1ed9c019c67f60162317532 100644 --- a/src/mapleall/maple_pgo/pgo_lib/mplpgo.c +++ b/src/mapleall/maple_pgo/pgo_lib/mplpgo.c @@ -1,7 +1,7 @@ #include "mplpgo.h" #include -struct Mpl_Lite_Pgo_ProfileInfoRoot __mpl_pgo_info_root; +struct Mpl_Lite_Pgo_ProfileInfoRoot __mpl_pgo_info_root __attribute__ ((__visibility__ ("hidden"))) = {0, 0, 0}; extern uint32_t __mpl_pgo_sleep_time; extern char __mpl_pgo_wait_forks; extern char *__mpl_pgo_dump_filename; @@ -59,7 +59,7 @@ static inline void EmitFlavor(int fd) { static inline void WriteToFile(const struct Mpl_Lite_Pgo_ObjectFileInfo *fInfo) { size_t txtLen = 0; struct Mpl_Lite_Pgo_DumpInfo *head = NULL; - int fd = open(__mpl_pgo_dump_filename, O_RDWR | O_APPEND | O_CREAT, 0640); + int fd = open(__mpl_pgo_dump_filename, O_RDWR | O_TRUNC | O_CREAT, 0640); if (fd == -1) { perror("Error opening mpl_pgo_dump file"); return; @@ -192,6 +192,7 @@ void __mpl_pgo_dump_wrapper() { pthread_rwlock_init(&rwlock, 
NULL); if (((unsigned int)(__mpl_pgo_info_root.dumpOnce) % 10000ul) == 0) { WriteToFile(__mpl_pgo_info_root.ofileInfoList); + __mpl_pgo_flush_counter(); } __mpl_pgo_info_root.dumpOnce++; pthread_rwlock_destroy(&rwlock); diff --git a/src/mapleall/maple_pgo/pgo_lib/mplpgo.h b/src/mapleall/maple_pgo/pgo_lib/mplpgo.h index de31d5a8f8fc5b1901f877233f09115eab70b298..01e695197582ca1567f675349d81c7aabdd85ab6 100644 --- a/src/mapleall/maple_pgo/pgo_lib/mplpgo.h +++ b/src/mapleall/maple_pgo/pgo_lib/mplpgo.h @@ -1,3 +1,17 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ #ifndef MPLPGO_C_LIBRARY_H #define MPLPGO_C_LIBRARY_H #include "common_util.h" @@ -30,9 +44,6 @@ struct Mpl_Lite_Pgo_ProfileInfoRoot { int setUp; }; -extern struct Mpl_Lite_Pgo_ProfileInfoRoot __mpl_pgo_info_root __attribute__ ((__visibility__ ("hidden"))) = - {0, 0, 0}; - void __mpl_pgo_setup(); void __mpl_pgo_init(struct Mpl_Lite_Pgo_ObjectFileInfo *fileInfo); void __mpl_pgo_exit(); diff --git a/src/mapleall/maple_pgo/src/cfg_mst.cpp b/src/mapleall/maple_pgo/src/cfg_mst.cpp index 82223ed80b944150fd268a0a5ad8a52f09a72aa5..a709c20bb20dae9dae5050aa90daccd8738ed2fc 100644 --- a/src/mapleall/maple_pgo/src/cfg_mst.cpp +++ b/src/mapleall/maple_pgo/src/cfg_mst.cpp @@ -92,7 +92,7 @@ void CFGMST::SortEdges() { template uint32 CFGMST::FindGroup(uint32 bbId) { - CHECK_FATAL(bbGroups.count(bbId), "unRegister bb"); + CHECK_FATAL(bbGroups.count(bbId) != 0, "unRegister bb"); if (bbGroups[bbId] != bbId) { bbGroups[bbId] = FindGroup(bbGroups[bbId]); } diff --git a/src/mapleall/maple_pgo/src/instrument.cpp b/src/mapleall/maple_pgo/src/instrument.cpp index e0d59e31d14b7e84996b6f27d4451805d3914737..0213f27bc2ea672bb24f7fa993c0cf45d7aa6ba3 100644 --- a/src/mapleall/maple_pgo/src/instrument.cpp +++ b/src/mapleall/maple_pgo/src/instrument.cpp @@ -15,8 +15,9 @@ #include "instrument.h" #include "cgbb.h" - #include "mir_builder.h" +#include "mpl_logging.h" + namespace maple { std::string GetProfCntSymbolName(const std::string &funcName, PUIdx idx) { return funcName + "_" + std::to_string(idx) + "_counter"; @@ -37,8 +38,8 @@ static inline void RegisterInFuncInfo(MIRFunction &func, const MIRSymbol &counte MIRIntConst *eleCntMirConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(elemCnt, *u32Ty); auto *counterConst = func.GetModule()->GetMemPool()->New( counter.GetStIdx(), 0, *GlobalTables::GetTypeTable().GetPtr()); - funcInfoMirConst->SetItem(1, cfgHashConst,2); - funcInfoMirConst->SetItem(2, eleCntMirConst,3); + funcInfoMirConst->SetItem(1, cfgHashConst, 2); + 
funcInfoMirConst->SetItem(2, eleCntMirConst, 3); funcInfoMirConst->SetItem(3, counterConst, 4); } @@ -120,6 +121,22 @@ BBUseEdge *BBUseInfo::GetOnlyUnknownInEdges() { return ouEdge; } +template +void BBUseInfo::Dump() { + for (const auto &inE : inEdges) { + if (inE->GetStatus()) { + LogInfo::MapleLogger() << inE->GetSrcBB()->GetId() << "->" << + inE->GetDestBB()->GetId() << " c : " << inE->GetCount() << "\n"; + } + } + for (const auto &outE : outEdges) { + if (outE->GetStatus()) { + LogInfo::MapleLogger() << outE->GetSrcBB()->GetId() << "->" << + outE->GetDestBB()->GetId() << " c : " << outE->GetCount() << "\n"; + } + } +} + template class PGOInstrumentTemplate>; template class PGOInstrumentTemplate>; template class BBUseInfo; diff --git a/src/mapleall/maple_pgo/src/litepgo.cpp b/src/mapleall/maple_pgo/src/litepgo.cpp index 45c84371731e0161b7b49a6689cfa7ce30c81250..6d6187095ca31545092ba439dfa8f5bb6f3cc7c0 100644 --- a/src/mapleall/maple_pgo/src/litepgo.cpp +++ b/src/mapleall/maple_pgo/src/litepgo.cpp @@ -1,6 +1,24 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ #include "litepgo.h" +#include #include "itab_util.h" #include "lexer.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "mir_module.h" namespace maple { bool LiteProfile::loaded = false; @@ -22,18 +40,17 @@ LiteProfile::BBInfo *LiteProfile::GetFuncBBProf(const std::string &funcName) { return &item->second; } -bool LiteProfile::HandleLitePGOFile(const std::string &fileName, const std::string &moduleName) { +bool LiteProfile::HandleLitePGOFile(const std::string &fileName, MIRModule &m) { if (loaded) { LogInfo::MapleLogger() << "this Profile has been handled before" << '\n'; return false; } loaded = true; + const std::string moduleName = m.GetFileName(); /* init a lexer for parsing lite-pgo function data */ - MemPool *funcDatatMp = memPoolCtrler.NewMemPool("LitePgoFuncData Mempool", true); - MapleAllocator funcDataMa(funcDatatMp); - MIRLexer funcDataLexer(nullptr, funcDataMa); + MIRLexer funcDataLexer(nullptr, m.GetMPAllocator()); funcDataLexer.PrepareForFile(fileName); - funcDataLexer.NextToken(); + (void)funcDataLexer.NextToken(); bool atEof = false; while (!atEof) { TokenKind currentTokenKind = funcDataLexer.GetTokenKind(); @@ -42,26 +59,29 @@ bool LiteProfile::HandleLitePGOFile(const std::string &fileName, const std::stri continue; } if (currentTokenKind == TK_flavor) { - /* skip beginning of lite-pgo data, not implement yet */ + /* + * skip beginning of lite-pgo data, not implement yet + * refresh profile when a new set of pgo data comes in + */ + funcBBProfData.clear(); } if (currentTokenKind == TK_func) { ParseFuncProfile(funcDataLexer, moduleName); continue; } - funcDataLexer.NextToken(); + (void)funcDataLexer.NextToken(); } - delete funcDatatMp; return true; } /* lite-pgo keyword format ";${keyword}" */ static inline void ParseLitePgoKeyWord(MIRLexer &fdLexer, const std::string &keyWord) { /* parse counterSz */ - fdLexer.NextToken(); + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_coma) { CHECK_FATAL_FALSE("expect coma here 
"); } - fdLexer.NextToken(); + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_invalid) { CHECK_FATAL_FALSE("expect string after coma "); } @@ -87,18 +107,18 @@ static inline bool VerifyModuleHash(uint64 pgoId, const std::string &moduleName) */ void LiteProfile::ParseFuncProfile(MIRLexer &fdLexer, const std::string &moduleName) { /* parse func name */ - fdLexer.NextToken(); + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_fname) { CHECK_FATAL_FALSE("expect function name for func"); } const std::string funcName = fdLexer.GetName(); /* parse funcid */ - fdLexer.NextToken(); + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_funcid) { CHECK_FATAL_FALSE("expect funcid here"); } - fdLexer.NextToken(); + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_intconst) { CHECK_FATAL_FALSE("expect integer after funcid "); } @@ -111,17 +131,17 @@ void LiteProfile::ParseFuncProfile(MIRLexer &fdLexer, const std::string &moduleN return; } - /* parse counterSz */ + // parse counterSz ParseLitePgoKeyWord(fdLexer, "counterSz"); - fdLexer.NextToken(); + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_intconst) { CHECK_FATAL_FALSE("expect integer after counterSz "); } uint64 countersize = fdLexer.GetTheIntVal(); - /* parse cfghash */ + // parse cfghash ParseLitePgoKeyWord(fdLexer, "cfghash"); - fdLexer.NextToken(); + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_intconst) { CHECK_FATAL_FALSE("expect integer after counterSz "); } @@ -130,13 +150,13 @@ void LiteProfile::ParseFuncProfile(MIRLexer &fdLexer, const std::string &moduleN CHECK_FATAL_FALSE("unexpect cfg hash data type"); } - /* parse counters*/ - fdLexer.NextToken(); + // parse counters + (void)fdLexer.NextToken(); if (fdLexer.GetTokenKind() != TK_coma) { CHECK_FATAL_FALSE("expect coma here "); } - fdLexer.NextToken(); - if (countersize) { + (void)fdLexer.NextToken(); + if (countersize != 0) { ParseCounters(fdLexer, funcName, static_cast(cfghash)); } else { 
LogInfo::MapleLogger() << "LITEPGO log : func " << funcName << " --- no counters?" << '\n'; @@ -160,7 +180,7 @@ void LiteProfile::ParseCounters(MIRLexer &fdLexer, const std::string &funcName, BBInfo bbInfo(cfghash, std::move(temp)); funcBBProfData.emplace(funcName, bbInfo); } - fdLexer.NextToken(); + (void)fdLexer.NextToken(); } } @@ -170,7 +190,7 @@ bool LiteProfile::HandleLitePgoWhiteList(const std::string &fileName) const { MapleAllocator whitelistMa(whitelistMp); MIRLexer whiteListLexer(nullptr, whitelistMa); whiteListLexer.PrepareForFile(fileName); - whiteListLexer.NextToken(); + (void)whiteListLexer.NextToken(); bool atEof = false; while (!atEof) { TokenKind currentTokenKind = whiteListLexer.GetTokenKind(); @@ -180,7 +200,7 @@ bool LiteProfile::HandleLitePgoWhiteList(const std::string &fileName) const { } if (currentTokenKind == TK_invalid) { whiteList.emplace(whiteListLexer.GetName()); - whiteListLexer.NextToken(); + (void)whiteListLexer.NextToken(); } else { CHECK_FATAL(false, "unexpected format in instrumentation white list"); delete whitelistMp; @@ -195,5 +215,3 @@ std::set LiteProfile::whiteList = {}; uint32 LiteProfile::bbNoThreshold = 100000; } - - diff --git a/src/mapleall/maple_phase/include/maple_phase.h b/src/mapleall/maple_phase/include/maple_phase.h index 3a40125955c2bbd8e3d3e550e85d27684620b524..e4769b93add16a75bc521f0d9f4afdc9890b7800 100644 --- a/src/mapleall/maple_phase/include/maple_phase.h +++ b/src/mapleall/maple_phase/include/maple_phase.h @@ -15,6 +15,7 @@ #ifndef MAPLE_PHASE_INCLUDE_MAPLE_PHASE_H #define MAPLE_PHASE_INCLUDE_MAPLE_PHASE_H #include "maple_phase_support.h" + namespace maple { class MaplePhase; class AnalysisInfoHook; @@ -159,7 +160,7 @@ class PHASENAME : public MapleSccPhase { \ static MaplePhase *CreatePhase(MemPool *createMP) { \ return createMP->New(createMP); \ } \ - bool PhaseRun(IRTYPE &scc) override; + bool PhaseRun(IRTYPE &scc) override; // can not add parentheses for IRTYPE, it needs to take "&" #define 
MAPLE_SCC_PHASE_DECLARE_END \ }; @@ -211,10 +212,10 @@ static RegisterPhase MAPLEPHASE_##PHASENAME(#PHASENAME, false, false, #define GET_ANALYSIS(PHASENAME, PHASEKEY) \ static_cast( \ - GetAnalysisInfoHook()->FindAnalysisData((PHASEKEY).GetUniqueID(), this, &PHASENAME::id))->GetResult() + GetAnalysisInfoHook()->FindAnalysisData((PHASEKEY).GetUniqueID(), *this, &PHASENAME::id))->GetResult() #define EXEC_ANALYSIS(PHASENAME, PHASEKEY) \ -static_cast(GetAnalysisInfoHook()->FindAnalysisData((PHASEKEY).GetUniqueID(), this, &PHASENAME::id)) +static_cast(GetAnalysisInfoHook()->FindAnalysisData((PHASEKEY).GetUniqueID(), *this, (&PHASENAME::id))) #define FORCE_GET(PHASENAME) \ static_cast( \ diff --git a/src/mapleall/maple_phase/include/maple_phase_manager.h b/src/mapleall/maple_phase/include/maple_phase_manager.h index 6cdc8d9545ab63cb9fd8da73b0309afd7a2f740c..e6e6af0e1fb8a96b9ca305a4e7e9f803d550d781 100644 --- a/src/mapleall/maple_phase/include/maple_phase_manager.h +++ b/src/mapleall/maple_phase/include/maple_phase_manager.h @@ -14,6 +14,7 @@ */ #ifndef MAPLE_PHASE_INCLUDE_MAPLE_PHASE_MANAGER_H #define MAPLE_PHASE_INCLUDE_MAPLE_PHASE_MANAGER_H +#include #include "maple_phase.h" #include "me_option.h" #include "call_graph.h" @@ -122,14 +123,14 @@ class MaplePhaseManager { void InitTimeHandler(uint32 threadNum = 1) { phaseTh = GetManagerMemPool()->New(*GetManagerMemPool(), threadNum); } - void DumpPhaseTime(); + void DumpPhaseTime() const; /* threadMP is given by thread local mempool */ AnalysisDataManager *ApplyAnalysisDataManager(const std::thread::id threadID, MemPool &threadMP); AnalysisDataManager *GetAnalysisDataManager(const std::thread::id threadID, MemPool &threadMP); /* mempool */ - std::unique_ptr AllocateMemPoolInPhaseManager(const std::string &mempoolName); + std::unique_ptr AllocateMemPoolInPhaseManager(const std::string &mempoolName) const; bool UseGlobalMpCtrler() const { return useGlobalMpCtrler; } @@ -182,13 +183,13 @@ class AnalysisInfoHook { return 
adManager.GetVaildAnalysisPhase(phaseKey, id); } - MaplePhase *FindAnalysisData(uint32 phaseKey, const MaplePhase *p, MaplePhaseID id) { - const auto anaPhaseInfoIt = analysisPhasesData.find(AnalysisMemKey(phaseKey, id)); + MaplePhase *FindAnalysisData(uint32 phaseKey, const MaplePhase &p, MaplePhaseID id) { + const auto anaPhaseInfoIt = std::as_const(analysisPhasesData).find(AnalysisMemKey(phaseKey, id)); if (anaPhaseInfoIt != analysisPhasesData.end()) { return anaPhaseInfoIt->second; } else { /* fill all required analysis phase at first time */ - AnalysisDep *anaDependence = bindingPM->FindAnalysisDep(*p); + AnalysisDep *anaDependence = bindingPM->FindAnalysisDep(p); for (auto requiredAnaPhase : anaDependence->GetRequiredPhase()) { const MaplePhaseInfo *requiredPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(requiredAnaPhase); @@ -208,12 +209,12 @@ class AnalysisInfoHook { MaplePhase *FindOverIRAnalyisData(const IRUnit &u) const { MaplePhase *it = dynamic_cast(bindingPM); ASSERT(it != nullptr, "find Over IR info failed"); - return it->GetAnalysisInfoHook()->FindAnalysisData(u.GetUniqueID(), it, &AIMPHASE::id); + return it->GetAnalysisInfoHook()->FindAnalysisData(u.GetUniqueID(), *it, &AIMPHASE::id); } /* Get analysis data directly which is at higher IR level */ template - MaplePhase *GetOverIRAnalyisData(const IRUnit &u) { + MaplePhase *GetOverIRAnalyisData(const IRUnit &u) const { MaplePhase *it = dynamic_cast(bindingPM); ASSERT(it != nullptr, "find Over IR info failed"); return it->GetAnalysisInfoHook()->GetAnalysisData(u.GetUniqueID(), &AIMPHASE::id); @@ -233,7 +234,7 @@ class AnalysisInfoHook { upperPhase = dynamic_cast(upperHook->bindingPM); upperHook = upperPhase->GetAnalysisInfoHook(); } - return curHook->FindAnalysisData(u.GetUniqueID(), curPhase, &AIMPHASE::id); + return curHook->FindAnalysisData(u.GetUniqueID(), *curPhase, &AIMPHASE::id); } MemPool *GetOverIRMempool() const { @@ -284,21 +285,21 @@ class AnalysisInfoHook { class 
ModulePM : public MaplePhase, public MaplePhaseManager { public: ModulePM(MemPool *mp, MaplePhaseID id) : MaplePhase(kModulePM, id, *mp), MaplePhaseManager(*mp) {} - virtual ~ModulePM() = default; + ~ModulePM() override = default; }; /* manages (function phases) & (loop/region phase managers) */ class FunctionPM : public MapleModulePhase, public MaplePhaseManager { public: FunctionPM(MemPool *mp, MaplePhaseID id) : MapleModulePhase(id, mp), MaplePhaseManager(*mp) {} - virtual ~FunctionPM() = default; + ~FunctionPM() override = default; }; /* manages (scc phases) */ class SccPM : public MapleModulePhase, public MaplePhaseManager { public: SccPM(MemPool *mp, MaplePhaseID id) : MapleModulePhase(id, mp), MaplePhaseManager(*mp) {} - virtual ~SccPM() = default; + ~SccPM() override = default; }; /* manages (function phases in function phase) */ @@ -306,7 +307,7 @@ template class FunctionPhaseGroup : public MapleFunctionPhase, public MaplePhaseManager { public: FunctionPhaseGroup(MemPool *mp, MaplePhaseID id) : MapleFunctionPhase(&id, mp), MaplePhaseManager(*mp) {} - virtual ~FunctionPhaseGroup() = default; + ~FunctionPhaseGroup() override = default; }; } #endif // MAPLE_PHASE_MANAGER_H diff --git a/src/mapleall/maple_phase/include/phase_driver.h b/src/mapleall/maple_phase/include/phase_driver.h index 40d65fde58fcb869ae6dab291107bfd33255eb19..a70abaa8ee0f942f8bcb70a56bc7cb01afdcedba 100644 --- a/src/mapleall/maple_phase/include/phase_driver.h +++ b/src/mapleall/maple_phase/include/phase_driver.h @@ -22,7 +22,7 @@ namespace maple { class PhaseDriverImpl : public MplTaskParam { public: PhaseDriverImpl() = default; - virtual ~PhaseDriverImpl() = default; + ~PhaseDriverImpl() override = default; virtual void GlobalInit() {} @@ -39,7 +39,10 @@ class PhaseDriver : public MplScheduler { public: explicit Task(void *currTarget, void *currParamEx = nullptr) : target(currTarget), paramException(currParamEx) {} - ~Task() = default; + ~Task() override { + paramException = nullptr; + 
target = nullptr; + } protected: int RunImpl(MplTaskParam *param) override { @@ -58,7 +61,10 @@ class PhaseDriver : public MplScheduler { }; explicit PhaseDriver(const std::string &phaseName); - virtual ~PhaseDriver() = default; + ~PhaseDriver() override { + module = nullptr; + phaseImpl = nullptr; + } void RunAll(MIRModule *currModule, int thread, bool bSeq = false); virtual void RunSerial(); diff --git a/src/mapleall/maple_phase/include/phase_impl.h b/src/mapleall/maple_phase/include/phase_impl.h index ceb6e229fa3f07bddf2eac5730020d2edbaaf101..1742e4e75ec377103b4097181cf828fe94635de3 100644 --- a/src/mapleall/maple_phase/include/phase_impl.h +++ b/src/mapleall/maple_phase/include/phase_impl.h @@ -23,7 +23,7 @@ namespace maple { class FuncOptimizeImpl : public MplTaskParam { public: explicit FuncOptimizeImpl(MIRModule &mod, KlassHierarchy *kh = nullptr, bool currTrace = false); - ~FuncOptimizeImpl(); + ~FuncOptimizeImpl() override; // Each phase needs to implement its own Clone virtual FuncOptimizeImpl *Clone() = 0; MIRModule &GetMIRModule() { @@ -75,7 +75,9 @@ class FuncOptimizeIterator : public MplScheduler { public: explicit Task(MIRFunction &func) : function(&func) {} - ~Task() = default; + ~Task() override { + function = nullptr; + } protected: int RunImpl(MplTaskParam *param) override { @@ -93,7 +95,7 @@ class FuncOptimizeIterator : public MplScheduler { }; FuncOptimizeIterator(const std::string &phaseName, std::unique_ptr phaseImpl); - ~FuncOptimizeIterator(); + ~FuncOptimizeIterator() override; virtual void Run(uint32 threadNum = 1, bool isSeq = false); protected: diff --git a/src/mapleall/maple_phase/include/phases.def b/src/mapleall/maple_phase/include/phases.def index cf5ef10ad2285c642c76e49a3050e5dbdd57a8fa..524bcf0f699d52f4c9a6c9335386f323fcb6fc4f 100644 --- a/src/mapleall/maple_phase/include/phases.def +++ b/src/mapleall/maple_phase/include/phases.def @@ -13,7 +13,6 @@ ADDMODULEPHASE("funcdeleter", CLANG && (!Options::profileUse && !Options::profil 
// run callgraph again to delete fully inlined static function ADDMODULEPHASE("callgraph", Options::O2 && Options::useInline && !Options::profileUse) ADDMODULEPHASE("simplify", (Options::O2 && !Options::genLMBC) || (CLANG && Options::profileGen)) -ADDMODULEPHASE("Expand128Floats", Options::O2) ADDMODULEPHASE("ConstantFold", Options::O2) // ipa phase manager ADDMODULEPHASE("IpaSccPM", CLANG && (Options::O2 || Options::profileGen || Options::profileUse)) @@ -24,7 +23,7 @@ ADDMODULEPHASE("ginline", CLANG && Options::O2 && Options::useInline && !Options // run callgraph, simplify, constantfold again after ginline ADDMODULEPHASE("callgraph", CLANG && Options::O2 && Options::useInline && Options::enableGInline) ADDMODULEPHASE("simplify", CLANG && Options::O2 && !Options::genLMBC && Options::enableGInline) -ADDMODULEPHASE("Expand128Floats", Options::O2) +ADDMODULEPHASE("Expand128Floats", CLANG) ADDMODULEPHASE("ConstantFold", CLANG && Options::O2 && Options::enableGInline) ADDMODULEPHASE("inline", CLANG && (Options::O2 && Options::useInline && Options::profileUse)) @@ -41,6 +40,7 @@ ADDMODULEPHASE("VtableImpl", JAVALANG) ADDMODULEPHASE("CodeReLayout", MeOption::optLevel == 2 && JAVALANG) ADDMODULEPHASE("javaehlower", JAVALANG) ADDMODULEPHASE("MUIDReplacement", JAVALANG) +ADDMODULEPHASE("Expand128Floats", true) ADDMODULEPHASE("ConstantFold", JAVALANG || Options::O2) #endif @@ -75,6 +75,7 @@ ADDMAPLEMEPHASE("autovec", CLANG && MeOption::optLevel >= 3 && MeOption::enableL ADDMAPLEMEPHASE("lfounroll", CLANG && MeOption::optLevel >= 2 && MeOption::enableLFO && MeOption::boundaryCheckMode == SafetyCheckMode::kNoCheck) ADDMAPLEMEPHASE("mecfgbuild", MeOption::optLevel >= 2 || JAVALANG) +ADDMAPLEMEPHASE("sra", MeOption::optLevel >= 2 && CLANG) ADDMAPLEMEPHASE("optimizeCFGNoSSA", CLANG && MeOption::optLevel >= 2) ADDMAPLEMEPHASE("codefactoring", CLANG && MeOption::optForSize) ADDMAPLEMEPHASE("bypatheh", JAVALANG && MeOption::optLevel >= 2) @@ -116,6 +117,7 @@ 
ADDMAPLEMEPHASE("safetyWarning", CLANG && MeOption::optLevel >= 2 && ADDMAPLEMEPHASE("sink", MeOption::optLevel >= 2) ADDMAPLEMEPHASE("copyprop", MeOption::optLevel >= 2 && !JAVALANG) ADDMAPLEMEPHASE("hdse", MeOption::optLevel >= 2 && !JAVALANG) +ADDMAPLEMEPHASE("tailcall", MeOption::optLevel >= 2 && !JAVALANG && Options::tailcall) ADDMAPLEMEPHASE("pregrename", MeOption::optLevel >= 2) ADDMAPLEMEPHASE("bblayout", MeOption::optLevel >= 2 || JAVALANG) ADDMAPLEMEPHASE("meemit", MeOption::optLevel >= 2 || JAVALANG) diff --git a/src/mapleall/maple_phase/src/maple_phase_manager.cpp b/src/mapleall/maple_phase/src/maple_phase_manager.cpp index a8010008af8ce1772f540642b04425ddcbae5326..c0471443d7e0168e184208ccac7e06a988f9dc57 100644 --- a/src/mapleall/maple_phase/src/maple_phase_manager.cpp +++ b/src/mapleall/maple_phase/src/maple_phase_manager.cpp @@ -13,6 +13,7 @@ * See the Mulan PSL v2 for more details. */ #include "maple_phase_manager.h" +#include #include "cgfunc.h" #include "mpl_timer.h" #include "me_function.h" @@ -43,8 +44,8 @@ void AnalysisDataManager::AddAnalysisPhase(uint32 phaseKey, MaplePhase *p) { // This is for the actully phase void AnalysisDataManager::EraseAnalysisPhase(uint32 phaseKey, MaplePhaseID pid) { auto it = analysisPhaseMemPool.find(AnalysisMemKey(phaseKey, pid)); - const auto itanother = availableAnalysisPhases.find(AnalysisMemKey(phaseKey, pid)); - if (it != analysisPhaseMemPool.cend() && itanother != availableAnalysisPhases.cend()) { + const auto &itanother = std::as_const(availableAnalysisPhases).find(AnalysisMemKey(phaseKey, pid)); + if (it != analysisPhaseMemPool.end() && itanother != availableAnalysisPhases.cend()) { auto resultanother = availableAnalysisPhases.erase(AnalysisMemKey(phaseKey, pid)); CHECK_FATAL(resultanother > 0, "Release Failed"); delete it->second; @@ -82,38 +83,39 @@ void AnalysisDataManager::EraseAnalysisPhase(MapleMapfirst.first == phaseKey) { - EraseAnalysisPhase(it); - } else { - ++it; - } + if 
(aDep.GetPreservedAll()) { + if (aDep.GetPreservedExceptPhase().empty()) { + return; + } + for (auto exceptPhaseID : aDep.GetPreservedExceptPhase()) { + auto it = availableAnalysisPhases.find(AnalysisMemKey(phaseKey, exceptPhaseID)); + if (it != availableAnalysisPhases.end()) { + EraseAnalysisPhase(it); } } + return; + } + // delete phases which are not preserved + if (aDep.GetPreservedPhase().empty()) { for (auto it = availableAnalysisPhases.begin(); it != availableAnalysisPhases.end();) { - if (!aDep.FindIsPreserved((it->first).second) && it->first.first == phaseKey) { + if (it->first.first == phaseKey) { EraseAnalysisPhase(it); } else { ++it; } } - } else { - if (!aDep.GetPreservedExceptPhase().empty()) { - for (auto exceptPhaseID : aDep.GetPreservedExceptPhase()) { - auto it = availableAnalysisPhases.find(AnalysisMemKey(phaseKey, exceptPhaseID)); - if (it != availableAnalysisPhases.end()) { - EraseAnalysisPhase(it); - } - } + } + for (auto it = availableAnalysisPhases.begin(); it != availableAnalysisPhases.end();) { + if (!aDep.FindIsPreserved((it->first).second) && it->first.first == phaseKey) { + EraseAnalysisPhase(it); + } else { + ++it; } } } MaplePhase *AnalysisDataManager::GetVaildAnalysisPhase(uint32 phaseKey, MaplePhaseID pid) { - const auto it = availableAnalysisPhases.find(AnalysisMemKey(phaseKey, pid)); + const auto it = std::as_const(availableAnalysisPhases).find(AnalysisMemKey(phaseKey, pid)); if (it == availableAnalysisPhases.cend()) { LogInfo::MapleLogger() << "Required " << MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(pid)->PhaseName() << " running before \n"; @@ -125,7 +127,7 @@ MaplePhase *AnalysisDataManager::GetVaildAnalysisPhase(uint32 phaseKey, MaplePha } bool AnalysisDataManager::IsAnalysisPhaseAvailable(uint32 phaseKey, MaplePhaseID pid) { - const auto it = availableAnalysisPhases.find(AnalysisMemKey(phaseKey, pid)); + const auto it = std::as_const(availableAnalysisPhases).find(AnalysisMemKey(phaseKey, pid)); return it != 
availableAnalysisPhases.cend(); } @@ -169,7 +171,7 @@ void MaplePhaseManager::AddPhase(const std::string &phaseName, bool condition) { } } -void MaplePhaseManager::DumpPhaseTime() { +void MaplePhaseManager::DumpPhaseTime() const { if (phaseTh != nullptr) { phaseTh->DumpPhasesTime(); phaseTh->Clear(); @@ -211,7 +213,7 @@ void MaplePhaseManager::SolveSkipAfter(const std::string &phaseName, size_t &i) AnalysisDep *MaplePhaseManager::FindAnalysisDep(const MaplePhase &phase) { AnalysisDep *anDependence = nullptr; - const auto anDepIt = analysisDepMap.find(phase.GetPhaseID()); + const auto anDepIt = std::as_const(analysisDepMap).find(phase.GetPhaseID()); if (anDepIt != analysisDepMap.cend()) { anDependence = anDepIt->second; } else { @@ -242,7 +244,8 @@ AnalysisDataManager *MaplePhaseManager::GetAnalysisDataManager(const std::thread } } -std::unique_ptr MaplePhaseManager::AllocateMemPoolInPhaseManager(const std::string &mempoolName) { +std::unique_ptr MaplePhaseManager::AllocateMemPoolInPhaseManager( + const std::string &mempoolName) const { if (!UseGlobalMpCtrler()) { LogInfo::MapleLogger() << " Inner Ctrler has not been supported yet \n"; } diff --git a/src/mapleall/maple_util/BUILD.gn b/src/mapleall/maple_util/BUILD.gn index 71233a417a3a9038fdc179d6e8a8196fb7c2607e..7c8d44925e6876a771da24192a3276e9897505bb 100755 --- a/src/mapleall/maple_util/BUILD.gn +++ b/src/mapleall/maple_util/BUILD.gn @@ -27,6 +27,7 @@ src_libmplutil = [ "src/error_code.cpp", "src/thread_env.cpp", "src/mpl_int_val.cpp", + "src/chain_layout.cpp", "src/mpl_profdata.cpp", "src/suffix_array.cpp", "src/mpl_posix_sighandler.cpp" @@ -42,6 +43,11 @@ include_libmplutil = [ "${MAPLEALL_ROOT}/maple_util/include", "${MAPLEALL_ROOT}/maple_pgo/include", "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/be", 
"${MAPLEALL_ROOT}/mempool/include", "${MAPLEALL_ROOT}/maple_driver/include", "${THIRD_PARTY_ROOT}/bounds_checking_function/include", diff --git a/src/mapleall/maple_util/CMakeLists.txt b/src/mapleall/maple_util/CMakeLists.txt index d76164a6d39f7da95ab690ef89daa71dc81b5d9f..c2f905add0bdb475b0196525c5021374fc07335a 100644 --- a/src/mapleall/maple_util/CMakeLists.txt +++ b/src/mapleall/maple_util/CMakeLists.txt @@ -28,6 +28,7 @@ set(src_libmplutil src/error_code.cpp src/thread_env.cpp src/mpl_int_val.cpp + src/chain_layout.cpp src/mpl_profdata.cpp src/suffix_array.cpp src/mpl_posix_sighandler.cpp @@ -44,6 +45,11 @@ add_library(libmplutil STATIC ${src_libmplutil}) target_include_directories(libmplutil PRIVATE ${MAPLEALL_ROOT}/maple_util/include ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/maple_me/include + ${MAPLEALL_ROOT}/mpl2mpl/include + ${MAPLEALL_ROOT}/maple_phase/include + ${MAPLEALL_ROOT}/maple_be/include/cg + ${MAPLEALL_ROOT}/maple_be/include/be ${MAPLEALL_ROOT}/mempool/include ${MAPLEALL_ROOT}/maple_driver/include ${MAPLEALL_ROOT}/maple_pgo/include diff --git a/src/mapleall/maple_util/include/base_graph_node.h b/src/mapleall/maple_util/include/base_graph_node.h index ae168a48a5909ff7b3af15eaf5fcb910fb3b080f..708da07dfb234d5e28b9351c152868300aba18bc 100644 --- a/src/mapleall/maple_util/include/base_graph_node.h +++ b/src/mapleall/maple_util/include/base_graph_node.h @@ -27,6 +27,10 @@ class BaseGraphNode { virtual void GetInNodes(std::vector &outNodes) const = 0; virtual const std::string GetIdentity() = 0; + virtual FreqType GetNodeFrequency() const { return 0; } + virtual FreqType GetEdgeFrequency(const BaseGraphNode &node) const { return 0; } + virtual FreqType GetEdgeFrequency(size_t idx) const { return 0; } + uint32 GetID() const { return id; } diff --git a/src/mapleall/maple_util/include/chain_layout.h b/src/mapleall/maple_util/include/chain_layout.h new file mode 100644 index 
0000000000000000000000000000000000000000..f4af251597b0a6b7433e77986fffffb650ab4afb --- /dev/null +++ b/src/mapleall/maple_util/include/chain_layout.h @@ -0,0 +1,868 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_UTIL_INCLUDE_CHAIN_LAYOUT_H +#define MAPLE_UTIL_INCLUDE_CHAIN_LAYOUT_H +#include "cg_dominance.h" +#include "me_function.h" +#include "me_loop_analysis.h" +#include "mpl_number.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "types_def.h" +#include "base_graph_node.h" +#include "cgbb.h" +#include "cgfunc.h" +#include "loop.h" + +namespace maple { +using NodeType = BaseGraphNode; + +// Data structure for loop +class LoopWrapperBase { + public: + virtual ~LoopWrapperBase() = default; + virtual NodeType *GetHeader() = 0; + virtual void GetLoopMembers(std::vector &nodeIds) const = 0; + virtual uint32 GetLoopDepth() const = 0; +}; + +class MeLoopWrapper : public LoopWrapperBase { + public: + explicit MeLoopWrapper(LoopDesc &meLoop) : loop(meLoop) {} + + ~MeLoopWrapper() override = default; + + NodeType *GetHeader() override { + return loop.head; + } + + void GetLoopMembers(std::vector &nodeIds) const override { + const auto &loopBBs = loop.loopBBs; + nodeIds.resize(loopBBs.size(), 0); + size_t i = 0; + for (const auto &bbId : loopBBs) { + nodeIds[i] = bbId.GetIdx(); + ++i; + } + } + + uint32 GetLoopDepth() const override { + return loop.nestDepth; + } + + private: + LoopDesc &loop; +}; + +class 
CGLoopWrapper : public LoopWrapperBase { + public: + explicit CGLoopWrapper(maplebe::CGFuncLoops &cgLoop) : loop(cgLoop) {} + + ~CGLoopWrapper() override = default; + + NodeType *GetHeader() override { + return loop.GetHeader(); + } + + void GetLoopMembers(std::vector &nodeIds) const override { + const auto &loopBBs = loop.GetLoopMembers(); + nodeIds.resize(loopBBs.size(), 0); + size_t i = 0; + for (const auto *bb : loopBBs) { + nodeIds[i] = bb->GetID(); + ++i; + } + } + + uint32 GetLoopDepth() const override { + return loop.GetLoopLevel(); + } + + private: + maplebe::CGFuncLoops &loop; +}; + +class NodeIterBase { + public: + using value_type = NodeType*; + using pointer = value_type*; + using reference = value_type&; + using size_type = size_t; + using different_type = ptrdiff_t; + using iterator_category = std::forward_iterator_tag; + using self = NodeIterBase; + + virtual ~NodeIterBase() = default; + + virtual value_type operator*() const = 0; + virtual self &operator++() = 0; + virtual bool operator==(const self &rhs) const = 0; + virtual bool operator!=(const self &rhs) const = 0; +}; + +class MeBBIter : public NodeIterBase { + public: + explicit MeBBIter(pointer pt) : nodePtr(pt) {} + ~MeBBIter() override { + nodePtr = nullptr; + } + + value_type operator*() const override { + return *nodePtr; + } + + self &operator++() override { + nodePtr += 1; + return *this; + } + + bool operator==(const self &rhs) const override { + return nodePtr == static_cast(rhs).nodePtr; + } + + bool operator!=(const self &rhs) const override { + return !(*this == rhs); + } + + private: + pointer nodePtr = nullptr; +}; + +class CGBBIter : public NodeIterBase { + public: + explicit CGBBIter(value_type val) : node(val) {} + + ~CGBBIter() override = default; + + value_type operator*() const override { + return node; + } + + self &operator++() override { + CHECK_FATAL(node != nullptr, "current iterator is invalid"); + auto *cgBB = static_cast(node); + node = cgBB->GetNext(); + return 
*this; + } + + bool operator==(const self &rhs) const override { + return node == static_cast(rhs).node; + } + + bool operator!=(const self &rhs) const override { + return !(*this == rhs); + } + + private: + value_type node = nullptr; +}; + +// Data structure for wrapper of meFunc and cgFunc +class FuncWrapperBase { + public: + using iterator = NodeIterBase; + + virtual ~FuncWrapperBase() = default; + + // member functions for container + virtual size_t size() const = 0; + virtual bool empty() const = 0; + virtual iterator &begin() = 0; + virtual iterator &end() = 0; + + virtual const std::string &GetName() const = 0; + virtual NodeType *GetNodeById(uint32 id) = 0; + virtual NodeType *GetCommonEntryNode() = 0; + virtual NodeType *GetCommonExitNode() = 0; + virtual NodeType *GetLayoutStartNode() = 0; + virtual bool IsNodeInCFG(const NodeType *node) const = 0; + + bool IsMeFunc() const { + return isMeFunc; + } + + protected: + FuncWrapperBase(bool isMeFunction, MemPool &mp) : isMeFunc(isMeFunction), memPool(mp) {} + const bool isMeFunc; + MemPool &memPool; +}; + +inline NodeType **CastPointer(BB **ppBB) { + union { + NodeType **ppNode; + BB **ppBB; + } tmp; + tmp.ppBB = ppBB; + return tmp.ppNode; +} + +inline NodeType **CastPointer(maplebe::BB **ppBB) { + union { + NodeType **ppNode; + maplebe::BB **ppBB; + } tmp; + tmp.ppBB = ppBB; + return tmp.ppNode; +} + +class MeFuncWrapper : public FuncWrapperBase { + public: + MeFuncWrapper(MeFunction &meFunc, MemPool &mp) : FuncWrapperBase(true, mp), func(meFunc) {} + + ~MeFuncWrapper() override = default; + MeFunction &GetFunc() { + return func; + } + + const std::string &GetName() const override { + return func.GetName(); + } + + NodeType *GetNodeById(uint32 id) override { + return func.GetCfg()->GetBBFromID(BBId(id)); + } + + NodeType *GetCommonEntryNode() override { + return func.GetCfg()->GetCommonEntryBB(); + } + + NodeType *GetCommonExitNode() override { + return func.GetCfg()->GetCommonExitBB(); + } + + NodeType 
*GetLayoutStartNode() override { + return func.GetCfg()->GetCommonEntryBB(); + } + + bool IsNodeInCFG(const NodeType *node) const override { + if (node == nullptr) { + return false; + } + // Exclude common exit meBB + if (node == func.GetCfg()->GetCommonExitBB()) { + return false; + } + return true; + } + + size_t size() const override { + return func.GetCfg()->size(); + } + + bool empty() const override { + return size() == 0; + } + + iterator &begin() override { + BB **storageStart = func.GetCfg()->GetAllBBs().data(); + NodeType **start = CastPointer(storageStart); + NodeIterBase &iter = *memPool.New(start); + return iter; + } + + iterator &end() override { + BB **storageStart = func.GetCfg()->GetAllBBs().data(); + BB **storageFinish = storageStart + size(); + NodeType **finish = CastPointer(storageFinish); + NodeIterBase &iter = *memPool.New(finish); + return iter; + } + + private: + MeFunction &func; +}; + +class CGFuncWrapper : public FuncWrapperBase { + public: + CGFuncWrapper(maplebe::CGFunc &cgFunc, MemPool &mp) : FuncWrapperBase(false, mp), func(cgFunc) {} + + ~CGFuncWrapper() override = default; + + maplebe::CGFunc &GetFunc() { + return func; + } + + const std::string &GetName() const override { + return func.GetName(); + } + + NodeType *GetNodeById(uint32 id) override { + return func.GetBBFromID(id); + } + + NodeType *GetCommonEntryNode() override { + return func.GetCommonEntryBB(); + } + + NodeType *GetCommonExitNode() override { + return func.GetCommonExitBB(); + } + + NodeType *GetLayoutStartNode() override { + return func.GetFirstBB(); + } + + bool IsNodeInCFG(const NodeType *node) const override { + if (node == nullptr) { + return false; + } + // Exclude common entry cgBB, common exit cgBB and unreachable cgBB + const auto *cgBB = static_cast(node); + if (node == func.GetCommonEntryBB() || node == func.GetCommonExitBB() || cgBB->IsUnreachable()) { + return false; + } + if (func.IsExitBB(*cgBB)) { + if (cgBB->GetPrev() && cgBB->GetPrev()->GetKind() 
== maplebe::BB::kBBGoto && + cgBB->GetPreds().empty() && cgBB->GetSuccs().empty()) { + return false; + } + } + return true; + } + + size_t size() const override { + return func.GetAllBBs().size(); + } + + bool empty() const override { + return size() == 0; + } + + iterator &begin() override { + auto *firstBB = func.GetFirstBB(); + NodeIterBase &iter = *memPool.New(firstBB); + return iter; + } + + iterator &end() override { + NodeIterBase &iter = *memPool.New(nullptr); + return iter; + } + + private: + maplebe::CGFunc &func; +}; + +class DomWrapperBase { + using NodePtrHolder = MapleVector; + protected: + using NodeId = Dominance::NodeId; + public: + using value_type = NodePtrHolder::value_type; + using size_type = NodePtrHolder::size_type; + using difference_type = NodePtrHolder::difference_type; + using pointer = NodePtrHolder::pointer; + using const_pointer = NodePtrHolder::const_pointer; + using reference = NodePtrHolder::reference; + using const_reference = NodePtrHolder::const_reference; + using iterator = NodePtrHolder::iterator; + using const_iterator = NodePtrHolder::const_iterator; + using reverse_iterator = NodePtrHolder::reverse_iterator; + using const_reverse_iterator = NodePtrHolder::const_reverse_iterator; + + virtual ~DomWrapperBase() = default; + + virtual size_t rpo_size() const = 0; + virtual iterator rpo_begin() = 0; + virtual iterator rpo_end() = 0; + virtual MapleVector &GetDomChildren(size_t idx) = 0; +}; + +class MeDomWrapper : public DomWrapperBase { + public: + explicit MeDomWrapper(Dominance &meDom) : dom(meDom) {} + + ~MeDomWrapper() override = default; + + size_t rpo_size() const override { + return dom.GetReversePostOrder().size(); + } + + iterator rpo_begin() override { + return dom.GetReversePostOrder().begin(); + } + + iterator rpo_end() override { + return dom.GetReversePostOrder().end(); + } + + MapleVector &GetDomChildren(size_t idx) override { + return dom.GetDomChildren(idx); + } + + private: + Dominance &dom; +}; + +class 
CGDomWrapper : public DomWrapperBase { + public: + explicit CGDomWrapper(maplebe::DomAnalysis &cgDom) : dom(cgDom) {} + + ~CGDomWrapper() override = default; + + size_t rpo_size() const override { + return dom.GetReversePostOrder().size(); + } + + iterator rpo_begin() override { + maplebe::BB **storageStart = dom.GetReversePostOrder().data(); + NodeType **start = CastPointer(storageStart); + return iterator(start); + } + + iterator rpo_end() override { + maplebe::BB **storageStart = dom.GetReversePostOrder().data(); + maplebe::BB **storageFinish = storageStart + rpo_size(); + NodeType **finish = CastPointer(storageFinish); + return iterator(finish); + } + + MapleVector &GetDomChildren(size_t idx) override { + return dom.GetDomChildren(idx); + } + + private: + maplebe::DomAnalysis &dom; +}; + +// Temperature of a layout context +enum class NodeContextTemperature { + kAll, // Context contains both cold and non-cold BBs + kCold, // Context only contains cold BBs + kNonCold // Context only contains non-cold BBs +}; + +// Range of a layout context +enum class NodeContextKind { + kGlobal, // Context is the whole function range. `inNodes` is nullptr, `loop` is nullptr. + kLocalInLoop, // Context is a local range in a loop. `inNodes` is valid, `loop` is valid. + kLocalOutOfLoop // Context is a local range not in any loops. `inBBs` is valid, `loop` is nullptr. 
+}; + +enum class LayoutRangeKind: uint32 { + kRangeSucc = 0x01, + kRangeReadyList = 0x02, + kRangeFreqRpoList = 0x04, + kRangeRpotList = 0x08, + kRangeColdPath = 0x10, + kRangeAll = UINT32_MAX +}; + +class NodeChain; // circular dependency exists, no other choice +// A series of nodes to be laid out +class NodeContext { + public: + NodeContext(FuncWrapperBase &curFunc, MapleVector &node2chainParam, + MapleVector *inVec, LoopWrapperBase *curLoop, NodeContextTemperature temp) + : func(curFunc), node2chain(node2chainParam), inNodes(inVec), loop(curLoop), temperature(temp) { + if (inNodes == nullptr) { + kind = NodeContextKind::kGlobal; + } else if (loop == nullptr) { + kind = NodeContextKind::kLocalOutOfLoop; + } else { + kind = NodeContextKind::kLocalInLoop; + } + } + + void InitReadyChains(MapleSet &readyChain); + + NodeType *GetBestStartBB(const MapleSet &readyChains) const { + if (IsGlobal()) { + return func.GetLayoutStartNode(); + } + if (IsInLoop()) { + return GetBestStartBBInLoop(); + } else { + return GetBestStartBBOutOfLoop(readyChains); + } + } + + NodeContextTemperature GetTemperature() const { + return temperature; + } + + NodeContextKind GetKind() const { + return kind; + } + + const char *GetKindName() const { + if (kind == NodeContextKind::kGlobal) { + return "func"; + } + if (kind == NodeContextKind::kLocalInLoop) { + return "loop"; + } + if (kind == NodeContextKind::kLocalOutOfLoop) { + return "out-of-loop"; + } + CHECK_FATAL_FALSE("found unsupported layout context kind"); + } + + bool Contains(uint32 nodeId) const { + if (IsGlobal()) { + return true; + } + CHECK_FATAL(nodeId < inNodes->size(), "out of range"); + return (*inNodes)[nodeId]; + } + + bool Contains(const NodeType &bb) const { + return Contains(bb.GetID()); + } + + bool IsGlobal() const { + return kind == NodeContextKind::kGlobal; + } + + bool IsInLoop() const { + return kind == NodeContextKind::kLocalInLoop; + } + + private: + NodeType *GetBestStartBBInLoop() const; + NodeType 
*GetBestStartBBOutOfLoop(const MapleSet &readyChains) const; + + FuncWrapperBase &func; + MapleVector &node2chain; + // `inNodes`: a range of nodes in which the chain is built. A nullptr value means the whole function range + MapleVector *inNodes = nullptr; + // `loop` is the loop that context belongs to, if `loop` is nullptr, the context is not in a loop (may be a global + // context or a local context out of any loops). + LoopWrapperBase *loop = nullptr; + // See enum ContextTemperature for details + NodeContextTemperature temperature = NodeContextTemperature::kAll; + // See enum ContextKind for details + NodeContextKind kind = NodeContextKind::kGlobal; +}; + +class NodeChain { + public: + using iterator = MapleVector::iterator; + using reverse_iterator = MapleVector::reverse_iterator; + using const_iterator = MapleVector::const_iterator; + NodeChain(MapleAllocator &alloc, MapleVector &node2chainParam, NodeType &node, uint32 inputId) + : id(inputId), nodeVec(1, &node, alloc.Adapter()), node2chain(node2chainParam) { + node2chain[node.GetID()] = this; + } + + iterator begin() { + return nodeVec.begin(); + } + const_iterator begin() const { + return nodeVec.begin(); + } + iterator end() { + return nodeVec.end(); + } + const_iterator end() const { + return nodeVec.end(); + } + reverse_iterator rbegin() { + return nodeVec.rbegin(); + } + reverse_iterator rend() { + return nodeVec.rend(); + } + + bool empty() const { + return nodeVec.empty(); + } + + size_t size() const { + return nodeVec.size(); + } + + uint32 GetId() const { + return id; + } + + bool IsColdChain() const { + return isCold; + } + + void SetColdChain(bool cold) { + isCold = cold; + } + + NodeType *GetHeader() { + CHECK_FATAL(!nodeVec.empty(), "cannot get header from a empty bb chain"); + return nodeVec.front(); + } + + NodeType *GetTail() { + CHECK_FATAL(!nodeVec.empty(), "cannot get tail from a empty bb chain"); + return nodeVec.back(); + } + + bool Contains(const NodeType &node) const { + auto it = 
std::find(nodeVec.cbegin(), nodeVec.cend(), &node); + return it != nodeVec.cend(); + } + + // update unlaidPredCnt if needed. The chain is ready to layout only if unlaidPredCnt == 0 + bool IsReadyToLayout(const NodeContext &context) { + if (IsColdChain()) { + return false; // All cold chains are saved in `coldChains`, should not be added in ready chain list + } + MayRecalculateUnlaidPredCnt(context); + return (unlaidPredCnt == 0); + } + + // Merge src chain to this one + void MergeFrom(NodeChain *srcChain) { + CHECK_FATAL(this != srcChain, "merge same chain?"); + ASSERT_NOT_NULL(srcChain); + if (srcChain->empty()) { + return; + } + for (auto *node : *srcChain) { + nodeVec.push_back(node); + node2chain[node->GetID()] = this; + } + srcChain->nodeVec.clear(); + srcChain->unlaidPredCnt = 0; + srcChain->isCacheValid = false; + isCacheValid = false; // is this necessary? + } + + void UpdateSuccChainBeforeMerged(const NodeChain &destChain, const NodeContext &context, + MapleSet &readyToLayoutChains) { + for (auto *node : nodeVec) { + std::vector succVec; + node->GetOutNodes(succVec); + for (auto *succ : succVec) { + if (!context.Contains(*succ)) { + continue; + } + if (node2chain[succ->GetID()] == this || node2chain[succ->GetID()] == &destChain) { + continue; + } + NodeChain *succChain = node2chain[succ->GetID()]; + succChain->MayRecalculateUnlaidPredCnt(context); + if (succChain->unlaidPredCnt != 0) { + --succChain->unlaidPredCnt; + } + if (succChain->unlaidPredCnt == 0) { + readyToLayoutChains.insert(succChain); + } + } + } + } + + void Dump() const { + LogInfo::MapleLogger() << "bb chain with " << nodeVec.size() << " blocks: "; + for (size_t i = 0; i < nodeVec.size(); ++i) { + auto *node = nodeVec[i]; + LogInfo::MapleLogger() << node->GetID(); + if (i != nodeVec.size() - 1) { + LogInfo::MapleLogger() << ", "; + } + } + if (isCold) { + LogInfo::MapleLogger() << " (cold chain)"; + } + LogInfo::MapleLogger() << std::endl; + } + + void DumpOneLine() const { + for (auto 
*node : nodeVec) { + LogInfo::MapleLogger() << node->GetID() << " "; + } + } + + private: + void MayRecalculateUnlaidPredCnt(const NodeContext &context) { + if (isCacheValid) { + return; // If cache is trustable, no need to recalculate + } + unlaidPredCnt = 0; + for (auto *node : nodeVec) { + std::vector predVec; + node->GetInNodes(predVec); + for (auto *pred : predVec) { + // exclude blocks out of context + if (!context.Contains(pred->GetID())) { + continue; + } + // exclude blocks within the same chain + if (node2chain[pred->GetID()] == this) { + continue; + } + ++unlaidPredCnt; + } + } + isCacheValid = true; + } + + uint32 id = 0; + uint32 unlaidPredCnt = 0; // how many predecessors are not laid out + MapleVector nodeVec; + MapleVector &node2chain; + bool isCacheValid = false; // whether unlaidPredCnt is trustable + bool isCold = false; // set true if the chain is within a loop and starts with a unlikely head +}; + +struct NodeOrderElem { + NodeOrderElem(FreqType freq, uint32 rpoIdx, NodeType *curNode) + : node(curNode), + frequency(freq), + reversePostOrderIdx(rpoIdx) {} + + // frequency first, then rpoIdx + bool operator<(const NodeOrderElem &rhs) const { + if (frequency == rhs.frequency) { + return reversePostOrderIdx < rhs.reversePostOrderIdx; + } else { + return frequency > rhs.frequency; + } + } + + NodeType *node; + FreqType frequency; + uint32 reversePostOrderIdx; +}; + +class ChainLayout { + public: + ChainLayout(MeFunction &meFunc, MemPool &memPool, bool enabledDebug, IdentifyLoops &identifyLoops, Dominance &meDom) + : ChainLayout(&meFunc, nullptr, memPool, enabledDebug, &identifyLoops, &meDom, nullptr) {} + + ChainLayout(maplebe::CGFunc &cgFunc, MemPool &memPool, bool enabledDebug, maplebe::DomAnalysis &cgDom) + : ChainLayout(nullptr, &cgFunc, memPool, enabledDebug, nullptr, nullptr, &cgDom) {} + + ~ChainLayout() = default; + + void BuildChainForFunc(); + + MapleVector &GetNode2Chain() { + return node2chain; + } + + void SetHasRealProfile(bool val) { 
+ hasRealProfile = val; + } + + void SetConsiderBetterPred(bool val) { + considerBetterPred = val; + } + + private: + using NodePredicate = bool(*)(NodeType&); + ChainLayout(MeFunction *meFunc, maplebe::CGFunc *cgFunc, MemPool &memPool, bool enabledDebug, + IdentifyLoops *identifyLoops, Dominance *meDom, maplebe::DomAnalysis *cgDom) + : layoutAlloc(&memPool), + func(meFunc != nullptr ? static_cast(*layoutAlloc.New(*meFunc, memPool)) : + static_cast(*layoutAlloc.New(*cgFunc, memPool))), + node2chain(layoutAlloc.Adapter()), + loops(layoutAlloc.Adapter()), + coldChains(layoutAlloc.Adapter()), + readyToLayoutChains(layoutAlloc.Adapter()), + dom(meFunc != nullptr ? static_cast(*layoutAlloc.New(*meDom)) : + static_cast(*layoutAlloc.New(*cgDom))), + freqRpoNodeList(layoutAlloc.Adapter()), + debugChainLayout(enabledDebug), + meLayoutColdPath(MeOption::layoutColdPath), + cgLayoutColdPath(maplebe::CGOptions::DoLayoutColdPath()) { + if (func.IsMeFunc()) { + CHECK_NULL_FATAL(identifyLoops); + InitLoopsForME(*identifyLoops); + if (meLayoutColdPath) { + layoutColdPath = true; + } + } else { + CHECK_NULL_FATAL(cgFunc); + InitLoopsForCG(cgFunc->GetLoops()); + if (cgLayoutColdPath) { + layoutColdPath = true; + } + } + } + + void InitLoopsForME(IdentifyLoops &meLoops); + void InitLoopsForCG(MapleVector &cgLoops); + void InitChains(); + void InitColdNodes(); + void InitColdNodesForME(); + void InitColdNodesForCG(); + void InitFreqRpoNodeList(); + + bool IsColdNode(const NodeType &node) const { + return IsColdNode(node.GetID()); + } + + bool IsColdNode(uint32 nodeId) const { + if (coldNodes == nullptr) { + return false; // No cold nodes info, always return false. 
+ } + CHECK_FATAL(nodeId < coldNodes->size(), "node id out of range"); + return (*coldNodes)[nodeId]; + } + + bool IsNodeInLoop(const NodeType &node) { + if (nodesInLoop != nullptr) { + return (*nodesInLoop)[node.GetID()]; + } + if (func.IsMeFunc()) { + return static_cast(node).GetAttributes(kBBAttrIsInLoop); + } + return false; + } + + void BuildChainForLoops(); + void BuildChainForLoop(LoopWrapperBase &loop, MapleVector &inBBs, NodeContextTemperature temperature); + void BuildChainForColdPathInFunc(); + NodeChain *BuildChainInContext(MapleVector *inBBs, LoopWrapperBase *loop, uint32 range, + NodeContextTemperature contextTemperature = NodeContextTemperature::kAll); + bool FindNodesToLayoutInLoop(const LoopWrapperBase &loop, NodeContextTemperature temperature, + MapleVector &inBBs); + void PostBuildChainForCGFunc(NodeChain &entryChain); + void DoBuildChain(const NodeType &header, NodeChain &chain, uint32 range); + + NodeType *GetBestSucc(NodeType &node, const NodeChain &chain, uint32 range, bool considerBetterPredForSucc); + NodeType *FindNextNodeInSucc(NodeType &node, bool considerBetterPredForSucc); + NodeType *FindNextNodeInReadyList(NodeType &node) const; + NodeType *FindNextNodeInRpotList(const NodeChain &chain); + NodeType *FindNextNodeInFreqRpotList(const NodeChain &chain) const; + void MayDumpSelectLog(const NodeType &curNode, const NodeType &nextNode, const std::string &hint); + void MayDumpFormedChain(const NodeChain &chain) const; + NodeChain *GetNextColdChain(const NodeChain &curChain); + bool IsCandidateSucc(const NodeType &node, const NodeType &succ) const; + bool HasBetterLayoutPred(const NodeType &node, NodeType &succ); + + MapleAllocator layoutAlloc; + FuncWrapperBase &func; + MapleVector node2chain; // mapping node id to the chain that the node belongs to + MapleVector loops; + NodeContext *layoutContext = nullptr; + MapleVector *coldNodes = nullptr; // to mark cold Nodes + MapleVector *nodesInLoop = nullptr; // only for cgBB, because meBB has 
kBBAttrIsInLoop + MapleList coldChains; // collect unlikely chain in loop + MapleSet readyToLayoutChains; + DomWrapperBase &dom; + MapleSet freqRpoNodeList; // frequency reverse post order node list + uint32 rpoSearchPos = 0; // reverse post order search beginning position + bool debugChainLayout = false; + bool hasRealProfile = false; + bool considerBetterPred = false; + bool hasColdNode = false; + bool hasColdNodeOutOfLoop = false; + // outline cold blocks such as unlikely or rarely executed blocks according to real profile. + const bool meLayoutColdPath = false; + const bool cgLayoutColdPath = false; + bool layoutColdPath = false; +}; +} // namespace maple +#endif // MAPLE_UTIL_INCLUDE_CHAIN_LAYOUT_H + diff --git a/src/mapleall/maple_util/include/cl_option.h b/src/mapleall/maple_util/include/cl_option.h index 35475e7decb71c359a9e545ebef057dbc0c6f6b0..a4829012c2febef6cc051bd02704897fb3cdef99 100644 --- a/src/mapleall/maple_util/include/cl_option.h +++ b/src/mapleall/maple_util/include/cl_option.h @@ -32,7 +32,7 @@ namespace maplecl { template -constexpr inline bool digitalCheck = (std::is_same_v || +constexpr inline bool kDigitalCheck = (std::is_same_v || std::is_same_v || std::is_same_v || std::is_same_v || @@ -72,12 +72,12 @@ enum class EqualType { }; /* These constexpr are needed to use short name in option description, like this: - * maplecl::Option option({"--option"}, "Description", optionalValue); + * maplecl::Option option({"--option"}, "Description", kOptionalValue); * instead of: * maplecl::Option option({"--option"}, "Description", ValueExpectedType::kValueOptional); */ -constexpr ValueExpectedType optionalValue = ValueExpectedType::kValueOptional; -constexpr ValueExpectedType requiredValue = ValueExpectedType::kValueRequired; +constexpr ValueExpectedType kOptionalValue = ValueExpectedType::kValueOptional; +constexpr ValueExpectedType kRequiredValue = ValueExpectedType::kValueRequired; constexpr ValueExpectedType disallowedValue = 
ValueExpectedType::kValueDisallowed; constexpr ValueJoinedType joinedValue = ValueJoinedType::kValueJoined; constexpr ValueJoinedType separatedValue = ValueJoinedType::kValueSeparated; @@ -163,6 +163,12 @@ class OptionInterface { return names[0]; } + // Used for option sorting. + std::string GetOptName() const { + assert(names.size() > 0); + return names[0]; + } + const std::string &GetDescription() const { return optDescription; } @@ -229,7 +235,7 @@ class Option : public OptionInterface { * strding test = option1; or int dig = option2 - here will be implicit conversation. */ /* implicit */ - operator T() { + operator T() const { return GetValue(); } @@ -237,7 +243,7 @@ class Option : public OptionInterface { KeyArg &keyArg) override { RetCode err = RetCode::noError; auto &key = args[argsIndex]; - if constexpr(digitalCheck) { + if constexpr(kDigitalCheck) { err = ParseDigit(argsIndex, args, keyArg); } else if constexpr(std::is_same_v) { err = ParseString(argsIndex, args, keyArg); @@ -259,7 +265,7 @@ class Option : public OptionInterface { if (defaultValue.isSet) { value = defaultValue.defaultValue; } else { - if constexpr(digitalCheck) { + if constexpr(kDigitalCheck) { value = 0; } else if constexpr(std::is_same_v) { value = ""; @@ -318,8 +324,8 @@ class Option : public OptionInterface { RetCode ParseString(size_t &argsIndex, const std::deque &args, KeyArg &keyArg); RetCode ParseBool(size_t &argsIndex, const std::deque &args); - void FillVal(const T &val, std::vector &vals) { - if constexpr(digitalCheck) { + void FillVal(const T &val, std::vector &vals) const { + if constexpr(kDigitalCheck) { (void)vals.emplace_back(std::to_string(val)); } else if constexpr (std::is_same_v) { (void)vals.emplace_back(val); diff --git a/src/mapleall/maple_util/include/cl_parser.h b/src/mapleall/maple_util/include/cl_parser.h index 320acad1520f1cb7423db6508ebdecef397b6c71..30cc37771b42dfa4991bdd7ee96b9209372ac9b8 100644 --- a/src/mapleall/maple_util/include/cl_parser.h +++ 
b/src/mapleall/maple_util/include/cl_parser.h @@ -66,7 +66,7 @@ struct OptionCategory { } void DeleteEnabledOption(OptionInterface *opt) { - if (enabledOptionsSet.find(opt) == enabledOptionsSet.end()) { + if (enabledOptionsSet.find(opt) != enabledOptionsSet.end()) { enabledOptionsSet.erase(enabledOptionsSet.find(opt)); enabledOptions.erase(std::find(enabledOptions.begin(), enabledOptions.end(), opt)); } @@ -119,12 +119,34 @@ class CommandLine { return BashCompletionPrinter(defaultCategory); } - void HelpPrinter(const OptionCategory &optCategory) const; - void HelpPrinter() const { + void HelpPrinter(OptionCategory &optCategory) const; + void HelpPrinter() { return HelpPrinter(defaultCategory); } + std::vector &GetLinkOptions() { + return linkOptions; + } + + bool GetUseLitePgoGen() { + return useLitePgoGen; + } + + void SetUseLitePgoGen(bool flag) { + useLitePgoGen = flag; + } + + bool GetHasPgoLib() { + return hasPgoLib; + } + + void SetHasPgoLib(bool flag) { + hasPgoLib = flag; + } + void CloseOptimize(const OptionCategory &optCategory) const; + void DeleteEnabledOptions(size_t &argsIndex, const std::deque &args, + const OptionCategory &optCategory) const; std::vector> badCLArgs; OptionCategory defaultCategory; @@ -144,20 +166,23 @@ class CommandLine { OptionCategory ipaCategory; OptionCategory unSupCategory; + std::vector linkOptions; private: - OptionInterface *CheckJoinedOption(KeyArg &keyArg, OptionCategory &optCategory) const; + bool useLitePgoGen = false; + bool hasPgoLib = false; + OptionInterface *CheckJoinedOption(KeyArg &keyArg, OptionCategory &optCategory); RetCode ParseJoinedOption(size_t &argsIndex, const std::deque &args, KeyArg &keyArg, OptionCategory &optCategory); RetCode ParseOption(size_t &argsIndex, const std::deque &args, KeyArg &keyArg, const OptionCategory &optCategory, - OptionInterface *opt); + OptionInterface &opt) const; RetCode ParseEqualOption(size_t &argsIndex, const std::deque &args, KeyArg &keyArg, OptionCategory &optCategory, - 
const OptionsMapType &optMap, ssize_t pos); + const OptionsMapType &optMap, size_t pos); RetCode ParseSimpleOption(size_t &argsIndex, const std::deque &args, KeyArg &keyArg, OptionCategory &optCategory, diff --git a/src/mapleall/maple_util/include/dominance.h b/src/mapleall/maple_util/include/dominance.h index 284e2ca252c2f73e219d3e40a7dc36ae3880f2b8..5a9f00e05c32202cee6f896742d23a4c9982c1fe 100644 --- a/src/mapleall/maple_util/include/dominance.h +++ b/src/mapleall/maple_util/include/dominance.h @@ -59,11 +59,11 @@ class Dominance { return nodeVec.at(i); } - BaseGraphNode &GetCommonEntryNode() const { + const BaseGraphNode &GetCommonEntryNode() const { return commonEntryNode; } - BaseGraphNode &GetCommonExitNode() const { + const BaseGraphNode &GetCommonExitNode() const { return commonExitNode; } @@ -216,7 +216,7 @@ class Dominance { void GenPostOrderID() { ASSERT(!nodeVec.empty(), "size to be allocated is 0"); std::vector visitedMap(nodeVec.size(), false); - int32 postOrderID = 0; + size_t postOrderID = 0; PostOrderWalk(commonEntryNode, postOrderID, visitedMap); // initialize reversePostOrder reversePostOrder.resize(postOrderID); @@ -226,7 +226,7 @@ class Dominance { if (postOrderNo == -1) { continue; } - reversePostOrder[maxPostOrderID - postOrderNo] = nodeVec[i]; + reversePostOrder[maxPostOrderID - static_cast(postOrderNo)] = nodeVec[i]; } } @@ -282,7 +282,7 @@ class Dominance { void ComputeDtDfn() { for (size_t i = 0; i < dtPreOrder.size(); ++i) { - dtDfn[dtPreOrder[i]] = i; + dtDfn[dtPreOrder[i]] = static_cast(i); } } @@ -348,7 +348,7 @@ class Dominance { return false; } - void PostOrderWalk(const BaseGraphNode &node, int32 &pid, std::vector &visitedMap) { + void PostOrderWalk(const BaseGraphNode &node, size_t &pid, std::vector &visitedMap) { auto nodeId = node.GetID(); ASSERT(nodeId < visitedMap.size(), "index out of range"); if (nodeVec[nodeId] == nullptr || visitedMap[nodeId]) { @@ -398,7 +398,7 @@ class Dominance { for (auto frontierNodeId : 
domFrontier[node.GetID()]) { (void)dfSet.insert(frontierNodeId); if (frontierNodeId < nodeIdMarker) { // union with its computed result - (void)dfSet.insert(iterDomFrontier[frontierNodeId].cbegin(), iterDomFrontier[frontierNodeId].cend()); + dfSet.insert(iterDomFrontier[frontierNodeId].cbegin(), iterDomFrontier[frontierNodeId].cend()); } else { // recursive call auto frontierNode = nodeVec[frontierNodeId]; if (frontierNode == nullptr) { diff --git a/src/mapleall/maple_util/include/factory.h b/src/mapleall/maple_util/include/factory.h index 5e7e74b3b577eb5c940dac59a6f92151429309fb..de199d7691c88c5632f797d1d046611eaea4b29e 100644 --- a/src/mapleall/maple_util/include/factory.h +++ b/src/mapleall/maple_util/include/factory.h @@ -15,7 +15,6 @@ #ifndef MAPLE_UTIL_INCLUDE_FACTORY_H #define MAPLE_UTIL_INCLUDE_FACTORY_H #include -#include #include #include #include "thread_env.h" diff --git a/src/mapleall/maple_util/include/file_utils.h b/src/mapleall/maple_util/include/file_utils.h index 3a657dda57b0d644f5ef4827dda395dbc84651e8..1a78f896704ec1c463c2e9cd0cdad3c6de1e39cc 100644 --- a/src/mapleall/maple_util/include/file_utils.h +++ b/src/mapleall/maple_util/include/file_utils.h @@ -15,8 +15,31 @@ #ifndef MAPLE_DRIVER_INCLUDE_FILE_UTILS_H #define MAPLE_DRIVER_INCLUDE_FILE_UTILS_H #include +#include "types_def.h" +#include "mpl_logging.h" namespace maple { + enum class InputFileType { + kFileTypeNone, + kFileTypeClass, + kFileTypeJar, + kFileTypeAst, + kFileTypeCpp, + kFileTypeC, + kFileTypeDex, + kFileTypeMpl, + kFileTypeVtableImplMpl, + kFileTypeS, + kFileTypeObj, + kFileTypeBpl, + kFileTypeMeMpl, + kFileTypeMbc, + kFileTypeLmbc, + kFileTypeH, + kFileTypeI, + kFileTypeOast, +}; + extern const std::string kFileSeperatorStr; extern const char kFileSeperatorChar; // Use char[] since getenv receives char* as parameter @@ -25,6 +48,7 @@ constexpr char kClangPath[] = "BiShengC_Clang_Path"; constexpr char kAsPath[] = "BiShengC_AS_Path"; constexpr char kGccPath[] = 
"BiShengC_GCC_Path"; constexpr char kLdLibPath[] = "LD_LIBRARY_PATH"; +constexpr char kGetOsVersion[] = "BiShengC_GET_OS_VERSION"; class FileUtils { public: @@ -46,18 +70,25 @@ class FileUtils { static bool IsFileExists(const std::string &filePath); static std::string AppendMapleRootIfNeeded(bool needRootPath, const std::string &path, const std::string &defaultRoot = "." + kFileSeperatorStr); + static InputFileType GetFileType(const std::string &filePath); + static InputFileType GetFileTypeByMagicNumber(const std::string &pathName); + const std::string &GetTmpFolder() const { return tmpFolderPath; }; static std::string GetOutPutDir(); - bool DelTmpDir(); - std::string GetTmpFolderPath(); + bool DelTmpDir() const; + std::string GetTmpFolderPath() const; private: std::string tmpFolderPath; FileUtils() : tmpFolderPath(GetTmpFolderPath()) {} ~FileUtils() { - DelTmpDir(); + if (!DelTmpDir()) { + maple::LogInfo::MapleLogger() << "DelTmpDir failed" << '\n'; + }; } + static const uint32 kMagicAST = 0x48435043; + static const uint32 kMagicELF = 0x464c457f; }; } // namespace maple #endif // MAPLE_DRIVER_INCLUDE_FILE_UTILS_H diff --git a/src/mapleall/maple_util/include/int128_util.h b/src/mapleall/maple_util/include/int128_util.h new file mode 100644 index 0000000000000000000000000000000000000000..6af3fee05e3e3c2470635deecd30352fbcbac434 --- /dev/null +++ b/src/mapleall/maple_util/include/int128_util.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLE_UTIL_INCLUDE_INT128 +#define MAPLE_UTIL_INCLUDE_INT128 + +#include "mpl_logging.h" +#include "securec.h" +#include "types_def.h" + +namespace maple { +using Int128ElemTy = uint64; +constexpr size_t kInt128ElemNum = 2; +constexpr size_t kInt128BitSize = 128; +using Int128Arr = Int128ElemTy[kInt128ElemNum]; + +class Int128Util { + public: + static inline void CopyInt128(Int128ElemTy *dst, const Int128ElemTy *src) { + constexpr size_t copySize = kInt128ElemNum * sizeof(Int128ElemTy); + errno_t err = memcpy_s(dst, copySize, src, copySize); + CHECK_FATAL(err == EOK, "memcpy_s failed"); + } +}; + +} // namespace maple + +#endif // MAPLE_UTIL_INCLUDE_INT128 diff --git a/src/mapleall/maple_util/include/literalstrname.h b/src/mapleall/maple_util/include/literalstrname.h index dbb5e7a3b6706bb013229e097fb1bb252a9e01c2..836de465d5a4cb68dc6e3a1bbb5effef8e10e967 100644 --- a/src/mapleall/maple_util/include/literalstrname.h +++ b/src/mapleall/maple_util/include/literalstrname.h @@ -21,7 +21,6 @@ const std::string kConstString = "_C_STR_"; const std::string kConstStringPtr = "_PTR_C_STR_"; const std::string kLocalStringPrefix = "L_STR_"; -constexpr int kConstStringLen = 7; class LiteralStrName { public: @@ -37,7 +36,8 @@ class LiteralStrName { } else { const char16_t *end = data + len; while (data < end) { - hash = (static_cast(hash) << 5) - hash + *data++; // calculate the hash code of data + // calculate the hash code of data + hash = (static_cast(hash) << 5) - hash + static_cast(*data++); } } return hash; @@ -46,6 +46,7 @@ class LiteralStrName { static std::string GetHexStr(const uint8_t *bytes, uint32_t len); static std::string GetLiteralStrName(const uint8_t *bytes, uint32_t len); static std::string ComputeMuid(const uint8_t *bytes, uint32_t len); + static constexpr int kConstStringLen = 7; }; #endif diff --git a/src/mapleall/maple_util/include/mpl_int_val.h b/src/mapleall/maple_util/include/mpl_int_val.h index 
d55195216bd87c5f811e4a1ca6060fe1e4623edf..3150ca9055ca62708cd75c1c3e08218f5071397e 100644 --- a/src/mapleall/maple_util/include/mpl_int_val.h +++ b/src/mapleall/maple_util/include/mpl_int_val.h @@ -84,16 +84,20 @@ class IntVal { // copy-assignment IntVal &operator=(const IntVal &other) { + // self-assignment check + if (this == &other) { + return *this; + } + if (width == 0) { // Allow 'this' to be assigned with new bit-width and sign iff // its original bit-width is zero (i.e. the value was created by the default ctor) - Assign(other); } else { // Otherwise, assign only new value, but sign and width must be the same ASSERT(width == other.width && sign == other.sign, "different bit-width or sign"); - Init(other.u, !other.IsOneWord()); } + Assign(other); return *this; } @@ -110,6 +114,11 @@ class IntVal { } else { ASSERT(width == other.width && sign == other.sign, "different bit-width or sign"); } + + if (!IsOneWord()) { + delete[] u.pValue; + } + errno_t err = memcpy_s(&u, sizeof(u), &other.u, sizeof(other.u)); CHECK_FATAL(err == EOK, "memcpy_s failed"); other.width = 0; @@ -125,6 +134,10 @@ class IntVal { } void Assign(const IntVal &other) { + if (!IsOneWord()) { + delete[] u.pValue; + } + width = other.width; sign = other.sign; @@ -148,7 +161,7 @@ class IntVal { /// @return sign or zero extended value depending on its signedness int64 GetExtValue(uint8 size = 0) const { - return sign ? GetSXTValue(size) : GetZXTValue(size); + return sign ? GetSXTValue(size) : static_cast(GetZXTValue(size)); } /// @return zero extended value @@ -156,20 +169,29 @@ class IntVal { ASSERT(IsOneSignificantWord(), "value doesn't fit into 64 bits"); uint64 value = IsOneWord() ? u.value : u.pValue[0]; // if size == 0, just return the value itself because it's already truncated for an appropriate width - return size ? (value << (wordBitSize - size)) >> (wordBitSize - size) : value; + return (size != 0) ? 
(value << (wordBitSize - size)) >> (wordBitSize - size) : value; } /// @return sign extended value int64 GetSXTValue(uint8 size = 0) const { ASSERT(IsOneSignificantWord(), "value doesn't fit into 64 bits"); uint64 value = IsOneWord() ? u.value : u.pValue[0]; - uint8 bitWidth = size ? size : width; - return static_cast(value << (wordBitSize - bitWidth)) >> (wordBitSize - bitWidth); + uint8 bitWidth = (size != 0) ? size : static_cast(width); + ASSERT(size <= width, "size should <= %u, but got %u", width, size); + // Do not rely on compiler implement-defined behavior for signed integer shifting + uint8 shift = wordBitSize - bitWidth; + value <<= shift; + // prepare leading ones for negative value + uint64 leadingOnes = allOnes << ((bitWidth < wordBitSize) ? bitWidth : (wordBitSize - 1)); + if (bitWidth != 0) { + return static_cast(GetBit(bitWidth - 1) ? ((value >> shift) | leadingOnes) : (value >> shift)); + } + return 0; } /// @return true if the (most significant bit) MSB is set bool GetSignBit() const { - return GetBit(width - 1); + return GetBit(static_cast(width - 1)); } uint16 GetNumWords() const { @@ -197,7 +219,7 @@ class IntVal { /// @return true if value is a power of 2 > 0 bool IsPowerOf2() const { if (IsOneWord()) { - return u.value && !(u.value & (u.value - 1)); + return (u.value != 0) && !(u.value & (u.value - 1)); } return CountPopulation() == 1; @@ -289,7 +311,7 @@ class IntVal { return GetExtValue() == val; } - return (*this == IntVal(val, width, sign)); + return (*this == IntVal(static_cast(val), width, sign)); } bool operator!=(const IntVal &rhs) const { @@ -591,7 +613,7 @@ class IntVal { IntVal ret = TruncOrExtend(pType); ASSERT(shift <= ret.width, "invalid shift value"); if (IsOneWord()) { - ret.u.value = ret.GetSXTValue() >> shift; + ret.u.value = (IntVal(ret.GetSXTValue(), PTY_i64) >> shift).GetExtValue(); } else { ret.WideAShrInPlace(shift); } @@ -625,14 +647,12 @@ class IntVal { ASSERT(!bitWidth || !width || GetNumWords() == 
GetNumWords(bitWidth), "trunc to given width isn't implemented"); if (IsOneWord()) { - u.value &= allOnes >> (wordBitSize - (bitWidth ? bitWidth : width)); + uint16 truncWidth = (bitWidth != 0) ? bitWidth : width; + u.value &= allOnes >> (wordBitSize - truncWidth); } else { - uint16 truncWidth = bitWidth ? bitWidth : width; - for (uint16 i = 0; i < GetNumWords(); ++i) { - uint8 emptyBits = (truncWidth >= wordBitSize) ? 0 : truncWidth; - u.pValue[i] &= allOnes >> (wordBitSize - emptyBits); - truncWidth -= wordBitSize; - } + uint16 truncWidth = (bitWidth == 0) ? bitWidth : width; + uint16 lastWord = GetNumWords() - 1; + u.pValue[lastWord] &= allOnes >> (truncWidth % wordBitSize); } if (bitWidth) { @@ -643,7 +663,7 @@ class IntVal { /// @return truncated value to the given bit-width /// @note returned value will have bit-width and sign obtained from newType IntVal Trunc(PrimType newType) const { - return Trunc(GetPrimTypeActualBitSize(newType), IsSignedInteger(newType)); + return Trunc(static_cast(GetPrimTypeActualBitSize(newType)), IsSignedInteger(newType)); } IntVal Trunc(uint16 newWidth, bool isSigned) const { @@ -657,7 +677,7 @@ class IntVal { /// @return sign or zero extended value depending on its signedness /// @note returned value will have bit-width and sign obtained from newType IntVal Extend(PrimType newType) const { - return Extend(GetPrimTypeActualBitSize(newType), IsSignedInteger(newType)); + return Extend(static_cast(GetPrimTypeActualBitSize(newType)), IsSignedInteger(newType)); } IntVal Extend(uint16 newWidth, bool isSigned) const { @@ -672,7 +692,7 @@ class IntVal { /// @return sign/zero extended value or truncated value depending on bit-width /// @note returned value will have bit-width and sign obtained from newType IntVal TruncOrExtend(PrimType newType) const { - return TruncOrExtend(GetPrimTypeActualBitSize(newType), IsSignedInteger(newType)); + return TruncOrExtend(static_cast(GetPrimTypeActualBitSize(newType)), IsSignedInteger(newType)); } 
IntVal TruncOrExtend(uint16 newWidth, bool isSigned) const { @@ -696,7 +716,7 @@ class IntVal { private: static uint8 GetNumWords(uint16 bitWidth) { - return (bitWidth + wordBitSize - 1) / wordBitSize; + return static_cast((bitWidth + wordBitSize - 1) / wordBitSize); } /// @brief compare two Wide integers @@ -787,14 +807,14 @@ class IntVal { if (IsOneWord()) { u.value = u.value | mask; } else { - uint16 word = GetNumWords(bit + 1) - 1; + uint16 word = static_cast(GetNumWords(bit + 1) - 1); u.pValue[word] = u.pValue[word] | mask; } } /// @brief set the sign bit to one and set 'sign' to true void SetSignBit() { - SetBit(width - 1); + SetBit(width - static_cast(1)); } bool GetBit(uint16 bit) const { @@ -803,7 +823,7 @@ class IntVal { return (u.value & (uint64(1) << bit)) != 0; } - uint8 word = GetNumWords(bit + 1) - 1; + uint8 word = static_cast(GetNumWords(bit + 1) - 1); uint8 wordOffset = bit % wordBitSize; return (u.pValue[word] & (uint64(1) << wordOffset)) != 0; @@ -852,7 +872,7 @@ class IntVal { static constexpr uint8 wordBitSize = sizeof(uint64) * CHAR_BIT; static constexpr uint64 allOnes = uint64(~0); - uint16 width; + uint16 width = 0; bool sign; }; diff --git a/src/mapleall/maple_util/include/mpl_number.h b/src/mapleall/maple_util/include/mpl_number.h index 6bdb4fa674c9e417b060efa4dade417665e81d4d..3dab5ad10223cd9a21526b416c567f51a3e7b305 100644 --- a/src/mapleall/maple_util/include/mpl_number.h +++ b/src/mapleall/maple_util/include/mpl_number.h @@ -77,12 +77,12 @@ class Number { template operator - std::enable_if_t>, std::is_same>::value, U>() const noexcept { + std::enable_if_t>, std::is_same>::value, U>() const noexcept { return static_cast(val); } - template>, - meta_not>>::value>> + template>, + MetaNot>>::value>> explicit operator U() const noexcept { return static_cast(val); } @@ -179,73 +179,73 @@ inline Number operator-(const Number &lhs, const Number } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool 
operator==(const Number &lhs, const U &rhs) { return lhs.get() == rhs; } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator==(const U &lhs, const Number &rhs) { return lhs == rhs.get(); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator!=(const Number &lhs, const U &rhs) { return !(lhs == rhs); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator!=(const U &lhs, const Number &rhs) { return !(lhs == rhs); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator<(const Number &lhs, const U &rhs) { return lhs.get() < rhs; } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator<(const U &lhs, const Number &rhs) { return lhs < rhs.get(); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator<=(const Number &lhs, const U &rhs) { return lhs.get() <= rhs; } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator<=(const U &lhs, const Number &rhs) { return lhs <= rhs.get(); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator>(const Number &lhs, const U &rhs) { return !(lhs <= rhs); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator>(const U &lhs, const Number &rhs) { return !(lhs <= rhs); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator>=(const Number &lhs, const U &rhs) { return !(lhs < rhs); } template, std::is_enum>::value>> + typename = std::enable_if_t, std::is_enum>::value>> inline bool operator>=(const U &lhs, const Number &rhs) { return !(lhs < rhs); } diff --git a/src/mapleall/maple_util/include/mpl_profdata.h 
b/src/mapleall/maple_util/include/mpl_profdata.h index a6f994330e5a555fb756a79a4c3f5b4c094f0f10..ad35f103c3b981bf4ba8152b0a114d06aacde743 100644 --- a/src/mapleall/maple_util/include/mpl_profdata.h +++ b/src/mapleall/maple_util/include/mpl_profdata.h @@ -21,7 +21,7 @@ #include "types_def.h" namespace maple { constexpr uint32_t HOTCALLSITEFREQ = 100; -enum UpdateFreqOp { +enum UpdateFreqOp : uint32_t { kKeepOrigFreq = 0, kUpdateOrigFreq = 0x1, kUpdateFreqbyScale = 0x2, @@ -61,38 +61,38 @@ class ProfileSummary { sumCount = sumcount; } void AddHistogramRecord(uint32_t s, uint32_t num, uint64_t mincount, uint64_t cumcounts) { - histogram.push_back(ProfileSummaryHistogram(s, num, mincount, cumcounts)); + (void)histogram.emplace_back(ProfileSummaryHistogram(s, num, mincount, cumcounts)); } void DumpSummary(); - uint64_t GetCheckSum() { + uint64_t GetCheckSum() const { return checkSum; } - uint32_t GetRun() { + uint32_t GetRun() const { return run; } - uint32_t GetTotalCount() { + uint32_t GetTotalCount() const { return totalCount; } - uint64_t GetMaxCount() { + uint64_t GetMaxCount() const { return maxCount; } - uint64_t GetSumCount() { + uint64_t GetSumCount() const { return sumCount; } - uint32_t GetHistogramLength() { + size_t GetHistogramLength() const { return histogram.size(); } void ProcessHistogram(); - MapleVector &GetHistogram() { + const MapleVector &GetHistogram() const { return histogram; } private: - uint64_t checkSum; // checksum value of module - uint32_t run; // run times - uint32_t totalCount; // number of counters - uint64_t maxCount; // max counter value in single run - uint64_t sumCount; // sum of all counters accumulated. + uint64_t checkSum = 0; // checksum value of module + uint32_t run = 0; // run times + uint32_t totalCount = 0; // number of counters + uint64_t maxCount = 0; // max counter value in single run + uint64_t sumCount = 0; // sum of all counters accumulated. 
MapleVector histogram; // record gcov_bucket_type histogram[GCOV_HISTOGRAM_SIZE]; }; @@ -116,6 +116,7 @@ class FuncProfInfo { FreqType GetFuncRealFrequency() const { return realEntryfreq; } + void SetFuncRealFrequency(FreqType freq) { realEntryfreq = freq; } @@ -123,21 +124,25 @@ class FuncProfInfo { std::unordered_map &GetStmtFreqs() { return stmtFreqs; } + FreqType GetStmtFreq(uint32_t stmtID) { if (stmtFreqs.count(stmtID) > 0) { return stmtFreqs[stmtID]; } return -1; // unstored } + void SetStmtFreq(uint32_t stmtID, FreqType freq) { if (freq == -1) { return; } stmtFreqs[stmtID] = freq; } + void EraseStmtFreq(uint32_t stmtID) { - stmtFreqs.erase(stmtID); + (void)stmtFreqs.erase(stmtID); } + void CopyStmtFreq(uint32_t newStmtID, uint32_t origStmtId, bool deleteOld = false) { ASSERT(GetStmtFreq(origStmtId) >= 0, "origStmtId no freq record"); SetStmtFreq(newStmtID, GetStmtFreq(origStmtId)); @@ -145,6 +150,7 @@ class FuncProfInfo { EraseStmtFreq(origStmtId); } } + bool IsHotCallSite(uint32_t stmtID) { if (stmtFreqs.count(stmtID) > 0) { FreqType freq = stmtFreqs[stmtID]; @@ -153,6 +159,7 @@ class FuncProfInfo { ASSERT(0, "should not be here"); return false; } + void DumpFunctionProfile(); unsigned ident; @@ -162,9 +169,9 @@ class FuncProfInfo { // Raw arc coverage counts. 
unsigned edgeCounts; MapleVector counts; - FreqType entryFreq; // record entry bb frequence + FreqType entryFreq = 0; // record entry bb frequence std::unordered_map stmtFreqs; // stmt_id is key, counter value - FreqType realEntryfreq; // function prof data may be modified after clone/inline + FreqType realEntryfreq = 0; // function prof data may be modified after clone/inline }; class MplProfileData { diff --git a/src/mapleall/maple_util/include/mpl_stacktrace.h b/src/mapleall/maple_util/include/mpl_stacktrace.h index 1666b3a6d6fdfb17b76f4041241fd2b45fe5e573..458556926e0d6ca17fdf19b0279d363e2c788203 100644 --- a/src/mapleall/maple_util/include/mpl_stacktrace.h +++ b/src/mapleall/maple_util/include/mpl_stacktrace.h @@ -36,7 +36,7 @@ namespace stacktrace { static std::string demangle(const char *mangledName) { #ifdef __unix__ int status = 0; - char *name = abi::__cxa_demangle(mangledName, NULL, NULL, &status); + char *name = abi::__cxa_demangle(mangledName, nullptr, nullptr, &status); if (status != 0) { return mangledName; @@ -44,7 +44,6 @@ static std::string demangle(const char *mangledName) { std::string res = std::string(name); std::free(name); - return res; #else return std::string(); @@ -53,15 +52,16 @@ static std::string demangle(const char *mangledName) { class Frame { public: - explicit Frame(void *addr) : addr(addr) { + explicit Frame(const void *addr) : addr(addr) { init(); } Frame(const Frame &) = default; Frame &operator=(const Frame &) = default; - ~Frame(){}; + ~Frame() { + addr = nullptr; + }; - public: std::string getFilename() const { return filename; } @@ -89,12 +89,12 @@ class Frame { friend std::string to_string(const Frame &fr) { std::stringstream ss; - if (fr.getName().empty()) + if (fr.getName().empty()) { ss << fr.getAddr(); - else + } else { ss << fr.getName(); - ss << " [" - << "0x" << std::hex << fr.getElfAddr() << "]"; + } + ss << " [" << "0x" << std::hex << fr.getElfAddr() << "]"; ss << (" in " + fr.getFilename()); return ss.str(); } @@ 
-120,14 +120,13 @@ class Frame { filename = info.dli_fname ? std::string(info.dli_fname) : "??"; name = info.dli_sname ? demangle(info.dli_sname) : "??"; - linkAddr = reinterpret_cast(linkMap->l_addr); + linkAddr = static_cast(linkMap->l_addr); #else return; #endif } - private: - const void *addr; + const void *addr = nullptr; uintptr_t linkAddr; std::string filename; @@ -137,13 +136,17 @@ class Frame { template class Stacktrace { public: - __attribute__((noinline)) Stacktrace(size_t maxDepth = 256) { + explicit __attribute__((noinline)) Stacktrace(size_t maxDepth = 256) { init(1, maxDepth); } Stacktrace(const Stacktrace &st) : frames(st.frames) {} Stacktrace &operator=(const Stacktrace &st) { + /* self-assignment check */ + if (this == &st) { + return *this; + } frames = st.frames; return *this; } @@ -151,6 +154,10 @@ class Stacktrace { Stacktrace(const Stacktrace &&st) : frames(std::move(st.frames)) {} Stacktrace &operator=(const Stacktrace &&st) { + /* self-assignment check */ + if (this == &st) { + return *this; + } frames = std::move(st.frames); return *this; } @@ -172,7 +179,6 @@ class Stacktrace { return os; } - public: using iterator = typename std::vector::iterator; using const_iterator = typename std::vector::const_iterator; using reverse_iterator = typename std::reverse_iterator; @@ -201,28 +207,27 @@ class Stacktrace { private: std::vector frames; - private: void __attribute__((noinline)) init(size_t nskip, size_t maxDepth) { ++nskip; // skip current frame - typedef typename std::allocator_traits::template rebind_alloc allocator_void_ptr; - allocator_void_ptr allocator; + using AllocatorVoidPtr = typename std::allocator_traits::template rebind_alloc; + AllocatorVoidPtr allocator; size_t bufferSize = maxDepth; void **buffer = allocator.allocate(bufferSize); - size_t nframes = collectFrames(buffer, maxDepth); + size_t nframes = CollectFrames(buffer, static_cast(maxDepth)); ++nskip; // skip frame of call "colectFrames(...)" if (nskip <= nframes) { for 
(size_t i = 0; i < nframes - nskip; ++i) { - frames.emplace_back(buffer[nskip + i]); + (void)frames.emplace_back(buffer[nskip + i]); } } allocator.deallocate(buffer, bufferSize); } - static size_t __attribute__((noinline)) collectFrames(void **outbuf, int maxFrames) { + static size_t __attribute__((noinline)) CollectFrames(void **outbuf, int maxFrames) { #ifdef __unix__ return backtrace(outbuf, maxFrames); #else @@ -233,7 +238,7 @@ class Stacktrace { } // namespace stacktrace -template > +template > using Stacktrace = stacktrace::Stacktrace; } // namespace maple diff --git a/src/mapleall/maple_util/include/namemangler.h b/src/mapleall/maple_util/include/namemangler.h index e453605df9edc6c104513cccb6e33de413b86370..0f6ac55e5aee83e4d356928d9047144cbf911f06 100644 --- a/src/mapleall/maple_util/include/namemangler.h +++ b/src/mapleall/maple_util/include/namemangler.h @@ -218,6 +218,8 @@ uint32_t EncodeULEB128(uint64_t value, std::ofstream &out); uint64_t DecodeULEB128(const uint8_t *p, unsigned *n = nullptr, const uint8_t *end = nullptr); int64_t DecodeSLEB128(const uint8_t *p, unsigned *n = nullptr, const uint8_t *end = nullptr); constexpr int32_t kZeroAsciiNum = 48; +constexpr int32_t kAAsciiNum = 65; +constexpr int32_t kaAsciiNum = 97; } // namespace namemangler #endif diff --git a/src/mapleall/maple_util/include/profile.h b/src/mapleall/maple_util/include/profile.h index 10a06c5c51025441668813c5ba01ba0c07cb22b2..130a017e1771ab60619bde73fad1f37b2fc9b24e 100644 --- a/src/mapleall/maple_util/include/profile.h +++ b/src/mapleall/maple_util/include/profile.h @@ -63,7 +63,7 @@ class Profile { bool CheckReflectionStrHot(const std::string &str, uint8 &layoutType) const; void InitPreHot(); // default get all kind profile - bool DeCompress(const std::string &path, const std::string &dexName, ProfileType type = kAll); + bool DeCompress(const std::string &path, const std::string &dexNameInner, ProfileType type = kAll); const std::unordered_map &GetFunctionProf() const; bool 
GetFunctionBBProf(const std::string &funcName, BBInfo &result); size_t GetLiteralProfileSize() const; @@ -114,7 +114,7 @@ class Profile { void ParseReflectionStr(const char *data, int32 fileNum); void ParseFunc(const char *data, int32 fileNum); void ParseLiteral(const char *data, const char *end); - void ParseIRFuncDesc(const char *data, int fileNum); + void ParseIRFuncDesc(const char *data, int32 fileNum); void ParseCounterTab(const char *data, int32 fileNum); }; } // namespace maple diff --git a/src/mapleall/maple_util/include/profile_type.h b/src/mapleall/maple_util/include/profile_type.h index ca3b2e20508dfd84fa8324ee1a796b7c1b51d27f..603d9941ab771d910d04a470fface2669580a502 100644 --- a/src/mapleall/maple_util/include/profile_type.h +++ b/src/mapleall/maple_util/include/profile_type.h @@ -15,6 +15,7 @@ #ifndef PROFILE_TYPE_H #define PROFILE_TYPE_H +#include /* Anonymous namespace to restrict visibility of utility functions */ namespace { enum ProfileFileType : uint8_t { diff --git a/src/mapleall/maple_util/include/safe_cast.h b/src/mapleall/maple_util/include/safe_cast.h index fce8a918bdb46720ef1fa96f041b83bf45e78383..0f6fe79ca7f3b8367b2070d0e7b793e458469514 100644 --- a/src/mapleall/maple_util/include/safe_cast.h +++ b/src/mapleall/maple_util/include/safe_cast.h @@ -45,7 +45,7 @@ struct InstanceOfImpl -struct EnabledSafeCast : public utils::meta_or, SafeCastCondition>::type {}; +struct EnabledSafeCast : public utils::MetaOr, SafeCastCondition>::type {}; } template &GetOccurrences() { + const std::vector &GetOccurrences() const { return occurrences; } - size_t GetLength() { + const size_t GetLength() const { return length; } @@ -45,7 +45,7 @@ class SubStringOccurrences { class SuffixArray { public: - SuffixArray(std::vector src, size_t length, size_t size) + SuffixArray(const std::vector src, size_t length, size_t size) : length(length), alphabetSize(size), src(src), @@ -71,25 +71,25 @@ class SuffixArray { void Run(bool collectSubString = false); void Dump(); 
- std::vector &GetSuffixArray() { + const std::vector &GetSuffixArray() const { return suffixArray; } - std::vector &GetHeightArray() { + const std::vector &GetHeightArray() const { return heightArray; } - std::vector &GetRepeatedSubStrings() { + const std::vector &GetRepeatedSubStrings() const { return repeatedSubStrings; } private: - size_t length; + size_t length = 1; size_t alphabetSize; size_t lmsSubStringCount = 0; size_t lmsCharacterCount = 1; bool hasSameLmsSubstring = false; - std::vector src; + const std::vector src; std::vector suffixType; std::vector lmsSubStringPosition; std::vector lmsCharacterString; diff --git a/src/mapleall/maple_util/include/utils.h b/src/mapleall/maple_util/include/utils.h index 6af5ecfb73dba574a39bd8164f93446d89a4c87b..ea369c043d61bfc3b44d7be7641f2a238b777a58 100644 --- a/src/mapleall/maple_util/include/utils.h +++ b/src/mapleall/maple_util/include/utils.h @@ -19,6 +19,10 @@ namespace maple { namespace utils { +const int kNumLimit = 10; +constexpr int32_t kAAsciiNum = 65; +constexpr int32_t kaAsciiNum = 97; + // Operations on char constexpr bool IsDigit(char c) { return (c >= '0' && c <= '9'); @@ -71,10 +75,10 @@ struct ToDigitImpl<16, T> { return c - '0'; } if (c >= 'a' && c <= 'f') { - return c - 'a' + 10; + return static_cast(c - kaAsciiNum + kNumLimit); } if (c >= 'A' && c <= 'F') { - return c - 'A' + 10; + return static_cast(c - kAAsciiNum + kNumLimit); } return std::numeric_limits::max(); } diff --git a/src/mapleall/maple_util/include/utils/meta.h b/src/mapleall/maple_util/include/utils/meta.h index cc09ba766691e239db6ec7e5024aa18f2dc96992..f3367ae5496e6bf37c4db661f9061444fb7c0519 100644 --- a/src/mapleall/maple_util/include/utils/meta.h +++ b/src/mapleall/maple_util/include/utils/meta.h @@ -18,95 +18,87 @@ namespace maple { namespace utils { template -struct meta_and - : public std::conditional_t {}; +struct MetaAnd : public std::conditional_t {}; template -struct meta_or - : public std::conditional_t {}; +struct MetaOr : 
public std::conditional_t {}; template -struct meta_not - : public std::integral_constant(T::value)>::type {}; +struct MetaNot : public std::integral_constant(T::value)>::type {}; template -struct is_signed; +struct IsSigned; template -struct is_signed +struct IsSigned : public std::is_signed::type {}; template -struct is_signed - : public meta_and, std::is_signed>::type {}; +struct IsSigned + : public MetaAnd, std::is_signed>::type {}; template -constexpr bool is_signed_v = is_signed::value; +constexpr bool kIsSignedV = IsSigned::value; template -struct is_unsigned; +struct IsUnsigned; template -struct is_unsigned +struct IsUnsigned : public std::is_unsigned::type {}; template -struct is_unsigned - : public meta_and, std::is_unsigned>::type {}; +struct IsUnsigned + : public MetaAnd, std::is_unsigned>::type {}; template -constexpr bool is_unsigned_v = is_unsigned::value; +constexpr bool kIsUnsignedV = IsUnsigned::value; template -struct is_same_sign - : public meta_or, is_unsigned>::type {}; +struct IsSameSign : public MetaOr, IsUnsigned>::type {}; template -struct is_diff_sign - : public meta_not>::type {}; +struct IsDiffSign : public MetaNot>::type {}; template -struct is_pointer; +struct IsPointer; template -struct is_pointer +struct IsPointer : public std::is_pointer::type {}; template -struct is_pointer - : public meta_and, is_pointer>::type {}; +struct IsPointer + : public MetaAnd, IsPointer>::type {}; template -constexpr bool is_pointer_v = is_pointer::value; +constexpr bool kIsPointerV = IsPointer::value; template -struct const_of - : public meta_and, std::is_same, U>>::type {}; +struct ConstOf : public MetaAnd, std::is_same, U>>::type {}; template -constexpr bool const_of_v = const_of::value; +constexpr bool kConstOfV = ConstOf::value; template struct is_ncv_same : public std::is_same, std::remove_cv_t>::type {}; template -constexpr bool is_ncv_same_v = is_ncv_same::value; +constexpr bool kIsNcvSameV = is_ncv_same::value; namespace ptr { -template >> 
-struct const_of - : public utils::const_of, std::remove_pointer_t>::type {}; +template >> +struct ConstOf : public utils::ConstOf, std::remove_pointer_t>::type {}; -template >> -constexpr bool const_of_v = const_of::value; +template >> +constexpr bool constOfV = ConstOf::value; -template >> -struct is_ncv_same - : public utils::is_ncv_same, std::remove_pointer_t>::type {}; +template >> +struct IsNcvSame : public utils::is_ncv_same, std::remove_pointer_t>::type {}; -template >> -constexpr bool is_ncv_same_v = is_ncv_same::value; +template >> +constexpr bool kIsNcvSameV = IsNcvSame::value; } }} #endif // MAPLE_UTIL_INCLUDE_UTILS_META_H diff --git a/src/mapleall/maple_util/include/version.h b/src/mapleall/maple_util/include/version.h index a9f5ba8f866914b93947bcf8ce4c0580d10b3b70..9db8f96024735ded4bcf7ec91cfd231e4f040fda 100644 --- a/src/mapleall/maple_util/include/version.h +++ b/src/mapleall/maple_util/include/version.h @@ -64,7 +64,7 @@ class Version { #ifdef ANDROID // compatible for Android build script static constexpr const uint32_t kMajorVersion = 4; -#elif MAJOR_VERSION +#elif defined(MAJOR_VERSION) && MAJOR_VERSION static constexpr const uint32_t kMajorVersion = MAJOR_VERSION; #else // MAJOR_VERSION static constexpr const uint32_t kMajorVersion = 1; diff --git a/src/mapleall/maple_util/src/chain_layout.cpp b/src/mapleall/maple_util/src/chain_layout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c4ad29e07d41370d03f2a152c8833ca525e04b59 --- /dev/null +++ b/src/mapleall/maple_util/src/chain_layout.cpp @@ -0,0 +1,761 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "chain_layout.h" + +namespace maple { +// Multiple loops may share the same header, we try to find the best unplaced node in the loop +// This function can be improved +NodeType *NodeContext::GetBestStartBBInLoop() const { + // If the loop header has not been placed, take it as start BB of the loop chain + auto *header = loop->GetHeader(); + auto *headerChain = node2chain[header->GetID()]; + if (headerChain->size() == 1) { + return header; + } + // take inner loop chain tail BB as start BB + if (headerChain->size() > 1 && Contains(*headerChain->GetTail())) { + return headerChain->GetTail(); + } + std::vector loopNodeIds; + loop->GetLoopMembers(loopNodeIds); + for (auto nodeId : loopNodeIds) { + if (node2chain[nodeId]->size() == 1) { + return func.GetNodeById(nodeId); + } + } + return nullptr; +} + +NodeType *NodeContext::GetBestStartBBOutOfLoop(const MapleSet &readyChains) const { + // For local context out of loops, we take the ready BB with the biggest frequency as the best start BB. 
+ NodeType *bestStart = nullptr; + FreqType maxBBFreq = -1; + for (auto *readyChain : readyChains) { + if (readyChain->size() > 1) { + continue; // the cold chain has been laid out in the inner loop, skip it + } + auto *node = readyChain->GetHeader(); + auto bbFreq = node->GetNodeFrequency(); + if (bbFreq > maxBBFreq) { + maxBBFreq = bbFreq; + bestStart = node; + } else if (bbFreq == maxBBFreq && bestStart != nullptr && + node2chain[node->GetID()]->GetId() < node2chain[bestStart->GetID()]->GetId()) { + bestStart = node; + } + } + return bestStart; +} + +void NodeContext::InitReadyChains(MapleSet &readyChains) { + // (1) Global context + if (IsGlobal()) { + const auto &end = func.end(); + for (auto &it = func.begin(); it != end; ++it) { + NodeType *node = *it; + if (node == nullptr) { + continue; + } + auto nodeId = node->GetID(); + NodeChain *chain = node2chain[nodeId]; + if (chain->IsReadyToLayout(*this)) { + (void)readyChains.insert(chain); + } + } + return; + } + // (2) Local context in the loop + if (IsInLoop()) { + std::vector loopNodeIds; + loop->GetLoopMembers(loopNodeIds); + for (auto nodeId : loopNodeIds) { + if (!Contains(nodeId)) { + continue; + } + NodeChain *chain = node2chain[nodeId]; + if (chain->IsReadyToLayout(*this)) { + (void)readyChains.insert(chain); + } + } + return; + } + // (3) Local context not in any loops + for (size_t i = 0; i < inNodes->size(); ++i) { + bool inContext = Contains(static_cast(i)); + if (!inContext) { + continue; + } + NodeChain *chain = node2chain[i]; + if (chain->IsReadyToLayout(*this)) { + (void)readyChains.insert(chain); + } + } +} + +void ChainLayout::InitLoopsForME(IdentifyLoops &identifyLoops) { + auto &meLoops = identifyLoops.GetMeLoops(); + if (meLoops.empty()) { + return; + } + loops.resize(meLoops.size(), nullptr); + for (size_t i = 0; i < meLoops.size(); ++i) { + auto *meLoopWrapper = layoutAlloc.New(*meLoops[i]); + loops[i] = meLoopWrapper; + } +} + +void ChainLayout::InitLoopsForCG(MapleVector &cgLoops) { + 
if (cgLoops.empty()) { + return; + } + loops.resize(cgLoops.size(), nullptr); + nodesInLoop = layoutAlloc.New>(func.size(), false, layoutAlloc.Adapter()); + for (size_t i = 0; i < cgLoops.size(); ++i) { + auto *cgLoop = cgLoops[i]; + auto *cgLoopWrapper = layoutAlloc.New(*cgLoop); + loops[i] = cgLoopWrapper; + const auto &loopBBs = cgLoop->GetLoopMembers(); + for (auto *bb : loopBBs) { + (*nodesInLoop)[bb->GetID()] = true; + } + } +} + +// Create chains for each BB +void ChainLayout::InitChains() { + uint32 id = 0; + node2chain.resize(func.size(), nullptr); + const auto &end = func.end(); + for (auto &it = func.begin(); it != end; ++it) { + auto *node = *it; + if (node == nullptr) { + continue; + } + // NodeChain constructor will update node2chain + (void)layoutAlloc.GetMemPool()->New(layoutAlloc, node2chain, *node, id++); + } +} + +void GetAllDomChildren(DomWrapperBase &dom, uint32 bbId, std::vector &allChildren) { + const auto &children = dom.GetDomChildren(bbId); + for (auto id : children) { + allChildren.push_back(id); + GetAllDomChildren(dom, id, allChildren); + } +} + +void ChainLayout::InitColdNodes() { + if (func.IsMeFunc()) { + InitColdNodesForME(); + } else { + InitColdNodesForCG(); + } + if (debugChainLayout && hasColdNode) { + LogInfo::MapleLogger() << "Cold BBs in " << func.GetName() << ": "; + const auto &end = func.end(); + for (auto &it = func.begin(); it != end; ++it) { + auto *node = *it; + if (IsColdNode(*node)) { + LogInfo::MapleLogger() << node->GetID() << ", "; + } + } + LogInfo::MapleLogger() << std::endl; + } +} + +void ChainLayout::InitColdNodesForCG() { + // If node freq is smaller than the threshold, it will be marked as cold + constexpr FreqType coldCntThresh = 10; + CHECK_FATAL(!func.IsMeFunc(), "must be"); + if (!hasRealProfile) { // Only consider real profile for now + return; + } + const auto &end = func.end(); + for (auto &it = func.begin(); it != end; ++it) { + auto *node = *it; + if (node == nullptr) { + continue; + } + auto 
nodeFreq = node->GetNodeFrequency(); + if (nodeFreq > coldCntThresh) { + continue; + } + if (coldNodes == nullptr) { + coldNodes = layoutAlloc.New>(func.size(), false, layoutAlloc.Adapter()); + } + hasColdNode = true; + if (!IsNodeInLoop(*node)) { + hasColdNodeOutOfLoop = true; + } + (*coldNodes)[node->GetID()] = true; + } +} + +// Mark all cold basic block. +// Now we only mark unlikely BB and it's all dom children as cold blocks. This can be +// enhanced when real profile data is available. +void ChainLayout::InitColdNodesForME() { + CHECK_FATAL(func.IsMeFunc(), "must be"); + std::vector immediateColdBBs; + const auto &end = func.end(); + for (auto &it = func.begin(); it != end; ++it) { + auto *node = *it; + if (node == nullptr) { + continue; + } + auto *bb = static_cast(node); + if (bb->IsImmediateUnlikelyBB()) { + immediateColdBBs.push_back(bb->GetID()); + if (coldNodes == nullptr) { + coldNodes = layoutAlloc.New>(func.size(), false, layoutAlloc.Adapter()); + } + hasColdNode = true; + if (!IsNodeInLoop(*bb)) { + hasColdNodeOutOfLoop = true; + } + (*coldNodes)[bb->GetID()] = true; + std::vector allChildren; + GetAllDomChildren(dom, bb->GetID(), allChildren); + for (auto id : allChildren) { + (*coldNodes)[id] = true; + } + } + } + if (debugChainLayout && hasColdNode) { + LogInfo::MapleLogger() << "Immediate Cold BBs in " << func.GetName() << ": "; + for (auto id : immediateColdBBs) { + LogInfo::MapleLogger() << id << ", "; + } + } +} + +void ChainLayout::InitFreqRpoNodeList() { + auto end = dom.rpo_end(); + uint32 i = 0; + for (auto it = dom.rpo_begin(); it != end; ++it) { + NodeType *node = *it; + CHECK_NULL_FATAL(node); + NodeOrderElem nodeElem(node->GetNodeFrequency(), i++, node); + freqRpoNodeList.emplace(nodeElem); + } +} + +void ChainLayout::PostBuildChainForCGFunc(NodeChain &entryChain) { + if (func.IsMeFunc()) { + return; + } + maplebe::CGFunc *f = &static_cast(func).GetFunc(); + /* merge clean up */ + if (f->GetCleanupBB()) { + auto *cleanup = 
node2chain[f->GetCleanupBB()->GetId()]; + if (readyToLayoutChains.find(cleanup) == readyToLayoutChains.end()) { + LogInfo::MapleLogger() << "clean up bb is not in ready layout "; + } + CHECK_FATAL(cleanup->GetHeader() == f->GetCleanupBB(), "more than one cleanup"); + if (maplebe::CGOptions::DoEnableHotColdSplit()) { + auto *header = static_cast(cleanup->GetHeader()); + header->SetColdSection(); + } + entryChain.MergeFrom(cleanup); + } + /* merge symbol label in C which is not in control flow */ + std::vector labelBB; + for (auto *curbb = f->GetFirstBB(); curbb != nullptr; curbb = curbb->GetNext()) { + if (curbb->IsUnreachable()) { + /* delete unreachable bb in cfgo */ + ASSERT(false, "check unreachable bb"); + CHECK_FATAL_FALSE("check unreachable bb"); + continue; + } + if (!func.IsNodeInCFG(static_cast(curbb))) { + continue; + } + if (!entryChain.Contains(*curbb)) { + if (curbb->GetPreds().empty() && maplebe::CGCFG::InSwitchTable(curbb->GetLabIdx(), *f)) { + labelBB.push_back(curbb); + // last bb which is not in control flow + } else if (curbb->GetPreds().empty() && curbb->GetSuccs().empty() && f->GetLastBB() == curbb) { + labelBB.push_back(curbb); + } else { + LogInfo::MapleLogger() << "In function " << f->GetName() << " bb " << curbb->GetId() << " is no in chain\n"; + } + } + } + + for (auto bb : labelBB) { + auto *labelchain = node2chain[bb->GetID()]; + if (readyToLayoutChains.find(labelchain) == readyToLayoutChains.end()) { + LogInfo::MapleLogger() << "label bb is not in ready layout "; + } + entryChain.MergeFrom(labelchain); + if (maplebe::CGOptions::DoEnableHotColdSplit()) { + bb->SetColdSection(); + } + bb->SetNext(nullptr); + bb->SetPrev(nullptr); + } +} + +static void AddLayoutRange(uint32 &range, const std::initializer_list &rangeKindList) { + for (auto it = rangeKindList.begin(); it != rangeKindList.end(); ++it) { + range |= static_cast(*it); + } +} + +static void RemoveLayoutRange(uint32 &range, const std::initializer_list &rangeKindList) { + for (auto 
it = rangeKindList.begin(); it != rangeKindList.end(); ++it) { + range &= ~static_cast(*it); + } +} + +static bool IsTargetRange(uint32 range, LayoutRangeKind candKind) { + return (range & static_cast(candKind)) != 0; +} + +void ChainLayout::BuildChainForFunc() { + int32 validBBNumTmp = 0; + const auto &end = func.end(); + for (auto &it = func.begin(); it != end; ++it) { + auto *node = *it; + if (!func.IsNodeInCFG(node)) { + continue; + } + ++validBBNumTmp; + } + CHECK_FATAL(validBBNumTmp > 0, "BBNum must > 0"); + uint32 validBBNum = static_cast(validBBNumTmp); + if (debugChainLayout) { + LogInfo::MapleLogger() << "\n[Chain layout] " << func.GetName() << ", valid bb num: " << validBBNum << std::endl; + LogInfo::MapleLogger() << "layoutColdPath: " << layoutColdPath << std::endl; + } + InitChains(); + if (layoutColdPath) { + InitColdNodes(); + } + const bool cgRealProfile = !func.IsMeFunc() && hasRealProfile; + if (cgRealProfile) { + InitFreqRpoNodeList(); + } + + BuildChainForLoops(); + BuildChainForColdPathInFunc(); + + if (debugChainLayout) { + LogInfo::MapleLogger() << "\n[BuildChainForFunc] " << func.GetName() << std::endl; + } + uint32 range = static_cast(LayoutRangeKind::kRangeAll); + if (!cgRealProfile) { + RemoveLayoutRange(range, { LayoutRangeKind::kRangeFreqRpoList }); + } + auto *entryChain = BuildChainInContext(nullptr, nullptr, range); + CHECK_FATAL(entryChain != nullptr, "build chain failure"); + + PostBuildChainForCGFunc(*entryChain); + // To sure all of BBs have been laid out + CHECK_FATAL(entryChain->size() == validBBNum, "has any BB not been laid out?"); +} + +// Layout cold BBs out of loops +void ChainLayout::BuildChainForColdPathInFunc() { + if (!layoutColdPath || !hasColdNodeOutOfLoop) { + return; + } + auto *inBBs = layoutAlloc.GetMemPool()->New>(func.size(), false, layoutAlloc.Adapter()); + int32 numNodes = 0; + for (size_t i = 0; i < coldNodes->size(); ++i) { + if (!(*coldNodes)[i]) { + continue; + } + auto *node = 
func.GetNodeById(static_cast(i)); + if (!IsNodeInLoop(*node)) { + (*inBBs)[i] = true; + ++numNodes; + } + } + if (numNodes == 0) { + return; + } + if (debugChainLayout) { + LogInfo::MapleLogger() << "\n[BuildChainForColdPathInFunc] numBBs: " << numNodes << std::endl; + } + uint32 range = 0; + AddLayoutRange(range, { LayoutRangeKind::kRangeSucc, LayoutRangeKind::kRangeReadyList }); + while (numNodes > 0) { + auto *chain = BuildChainInContext(inBBs, nullptr, range, NodeContextTemperature::kCold); + if (chain == nullptr) { + break; + } + numNodes -= static_cast(chain->size()); + } +} + +void ChainLayout::BuildChainForLoops() { + if (loops.empty()) { + return; + } + // sort loops from inner most to outer most + // need use the same sort rules as prediction? + std::stable_sort(loops.begin(), loops.end(), [](const auto *loop1, const auto *loop2) { + return loop1->GetLoopDepth() > loop2->GetLoopDepth(); + }); + // build chain for loops one by one + auto *inBBs = layoutAlloc.GetMemPool()->New>(func.size(), false, layoutAlloc.Adapter()); + for (size_t i = 0; i < loops.size(); ++i) { + auto *loop = loops[i]; + if (debugChainLayout) { + LogInfo::MapleLogger() << "\n[BuildChainForLoop] index: " << i << ", depth: " << + loop->GetLoopDepth() << std::endl; + LogInfo::MapleLogger() << "Loop BBs: "; + std::vector loopNodeIds; + loop->GetLoopMembers(loopNodeIds); + for (auto nodeId : loopNodeIds) { + LogInfo::MapleLogger() << nodeId << ", "; + } + LogInfo::MapleLogger() << std::endl; + } + if (layoutColdPath) { + BuildChainForLoop(*loop, *inBBs, NodeContextTemperature::kNonCold); + BuildChainForLoop(*loop, *inBBs, NodeContextTemperature::kCold); + } else { + BuildChainForLoop(*loop, *inBBs, NodeContextTemperature::kAll); + } + } +} + +// Collect blocks in the given `loop` that are to be laid out. 
+bool ChainLayout::FindNodesToLayoutInLoop(const LoopWrapperBase &loop, NodeContextTemperature temperature, + MapleVector &inBBs) { + std::fill(inBBs.begin(), inBBs.end(), false); + bool found = false; + std::vector loopNodeIds; + loop.GetLoopMembers(loopNodeIds); + for (auto nodeId : loopNodeIds) { + if (temperature == NodeContextTemperature::kAll) { + inBBs[nodeId] = true; + found = true; + continue; + } + bool isColdNode = IsColdNode(nodeId); + if (temperature == NodeContextTemperature::kCold && isColdNode) { + inBBs[nodeId] = true; + found = true; + continue; + } + if (temperature == NodeContextTemperature::kNonCold && !isColdNode) { + inBBs[nodeId] = true; + found = true; + continue; + } + } + return found; +} + +NodeChain *ChainLayout::BuildChainInContext(MapleVector *inBBs, LoopWrapperBase *loop, uint32 range, + NodeContextTemperature contextTemperature) { + NodeContext context(func, node2chain, inBBs, loop, contextTemperature); + layoutContext = &context; + // Init ready chains + layoutContext->InitReadyChains(readyToLayoutChains); + + // Find best starting BB in context + auto *startNode = layoutContext->GetBestStartBB(readyToLayoutChains); + if (startNode == nullptr) { + return nullptr; // all blocks in the loop have been laid out, just return + } + // clear ready list for kLocalOutOfLoop + if (layoutContext->GetKind() == NodeContextKind::kLocalOutOfLoop) { + readyToLayoutChains.clear(); + } + NodeChain *startChain = node2chain[startNode->GetID()]; + DoBuildChain(*startNode, *startChain, range); + if (layoutContext->GetTemperature() == NodeContextTemperature::kCold) { + startChain->SetColdChain(true); + coldChains.push_back(startChain); + } + MayDumpFormedChain(*startChain); + readyToLayoutChains.clear(); + return startChain; +} + +void ChainLayout::BuildChainForLoop(LoopWrapperBase &loop, MapleVector &inBBs, + NodeContextTemperature temperature) { + bool found = FindNodesToLayoutInLoop(loop, temperature, inBBs); + if (!found) { + return; // inBBs is 
empty, just return + } + uint32 range = static_cast(LayoutRangeKind::kRangeAll); + if (func.IsMeFunc()) { + RemoveLayoutRange(range, { LayoutRangeKind::kRangeFreqRpoList }); + } + (void)BuildChainInContext(&inBBs, &loop, range, temperature); +} + +void ChainLayout::MayDumpFormedChain(const NodeChain &chain) const { + if (!debugChainLayout) { + return; + } + const char *contextKindName = layoutContext->GetKindName(); + LogInfo::MapleLogger() << "(" << func.GetName() << ") " << "Finish forming " << contextKindName << " chain: "; + chain.Dump(); +} + +void ChainLayout::DoBuildChain(const NodeType &header, NodeChain &chain, uint32 range) { + CHECK_FATAL(node2chain[header.GetID()] == &chain, "node2chain mis-match"); + auto *node = chain.GetTail(); + auto *bestSucc = GetBestSucc(*node, chain, range, considerBetterPred); + while (bestSucc != nullptr) { + NodeChain *succChain = node2chain[bestSucc->GetID()]; + succChain->UpdateSuccChainBeforeMerged(chain, *layoutContext, readyToLayoutChains); + chain.MergeFrom(succChain); + readyToLayoutChains.erase(succChain); + node = chain.GetTail(); + bestSucc = GetBestSucc(*node, chain, range, considerBetterPred); + } +} + +bool ChainLayout::IsCandidateSucc(const NodeType &node, const NodeType &succ) const { + if (!layoutContext->Contains(succ)) { // succ must be in the current context + return false; + } + if (func.IsMeFunc()) { + const auto &meSucc = static_cast(succ); + if (meSucc.GetKind() == kBBNoReturn) { + return false; // noreturn BB is unlikely taken + } + } + if (node2chain[succ.GetID()] == node2chain[node.GetID()]) { // bb and succ should belong to different chains + return false; + } + if (succ.GetID() == 1) { // special case, exclude common exit BB + return false; + } + auto *chain = node2chain[succ.GetID()]; + if (chain->IsColdChain()) { + return false; // special case, cold chain + } + return true; +} + +// Whether succ has a better layout pred than bb +bool ChainLayout::HasBetterLayoutPred(const NodeType &node, 
NodeType &succ) { + std::vector predList; + succ.GetInNodes(predList); + // predList.size() may be 0 if bb is common entry BB + if (predList.size() <= 1) { + return false; + } + FreqType sumEdgeFreq = succ.GetNodeFrequency(); + double hotEdgeFreqPercent = 0.8; // should further fine tuning + if (hasRealProfile) { + const double freqForRealProfile = 0.6; + hotEdgeFreqPercent = freqForRealProfile; + } + FreqType hotEdgeFreq = static_cast(static_cast(sumEdgeFreq) * hotEdgeFreqPercent); + // if edge freq(bb->succ) contribute more than hotEdgeFreqPercent to succ block freq, no better layout pred than bb + for (uint32 i = 0; i < predList.size(); ++i) { + if (predList[i] == &node) { + continue; + } + FreqType edgeFreq = predList[i]->GetEdgeFrequency(succ); + if (edgeFreq > (sumEdgeFreq - hotEdgeFreq)) { + return true; + } + } + return false; +} + +NodeChain *ChainLayout::GetNextColdChain(const NodeChain &curChain) { + // only for layoutInFunc + if (!layoutContext->IsGlobal()) { + return nullptr; + } + NodeChain *nextColdChain = nullptr; + FreqType maxBBFreq = -1; + // Find the unlaid cold chain with max bb freq. 
+ for (auto *coldChain : coldChains) { + if (coldChain == &curChain || coldChain->empty()) { + continue; // skip laid chain and empty chain (a empty chain may be produced by NodeChain::MergeFrom) + } + auto *node = coldChain->GetHeader(); + auto nodeFreq = node->GetNodeFrequency(); + if (nodeFreq > maxBBFreq) { + nextColdChain = coldChain; + maxBBFreq = nodeFreq; + } + } + if (nextColdChain == nullptr) { + return nullptr; + } + CHECK_FATAL(nextColdChain->IsColdChain(), "not cold chain"); + return nextColdChain; +} + +NodeType *ChainLayout::FindNextNodeInSucc(NodeType &node, bool considerBetterPredForSucc) { + NodeType *bestSucc = nullptr; + FreqType bestEdgeFreq = 0; + std::vector succVec; + node.GetOutNodes(succVec); + for (uint32 i = 0; i < succVec.size(); ++i) { + auto *succ = succVec[i]; + if (!IsCandidateSucc(node, *succ)) { + continue; + } + if (considerBetterPredForSucc && HasBetterLayoutPred(node, *succ)) { + continue; + } + FreqType currEdgeFreq = node.GetEdgeFrequency(i); // attention: entryBB->succFreq[i] is always 0 + if (node.GetID() == 0) { // special case for common entry BB + std::vector commonEntrySuccVec; + node.GetOutNodes(commonEntrySuccVec); + CHECK_FATAL(commonEntrySuccVec.size() == 1, "common entry BB should not have more than 1 succ"); + bestSucc = succ; + break; + } + if (currEdgeFreq > bestEdgeFreq) { // find max edge freq + bestEdgeFreq = currEdgeFreq; + bestSucc = succ; + } + } + return bestSucc; +} + +NodeType *ChainLayout::FindNextNodeInReadyList(NodeType &node) const { + NodeType *bestSucc = nullptr; + FreqType bestFreq = 0; // need to change to -1? 
+ for (auto it = readyToLayoutChains.begin(); it != readyToLayoutChains.end(); ++it) { + NodeChain *readyChain = *it; + auto *header = readyChain->GetHeader(); + if (!IsCandidateSucc(node, *header)) { + continue; + } + FreqType subBestFreq = 0; + std::vector predVec; + header->GetInNodes(predVec); + for (auto *pred : predVec) { + FreqType curFreq = pred->GetEdgeFrequency(*header); + if (curFreq > subBestFreq) { + subBestFreq = curFreq; + } + } + if (subBestFreq > bestFreq) { + bestFreq = subBestFreq; + bestSucc = header; + } else if (subBestFreq == bestFreq && bestSucc != nullptr && + node2chain[header->GetID()]->GetId() < node2chain[bestSucc->GetID()]->GetId()) { + bestSucc = header; + } + } + return bestSucc; +} + +void ChainLayout::MayDumpSelectLog(const NodeType &curNode, const NodeType &nextNode, const std::string &hint) { + if (!debugChainLayout) { + return; + } + LogInfo::MapleLogger() << "Select [" << hint << "]: "; + LogInfo::MapleLogger() << curNode.GetID() << " -> " << nextNode.GetID() << std::endl; +} + +NodeType *ChainLayout::FindNextNodeInFreqRpotList(const NodeChain &chain) const { + for (auto freqRpoElem : freqRpoNodeList) { + if (freqRpoElem.frequency > 0) { + auto *candNode = freqRpoElem.node; + auto *candChain = node2chain[candNode->GetID()]; + if (layoutContext->Contains(*candNode) && candChain != &chain) { + return candNode; + } + } else { + break; + } + } + return nullptr; +} + +NodeType *ChainLayout::FindNextNodeInRpotList(const NodeChain &chain) { + bool searchedAgain = false; + size_t rpoSize = dom.rpo_size(); + auto rpoBegin = dom.rpo_begin(); + for (size_t i = rpoSearchPos; i < rpoSize; ++i) { + auto *rpoNode = *(rpoBegin + static_cast(i)); + auto *candNode = func.GetNodeById(rpoNode->GetID()); + auto *candChain = node2chain[candNode->GetID()]; + if (layoutContext->Contains(*candNode) && candChain != &chain && !candChain->IsColdChain()) { + rpoSearchPos = static_cast(i); + return candNode; + } + if (i == rpoSize - 1 && !searchedAgain) { + 
i = 0; + searchedAgain = true; + } + } + return nullptr; +} + +// considerBetterPredForSucc: whether consider better layout pred for succ, we found better +// performance when this argument is disabled +NodeType *ChainLayout::GetBestSucc(NodeType &node, const NodeChain &chain, uint32 range, + bool considerBetterPredForSucc) { + CHECK_FATAL(node2chain[node.GetID()] == &chain, "node2chain mis-match"); + NodeType *bestSucc = nullptr; + // search in succ + if (IsTargetRange(range, LayoutRangeKind::kRangeSucc)) { + bestSucc = FindNextNodeInSucc(node, considerBetterPredForSucc); + if (bestSucc != nullptr) { + MayDumpSelectLog(node, *bestSucc, "range1 succ "); + return bestSucc; + } + } + + // search in readyToLayoutChains + if (IsTargetRange(range, LayoutRangeKind::kRangeReadyList)) { + bestSucc = FindNextNodeInReadyList(node); + if (bestSucc != nullptr) { + MayDumpSelectLog(node, *bestSucc, "range2 ready"); + return bestSucc; + } + } + + // search left part in context by profile + if (IsTargetRange(range, LayoutRangeKind::kRangeFreqRpoList)) { + bestSucc = FindNextNodeInFreqRpotList(chain); + if (bestSucc != nullptr) { + MayDumpSelectLog(node, *bestSucc, "range3 frequency"); + return bestSucc; + } + } + + // search left part in context by topological sequence + if (IsTargetRange(range, LayoutRangeKind::kRangeRpotList)) { + bestSucc = FindNextNodeInRpotList(chain); + if (bestSucc != nullptr) { + MayDumpSelectLog(node, *bestSucc, "range4 rpot "); + return bestSucc; + } + } + + // loop cold chain + if (IsTargetRange(range, LayoutRangeKind::kRangeColdPath)) { + auto *nextColdChain = GetNextColdChain(chain); + if (nextColdChain != nullptr) { + coldChains.remove(nextColdChain); + bestSucc = nextColdChain->GetHeader(); + MayDumpSelectLog(node, *bestSucc, "range5 cold "); + return bestSucc; + } + } + return nullptr; +} +} // namespace maple diff --git a/src/mapleall/maple_util/src/cl_option_parser.cpp b/src/mapleall/maple_util/src/cl_option_parser.cpp index 
2b99610fa6abcd89e97bc51b49812d9f88d8e13e..9e9fb605c418e44fd9a9ba7dec6f768d2e054f5e 100644 --- a/src/mapleall/maple_util/src/cl_option_parser.cpp +++ b/src/mapleall/maple_util/src/cl_option_parser.cpp @@ -33,11 +33,6 @@ bool IsPrefixDetected(const std::string_view &opt) { return true; } - /* -Wl is linker option */ - if (opt.substr(0, 3) == "-Wl") { // 3: length of -Wl option - return false; - } - /* It should be "--" or "-" */ return (opt[0] == '-'); } @@ -130,7 +125,7 @@ template <> RetCode Option::ParseString(size_t &argsIndex, if (keyArg.rawArg == "-o" && keyArg.val == "-") { keyArg.val = "/dev/stdout"; } - if (keyArg.rawArg == "-D" && keyArg.val.find("_FORTIFY_SOURCE") != keyArg.val.npos) { + if (keyArg.rawArg == "-D" && keyArg.val.find("_FORTIFY_SOURCE") != std::string::npos) { static std::string tmp(keyArg.val); tmp += " -O2 "; keyArg.val = tmp; @@ -144,12 +139,7 @@ template <> RetCode Option::ParseString(size_t &argsIndex, } if (IsJoinedValPermitted() && (GetValue() != "")) { - if (keyArg.key == "-Wl") { - // 3 is length of -Wl - SetValue(GetValue() + std::string(keyArg.val).substr(3)); - } else { - SetValue(GetValue() + " " + std::string(keyArg.key) + " " + std::string(keyArg.val)); - } + SetValue(GetValue() + " " + std::string(keyArg.key) + " " + std::string(keyArg.val)); } else { SetValue(std::string(keyArg.val)); } @@ -171,7 +161,7 @@ template RetCode Option::ParseDigit(size_t &argsIndex, const std::deque &args, KeyArg &keyArg) { - static_assert(digitalCheck, "Expected (u)intXX types"); + static_assert(kDigitalCheck, "Expected (u)intXX types"); RetCode err = RetCode::noError; size_t indexIncCnt = 0; diff --git a/src/mapleall/maple_util/src/cl_parser.cpp b/src/mapleall/maple_util/src/cl_parser.cpp index 74472bd879ae5b67dfab5aca66fc8d040596f20a..ce9e76f268ff93872269a52f1495898daeca4479 100644 --- a/src/mapleall/maple_util/src/cl_parser.cpp +++ b/src/mapleall/maple_util/src/cl_parser.cpp @@ -12,10 +12,13 @@ * FIT FOR A PARTICULAR PURPOSE. 
* See the Mulan PSL v2 for more details. */ +#include + #include "cl_option.h" #include "cl_parser.h" #include "mpl_logging.h" +#include "string_utils.h" using namespace maplecl; @@ -25,27 +28,41 @@ CommandLine &CommandLine::GetCommandLine() { return cl; } -OptionInterface *CommandLine::CheckJoinedOption(KeyArg &keyArg, OptionCategory &optCategory) const { +OptionInterface *CommandLine::CheckJoinedOption(KeyArg &keyArg, OptionCategory &optCategory) { auto &str = keyArg.rawArg; for (auto joinedOption : optCategory.joinedOptions) { /* Joined Option (like -DMACRO) can be detected as substring (-D) in the option string */ if (str.find(joinedOption.first) == 0) { size_t keySize; - if (joinedOption.first != "-Wl") { + if (joinedOption.first != "-Wl" && joinedOption.first != "-l") { keySize = joinedOption.first.size(); keyArg.key = str.substr(0, keySize); + keyArg.val = str.substr(keySize); } else { + std::string tmp(str); + linkOptions.push_back(tmp); keySize = 0; - keyArg.key = "-Wl"; + if (joinedOption.first == "-Wl") { + keyArg.key = "-Wl"; + } else { + keyArg.key = "-l"; + } } - keyArg.val = str.substr(keySize); - keyArg.val = str.substr(keySize); keyArg.isJoinedOpt = true; return joinedOption.second; } } + std::string tempStr(str); + std::string tmp = maple::StringUtils::GetStrAfterLast(tempStr, "."); + if (tmp == "a" || tmp == "so") { + if (maple::StringUtils::GetStrAfterLast(tempStr, "/") == "libmplpgo.so" || + maple::StringUtils::GetStrAfterLast(tempStr, "/") == "libmplpgo.a") { + SetHasPgoLib(true); + } + linkOptions.push_back(tempStr); + } return nullptr; } @@ -55,16 +72,26 @@ RetCode CommandLine::ParseJoinedOption(size_t &argsIndex, KeyArg &keyArg, OptionCategory &optCategory) { OptionInterface *option = CheckJoinedOption(keyArg, optCategory); if (option != nullptr) { - RetCode err = option->Parse(argsIndex, args, keyArg); - if (err != RetCode::noError) { - return err; - } + if (keyArg.key != "-Wl" && keyArg.key != "-l") { + RetCode err = 
option->Parse(argsIndex, args, keyArg); + if (err != RetCode::noError) { + return err; + } - /* Set Option in all categories registering for this option */ - for (auto &category : option->optCategories) { - category->AddEnabledOption(option); + /* Set Option in all categories registering for this option */ + for (auto &category : option->optCategories) { + category->AddEnabledOption(option); + } + } else { + argsIndex++; } } else { + std::string tempStr(keyArg.rawArg); + std::string tmp = maple::StringUtils::GetStrAfterLast(tempStr, "."); + if (tmp == "a" || tmp == "so") { + argsIndex++; + return RetCode::noError; + } return RetCode::notRegistered; } @@ -89,34 +116,56 @@ void CommandLine::CloseOptimize(const OptionCategory &optCategory) const { } } +void CommandLine::DeleteEnabledOptions(size_t &argsIndex, const std::deque &args, + const OptionCategory &optCategory) const { + std::map picOrPie = {{"-fpic", "-fPIC"}, {"--fpic", "-fPIC"}, {"-fpie", "-fPIE"}, + {"--fpie", "-fPIE"}, {"-fPIE", "-fpie"}, {"--fPIE", "-fpie"}, + {"-fPIC", "-fpic"}, {"--fPIC", "-fpic"}}; + auto item = optCategory.options.find(picOrPie[args[argsIndex]]); + item->second->UnSetEnabledByUser(); + for (auto &category : item->second->optCategories) { + if (std::find(category->GetEnabledOptions().begin(), category->GetEnabledOptions().end(), item->second) != + category->GetEnabledOptions().end()) { + category->DeleteEnabledOption(item->second); + } + } +} + RetCode CommandLine::ParseOption(size_t &argsIndex, const std::deque &args, KeyArg &keyArg, const OptionCategory &optCategory, - OptionInterface *opt) { - if (args[argsIndex] == "--no-pie") { + OptionInterface &opt) const { + if (args[argsIndex] == "--no-pie" || args[argsIndex] == "-fno-pie") { auto item = optCategory.options.find("-fPIE"); item->second->SetEnabledByUser(); } - if (args[argsIndex] == "--no-pic") { + if (args[argsIndex] == "--no-pic" || args[argsIndex] == "-fno-pic") { auto item = optCategory.options.find("-fPIC"); 
item->second->SetEnabledByUser(); } + if (args[argsIndex] == "-fpic" || args[argsIndex] == "--fpic" || + args[argsIndex] == "-fpie" || args[argsIndex] == "--fpie" || + args[argsIndex] == "-fPIE" || args[argsIndex] == "--fPIE" || + args[argsIndex] == "-fPIC" || args[argsIndex] == "--fPIC") { + DeleteEnabledOptions(argsIndex, args, optCategory); + } + if (args[argsIndex] == "--O0" || args[argsIndex] == "-O0" || args[argsIndex] == "--O1" || args[argsIndex] == "-O1" || args[argsIndex] == "--O2" || args[argsIndex] == "-O2" || args[argsIndex] == "--O3" || args[argsIndex] == "-O3" || args[argsIndex] == "--Os" || args[argsIndex] == "-Os") { CloseOptimize(optCategory); } - RetCode err = opt->Parse(argsIndex, args, keyArg); + RetCode err = opt.Parse(argsIndex, args, keyArg); if (err != RetCode::noError) { return err; } /* Set Option in all categories registering for this option */ - for (auto &category : opt->optCategories) { - category->AddEnabledOption(opt); + for (auto &category : opt.optCategories) { + category->AddEnabledOption(&opt); } return RetCode::noError; @@ -125,7 +174,7 @@ RetCode CommandLine::ParseOption(size_t &argsIndex, RetCode CommandLine::ParseEqualOption(size_t &argsIndex, const std::deque &args, KeyArg &keyArg, OptionCategory &optCategory, - const OptionsMapType &optMap, ssize_t pos) { + const OptionsMapType &optMap, size_t pos) { keyArg.isEqualOpt = true; auto &arg = args[argsIndex]; @@ -134,12 +183,19 @@ RetCode CommandLine::ParseEqualOption(size_t &argsIndex, * As example for -Dkey=value: default splitting key="Dkey" value="value", * Joined option splitting key="D" value="key=value" */ - auto item = optMap.find(std::string(arg.substr(0, pos))); + auto item = optMap.find(std::string(arg)); + if (item == optMap.end()) { + item = optMap.find(std::string(arg.substr(0, pos + 1))); + if (item == optMap.end()) { + item = optMap.find(std::string(arg.substr(0, pos))); + } + } if (item != optMap.end()) { /* equal option, like --key=value */ - keyArg.key = 
arg.substr(0, pos); + keyArg.key = (optMap.find(std::string(arg.substr(0, pos + 1))) != optMap.end()) ? arg.substr(0, pos + 1) : + arg.substr(0, pos); keyArg.val = arg.substr(pos + 1); - return ParseOption(argsIndex, args, keyArg, optCategory, item->second); + return ParseOption(argsIndex, args, keyArg, optCategory, *item->second); } else { /* It can be joined option, like: -DMACRO=VALUE */ return ParseJoinedOption(argsIndex, args, keyArg, optCategory); @@ -152,11 +208,16 @@ RetCode CommandLine::ParseSimpleOption(size_t &argsIndex, const OptionsMapType &optMap) { keyArg.isEqualOpt = false; auto &arg = args[argsIndex]; + if (std::string(arg) == "--lite-pgo-gen") { + SetUseLitePgoGen(true); + } else if (std::string(arg) == "--no-lite-pgo-gen") { + SetUseLitePgoGen(false); + } auto item = optMap.find(std::string(arg)); if (item != optMap.end()) { /* --key or --key value */ - return ParseOption(argsIndex, args, keyArg, optCategory, item->second); + return ParseOption(argsIndex, args, keyArg, optCategory, *item->second); } else { /* It can be joined option, like: -DMACRO */ return ParseJoinedOption(argsIndex, args, keyArg, optCategory); @@ -179,7 +240,7 @@ RetCode CommandLine::HandleInputArgs(const std::deque &args, continue; } - if (arg.find("_FORTIFY_SOURCE") != arg.npos) { + if (arg.find("_FORTIFY_SOURCE") != std::string::npos) { auto item = clangCategory.options.find("-pO2ToCl"); item->second->SetEnabledByUser(); } @@ -283,7 +344,11 @@ void CommandLine::BashCompletionPrinter(const OptionCategory &optCategory) const } } -void CommandLine::HelpPrinter(const OptionCategory &optCategory) const { +void CommandLine::HelpPrinter(OptionCategory &optCategory) const { + std::sort(optCategory.registredOptions.begin(), optCategory.registredOptions.end(), + [](const OptionInterface *a, const OptionInterface *b) { + return a->GetOptName() < b->GetOptName(); + }); for (auto &opt : optCategory.registredOptions) { if (opt->IsVisibleOption()) { maple::LogInfo::MapleLogger() << 
opt->GetDescription() << '\n'; diff --git a/src/mapleall/maple_util/src/file_utils.cpp b/src/mapleall/maple_util/src/file_utils.cpp index f71ff0bef2b75024070bec7a0ddc05942978bb0a..ef7ecd0890304d2afca44d8137f2788d3de0066a 100644 --- a/src/mapleall/maple_util/src/file_utils.cpp +++ b/src/mapleall/maple_util/src/file_utils.cpp @@ -65,7 +65,7 @@ std::string FileUtils::SafeGetPath(const char *envVar, const char *name) { std::string path(buf); CHECK_FATAL(path.find(name) != std::string::npos, "Failed! Unable to find path of %s \n", name); std::string tmp = name; - int index = path.find(tmp) + tmp.length(); + size_t index = path.find(tmp) + tmp.length(); path = path.substr(0, index); return path; } @@ -96,19 +96,19 @@ std::string FileUtils::GetOutPutDir() { return "./"; } -std::string FileUtils::GetTmpFolderPath() { +std::string FileUtils::GetTmpFolderPath() const { int size = 1024; FILE *fp = nullptr; char buf[size]; const char *cmd = "mktemp -d"; CHECK_FATAL((fp = popen(cmd, "r")) != nullptr, "Failed to create tmp folder"); while (fgets(buf, size, fp) != nullptr) {} - pclose(fp); + (void)pclose(fp); fp = nullptr; std::string path(buf); CHECK_FATAL(path.size() != 0, "Failed to create tmp folder"); std::string tmp = "\n"; - int index = path.find(tmp) == path.npos ? path.length() : path.find(tmp); + size_t index = path.find(tmp) == path.npos ? 
path.length() : path.find(tmp); path = path.substr(0, index); return path + "/"; } @@ -141,7 +141,52 @@ std::string FileUtils::GetFileName(const std::string &filePath, bool isWithExten } std::string FileUtils::GetFileExtension(const std::string &filePath) { - return StringUtils::GetStrAfterLast(filePath, ".", true); + std::string fileExtension = StringUtils::GetStrAfterLast(filePath, ".", true); + return fileExtension; +} + +InputFileType FileUtils::GetFileType(const std::string &filePath) { + InputFileType fileType = InputFileType::kFileTypeNone; + std::string extensionName = GetFileExtension(filePath); + if (extensionName == "class") { + fileType = InputFileType::kFileTypeClass; + } else if (extensionName == "dex") { + fileType = InputFileType::kFileTypeDex; + } else if (extensionName == "c") { + fileType = InputFileType::kFileTypeC; + } else if (extensionName == "cpp") { + fileType = InputFileType::kFileTypeCpp; + } else if (extensionName == "ast") { + fileType = InputFileType::kFileTypeAst; + } else if (extensionName == "jar") { + fileType = InputFileType::kFileTypeJar; + } else if (extensionName == "mpl" || extensionName == "bpl") { + if (filePath.find("VtableImpl") == std::string::npos) { + if (filePath.find(".me.mpl") != std::string::npos) { + fileType = InputFileType::kFileTypeMeMpl; + } else { + fileType = extensionName == "mpl" ? 
InputFileType::kFileTypeMpl : InputFileType::kFileTypeBpl; + } + } else { + fileType = InputFileType::kFileTypeVtableImplMpl; + } + } else if (extensionName == "s" || extensionName == "S") { + fileType = InputFileType::kFileTypeS; + } else if (extensionName == "o") { + fileType = GetFileTypeByMagicNumber(filePath); + } else if (extensionName == "mbc") { + fileType = InputFileType::kFileTypeMbc; + } else if (extensionName == "lmbc") { + fileType = InputFileType::kFileTypeLmbc; + } else if (extensionName == "h") { + fileType = InputFileType::kFileTypeH; + } else if (extensionName == "i") { + fileType = InputFileType::kFileTypeI; + } else if (extensionName == "oast") { + fileType = InputFileType::kFileTypeOast; + } + + return fileType; } std::string FileUtils::GetExecutable() { @@ -215,16 +260,15 @@ std::string FileUtils::AppendMapleRootIfNeeded(bool needRootPath, const std::str return ostrStream.str(); } -bool FileUtils::DelTmpDir() { +bool FileUtils::DelTmpDir() const { if (FileUtils::GetInstance().GetTmpFolder() == "") { return true; } std::string tmp = "rm -rf " + FileUtils::GetInstance().GetTmpFolder(); - const char* cmd = tmp.c_str(); const int size = 1024; FILE *fp = nullptr; char buf[size] = {0}; - if ((fp = popen(cmd, "r")) == nullptr) { + if ((fp = popen(tmp.c_str(), "r")) == nullptr) { return false; } while (fgets(buf, size, fp) != nullptr) {} @@ -237,4 +281,18 @@ bool FileUtils::DelTmpDir() { return true; } +InputFileType FileUtils::GetFileTypeByMagicNumber(const std::string &pathName) { + std::ifstream file(GetRealPath(pathName)); + if (!file.is_open()) { + ERR(kLncErr, "unable to open file %s", pathName.c_str()); + return InputFileType::kFileTypeNone; + } + uint32 magic = 0; + int length = static_cast(sizeof(uint32)); + (void)file.read(reinterpret_cast(&magic), length); + file.close(); + return magic == kMagicAST ? InputFileType::kFileTypeOast : magic == kMagicELF ? 
InputFileType::kFileTypeObj : + InputFileType::kFileTypeNone; +} + } // namespace maple diff --git a/src/mapleall/maple_util/src/itab_util.cpp b/src/mapleall/maple_util/src/itab_util.cpp index eed9d9e291974b2f7e01de1c6d565ac09b141f70..27d15a1660be4729e58db276ce92a99970a1d3db 100644 --- a/src/mapleall/maple_util/src/itab_util.cpp +++ b/src/mapleall/maple_util/src/itab_util.cpp @@ -20,7 +20,7 @@ namespace maple { unsigned int DJBHash(const char *str) { unsigned int hash = 5381; // 5381: initial value for DJB hash algorithm - while (*str) { + while (*str != 0) { hash += (hash << 5) + static_cast(*str++); // 5: calculate the hash code of data } return (hash & 0x7FFFFFFF); diff --git a/src/mapleall/maple_util/src/mpl_int_val.cpp b/src/mapleall/maple_util/src/mpl_int_val.cpp index 12d9ef5f40d5e4d71feeec0e79cf70d21965c923..2cd855a06eebd19a9cd3eda7fc7bbbffafc3f95f 100644 --- a/src/mapleall/maple_util/src/mpl_int_val.cpp +++ b/src/mapleall/maple_util/src/mpl_int_val.cpp @@ -155,7 +155,7 @@ IntVal IntVal::ExtendToWideInt(uint16 newWidth, bool isSigned) const { } int filler = sign && GetSignBit() ? ~0 : 0; - if (numExtendBits && filler != 0) { + if (numExtendBits != 0 && filler != 0) { // sign-extend the last word from given value newValue[oldNumWords - 1] |= allOnes << (wordBitSize - numExtendBits); } @@ -186,7 +186,8 @@ int BitwiseCompare(const T *lhs, const T *rhs, uint16 comparedWidth) { return ((lhs[i] & mask) < (rhs[i] & mask)) ? -1 : 1; } - while (i--) { + while (i != 0) { + i--; if (lhs[i] != rhs[i]) { return lhs[i] < rhs[i] ? 
-1 : 1; } @@ -383,7 +384,7 @@ static void MulPart(uint64 *dst, const uint64 *src, uint64 multiplier, uint16 pa carry += AddCarry(dst[part + i + 1], overflow); uint16 j = part + i + 2; - while (carry && j < numParts) { + while ((carry != 0) && j < numParts) { carry = AddCarry(dst[j], carry); ++j; } @@ -510,7 +511,7 @@ static void ShiftLeft(T *dst, uint16 numWords, uint16 bits) { uint16 shiftWords = bits / typeBitSize; uint8 shiftBits = bits % typeBitSize; - if (!shiftBits) { + if (shiftBits == 0) { size_t size = (numWords - shiftWords) * sizeof(T); errno_t err = memmove_s(dst + shiftWords, size, dst, size); CHECK_FATAL(err == EOK, "memmove_s failed"); @@ -562,7 +563,7 @@ static void ShiftRight(T *dst, uint16 numWords, uint16 bits, bool negRemainder = uint16 shiftWords = bits / typeBitSize; uint8 shiftBits = bits % typeBitSize; - if (!shiftBits) { + if (shiftBits == 0) { errno_t err = memmove_s(dst, (numWords - shiftWords) * sizeof(T), dst + shiftWords, (numWords - shiftWords) * sizeof(T)); CHECK_FATAL(err == EOK, "memmove_s failed"); @@ -583,7 +584,7 @@ static void ShiftRight(T *dst, uint16 numWords, uint16 bits, bool negRemainder = errno_t err = memset_s(dst + (numWords - shiftWords), size, remainderFiller, size); CHECK_FATAL(err == EOK, "memset_s failed"); - if (shiftBits) { + if (shiftBits != 0) { dst[numWords - (shiftWords + 1)] |= T(remainderFiller) << (typeBitSize - shiftBits); } } @@ -639,7 +640,7 @@ static uint32 MulSubOverflow(uint32 *dst, const uint32 *src0, uint32 src1, uint1 borrow += resOverflow; uint16 j = i + 1; - while (j < numWords && borrow) { + while (j < numWords && borrow != 0) { borrow = SubBorrow(dst[j], borrow); ++j; } @@ -664,7 +665,7 @@ static uint32 MulAddOverflow(uint32 *dst, const uint32 *src0, uint32 src1, uint1 carry += resOverflow; uint16 j = i + 1; - while (j < numWords && carry) { + while (j < numWords && carry != 0) { carry = AddCarry(dst[j], carry); ++j; } @@ -721,7 +722,7 @@ static void BasecaseDivRem(uint32 *q, uint32 *r, uint32 
*a, uint32 *b, uint16 n, auto quotientLoHi = GetLoHi(quotient); // q[j] = min(q_tmp , β − 1) - q[j] = quotientLoHi.hi ? uint32(~0) : quotientLoHi.lo; + q[j] = (quotientLoHi.hi != 0) ? uint32(~0) : quotientLoHi.lo; // calculate BShifted = B * β^m separately cos we must use them in several places err = memcpy_s(bShifted, (n + m) * sizeof(uint32), b, n * sizeof(uint32)); @@ -741,7 +742,7 @@ static void BasecaseDivRem(uint32 *q, uint32 *r, uint32 *a, uint32 *b, uint16 n, --q[j]; borrow -= MulAddOverflow(a, bShifted, 1, n + m); } - } while (j); + } while (j != 0); err = memcpy_s(r, (n + m) * sizeof(uint32), a, (n + m) * sizeof(uint32)); CHECK_FATAL(err == EOK, "memcpy_s failed"); @@ -788,9 +789,9 @@ std::pair IntVal::WideUnsignedDivRem(const IntVal &delimer, cons // copy delimer and divisor values to uint32 arrays uint32 a[n + m], b[n], r[n + m], q[m + 1]; - errno_t err = memcpy_s(a, (n + m) * sizeof(uint32), delimer.u.pValue, (n + m) * sizeof(uint32)); + errno_t err = memcpy_s(a, (n + m) * sizeof(uint32), delimer.u.pValue, (n + m - 1u) * sizeof(uint32)); CHECK_FATAL(err == EOK, "memcpy_s failed"); - a[n + m - 1] = 0; + a[n + m - 1u] = 0; err = memcpy_s(b, n * sizeof(uint32), divisor.u.pValue, n * sizeof(uint32)); CHECK_FATAL(err == EOK, "memcpy_s failed"); @@ -899,7 +900,7 @@ void IntVal::WideSetMinValue() { uint16 IntVal::CountLeadingOnes() const { // mask in neccessary because the high bits of value can be zero uint8 startPos = width % wordBitSize; - uint64 mask = startPos ? allOnes << (wordBitSize - startPos) : 0; + uint64 mask = (startPos != 0) ? 
allOnes << (wordBitSize - startPos) : 0; if (IsOneWord()) { return maple::CountLeadingOnes(u.value | mask) - startPos; @@ -907,7 +908,7 @@ uint16 IntVal::CountLeadingOnes() const { uint16 i = GetNumWords() - 1; uint8 ones = maple::CountLeadingOnes(u.pValue[i] | mask) - startPos; - if (!ones) { + if (ones == 0) { return 0; } @@ -916,7 +917,7 @@ uint16 IntVal::CountLeadingOnes() const { do { ones = maple::CountLeadingOnes(u.pValue[i]); count += ones; - } while (ones && i--); + } while (ones != 0 && i--); return count; } @@ -948,12 +949,14 @@ uint16 IntVal::CountLeadingZeros() const { } uint16 count = 0; - uint16 i = GetNumWords() - 1; uint8 zeros = 0; - do { + for (int16 i = static_cast(GetNumWords() - 1); i >= 0; --i) { zeros = maple::CountLeadingZeros(u.pValue[i]); count += zeros; - } while ((zeros == wordBitSize) && i--); + if (zeros != wordBitSize) { + break; + } + } ASSERT(count >= emptyBits, "invalid count of leading zeros"); return count - emptyBits; @@ -985,7 +988,7 @@ uint16 IntVal::CountSignificantBits() const { if (sign) { nonSignificantBits = GetSignBit() ? 
CountLeadingOnes() : CountLeadingZeros(); // sign bit is always significant - if (nonSignificantBits) { + if (nonSignificantBits != 0) { --nonSignificantBits; } } else { @@ -1035,7 +1038,7 @@ void IntVal::Dump(std::ostream &os) const { uint16 numWords = GetNumWords(); ASSERT(numWords >= numZeroWords, "invalid count of zero words"); - os << std::hex << "0xL"; + os << std::hex << "0x"; if (numWords == numZeroWords) { os << "0"; } else { diff --git a/src/mapleall/maple_util/src/mpl_logging.cpp b/src/mapleall/maple_util/src/mpl_logging.cpp index 24b641e3cdf397f69ca800663285cc98b4b25c90..f292d045586dee92b027773c12bc64bb6ab76794 100644 --- a/src/mapleall/maple_util/src/mpl_logging.cpp +++ b/src/mapleall/maple_util/src/mpl_logging.cpp @@ -70,24 +70,27 @@ SECUREC_ATTRIBUTE(7, 8) void LogInfo::EmitLogForDevelop(enum LogTags tag, enum L va_list l; va_start(l, fmt); - int lenBack = vsnprintf_s(buf + lenFront, kMaxLogLen - lenFront, - static_cast(kMaxLogLen - lenFront - 1), fmt, l); + int lenBack = vsnprintf_s(buf + lenFront, static_cast(kMaxLogLen - lenFront), + static_cast(kMaxLogLen - static_cast(lenFront) - 1), fmt, l); if (lenBack == -1) { WARN(kLncWarn, "vsnprintf_s failed "); va_end(l); return; } if (outMode != 0) { - int eNum = snprintf_s(buf + lenFront + lenBack, kMaxLogLen - lenFront - lenBack, - kMaxLogLen - lenFront - lenBack - 1, " [%s] [%s:%d]", func.c_str(), file.c_str(), line); + int eNum = snprintf_s(buf + lenFront + lenBack, static_cast(kMaxLogLen - lenFront - lenBack), + static_cast(kMaxLogLen - static_cast(lenFront - lenBack) - 1), + " [%s] [%s:%d]", func.c_str(), file.c_str(), line); if (eNum == -1) { WARN(kLncWarn, "snprintf_s failed"); va_end(l); return; } } else { - int eNum = snprintf_s(buf + lenFront + lenBack, kMaxLogLen - lenFront - lenBack, - kMaxLogLen - lenFront - lenBack - 1, " [%s]", func.c_str()); + int eNum = snprintf_s(buf + lenFront + lenBack, + static_cast(kMaxLogLen - static_cast(lenFront - lenBack)), + static_cast(kMaxLogLen - 
static_cast(lenFront - lenBack) - 1), + " [%s]", func.c_str()); if (eNum == -1) { WARN(kLncWarn, "snprintf_s failed"); va_end(l); @@ -95,7 +98,7 @@ SECUREC_ATTRIBUTE(7, 8) void LogInfo::EmitLogForDevelop(enum LogTags tag, enum L } } va_end(l); - fprintf(outStream, "%s\n", buf); + (void)fprintf(outStream, "%s\n", buf); return; } @@ -153,11 +156,11 @@ SECUREC_ATTRIBUTE(5, 6) void LogInfo::EmitErrorMessage(const std::string &cond, unsigned int line, const char *fmt, ...) const { char buf[kMaxLogLen]; #ifdef _WIN32 - int len = snprintf_s(buf, kMaxLogLen, kMaxLogLen - 1, "CHECK/CHECK_FATAL failure: %s at [%s:%d] ", + int len = snprintf_s(buf, kMaxLogLen, kMaxLogLen - 1, "CHECK/CHECK_FATAL failure: %s at [%s:%u] ", cond.c_str(), file.c_str(), line); #else pid_t tid = syscall(SYS_gettid); - int len = snprintf_s(buf, kMaxLogLen, kMaxLogLen - 1, "Tid(%d): CHECK/CHECK_FATAL failure: %s at [%s:%d] ", + int len = snprintf_s(buf, kMaxLogLen, kMaxLogLen - 1, "Tid(%d): CHECK/CHECK_FATAL failure: %s at [%s:%u] ", tid, cond.c_str(), file.c_str(), line); #endif if (len == -1) { diff --git a/src/mapleall/maple_util/src/mpl_posix_sighandler.cpp b/src/mapleall/maple_util/src/mpl_posix_sighandler.cpp index bb5322f10fbff8e17a6e8185d8b283f9c9e719e6..ea9945d41af2a30ac81df66c6ba1d0768d2197ef 100644 --- a/src/mapleall/maple_util/src/mpl_posix_sighandler.cpp +++ b/src/mapleall/maple_util/src/mpl_posix_sighandler.cpp @@ -91,7 +91,7 @@ void SigHandler::SetTimer(int seconds) { } }; - if (setitimer(ITIMER_REAL, &timeValue, nullptr)) { + if (setitimer(ITIMER_REAL, &timeValue, nullptr) != 0) { LogInfo::MapleLogger(kLlErr) << "setitimer failed with " << errno << std::endl; exit(EXIT_FAILURE); } @@ -322,7 +322,7 @@ void SigHandler::SetSigaction(int sig, SigHandler::FuncPtr callback) { sigact.sa_flags = SA_SIGINFO | SA_ONSTACK; sigact.sa_sigaction = callback; - if (sigaction(sig, &sigact, nullptr)) { + if (sigaction(sig, &sigact, nullptr) != 0) { LogInfo::MapleLogger(kLlErr) << "sigacton failed with " 
<< errno << std::endl; exit(EXIT_FAILURE); } @@ -335,7 +335,7 @@ void SigHandler::SetDefaultSigaction(int sig) { #ifdef __unix__ struct sigaction sigact = {}; sigact.sa_handler = SIG_DFL; - if (sigaction(sig, &sigact, nullptr)) { + if (sigaction(sig, &sigact, nullptr) != 0) { LogInfo::MapleLogger(kLlErr) << "sigacton failed with " << errno << std::endl; exit(EXIT_FAILURE); } @@ -347,7 +347,7 @@ void SigHandler::SetDefaultSigaction(int sig) { void DumpFaultingAddr(uintptr_t addr) { uintptr_t linkAddr = GetLinkAddr(addr); LogInfo::MapleLogger(kLlErr) << "faulting address: "; - if (linkAddr) { + if (linkAddr != 0) { LogInfo::MapleLogger(kLlErr) << "0x" << std::hex << addr - linkAddr << " (0x" << std::hex << addr << ")" << std::endl; } else { @@ -362,10 +362,12 @@ void SigHandler::Handler(int sig, siginfo_t *info, void *ucontext) noexcept { DumpFaultingAddr(reinterpret_cast(info->si_addr)); } - LogInfo::MapleLogger(kLlErr) << Stacktrace<>() << std::endl; + if (sig != SIGTERM) { + LogInfo::MapleLogger(kLlErr) << Stacktrace<>() << std::endl; - if (FuncPtr callback = sig2callback.at(sig)) { - callback(sig, info, ucontext); + if (FuncPtr callback = sig2callback.at(sig)) { + callback(sig, info, ucontext); + } } exit(EXIT_FAILURE); diff --git a/src/mapleall/maple_util/src/namemangler.cpp b/src/mapleall/maple_util/src/namemangler.cpp index d4ebf01f153f5523a8ffefd2d31ef3968b05c28f..9458bb1580d6884a3b7f6aa63492c4f0818b3e34 100644 --- a/src/mapleall/maple_util/src/namemangler.cpp +++ b/src/mapleall/maple_util/src/namemangler.cpp @@ -13,23 +13,23 @@ * See the Mulan PSL v2 for more details. */ #include "namemangler.h" -#include #include -#include #include +#include +#include namespace namemangler { #ifdef __MRT_DEBUG -#define ASSERT(f) assert(f) +#define MRT_ASSERT(f) assert(f) #else -#define ASSERT(f) ((void)0) +#define MRT_ASSERT(f) ((void)0) #endif const int kLocalCodebufSize = 1024; const int kMaxCodecbufSize = (1 << 16); // Java spec support a max name length of 64K. 
-#define GETHEXCHAR(n) static_cast((n) < 10 ? (n) + kZeroAsciiNum : (n) - 10 + 'a') -#define GETHEXCHARU(n) static_cast((n) < 10 ? (n) + '0' : (n) - 10 + 'A') +#define GETHEXCHAR(n) static_cast((n) < 10 ? static_cast(n) + kZeroAsciiNum : (n) - 10 + kaAsciiNum) +#define GETHEXCHARU(n) static_cast((n) < 10 ? static_cast(n) + kZeroAsciiNum : (n) - 10 + kAAsciiNum) bool doCompression = false; @@ -221,13 +221,17 @@ std::string DecodeName(const std::string &name) { str16.clear(); i++; c = static_cast(namePtr[i++]); - uint8_t b1 = (c <= '9') ? c - '0' : c - 'a' + kNumLimit; + uint8_t b1 = (c <= '9') ? static_cast(c - kZeroAsciiNum) : + static_cast(c - kaAsciiNum + kNumLimit); c = static_cast(namePtr[i++]); - uint8_t b2 = (c <= '9') ? c - '0' : c - 'a' + kNumLimit; + uint8_t b2 = (c <= '9') ? static_cast(c - kZeroAsciiNum) : + static_cast(c - kaAsciiNum + kNumLimit); c = static_cast(namePtr[i++]); - uint8_t b3 = (c <= '9') ? c - '0' : c - 'a' + kNumLimit; + uint8_t b3 = (c <= '9') ? static_cast(c - kZeroAsciiNum) : + static_cast(c - kaAsciiNum + kNumLimit); c = static_cast(namePtr[i++]); - uint8_t b4 = (c <= '9') ? static_cast(c - '0') : static_cast(c - 'a' + kNumLimit); + uint8_t b4 = (c <= '9') ? static_cast(c - kZeroAsciiNum) : + static_cast(c - kaAsciiNum + kNumLimit); uint32_t codepoint = (b1 << kCodeOffset3) | (b2 << kCodeOffset2) | (b3 << kCodeOffset) | b4; str16 += static_cast(codepoint); unsigned int n = UTF16ToUTF8(str, str16, 1, false); @@ -246,13 +250,13 @@ std::string DecodeName(const std::string &name) { } } else { c = static_cast(namePtr[i++]); - unsigned int v = (c <= '9') ? c - '0' : c - 'A' + kNumLimit; + unsigned int v = static_cast((c <= '9') ? c - kZeroAsciiNum : c - kAAsciiNum + kNumLimit); unsigned int asc = v << kCodeOffset; if (i >= nameLen) { break; } c = static_cast(namePtr[i++]); - v = (c <= '9') ? c - '0' : c - 'A' + kNumLimit; + v = (c <= '9') ? 
c - kZeroAsciiNum : c - kAAsciiNum + kNumLimit; asc += v; newName[pos++] = static_cast(asc); @@ -332,10 +336,12 @@ std::string NativeJavaName(const std::string &name, bool overLoaded) { // _XX: '_' followed by ascii code in hex c = decompressedName[i++]; unsigned char v = - (c <= '9') ? static_cast(c - '0') : static_cast((c - 'A') + kNumLimit); + (c <= '9') ? static_cast(c - kZeroAsciiNum) : + static_cast((c - kAAsciiNum) + kNumLimit); unsigned char asc = v << kCodeOffset; c = decompressedName[i++]; - v = (c <= '9') ? static_cast(c - '0') : static_cast((c - 'A') + kNumLimit); + v = (c <= '9') ? static_cast(c - kZeroAsciiNum) : + static_cast((c - kAAsciiNum) + kNumLimit); asc += v; if (asc == '/') { newName += "_"; @@ -365,7 +371,7 @@ std::string NativeJavaName(const std::string &name, bool overLoaded) { } else { printf("name = %s\n", decompressedName.c_str()); printf("c = %c\n", asc); - ASSERT(false && "more cases in NativeJavaName"); + MRT_ASSERT(false && "more cases in NativeJavaName"); } } } else { @@ -499,7 +505,7 @@ uint32_t GetCodePoint(const std::string &str8, uint32_t &i) { b = static_cast(str8[i++]); codePoint = ((a & 0x1F) << kCodepointOffset1) | (b & 0x3F); } else { - ASSERT(false && "invalid UTF-8"); + MRT_ASSERT(false && "invalid UTF-8"); } return codePoint; } @@ -534,7 +540,7 @@ unsigned UTF8ToUTF16(std::u16string &str16, const std::string &str8, unsigned sh count++; // only convert num elmements if (num == count) { - return (static_cast(retNum) << kCountOffset) | static_cast(i); + return (static_cast(retNum) << kCountOffset) | i; } } return i; @@ -555,7 +561,7 @@ void GetUnsignedLeb128Encode(std::vector &dest, uint32_t value) { } uint32_t GetUnsignedLeb128Decode(const uint8_t **data) { - ASSERT(data != nullptr && "data in GetUnsignedLeb128Decode() is nullptr"); + MRT_ASSERT(data != nullptr && "data in GetUnsignedLeb128Decode() is nullptr"); const uint8_t *ptr = *data; uint32_t result = 0; uint32_t shift = 0; @@ -573,7 +579,7 @@ uint32_t 
GetUnsignedLeb128Decode(const uint8_t **data) { } size_t GetUleb128Size(uint64_t v) { - ASSERT(v && "if v == 0, __builtin_clzll(v) is not defined"); + MRT_ASSERT(v && "if v == 0, __builtin_clzll(v) is not defined"); size_t clz = static_cast(__builtin_clzll(v)); // num of 7-bit groups return size_t((64 - clz + 6) / 7); @@ -581,17 +587,27 @@ size_t GetUleb128Size(uint64_t v) { size_t GetSleb128Size(int32_t v) { size_t size = 0; - int rem = v >> kGreybackOffset; + + // intended signed shift: block codedex here + constexpr uint32_t oneByte = 8; + uint32_t vShift = sizeof(v) * oneByte - kGreybackOffset; + uint32_t maskRem = static_cast(v) >> kGreybackOffset; + int32_t rem = (v < 0) ? static_cast(maskRem | (UINT32_MAX << vShift)) : static_cast(maskRem); + bool hasMore = true; - int end = ((v >= 0) ? 0 : -1); + int32_t end = ((v >= 0) ? 0 : -1); + + uint32_t remShift = sizeof(rem) * oneByte - kGreybackOffset; while (hasMore) { // judege whether has more valid rem hasMore = (rem != end) || ((static_cast(rem) & 1) != - (static_cast((static_cast(v) >> 6)) & 1)); + (static_cast((static_cast(v) >> 6)) & 1)); size++; v = rem; - rem >>= static_cast(kGreybackOffset); // intended signed shift: block codedex here + // intended signed shift: block codedex here + uint32_t blockRem = static_cast(rem) >> kGreybackOffset; + rem = (rem < 0) ? 
static_cast(blockRem | (UINT32_MAX << remShift)) : static_cast(blockRem); } return size; } @@ -688,7 +704,7 @@ int64_t DecodeSLEB128(const uint8_t *p, unsigned *n, const uint8_t *end) { } return 0; } - value |= static_cast(slice << shift); + value = static_cast(static_cast(value) | (slice << shift)); shift += kGreybackOffset; ++p; } while (byte >= kOneHundredTwentyEight); diff --git a/src/mapleall/maple_util/src/profile.cpp b/src/mapleall/maple_util/src/profile.cpp index 7ac12ea4889568fd4e8bf6a78156ca09044d901f..4050f913636465115d2fafb35b9806deb5256a77 100644 --- a/src/mapleall/maple_util/src/profile.cpp +++ b/src/mapleall/maple_util/src/profile.cpp @@ -28,7 +28,6 @@ #include "namemangler.h" #include "file_layout.h" #include "types_def.h" -#include "itab_util.h" namespace maple { constexpr uint8 Profile::stringEnd = 0x00; diff --git a/src/mapleall/mempool/include/maple_sparse_bitvector.h b/src/mapleall/mempool/include/maple_sparse_bitvector.h index 0e5a23e4008adcbc8c36a5ed51c281373995714c..c2c9ff71ed6c10e1d9bf7fb70b134b52515dfe27 100644 --- a/src/mapleall/mempool/include/maple_sparse_bitvector.h +++ b/src/mapleall/mempool/include/maple_sparse_bitvector.h @@ -160,12 +160,12 @@ class MapleSparseBitVector { using BitWord = unsigned long long; public: - explicit MapleSparseBitVector(MapleAllocator &alloc) + explicit MapleSparseBitVector(const MapleAllocator &alloc) : allocator(alloc), elementList(allocator.Adapter()), currIter(elementList.begin()) {} - explicit MapleSparseBitVector(const MapleSparseBitVector &rhs, MapleAllocator &alloc) + explicit MapleSparseBitVector(const MapleSparseBitVector &rhs, const MapleAllocator &alloc) : allocator(alloc), elementList(rhs.elementList, allocator.Adapter()), currIter(elementList.begin()) {} diff --git a/src/mapleall/mempool/include/maple_string.h b/src/mapleall/mempool/include/maple_string.h index f0c3c440011bcf10cbdd2d899ace907bc9de00b5..787323a16d54e69a7121969cd8742ed43fddeb16 100644 --- 
a/src/mapleall/mempool/include/maple_string.h +++ b/src/mapleall/mempool/include/maple_string.h @@ -227,7 +227,19 @@ class MapleString { return ::strlen(s); } - inline static char *NewData(MemPool &currMp, const char *source, size_t len); + inline static char *NewData(MemPool &currMp, const char *source, size_t len) { + if (source == nullptr && len == 0) { + return nullptr; + } + char *str = static_cast(currMp.Malloc((len + 1) * sizeof(char))); + CHECK_FATAL(str != nullptr, "MemPool::Malloc failed"); + if (source != nullptr && len != 0) { + errno_t err = memcpy_s(str, len, source, len); + CHECK_FATAL(err == EOK, "memcpy_s failed"); + } + str[len] = 0; + return str; + } inline size_t UnsafeFind(const char *str, size_t pos, size_t n) const { if ((dataLength - pos) < n) { @@ -240,7 +252,7 @@ class MapleString { if (matchStart == nullptr) { return std::string::npos; } - size_t i = matchStart - data; + size_t i = static_cast(matchStart - data); if ((dataLength - i) < n) { return std::string::npos; } @@ -264,7 +276,9 @@ class MapleString { template inline OS &operator<<(OS &os, const MapleString &data) { - os << data.c_str(); + if (!data.empty()) { + os << data.c_str(); + } return os; } diff --git a/src/mapleall/mempool/src/MPTest.cpp b/src/mapleall/mempool/src/MPTest.cpp index 12a2faeb4280e6bca70a330bc09a3c8aa8a4101b..e2bb30fc36feb163fbf42247ecab01475bb833f4 100644 --- a/src/mapleall/mempool/src/MPTest.cpp +++ b/src/mapleall/mempool/src/MPTest.cpp @@ -37,7 +37,7 @@ class MyClass { void TestLocalAllocater() { MemPoolCtrler mpc; - auto mp = std::unique_ptr(new StackMemPool(mpc, "")); + auto mp = std::make_unique(mpc, ""); LocalMapleAllocator alloc1(*mp); MapleVector v1({ 1, 2, 3, 4, 5 }, alloc1.Adapter()); { diff --git a/src/mapleall/mempool/src/maple_string.cpp b/src/mapleall/mempool/src/maple_string.cpp index c8491ecf3c117486c143a6e890fa65da0878c97b..a604dbeceb0637432850b3c238905fe57fda278b 100644 --- a/src/mapleall/mempool/src/maple_string.cpp +++ 
b/src/mapleall/mempool/src/maple_string.cpp @@ -35,19 +35,6 @@ MapleString::MapleString(const MapleString &str) MapleString::MapleString(const std::string &str, MemPool *currMp) : MapleString(str.data(), str.length(), currMp) {} -char *MapleString::NewData(MemPool &currMp, const char *source, size_t len) { - if (source == nullptr && len == 0) { - return nullptr; - } - char *str = static_cast(currMp.Malloc((len + 1) * sizeof(char))); - CHECK_FATAL(str != nullptr, "MemPool::Malloc failed"); - if (source != nullptr && len != 0) { - errno_t err = memcpy_s(str, len, source, len); - CHECK_FATAL(err == EOK, "memcpy_s failed"); - } - str[len] = 0; - return str; -} void MapleString::clear() { data = nullptr; diff --git a/src/mapleall/mpl2mpl/include/annotation_analysis.h b/src/mapleall/mpl2mpl/include/annotation_analysis.h index 7219610275d4be5de2eb8f23d22c98c799740c53..a53df79d1793e1e0f0be28868784bd1ac04011d0 100644 --- a/src/mapleall/mpl2mpl/include/annotation_analysis.h +++ b/src/mapleall/mpl2mpl/include/annotation_analysis.h @@ -89,12 +89,14 @@ class GenericType : public AnnotationType { GenericType(const GStrIdx &strIdx, MIRType *ms, MapleAllocator &alloc) : AnnotationType(kGenericType, strIdx), mirStructType(ms), - GenericArg(alloc.Adapter()), - ArgOrder(alloc.Adapter()) {} - ~GenericType() override = default; + genericArg(alloc.Adapter()), + argOrder(alloc.Adapter()) {} + ~GenericType() override { + mirStructType = nullptr; + } void AddGenericPair(GenericDeclare * const k, AnnotationType *v) { - GenericArg[k] = v; - ArgOrder.push_back(v); + genericArg[k] = v; + argOrder.push_back(v); } MIRStructType *GetMIRStructType() const { @@ -117,19 +119,19 @@ class GenericType : public AnnotationType { } MapleMap &GetGenericMap() { - return GenericArg; + return genericArg; } MapleVector &GetGenericArg() { - return ArgOrder; + return argOrder; } void Dump() override; void ReWriteType(std::string &subClass) override; private: MIRType *mirStructType; - MapleMap GenericArg; - 
MapleVector ArgOrder; + MapleMap genericArg; + MapleVector argOrder; }; class GenericDeclare : public AnnotationType { @@ -137,7 +139,9 @@ class GenericDeclare : public AnnotationType { explicit GenericDeclare(const GStrIdx &strIdx) : AnnotationType(kGenericDeclare, strIdx), defaultType(nullptr) {} - ~GenericDeclare() override = default; + ~GenericDeclare() override { + defaultType = nullptr; + } AnnotationType *GetDefaultType() { return defaultType; } @@ -150,7 +154,7 @@ class GenericDeclare : public AnnotationType { } std::string GetBelongToName() const { - if (defKind == defByStruct) { + if (defKind == kDefByStruct) { return belongsTo.structType->GetName(); } else { return belongsTo.func->GetName(); @@ -160,12 +164,12 @@ class GenericDeclare : public AnnotationType { void Dump() override; void SetBelongToStruct(MIRStructType *s) { - defKind = defByStruct; + defKind = kDefByStruct; belongsTo.structType = s; } void SetBelongToFunc(MIRFunction *f) { - defKind = defByFunc; + defKind = kDefByFunc; belongsTo.func = f; } @@ -177,11 +181,11 @@ class GenericDeclare : public AnnotationType { }; DefPoint belongsTo; enum DefKind { - defByNone, - defByStruct, - defByFunc + kDefByNone, + kDefByStruct, + kDefByFunc }; - DefKind defKind = defByNone; + DefKind defKind = kDefByNone; }; class ExtendGeneric : public AnnotationType { @@ -189,7 +193,9 @@ class ExtendGeneric : public AnnotationType { ExtendGeneric(AnnotationType &c, EInfo h) : AnnotationType(kExtendType, GStrIdx(0)), contains(&c), eInfo(h) { CHECK_FATAL(c.GetKind() != kGenericMatch, "must be"); } - ~ExtendGeneric() override = default; + ~ExtendGeneric() override { + contains = nullptr; + } void Dump() override { std::cout << (eInfo == kHierarchyExtend ? '+' : (eInfo == kArrayType ? 
'[' : '-')); @@ -266,7 +272,13 @@ class AnnotationAnalysis : public AnalysisResult { MIRType &classType = GetTypeFromTyIdx(GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(strIdx)); dummyObj = pragmaMp.New(strIdx, &static_cast(classType), pragmaAllocator); }; - ~AnnotationAnalysis() override = default; + ~AnnotationAnalysis() override { + genericMatch = nullptr; + dummyObj = nullptr; + pragmaMemPool = nullptr; + mirModule = nullptr; + klassH = nullptr; + } void Run(); private: void AnalysisAnnotation(); diff --git a/src/mapleall/mpl2mpl/include/array_hotfunclist.def b/src/mapleall/mpl2mpl/include/array_hotfunclist.def index eb367cd0d0b0fe0a7dbf0b35084523fb367e8357..bc5ee63fe57b8f5b1e1d145ba05425791089801d 100644 --- a/src/mapleall/mpl2mpl/include/array_hotfunclist.def +++ b/src/mapleall/mpl2mpl/include/array_hotfunclist.def @@ -33,4 +33,4 @@ "Ljava_2Flang_2FThrowable_3B_7CgetOurStackTrace_7C_28_29ALjava_2Flang_2FStackTraceElement_3B", "Ldalvik_2Fsystem_2FVMStack_3B_7CgetStackClass2_7C_28_29Ljava_2Flang_2FClass_3B", "Lcom_2Fandroid_2Fserver_2Fam_2FActivityManagerService_3B_7CattachApplicationLocked_7C_28Landroid_2Fapp_2FIApplicationThread_3BI_29Z", -"Lcom_2Fandroid_2Fserver_2FInputMethodManagerService_3B_7ChideCurrentInputLocked_7C_28ILandroid_2Fos_2FResultReceiver_3B_29Z" +"Lcom_2Fandroid_2Fserver_2FInputMethodManagerService_3B_7ChideCurrentInputLocked_7C_28ILandroid_2Fos_2FResultReceiver_3B_29Z", diff --git a/src/mapleall/mpl2mpl/include/call_graph.h b/src/mapleall/mpl2mpl/include/call_graph.h index 0291f5f722b3fc12393cd029d503736dbe65e111..5cc68c219d58607b47632bda90bf8ea338a32210 100644 --- a/src/mapleall/mpl2mpl/include/call_graph.h +++ b/src/mapleall/mpl2mpl/include/call_graph.h @@ -91,14 +91,14 @@ struct Comparator { // Information description of each callsite class CallInfo { public: - CallInfo(CallType type, MIRFunction *callee, StmtNode *node, uint32 ld, uint32 stmtId, bool local = false) - : areAllArgsLocal(local), cType(type), callee(callee), 
callStmt(node), loopDepth(ld), id(stmtId) {} - CallInfo(CallType type, MIRFunction *caller, MIRFunction &callee, StmtNode *node, uint32 ld, uint32 stmtId, + CallInfo(CallType type, MIRFunction *curCallee, StmtNode *node, uint32 ld, uint32 stmtId, bool local = false) + : areAllArgsLocal(local), cType(type), callee(curCallee), callStmt(node), loopDepth(ld), id(stmtId) {} + CallInfo(CallType type, MIRFunction *curCaller, MIRFunction *curCallee, StmtNode *node, uint32 ld, uint32 stmtId, bool local = false) : areAllArgsLocal(local), cType(type), - caller(caller), - callee(&callee), + caller(curCaller), + callee(curCallee), callStmt(node), loopDepth(ld), id(stmtId) {} @@ -360,9 +360,9 @@ class CGNode : public BaseGraphNode { } void GetCaller(std::vector &callInfos) { - for (const auto &pair : callers) { - auto *callerNode = pair.first; - auto *callStmts = pair.second; + for (auto pairIter = callers.cbegin(); pairIter != callers.cend(); ++pairIter) { + auto *callerNode = pairIter->first; + auto *callStmts = pairIter->second; for (auto *callStmt : *callStmts) { auto *callInfo = callerNode->GetCallInfo(*callStmt); CHECK_NULL_FATAL(callInfo); @@ -371,7 +371,7 @@ class CGNode : public BaseGraphNode { } } - CallInfo *GetCallInfo(StmtNode &stmtNode) { + CallInfo *GetCallInfo(const StmtNode &stmtNode) { return GetCallInfoByStmtId(stmtNode.GetStmtID()); } @@ -518,27 +518,33 @@ using Caller2Cands = std::pair; class CallGraph : public AnalysisResult { public: CallGraph(MIRModule &m, MemPool &memPool, MemPool &templPool, const KlassHierarchy &kh, const std::string &fn); - ~CallGraph() override = default; + ~CallGraph() override { + klassh = nullptr; + mirModule = nullptr; + entryNode = nullptr; + mirBuilder = nullptr; + callExternal = nullptr; + } void InitCallExternal() { callExternal = cgAlloc.GetMemPool()->New(static_cast(nullptr), &cgAlloc, numOfNodes++); } - const CGNode *CallExternal() const { - return callExternal; + const CGNode &CallExternal() const { + return 
*callExternal; } void BuildCallGraph(); - const CGNode *GetEntryNode() const { - return entryNode; + const CGNode &GetEntryNode() const { + return *entryNode; } const MapleVector &GetRootNodes() const { return rootNodes; } - const KlassHierarchy *GetKlassh() const { - return klassh; + const KlassHierarchy &GetKlassh() const { + return *klassh; } const MapleVector*> &GetSCCTopVec() const { @@ -550,11 +556,11 @@ class CallGraph : public AnalysisResult { } void HandleBody(MIRFunction &func, BlockNode &body, CGNode &node, uint32 loopDepth); - void HandleCall(BlockNode &body, CGNode &node, StmtNode &stmt, uint32 loopDepth2); + void HandleCall(BlockNode &body, CGNode &node, StmtNode &stmt, uint32 loopDepth); void HandleICall(BlockNode &body, CGNode &node, StmtNode *stmt, uint32 loopDepth); MIRType *GetFuncTypeFromFuncAddr(const BaseNode *base); void RecordLocalConstValue(const StmtNode *stmt); - CallNode *ReplaceIcallToCall(BlockNode &body, IcallNode *icall, PUIdx newPUIdx) const; + CallNode *ReplaceIcallToCall(BlockNode &body, IcallNode &icall, PUIdx newPUIdx) const; void CollectAddroffuncFromExpr(const BaseNode *expr); void CollectAddroffuncFromStmt(const StmtNode *stmt); void CollectAddroffuncFromConst(MIRConst *mirConst); @@ -564,7 +570,7 @@ class CallGraph : public AnalysisResult { CGNode *GetCGNode(MIRFunction *func) const; CGNode *GetCGNode(const PUIdx puIdx) const; void UpdateCaleeCandidate(PUIdx callerPuIdx, const IcallNode *icall, PUIdx calleePuidx, CallNode *call) const; - void UpdateCaleeCandidate(PUIdx callerPuIdx, const IcallNode *icall, std::set &candidate) const; + void UpdateCaleeCandidate(PUIdx callerPuIdx, const IcallNode *icall, const std::set &candidate) const; SCCNode *GetSCCNode(MIRFunction *func) const; bool IsRootNode(MIRFunction *func) const; void UpdateCallGraphNode(CGNode &node); @@ -585,7 +591,7 @@ class CallGraph : public AnalysisResult { return nodesMap.begin(); } - iterator_const CBegin() { + const iterator_const CBegin() const { return 
nodesMap.cbegin(); } @@ -593,7 +599,7 @@ class CallGraph : public AnalysisResult { return nodesMap.end(); } - iterator_const CEnd() { + const iterator_const CEnd() const { return nodesMap.cend(); } @@ -637,11 +643,11 @@ class CallGraph : public AnalysisResult { CallInfo *GenCallInfo(CallType type, MIRFunction *call, StmtNode *s, uint32 loopDepth, uint32 callsiteID) { MIRFunction *caller = mirModule->CurFunction(); ASSERT_NOT_NULL(call); - return cgAlloc.GetMemPool()->New(type, caller, *call, s, loopDepth, callsiteID); + return cgAlloc.GetMemPool()->New(type, caller, call, s, loopDepth, callsiteID); } CallInfo *GenCallInfo(CallType type, MIRFunction &caller, MIRFunction &callee, StmtNode &s) { - return cgAlloc.GetMemPool()->New(type, &caller, callee, &s, 0, s.GetStmtID()); + return cgAlloc.GetMemPool()->New(type, &caller, &callee, &s, 0, s.GetStmtID()); } bool debugFlag = false; @@ -670,13 +676,13 @@ class IPODevirtulize { : cgAlloc(memPool), mirBuilder(cgAlloc.GetMemPool()->New(m)), klassh(kh), debugFlag(false) {} ~IPODevirtulize() = default; - void DevirtualFinal(); - const KlassHierarchy *GetKlassh() const { - return klassh; + void DevirtualFinal() const; + const KlassHierarchy &GetKlassh() const { + return *klassh; } private: - void SearchDefInMemberMethods(const Klass &klass); + void SearchDefInMemberMethods(const Klass &klass) const; void SearchDefInClinit(const Klass &klass) const; MapleAllocator cgAlloc; MIRBuilder *mirBuilder; diff --git a/src/mapleall/mpl2mpl/include/class_hierarchy.h b/src/mapleall/mpl2mpl/include/class_hierarchy.h index bbf3059d8e2fadd08998668f02c9ac1a0108ce06..89a314ea1c2b1e2d1c7def0e3199563213b39242 100644 --- a/src/mapleall/mpl2mpl/include/class_hierarchy.h +++ b/src/mapleall/mpl2mpl/include/class_hierarchy.h @@ -196,8 +196,8 @@ class Klass { clinitMethod = m; } - MIRSymbol *GetClassInitBridge() const { - return classInitBridge; + MIRSymbol &GetClassInitBridge() const { + return *classInitBridge; } void SetClassInitBridge(MIRSymbol 
*s) { @@ -335,7 +335,9 @@ class WKTypes { class KlassHierarchy : public AnalysisResult { public: KlassHierarchy(MIRModule *mirmodule, MemPool *memPool); - ~KlassHierarchy() override = default; + ~KlassHierarchy() override { + mirModule = nullptr; + } // Get a class. Return nullptr it does not exist. Klass *GetKlassFromStrIdx(GStrIdx strIdx) const; @@ -369,8 +371,8 @@ class KlassHierarchy : public AnalysisResult { return topoWorkList; } - const MIRModule *GetModule() const { - return mirModule; + const MIRModule &GetModule() const { + return *mirModule; } static bool traceFlag; private: @@ -386,7 +388,7 @@ class KlassHierarchy : public AnalysisResult { // Get a vector of child class and implemented class void GetChildKlasses(const Klass &klass, std::vector &childKlasses) const; void ExceptionFlagProp(Klass &klass); - Klass *AddClassFlag(const std::string &name, uint32 flag); + Klass *AddClassFlag(const std::string &name, uint32 flag) const; int GetFieldIDOffsetBetweenClasses(const Klass &super, const Klass &base) const; void TopologicalSortKlasses(); void MarkClassFlags(); diff --git a/src/mapleall/mpl2mpl/include/clone.h b/src/mapleall/mpl2mpl/include/clone.h index b60605429d866d17b35e8c32702de47fa19650d6..7dc89134a27cdc8884b40a939a889c31f781b3c6 100644 --- a/src/mapleall/mpl2mpl/include/clone.h +++ b/src/mapleall/mpl2mpl/include/clone.h @@ -61,7 +61,11 @@ class Clone : public AnalysisResult { : AnalysisResult(memPool), mirModule(mod), allocator(memPool), mirBuilder(builder), kh(kh), replaceRetIgnored(memPool->New(memPool)) {} - ~Clone() override = default; + ~Clone() override { + mirModule = nullptr; + kh = nullptr; + replaceRetIgnored = nullptr; + } static MIRSymbol *CloneLocalSymbol(const MIRSymbol &oldSym, const MIRFunction &newFunc); static void CloneSymbols(MIRFunction &newFunc, const MIRFunction &oldFunc); @@ -73,8 +77,8 @@ class Clone : public AnalysisResult { void CopyFuncInfo(MIRFunction &originalFunction, MIRFunction &newFunc) const; void 
UpdateFuncInfo(MIRFunction &newFunc); void CloneArgument(MIRFunction &originalFunction, ArgVector &argument) const; - const ReplaceRetIgnored *GetReplaceRetIgnored() const { - return replaceRetIgnored; + const ReplaceRetIgnored &GetReplaceRetIgnored() const { + return *replaceRetIgnored; } void UpdateReturnVoidIfPossible(CallMeStmt *callMeStmt, const MIRFunction &targetFunc) const; diff --git a/src/mapleall/mpl2mpl/include/constantfold.h b/src/mapleall/mpl2mpl/include/constantfold.h index 1f894a9216d172111051a51db3c25c699b710ce2..da0c5f53a9de3c161d2007d0832fe153ac8bf24e 100644 --- a/src/mapleall/mpl2mpl/include/constantfold.h +++ b/src/mapleall/mpl2mpl/include/constantfold.h @@ -43,7 +43,7 @@ class ConstantFold : public FuncOptimizeImpl { // simplification happened. If the statement can be deleted after a // simplification, it returns nullptr. StmtNode *Simplify(StmtNode *node); - StmtNode *SimplifyIassignWithAddrofBaseNode(IassignNode &node, const AddrofNode &base); + StmtNode *SimplifyIassignWithAddrofBaseNode(IassignNode &node, const AddrofNode &base) const; FuncOptimizeImpl *Clone() override { return new ConstantFold(*this); @@ -55,13 +55,14 @@ class ConstantFold : public FuncOptimizeImpl { } template T CalIntValueFromFloatValue(T value, const MIRType &resultType) const; - MIRConst *FoldFloorMIRConst(const MIRConst&, PrimType, PrimType, bool isFloor = true) const; - MIRConst *FoldRoundMIRConst(const MIRConst&, PrimType, PrimType) const; - MIRConst *FoldTypeCvtMIRConst(const MIRConst&, PrimType, PrimType) const; - MIRConst *FoldSignExtendMIRConst(Opcode, PrimType, uint8, const IntVal&) const; - static MIRConst *FoldIntConstBinaryMIRConst(Opcode opcode, PrimType resultType, const MIRIntConst *intConst0, - const MIRIntConst *intConst1); - MIRConst *FoldConstComparisonMIRConst(Opcode, PrimType, PrimType, const MIRConst&, const MIRConst&); + MIRConst *FoldFloorMIRConst(const MIRConst &cst, PrimType fromType, PrimType toType, bool isFloor = true) const; + MIRConst 
*FoldRoundMIRConst(const MIRConst &cst, PrimType fromType, PrimType toType) const; + MIRConst *FoldTypeCvtMIRConst(const MIRConst &cst, PrimType fromType, PrimType toType) const; + MIRConst *FoldSignExtendMIRConst(Opcode opcode, PrimType resultType, uint8 size, const IntVal &val) const; + static MIRConst *FoldIntConstBinaryMIRConst(Opcode opcode, PrimType resultType, const MIRIntConst &intConst0, + const MIRIntConst &intConst1); + MIRConst *FoldConstComparisonMIRConst(Opcode opcode, PrimType resultType, PrimType opndType, + const MIRConst &const0, const MIRConst &const1) const; static bool IntegerOpIsOverflow(Opcode op, PrimType primType, int64 cstA, int64 cstB); static MIRIntConst *FoldIntConstUnaryMIRConst(Opcode opcode, PrimType resultType, const MIRIntConst *constNode); @@ -107,8 +108,8 @@ class ConstantFold : public FuncOptimizeImpl { const ConstvalNode &const1) const; ConstvalNode *FoldIntConstComparison(Opcode opcode, PrimType resultType, PrimType opndType, const ConstvalNode &const0, const ConstvalNode &const1) const; - MIRIntConst *FoldIntConstComparisonMIRConst(Opcode, PrimType, PrimType, const MIRIntConst&, - const MIRIntConst&) const; + MIRIntConst *FoldIntConstComparisonMIRConst(Opcode opcode, PrimType resultType, PrimType opndType, + const MIRIntConst &intConst0, const MIRIntConst &intConst1) const; ConstvalNode *FoldIntConstBinary(Opcode opcode, PrimType resultType, const ConstvalNode &const0, const ConstvalNode &const1) const; ConstvalNode *FoldFPConstComparison(Opcode opcode, PrimType resultType, PrimType opndType, const ConstvalNode &const0, @@ -133,13 +134,13 @@ class ConstantFold : public FuncOptimizeImpl { BaseNode *Negate(BaseNode *node) const; BaseNode *Negate(UnaryNode *node) const; BaseNode *Negate(const ConstvalNode *node) const; - BinaryNode *NewBinaryNode(BinaryNode *old, Opcode op, PrimType primeType, BaseNode *lhs, BaseNode *rhs) const; - UnaryNode *NewUnaryNode(UnaryNode *old, Opcode op, PrimType primeType, BaseNode *expr) const; + 
BinaryNode *NewBinaryNode(BinaryNode *old, Opcode op, PrimType primType, BaseNode *lhs, BaseNode *rhs) const; + UnaryNode *NewUnaryNode(UnaryNode *old, Opcode op, PrimType primType, BaseNode *expr) const; std::pair> DispatchFold(BaseNode *node); BaseNode *PairToExpr(PrimType resultType, const std::pair> &pair) const; - BaseNode *SimplifyDoubleCompare(CompareNode &node) const; + BaseNode *SimplifyDoubleCompare(CompareNode &compareNode) const; CompareNode *FoldConstComparisonReverse(Opcode opcode, PrimType resultType, PrimType opndType, - BaseNode &l, BaseNode &r); + BaseNode &l, BaseNode &r) const; MIRModule *mirModule; CFConfig cfc; }; diff --git a/src/mapleall/mpl2mpl/include/expand128floats.h b/src/mapleall/mpl2mpl/include/expand128floats.h index b98a29a42e446c4cc2bfb02eca77e4fe4c4c66d3..a685636151d2c2ad75bf79fd8903243b2f9e2faa 100644 --- a/src/mapleall/mpl2mpl/include/expand128floats.h +++ b/src/mapleall/mpl2mpl/include/expand128floats.h @@ -34,10 +34,10 @@ class Expand128Floats : public FuncOptimizeImpl { void Finish() override {} private: - std::string GetSequentialName0(const std::string &prefix, uint32_t num); + std::string GetSequentialName0(const std::string &prefix, uint32_t num) const; uint32 GetSequentialNumber() const; - std::string GetSequentialName(const std::string &prefix); - std::string SelectSoftFPCall(Opcode opCode, const BaseNode *node); + std::string GetSequentialName(const std::string &prefix) const; + std::string SelectSoftFPCall(Opcode opCode, BaseNode *node) const; void ReplaceOpNode(BlockNode *block, BaseNode *baseNode, size_t opndId, BaseNode *currNode, MIRFunction *func, StmtNode *stmt); bool CheckAndUpdateOp(BlockNode *block, BaseNode *node, diff --git a/src/mapleall/mpl2mpl/include/ext_constantfold.h b/src/mapleall/mpl2mpl/include/ext_constantfold.h index bdaf517cb421ae35b565c7fa557b15c9dae8eb3b..bb6fd30de85012c614c6c8ecb28302265dc55d6e 100644 --- a/src/mapleall/mpl2mpl/include/ext_constantfold.h +++ 
b/src/mapleall/mpl2mpl/include/ext_constantfold.h @@ -26,7 +26,7 @@ class ExtConstantFold { BaseNode* ExtFoldTernary(TernaryNode *node); StmtNode *ExtSimplify(StmtNode *node); BaseNode *ExtFold(BaseNode *node); - BaseNode *ExtFoldIor(BinaryNode *node); + BaseNode *ExtFoldIor(BinaryNode *node) const; BaseNode *ExtFoldXand(BinaryNode *node); StmtNode *ExtSimplifyBlock(BlockNode *node); StmtNode *ExtSimplifyIf(IfStmtNode *node); diff --git a/src/mapleall/mpl2mpl/include/gen_check_cast.h b/src/mapleall/mpl2mpl/include/gen_check_cast.h index 1894a97da221ebdafcdcad81bbf497be5b7f247d..87bd0db5795a6edf91326995db0368472ddd5e59 100644 --- a/src/mapleall/mpl2mpl/include/gen_check_cast.h +++ b/src/mapleall/mpl2mpl/include/gen_check_cast.h @@ -57,21 +57,21 @@ class CheckCastGenerator : public FuncOptimizeImpl { void InitFuncs(); void GenAllCheckCast(); void GenCheckCast(StmtNode &stmt); - BaseNode *GetObjectShadow(BaseNode *opnd); - MIRSymbol *GetOrCreateClassInfoSymbol(const std::string &className); + BaseNode *GetObjectShadow(BaseNode *opnd) const; + MIRSymbol *GetOrCreateClassInfoSymbol(const std::string &className) const; void GenAllCheckCast(bool isHotFunc); void OptimizeInstanceof(); void OptimizeIsAssignableFrom(); void CheckIsAssignableFrom(BlockNode &blockNode, StmtNode &stmt, const IntrinsicopNode &intrinsicNode); void ConvertCheckCastToIsAssignableFrom(StmtNode &stmt); - void AssignedCastValue(StmtNode &stmt); - void ConvertInstanceofToIsAssignableFrom(StmtNode &stmt, const IntrinsicopNode &intrinsicNode); + void AssignedCastValue(StmtNode &stmt) const; + void ConvertInstanceofToIsAssignableFrom(StmtNode &stmt, const IntrinsicopNode &intrinsicNode) const; void ReplaceNoSubClassIsAssignableFrom(BlockNode &blockNode, StmtNode &stmt, const MIRPtrType &ptrType, - const IntrinsicopNode &intrinsicNode); + const IntrinsicopNode &intrinsicNode) const; void ReplaceIsAssignableFromUsingCache(BlockNode &blockNode, StmtNode &stmt, const MIRPtrType &targetClassType, - const 
IntrinsicopNode &intrinsicNode); + const IntrinsicopNode &intrinsicNode) const; bool IsDefinedConstClass(const StmtNode &stmt, const MIRPtrType &targetClassType, - PregIdx &classSymPregIdx, MIRSymbol *&classSym); + PregIdx &classSymPregIdx, MIRSymbol *&classSym) const; MIRType *pointerObjType = nullptr; MIRType *pointerClassMetaType = nullptr; MIRType *classinfoType = nullptr; diff --git a/src/mapleall/mpl2mpl/include/gen_profile.h b/src/mapleall/mpl2mpl/include/gen_profile.h index 038fdea9a07c8d8d5986e704066b14d4ab55a59b..70ef235a40bf1eb5272df7e1693934d7bf688ab4 100644 --- a/src/mapleall/mpl2mpl/include/gen_profile.h +++ b/src/mapleall/mpl2mpl/include/gen_profile.h @@ -31,9 +31,9 @@ static constexpr const uint32_t kMplFuncProfCtrInfoNum = 1; class ProfileGenPM : public SccPM { public: explicit ProfileGenPM(MemPool *memPool) : SccPM(memPool, &id) {} + ~ProfileGenPM() override = default; bool PhaseRun(MIRModule &mod) override; PHASECONSTRUCTOR(ProfileGenPM); - ~ProfileGenPM() override {} std::string PhaseName() const override; private: void GetAnalysisDependence(AnalysisDep &aDep) const override; diff --git a/src/mapleall/mpl2mpl/include/ginline.h b/src/mapleall/mpl2mpl/include/ginline.h index a39ad12701550bd860568691adbb2bcddc10dc24..893180ac78804ce28723d3873636a93d126aa23b 100644 --- a/src/mapleall/mpl2mpl/include/ginline.h +++ b/src/mapleall/mpl2mpl/include/ginline.h @@ -34,8 +34,8 @@ namespace maple { class CallSiteNode { public: - CallSiteNode(CallInfo *info, BadnessInfo &badnessInfo, uint32 inlineDepth) - : callInfo(info), badInfo(badnessInfo), depth(inlineDepth) {} + CallSiteNode(CallInfo *info, const BadnessInfo &badnessInfo, uint32 inlineDepth) : + callInfo(info), badInfo(badnessInfo), depth(inlineDepth) {} auto *GetCallInfo() const { return callInfo; @@ -140,7 +140,7 @@ class GInline { BadnessInfo *CalcBadness(CallInfo &info); void InsertNewCallSite(CallInfo &info, uint32 depth); void UpdateCallSite(CallInfo &info); - bool CanIgnoreGrowthLimit(const 
CallSiteNode &callSiteNode); + bool CanIgnoreGrowthLimit(const CallSiteNode &callSiteNode) const; void PrintGInlineReport() const; MapleSet funcsToBeRemoved; diff --git a/src/mapleall/mpl2mpl/include/inline.h b/src/mapleall/mpl2mpl/include/inline.h index 2428830522a7a3eb23353101985026774de6435d..7840a91ce87841cf3a3df422b8509745518fabcd 100644 --- a/src/mapleall/mpl2mpl/include/inline.h +++ b/src/mapleall/mpl2mpl/include/inline.h @@ -14,17 +14,7 @@ */ #ifndef MPL2MPL_INCLUDE_INLINE_H #define MPL2MPL_INCLUDE_INLINE_H -#include "call_graph.h" #include "inline_transformer.h" -#include "maple_phase_manager.h" -#include "me_option.h" -#include "mempool.h" -#include "mempool_allocator.h" -#include "mir_builder.h" -#include "mir_function.h" -#include "mir_parser.h" -#include "opcode_info.h" -#include "string_utils.h" namespace maple { @@ -80,14 +70,14 @@ class MInline { InlineResult AnalyzeCallee(const MIRFunction &caller, MIRFunction &callee, const CallNode &callStmt); void AdjustInlineThreshold(const MIRFunction &caller, MIRFunction &callee, const CallNode &callStmt, uint32 &threshold, uint32 &thresholdType); - bool IsSmallCalleeForEarlyInline(MIRFunction &callee, int32 *outInsns); + bool IsSmallCalleeForEarlyInline(MIRFunction &callee, int32 *outInsns) const; virtual bool CanInline(CGNode*, std::unordered_map&) { return false; } bool CheckCalleeAndInline(MIRFunction*, BlockNode *enclosingBlk, CallNode*, MIRFunction*); bool SuitableForTailCallOpt(BaseNode &enclosingBlk, const StmtNode &stmtNode, CallNode &callStmt); - bool CalleeReturnValueCheck(StmtNode &stmtNode, CallNode &callStmt); + bool CalleeReturnValueCheck(StmtNode &stmtNode, CallNode &callStmt) const; void InlineCalls(CGNode &node); void PostInline(MIRFunction &caller); void InlineCallsBlock(MIRFunction &func, BlockNode &enclosingBlk, BaseNode &baseNode, bool &changed, diff --git a/src/mapleall/mpl2mpl/include/inline_mplt.h b/src/mapleall/mpl2mpl/include/inline_mplt.h index 
372c65922c32b24e0d6fb2c04ac35679f840942e..d3d07a4dd23b10e0275037520325592fad1677f2 100644 --- a/src/mapleall/mpl2mpl/include/inline_mplt.h +++ b/src/mapleall/mpl2mpl/include/inline_mplt.h @@ -40,7 +40,7 @@ class InlineMplt { void CollectTypesForGlobalVar(const MIRSymbol &globalSymbol); void DumpInlineCandidateToFile(const std::string &fileNameStr); void DumpOptimizedFunctionTypes(); - uint32 GetFunctionSize(MIRFunction &mirFunc); + uint32 GetFunctionSize(MIRFunction &mirFunc) const; private: std::set optimizedFuncs; diff --git a/src/mapleall/mpl2mpl/include/inline_summary.h b/src/mapleall/mpl2mpl/include/inline_summary.h index 35f5a5be35f709af4ccb905b5fc2a48fd6e61199..13cb0f040823d6b043ff9c54712ead2a73593ed2 100644 --- a/src/mapleall/mpl2mpl/include/inline_summary.h +++ b/src/mapleall/mpl2mpl/include/inline_summary.h @@ -928,7 +928,7 @@ class InlineSummaryCollector { inlineSummary = func->GetMirFunc()->GetOrCreateInlineSummary(); } - void PreparePredicateForBB(BB &bb) { + void PreparePredicateForBB(const BB &bb) { auto bbId = bb.GetBBId().get(); if (allBBPred[bbId] == nullptr) { allBBPred[bbId] = tmpAlloc.New(tmpAlloc); diff --git a/src/mapleall/mpl2mpl/include/inline_transformer.h b/src/mapleall/mpl2mpl/include/inline_transformer.h index 8663cbfddcd50174852470d9c83f26b6a497412c..ff95862d7e512150537a9d8a92c73d0cc3a9f69b 100644 --- a/src/mapleall/mpl2mpl/include/inline_transformer.h +++ b/src/mapleall/mpl2mpl/include/inline_transformer.h @@ -50,6 +50,37 @@ constexpr char kSecondInlineEndComment[] = "second inlining end: FUNC "; void UpdateEnclosingBlockCallback(const BlockNode &oldBlock, BlockNode &newBlock, const StmtNode &oldStmt, StmtNode &newStmt, CallBackData *data); +enum class RealArgPropCandKind { + kUnknown, + kConst, + kVar, + kPreg +}; + +// The candidate of real argument that can be propagated to callee's formal. +// It is either a mirConst or a CONST symbol (both local and global are OK), +// see RealArgPropCand::Parse for details. 
+struct RealArgPropCand { + RealArgPropCandKind kind = RealArgPropCandKind::kUnknown; + union { + MIRSymbol *symbol = nullptr; + MIRConst *mirConst; + } data; + + void Parse(MIRFunction &caller, BaseNode &argExpr); + + PrimType GetPrimType() const { + if (kind == RealArgPropCandKind::kConst) { + CHECK_NULL_FATAL(data.mirConst); + return data.mirConst->GetType().GetPrimType(); + } else if (kind == RealArgPropCandKind::kVar || kind == RealArgPropCandKind::kPreg) { + ASSERT_NOT_NULL(data.symbol); + return data.symbol->GetType()->GetPrimType(); + } + return PTY_unknown; + } +}; + enum InlineStage { kEarlyInline, kGreedyInline @@ -73,7 +104,7 @@ class InlineTransformer { bool PerformInline(BlockNode &enclosingBlk); bool PerformInline(std::vector *newCallInfo = nullptr); - static void ReplaceSymbols(BaseNode *baseNode, uint32 StIdxOff, const std::vector *oldStIdx2New); + static void ReplaceSymbols(BaseNode *baseNode, uint32 stIdxOff, const std::vector *oldStIdx2New); static void ConvertPStaticToFStatic(MIRFunction &func); void SetDumpDetail(bool value) { dumpDetail = value; @@ -93,9 +124,15 @@ class InlineTransformer { void AssignActualsToFormals(BlockNode &newBody, uint32 stIdxOff, uint32 regIdxOff); void AssignActualToFormal(BlockNode& newBody, uint32 stIdxOff, uint32 regIdxOff, BaseNode &oldActual, const MIRSymbol &formal); + void PropConstFormalInBlock(BlockNode &newBody, MIRSymbol &formal, const RealArgPropCand &realArg, + uint32 stIdxOff, uint32 regIdxOff); + void PropConstFormalInNode(BaseNode &baseNode, MIRSymbol &formal, const RealArgPropCand &realArg, + uint32 stIdxOff, uint32 regIdxOff); + void TryReplaceConstFormalWithRealArg(BaseNode &parent, uint32 opndIdx, MIRSymbol &formal, + const RealArgPropCand &realArg, const std::pair &offsetPair); void GenReturnLabel(BlockNode &newBody, uint32 inlinedTimes); void HandleReturn(BlockNode &newBody); - void ReplaceCalleeBody(BlockNode &enclosingBlk, BlockNode &newBody); + void ReplaceCalleeBody(BlockNode 
&enclosingBlock, BlockNode &newBody); LabelIdx CreateReturnLabel(uint32 inlinedTimes) const; GotoNode *UpdateReturnStmts(BlockNode &newBody, const CallReturnVector *callReturnVector, int &retCount) const; GotoNode *DoUpdateReturnStmts(BlockNode &newBody, StmtNode &stmt, const CallReturnVector *callReturnVector, diff --git a/src/mapleall/mpl2mpl/include/java_intrn_lowering.h b/src/mapleall/mpl2mpl/include/java_intrn_lowering.h index f14b722d58fecdfb36b1492afa9c4e1ef3eb9ca2..3be5a2da48ce9173bca9e43708ecf4bb29a0a0c2 100644 --- a/src/mapleall/mpl2mpl/include/java_intrn_lowering.h +++ b/src/mapleall/mpl2mpl/include/java_intrn_lowering.h @@ -23,7 +23,12 @@ namespace maple { class JavaIntrnLowering : public FuncOptimizeImpl { public: JavaIntrnLowering(MIRModule &mod, KlassHierarchy *kh, bool dump); - ~JavaIntrnLowering() override = default; + ~JavaIntrnLowering() override { + classForName3Func = nullptr; + classLoaderPointerToType = nullptr; + getCurrentClassLoaderFunc = nullptr; + classForName1Func = nullptr; + } FuncOptimizeImpl *Clone() override { return new JavaIntrnLowering(*this); @@ -34,13 +39,13 @@ class JavaIntrnLowering : public FuncOptimizeImpl { void InitFuncs(); void InitLists(); void ProcessStmt(StmtNode &stmt) override; - void ProcessJavaIntrnMerge(StmtNode &assignNode, const IntrinsicopNode &intrinNode); - BaseNode *JavaIntrnMergeToCvtType(PrimType destType, PrimType srcType, BaseNode *src); + void ProcessJavaIntrnMerge(StmtNode &assignNode, const IntrinsicopNode &intrinNode) const; + BaseNode *JavaIntrnMergeToCvtType(PrimType destType, PrimType srcType, BaseNode *src) const; void LoadClassLoaderInvocation(const std::string &list); void CheckClassLoaderInvocation(const CallNode &callNode) const; void DumpClassLoaderInvocation(const CallNode &callNode); void ProcessForNameClassLoader(CallNode &callNode); - void ProcessJavaIntrnFillNewArray(IntrinsiccallNode &intrinCall); + void ProcessJavaIntrnFillNewArray(IntrinsiccallNode &intrinCall) const; std::string 
outFileName; std::unordered_set clInterfaceSet; std::multimap clInvocationMap; diff --git a/src/mapleall/mpl2mpl/include/method_replace.h b/src/mapleall/mpl2mpl/include/method_replace.h index 0ceea2485754efc1d94ed22d0ceb19952f38c910..d5dabe1e0af3c9ab6cb2801882bb3fbc3de28cd5 100644 --- a/src/mapleall/mpl2mpl/include/method_replace.h +++ b/src/mapleall/mpl2mpl/include/method_replace.h @@ -30,7 +30,9 @@ class MethodReplace : public AnalysisResult { MethodReplace(MIRModule *mod, MemPool *mp, MIRBuilder &builder) : AnalysisResult(mp), mirModule(mod), allocator(mp), mBuilder(builder) {} - ~MethodReplace() override = default; + ~MethodReplace() override { + mirModule = nullptr; + } void DoMethodReplace(); void Init(); diff --git a/src/mapleall/mpl2mpl/include/mpl_profdata_parser.h b/src/mapleall/mpl2mpl/include/mpl_profdata_parser.h index 0877ff1a11de1d64d2c8c234aa78bec9e5230e6f..6e720c2971f4a3d2a28fff790fdc9f5140f5b283 100644 --- a/src/mapleall/mpl2mpl/include/mpl_profdata_parser.h +++ b/src/mapleall/mpl2mpl/include/mpl_profdata_parser.h @@ -57,6 +57,9 @@ const uint32_t kMapleProfDataMagicNumber = 0xA0EFEF; class ProfDataBinaryImportBase { public: ProfDataBinaryImportBase(std::string &filename, std::ifstream &input) : fileName(filename), inputStream(input) {} + virtual ~ProfDataBinaryImportBase() { + pos = nullptr; + } template T ReadNum(); std::ifstream &GetInputStream() { @@ -81,6 +84,7 @@ class ProfDataBinaryImportBase { class ProfileSummaryImport : public ProfDataBinaryImportBase { public: ProfileSummaryImport(std::string &outputFile, std::ifstream &input) : ProfDataBinaryImportBase(outputFile, input) {} + ~ProfileSummaryImport() override = default; void ReadSummary(MplProfileData* profData); private: @@ -90,6 +94,7 @@ class ProfileSummaryImport : public ProfDataBinaryImportBase { class FunctionProfileImport : public ProfDataBinaryImportBase { public: FunctionProfileImport(std::string &inputFile, std::ifstream &input) : ProfDataBinaryImportBase(inputFile, input) {} + 
~FunctionProfileImport() override = default; int ReadFuncProfile(MplProfileData *profData); }; @@ -97,7 +102,10 @@ class MplProfDataParser : public AnalysisResult { public: MplProfDataParser(MIRModule &mirmod, MemPool *mp, bool debug) : AnalysisResult(mp), m(mirmod), alloc(memPool), mempool(mp), dumpDetail(debug) {} - ~MplProfDataParser() override = default; + ~MplProfDataParser() override { + profData = nullptr; + mempool = nullptr; + } MplProfileData *GetProfData() { return profData; } diff --git a/src/mapleall/mpl2mpl/include/muid_replacement.h b/src/mapleall/mpl2mpl/include/muid_replacement.h index 508d603d38f2bd993e97f4dcbc862e08e6bb4776..c61f0f42a7ad05d625ef5c2a140cf11377f5a2e0 100644 --- a/src/mapleall/mpl2mpl/include/muid_replacement.h +++ b/src/mapleall/mpl2mpl/include/muid_replacement.h @@ -135,8 +135,8 @@ class MUIDReplacement : public FuncOptimizeImpl { void GenCompilerMfileStatus(); bool FindFuncNameInSimplfy(const std::string &name); bool CheckFunctionIsUsed(const MIRFunction &mirFunc) const; - void ReplaceMethodMetaFuncAddr(const MIRSymbol &funcSymbol, uint64 index); - void ReplaceFieldMetaStaticAddr(const MIRSymbol &mirSymbol, uint32 index); + void ReplaceMethodMetaFuncAddr(const MIRSymbol &funcSymbol, uint64 index) const; + void ReplaceFieldMetaStaticAddr(const MIRSymbol &mirSymbol, uint32 index) const; void CollectFuncAndDataFromKlasses(); void CollectFuncAndDataFromGlobalTab(); void CollectFuncAndDataFromFuncList(); @@ -145,12 +145,12 @@ class MUIDReplacement : public FuncOptimizeImpl { void CollectSuperClassArraySymbolData(); void GenerateSourceInfo(); static MIRSymbol *GetSymbolFromName(const std::string &name); - ConstvalNode* GetConstvalNode(uint64 index); + ConstvalNode* GetConstvalNode(uint64 index) const; void InsertArrayClassSet(const MIRType &type); - MIRType *GetIntrinsicConstArrayClass(StmtNode &stmt); + MIRType *GetIntrinsicConstArrayClass(StmtNode &stmt) const; void CollectArrayClass(); void GenArrayClassCache(); - void 
ReleasePragmaMemPool(); + void ReleasePragmaMemPool() const; std::unordered_set arrayClassSet; // The following sets are for internal uses. Sorting order does not matter here. std::unordered_set funcDefSet; diff --git a/src/mapleall/mpl2mpl/include/native_stub_func.h b/src/mapleall/mpl2mpl/include/native_stub_func.h index 7a6bc4270a1e0095baf51acfd42342a755818a1b..ef9d141b32652e978f56acc77d2eabda96bf7c41 100644 --- a/src/mapleall/mpl2mpl/include/native_stub_func.h +++ b/src/mapleall/mpl2mpl/include/native_stub_func.h @@ -54,7 +54,17 @@ class NativeFuncProperty { class NativeStubFuncGeneration : public FuncOptimizeImpl { public: NativeStubFuncGeneration(MIRModule &mod, KlassHierarchy *kh, bool dump); - ~NativeStubFuncGeneration() override = default; + ~NativeStubFuncGeneration() override { + mccSetReliableUnwindContextFunc = nullptr; + regTableConst = nullptr; + mrtDecodeRefFunc = nullptr; + mrtCallSlowNativeExtFunc = nullptr; + regFuncTabConst = nullptr; + mrtPostNativeFunc = nullptr; + mrtCheckThrowPendingExceptionFunc = nullptr; + regFuncSymbol = nullptr; + MRTPreNativeFunc = nullptr; + } void ProcessFunc(MIRFunction *func) override; void Finish() override; @@ -93,12 +103,12 @@ class NativeStubFuncGeneration : public FuncOptimizeImpl { MIRSymbol *regFuncSymbol = nullptr; MIRAggConst *regFuncTabConst = nullptr; MIRFunction *MRTPreNativeFunc = nullptr; - MIRFunction *MRTPostNativeFunc = nullptr; - MIRFunction *MRTDecodeRefFunc = nullptr; - MIRFunction *MRTCheckThrowPendingExceptionFunc = nullptr; - MIRFunction *MRTCallSlowNativeFunc[kSlownativeFuncnum] = { nullptr }; // for native func which args <=8, use x0-x7 - MIRFunction *MRTCallSlowNativeExtFunc = nullptr; - MIRFunction *MCCSetReliableUnwindContextFunc = nullptr; + MIRFunction *mrtPostNativeFunc = nullptr; + MIRFunction *mrtDecodeRefFunc = nullptr; + MIRFunction *mrtCheckThrowPendingExceptionFunc = nullptr; + MIRFunction *mrtCallSlowNativeFunc[kSlownativeFuncnum] = { nullptr }; // for native func which args 
<=8, use x0-x7 + MIRFunction *mrtCallSlowNativeExtFunc = nullptr; + MIRFunction *mccSetReliableUnwindContextFunc = nullptr; static const std::string callSlowNativeFuncs[kSlownativeFuncnum]; }; diff --git a/src/mapleall/mpl2mpl/include/outline.h b/src/mapleall/mpl2mpl/include/outline.h index c743f3fcf69a04642b44b2b21acfcf6fea70115e..c5fc21de05d6814fc6c2f62abb0590536d59ed4f 100644 --- a/src/mapleall/mpl2mpl/include/outline.h +++ b/src/mapleall/mpl2mpl/include/outline.h @@ -23,7 +23,8 @@ class OutlineCandidate { public: explicit OutlineCandidate(RegionCandidate *candidate) : regionCandidate(candidate) {} virtual ~OutlineCandidate() { - regionCandidate = nullptr; + regionCandidate = nullptr; + returnValue = nullptr; } size_t InsertIntoParameterList(BaseNode &expr); @@ -65,7 +66,7 @@ class OutlineCandidate { return returnValue; } - void CreateReturnExpr(SymbolRegPair &outputPair) { + void CreateReturnExpr(const SymbolRegPair &outputPair) { if (returnValue != nullptr) { return; } @@ -105,7 +106,9 @@ class OutlineGroup { regionGroup.emplace_back(®ion); } } - virtual ~OutlineGroup() = default; + virtual ~OutlineGroup() { + outlineFunc = nullptr; + } void PrepareParameterLists() { CollectOutlineInfo(); @@ -152,7 +155,7 @@ class OutlineGroup { std::vector regionGroup; std::vector parameterList; std::vector extraParameterValueNumber; - MIRFunction *outlineFunc; + MIRFunction *outlineFunc = nullptr; GroupId groupId; }; @@ -161,7 +164,9 @@ class OutLine { OutLine(CollectIpaInfo *ipaInfo, MIRModule *module, MemPool *memPool) : ipaInfo(ipaInfo), module(module), memPool(memPool) {} virtual ~OutLine() { - ipaInfo = nullptr; + ipaInfo = nullptr; + memPool = nullptr; + module = nullptr; } void Run(); private: diff --git a/src/mapleall/mpl2mpl/include/reflection_analysis.h b/src/mapleall/mpl2mpl/include/reflection_analysis.h index 9a995e95792e3e83a8b10c870f9b82d8334dbf37..60af8a58aa60ebcc800fa6a8384ec15c16e8c8e0 100644 --- a/src/mapleall/mpl2mpl/include/reflection_analysis.h +++ 
b/src/mapleall/mpl2mpl/include/reflection_analysis.h @@ -122,7 +122,10 @@ class ReflectionAnalysis : public AnalysisResult { isLibcore = true; } } - ~ReflectionAnalysis() override = default; + ~ReflectionAnalysis() override { + klassH = nullptr; + mirModule = nullptr; + } static void GenStrTab(MIRModule &module); static uint32 FindOrInsertRepeatString(const std::string &str, bool isHot = false, uint8 hotType = kLayoutUnused); @@ -132,7 +135,7 @@ class ReflectionAnalysis : public AnalysisResult { static TyIdx GetClassMetaDataTyIdx() { return classMetadataTyIdx; } - void DumpPGOSummary(); + void DumpPGOSummary() const; private: static std::unordered_map &GetStr2IdxMap() { @@ -176,14 +179,14 @@ class ReflectionAnalysis : public AnalysisResult { } static uint32 FirstFindOrInsertRepeatString(const std::string &str, bool isHot, uint8 hotType); - MIRSymbol *GetOrCreateSymbol(const std::string &name, TyIdx tyIdx, bool needInit); - MIRSymbol *GetSymbol(const std::string &name, TyIdx tyIdx); - MIRSymbol *CreateSymbol(GStrIdx strIdx, TyIdx tyIdx); - MIRSymbol *GetSymbol(GStrIdx strIdx, TyIdx tyIdx); + MIRSymbol *GetOrCreateSymbol(const std::string &name, const TyIdx &tyIdx, bool needInit) const; + MIRSymbol *GetSymbol(const std::string &name, const TyIdx &tyIdx) const; + MIRSymbol *CreateSymbol(GStrIdx strIdx, TyIdx tyIdx) const; + MIRSymbol *GetSymbol(GStrIdx strIdx, TyIdx tyIdx) const; void GenClassMetaData(Klass &klass); std::string GetAnnoValueNoArray(const MIRPragmaElement &annoElem); - std::string GetArrayValue(const MapleVector &subElemVector); - std::string GetAnnotationValue(const MapleVector &subElemVector, GStrIdx typeStrIdx); + std::string GetArrayValue(const MapleVector &subelemVector); + std::string GetAnnotationValue(const MapleVector &subelemVector, GStrIdx typeStrIdx); MIRSymbol *GenSuperClassMetaData(std::list superClassList); MIRSymbol *GenFieldOffsetData(const Klass &klass, const std::pair &fieldInfo); MIRSymbol *GetMethodSignatureSymbol(std::string 
signature); @@ -194,7 +197,7 @@ class ReflectionAnalysis : public AnalysisResult { void GenFieldMetaCompact(const Klass &klass, MIRStructType &fieldsInfoCompactType, const std::pair &fieldInfo, MIRAggConst &aggConstCompact); - void GenMethodMetaCompact(const Klass &klass, MIRStructType &methodsInfoType, int idx, + void GenMethodMetaCompact(const Klass &klass, MIRStructType &methodsInfoCompactType, int idx, const MIRSymbol &funcSym, MIRAggConst &aggConst, int &allDeclaringClassOffset, std::unordered_map &baseNameMp, @@ -204,7 +207,7 @@ class ReflectionAnalysis : public AnalysisResult { std::unordered_map &baseNameMp, std::unordered_map &fullNameMp); MIRSymbol *GenFieldsMeta(const Klass &klass, std::vector> &fieldsVector, - const std::vector> &fieldHashvec); + const std::vector> &fieldHashVec); void GenFieldMeta(const Klass &klass, MIRStructType &fieldsInfoType, const std::pair &fieldInfo, MIRAggConst &aggConst, int idx, const std::vector> &fieldHashVec); @@ -223,43 +226,43 @@ class ReflectionAnalysis : public AnalysisResult { static void GenMetadataType(MIRModule &module); static MIRType *GetRefFieldType(); static TyIdx GenMetaStructType(MIRModule &module, MIRStructType &metaType, const std::string &str); - uint32 GetHashIndex(const std::string &strName); + uint32 GetHashIndex(const std::string &strName) const; static void GenHotClassNameString(const Klass &klass); - uint32 FindOrInsertReflectString(const std::string &str); + uint32 FindOrInsertReflectString(const std::string &str) const; static void InitReflectString(); - uint32 BKDRHash(const std::string &strName, uint32 seed); + uint32 BKDRHash(const std::string &strName, uint32 seed) const; void GenClassHashMetaData(); - void MarkWeakMethods(); + void MarkWeakMethods() const; bool VtableFunc(const MIRFunction &func) const; void GenPrimitiveClass(); void GenAllMethodHash(std::vector> &methodInfoVec, std::unordered_map &baseNameMap, - std::unordered_map &fullNameMap); + std::unordered_map &fullNameMap) const; void 
GenAllFieldHash(std::vector> &fieldV) const; void GenAnnotation(std::map &idxNumMap, std::string &annoArr, MIRStructType &classType, PragmaKind paragKind, const std::string ¶gName, TyIdx fieldTypeIdx, - std::map *paramNumArray = nullptr, int *paramIndex = nullptr); - void AppendValueByType(std::string &annoArr, const MIRPragmaElement &elem); - bool IsAnonymousClass(const std::string &annotationString); - bool IsLocalClass(const std::string annotationString); + std::map *paramnumArray = nullptr, int *paramIndex = nullptr); + void AppendValueByType(std::string &annoArr, const MIRPragmaElement &elem) const; + bool IsAnonymousClass(const std::string &annotationString) const; + bool IsLocalClass(const std::string annotationString) const; bool IsPrivateClass(const MIRClassType &classType) const; bool IsStaticClass(const MIRStructType &classType) const; - int8 JudgePara(MIRStructType &ctype); - void CheckPrivateInnerAndNoSubClass(Klass &clazz, const std::string &annoArr); - void ConvertMapleClassName(const std::string &mplClassName, std::string &javaDsp); + int8 JudgePara(MIRStructType &classType) const; + void CheckPrivateInnerAndNoSubClass(Klass &clazz, const std::string &annoArr) const; + void ConvertMapleClassName(const std::string &mplClassName, std::string &javaDsp) const; - int GetDeflateStringIdx(const std::string &subStr, bool needSpecialFlag); + int GetDeflateStringIdx(const std::string &subStr, bool needSpecialFlag) const; uint32 GetAnnoCstrIndex(std::map &idxNumMap, const std::string &annoArr, bool isField); - uint16 GetMethodInVtabIndex(const Klass &clazz, const MIRFunction &func) const; + uint16 GetMethodInVtabIndex(const Klass &klass, const MIRFunction &func) const; void GetSignatureTypeNames(std::string &signature, std::vector &typeNames); - MIRSymbol *GetClinitFuncSymbol(const Klass &klass); + MIRSymbol *GetClinitFuncSymbol(const Klass &klass) const; int SolveAnnotation(MIRStructType &classType, const MIRFunction &func); - uint32 
GetTypeNameIdxFromType(const MIRType &type, const Klass &klass, const std::string &fieldName); - bool IsMemberClass(const std::string &annotationString); - int8_t GetAnnoFlag(const std::string &annotationString); + uint32 GetTypeNameIdxFromType(const MIRType &type, const Klass &klass, const std::string &fieldName) const; + bool IsMemberClass(const std::string &annotationString) const; + int8_t GetAnnoFlag(const std::string &annotationString) const; void GenFieldTypeClassInfo(const MIRType &type, const Klass &klass, std::string &classInfo, - const std::string fieldName, bool &isClass); + const std::string fieldName, bool &isClass) const; MIRModule *mirModule; MapleAllocator allocator; diff --git a/src/mapleall/mpl2mpl/include/scalarreplacement.h b/src/mapleall/mpl2mpl/include/scalarreplacement.h index a75889eedf30a71ab84b2a02842b12bf45d47373..8618f22a4c714fd1888cfd94b4e33a78829e5ef8 100644 --- a/src/mapleall/mpl2mpl/include/scalarreplacement.h +++ b/src/mapleall/mpl2mpl/include/scalarreplacement.h @@ -40,9 +40,9 @@ class ScalarReplacement : public FuncOptimizeImpl { std::vector localRefCleanup; std::unordered_map localVarMap; template - BaseNode *IterateExpr(StmtNode *stmt, BaseNode *expr, Func const &applyFunc); + BaseNode *IterateExpr(StmtNode *stmt, BaseNode *expr, Func const &applyFunc) const; template - void IterateStmt(StmtNode *stmt, Func const &applyFunc); + void IterateStmt(StmtNode *stmt, Func const &applyFunc) const; BaseNode *MarkDassignDread(StmtNode *stmt, BaseNode *opnd); void CollectCandidates(); void DumpCandidates() const; @@ -50,7 +50,7 @@ class ScalarReplacement : public FuncOptimizeImpl { bool IsSetClass(StmtNode *stmt) const; bool IsCCWriteRefField(StmtNode *stmt) const; bool CanBeReplaced(const StmtVec *refs) const; - BaseNode *ReplaceDassignDread(StmtNode *stmt, BaseNode *opnd); + BaseNode *ReplaceDassignDread(StmtNode &stmt, BaseNode *opnd) const; void AppendLocalRefCleanup(const MIRSymbol *sym); void ReplaceWithScalar(const StmtVec *refs); 
void FixRCCalls(const StmtVec *refs); diff --git a/src/mapleall/mpl2mpl/include/simplify.h b/src/mapleall/mpl2mpl/include/simplify.h index 486aada04a089717ae6cbc483f2285783804dabd..6dd3036b16d9b43efd25b10d53871f83dec075cc 100644 --- a/src/mapleall/mpl2mpl/include/simplify.h +++ b/src/mapleall/mpl2mpl/include/simplify.h @@ -19,7 +19,7 @@ #include "maple_phase_manager.h" namespace maple { -const std::map asmMap = { +const std::map kAsmMap = { #include "asm_map.def" }; @@ -33,15 +33,15 @@ enum ErrorNumber : int32 { }; enum OpKind { - MEM_OP_unknown, - MEM_OP_memset, - MEM_OP_memcpy, - MEM_OP_memset_s, - KMemOpMemcpyS, - SPRINTF_OP_sprintf, - SPRINTF_OP_sprintf_s, - SPRINTF_OP_snprintf_s, - SPRINTF_OP_vsnprintf_s + kMemOpUnknown, + kMemOpMemset, + kMemOpMemcpy, + kMemOpMemsetS, + kMemOpMemcpyS, + kSprintfOpSprintf, + kSprintfOpSprintfS, + kSprintfOpSnprintfS, + kSprintfOpVsnprintfS }; // MemEntry models a memory entry with high level type information. @@ -101,7 +101,7 @@ class SprintfBaseOper { public: explicit SprintfBaseOper(ProxyMemOp &op) : op(op) {} void ProcessRetValue(StmtNode &stmt, BlockNode &block, OpKind opKind, int32 retVal, bool isLowLevel); - bool DealWithFmtConstStr(StmtNode &stmt, BaseNode *fmt, BlockNode &block, bool isLowLevel); + bool DealWithFmtConstStr(StmtNode &stmt, const BaseNode *fmt, BlockNode &block, bool isLowLevel); static bool CheckCondIfNeedReplace(const StmtNode &stmt, uint32_t opIdx); static bool IsCountConst(StmtNode &stmt, uint64 &count, uint32_t opndIdx); StmtNode *InsertMemcpyCallStmt(const MapleVector &args, StmtNode &stmt, @@ -112,8 +112,8 @@ class SprintfBaseOper { bool ReplaceSprintfWithMemcpy(StmtNode &stmt, BlockNode &block, uint32 opndIdx, uint64 copySize, bool isLowLevel); bool CompareDstMaxSrcSize(StmtNode &stmt, BlockNode &block, uint64 dstMax, uint64 srcSize, bool isLowLevel); bool CompareCountSrcSize(StmtNode &stmt, BlockNode &block, uint64 count, uint64 srcSize, bool isLowLevel); - bool DealWithDstOrEndZero(StmtNode 
&stmt, BlockNode &block, bool isLowLevel, uint64 count); - static bool CheckInvalidPara(uint64 count, uint64 dstMax, uint64 srcSize); + bool DealWithDstOrEndZero(const StmtNode &stmt, BlockNode &block, bool isLowLevel, uint64 count); + static bool CheckInvalidPara(uint64 count, uint64 dstMax, uint64 srcSize); virtual bool ReplaceSprintfIfNeeded(StmtNode &stmt, BlockNode &block, bool isLowLevel, const OpKind &opKind) { CHECK_FATAL(false, "NEVER REACH"); }; @@ -149,14 +149,16 @@ class SimplifySnprintfS : public SprintfBaseOper { class SimplifyOp : public ProxyMemOp { public: static OpKind ComputeOpKind(StmtNode &stmt); - ~SimplifyOp() override = default; + ~SimplifyOp() override { + func = nullptr; + } explicit SimplifyOp(MemPool &memPool) : sprintfAlloc(&memPool), sprintfMap(sprintfAlloc.Adapter()) { auto simplifySnprintfs = sprintfAlloc.New(*this); - sprintfMap.emplace(SPRINTF_OP_sprintf, sprintfAlloc.New(*this)); - sprintfMap.emplace(SPRINTF_OP_sprintf_s, sprintfAlloc.New(*this)); - sprintfMap.emplace(SPRINTF_OP_snprintf_s, simplifySnprintfs); - sprintfMap.emplace(SPRINTF_OP_vsnprintf_s, simplifySnprintfs); + (void)sprintfMap.emplace(kSprintfOpSprintf, sprintfAlloc.New(*this)); + (void)sprintfMap.emplace(kSprintfOpSprintfS, sprintfAlloc.New(*this)); + (void)sprintfMap.emplace(kSprintfOpSnprintfS, simplifySnprintfs); + (void)sprintfMap.emplace(kSprintfOpVsnprintfS, simplifySnprintfs); } void SetFunction(MIRFunction *f) { func = f; @@ -175,9 +177,11 @@ class SimplifyOp : public ProxyMemOp { } bool AutoSimplify(StmtNode &stmt, BlockNode &block, bool isLowLevel); - bool SimplifyMemset(StmtNode &stmt, BlockNode &block, bool isLowLevel); + bool SimplifyMemset(StmtNode &stmt, BlockNode &block, bool isLowLevel) const; bool SimplifyMemcpy(StmtNode &stmt, BlockNode &block, bool isLowLevel) override; private: + void FoldMemsExpr(StmtNode &stmt, uint64 &srcSize, bool &isSrcSizeConst, uint64 &dstSize, + bool &isDstSizeConst) const; StmtNode *PartiallyExpandMemsetS(StmtNode 
&stmt, BlockNode &block) const; StmtNode *PartiallyExpandMemcpyS(StmtNode &stmt, BlockNode &block); @@ -218,12 +222,12 @@ class Simplify : public FuncOptimizeImpl { bool IsConstRepalceable(const MIRConst &mirConst) const; bool SimplifyMathMethod(const StmtNode &stmt, BlockNode &block); void SimplifyCallAssigned(StmtNode &stmt, BlockNode &block); - StmtNode *SimplifyBitFieldWrite(const IassignNode &iass); - BaseNode *SimplifyBitFieldRead(IreadNode &iread); - StmtNode *SimplifyToSelect(MIRFunction *func, IfStmtNode *ifNode, BlockNode *block); + StmtNode *SimplifyBitFieldWrite(const IassignNode &iass) const; + BaseNode *SimplifyBitFieldRead(IreadNode &iread) const; + StmtNode *SimplifyToSelect(MIRFunction &func, IfStmtNode *ifNode, BlockNode *block) const; BaseNode *SimplifyExpr(BaseNode &expr); - BaseNode *ReplaceExprWithConst(DreadNode &dread); - MIRConst *GetElementConstFromFieldId(FieldID fieldId, MIRConst &mirConst); + BaseNode *ReplaceExprWithConst(DreadNode &dread) const; + MIRConst *GetElementConstFromFieldId(FieldID fieldId, MIRConst &mirConst) const; }; MAPLE_MODULE_PHASE_DECLARE(M2MSimplify) diff --git a/src/mapleall/mpl2mpl/include/stmt_cost_analyzer.h b/src/mapleall/mpl2mpl/include/stmt_cost_analyzer.h index 994b74ebee51cac9586d855895d1109898e06127..0f841c777190c7f898c79899b3002bd9992fbb84 100644 --- a/src/mapleall/mpl2mpl/include/stmt_cost_analyzer.h +++ b/src/mapleall/mpl2mpl/include/stmt_cost_analyzer.h @@ -61,9 +61,9 @@ class StmtCostAnalyzer { int64 GetExprCost(BaseNode *expr); int64 GetMeStmtCost(MeStmt *meStmt); int64 GetMeExprCost(MeExpr *meExpr); - int64 GetMoveCost(size_t sizeInByte); + int64 GetMoveCost(size_t sizeInByte) const; TargetInfo *GetTargetInfo() { return ti; } - MIRType *GetMIRTypeFromStIdxAndField(const StIdx idx, FieldID fieldID); + MIRType *GetMIRTypeFromStIdxAndField(const StIdx idx, FieldID fieldID) const; void SetFunction(MIRFunction *func) { curFunc = func; } ~StmtCostAnalyzer() { diff --git 
a/src/mapleall/mpl2mpl/include/vtable_analysis.h b/src/mapleall/mpl2mpl/include/vtable_analysis.h index 9d619c848e030d6d88ccb6cc28039008c5a4f037..c9c4f6a23d7f9dd5f60786bcfec7476e4dfb3547 100644 --- a/src/mapleall/mpl2mpl/include/vtable_analysis.h +++ b/src/mapleall/mpl2mpl/include/vtable_analysis.h @@ -28,7 +28,11 @@ constexpr unsigned int kShiftCountBit = 8 * 4; // Get the low 32bit class VtableAnalysis : public FuncOptimizeImpl { public: VtableAnalysis(MIRModule &mod, KlassHierarchy *kh, bool dump); - ~VtableAnalysis() override = default; + ~VtableAnalysis() override { + voidPtrType = nullptr; + zeroConst = nullptr; + oneConst = nullptr; + } static std::string DecodeBaseNameWithType(const MIRFunction &func); static bool IsVtableCandidate(const MIRFunction &func); void ProcessFunc(MIRFunction *func) override; @@ -39,18 +43,18 @@ class VtableAnalysis : public FuncOptimizeImpl { private: bool CheckInterfaceSpecification(const Klass &baseKlass, const Klass &currKlass) const; bool CheckOverrideForCrossPackage(const MIRFunction &baseMethod, const MIRFunction &currMethod) const; - void AddMethodToTable(MethodPtrVector &methodTable, MethodPair &methodPair); + void AddMethodToTable(MethodPtrVector &methodTable, MethodPair &methodPair) const; void GenVtableList(const Klass &klass); void DumpVtableList(const Klass &klass) const; void GenTableSymbol(const std::string &prefix, const std::string klassName, MIRAggConst &newConst) const; void GenVtableDefinition(const Klass &klass); void GenItableDefinition(const Klass &klass); void AddNullPointExceptionCheck(MIRFunction &func, StmtNode &stmt) const; - BaseNode *GenVtabItabBaseAddr(BaseNode &obj, bool isVirtual); + BaseNode *GenVtabItabBaseAddr(BaseNode &obj, bool isVirtual) const; size_t SearchWithoutRettype(const MIRFunction &callee, const MIRStructType &structType) const; bool CheckInterfaceImplemented(const CallNode &stmt) const; void ReplaceVirtualInvoke(CallNode &stmt); - void ReplaceInterfaceInvoke(CallNode &stmt); + void 
ReplaceInterfaceInvoke(CallNode &stmt) const; void ReplaceSuperclassInvoke(CallNode &stmt); void ReplacePolymorphicInvoke(CallNode &stmt); diff --git a/src/mapleall/mpl2mpl/include/vtable_impl.h b/src/mapleall/mpl2mpl/include/vtable_impl.h index d6456e9b57afcb19ae6358ff803308c79d1c1062..937ac23e0dd0a7de3d5b537eb8af2ec6666f24c8 100644 --- a/src/mapleall/mpl2mpl/include/vtable_impl.h +++ b/src/mapleall/mpl2mpl/include/vtable_impl.h @@ -26,7 +26,11 @@ enum CallKind { class VtableImpl : public FuncOptimizeImpl { public: VtableImpl(MIRModule &mod, KlassHierarchy *kh, bool dump); - ~VtableImpl() override = default; + ~VtableImpl() override { + mirModule = nullptr; + klassHierarchy = nullptr; + mccItabFunc = nullptr; + } void ProcessFunc(MIRFunction *func) override; FuncOptimizeImpl *Clone() override { @@ -39,10 +43,10 @@ class VtableImpl : public FuncOptimizeImpl { #endif // ~USE_ARM32_MACRO private: - void ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode &resolveNode); + void ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode &resolveNode) const; void ItabProcess(const StmtNode &stmt, const ResolveFuncNode &resolveNode, const std::string &signature, - const PregIdx &pregFuncPtr, const MIRType &compactPtrType, const PrimType &compactPtrPrim); - bool Intrinsify(MIRFunction &func, CallNode &cnode); + const PregIdx &pregFuncPtr, const MIRType &compactPtrType, const PrimType &compactPtrPrim) const; + bool Intrinsify(MIRFunction &func, CallNode &cnode) const; #ifndef USE_ARM32_MACRO #ifdef USE_32BIT_REF void InlineCacheinit(); @@ -56,7 +60,7 @@ class VtableImpl : public FuncOptimizeImpl { #endif // ~USE_32BIT_REF #endif // ~USE_ARM32_MACRO void DeferredVisit(CallNode &stmt, CallKind kind); - void DeferredVisitCheckFloat(CallNode &stmt, const MIRFunction &mirFunc); + void DeferredVisitCheckFloat(CallNode &stmt, const MIRFunction &mirFunc) const; MIRModule *mirModule; KlassHierarchy *klassHierarchy; #ifndef USE_ARM32_MACRO diff --git 
a/src/mapleall/mpl2mpl/src/annotation_analysis.cpp b/src/mapleall/mpl2mpl/src/annotation_analysis.cpp index 6ce477be740ef80f1230aaf8f68007cf3441f060..454caefb90d31e2862855d3b7f6e013d608fc804 100644 --- a/src/mapleall/mpl2mpl/src/annotation_analysis.cpp +++ b/src/mapleall/mpl2mpl/src/annotation_analysis.cpp @@ -44,9 +44,9 @@ void GenericType::Dump() { std::string s = namemangler::DecodeName(GetName()); s.pop_back(); std::cout << s; - if (GenericArg.size() != 0) { + if (genericArg.size() != 0) { std::cout << "<"; - for (AnnotationType *real : ArgOrder) { + for (AnnotationType *real : argOrder) { if (real->GetKind() == kGenericType) { real->Dump(); } else if (real->GetKind() == kGenericDeclare) { @@ -70,12 +70,12 @@ void AnnotationType::Dump() { void GenericType::ReWriteType(std::string &subClass) { std::string className = mirStructType->GetName(); - size_t ClassMethodSplitterStrSize = strlen(namemangler::kClassMethodSplitterStr); - (void)className.replace(className.size() - ClassMethodSplitterStrSize, ClassMethodSplitterStrSize, subClass); + size_t classMethodSplitterStrSize = strlen(namemangler::kClassMethodSplitterStr); + (void)className.replace(className.size() - classMethodSplitterStrSize, classMethodSplitterStrSize, subClass); mirStructType = GetClassTypeFromName(className); typeName = GlobalTables::GetStrTable().GetStrIdxFromName(className); - GenericArg.clear(); - ArgOrder.clear(); + genericArg.clear(); + argOrder.clear(); } GenericDeclare *AnnotationParser::GetOrCreateDeclare(GStrIdx gStrIdx, MemPool &mp, bool check, MIRStructType *sType) { diff --git a/src/mapleall/mpl2mpl/src/barrierinsertion.cpp b/src/mapleall/mpl2mpl/src/barrierinsertion.cpp index 06d0db62b526a273283a938d685a1d3298ab43f3..abb79037e7403316a48eb79267745195dceeb31f 100644 --- a/src/mapleall/mpl2mpl/src/barrierinsertion.cpp +++ b/src/mapleall/mpl2mpl/src/barrierinsertion.cpp @@ -156,18 +156,15 @@ StmtNode *BarrierInsertion::RunFunction::CheckRefRead(BaseNode *opnd, const Stmt // Condition: 1) 
rhs is not addressof symbols with names starting "__vtab__" or // "__itab__" (vtab, itab initialization // Handling: 1) Replace iassign with a call to CC_WriteRefField. -// // [[|i|intrinsic]callassigned] // Condition: 1) Return type is ref. // Handling: 1) Assign return values to temporary variables, use // MCC_ReleaseRefVar on the original return variable, and then // dassign the tmp variable to the actual return var. -// // [call] // Condition: 1) Return type is ref. // Handling: 1) Assign return values to temporary variables, and then call // MCC_ReleaseRefVar on them. -// // [return] // Assumption: 1) If the return type is ref, assume it is the result of a dread. // Handling: 1) If the return value is a reference, consider it as if it is @@ -175,7 +172,6 @@ StmtNode *BarrierInsertion::RunFunction::CheckRefRead(BaseNode *opnd, const Stmt // 2) Call MCC_ReleaseRefVar on all local vars except un-assigned // parameters and the return value. this is processed after // procesing all statements. -// // This function returns the last stmt it generated or visited. HandleBlock // shall continue with the next statement after the return value, if any. StmtNode *BarrierInsertion::RunFunction::HandleStmt(StmtNode &stmt, BlockNode &block) const { @@ -298,18 +294,14 @@ void BarrierInsertion::RunFunction::HandleReturn(const NaryStmtNode &retNode) { if (val->GetPrimType() != PTY_ref) { continue; } - if (val->GetOpCode() != OP_dread) { - if (val->GetOpCode() == OP_constval) { - auto constvalNode = static_cast(val); - MIRConst *con = constvalNode->GetConstVal(); - if (con->GetKind() == kConstInt) { - auto intConst = safe_cast(con); - if (intConst->IsZero()) { - // It is a nullptr. Skip this return value. - continue; - } - } + if (val->GetOpCode() == OP_constval) { + auto constvalNode = static_cast(val); + MIRConst *con = constvalNode->GetConstVal(); + if (con->GetKind() == kConstInt && safe_cast(con)->IsZero()) { + // It is a nullptr. Skip this return value. 
+ continue; } + } else if (val->GetOpCode() != OP_dread) { CHECK_FATAL(false, "Found a return statement that returns a ref but is not from a dread. Please enable the code below" "this line."); @@ -346,13 +338,9 @@ void BarrierInsertion::RunFunction::HandleReturn(const NaryStmtNode &retNode) { CHECK_FATAL(localSym != nullptr, "local_sym is nullptr"); if (GlobalTables::GetTypeTable().GetTypeFromTyIdx(localSym->GetTyIdx())->GetPrimType() == PTY_ref && (localSym->GetStorageClass() == kScAuto || assignedParams.find(localSym->GetStIdx()) != assignedParams.end())) { - if (localSym->IgnoreRC()) { - continue; - } - if (backupVarIndices.find(localSym->GetStIdx()) != backupVarIndices.end()) { - continue; - } - if (retValStIdxs.find(localSym->GetStIdx()) != retValStIdxs.end()) { + if (localSym->IgnoreRC() || + backupVarIndices.find(localSym->GetStIdx()) != backupVarIndices.end() || + retValStIdxs.find(localSym->GetStIdx()) != retValStIdxs.end()) { continue; } if (BARDEBUG) { diff --git a/src/mapleall/mpl2mpl/src/call_graph.cpp b/src/mapleall/mpl2mpl/src/call_graph.cpp index 72a02f103de9c43dd153759cf2f0e7186be112de..65da701911b42846e34779acb6aafd8bbe286779 100644 --- a/src/mapleall/mpl2mpl/src/call_graph.cpp +++ b/src/mapleall/mpl2mpl/src/call_graph.cpp @@ -282,7 +282,7 @@ FreqType CGNode::GetCallsiteFrequency(const StmtNode &callstmt) const { return funcInfo->stmtFreqs[callstmt.GetStmtID()]; } ASSERT(0, "should not be here"); - return UINT64_MAX; + return INT64_MAX; } FreqType CGNode::GetFuncFrequency() const { @@ -424,7 +424,8 @@ SCCNode *CallGraph::GetSCCNode(MIRFunction *func) const { return (cgNode != nullptr) ? 
cgNode->GetSCCNode() : nullptr; } -void CallGraph::UpdateCaleeCandidate(PUIdx callerPuIdx, const IcallNode *icall, std::set &candidate) const { +void CallGraph::UpdateCaleeCandidate(PUIdx callerPuIdx, const IcallNode *icall, + const std::set &candidate) const { CGNode *caller = GetCGNode(callerPuIdx); for (auto &pair : std::as_const(caller->GetCallee())) { auto *callsite = pair.first; @@ -606,22 +607,22 @@ void CallGraph::RecordLocalConstValue(const StmtNode *stmt) { localConstValueMap[lhs->GetStIdx()] = dassign->GetRHS(); } -CallNode *CallGraph::ReplaceIcallToCall(BlockNode &body, IcallNode *icall, PUIdx newPUIdx) const { - MapleVector opnds(icall->GetNopnd().begin() + 1, icall->GetNopnd().end(), +CallNode *CallGraph::ReplaceIcallToCall(BlockNode &body, IcallNode &icall, PUIdx newPUIdx) const { + MapleVector opnds(icall.GetNopnd().begin() + 1, icall.GetNopnd().end(), CurFunction()->GetCodeMPAllocator().Adapter()); CallNode *newCall = nullptr; - if (icall->GetOpCode() == OP_icall) { + if (icall.GetOpCode() == OP_icall) { newCall = mirBuilder->CreateStmtCall(newPUIdx, opnds, OP_call); - } else if (icall->GetOpCode() == OP_icallassigned) { + } else if (icall.GetOpCode() == OP_icallassigned) { newCall = mirBuilder->CreateStmtCallAssigned( - newPUIdx, opnds, icall->GetCallReturnSymbol(mirBuilder->GetMirModule()), OP_callassigned, icall->GetRetTyIdx()); + newPUIdx, opnds, icall.GetCallReturnSymbol(mirBuilder->GetMirModule()), OP_callassigned, icall.GetRetTyIdx()); } else { CHECK_FATAL(false, "NYI"); } - body.ReplaceStmt1WithStmt2(icall, newCall); - newCall->SetSrcPos(icall->GetSrcPos()); + body.ReplaceStmt1WithStmt2(&icall, newCall); + newCall->SetSrcPos(icall.GetSrcPos()); if (debugFlag) { - icall->Dump(0); + icall.Dump(0); newCall->Dump(0); LogInfo::MapleLogger() << "replace icall successfully!\n"; } @@ -704,7 +705,7 @@ void CallGraph::HandleICall(BlockNode &body, CGNode &node, StmtNode *stmt, uint3 } if (symbol->GetKonst()->GetKind() == kConstAddrofFunc) { auto 
*addrofFuncConst = static_cast(symbol->GetKonst()); - stmt = ReplaceIcallToCall(body, icall, addrofFuncConst->GetValue()); + stmt = ReplaceIcallToCall(body, *icall, addrofFuncConst->GetValue()); HandleCall(body, node, *stmt, loopDepth); return; } @@ -713,7 +714,7 @@ void CallGraph::HandleICall(BlockNode &body, CGNode &node, StmtNode *stmt, uint3 auto *elem = aggConst->GetAggConstElement(dread->GetFieldID()); if (elem->GetKind() == kConstAddrofFunc) { auto *addrofFuncConst = static_cast(elem); - stmt = ReplaceIcallToCall(body, icall, addrofFuncConst->GetValue()); + stmt = ReplaceIcallToCall(body, *icall, addrofFuncConst->GetValue()); HandleCall(body, node, *stmt, loopDepth); return; } @@ -727,7 +728,7 @@ void CallGraph::HandleICall(BlockNode &body, CGNode &node, StmtNode *stmt, uint3 auto *rhsNode = localConstValueMap[symbol->GetStIdx()]; if (rhsNode != nullptr && rhsNode->GetOpCode() == OP_addroffunc) { auto *funcNode = static_cast(rhsNode); - stmt = ReplaceIcallToCall(body, icall, funcNode->GetPUIdx()); + stmt = ReplaceIcallToCall(body, *icall, funcNode->GetPUIdx()); HandleCall(body, node, *stmt, loopDepth); return; } @@ -771,12 +772,12 @@ void CallGraph::HandleICall(BlockNode &body, CGNode &node, StmtNode *stmt, uint3 auto *konst = static_cast(arrayNode->GetNopndAt(i))->GetConstVal(); auto index = static_cast(konst)->GetExtValue(); if (result->GetKind() == kConstAggConst) { - result = static_cast(result)->GetConstVecItem(index); + result = static_cast(result)->GetConstVecItem(static_cast(index)); } } CHECK_FATAL(result->GetKind() == kConstAddrofFunc, "Must be"); auto *constValue = static_cast(result); - stmt = ReplaceIcallToCall(body, icall, constValue->GetValue()); + stmt = ReplaceIcallToCall(body, *icall, constValue->GetValue()); HandleCall(body, node, *stmt, loopDepth); return; } @@ -808,7 +809,7 @@ void CallGraph::HandleICall(BlockNode &body, CGNode &node, StmtNode *stmt, uint3 } case OP_addroffunc: { auto *funcNode = static_cast(funcAddr); - stmt = 
ReplaceIcallToCall(body, icall, funcNode->GetPUIdx()); + stmt = ReplaceIcallToCall(body, *icall, funcNode->GetPUIdx()); HandleCall(body, node, *stmt, loopDepth); return; } @@ -1175,7 +1176,7 @@ void IPODevirtulize::SearchDefInClinit(const Klass &klass) const { } } -void IPODevirtulize::SearchDefInMemberMethods(const Klass &klass) { +void IPODevirtulize::SearchDefInMemberMethods(const Klass &klass) const { SearchDefInClinit(klass); MIRClassType *classType = static_cast(klass.GetMIRStructType()); std::vector finalPrivateFieldID; @@ -1624,7 +1625,7 @@ void DoDevirtual(const Klass &klass, const KlassHierarchy &klassh) { } } -void IPODevirtulize::DevirtualFinal() { +void IPODevirtulize::DevirtualFinal() const { // Search all klass in order to find final variables MapleMap::const_iterator it = klassh->GetKlasses().begin(); for (; it != klassh->GetKlasses().end(); ++it) { @@ -1663,7 +1664,7 @@ void IPODevirtulize::DevirtualFinal() { } } } - DoDevirtual(*klass, *GetKlassh()); + DoDevirtual(*klass, GetKlassh()); } } } diff --git a/src/mapleall/mpl2mpl/src/class_hierarchy.cpp b/src/mapleall/mpl2mpl/src/class_hierarchy.cpp index 686d57a5a5c18803d5801aba8aba45461add624d..6c4d50712cf9213524682c3706f43b362f69b4d7 100644 --- a/src/mapleall/mpl2mpl/src/class_hierarchy.cpp +++ b/src/mapleall/mpl2mpl/src/class_hierarchy.cpp @@ -739,7 +739,7 @@ void KlassHierarchy::CountVirtualMethods() const { } } -Klass *KlassHierarchy::AddClassFlag(const std::string &name, uint32 flag) { +Klass *KlassHierarchy::AddClassFlag(const std::string &name, uint32 flag) const { Klass *refKlass = GetKlassFromLiteral(name); if (refKlass != nullptr) { refKlass->SetFlag(flag); diff --git a/src/mapleall/mpl2mpl/src/constantfold.cpp b/src/mapleall/mpl2mpl/src/constantfold.cpp index b1dda1f1783acd5b34112f8ea30c236615a5fb0d..b1aa495c642a72e9c78c1adb906de9ab29122a8c 100644 --- a/src/mapleall/mpl2mpl/src/constantfold.cpp +++ b/src/mapleall/mpl2mpl/src/constantfold.cpp @@ -140,14 +140,18 @@ BaseNode 
*ConstantFold::PairToExpr(PrimType resultType, const std::pair(pair.first)->Opnd(0); result = mirModule->CurFuncCodeMemPool()->New(OP_sub, resultType, val, r); } else { - if ((!pair.second->GetSignBit() && pair.second->GetSXTValue(GetPrimTypeBitSize(resultType)) > 0) || - pair.second->GetSXTValue() == INT64_MIN) { + if ((!pair.second->GetSignBit() && + pair.second->GetSXTValue(static_cast(GetPrimTypeBitSize(resultType))) > 0) || + pair.second->GetSXTValue() == INT64_MIN) { // +-a, 5 -> a + 5 - ConstvalNode *val = mirModule->GetMIRBuilder()->CreateIntConst(pair.second->GetExtValue(), resultType); + ConstvalNode *val = mirModule->GetMIRBuilder()->CreateIntConst(static_cast(pair.second->GetExtValue()), + resultType); result = mirModule->CurFuncCodeMemPool()->New(OP_add, resultType, pair.first, val); } else { // +-a, -5 -> a + -5 - ConstvalNode *val = mirModule->GetMIRBuilder()->CreateIntConst(static_cast((-pair.second.value()).GetExtValue()), resultType); + ConstvalNode *val = + mirModule->GetMIRBuilder()->CreateIntConst(static_cast((-pair.second.value()).GetExtValue()), + resultType); result = mirModule->CurFuncCodeMemPool()->New(OP_sub, resultType, pair.first, val); } } @@ -374,7 +378,7 @@ MIRIntConst *ConstantFold::FoldIntConstComparisonMIRConst(Opcode opcode, PrimTyp } else if (equal) { result = kEqual; } else if (less) { - result = kLess; + result = static_cast(kLess); } break; } @@ -409,10 +413,10 @@ ConstvalNode *ConstantFold::FoldIntConstComparison(Opcode opcode, PrimType resul return resultConst; } -MIRConst *ConstantFold::FoldIntConstBinaryMIRConst(Opcode opcode, PrimType resultType, const MIRIntConst *intConst0, - const MIRIntConst *intConst1) { - IntVal intVal0 = intConst0->GetValue(); - IntVal intVal1 = intConst1->GetValue(); +MIRConst *ConstantFold::FoldIntConstBinaryMIRConst(Opcode opcode, PrimType resultType, const MIRIntConst &intConst0, + const MIRIntConst &intConst1) { + IntVal intVal0 = intConst0.GetValue(); + IntVal intVal1 = intConst1.GetValue(); 
IntVal result(static_cast(0), resultType); switch (opcode) { @@ -506,7 +510,7 @@ ConstvalNode *ConstantFold::FoldIntConstBinary(Opcode opcode, PrimType resultTyp const MIRIntConst *intConst1 = safe_cast(const1.GetConstVal()); CHECK_NULL_FATAL(intConst0); CHECK_NULL_FATAL(intConst1); - MIRConst *constValue = FoldIntConstBinaryMIRConst(opcode, resultType, intConst0, intConst1); + MIRConst *constValue = FoldIntConstBinaryMIRConst(opcode, resultType, *intConst0, *intConst1); // form the ConstvalNode ConstvalNode *resultConst = mirModule->CurFuncCodeMemPool()->New(); resultConst->SetPrimType(resultType); @@ -723,7 +727,7 @@ ConstvalNode *ConstantFold::FoldFPConstComparison(Opcode opcode, PrimType result } MIRConst *ConstantFold::FoldConstComparisonMIRConst(Opcode opcode, PrimType resultType, PrimType opndType, - const MIRConst &const0, const MIRConst &const1) { + const MIRConst &const0, const MIRConst &const1) const { MIRConst *returnValue = nullptr; if (IsPrimitiveInteger(opndType) || IsPrimitiveDynInteger(opndType)) { const auto *intConst0 = safe_cast(&const0); @@ -753,7 +757,7 @@ ConstvalNode *ConstantFold::FoldConstComparison(Opcode opcode, PrimType resultTy } CompareNode *ConstantFold::FoldConstComparisonReverse(Opcode opcode, PrimType resultType, PrimType opndType, - BaseNode &l, BaseNode &r) { + BaseNode &l, BaseNode &r) const { CompareNode *result = nullptr; Opcode op = opcode; switch (opcode) { @@ -802,12 +806,12 @@ ConstvalNode *ConstantFold::FoldConstBinary(Opcode opcode, PrimType resultType, return returnValue; } -MIRIntConst *ConstantFold::FoldIntConstUnaryMIRConst(Opcode opcode, PrimType resultType, const MIRIntConst *cst) { - CHECK_NULL_FATAL(cst); - IntVal result = cst->GetValue().TruncOrExtend(resultType); +MIRIntConst *ConstantFold::FoldIntConstUnaryMIRConst(Opcode opcode, PrimType resultType, const MIRIntConst *constNode) { + CHECK_NULL_FATAL(constNode); + IntVal result = constNode->GetValue().TruncOrExtend(resultType); switch (opcode) { case OP_abs: 
{ - if (IsSignedInteger(cst->GetType().GetPrimType()) && result.GetSignBit()) { + if (IsSignedInteger(constNode->GetType().GetPrimType()) && result.GetSignBit()) { result = -result; } break; @@ -1151,7 +1155,7 @@ MIRConst *ConstantFold::FoldFloorMIRConst(const MIRConst &cst, PrimType fromType return nullptr; } doubleValue = CalIntValueFromFloatValue(doubleValue, resultType); - return GlobalTables::GetIntConstTable().GetOrCreateIntConst(doubleValue, resultType); + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(doubleValue), resultType); } } @@ -1164,7 +1168,7 @@ ConstvalNode *ConstantFold::FoldFloor(const ConstvalNode &cst, PrimType fromType MIRConst *ConstantFold::FoldRoundMIRConst(const MIRConst &cst, PrimType fromType, PrimType toType) const { if (fromType == PTY_f128 || toType == PTY_f128) { - LogInfo::MapleLogger() << "FoldRoundMIRConst is not supported for f128\n"; + // folding while rounding float128 is not supported yet return nullptr; } @@ -1182,7 +1186,8 @@ MIRConst *ConstantFold::FoldRoundMIRConst(const MIRConst &cst, PrimType fromType if (DoubleToIntOverflow(doubleValue, toType)) { return nullptr; } - return GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(static_cast(doubleValue)), resultType); + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(static_cast(doubleValue)), + resultType); } else if (toType == PTY_f32 && IsPrimitiveInteger(fromType)) { const auto &constValue = static_cast(cst); if (IsSignedInteger(fromType)) { @@ -1274,7 +1279,8 @@ MIRConst *ConstantFold::FoldTypeCvtMIRConst(const MIRConst &cst, PrimType fromTy } const MIRIntConst *constVal = safe_cast(cst); ASSERT_NOT_NULL(constVal); - toConst = FoldSignExtendMIRConst(op, toType, fromSize, constVal->GetValue().TruncOrExtend(fromType)); + toConst = FoldSignExtendMIRConst(op, toType, static_cast(fromSize), + constVal->GetValue().TruncOrExtend(fromType)); } else { const MIRIntConst *constVal = safe_cast(cst); 
ASSERT_NOT_NULL(constVal); @@ -1306,8 +1312,8 @@ MIRConst *ConstantFold::FoldTypeCvtMIRConst(const MIRConst &cst, PrimType fromTy MIRFloatConst *toValue = GlobalTables::GetFpConstTable().GetOrCreateFloatConst(static_cast(doubleValue)); toConst = toValue; } else if (toType == PTY_f64) { - MIRDoubleConst *toValue = GlobalTables::GetFpConstTable().GetOrCreateDoubleConst( - static_cast(doubleValue)); + MIRDoubleConst *toValue = + GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(static_cast(doubleValue)); toConst = toValue; } } else { @@ -1471,7 +1477,7 @@ PrimType GetExprValueRangePtyp(BaseNode *expr) { } } } - return GetNearestSizePtyp(maxTypeSize, ptyp); + return GetNearestSizePtyp(static_cast(maxTypeSize), ptyp); } std::pair> ConstantFold::FoldTypeCvt(TypeCvtNode *node) { @@ -1551,15 +1557,15 @@ ConstvalNode *ConstantFold::FoldSignExtend(Opcode opcode, PrimType resultType, u } // check if truncation is redundant due to dread or iread having same effect -static bool ExtractbitsRedundant(const ExtractbitsNode &x, MIRFunction *f) { - if (GetPrimTypeSize(x.GetPrimType()) == 8) { +static bool ExtractbitsRedundant(const ExtractbitsNode &x, MIRFunction &f) { + if (GetPrimTypeSize(x.GetPrimType()) == k8ByteSize) { return false; // this is trying to be conservative } BaseNode *opnd = x.Opnd(0); MIRType *mirType = nullptr; if (opnd->GetOpCode() == OP_dread) { DreadNode *dread = static_cast(opnd); - MIRSymbol *sym = f->GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRSymbol *sym = f.GetLocalOrGlobalSymbol(dread->GetStIdx()); ASSERT_NOT_NULL(sym); mirType = sym->GetType(); if (dread->GetFieldID() != 0) { @@ -1594,7 +1600,7 @@ static bool ExtractbitsRedundant(const ExtractbitsNode &x, MIRFunction *f) { return false; } return IsPrimitiveInteger(mirType->GetPrimType()) && - mirType->GetSize() * 8 == x.GetBitsSize() && + mirType->GetSize() * k8BitSize == x.GetBitsSize() && mirType->GetPrimType() == x.GetPrimType(); } @@ -1629,7 +1635,7 @@ std::pair> 
ConstantFold::FoldExtractbits(Extrac } } if (offset == 0 && size >= 8 && IsPowerOf2(size)) { - if (ExtractbitsRedundant(*static_cast(result), mirModule->CurFunction())) { + if (ExtractbitsRedundant(*static_cast(result), *(mirModule->CurFunction()))) { return std::make_pair(result->Opnd(0), std::nullopt); } } @@ -1722,6 +1728,10 @@ std::pair> ConstantFold::FoldBinary(BinaryNode PrimType primType = node->GetPrimType(); PrimType lPrimTypes = node->Opnd(0)->GetPrimType(); PrimType rPrimTypes = node->Opnd(1)->GetPrimType(); + if (lPrimTypes == PTY_f128 || rPrimTypes == PTY_f128 || node->GetPrimType() == PTY_f128) { + // folding of non-unary float128 is not supported yet + return std::make_pair(static_cast(node), std::nullopt); + } std::pair> lp = DispatchFold(node->Opnd(0)); std::pair> rp = DispatchFold(node->Opnd(1)); BaseNode *l = lp.first; @@ -1762,6 +1772,9 @@ std::pair> ConstantFold::FoldBinary(BinaryNode // after cf: // add i32 (cvt i32 u1 (neg u1 (eq u1 i32 (dread i32 %i, constval i32 16))), constval i32 17)) sum = cst - rp.second; + if (GetPrimTypeSize(rPrimTypes) < GetPrimTypeSize(primType)) { + r = mirModule->CurFuncCodeMemPool()->New(OP_cvt, primType, rPrimTypes, r); + } result = NegateTree(r); } else if ((op == OP_mul || op == OP_div || op == OP_rem || op == OP_ashr || op == OP_lshr || op == OP_shl || op == OP_band || op == OP_cand || op == OP_land) && @@ -1780,7 +1793,7 @@ std::pair> ConstantFold::FoldBinary(BinaryNode result = r; } else if (op == OP_bior && cst == -1) { // (-1) | X -> -1 - result = mirModule->GetMIRBuilder()->CreateIntConst(-1, cstTyp); + result = mirModule->GetMIRBuilder()->CreateIntConst(static_cast(-1), cstTyp); } else if (op == OP_mul && rp.second.has_value() && *rp.second != 0) { // lConst * (X + konst) -> the pair [(lConst*X), (lConst*konst)] sum = cst * rp.second; @@ -1898,7 +1911,7 @@ std::pair> ConstantFold::FoldBinary(BinaryNode ConstvalNode *shrOpnd = static_cast(shrNode->Opnd(1)); int64 shrAmt = 
static_cast(shrOpnd->GetConstVal())->GetExtValue(); uint64 ucst = cst.GetZXTValue(); - uint64 bsize = 0; + uint32 bsize = 0; do { bsize++; ucst >>= 1; @@ -1909,7 +1922,7 @@ std::pair> ConstantFold::FoldBinary(BinaryNode fold2extractbits = true; // change to use extractbits result = mirModule->GetMIRBuilder()->CreateExprExtractbits(OP_extractbits, - GetUnsignedPrimType(primType), shrAmt, bsize, shrNode->Opnd(0)); + GetUnsignedPrimType(primType), static_cast(shrAmt), bsize, shrNode->Opnd(0)); sum = std::nullopt; } } @@ -1920,7 +1933,7 @@ std::pair> ConstantFold::FoldBinary(BinaryNode } } else if (op == OP_bior && cst == -1) { // X | (-1) -> -1 - result = mirModule->GetMIRBuilder()->CreateIntConst(-1, cstTyp); + result = mirModule->GetMIRBuilder()->CreateIntConst(-1ULL, cstTyp); } else if ((op == OP_lior || op == OP_cior)) { if (cst == 0) { // X || 0 -> X @@ -2067,6 +2080,10 @@ std::pair> ConstantFold::FoldCompare(CompareNod std::pair> rp = DispatchFold(node->Opnd(1)); ConstvalNode *lConst = safe_cast(lp.first); ConstvalNode *rConst = safe_cast(rp.first); + if (node->GetOpndType() == PTY_f128 || node->GetPrimType() == PTY_f128) { + // folding of non-unary float128 is not supported yet + return std::make_pair(static_cast(node), std::nullopt); + } Opcode opcode = node->GetOpCode(); if (lConst != nullptr && rConst != nullptr && !IsPrimitiveDynType(node->GetOpndType())) { result = FoldConstComparison(node->GetOpCode(), node->GetPrimType(), node->GetOpndType(), @@ -2277,7 +2294,7 @@ StmtNode *ConstantFold::SimplifyDassign(DassignNode *node) { return node; } -StmtNode *ConstantFold::SimplifyIassignWithAddrofBaseNode(IassignNode &node, const AddrofNode &base) { +StmtNode *ConstantFold::SimplifyIassignWithAddrofBaseNode(IassignNode &node, const AddrofNode &base) const { auto *mirTypeOfIass = GlobalTables::GetTypeTable().GetTypeFromTyIdx(node.GetTyIdx()); if (!mirTypeOfIass->IsMIRPtrType()) { return &node; diff --git a/src/mapleall/mpl2mpl/src/expand128floats.cpp 
b/src/mapleall/mpl2mpl/src/expand128floats.cpp index 2c809f71c36f9230da30cb16d1c83ac2456d6c6d..fdf652a1e013b5722afa27f27aef51b599d7649c 100644 --- a/src/mapleall/mpl2mpl/src/expand128floats.cpp +++ b/src/mapleall/mpl2mpl/src/expand128floats.cpp @@ -34,7 +34,7 @@ std::unordered_map f128OpsRes = { {OP_le, {OP_le, OP_gt}} }; -std::string Expand128Floats::GetSequentialName0(const std::string &prefix, uint32_t num) { +std::string Expand128Floats::GetSequentialName0(const std::string &prefix, uint32_t num) const { std::stringstream ss; ss << prefix << num; return ss.str(); @@ -45,17 +45,26 @@ uint32 Expand128Floats::GetSequentialNumber() const { return unnamedSymbolIdx++; } -std::string Expand128Floats::GetSequentialName(const std::string &prefix) { +std::string Expand128Floats::GetSequentialName(const std::string &prefix) const { std::string name = GetSequentialName0(prefix, GetSequentialNumber()); return name; } -std::string Expand128Floats::SelectSoftFPCall(Opcode opCode, const BaseNode *node) { +std::string Expand128Floats::SelectSoftFPCall(Opcode opCode, BaseNode *node) const { + CHECK_FATAL(node, "Nullptr at Expand129Floats::SelectSoftFPCall method"); switch (opCode) { case OP_cvt: case OP_trunc: - if (static_cast(node)->FromType() == PTY_f128) { - switch (static_cast(node)->ptyp) { + if (static_cast(node)->FromType() == PTY_f128) { + switch (static_cast(node)->ptyp) { + case PTY_i8: + case PTY_i16: + node->ptyp = PTY_i32; + return "__fixtfsi"; + case PTY_u8: + case PTY_u16: + node->ptyp = PTY_u32; + return "__fixunstfsi"; case PTY_i32: return "__fixtfsi"; case PTY_u32: @@ -76,8 +85,22 @@ std::string Expand128Floats::SelectSoftFPCall(Opcode opCode, const BaseNode *nod } } else if (static_cast(node)->ptyp == PTY_f128) { switch (static_cast(node)->FromType()) { + case PTY_i8: + case PTY_i16: { + auto *cvtNode = static_cast(node); + cvtNode->SetFromType(PTY_i32); + cvtNode->Opnd(0)->ptyp = PTY_i32; + return "__floatsitf"; + } case PTY_i32: return "__floatsitf"; + case 
PTY_u8: + case PTY_u16: { + auto *cvtNode = static_cast(node); + cvtNode->SetFromType(PTY_u32); + cvtNode->Opnd(0)->ptyp = PTY_u32; + return "__floatunsitf"; + } case PTY_u32: case PTY_a32: return "__floatunsitf"; diff --git a/src/mapleall/mpl2mpl/src/ext_constantfold.cpp b/src/mapleall/mpl2mpl/src/ext_constantfold.cpp index d244d043bc7571fa7cfa8fe3bacd33c070c3716c..f3f2bd157d167e69c51af87c7fd39d3bbb1cf769 100644 --- a/src/mapleall/mpl2mpl/src/ext_constantfold.cpp +++ b/src/mapleall/mpl2mpl/src/ext_constantfold.cpp @@ -133,7 +133,7 @@ BaseNode *ExtConstantFold::ExtFold(BaseNode *node) { return DispatchFold(node); } -BaseNode *ExtConstantFold::ExtFoldIor(BinaryNode *node) { +BaseNode *ExtConstantFold::ExtFoldIor(BinaryNode *node) const { CHECK_NULL_FATAL(node); // The target pattern (Cior, Lior): // x == c || x == c+1 || ... || x == c+k @@ -156,15 +156,12 @@ BaseNode *ExtConstantFold::ExtFoldIor(BinaryNode *node) { operands.push(static_cast(operand)->GetBOpnd(1)); } else if (op == OP_eq) { BinaryNode *bNode = static_cast(operand); - if (lNode == nullptr) { - if (bNode->Opnd(0)->GetOpCode() == OP_dread || - bNode->Opnd(0)->GetOpCode() == OP_iread) { - lNode = bNode->Opnd(0); - } else { - // Consider other cases in future - isWorkable = false; - break; - } + if (lNode == nullptr && (bNode->Opnd(0)->GetOpCode() == OP_dread || bNode->Opnd(0)->GetOpCode() == OP_iread)) { + lNode = bNode->Opnd(0); + } else if (lNode == nullptr) { + // Consider other cases in future + isWorkable = false; + break; } ASSERT_NOT_NULL(lNode); @@ -187,7 +184,7 @@ BaseNode *ExtConstantFold::ExtFoldIor(BinaryNode *node) { if (isWorkable) { std::sort(uniqOperands.begin(), uniqOperands.end()); - uniqOperands.erase(std::unique(uniqOperands.begin(), uniqOperands.end()), uniqOperands.cend()); + (void)uniqOperands.erase(std::unique(uniqOperands.begin(), uniqOperands.end()), uniqOperands.cend()); if ((uniqOperands.size() >= 2) && (uniqOperands[uniqOperands.size() - 1] == uniqOperands[0] + 
static_cast(uniqOperands.size()) - 1)) { PrimType nPrimType = node->GetPrimType(); diff --git a/src/mapleall/mpl2mpl/src/gen_check_cast.cpp b/src/mapleall/mpl2mpl/src/gen_check_cast.cpp index ac82b0ccbcd26c648d78d87433f0c554933a731f..d552a7f15d31a820a164332e9373fc64c117b43f 100644 --- a/src/mapleall/mpl2mpl/src/gen_check_cast.cpp +++ b/src/mapleall/mpl2mpl/src/gen_check_cast.cpp @@ -75,7 +75,7 @@ void CheckCastGenerator::InitFuncs() { castExceptionFunc->SetAttr(FUNCATTR_nosideeffect); } -MIRSymbol *CheckCastGenerator::GetOrCreateClassInfoSymbol(const std::string &className) { +MIRSymbol *CheckCastGenerator::GetOrCreateClassInfoSymbol(const std::string &className) const { std::string classInfoName = CLASSINFO_PREFIX_STR + className; builder->GlobalLock(); MIRSymbol *classInfoSymbol = builder->GetGlobalDecl(classInfoName); @@ -306,7 +306,7 @@ void CheckCastGenerator::GenAllCheckCast() { } } -BaseNode *CheckCastGenerator::GetObjectShadow(BaseNode *opnd) { +BaseNode *CheckCastGenerator::GetObjectShadow(BaseNode *opnd) const { FieldID fieldID = builder->GetStructFieldIDFromFieldNameParentFirst(WKTypes::Util::GetJavaLangObjectType(), namemangler::kShadowClassName); BaseNode *ireadExpr = @@ -386,7 +386,7 @@ BaseNode *CheckCastGenerator::GetObjectShadow(BaseNode *opnd) { // } // } // } -void CheckCastGenerator::AssignedCastValue(StmtNode &stmt) { +void CheckCastGenerator::AssignedCastValue(StmtNode &stmt) const { if (stmt.GetOpCode() == OP_intrinsiccallwithtype) { return; } @@ -451,10 +451,10 @@ void CheckCastGenerator::ConvertCheckCastToIsAssignableFrom(StmtNode &stmt) { opndArgs.push_back(objectClassReadNode); isAssignableFromNode = builder->CreateExprIntrinsicop(INTRN_JAVA_ISASSIGNABLEFROM, OP_intrinsicopwithtype, *checkType, opndArgs); - PregIdx IsAssignableFromResultIdx = currFunc->GetPregTab()->CreatePreg(PTY_u1); + PregIdx isAssignableFromResultIdx = currFunc->GetPregTab()->CreatePreg(PTY_u1); isAssignableFromResultAssign = - builder->CreateStmtRegassign(PTY_u1, 
IsAssignableFromResultIdx, isAssignableFromNode); - isAssignableFromResultReadNode = builder->CreateExprRegread(PTY_u1, IsAssignableFromResultIdx); + builder->CreateStmtRegassign(PTY_u1, isAssignableFromResultIdx, isAssignableFromNode); + isAssignableFromResultReadNode = builder->CreateExprRegread(PTY_u1, isAssignableFromResultIdx); } else { MIRSymbol *objectClassSym = builder->GetOrCreateLocalDecl(kObjectClassSym, *GlobalTables::GetTypeTable().GetPtr()); @@ -468,10 +468,10 @@ void CheckCastGenerator::ConvertCheckCastToIsAssignableFrom(StmtNode &stmt) { opndArgs.push_back(objectClassReadNode); isAssignableFromNode = builder->CreateExprIntrinsicop(INTRN_JAVA_ISASSIGNABLEFROM, OP_intrinsicopwithtype, *checkType, opndArgs); - MIRSymbol *IsAssignableFromResultSym = + MIRSymbol *isAssignableFromResultSym = builder->GetOrCreateLocalDecl(kIsAssignableFromResult, *GlobalTables::GetTypeTable().GetUInt1()); - isAssignableFromResultAssign = builder->CreateStmtDassign(*IsAssignableFromResultSym, 0, isAssignableFromNode); - isAssignableFromResultReadNode = builder->CreateExprDread(*IsAssignableFromResultSym); + isAssignableFromResultAssign = builder->CreateStmtDassign(*isAssignableFromResultSym, 0, isAssignableFromNode); + isAssignableFromResultReadNode = builder->CreateExprDread(*isAssignableFromResultSym); } BaseNode *condZero = builder->CreateExprCompare( @@ -527,7 +527,7 @@ void CheckCastGenerator::GenAllCheckCast(bool isHotFunc) { // Use "srcclass == targetclass" replace instanceof if target class is final. 
void CheckCastGenerator::ReplaceNoSubClassIsAssignableFrom(BlockNode &blockNode, StmtNode &stmt, const MIRPtrType &ptrType, - const IntrinsicopNode &intrinsicNode) { + const IntrinsicopNode &intrinsicNode) const { MapleVector nopnd = intrinsicNode.GetNopnd(); BaseNode *subClassNode = nopnd[0]; MIRClassType &targetClassType = static_cast(*ptrType.GetPointedType()); @@ -577,7 +577,7 @@ void CheckCastGenerator::ReplaceNoSubClassIsAssignableFrom(BlockNode &blockNode, } bool CheckCastGenerator::IsDefinedConstClass(const StmtNode &stmt, const MIRPtrType &targetClassType, - PregIdx &classSymPregIdx, MIRSymbol *&classSym) { + PregIdx &classSymPregIdx, MIRSymbol *&classSym) const { StmtNode *stmtPre = stmt.GetPrev(); Opcode opPre = stmtPre->GetOpCode(); if ((opPre != OP_dassign) && (opPre != OP_regassign)) { @@ -612,7 +612,7 @@ bool CheckCastGenerator::IsDefinedConstClass(const StmtNode &stmt, const MIRPtrT // inline check cache, it implements __MRT_IsAssignableFromCheckCache void CheckCastGenerator::ReplaceIsAssignableFromUsingCache(BlockNode &blockNode, StmtNode &stmt, const MIRPtrType &targetClassType, - const IntrinsicopNode &intrinsicNode) { + const IntrinsicopNode &intrinsicNode) const { StmtNode *resultFalse = nullptr; StmtNode *resultTrue = nullptr; StmtNode *cacheFalseClassesAssign = nullptr; @@ -752,7 +752,8 @@ void CheckCastGenerator::CheckIsAssignableFrom(BlockNode &blockNode, StmtNode &s ReplaceIsAssignableFromUsingCache(blockNode, stmt, *ptrType, intrinsicNode); } -void CheckCastGenerator::ConvertInstanceofToIsAssignableFrom(StmtNode &stmt, const IntrinsicopNode &intrinsicNode) { +void CheckCastGenerator::ConvertInstanceofToIsAssignableFrom(StmtNode &stmt, + const IntrinsicopNode &intrinsicNode) const { MIRType *targetClassType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(intrinsicNode.GetTyIdx()); StmtNode *resultFalse = nullptr; StmtNode *result = nullptr; diff --git a/src/mapleall/mpl2mpl/src/gen_profile.cpp b/src/mapleall/mpl2mpl/src/gen_profile.cpp 
index 8c54e3fa498c59c6a0b3a066a8c475f05552b8e3..04f1da7e5a6f8838acafb2449aaeb7842bb8fa84 100644 --- a/src/mapleall/mpl2mpl/src/gen_profile.cpp +++ b/src/mapleall/mpl2mpl/src/gen_profile.cpp @@ -41,8 +41,7 @@ void ProfileGen::CreateModProfDesc() { TyIdx ptrTyIdx = GlobalTables::GetTypeTable().GetPtr()->GetTypeIndex(); MIRType *voidTy = GlobalTables::GetTypeTable().GetVoid(); MIRType *voidPtrTy = GlobalTables::GetTypeTable().GetVoidPtr(); - TyIdx charPtrTyIdx = GlobalTables::GetTypeTable(). - GetOrCreatePointerType(TyIdx(PTY_u8))->GetTypeIndex(); + TyIdx charPtrTyIdx = GlobalTables::GetTypeTable().GetOrCreatePointerType(TyIdx(PTY_u8))->GetTypeIndex(); // Ref: __gcov_merge_add (gcov_type *, unsigned) MIRType *retTy = GlobalTables::GetTypeTable().GetVoid(); @@ -137,7 +136,7 @@ void ProfileGen::CreateModProfDesc() { modProfDescSymMirConst->AddItem(mergeFuncsMirConst, 7); - uint32 numOfFunc = validFuncs.size(); + uint64 numOfFunc = validFuncs.size(); MIRIntConst *nfuncsMirConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(numOfFunc, *u32Ty); modProfDescSymMirConst->AddItem(nfuncsMirConst, 8); @@ -206,7 +205,7 @@ void ProfileGen::CreateFuncProfDesc() { continue; } - uint64 nCtrs = f->GetNumCtrs(); + uint32 nCtrs = f->GetNumCtrs(); MIRSymbol *ctrTblSym = f->GetProfCtrTbl(); // Initialization of counter table @@ -280,7 +279,7 @@ void ProfileGen::CreateFuncProfDesc() { } void ProfileGen::CreateFuncProfDescTbl() { - uint tblSize = validFuncs.size(); + size_t tblSize = validFuncs.size(); if (tblSize == 0) { funcProfDescTbl = nullptr; return; @@ -288,7 +287,8 @@ void ProfileGen::CreateFuncProfDescTbl() { // Create function descriptor table MIRType *funcProfDescPtrTy = GlobalTables::GetTypeTable().GetOrCreatePointerType(funcProfDescs[0]->GetTyIdx()); - MIRType *arrOffuncProfDescPtrTy = GlobalTables::GetTypeTable().GetOrCreateArrayType(*funcProfDescPtrTy, tblSize); + MIRType *arrOffuncProfDescPtrTy = GlobalTables::GetTypeTable().GetOrCreateArrayType(*funcProfDescPtrTy, 
+ static_cast(tblSize)); std::string fileName = mod.GetFileName(); MIRSymbol *funcProfDescTblSym = mod.GetMIRBuilder()->CreateGlobalDecl( namemangler::kprefixProfFuncDescTbl + FlatenName(fileName), *arrOffuncProfDescPtrTy, kScFstatic); @@ -337,10 +337,10 @@ void ProfileGen::CreateInitProc() { gccProfInitArgSym->SetTyIdx(argPtrTy->GetTypeIndex()); gccProfInitProtoTy->AddArgument(gccProfInitArgSym); - MapleVector ActArg(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + MapleVector actArg(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); AddrofNode *addrModProfDesc = mirBuilder->CreateExprAddrof(0, *modProfDesc); - ActArg.push_back(addrModProfDesc); - CallNode *callGInit = mirBuilder->CreateStmtCall(gccProfInitProtoTy->GetPuidx(), ActArg); + actArg.push_back(addrModProfDesc); + CallNode *callGInit = mirBuilder->CreateStmtCall(gccProfInitProtoTy->GetPuidx(), actArg); BlockNode *block = mplProfInit->GetCodeMemPool()->New(); block->AddStatement(callGInit); @@ -367,8 +367,8 @@ void ProfileGen::CreateExitProc() { MIRFunction *gccProfExitProtoTy = mirBuilder->GetOrCreateFunction(namemangler::kGCCProfExit, voidTy->GetTypeIndex()); gccProfExitProtoTy->AllocSymTab(); - MapleVector ActArg(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); - CallNode *callGExit = mirBuilder->CreateStmtCall(gccProfExitProtoTy->GetPuidx(), ActArg); + MapleVector actArg(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + CallNode *callGExit = mirBuilder->CreateStmtCall(gccProfExitProtoTy->GetPuidx(), actArg); BlockNode *block = mplProfExit->GetCodeMemPool()->New(); block->AddStatement(callGExit); diff --git a/src/mapleall/mpl2mpl/src/ginline.cpp b/src/mapleall/mpl2mpl/src/ginline.cpp index 12467e1aab4f74d633ef6a5c134704e7dd6069fd..bb17c35721024dc666f1e3cd4cd904e2023bf383 100644 --- a/src/mapleall/mpl2mpl/src/ginline.cpp +++ b/src/mapleall/mpl2mpl/src/ginline.cpp @@ -91,7 +91,7 @@ int64 GInline::GetFunctionStaticInsns(MIRFunction &func) const { int64 
GInline::ComputeTotalSize() const { int64 total = 0; - for (auto it = cg->Begin(); it != cg->End(); ++it) { + for (auto it = cg->CBegin(); it != cg->CEnd(); ++it) { MIRFunction *func = it->first; if (func->GetBody() == nullptr) { continue; @@ -218,7 +218,7 @@ void GInline::PrintGInlineReport() const { } // We try to inline shallow small callee (especially with inline attr), ignoring overall growth limit -bool GInline::CanIgnoreGrowthLimit(const CallSiteNode &callSiteNode) { +bool GInline::CanIgnoreGrowthLimit(const CallSiteNode &callSiteNode) const { auto ifCode = callSiteNode.GetCallInfo()->GetInlineFailedCode(); if (ifCode == kIfcInlineList || ifCode == kIfcInlineListCallsite || ifCode == kIfcHardCoded || ifCode == kIfcProfileHotCallsite) { @@ -254,14 +254,14 @@ bool GInline::CanIgnoreGrowthLimit(const CallSiteNode &callSiteNode) { void GInline::PrepareCallsiteSet() { // Init callsite set - for (auto it = cg->Begin(); it != cg->End(); ++it) { + for (auto it = cg->CBegin(); it != cg->CEnd(); ++it) { CGNode *node = it->second; - for (auto &edge : node->GetCallee()) { - CallInfo *info = edge.first; + for (auto edgeIter = node->GetCallee().cbegin(); edgeIter != node->GetCallee().cend(); ++edgeIter) { + CallInfo *info = edgeIter->first; if (!ConsiderInlineCallsite(*info, 0)) { continue; } - CHECK_FATAL(edge.second->size() == 1, "Call stmt has only one candidate."); + CHECK_FATAL(edgeIter->second->size() == 1, "Call stmt has only one candidate."); InsertNewCallSite(*info, 0); } } @@ -330,7 +330,7 @@ void GInline::InlineCallsiteSet() { heapNode->Dump(); LogInfo::MapleLogger() << (ignoreLimit ? 
" ignoreLimit\n" : "\n"); } - auto *transformer = alloc.New(kGreedyInline, *caller, *callee, *callNode, true, cg); + auto *transformer = alloc.New(kGreedyInline, *caller, *callee, *callNode, dumpDetail, cg); std::vector newCallInfo; bool inlined = transformer->PerformInline(&newCallInfo); if (dumpDetail && !newCallInfo.empty()) { @@ -405,7 +405,7 @@ void GInline::CleanupInline() { InlineListInfo::Clear(); BlockCallBackMgr::ClearCallBacks(); // Release all inline summary - for (auto *func : module.GetFunctionList()) { + for (auto *func : std::as_const(module.GetFunctionList())) { if (func != nullptr) { func->DiscardInlineSummary(); } diff --git a/src/mapleall/mpl2mpl/src/inline.cpp b/src/mapleall/mpl2mpl/src/inline.cpp index 340117038f5842128e3ace23251d459e5ff44891..50ac42ef00360033dec66ad99658d5ddbe33bbd6 100644 --- a/src/mapleall/mpl2mpl/src/inline.cpp +++ b/src/mapleall/mpl2mpl/src/inline.cpp @@ -327,7 +327,7 @@ void MInline::InlineCalls(CGNode &node) { } while (changed && currInlineDepth < Options::inlineDepth && GetNumStmtsOfFunc(*func) <= kBigFuncNumStmts); } -bool MInline::CalleeReturnValueCheck(StmtNode &stmtNode, CallNode &callStmt) { +bool MInline::CalleeReturnValueCheck(StmtNode &stmtNode, CallNode &callStmt) const { NaryStmtNode &returnNode = static_cast(stmtNode); if (!kOpcodeInfo.IsCallAssigned(callStmt.GetOpCode()) && returnNode.NumOpnds() == 0) { return true; @@ -468,7 +468,7 @@ void MInline::AdjustInlineThreshold(const MIRFunction &caller, MIRFunction &call } } -bool MInline::IsSmallCalleeForEarlyInline(MIRFunction &callee, int32 *outInsns = nullptr) { +bool MInline::IsSmallCalleeForEarlyInline(MIRFunction &callee, int32 *outInsns = nullptr) const { MemPool tempMemPool(memPoolCtrler, ""); StmtCostAnalyzer stmtCostAnalyzer(&tempMemPool, &callee); int32 insns = 0; @@ -520,8 +520,8 @@ InlineResult MInline::AnalyzeCallee(const MIRFunction &caller, MIRFunction &call } void MInline::PostInline(MIRFunction &caller) { - auto it = 
funcToCostMap.find(&caller); - if (it != funcToCostMap.end()) { + auto it = std::as_const(funcToCostMap).find(&caller); + if (it != funcToCostMap.cend()) { (void)funcToCostMap.erase(it); } } diff --git a/src/mapleall/mpl2mpl/src/inline_analyzer.cpp b/src/mapleall/mpl2mpl/src/inline_analyzer.cpp index 8fb86f269de9a9a0a980d5f4e9e38f23865a58fb..c1d4e71be230bb3e07b5d3bcc3082abac2d81669 100644 --- a/src/mapleall/mpl2mpl/src/inline_analyzer.cpp +++ b/src/mapleall/mpl2mpl/src/inline_analyzer.cpp @@ -141,8 +141,8 @@ void InlineListInfo::ApplyInlineListInfo(const std::string &path, std::map(); (void)listCallee.emplace(calleeStrIdx, callerList); } @@ -151,8 +151,8 @@ void InlineListInfo::ApplyInlineListInfo(const std::string &path, std::map' "); str = str.substr(pos); GStrIdx callerStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(str); - auto it = listCallee.find(calleeStrIdx); - CHECK_FATAL(it != listCallee.end(), "illegal configuration for inlineList"); + const auto it = std::as_const(listCallee).find(calleeStrIdx); + CHECK_FATAL(it != listCallee.cend(), "illegal configuration for inlineList"); (void)it->second->insert(callerStrIdx); } } @@ -219,13 +219,13 @@ void InlineListInfo::Clear() { } valid = false; - for (auto &pair : inlineList) { - delete pair.second; + for (auto pairIter = inlineList.cbegin(); pairIter != inlineList.cend(); ++pairIter) { + delete pairIter->second; } inlineList.clear(); - for (auto &pair : noInlineList) { - delete pair.second; + for (auto pairIter = noInlineList.cbegin(); pairIter != noInlineList.cend(); ++pairIter) { + delete pairIter->second; } noInlineList.clear(); @@ -277,10 +277,10 @@ void InlineListInfo::Prepare() { (void)excludedCallees.insert(strIdx); } - std::set whitelistFunc { + std::set kWhiteListFunc { #include "rcwhitelist.def" }; - for (auto it = whitelistFunc.begin(); it != whitelistFunc.end(); ++it) { + for (auto it = kWhiteListFunc.begin(); it != kWhiteListFunc.end(); ++it) { GStrIdx strIdx = 
GlobalTables::GetStrTable().GetStrIdxFromName(*it); (void)rcWhiteList.insert(strIdx); } diff --git a/src/mapleall/mpl2mpl/src/inline_mplt.cpp b/src/mapleall/mpl2mpl/src/inline_mplt.cpp index fe1c2fb12dd376888fef90744e3696566f8cf92a..d48ff627bb025c0febae0ca16dfd9b0b9363de13 100644 --- a/src/mapleall/mpl2mpl/src/inline_mplt.cpp +++ b/src/mapleall/mpl2mpl/src/inline_mplt.cpp @@ -22,7 +22,7 @@ bool InlineMplt::CollectInlineInfo(uint32 inlineSize, uint32 level) { if (tempSet.empty()) { return false; } - for (auto *func : tempSet) { + for (auto func : std::as_const(tempSet)) { if (func == nullptr || func->GetBody() == nullptr || func->IsStatic() || func->GetAttr(FUNCATTR_noinline) || GetFunctionSize(*func) > inlineSize) { continue; @@ -233,7 +233,7 @@ void InlineMplt::DumpOptimizedFunctionTypes() { } } -uint32 InlineMplt::GetFunctionSize(MIRFunction &mirFunc) { +uint32 InlineMplt::GetFunctionSize(MIRFunction &mirFunc) const { auto *tempMemPool = memPoolCtrler.NewMemPool("temp mempool", false); StmtCostAnalyzer sca(tempMemPool, &mirFunc); uint32 funcSize = static_cast(sca.GetStmtsCost(mirFunc.GetBody())) / static_cast(kSizeScale); diff --git a/src/mapleall/mpl2mpl/src/inline_summary.cpp b/src/mapleall/mpl2mpl/src/inline_summary.cpp index 75dd3b10e6923cbe54f984c3f9aa8440d7817045..e93b15bbc01a0071110f5c152e97273d357f89c4 100644 --- a/src/mapleall/mpl2mpl/src/inline_summary.cpp +++ b/src/mapleall/mpl2mpl/src/inline_summary.cpp @@ -288,7 +288,7 @@ BaseNode *LiteExpr::ConvertToMapleIR(MapleAllocator &alloc, const ArgInfoVec *ar if (op == OP_select) { auto *node1 = opnds[1]->ConvertToMapleIR(alloc, argInfoVec); auto *node2 = opnds[2]->ConvertToMapleIR(alloc, argInfoVec); - return alloc.New(op, type, node0, node1, node2); + return (!node1 || !node2) ? 
nullptr : alloc.New(op, type, node0, node1, node2); } } else { CHECK_FATAL(false, "NYI"); @@ -525,7 +525,7 @@ void MergeInlineSummary(MIRFunction &caller, MIRFunction &callee, const StmtNode auto callStmtId = callStmt.GetStmtID(); auto &argInfosMap = callerSummary->GetArgInfosMap(); ArgInfoVec *argInfoVec = nullptr; - const auto &it = argInfosMap.find(callStmtId); + const auto &it = std::as_const(argInfosMap).find(callStmtId); if (it != argInfosMap.end()) { argInfoVec = it->second; } @@ -551,7 +551,7 @@ void InlineSummary::MergeSummary(const InlineSummary &fromSummary, uint32 callSt InlineEdgeSummary *callEdgeSummary = nullptr; int32 callFrequency = -1; auto *callBBPredicate = Predicate::TruePredicate(); - const auto &eit = edgeSummaryMap.find(callStmtId); + const auto &eit = std::as_const(edgeSummaryMap).find(callStmtId); if (eit != edgeSummaryMap.end()) { callEdgeSummary = eit->second; callFrequency = callEdgeSummary->frequency; diff --git a/src/mapleall/mpl2mpl/src/inline_transformer.cpp b/src/mapleall/mpl2mpl/src/inline_transformer.cpp index db506756cfe8f5770d5f120680d088a55c914f2b..e285d72cb32894b9403d4123ccaea418a4c941e4 100644 --- a/src/mapleall/mpl2mpl/src/inline_transformer.cpp +++ b/src/mapleall/mpl2mpl/src/inline_transformer.cpp @@ -500,9 +500,9 @@ BlockNode *InlineTransformer::CloneFuncBody(BlockNode &funcBody, bool recursiveF if (updateFreq) { auto *callerProfData = caller.GetFuncProfData(); auto *calleeProfData = callee.GetFuncProfData(); - uint64 callsiteFreq = static_cast(callerProfData->GetStmtFreq(callStmt.GetStmtID())); + FreqType callsiteFreq = callerProfData->GetStmtFreq(callStmt.GetStmtID()); FreqType calleeEntryFreq = calleeProfData->GetFuncFrequency(); - uint32_t updateOp = static_cast(kKeepOrigFreq | kUpdateFreqbyScale); + uint32_t updateOp = static_cast(kKeepOrigFreq) | static_cast(kUpdateFreqbyScale); BlockNode *blockNode; if (recursiveFirstClone) { blockNode = funcBody.CloneTreeWithFreqs(theMIRModule->GetCurFuncCodeMPAllocator(), 
callerProfData->GetStmtFreqs(), @@ -511,7 +511,7 @@ BlockNode *InlineTransformer::CloneFuncBody(BlockNode &funcBody, bool recursiveF blockNode = funcBody.CloneTreeWithFreqs(theMIRModule->GetCurFuncCodeMPAllocator(), callerProfData->GetStmtFreqs(), calleeProfData->GetStmtFreqs(), callsiteFreq, calleeEntryFreq, updateOp); // update callee left entry Frequency - int64_t calleeFreq = calleeProfData->GetFuncRealFrequency(); + FreqType calleeFreq = calleeProfData->GetFuncRealFrequency(); calleeProfData->SetFuncRealFrequency(calleeFreq - callsiteFreq); } return blockNode; @@ -549,6 +549,97 @@ BlockNode *InlineTransformer::GetClonedCalleeBody() { } } +void DoReplaceConstFormal(const MIRFunction &caller, BaseNode &parent, uint32 opndIdx, BaseNode &expr, + const RealArgPropCand &realArg) { + if (realArg.kind == RealArgPropCandKind::kVar && expr.GetOpCode() == OP_dread) { + auto &dread = static_cast(expr); + dread.SetStFullIdx(realArg.data.symbol->GetStIdx().FullIdx()); + } else if (realArg.kind == RealArgPropCandKind::kPreg && expr.GetOpCode() == OP_regread) { + auto ®read = static_cast(expr); + auto pregNo = static_cast(realArg.data.symbol->GetPreg()->GetPregNo()); + PregIdx idx = caller.GetPregTab()->GetPregIdxFromPregno(pregNo); + regread.SetRegIdx(idx); + } else if (realArg.kind == RealArgPropCandKind::kConst) { + MIRConst *mirConst = realArg.data.mirConst; + CHECK_NULL_FATAL(mirConst); + auto *constExpr = + theMIRModule->CurFuncCodeMemPool()->New(mirConst->GetType().GetPrimType(), mirConst); + parent.SetOpnd(constExpr, opndIdx); + } +} + +void InlineTransformer::TryReplaceConstFormalWithRealArg(BaseNode &parent, uint32 opndIdx, + MIRSymbol &formal, const RealArgPropCand &realArg, const std::pair &offsetPair) { + uint32 stIdxOff = offsetPair.first; + uint32 regIdxOff = offsetPair.second; + auto &expr = *parent.Opnd(opndIdx); + Opcode op = expr.GetOpCode(); + if (op == OP_dread && formal.IsVar()) { + auto &dread = static_cast(expr); + // After RenameSymbols, we should 
consider stIdxOff + uint32 newIdx = formal.GetStIndex() + stIdxOff; + if (dread.GetStIdx().Islocal() && dread.GetStIdx().Idx() == newIdx) { + DoReplaceConstFormal(caller, parent, opndIdx, expr, realArg); + } + } else if (op == OP_regread && formal.IsPreg()) { + auto ®read = static_cast(expr); + PregIdx idx = callee.GetPregTab()->GetPregIdxFromPregno(static_cast(formal.GetPreg()->GetPregNo())); + // After RenamePregs, we should consider regIdxOff + int32 newIdx = idx + static_cast(regIdxOff); + if (regread.GetRegIdx() == newIdx) { + DoReplaceConstFormal(caller, parent, opndIdx, expr, realArg); + } + } +} + +// Propagate const formal in maple IR node (stmt and expr) +void InlineTransformer::PropConstFormalInNode(BaseNode &baseNode, MIRSymbol &formal, const RealArgPropCand &realArg, + uint32 stIdxOff, uint32 regIdxOff) { + for (uint32 i = 0; i < baseNode.NumOpnds(); ++i) { + TryReplaceConstFormalWithRealArg(baseNode, i, formal, realArg, {stIdxOff, regIdxOff}); + PropConstFormalInNode(*baseNode.Opnd(i), formal, realArg, stIdxOff, regIdxOff); + } +} + +// Replace all accesses of `formal` in the `newBody` with the `realArg`. +// The `stIdxOff` and `regIdxOff` are necessary for computing new stIdx/regIdx of callee's symbols/pregs +// because symbol/preg table of caller and callee have been merged. 
+void InlineTransformer::PropConstFormalInBlock(BlockNode &newBody, MIRSymbol &formal, const RealArgPropCand &realArg, + uint32 stIdxOff, uint32 regIdxOff) { + for (auto &stmt : newBody.GetStmtNodes()) { + switch (stmt.GetOpCode()) { + case OP_foreachelem: { + auto *subBody = static_cast(stmt).GetLoopBody(); + PropConstFormalInBlock(*subBody, formal, realArg, stIdxOff, regIdxOff); + break; + } + case OP_doloop: { + auto *subBody = static_cast(stmt).GetDoBody(); + PropConstFormalInBlock(*subBody, formal, realArg, stIdxOff, regIdxOff); + break; + } + case OP_dowhile: + case OP_while: { + auto *subBody = static_cast(stmt).GetBody(); + PropConstFormalInBlock(*subBody, formal, realArg, stIdxOff, regIdxOff); + break; + } + case OP_if: { + IfStmtNode &ifStmt = static_cast(stmt); + PropConstFormalInBlock(*ifStmt.GetThenPart(), formal, realArg, stIdxOff, regIdxOff); + if (ifStmt.GetElsePart() != nullptr) { + PropConstFormalInBlock(*ifStmt.GetElsePart(), formal, realArg, stIdxOff, regIdxOff); + } + break; + } + default: { + PropConstFormalInNode(stmt, formal, realArg, stIdxOff, regIdxOff); + break; + } + } + } +} + void InlineTransformer::AssignActualToFormal(BlockNode &newBody, uint32 stIdxOff, uint32 regIdxOff, BaseNode &oldActual, const MIRSymbol &formal) { BaseNode *actual = &oldActual; @@ -587,6 +678,27 @@ void InlineTransformer::AssignActualToFormal(BlockNode &newBody, uint32 stIdxOff return; } +// The parameter `argExpr` is a real argument of a callStmt in the `caller`. +// This function checks whether `argExpr` is a candidate of propagable argument. 
+void RealArgPropCand::Parse(MIRFunction &caller, BaseNode &argExpr) { + kind = RealArgPropCandKind::kUnknown; // reset kind + Opcode op = argExpr.GetOpCode(); + if (op == OP_constval) { + auto *constVal = static_cast(argExpr).GetConstVal(); + kind = RealArgPropCandKind::kConst; + data.mirConst = constVal; + } else if (op == OP_dread) { + auto stIdx = static_cast(argExpr).GetStIdx(); + // only consider const variable + auto *symbol = caller.GetLocalOrGlobalSymbol(stIdx); + ASSERT_NOT_NULL(symbol); + if (symbol->GetAttr(ATTR_const)) { + kind = RealArgPropCandKind::kVar; + data.symbol = symbol; + } + } +} + void InlineTransformer::AssignActualsToFormals(BlockNode &newBody, uint32 stIdxOff, uint32 regIdxOff) { if (static_cast(callStmt.NumOpnds()) != callee.GetFormalCount()) { LogInfo::MapleLogger() << "warning: # formal arguments != # actual arguments in the function " << callee.GetName() @@ -605,6 +717,14 @@ void InlineTransformer::AssignActualsToFormals(BlockNode &newBody, uint32 stIdxO MIRSymbol *formal = callee.GetFormal(i); CHECK_NULL_FATAL(currBaseNode); CHECK_NULL_FATAL(formal); + // Try to prop const value/symbol of real argument to const formal + RealArgPropCand realArg; + realArg.Parse(caller, *currBaseNode); + if (formal->GetAttr(ATTR_const) && realArg.kind != RealArgPropCandKind::kUnknown && + // Type consistency check can be relaxed further + formal->GetType()->GetPrimType() == realArg.GetPrimType()) { + PropConstFormalInBlock(newBody, *formal, realArg, stIdxOff, regIdxOff); + } AssignActualToFormal(newBody, stIdxOff, regIdxOff, *currBaseNode, *formal); if (updateFreq) { caller.GetFuncProfData()->CopyStmtFreq(newBody.GetFirst()->GetStmtID(), callStmt.GetStmtID()); diff --git a/src/mapleall/mpl2mpl/src/java_eh_lower.cpp b/src/mapleall/mpl2mpl/src/java_eh_lower.cpp index 744cfdaf6ac6cc4ecaf8b2c05aec7f0a59d2490d..21007c0fe577a39dc8f4669fc0ddbfb81133484a 100644 --- a/src/mapleall/mpl2mpl/src/java_eh_lower.cpp +++ b/src/mapleall/mpl2mpl/src/java_eh_lower.cpp 
@@ -19,10 +19,10 @@ #include "option.h" namespace { -const std::string strDivOpnd = "__div_opnd1"; -const std::string strDivRes = "__div_res"; -const std::string strMCCThrowArrayIndexOutOfBoundsException = "MCC_ThrowArrayIndexOutOfBoundsException"; -const std::string strMCCThrowNullPointerException = "MCC_ThrowNullPointerException"; +const std::string kStrDivOpnd = "__div_opnd1"; +const std::string kStrDivRes = "__div_res"; +const std::string kStrMCCThrowArrayIndexOutOfBoundsException = "MCC_ThrowArrayIndexOutOfBoundsException"; +const std::string kStrMCCThrowNullPointerException = "MCC_ThrowNullPointerException"; } // namespace // Do exception handling runtime insertion of runtime function call @@ -47,7 +47,7 @@ BaseNode *JavaEHLowerer::DoLowerDiv(BinaryNode &expr, BlockNode &blknode) { // Store divopnd to a tmp st if not a leaf node. BaseNode *divOpnd = expr.Opnd(1); if (!divOpnd->IsLeaf()) { - std::string opnd1name(strDivOpnd); + std::string opnd1name(kStrDivOpnd); opnd1name.append(std::to_string(divSTIndex)); if (useRegTmp) { PregIdx pregIdx = func->GetPregTab()->CreatePreg(ptype); @@ -70,7 +70,7 @@ BaseNode *JavaEHLowerer::DoLowerDiv(BinaryNode &expr, BlockNode &blknode) { divStmt = mirBuilder->CreateStmtRegassign(ptype, resPregIdx, &expr); retExprNode = GetMIRModule().GetMIRBuilder()->CreateExprRegread(ptype, resPregIdx); } else { - std::string resName(strDivRes); + std::string resName(kStrDivRes); resName.append(std::to_string(divSTIndex++)); MIRSymbol *divResSymbol = mirBuilder->CreateSymbol(TyIdx(ptype), resName, kStVar, kScAuto, GetMIRModule().CurFunction(), kScopeLocal); @@ -123,7 +123,7 @@ void JavaEHLowerer::DoLowerBoundaryCheck(IntrinsiccallNode &intrincall, BlockNod LabelNode *labStmt = GetMIRModule().CurFuncCodeMemPool()->New(); labStmt->SetLabelIdx(lbidx); MIRFunction *func = - GetMIRModule().GetMIRBuilder()->GetOrCreateFunction(strMCCThrowArrayIndexOutOfBoundsException, TyIdx(PTY_void)); + 
GetMIRModule().GetMIRBuilder()->GetOrCreateFunction(kStrMCCThrowArrayIndexOutOfBoundsException, TyIdx(PTY_void)); MapleVector args(GetMIRModule().GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); CallNode *callStmt = GetMIRModule().GetMIRBuilder()->CreateStmtCall(func->GetPuidx(), args); newblk.AddStatement(callStmt); @@ -194,7 +194,7 @@ BlockNode *JavaEHLowerer::DoLowerBlock(BlockNode &block) { auto *intConst = safe_cast(static_cast(opnd0)->GetConstVal()); CHECK_FATAL(intConst->IsZero(), "can only be zero"); MIRFunction *func = - GetMIRModule().GetMIRBuilder()->GetOrCreateFunction(strMCCThrowNullPointerException, TyIdx(PTY_void)); + GetMIRModule().GetMIRBuilder()->GetOrCreateFunction(kStrMCCThrowNullPointerException, TyIdx(PTY_void)); func->SetNoReturn(); MapleVector args(GetMIRModule().GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); CallNode *callStmt = GetMIRModule().GetMIRBuilder()->CreateStmtCall(func->GetPuidx(), args); diff --git a/src/mapleall/mpl2mpl/src/java_intrn_lowering.cpp b/src/mapleall/mpl2mpl/src/java_intrn_lowering.cpp index 2706ea1e22e2bf10863d319fbe98578005e208ee..dcda988ad79e27cc927245f564ac74780a539dfc 100644 --- a/src/mapleall/mpl2mpl/src/java_intrn_lowering.cpp +++ b/src/mapleall/mpl2mpl/src/java_intrn_lowering.cpp @@ -259,7 +259,7 @@ void JavaIntrnLowering::ProcessForNameClassLoader(CallNode &callNode) { } } -void JavaIntrnLowering::ProcessJavaIntrnMerge(StmtNode &assignNode, const IntrinsicopNode &intrinNode) { +void JavaIntrnLowering::ProcessJavaIntrnMerge(StmtNode &assignNode, const IntrinsicopNode &intrinNode) const { CHECK_FATAL(intrinNode.GetNumOpnds() == 1, "invalid JAVA_MERGE intrinsic node"); PrimType destType; DassignNode *dassign = nullptr; @@ -289,7 +289,7 @@ void JavaIntrnLowering::ProcessJavaIntrnMerge(StmtNode &assignNode, const Intrin } } -BaseNode *JavaIntrnLowering::JavaIntrnMergeToCvtType(PrimType destType, PrimType srcType, BaseNode *src) { +BaseNode 
*JavaIntrnLowering::JavaIntrnMergeToCvtType(PrimType destType, PrimType srcType, BaseNode *src) const { CHECK_FATAL(IsPrimitiveInteger(destType) || IsPrimitiveFloat(destType), "typemerge source type is not a primitive type"); CHECK_FATAL(IsPrimitiveInteger(srcType) || IsPrimitiveFloat(srcType), @@ -335,7 +335,7 @@ BaseNode *JavaIntrnLowering::JavaIntrnMergeToCvtType(PrimType destType, PrimType CHECK_FATAL(false, "NYI. Don't know what to do"); } -void JavaIntrnLowering::ProcessJavaIntrnFillNewArray(IntrinsiccallNode &intrinCall) { +void JavaIntrnLowering::ProcessJavaIntrnFillNewArray(IntrinsiccallNode &intrinCall) const { // First create a new array. CHECK_FATAL(intrinCall.GetReturnVec().size() == 1, "INTRN_JAVA_FILL_NEW_ARRAY should have 1 return value"); CallReturnPair retPair = intrinCall.GetCallReturnPair(0); diff --git a/src/mapleall/mpl2mpl/src/muid_replacement.cpp b/src/mapleall/mpl2mpl/src/muid_replacement.cpp index 2649944de16efa39f8c2dd26a33eecdb5f06991a..ee8663a5f841be9f35fe172a4ddc90b6b296691f 100644 --- a/src/mapleall/mpl2mpl/src/muid_replacement.cpp +++ b/src/mapleall/mpl2mpl/src/muid_replacement.cpp @@ -62,7 +62,7 @@ MIRSymbol *MUIDReplacement::GetSymbolFromName(const std::string &name) { return GlobalTables::GetGsymTable().GetSymbolFromStrIdx(gStrIdx); } -ConstvalNode* MUIDReplacement::GetConstvalNode(uint64 index) { +ConstvalNode* MUIDReplacement::GetConstvalNode(uint64 index) const { #ifdef USE_ARM32_MACRO return builder->CreateIntConst(index, PTY_i32); #else @@ -127,7 +127,7 @@ void MUIDReplacement::InsertArrayClassSet(const MIRType &type) { arrayClassSet.insert(klassJavaDescriptor); } -MIRType *MUIDReplacement::GetIntrinsicConstArrayClass(StmtNode &stmt) { +MIRType *MUIDReplacement::GetIntrinsicConstArrayClass(StmtNode &stmt) const { Opcode op = stmt.GetOpCode(); if (op == OP_dassign || op == OP_regassign) { auto &unode = static_cast(stmt); @@ -253,24 +253,25 @@ void MUIDReplacement::CollectFuncAndDataFromGlobalTab() { // entry 0 is reserved as 
nullptr MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); CHECK_FATAL(mirSymbol != nullptr, "Invalid global data symbol at index %u", i); + if (mirSymbol->GetStorageClass() == kScExtern && + (mirSymbol->IsReflectionClassInfo() || mirSymbol->IsStatic())) { + AddUndefData(mirSymbol); + continue; + } if (mirSymbol->GetStorageClass() == kScGlobal) { - if (mirSymbol->IsReflectionClassInfo()) { - if (!mirSymbol->IsForcedGlobalClassinfo() && - preloadedClassInfo.find(mirSymbol->GetName()) == preloadedClassInfo.end()) { + if (mirSymbol->IsReflectionClassInfo() && !mirSymbol->IsForcedGlobalClassinfo() && + preloadedClassInfo.find(mirSymbol->GetName()) == preloadedClassInfo.end()) { // With maple linker, global data can be declared as local mirSymbol->SetStorageClass(kScFstatic); - } - if (mirSymbol->GetKonst() != nullptr) { - // Use this to exclude forward-declared classinfo symbol - AddDefData(mirSymbol); - } - } else if (mirSymbol->IsStatic()) { + } + if (mirSymbol->IsReflectionClassInfo() && mirSymbol->GetKonst() != nullptr) { + // Use this to exclude forward-declared classinfo symbol + AddDefData(mirSymbol); + } + if (!mirSymbol->IsReflectionClassInfo() && mirSymbol->IsStatic()) { mirSymbol->SetStorageClass(kScFstatic); AddDefData(mirSymbol); } - } else if (mirSymbol->GetStorageClass() == kScExtern && - (mirSymbol->IsReflectionClassInfo() || mirSymbol->IsStatic())) { - AddUndefData(mirSymbol); } } } @@ -755,7 +756,7 @@ void MUIDReplacement::GenerateFuncDefTable() { } } -void MUIDReplacement::ReplaceMethodMetaFuncAddr(const MIRSymbol &funcSymbol, uint64 index) { +void MUIDReplacement::ReplaceMethodMetaFuncAddr(const MIRSymbol &funcSymbol, uint64 index) const { std::string symbolName = funcSymbol.GetName(); MIRSymbol *methodAddrDataSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kMethodAddrDataPrefixStr + symbolName)); @@ -870,7 +871,7 @@ void MUIDReplacement::GenerateDataDefTable() 
{ } } -void MUIDReplacement::ReplaceFieldMetaStaticAddr(const MIRSymbol &mirSymbol, uint32 index) { +void MUIDReplacement::ReplaceFieldMetaStaticAddr(const MIRSymbol &mirSymbol, uint32 index) const { std::string symbolName = mirSymbol.GetName(); MIRSymbol *fieldOffsetDataSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kFieldOffsetDataPrefixStr + symbolName)); @@ -1127,10 +1128,7 @@ void MUIDReplacement::GenerateRangeTable() { } std::string bbProfileName = namemangler::kBBProfileTabPrefixStr + GetMIRModule().GetFileNameAsPostfix(); MIRSymbol *funcProfCounterTabSym = GetSymbolFromName(bbProfileName); - std::vector irProfWorkList = { - funcProfInfTabSym, - funcProfCounterTabSym - }; + std::vector irProfWorkList = {funcProfInfTabSym, funcProfCounterTabSym}; InitRangeTabUseSym(irProfWorkList, rangeTabEntryType, *rangeTabConst); if (!rangeTabConst->GetConstVec().empty()) { rangeArrayType.SetSizeArrayItem(0, rangeTabConst->GetConstVec().size()); @@ -1837,7 +1835,7 @@ void MUIDReplacement::GenericSourceMuid() { MIRArrayType &arrayType = *GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetUInt8(), 0); MIRAggConst *newConst = GetMIRModule().GetMemPool()->New(GetMIRModule(), arrayType); - for (const char &c : Options::sourceMuid) { + for (const unsigned char &c : Options::sourceMuid) { MIRConst *charConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( static_cast(c), *GlobalTables::GetTypeTable().GetUInt8()); newConst->AddItem(charConst, 0); @@ -1906,7 +1904,7 @@ void MUIDReplacement::GenerateSourceInfo() { } } -void MUIDReplacement::ReleasePragmaMemPool() { +void MUIDReplacement::ReleasePragmaMemPool() const { for (Klass *klass : klassHierarchy->GetTopoSortedKlasses()) { MIRStructType *mirStruct = klass->GetMIRStructType(); mirStruct->GetPragmaVec().clear(); diff --git a/src/mapleall/mpl2mpl/src/native_stub_func.cpp b/src/mapleall/mpl2mpl/src/native_stub_func.cpp index 
29c22c1c027e767b1b277c2a6a50f91bdf60e5d0..88477df288faf8be774288bdf92416689420a2f7 100644 --- a/src/mapleall/mpl2mpl/src/native_stub_func.cpp +++ b/src/mapleall/mpl2mpl/src/native_stub_func.cpp @@ -178,7 +178,7 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) { args.push_back(Options::usePreg ? (static_cast(builder->CreateExprRegread(PTY_ptr, envPregIdx))) : (static_cast(builder->CreateExprDread(*envPtrSym)))); CallNode *postFuncCall = - builder->CreateStmtCallAssigned(MRTPostNativeFunc->GetPuidx(), args, nullptr, OP_callassigned); + builder->CreateStmtCallAssigned(mrtPostNativeFunc->GetPuidx(), args, nullptr, OP_callassigned); MapleVector allocCallArgs(func->GetCodeMempoolAllocator().Adapter()); if (!func->GetAttr(FUNCATTR_critical_native)) { @@ -232,7 +232,7 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) { CHECK_FATAL(stubFuncRet != nullptr, "stubfunc_ret is nullptr"); decodeArgs.push_back(builder->CreateExprDread(*stubFuncRet)); CallNode *decodeFuncCall = - builder->CreateStmtCallAssigned(MRTDecodeRefFunc->GetPuidx(), decodeArgs, stubFuncRet, OP_callassigned); + builder->CreateStmtCallAssigned(mrtDecodeRefFunc->GetPuidx(), decodeArgs, stubFuncRet, OP_callassigned); func->GetBody()->AddStatement(decodeFuncCall); } if (needNativeCall) { @@ -262,13 +262,13 @@ void NativeStubFuncGeneration::ProcessFunc(MIRFunction *func) { // check pending exception just before leaving this stub frame except for critical natives if (needCheckExceptionCall) { MapleVector getExceptArgs(func->GetCodeMempoolAllocator().Adapter()); - CallNode *callGetExceptFunc = builder->CreateStmtCallAssigned(MRTCheckThrowPendingExceptionFunc->GetPuidx(), + CallNode *callGetExceptFunc = builder->CreateStmtCallAssigned(mrtCheckThrowPendingExceptionFunc->GetPuidx(), getExceptArgs, nullptr, OP_callassigned); func->GetBody()->AddStatement(callGetExceptFunc); } else if (!func->GetAttr(FUNCATTR_critical_native) && !(existsFuncProperty && funcProperty.jniType == 
kJniTypeCriticalNeedArg)) { MapleVector frameStatusArgs(func->GetCodeMempoolAllocator().Adapter()); - CallNode *callSetFrameStatusFunc = builder->CreateStmtCallAssigned(MCCSetReliableUnwindContextFunc->GetPuidx(), + CallNode *callSetFrameStatusFunc = builder->CreateStmtCallAssigned(mccSetReliableUnwindContextFunc->GetPuidx(), frameStatusArgs, nullptr, OP_callassigned); func->GetBody()->AddStatement(callSetFrameStatusFunc); } @@ -465,7 +465,7 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun StmtNode *wrapperCall = CreateNativeWrapperCallNode(func, readFuncPtr, args, ret, needIndirectCall); ifStmt->GetThenPart()->AddStatement(wrapperCall); MapleVector opnds(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); - CallNode *callGetExceptFunc = builder->CreateStmtCallAssigned(MRTCheckThrowPendingExceptionFunc->GetPuidx(), + CallNode *callGetExceptFunc = builder->CreateStmtCallAssigned(mrtCheckThrowPendingExceptionFunc->GetPuidx(), opnds, nullptr, OP_callassigned); ifStmt->GetThenPart()->AddStatement(callGetExceptFunc); auto *elseBlock = func.GetCodeMempool()->New(); @@ -490,7 +490,7 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun ifStmt->GetThenPart()->AddStatement(callGetFindNativeFunc); if (!needCheckThrowPendingExceptionFunc) { MapleVector opnds(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); - CallNode *callGetExceptFunc = builder->CreateStmtCallAssigned(MRTCheckThrowPendingExceptionFunc->GetPuidx(), + CallNode *callGetExceptFunc = builder->CreateStmtCallAssigned(mrtCheckThrowPendingExceptionFunc->GetPuidx(), opnds, nullptr, OP_callassigned); ifStmt->GetThenPart()->AddStatement(callGetExceptFunc); } @@ -521,7 +521,7 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun if (!needCheckThrowPendingExceptionFunc) { MapleVector opnds(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); CallNode *callGetExceptFunc = - 
builder->CreateStmtCallAssigned(MRTCheckThrowPendingExceptionFunc->GetPuidx(), opnds, nullptr, OP_callassigned); + builder->CreateStmtCallAssigned(mrtCheckThrowPendingExceptionFunc->GetPuidx(), opnds, nullptr, OP_callassigned); ifStmt->GetThenPart()->AddStatement(callGetExceptFunc); } func.GetBody()->AddStatement(ifStmt); @@ -539,8 +539,7 @@ void NativeStubFuncGeneration::GenerateRegisteredNativeFuncCall(MIRFunction &fun // } // } StmtNode *NativeStubFuncGeneration::CreateNativeWrapperCallNode(MIRFunction &func, BaseNode *funcPtr, - const MapleVector &args, const MIRSymbol *ret, - bool needIndirectCall) { + const MapleVector &args, const MIRSymbol *ret, bool needIndirectCall) { #ifdef USE_ARM32_MACRO constexpr size_t numOfArgs = 4; #else @@ -561,9 +560,9 @@ StmtNode *NativeStubFuncGeneration::CreateNativeWrapperCallNode(MIRFunction &fun } } if (args.size() > numOfArgs) { - wrapperFunc = MRTCallSlowNativeExtFunc; + wrapperFunc = mrtCallSlowNativeExtFunc; } else { - wrapperFunc = MRTCallSlowNativeFunc[args.size()]; + wrapperFunc = mrtCallSlowNativeFunc[args.size()]; } if (ret == nullptr) { return builder->CreateStmtCall(wrapperFunc->GetPuidx(), wrapperArgs); @@ -583,11 +582,11 @@ void NativeStubFuncGeneration::GenerateHelperFuncDecl() { MIRType *voidPointerType = GlobalTables::GetTypeTable().GetVoidPtr(); MIRType *refType = GlobalTables::GetTypeTable().GetRef(); // MRT_PendingException - MRTCheckThrowPendingExceptionFunc = + mrtCheckThrowPendingExceptionFunc = builder->GetOrCreateFunction(namemangler::kCheckThrowPendingExceptionFunc, voidType->GetTypeIndex()); - CHECK_FATAL(MRTCheckThrowPendingExceptionFunc != nullptr, "MRTCheckThrowPendingExceptionFunc is null."); - MRTCheckThrowPendingExceptionFunc->SetAttr(FUNCATTR_nosideeffect); - MRTCheckThrowPendingExceptionFunc->SetBody(nullptr); + CHECK_FATAL(mrtCheckThrowPendingExceptionFunc != nullptr, "mrtCheckThrowPendingExceptionFunc is null."); + mrtCheckThrowPendingExceptionFunc->SetAttr(FUNCATTR_nosideeffect); + 
mrtCheckThrowPendingExceptionFunc->SetBody(nullptr); // MRT_PreNativeCall MRTPreNativeFunc = builder->GetOrCreateFunction(namemangler::kPreNativeFunc, voidType->GetTypeIndex()); CHECK_FATAL(MRTPreNativeFunc != nullptr, "MRTPreNativeFunc is null."); @@ -595,36 +594,36 @@ void NativeStubFuncGeneration::GenerateHelperFuncDecl() { // MRT_PostNativeCall ArgVector postArgs(GetMIRModule().GetMPAllocator().Adapter()); postArgs.push_back(ArgPair("env", voidPointerType)); - MRTPostNativeFunc = builder->GetOrCreateFunction(namemangler::kPostNativeFunc, voidType->GetTypeIndex()); - CHECK_FATAL(MRTPostNativeFunc != nullptr, "MRTPostNativeFunc is null."); - MRTPostNativeFunc->SetBody(nullptr); + mrtPostNativeFunc = builder->GetOrCreateFunction(namemangler::kPostNativeFunc, voidType->GetTypeIndex()); + CHECK_FATAL(mrtPostNativeFunc != nullptr, "mrtPostNativeFunc is null."); + mrtPostNativeFunc->SetBody(nullptr); // MRT_DecodeReference ArgVector decodeArgs(GetMIRModule().GetMPAllocator().Adapter()); decodeArgs.push_back(ArgPair("obj", refType)); - MRTDecodeRefFunc = builder->CreateFunction(namemangler::kDecodeRefFunc, *refType, decodeArgs); - CHECK_FATAL(MRTDecodeRefFunc != nullptr, "MRTDecodeRefFunc is null."); - MRTDecodeRefFunc->SetAttr(FUNCATTR_nosideeffect); - MRTDecodeRefFunc->SetBody(nullptr); + mrtDecodeRefFunc = builder->CreateFunction(namemangler::kDecodeRefFunc, *refType, decodeArgs); + CHECK_FATAL(mrtDecodeRefFunc != nullptr, "mrtDecodeRefFunc is null."); + mrtDecodeRefFunc->SetAttr(FUNCATTR_nosideeffect); + mrtDecodeRefFunc->SetBody(nullptr); // MCC_CallSlowNative ArgVector callArgs(GetMIRModule().GetMPAllocator().Adapter()); callArgs.push_back(ArgPair("func", voidPointerType)); for (int i = 0; i < kSlownativeFuncnum; ++i) { - MRTCallSlowNativeFunc[i] = builder->CreateFunction(callSlowNativeFuncs[i], *voidPointerType, callArgs); - CHECK_FATAL(MRTCallSlowNativeFunc[i] != nullptr, "MRTCallSlowNativeFunc is null."); - MRTCallSlowNativeFunc[i]->SetBody(nullptr); + 
mrtCallSlowNativeFunc[i] = builder->CreateFunction(callSlowNativeFuncs[i], *voidPointerType, callArgs); + CHECK_FATAL(mrtCallSlowNativeFunc[i] != nullptr, "mrtCallSlowNativeFunc is null."); + mrtCallSlowNativeFunc[i]->SetBody(nullptr); } // MCC_CallSlowNativeExt ArgVector callExtArgs(GetMIRModule().GetMPAllocator().Adapter()); callExtArgs.push_back(ArgPair("func", voidPointerType)); - MRTCallSlowNativeExtFunc = builder->CreateFunction(namemangler::kCallSlowNativeExt, *voidPointerType, callExtArgs); - CHECK_FATAL(MRTCallSlowNativeExtFunc != nullptr, "MRTCallSlowNativeExtFunc is null."); - MRTCallSlowNativeExtFunc->SetBody(nullptr); + mrtCallSlowNativeExtFunc = builder->CreateFunction(namemangler::kCallSlowNativeExt, *voidPointerType, callExtArgs); + CHECK_FATAL(mrtCallSlowNativeExtFunc != nullptr, "mrtCallSlowNativeExtFunc is null."); + mrtCallSlowNativeExtFunc->SetBody(nullptr); // MCC_SetReliableUnwindContext - MCCSetReliableUnwindContextFunc = + mccSetReliableUnwindContextFunc = builder->GetOrCreateFunction(namemangler::kSetReliableUnwindContextFunc, voidType->GetTypeIndex()); - CHECK_FATAL(MCCSetReliableUnwindContextFunc != nullptr, "MCCSetReliableUnwindContextFunc is null"); - MCCSetReliableUnwindContextFunc->SetAttr(FUNCATTR_nosideeffect); - MCCSetReliableUnwindContextFunc->SetBody(nullptr); + CHECK_FATAL(mccSetReliableUnwindContextFunc != nullptr, "mccSetReliableUnwindContextFunc is null"); + mccSetReliableUnwindContextFunc->SetAttr(FUNCATTR_nosideeffect); + mccSetReliableUnwindContextFunc->SetBody(nullptr); } void NativeStubFuncGeneration::GenerateRegTable() { diff --git a/src/mapleall/mpl2mpl/src/outline.cpp b/src/mapleall/mpl2mpl/src/outline.cpp index fa707962376f9b788407336e4e6e6308f56fc8c5..847a8a70e4040ba1fbefc4cea48ddcb1513e1499 100644 --- a/src/mapleall/mpl2mpl/src/outline.cpp +++ b/src/mapleall/mpl2mpl/src/outline.cpp @@ -121,7 +121,7 @@ class OutlineInfoCollector { } } private: - void CollectParameter(BaseNode &node) { + void 
CollectParameter(BaseNode &node) const { auto ®ionInputs = candidate->GetRegionCandidate()->GetRegionInPuts(); auto symbolRegPair = RegionCandidate::GetSymbolRegPair(node); if (regionInputs.find(symbolRegPair) == regionInputs.end()) { @@ -181,7 +181,8 @@ class OutlineRegionExtractor { case OP_callassigned: case OP_icallassigned: case OP_icallprotoassigned: - case OP_intrinsiccallassigned: { + case OP_intrinsiccallassigned: + case OP_intrinsiccallwithtypeassigned: { ReplaceCallReturnVector(node); break; } @@ -272,7 +273,7 @@ class OutlineRegionExtractor { candidate->GetReplacedStmtMap()[&node] = iassign; } - StIdx CreateNewStIdx(const StIdx oldStIdx) { + StIdx CreateNewStIdx(const StIdx oldStIdx) const { auto *oldSym = oldFunc->GetSymbolTabItem(oldStIdx.Idx()); auto *newSym = newFunc->GetSymTab()->CloneLocalSymbol(*oldSym); newSym->SetIsTmp(true); @@ -330,7 +331,7 @@ class OutlineRegionExtractor { return symbol; } - BaseNode *ReloadVar(BaseNode &originNode, BaseNode &newBase) { + BaseNode *ReloadVar(BaseNode &originNode, BaseNode &newBase) const { auto &dread = static_cast(originNode); auto *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(TyIdx(dread.GetPrimType())); return builder->CreateExprIread(dread.GetPrimType(), ptrType->GetTypeIndex(), 0, &newBase); @@ -429,7 +430,7 @@ class OutlineRegionExtractor { } } - void GenerateOutput() { + void GenerateOutput() const { newFunc->GetModule()->SetCurFunction(newFunc); if (endLabel != kInvalidLabelIdx) { newFunc->GetBody()->AddStatement(builder->CreateStmtLabel(endLabel)); diff --git a/src/mapleall/mpl2mpl/src/reflection_analysis.cpp b/src/mapleall/mpl2mpl/src/reflection_analysis.cpp index 9fd42c8b72d9acaf1ce477396ccb162fc142dbb8..332c04e97cdaeeeddb314c271d001f684634c9fd 100644 --- a/src/mapleall/mpl2mpl/src/reflection_analysis.cpp +++ b/src/mapleall/mpl2mpl/src/reflection_analysis.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include "vtable_analysis.h" #include "vtable_impl.h" #include 
"option.h" @@ -170,7 +171,7 @@ uint32_t ReflectionAnalysis::totalCStr = 0; std::map, std::string> ReflectionAnalysis::superClasesIdxMap{}; void ReflectionAnalysis::GenFieldTypeClassInfo(const MIRType &type, const Klass &klass, std::string &classInfo, - const std::string fieldName, bool &isClass) { + const std::string fieldName, bool &isClass) const { switch (type.GetKind()) { case kTypeScalar: { isClass = false; @@ -199,7 +200,7 @@ void ReflectionAnalysis::GenFieldTypeClassInfo(const MIRType &type, const Klass } } -bool ReflectionAnalysis::IsMemberClass(const std::string &annotationString) { +bool ReflectionAnalysis::IsMemberClass(const std::string &annotationString) const { uint32_t idx = ReflectionAnalysis::FindOrInsertReflectString(kEnclosingClassStr); std::string target = annoDelimiterPrefix + std::to_string(idx) + annoDelimiter; if (annotationString.find(target, 0) != std::string::npos) { @@ -208,18 +209,18 @@ bool ReflectionAnalysis::IsMemberClass(const std::string &annotationString) { return false; } -int8_t ReflectionAnalysis::GetAnnoFlag(const std::string &annotationString) { - constexpr int8_t kMemberPosValid = 1; - constexpr int8_t kMemberPosValidOffset = 2; - constexpr int8_t kIsMemberClassOffset = 1; +int8_t ReflectionAnalysis::GetAnnoFlag(const std::string &annotationString) const{ + constexpr uint8_t kMemberPosValid = 1; + constexpr uint8_t kMemberPosValidOffset = 2; + constexpr uint8_t kIsMemberClassOffset = 1; constexpr int8_t kNewMeta = 1; bool isMemberClass = IsMemberClass(annotationString); - int8_t value = (kMemberPosValid << kMemberPosValidOffset) + - (static_cast(isMemberClass) << kIsMemberClassOffset) + kNewMeta; + int8_t value = static_cast((kMemberPosValid << kMemberPosValidOffset) + + (static_cast(isMemberClass) << kIsMemberClassOffset)) + kNewMeta; return value; } -int ReflectionAnalysis::GetDeflateStringIdx(const std::string &subStr, bool needSpecialFlag) { +int ReflectionAnalysis::GetDeflateStringIdx(const std::string &subStr, bool 
needSpecialFlag) const { std::string flag = needSpecialFlag ? (std::to_string(GetAnnoFlag(subStr)) + annoDelimiter) : "1!"; return FindOrInsertReflectString(flag + subStr); } @@ -437,7 +438,7 @@ int ReflectionAnalysis::SolveAnnotation(MIRStructType &classType, const MIRFunct } uint32 ReflectionAnalysis::GetTypeNameIdxFromType(const MIRType &type, const Klass &klass, - const std::string &fieldName) { + const std::string &fieldName) const { uint32 typeNameIdx = 0; switch (type.GetKind()) { case kTypeScalar: { @@ -476,7 +477,7 @@ uint32 ReflectionAnalysis::GetTypeNameIdxFromType(const MIRType &type, const Kla return typeNameIdx; } -void ReflectionAnalysis::CheckPrivateInnerAndNoSubClass(Klass &clazz, const std::string &annoArr) { +void ReflectionAnalysis::CheckPrivateInnerAndNoSubClass(Klass &clazz, const std::string &annoArr) const { // LMain_24A_3B `EC!`VL!24!LMain_3B!`IC!`AF!4!2!name!23!A! uint32_t idx = FindOrInsertReflectString(kEnclosingClassStr); std::string target = annoDelimiterPrefix + std::to_string(idx) + annoDelimiter; @@ -522,7 +523,7 @@ void ReflectionAnalysis::ConvertMethodSig(std::string &signature) { void ReflectionAnalysis::GenAllMethodHash(std::vector> &methodInfoVec, std::unordered_map &baseNameMap, - std::unordered_map &fullNameMap) { + std::unordered_map &fullNameMap) const { std::vector methodVector; std::vector hashVector; for (auto &methodInfo : methodInfoVec) { @@ -565,7 +566,8 @@ uint16 GetFieldHash(const std::vector> &fieldV, con return 0; } -MIRSymbol *ReflectionAnalysis::GetOrCreateSymbol(const std::string &name, TyIdx tyIdx, bool needInit = false) { +MIRSymbol *ReflectionAnalysis::GetOrCreateSymbol(const std::string &name, const TyIdx &tyIdx, + bool needInit = false) const { const GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); MIRSymbol *st = GetSymbol(strIdx, tyIdx); if (st != nullptr && !needInit) { @@ -584,7 +586,7 @@ MIRSymbol *ReflectionAnalysis::GetOrCreateSymbol(const std::string &name, TyIdx return 
st; } -MIRSymbol *ReflectionAnalysis::GetSymbol(const std::string &name, TyIdx tyIdx) { +MIRSymbol *ReflectionAnalysis::GetSymbol(const std::string &name, const TyIdx &tyIdx) const { const GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); MIRSymbol *st = GetSymbol(strIdx, tyIdx); return st; @@ -599,7 +601,7 @@ static bool IsSameType(TyIdx tyIdx1, TyIdx tyIdx2) { return type1->EqualTo(*type2); } -MIRSymbol *ReflectionAnalysis::GetSymbol(GStrIdx strIdx, TyIdx tyIdx) { +MIRSymbol *ReflectionAnalysis::GetSymbol(GStrIdx strIdx, TyIdx tyIdx) const { MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); if (st != nullptr && st->GetSKind() == kStVar) { if (IsSameType(st->GetTyIdx(), tyIdx)) { @@ -616,7 +618,7 @@ MIRSymbol *ReflectionAnalysis::GetSymbol(GStrIdx strIdx, TyIdx tyIdx) { return nullptr; } -MIRSymbol *ReflectionAnalysis::CreateSymbol(GStrIdx strIdx, TyIdx tyIdx) { +MIRSymbol *ReflectionAnalysis::CreateSymbol(GStrIdx strIdx, TyIdx tyIdx) const { MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); st->SetStorageClass(kScGlobal); st->SetSKind(kStVar); @@ -633,8 +635,7 @@ bool ReflectionAnalysis::VtableFunc(const MIRFunction &func) const { bool RtRetentionPolicyCheck(const MIRSymbol &clInfo) { GStrIdx strIdx; - auto *annoType = - static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(clInfo.GetTyIdx())); + auto *annoType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(clInfo.GetTyIdx())); for (MIRPragma *p : annoType->GetPragmaVec()) { if (GlobalTables::GetStrTable().GetStringFromStrIdx( GlobalTables::GetTypeTable().GetTypeFromTyIdx(p->GetTyIdx())->GetNameStrIdx()) == @@ -723,8 +724,7 @@ struct HashCodeComparator { const std::unordered_map &basenameMp; const std::unordered_map &fullnameMp; HashCodeComparator(const std::unordered_map &arg1, - const std::unordered_map &arg2) - : basenameMp(arg1), fullnameMp(arg2) {} + const std::unordered_map &arg2) : basenameMp(arg1), fullnameMp(arg2) 
{} bool operator()(std::pair a, std::pair b) { const MIRSymbol *funcSymA = GlobalTables::GetGsymTable().GetSymbolFromStidx(a.first->first.Idx()); @@ -790,10 +790,9 @@ uint32 ReflectionAnalysis::GetMethodFlag(const MIRFunction &func) const { return flag; } -void ReflectionAnalysis::GenMethodMeta(const Klass &klass, MIRStructType &methodsInfoType, - const MIRSymbol &funcSym, MIRAggConst &aggConst, int idx, - std::unordered_map &baseNameMp, - std::unordered_map &fullNameMp) { +void ReflectionAnalysis::GenMethodMeta(const Klass &klass, MIRStructType &methodsInfoType, const MIRSymbol &funcSym, + MIRAggConst &aggConst, int idx, std::unordered_map &baseNameMp, + std::unordered_map &fullNameMp) { MIRFunction &func = *funcSym.GetFunction(); MIRAggConst &newConst = *mirModule->GetMemPool()->New(*mirModule, methodsInfoType); uint32 fieldID = 1; @@ -862,13 +861,11 @@ void ReflectionAnalysis::GenMethodMeta(const Klass &klass, MIRStructType &method } MIRSymbol *ReflectionAnalysis::GenMethodsMeta(const Klass &klass, - std::vector> &methodInfoVec, - std::unordered_map &baseNameMp, - std::unordered_map &fullNameMp) { + std::vector> &methodInfoVec, std::unordered_map &baseNameMp, + std::unordered_map &fullNameMp) { MIRStructType *classType = klass.GetMIRStructType(); size_t arraySize = classType->GetMethods().size(); - auto &methodsInfoType = - static_cast(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(methodsInfoTyIdx)); + auto &methodsInfoType = static_cast(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(methodsInfoTyIdx)); MIRArrayType &arrayType = *GlobalTables::GetTypeTable().GetOrCreateArrayType(methodsInfoType, arraySize); MIRAggConst *aggConst = mirModule->GetMemPool()->New(*mirModule, arrayType); ASSERT(aggConst != nullptr, "null ptr check!"); @@ -959,10 +956,8 @@ MIRSymbol *ReflectionAnalysis::GetMethodSignatureSymbol(std::string signature) { } void ReflectionAnalysis::GenMethodMetaCompact(const Klass &klass, MIRStructType &methodsInfoCompactType, int idx, - const MIRSymbol 
&funcSym, MIRAggConst &aggConst, - int &allDeclaringClassOffset, - std::unordered_map &baseNameMp, - std::unordered_map &fullNameMp) { + const MIRSymbol &funcSym, MIRAggConst &aggConst, int &allDeclaringClassOffset, + std::unordered_map &baseNameMp, std::unordered_map &fullNameMp) { MIRFunction &func = *funcSym.GetFunction(); MIRAggConst &newConstCompact = *mirModule->GetMemPool()->New(*mirModule, methodsInfoCompactType); std::vector methodsCompactLeb128Vec; @@ -1153,7 +1148,7 @@ MIRSymbol *ReflectionAnalysis::GenSuperClassMetaData(std::list superClas MIRArrayType &arrayType = *GlobalTables::GetTypeTable().GetOrCreateArrayType(superclassMetadataType, size); MIRSymbol *superclassArraySt = nullptr; - const auto &itFindSuper = superClasesIdxMap.find(superClassList); + const auto &itFindSuper = std::as_const(superClasesIdxMap).find(superClassList); if (itFindSuper == superClasesIdxMap.end()) { std::string superClassArrayInfo = SUPERCLASSINFO_PREFIX_STR + std::to_string(superClasesIdxMap.size()); superClasesIdxMap[superClassList] = superClassArrayInfo; @@ -1408,7 +1403,7 @@ MIRSymbol *ReflectionAnalysis::GenFieldsMetaData(const Klass &klass, bool isHot) return fieldsArraySt; } -void ReflectionAnalysis::ConvertMapleClassName(const std::string &mplClassName, std::string &javaDsp) { +void ReflectionAnalysis::ConvertMapleClassName(const std::string &mplClassName, std::string &javaDsp) const { // Convert classname end with _3B, 3 is strlen("_3B") unsigned int len = strlen(kClassSuffix); if (mplClassName.size() > len && mplClassName.rfind(kClassSuffix, mplClassName.size() - len) != std::string::npos) { @@ -1418,7 +1413,7 @@ void ReflectionAnalysis::ConvertMapleClassName(const std::string &mplClassName, } } -void ReflectionAnalysis::AppendValueByType(std::string &annoArr, const MIRPragmaElement &elem) { +void ReflectionAnalysis::AppendValueByType(std::string &annoArr, const MIRPragmaElement &elem) const { std::ostringstream oss; std::string tmp; switch (elem.GetType()) { @@ 
-1631,7 +1626,7 @@ uint32 ReflectionAnalysis::GetAnnoCstrIndex(std::map &idxNumMap, const return signatureIdx; } -uint32 ReflectionAnalysis::BKDRHash(const std::string &strName, uint32 seed) { +uint32 ReflectionAnalysis::BKDRHash(const std::string &strName, uint32 seed) const { uint32 hash = 0; for (auto name : strName) { auto uName = static_cast(name); @@ -1640,7 +1635,7 @@ uint32 ReflectionAnalysis::BKDRHash(const std::string &strName, uint32 seed) { return hash; } -uint32 ReflectionAnalysis::GetHashIndex(const std::string &strName) { +uint32 ReflectionAnalysis::GetHashIndex(const std::string &strName) const { constexpr int hashSeed = 211; return BKDRHash(strName, hashSeed); } @@ -1663,7 +1658,7 @@ void ReflectionAnalysis::GenHotClassNameString(const Klass &klass) { (void)ReflectionAnalysis::FindOrInsertRepeatString(klassJavaDescriptor, true); // Always used. } -uint32 ReflectionAnalysis::FindOrInsertReflectString(const std::string &str) { +uint32 ReflectionAnalysis::FindOrInsertReflectString(const std::string &str) const { uint8 hotType = 0; bool isHot = mirModule->GetProfile().CheckReflectionStrHot(str, hotType); if (isHot) { @@ -1673,7 +1668,7 @@ uint32 ReflectionAnalysis::FindOrInsertReflectString(const std::string &str) { return ReflectionAnalysis::FindOrInsertRepeatString(str, isHot, hotType); } -MIRSymbol *ReflectionAnalysis::GetClinitFuncSymbol(const Klass &klass) { +MIRSymbol *ReflectionAnalysis::GetClinitFuncSymbol(const Klass &klass) const { MIRStructType *classType = klass.GetMIRStructType(); if (classType == nullptr || classType->GetMethods().empty()) { return nullptr; @@ -1921,7 +1916,7 @@ void ReflectionAnalysis::GenClassMetaData(Klass &klass) { classTab.push_back(classSt); } -int8 ReflectionAnalysis::JudgePara(MIRStructType &classType) { +int8 ReflectionAnalysis::JudgePara(MIRStructType &classType) const { for (MIRPragma *prag : classType.GetPragmaVec()) { if (prag->GetKind() == kPragmaClass) { if 
((GlobalTables::GetTypeTable().GetTypeFromTyIdx(prag->GetTyIdx())->GetName() == @@ -1934,14 +1929,14 @@ int8 ReflectionAnalysis::JudgePara(MIRStructType &classType) { return 0; } -bool ReflectionAnalysis::IsAnonymousClass(const std::string &annotationString) { +bool ReflectionAnalysis::IsAnonymousClass(const std::string &annotationString) const { // eg: `IC!`AF!4!0!name!30!! uint32_t idx = ReflectionAnalysis::FindOrInsertReflectString(kInnerClassStr); std::string target = annoDelimiterPrefix + std::to_string(idx) + annoDelimiter; size_t pos = annotationString.find(target, 0); if (pos != std::string::npos) { int i = kAnonymousClassIndex; - while (i--) { + while ((i--) != 0) { pos = annotationString.find("!", pos + 1); CHECK_FATAL(pos != std::string::npos, "Error:annotationString in func: isAnonymousClass()"); } @@ -1953,7 +1948,7 @@ bool ReflectionAnalysis::IsAnonymousClass(const std::string &annotationString) { return false; } -bool ReflectionAnalysis::IsLocalClass(const std::string annotationString) { +bool ReflectionAnalysis::IsLocalClass(const std::string annotationString) const { uint32_t idx = ReflectionAnalysis::FindOrInsertReflectString(kEnclosingMethod); std::string target = annoDelimiterPrefix + std::to_string(idx) + annoDelimiter; size_t pos = annotationString.find(target, 0); @@ -2159,7 +2154,7 @@ static void ReflectionAnalysisGenStrTab(MIRModule &mirModule, const std::string strTabSt->SetStorageClass(kScFstatic); for (char c : strTab) { MIRConst *newConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( - static_cast(c), *GlobalTables::GetTypeTable().GetUInt8()); + static_cast(static_cast(c)), *GlobalTables::GetTypeTable().GetUInt8()); strTabAggconst->AddItem(newConst, 0); } strTabSt->SetKonst(strTabAggconst); @@ -2178,7 +2173,7 @@ void ReflectionAnalysis::GenStrTab(MIRModule &module) { ReflectionAnalysisGenStrTab(module, strTab, strTabName); } -void ReflectionAnalysis::MarkWeakMethods() { +void ReflectionAnalysis::MarkWeakMethods() const { if 
(!isLibcore) { return; } @@ -2203,7 +2198,7 @@ void ReflectionAnalysis::MarkWeakMethods() { } } -void ReflectionAnalysis::DumpPGOSummary() { +void ReflectionAnalysis::DumpPGOSummary() const { LogInfo::MapleLogger() << "PGO summary \n"; LogInfo::MapleLogger() << "hot method meta " << ReflectionAnalysis::hotMethodMeta << " total " << ReflectionAnalysis::totalMethodMeta << std::setprecision(2) << " ratio " diff --git a/src/mapleall/mpl2mpl/src/retype.cpp b/src/mapleall/mpl2mpl/src/retype.cpp index 5b74b145a1b462ef674cb9fd8a6b9c75e4bd9e14..cda59ff8a81cdf214edb9425e8aaa5072f662018 100644 --- a/src/mapleall/mpl2mpl/src/retype.cpp +++ b/src/mapleall/mpl2mpl/src/retype.cpp @@ -85,9 +85,9 @@ void Retype::DoRetype() { if (func->IsEmpty()) { continue; } - for (auto pair : func->GetAliasVarMap()) { - GStrIdx regNameStrIdx = pair.second.mplStrIdx; - reg2varGenericInfo[regNameStrIdx] = pair.second; + for (auto pairIter = func->GetAliasVarMap().cbegin(); pairIter != func->GetAliasVarMap().cend(); ++pairIter) { + GStrIdx regNameStrIdx = pairIter->second.mplStrIdx; + reg2varGenericInfo[regNameStrIdx] = pairIter->second; } RetypeStmt(*func); reg2varGenericInfo.clear(); diff --git a/src/mapleall/mpl2mpl/src/scalarreplacement.cpp b/src/mapleall/mpl2mpl/src/scalarreplacement.cpp index 44a07c0023dd1e69604a7acb46408c8b516d39d8..d5b5732389b0014013305d19f427934164284eff 100644 --- a/src/mapleall/mpl2mpl/src/scalarreplacement.cpp +++ b/src/mapleall/mpl2mpl/src/scalarreplacement.cpp @@ -23,7 +23,7 @@ // perform the actual optimization. 
namespace maple { template -void ScalarReplacement::IterateStmt(StmtNode *stmt, Func const &applyFunc) { +void ScalarReplacement::IterateStmt(StmtNode *stmt, Func const &applyFunc) const { if (!stmt) { return; } @@ -59,7 +59,7 @@ void ScalarReplacement::IterateStmt(StmtNode *stmt, Func const &applyFunc) { } template -BaseNode *ScalarReplacement::IterateExpr(StmtNode *stmt, BaseNode *expr, Func const &applyFunc) { +BaseNode *ScalarReplacement::IterateExpr(StmtNode *stmt, BaseNode *expr, Func const &applyFunc) const { if (!expr) { return expr; } @@ -198,15 +198,15 @@ bool ScalarReplacement::CanBeReplaced(const StmtVec *refs) const { return true; } -BaseNode *ScalarReplacement::ReplaceDassignDread(StmtNode *stmt, BaseNode *opnd) { +BaseNode *ScalarReplacement::ReplaceDassignDread(StmtNode &stmt, BaseNode *opnd) const { if (!opnd) { // opnd nullptr means to handle the statment itself - if (stmt->GetOpCode() == OP_dassign) { - DassignNode *dassignNode = static_cast(stmt); - if (dassignNode->GetStIdx() == curSym->GetStIdx() && dassignNode->GetFieldID() == curFieldid) { + if (stmt.GetOpCode() == OP_dassign) { + DassignNode &dassignNode = static_cast(stmt); + if (dassignNode.GetStIdx() == curSym->GetStIdx() && dassignNode.GetFieldID() == curFieldid) { // Update to the new scalar - dassignNode->SetStIdx(newScalarSym->GetStIdx()); - dassignNode->SetFieldID(0); + dassignNode.SetStIdx(newScalarSym->GetStIdx()); + dassignNode.SetFieldID(0); } } } else if (opnd->GetOpCode() == OP_dread || opnd->GetOpCode() == OP_addrof) { @@ -252,7 +252,7 @@ void ScalarReplacement::ReplaceWithScalar(const StmtVec *refs) { AppendLocalRefCleanup(newScalarSym); } for (StmtNode *stmt : (*refs)) { - IterateStmt(stmt, [this](StmtNode *stmt, BaseNode *expr) { return this->ReplaceDassignDread(stmt, expr); }); + IterateStmt(stmt, [this](StmtNode *stmt, BaseNode *expr) { return this->ReplaceDassignDread(*stmt, expr); }); } } } diff --git a/src/mapleall/mpl2mpl/src/simplify.cpp 
b/src/mapleall/mpl2mpl/src/simplify.cpp index 1e29f825452181b76b00bfc57357d45166b3b903..541145cc3dec3b99aecae93d82c4538683b7eba6 100644 --- a/src/mapleall/mpl2mpl/src/simplify.cpp +++ b/src/mapleall/mpl2mpl/src/simplify.cpp @@ -17,8 +17,8 @@ #include #include #include -#include "triple.h" #include "constantfold.h" +#include "../../maple_be/include/cg/cg_option.h" namespace maple { @@ -39,6 +39,10 @@ constexpr char kFuncNameOfSprintfS[] = "sprintf_s"; constexpr char kFuncNameOfSnprintfS[] = "snprintf_s"; constexpr char kFuncNameOfVsnprintfS[] = "vsnprintf_s"; constexpr uint64_t kSecurecMemMaxLen = 0x7fffffffUL; +static constexpr int64_t kWidthLL = 64; +static constexpr int64_t kWidthInt = 32; +static constexpr int64_t kWidthShort = 16; +static constexpr int64_t kWidthChar = 8; static constexpr int32 kProbUnlikely = 1000; constexpr uint32_t kMemOpDstOpndIdx = 0; constexpr uint32_t kMemOpSDstSizeOpndIdx = 1; @@ -78,7 +82,7 @@ MIRConst *TruncateUnionConstant(const MIRStructType &unionType, MIRConst *fieldC return fieldCst; } - bool isBigEndian = MeOption::IsBigEndian() || Options::IsBigEndian(); + bool isBigEndian = Options::IsBigEndian(); IntVal val = intCst->GetValue(); uint8 bitSize = bitFieldType->GetFieldSize(); @@ -87,7 +91,7 @@ MIRConst *TruncateUnionConstant(const MIRStructType &unionType, MIRConst *fieldC } if (isBigEndian) { - val = val.LShr(static_cast(val.GetBitWidth() - bitSize)); + val = val.LShr(static_cast(val.GetBitWidth()) - static_cast(bitSize)); } else { val = val & ((uint64(1) << bitSize) - 1); } @@ -108,13 +112,13 @@ static void MayPrintLog(bool debug, bool success, OpKind opKind, const char *str return; } const char *op = ""; - if (opKind == MEM_OP_memset) { + if (opKind == kMemOpMemset) { op = "memset"; - } else if (opKind == MEM_OP_memcpy) { + } else if (opKind == kMemOpMemcpy) { op = "memcpy"; - } else if (opKind == MEM_OP_memset_s) { + } else if (opKind == kMemOpMemsetS) { op = "memset_s"; - } else if (opKind == KMemOpMemcpyS) { + } else if 
(opKind == kMemOpMemcpyS) { op = "memcpy_s"; } LogInfo::MapleLogger() << op << " expand " << (success ? "success: " : "failure: ") << str << std::endl; @@ -193,35 +197,6 @@ void Simplify::SimplifyCallAssigned(StmtNode &stmt, BlockNode &block) { } constexpr uint32 kUpperLimitOfFieldNum = 10; -static MIRStructType *GetDassignedStructType(const DassignNode *dassign, MIRFunction *func) { - const auto &lhsStIdx = dassign->GetStIdx(); - auto lhsSymbol = func->GetLocalOrGlobalSymbol(lhsStIdx); - ASSERT_NOT_NULL(lhsSymbol); - auto lhsAggType = lhsSymbol->GetType(); - ASSERT_NOT_NULL(lhsAggType); - if (!lhsAggType->IsStructType()) { - return nullptr; - } - if (lhsAggType->GetKind() == kTypeUnion) { // no need to split union's field - return nullptr; - } - auto lhsFieldID = dassign->GetFieldID(); - if (lhsFieldID != 0) { - CHECK_FATAL(lhsAggType->IsStructType(), "only struct has non-zero fieldID"); - lhsAggType = static_cast(lhsAggType)->GetFieldType(lhsFieldID); - if (!lhsAggType->IsStructType()) { - return nullptr; - } - if (lhsAggType->GetKind() == kTypeUnion) { // no need to split union's field - return nullptr; - } - } - if (static_cast(lhsAggType)->NumberOfFieldIDs() > kUpperLimitOfFieldNum) { - return nullptr; - } - return static_cast(lhsAggType); -} - static MIRStructType *GetIassignedStructType(const IassignNode *iassign) { auto ptrTyIdx = iassign->GetTyIdx(); auto *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyIdx); @@ -281,12 +256,15 @@ static StmtNode *SplitAggCopy(const AssignType *assignNode, MIRStructType *struc return nullptr; } - for (FieldID id = 1; id <= static_cast(structureType->NumberOfFieldIDs()); ++id) { + FieldID id = 1; + while (id <= static_cast(structureType->NumberOfFieldIDs())) { MIRType *fieldType = structureType->GetFieldType(id); if (fieldType->GetSize() == 0) { + id++; continue; // field size is zero for empty struct/union; } if (fieldType->GetKind() == kTypeBitField && static_cast(fieldType)->GetFieldSize() == 0) { + id++; 
continue; // bitfield size is zero } auto *newAssign = assignNode->CloneTree(func->GetCodeMemPoolAllocator()); @@ -299,37 +277,30 @@ static StmtNode *SplitAggCopy(const AssignType *assignNode, MIRStructType *struc if (fieldType->IsMIRUnionType()) { id += static_cast(fieldType->NumberOfFieldIDs()); } + id++; } auto newAssign = assignNode->GetNext(); block->RemoveStmt(assignNode); return newAssign; } -static StmtNode *SplitDassignAggCopy(DassignNode *dassign, BlockNode *block, MIRFunction *func) { - auto *rhs = dassign->GetRHS(); - if (rhs->GetPrimType() != PTY_agg) { - return nullptr; - } - - auto *lhsAggType = GetDassignedStructType(dassign, func); - if (lhsAggType == nullptr) { - return nullptr; - } - - if (rhs->GetOpCode() == OP_dread) { - auto *lhsSymbol = func->GetLocalOrGlobalSymbol(dassign->GetStIdx()); - auto *rhsSymbol = func->GetLocalOrGlobalSymbol(static_cast(rhs)->GetStIdx()); - ASSERT_NOT_NULL(lhsSymbol); - ASSERT_NOT_NULL(rhsSymbol); - if (!lhsSymbol->IsLocal() && !rhsSymbol->IsLocal()) { - return nullptr; +static bool RegularSplittableAgg(MIRStructType &aggType) { + FieldID id = 1; + while (id <= static_cast(aggType.NumberOfFieldIDs())) { + auto *fieldType = aggType.GetFieldType(id); + if (fieldType->IsMIRStructType()) { + ++id; + continue; } - - return SplitAggCopy(dassign, lhsAggType, block, func); - } else if (rhs->GetOpCode() == OP_iread) { - return SplitAggCopy(dassign, lhsAggType, block, func); + if (!fieldType->IsScalarType() && !fieldType->IsMIRPtrType()) { + return false; + } + if (GetRegPrimType(fieldType->GetPrimType()) != fieldType->GetPrimType()) { + return false; + } + ++id; } - return nullptr; + return true; } static StmtNode *SplitIassignAggCopy(IassignNode *iassign, BlockNode *block, MIRFunction *func) { @@ -343,6 +314,10 @@ static StmtNode *SplitIassignAggCopy(IassignNode *iassign, BlockNode *block, MIR return nullptr; } + if (!RegularSplittableAgg(*lhsAggType)) { + return nullptr; + } + if (rhs->GetOpCode() == OP_dread) { return 
SplitAggCopy(iassign, lhsAggType, block, func); } else if (rhs->GetOpCode() == OP_iread) { @@ -366,7 +341,7 @@ bool UseGlobalVar(const BaseNode *expr) { return false; } -StmtNode *Simplify::SimplifyToSelect(MIRFunction *func, IfStmtNode *ifNode, BlockNode *block) { +StmtNode *Simplify::SimplifyToSelect(MIRFunction &func, IfStmtNode *ifNode, BlockNode *block) const { // Example: if (condition) { // Example: res = trueRes // Example: } @@ -378,7 +353,7 @@ StmtNode *Simplify::SimplifyToSelect(MIRFunction *func, IfStmtNode *ifNode, Bloc if (ifNode->GetPrev() != nullptr && ifNode->GetPrev()->GetOpCode() == OP_label) { // simplify shortCircuit will stop opt in cfg_opt, and generate extra compare auto *labelNode = static_cast(ifNode->GetPrev()); - const std::string &labelName = func->GetLabelTabItem(labelNode->GetLabelIdx()); + const std::string &labelName = func.GetLabelTabItem(labelNode->GetLabelIdx()); if (labelName.find("shortCircuit") != std::string::npos) { return nullptr; } @@ -419,7 +394,7 @@ StmtNode *Simplify::SimplifyToSelect(MIRFunction *func, IfStmtNode *ifNode, Bloc if (UseGlobalVar(thenDass->GetRHS()) || UseGlobalVar(elseDass->GetRHS())) { return nullptr; } - MIRBuilder *mirBuiler = func->GetModule()->GetMIRBuilder(); + MIRBuilder *mirBuiler = func.GetModule()->GetMIRBuilder(); MIRType *type = GlobalTables::GetTypeTable().GetPrimType(thenDass->GetRHS()->GetPrimType()); auto *selectExpr = mirBuiler->CreateExprTernary(OP_select, *type, ifNode->Opnd(0), thenDass->GetRHS(), elseDass->GetRHS()); @@ -449,21 +424,21 @@ static bool ExtractBitField(const MIRPtrType &type, FieldID fldID, BitFieldExtra } auto bitOffset = type.GetPointedType()->GetBitOffsetFromBaseAddr(fldID); auto extractSize = static_cast(fieldType)->GetFieldSize(); - if ((bitOffset / LLONG_WIDTH) != ((bitOffset + extractSize) / LLONG_WIDTH)) { + if ((bitOffset / kWidthLL) != ((bitOffset + extractSize) / kWidthLL)) { return false; } - if (bitOffset % CHAR_WIDTH == 0 && (extractSize == CHAR_WIDTH || 
extractSize == SHRT_WIDTH || - extractSize == INT_WIDTH || extractSize == LLONG_WIDTH)) { + if (bitOffset % kWidthChar == 0 && (extractSize == kWidthChar || extractSize == kWidthShort || + extractSize == kWidthInt || extractSize == kWidthLL)) { return false; } - auto byteOffset = (bitOffset / LLONG_WIDTH) * CHAR_WIDTH; // expand the read length to 64 bit + auto byteOffset = (bitOffset / kWidthLL) * kWidthChar; // expand the read length to 64 bit auto *readType = GlobalTables::GetTypeTable().GetUInt64(); - if ((bitOffset / INT_WIDTH) == ((bitOffset + extractSize) / INT_WIDTH)) { - byteOffset = (bitOffset / INT_WIDTH) * INT_WIDTH / CHAR_WIDTH; // expand the read length to 32 bit + if ((bitOffset / kWidthInt) == ((bitOffset + extractSize) / kWidthInt)) { + byteOffset = (bitOffset / kWidthInt) * kWidthInt / kWidthChar; // expand the read length to 32 bit readType = GlobalTables::GetTypeTable().GetUInt32(); } bfe.byteOffset = byteOffset; - bfe.extractStart = bitOffset - byteOffset * CHAR_WIDTH; + bfe.extractStart = bitOffset - byteOffset * kWidthChar; bfe.extractSize = extractSize; bfe.extractType = readType; return true; @@ -472,7 +447,7 @@ static bool ExtractBitField(const MIRPtrType &type, FieldID fldID, BitFieldExtra // Bitfield can not write directly, when write 2 bitfields that belong to the same 4-bytes memory, // we can expose the 4-bytes memory's read & write to remove partial/fully redundant. This function // lowers bitfield write to `4-bytes memory's read + bits insert + 4-bytes memory write`. 
-StmtNode *Simplify::SimplifyBitFieldWrite(const IassignNode &iass) { +StmtNode *Simplify::SimplifyBitFieldWrite(const IassignNode &iass) const { if (iass.GetFieldID() == 0) { return nullptr; } @@ -504,6 +479,103 @@ StmtNode *Simplify::SimplifyBitFieldWrite(const IassignNode &iass) { return newIass; } +uint64 GetTLSVarOffset(MIRModule &m, const MIRSymbol &st) { + uint64 offset = 0; + if (!st.IsConst()) { + MapleMap &tbssVarOffset = m.GetTbssVarOffset(); + if (tbssVarOffset.find(&st) != tbssVarOffset.end()) { + offset = tbssVarOffset.at(&st); + } else { + CHECK_FATAL_FALSE("All uninitialized TLS should be in tbssVarOffset"); + } + } else { + MapleMap &tdataVarOffset = m.GetTdataVarOffset(); + if (tdataVarOffset.find(&st) != tdataVarOffset.end()) { + offset = tdataVarOffset.at(&st); + } else { + CHECK_FATAL_FALSE("All initialized TLS should be in tdataVarOffset"); + } + } + return offset; +} + +StmtNode *LocalDynamicTLSOptDassign(const DassignNode &dassign, BlockNode &block, MIRFunction &func) { + MIRBuilder *mirBuilder = func.GetModule()->GetMIRBuilder(); + MIRSymbol *lhsSymbol = func.GetLocalOrGlobalSymbol(dassign.GetStIdx()); + BaseNode *rhs = dassign.GetRHS(); + MIRType *lhsType = lhsSymbol->GetType(); + if (MustBeAddress(lhsType->GetPrimType())) { + lhsType = GlobalTables::GetTypeTable().GetAddr64(); + } + MIRType *lhsPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*lhsType); + ConstvalNode *offset = mirBuilder->CreateIntConst(GetTLSVarOffset(*func.GetModule(), *lhsSymbol), PTY_u64); + MIRType *addrsType = GlobalTables::GetTypeTable().GetUInt64(); + + MapleVector args0(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + IntrinsicopNode *tlsAnchor = nullptr; + if (!lhsSymbol->IsConst()) { + tlsAnchor = mirBuilder->CreateExprIntrinsicop(INTRN_C___tls_get_tbss_anchor, OP_intrinsicop, *addrsType, args0); + } else { + tlsAnchor = mirBuilder->CreateExprIntrinsicop(INTRN_C___tls_get_tdata_anchor, OP_intrinsicop, *addrsType, args0); + } + BaseNode 
*addTlsGlobalOffset = mirBuilder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetPtr(), + tlsAnchor, offset); + StmtNode *newAssign = nullptr; + newAssign = mirBuilder->CreateStmtIassign(*lhsPtrType, dassign.GetFieldID(), addTlsGlobalOffset, rhs); + block.ReplaceStmt1WithStmt2(&dassign, newAssign); + return newAssign; +} + +BaseNode *LocalDynamicTLSOptDread(const DreadNode &dread, MIRFunction &func) { + MIRBuilder *mirBuilder = func.GetModule()->GetMIRBuilder(); + MIRSymbol *lhsSymbol = func.GetLocalOrGlobalSymbol(dread.GetStIdx()); + + MIRType *lhsType = lhsSymbol->GetType(); + MIRType *varType = nullptr; + if (MustBeAddress(lhsType->GetPrimType())) { + varType = GlobalTables::GetTypeTable().GetAddr64(); + } else if (lhsType->IsMIRStructType()) { + varType = static_cast(lhsType)->GetFieldType(dread.GetFieldID()); + } else { + varType = lhsType; + } + MIRType *lhsPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*lhsType); + + ConstvalNode *offset = mirBuilder->CreateIntConst(GetTLSVarOffset(*func.GetModule(), *lhsSymbol), PTY_u64); + MIRType *addrsType = GlobalTables::GetTypeTable().GetUInt64(); + + MapleVector args0(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + IntrinsicopNode *tlsAnchor = nullptr; + if (!lhsSymbol->IsConst()) { + tlsAnchor = mirBuilder->CreateExprIntrinsicop(INTRN_C___tls_get_tbss_anchor, OP_intrinsicop, *addrsType, args0); + } else { + tlsAnchor = mirBuilder->CreateExprIntrinsicop(INTRN_C___tls_get_tdata_anchor, OP_intrinsicop, *addrsType, args0); + } + BaseNode *addTlsGlobalOffset = mirBuilder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetPtr(), + tlsAnchor, offset); + IreadNode *newRead = nullptr; + newRead = mirBuilder->CreateExprIread(*varType, *lhsPtrType, dread.GetFieldID(), addTlsGlobalOffset); + return newRead; +} + +BaseNode *LocalDynamicTLSOptAddrof(const AddrofNode &addrofNode, MIRFunction &func) { + MIRBuilder *mirBuilder = func.GetModule()->GetMIRBuilder(); + MIRSymbol *lhsSymbol = 
func.GetLocalOrGlobalSymbol(addrofNode.GetStIdx()); + ConstvalNode *offset = mirBuilder->CreateIntConst(GetTLSVarOffset(*func.GetModule(), *lhsSymbol), PTY_u64); + MIRType *addrsType = GlobalTables::GetTypeTable().GetUInt64(); + + MapleVector args0(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + IntrinsicopNode *tlsAnchor = nullptr; + if (!lhsSymbol->IsConst()) { + tlsAnchor = mirBuilder->CreateExprIntrinsicop(INTRN_C___tls_get_tbss_anchor, OP_intrinsicop, *addrsType, args0); + } else { + tlsAnchor = mirBuilder->CreateExprIntrinsicop(INTRN_C___tls_get_tdata_anchor, OP_intrinsicop, *addrsType, args0); + } + BaseNode *addTlsGlobalOffset = mirBuilder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetPtr(), + tlsAnchor, offset); + return addTlsGlobalOffset; +} + void Simplify::ProcessStmt(StmtNode &stmt) { switch (stmt.GetOpCode()) { case OP_callassigned: { @@ -517,9 +589,13 @@ void Simplify::ProcessStmt(StmtNode &stmt) { break; } case OP_dassign: { - auto *newStmt = SplitDassignAggCopy(static_cast(&stmt), currBlock, currFunc); - if (newStmt) { - ProcessBlock(*newStmt); + if (opts::aggressiveTlsLocalDynamicOpt || maplebe::CGOptions::IsShlib()) { + MIRSymbol *symbol = currFunc->GetLocalOrGlobalSymbol(static_cast(&stmt)->GetStIdx()); + if (symbol && symbol->IsThreadLocal() && symbol->GetStorageClass() != kScExtern && + (opts::aggressiveTlsLocalDynamicOpt || symbol->IsHiddenVisibility())) { + StmtNode *newStmtTLS = LocalDynamicTLSOptDassign(static_cast(stmt), *currBlock, *currFunc); + ProcessStmt(*newStmtTLS); + } } break; } @@ -556,13 +632,40 @@ BaseNode *Simplify::SimplifyExpr(BaseNode &expr) { switch (expr.GetOpCode()) { case OP_dread: { auto &dread = static_cast(expr); - return ReplaceExprWithConst(dread); + BaseNode &tmpNode = *ReplaceExprWithConst(dread); + if (tmpNode.GetOpCode() == OP_dread && (opts::aggressiveTlsLocalDynamicOpt || maplebe::CGOptions::IsShlib())) { + MIRSymbol *symbol = 
currFunc->GetLocalOrGlobalSymbol(static_cast(tmpNode).GetStIdx()); + if (symbol && symbol->IsThreadLocal() && symbol->GetStorageClass() != kScExtern && + (opts::aggressiveTlsLocalDynamicOpt || symbol->IsHiddenVisibility())) { + BaseNode *newNode = LocalDynamicTLSOptDread(static_cast(tmpNode), *currFunc); + return newNode; + } + } + return &tmpNode; } case OP_iread: { - return SimplifyBitFieldRead(static_cast(expr)); + BaseNode *tmpNode = SimplifyBitFieldRead(static_cast(expr)); + for (size_t i = 0; i < expr.GetNumOpnds(); i++) { + if (expr.Opnd(i)) { + expr.SetOpnd(SimplifyExpr(*expr.Opnd(i)), i); + } + } + return tmpNode; + } + case OP_addrof: { + if (opts::aggressiveTlsLocalDynamicOpt || maplebe::CGOptions::IsShlib()) { + AddrofNode &addrofNode = static_cast(expr); + MIRSymbol *symbol = currFunc->GetLocalOrGlobalSymbol(addrofNode.GetStIdx()); + if (symbol && symbol->IsThreadLocal() && symbol->GetStorageClass() != kScExtern && + (opts::aggressiveTlsLocalDynamicOpt || symbol->IsHiddenVisibility())) { + BaseNode *newNode = LocalDynamicTLSOptAddrof(addrofNode, *currFunc); + return newNode; + } + } + break; } default: { - for (auto i = 0; i < expr.GetNumOpnds(); i++) { + for (size_t i = 0; i < expr.GetNumOpnds(); i++) { if (expr.Opnd(i)) { expr.SetOpnd(SimplifyExpr(*expr.Opnd(i)), i); } @@ -576,7 +679,7 @@ BaseNode *Simplify::SimplifyExpr(BaseNode &expr) { // Bitfield can not read directly, when read 2 bitfields that belong to the same 4-bytes memory, // we can expose the 4-bytes memory's read to remove partial/fully redundant. This function lowers // bitfield read to `4-bytes memory's read + bits extract. 
-BaseNode *Simplify::SimplifyBitFieldRead(IreadNode &iread) { +BaseNode *Simplify::SimplifyBitFieldRead(IreadNode &iread) const { if (iread.GetFieldID() == 0) { return &iread; } @@ -611,7 +714,7 @@ BaseNode *Simplify::SimplifyBitFieldRead(IreadNode &iread) { return extract; } -BaseNode *Simplify::ReplaceExprWithConst(DreadNode &dread) { +BaseNode *Simplify::ReplaceExprWithConst(DreadNode &dread) const { auto stIdx = dread.GetStIdx(); auto fieldId = dread.GetFieldID(); auto *symbol = currFunc->GetLocalOrGlobalSymbol(stIdx); @@ -658,7 +761,7 @@ bool Simplify::IsConstRepalceable(const MIRConst &mirConst) const { } } -MIRConst *Simplify::GetElementConstFromFieldId(FieldID fieldId, MIRConst &mirConst) { +MIRConst *Simplify::GetElementConstFromFieldId(FieldID fieldId, MIRConst &mirConst) const { FieldID currFieldId = 1; MIRConst *resultConst = nullptr; auto originAggConst = static_cast(mirConst); @@ -666,13 +769,17 @@ MIRConst *Simplify::GetElementConstFromFieldId(FieldID fieldId, MIRConst &mirCon bool reached = false; bool isUpperLayerUnion = false; std::function traverseAgg = [&] (MIRConst *currConst, MIRType *currType) { + if (currType->GetKind() == kTypeArray) { + currFieldId += static_cast(currType->EmbeddedStructType()->GetFieldsSize()); + return; + } if (isUpperLayerUnion && (!currConst || currConst->GetKind() != kConstAggConst)) { reached = currFieldId == fieldId; resultConst = reached ? 
currConst : resultConst; return; } auto *currAggConst = safe_cast(currConst); - auto *currAggType = safe_cast(currType); + auto *currAggType = currType->EmbeddedStructType(); ASSERT_NOT_NULL(currAggConst); ASSERT_NOT_NULL(currAggType); for (size_t iter = 0; iter < currAggType->GetFieldsSize() && !reached; ++iter) { @@ -680,17 +787,13 @@ MIRConst *Simplify::GetElementConstFromFieldId(FieldID fieldId, MIRConst &mirCon auto *fieldType = originAggType.GetFieldType(currFieldId); if (currFieldId == fieldId) { - if (auto *truncCst = TruncateUnionConstant(*currAggType, fieldConst, *fieldType)) { - resultConst = truncCst; - } else { - resultConst = TruncateUnionConstant(*currAggType, fieldConst, *fieldType); - } + resultConst = TruncateUnionConstant(*currAggType, fieldConst, *fieldType); reached = true; return; } ++currFieldId; - if (fieldType->GetKind() == kTypeUnion || fieldType->GetKind() == kTypeStruct) { + if (fieldType->EmbeddedStructType()) { bool isPrevUpperLayerUnion = isUpperLayerUnion; isUpperLayerUnion = currAggType->GetKind() == kTypeUnion; traverseAgg(fieldConst, fieldType); @@ -721,7 +824,7 @@ void Simplify::Finish() { static uint64 JoinBytes(int byte, uint32 num) { CHECK_FATAL(num <= 8, "not support"); - uint64 realByte = static_cast(byte & 0xff); + uint64 realByte = static_cast(byte) & 0xff; if (realByte == 0) { return 0; } @@ -772,7 +875,7 @@ static BaseNode *ConstructConstvalNode(uint64 val, PrimType primType, MIRBuilder } static BaseNode *ConstructConstvalNode(int64 byte, uint64 num, PrimType primType, MIRBuilder &mirBuilder) { - auto val = JoinBytes(byte, static_cast(num)); + auto val = JoinBytes(static_cast(byte), static_cast(num)); return ConstructConstvalNode(val, primType, mirBuilder); } @@ -857,9 +960,10 @@ bool MemEntry::ComputeMemEntry(BaseNode &expr, MIRFunction &func, MemEntry &memE const auto &concreteExpr = static_cast(expr); auto *symbol = func.GetLocalOrGlobalSymbol(concreteExpr.GetStIdx()); MIRType *curType = symbol->GetType(); + 
ASSERT_NOT_NULL(curType); if (concreteExpr.GetFieldID() != 0) { - CHECK_NULL_FATAL(curType); curType = static_cast(curType)->GetFieldType(concreteExpr.GetFieldID()); + CHECK_NULL_FATAL(curType); } mirType = curType; break; @@ -886,7 +990,7 @@ bool MemEntry::ComputeMemEntry(BaseNode &expr, MIRFunction &func, MemEntry &memE return true; } const auto &concreteExpr = static_cast(expr); - MIRPreg *preg = func.GetPregItem(concreteExpr.GetRegIdx()); + const MIRPreg *preg = func.GetPregItem(concreteExpr.GetRegIdx()); bool isFromDread = (preg->GetOp() == OP_dread); bool isFromAddrof = (preg->GetOp() == OP_addrof); if (isFromDread || isFromAddrof) { @@ -1039,7 +1143,7 @@ class BlockOperationHelper { // dassign %ret (constval i32 errnNumber) // goto @finalLab void HandleErrorWithDstSizeCheckAndReset(LabelIdx curLabIdx, LabelIdx condLabIdx, LabelIdx finalLabIdx, - OpKind memOpKind, ErrorNumber errNumer, bool isDstSizeConst, uint64 dstSize) { + OpKind memOpKind, ErrorNumber errNumer, bool isDstSizeConst, uint64 dstSize) { InsertLableNode(curLabIdx); if (!isDstSizeConst) { CreateAndInsertCheckStmt(OP_eq, stmt.Opnd(kMemOpSDstSizeOpndIdx), ConstructConstvalNode(0, PTY_u64, *mirBuilder), @@ -1075,15 +1179,15 @@ class BlockOperationHelper { // abs ptr (sub ptr (regread ptr %1, regread ptr %2)), // constval u64 xxx)) static CondGotoNode *CreateOverlapCheckStmt(BaseNode &expr1, BaseNode &expr2, BaseNode &size, LabelIdx labIdx, - MIRBuilder *mirBuilder) { + MIRBuilder &mirBuilder) { auto cmpResType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u8)); auto cmpU64Type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_ptr)); auto cmpI64Type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_i64)); - auto *checkExpr = mirBuilder->CreateExprCompare(OP_ge, *cmpResType, *cmpU64Type, - mirBuilder->CreateExprUnary(OP_abs, *cmpU64Type, - mirBuilder->CreateExprBinary(OP_sub, *cmpI64Type, &expr1, &expr2)), + auto *checkExpr = mirBuilder.CreateExprCompare(OP_ge, 
*cmpResType, *cmpU64Type, + mirBuilder.CreateExprUnary(OP_abs, *cmpU64Type, + mirBuilder.CreateExprBinary(OP_sub, *cmpI64Type, &expr1, &expr2)), &size); - auto *checkStmt = mirBuilder->CreateStmtCondGoto(checkExpr, OP_brfalse, labIdx); + auto *checkStmt = mirBuilder.CreateStmtCondGoto(checkExpr, OP_brfalse, labIdx); return checkStmt; } @@ -1150,7 +1254,7 @@ void MemEntry::ExpandMemsetLowLevel(int64 byte, uint64 size, MIRFunction &func, } BaseNode *readConst = nullptr; // rhs const is big, extract it to avoid redundant expression - bool shouldExtractRhs = blocks.size() > 1 && (byte & 0xff) != 0; + bool shouldExtractRhs = blocks.size() > 1 && (static_cast(byte) & 0xff) != 0; for (auto curSize : blocks) { // low level memset expand result: // iassignoff (dstAddrExpr, constval xx) @@ -1377,7 +1481,7 @@ void MemEntry::ExpandMemcpyLowLevel(const MemEntry &srcMem, uint64 copySize, MIR rhsAddrExpr = mirBuilder->CreateExprBinary(OP_add, *ptrType, realSrcExpr, offsetConstExpr); } BaseNode *rhsExpr = mirBuilder->CreateExprIread(*constMIRType, *constMIRPtrType, 0, rhsAddrExpr); - auto *iassignoff = mirBuilder->CreateStmtIassignoff(constType, offset, realDstExpr, rhsExpr); + auto *iassignoff = mirBuilder->CreateStmtIassignoff(constType, static_cast(offset), realDstExpr, rhsExpr); InsertBeforeAndMayPrintStmt(block, stmt, debug, iassignoff); offset += curSize; continue; @@ -1536,10 +1640,10 @@ StmtNode *MemEntry::GenRetAssign(StmtNode &stmt, MIRFunction &func, bool isLowLe MIRBuilder *mirBuilder = func.GetModule()->GetMIRBuilder(); BaseNode *rhs = callStmt.Opnd(0); // for memset, memcpy switch (opKind) { - case MEM_OP_memset_s: - case KMemOpMemcpyS: - case SPRINTF_OP_sprintf: - case SPRINTF_OP_sprintf_s:{ + case kMemOpMemsetS: + case kMemOpMemcpyS: + case kSprintfOpSprintf: + case kSprintfOpSprintfS:{ // memset_s memcpy_s sprintf_s must return an returnVal MIRType *constType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_i32)); MIRConst *mirConst = 
GlobalTables::GetIntConstTable().GetOrCreateIntConst( @@ -1570,49 +1674,49 @@ OpKind SimplifyOp::ComputeOpKind(StmtNode &stmt) { if (stmt.GetOpCode() == OP_intrinsiccall) { auto intrinsicID = static_cast(stmt).GetIntrinsic(); if (intrinsicID == INTRN_C_memset) { - return MEM_OP_memset; + return kMemOpMemset; } else if (intrinsicID == INTRN_C_memcpy) { - return MEM_OP_memcpy; + return kMemOpMemcpy; } } // lowered memop function (such as memset) may be a call, not callassigned if (stmt.GetOpCode() != OP_callassigned && stmt.GetOpCode() != OP_call) { - return MEM_OP_unknown; + return kMemOpUnknown; } auto &callStmt = static_cast(stmt); MIRFunction *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callStmt.GetPUIdx()); static const std::unordered_map hashFuncName = { - {kFuncNameOfMemset, MEM_OP_memset}, - {kFuncNameOfMemcpy, MEM_OP_memcpy}, - {kFuncNameOfMemsetS, MEM_OP_memset_s}, - {kFuncNameOfMemcpyS, KMemOpMemcpyS}, - {kFuncNameOfSprintf, SPRINTF_OP_sprintf}, - {kFuncNameOfSprintfS, SPRINTF_OP_sprintf_s}, - {kFuncNameOfSnprintfS, SPRINTF_OP_snprintf_s}, - {kFuncNameOfVsnprintfS, SPRINTF_OP_vsnprintf_s} + {kFuncNameOfMemset, kMemOpMemset}, + {kFuncNameOfMemcpy, kMemOpMemcpy}, + {kFuncNameOfMemsetS, kMemOpMemsetS}, + {kFuncNameOfMemcpyS, kMemOpMemcpyS}, + {kFuncNameOfSprintf, kSprintfOpSprintf}, + {kFuncNameOfSprintfS, kSprintfOpSprintfS}, + {kFuncNameOfSnprintfS, kSprintfOpSnprintfS}, + {kFuncNameOfVsnprintfS, kSprintfOpVsnprintfS} }; auto iter = hashFuncName.find(mirFunc->GetName().c_str()); if (iter != hashFuncName.end()) { return iter->second; } - return MEM_OP_unknown; + return kMemOpUnknown; } bool SimplifyOp::AutoSimplify(StmtNode &stmt, BlockNode &block, bool isLowLevel) { OpKind opKind = ComputeOpKind(stmt); switch (opKind) { - case MEM_OP_memset: - case MEM_OP_memset_s: { + case kMemOpMemset: + case kMemOpMemsetS: { return SimplifyMemset(stmt, block, isLowLevel); } - case MEM_OP_memcpy: - case KMemOpMemcpyS: { + case kMemOpMemcpy: + case 
kMemOpMemcpyS: { return SimplifyMemcpy(stmt, block, isLowLevel); } - case SPRINTF_OP_sprintf: - case SPRINTF_OP_sprintf_s: - case SPRINTF_OP_snprintf_s: - case SPRINTF_OP_vsnprintf_s:{ + case kSprintfOpSprintf: + case kSprintfOpSprintfS: + case kSprintfOpSnprintfS: + case kSprintfOpVsnprintfS:{ auto simplifySprintf = sprintfMap.find(opKind)->second; return simplifySprintf->ReplaceSprintfIfNeeded(stmt, block, isLowLevel, opKind); } @@ -1633,14 +1737,14 @@ StmtNode *SprintfBaseOper::InsertMemcpyCallStmt(const MapleVector &a memcpyFunc->AllocSymTab(); // handle memcpy return val CHECK_NULL_FATAL(op.GetFunction()); - auto *retAssign = MemEntry::GenRetAssign(stmt, *op.GetFunction(), isLowLevel, MEM_OP_memcpy, retVal); + auto *retAssign = MemEntry::GenRetAssign(stmt, *op.GetFunction(), isLowLevel, kMemOpMemcpy, retVal); InsertBeforeAndMayPrintStmtList(block, stmt, op.IsDebug(), {memcpyCallStmt, retAssign}); return memcpyCallStmt; } static bool GetFmtSrcSize(const StmtNode &stmt, const ConststrNode &fmt, uint32 &fmtOpndIdx, const OpKind &opKind, uint64 &srcSize) { - uint32 origOpndIdx = (opKind == SPRINTF_OP_snprintf_s || opKind == SPRINTF_OP_vsnprintf_s) ? + uint32 origOpndIdx = (opKind == kSprintfOpSnprintfS || opKind == kSprintfOpVsnprintfS) ? 
kSnprintfSOrigOpndIdx : kSprintfSOrigOpndIdx; const std::string fmtStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(fmt.GetStrIdx()); if (strchr(fmtStr.c_str(), '%') == nullptr) { /* eg: snprintf_s(dst, dstMax, count, constStr) */ @@ -1671,7 +1775,7 @@ bool SprintfBaseOper::CheckInvalidPara(uint64 count, uint64 dstMax, uint64 srcSi return true; } -bool SprintfBaseOper::DealWithDstOrEndZero(StmtNode &stmt, BlockNode &block, bool isLowLevel, uint64 count) { +bool SprintfBaseOper::DealWithDstOrEndZero(const StmtNode &stmt, BlockNode &block, bool isLowLevel, uint64 count) { MemEntry dstMemEntry; bool valid = MemEntry::ComputeMemEntry(*(stmt.Opnd(0)), *op.GetFunction(), dstMemEntry, isLowLevel); if (!valid) { @@ -1692,7 +1796,7 @@ bool SprintfBaseOper::CompareDstMaxSrcSize(StmtNode &stmt, BlockNode &block, uin return false; } int32 retVal = srcSize >= dstMax ? kTruncate : static_cast(srcSize); - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf_s, retVal, isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintfS, retVal, isLowLevel); return true; } @@ -1702,14 +1806,14 @@ bool SprintfBaseOper::CompareCountSrcSize(StmtNode &stmt, BlockNode &block, uint return false; } int32 retVal = srcSize > count ? 
kTruncate : static_cast(std::min(srcSize, count)); - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf_s, retVal, isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintfS, retVal, isLowLevel); return true; } -bool SprintfBaseOper::DealWithFmtConstStr(StmtNode &stmt, BaseNode *fmt, BlockNode &block, bool isLowLevel) { +bool SprintfBaseOper::DealWithFmtConstStr(StmtNode &stmt, const BaseNode *fmt, BlockNode &block, bool isLowLevel) { StmtNode *sprintfCallStmt = &stmt; const std::string fmtStr = - GlobalTables::GetUStrTable().GetStringFromStrIdx(static_cast(fmt)->GetStrIdx()); + GlobalTables::GetUStrTable().GetStringFromStrIdx(static_cast(fmt)->GetStrIdx()); uint64 srcSize = strlen(fmtStr.c_str()); /* If the format doesn't contain % args or %%, the format is a literal string constant. use memcpy */ if (strchr(fmtStr.c_str(), '%') == nullptr) { @@ -1771,7 +1875,7 @@ bool SprintfBaseOper::ReplaceSprintfWithMemcpy(StmtNode &stmt, BlockNode &block, if (memcpyCallStmt == nullptr) { return false; } - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf, static_cast(copySize), isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintf, static_cast(copySize), isLowLevel); (void)op.SimplifyMemcpy(*memcpyCallStmt, block, isLowLevel); return true; @@ -1821,10 +1925,10 @@ bool SimplifySnprintfS::ReplaceSprintfIfNeeded(StmtNode &stmt, BlockNode &block, return false; } if (!isLowLevel) { - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf_s, kSprintfErrNum, isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintfS, kSprintfErrNum, isLowLevel); return false; } - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf_s, kSprintfErrNum, isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintfS, kSprintfErrNum, isLowLevel); } else if (dstMax <= count) { /* if dstMax <= count, we need compare dstMax and srcSize */ if (!ReplaceSprintfWithMemcpy(stmt, block, fmtOpndIdx, dstMax, isLowLevel)) { @@ -1879,10 +1983,10 @@ bool SimplifySprintfS::ReplaceSprintfIfNeeded(StmtNode &stmt, 
BlockNode &block, return false; } if (!isLowLevel) { - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf_s, kSprintfErrNum, isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintfS, kSprintfErrNum, isLowLevel); return false; } - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf_s, kSprintfErrNum, isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintfS, kSprintfErrNum, isLowLevel); } else { uint64 copySize = dstMax <= srcSize ? dstMax : srcSize; if (!ReplaceSprintfWithMemcpy(stmt, block, fmtOpndIdx, copySize, isLowLevel)) { @@ -1894,7 +1998,7 @@ bool SimplifySprintfS::ReplaceSprintfIfNeeded(StmtNode &stmt, BlockNode &block, return false; } int32 retVal = dstMax <= srcSize ? kTruncate : static_cast(srcSize); - ProcessRetValue(stmt, block, SPRINTF_OP_sprintf_s, retVal, isLowLevel); + ProcessRetValue(stmt, block, kSprintfOpSprintfS, retVal, isLowLevel); } } else { // We do not support non-constant str. @@ -1904,24 +2008,27 @@ bool SimplifySprintfS::ReplaceSprintfIfNeeded(StmtNode &stmt, BlockNode &block, return true; } -// expand memset_s call statement, return pointer of memset call statement node to be expanded in the next step, return -// nullptr if memset_s is expanded completely. 
-StmtNode *SimplifyOp::PartiallyExpandMemsetS(StmtNode &stmt, BlockNode &block) const{ - ErrorNumber errNum = ERRNO_OK; - - uint64 srcSize = 0; - bool isSrcSizeConst = false; +void SimplifyOp::FoldMemsExpr(StmtNode &stmt, uint64 &srcSize, bool &isSrcSizeConst, uint64 &dstSize, + bool &isDstSizeConst) const { BaseNode *foldSrcSizeExpr = FoldIntConst(stmt.Opnd(kMemOpSSrcSizeOpndIdx), srcSize, isSrcSizeConst); if (foldSrcSizeExpr != nullptr) { stmt.SetOpnd(foldSrcSizeExpr, kMemOpSDstSizeOpndIdx); } - - uint64 dstSize = 0; - bool isDstSizeConst = false; BaseNode *foldDstSizeExpr = FoldIntConst(stmt.Opnd(kMemOpSDstSizeOpndIdx), dstSize, isDstSizeConst); if (foldDstSizeExpr != nullptr) { stmt.SetOpnd(foldDstSizeExpr, kMemOpSDstSizeOpndIdx); } +} + +// expand memset_s call statement, return pointer of memset call statement node to be expanded in the next step, return +// nullptr if memset_s is expanded completely. +StmtNode *SimplifyOp::PartiallyExpandMemsetS(StmtNode &stmt, BlockNode &block) const { + ErrorNumber errNum = ERRNO_OK; + uint64 srcSize = 0; + bool isSrcSizeConst = false; + uint64 dstSize = 0; + bool isDstSizeConst = false; + FoldMemsExpr(stmt, srcSize, isSrcSizeConst, dstSize, isDstSizeConst); if (isDstSizeConst) { if ((srcSize > dstSize && dstSize == 0) || dstSize > kSecurecMemMaxLen) { errNum = ERRNO_RANGE; @@ -1939,7 +2046,7 @@ StmtNode *SimplifyOp::PartiallyExpandMemsetS(StmtNode &stmt, BlockNode &block) c BlockOperationHelper helper(stmt, *func, block, false, debug); LabelIdx finalLabIdx = func->GetLabelTab()->CreateLabelWithPrefix('f'); if (errNum != ERRNO_OK || (isSrcSizeConst && isDstSizeConst && dstSize == 0 && srcSize == 0)) { - auto errnoAssign = MemEntry::GenRetAssign(stmt, *func, true, MEM_OP_memset_s, errNum); + auto errnoAssign = MemEntry::GenRetAssign(stmt, *func, true, kMemOpMemsetS, errNum); InsertBeforeAndMayPrintStmt(block, stmt, debug, errnoAssign); block.RemoveStmt(&stmt); return nullptr; @@ -1963,9 +2070,9 @@ StmtNode 
*SimplifyOp::PartiallyExpandMemsetS(StmtNode &stmt, BlockNode &block) c if (isDstSizeConst && isSrcSizeConst) { if (srcSize > dstSize) { auto *callStmt = helper.InsertMemsetCallStmt(stmt.Opnd(kMemOpDstOpndIdx), stmt.Opnd(kMemOpSSrcOpndIdx)); - helper.InsertRetAssignAndGoto(finalLabIdx, MEM_OP_memset_s, ERRNO_RANGE_AND_RESET); + helper.InsertRetAssignAndGoto(finalLabIdx, kMemOpMemsetS, ERRNO_RANGE_AND_RESET); if (!isDstAddrSafe) { - helper.HandleError(nullPtrLabIdx, finalLabIdx, ERRNO_INVAL, MEM_OP_memset_s); + helper.HandleError(nullPtrLabIdx, finalLabIdx, ERRNO_INVAL, kMemOpMemsetS); } helper.InsertLableNode(finalLabIdx); block.RemoveStmt(&stmt); @@ -1986,25 +2093,25 @@ StmtNode *SimplifyOp::PartiallyExpandMemsetS(StmtNode &stmt, BlockNode &block) c auto memCallStmt = mirBuilder->CreateStmtCallAssigned(memFunc->GetPuidx(), args, nullptr, OP_callassigned); memCallStmt->SetSrcPos(stmt.GetSrcPos()); InsertBeforeAndMayPrintStmt(block, stmt, debug, memCallStmt); - helper.InsertRetAssignAndGoto(finalLabIdx, MEM_OP_memset_s, errNum); + helper.InsertRetAssignAndGoto(finalLabIdx, kMemOpMemsetS, errNum); if (!isSrcSizeConst || !isDstSizeConst) { // handle src size error // in memset_s, we need check if dstSize is 0 before handle srcSize > dstSize, and memset dst to zero - helper.HandleErrorWithDstSizeCheckAndReset(srcSizeCheckLabIdx, dstSizeCheckLabIdx, finalLabIdx, MEM_OP_memset_s, + helper.HandleErrorWithDstSizeCheckAndReset(srcSizeCheckLabIdx, dstSizeCheckLabIdx, finalLabIdx, kMemOpMemsetS, ERRNO_RANGE_AND_RESET, isDstSizeConst, dstSize); } // handle dst nullptr error // in memset_s, we need check if dstSize is 0 before handle dst == nullptr if (!isDstAddrSafe) { - helper.HandleErrorWithDstSizeCheck(nullPtrLabIdx, dstSizeCheckLabIdx, finalLabIdx, MEM_OP_memset_s, ERRNO_INVAL, + helper.HandleErrorWithDstSizeCheck(nullPtrLabIdx, dstSizeCheckLabIdx, finalLabIdx, kMemOpMemsetS, ERRNO_INVAL, isDstSizeConst, dstSize); } if (!isDstSizeConst) { // handle dst size error - 
helper.HandleError(dstSizeCheckLabIdx, finalLabIdx, ERRNO_RANGE, MEM_OP_memset_s); + helper.HandleError(dstSizeCheckLabIdx, finalLabIdx, ERRNO_RANGE, kMemOpMemsetS); } auto *finalLabelNode = mirBuilder->CreateStmtLabel(finalLabIdx); InsertBeforeAndMayPrintStmt(block, stmt, debug, finalLabelNode); @@ -2020,22 +2127,22 @@ StmtNode *SimplifyOp::PartiallyExpandMemsetS(StmtNode &stmt, BlockNode &block) c // for primitive type, array type with element size < 4 bytes and struct type without padding // (2) cglower memset expand // for array type with element size >= 4 bytes and struct type with paddings -bool SimplifyOp::SimplifyMemset(StmtNode &stmt, BlockNode &block, bool isLowLevel) { +bool SimplifyOp::SimplifyMemset(StmtNode &stmt, BlockNode &block, bool isLowLevel) const { OpKind memOpKind = ComputeOpKind(stmt); - if (memOpKind != MEM_OP_memset && memOpKind != MEM_OP_memset_s) { + if (memOpKind != kMemOpMemset && memOpKind != kMemOpMemsetS) { return false; } uint32 dstOpndIdx = 0; uint32 srcOpndIdx = 1; uint32 srcSizeOpndIdx = 2; - bool isSafeVersion = memOpKind == MEM_OP_memset_s; + bool isSafeVersion = memOpKind == kMemOpMemsetS; if (debug) { LogInfo::MapleLogger() << "[funcName] " << func->GetName() << std::endl; stmt.Dump(0); } StmtNode *memsetCallStmt = &stmt; - if (memOpKind == MEM_OP_memset_s && !isLowLevel) { + if (memOpKind == kMemOpMemsetS && !isLowLevel) { memsetCallStmt = PartiallyExpandMemsetS(stmt, block); if (!memsetCallStmt) { return true; // Expand memset_s completely, no extra memset is generated, so just return true @@ -2062,7 +2169,7 @@ bool SimplifyOp::SimplifyMemset(StmtNode &stmt, BlockNode &block, bool isLowLeve return false; } if (srcSize == 0) { - if (memOpKind == MEM_OP_memset) { + if (memOpKind == kMemOpMemset) { auto *retAssign = MemEntry::GenRetAssign(stmt, *func, isLowLevel, memOpKind); InsertBeforeAndMayPrintStmt(block, *memsetCallStmt, debug, retAssign); } @@ -2099,8 +2206,8 @@ bool SimplifyOp::SimplifyMemset(StmtNode &stmt, BlockNode 
&block, bool isLowLeve } bool ret = false; if (srcSize != 0) { - ret = dstMemEntry.ExpandMemset(static_cast(val), static_cast(srcSize), *func, *memsetCallStmt, block, isLowLevel, - debug, errNum); + ret = dstMemEntry.ExpandMemset(static_cast(val), static_cast(srcSize), *func, *memsetCallStmt, + block, isLowLevel, debug, errNum); } else { // if size == 0, no need to set memory, just return error nummber auto *retAssign = MemEntry::GenRetAssign(*memsetCallStmt, *func, isLowLevel, memOpKind, errNum); @@ -2116,20 +2223,11 @@ bool SimplifyOp::SimplifyMemset(StmtNode &stmt, BlockNode &block, bool isLowLeve StmtNode *SimplifyOp::PartiallyExpandMemcpyS(StmtNode &stmt, BlockNode &block) { ErrorNumber errNum = ERRNO_OK; - uint64 srcSize = 0; bool isSrcSizeConst = false; - BaseNode *foldSrcSizeExpr = FoldIntConst(stmt.Opnd(kMemOpSSrcSizeOpndIdx), srcSize, isSrcSizeConst); - if (foldSrcSizeExpr != nullptr) { - stmt.SetOpnd(foldSrcSizeExpr, kMemOpSDstSizeOpndIdx); - } - uint64 dstSize = 0; bool isDstSizeConst = false; - BaseNode *foldDstSizeExpr = FoldIntConst(stmt.Opnd(kMemOpSDstSizeOpndIdx), dstSize, isDstSizeConst); - if (foldDstSizeExpr != nullptr) { - stmt.SetOpnd(foldDstSizeExpr, kMemOpSDstSizeOpndIdx); - } + FoldMemsExpr(stmt, srcSize, isSrcSizeConst, dstSize, isDstSizeConst); if (isDstSizeConst) { if ((dstSize == 0) || static_cast(dstSize) > kSecurecMemMaxLen) { errNum = ERRNO_RANGE; @@ -2153,7 +2251,7 @@ StmtNode *SimplifyOp::PartiallyExpandMemcpyS(StmtNode &stmt, BlockNode &block) { BlockOperationHelper helper(stmt, *func, block, false, debug); LabelIdx finalLabIdx = func->GetLabelTab()->CreateLabelWithPrefix('f'); if (errNum != ERRNO_OK || (isSrcSizeConst && isDstSizeConst && dstSize == 0 && srcSize == 0)) { - auto errnoAssign = MemEntry::GenRetAssign(stmt, *func, true, KMemOpMemcpyS, errNum); + auto errnoAssign = MemEntry::GenRetAssign(stmt, *func, true, kMemOpMemcpyS, errNum); InsertBeforeAndMayPrintStmt(block, stmt, debug, errnoAssign); block.RemoveStmt(&stmt); 
return nullptr; @@ -2189,14 +2287,14 @@ StmtNode *SimplifyOp::PartiallyExpandMemcpyS(StmtNode &stmt, BlockNode &block) { // handle srcsize > dstsize auto *callStmt = helper.InsertMemsetCallStmt(stmt.Opnd(kMemOpDstOpndIdx), ConstructConstvalNode(0, PTY_i32, *mirBuilder)); - helper.InsertRetAssignAndGoto(finalLabIdx, KMemOpMemcpyS, ERRNO_RANGE_AND_RESET); + helper.InsertRetAssignAndGoto(finalLabIdx, kMemOpMemcpyS, ERRNO_RANGE_AND_RESET); // handle src = nullptr if (!isSrcAddrSafe) { - helper.HandleErrorAndReset(srcNullPtrLabIdx, finalLabIdx, KMemOpMemcpyS, ERRNO_INVAL_AND_RESET); + helper.HandleErrorAndReset(srcNullPtrLabIdx, finalLabIdx, kMemOpMemcpyS, ERRNO_INVAL_AND_RESET); } // handle dst = nullptr if (!isDstAddrSafe) { - helper.HandleError(dstNullPtrLabIdx, finalLabIdx, ERRNO_INVAL, KMemOpMemcpyS); + helper.HandleError(dstNullPtrLabIdx, finalLabIdx, ERRNO_INVAL, kMemOpMemcpyS); } helper.InsertLableNode(finalLabIdx); block.RemoveStmt(&stmt); @@ -2216,7 +2314,7 @@ StmtNode *SimplifyOp::PartiallyExpandMemcpyS(StmtNode &stmt, BlockNode &block) { // check overlap overlapLabIdx = func->GetLabelTab()->CreateLabelWithPrefix('o'); // 'o' means overlap auto *checkOverlapStmt = CreateOverlapCheckStmt(*stmt.Opnd(kMemOpDstOpndIdx), *stmt.Opnd(kMemOpSSrcOpndIdx), - *stmt.Opnd(kMemOpSSrcSizeOpndIdx), overlapLabIdx, mirBuilder); + *stmt.Opnd(kMemOpSSrcSizeOpndIdx), overlapLabIdx, *mirBuilder); InsertBeforeAndMayPrintStmt(block, stmt, debug, checkOverlapStmt); // insert memcpy call @@ -2228,32 +2326,32 @@ StmtNode *SimplifyOp::PartiallyExpandMemcpyS(StmtNode &stmt, BlockNode &block) { auto memCallStmt = mirBuilder->CreateStmtCallAssigned(memFunc->GetPuidx(), args, nullptr, OP_callassigned); memCallStmt->SetSrcPos(stmt.GetSrcPos()); InsertBeforeAndMayPrintStmt(block, stmt, debug, memCallStmt); - helper.InsertRetAssignAndGoto(finalLabIdx, KMemOpMemcpyS, errNum); + helper.InsertRetAssignAndGoto(finalLabIdx, kMemOpMemcpyS, errNum); // Add handler IR if dst and src are overlapped - 
helper.HandleErrorAndReset(overlapLabIdx, finalLabIdx, KMemOpMemcpyS, ERRNO_OVERLAP_AND_RESET); + helper.HandleErrorAndReset(overlapLabIdx, finalLabIdx, kMemOpMemcpyS, ERRNO_OVERLAP_AND_RESET); // handle addr equal - helper.HandleError(addrEqLabIdx, finalLabIdx, ERRNO_OK, KMemOpMemcpyS); + helper.HandleError(addrEqLabIdx, finalLabIdx, ERRNO_OK, kMemOpMemcpyS); if (!isSrcSizeConst || !isDstSizeConst) { // handle src size error - helper.HandleErrorAndReset(srcSizeCheckLabIdx, finalLabIdx, KMemOpMemcpyS, ERRNO_RANGE_AND_RESET); + helper.HandleErrorAndReset(srcSizeCheckLabIdx, finalLabIdx, kMemOpMemcpyS, ERRNO_RANGE_AND_RESET); } // handle src nullptr error if (!isSrcAddrSafe) { - helper.HandleErrorAndReset(srcNullPtrLabIdx, finalLabIdx, KMemOpMemcpyS, ERRNO_INVAL_AND_RESET); + helper.HandleErrorAndReset(srcNullPtrLabIdx, finalLabIdx, kMemOpMemcpyS, ERRNO_INVAL_AND_RESET); } // handle dst nullptr error if (!isDstAddrSafe) { - helper.HandleError(dstNullPtrLabIdx, finalLabIdx, ERRNO_INVAL, KMemOpMemcpyS); + helper.HandleError(dstNullPtrLabIdx, finalLabIdx, ERRNO_INVAL, kMemOpMemcpyS); } if (!isDstSizeConst) { // handle dst size error - helper.HandleError(dstSizeCheckLabIdx, finalLabIdx, ERRNO_RANGE, KMemOpMemcpyS); + helper.HandleError(dstSizeCheckLabIdx, finalLabIdx, ERRNO_RANGE, kMemOpMemcpyS); } helper.InsertLableNode(finalLabIdx); block.RemoveStmt(&stmt); @@ -2268,20 +2366,20 @@ bool SimplifyOp::SimplifyMemcpy(StmtNode &stmt, BlockNode &block, bool isLowLeve } OpKind memOpKind = ComputeOpKind(stmt); - if (memOpKind != MEM_OP_memcpy && memOpKind != KMemOpMemcpyS) { + if (memOpKind != kMemOpMemcpy && memOpKind != kMemOpMemcpyS) { return false; } uint32 dstOpndIdx = 0; uint32 srcOpndIdx = 1; uint32 srcSizeOpndIdx = 2; - bool isSafeVersion = memOpKind == KMemOpMemcpyS; + bool isSafeVersion = memOpKind == kMemOpMemcpyS; if (debug) { LogInfo::MapleLogger() << "[funcName] " << func->GetName() << std::endl; stmt.Dump(0); } StmtNode* memcpyCallStmt = &stmt; - if (memOpKind == 
KMemOpMemcpyS) { + if (memOpKind == kMemOpMemcpyS) { memcpyCallStmt = PartiallyExpandMemcpyS(stmt, block); if (!memcpyCallStmt) { return true; // Expand memcpy_s completely, no extra memcpy is generated, so just return true @@ -2313,7 +2411,7 @@ bool SimplifyOp::SimplifyMemcpy(StmtNode &stmt, BlockNode &block, bool isLowLeve return false; } if (srcSize == 0) { - if (memOpKind == MEM_OP_memcpy) { + if (memOpKind == kMemOpMemcpy) { auto *retAssign = MemEntry::GenRetAssign(stmt, *func, isLowLevel, memOpKind); InsertBeforeAndMayPrintStmt(block, *memcpyCallStmt, debug, retAssign); } @@ -2351,7 +2449,7 @@ bool SimplifyOp::SimplifyMemcpy(StmtNode &stmt, BlockNode &block, bool isLowLeve } bool ret = false; ret = dstMemEntry.ExpandMemcpy(srcMemEntry, copySize, *func, *memcpyCallStmt, block, - isLowLevel, debug, errNum); + isLowLevel, debug, errNum); if (ret) { MayPrintLog(debug, true, memOpKind, "well done"); } @@ -2363,12 +2461,112 @@ void M2MSimplify::GetAnalysisDependence(maple::AnalysisDep &aDep) const { aDep.SetPreservedAll(); } +static constexpr uint64 RoundUpConst(uint64 offset, uint32 align) { + return (~(align - 1)) & (offset + align - 1); +} + +static inline uint64 RoundUp(uint64 offset, uint32 align) { + if (align == 0) { + return offset; + } + return RoundUpConst(offset, align); +} + +// calculate all local dynamic TLS offset from the anchor +static void CalculateLocalDynamicTLS(MIRModule &m) { + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + MIRType *mirType = nullptr; + uint64 tdataOffset = 0; + uint64 tbssOffset = 0; + MapleMap &tdataVarOffset = m.GetTdataVarOffset(); + MapleMap &tbssVarOffset = m.GetTbssVarOffset(); + + for (auto it = m.GetFunctionList().begin(); it != m.GetFunctionList().end(); ++it) { + MIRFunction *mirFunc = *it; + if (mirFunc->GetBody() == nullptr) { + continue; + } + MIRSymbolTable *lSymTab = mirFunc->GetSymTab(); + if (lSymTab == nullptr) { + continue; + } + size_t lsize = lSymTab->GetSymbolTableSize(); + for 
(size_t i = 0; i < lsize; i++) { + MIRSymbol *mirSymbol = lSymTab->GetSymbolFromStIdx(static_cast(i)); + if (mirSymbol == nullptr || mirSymbol->GetStorageClass() != kScPstatic) { + continue; + } + uint32 align = 0; + mirType = mirSymbol->GetType(); + if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass || + mirType->GetKind() == kTypeArray || mirType->GetKind() == kTypeUnion) { + align = k8ByteSize; + } else { + align = mirType->GetAlign(); + } + if (mirSymbol->IsThreadLocal()) { + mirType = mirSymbol->GetType(); + if (!mirSymbol->IsConst()) { + if (!m.GetTbssAnchor() && !opts::aggressiveTlsLocalDynamicOpt) { + m.SetTbssAnchor(mirSymbol); + } + tbssOffset = RoundUp(tbssOffset, align); + tbssVarOffset[mirSymbol] = tbssOffset; + tbssOffset += mirType->GetSize(); + } else { + if (!m.GetTdataAnchor() && !opts::aggressiveTlsLocalDynamicOpt) { + m.SetTdataAnchor(mirSymbol); + } + tdataOffset = RoundUp(tdataOffset, align); + tdataVarOffset[mirSymbol] = tdataOffset; + tdataOffset += mirType->GetSize(); + } + } + } + } + + for (size_t i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast(i)); + if (mirSymbol == nullptr || mirSymbol->GetStorageClass() == kScExtern) { + continue; + } + if (mirSymbol->IsThreadLocal() && (opts::aggressiveTlsLocalDynamicOpt || mirSymbol->IsHiddenVisibility())) { + mirType = mirSymbol->GetType(); + uint32 align = 0; + if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass || + mirType->GetKind() == kTypeArray || mirType->GetKind() == kTypeUnion) { + align = k8ByteSize; + } else { + align = mirType->GetAlign(); + } + if (!mirSymbol->IsConst()) { + if (!m.GetTbssAnchor() && !opts::aggressiveTlsLocalDynamicOpt) { + m.SetTbssAnchor(mirSymbol); + } + tbssOffset = RoundUp(tbssOffset, align); + tbssVarOffset[mirSymbol] = tbssOffset; + tbssOffset += mirType->GetSize(); + } else { + if (!m.GetTdataAnchor() && !opts::aggressiveTlsLocalDynamicOpt) { + 
m.SetTdataAnchor(mirSymbol); + } + tdataOffset = RoundUp(tdataOffset, align); + tdataVarOffset[mirSymbol] = tdataOffset; + tdataOffset += mirType->GetSize(); + } + } + } +} + bool M2MSimplify::PhaseRun(maple::MIRModule &m) { auto *kh = GET_ANALYSIS(M2MKlassHierarchy, m); ASSERT_NOT_NULL((kh)); std::unique_ptr funcOptImpl = std::make_unique(m, kh, *GetPhaseMemPool(), TRACE_MAPLE_PHASE); ASSERT_NOT_NULL(funcOptImpl); + if (opts::aggressiveTlsLocalDynamicOpt || maplebe::CGOptions::IsShlib()) { + CalculateLocalDynamicTLS(m); + } FuncOptimizeIterator opt(PhaseName(), std::move(funcOptImpl)); opt.Init(); opt.Run(); diff --git a/src/mapleall/mpl2mpl/src/stmt_cost_analyzer.cpp b/src/mapleall/mpl2mpl/src/stmt_cost_analyzer.cpp index eb8831c3fb26d42b862c7408483161e37cc414a9..6f1d50ba9f0e7ead86dfb753f5ba9c7d55409c92 100644 --- a/src/mapleall/mpl2mpl/src/stmt_cost_analyzer.cpp +++ b/src/mapleall/mpl2mpl/src/stmt_cost_analyzer.cpp @@ -165,7 +165,7 @@ int64 StmtCostAnalyzer::GetStmtsCost(BlockNode *block) { return cost; } -int64 StmtCostAnalyzer::GetMoveCost(size_t sizeInByte) { +int64 StmtCostAnalyzer::GetMoveCost(size_t sizeInByte) const { return static_cast(((sizeInByte + ti->GetMaxMoveBytes() - 1) / ti->GetMaxMoveBytes()) * kSizeScale); } @@ -389,7 +389,7 @@ int64 StmtCostAnalyzer::GetExprCost(BaseNode *expr) { return cost; } -MIRType *StmtCostAnalyzer::GetMIRTypeFromStIdxAndField(const StIdx idx, FieldID fieldID) { +MIRType *StmtCostAnalyzer::GetMIRTypeFromStIdxAndField(const StIdx idx, FieldID fieldID) const { auto *symbol = curFunc->GetLocalOrGlobalSymbol(idx); ASSERT_NOT_NULL(symbol); auto *mirType = symbol->GetType(); diff --git a/src/mapleall/mpl2mpl/src/vtable_analysis.cpp b/src/mapleall/mpl2mpl/src/vtable_analysis.cpp index f50063fa3c4d139dbd9c1cf5ff3cad75c2fb4b57..266c99ea94498647195dbe64fa483cd162e92f13 100644 --- a/src/mapleall/mpl2mpl/src/vtable_analysis.cpp +++ b/src/mapleall/mpl2mpl/src/vtable_analysis.cpp @@ -88,7 +88,7 @@ bool 
VtableAnalysis::CheckOverrideForCrossPackage(const MIRFunction &baseMethod, // If the method is not in method_table yet, add it in, otherwise update it. // Note: the method to add should already pass VtableCandidate test -void VtableAnalysis::AddMethodToTable(MethodPtrVector &methodTable, MethodPair &methodPair) { +void VtableAnalysis::AddMethodToTable(MethodPtrVector &methodTable, MethodPair &methodPair) const { MIRFunction *method = builder->GetFunctionFromStidx(methodPair.first); ASSERT_NOT_NULL(method); GStrIdx strIdx = method->GetBaseFuncNameWithTypeStrIdx(); @@ -574,7 +574,7 @@ void VtableAnalysis::ReplacePolymorphicInvoke(CallNode &stmt) { currFunc->GetBody()->ReplaceStmt1WithStmt2(&stmt, intrinCall); } -BaseNode *VtableAnalysis::GenVtabItabBaseAddr(BaseNode &obj, bool isVirtual) { +BaseNode *VtableAnalysis::GenVtabItabBaseAddr(BaseNode &obj, bool isVirtual) const { ASSERT_NOT_NULL(builder); BaseNode *classInfoAddress = ReflectionAnalysis::GenClassInfoAddr(&obj, *builder); auto *classMetadataType = static_cast( @@ -624,10 +624,9 @@ size_t VtableAnalysis::SearchWithoutRettype(const MIRFunction &callee, const MIR } if (isCalleeScalar || isCurrVtabScalar) { if (isFindMethod) { - CHECK_FATAL(klassHierarchy->GetKlassFromTyIdx(structType.GetTypeIndex()) != nullptr, "null ptr check"); - LogInfo::MapleLogger() << "warning: this " - << (klassHierarchy->GetKlassFromTyIdx(structType.GetTypeIndex()))->GetKlassName() - << " has mult methods with the same function name but with different return type!\n"; + CHECK_FATAL(klassHierarchy->GetKlassFromTyIdx(structType.GetTypeIndex()) != nullptr, "warning: this ", + (klassHierarchy->GetKlassFromTyIdx(structType.GetTypeIndex()))->GetKlassName().c_str(), + " has mult methods with the same function name but with different return type!"); break; } entryoffset = id; @@ -798,7 +797,7 @@ bool VtableAnalysis::CheckInterfaceImplemented(const CallNode &stmt) const { return false; } -void VtableAnalysis::ReplaceInterfaceInvoke(CallNode 
&stmt) { +void VtableAnalysis::ReplaceInterfaceInvoke(CallNode &stmt) const { CHECK_FATAL(!stmt.GetNopnd().empty(), "container check"); if (Options::deferredVisit && !CheckInterfaceImplemented(stmt)) { return; diff --git a/src/mapleall/mpl2mpl/src/vtable_impl.cpp b/src/mapleall/mpl2mpl/src/vtable_impl.cpp index 88fb17b57a58f688d5df286eaeece7b937adca88..b0d73531a840658e3e8bc1c5fa86445712ba1003 100644 --- a/src/mapleall/mpl2mpl/src/vtable_impl.cpp +++ b/src/mapleall/mpl2mpl/src/vtable_impl.cpp @@ -37,7 +37,7 @@ VtableImpl::VtableImpl(MIRModule &mod, KlassHierarchy *kh, bool dump) mccItabFunc->SetAttr(FUNCATTR_nosideeffect); } #if defined(TARGARM) || defined(TARGAARCH64) || defined(TARGRISCV64) -bool VtableImpl::Intrinsify(MIRFunction &func, CallNode &cnode) { +bool VtableImpl::Intrinsify(MIRFunction &func, CallNode &cnode) const { MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(cnode.GetPUIdx()); const std::string funcName = calleeFunc->GetName(); MIRIntrinsicID intrnId = INTRN_UNDEFINED; @@ -103,11 +103,10 @@ void VtableImpl::ProcessFunc(MIRFunction *func) { }; const std::string funcName = calleefunc->GetName(); if (!Options::buildApp && Options::O2 && intrisicsList.find(funcName) != intrisicsList.end() && - funcName != "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I") { - if (Intrinsify(*func, *cnode)) { - stmt = next; - continue; - } + funcName != "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I" && + Intrinsify(*func, *cnode)) { + stmt = next; + continue; } } #endif /* TARGARM || TARGAARCH64 */ @@ -259,7 +258,7 @@ void VtableImpl::DeferredVisit(CallNode &stmt, enum CallKind kind) { DeferredVisitCheckFloat(stmt, *mirFunc); } -void VtableImpl::DeferredVisitCheckFloat(CallNode &stmt, const MIRFunction &mirFunc) { +void VtableImpl::DeferredVisitCheckFloat(CallNode &stmt, const MIRFunction &mirFunc) const { if (!stmt.GetReturnVec().empty() && mirFunc.GetReturnTyIdx() == PTY_f32) { if 
(stmt.GetReturnVec().begin()->second.IsReg()) { PregIdx returnIdx = stmt.GetReturnVec().begin()->second.GetPregIdx(); @@ -316,7 +315,7 @@ void VtableImpl::DeferredVisitCheckFloat(CallNode &stmt, const MIRFunction &mirF } } -void VtableImpl::ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode &resolveNode) { +void VtableImpl::ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode &resolveNode) const { MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(resolveNode.GetPuIdx()); std::string signature = VtableAnalysis::DecodeBaseNameWithType(*func); MIRType *compactPtrType = GlobalTables::GetTypeTable().GetCompactPtr(); @@ -350,7 +349,7 @@ void VtableImpl::ReplaceResolveInterface(StmtNode &stmt, const ResolveFuncNode & void VtableImpl::ItabProcess(const StmtNode &stmt, const ResolveFuncNode &resolveNode, const std::string &signature, const PregIdx &pregFuncPtr, const MIRType &compactPtrType, - const PrimType &compactPtrPrim) { + const PrimType &compactPtrPrim) const { int64 hashCode = GetHashIndex(signature.c_str()); uint64 secondHashCode = GetSecondHashIndex(signature.c_str()); PregIdx pregItabAddress = currFunc->GetPregTab()->CreatePreg(PTY_ptr); diff --git a/src/mapleall/test/BUILD.gn b/src/mapleall/test/BUILD.gn index f764fad2cd02ff4a6072594676b5e2b979b19df6..9fe0ed76fa57aac33a70c346b1d7b37f5c5d51a8 100644 --- a/src/mapleall/test/BUILD.gn +++ b/src/mapleall/test/BUILD.gn @@ -44,6 +44,8 @@ src_mapleallUT = [ "maple_string_test.cpp", "peep_revTest.cpp", "int128_val_test.cpp", + "int128_lexer.cpp", + "float128_ut_test.cpp", ] executable("mapleallUT") { diff --git a/src/mapleall/test/CMakeLists.txt b/src/mapleall/test/CMakeLists.txt index a6c0ba032b7fe9bb66cad5057a1cc2b3f09a33e7..6189be2fb75114810b63d41250dc6d1317286372 100644 --- a/src/mapleall/test/CMakeLists.txt +++ b/src/mapleall/test/CMakeLists.txt @@ -42,12 +42,15 @@ set(src_mapleallUT stacktrace_test.cpp maple_string_test.cpp int128_val_test.cpp + int128_lexer.cpp + 
float128_ut_test.cpp ) set(deps libmpl libmaplepgo libcglowerer + libmaplepgo libmplbe libmplme libcg diff --git a/src/mapleall/test/cl_ut_test.cpp b/src/mapleall/test/cl_ut_test.cpp index 1f75a84f861ebbf4e7d2505cdb1544c63e29e53e..f26bf54ed541f422129577b73ea1ec1aab960f8c 100644 --- a/src/mapleall/test/cl_ut_test.cpp +++ b/src/mapleall/test/cl_ut_test.cpp @@ -120,8 +120,8 @@ namespace testopts { maplecl::Option equalStr({"--eqstr"}, ""); maplecl::Option equalDig({"--eqdig"}, ""); - maplecl::Option reqVal({"--reqval"}, "", maplecl::requiredValue, maplecl::Init(-42)); - maplecl::Option optVal({"--optval"}, "", maplecl::optionalValue, maplecl::Init(-42)); + maplecl::Option reqVal({"--reqval"}, "", maplecl::kRequiredValue, maplecl::Init(-42)); + maplecl::Option optVal({"--optval"}, "", maplecl::kOptionalValue, maplecl::Init(-42)); maplecl::Option woVal({"--woval"}, "", maplecl::disallowedValue, maplecl::Init(-42)); maplecl::Option cat1Opt1({"--c1opt1"}, "", {testCategory1}); diff --git a/src/mapleall/test/float128_ut_test.cpp b/src/mapleall/test/float128_ut_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..56e9a83565e3e85774a471a2da0b3646fc1ad136 --- /dev/null +++ b/src/mapleall/test/float128_ut_test.cpp @@ -0,0 +1,241 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "mir_const.h" +#include "mir_type.h" +#include "gtest/gtest.h" + +// ##################### "GetDoubleValue" check ##################### +TEST(f128, f128GetDoubleVal) { + // Check outcomes of GetDoubleValue + maple::MIRType testMIRType = maple::MIRType(maple::MIRTypeKind::kTypeArray, maple::PTY_f128); + maple::MIRType &ref = testMIRType; + + maple::uint64 test1[2] = {0x3FFF666666666666, 0x6666666666666666}; // 1.4 + maple::uint64 test2[2] = {0x43FFFFFFC57CA82A, 0xDAEE582CB9D96768}; // 1.79769e+308L * 2.0L + maple::uint64 test3[2] = {0xC3FFFFFFC57CA82A, 0xDAEE582CB9D96768}; // -(1.79769e+308L * 2.0L) + maple::uint64 test4[2] = {0x0000000000000000, 0x0000000000000000}; // 0.0 + maple::uint64 test5[2] = {0x8000000000000000, 0x0000000000000000}; // -0.0 + maple::uint64 test6[2] = {0x3B50EA65D1273BF9, 0x75C078537313CB91}; // 2.22507e-361L + + double test1_d = maple::MIRFloat128Const(test1, ref).GetDoubleValue(); + double test2_d = maple::MIRFloat128Const(test2, ref).GetDoubleValue(); + double test3_d = maple::MIRFloat128Const(test3, ref).GetDoubleValue(); + double test4_d = maple::MIRFloat128Const(test4, ref).GetDoubleValue(); + double test5_d = maple::MIRFloat128Const(test5, ref).GetDoubleValue(); + double test6_d = maple::MIRFloat128Const(test6, ref).GetDoubleValue(); + + EXPECT_EQ(*reinterpret_cast(&test1_d), 0x3FF6666666666666); + EXPECT_EQ(*reinterpret_cast(&test2_d), 0x7FF0000000000000); + EXPECT_EQ(*reinterpret_cast(&test3_d), 0xFFF0000000000000); + EXPECT_EQ(*reinterpret_cast(&test4_d), 0x0000000000000000); + EXPECT_EQ(*reinterpret_cast(&test5_d), 0x8000000000000000); + EXPECT_EQ(*reinterpret_cast(&test6_d), 0x0000000000000000); +} + +// ##################### "GetDoubleValue" subnormal check ##################### +TEST(f128, f128GetDoubleValSubnormal) { + maple::MIRType testMIRType = maple::MIRType(maple::kTypeArray, maple::PTY_f128); + maple::MIRType &ref = testMIRType; + + maple::uint64 test1[2] = {0x3C00FFFFFFFFFFFF, 0xE7300F7053533502}; // 
2.225073858507201e-308L + maple::uint64 test2[2] = {0x3BF4FFDF579B31FF, 0xFFFFFFFFFFFFFFFF}; // 5.430955715315e-312L + maple::uint64 test3[2] = {0xBC00FFFFFFFFFFFF, 0xE7300F7053533502}; // -2.225073858507201e-308L + maple::uint64 test4[2] = {0xBBF4FFDF579B31FF, 0xFFFFFFFFFFFFFFFF}; // -5.430955715315e-312L + maple::uint64 test5[2] = {0x3BCD03132B9CF541, 0xC364E687DFD7A328}; // 5e-324L + maple::uint64 test6[2] = {0xBBCD03132B9CF541, 0xC364E687DFD7A328}; // -5e-324L + + double test1_d = maple::MIRFloat128Const(test1, ref).GetDoubleValue(); + double test2_d = maple::MIRFloat128Const(test2, ref).GetDoubleValue(); + double test3_d = maple::MIRFloat128Const(test3, ref).GetDoubleValue(); + double test4_d = maple::MIRFloat128Const(test4, ref).GetDoubleValue(); + double test5_d = maple::MIRFloat128Const(test5, ref).GetDoubleValue(); + double test6_d = maple::MIRFloat128Const(test6, ref).GetDoubleValue(); + + EXPECT_EQ(*reinterpret_cast(&test1_d), 0x000FFFFFFFFFFFFF); + EXPECT_EQ(*reinterpret_cast(&test2_d), 0x000000FFEFABCD98); + EXPECT_EQ(*reinterpret_cast(&test3_d), 0x800FFFFFFFFFFFFF); + EXPECT_EQ(*reinterpret_cast(&test4_d), 0x800000ffefabcd98); + EXPECT_EQ(*reinterpret_cast(&test5_d), 0x0000000000000001); + EXPECT_EQ(*reinterpret_cast(&test6_d), 0x8000000000000001); +} + +// ##################### "GetFloat128Value" special values check ##################### +TEST(f128, f64GetFloat128Value) { + // Check outcomes of MIRDoubleConst::GetFloat128Value + maple::MIRType doubleMIRType = maple::MIRType(maple::kTypeArray, maple::PTY_f64); + maple::MIRType &double_ref = doubleMIRType; + + std::pair test1 = {0x0000000000000000, 0x0}; // 0.0 + std::pair test2 = {0x8000000000000000, 0x0}; // -0.0 + std::pair test3 = {0x7fff000000000000, 0x0}; // infinity + std::pair test4 = {0xffff000000000000, 0x0}; // -infinity + std::pair test5 = {0x7fff800000000000, 0x0}; // NaN + + ASSERT_EQ(maple::MIRDoubleConst(0.0, double_ref).GetFloat128Value(), test1); + 
ASSERT_EQ(maple::MIRDoubleConst(-0.0, double_ref).GetFloat128Value(), test2); + ASSERT_EQ(maple::MIRDoubleConst(std::numeric_limits::infinity(), double_ref).GetFloat128Value(), test3); + ASSERT_EQ(maple::MIRDoubleConst(-std::numeric_limits::infinity(), double_ref).GetFloat128Value(), test4); + ASSERT_EQ(maple::MIRDoubleConst(std::numeric_limits::quiet_NaN(), double_ref).GetFloat128Value(), test5); + + // Check outcomes of MIRFloatConst::GetFloat128Value + maple::MIRType floatMIRType = maple::MIRType(maple::kTypeArray, maple::PTY_f32); + maple::MIRType &float_ref = floatMIRType; + + ASSERT_EQ(maple::MIRFloatConst(0.0, float_ref).GetFloat128Value(), test1); + ASSERT_EQ(maple::MIRFloatConst(-0.0, float_ref).GetFloat128Value(), test2); + ASSERT_EQ(maple::MIRFloatConst(std::numeric_limits::infinity(), float_ref).GetFloat128Value(), test3); + ASSERT_EQ(maple::MIRFloatConst(-std::numeric_limits::infinity(), float_ref).GetFloat128Value(), test4); + ASSERT_EQ(maple::MIRFloatConst(std::numeric_limits::quiet_NaN(), double_ref).GetFloat128Value(), test5); +} + +// ##################### "GetFloat128Value" check ##################### +TEST(f128, DoubleGetFloat128Value) { + // Check outcomes of MIRDoubleConst::GetFloat128Value + maple::MIRType doubleMIRType = maple::MIRType(maple::kTypeScalar, maple::PTY_f64); + maple::MIRType& double_ref = doubleMIRType; + + double test1 = 1.4; + double test2 = 1000000.1243456; + double test3 = -1.6; + double test4 = -12345678.9876543; + + std::pair test1_ans = {0x3fff666666666666, 0x6000000000000000}; + std::pair test2_ans = {0x4012e84803faa39f, 0xb000000000000000}; + std::pair test3_ans = {0xbfff999999999999, 0xa000000000000000}; + std::pair test4_ans = {0xc01678c29df9add3, 0x1000000000000000}; + + ASSERT_EQ(maple::MIRDoubleConst(test1, double_ref).GetFloat128Value(), test1_ans); + ASSERT_EQ(maple::MIRDoubleConst(test2, double_ref).GetFloat128Value(), test2_ans); + ASSERT_EQ(maple::MIRDoubleConst(test3, double_ref).GetFloat128Value(), 
test3_ans); + ASSERT_EQ(maple::MIRDoubleConst(test4, double_ref).GetFloat128Value(), test4_ans); + + // Check outcomes of MIRFloatConst::GetFloat128Value + maple::MIRType floatMIRType = maple::MIRType(maple::kTypeScalar, maple::PTY_f32); + maple::MIRType& float_ref = floatMIRType; + + float test5 = 1.4; + float test6 = 1000000.1243456; + float test7 = -1.6; + float test8 = -12345678.9876543; + + std::pair test5_ans = {0x3fff666666000000, 0x0}; + std::pair test6_ans = {0x4012e84804000000, 0x0}; + std::pair test7_ans = {0xbfff99999a000000, 0x0}; + std::pair test8_ans = {0xc01678c29e000000, 0x0}; + + ASSERT_EQ(maple::MIRFloatConst(test5, float_ref).GetFloat128Value(), test5_ans); + ASSERT_EQ(maple::MIRFloatConst(test6, float_ref).GetFloat128Value(), test6_ans); + ASSERT_EQ(maple::MIRFloatConst(test7, float_ref).GetFloat128Value(), test7_ans); + ASSERT_EQ(maple::MIRFloatConst(test8, float_ref).GetFloat128Value(), test8_ans); +} + +// ##################### "GetFloat128Value" subnormal check ##################### +TEST(f128, DoubleGetFloat128ValueSubnormal) { + // Check outcomes of MIRDoubleConst::GetFloat128Value + maple::MIRType doubleMIRType = maple::MIRType(maple::kTypeScalar, maple::PTY_f64); + maple::MIRType& double_ref = doubleMIRType; + + double test1 = 2.225073858507201e-308; + double test2 = -2.225073858507201e-308; + double test3 = 2.09514337455e-313; + double test4 = -2.09514337455e-313; + double test5 = 5e-324; + double test6 = -5e-324; + + std::pair test1_ans = {0x3c00ffffffffffff, 0xe000000000000000}; + std::pair test2_ans = {0xbc00ffffffffffff, 0xe000000000000000}; + std::pair test3_ans = {0x3bf03bf35ba62000, 0x0}; + std::pair test4_ans = {0xbbf03bf35ba62000, 0x0}; + std::pair test5_ans = {0x3bcd000000000000, 0x0}; + std::pair test6_ans = {0xbbcd000000000000, 0x0}; + + ASSERT_EQ(maple::MIRDoubleConst(test1, double_ref).GetFloat128Value(), test1_ans); + ASSERT_EQ(maple::MIRDoubleConst(test2, double_ref).GetFloat128Value(), test2_ans); + 
ASSERT_EQ(maple::MIRDoubleConst(test3, double_ref).GetFloat128Value(), test3_ans); + ASSERT_EQ(maple::MIRDoubleConst(test4, double_ref).GetFloat128Value(), test4_ans); + ASSERT_EQ(maple::MIRDoubleConst(test5, double_ref).GetFloat128Value(), test5_ans); + ASSERT_EQ(maple::MIRDoubleConst(test6, double_ref).GetFloat128Value(), test6_ans); + + // Check outcomes of MIRFloatConst::GetFloat128Value + maple::MIRType floatMIRType = maple::MIRType(maple::kTypeScalar, maple::PTY_f32); + maple::MIRType& float_ref = floatMIRType; + + float test7 = 1.17549e-38; + float test8 = -1.17549e-38; + float test9 = 6.17286e-41; + float test10 = -6.17286e-41; + float test11 = 1.4013e-45; + float test12 = -1.4013e-45; + + std::pair test7_ans = {0x3f80ffff84000000, 0x0}; + std::pair test8_ans = {0xbf80ffff84000000, 0x0}; + std::pair test9_ans = {0x3f79582600000000, 0x0}; + std::pair test10_ans = {0xbf79582600000000, 0x0}; + std::pair test11_ans = {0x3f6a000000000000, 0x0}; + std::pair test12_ans = {0xbf6a000000000000, 0x0}; + + ASSERT_EQ(maple::MIRDoubleConst(test7, float_ref).GetFloat128Value(), test7_ans); + ASSERT_EQ(maple::MIRDoubleConst(test8, float_ref).GetFloat128Value(), test8_ans); + ASSERT_EQ(maple::MIRDoubleConst(test9, float_ref).GetFloat128Value(), test9_ans); + ASSERT_EQ(maple::MIRDoubleConst(test10, float_ref).GetFloat128Value(), test10_ans); + ASSERT_EQ(maple::MIRDoubleConst(test11, float_ref).GetFloat128Value(), test11_ans); + ASSERT_EQ(maple::MIRDoubleConst(test12, float_ref).GetFloat128Value(), test12_ans); +} + +TEST(f128, Float128GettersAndFlags) { + maple::MIRType f128MIRType = maple::MIRType(maple::kTypeScalar, maple::PTY_f128); + maple::MIRType& f128_ref = f128MIRType; + + maple::uint64 test1[2] = {0x3fff666666666666, 0x6000000000000000}; + maple::uint64 test2[2] = {0x4012e84803faa39f, 0xb000000000000000}; + maple::uint64 test3[2] = {0xbfff999999999999, 0xa000000000000000}; + maple::uint64 test4[2] = {0xc01678c29df9add3, 0x1000000000000000}; + + 
maple::MIRFloat128Const test1_f128{test1, f128_ref}; + maple::MIRFloat128Const test2_f128{test2, f128_ref}; + maple::MIRFloat128Const test3_f128{test3, f128_ref}; + maple::MIRFloat128Const test4_f128{test4, f128_ref}; + + ASSERT_EQ(test1_f128.GetExponent(), 0x3fff); + ASSERT_EQ(test1_f128.GetSign(), 0); + + ASSERT_EQ(test2_f128.GetExponent(), 0x4012); + ASSERT_EQ(test2_f128.GetSign(), 0); + + ASSERT_EQ(test3_f128.GetExponent(), 0x3fff); + ASSERT_EQ(test3_f128.GetSign(), 1); + + ASSERT_EQ(test4_f128.GetExponent(), 0x4016); + ASSERT_EQ(test4_f128.GetSign(), 1); + + maple::uint64 test5[2] = {0x0000000000000000, 0x0}; // 0.0 + maple::uint64 test6[2] = {0x8000000000000000, 0x0}; // -0.0 + maple::uint64 test7[2] = {0x7fff000000000000, 0x0}; // infinity + maple::uint64 test8[2] = {0xffff000000000000, 0x0}; // -infinity + maple::uint64 test9[2] = {0x7fff800000000000, 0x0}; // NaN + + maple::MIRFloat128Const test5_f128{test5, f128_ref}; + maple::MIRFloat128Const test6_f128{test6, f128_ref}; + maple::MIRFloat128Const test7_f128{test7, f128_ref}; + maple::MIRFloat128Const test8_f128{test8, f128_ref}; + maple::MIRFloat128Const test9_f128{test9, f128_ref}; + + ASSERT_EQ(test5_f128.IsZero(), true); + ASSERT_EQ(test6_f128.IsZero(), true); + ASSERT_EQ(test7_f128.IsInf(), true); + ASSERT_EQ(test8_f128.IsInf(), true); + ASSERT_EQ(test9_f128.IsNan(), true); +} diff --git a/src/mapleall/test/int128_lexer.cpp b/src/mapleall/test/int128_lexer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..188a7312339a48d990e28cb9587aad9bb95a2cd0 --- /dev/null +++ b/src/mapleall/test/int128_lexer.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "gtest/gtest.h" +#include "int128_util.h" +#include "lexer.h" +#include "mempool_allocator.h" + +using namespace maple; + +TEST(MIRLexer, LexInt128Const) { + std::array init; + + // init MIRLexer + MemPool *funcDatatMp = memPoolCtrler.NewMemPool("mempool", true); + MapleAllocator funcDataMa(funcDatatMp); + MIRLexer lexer(nullptr, funcDataMa); + + // parse positive decimal constant + std::string decConstString = "295147905179352825855"; // 2^68 - 1 + lexer.PrepareForString(decConstString); + ASSERT_EQ(lexer.GetTokenKind(), TK_intconst); + IntVal decIntVal = lexer.GetTheInt128Val(); + init = {0xffffffffffffffff, 0xf}; + ASSERT_EQ(decIntVal, IntVal(init.data(), kInt128BitSize, /*sign*/ false)); + + // parse negative decimal constant + std::string negConstString = "-1"; + lexer.PrepareForString(negConstString); + ASSERT_EQ(lexer.GetTokenKind(), TK_intconst); + IntVal negIntVal = lexer.GetTheInt128Val(); + init = {0xffffffffffffffff, 0xffffffffffffffff}; + ASSERT_EQ(negIntVal, IntVal(init.data(), kInt128BitSize, /*sign*/ true)); + + // parse hex constant + std::string hexConstString = "0xabcdef0123456789abcdef0123456789"; + lexer.PrepareForString(hexConstString); + ASSERT_EQ(lexer.GetTokenKind(), TK_intconst); + IntVal hexIntVal = lexer.GetTheInt128Val(); + init = {0xabcdef0123456789, 0xabcdef0123456789}; + ASSERT_EQ(hexIntVal, IntVal(init.data(), kInt128BitSize, /*sign*/ false)); +} diff --git a/src/mapleall/test/int128_val_test.cpp b/src/mapleall/test/int128_val_test.cpp index 69501d56931f1b80026d85eeff55d7bcecad0e15..f1013afb26f25271981f6496bc0db5c52d8c1ab3 100644 --- 
a/src/mapleall/test/int128_val_test.cpp +++ b/src/mapleall/test/int128_val_test.cpp @@ -322,6 +322,8 @@ TEST(WideIntVal, Division) { ASSERT_EQ(sMin / (sMin + sOne), sOne); ASSERT_EQ(sMin % (sMin + sOne), -sOne); + + ASSERT_EQ(sMax / sMax, sOne); } TEST(WideIntVal, TruncExtend) { @@ -360,19 +362,19 @@ TEST(WideIntVal, TemporaryMethods) { // dumps std::stringstream ss; ss << sMin; - ASSERT_EQ(ss.str(), "0xL80000000000000000000000000000000"); + ASSERT_EQ(ss.str(), "0x80000000000000000000000000000000"); ss.str(std::string()); ss << uZero; - ASSERT_EQ(ss.str(), "0xL0"); + ASSERT_EQ(ss.str(), "0x0"); ss.str(std::string()); ss << sMax; - ASSERT_EQ(ss.str(), "0xL7fffffffffffffffffffffffffffffff"); + ASSERT_EQ(ss.str(), "0x7fffffffffffffffffffffffffffffff"); ss.str(std::string()); ss << uMax; - ASSERT_EQ(ss.str(), "0xLffffffffffffffffffffffffffffffff"); + ASSERT_EQ(ss.str(), "0xffffffffffffffffffffffffffffffff"); // IsOneSignificantWord ASSERT_EQ(sInt128(-1).IsOneSignificantWord(), true); diff --git a/src/mapleall/test/int_val_test.cpp b/src/mapleall/test/int_val_test.cpp index b6f169c0a6eaa9140b57c29309203a238de4c9ab..a7447afd580847afb1cf2c6231e029a0d925ced7 100644 --- a/src/mapleall/test/int_val_test.cpp +++ b/src/mapleall/test/int_val_test.cpp @@ -15,8 +15,19 @@ #include "mpl_int_val.h" +#include "gmock/gmock.h" #include "gtest/gtest.h" +using namespace maple; +using namespace testing; + +class MockOldIntVal : public IntVal { + public: + MockOldIntVal(uint64 val, uint16 bitWidth, bool isSigned) : IntVal(val, bitWidth, isSigned) { } + MOCK_METHOD1(GetSXTValue, int64(uint8)); + static constexpr uint8 wordBitSize = sizeof(uint64) * CHAR_BIT; +}; + TEST(IntVals, IncDec) { maple::IntVal uValInc(254, 8, false); maple::IntVal sValInc(126, 8, true); @@ -44,3 +55,29 @@ TEST(IntVals, IncDec) { ASSERT_EQ((sValDec--).GetExtValue(), -128); ASSERT_EQ(sValDec.GetExtValue(), 127); } + +#define DEFINE_CASE_GETSXTVALUE_BODY(size, width, value) \ + MockOldIntVal oldVal(value, width, true); 
\ + uint8 bitWidth = size ? size : width; \ + EXPECT_CALL(oldVal, GetSXTValue(size)) \ + .WillOnce(Return(static_cast(*oldVal.GetRawData()) << (MockOldIntVal::wordBitSize - bitWidth) \ + >> (MockOldIntVal::wordBitSize - bitWidth))); \ + EXPECT_EQ(IntVal(value, width, true).GetSXTValue(size), oldVal.GetSXTValue(size)) + +#define DEFINE_CASE_GETSXTVALUE(size, width, unsigned_value, signed_value) \ +TEST(IntVals, GetSXTValue_size_##size##_width_##width##_unsigned) { \ + DEFINE_CASE_GETSXTVALUE_BODY(size, width, unsigned_value); \ +} \ + \ +TEST(IntVals, GetSXTValue_size_##size##_width_##width##_signed) { \ + DEFINE_CASE_GETSXTVALUE_BODY(size, width, signed_value); \ +} + +DEFINE_CASE_GETSXTVALUE(0, 8, 0x7f, 0x80) +DEFINE_CASE_GETSXTVALUE(0, 16, 0x7fff, 0xffff) +DEFINE_CASE_GETSXTVALUE(0, 32, 0x70000000ULL, 0xffffffffULL) +DEFINE_CASE_GETSXTVALUE(0, 64, 0x7000000000000000ULL, 0x8000000000000000ULL) + +DEFINE_CASE_GETSXTVALUE(4, 8, 0x17, 0x18) +DEFINE_CASE_GETSXTVALUE(8, 8, 0x70, 0x80) +DEFINE_CASE_GETSXTVALUE(64, 64, 0x7000000000000000ULL, 0x8000000000000000ULL) diff --git a/src/mrt/maplert/include/literalstrname.h b/src/mrt/maplert/include/literalstrname.h index dbb5e7a3b6706bb013229e097fb1bb252a9e01c2..836de465d5a4cb68dc6e3a1bbb5effef8e10e967 100644 --- a/src/mrt/maplert/include/literalstrname.h +++ b/src/mrt/maplert/include/literalstrname.h @@ -21,7 +21,6 @@ const std::string kConstString = "_C_STR_"; const std::string kConstStringPtr = "_PTR_C_STR_"; const std::string kLocalStringPrefix = "L_STR_"; -constexpr int kConstStringLen = 7; class LiteralStrName { public: @@ -37,7 +36,8 @@ class LiteralStrName { } else { const char16_t *end = data + len; while (data < end) { - hash = (static_cast(hash) << 5) - hash + *data++; // calculate the hash code of data + // calculate the hash code of data + hash = (static_cast(hash) << 5) - hash + static_cast(*data++); } } return hash; @@ -46,6 +46,7 @@ class LiteralStrName { static std::string GetHexStr(const uint8_t *bytes, 
uint32_t len); static std::string GetLiteralStrName(const uint8_t *bytes, uint32_t len); static std::string ComputeMuid(const uint8_t *bytes, uint32_t len); + static constexpr int kConstStringLen = 7; }; #endif diff --git a/src/mrt/maplert/src/itab_util.cpp b/src/mrt/maplert/src/itab_util.cpp index eed9d9e291974b2f7e01de1c6d565ac09b141f70..27d15a1660be4729e58db276ce92a99970a1d3db 100644 --- a/src/mrt/maplert/src/itab_util.cpp +++ b/src/mrt/maplert/src/itab_util.cpp @@ -20,7 +20,7 @@ namespace maple { unsigned int DJBHash(const char *str) { unsigned int hash = 5381; // 5381: initial value for DJB hash algorithm - while (*str) { + while (*str != 0) { hash += (hash << 5) + static_cast(*str++); // 5: calculate the hash code of data } return (hash & 0x7FFFFFFF); diff --git a/testsuite/c_test/ast_test/AST0101-ReturnUnionInRegs/test.c b/testsuite/c_test/ast_test/AST0101-ReturnUnionInRegs/test.c index 875bba1b6495f9cd166739cd1f820aa1f703a22c..2dd8b745fa99b05e01eaba949a0cc0bd707fee71 100644 --- a/testsuite/c_test/ast_test/AST0101-ReturnUnionInRegs/test.c +++ b/testsuite/c_test/ast_test/AST0101-ReturnUnionInRegs/test.c @@ -12,14 +12,12 @@ test_union1_t g_union1; test_union2_t g_union2; test_union1_t func1(void) { - // CHECK: ldr d0 - // CHECK: ldr d1 + // CHECK: ld{{[rp]}} d{{[0-1]+}} return g_union1; } test_union2_t func2(void) { - // CHECK: ldr x0 - // CHECK: ldr x1 + // CHECK: ldr x{{[0-1]+}} return g_union2; } diff --git a/testsuite/c_test/ast_test/AST0105-atomic/atomic.c b/testsuite/c_test/ast_test/AST0105-atomic/atomic.c index 89b306099fe5e9f63bd7403f3e296bf73a1fdb9e..160ea4c7d2339bc84d1d398efc4b5bde600062dc 100644 --- a/testsuite/c_test/ast_test/AST0105-atomic/atomic.c +++ b/testsuite/c_test/ast_test/AST0105-atomic/atomic.c @@ -11,7 +11,7 @@ int main() { int *next = &b; enum OrderEnum order = MEMORY_ORDER_RELEAXED; // CHECK: LOC [[# FILENUM]] [[# @LINE + 2 ]] - // CHECK: intrinsiccallwithtype i32 C___atomic_store_n + // CHECK: intrinsiccallwithtype <* i32> 
C___atomic_store_n __atomic_store_n(&prev, (next), order); int v = 0; int count = 0; diff --git a/testsuite/c_test/ast_test/AST0135-FirstArgReturn/HelloWorld.c b/testsuite/c_test/ast_test/AST0135-FirstArgReturn/HelloWorld.c new file mode 100644 index 0000000000000000000000000000000000000000..88011dfacae892a6ee868a8b33478cfbb805837b --- /dev/null +++ b/testsuite/c_test/ast_test/AST0135-FirstArgReturn/HelloWorld.c @@ -0,0 +1,41 @@ +struct S{ + long a[4]; +}; +typedef struct S S; + +struct S1 { + int a; + // CHECK: @fp1 <* >,u8) void>> align(8), + // CHECK-NEXT: @fp2 <[2] <* >,u8) void>>> align(8), + S(*fp1)(char); + S(*fp2[2])(char); + int b; +}; + +union S2 { + int a; + // CHECK: @fp1 <* >,u8) void>> align(8), + // CHECK-NEXT: @fp2 <[2] <* >,u8) void>>> align(8), + S(*fp1)(char); + S(*fp2[2])(char); + int b; +}; + +// CHECK: func &f1 public firstarg_return (var %first_arg_return <* <$S>>, var %a u8) void +S f1(char a) { + S s; + return s; +} + +// CHECK: var $fp1 <* >,u8) void>> +S(*fp1)(char); +// CEHCK: var $fp2 <[2] <* >,u8) void>>> +S(*fp2[2])(char); + +int main() { + // CHECK: var %fp3_{{[0-9_]+}} <* >,u8) void>> + S(*fp3)(char); + // CHECK: var %fp4_{{[0-9_]+}} <[2] <* >,u8) void>>> + S(*fp4[2])(char); + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/ast_test/AST0135-FirstArgReturn/expected.txt b/testsuite/c_test/ast_test/AST0135-FirstArgReturn/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/ast_test/AST0135-FirstArgReturn/test.cfg b/testsuite/c_test/ast_test/AST0135-FirstArgReturn/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..dde2eceb19644f12eca3c2abe9a2df3f9ae9f66a --- /dev/null +++ b/testsuite/c_test/ast_test/AST0135-FirstArgReturn/test.cfg @@ -0,0 +1,3 @@ +ASTO0: +compile(HelloWorld) +cat HelloWorld.mpl | ${OUT_ROOT}/tools/bin/FileCheck HelloWorld.c diff --git 
a/testsuite/c_test/ast_test/AST0136-StmtMemberExpr/HelloWorld.c b/testsuite/c_test/ast_test/AST0136-StmtMemberExpr/HelloWorld.c new file mode 100644 index 0000000000000000000000000000000000000000..7a6363a80c59aeb910c77f43c828d29780a53213 --- /dev/null +++ b/testsuite/c_test/ast_test/AST0136-StmtMemberExpr/HelloWorld.c @@ -0,0 +1,13 @@ +struct X { + int a; +}; + +struct X funcA(int i) { + struct X x = {.a = i}; + return x; +} +int i = 10; +int main() { + funcA(i).a; + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/ast_test/AST0136-StmtMemberExpr/test.cfg b/testsuite/c_test/ast_test/AST0136-StmtMemberExpr/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0eead8f23e7f1ac0d170bcc6135fdc2dd060c397 --- /dev/null +++ b/testsuite/c_test/ast_test/AST0136-StmtMemberExpr/test.cfg @@ -0,0 +1 @@ +compile(HelloWorld) \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/expected.txt b/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/main.c b/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/main.c new file mode 100644 index 0000000000000000000000000000000000000000..27ca0647a38cd48f1a1a3e6e750c316cc7f75a1a --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/main.c @@ -0,0 +1,18 @@ +void abort (void); + +__thread int t0 = 0x10; +__thread int t1 = 0x10; + +int main (int argc, char **argv) +{ + // CHECK: mrs {{x[0-9]}}, tpidr_el0 + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, #:tprel_hi12:t0, lsl #12 + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, #:tprel_lo12_nc:t0 + // CHECK: mrs {{x[0-9]}}, tpidr_el0 + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, #:tprel_hi12:t1, lsl #12 + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, #:tprel_lo12_nc:t1 + if (t0 != t1) 
+ abort(); + + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/test.cfg b/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6aed7e358bd399a0bdc961520e33a897127ed27f --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0016-ftlsmodel-localexec/test.cfg @@ -0,0 +1,3 @@ +compile(APP=main.c,OPTION="--no-pic -ftls-model=local-exec --save-temps") +run(a) +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/expected.txt b/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/main.c b/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/main.c new file mode 100644 index 0000000000000000000000000000000000000000..577adfa9344847dbc54625af0b5076a3a2d46d8f --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/main.c @@ -0,0 +1,20 @@ +void abort (void); + +__thread int t0 = 0x10; +__thread int t1 = 0x10; + +int main (int argc, char **argv) +{ + // CHECK: adrp {{x[0-9]}}, :tlsdesc:t0 + // CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:tlsdesc_lo12:t0] + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, :tlsdesc_lo12:t0 + // CHECK-NEXT: .tlsdesccall t0 + // CHECK: adrp {{x[0-9]}}, :tlsdesc:t1 + // CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:tlsdesc_lo12:t1] + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, :tlsdesc_lo12:t1 + // CHECK-NEXT: .tlsdesccall t1 + if (t0 != t1) + abort(); + + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/test.cfg b/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/test.cfg new file mode 
100644 index 0000000000000000000000000000000000000000..c575d2dc6319804c3e8f242377f32cfd9c4a2165 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic/test.cfg @@ -0,0 +1,3 @@ +compile(APP=main.c,OPTION="-fPIC -ftls-model=local-dynamic --save-temps") +run(a) +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0018-ftlsmodel-initialexec/main.c b/testsuite/c_test/driver_test/DRIVER0018-ftlsmodel-initialexec/main.c new file mode 100644 index 0000000000000000000000000000000000000000..3dbdb562e72fff40d6d7506da32aa2e879217e4a --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0018-ftlsmodel-initialexec/main.c @@ -0,0 +1,12 @@ +#include + +__thread int a = 0; + +int main() { + // CHECK: mrs {{x[0-9]}}, tpidr_el0 + // CHECK-NEXT: adrp {{x[0-9]}}, :gottprel:a + // CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:gottprel_lo12:a] + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, {{x[0-9]}} + *(&a) = 1; + printf("%d\n", a); +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0018-ftlsmodel-initialexec/test.cfg b/testsuite/c_test/driver_test/DRIVER0018-ftlsmodel-initialexec/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..ce58de35aa6d2a963b941c23d2b664db80fc7a2c --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0018-ftlsmodel-initialexec/test.cfg @@ -0,0 +1,2 @@ +compile(APP=main.c,OPTION="-fPIC -ftls-model=initial-exec --save-temps -S") +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/expected.txt b/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/main.c 
b/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/main.c new file mode 100644 index 0000000000000000000000000000000000000000..577adfa9344847dbc54625af0b5076a3a2d46d8f --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/main.c @@ -0,0 +1,20 @@ +void abort (void); + +__thread int t0 = 0x10; +__thread int t1 = 0x10; + +int main (int argc, char **argv) +{ + // CHECK: adrp {{x[0-9]}}, :tlsdesc:t0 + // CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:tlsdesc_lo12:t0] + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, :tlsdesc_lo12:t0 + // CHECK-NEXT: .tlsdesccall t0 + // CHECK: adrp {{x[0-9]}}, :tlsdesc:t1 + // CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:tlsdesc_lo12:t1] + // CHECK-NEXT: add {{x[0-9]}}, {{x[0-9]}}, :tlsdesc_lo12:t1 + // CHECK-NEXT: .tlsdesccall t1 + if (t0 != t1) + abort(); + + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/test.cfg b/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b779f4081a065d96ea5d0b57576ebbe8dcb22365 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0019-ftlsmodel-globaldynamic/test.cfg @@ -0,0 +1,3 @@ +compile(APP=main.c,OPTION="-fPIC -ftls-model=global-dynamic --save-temps") +run(a) +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0020-fnosemanticinterposition1/main.c b/testsuite/c_test/driver_test/DRIVER0020-fnosemanticinterposition1/main.c new file mode 100644 index 0000000000000000000000000000000000000000..fe971dd32b2c93fd7d8e01b996e8ee3d954de70b --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0020-fnosemanticinterposition1/main.c @@ -0,0 +1,28 @@ +#include + +extern void externFunc(); +static int staticFunc() { + return 2; +} +__attribute((visibility("hidden"))) int hiddenFunc() { + return 8; +} +// CHECK-NOT: .set hiddenFunc.localalias, 
hiddenFunc + +int globalFunc(int a) { + return a + 1; +} +// CHECK: .set globalFunc.localalias, globalFunc + +int main() { + externFunc(); + // CHECK: bl externFunc + + printf("%d", staticFunc() + globalFunc(3) + hiddenFunc()); + // CHECK: bl staticFunc + // CHECK: bl globalFunc.localalias + // CHECK: bl hiddenFunc + // CHECK-NOT: bl hiddenFunc.localalias + + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0020-fnosemanticinterposition1/test.cfg b/testsuite/c_test/driver_test/DRIVER0020-fnosemanticinterposition1/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..08350ec2b7b20835c53175bc2b4d0d6ffce5acad --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0020-fnosemanticinterposition1/test.cfg @@ -0,0 +1,2 @@ +compile(APP=main.c,OPTION="-fPIC -fno-semantic-interposition -S") +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/a.c b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/a.c new file mode 100644 index 0000000000000000000000000000000000000000..014c2c9c95575007c0a887f3fc07f8514b2d8a33 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/a.c @@ -0,0 +1,3 @@ +int bar() { + return 666; +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/b.c b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/b.c new file mode 100644 index 0000000000000000000000000000000000000000..0f8012b42f94599336c21392bbbc558826736474 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/b.c @@ -0,0 +1,7 @@ +int bar() { + return 888; +} + +int foo() { + return bar(); +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/expected.txt 
b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..cd6be3717d12a0f79cc3c0e79637bc8adde60362 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/expected.txt @@ -0,0 +1 @@ +888 \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/main.c b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/main.c new file mode 100644 index 0000000000000000000000000000000000000000..297f377c72c12d6deb170950b0fcbd4c753ad7e7 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/main.c @@ -0,0 +1,9 @@ +#include + +extern int foo(); + +int main() { + int x = foo(); + printf("%d", x); + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/test.cfg b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..ac47920702b5edb6414e9099d0b53854021c7f5e --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0021-fnosemanticinterposition2/test.cfg @@ -0,0 +1,4 @@ +compile(APP=a.c,OPTION="-O0 -fPIC -shared -o a.so") +compile(APP=b.c,OPTION="-O0 -fPIC -fno-semantic-interposition -shared -o b.so") +compileWithGcc(APP=main.c,OPTION="-O0 -fPIC -L. 
-l:a.so -l:b.so") +run(a) \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0022-fpie-fnoplt/main.c b/testsuite/c_test/driver_test/DRIVER0022-fpie-fnoplt/main.c new file mode 100644 index 0000000000000000000000000000000000000000..f96e74206a8dbcdceb0cce644da12ef411fa1117 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0022-fpie-fnoplt/main.c @@ -0,0 +1,29 @@ +#include + +__attribute((visibility("hidden"))) int Bar1() { + return 7; +} + +int Bar2(int a) +{ + int b = 2; + return a + b; +} + +static int Bar3(int a) +{ + int b = 3; + return a + b; +} + +int main() +{ + int j = Bar1() + Bar2(4); + // CHECK: bl Bar1 + // CHECK: bl Bar2 + int k = Bar3(5); + // CHECK: bl Bar3 + printf("out = %d\n", j + k); + // CHECK: adrp {{x[0-9]}}, :got:printf + // CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:got_lo12:printf] +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0022-fpie-fnoplt/test.cfg b/testsuite/c_test/driver_test/DRIVER0022-fpie-fnoplt/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..40ffcc0f549f4cb689ac68c47daa808e133f711a --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0022-fpie-fnoplt/test.cfg @@ -0,0 +1,2 @@ +compile(APP=main.c,OPTION="-fPIE -fno-plt -S") +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0023-fpic-fnoplt/main.c b/testsuite/c_test/driver_test/DRIVER0023-fpic-fnoplt/main.c new file mode 100644 index 0000000000000000000000000000000000000000..f5ad0aab585339d9c6f7e617db6259738df7cbd9 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0023-fpic-fnoplt/main.c @@ -0,0 +1,30 @@ +#include + +__attribute((visibility("hidden"))) int Bar1() { + return 7; +} + +int Bar2(int a) +{ + int b = 2; + return a + b; +} + +static int Bar3(int a) +{ + int b = 3; + return a + b; +} + +int main() +{ + int j = Bar1() + Bar2(4); + // CHECK: bl Bar1 + // CHECK: adrp {{x[0-9]}}, :got:Bar2 + // 
CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:got_lo12:Bar2] + int k = Bar3(5); + // CHECK: bl Bar3 + printf("out = %d\n", j + k); + // CHECK: adrp {{x[0-9]}}, :got:printf + // CHECK-NEXT: ldr {{x[0-9]}}, [{{x[0-9]}}, #:got_lo12:printf] +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0023-fpic-fnoplt/test.cfg b/testsuite/c_test/driver_test/DRIVER0023-fpic-fnoplt/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0bcc76170724590c3ba82195c323e3d1bbb1945f --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0023-fpic-fnoplt/test.cfg @@ -0,0 +1,2 @@ +compile(APP=main.c,OPTION="-fPIC -fno-plt -S") +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0024-visibility1/main.c b/testsuite/c_test/driver_test/DRIVER0024-visibility1/main.c new file mode 100644 index 0000000000000000000000000000000000000000..420ddddbfc6f31ee68b1927c99c5823d6d096a92 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0024-visibility1/main.c @@ -0,0 +1,27 @@ +#include + +__attribute((visibility("hidden"))) extern void externFunc(); +// CHECK: .hidden externFunc +__attribute((visibility("protected"))) extern int protectedVar; +__attribute((visibility("hidden"))) extern int hiddenVar; + +static int staticFunc() { + return 2; +} +__attribute((visibility("protected"))) int protectedFunc() { + return 8; +} +// CHECK: .protected protectedFunc + +int globalFunc(int a) { + return a + 1; +} + +int main() { + externFunc(); + printf("%d", staticFunc() + globalFunc(3) + protectedFunc() + protectedVar + hiddenVar); + + return 0; +} +// CHECK: .protected protectedVar +// CHECK: .local hiddenVar \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0024-visibility1/test.cfg b/testsuite/c_test/driver_test/DRIVER0024-visibility1/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..91e500a7d4a90ab07c2c49c66833df6a7646e7dd --- 
/dev/null +++ b/testsuite/c_test/driver_test/DRIVER0024-visibility1/test.cfg @@ -0,0 +1,2 @@ +compile(APP=main.c,OPTION="-O0 -S") +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0025-visibility2/main.c b/testsuite/c_test/driver_test/DRIVER0025-visibility2/main.c new file mode 100644 index 0000000000000000000000000000000000000000..8f83b9c1bf2d7cb8d56ccc75d043d50aa60ca976 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0025-visibility2/main.c @@ -0,0 +1,29 @@ +#include + +extern void externFunc(); +// CHECK-NOT: .hidden externFunc +extern int a; +// CHECK-NOT: .hidden a + +static int staticFunc() { + return 2; +} +// CHECK-NOT: .hidden staticFunc + +__attribute((visibility("protected"))) int protectedFunc() { + return 8; +} +// CHECK: .protected protectedFunc +// CHECK-NOT: .hidden protectedFunc + +int globalFunc(int a) { + return a + 1; +} +// CHECK: .hidden globalFunc + +int main() { + externFunc(); + printf("%d", staticFunc() + globalFunc(3) + protectedFunc() + a); + + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0025-visibility2/test.cfg b/testsuite/c_test/driver_test/DRIVER0025-visibility2/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..fef9d6ff1ca865605f0e926bb34b83a1c1801713 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0025-visibility2/test.cfg @@ -0,0 +1,2 @@ +compile(APP=main.c,OPTION="-O0 -fvisibility=hidden -S") +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/driver_test/DRIVER0026-clang-option/expected.txt b/testsuite/c_test/driver_test/DRIVER0026-clang-option/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b18e512dba79e4c8300dd08aeb37f8e728b8dad --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0026-clang-option/expected.txt @@ -0,0 +1 @@ +hello world diff --git 
a/testsuite/c_test/driver_test/DRIVER0026-clang-option/main.c b/testsuite/c_test/driver_test/DRIVER0026-clang-option/main.c new file mode 100644 index 0000000000000000000000000000000000000000..2945166ca0c5014c6a9f1542c755e5958ea89179 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0026-clang-option/main.c @@ -0,0 +1,24 @@ +/* + * Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. + * + * Licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include +#include + +int main() +{ + printf("hello world\n"); + return 0; +} + diff --git a/testsuite/c_test/driver_test/DRIVER0026-clang-option/test.cfg b/testsuite/c_test/driver_test/DRIVER0026-clang-option/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..427b1c95493e016198e89dab84cd18f1cccb4135 --- /dev/null +++ b/testsuite/c_test/driver_test/DRIVER0026-clang-option/test.cfg @@ -0,0 +1,2 @@ +compile(APP=main.c,OPTION="-fworking-directory -fno-pch-preprocess -fno-extended-identifiers -no-integrated-cpp") +run(a) diff --git a/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/arith_check_func_input.c b/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/arith_check_func_input.c new file mode 100644 index 0000000000000000000000000000000000000000..0cfac161d176d1ebec4f8951b028f4efdcf87f68 --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/arith_check_func_input.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include +#include + +int f(int *p) { + return *p; +} + +int main(int argc, char **args) { + if(argc < 2) { + printf("need input offset\n"); + return -1; + } + int *p = (int*)malloc(sizeof(int) * 5); + if (p == NULL) { + return -2; + } + int offset = atoi(args[1]); + // CHECK: [[# @LINE + 2]] warning: can't prove the pointer >= the lower bounds after calculation + // CHECK: [[# @LINE + 1]] warning: can't prove the pointer < the upper bounds after calculation + f(p + offset); + return 0; +} diff --git a/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/expected.txt b/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..ae04e49b89ae7c99d6a66b0a9039fdecbd67bcaa --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/expected.txt @@ -0,0 +1,3 @@ +arith_check_func_input.c:35 error: the offset >= the upper bounds after pointer arithmetic! 
+qemu: uncaught target signal 11 (Segmentation fault) - core dumped +Segmentation fault diff --git a/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/test.cfg b/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..4e9374b9ea00baa8287e4dcb3a31645cd87c50a5 --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0147-arith_check_func_input/test.cfg @@ -0,0 +1,5 @@ +ENCO2_B_D_A_C: +compile(arith_check_func_input) +sort compile.log -t ':' -k 3 -n -o compile.log +cat compile.log | ${OUT_ROOT}/tools/bin/FileCheck arith_check_func_input.c +run_err(arg=5) diff --git a/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/arith_check_func_return.c b/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/arith_check_func_return.c new file mode 100644 index 0000000000000000000000000000000000000000..7cb1a1c00ac9b29285abe2c1ca59f0fa3169ff2b --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/arith_check_func_return.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include +#include + +int* f(int *p __attribute__((count(5))), int offset) { + // CHECK: [[# @LINE + 2]] warning: can't prove the pointer >= the lower bounds after calculation + // CHECK: [[# @LINE + 1]] warning: can't prove the pointer < the upper bounds after calculation + return (p + offset); + +} + +int main(int argc, char **args) { + if(argc < 2) { + printf("need input offset\n"); + return -1; + } + int *p = (int*)malloc(sizeof(int) * 5); + if (p == NULL) { + return -2; + } + int offset = atoi(args[1]); + printf("the nu:%d \n", *f(p, offset)); + return 0; +} diff --git a/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/expected.txt b/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..4ad7cb4478bb358899044e7fc843dc194c4ce5f0 --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/expected.txt @@ -0,0 +1,3 @@ +arith_check_func_return.c:22 error: the offset >= the upper bounds after pointer arithmetic! 
+qemu: uncaught target signal 11 (Segmentation fault) - core dumped +Segmentation fault \ No newline at end of file diff --git a/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/test.cfg b/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..cf100f32fcbefbffde5bdc828fbe144a7adc73af --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0148-arith_check_func_return/test.cfg @@ -0,0 +1,5 @@ +ENCO2_B_D_A_C: +compile(arith_check_func_return) +sort compile.log -t ':' -k 3 -n -o compile.log +cat compile.log | ${OUT_ROOT}/tools/bin/FileCheck arith_check_func_return.c +run_err(arg=5) diff --git a/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/arith_check_func_ptr.c b/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/arith_check_func_ptr.c new file mode 100644 index 0000000000000000000000000000000000000000..30904fcc49809cb3e41ad3b4cd9f60e0a5ea6419 --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/arith_check_func_ptr.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include +#include + +int sum(int a, int b) { + return a + b; +} + +int plus(int(*total)(int a, int b) __attribute__((count(1)))) { + int x = 1; + int y = 1; + return (*total)(x, y); +} + +int main(int argc, char **args) { + if(argc < 2) { + printf("need input offset\n"); + return -1; + } + int *p = (int*)malloc(sizeof(int) * 5); + if (p == NULL) { + return -2; + } + int offset = atoi(args[1]); + int (*f)(int, int) __attribute__((count(1)))= sum; + int a = plus((f + offset)); + printf("plus()的值为:%d\n", a); + return 0; +} diff --git a/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/expected.txt b/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..95806e5a084f85aa0cbbbc5e09526b9c698bbf0c --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/expected.txt @@ -0,0 +1,3 @@ +arith_check_func_ptr.c:40 error: the offset < lower bounds after pointer arithmetic! +qemu: uncaught target signal 11 (Segmentation fault) - core dumped +Segmentation fault \ No newline at end of file diff --git a/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/test.cfg b/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..3520ad8a201fd70d527dbc5ffd5dacd9225e81c7 --- /dev/null +++ b/testsuite/c_test/enhancec_test/ENC0149-arith_check_func_ptr/test.cfg @@ -0,0 +1,3 @@ +ENCO2_B_D_A_C: +compile(arith_check_func_ptr) +run_err(arg=0) diff --git a/testsuite/c_test/noinline_test/NOINLINE0001-vrp-noinline-longmax-check/test.cfg b/testsuite/c_test/noinline_test/NOINLINE0001-vrp-noinline-longmax-check/test.cfg index afedc065504897039cf062d6dcc3d61249cf3d0d..adc38f7035e7ffeb0f5140119077df1fef989f09 100644 --- a/testsuite/c_test/noinline_test/NOINLINE0001-vrp-noinline-longmax-check/test.cfg +++ b/testsuite/c_test/noinline_test/NOINLINE0001-vrp-noinline-longmax-check/test.cfg @@ -1,5 
+1,2 @@ -${OUT_ROOT}/tools/bin/clang -emit-ast --target=aarch64 -U __SIZEOF_INT128__ -isystem ${OUT_ROOT}/aarch64-clang-release/lib/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include -isystem ../lib/include -o vrp_longmax_check.ast vrp_longmax_check.c -${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl vrp_longmax_check.ast -o vrp_longmax_check.mpl -${OUT_ROOT}/aarch64-clang-release/bin/maple --run=me:mpl2mpl:mplcg --option="--O2 --quiet:--no-inline --O2 --quiet: --O2 --fpic --quiet --no-pie --verbose-asm" vrp_longmax_check.mpl -${OUT_ROOT}/tools/bin/aarch64-linux-gnu-gcc -o vrp_longmax_check.out vrp_longmax_check.s -lm +compile(vrp_longmax_check) run(vrp_longmax_check) diff --git a/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/nonull_test.c b/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/nonull_test.c index b61be33c8001643da78e3108e8256e5e0268f7a1..9495e71138c863a42df661025192e24d25b1486c 100644 --- a/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/nonull_test.c +++ b/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/nonull_test.c @@ -15,6 +15,8 @@ #include #include +int g; + __attribute__((returns_nonnull, nonnull)) int *testReturnPtr(int *ptr) { for (int i = 0; i < 10; ++i) { @@ -24,7 +26,9 @@ int *testReturnPtr(int *ptr) { } __attribute__((returns_nonnull)) -extern int *getNonnullPtr(); +int *getNonnullPtr() { + return &g; +} int* testy() { int *p = getNonnullPtr(); diff --git a/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/test.cfg b/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/test.cfg index 532676f161576047345330680bd0ef5856b385c8..b9afa80f1f10d6545b1f88dc2ea82253a6e8c8a2 100644 --- a/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/test.cfg +++ 
b/testsuite/c_test/noinline_test/NOINLINE0002-ipa-noinlne-proptype-check/test.cfg @@ -1,3 +1,3 @@ -ENCO2_N_D_NO_LINK: +ENCO2_N_D: compile(nonull_test) python3 ${TEST_BIN}/check.py --check=num --n=0 --str="callassertnonnull" --result=nonull_test.me.mpl diff --git a/testsuite/c_test/noinline_test/NOINLINE0003-ipa-noinlne-proptype-check/test.cfg b/testsuite/c_test/noinline_test/NOINLINE0003-ipa-noinlne-proptype-check/test.cfg index 0b0f421c31616caf9cafd6109b0d6108c100b012..23315ad41184e088d3d77a19e6d1c188ddefb563 100644 --- a/testsuite/c_test/noinline_test/NOINLINE0003-ipa-noinlne-proptype-check/test.cfg +++ b/testsuite/c_test/noinline_test/NOINLINE0003-ipa-noinlne-proptype-check/test.cfg @@ -1,3 +1,3 @@ -ENCO2_N_D_NO_LINK: +ENCO2_N_D: compile(nonull_test) python3 ${TEST_BIN}/check.py --check=num --n=3 --str="callassertnonnull" --result=nonull_test.me.mpl diff --git a/testsuite/c_test/noinline_test/NOINLINE0004-ipa-noinlne-proptype-check/test.cfg b/testsuite/c_test/noinline_test/NOINLINE0004-ipa-noinlne-proptype-check/test.cfg index 0519cbd7e35eab940425d22f61e9358489e1a357..64b42c024cb57b9528d82ea8639814473004a525 100644 --- a/testsuite/c_test/noinline_test/NOINLINE0004-ipa-noinlne-proptype-check/test.cfg +++ b/testsuite/c_test/noinline_test/NOINLINE0004-ipa-noinlne-proptype-check/test.cfg @@ -1,7 +1,2 @@ -${OUT_ROOT}/tools/bin/clang -emit-ast --target=aarch64 -U __SIZEOF_INT128__ -DC_ENHANCED -isystem ${OUT_ROOT}/aarch64-clang-release/lib/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include -isystem ../lib/include -o call_sideeffect.ast call_sideeffect.c - -${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl -npe-check-dynamic call_sideeffect.ast -o call_sideeffect.mpl - -${OUT_ROOT}/aarch64-clang-release/bin/maple --run=me:mpl2mpl:mplcg --option="-O2 --quiet:-O2 --skip-phase=inline:-O2 --fpic --quiet" --save-temps --npe-check-dynamic --infile 
call_sideeffect.mpl - -python3 ${TEST_BIN}/check.py --check=num --n=0 --str="callassertnonnull" --result=call_sideeffect.me.mpl +ENCO2_N_D: +compile_err(call_sideeffect) diff --git a/testsuite/c_test/noinline_test/NOINLINE0005-ipa-noinlne-clone-check/test.cfg b/testsuite/c_test/noinline_test/NOINLINE0005-ipa-noinlne-clone-check/test.cfg index 17964e6335ecf03dcb2896f9a801195e315d4f51..0339f0f182654bf1a9e84982b3aaacec5fc202e4 100644 --- a/testsuite/c_test/noinline_test/NOINLINE0005-ipa-noinlne-clone-check/test.cfg +++ b/testsuite/c_test/noinline_test/NOINLINE0005-ipa-noinlne-clone-check/test.cfg @@ -1,8 +1,4 @@ -${OUT_ROOT}/tools/bin/clang -emit-ast --target=aarch64 -U __SIZEOF_INT128__ -DC_ENHANCED -isystem ${OUT_ROOT}/aarch64-clang-release/lib/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include -isystem ../lib/include -o test_clone.ast test_clone.c - -${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl test_clone.ast -o test_clone.mpl - -${OUT_ROOT}/aarch64-clang-release/bin/maple --run=me:mpl2mpl:mplcg --option="-O2 --quiet:-O2 --skip-phase=inline --ipa-clone:-O2 --fpic --quiet" --save-temps --infile test_clone.mpl - +ENCO2_N_D: +compile(test_clone) python3 ${TEST_BIN}/check.py --check=num --n=6 --str="func &add.clone" --result=test_clone.me.mpl python3 ${TEST_BIN}/check.py --check=num --n=2 --str="func &sub.constprop" --result=test_clone.me.mpl diff --git a/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/expected.txt b/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/mineq1.c b/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/mineq1.c new file mode 100644 index 0000000000000000000000000000000000000000..4a7c18094f7c1ef935b3d73fa3d7378f7d079cfa --- /dev/null 
+++ b/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/mineq1.c @@ -0,0 +1,12 @@ +signed char a = 8; +unsigned char b = 4; +signed short c = 2; +unsigned short d = 1; +long double e = 8; + +void main() { + a -= e; + b += e; + c *= e; + d /= e; +} \ No newline at end of file diff --git a/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/test.cfg b/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c6cc8e5728ebe633bec4f4ef0367e7c99bf61615 --- /dev/null +++ b/testsuite/c_test/sanity_test/SANITY0046-ld_char_sub/test.cfg @@ -0,0 +1,2 @@ +compile(mineq1) +run(mineq1) \ No newline at end of file diff --git a/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/expected.txt b/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/fmaxminl.c b/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/fmaxminl.c new file mode 100644 index 0000000000000000000000000000000000000000..bf3029af0fdd3884513fc7a1fbdf071f095de850 --- /dev/null +++ b/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/fmaxminl.c @@ -0,0 +1,72 @@ + +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * Description: + * - @TestCaseID: fmaxl + * - @TestCaseName: fmaxl + * - @TestCaseType: Function Testing + * - @RequirementID: SR.IREQ9939ff5a.001 + * - @RequirementName: C编译器能力 + * - @Design Description: + * - #本例选取输入参数的全组合进行测试 + * - @Condition: Lit test environment ready + * - @Brief: Test function fmaxl + * - #step1: Call Function + * - #step2: Check whether function returned value is expected + * - @Expect: Code exit 0. 
+ * - @Priority: Level 1 + */ + +#include +#include +#include + +#define ARG0_0 ((-1) * LDBL_MAX) +#define ARG0_1 LDBL_MAX +#define ARG0_2 2.333333L + +#define ARG1_0 ((-1) * LDBL_MAX) +#define ARG1_1 LDBL_MAX +#define ARG1_2 (-4.145213L) + +#define ESPILON 0.000001 + +int TEST(long double x, long double y, long double expMax, long double expMin) +{ + long double resMax = fmaxl(x, y); + long double resMin = fminl(x, y); + long double absResMax = (resMax - expMax) > 0 ? (resMax - expMax) : (expMax - resMax); + long double absResMin = (resMin - expMin) > 0 ? (resMin - expMin) : (expMin - resMin); + if (absResMax > ESPILON) { + printf("Error: expect max result is %Lf, but actual result is %Lf.\n", expMax, resMax); + return 0; + } + + if (absResMin > ESPILON) { + printf("Error: expect min result is %Lf, but actual result is %Lf.\n", expMin, resMin); + return 0; + } + + return 0; +} + +int main() +{ + int ret = 0; + ret += TEST(ARG0_0, ARG1_0, ARG1_0, ARG0_0); + ret += TEST(ARG0_0, ARG1_1, ARG1_1, ARG0_0); + ret += TEST(ARG0_0, ARG1_2, ARG1_2, ARG0_0); + + ret += TEST(ARG0_1, ARG1_0, ARG0_1, ARG1_0); + ret += TEST(ARG0_1, ARG1_1, ARG0_1, ARG1_1); + ret += TEST(ARG0_1, ARG1_2, ARG0_1, ARG1_2); + + ret += TEST(ARG0_2, ARG1_0, ARG0_2, ARG1_0); + ret += TEST(ARG0_2, ARG1_1, ARG1_1, ARG0_2); + ret += TEST(ARG0_2, ARG1_2, ARG0_2, ARG1_2); + return ret; +} + +// RUN: %CC %CFLAGS %s -lm -o %t.bin &> %t.build.log +// RUN: %SIMULATOR %SIM_OPTS %t.bin &> %t.run.log +// RUN: rm -rf %t.bin %t.build.log %t.run.log %S/Output/%basename_t.script \ No newline at end of file diff --git a/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/test.cfg b/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b7f34bc52a6b4e6c0855cebcef5c204cd479f3ed --- /dev/null +++ b/testsuite/c_test/sanity_test/SANITY0047-ld_max_min/test.cfg @@ -0,0 +1,2 @@ +compile(fmaxminl) +run(fmaxminl) diff --git 
a/testsuite/c_test/sanity_test/SANITY0048-is_infl/expected.txt b/testsuite/c_test/sanity_test/SANITY0048-is_infl/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/sanity_test/SANITY0048-is_infl/is_infl.c b/testsuite/c_test/sanity_test/SANITY0048-is_infl/is_infl.c new file mode 100644 index 0000000000000000000000000000000000000000..1b0b30655d9259778d012501e6131d176c9e4895 --- /dev/null +++ b/testsuite/c_test/sanity_test/SANITY0048-is_infl/is_infl.c @@ -0,0 +1,47 @@ +#include +#include + +#define ARG0 (-1.0) +#define ARG1 1.0 + +int TESTDouble(double x, int expV) +{ + int res = __builtin_isinf_sign (x); + if (res != expV) { + printf("Error: expect result is %d, but actual result is %d.\n", expV, res); + return 1; + } + return 0; +} + +int TESTFloat(float x, int expV) +{ + int res = __builtin_isinf_sign (x); + if (res != expV) { + printf("Error: expect result is %d, but actual result is %d.\n", expV, res); + return 1; + } + return 0; +} + +int TESTLongDouble(long double x, int expV) +{ + int res = __builtin_isinf_sign (x); + if (res != expV) { + printf("Error: expect result is %d, but actual result is %d.\n", expV, res); + return 1; + } + return 0; +} + +int main() +{ + int ret = 0; + ret += TESTDouble((double) (ARG0 / 0.0), -1); + ret += TESTDouble((double) (ARG1 / 0.0), 1); + ret += TESTFloat((float) (ARG0 / 0.0), -1); + ret += TESTFloat((float) (ARG1 / 0.0), 1); + ret += TESTLongDouble((long double) (ARG0 / 0.0), -1); + ret += TESTLongDouble((long double) (ARG1 / 0.0), 1); + return ret; +} \ No newline at end of file diff --git a/testsuite/c_test/sanity_test/SANITY0048-is_infl/test.cfg b/testsuite/c_test/sanity_test/SANITY0048-is_infl/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..a34e60840bd038cdeb85c0512589ba82fcdaceeb --- /dev/null +++ b/testsuite/c_test/sanity_test/SANITY0048-is_infl/test.cfg @@ -0,0 +1,2 @@ +compile(is_infl) 
+run(is_infl) diff --git a/testsuite/c_test/sanity_test/SANITY0049-va_args/expected.txt b/testsuite/c_test/sanity_test/SANITY0049-va_args/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/sanity_test/SANITY0049-va_args/test.cfg b/testsuite/c_test/sanity_test/SANITY0049-va_args/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..09d1f1d5ef5466991d9e876a2e040e65b96a9c76 --- /dev/null +++ b/testsuite/c_test/sanity_test/SANITY0049-va_args/test.cfg @@ -0,0 +1,2 @@ +compile(va_args) +run(va_args) diff --git a/testsuite/c_test/sanity_test/SANITY0049-va_args/va_args.c b/testsuite/c_test/sanity_test/SANITY0049-va_args/va_args.c new file mode 100644 index 0000000000000000000000000000000000000000..13fd6febe305fd7525ffa28bea2423dd3b9b8e38 --- /dev/null +++ b/testsuite/c_test/sanity_test/SANITY0049-va_args/va_args.c @@ -0,0 +1,41 @@ +#include +#include + +long double funct2(int NumArgs, ...) 
+{ + long double AddVal = 0.0; + va_list vl; + + va_start(vl, NumArgs); + while (NumArgs--) { + AddVal += va_arg(vl, long double); + } + + va_end(vl); + + return AddVal; +} + +int main() +{ + long double d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, expd, gotd; + d0 = 0.1e+1; + d1 = 1.02e+1; + d2 = 1.003e+1; + d3 = 1.0004e+1; + d4 = 1.00005e+1; + d5 = 1.000006e+1; + d6 = 1.0000007e+1; + d7 = 1.00000008e+1; + d8 = 1.000000009e+1; + d9 = 1.0000000000e+1; + expd = d0 + d1 + d2 + d3 + d4 + d5 + d6 + d7 + d8 + d9; + gotd = funct2(10,d0,d1,d2,d3,d4,d5,d6,d7,d8,d9); + + if (gotd != expd) { + printf("%Le, expect %Le\n", gotd, expd); + return 1; + } + + return 0; +} diff --git a/testsuite/c_test/struct_test/STRUCT0009-2023030814321/2023030814321.c b/testsuite/c_test/struct_test/STRUCT0009-2023030814321/2023030814321.c new file mode 100644 index 0000000000000000000000000000000000000000..b52efcce8fd567d11ff62d6d905ea34ed6263998 --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0009-2023030814321/2023030814321.c @@ -0,0 +1,29 @@ +#include + +void f_check_field_offset(void* ps, void* pf, int ofst) +{ + if ((((char*)ps) + ofst) != ((char*)pf)) { + printf("error\n"); + } else { + printf("ok\n"); + } +} + +struct empty { +}; + +struct BFu15i_Sf_BFu15i { + unsigned int v1:15; + struct empty v2; + unsigned int v3:15; +}; + +unsigned long long hide_ull(unsigned long long p) { return p; } + +int main() +{ + struct BFu15i_Sf_BFu15i a; + printf("sizeof(struct) = %lu\n", sizeof(struct BFu15i_Sf_BFu15i)); + f_check_field_offset(&a, &a.v2, 2);//预期v2的地址与a的地址偏移两字节 + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/struct_test/STRUCT0009-2023030814321/expected.txt b/testsuite/c_test/struct_test/STRUCT0009-2023030814321/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4d21ec048203f6a9fccf0a0e75785e255601e8d --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0009-2023030814321/expected.txt @@ -0,0 +1,2 @@ +sizeof(struct) = 4 +ok diff 
--git a/testsuite/c_test/struct_test/STRUCT0009-2023030814321/test.cfg b/testsuite/c_test/struct_test/STRUCT0009-2023030814321/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..e433367419fa2406091c472aee2fdd8a73b427b4 --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0009-2023030814321/test.cfg @@ -0,0 +1,2 @@ +compile(2023030814321) +run(2023030814321) diff --git a/testsuite/c_test/struct_test/STRUCT0010-2023031308745/2023031308745.c b/testsuite/c_test/struct_test/STRUCT0010-2023031308745/2023031308745.c new file mode 100644 index 0000000000000000000000000000000000000000..d4992dfa5c73b0c18a31af1d36db3b757866ba56 --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0010-2023031308745/2023031308745.c @@ -0,0 +1,28 @@ +#include + +void f_check_field_offset(void* ps, void* pf, int ofst) +{ + if ((((char*)ps) + ofst) != ((char*)pf)) { + printf("error\n"); + } else { + printf("ok\n"); + } +} + +struct empty { +}; + +struct BFu15i_BFu15i_Sf { + unsigned int v1:15; + unsigned int v2:15; + struct empty v3; +}; + +int main() +{ + struct BFu15i_BFu15i_Sf a; + printf("sizeof(struct) = %lu\n", sizeof(struct BFu15i_BFu15i_Sf)); + f_check_field_offset(&a, &a.v3, 4);//预期v2的地址与a的地址偏移4字节 + // printf("address of a=%p, address of a.v3=%p\n", &a, &a.v3); + return 0; +} diff --git a/testsuite/c_test/struct_test/STRUCT0010-2023031308745/expected.txt b/testsuite/c_test/struct_test/STRUCT0010-2023031308745/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4d21ec048203f6a9fccf0a0e75785e255601e8d --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0010-2023031308745/expected.txt @@ -0,0 +1,2 @@ +sizeof(struct) = 4 +ok diff --git a/testsuite/c_test/struct_test/STRUCT0010-2023031308745/test.cfg b/testsuite/c_test/struct_test/STRUCT0010-2023031308745/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..25169dbbc55e598f73679ce0d9c7f1397876f291 --- /dev/null +++ 
b/testsuite/c_test/struct_test/STRUCT0010-2023031308745/test.cfg @@ -0,0 +1,2 @@ +compile(2023031308745) +run(2023031308745) diff --git a/testsuite/c_test/struct_test/STRUCT0011-2023031308171/2023031308171.c b/testsuite/c_test/struct_test/STRUCT0011-2023031308171/2023031308171.c new file mode 100644 index 0000000000000000000000000000000000000000..0aeff90f7f155258990a0817c32c138a8729008d --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0011-2023031308171/2023031308171.c @@ -0,0 +1,34 @@ +#include + +typedef struct empty {} empty; + +struct BFu17i_Sf_BFu15ll { + unsigned int v1:17; + struct empty v2; + unsigned long long v3:15; +}; + +union U { + unsigned long long v0; + struct BFu17i_Sf_BFu15ll v; +}; + +int main() +{ + union U u; + u.v0 = 0x0; + printf("sizeof(union) = %lu\n", sizeof(union U)); + u.v.v1 = 0x0;//v1值都是0. + u.v.v3 = ~(0x0);//v3值都是1 + unsigned char* s; + s = (unsigned char*)&u; + printf("byte 1 of u = %d\n", s[0]); + printf("byte 2 of u = %d\n", s[1]); + printf("byte 3 of u = %d\n", s[2]); + printf("byte 4 of u = %d\n", s[3]); + printf("byte 5 of u = %d\n", s[4]); + printf("byte 6 of u = %d\n", s[5]); + printf("byte 7 of u = %d\n", s[6]); + printf("byte 8 of u = %d\n", s[7]); + return 0; +} diff --git a/testsuite/c_test/struct_test/STRUCT0011-2023031308171/expected.txt b/testsuite/c_test/struct_test/STRUCT0011-2023031308171/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..10b17970821f1ddced3308cdf66ca203a3efe10d --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0011-2023031308171/expected.txt @@ -0,0 +1,9 @@ +sizeof(union) = 8 +byte 1 of u = 0 +byte 2 of u = 0 +byte 3 of u = 0 +byte 4 of u = 255 +byte 5 of u = 127 +byte 6 of u = 0 +byte 7 of u = 0 +byte 8 of u = 0 diff --git a/testsuite/c_test/struct_test/STRUCT0011-2023031308171/test.cfg b/testsuite/c_test/struct_test/STRUCT0011-2023031308171/test.cfg new file mode 100644 index 
0000000000000000000000000000000000000000..655e61de3de2196886f717095d6ae6b543e61f6e --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0011-2023031308171/test.cfg @@ -0,0 +1,2 @@ +compile(2023031308171) +run(2023031308171) diff --git a/testsuite/c_test/struct_test/STRUCT0012-2023030708359/2023030708359.c b/testsuite/c_test/struct_test/STRUCT0012-2023030708359/2023030708359.c new file mode 100644 index 0000000000000000000000000000000000000000..61afa42a4bf78ba01d962dda247d6e946a221028 --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0012-2023030708359/2023030708359.c @@ -0,0 +1,61 @@ +#include "csmith.h" +#include "stdalign.h" +#pragma pack(1) +struct S0 { + const signed f0; + signed : 0; + const volatile signed f1; + volatile signed f2; + int8_t f3; + signed f4; +}; + +struct S1 { + const signed f0; + signed : 1; + const volatile signed f1; + volatile signed f2; + int8_t f3; + signed f4; +}; + +struct S2 { + const signed f0; + const volatile signed f1; + volatile signed f2; + int8_t f3; + signed f4; +}; +#pragma pack() + +struct __attribute__((packed)) S3 { + unsigned int a:3; + unsigned int :0; + unsigned int b:7; + unsigned long c:21; +}; + +struct __attribute__((packed)) S4 { + unsigned int a:3; + unsigned int :8; + unsigned int b:7; + unsigned long c:21; +}; + +int main() { + struct S0 s0; + struct S1 s1; + struct S2 s2; + struct S3 s3; + struct S4 s4; + printf("S0 size: = %lu\n", sizeof(struct S0)); // 20 + printf("S0 align: = %lu\n", alignof(struct S0)); // 4 + printf("S1 size: = %lu\n", sizeof(struct S1)); // 18 + printf("S1 align: = %lu\n", alignof(struct S1)); // 1 + printf("S2 size: = %lu\n", sizeof(struct S2)); // 17 + printf("S2 align: = %lu\n", alignof(struct S2)); // 1 + printf("S3 size: = %lu\n", sizeof(struct S3)); // 8 + printf("S3 align: = %lu\n", alignof(struct S3)); // 4 + printf("S4 size: = %lu\n", sizeof(struct S4)); // 5 + printf("S4 align: = %lu\n", alignof(struct S4)); // 1 +} \ No newline at end of file diff --git 
a/testsuite/c_test/struct_test/STRUCT0012-2023030708359/expected.txt b/testsuite/c_test/struct_test/STRUCT0012-2023030708359/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..f3e316f0a60ac6fd4022e54fe81b63e01c53df29 --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0012-2023030708359/expected.txt @@ -0,0 +1,10 @@ +S0 size: = 20 +S0 align: = 4 +S1 size: = 18 +S1 align: = 1 +S2 size: = 17 +S2 align: = 1 +S3 size: = 8 +S3 align: = 4 +S4 size: = 5 +S4 align: = 1 diff --git a/testsuite/c_test/struct_test/STRUCT0012-2023030708359/test.cfg b/testsuite/c_test/struct_test/STRUCT0012-2023030708359/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..70497c0713424eeabf805da192e7c7f350e235ab --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0012-2023030708359/test.cfg @@ -0,0 +1,2 @@ +compile(2023030708359) +run(2023030708359) diff --git a/testsuite/c_test/struct_test/STRUCT0013-2023042404640/expected.txt b/testsuite/c_test/struct_test/STRUCT0013-2023042404640/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..9766475a4185a151dc9d56d614ffb9aaea3bfd42 --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0013-2023042404640/expected.txt @@ -0,0 +1 @@ +ok diff --git a/testsuite/c_test/struct_test/STRUCT0013-2023042404640/test.c b/testsuite/c_test/struct_test/STRUCT0013-2023042404640/test.c new file mode 100644 index 0000000000000000000000000000000000000000..f87d60eb87a1739943e2b4ceb04dc3e515f7710d --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0013-2023042404640/test.c @@ -0,0 +1,27 @@ + +#include "stdio.h" +#pragma pack(1) +struct S0 { + volatile long long f2; + signed : 0; + short f5; +}; + +struct S0 a[2] = { + {2, 1}, + {2, 1} +}; + +void f_check_struct_size(void* ps, void* pf, int ofst) +{ + if ((((char*)ps) + ofst) != ((char*)pf)) { + printf("error\n"); + } else { + printf("ok\n"); + } +} + + +int main() { + f_check_struct_size(&a[0], &a[1], 12); +} diff --git 
a/testsuite/c_test/struct_test/STRUCT0013-2023042404640/test.cfg b/testsuite/c_test/struct_test/STRUCT0013-2023042404640/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b6be19f1dee198d250d4b76ec9fcd5dfe41cdc3f --- /dev/null +++ b/testsuite/c_test/struct_test/STRUCT0013-2023042404640/test.cfg @@ -0,0 +1,2 @@ +compile(test) +run(test) diff --git a/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/expected.txt b/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..573541ac9702dd3969c9bc859d2b91ec1f7e6e56 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/expected.txt @@ -0,0 +1 @@ +0 diff --git a/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/main.c b/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/main.c new file mode 100644 index 0000000000000000000000000000000000000000..523e59ca40798a788ebe3adb070e1bef3c7d4728 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/main.c @@ -0,0 +1,52 @@ +#include + +int N; +int *t; + +int chk(int x, int y) +{ + int i; + int r; + + for (r=i=0; i<8; i++) { + r = r + t[x + 8*i]; + r = r + t[i + 8*y]; + if (x+i < 8 & y+i < 8) + r = r + t[x+i + 8*(y+i)]; + if (x+i < 8 & y-i >= 0) + r = r + t[x+i + 8*(y-i)]; + if (x-i >= 0 & y+i < 8) + r = r + t[x-i + 8*(y+i)]; + if (x-i >= 0 & y-i >= 0) + r = r + t[x-i + 8*(y-i)]; + } + return r; +} + +int go(int n, int x, int y) +{ + if (n == 8) { + N++; + return 0; + } + for (; y<8; y++) { + for (; x<8; x++) + if (chk(x, y) == 0) { + t[x + 8*y]++; + go(n+1, x, y); + t[x + 8*y]--; + } + x = 0; + } + return 0; +} + +int main() +{ + t = calloc(64, sizeof(int)); + go(0, 0, 0); + if(N != 92) + printf("%d\n", 1); + printf("%d\n", 0); + return 0; +} diff --git 
a/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/test.cfg b/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT00100-CGCFG-globalopt-ExtendShiftOptPattern/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/expected.txt b/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..d00491fd7e5bb6fa28c517a0bb32b8b506539d4d --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/expected.txt @@ -0,0 +1 @@ +1 diff --git a/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/main.c b/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/main.c new file mode 100644 index 0000000000000000000000000000000000000000..23c93ea89f0f000b02f78b65cbc8d035ec9f3e1e --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/main.c @@ -0,0 +1,17 @@ +#include +int g = 0; + +struct A { + int *a; +}; + +struct A foo() { + return (struct A){ &g }; +} + +int main () { + struct A x = foo(); + (*(x.a))++; + printf("%d\n", g); + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/test.cfg b/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0059-alias-callassigned-agg/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/expected.txt b/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/expected.txt new file mode 100644 index 
0000000000000000000000000000000000000000..3cc5e646d57ad4ecc1358ee7564816a1f7251fe2 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/expected.txt @@ -0,0 +1,13 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 diff --git a/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/func.c b/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/func.c new file mode 100644 index 0000000000000000000000000000000000000000..50e81fd21f1d0ef338a98c5aa461ce49b366ad1b --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/func.c @@ -0,0 +1,56 @@ +/* +yarpgen version 2.0 (build on 2021:05:18) +Seed: 1737115969 +Invocation: /home/jenkins/workspace/MapleC_pipeline/MapleC_Yarpgen_pipeline/maplec-test/Public/tools/yarpgen/yarpgen -o /home/jenkins/workspace/MapleC_pipeline/MapleC_Yarpgen_pipeline/maplec-test/../Report/LangFuzz/report/1677097781_5979/src --std=c +*/ +short var_0 = (short)27984; +int var_1 = -602765228; +short var_2 = (short)18531; +unsigned short var_3 = (unsigned short)53065; +unsigned char var_4 = (unsigned char)137; +unsigned int var_5 = 981406895U; +int var_6 = 642968968; +unsigned long long int var_7 = 6761959113873782143ULL; +short var_8 = (short)12146; +short var_9 = (short)14376; +unsigned short var_10 = (unsigned short)9521; +unsigned short var_11 = (unsigned short)28341; +unsigned int var_12 = 3424791700U; +unsigned int var_13 = 3326626420U; +int var_14 = 1580706874; +_Bool var_49 = (_Bool)1; +unsigned long long int var_67 = 4834657044061192081ULL; +int arr_147 [14] [10] [23] [16] [14] ; + + +__attribute__((noinline)) +void test(short var_0, int var_1, short var_2, unsigned short var_3, unsigned char var_4, unsigned int var_5, int var_6, unsigned long long int var_7, short var_8, short var_9, unsigned short var_10, unsigned short var_11, unsigned int var_12, unsigned int var_13, int var_14, int arr_147 [14] [10] [23] [16] [14]) { + var_49 = ((/* implicit */_Bool) ((((/* implicit */int) ((((/* implicit */unsigned int) ((/* implicit */int) var_3))) > 
(var_12)))) == (((((/* implicit */int) (unsigned short)65535)) ^ (-1530240743))))); + + for(short i_28 = 0; i_28 < 1; i_28 += 4) { + for(short i_29 = 0; i_29 < 1; i_29 += 3) { + for(short i_35 = 0; i_35 < 1; i_35 += 1) { + for(short i_36 = 0; i_36 < 1; i_36 += 4) { + // +// The original type of var_12 is uint. In the source code, it will be converted to int. Since i_38 subsequently accesses the array arr147 as a subscript, the type conversion will be performed. The corresponding conversion statement is cvt (i64, i32) (u32 var_12). Because ivopt uses u32 instead of i32 when creating cvt, the wrong type conversion is performed. + for (int i_38 = ((((/* implicit */int) var_8)) - (12146))/*0*/; i_38 < 13/*13*/; i_38 += ((((/* implicit */int) var_12)) + (870175597))/*1*/) + { + if (((/* implicit */_Bool) ((((/* implicit */_Bool) (((_Bool)1) ? (1383282605) : (-2147483630)))) ? (((/* implicit */long long int) ((((/* implicit */_Bool) -743261339)) ? (((/* implicit */int) (signed char)-2)) : (((/* implicit */int) (short)-20123))))) : (((((/* implicit */_Bool) 31)) ? (7184191891616795253LL) : (((/* implicit */long long int) ((/* implicit */int) (unsigned char)6)))))))) + { printf("%d\n", i_38); // When ivopt inserts the wrong cvt, the output of 38 is not in range [0,12]. + var_67 = ((/* implicit */unsigned long long int) ((((((/* implicit */_Bool) 1563014865U)) ? (((/* implicit */int) (signed char)-101)) : (((/* implicit */int) (unsigned char)168)))) >= (arr_147 [i_28] [i_29] [i_35] [i_36] [i_38]))); + + } + + } + } + } + } + } +} + +int main() { + test(var_0, var_1, var_2, var_3, var_4, var_5, var_6, var_7, var_8, var_9, var_10, var_11, var_12, var_13, var_14, arr_147); + return 0; +} + +// When ivopt inserts the wrong cvt, the output of 38 is not in range [0,12]. 
diff --git a/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/test.cfg b/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..df9a2eb8d2b77c4f4f728cf89894f7aaf625e06a --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0078-ivopt-cvt/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(func) +run(func) diff --git a/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/expected.txt b/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/func.c b/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/func.c new file mode 100644 index 0000000000000000000000000000000000000000..980186de6d9d7f8f39411ca57bd78a49561a904d --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/func.c @@ -0,0 +1,33 @@ +#include +uint32_t ui_0 = 9; +uint8_t uc_1 = 0; +uint64_t uli_3 = 0; +int16_t s_6 = 3; +uint16_t us_7 = 0; +uint8_t uc_8 = 1; +uint32_t fn1(int32_t i_12) { + int li_13 = 0, uli_11 = 0; + uint8_t *ptr_14 = &uc_1; + int32_t i_15 = 2; + int64_t *ptr_16 = &li_13; + for (*ptr_14 = 6; *ptr_14 <= 82; uli_11++) { + int16_t *ptr_17 = &s_6; + lblEB190337: + for (us_7 = 4; uc_8 <= 9; i_15++) + for (ui_0 = 30; ui_0 <= 68; ui_0++) { + uint32_t *ptr_21 = &ui_0; + int64_t li_23 = 2; + if (((2 &&*ptr_17 ? uc_1 ^= 0 : 1 && *ptr_21) + ? 0 >= uli_3 ? 0 : *ptr_17 == li_23 + : 0) > (i_12 < 0 & (*ptr_17 ? 
*ptr_16 : uc_8) || 0)) { + uint32_t **ptr_24 = &ptr_21; + } + } + for (*ptr_17 = 5; *ptr_14 <= 8; *ptr_14 = 1) + ; + } +} + +int main() { + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/test.cfg b/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..df9a2eb8d2b77c4f4f728cf89894f7aaf625e06a --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0079-optimizeCFG-delete-emptybb/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(func) +run(func) diff --git a/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/colorRA-spillSize.c b/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/colorRA-spillSize.c new file mode 100644 index 0000000000000000000000000000000000000000..be87baf776519785a30745d0f5aa555186f1ed00 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/colorRA-spillSize.c @@ -0,0 +1,53 @@ +#include +long long a; +int b, d, f; +short e, g; +_Bool i[][23][23][10][21]; +unsigned short aa[][23][23][10][21]; +unsigned short j[][15][21]; +unsigned char k[][15][16]; +long long l[][15][16][22]; +int m[]; +char n[]; +short o[][5][0]; +void fn1(long long *a, int c) { *a = c; } +void p(int, long long, unsigned short, _Bool, int, unsigned long long, + signed char, unsigned, short, unsigned char, _Bool, + _Bool[][23][23][10][21], unsigned short[][23][23][10][21], + unsigned short[][15][21], unsigned char[][15][16], + long long[][15][16][22]); +int main() { + p(5, b, 80, 1, 64392445, 725817855635, 5, 70259, 2, 143, 0, i, aa, j, k, l); + fn1(&a, f); + printf("%llu\n", a); +} +#define ab(q, r) q +#define ac(q, r) q < r ? q : 0 +void p(int s, long long b, unsigned short u, _Bool t, int w, + unsigned long long v, signed char y, unsigned x, short ad, + unsigned char ae, _Bool af, _Bool i[][23][23][10][21], + unsigned short aa[][23][23][10][21], unsigned short j[][15][21], + unsigned char k[][15][16], long long l[][15][16][22]) { + d = d ? 
(long)ad : 0; + for (long ag = ae - 143LL; ag; ag += 3) + for (char ah = 0; ah < (char)b; ah += 3) + for (char ai = 0; ai < ab((long)(0 ? 0 : s), ); ai += 9) { + for (; g;) + for (long aj = 0; aj < 037045; aj += 4) + m[g] = aa[ag][ah][ai][g][aj]; + n[ag + ah + ai] = 7 ? ad : 0; + } + for (; ac(e, ae);) + ; + for (int ak = 0; ak < 7; ak += 2) + for (char al = 0; al < 5; al += 3) { + f = u; + for (char am = 0; am < s; am += 4) + for (long an = 0; an < 2; an += 4) + for (short ao = 0; ao < 3; ao += 2) + for (short ap = (v ?: b) + w; ap < 0; ap += e) + for (long aq = 0; aq < t + 3ULL; aq += 2) + o[ak][al][2] = (w ?: x) + (ac(af, y)); + } +} + diff --git a/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/expected.txt b/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..d15a2cc44e31b14c65264271b81072e5ba0e1634 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/expected.txt @@ -0,0 +1 @@ +80 diff --git a/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/test.cfg b/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..ff46abc9fdb1efebbf3d0c2996c44016f1c85a20 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0080-colorRA-spillSize/test.cfg @@ -0,0 +1,2 @@ +compile(colorRA-spillSize) +run(colorRA-spillSize) diff --git a/testsuite/c_test/unit_test/UNIT0084-ivopt-str-post-inc/hpf_init.mpl b/testsuite/c_test/unit_test/UNIT0084-ivopt-str-post-inc/hpf_init.mpl new file mode 100644 index 0000000000000000000000000000000000000000..2a86793db216d58acd17b98fb9dc1de30b5cb251 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0084-ivopt-str-post-inc/hpf_init.mpl @@ -0,0 +1,6721 @@ +flavor 1 +srclang 1 +id 65535 +numfuncs 4706 +var $g_mbufGlobalCtl extern <* <$HpeMbufGlobalCtl>> used +var $g_hpfHlogModId u32 used = 255 +var $g_hpfHlogForcePrint extern u8 used +var $g_hpfHlogLevel extern i8 used 
+type $imaxdiv_t +type $_IO_FILE +type $_G_fpos64_t , + @__lldata i64 align(8), + @__align f64 align(8)}> +type $__va_list align(8), + @__gr_top <* void> align(8), + @__vr_top <* void> align(8), + @__gr_offs i32 align(4), + @__vr_offs i32 align(4)}> +type $_IO_cookie_io_functions_t ,<* i8>,u64) i64>> align(8), + @write <* ,<* i8>,u64) i64>> align(8), + @seek <* ,<* i64>,i32) i32>> align(8), + @close <* ) i32>> align(8)}> +type $div_t +type $ldiv_t +type $lldiv_t +type $__locale_struct +type $hpe_struct align(8), + @hpeDrv <* void> align(8), + @hpeCrypto <* void> align(8), + @hpeLcore <* void> align(8), + @hpeModm <* void> align(8), + @hpeNotify <* void> align(8)}> +type $sched_param > align(8), + @__reserved3 i32 align(4)}> +type $timespec +type $cpu_set_t align(8)}> +type $HpeAtom32 +type $HpeAtom16 +type $HpeAtom64 +type $HpeSpinlock +type $HpeSpinDelayStat align(8)}> +type $HpeSpinDelayConf +type $tagLflistEnds > volatile align(8), + @tail <* <$tagLflistNode>> volatile align(8), + @lock <$HpeSpinlock> align(4)}> +type $tagLflistNode > volatile align(8)}> +type $tagHpeListHead > align(8), + @next <* <$tagHpeListHead>> align(8)}> +type $HPE_DLL align(8), + @count u32 align(4)}> +type $HPE_DLL_NODE > align(8), + @pPrev <* <$HPE_DLL_NODE>> align(8), + @ulHandle u64 align(8)}> +type $hpe_slist_head > volatile align(8)}> +type $HpeHlistNode > align(8), + @pprev <* <* <$HpeHlistNode>>> align(8)}> +type $HpeHlistHead > align(8)}> +type $HpeMemDllNode align(8), + @size u32 align(4), + @flag u32 align(4), + @magic u32 align(4), + @padSize u32 align(4)}> +type $tagHpeMemzone , + @phys_addr u64 align(8), + @unnamed.3370 <$unnamed.3371> implicit align(8), + @len u64 align(8), + @socket_id i32 align(4), + @usedflags u32 align(4)}> +type $HpeRing align(8), + @name <[32] i8>, + @flags i32 align(4), + @ringSize u32 align(4), + @ringCapacity u32 align(4), + @ringMask u32 align(4), + @prod <$HpeRingHeadTail> align(4), + @cons <$HpeRingHeadTail> align(4)}> +type $HpeRingHeadTail 
+type $HpeRingControlBlock align(4), + @ringList <$tagHpeListHead> align(8)}> +type $tm align(8)}> +type $sigevent +type $itimerspec align(8), + @it_value <$timespec> align(8)}> +type $__pthread +type $pthread_attr_t align(8)}> +type $pthread_mutex_t align(8)}> +type $pthread_mutexattr_t +type $pthread_cond_t align(8)}> +type $pthread_condattr_t +type $pthread_rwlock_t align(8)}> +type $pthread_rwlockattr_t align(4)}> +type $pthread_barrier_t align(8)}> +type $pthread_barrierattr_t +type $__ptcb ) void>> align(8), + @__x <* void> align(8), + @__next <* <$__ptcb>> align(8)}> +type $tagHpeAtomicCntBmp align(8), + @pair <$tagHpeCntBmp> align(4)}> +type $tagHpeCntBmp +type $fd_set align(8)}> +type $timeval +type $__sigset_t align(8)}> +type $itimerval align(8), + @it_value <$timeval> align(8)}> +type $timezone +type $tagHpeEvent , + @nd i32 align(4), + @events u16 align(2), + @resEvents u16 align(2), + @unnamed.5852 <$unnamed.5851> implicit align(8), + @base <* <$tagHpeEventBase>> align(8), + @callback <$tagHpeEventCallback> align(8)}> +type $tagHpeEventBase > align(8), + @opsBacker <* void> align(8), + @activeQues <* <$HpeEventCallbackList>> align(8), + @nActiveQues i32 align(4), + @eventActiveCnt i32 align(4), + @eventCnt i32 align(4), + @runningFlag i32 align(4), + @curProcEvent <* <$tagHpeEvent>> align(8), + @levelTriggerList <$HpeEventCallbackList> align(8), + @breakFlag i32 align(4), + @lock <$HpeSpinlock> align(4), + @limitCallbacksAfterPrio i32 align(4), + @maxDispatchCallbacks i32 align(4), + @maxDispatchInterval <$timeval> align(8)}> +type $tagHpeEventCbFunc ) void>> align(8), + @cbArg <* void> align(8)}> +type $tagHpeEventCfg , + @nPriorities i32 align(4), + @limitCallbacksAfterPrio i32 align(4), + @maxDispatchCallbacks i32 align(4), + @maxDispatchInterval <$timeval> align(8)}> +type $tagHpeLcore > align(8), + @tid u32 align(4), + @base <* <$tagHpeEventBase>> align(8), + @chanId1 i32 align(4), + @chanId2 i32 align(4), + @drvWork <[128] <* void>> align(8), + 
@flushWork <* void> align(8), + @mngWork <* void> align(8), + @nRxPort u32 align(4), + @rxPortList <[64] u32> align(4), + @tmpAllocBitmap <[1] u64> align(8), + @tmpBuf <[98304] u8>}> +type $HpeMempoolOps align(64)>) i32>> align(8), + @free <* align(64)>) void>> align(8), + @getCount <* align(64)>) u32>> align(8), + @enqueue <* align(64)>,<* <* void>>,u32) i32>> align(8), + @dequeue <* align(64)>,<* <* void>>,u32) i32>> align(8), + @objIter <* align(64)>,<* align(64)>,<* void>,<* void>,u32) void>>,<* void>) i32>> align(8)}> +type $tagHpeMempool align(8), + @name <[32] i8>, + @unnamed.5856 <$unnamed.5855> implicit align(4), + @flags u32 align(4), + @opsType i32 align(4), + @poolData <* void> align(8), + @pool_config <* void> align(8), + @mz <* <$tagHpeMemzone>> align(8), + @socketId i32 align(4), + @privateDataSize u32 align(4), + @size u32 align(4), + @populatedSize u32 align(4), + @cacheSize u32 align(4), + @eltSize u32 align(4), + @headerSize u32 align(4), + @trailerSize u32 align(4), + @localCache <* <$HpeMempoolCache> align(64)> align(8), + @eltList <$tagHpeListHead> align(8), + @memChunk <$HpeMempoolMemChunk> align(8)}> +type $HpeMempoolOpsTable align(4), + @mpOpsClass <[4] <* <$HpeMempoolOps> align(64)>> align(8)}> +type $HPE_MEMPOOL_INFO > align(8)}> +type $HpeMempoolObjsz +type $HpeMempoolCache > align(8)}> +type $hpe_mbuf align(8), + @buf_physaddr u64 align(8), + @data_off u16 align(2), + @port u16 align(2), + @data_len u16 align(2), + @buf_len u16 align(2), + @pool <* void> align(8), + @next <* <$hpe_mbuf> align(64)> align(8), + @pkt_len u32 align(4), + @nb_segs u16 align(2), + @seqn u16 align(2), + @priv_size u16 align(2), + @refcnt <$HpeAtom16> align(2), + @qid u16 align(2), + @capCode u16 align(2), + @reorder_data u64 align(8), + @userdata <* void> align(8), + @sec_op <* void> align(8), + @reorder_flag u8, + @rsv2 u8, + @tx_port u16 align(2), + @last_worker u32 align(4), + @dynfield1 <[2] u64> align(8), + @unnamed.5871 <$unnamed.5859> implicit align(4), 
+ @unnamed.5872 <$unnamed.5867> implicit align(2), + @ol_flags u64 align(8), + @unnamed.5873 <$unnamed.5868> implicit align(8), + @pkt_next <* <$hpe_mbuf> align(64)> align(8)}> +type $HpeRwlock align(4)}> +type $tagHpeWfSpinlock align(4), + @counter <$HpeAtom32> align(4)}> +type $HpeRwlockT +type $HpeRWSpinStat +type $HpeCallSite align(8), + @line i32 align(4), + @func <* i8> align(8)}> +type $HpeDateTime align(4), + @time <$HpeTime> align(2), + @weekDay u8, + @second u32 align(4), + @milliSecond u64 align(8), + @secondSince1970 u32 align(4), + @timezoneOffset u32 align(4), + @summertimeOffset u32 align(4), + @isDST u32 align(4), + @summertimeOffTime u32 align(4), + @summertimeBeginDate <$HpeDate> align(4), + @summertimeBeginTime <$HpeTime> align(2), + @summertimeEndDate <$HpeDate> align(4), + @summertimeEndTime <$HpeTime> align(2), + @utcDate <$HpeDate> align(4), + @utcTime <$HpeTime> align(2), + @utcWeekDay u8, + @uTCSecondSince1970 u32 align(4), + @uTCMilliSecondSince1970 u64 align(8), + @lock <$HpeRwlock> align(4), + @uTCMilliSecondOffSince1970 i64 align(8), + @timeTscFreq u64 align(8), + @cyclePerMs u64 align(8)}> +type $HpeDate +type $HpeTime +type $tagTraceStatRoot > align(8)}> +type $tagTrace > align(8)}> +type $HpeMbufGlobalCtl > align(8)}> +type $HpeMbufPrivInfo align(4), + @MbufTrace <$unnamed.2048> align(8), + @IbcNpCarInfo <$unnamed.5875> align(4), + @zeroCopyReserve <[64] i8>}> +type $unnamed.2048 align(2), + @ownerWorkId u16 align(2), + @unnamed.2049 <$unnamed.2050> implicit align(2), + @ownerTid u32 align(4), + @isoNode u64 align(8)}> +type $unnamed.2050 +type $HpeMbufPoolParam , + @num u32 align(4), + @bufSize u32 align(4), + @privDataSize u32 align(4), + @cacheSize u32 align(4), + @socketId i32 align(4), + @opsName <* i8> align(8)}> +type $HpeBoardInfo , + @ibcPortMac <[6] i8>, + @selfBoardFlag u32 align(4), + @peerBoardID u32 align(4), + @mainBoardID u32 align(4), + @sourceMod u32 align(4), + @methMod u32 align(4), + @methPort u32 align(4), + 
@haveLpuBoard u8, + @haveSpuBoard u8, + @cpuType u8, + @haveRemoteGX u8, + @version u32 align(4), + @fullmesh u8, + @closeChnPri u8, + @mmpuUpdatedByFwm u8, + @stkEnable u32 align(4), + @rsv <[2] u8>, + @listNode <$tagHpeListHead> align(8), + @boardLock <$HpeSpinlock> align(4)}> +type $tagHpeLfhashTable > align(8), + @mem_alloc_func <* >> align(8), + @mem_free_func <* ) u32>> align(8), + @get_hash_index_func <* ) u32>> align(8)}> +type $tagHpeLfhashCreateInfo >> align(8), + @mem_free_func <* ) u32>> align(8), + @get_hashidx_func <* ) u32>> align(8)}> +type $tagHpeLfhashNode > volatile align(8), + @state u32 align(4), + @ref <$HpeAtom32> align(4)}> +type $tagLfhashArray > volatile align(8), + @lock <$HpeSpinlock> align(4), + @node_num u32 volatile align(4)}> +type $HpeStatModStrTag align(4), + @normalStatStr <* <* i8>> align(8), + @errStatStr <* <* i8>> align(8), + @modAddrNormal <* void> align(8), + @modAddrError <* void> align(8), + @maxNormalNum u32 align(4), + @maxErrNum u32 align(4), + @modId i32 align(4), + @name <[32] i8>}> +type $stat align(8), + @st_mtim <$timespec> align(8), + @st_ctim <$timespec> align(8), + @__unused <[2] u32> align(4)}> +type $file_handle }> +type $iovec align(8), + @iov_len u64 align(8)}> +type $HpfIpHdr , + @ip_stDst <$HpfInAddr>}> +type $HpfIp6Hdr , + @ip6_stSrc <$HpfIn6Addr>, + @ip6_stDst <$HpfIn6Addr>}> +type $unnamed.2295 , + @ip6_un2 <$unnamed.5879>}> +type $unnamed.2296 +type $HpeTmwbase > align(8), + @valid u8, + @intvltypemax u8, + @intvltypefree u8, + @bktspertmw u8, + @tmwBktBits u32 align(4), + @tmw <* <$HpeTmw>> align(8), + @jobs <* <$HpeTmwjob>> align(8), + @maxjobs u32 align(4), + @invalidjob u32 align(4), + @bktmaxlLen u32 align(4), + @tmwUse <$HpeTmwuse>, + @seq u32 align(4)}> +type $HpeTmwmsg +type $HpeTmwbkt +type $HpeTmwuse +type $HpeTmw align(4), + @astbkt <[128] <$HpeTmwbkt>> align(4)}> +type $HpfNatIntf >,<* <$HpfNatAgingParam>>,<* u8>) u32>> align(8), + @natRefresh <* align(64)>,<* <$HpfNatParam>>,<* 
<$HpfNatParamRet>>,<* u8>) u32>> align(8), + @natAddSuccess <* align(64)>,<* <$HpfNatParam>>,<* <$HpfNatParamRet>>,<* u8>) u32>> align(8), + @dNat <* align(64)>,<* <$HpfNatParam>>,<* <$HpfNatParamRet>>,<* u8>) u32>> align(8), + @sNat <* align(64)>,<* <$HpfNatParam>>,<* <$HpfNatParamRet>>,<* u8>) u32>> align(8)}> +type $HpfSessionKey implicit align(2), + @vpn u16 align(2), + @proto u8, + @resv1 u8}> +type $int8x8x2_t align(8)}> +type $int8x16x2_t align(16)}> +type $int16x4x2_t align(8)}> +type $int16x8x2_t align(16)}> +type $int32x2x2_t align(8)}> +type $int32x4x2_t align(16)}> +type $uint8x8x2_t align(8)}> +type $uint8x16x2_t align(16)}> +type $uint16x4x2_t align(8)}> +type $uint16x8x2_t align(16)}> +type $uint32x2x2_t align(8)}> +type $uint32x4x2_t align(16)}> +type $int64x1x2_t align(8)}> +type $uint64x1x2_t align(8)}> +type $int64x2x2_t align(16)}> +type $uint64x2x2_t align(16)}> +type $int8x8x3_t align(8)}> +type $int8x16x3_t align(16)}> +type $int16x4x3_t align(8)}> +type $int16x8x3_t align(16)}> +type $int32x2x3_t align(8)}> +type $int32x4x3_t align(16)}> +type $uint8x8x3_t align(8)}> +type $uint8x16x3_t align(16)}> +type $uint16x4x3_t align(8)}> +type $uint16x8x3_t align(16)}> +type $uint32x2x3_t align(8)}> +type $uint32x4x3_t align(16)}> +type $int64x1x3_t align(8)}> +type $uint64x1x3_t align(8)}> +type $int64x2x3_t align(16)}> +type $uint64x2x3_t align(16)}> +type $int8x8x4_t align(8)}> +type $int8x16x4_t align(16)}> +type $int16x4x4_t align(8)}> +type $int16x8x4_t align(16)}> +type $int32x2x4_t align(8)}> +type $int32x4x4_t align(16)}> +type $uint8x8x4_t align(8)}> +type $uint8x16x4_t align(16)}> +type $uint16x4x4_t align(8)}> +type $uint16x8x4_t align(16)}> +type $uint32x2x4_t align(8)}> +type $uint32x4x4_t align(16)}> +type $int64x1x4_t align(8)}> +type $uint64x1x4_t align(8)}> +type $int64x2x4_t align(16)}> +type $uint64x2x4_t align(16)}> +type $HpfMbufContext align(4), + @flag u32 align(4), + @flag_ext u32 align(4), + @egress_port u32 align(4), + @sb 
u16 align(2), + @sp u16 align(2), + @egress_phy_port u32 align(4), + @recv_phy_port u32 align(4), + @recv_port u32 align(4), + @vlan_key u16 align(2), + @vlan_in u16 align(2), + @ids_revert :1 u8, + @l2_ipsec_flag :1 u8, + @in_vlanmapp_flag :1 u8, + @phy_send :1 u8, + @mbuftr_debug_flag :1 u8, + @bIsHitWls :1 u8, + @iic_crc_flag :1 u8, + @trunk_mem_send :1 u8, + @trace4_debug_flag :2 u16 align(2), + @trace6_debug_flag :2 u16 align(2), + @trace_pkt_id :4 u16 align(2), + @trace_pkt_dir :2 u16 align(2), + @acl_recv_stat_flag :1 u16 align(2), + @atkFlag :1 u16 align(2), + @resv :4 u16 align(2), + @l2_head_length u8, + @tcp_flag u8, + @in_zone u16 align(2), + @out_zone u16 align(2), + @frame_flag <$HpfMbufFrameFlag> align(2), + @packet_key <$HpfFlowKey> align(8), + @eth_type u16 align(2), + @l3_hdr_offset u16 align(2), + @src_nat_ip u32 align(4), + @unnamed.5995 <$unnamed.5976> implicit align(4), + @dst_nat_ip u32 align(4), + @dst6NatIp <$HpfIn6Addr>, + @src_nat_port u16 align(2), + @dst_nat_port u16 align(2), + @dslite_tunnel_id u16 align(2), + @protocol u8, + @fwd_type u8, + @send_vrf u16 align(2), + @vsys_index u16 align(2), + @unnamed.5996 <$unnamed.5979> implicit align(4), + @user_id u32 align(4), + @app_data_len u16 align(2), + @icmp_type u8, + @icmp_code u8, + @icmpCheckSum u16 align(2), + @np_hashindex :31 u32 align(4), + @np_hash_isvaild :1 u32 align(4), + @icmpNsDstAdd <[4] u32> align(4), + @src_inst_id u8, + @dst_mac_type u8, + @vlan_pri :4 u8, + @hit_mail_cache :1 u8, + @blacklist :1 u8, + @whitelist :2 u8, + @vsys_inbound_handled :1 u8, + @vsys_outbound_handled :1 u8, + @vsys_entire_handled :1 u8, + @vsys_trans :1 u8, + @vsys_from_flag :1 u8, + @vsys_to_flag :1 u8, + @vsys_drop_flag :1 u8, + @vsys_handled_flag :1 u8, + @unnamed.5997 <$unnamed.5981> implicit align(4), + @not_used :3 u16 align(2), + @mac_learn_disable_act_drop :1 u16 align(2), + @flow_split :1 u16 align(2), + @trace_pkt_in_tag :1 u16 align(2), + @icmp_error_nated_flag :1 u16 align(2), + 
@hrp_mgt_fwd_pkt :1 u16 align(2), + @np_debug_flag :1 u16 align(2), + @hash_index_valid :1 u16 align(2), + @ah_esp_oper_flag :3 u16 align(2), + @cross_vsys_limit :3 u16 align(2), + @app_protocol u8, + @quota_flag :1 u8, + @ids_mode :1 u8, + @acl_stat_dir :2 u8, + @capt_flag :1 u8, + @reassemble :1 u8, + @from_vrp :1 u8, + @ike_acquire :1 u8, + @hash_index u16 align(2), + @rev_fwd_type u8, + @src_inst_type u8, + @user_group_id u16 align(2), + @ucNetworkType u8, + @tcp_head_length u8, + @usSrcSecGroupId u16 align(2), + @usDstSecGroupId u16 align(2), + @unnamed.5998 <$unnamed.5984> implicit align(4), + @unnamed.5999 <$unnamed.5988> implicit align(4), + @unnamed.6000 <$unnamed.5990> implicit align(4), + @logic_send_port u32 align(4), + @bwm_rule_index <[4] u16> align(2), + @pbr6rdflag :1 u8, + @pbrtpdnspro :1 u8, + @tpdnspro :1 u8, + @cgn_portrng_fwd :1 u8, + @hit_dslite_fullcone :1 u8, + @hit_nat_fullcone_portrange :1 u8, + @hit_dst_fullcone :1 u8, + @hit_src_fullcone :1 u8, + @nat_is_stmp :1 u8, + @resv1 :5 u8, + @acl_fwd_stat_flag :1 u8, + @tunnel_acl_stat :1 u8, + @rev_vrf u16 align(2), + @l2_pkt_fmt u8, + @replayCount u8, + @ipsec_pkt_length u16 align(2), + @stat_node u64 align(8), + @stat6_node u64 align(8), + @unnamed.6001 <$unnamed.5991> implicit align(8), + @l2_ipsec_outport u32 align(4), + @tos u32 align(4), + @l4_hdr_offset u16 align(2), + @um_dev_id u16 align(2), + @nat_pool_id u16 align(2), + @nat_pool_section_id u16 align(2), + @verify_tag u32 align(4), + @init_tag u32 align(4), + @unnamed.6002 <$unnamed.5992> implicit align(4), + @um_acs_type u8, + @portal_index :3 u8, + @transmit :1 u8, + @portal_type :4 u8, + @slb_sslflag :1 u16 align(2), + @slb_vservid :15 u16 align(2), + @Is_twamp :1 u16 align(2), + @Is_twamp_reflector :1 u16 align(2), + @twamp_sess_id :14 u16 align(2), + @unnamed.6003 <$unnamed.5994> implicit, + @user_mac <[6] u8>, + @np_type u8, + @np_flow_version u8, + @np_flow_deny u8, + @Is_TLSSip :1 u16 align(2), + @ipsec_tailused :1 u16 
align(2), + @l2tp_trace_debug_flag :1 u16 align(2), + @ipsec_df_bit :3 u16 align(2), + @clust_pat_transfer_end :1 u16 align(2), + @isArpNdCached :1 u16 align(2), + @clust_issu_version :8 u16 align(2), + @clust_node_id :4 u8, + @clust_bg_master_node :4 u8, + @clust_flow_need_circus :1 u8, + @clust_first_packet :1 u8, + @clust_flow_need_transfer :1 u8, + @clust_flow_need_nat :1 u8, + @clust_owner_id :4 u8, + @flow_id_ext u64 align(8), + @np_port_flag :8 u64 align(8), + @cp2np_opcode :8 u64 align(8), + @np_car_id0 :18 u64 align(8), + @np_car_id1 :18 u64 align(8), + @delta_car_len :8 u64 align(8), + @np_master :1 u64 align(8), + @np_ing_public :1 u64 align(8), + @np_resv :2 u64 align(8), + @ingress_hgport :8 u32 align(4), + @ucLQId :8 u32 align(4), + @ulStreamTag :16 u32 align(4), + @proc_id u32 align(4), + @opp_inst_id u8, + @ucAgileRefresh :1 u8, + @isCopyToSpu :1 u8, + @decapGreHead :1 u8, + @encapGreHead :1 u8, + @isBfdLinkBundle :1 u8, + @isIpv6LinkLocal :1 u8, + @resvChar :2 u8, + @ethHdrBak <$HpfEthHdr>, + @rsv1 u8, + @bsIngSAclDone :1 u8, + @bsEgrSAclDone :1 u8, + @bsIpv4Redirect :1 u8, + @bsL2L3DecodedFlag :1 u8, + @bsL3IngSAclDone :1 u8, + @bsL3Proc :1 u8, + @bsSAclRsv :2 u8, + @vsiIndex u16 align(2), + @srvSetInfo <$HpfMbufSrvSetInfo> align(4), + @u8TxFwdifTop :4 u8, + @u8RxFwdifTop :4 u8, + @fromCp :2 u8, + @bsMngIf :1 u8, + @bsRsvHost :1 u8, + @bsForbiddenFwd :1 u8, + @bsHostCached :1 u8, + @bsVpnCrossed :2 u8, + @appPktType u8, + @TxFwdif <[8] <* void>> align(8), + @RxFwdif <[8] <* void>> align(8), + @bridgeType u8, + @hostIngressType u8, + @appType u8, + @hostEgressType u8, + @vDev u16 align(2), + @reasonCode u16 align(2), + @subreasonCode u16 align(2), + @bridgeId u32 align(4), + @vlanTag <[2] <$HpfVlanBaseInfo>>, + @vlanTagNum u8, + @protoType u8, + @feTxPktType u8, + @hostDiagSwitch u8, + @lbFactor u32 align(4), + @tb u16 align(2), + @tp u16 align(2), + @fwdVlanIndex u32 align(4), + @fabricMid u32 align(4), + @tmMid u32 align(4), + @pruneVlan u16 
align(2), + @specialTpId u16 align(2), + @pruneIf u32 align(4), + @oeIfIndex u32 align(4), + @sysTimeMs u32 align(4), + @pktCos u8, + @pktPri u8, + @ldmFlag u8, + @moduleInfo u8, + @l2SocketPktType u8, + @l2SocketCauseType u8, + @mlagId u16 align(2), + @recvOrigPortCtrlIfIdx u32 align(4), + @recvPortCtrlIfIdx u32 align(4), + @l2SocketFd u32 align(4), + @l2SocketModuleID u8, + @l2SocketNextID u8, + @causeId u16 align(2), + @forceSendToLpu u8, + @debugFlag u32 align(4), + @msgId u8, + @arpHrdType u16 align(2), + @arpProtocolType u16 align(2), + @arpHrdLen u8, + @arpProtocolLen u8, + @opCode u16 align(2), + @arpSrcIp u32 align(4), + @arpDstIp u32 align(4), + @arpSrcMac <[6] u8>, + @arpDstMac <[6] u8>, + @isArpMissCache :1 u8, + @isNdMissCache :1 u8, + @peerlinkFlag :1 u8, + @mlagFlag :1 u8, + @isL3socket :1 u8, + @plcyPktTrace :1 u8, + @crossBroadFlag :1 u8, + @smallNpFlag :1 u8, + @priorityFlag u8, + @hpsFromHpfFlag u8, + @aiFabricFlag u8, + @stackRsv u8, + @dstPid u32 align(4), + @traceAddr <* void> align(8), + @ngsfHdrOffset u16 align(2), + @isSlowPath u16 align(2), + @recvTimeMic u32 align(4), + @saidType u8, + @tracePktFlag u8, + @tracePktInstance u8, + @vxlanEncapCnt u8, + @workIfIndex u32 align(4), + @svp u16 align(2), + @timeStamp u32 align(4), + @tracePrevCycle u64 align(8), + @tcpPcb <* void> align(8)}> +type $fwd_hook_param_s align(8), + @flow_s <* void> align(8), + @recv_if <* void> align(8), + @send_if <* void> align(8), + @dst_svrmap <* void> align(8), + @src_svrmap <* void> align(8), + @flow_sync_flag u32 align(4), + @dispatch_code u8, + @car_id u8, + @flags u8, + @fib_selected_index u8, + @drop_index u32 align(4), + @flow_bak_flag u32 align(4), + @usNatPoolID u16 align(2), + @usNatSectionID u16 align(2), + @ucGetPortTimes u8, + @flow_new_sesslog_flag :1 u8, + @carnat_noport :1 u8, + @bidnat_flag :1 u8, + @npflow_refresh :1 u8, + @npflow_finrst :1 u8, + @ucReserverd :3 u8, + @ulReserverd u16 align(2), + @fib_query_info <* void> align(8), + @fib_ret_info 
<* void> align(8), + @srcMacEntry <* void> align(8), + @dstMacEntry <* void> align(8), + @bwm_policy_ret <* void> align(8), + @twamp_rcv_stamp <* void> align(8), + @resev64 u64 align(8)}> +type $tagFwdIf align(4), + @phy <$tagPhyInfo> align(2), + @link <$tagLinkInfo> align(4), + @ipv4 <$tagIpv4Info> align(4), + @ipv6 <$tagIpv6Info> align(2), + @mpls <$tagMplsInfo>, + @l1IngSvc <$tagIngL1Fsvc> align(2), + @l3IngSvc <$tagIngL3Fsvc> align(4), + @l3EgrSvc <$tagEgrL3Fsvc>, + @arpfIfCfg <$ArpfIfCfg> align(4), + @l1EgrSvc <$tagEgrL1Fsvc> align(2), + @Qos <$tagSrvInfo> align(4), + @Tunnel <$tagTunnelInfo> align(4), + @ns <$tagNsInfo> align(4), + @stat <$FwdIfStat> align(8), + @multiCoreStat <$FwdIfMultiCoreStat> align(64), + @flowfwdIfIdx u32 align(4), + @flag u32 align(4), + @portPairIdx u32 align(4), + @subFwdIfNum <$HpeAtom32> align(4), + @subFwdIf <* <* <$tagFwdIf>>> align(8), + @decodeLinkType u8, + @outLinkType u8, + @outPhyType u8, + @state u8, + @vrId u32 align(4), + @portSecEnable :1 u32 align(4), + @sacPktRecvDir u8, + @resv23 :23 u32 align(4), + @rwlock <$HpeRwlock> align(4), + @name <[64] u8>, + @res1 <* void> align(8), + @ddos_appdata <* void> align(8), + @atkAppData <* void> align(8), + @res2 <* void> align(8), + @bwm_appdata <* void> align(8), + @res3 <* void> align(8), + @res4 <* void> align(8), + @defend_appdata <* void> align(8), + @resv5 u8, + @loopTmTp u8, + @loopTmQid u16 align(2), + @protoQid u16 align(2), + @protoLoopQid u16 align(2), + @srvmanage_value u8, + @redirect_next_hop u32 align(4), + @vsiIndex u16 align(2), + @res6 u16 align(2), + @ulGateWay u32 align(4), + @res7 u32 align(4), + @res8 u32 align(4), + @l2Idx u32 align(4), + @slotId u8, + @redirectPerPkt u8, + @redirectNextHop6 <$HpfIn6Addr>, + @res9 <[4] u32> align(4), + @res10 <[8] u32> align(4), + @phyFwdIfIdx u32 align(4), + @brId u32 align(4), + @version u32 align(4), + @sacCountId u16 align(2), + @rsv u16 align(2)}> +type $HpfPortIpv4Node +type $HpfPortIpv6Node , + @prefix u32 align(4), 
+ @ipv6Type u32 align(4), + @version u32 align(4), + @resv1 u32 align(4)}> +type $HpfIn6Addr }> +type $unnamed.3371 align(8), + @addr_64 u64 align(8)}> +type $HpfFlowKey align(4), + @raw <$unnamed.5359> align(8)}> +type $unnamed.3448 , + @dstMac <[6] u8>, + @ethType u16 align(2), + @bridgeId u16 align(2), + @unnamed.4623 <$unnamed.4624> implicit align(4), + @unnamed.4617 <$unnamed.4618> implicit align(4), + @vrfIndex u16 align(2), + @protocol u8, + @isL2 :1 u8, + @reserved :7 u8, + @srcPort u16 align(2), + @dstPort u16 align(2)}> +type $HpfMbufFrameFlag +type $HpfMbufSrvSetInfo +type $frame_param_s implicit align(8), + @out_port <* void> align(8), + @vrfIndex :15 u16 align(2), + @v6_gateway_flag :1 u16 align(2), + @frame_flag <$HpfMbufFrameFlag> align(2), + @vlan_if_id u16 align(2), + @mtu :14 u16 align(2), + @lpu_type :2 u16 align(2), + @inner_label u32 align(4), + @lsp_token u32 align(4), + @ingress_port u32 align(4), + @resv u32 align(4), + @ucresv u8, + @mac_version u8, + @mac_address <[6] u8>}> +type $HpfTcpHdr +type $HpfArpEntry align(2), + @stData <$HpfArpData> align(4), + @version u32 align(4), + @resv u32 align(4)}> +type $HpfArpTblEntry align(2), + @stData <$HpfArpTblData> align(4), + @version u32 align(4), + @resv u32 align(4)}> +type $tagCAP_ND_ENTRY_S align(4), + @stData <$HpfNdData> align(4), + @version u32 align(4), + @resv u32 align(4)}> +type $HpeLcoreMask align(8)}> +type $HpeRcuHeadType > align(8), + @func <* >) void>> align(8)}> +type $tagHpeRbtNode > align(8), + @lchild <* <$tagHpeRbtNode>> align(8), + @rchild <* <$tagHpeRbtNode>> align(8), + @color i32 align(4), + @dbg_key u64 align(8)}> +type $tagHpeRbTree > align(8)}> +type $tagHpeDpHashTable > align(8), + @pstNodeBaseAddr <* <$tagHpeDpHashBucket>> align(8), + @pstFreeListHead <* <$tagHpeDpHashBucket>> align(8), + @pstFreeListTail <* <$tagHpeDpHashBucket>> align(8), + @ulCurUsedNodeNum u32 align(4), + @ulError u32 align(4), + @ullAgeTimeId u64 align(8), + @ulTimerState u32 align(4), + 
@FreeListLock <$HpeRwlock> align(4)}> +type $tagHpeDpHashBucket > align(8), + @preNode <* <$tagHpeDpHashBucket>> align(8)}> +type $unnamed.3670_57_2 +type $unnamed.3671_63_2 +type $HpfAclRuleIdSet align(8), + @threshold u32 align(4), + @cmnSegCode u32 align(4), + @ruleStat <* <$HpfAclRuleStat>> align(8)}> +type $HpfAclRuleStat align(8), + @mask <[6] u64> align(8), + @validBit <[6] u64> align(8), + @validMask <[6] u64> align(8), + @validLen u32 align(4), + @tid u32 align(4), + @validSegCode u32 align(4)}> +type $HpfAclRuleList align(8), + @data <* u32> align(8)}> +type $HpfAclLinkList > align(8), + @tail <* <$tagHpfAclListNode>> align(8)}> +type $HpfAclJumpTable > align(8), + @ruleStat <* <$HpfAclRuleStat>> align(8), + @cutbitMask <[6] u64> align(8), + @pos <[6][64] u32> align(4), + @bucketSize u16 align(2), + @maxBinTreeDepth u16 align(2), + @updDupThreshold u16 align(2), + @cutbitsNum u16 align(2), + @isEmpty u16 align(2), + @jumpTableId u16 align(2), + @cutBits <[15] u16> align(2), + @searchJumptable <* <$tag_acl_search_jumptable>> align(8)}> +type $HpfAclSearchTable align(8), + @aclEntries u32 align(4), + @keyWidth u16 align(2), + @keyByLen u16 align(2), + @numJumpTable u32 align(4), + @numDtRules u16 align(2), + @ruleUintSize u16 align(2), + @dtHighPriority u32 align(4), + @dtRules <* u8> align(8), + @jumpTables <* <$tag_acl_search_jumptable>> align(8)}> +type $HpfAclTable align(8), + @memPoolId u32 align(4), + @tid :16 u32 align(4), + @isEmpty :2 u32 align(4), + @bucketSize :14 u32 align(4), + @maxBinTreeDepth u16 align(2), + @updDupThreshold u16 align(2), + @keyLen u16 align(2), + @numSeg u16 align(2), + @nextTableId u32 align(4), + @numNode u32 align(4), + @maxNumJumpTable u16 align(2), + @numJumpTable u16 align(2), + @ruleUnitSize u32 align(4), + @numRules u32 align(4), + @ruleIdBv <[320] u64> align(8), + @segcodeWeight <[20] u32> align(4), + @numSegCode u32 align(4), + @numGraphData u32 align(4), + @numDtRules u32 align(4), + @dtRules <* <$HpfAclRule>> 
align(8), + @pickedRuleIds <$HpfAclLinkList> align(8), + @graph <* void> align(8), + @graphDataSet <* void> align(8), + @segCodeClass <* void> align(8), + @CBRule_To_VMRule <* ,<* <$HpfAclRule>>) void>> align(8), + @Print_CBRule <* >) void>> align(8), + @jumpTables <* void> align(8)}> +type $HpfAclRule align(8), + @paddingData <[28] u64> align(8), + @keyLen u32 align(4), + @rangeMask <[4] u8>, + @jumpTableId u16 align(2), + @ruleId u32 align(4), + @subTreeRuleId u32 align(4), + @segCode u32 align(4), + @subTreeId u32 align(4)}> +type $HpfAclNodeResource align(8), + @bvCutBits <[6] u64> align(8), + @node <* void> align(8), + @ruleSet <* void> align(8)}> +type $HpfAclBinTree > align(8), + @searchRoot <* <$tagAclSearchNode>> align(8), + @statInfo <$HpfAclBinTreeStats> align(4), + @treeRuleSet <* <$HpfAclRuleList>> align(8), + @ruleStat <* <$HpfAclRuleStat>> align(8), + @updInfo <* <$HpfAclLeafUpdInfo>> align(8)}> +type $tagAclBinNode >> align(8), + @ruleIds <* u32> align(8), + @rules <* <$tagHpfAclSearchRule>> align(8), + @searchNode <* <$tagAclSearchNode>> align(8)}> +type $tagHpfAclSearchRule align(8)}> +type $HpfAclLeafUpdInfo > align(8), + @parent <* <$tagAclBinNode>> align(8), + @cutBit u32 align(4), + @treeRuleId u32 align(4), + @notEmpty u32 align(4)}> +type $HpfAclTableArrayState +type $HpeCapFwdtblDeepOps >,<* void>,<* void>,<* <* void>>) u32>> align(8), + @del <* >,<* void>,<* void>) u32>> align(8), + @update <* >,<* void>,<* void>,<* void>) u32>> align(8), + @search <* >,<* void>,<* <* void>>) u32>> align(8), + @init <* >) u32>> align(8), + @exit <* >) u32>> align(8), + @match <* >,<* void>,<* <* void>>) u32>> align(8)}> +type $HpeCapTbmCb > align(8)}> +type $HpeTbmRoot > align(8), + @memLock <$HpeSpinlock> align(4), + @memList <$tagHpeListHead> align(8), + @tbmMemCnt <$HpeAtom32> align(4), + @blockNum u32 align(4), + @tbmOpCnt <[54] <$HpeTbmOperate>> align(4), + @tbmOpFailCnt <[54] <$HpeTbmOperate>> align(4), + @tbmFailRet <[54] <$HpeTbmOperate>> 
align(4)}> +type $HpeFwdTblInfo align(2), + @tblSpec <$HpeFwdTblSpec> align(8), + @entNum u32 align(4), + @pBaseAddr <* void> align(8), + @unnamed.6130 <$unnamed.6129> implicit align(4)}> +type $HpeFwdtblTravContext +type $HpeTableLinearCmpParam ,<* void>) u1>> align(8), + @param <* void> align(8)}> +type $HpeTableEmListCmpParam ,<* void>) u1>> align(8), + @param <* void> align(8)}> +type $HpeTableEmListHead align(8), + @lock <$HpeRwlock> align(4)}> +type $HpeFwdTblSpec align(8), + @tblSpec u32 align(4), + @specExt u32 align(4), + @hashSize u32 align(4), + @verifyEnable u32 align(4)}> +type $HpeSrvmapField +type $HpeCapFibKey }> +type $tagCAP_RE_ENTRY_S implicit, + @magicNum u16 align(2), + @voiceRoute :1 u32 align(4), + @maskLen :6 u32 align(4), + @vcLabel :20 u32 align(4), + @ttlMode :1 u32 align(4), + @expAppoint :1 u32 align(4), + @exp :3 u32 align(4), + @nhpIndex u32 align(4), + @nhpIndex2 u32 align(4), + @siteNhpIndex u32 align(4), + @unnamed.6023 <$unnamed.6018> implicit align(4), + @cibIdx u32 align(4), + @frrIndex u32 align(4), + @reIndex u32 align(4), + @l3Vpn :1 u32 align(4), + @vpnVni :24 u32 align(4), + @ipsecUnrRoute :1 u32 align(4), + @fwFlag :2 u32 align(4), + @spr :1 u32 align(4), + @vcLabelValid :1 u32 align(4), + @resv1 :2 u32 align(4), + @version u32 align(4), + @unnamed.6024 <$unnamed.6021> implicit align(8), + @fwdFunc <* >,<* <$tagCAP_RE_ENTRY_S>>,<* <$HpfNextHopEntry>>) void>> align(8), + @fibKey <$HpeCapFibKey> align(2)}> +type $CAP_FIB6_KEY_S }> +type $tagCAP_RE6_ENTRY_S implicit align(4), + @re6Index u32 align(4), + @version u32 align(4), + @unnamed.6042 <$unnamed.6040> implicit align(8), + @fwdFunc <* >,<* <$tagCAP_RE6_ENTRY_S>>,<* <$tagHpfNhp6Entry>>) void>> align(8), + @fib6Key <$CAP_FIB6_KEY_S> align(2)}> +type $HpeAclGroup implicit align(8), + @aclTableTemp <$HpfAclTable> align(8), + @pSearchTable <* <$HpfAclSearchTable>> align(8), + @ruleSet <$HpfAclRuleSet> align(8), + @updHistoryArray <$HpfAclUpdHistory> align(8), + @actionIndex 
<$HpeFwdTblInfo> align(8), + @instanceId u32 align(4), + @groupId u32 align(4), + @groupType u16 align(2), + @keyWidth u16 align(2), + @version u32 align(4), + @resv u32 align(4)}> +type $HpeAclRuleBinaryEntry > align(2)}> +type $HpeMscTctlTravContext align(4), + @respDataLen u32 align(4), + @continueFlag u32 align(4), + @maxDataLen u32 align(4), + @oneDataLen u32 align(4), + @matchEn u32 align(4), + @param1 u32 align(4), + @param2 u32 align(4), + @param3 u32 align(4), + @param4 u32 align(4), + @param5 u32 align(4), + @param6 u32 align(4), + @param7 u32 align(4), + @param8 u32 align(4), + @param9 u32 align(4), + @param10 u32 align(4), + @param11 u32 align(4), + @param12 u32 align(4)}> +type $HpeFwdTblDef +type $HpeCapFwdtblOps >) u32>> align(8), + @exit <* >) u32>> align(8), + @insert <* >,<* void>,<* void>,<* <* void>>) u32>> align(8), + @del <* >,<* void>,<* void>) u32>> align(8), + @search <* >,<* void>,<* <* void>>) u32>> align(8), + @clean <* >) u32>> align(8), + @entNum <* >,<* u32>) u32>> align(8), + @travNext <* >,<* <$HpeFwdtblTravContext>>,<* u8>,<* u32>) u32>> align(8), + @verifyAge <* >,<* u32>) u32>> align(8), + @update <* >,<* void>,<* void>,<* void>) u32>> align(8), + @match <* >,<* void>,<* <* void>>) u32>> align(8)}> +type $HpfGlobalCfg , + @sysMacEnd <[6] u8>, + @macNum u32 align(4), + @lmFwdIfIndex u32 align(4), + @lmactiveIfIndex u32 align(4), + @lmbackupIfIndex u32 align(4), + @dscp u8, + @qosInner u8, + @natArpReplyEnable :1 u8, + @ndFastReplyEnable :1 u8, + @icmpFastEnable :1 u8, + @isolateEnable :1 u8, + @dscpEnable :1 u8, + @qosInnerEnable :1 u8, + @isPipeLine :1 u8, + @vlanPriEnable :1 u8, + @vlanPri u8, + @eNpNum u16 align(2), + @pipeLineId u8, + @qosWithoutIfg :1 u8, + @dualGates :1 u8, + @arpReplyEnable :1 u8, + @bfdEnable :1 u8, + @hostCapture :1 u8, + @srv6Enable :1 u8, + @dhcpClientEnable :1 u8, + @dhcpRelayEnable :1 u8, + @recvArpRequestNum u64 align(8), + @sendArpReplyNum u64 align(8), + @mirrorPktLen u32 align(4), + @hostcarEnable 
u32 align(4), + @evpnFlowAggPeriod u32 align(4), + @srv6SrcAddr <[4] u32> align(4), + @srv6PathMtu u16 align(2), + @srv6ReservedMtu u16 align(2), + @srv6ActiveMtu u16 align(2), + @resv u16 align(2), + @fwdifNull0 u32 align(4), + @fwdifLo u32 align(4)}> +type $HpfAclIpv4Key +type $HpfAclL2Key , + @srcMac <[6] u8>, + @ethType u16 align(2), + @outerVlan u16 align(2), + @innerVlan u16 align(2), + @doubleTag u8, + @valid u8, + @encapType u32 align(4)}> +type $HpeAclStatData align(4), + @priorityList <* <$HpeAclPriorityNode>> align(8), + @stat <[1] <$unnamed.6131>> align(8)}> +type $HpfMbuftrCarExtNode align(4), + @sampleArray <* <$HpfMbuftrCarExtSampleNode>> align(8)}> +type $HpfMbuftrCarExtSpec +type $HpfFrag4CopyInfo , + @frag4_pkt <* <$hpe_mbuf> align(64)> align(8), + @offset u16 align(2), + @frag4_fifo u8}> +type $HpfFrag4Cfg +type $HpfFrag6CopyInfo , + @dst_ip <$HpfIn6Addr>, + @pkt_id u32 align(4), + @vlan_id u16 align(2), + @src_port u16 align(2), + @dst_port u16 align(2), + @frag6_pkt <* <$hpe_mbuf> align(64)> align(8), + @frag6_fifo u8, + @offset u16 align(2)}> +type $HpfFrag6Cfg +type $HpfInAddr +type $HpfArpKey }> +type $HpfNdKey }> +type $HpfTracePktStat +type $HpfIbcHead , + @mbuftrMod u32 align(4), + @unnamed.5971 <$unnamed.5970> implicit align(4)}> +type $WatchPointInfo +type $LcoreWatch > align(8)}> +type $HpfCmdMsgHead +type $HpeCli align(8), + @len u32 align(4), + @maxLen u32 align(4)}> +type $tagHpeDiagFunc align(8), + @fCmdhelp <* >,<* i8>,u32,i32,<* <* i8>>) void>> align(8), + @cmdNum u32 align(4), + @cmds <* <$tagHpeDiagCmdInfo>> align(8)}> +type $tagHpeDiagCmdInfo align(8), + @cb <* >,<* i8>,u32,i32,<* <* i8>>) void>> align(8), + @param_num u32 align(4), + @params <[10] i32> align(4)}> +type $HpfFlowPrivate align(4), + @packets u32 align(4), + @bytes u64 align(8), + @fwdKey <$HpfFwdKey> align(8), + @ext u64 align(8), + @fwdParam <$unnamed.5007> align(8), + @trafficMerge :48 u64 align(8), + @lastPps :16 u64 align(8)}> +type $HpfFlowPublic align(4), 
+ @srcMac <[6] u8>, + @dstMac <[6] u8>, + @ethType u16 align(2), + @bridgeId u16 align(2), + @unnamed.4615 <$unnamed.4616> implicit align(4), + @unnamed.4621 <$unnamed.4622> implicit align(4), + @srcPort u16 align(2), + @dstPort u16 align(2), + @obvVrfIndex u16 align(2), + @revVrfIndex u16 align(2), + @protocol u8, + @varCursor u8, + @outZone u16 align(2), + @securityPolicyId :18 u32 align(4), + @slb :1 u32 align(4), + @icmpEchoreplyFast :1 u32 align(4), + @vlan :12 u32 align(4), + @ttl u32 align(4), + @hitTime u32 align(4), + @createTime u32 align(4), + @umNodeIndex :20 u32 align(4), + @onlineIpChecked :1 u32 align(4), + @onlineAppChecked :1 u32 align(4), + @migrateFlag :1 u32 align(4), + @migrateDoneFlag :1 u32 align(4), + @pingProxyFlag :1 u32 align(4), + @isRedirectPerPkt :1 u32 align(4), + @isL2 :1 u32 align(4), + @reservedBit :5 u32 align(4), + @unnamed.5225 <$unnamed.5226> implicit, + @appId u8, + @appIdExt u16 align(2), + @obvHashIndex :11 u32 align(4), + @revHashIndex :11 u32 align(4), + @tcpProxyStep :2 u32 align(4), + @varSize :8 u32 align(4), + @recvPort u32 align(4), + @natNext u64 align(8), + @inZone u16 align(2), + @backTime u16 align(2), + @varOffset <$var_offset_s>, + @resverd <[3] u32> align(4)}> +type $HpfFlowNatInfo +type $var_offset_s implicit, + @misc u8}> +type $unnamed.4616 , + @srcIp u32 align(4)}> +type $unnamed.4618 }> +type $unnamed.4622 , + @dstIp u32 align(4)}> +type $unnamed.4624 }> +type $HpfFlowSplitInfo +type $HpfFlowPrivateExt align(4), + @unnamed.5639 <$unnamed.5640> implicit align(2), + @parentProtocol u8, + @resvered u8, + @parentPortParam <$unnamed.5346> align(4), + @unnamed.5339 <$unnamed.5340> implicit align(4), + @agileSrcGroup u16 align(2), + @agileDstGroup u16 align(2), + @quotaProfileId u16 align(2), + @gtpRefreshId :14 u16 align(2), + @agileSrcUser :1 u16 align(2), + @agileDstUser :1 u16 align(2), + @stickySession :1 u32 align(4), + @stickyHitted :3 u32 align(4), + @smartRtType :4 u32 align(4), + @pbrPolicyId :24 u32 
align(4), + @imLogFlag :18 u32 align(4), + @tcpHasPkt :1 u32 align(4), + @resvered1 :13 u32 align(4), + @tcpExceptSeq u32 align(4), + @maxDelayMicSec u32 align(4), + @avgDelayMicSec u32 align(4)}> +type $HpfFlowMiscInfo align(4), + @slbPersisAlgo u16 align(2), + @slbVserverId u16 align(2), + @reserved <[2] u64> align(8)}> +type $HpfFlowTcpInfo +type $HpfFlowShowStatRetMsg align(8)}> +type $tagALARM_COMM_ATTR_S > align(8), + @reasonNum u32 align(4), + @carrierObjIndex u32 align(4), + @res u32 align(4)}> +type $tagPacketType align(64)>,<* <$tagPacketType>>) i32>> align(8), + @privinfo <* void> align(8), + @next <* <$tagPacketType>> align(8)}> +type $unnamed.5007 , + @sswSvcParam <$unnamed.5365> align(4), + @npFlowAdpt <$unnamed.5008> align(2), + @perPacketNhp <$unnamed.5709> align(2)}> +type $unnamed.5008 +type $HpfFwdKey align(4), + @outputParam u64 align(8)}> +type $unnamed.5009 +type $unnamed.5010 }> +type $HpfFlowProbeInfo +type $unnamed.5028 +type $HpfFlowDsliteInfo , + @toCpe <$HpfIn6Addr>, + @fromCpeTunnel u16 align(2), + @toCpeTunnel u16 align(2), + @obvCpeNexthop6 <$HpfIn6Addr>, + @revCpeNexthop6 <$HpfIn6Addr>, + @sessLimit :1 u16 align(2), + @_rvs7 :15 u16 align(2), + @_rvs16 u16 align(2)}> +type $HpfFlowNat64Info , + @v6DstIp <$HpfIn6Addr>, + @v6SrcPort u16 align(2), + @v6DstPort u16 align(2), + @patType u8, + @resv1 u8, + @resv2 u16 align(2)}> +type $HpfFlowDdosInfo }> +type $unnamed.5226 +type $HpfFlowSctpInfo +type $HpfFlowUdpInfo +type $unnamed.5325 align(2), + @ipsecParam <$unnamed.6145> align(4), + @mplsParam <$unnamed.5670> align(4)}> +type $unnamed.5330 +type $unnamed.5340 implicit align(4), + @unnamed.5454 <$unnamed.5455> implicit align(4)}> +type $unnamed.5342 +type $unnamed.5346 align(2)}> +type $unnamed.5347 +type $unnamed.5359 +type $unnamed.5364 align(2), + @vlanOutPort u32 align(4)}> +type $unnamed.5365 +type $HpfFlowNgeInfo align(2), + @decodeId u16 align(2), + @urlcateId u16 align(2)}> +type $unnamed.5445 align(2)}> +type $unnamed.5446 
+type $unnamed.5455 implicit align(4)}> +type $unnamed.5457 +type $unnamed.5547 , + @xffIp u32 align(4), + @umUpdateFlowIndex u16 align(2)}> +type $tag_portal +type $unnamed.5640 +type $HpfFwdHookFlow , + @fwd_hook_flow_aging <[19] u8>, + @fwd_hook_flow_scan <[21] u8>}> +type $unnamed.5670 +type $unnamed.5709 }> +type $HpfIcmpErrCfgMsg +type $HpfEthHdr , + @smac <[6] u8>, + @ethType u16 align(2)}> +type $DrvLinkCb align(64)>>,i32) void>> align(8)}> +type $HpfFunDeployPos > align(8), + @posBitmap u64 align(8)}> +type $HpeModTag align(8), + @regNext <* <$HpeModTag>> align(8), + @name <* i8> align(8), + @params <* i8> align(8), + @initEntry <* > align(8), + @exitEntry <* > align(8), + @localInitEntry <* > align(8), + @localExitEntry <* > align(8), + @deploy u32 align(4), + @where <[1] u64> align(8), + @modId u32 align(4), + @isPlugin u32 align(4), + @initialized u32 align(4), + @pluginHandle <* void> align(8), + @pluginInitStage2 <* > align(8), + @regPri u32 align(4), + @dumpCb <* ,u32,u32) i32>> align(8)}> +type $max_align_t +type $unnamed.5844 +type $HpeHeapMng align(4), + @isCreate u32 align(4), + @nodeList <$HPE_DLL> align(8), + @freeSize u64 align(8)}> +type $tagHpeMemZonelCluster align(4), + @isCreate u32 align(4), + @zoneMaxNum u32 align(4), + @virtStartAddr <* void> align(8), + @freeSize u64 align(8), + @nodeList <$HPE_DLL> align(8), + @HpeMemzonePool <[1] <$tagHpeMemzone> incomplete_array> align(8)}> +type $unnamed.5845 align(4), + @__vi <[14] i32> volatile align(4), + @__s <[7] u64> align(8)}> +type $unnamed.5846 align(4), + @__vi <[10] i32> volatile align(4), + @__p <[5] <* void volatile>> volatile align(8)}> +type $unnamed.5847 align(4), + @__vi <[12] i32> volatile align(4), + @__p <[6] <* void>> align(8)}> +type $unnamed.5848 align(4), + @__vi <[14] i32> volatile align(4), + @__p <[7] <* void>> align(8)}> +type $unnamed.5849 align(4), + @__vi <[8] i32> volatile align(4), + @__p <[4] <* void>> align(8)}> +type $HpePipeBlock align(4), + @name <* i8> 
align(8), + @flg u32 align(4), + @werrn u32 align(4), + @rerrn u32 align(4)}> +type $tagHpeEventCallback align(8), + @flags u16 align(2), + @priority u8, + @pad u8, + @lastSeqno u32 align(4), + @func <$tagHpeEventCbFunc> align(8)}> +type $unnamed.5850 > align(8), + @tqe_prev <* <* <$tagHpeEventCallback>>> align(8)}> +type $HpeEventCallbackList > align(8), + @tqh_last <* <* <$tagHpeEventCallback>>> align(8)}> +type $tagHpeEventOps +type $unnamed.5851 +type $HpeMempoolMemChunk align(8), + @unnamed.5854 <$unnamed.5853> implicit align(8), + @len u64 align(8), + @reserve u32 align(4)}> +type $unnamed.5853 +type $unnamed.5855 +type $HpeMempoolObjhdr align(8), + @mp <* <$tagHpeMempool> align(64)> align(8), + @unnamed.5858 <$unnamed.5857> implicit align(8)}> +type $unnamed.5857 +type $HPE_MEMPOOL_MEM align(8), + @gpa u64 align(8), + @hpa u64 align(8), + @totalSize u32 align(4), + @freePos u32 align(4), + @mplock <$HpeSpinlock> align(4)}> +type $HPE_MEMPOOL_USER , + @size u64 align(8)}> +type $unnamed.5859 align(4)}> +type $unnamed.5860 align(4), + @txadapter <$unnamed.5866> align(4), + @usr u32 align(4)}> +type $unnamed.5861 implicit align(4), + @hi u32 align(4)}> +type $unnamed.5862 implicit align(2), + @lo u32 align(4)}> +type $unnamed.5863 +type $unnamed.5866 +type $unnamed.5867 +type $unnamed.5868 implicit align(8)}> +type $unnamed.5869 +type $unnamed.5874 +type $unnamed.5875 +type $tagHpeHugePageInfo , + @hugepageSize u32 align(4)}> +type $tagHpeProcMeminfo , + @virtualMem u64 align(8), + @phyMem u64 align(8), + @shmMem u64 align(8), + @cache u64 align(8), + @buffer u64 align(8), + @memAvial u64 align(8), + @pid u32 align(4)}> +type $HpeMbufControl +type $HpeLfhashTimerStat +type $flock +type $f_owner_ex +type $HpfEthHdrTail , + @type u16 align(2)}> +type $HpfEthHdrSap +type $HpfEthVlanHdr , + @smac <[6] u8>, + @ethType u16 align(2), + @vlanId u16 align(2), + @type u16 align(2)}> +type $HpfVlanBaseInfo +type $HpfVlanHdr +type $Hpf802Hdr +type $HpfEthSnapHdr , + @smac 
<[6] u8>, + @len u16 align(2), + @dsap u8, + @ssap u8, + @ctrl u8, + @ori <[3] u8>, + @type u16 align(2)}> +type $HpfEthLlcHdr , + @type u16 align(2)}> +type $HpfArpHdr +type $HpfEthArpHdr , + @arpSrcMacAddr <[6] u8>, + @arpSrcProAddr <[4] u8>, + @arpDstMacAddr <[6] u8>, + @arpDstProAddr <[4] u8>}> +type $HpfEthDot1qHdr , + @srcAddr <[6] u8>, + @tpID u16 align(2), + @TCI u16 align(2), + @typeLen u16 align(2)}> +type $HpfDotqHdr +type $HpfEthQinqHdr +type $HpfEthLswTag +type $HpfEthLswHdr , + @smac <[6] u8>, + @lswTag <$HpfEthLswTag>}> +type $HpfIcmp6Hdr }> +type $unnamed.5876 align(4), + @data16 <[2] u16> align(2), + @data8 <[4] u8>}> +type $HpfIcmp6NaHdr align(4), + @opType u8, + @length u8, + @dstMac <[6] u8>}> +type $HpfIcmp6NsHdr align(4), + @opType u8, + @length u8, + @srcMac <[6] u8>}> +type $unnamed.5877 , + @u6_usaddr <[8] u16> align(2), + @u6_uladdr <[4] u32> align(4), + @u6_ulladdr <[2] u64> align(8)}> +type $HpfIn46Addr , + @uaddr <$unnamed.5878>}> +type $unnamed.5878 , + @st4Addr <$HpfInAddr>}> +type $HpfIp6Ext +type $HpfIp6Hbh +type $HpfIp6Route +type $HpfIp6SrhHdr >}> +type $HpfIp6Dest +type $HpfIp6FragHdr +type $HpfIp6AhHdr +type $HpfNdNsPkt , + @stNSTarget <$HpfIn6Addr>}> +type $HpfNdOptLla }> +type $HpfNdRsPkt }> +type $HpfNdNaPkt , + @stNATarget <$HpfIn6Addr>}> +type $unnamed.5879 +type $HpfUdpHdr +type $HpfDnsHdr +type $HpfDhcpCHdrS , + @yiaddr <$HpfInAddr>, + @siaddr <$HpfInAddr>, + @giaddr <$HpfInAddr>, + @chaddr <[16] u8>, + @sname <[64] i8>, + @file <[128] i8>, + @options <[1200] i8>}> +type $HpfIcmpHdr , + @icmpDun <$unnamed.5883>}> +type $unnamed.5880 , + @ihIdSeq <$unnamed.5881>, + @ihNVoid i32 align(4), + @ihPMtu <$unnamed.5882>}> +type $unnamed.5881 +type $unnamed.5882 +type $unnamed.5883 , + @idIp <$unnamed.5885>, + @idMask u32 align(4), + @idDataA <[1] i8>}> +type $unnamed.5884 +type $unnamed.5885 }> +type $HpeTmwjob ) u32>> align(8), + @pParam <* void> align(8), + @tmw u8, + @bkt u8, + @loop :1 u16 align(2), + @rsv :15 u16 align(2), + 
@timerid u32 align(4), + @skipts u32 align(4), + @seq u32 align(4)}> +type $HpeTmwbaseStruct +type $HpeTmwInstance , + @jobId u32 align(4)}> +type $HpfNatParam implicit align(4)}> +type $unnamed.5886 align(4)}> +type $unnamed.5887 +type $HpfNat6Param +type $HpfNatParamRet +type $HpfNat6ParamRet align(4), + @newDip <[4] u32> align(4), + @newSport u16 align(2), + @newDport u16 align(2)}> +type $HpfNatSuccessParam +type $HpfNatAgingParam +type $unnamed.5889 implicit align(2), + @unnamed.5893 <$unnamed.5891> implicit align(2)}> +type $unnamed.5890 +type $unnamed.5891 +type $HpfSAclActionCarInfo align(4), + @cir i64 align(8), + @pbs i64 align(8), + @cbs i64 align(8), + @pir i64 align(8), + @ts i64 align(8), + @tc i64 align(8), + @tpte i64 align(8)}> +type $HpfCarColorAction +type $HpfSAclActionCarAction >}> +type $HpfSAclActionCarCoreInfo +type $HpfSAclActionCarStat align(4), + @bytes u64 align(8), + @pkts u64 align(8), + @unnamed.5899 <$unnamed.5895> implicit align(8), + @unnamed.5904 <$unnamed.5900> implicit align(8)}> +type $unnamed.5895 implicit align(8), + @color <[3] <$unnamed.5897>> align(8)}> +type $unnamed.5896 +type $unnamed.5897 +type $unnamed.5900 implicit align(8), + @access <[2] <$unnamed.5902>> align(8)}> +type $unnamed.5901 +type $unnamed.5902 +type $HpfSAclActionCar align(8), + @carAction <$HpfSAclActionCarAction>, + @carStat <[1] <$HpfSAclActionCarStat> align(64)> align(64)}> +type $HpfSAclActionRemark +type $HpfSAclActionMirror +type $HpfSAclActionKey +type $HpfSAclActionRedirect +type $HpfSAclActionData align(4), + @car <$HpfSAclActionCar> align(64), + @remark <$HpfSAclActionRemark>, + @mirror <$HpfSAclActionMirror> align(2)}> +type $HpfSAclActionTblEntry align(4), + @stData <$HpfSAclActionData> align(64), + @version u32 align(4), + @resv u32 align(4)}> +type $HpfSAclSvc +type $HpfSAclGroupid +type $HpfSAclActionApply align(4), + @egrSvc <$HpfSAclSvc> align(4), + @gid <$HpfSAclGroupid> align(4), + @ingAction <$HpfSAclActionData> align(64), + 
@egrAction <$HpfSAclActionData> align(64)}> +type $HpfAclIpv6Key align(4), + @dstIpv6Addr <[4] u32> align(4), + @srcPort u16 align(2), + @dstPort u16 align(2), + @vrfIndex u16 align(2), + @protocol u8, + @tos u8, + @tcpFlag u8, + @icmpType u8, + @icmpCode u8, + @valid u8, + @fragFlag u32 align(4)}> +type $HpfAclAction +type $HpfVlanKey +type $HpfVlanData implicit align(2), + @macLimitCycle u32 align(4), + @bcMid u16 align(2), + @statId u16 align(2), + @macLimitNum u16 align(2), + @curMacNum u16 align(2), + @version u32 align(4)}> +type $unnamed.5905 implicit align(2)}> +type $unnamed.5906 +type $HpfVlanTbl align(2), + @data <$HpfVlanData> align(4)}> +type $VlanBitmap align(4)}> +type $VlanUntagBitmap align(4)}> +type $HpfBridgeCfg , + @macAgeTime u32 align(4), + @macLimitNum u32 align(4), + @curMacNum u32 align(4)}> +type $HostEthHdr , + @smac <[6] u8>, + @ethType u16 align(2)}> +type $HostVlanBaseInfo +type $tagTnlExtInfo align(4)}> +type $unnamed.5909 align(4)}> +type $unnamed.5910 +type $tagMbufTunnelIntf , + @uiTunnelID u32 align(4), + @stTnlExtInfo <$tagTnlExtInfo> align(4)}> +type $tagMbufEvpnVlanIntf }> +type $tagMBufLinkBDIntf align(4)}> +type $unnamed.5911 align(4), + @stPw <$unnamed.5913> align(4)}> +type $unnamed.5912 align(2)}> +type $unnamed.5913 +type $HostMbufExtVxlanLbInfo , + @srcPort u16 align(2), + @dstMac <[6] u8>, + @dstPort u16 align(2), + @protocolId u8, + @upFlag u8, + @ttl u8, + @tos u8}> +type $HostMbufExtVxlanInfo align(4)}> +type $HostMbufTunnelInfo , + @tunnelID u32 align(4), + @extTnlFlag u32 align(4), + @unExtInfo <$unnamed.5914> align(4)}> +type $unnamed.5914 align(4)}> +type $unnamed.5915 +type $HostMbufFwdVxlanInfo +type $HostMbufSndSockKey +type $HostMbufVxlanInfo align(4), + @extVxlanInfo <$HostMbufExtVxlanInfo> align(4), + @fwdVxlanInfo <$HostMbufFwdVxlanInfo> align(2)}> +type $HostIngressIntf implicit align(4), + @vr u32 align(4), + @vrfId u32 align(4), + @recvIfIndex u32 align(4), + @recvPhyIfIndex u32 align(4), + @bridgeId 
u32 align(4), + @appProto u8, + @protoType u8, + @cutHdrLen u8, + @bsFrmPeerLink :1 u8, + @rsv2 :3 u8, + @bridgeType :4 u8, + @reasonCode u16 align(2), + @subReasonCode u16 align(2), + @causeId u16 align(2), + @svp u16 align(2), + @srcMac <[6] u8>, + @dstMac <[6] u8>, + @vlanBaseInfo <[2] <$HpfVlanBaseInfo>>, + @oeIfIndex u32 align(4), + @sysTimeMs u32 align(4), + @tunnelIntf <$tagMbufTunnelIntf> align(4), + @evpnVlanIntf <$tagMbufEvpnVlanIntf> align(4), + @bdIntf <$tagMBufLinkBDIntf> align(4), + @unnamed.5921 <$unnamed.5918> implicit align(4), + @sndSockKey <$HostMbufSndSockKey> align(4), + @workIfIndex u32 align(4), + @sendIfIndex u32 align(4), + @nodeId u32 align(4), + @rsv3 u32 align(4), + @rsv4 u32 align(4)}> +type $unnamed.5916 align(4), + @ingressBitSetValue u32 align(4)}> +type $unnamed.5917 +type $unnamed.5918 align(4), + @ingressExBitSetValue u32 align(4)}> +type $unnamed.5919 +type $HostEgressIntf align(4), + @nextHop <[4] u32> align(4), + @unnamed.5924 <$unnamed.5922> implicit align(4), + @unnamed.5927 <$unnamed.5925> implicit align(2), + @oePriority u8, + @protoType u8, + @srcMac <[6] u8>, + @dstMac <[6] u8>, + @fwdPktType u8, + @tunnelType u8, + @oeIfIndex u32 align(4), + @tpId u16 align(2), + @pruneVlan u16 align(2), + @ovIfIndex u32 align(4), + @vlanBaseInfo <[2] <$HostVlanBaseInfo>>, + @pruneIf u32 align(4), + @unnamed.5929 <$unnamed.5928> implicit align(4), + @bdId u32 align(4), + @unnamed.5932 <$unnamed.5930> implicit align(4), + @tunnelID u32 align(4), + @srcAddr <[4] u32> align(4), + @unnamed.5934 <$unnamed.5933> implicit align(8), + @extVxlanInfo <$HostMbufExtVxlanInfo> align(4), + @fwdVxlanInfo <$HostMbufFwdVxlanInfo> align(2), + @sndSockKey <$HostMbufSndSockKey> align(4), + @l2PortListId u16 align(2), + @svp u16 align(2), + @rsv1 u32 align(4), + @rsv2 u32 align(4)}> +type $unnamed.5922 align(4), + @egressBitSetValue u32 align(4)}> +type $unnamed.5923 +type $unnamed.5925 align(2), + @linkStackBitSetValue u16 align(2)}> +type $unnamed.5926 
+type $unnamed.5928 +type $unnamed.5930 align(4), + @egressExBitSetValue u32 align(4)}> +type $unnamed.5931 +type $unnamed.5933 align(4), + @tcpPcb <* void> align(8)}> +type $HpfIbcLpu2Mpu , + @ingressIntf <$HostIngressIntf> align(4)}> +type $HpfFeTxInfo align(4), + @extFlag u32 align(4), + @TlvTxLen u32 align(4)}> +type $unnamed.5935 align(4), + @L2UC <$unnamed.5941> align(4), + @L2MC <$unnamed.5945> align(4), + @stL3MC <$unnamed.5953> align(4), + @SoftFwd <$unnamed.5961> align(2), + @P2MPMC <$unnamed.5965> align(4), + @Flow <$unnamed.5966> align(4), + @FORWARDL3IPSec <$unnamed.5967> align(4), + @AtmL2UC <$unnamed.5968> align(4)}> +type $unnamed.5936 align(4)}> +type $unnamed.5937 align(4), + @L2BDUCPhyIf <$unnamed.5939> align(4), + @L2BDUCTrunkIf <$unnamed.5940> align(2)}> +type $unnamed.5938 align(2)}> +type $unnamed.5939 +type $unnamed.5940 +type $unnamed.5941 , + @UCPrune <$unnamed.5942> align(4)}> +type $unnamed.5942 align(4), + @UCTrunkIf <$unnamed.5944> align(2)}> +type $unnamed.5943 +type $unnamed.5944 +type $unnamed.5945 align(4)}> +type $unnamed.5946 align(4), + @Vxlan <$unnamed.5947> align(4), + @TrunkIf <$unnamed.5948> align(2), + @SubIf <$unnamed.5949> align(4), + @TrunkSubIf <$unnamed.5950> align(2), + @Label <$unnamed.5951> align(4), + @FlowId <$unnamed.5952> align(4)}> +type $unnamed.5947 +type $unnamed.5948 +type $unnamed.5949 +type $unnamed.5950 +type $unnamed.5951 +type $unnamed.5952 +type $unnamed.5953 , + @L3MCPrune <$unnamed.5954> align(4)}> +type $unnamed.5954 align(4), + @L3MCTrunkIf <$unnamed.5956> align(2), + @L3MCVlanIf <$unnamed.5957> align(4)}> +type $unnamed.5955 +type $unnamed.5956 +type $unnamed.5957 align(4)}> +type $unnamed.5958 align(4), + @L3MCPruneTrunkIf <$unnamed.5960> align(2)}> +type $unnamed.5959 +type $unnamed.5960 +type $unnamed.5961 align(2)}> +type $unnamed.5962 > align(2), + @dot1qInfo <$unnamed.5964> align(2)}> +type $unnamed.5963 +type $unnamed.5964 +type $unnamed.5965 +type $unnamed.5966 +type $unnamed.5967 +type 
$unnamed.5968 +type $HpfIbcSpu2Mpu align(4)}> +type $unnamed.5969 align(4)}> +type $HpfIbcMpu2Spu align(4), + @oobData u32 align(4)}> +type $HpfHostPktEgressInfo +type $HpfIbcMpu2Lpu , + @egressInfo <$HpfHostPktEgressInfo> align(4)}> +type $HpfIbcTraceFlag +type $unnamed.5970 align(4), + @mpu2lpu <$HpfIbcMpu2Lpu> align(4), + @spu2mpu <$HpfIbcSpu2Mpu> align(4), + @mpu2spu <$HpfIbcMpu2Spu> align(4)}> +type $HpfMbufEspNatInfo +type $HpfMbufNgeInfo align(8), + @ullNgeSessIndex u64 align(8), + @uiDpSessVer u32 align(4), + @uiTTL u32 align(4), + @uiPolicyRuleID u32 align(4), + @uiAuditRuleID u32 align(4), + @iTcpProxyS3 i32 align(4), + @sNatAlgDelta i16 align(2), + @usAppTrace u16 align(2), + @usAppID u16 align(2), + @usDecoderID u16 align(2), + @ulStreamDirection :1 u32 align(4), + @bIsIPv6 :1 u32 align(4), + @bisTcpProxySend :1 u32 align(4), + @bisNatALG :1 u32 align(4), + @bisNatDest :1 u32 align(4), + @bisNatSrc :1 u32 align(4), + @bisResetPkt :1 u32 align(4), + @bRefreshFwdInfo :1 u32 align(4), + @bCapture :1 u32 align(4), + @bReassPayload :1 u32 align(4), + @bPayloadMalloc :1 u32 align(4), + @bIsCacheCpy :1 u32 align(4), + @bIsInNge :1 u32 align(4), + @bisChangedByIPS :1 u32 align(4), + @bMailBypass :1 u32 align(4), + @bIsIdentifiedByPM :1 u32 align(4), + @bIsNeedSA :1 u32 align(4), + @bIsNeedNGE :1 u32 align(4), + @bIsNeedRepolicy :1 u32 align(4), + @bIsFstPkt :1 u32 align(4), + @bFWisBlacklistEnable :1 u32 align(4), + @bIsDpDistrBypassed :1 u32 align(4), + @bIsDpDistrBlocked :1 u32 align(4), + @bIsHTTPSProxy :1 u32 align(4), + @bBypassDetect :1 u32 align(4), + @bIsSingleDetect :1 u32 align(4), + @uiNoNeedContinue :1 u32 align(4), + @uiResultChange :1 u32 align(4), + @uiSAFlag :2 u32 align(4), + @bIsSSL :1 u32 align(4), + @bIsPassive :1 u32 align(4), + @uTraceDebugTblId u16 align(2), + @uTraceNGEStepRst u16 align(2), + @bIsLwhttp :1 u32 align(4), + @bIsTunnelMode :1 u32 align(4), + @bIsPassThroughMode :1 u32 align(4), + @bIsNgeCreate :1 u32 align(4), + 
@bMailDetectBypass :1 u32 align(4), + @bIsTcpProxy :1 u32 align(4), + @bIsNgeSocket :1 u32 align(4), + @bIsReferUrlCat :1 u32 align(4), + @bIsIcmpUnreach :1 u32 align(4), + @bIsIdcDetect :1 u32 align(4), + @bIsNohanshake :1 u32 align(4), + @bIsUdpRebound :1 u32 align(4), + @bIsNgeMetadata :1 u32 align(4), + @bIsMbufTracer :1 u32 align(4), + @bIsMbufTracerId :4 u32 align(4), + @bIsMbufTracerDir :1 u32 align(4), + @bIsMbufTracerlage :1 u32 align(4), + @bIsUmPortal :1 u32 align(4), + @bIsMailProxy :1 u32 align(4), + @uiReserve :10 u32 align(4), + @uiParentSrcPort u16 align(2), + @uiParentDstPort u16 align(2), + @uiParentProtocol u8, + @uiReserve2 <[3] u8>}> +type $HpfHostMbufRxVlanInfo +type $HpfHostMbufRxInfo align(4), + @tbtp u32 align(4), + @dst <$unnamed.5974> align(4), + @flag u16 align(2), + @productInfo u16 align(2), + @vrfIndex u32 align(4), + @vrIndex u32 align(4), + @extFlag u32 align(4), + @tlvRxLen u32 align(4), + @mtu u32 align(4)}> +type $unnamed.5972 > align(2), + @atmPvcInfo <$unnamed.5973> align(2)}> +type $unnamed.5973 +type $unnamed.5974 > align(2)}> +type $unnamed.5975 align(4), + @mbufRxInfo <$HpfHostMbufRxInfo> align(4)}> +type $unnamed.5976 , + @sndSockKey <$HostMbufSndSockKey> align(4), + @unnamed.5978 <$unnamed.5977> implicit align(4)}> +type $unnamed.5977 +type $unnamed.5979 align(4), + @nexthop6 <$HpfIn6Addr>, + @to_cpe_ip <$HpfIn6Addr>, + @nat64_v6_dstip <$HpfIn6Addr>}> +type $unnamed.5980 +type $unnamed.5981 align(4), + @nsh_s <$unnamed.5983> align(4)}> +type $unnamed.5982 +type $unnamed.5983 +type $unnamed.5984 , + @unnamed.5987 <$unnamed.5985> implicit align(2), + @dhcp_s <$unnamed.5986> align(4), + @trunk_mem_outport_index u32 align(4)}> +type $unnamed.5985 align(2)}> +type $unnamed.5986 , + @usRes u16 align(2), + @ulPortPhyIndex u32 align(4)}> +type $unnamed.5988 align(2), + @esp_nat_info <$HpfMbufEspNatInfo> align(4)}> +type $unnamed.5989 , + @dst_ip <$HpfIn6Addr>, + @hbh_offset u16 align(2), + @dest_offset u16 align(2), + 
@route_offset u16 align(2), + @frag_offset u16 align(2)}> +type $unnamed.5990 +type $unnamed.5991 align(8), + @ipv4RedirectAddr u32 align(4)}> +type $unnamed.5992 }> +type $unnamed.5993 +type $unnamed.5994 +type $float32x2x2_t align(8)}> +type $SswMacKey , + @rsv u16 align(2)}> +type $SswMacData implicit, + @rsv1 <[3] u8>, + @flowBitMap <[8] u32> align(4), + @outIfIndex u32 align(4), + @updateTime u32 align(4), + @macIndex u32 align(4), + @lastCycle u64 align(8), + @lock <$HpeRwlock> align(4)}> +type $unnamed.6004 implicit}> +type $unnamed.6005 +type $SswMacIndex align(2), + @macIndex u32 align(4)}> +type $SswMacHash align(2), + @data <$SswMacData> align(8)}> +type $SswBcElbKey +type $SswBcElbData +type $SswBcElbHash align(2), + @unnamed.6009 <$unnamed.6008> implicit align(4)}> +type $unnamed.6008 , + @elbData <$SswBcElbData> align(4)}> +type $SswBcDupPara align(2), + @dupNum u32 align(4), + @inFlowIfIdx u32 align(4), + @mbuf <* <$hpe_mbuf> align(64)> align(8)}> +type $SswVlanIfBcDupPara align(2), + @dupNum u32 align(4), + @inFlowIfIdx u32 align(4), + @dupTb <[4] u32> align(4), + @dupBitMap <[4] u32> align(4), + @outFlowIfIdx <[4] u32> align(4), + @mbuf <* <$hpe_mbuf> align(64)> align(8)}> +type $BlackHoleMacKey , + @rsv u16 align(2), + @brId u32 align(4)}> +type $BlackHoleMacData +type $BlackHoleMacHash align(4), + @data <$BlackHoleMacData> align(4)}> +type $SswPortIsolateKey +type $SswPortIsolateData +type $SswPortIsolateHash align(4), + @data <$SswPortIsolateData> align(4)}> +type $EgrFsvc +type $tagCommonInfo +type $tagPhyInfo +type $tagLinkInfo align(2), + @ac2pw <$unnamed.6065> align(2), + @linkInfo <$unnamed.6066> align(4), + @celnInfo <$unnamed.6070> align(4), + @macInfo <$unnamed.6071> align(4)}> +type $tagIpv4Info +type $tagIpv6Info , + @linkLocalAddr <$HpfIn6Addr>}> +type $tagMplsInfo }> +type $tagIngL1Fsvc +type $tagIngL3Fsvc , + @rsv6 :6 u8, + @RxL3svcTop u8, + @RxL3svcSet <[16] <$IngFsvc>> align(4)}> +type $tagEgrL3Fsvc }> +type $ArpfIfCfg +type 
$tagEgrL1Fsvc +type $tagSrvInfo align(2), + @ingL2AclFilterGid u32 align(4), + @ingL4AclFilterGid u32 align(4), + @ingL2AclStatGid u32 align(4), + @ingL4AclStatGid u32 align(4), + @ingL2AclRedirectGid u32 align(4), + @ingL4AclRedirectGid u32 align(4), + @ingL2AclRemarkGid u32 align(4), + @ingL4AclRemarkGid u32 align(4), + @ingL2AclCarGid u32 align(4), + @ingL4AclCarGid u32 align(4), + @ingL2AclMirrorGid u32 align(4), + @ingL4AclMirrorGid u32 align(4), + @ingIprlGid u16 align(2), + @ingIpv4UprfGid u16 align(2), + @ingIpv6UrpfGid u16 align(2), + @resrv16 u16 align(2), + @egrL2AclGid u16 align(2), + @egrL4AclGid <[3] u16> align(2), + @egrL2AclFilterGid u32 align(4), + @egrL4AclFilterGid u32 align(4), + @egrL2AclStatGid u32 align(4), + @egrL4AclStatGid u32 align(4), + @egrL2AclRemarkGid u32 align(4), + @egrL4AclRemarkGid u32 align(4), + @egrL2AclCarGid u32 align(4), + @egrL4AclCarGid u32 align(4), + @egrL2AclMirrorGid u32 align(4), + @egrL4AclMirrorGid u32 align(4), + @egrIplrGid u16 align(2), + @snatAclGid u16 align(2), + @dnatAclGid u16 align(2), + @gdoiGid u16 align(2), + @ipsecFlowGid u16 align(2), + @ipsecFlowexGid u16 align(2), + @ipsecGid u16 align(2), + @resrv2_16 u16 align(2), + @ingCarid u32 align(4), + @egrCarid u32 align(4), + @observeId u32 align(4)}> +type $tagTunnelInfo +type $tagNsInfo +type $FwdIfStat +type $FwdIfMultiCoreStat align(64)> align(64), + @ifStatistic <$FwdIfStatAligned> align(64)}> +type $tagEgrInfo align(64)>,<* <$tagContext>>,<* u8>,<* <$tagEgrInfo>>) u32>> align(8), + @TxPvcFwdif <* <$tagFwdIf>> align(8), + @unnamed.6011 <$unnamed.6010> implicit align(8), + @l2aclKey <$PfaAclL2Key> align(2), + @encap <[72] u8>, + @TxFwdif <[6] <* <$tagFwdIf>>> align(8), + @TxL3svcSet <[8] <$EgrFsvc>> align(4), + @TxL1svcSet <[8] <$EgrFsvc>> align(4), + @TxLinksvcSet <[8] <$EgrFsvc>> align(4), + @vpn u16 align(2), + @fwdif u32 align(4)}> +type $tagContext +type $unnamed.6010 align(8), + @trunk <* void> align(8)}> +type $PfaAclL2Key , + @smac <[6] u8>, + 
@ethType u16 align(2), + @outerVlan u16 align(2), + @innerVlan u16 align(2), + @flags <$unnamed.6012>, + @qosGroup u8, + @fwdIfIdx u16 align(2), + @subFwdIfIdx u16 align(2)}> +type $unnamed.6012 implicit}> +type $unnamed.6013 +type $unnamed.6015 implicit}> +type $unnamed.6016 +type $unnamed.6018 implicit align(2)}> +type $unnamed.6019 +type $unnamed.6021 > align(8), + @nhpGrp <* <$HpfNextHopGrpEntry>> align(8)}> +type $HpfNextHopEntry implicit align(2), + @u16Vpn u16 align(2), + @u16LspToken u16 align(2), + @u16NlbID u16 align(2), + @Nexthop <[16] u8>, + @ctrlIfIdx u32 align(4), + @u16Mtu u16 align(2), + @unnamed.6032 <$unnamed.6028> implicit, + @linkLocal :1 u8, + @srv6Enable :1 u8, + @srv6NhpType :3 u8, + @bfdDown :1 u8, + @bgpState :1 u8, + @resv :1 u8, + @version u32 align(4), + @tunnelType u32 align(4), + @tunnelId u32 align(4), + @vni u32 align(4), + @vxlanMac <[6] u8>, + @outVpn u16 align(2), + @fwdIfIndex u32 align(4), + @srv6NextIndex u32 align(4), + @srv6VpnSid <[4] u32> align(4), + @bfdDisc u32 align(4), + @bfdSession u32 align(4), + @egrInfo <$tagEgrInfo> align(8)}> +type $HpfNextHopGrpEntry align(4), + @version u32 align(4), + @nhp <[128] <* <$HpfNextHopEntry>>> align(8)}> +type $Ipv4FwdCache +type $unnamed.6025 implicit align(2)}> +type $unnamed.6026 +type $unnamed.6028 implicit}> +type $unnamed.6029 +type $HpfArpData , + @unLinkInfo <$unnamed.6033> align(2), + @u32Vni u32 align(4), + @u32Dip u32 align(4), + @u32Sip u32 align(4), + @replyTime u32 align(4)}> +type $unnamed.6033 align(2)}> +type $unnamed.6034 +type $HpfArpTblData , + @unLinkInfo <$unnamed.6035> align(2), + @u32Vni u32 align(4), + @u32Dip u32 align(4), + @u32Sip u32 align(4), + @replyTime u32 align(4)}> +type $unnamed.6035 align(2), + @u16PvcIf u16 align(2), + @u16VlanId u16 align(2)}> +type $unnamed.6036 +type $unnamed.6037 implicit align(2)}> +type $unnamed.6038 +type $unnamed.6040 > align(8), + @nhp6Grp <* <$tagHpfNhp6GrpEntry>> align(8)}> +type $tagHpfNhp6Entry implicit, + 
@srv6Enable :1 u8, + @srv6NhpType :3 u8, + @bfdDown :1 u8, + @bgpState :1 u8, + @resv :2 u8, + @Nexthop <[16] u8>, + @version u32 align(4), + @tunnelType u32 align(4), + @tunnelId u32 align(4), + @vni u32 align(4), + @vxlanMac <[6] u8>, + @outVpn u16 align(2), + @srv6NextIndex u32 align(4), + @srv6VpnSid <[4] u32> align(4), + @bfdDisc u32 align(4), + @bfdSession u32 align(4), + @egrInfo <$tagEgrInfo> align(8)}> +type $tagHpfNhp6GrpEntry align(4), + @version u32 align(4)}> +type $tagIpv6Cache +type $unnamed.6043 implicit}> +type $unnamed.6044 +type $HpfNdData , + @ctrlIfIdx u32 align(4), + @fwdIfIndex u32 align(4), + @l3CtrlIfIdx u32 align(4), + @l3FwdIfIndex u32 align(4), + @u16VrfId u16 align(2), + @u16Resv u16 align(2), + @unLinkInfo <$unnamed.6047> align(2), + @u32Vni u32 align(4), + @u32Sip u32 align(4), + @u32Dip u32 align(4)}> +type $unnamed.6047 align(2)}> +type $unnamed.6048 +type $tagMfib6Key , + @mcip <[16] u8>}> +type $tagMfib6Data +type $tagMfib6Entry align(2), + @data <$tagMfib6Data> align(4)}> +type $tagElb6Node , + @fwdIfIndex u32 align(4), + @vlan u16 align(2), + @rsv16 u16 align(2), + @egrInfo <$tagEgrInfo> align(8)}> +type $tagElb6Key implicit align(8)}> +type $unnamed.6049 , + @elb6Node <$tagElb6Node> align(8)}> +type $HpfPathMtuKey }> +type $HpfPathMtuEntry align(4), + @mtu u32 align(4), + @version u32 align(4), + @resv u32 align(4)}> +type $HpsStreamKey align(2), + @u8Protocol u8, + @u8Rsv u8, + @u16Vpn u16 align(2), + @u32Dip u32 align(4), + @u32Sip u32 align(4)}> +type $unnamed.6051 align(2), + @stIcmp <$unnamed.6053> align(2)}> +type $unnamed.6052 +type $unnamed.6053 +type $HpfPortIpv4Index +type $HpfPortIpv6Index +type $HpfPortIpv4NodeKey implicit align(4)}> +type $unnamed.6054 , + @portIpv4Node <$HpfPortIpv4Node> align(4)}> +type $HpfPortIpv6NodeKey implicit align(4)}> +type $unnamed.6056 , + @portIpv6Node <$HpfPortIpv6Node> align(4)}> +type $HpfPortIpv4TravType align(4)}> +type $HpfPortIpv6TravType , + @prefix u32 align(4), + @ipv6Type 
<[7] u32> align(4)}> +type $HpfGreTunnelStatisticNode +type $HpfGreTunnelStat align(8), + @tunnelIfLastStat <$HpfGreTunnelStatisticNode> align(8), + @fwmTunnelId u32 align(4), + @resv u32 align(4)}> +type $HpfGreUpTunnelCfg , + @srcAddr <$HpfIn46Addr>, + @dstAddr <$HpfIn46Addr>, + @tunnelCheckSum u8, + @tunnelKey u8, + @ifStatEnable u8, + @nameId u16 align(2), + @tunnelKeyValue u32 align(4), + @encapProto u8, + @transmitProto u8, + @tunnelMode u8, + @isIpsecOverGre u8, + @outPortIndex u32 align(4), + @outPortIndexVlanIf u32 align(4), + @routeRefreshId u32 align(4), + @nextHop u32 align(4), + @vlanId u16 align(2), + @nhrpRedirect :1 u16 align(2), + @nhrpShortCut :1 u16 align(2), + @nhrpServer :1 u16 align(2), + @res :13 u16 align(2), + @nhrpDomainId u16 align(2), + @arpRefreshId u16 align(2), + @ipv4PktId u32 align(4), + @token u32 align(4), + @innerLabel u32 align(4), + @tunnelIfStat <* <$HpfGreTunnelStatisticNode>> align(8), + @version u32 align(4), + @resv u32 align(4), + @unnamed.6059 <$unnamed.6058> implicit align(8), + @unnamed.6061 <$unnamed.6060> implicit align(8)}> +type $unnamed.6058 > align(8), + @re6 <* <$tagCAP_RE6_ENTRY_S>> align(8)}> +type $unnamed.6060 > align(8), + @nhp6 <* <$tagHpfNhp6Entry>> align(8), + @nhpGrp <* <$HpfNextHopGrpEntry>> align(8), + @nhp6Grp <* <$tagHpfNhp6GrpEntry>> align(8)}> +type $HpfGreTunnelDecapKey +type $HpfGreTunnelDecapEntry align(4), + @tunnelId u32 align(4), + @fwdIfIndex u32 align(4), + @keyEn u8, + @resv u8, + @tnlIfVrf u16 align(2), + @keyValue u32 align(4), + @version u32 align(4), + @fwdIf <* <$tagFwdIf>> align(8)}> +type $FwdIfStatAligned +type $CtrlIfMapFwdIfTbl +type $FlowIfMapFwdIfTbl +type $HpfLinkEthInfo +type $HpfLinkInfo implicit align(4)}> +type $unnamed.6062 align(4)}> +type $unnamed.6064 +type $unnamed.6065 +type $unnamed.6066 align(4), + @pppInfo <$unnamed.6068> align(4), + @atmInfo <$unnamed.6069> align(4)}> +type $unnamed.6067 , + @pvid u16 align(2), + @dot1qTpid u16 align(2), + @qinqTpid u16 
align(2), + @vlanIdBegin :12 u32 align(4), + @vlanIdEnd :12 u32 align(4), + @innerVlanIdBegin :12 u32 align(4), + @innerVlanIdEnd :12 u32 align(4), + @tpId u16 align(2)}> +type $unnamed.6068 +type $unnamed.6069 +type $unnamed.6070 , + @rsv16 u16 align(2)}> +type $unnamed.6071 +type $IngFsvc +type $HashFwdIf align(64)}> +type $HpfCtlIfName }> +type $HpfCtlIfIndex +type $HpfCtlIfAndName align(4), + @data <$HpfCtlIfName>}> +type $HpfNameAndCtlIf , + @data <$HpfCtlIfIndex> align(4)}> +type $HpfTrunkTbl , + @fwdIfIdx <[256] u32> align(4), + @memberStat <[256] u8>, + @pktHashCnt u32 align(4), + @version u32 align(4), + @fwdIf <* <$tagFwdIf>> align(8)}> +type $HpfArpNatPoolKey +type $HpfArpNatPool +type $HpfArpNatPoolMsg align(4), + @data <$HpfArpNatPool> align(4), + @version u32 align(4), + @resv u32 align(4)}> +type $HpfArpNatPoolQueryMsg align(4), + @data <$HpfArpNatPool> align(4)}> +type $HpfArpNatPoolPara > align(8)}> +type $HpfFwdCaptAclCfg +type $HpfFwdCaptCfg align(4), + @acl <$HpfFwdCaptAclCfg> align(4)}> +type $NdFastReplyInfo align(4), + @opType u8, + @length u8, + @srcMac <[6] u8>}> +type $HpfUrpfCfg +type $HpfAtkPortCfg align(4), + @urpfCfgV6 <$HpfUrpfCfg> align(4), + @dropNum u32 align(4), + @suppressNum u32 align(4), + @dropNumV6 u32 align(4), + @suppressNumV6 u32 align(4)}> +type $HpfTunnel4over6Cfg , + @dstAddr <$HpfIn6Addr>, + @outPort u32 align(4), + @ndIndex u32 align(4), + @nextHop <[4] u32> align(4), + @routeRefreshId u8, + @gwFlag u8, + @vlanId u16 align(2), + @classOriginal u8, + @encapLimit u8, + @hopLimit u8, + @dscpCpy u8, + @tClassFlow u32 align(4), + @tunnelMode u8, + @reserve u8, + @encapProcess u16 align(2)}> +type $HpfTunnel6Rd , + @ipv6RdDelegatePrefix <$HpfIn6Addr>, + @ipv6PrefixLen u8, + @ipv4PrefixLen u8, + @ipv6RdDelegatePrefixLen u8, + @ipv4PrefixLenSet :1 u8, + @ipv6RdDelegatePrefixValid :1 u8, + @reserve :6 u8, + @brAddr u32 align(4)}> +type $HpfTunnel6over4Cfg align(4)}> +type $HpfTunnel6over4DecapKey +type 
$HpfTunnel6over4DecapEntry align(4), + @tunnelId u32 align(4), + @tunnelId2 u32 align(4), + @version u32 align(4), + @resv u32 align(4)}> +type $HpfTunnel4over6DecapKey , + @dstAddr <$HpfIn6Addr>, + @tunnelMode u32 align(4), + @reserve u32 align(4)}> +type $HpfTunnel4over6DecapEntry align(4), + @tunnelId u32 align(4), + @version u32 align(4), + @resv u32 align(4)}> +type $MirrObserve +type $HpfBfdFsm align(8), + @detectTimerId <* void> align(8)}> +type $HpfBfdPkt align(4), + @pkt <[128] u8>, + @linkHdr <[64] u8>, + @destMac <[6] u8>, + @linkHdrLen u8, + @npHdrLen u8, + @rxPkts u32 align(4), + @txPkts u32 align(4)}> +type $HpfBfdTimer align(4), + @remainTimes <$HpeAtom32> align(4), + @txPktTtlTicks u32 align(4), + @txPktTicks i32 volatile align(4), + @jobId u32 align(4)}> +type $HpfBfdEntry align(8), + @bfdPkt <$HpfBfdPkt> align(4), + @timer <$HpfBfdTimer> align(4)}> +type $tagHpfBfdRxKey +type $tagHpfBfdRxEntry align(4), + @sessionId u32 align(4)}> +type $HpfBfdDownMsgHead , + @msgCount u8, + @resv1 u32 align(4)}> +type $HpfVpnNameIdKey }> +type $HpfVpnNameIdData align(4), + @vrfId u32 align(4), + @peerVrfId u32 align(4)}> +type $HpfFwdIfNotifyHead align(4), + @head <$tagHpeListHead> align(8)}> +type $HpfFwdIfNotifyNode align(8), + @cb <* >,<* <$tagFwdIf>>) void>> align(8)}> +type $HpfFwdIfNotifyFunc align(8), + @cb <* > align(8)}> +type $HpfFwdIfMatchIp +type $HpfFwdIfTravRet +type $HpfMbuftrMsgHd > align(8)}> +type $HpfMbuftrPktMsgBody align(4), + @dstAddr <[4] u32> align(4), + @outIfIndex u32 align(4), + @nextHop <[4] u32> align(4), + @vrfIndex u32 align(4)}> +type $HpfTraceDbgMsg , + @dstMac <[6] u8>, + @srcAddr <$HpfIn46Addr>, + @dstAddr <$HpfIn46Addr>, + @protocol u8, + @resv <[3] u8>, + @ethType u16 align(2), + @vlanId u16 align(2), + @srcPort u16 align(2), + @dstPort u16 align(2), + @srcIntf u32 align(4), + @maxLen u16 align(2), + @minLen u16 align(2)}> +type $HpfLinkStateCheckMsg +type $HpfLinkStateCheckIpv6Msg +type $HpfLinkStateExcludeaclMsg +type 
$HpfLinkStateExcludeaclIpv6Msg +type $HpfFlowFastAgingCfg +type $HpfDnsFastAgeCfg +type $HpfOnlineIpMsgInfo +type $HpfOnlineIpSortInfo implicit align(8), + @upRate u64 align(8), + @downRate u64 align(8), + @userId u32 align(4), + @remain u32 align(4)}> +type $unnamed.6072 +type $HpfOnlineIpNumInfo +type $HpfFwdHookReq +type $HpfFlowHookMsg +type $HpfFlowHookMsgRet +type $HpfFlowHookInfo , + @flowHookAging <[32] u8>, + @flowHookScan <[32] u8>}> +type $HpfFlowHookIpv6Info , + @flowHookAging <[32] u8>, + @flowHookScan <[32] u8>}> +type $HpfTopnLogHead +type $HpfDbcuHead +type $HpfDbcuDiscardInfo +type $HpfDbcuPkt implicit, + @unnamed.6077 <$unnamed.6076> implicit, + @unnamed.6079 <$unnamed.6078> implicit, + @unnamed.6081 <$unnamed.6080> implicit align(2), + @unnamed.6083 <$unnamed.6082> implicit align(4), + @unnamed.6085 <$unnamed.6084> implicit align(2), + @unnamed.6087 <$unnamed.6086> implicit align(4), + @unnamed.6089 <$unnamed.6088> implicit align(4), + @unnamed.6091 <$unnamed.6090> implicit align(4)}> +type $unnamed.6074 +type $unnamed.6076 +type $unnamed.6078 +type $unnamed.6080 +type $unnamed.6082 +type $unnamed.6084 +type $unnamed.6086 }> +type $unnamed.6088 }> +type $unnamed.6090 align(4), + @pktDiscardInfo <$HpfDbcuDiscardInfo> align(4)}> +type $HpfFlowTopnCfgMsg +type $HpfFlowTopnSessNumInfo +type $HpfFlowTopnSessRespInfo > align(4)}> +type $HpfFlowTopnTrSortInfo +type $HpfFlowTopnTrRespReturnInfo > align(8), + @ppsResult <[512] <$HpfFlowTopnTrSortInfo>> align(8)}> +type $HpfFlowInfoBasic , + @dstMac <[6] u8>, + @bridgeId u16 align(2), + @ethType u16 align(2), + @unnamed.6093 <$unnamed.6092> implicit align(4), + @unnamed.6095 <$unnamed.6094> implicit align(4), + @srcPort u16 align(2), + @dstPort u16 align(2), + @srcVrfIndex u16 align(2), + @dstVrfIndex u16 align(2), + @vlan u16 align(2), + @fwdType u8, + @isL2 u8, + @protocol u8, + @appId u8, + @appIdExt u16 align(2), + @srcNatIp u32 align(4), + @dstNatIp u32 align(4), + @srcNatPort u16 align(2), + 
@dstNatPort u16 align(2), + @privateFlags u32 align(4), + @publicFlags u32 align(4), + @fromCpe <[4] u32> align(4), + @toCpe <[4] u32> align(4), + @fromCpeTunnel u16 align(2), + @toCpeTunnel u16 align(2), + @userService u32 align(4)}> +type $unnamed.6092 }> +type $unnamed.6094 }> +type $HpfFlowSessIdInfo +type $HpfFlowTopnPktFlowInfo align(4), + @sessId <$HpfFlowSessIdInfo> align(4), + @revPkts u32 align(4), + @sendPkts u32 align(4), + @revBytes u64 align(8), + @sendBytes u64 align(8), + @totalPkts u64 align(8)}> +type $HpfFlowTopnPktFlowRespInfo > align(8)}> +type $HpfFlowTopnHundredSessionCfg , + @outPort u32 align(4), + @nextHop u32 align(4), + @recvPort u32 align(4), + @obvVrfIndex u32 align(4)}> +type $HpfFwdIicMsgInfo +type $HpfTcpMssZoneMsg +type $HpfTcpMssMsg +type $HpfTcpMssKeychainMsg +type $HpfIpcmv6ErrMsg +type $HpfTcpSeqMsg +type $HpfAckRateLimit +type $HpfFwdTraceMsgHead +type $HpfFwdTraceMsgData , + @vsysIndex u16 align(2)}> +type $HpfFwdTraceDbgWebMsg align(4), + @unDA <$unnamed.6097> align(4), + @usSPBegin u16 align(2), + @usSPEnd u16 align(2), + @usDP u16 align(2), + @usApp u16 align(2), + @usVlan u16 align(2), + @usVsysIndex u16 align(2), + @ucPro u8, + @ucMode u8, + @ucIPv6 :1 u8, + @ucChs :1 u8, + @ucEnable :1 u8, + @ucHadSMAC :1 u8, + @ucHadDMAC :1 u8, + @ucRes :3 u8, + @ucreserve u8, + @ucSMAC <[6] u8>, + @ucDMAC <[6] u8>, + @ulUserId u32 align(4), + @ulInIf u32 align(4)}> +type $unnamed.6096 }> +type $unnamed.6097 }> +type $HpfDbgMsg }> +type $HpfDbgExtTrace align(4), + @unDA <$unnamed.6099> align(4), + @ctrlIfIndex u32 align(4), + @dropIndex u32 align(4), + @usSrcPort u16 align(2), + @usDstPort u16 align(2), + @usIpId u16 align(2), + @usIpFrag u16 align(2), + @usSrcVrf u16 align(2), + @usDstVrf u16 align(2), + @usVsysIndex u16 align(2), + @ucPro u8, + @tcpFlag u8, + @pktDir u8, + @pktIndex u8, + @ucIsWeb u8, + @traceFlag u8, + @zoneIn u16 align(2), + @ulTimestamp u32 align(4), + @srcMac <[6] u8>, + @dstMac <[6] u8>, + @ethType u16 align(2), 
+ @vlanId u16 align(2), + @ucData <[0] u8>}> +type $unnamed.6098 }> +type $unnamed.6099 }> +type $HpfDbgExtTraceInfo , + @detail <[512] i8>}> +type $HpfPktTraceInfoS , + @detail <[512] i8>, + @dropFlag :1 u8, + @res :7 u8, + @traceId u16 align(2), + @infoNext <* <$HpfPktTraceInfoS>> align(8)}> +type $HpfPktTracePacketS > align(8), + @infoHead <* <$HpfPktTraceInfoS>> align(8)}> +type $HpfPktTraceFlow align(4), + @unDA <$unnamed.6101> align(4), + @usSP u16 align(2), + @usDP u16 align(2), + @usVsysIndex u16 align(2), + @ucPro u8, + @ucRse u8, + @usPktNum u32 align(4), + @pktHead <* <$HpfPktTracePacketS>> align(8)}> +type $unnamed.6100 }> +type $unnamed.6101 }> +type $HpfScrlCfg +type $HpfSessRateCarCfg +type $HpfScrlPassCfg +type $HpfFlowHashTblInfo align(4), + @emptyNotZero u32 align(4), + @zeroNotEmpty u32 align(4), + @zeroNotEmptyIndex <[64] u32> align(4)}> +type $HpfFlow6LogMsgData +type $HpfAclStatMsg +type $HpfGetAclStatMsg +type $HpfAclRange32 +type $HpfAclRange16 +type $HpfAclRange8 +type $HpfAclStatRule , + @tos <$HpfAclRange8>, + @srcIp <$HpfAclRange32> align(4), + @destIp <$HpfAclRange32> align(4), + @dstPort <$HpfAclRange16> align(2), + @srcPort <$HpfAclRange16> align(2), + @icmp <$HpfAclRange16> align(2)}> +type $frag_print_info_s +type $frag6_print_info_s , + @dst_ip <$HpfIn6Addr>, + @pkt_id u32 align(4), + @vrf_index u16 align(2), + @src_port u16 align(2), + @dst_port u16 align(2), + @vlan_id u16 align(2), + @protocol u8, + @pkt_cnt u16 align(2), + @cur_offset u16 align(2), + @create_time u64 align(8), + @ttl u32 align(4), + @valid :1 u8, + @fst_rcv :1 u8, + @reassemble :1 u8, + @last_rcv :1 u8, + @frag6_fifo :1 u8, + @ucRes :3 u8}> +type $HpfSystemStatMsg +type $HpfSystemStatKeyData +type $HpfStat6Cfg +type $HpfStat6CfgRetMsg +type $HpfStat6Info align(8), + @ip6DiscardCnt <[1077] u64> align(8), + @ip6SndCnt <[9] u64> align(8), + @ip6RcvCnt <[26] u64> align(8), + @ip6SessCnt <[21] u64> align(8), + @ip6AppByteCnt <[71] u64> align(8), + @ip6InnerCnt <[48] 
u64> align(8)}> +type $HpfMbuftrPktDbgInfo align(4), + @srcIp u32 align(4), + @dstIp u32 align(4), + @srcPort u16 align(2), + @dstPort u16 align(2), + @number u32 align(4), + @proto u8, + @res <[3] u8>}> +type $HpfMbuftrStatInfo align(8), + @sendNum <[84] u64> align(8), + @carDiscNum <[170] u64> align(8)}> +type $HpfSessMonReport align(4), + @pktData <[256] i8>}> +type $unnamed.6102 align(4), + @sctpInfo <$unnamed.6104> align(4)}> +type $unnamed.6103 +type $unnamed.6104 +type $HpfSnmpTrapCfg +type $HpfGreCfgMsgHead +type $HpfGreReturnTunMsgHead +type $tbl_flt_tlv_s +type $tbl_cfg_msg_head_s +type $tag_tbl_total_sess_info +type $tbl_cfg_return_msg_head_s +type $tbl_cfg_get_info_ctx_s +type $HpfFlowCfgMsgHead +type $HpfFlowFltTlv +type $HpfFlowCfgGetInfoCtx +type $HpfFlowCfgMsg align(2), + @ctx <$HpfFlowCfgGetInfoCtx> align(4), + @filterTlv <[124] <$HpfFlowFltTlv>> align(4)}> +type $tag_HpfAclStatid align(4), + @eid2 <[15] u32> align(4), + @statid1 u16 align(2), + @statid2 u16 align(2), + @statid3 u16 align(2), + @statid4 u16 align(2), + @next <* <$tag_HpfAclStatid>> align(8)}> +type $HpfAclStatNpMsg +type $fwd_acl_stat_result_vm_t align(8)}> +type $tag_HpfIpv6AclStatid align(4), + @eid2 <[15] u32> align(4), + @statid1 u16 align(2), + @statid2 u16 align(2), + @statid3 u16 align(2), + @statid4 u16 align(2), + @next <* <$tag_HpfIpv6AclStatid>> align(8)}> +type $HpfIpv6AclStatNpMsg , + @srcIpv6Mask <[16] u8>, + @dstIpv6Addr <[16] u8>, + @dstIpv6Mask <[16] u8>, + @srcPortBegin u16 align(2), + @srcPortEnd u16 align(2), + @dstPortBegin u16 align(2), + @dstPortEnd u16 align(2), + @protocol u8, + @dscpValue u8, + @vrfIndex u16 align(2), + @icmpType u8, + @icmpCode u8, + @anyFlag u8, + @resv u8, + @ruleId u64 align(8), + @statId1 u16 align(2), + @statId2 u16 align(2), + @statId3 u16 align(2), + @statId4 u16 align(2)}> +type $HpfIpv6AclStatResult align(8)}> +type $HpfArpMissCachePrintTblInfo +type $HpfArpMissCachePrintPktInfo +type $HpfNdMissCachePrintTblInfo align(4), + 
@cachedPktCnt u32 align(4)}> +type $HpfNdMissCachePrintPktInfo align(4), + @dstIp <[4] u32> align(4), + @ttl u32 align(4), + @elapseTime u32 align(4)}> +type $HpfTransCtrlIfIndex +type $HppNpFlowSessStatRet +type $HpfDelayWatchDesc align(8), + @strChinese <* i8> align(8), + @solution <* i8> align(8)}> +type $WatchPointRetMsg +type $HppPktBtGetResultMsg }> +type $unnamed.6105 +type $HpfServerMapMsgHead +type $HpfServerMapFilterTlv +type $HpfNatAddressGroupMsg +type $HpfPktDecode , + @dstMac <[6] i8>, + @srcAddr <$HpfIn46Addr>, + @dstAddr <$HpfIn46Addr>, + @ethType u16 align(2), + @vlanId u16 align(2), + @protocol u8, + @srcPort u16 align(2), + @dstPort u16 align(2), + @tos u8, + @isFrag u8, + @isFirstFrag u8, + @isLastFrag u8, + @ipId u16 align(2), + @ethLen u16 align(2)}> +type $unnamed.6106 align(4), + @align_rsv u64 align(8)}> +type $fwd_pkt_back_from_remote_nge_info_s implicit align(4), + @ip6Token u32 align(4), + @inZone u16 align(2), + @outZone u16 align(2), + @vlan u16 align(2), + @routeGateWay :1 u8, + @isCrossVrf :1 u8, + @timeStampValid :1 u8, + @dfxTrace :1 u8, + @traceInstance :3 u8, + @resv2 :1 u8, + @timeStamp u32 align(4)}> +type $unnamed.6108 }> +type $HpfArpMissNp implicit align(4), + @srcVsiId u16 align(2), + @dstVsiId u16 align(2), + @vxlanFlag u8, + @direct u8, + @srcVrf u16 align(2)}> +type $unnamed.6110 implicit align(2)}> +type $unnamed.6111 +type $HpfArpMissCpu +type $HpfArpMissMsg , + @unnamed.6115 <$unnamed.6114> implicit align(4)}> +type $unnamed.6114 align(4), + @arpMissNp <$HpfArpMissNp> align(4)}> +type $HpfEthAdpArpMissOpcodeIpv4 align(4), + @rsvd4 u32 align(4), + @rsvd5 u32 align(4), + @rsvd6 u32 align(4)}> +type $unnamed.6116 +type $HpfEthAdpNdMissOpcodeIpv6 , + @nhp6 <$unnamed.6117> align(4)}> +type $unnamed.6117 , + @nhpIndex u32 align(4)}> +type $HpfEthAdpArpNdMiss align(4), + @LW1 u32 align(4), + @unnamed.6120 <$unnamed.6119> implicit align(4)}> +type $unnamed.6118 +type $unnamed.6119 align(4), + @ipv6 
<$HpfEthAdpNdMissOpcodeIpv6> align(4)}> +type $HpfV2EthAdpArpNdMiss implicit align(4)}> +type $unnamed.6121 align(4), + @ipv6 <$HpfEthAdpNdMissOpcodeIpv6> align(4)}> +type $HpfNdMissNp align(4), + @nhpIp <[4] u32> align(4), + @unnamed.6126 <$unnamed.6123> implicit align(4), + @srcVsiId u16 align(2), + @dstVsiId u16 align(2), + @direct u8, + @vxlanFlag u8, + @srcVrf u16 align(2)}> +type $unnamed.6123 implicit align(2)}> +type $unnamed.6124 +type $HpfNdMissCpu align(4), + @nhpIp <[4] u32> align(4), + @ifIndex u32 align(4), + @srcVrf u16 align(2), + @rsv3 u16 align(2)}> +type $HpfNdMissMsg , + @unnamed.6128 <$unnamed.6127> implicit align(4)}> +type $unnamed.6127 align(4), + @ndMissNp <$HpfNdMissNp> align(4)}> +type $tagDPHashTableInfo +type $tagDP_HASHARRAY_S > align(8), + @rwlock <$HpeRwlock> align(4), + @ulCacheCounter u32 align(4), + @lNodeCounter u32 align(4)}> +type $HpfAclRmRange +type $HpfAclRmEntry , + @range <[0] <$HpfAclRmRange>> align(2)}> +type $tagAclSearchNode > align(8), + @child <[2] <* <$tagAclSearchNode>>> align(8)}> +type $tag_acl_search_jumptable align(2), + @numRoots u32 align(4), + @numCutbits u32 align(4), + @bitset <[15] u64> align(8), + @cutbitMask <[6] u64> align(8), + @offset <[6] u8>, + @keyId <[15] u8>, + @roots <* <* <$tagAclSearchNode>>> align(8)}> +type $HpfAclRuleSet align(8), + @data <* <$HpfAclRule>> align(8)}> +type $tagHpfAclListNode > align(8)}> +type $HpfAclBinTreeStats +type $HpfAclSortPair +type $HpfAclCutBitStat +type $HpfAclUpdHistory align(8), + @oldDtRules <* <$HpfAclRuleIdSet>> align(8)}> +type $HpeTbmOperate +type $HpeFwdTblRelaSpec align(2), + @relaTimes <[10] u16> align(2)}> +type $unnamed.6129 align(4)}> +type $HpeCapSrvmapKey align(4), + @mask <$HpeSrvmapField> align(4)}> +type $HpeCapSrvmapData align(4), + @ifNext <* <$HpeCapSrvmapData>> align(8), + @fake :1 u32 align(4), + @srvMap :1 u32 align(4), + @srvMapRsv :1 u32 align(4), + @natFilter :1 u32 align(4), + @natmap :1 u32 align(4), + @nopat :1 u32 align(4), + 
@dynamic :1 u32 align(4), + @alp :1 u32 align(4), + @acl :1 u32 align(4), + @fthrVaild :1 u32 align(4), + @again :1 u32 align(4), + @aspfType :5 u32 align(4), + @deny :1 u32 align(4), + @rsv1 :15 u32 align(4), + @newIp u32 align(4), + @newIpMask u32 align(4), + @newport u16 align(2), + @outVpnId u16 align(2), + @pptPort u16 align(2), + @natPoolId u16 align(2), + @alpSsnIndex u16 align(2), + @alpFthrSsnIndex u16 align(2), + @pFthrFlwAddr <* void> align(8), + @natIfIndex u16 align(2), + @rsv2 u16 align(2), + @referCnt <$HpeAtom32> align(4)}> +type $HpeAclRuleBinaryField +type $HpeAclGroupCfg +type $HpeAclGroupKey +type $HpeAclPriorityNode > align(8)}> +type $HpeAclStatKey +type $unnamed.6131 +type $HpeAclStatEntry align(4), + @statData <$HpeAclStatData> align(8), + @version u32 align(4), + @resv u32 align(4)}> +type $unnamed.6132 align(8), + @actionTemp <$HpeFwdTblInfo> align(8)}> +type $HpeAclGroupEntry align(4), + @data <$HpeAclGroup> align(8)}> +type $tagPfaAclGroupEntry align(8), + @ruleIndex <$HpeFwdTblInfo> align(8)}> +type $HpeMbLock align(8), + @timestamp u64 align(8), + @m <* void> align(8), + @type u32 align(4)}> +type $HpeTbmVerify +type $HpeTbmGlobalVerify align(4)}> +type $HpeGlobalTblVerifyAge align(8), + @globalTblAge <* > align(8), + @verifyEnable u32 align(4)}> +type $HpfAclNotifyHead align(4), + @head <$tagHpeListHead> align(8)}> +type $HpfAclNotifyNode align(8), + @cb <* > align(8)}> +type $HpfMbuftrCarNode +type $HpfMbuftrCarExtSampleNode implicit align(4), + @count u32 align(4)}> +type $unnamed.6134 , + @mac <[6] u8>, + @port u32 align(4)}> +type $HpfMbuftrSecHead implicit align(4), + @unnamed.6139 <$unnamed.6138> implicit align(4), + @inIfIdx u32 align(4), + @vrf u16 align(2), + @vlanIn u16 align(2)}> +type $unnamed.6136 }> +type $unnamed.6138 }> +type $HostNpHeadTotalCar , + @smac <[6] u8>, + @vlanTag u32 align(4), + @ethType u16 align(2), + @causeId u16 align(2)}> +type $HpfCpcarIndexParseMap align(64)>) u32>> align(8)}> +type 
$HpfGreTunnelPhyInfo +type $HpfGreTunnelProductInfo +type $HpfGreUptunnelTable , + @vrfIndex u32 align(4), + @srcAddr u32 align(4), + @srcVrf u32 align(4), + @dstAddr u32 align(4), + @dstVrf u32 align(4), + @zoneId u32 align(4), + @slot u32 align(4), + @tunnelId u32 align(4), + @encapProto u8, + @transmitProto u8, + @tunnelMode u8, + @greIpsec u8, + @nhrpRedirect u8, + @nhrpShortCut u8, + @nhrpServer u8, + @res u8}> +type $HpfGreStat align(8)}> +type $HpfGreRemainTunnel +type $HpfFrag4SpecInfo +type $HpfFrag4OffSetInfo +type $tagHpfFrag4OffsetNode > align(8)}> +type $HpfFrag4TblInfo , + @rwlock <$HpeRwlock> align(4), + @frag4_pkt <* <$hpe_mbuf> align(64)> align(8), + @pkt_cnt u16 align(2), + @cur_offset u16 align(2), + @sendFragLen u16 align(2), + @valid :1 u8, + @fst_rcv :1 u8, + @reassemble :1 u8, + @last_rcv :1 u8, + @frag4_fifo :1 u8, + @overlap :1 u8, + @buff_full :1 u8, + @reserved :1 u8, + @frag4_offset <[21] <$HpfFrag4OffSetInfo>> align(2), + @fragOffsetNode <* <$tagHpfFrag4OffsetNode>> align(8), + @fragOffsetCount u32 align(4)}> +type $HpfFrag4IicMsgInfo +type $HpfFrag4CacheCount align(4)}> +type $HpfFragFloodAtkInfo align(4), + @protocol <[256] u32> align(4), + @srcIp <[1024] u32> align(4), + @dstIp <[1024] u32> align(4), + @pktId <[512] u32> align(4), + @interface <[512] u32> align(4), + @recordTime u32 align(4), + @resetTime u32 align(4), + @pktCount u64 align(8)}> +type $HpfFrag6TblInfo , + @dst_ip <$HpfIn6Addr>, + @pkt_id u32 align(4), + @vrf_index u16 align(2), + @src_port u16 align(2), + @dst_port u16 align(2), + @vlan_id u16 align(2), + @protocol u8, + @first_frag_len u16 align(2), + @rwlock <$HpeRwlock> align(4), + @frag6_pkt <* <$hpe_mbuf> align(64)> align(8), + @pkt_cnt u16 align(2), + @cur_offset u16 align(2), + @create_time u64 align(8), + @ttl u32 align(4), + @valid :1 u8, + @fst_rcv :1 u8, + @reassemble :1 u8, + @last_rcv :1 u8, + @frag6_fifo :1 u8, + @reserved :3 u8}> +type $HpfFrag6SpecInfo +type $HpfFrag6IicMsgInfo +type 
$HpfFrag6FloodAtkInfo align(4), + @protocol <[256] u32> align(4), + @srcIp <[1024] u32> align(4), + @dstIp <[1024] u32> align(4), + @pktId <[512] u32> align(4), + @interface <[512] u32> align(4), + @recordTime u32 align(4), + @resetTime u32 align(4), + @pktCount u64 align(8)}> +type $HpfArpMissCacheSpec +type $tagHpfArpMissCachePktNode > align(8), + @mbuf <* <$hpe_mbuf> align(64)> align(8), + @ttl u32 align(4), + @createTime u32 align(4)}> +type $HpfArpMissCacheTblNode align(4), + @cachedPktCnt u32 align(4), + @cachedPktHead <* <$tagHpfArpMissCachePktNode>> align(8)}> +type $HpfArpMissCacheTblKey +type $HpfNdMissCacheSpec +type $tagHpfNdMissCachePktNode > align(8), + @mbuf <* <$hpe_mbuf> align(64)> align(8), + @ttl u32 align(4), + @createTime u32 align(4)}> +type $HpfNdMissCacheTblNode align(4), + @isValid u8, + @rwlock <$HpeRwlock> align(4), + @cachedPktCnt u32 align(4), + @cachedPktHead <* <$tagHpfNdMissCachePktNode>> align(8)}> +type $HpfNdMissCacheTblKey align(4)}> +type $HpfTunnelIfStat +type $HpfTunnelFuncCb align(64)>,u32,<* <$fwd_hook_param_s>>) u64>> align(8), + @tunnelIs4o6 <* > align(8), + @tunnel6over4Output <* align(64)>,u32) u64>> align(8), + @tunnelIs6o4 <* > align(8), + @tunnel6over4Input <* align(64)>) u64>> align(8), + @tunnel4over6Input <* align(64)>,<* <$fwd_hook_param_s>>) u64>> align(8)}> +type $HpfTraceIpv4Addr +type $HpfTraceIpv6Addr , + @dstIp <$HpfIn6Addr>}> +type $HpfTracePktCondition , + @dstAddr <$HpfIn46Addr>, + @srcMac <[6] u8>, + @dstMac <[6] u8>, + @protocol u8, + @resv <[3] u8>, + @number u32 align(4), + @ethType u16 align(2), + @vlanId u16 align(2), + @srcPort u16 align(2), + @dstPort u16 align(2), + @srcIntf u32 align(4), + @maxLen u16 align(2), + @minLen u16 align(2), + @timeout u32 align(4), + @timeTag u64 align(8)}> +type $tag_dpdbg_trace , + @vsysIndex u16 align(2)}> +type $HpfTraceDbgCfg align(4), + @webCfg <$HpfFwdTraceDbgWebMsg> align(4), + @flow <[10] <$HpfPktTraceFlow>> align(8)}> +type $HpfPktStatDesc align(8), + 
@strChinese <* i8> align(8), + @solution <* i8> align(8)}> +type $HpfModuleCounter +type $HpfPortCounter +type $HpfCatchFilterCounter > align(8), + @filterPort <[3] <$HpfPortCounter>> align(8), + @eventCounter <[3] u64> align(8), + @errorCounter <[3] u64> align(8)}> +type $HpfPktInfo align(4), + @dstIp <$unnamed.6141> align(4), + @srcPort u16 align(2), + @dstPort u16 align(2), + @protocol u8, + @isV6 u8, + @ethtype u16 align(2), + @port u32 align(4), + @pktLen u32 align(4)}> +type $unnamed.6140 }> +type $unnamed.6141 }> +type $HpfPktCollect align(4), + @toltalPkts <[1] u64> align(8), + @toltalBytes <[1] u64> align(8), + @collectPkts <$HpeAtom32> align(4), + @collectBytes <[1] u32> align(4), + @pkt <[8192] <$HpfPktInfo>> align(4)}> +type $HpfPktCollectCtl align(4), + @hookIsRegsiter u32 align(4), + @hookNotifyRegsiter u32 align(4), + @hookEnhance u32 align(4), + @burstTimespan u32 align(4)}> +type $HpfPktAnalysisNodeL > align(8), + @next <* <$HpfPktAnalysisNodeL>> align(8), + @queue <* void> align(8), + @isIpv6 u8, + @pktType u8, + @directionType u8, + @rsv <[1] u8>, + @ipAddr <$unnamed.6142> align(4), + @pktNum u32 align(4), + @byteLen u32 align(4), + @ethtype <[8] u16> align(2), + @ethtypeCnt <[8] u32> align(4), + @protocol <[8] u8>, + @protocolCnt <[8] u32> align(4), + @srcPort <[8] u16> align(2), + @srcPortCnt <[8] u32> align(4), + @dstPort <[8] u16> align(2), + @dstPortCnt <[8] u32> align(4), + @port <[8] u32> align(4), + @portCnt <[8] u32> align(4), + @peerIp <[8] <$unnamed.6143>> align(4), + @peerIpCnt <[8] u32> align(4)}> +type $unnamed.6142 }> +type $unnamed.6143 }> +type $HpfPktAnalysisOrderq > align(8), + @tail <* <$HpfPktAnalysisNodeL>> align(8), + @lock <$HpeRwlock> align(4), + @count u32 align(4), + @state u32 align(4)}> +type $HpfPktAnalysisResult > align(8), + @orderq <[3][2] <$HpfPktAnalysisOrderq>> align(8)}> +type $unnamed.6144 +type $HpfPktAnalysisRecord > align(8), + @burst <[3] <$HpfPktAnalysisResult>> align(8), + @manual <[3] 
<$HpfPktAnalysisResult>> align(8)}> +type $HpfPktAnalysisCtl +type $TagPktBtStatistics +type $HpfBtpeerIp }> +type $HpfPktBtPara +type $HpfPktBtSetType +type $HpfFlowMngProHead align(4), + @flag u8, + @tblType u8}> +type $HpfFlowCfgReturnMsgHead +type $HpfFlowCfgSpecInfo +type $HpfFlowCfgGetInfoIndex +type $HpfFlowCfgSearchParam align(8), + @vlan u16 align(2), + @cpeFlag u8, + @masterFlag u8, + @reserved <[4] u8>, + @cpe <[4] u32> align(4)}> +type $HpfFlowStatCpuAndMemRecord align(4), + @heapMemTotal u64 align(8), + @heapMemUsed u64 align(8), + @sliceMemTotal u64 align(8), + @sliceMemUsed u64 align(8), + @chunkMemTotal u64 align(8), + @chunkMemUsed u64 align(8)}> +type $HpfFlowShowCpuStatRetMsg align(8)}> +type $HpfFlowShowStatRetMsgCondition +type $HpfFlowTotalSessInfo +type $HpfFlowSessInstId +type $HpfFlowSessNode +type $HpfFlowSessCfg +type $HpfFlowFtpClientData , + @szUserName <[65] i8>, + @szPassword <[65] i8>, + @szFileName <[65] i8>}> +type $HpfFlowStatTotalCurAndMax align(8)}> +type $HpfFlowStatCurAndMax align(8), + @flowAgeNum <[8] u64> align(8), + @lastSaveFlowCreateNum u64 align(8), + @lastRateCalTime u64 align(8), + @curFlowRate u64 align(8), + @maxFlowNum u64 align(8), + @maxFlowNumTime u64 align(8), + @maxFlowRate u64 align(8), + @maxFlowRateTime u64 align(8), + @maxFlowCpuMemRec <$HpfFlowStatCpuAndMemRecord> align(8)}> +type $HpfFlowMsg +type $HpfHrpFuncCb > align(8), + @migSetMirSessState <* > align(8), + @hrpIsDataPermit <* > align(8), + @hrpOutPacketToCpu <* align(64)>,i32,u8) i32>> align(8), + @hrpOutPacketMbuf <* align(64)>,i32) i32>> align(8), + @hrpMirSesEnabled <* > align(8), + @hrpAuIsClosed <* > align(8), + @hrpIssuNegoVerGet <* ) u32>> align(8), + @hrpSecBkupState <* > align(8), + @hrpGetRunState <* > align(8), + @hrpIntfGetconfigstate <* > align(8), + @hrpGetRunStateInit <* > align(8), + @hrpGetRunStateStandby <* > align(8), + @hrpGetRunStateSlave <* > align(8), + @hrpGetRunStateLoadbalance <* > align(8), + @hrpGetSessIp6 <* > align(8), 
+ @hrpGetSessIp6Portion <* > align(8), + @hrpGetSessIp6Require <* > align(8), + @hrpGetSessMigrateIp6 <* > align(8), + @hrpGetSessMigrateIp6Portion <* > align(8), + @hrpGetSessMigrateIp6Require <* > align(8), + @hrpGetPacketSes <* > align(8), + @hrpGetPacketFwd <* > align(8), + @hrpGetPacketDpIpsec <* > align(8), + @hrpGetPacketSermapIp6 <* > align(8), + @hrpGetIssuIpsecInt <* > align(8), + @hrpGetPacketSvr <* > align(8), + @hrpGetPacketDomainset <* > align(8), + @hrpGetHrpOk <* > align(8), + @hrpGetPacketSla <* > align(8), + @hrpIsCfgEnabled <* > align(8), + @hrpDpIfVrrpIsMaster <* ,u8,u8) u32>> align(8), + @hrpDpIsHrpInterface <* > align(8), + @hrpDataModRegister <* ,<* ,u64) i32>>,<* >) i32>> align(8), + @hrpGetVpnMapping <* > align(8)}> +type $HpfHrpPacketS +type $unnamed.6145 +type $HpfSpeedLimit +type $unnamed.6146 > align(8), + @tqe_prev <* <* <$HpeModTag>>> align(8)}> +type $tagImSystm +type $HpfZoneFuncCb > align(8), + @findZoneFlagAndTcpMss <* >,<* <* u16>>) u32>> align(8)}> +type $HpfFlowTtlInfo align(2)}> +type $HpfFwdAgingTime align(2), + @appProNum u16 align(2)}> +type $HpfFwdAgingTimeAppinfo +type $HpfFwdAgingTimeUserSrv +type $HpfAppProtoInfo }> +type $HpfAppProtoDes align(8)}> +type $HpfFwdRpcMsgHead +type $HpfFwdTcpMssZoneMsg +type $HpfFwdTcpMssMsg +type $HpfFwdTcpSeqMsg +type $HpfIcmpErrCntMsg +type $HpfPortMatDstIp +type $HpfNatArpTbl align(8), + @exSectQueryResult <$HpfArpNatPoolPara> align(8)}> +LOC 55 120 6 +func &HpfRecvDirect public used (var %mbuf <* <* <$hpe_mbuf> align(64)>> used, var %num i32 used) void +func &MbufAddrCheck public used (var %mbuf <* <$hpe_mbuf> align(64)>) void +func &HpeMbufSafeCheck public static used inline (var %mbuf <* <$hpe_mbuf> align(64)> used) void +LOC 60 133 20 +func &MBUF_CLEAR_CONTEXT public static used inline (var %mbuf <* <$hpe_mbuf> align(64)> used) void +func &HpfDecodeLswTag public used extern (var %mbuf <* <$hpe_mbuf> align(64)>) void +LOC 60 133 20 +func &HpfPrintMbufWithInfo public static used 
inline (var %m <* <$hpe_mbuf> align(64)> used) void +func &HpfPrintContent public static used inline (var %m <* <$hpe_mbuf> align(64)> used) void +func &HpeDiagLogFmt public used varargs (var %level u32, var %mod u32, var %format <* i8>, ...) i32 +func &HpfPktProcess public used extern (var %mbuf <* void>) u64 + +func &MBUF_CLEAR_CONTEXT public static used inline (var %mbuf <* <$hpe_mbuf> align(64)> used) void { + funcid 3310 + funcinfo { + @INFO_fullname "MBUF_CLEAR_CONTEXT"} + +LOC 60 136 14 + var %i_136_14 u32 used +LOC 60 137 9 + var %size_137_9 i32 used +LOC 60 138 15 + var %a_138_15 v4i32 used +LOC 60 139 16 + var %dst_ptr_139_16 <* v4i32> used + +LOC 60 137 9 + dassign %size_137_9 0 (constval i32 896) +LOC 60 138 15 + dassign %a_138_15 0 (intrinsicop v4i32 vector_from_scalar_v4i32 (constval i32 0)) +LOC 60 139 16 + dassign %dst_ptr_139_16 0 (add ptr ( + add ptr ( + dread ptr %mbuf, + cvt ptr u64 (constval u64 192)), + cvt ptr i32 (cvt i32 u32 (iread u32 <* <$hpe_mbuf> align(64)> 6 (dread ptr %mbuf))))) +LOC 60 140 12 + dassign %i_136_14 0 (constval u32 0) +LOC 60 140 5 + while (lt u1 u32 ( + dread u32 %i_136_14, + lshr u32 ( + cvt u32 i32 (dread i32 %size_137_9), + constval u64 4))) { +LOC 60 141 9 + iassign <* v4i32> 0 ( + add ptr ( + dread ptr %dst_ptr_139_16, + mul ptr ( + cvt ptr u32 (dread u32 %i_136_14), + constval ptr 16)), + dread v4i32 %a_138_15) +LOC 60 140 60 + dassign %i_136_14 0 (add u32 (dread u32 %i_136_14, constval u32 1)) + } +LOC 60 143 + return () +} + +LOC 43 193 20 +func &HpeMbufSafeCheck public static used inline (var %mbuf <* <$hpe_mbuf> align(64)> used) void { + funcid 1085 + funcinfo { + @INFO_fullname "HpeMbufSafeCheck"} + + +LOC 43 195 5 + if (ne u1 i64 ( + intrinsicop i64 C___builtin_expect ( + cvt i64 i32 (eq u1 i32 ( + eq u1 i32 ( + ne u1 i32 ( + cvt i32 u32 (iread u32 <* <$HpeMbufGlobalCtl>> 3 (dread ptr $g_mbufGlobalCtl)), + constval i32 0), + constval i32 0), + constval i32 0)), + constval i64 0), + constval i64 0)) { +LOC 43 
196 9 + call &MbufAddrCheck (dread ptr %mbuf) + } +LOC 43 198 + return () +} + +LOC 55 120 6 +func &HpfRecvDirect public used (var %mbuf <* <* <$hpe_mbuf> align(64)>> used, var %num i32 used) void { + funcid 4697 + funcinfo { + @INFO_fullname "HpfRecvDirect"} + +LOC 55 122 13 + var %i_122_13 i32 used +LOC 55 123 14 + var %ifIndex_123_14 u32 used +LOC 55 129 20 + var %ethHdr_129_20 <* <$HpfEthHdr>> used +LOC 55 134 127 + var %level_134_127 u32 used +LOC 55 135 64 + var %temp__135_64 <* <$hpe_mbuf> align(64)> used +LOC 55 135 169 + var %traceIdx__135_169 u16 used +LOC 55 135 214 + var %funcId__135_214 u16 used + var %levVar_5741 i32 + var %retVar_5743 i32 + var %retVar_5759 u8 + var %retVar_5768 u64 + +LOC 55 125 12 + dassign %i_122_13 0 (constval i32 0) +LOC 55 125 5 + while (lt u1 i32 (dread i32 %i_122_13, dread i32 %num)) { +LOC 55 126 9 + call &MBUF_CLEAR_CONTEXT (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))) +LOC 55 127 17 + dassign %ifIndex_123_14 0 (cvt u32 i32 (bior i32 ( + constval i32 0, + bior i32 ( + bior i32 ( + constval i32 0, + band i32 ( + shl i32 ( + cvt i32 u32 (iread u32 <* <$hpe_mbuf> align(64)> 4 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13)))), + constval i32 7), + constval i32 0x7f80)), + constval i32 0)))) +LOC 55 129 20 + dassign %ethHdr_129_20 0 (add ptr ( + cvt ptr u64 (cvt u64 ptr (iread ptr <* <$hpe_mbuf> align(64)> 1 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))))), + cvt ptr i32 (cvt i32 u32 (iread u32 <* <$hpe_mbuf> align(64)> 3 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))))))) +LOC 55 130 9 + if (eq u1 i32 ( + bior i32 ( + ashr i32 ( + band i32 ( + cvt i32 u32 (iread u32 <* <$HpfEthHdr>> 3 (dread 
ptr %ethHdr_129_20)), + constval i32 0xff00), + constval i32 8), + shl i32 ( + band i32 ( + cvt i32 u32 (iread u32 <* <$HpfEthHdr>> 3 (dread ptr %ethHdr_129_20)), + constval i32 255), + constval i32 8)), + constval i32 0x8874)) { +LOC 55 131 13 + call &HpfDecodeLswTag (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))) +LOC 55 132 21 + dassign %ifIndex_123_14 0 (bior u32 ( + constval u32 0, + bior u32 ( + bior u32 ( + constval u32 0, + band u32 ( + shl u32 ( + iread u32 <* <$HpfMbufContext>> 638 (add ptr ( + add ptr ( + iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13)), + cvt ptr u64 (constval u64 192)), + cvt ptr i32 (cvt i32 u32 (iread u32 <* <$hpe_mbuf> align(64)> 6 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))))))), + constval i32 7), + constval u32 0x7f80)), + constval u32 0))) + } +LOC 55 134 11 + if (ne u1 i64 ( + intrinsicop i64 C___builtin_expect ( + cvt i64 i32 (eq u1 i32 ( + eq u1 i32 ( + ge u1 i32 ( + sext i32 8 (dread i32 $g_hpfHlogLevel), + constval i32 6), + constval i32 0), + constval i32 0)), + constval i64 0), + constval i64 0)) { +LOC 55 134 80 + dowhile { +LOC 55 134 85 + if (le u1 i32 ( + constval i32 4, + sext i32 8 (dread i32 $g_hpfHlogLevel))) { +LOC 55 134 135 + if (ne u1 i32 ( + cvt i32 u32 (dread u32 $g_hpfHlogForcePrint), + constval i32 0)) { +LOC 55 134 164 + dassign %levVar_5741 0 (constval i32 0) + } + else { +LOC 55 134 171 + dassign %levVar_5741 0 (constval i32 4) + } +LOC 55 134 127 + dassign %level_134_127 0 (cvt u32 i32 (dread i32 %levVar_5741)) +LOC 55 134 178 + callassigned &HpeDiagLogFmt ( + bior u32 (constval u32 256, dread u32 %level_134_127), + dread u32 $g_hpfHlogModId, + conststr ptr "[pkt_trace:%s] : hpf recv pkt from phy, port:%u", + conststr ptr "HpfRecvDirect", + cvt i32 u32 
(iread u32 <* <$hpe_mbuf> align(64)> 4 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))))) { dassign %retVar_5743 0 } + } + } (constval u1 0) +LOC 55 134 347 + if (ne u1 ptr ( + iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13)), + cvt ptr i32 (constval i32 0))) { +LOC 55 134 376 + call &HpfPrintMbufWithInfo (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))) +LOC 55 134 407 + call &HpfPrintContent (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))) + } + } +LOC 55 135 9 + dowhile { +LOC 55 135 14 + if (eq u1 i32 ( + cvt i32 u32 (iread u32 <* <$HpeMbufGlobalCtl>> 1 (dread ptr $g_mbufGlobalCtl)), + constval i32 1)) { +LOC 55 135 84 + dassign %temp__135_64 0 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))) +LOC 55 135 71 + while (ne u1 ptr ( + dread ptr %temp__135_64, + cvt ptr i32 (constval i32 0))) { +LOC 55 135 180 + call &HpeMbufSafeCheck (dread ptr %temp__135_64) +LOC 55 135 214 + dassign %funcId__135_214 0 (constval u16 640) +LOC 55 135 270 + if (ne u1 i64 ( + intrinsicop i64 C___builtin_expect ( + cvt i64 i32 (eq u1 i32 ( + eq u1 i32 ( + eq u1 i32 ( + cvt i32 u32 (iread u32 <* <$HpeMbufGlobalCtl>> 4 (dread ptr $g_mbufGlobalCtl)), + constval i32 0), + constval i32 0), + constval i32 0)), + constval i64 1), + constval i64 0)) { +LOC 55 135 354 + dassign %traceIdx__135_169 0 (zext u32 16 (band i32 ( + cvt i32 u32 (iread u32 <* <$HpeMbufPrivInfo>> 17 (add ptr ( + iread ptr <* <$hpe_mbuf> align(64)> 18 (cvt ptr i64 (cvt i64 ptr (dread ptr %temp__135_64))), + cvt ptr u64 (constval u64 880)))), + constval i32 31))) +LOC 55 135 653 + iassign <* u16> 0 ( + array 1 ptr 
<* <[32] u16>> ( + iaddrof ptr <* <$HpeMbufPrivInfo>> 22 (add ptr ( + iread ptr <* <$hpe_mbuf> align(64)> 18 (cvt ptr i64 (cvt i64 ptr (dread ptr %temp__135_64))), + cvt ptr u64 (constval u64 880))), + dread u32 %traceIdx__135_169), + dread u32 %funcId__135_214) +LOC 55 135 677 + dassign %traceIdx__135_169 0 (zext u32 16 (band i32 ( + add i32 ( + cvt i32 u32 (dread u32 %traceIdx__135_169), + constval i32 1), + constval i32 31))) +LOC 55 135 843 + iassign <* <$HpeMbufPrivInfo>> 17 ( + add ptr ( + iread ptr <* <$hpe_mbuf> align(64)> 18 (cvt ptr i64 (cvt i64 ptr (dread ptr %temp__135_64))), + cvt ptr u64 (constval u64 880)), + zext u32 8 (dread u32 %traceIdx__135_169)) + } + else { +LOC 55 135 880 + intrinsiccallwithtypeassigned u8 C___sync_add_and_fetch_1 ( + iaddrof ptr <* <$HpeMbufPrivInfo>> 17 (add ptr ( + iread ptr <* <$hpe_mbuf> align(64)> 18 (cvt ptr i64 (cvt i64 ptr (dread ptr %temp__135_64))), + cvt ptr u64 (constval u64 880))), + constval u8 1) { dassign %retVar_5759 0 } +LOC 55 135 878 + dassign %traceIdx__135_169 0 (zext u32 16 (dread u32 %retVar_5759)) +LOC 55 135 1211 + iassign <* u16> 0 ( + array 1 ptr <* <[32] u16>> ( + iaddrof ptr <* <$HpeMbufPrivInfo>> 22 (add ptr ( + iread ptr <* <$hpe_mbuf> align(64)> 18 (cvt ptr i64 (cvt i64 ptr (dread ptr %temp__135_64))), + cvt ptr u64 (constval u64 880))), + band i32 ( + sub i32 ( + cvt i32 u32 (dread u32 %traceIdx__135_169), + constval i32 1), + constval i32 31)), + dread u32 %funcId__135_214) + } +LOC 55 135 128 + dassign %temp__135_64 0 (iread ptr <* <$hpe_mbuf> align(64)> 8 (dread ptr %temp__135_64)) + } + } + } (constval u1 0) +LOC 55 136 119 + iassign <* <$HpfMbufContext>> 157 ( + add ptr ( + add ptr ( + iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13)), + cvt ptr u64 (constval u64 192)), + cvt ptr i32 (cvt i32 u32 (iread u32 <* <$hpe_mbuf> align(64)> 6 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* 
<$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13)))))), + dread u32 %ifIndex_123_14) +LOC 55 137 123 + iassign <* <$HpfMbufContext>> 156 ( + add ptr ( + add ptr ( + iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13)), + cvt ptr u64 (constval u64 192)), + cvt ptr i32 (cvt i32 u32 (iread u32 <* <$hpe_mbuf> align(64)> 6 (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13)))))), + dread u32 %ifIndex_123_14) +LOC 55 139 15 + # CHECK-NOT: lsl + # CHECK:str {{.*}} #16 + callassigned &HpfPktProcess (iread ptr <* <* <$hpe_mbuf> align(64)>> 0 (array 1 ptr <* <[1] <* <$hpe_mbuf> align(64)>>> (dread ptr %mbuf, dread i32 %i_122_13))) { dassign %retVar_5768 0 } +LOC 55 139 9 + eval (dread u64 %retVar_5768) +LOC 55 125 27 + dassign %i_122_13 0 (add i32 (dread i32 %i_122_13, constval i32 1)) + } +LOC 55 141 + return () +} diff --git a/testsuite/c_test/unit_test/UNIT0084-ivopt-str-post-inc/test.cfg b/testsuite/c_test/unit_test/UNIT0084-ivopt-str-post-inc/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..47e9d7ac62cc17c9ae1a40a1677b69aaa1ac0b51 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0084-ivopt-str-post-inc/test.cfg @@ -0,0 +1,3 @@ +CO2: +$MAPLE_ROOT/output/aarch64-clang-release/bin/maple --run=me:mpl2mpl:mplcg --option=" --quiet -O2 --quiet: --quiet -O2 --quiet --side-effect-white-list: --quiet -O2 --fPIC --duplicate_asm_list=$MAPLE_ROOT/memcpy_mpl.s --no-common --no-common" -S --save-temps --debug --stack-protector-strong -o hpf_init.s --quiet -fPIC --no-common hpf_init.mpl +cat hpf_init.s | ${OUT_ROOT}/tools/bin/FileCheck hpf_init.mpl diff --git a/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/expected.txt b/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/expected.txt new file mode 100644 index 
0000000000000000000000000000000000000000..573541ac9702dd3969c9bc859d2b91ec1f7e6e56 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/expected.txt @@ -0,0 +1 @@ +0 diff --git a/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/main.c b/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/main.c new file mode 100644 index 0000000000000000000000000000000000000000..4328b2e58e95c8cebc4076f8d76ef9a53cd95703 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/main.c @@ -0,0 +1,22 @@ +#include +long long a; +int b; +int c[]; +short d[][13][11][18]; +long long e[][13][11][18]; +char f; +void g(long long *p1, int i) { *p1 = i; } +void fn2(int, unsigned char, short[][13][11][18], long long[][13][11][18]); +int main() { + fn2(601, 2, d, e); + g(&a, b); + printf("%llu\n", a); +} +void fn2(int p1, unsigned char i, short j[][13][11][18], + long long m[][13][11][18]) { + for (int k = 0; k < 4; k = p1) + if (c) + for (int l = 0; l < 40933431; l++) + b = f - i ? 
0 : 90807144187420; +} + diff --git a/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/test.cfg b/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0084-vrp-judge-equal/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/expected.txt b/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/func.c b/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/func.c new file mode 100644 index 0000000000000000000000000000000000000000..ec3171ff8cb7b30bd53e7d0c014560489ca5c67a --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/func.c @@ -0,0 +1,39 @@ +#define int32_t int +#define uint64_t unsigned long +#define int64_t long + +struct S0 { + int32_t f0; +}; +struct { + struct S0 f4; +} g_88; +uint64_t g_56, func_21_i, g_803 = 1, g_755 = 4073709551615, g_301 = 8; +const volatile int32_t *volatile g_1773 = &g_88.f4.f0; +uint64_t *fn1(int64_t p_22, safe_add_func_uint8_t_u_u, safe_add_func_uint16_t_u_u) { + int32_t l_1470 = 0; + for (; 0 < 7; func_21_i++) { + for (g_803 = 0;; g_803 = safe_add_func_uint8_t_u_u) { + if (p_22) + break; + return &g_56; + } + for (l_1470 = 1;; ) { + if (*g_1773) + break; + if (*g_1773) { + for (g_755 = 2; 0; ) + ; + continue; + } + if (p_22) + break; + } + for (g_301 = 1; 0; g_301 = safe_add_func_uint16_t_u_u) + ; + } +} + +int main() { + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/test.cfg b/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/test.cfg new file mode 100644 index 
0000000000000000000000000000000000000000..df9a2eb8d2b77c4f4f728cf89894f7aaf625e06a --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0085-bblayout-OptimizeBranchTarget/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(func) +run(func) diff --git a/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/expected.txt b/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..b8626c4cff2849624fb67f87cd0ad72b163671ad --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/expected.txt @@ -0,0 +1 @@ +4 diff --git a/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/main.c b/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/main.c new file mode 100644 index 0000000000000000000000000000000000000000..9e034911e8bb298a83256b0e0950420f14043a2c --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/main.c @@ -0,0 +1,22 @@ +#include +long long a; +short d = 057170; +_Bool e = 1; +int f = -2124005834; +int g[2][20][17][3]; +char i; +long(l)[][4]; +void b(long long *m, int n) { *m ^= n; } +int main() { + for (short b = 0; b < (short)f; b = d) + for (short c = 0; c < (short)448128273 + 20739; c = f) + for (long h = e - 1LL; h < 2846839013 - 2846838996; h += 4) + for (_Bool j = (_Bool)d - 1; j < 1; j = l[h]) + g[b][c][h][i] = 4; + for (size_t j = 0; j < 2; ++j) + for (size_t k = 0; k < 20; ++k) + for (size_t ac = 0; ac < 17; ++ac) + for (size_t ad = 0; ad < 3; ++ad) + b(&a, g[j][k][ac][ad]); + printf("%llu\n", a); +} diff --git a/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/test.cfg b/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0088-ivopts-iv-contains-bool/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git 
a/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/expected.txt b/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..45a4fb75db864000d01701c0f7a51864bd4daabf --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/expected.txt @@ -0,0 +1 @@ +8 diff --git a/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/t4.c b/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/t4.c new file mode 100644 index 0000000000000000000000000000000000000000..a20884ef61218d330888aa385bd349cd7c7b4439 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/t4.c @@ -0,0 +1,18 @@ +/* + * (c) Copyright 2019 by Solid Sands B.V., + * Amsterdam, the Netherlands. All rights reserved. + * Subject to conditions in the RESTRICTIONS file. + * (c) Copyright 2007 ACE Associated Computer Experts bv + * (c) Copyright 2007 ACE Associated Compiler Experts bv + * All rights reserved. Subject to conditions in RESTRICTIONS file. + */ +long long +test_it(int x) +{ + return x < 32 ? 
1ll << x : 0ll; +} + +int main() { + printf("%d\n", test_it(3)); +} + diff --git a/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/test.cfg b/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..95fa6a7aa3431fd82222b11cf9765b8a93218ba5 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0089-ico-move-insn/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(t4) +run(t4) diff --git a/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/expected.txt b/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/main.c b/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/main.c new file mode 100644 index 0000000000000000000000000000000000000000..7504fbf9a9083ad1d445fec258c35566b8eb45d0 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/main.c @@ -0,0 +1,36 @@ +#include + +void test1(_Bool b[][6]) { + for (; b[2][0] << 9223372036854775807;) + ; +} + +void test2(_Bool b[][6]) { + for (; b[2][0] << -9223372036854775807;) + ; +} + +void test3(_Bool b[][6]) { + for (; b[2][0] << 8090864131964928;) + ; +} + +void test4(_Bool b[][6]) { + for (; b[2][0] >> 9223372036854775807;) + ; +} + +void test5(_Bool b[][6]) { + for (; b[2][0] >> -9223372036854775807;) + ; +} + +void test6(_Bool b[][6]) { + for (; b[2][0] >> 8090864131964928;) + ; +} + + +int main() { + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/test.cfg b/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c60d32f37b78b1e9c0e148aac03e275edb482cc3 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0089-imm-shift-truncat/test.cfg @@ -0,0 +1,3 @@ +COS,CO2: +compile(main) +run(main) diff --git 
a/testsuite/c_test/unit_test/UNIT0090-hfa_param/expected.txt b/testsuite/c_test/unit_test/UNIT0090-hfa_param/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..5ae7d43ff7b7426fc1bd17d35e54f96111fcc2f6 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0090-hfa_param/expected.txt @@ -0,0 +1 @@ +10.00 diff --git a/testsuite/c_test/unit_test/UNIT0090-hfa_param/hfa_param.c b/testsuite/c_test/unit_test/UNIT0090-hfa_param/hfa_param.c new file mode 100644 index 0000000000000000000000000000000000000000..5432989144fe5b56ad840a0c203f8760eb287bf3 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0090-hfa_param/hfa_param.c @@ -0,0 +1,24 @@ +typedef struct { + double s1; + double s2; + double s3; + double s4; +} st_t; + +__attribute__((noinline)) double func(st_t a0, st_t a1, st_t a2) { + st_t *a00 = &a0; + double *s1 = &a1.s1; + double s2 = a2.s3; + st_t *a01 = &a2; + + return a00->s4 + *s1 + s2 + a01->s2; +} + +int main() { + st_t a0 = {1.0, 2.0, 3.0, 4.0}; + double res = func(a0, a0, a0); + + printf("%.2f\n", res); + + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0090-hfa_param/test.cfg b/testsuite/c_test/unit_test/UNIT0090-hfa_param/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..d63a9498d201a7c2b48743aa8d3aa2004452b305 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0090-hfa_param/test.cfg @@ -0,0 +1,2 @@ +compile(hfa_param) +run(hfa_param) diff --git a/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/HomogeneousAggregates.c b/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/HomogeneousAggregates.c new file mode 100644 index 0000000000000000000000000000000000000000..a91aad7fcb7b1b90a4dfe0691dbbd35c688ec854 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/HomogeneousAggregates.c @@ -0,0 +1,166 @@ +#include + +typedef struct { + float a[2]; + float b; +} S0; +S0 func0() { + // CHECK: ldr s0 + // CHECK-NEXT: ldr s1 + // 
CHECK-NEXT: ldr s2 + S0 s; + return s; +} + +typedef struct { + double a[2]; + double b; +} S1; +S1 func1() { + // CHECK: ldr d0 + // CHECK-NEXT: ldr d1 + // CHECK-NEXT: ldr d2 + S1 s; + return s; +} + +typedef struct { + S1 a; + double b; +} S2; +S2 func2() { + // CHECK: ldr d0 + // CHECK-NEXT: ldr d1 + // CHECK-NEXT: ldr d2 + // CHECK-NEXT: ldr d3 + S2 s; + return s; +} + +typedef struct __attribute__ ((__packed__)) { + float a; +} S3; +S3 func3() { + // CHECK: ldr s0 + S3 s; + return s; +} + +typedef struct { + S3 a; + float b; +} S4; +S4 func4() { + // CHECK: ldr s0 + // CHECK-NEXT: ldr s1 + S4 s; + return s; +} + +typedef struct { + uint8x8_t a; + uint16x4_t b; +} S5; +S5 func5() { + // CHECK: ldr d0 + // CHECK-NEXT: ldr d1 + S5 s; + return s; +} + +typedef struct { + uint8x16_t a; + uint16x8_t b; +} S6; +S6 func6() { + // CHECK: ldr q0 + // CHECK-NEXT: ldr q1 + S6 s; + return s; +} + +typedef struct {} Empty; +typedef struct { + uint8x16_t a; + Empty b; +} S7; +S7 func7() { + // CHECK: ldr q0 + S7 s; + return s; +} + +typedef union { + double a[2]; + double b; +} S8; +S8 func8() { + S8 s; + // CHECK: ldr d0 + // CHECK-NEXT: ldr d1 + return s; +} + +int8x8x2_t func9() { + int8x8x2_t a; + // CHECK: ldr d0 + // CHECK-NEXT: ldr d1 + return a; +} + +typedef struct { + int8x8x2_t a; + int8x8_t b; +} S10; +S10 func10() { + S10 a; + // CHECK: ldr d0 + // CHECK-NEXT: ldr d1 + // CHECK-NEXT: ldr d2 + return a; +} + +typedef union { + double a[2]; + double b; + Empty c; +} S11; +S11 func11() { + S11 s; + // CHECK: ldr d0 + // CHECK-NEXT: ldr d1 + return s; +} + +typedef int v4si __attribute__ ((vector_size (16))); +v4si fucn12() { + v4si a; + // CHECK: ldr q0 + return a; +} + +typedef struct { + v4si a, b; +} S13; +S13 func13() { + S13 s; + // CHECK: ldr q0 + // CHECK-NEXT: ldr q1 + return s; +} + +typedef struct { + v4si a, b; + int8x16_t c; +} S14; +S14 func14() { + S14 s; + // CHECK: ldr q0 + // CHECK-NEXT: ldr q1 + // CHECK-NEXT: ldr q2 + return s; +} + +int main() 
{ + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/expected.txt b/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/test.cfg b/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..66e286e81dd22e9eb37ff431457e954b0f3bbd50 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0091-HomogeneousAggregates/test.cfg @@ -0,0 +1,3 @@ +CO0: +compile(HomogeneousAggregates) +cat HomogeneousAggregates.s | ${OUT_ROOT}/tools/bin/FileCheck HomogeneousAggregates.c \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/expected.txt b/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/test.c b/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/test.c new file mode 100644 index 0000000000000000000000000000000000000000..0a3496c12756044a6b038e9bbe300ffb7f62851e --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/test.c @@ -0,0 +1,29 @@ +#include "csmith.h" + +const uint32_t g_8 = 18446744073709551615; +int8_t g_39 = 1; +uint32_t g_40 = 4073709551615; +int32_t g_42 = 6; +int16_t g_60 = 0; +uint64_t g_102 = 3; +int32_t *g_125 = &g_42; +uint32_t g_204 = 1; + +void fn1() { + for (g_102 = 1;; g_102++) { + uint32_t *l_253 = &g_204; + for (g_40 = 0; g_40 <= 1; g_40++) + for (g_39 = 0; g_39 <= 1; g_39++) { + uint32_t *l_254 = 0; + if (0) + ; + else { + *g_125 = (0, l_253) != l_254 ^ 0 ^ (1UL | g_60) ^ g_8 ^ -2L; + } + } + } +} + +int main() { + return 0; +} diff --git 
a/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/test.cfg b/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b529d798b30a0944d503f99a9085a5efaeaaf637 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0092-emit-rm-imm-size/test.cfg @@ -0,0 +1,3 @@ +COS,CO2: +compile(test) +run(test) diff --git a/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/StructNearMumap.c b/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/StructNearMumap.c new file mode 100644 index 0000000000000000000000000000000000000000..ff3bb6303f3e65db7d8de0b15212bb3666e19637 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/StructNearMumap.c @@ -0,0 +1,144 @@ +/* PR middle-end/36043 target/58744 target/65408 */ +/* { dg-do run { target mmap } } */ +/* { dg-options "-O2" } */ + +#include +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +#ifndef MAP_ANON +#define MAP_ANON 0 +#endif +#ifndef MAP_FAILED +#define MAP_FAILED ((void*)-1) +#endif + +typedef struct { + unsigned char r; + unsigned char g; + unsigned char b; +} __attribute__((packed)) pr58744; + +typedef struct { + unsigned short r; + unsigned short g; + unsigned short b; +} pr36043; + +typedef struct { + int r; + int g; + int b; +} pr65408; + +// check struct pass by reg +__attribute__((noinline, noclone)) void f1a(pr58744 x) { + if (x.r != 1 || x.g != 2 || x.b != 3) + __builtin_abort(); +} + +// check struct pass by stack +__attribute__((noinline, noclone)) void f1as(int v0, int v1, int v2, int v3, int v4, int v5, int v6, int v7, pr58744 x) { + if (x.r != 1 || x.g != 2 || x.b != 3) + __builtin_abort(); +} + +__attribute__((noinline, noclone)) void f1(pr58744* x) { + f1a(*x); +} + +__attribute__((noinline, noclone)) void f1s(int v0, int v1, int v2, int v3, int v4, int v5, int v6, int v7, pr58744* x) { + f1as(0, 1, 2, 3, 4, 5, 6, 7, *x); +} + +__attribute__((noinline, noclone)) void f2a(pr36043 x) { + if (x.r != 1 || 
x.g != 2 || x.b != 3) + __builtin_abort(); +} + +__attribute__((noinline, noclone)) void f2as(int v0, int v1, int v2, int v3, int v4, int v5, int v6, int v7, pr36043 x) { + if (x.r != 1 || x.g != 2 || x.b != 3) + __builtin_abort(); +} + +__attribute__((noinline, noclone)) void f2(pr36043* x) { + f2a(*x); +} + +__attribute__((noinline, noclone)) void f2s(int v0, int v1, int v2, int v3, int v4, int v5, int v6, int v7, pr36043* x) { + f2as(0, 1, 2, 3, 4, 5, 6, 7, *x); +} + +__attribute__((noinline, noclone)) void f3a(pr65408 x) { + if (x.r != 1 || x.g != 2 || x.b != 3) + __builtin_abort(); +} + +__attribute__((noinline, noclone)) void f3as(int v0, int v1, int v2, int v3, int v4, int v5, int v6, int v7, pr65408 x) { + if (x.r != 1 || x.g != 2 || x.b != 3) + __builtin_abort(); +} + +__attribute__((noinline, noclone)) void f3(pr65408* x) { + f3a(*x); +} + +__attribute__((noinline, noclone)) void f3s(int v0, int v1, int v2, int v3, int v4, int v5, int v6, int v7, pr65408* x) { + f3as(0, 1, 2, 3, 4, 5, 6, 7, *x); +} + +typedef struct { + unsigned char r; + unsigned char g; + unsigned char b; + unsigned char m[32]; +} __attribute__((packed)) mstruct; + +// check struct pass by pointer +__attribute__((noinline, noclone)) void f4a(mstruct x) { + if (x.r != 1 || x.g != 2 || x.b != 3) + __builtin_abort(); +} + +__attribute__((noinline, noclone)) void f4(mstruct* x) { + f4a(*x); +} + +int main() { + char* p = mmap((void*)0, 131072, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (p == MAP_FAILED) + return 0; + char* endp = p + 65536; + if (munmap(endp, 65536) < 0) + return 0; + + pr58744* s1 = (pr58744*)endp - 1; + s1->r = 1; + s1->g = 2; + s1->b = 3; + f1(s1); + f1s(0, 1, 2, 3, 4, 5, 6, 7, s1); + + pr36043* s2 = (pr36043*)endp - 1; + s2->r = 1; + s2->g = 2; + s2->b = 3; + f2(s2); + f2s(0, 1, 2, 3, 4, 5, 6, 7, s2); + + pr65408* s3 = (pr65408*)endp - 1; + s3->r = 1; + s3->g = 2; + s3->b = 3; + f3(s3); + f3s(0, 1, 2, 3, 4, 5, 6, 7, s3); + + mstruct* s4 = 
(mstruct*)endp - 1; + s4->r = 1; + s4->g = 2; + s4->b = 3; + f4(s4); + + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/expected.txt b/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/test.cfg b/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..4f0b2ac318a53901eb70c694b9772c0a8d831a98 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0093-StructNearMumap/test.cfg @@ -0,0 +1,2 @@ +compile(StructNearMumap) +run(StructNearMumap) diff --git a/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/LowerFiledIDNotZero.c b/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/LowerFiledIDNotZero.c new file mode 100644 index 0000000000000000000000000000000000000000..1ce8c672ded4a8aa0693c7b51602613a0f937259 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/LowerFiledIDNotZero.c @@ -0,0 +1,54 @@ +/* { dg-do run } */ +/* { dg-options "-O1" } */ + +struct S { + int i, j; +}; + +struct Z { + struct S d, s; +}; + +struct S __attribute__((noinline, noclone)) get_s(void) { + struct S s; + s.i = 5; + s.j = 6; + + return s; +} + +struct S __attribute__((noinline, noclone)) get_d(void) { + struct S d; + d.i = 0; + d.j = 0; + + return d; +} + +int __attribute__((noinline, noclone)) get_c(void) { + return 1; +} + +int __attribute__((noinline, noclone)) my_nop(int i) { + return i; +} + +int __attribute__((noinline, noclone)) foo(void) { + struct Z z; + int i, c = get_c(); + + z.d = get_d(); + z.s = get_s(); + + for (i = 0; i < c; i++) { + z.s.i = my_nop(z.s.i); + z.s.j = my_nop(z.s.j); + } + + return z.s.i + z.s.j; +} + +int main(int argc, char *argv[]) { + if (foo() != 11) __builtin_abort(); + return 0; +} \ No newline at end of file diff --git 
a/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/expected.txt b/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/test.cfg b/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..e6228e8600dce98818826f71d521cfcf71b5b777 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0094-LowerFiledIDNotZero/test.cfg @@ -0,0 +1,2 @@ +compile(LowerFiledIDNotZero) +run(LowerFiledIDNotZero) diff --git a/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/expected.txt b/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/main.c b/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/main.c new file mode 100644 index 0000000000000000000000000000000000000000..50b20169ba5bcd0ede3519d8fe815e964dccc710 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/main.c @@ -0,0 +1,32 @@ +int a = 0; +int b = 0; + +// test if available tripcount of refused loop equals to 0 +__attribute__ ((noinline)) +void refuse1() { + unsigned int i = -2; + for (;i <= 5; i++) { + a += 1; + } +} + +// test if guarded tripcount guards refused loop +__attribute__ ((noinline)) +unsigned int refuse2(unsigned int i) { + for (;i <= 5; i++) { + b += 1; + } + return i; +} + +int main() { + refuse1(); + if (a != 0) { + abort(); + } + + unsigned int i = refuse2(-2); + if (i != -2 || b != 0) { + abort(); + } +} diff --git a/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/test.cfg b/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/test.cfg new file mode 100644 index 
0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0095-lfo-refused-loop/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/SimplifyOrMeExpr.c b/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/SimplifyOrMeExpr.c new file mode 100644 index 0000000000000000000000000000000000000000..2c794fa8356e69817265a747e1d9d13c22ec0f65 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/SimplifyOrMeExpr.c @@ -0,0 +1,54 @@ +#include +#include + +__attribute__((noinline)) int8_t foo_8() { + return 0xF; +} +void func_8() { + int8_t a = foo_8(); + int8_t b = 4; + int8_t c = -16; + c = (a ^ b) | c; + printf("%d\n", c); +} + +__attribute__((noinline)) int16_t foo_16() { + return 0xF; +} +void func_16() { + int16_t a = foo_16(); + int16_t b = 4; + int16_t c = -16; + c = (a ^ b) | c; + printf("%d\n", c); +} + +__attribute__((noinline)) int32_t foo_32() { + return 0xF; +} +void func_32() { + int32_t a = foo_32(); + int32_t b = 4; + int32_t c = -16; + c = (a ^ b) | c; + printf("%d\n", c); +} + +__attribute__((noinline)) int64_t foo_64() { + return 0xF; +} +void func_64() { + int64_t a = foo_64(); + int64_t b = 4; + int64_t c = -16; + c = (a ^ b) | c; + printf("%ld\n", c); +} + +int main() { + func_8(); + func_16(); + func_32(); + func_64(); + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/expected.txt b/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..153a9247e39655ee23e6d9651aa652238c9e03d1 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/expected.txt @@ -0,0 +1,4 @@ +-5 +-5 +-5 +-5 diff --git a/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/test.cfg b/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/test.cfg new file mode 100644 index 
0000000000000000000000000000000000000000..f9e0cd15783754562b5c0d617fab142a88d2943a --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0096-SimplifyOrMeExpr/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(SimplifyOrMeExpr) +run(SimplifyOrMeExpr) \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/ParamWithLargeStack.c b/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/ParamWithLargeStack.c new file mode 100644 index 0000000000000000000000000000000000000000..5e6ca5552a1a04304068c25ef2baf3fd89713928 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/ParamWithLargeStack.c @@ -0,0 +1,141 @@ +#include + +#pragma pack(1) +struct S1 { + short f1 : 10; + int f2; + char f5 : 7; +}; // size is 7, passed by one reg + +struct S2 { + long f1; + struct S1 f3; +}; // size is 15, passed by two reg + +struct S3 { + long f1; + long f2; + struct S1 f3; +}; // size is 23, passed by pointer + +void printS1(struct S1 s) { + printf("%d,%d,%d\n", s.f1, s.f2, s.f5); +} + +__attribute__((noinline)) struct S1 PassByRegS1(struct S1 s) { + struct S1 c[20480]; + return s; +} + +__attribute__((noinline)) struct S1 PassByStackS1(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7, + struct S1 s) { + struct S1 c[20480]; + return s; +} + +void testS1() { + struct S1 a[20480]; + struct S1 b = {2, 5, 2}; + b = PassByRegS1(b); + printS1(b); + struct S1 c = {5, 6, 3}; + c = PassByStackS1(0, 1, 2, 3, 4, 5, 6, 7, c); + printS1(c); +} + +void printS2(struct S2 s) { + printf("%ld,", s.f1); + printS1(s.f3); +} + +__attribute__((noinline)) struct S2 PassByRegS2(struct S2 s) { + struct S2 c[20480]; + return s; +} + +__attribute__((noinline)) struct S2 PassByStackS2(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7, + struct S2 s) { + struct S2 c[20480]; + return s; +} + +void testS2() { + struct S2 a[20480]; + struct S2 b = {2, 5, 2, 1}; + b = PassByRegS2(b); + printS2(b); + struct S2 c = {5, 6, 3, 5}; + c = 
PassByStackS2(0, 1, 2, 3, 4, 5, 6, 7, c); + printS2(c); +} + +void printS3(struct S3 s) { + printf("%ld,%ld,", s.f1, s.f2); + printS1(s.f3); +} + +__attribute__((noinline)) struct S3 PassByRegS3(struct S3 s) { + struct S3 c[20480]; + return s; +} + +__attribute__((noinline)) struct S3 PassByStackS3(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7, + struct S3 s) { + struct S3 c[20480]; + return s; +} + +void testS3() { + struct S3 a[20480]; + struct S3 b = {2, 5, 2, 1, 9}; + b = PassByRegS3(b); + printS3(b); + struct S3 c = {5, 6, 3, 5, 8}; + c = PassByStackS3(0, 1, 2, 3, 4, 5, 6, 7, c); + printS3(c); +} + +#define TEST_BASE_TYPE(base, passType, printStr) \ + __attribute__((noinline)) base PassByReg_##base(base a) { \ + base c[20480]; \ + return a; \ + } \ + __attribute__((noinline)) base PassByStack_##base(passType a0, passType a1, passType a2, passType a3, passType a4, \ + passType a5, passType a6, passType a7, base a) { \ + base c[20480]; \ + return a; \ + } \ + void test_##base() { \ + base a[40960]; \ + base b = (base)(56); \ + b = PassByReg_##base(b); \ + printf(printStr, b); \ + base c = (base)(78); \ + c = PassByStack_##base(0, 1, 2, 3, 4, 5, 6, 7, c); \ + printf(printStr, c); \ + } + +TEST_BASE_TYPE(char, int, "%d\n"); +TEST_BASE_TYPE(short, int, "%d\n"); +TEST_BASE_TYPE(int, int, "%d\n"); +TEST_BASE_TYPE(long, int, "%ld\n"); +TEST_BASE_TYPE(float, float, "%.2f\n"); +TEST_BASE_TYPE(double, float, "%.2f\n"); +#undef TEST_BASE_TYPE + +int main() { + printf("sizeof(struct S1) = %ld\n", sizeof(struct S1)); + printf("sizeof(struct S2) = %ld\n", sizeof(struct S2)); + printf("sizeof(struct S3) = %ld\n", sizeof(struct S3)); + testS1(); + testS2(); + testS3(); +#define TEST_BASE_TYPE(base, ...) 
test_##base() + TEST_BASE_TYPE(char, int, "%d\n"); + TEST_BASE_TYPE(short, int, "%d\n"); + TEST_BASE_TYPE(int, int, "%d\n"); + TEST_BASE_TYPE(long, int, "%ld\n"); + TEST_BASE_TYPE(float, float, "%.2f\n"); + TEST_BASE_TYPE(double, float, "%.2f\n"); + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/expected.txt b/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..c80ed2ca967c8466e610b66c8695d24f698abdce --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/expected.txt @@ -0,0 +1,21 @@ +sizeof(struct S1) = 7 +sizeof(struct S2) = 15 +sizeof(struct S3) = 23 +2,5,2 +5,6,3 +2,5,2,1 +5,6,3,5 +2,5,2,1,9 +5,6,3,5,8 +56 +78 +56 +78 +56 +78 +56 +78 +56.00 +78.00 +56.00 +78.00 diff --git a/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/test.cfg b/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..1ce32d9ce8e61590f879f633869837596d64001b --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0097-ParamWithLargeStack/test.cfg @@ -0,0 +1,2 @@ +compile(ParamWithLargeStack) +run(ParamWithLargeStack) diff --git a/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/expected.txt b/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/main.c b/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/main.c new file mode 100644 index 0000000000000000000000000000000000000000..c34cd79d2e270573bb0917fb016b8e5ec538c7f0 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/main.c @@ -0,0 +1,15 @@ +// To test updating commonExit info after cg-cfgo in infinite-loop case, +// which may cause pdom analysis errors. 
+int a; +int +main () +{ + int b = 0; + while (a < 0 || b) + { + b = 0; + for (; b < 9; b++) + ; + } + exit (0); +} diff --git a/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/test.cfg b/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bfd783d51c73c319ed8c9f8d161f2083af99fe63 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0098-CGCFG-infinite-loop/test.cfg @@ -0,0 +1,2 @@ +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/ParamWithLargeStack2.c b/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/ParamWithLargeStack2.c new file mode 100644 index 0000000000000000000000000000000000000000..ebefe25f9840bffc547d874dddaff6cadf689cbc --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/ParamWithLargeStack2.c @@ -0,0 +1,149 @@ +#include + +#pragma pack(1) +struct S1 { + short f1 : 10; + int f2; + char f5 : 7; +}; // size is 7, passed by one reg + +struct S2 { + long f1; + struct S1 f3; +}; // size is 15, passed by two reg + +struct S3 { + long f1; + long f2; + struct S1 f3; +}; // size is 23, passed by pointer + +void printS1(struct S1 s) { + printf("%d,%d,%d\n", s.f1, s.f2, s.f5); +} + +__attribute__((noinline)) struct S1 PassByRegS1(struct S1 s) { + struct S1 c[20480]; + c[20000] = s; + return c[20000]; +} + +__attribute__((noinline)) struct S1 PassByStackS1(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7, + struct S1 s) { + struct S1 c[20480]; + c[20000] = s; + return c[20000]; +} + +void testS1() { + struct S1 a[20480]; + struct S1 b = {2, 5, 2}; + b = PassByRegS1(b); + printS1(b); + struct S1 c = {5, 6, 3}; + c = PassByStackS1(0, 1, 2, 3, 4, 5, 6, 7, c); + printS1(c); +} + +void printS2(struct S2 s) { + printf("%ld,", s.f1); + printS1(s.f3); +} + +__attribute__((noinline)) struct S2 PassByRegS2(struct S2 s) { + struct S2 c[20480]; + c[20000] = s; + return c[20000]; +} + 
+__attribute__((noinline)) struct S2 PassByStackS2(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7, + struct S2 s) { + struct S2 c[20480]; + c[20000] = s; + return c[20000]; +} + +void testS2() { + struct S2 a[20480]; + struct S2 b = {2, 5, 2, 1}; + b = PassByRegS2(b); + printS2(b); + struct S2 c = {5, 6, 3, 5}; + c = PassByStackS2(0, 1, 2, 3, 4, 5, 6, 7, c); + printS2(c); +} + +void printS3(struct S3 s) { + printf("%ld,%ld,", s.f1, s.f2); + printS1(s.f3); +} + +__attribute__((noinline)) struct S3 PassByRegS3(struct S3 s) { + struct S3 c[20480]; + c[20000] = s; + return c[20000]; +} + +__attribute__((noinline)) struct S3 PassByStackS3(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7, + struct S3 s) { + struct S3 c[20480]; + c[20000] = s; + return c[20000]; +} + +void testS3() { + struct S3 a[20480]; + struct S3 b = {2, 5, 2, 1, 9}; + b = PassByRegS3(b); + printS3(b); + struct S3 c = {5, 6, 3, 5, 8}; + c = PassByStackS3(0, 1, 2, 3, 4, 5, 6, 7, c); + printS3(c); +} + +#define TEST_BASE_TYPE(base, passType, printStr) \ + __attribute__((noinline)) base PassByReg_##base(base a) { \ + base c[20480]; \ + c[20000] = a; \ + return c[20000]; \ + } \ + __attribute__((noinline)) base PassByStack_##base(passType a0, passType a1, passType a2, passType a3, passType a4, \ + passType a5, passType a6, passType a7, base a) { \ + base c[20480]; \ + c[20000] = a; \ + return c[20000]; \ + } \ + void test_##base() { \ + base a[40960]; \ + base b = (base)(56); \ + b = PassByReg_##base(b); \ + printf(printStr, b); \ + base c = (base)(78); \ + c = PassByStack_##base(0, 1, 2, 3, 4, 5, 6, 7, c); \ + printf(printStr, c); \ + } + +TEST_BASE_TYPE(char, int, "%d\n"); +TEST_BASE_TYPE(short, int, "%d\n"); +TEST_BASE_TYPE(int, int, "%d\n"); +TEST_BASE_TYPE(long, int, "%ld\n"); +TEST_BASE_TYPE(float, float, "%.2f\n"); +TEST_BASE_TYPE(double, float, "%.2f\n"); +#undef TEST_BASE_TYPE + +int main() { + printf("sizeof(struct S1) = %ld\n", sizeof(struct S1)); + 
printf("sizeof(struct S2) = %ld\n", sizeof(struct S2)); + printf("sizeof(struct S3) = %ld\n", sizeof(struct S3)); + testS1(); + testS2(); + testS3(); +#define TEST_BASE_TYPE(base, ...) test_##base() + TEST_BASE_TYPE(char, int, "%d\n"); + TEST_BASE_TYPE(short, int, "%d\n"); + TEST_BASE_TYPE(int, int, "%d\n"); + TEST_BASE_TYPE(long, int, "%ld\n"); + TEST_BASE_TYPE(float, float, "%.2f\n"); + TEST_BASE_TYPE(double, float, "%.2f\n"); + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/expected.txt b/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..c80ed2ca967c8466e610b66c8695d24f698abdce --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/expected.txt @@ -0,0 +1,21 @@ +sizeof(struct S1) = 7 +sizeof(struct S2) = 15 +sizeof(struct S3) = 23 +2,5,2 +5,6,3 +2,5,2,1 +5,6,3,5 +2,5,2,1,9 +5,6,3,5,8 +56 +78 +56 +78 +56 +78 +56 +78 +56.00 +78.00 +56.00 +78.00 diff --git a/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/test.cfg b/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..7ecd79f4db7fbfa856648a09b12e9704fa58f644 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0098-ParamWithLargeStack2/test.cfg @@ -0,0 +1,2 @@ +compile(ParamWithLargeStack2) +run(ParamWithLargeStack2) diff --git a/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/expected.txt b/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..573541ac9702dd3969c9bc859d2b91ec1f7e6e56 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/expected.txt @@ -0,0 +1 @@ +0 diff --git a/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/main.c b/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/main.c new file mode 100644 index 
0000000000000000000000000000000000000000..f4888743f1caace399a5edd8be3f82e445e5e9d9 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/main.c @@ -0,0 +1,28 @@ +#include +int64_t fn1(uint8_t uc_5) { + int uc_4 = 0, ui_3 = 0, ui_2 = 0; + uint16_t us_6 = 0; + uint32_t *ptr_8 = &ui_2; + for (int i = 0; i <= 1; ++i) { + uint64_t uli_11 = 0; + for (ui_3; ui_3 <= 3; ui_3++) { + uint16_t *ptr_12 = &us_6; + uint64_t uli_13 = 2; + uint16_t **ptr_15 = &ptr_12; + uli_11 = us_6 /= 0 | *ptr_8 <= 0 && 0 != (uc_4 = 10) << 0; + uli_13 = (0 >= 0) / (**ptr_15 = *ptr_12 = uc_5) && + 0 != (uli_11 <<= 1020429) * uli_11 + ?: ui_3 < 0; + **ptr_15 = 0 & uc_4 || uli_13 != 0 > 0; + for (uc_4; uc_4 <= 3; uc_4 += 2) + ; + } + for (3; 3 <= 0; 3) + ; + } +} + +int main() { + int64_t res = fn1(6); + printf("%d\n", res); +} diff --git a/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/test.cfg b/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bfd783d51c73c319ed8c9f8d161f2083af99fe63 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0099-CGCFG-skip-succ/test.cfg @@ -0,0 +1,2 @@ +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0099-UnuseStackChkGuard/main.c b/testsuite/c_test/unit_test/UNIT0099-UnuseStackChkGuard/main.c new file mode 100644 index 0000000000000000000000000000000000000000..2c07e60d9ea03617d2b4fd3439944c21637ea42b --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0099-UnuseStackChkGuard/main.c @@ -0,0 +1,136 @@ +#include +#include + +struct StructReturnTypeOneInt { + int a1; +}; + +struct StructReturnTypeOneInt FuncTestStructReturnTypeOneInt(void); + +__attribute__((noinline)) int FuncTestStructReturnTypeOneInt1() { + int res = FuncTestStructReturnTypeOneInt().a1; + return res; +} + +__attribute__((noinline)) int FuncTestStructReturnTypeOneInt2() { + struct StructReturnTypeOneInt res = FuncTestStructReturnTypeOneInt(); + return res.a1; +} + +struct 
StructReturnTypeTwoInt { + int a1, a2; +}; + +struct StructReturnTypeTwoInt FuncTestStructReturnTypeTwoInt(void); + +__attribute__((noinline)) int FuncTestStructReturnTypeTwoInt1() { + int res = FuncTestStructReturnTypeTwoInt().a1; + return res; +} + +__attribute__((noinline)) int FuncTestStructReturnTypeTwoInt2() { + struct StructReturnTypeTwoInt res = FuncTestStructReturnTypeTwoInt(); + return res.a1; +} + +struct StructReturnTypeThreeInt { + int a1, a2, a3; +}; + +struct StructReturnTypeThreeInt FuncTestStructReturnTypeThreeInt(void); + +__attribute__((noinline)) int FuncTestStructReturnTypeThreeInt1() { + int res = FuncTestStructReturnTypeThreeInt().a1; + return res; +} + +__attribute__((noinline)) int FuncTestStructReturnTypeThreeInt2() { + struct StructReturnTypeThreeInt res = FuncTestStructReturnTypeThreeInt(); + return res.a1; +} + +struct StructReturnTypeFourInt { + int a1, a2, a3, a4; +}; + +struct StructReturnTypeFourInt FuncTestStructReturnTypeFourInt(void); + +__attribute__((noinline)) int FuncTestStructReturnTypeFourInt1() { + int res = FuncTestStructReturnTypeFourInt().a1; + return res; +} + +__attribute__((noinline)) int FuncTestStructReturnTypeFourInt2() { + struct StructReturnTypeFourInt res = FuncTestStructReturnTypeFourInt(); + return res.a1; +} + +struct StructReturnTypeOneFloat { + float a1; +}; + +struct StructReturnTypeOneFloat FuncTestStructReturnTypeOneFloat(void); + +__attribute__((noinline)) float FuncTestStructReturnTypeOneFloat1() { + float res = FuncTestStructReturnTypeOneFloat().a1; + return res; +} + +__attribute__((noinline)) float FuncTestStructReturnTypeOneFloat2() { + struct StructReturnTypeOneFloat res = FuncTestStructReturnTypeOneFloat(); + return res.a1; +} + +struct StructReturnTypeTwoFloat { + float a1, a2; +}; + +struct StructReturnTypeTwoFloat FuncTestStructReturnTypeTwoFloat(void); + +__attribute__((noinline)) float FuncTestStructReturnTypeTwoFloat1() { + float res = FuncTestStructReturnTypeTwoFloat().a1; + return res; 
+} + +__attribute__((noinline)) float FuncTestStructReturnTypeTwoFloat2() { + struct StructReturnTypeTwoFloat res = FuncTestStructReturnTypeTwoFloat(); + return res.a1; +} + +struct StructReturnTypeThreeFloat { + float a1, a2, a3; +}; + +struct StructReturnTypeThreeFloat FuncTestStructReturnTypeThreeFloat(void); + +__attribute__((noinline)) float FuncTestStructReturnTypeThreeFloat1() { + float res = FuncTestStructReturnTypeThreeFloat().a1; + return res; +} + +__attribute__((noinline)) float FuncTestStructReturnTypeThreeFloat2() { + struct StructReturnTypeThreeFloat res = FuncTestStructReturnTypeThreeFloat(); + return res.a1; +} + +struct StructReturnTypeFourFloat { + float a1, a2, a3, a4; +}; + +struct StructReturnTypeFourFloat FuncTestStructReturnTypeFourFloat(void); + +__attribute__((noinline)) float FuncTestStructReturnTypeFourFloat1() { + float res = FuncTestStructReturnTypeFourFloat().a1; + return res; +} + +__attribute__((noinline)) float FuncTestStructReturnTypeFourFloat2() { + struct StructReturnTypeFourFloat res = FuncTestStructReturnTypeFourFloat(); + return res.a1; +} + +int main() { + return 0; +} + +// CHECK-NOT: __stack_chk_guard \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0099-UnuseStackChkGuard/test.cfg b/testsuite/c_test/unit_test/UNIT0099-UnuseStackChkGuard/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..711b40aa0d96db085cb17beebbe19e86d038e95e --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0099-UnuseStackChkGuard/test.cfg @@ -0,0 +1,2 @@ +${MAPLE_BUILD_OUTPUT}/bin/maple -S --stack-protector-strong main.c -o main.s +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/expected.txt b/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/main.c b/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/main.c new file mode 100644 index 0000000000000000000000000000000000000000..d74abfea979bbb2a8cfd133068dc9c28ad81fafe --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/main.c @@ -0,0 +1,21 @@ +struct TbmTblOpStat { + int a; + int b; +}; + +struct TbmRoot { + int tbmOpCnt[100]; + int tbmOpFailCnt[100]; +}; + +int func(struct TbmRoot *tbmRoot, num) { + for (int i = 0; i < num; ++i) { + memset(&tbmRoot->tbmOpCnt[i], 0, 10); + memset(&tbmRoot->tbmOpFailCnt[i], 0, 10); + } + return 0; +} + +int main() { + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/test.cfg b/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bfd783d51c73c319ed8c9f8d161f2083af99fe63 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0100-objsize-cycle-phi/test.cfg @@ -0,0 +1,2 @@ +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/expected.txt b/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e7ddc021794d8eccd94721fb2a4b7104d14f178d --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/expected.txt @@ -0,0 +1,49 @@ +=============func1============= +1 +0 +0 +0 +0 +1 +=============func2============= +1 +0 +0 +0 +0 +0 +=============func3============= +1 +0 +0 +0 +1 +1 +=============func4============= +1 +0 +0 +0 +1 +1 +=============func5============= +1 +1 +0 +0 +1 +1 +=============func6============= +1 +1 +1 +0 +0 +1 +=============func7============= +1 +1 +1 +0 +0 +0 diff --git a/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/main.c b/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/main.c new file mode 100644 index 
0000000000000000000000000000000000000000..a4385ab5dd08632f233a4a9042d3ded7631c2ec7 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/main.c @@ -0,0 +1,133 @@ +struct A { + int a; + int b; + short c; + short d; + short e; +}; + +__attribute__((noinline)) +int func1(struct A *aa1, struct A *aa2) { + // CHECK-NOT: and + // CHECK: cmp + // CHECK: cmp + // CHECK-NOT: cmp + return (aa1->a == aa2->a && aa1->b == aa2->b && aa1->c == aa2->c && aa1->d == aa2->d); +} + +__attribute__((noinline)) +int func2(struct A *aa1, struct A *aa2) { + // CHECK-NOT: and + // CHECK: cmp + // CHECK: cmp + // CHECK-NOT: cmp + return (aa1->a == aa2->a && aa1->b == aa2->b && aa1->c == aa2->c && aa1->d == aa2->d && aa1->e == aa2->e); +} + +__attribute__((noinline)) +int func3(struct A *aa1, struct A *aa2) { + // CHECK-NOT: and + // CHECK: cmp + // CHECK: cmp + // CHECK-NOT: cmp + return (aa1->a == aa2->a && aa1->b == aa2->b && aa1->c == aa2->c); +} + +__attribute__((noinline)) +int func4(struct A *aa1, struct A *aa2) { + // CHECK-NOT: and + // CHECK: cmp + // CHECK: cmp + // CHECK: cmp + return (aa1->b == aa2->b && aa1->a == aa2->a && aa1->c == aa2->c); +} + +__attribute__((noinline)) +int func5(struct A *aa1, struct A *aa2) { + // CHECK: and + // CHECK: cmp + // CHECK-NOT: cmp + return (aa1->b == aa2->b && aa1->c == aa2->c); +} + +__attribute__((noinline)) +int func6(struct A *aa1, struct A *aa2) { + // CHECK-NOT: and + // CHECK: cmp + // CHECK-NOT: cmp + return (aa1->c == aa2->c && aa1->d == aa2->d); +} + +__attribute__((noinline)) +int func7(struct A *aa1, struct A *aa2) { + // CHECK: and + // CHECK: cmp + // CHECK-NOT: cmp + return (aa1->c == aa2->c && aa1->d == aa2->d && aa1->e == aa2->e); +} + +int main() { + struct A aa1 = {1,1,1,1,1}; + struct A aa2 = {1,1,1,1,1}; + struct A aa3 = {1,1,1,1,1}; + struct A aa4 = {0,1,1,1,1}; + struct A aa5 = {1,1,1,1,1}; + struct A aa6 = {1,0,1,1,1}; + struct A aa7 = {1,1,1,1,1}; + struct A aa8 = {1,1,0,1,1}; + struct A aa9 = 
{1,1,1,1,1}; + struct A aa10 = {1,1,1,0,1}; + struct A aa11 = {1,1,1,1,1}; + struct A aa12 = {1,1,1,1,0}; + + printf("=============func1=============\n"); + printf("%d\n", func1(&aa1, &aa2)); + printf("%d\n", func1(&aa3, &aa4)); + printf("%d\n", func1(&aa5, &aa6)); + printf("%d\n", func1(&aa7, &aa8)); + printf("%d\n", func1(&aa9, &aa10)); + printf("%d\n", func1(&aa11, &aa12)); + printf("=============func2=============\n"); + printf("%d\n", func2(&aa1, &aa2)); + printf("%d\n", func2(&aa3, &aa4)); + printf("%d\n", func2(&aa5, &aa6)); + printf("%d\n", func2(&aa7, &aa8)); + printf("%d\n", func2(&aa9, &aa10)); + printf("%d\n", func2(&aa11, &aa12)); + printf("=============func3=============\n"); + printf("%d\n", func3(&aa1, &aa2)); + printf("%d\n", func3(&aa3, &aa4)); + printf("%d\n", func3(&aa5, &aa6)); + printf("%d\n", func3(&aa7, &aa8)); + printf("%d\n", func3(&aa9, &aa10)); + printf("%d\n", func3(&aa11, &aa12)); + printf("=============func4=============\n"); + printf("%d\n", func4(&aa1, &aa2)); + printf("%d\n", func4(&aa3, &aa4)); + printf("%d\n", func4(&aa5, &aa6)); + printf("%d\n", func4(&aa7, &aa8)); + printf("%d\n", func4(&aa9, &aa10)); + printf("%d\n", func4(&aa11, &aa12)); + printf("=============func5=============\n"); + printf("%d\n", func5(&aa1, &aa2)); + printf("%d\n", func5(&aa3, &aa4)); + printf("%d\n", func5(&aa5, &aa6)); + printf("%d\n", func5(&aa7, &aa8)); + printf("%d\n", func5(&aa9, &aa10)); + printf("%d\n", func5(&aa11, &aa12)); + printf("=============func6=============\n"); + printf("%d\n", func6(&aa1, &aa2)); + printf("%d\n", func6(&aa3, &aa4)); + printf("%d\n", func6(&aa5, &aa6)); + printf("%d\n", func6(&aa7, &aa8)); + printf("%d\n", func6(&aa9, &aa10)); + printf("%d\n", func6(&aa11, &aa12)); + printf("=============func7=============\n"); + printf("%d\n", func7(&aa1, &aa2)); + printf("%d\n", func7(&aa3, &aa4)); + printf("%d\n", func7(&aa5, &aa6)); + printf("%d\n", func7(&aa7, &aa8)); + printf("%d\n", func7(&aa9, &aa10)); + printf("%d\n", 
func7(&aa11, &aa12)); + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/test.cfg b/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..fcd98120d95384a03f974279024e1221cc2993ab --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0101-MergeAdjacentIread/test.cfg @@ -0,0 +1,4 @@ +FORTIFY_O2: +compile(main) +cat main.s | ${OUT_ROOT}/tools/bin/FileCheck main.c +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/expected.txt b/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..573541ac9702dd3969c9bc859d2b91ec1f7e6e56 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/expected.txt @@ -0,0 +1 @@ +0 diff --git a/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/main.c b/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/main.c new file mode 100644 index 0000000000000000000000000000000000000000..f4b5600121f2df2587b13373b9e451cbf5a41c13 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/main.c @@ -0,0 +1,22 @@ +#include +long long a; +short b, c; +int d = 9; +long e; +char f; +void g(long long *p1, int i) { *p1 = i; } +void fn2(short); +int main() { + fn2(80); + g(&a, d); + printf("%llu\n", a); +} +void fn2(short p1) { + if (p1) { + if (f) + b = 0; + if (c ? 
p1 : 6) + d = 0; + (_Bool) f || e; + } +} diff --git a/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/test.cfg b/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bfd783d51c73c319ed8c9f8d161f2083af99fe63 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0102-optimizeCFG-updatessa/test.cfg @@ -0,0 +1,2 @@ +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/expected.txt b/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/main.c b/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/main.c new file mode 100644 index 0000000000000000000000000000000000000000..384a06b803e196b37cbec0cefa55d888e58986e5 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/main.c @@ -0,0 +1,12 @@ +#include + +int a; +char b; + +void c(long d) { + a = b ? 
(unsigned)d : 30786325577728; +} + +int main() { + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/test.cfg b/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..038ee2e22748811f9a7a772ec5b5e66b7c631f32 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0103-MovImmVerification/test.cfg @@ -0,0 +1,3 @@ +COS,CO2: +compile(main) +run(main) \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0104-dead-loop-exit-bb/main.c b/testsuite/c_test/unit_test/UNIT0104-dead-loop-exit-bb/main.c new file mode 100644 index 0000000000000000000000000000000000000000..facca9fdecd0e2a9f828071232a09aa759af4a9f --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0104-dead-loop-exit-bb/main.c @@ -0,0 +1,55 @@ +#include +uint32_t ui_0 = 0; +uint64_t u3 = 3; +uint64_t u4 = 5; +uint8_t u5 = 5; +uint32_t s8 = 6; +int64_t l9 = 1; +int64_t foo() { + uint64_t *p_10 = &u4; + for (*p_10 = 9; *p_10 <= 0; *p_10++) { + uint8_t uc_15 = 1; + uint32_t *p_17 = &s8; + for (uc_15 = 3; uc_15 <= 3; uc_15 = 3) { + for (*p_17 = 8; *p_17 <= 0; *p_17 = 4) + ; + uint32_t ui_20 = 0; + for (u3 = 7; u3 <= 4; u3 = 4) + for (ui_20 = 3; ui_20 <= 0; ui_20 = 3) + ; + labelA:; + } + } + for (u3 = 1; u3 <= 8; u3 = 3) { + int16_t s_12 = 0; + int8_t c_13 = 3; + for (c_13 = 5; l9 <= 6; l9 = 5) { + uint16_t us_16 = 2; + for (*p_10 = 1; *p_10 <= 70; u5 = 2) { + uint32_t *p_18 = &ui_0; + uint32_t **p_20 = &p_18; + for (**p_20 = 2; **p_20 <= 5; **p_20 = 5) + goto labelA; + } + for (us_16 = 7; us_16 <= 1; us_16 = 4) + for (s_12 = 1; s_12 <= 9; s_12++) + ; + } + } +} + +struct a { + signed b; +}; +int16_t baz(signed x, int16_t y); +void bar(struct a d) { + int16_t e[5]; + int f; + for (f = 0; f < 5; f++) + e[f] = baz(d.b, e[1]); + for (;;) + ; +} + +int main() {} + diff --git a/testsuite/c_test/unit_test/UNIT0104-dead-loop-exit-bb/test.cfg 
b/testsuite/c_test/unit_test/UNIT0104-dead-loop-exit-bb/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..401c8f039b640f30f89aa36d8f6c71c6479caf26 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0104-dead-loop-exit-bb/test.cfg @@ -0,0 +1 @@ +compile(APP=main.c,OPTION="-O2 -fstack-protector-strong -S") diff --git a/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/expected.txt b/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/main.c b/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/main.c new file mode 100644 index 0000000000000000000000000000000000000000..c1001b3ea4c3357918c5c0c49d0e0216bb03887a --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/main.c @@ -0,0 +1,67 @@ +int i = 0; + +// test iread +void __attribute__ ((noinline)) iread(int *a, int *b, int cond) { + if (cond) { + i = *a; + } else { + i = *b; + } + if (cond && i != *a) { + abort(); + } +} + +// test conststr +void __attribute__ ((noinline)) conststr(int cond) { + char *s; + if (cond) { + s = "hello"; + } else { + s = "wello"; + } + if (cond && s[0] != 'h') { + abort(); + } +} + +// test addrof func +void __attribute__ ((noinline)) addroffunc(int cond) { + void *func = 0; + if (cond) { + func = &iread; + } else { + func = &conststr; + } + if (cond && func != &iread) { + abort(); + } +} + +// test addrof label +void __attribute__ ((noinline)) addroflabel(int cond) { + void *label = 0; + int t = 0; + l1: + t = 1; + l2: + t = 2; + if (cond) { + label = &&l1; + } else { + label = &&l2; + } + i = *(int *)label; + if (cond && label != &&l1) { + abort(); + } +} + +int main() { + int a = 1; + int b = 2; + iread(&a,&b,1); + conststr(1); + addroffunc(1); + addroflabel(1); +} diff --git 
a/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/test.cfg b/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bfd783d51c73c319ed8c9f8d161f2083af99fe63 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0105-IsSameContent-check/test.cfg @@ -0,0 +1,2 @@ +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/expected.txt b/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/main.c b/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/main.c new file mode 100644 index 0000000000000000000000000000000000000000..020a0665331d420c31142bc5d12c0c4397ec9dd5 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/main.c @@ -0,0 +1,15 @@ +struct A { + int f1; + long f2; + long f3[100]; + long f4; + long f5; +}; + +int main() { + static struct A a = {1234567}; + if (a.f1 != 1234567) { + abort(); + } + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/test.cfg b/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0105-sra-pustatic/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0106-ebo-verify-imm/main.c b/testsuite/c_test/unit_test/UNIT0106-ebo-verify-imm/main.c new file mode 100644 index 0000000000000000000000000000000000000000..172965acae77e302d9a98ca59b8e2b28bc5a2d27 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0106-ebo-verify-imm/main.c @@ -0,0 +1,27 @@ +#include "csmith.h" + +struct a { + uint16_t c; + uint64_t d; + int64_t e; + int8_t f; + uint64_t g; + uint64_t h; +}; + +struct { + uint32_t b; + struct a i; +} 
k[1][9][6]; + +int32_t j[][1]; + +int32_t m(l) { + int32_t *n = &j[2][6]; + *n = safe_mul_func_int8_t_s_s(l, k[1][4][2].b); + return 0; +} + +int main() { + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0106-ebo-verify-imm/test.cfg b/testsuite/c_test/unit_test/UNIT0106-ebo-verify-imm/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..b909c24a90a5dbb0083fc96d284515c0854d8cdb --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0106-ebo-verify-imm/test.cfg @@ -0,0 +1 @@ +compileWithCsmith(APP=main.c, OPTION="-O2 -fPIE") diff --git a/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/expected.txt b/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/main.c b/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/main.c new file mode 100644 index 0000000000000000000000000000000000000000..46d0f87ad5de145b5a1a0080a96c83120903a14d --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/main.c @@ -0,0 +1,21 @@ +#include + +static inline void SkipOverflow(long long *a, long long *b, int len) { + unsigned int i; + for (i = (INT_MAX - (len)); i < INT_MAX; i++) { + if ((i - (INT_MAX - (len))) > 32) { + a[(i - (INT_MAX - (len)))] = a[(i - (INT_MAX - (len)))-1] - 1; + } else { + b[(i - (INT_MAX - (len)))] -= (i - (INT_MAX - (len))); + } + } +} + +long long a[40]; +long long b[32]; + +int main() { + a[32] = 9; + SkipOverflow(a, b, 40); + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/test.cfg b/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0106-ivopts-negsubscript/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) 
+run(main) diff --git a/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/expected.txt b/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/main.c b/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/main.c new file mode 100644 index 0000000000000000000000000000000000000000..8a83ab2688a204505aa727db8dbadebee4453de3 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/main.c @@ -0,0 +1,16 @@ +#include "csmith.h" +struct { + signed a; +} b[]; +struct { + int16_t c; +} d[][1][1]; +struct { + signed e; +} f; +void g(uint32_t h) { + for (;; h++) + if (h <= f.e) + b[1].a = safe_lshift_func_uint8_t_u_u(0, d[4][2][1].c) < h || 0; +} +int main() {} diff --git a/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/test.cfg b/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0106-vrp-same-primtype/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/expected.txt b/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..4ad96d51599fb734101f6229f6c1a8a509bd6255 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/expected.txt @@ -0,0 +1 @@ +default diff --git a/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/main.c b/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/main.c new file mode 100644 index 0000000000000000000000000000000000000000..ba3fd65e7b38859014d4eab0f2915aca65fa1dbd --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/main.c @@ -0,0 +1,19 @@ +#include + +int main() { + switch((_Bool)1) { 
+ case (signed short int) 10489: + printf("10489\n"); + break; + case (signed short int) -16267: + printf("-16267\n"); + break; + case (signed short int) -2268: + printf("-2268\n"); + break; + default: + printf("default\n"); + break; + } + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/test.cfg b/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0107-vrp-switch-bool/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/expected.txt b/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..7ed6ff82de6bcc2a78243fc9c54d3ef5ac14da69 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/expected.txt @@ -0,0 +1 @@ +5 diff --git a/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/main.c b/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/main.c new file mode 100644 index 0000000000000000000000000000000000000000..a816ef46ae399d1da97c9ced185cb426f82894da --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/main.c @@ -0,0 +1,26 @@ +#include +struct { + int32_t a; +} b; +int64_t c, e; +uint64_t **d; +int32_t *f = &b.a; +int8_t g() { + int32_t h; + int16_t k[2][1]; + int i, j; + i = j = 0; + k[i][j] = 0; + uint8_t l = 4; + for (c = 5; c <= 4;) { + **d = h; + if (l) + break; + } + *f = e == e && c; + return k[0][0]; +} +int main() { + g(); + printf("%d\n", c); +} diff --git a/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/test.cfg b/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0108-vrp-delete-stmt/test.cfg @@ -0,0 
+1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/c_test/unit_test/UNIT0109-reach-def-for-undef-behavior/main.c b/testsuite/c_test/unit_test/UNIT0109-reach-def-for-undef-behavior/main.c new file mode 100644 index 0000000000000000000000000000000000000000..4e773688ec8505e8a49d65510ca5ab4ab07ee3de --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0109-reach-def-for-undef-behavior/main.c @@ -0,0 +1,16 @@ +#include + +char a() { + int b = 0; + return b; +} + +int *c(int b, int d, int g, int e) { + int *f[6]; + return f[3219]; +} + + +int main() { + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0109-reach-def-for-undef-behavior/test.cfg b/testsuite/c_test/unit_test/UNIT0109-reach-def-for-undef-behavior/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..2391600316de78ed19e30293dfa1a38425bc6e72 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0109-reach-def-for-undef-behavior/test.cfg @@ -0,0 +1 @@ +compile(APP=main.c, OPTION="-O2") diff --git a/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/global_vars.h b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/global_vars.h new file mode 100644 index 0000000000000000000000000000000000000000..a7db4abe8c6e990c9e54f391bd051d547835fa93 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/global_vars.h @@ -0,0 +1,89 @@ +#ifndef __global_vars_h__ +#define __global_vars_h__ + +static signed short int *global_pointer_signed_short_int_1 = NULL; +static struct struct0 *global_pointer_struct_struct0_1 = NULL; +static struct struct0 global_struct_struct0_1; +static signed short int global_signed_short_int_1 = /* LITERAL */ (signed short int) 8388; +static signed int global_array_5_signed_int_1[5]; +static double global_double_1 = /* LITERAL */ (double) 1.6573567494374555e+308; +static bool global_bool_1 = false; +static unsigned long long int **global_pointer_pointer_unsigned_long_long_int_1 = NULL; +static struct struct1 
global_struct_struct1_1; +static struct struct1 **global_pointer_pointer_struct_struct1_1 = NULL; +static signed int global_signed_int_1 = /* LITERAL */ (signed int) 1890803674; +static signed long int global_array_2_signed_long_int_1[2]; +static unsigned char global_unsigned_char_1 = 182U; +static signed int *global_pointer_signed_int_1 = NULL; +static unsigned long int global_unsigned_long_int_1 = 3583235821UL; +static signed long long int (*global_pointer_array_5_signed_long_long_int_1)[5] = NULL; +static signed long int global_signed_long_int_1 = -807071461L; +static signed long int global_signed_long_int_2 = 691940590L; +static bool (*global_pointer_array_1_bool_1)[1] = NULL; +static struct struct1 *global_pointer_struct_struct1_1 = NULL; +static unsigned short int global_unsigned_short_int_1 = 61585U; +static bool global_array_6_bool_1[6]; +static signed char global_signed_char_1 = /* LITERAL */ (signed char) -3; +static signed char global_signed_char_2 = /* LITERAL */ (signed char) -43; +static signed long long int global_signed_long_long_int_1 = 592797371024599937LL; +static float ***global_pointer_pointer_pointer_float_1 = NULL; +static signed short int **global_pointer_pointer_signed_short_int_1 = NULL; +static struct struct2 *global_array_8_pointer_struct_struct2_1[8]; +static struct struct3 *global_array_5_pointer_struct_struct3_1[5]; +static unsigned char **global_pointer_pointer_unsigned_char_1 = NULL; +static unsigned int global_unsigned_int_1 = 4123212846U; +static unsigned char *global_pointer_unsigned_char_1 = "tkMChf615Lw1Jcz90rjrTTDLKT"; +static unsigned long long int global_unsigned_long_long_int_1 = 12555378492034730318ULL; +static unsigned long long int global_unsigned_long_long_int_2 = 4747384395032661624ULL; +static struct struct2 global_struct_struct2_1; +static float **global_pointer_pointer_float_1 = NULL; +static unsigned char global_array_8_unsigned_char_1[8]; +static struct struct6 *global_pointer_struct_struct6_1 = NULL; +static 
unsigned long int *global_array_9_pointer_unsigned_long_int_1[9]; +static float *global_pointer_float_1 = NULL; +static struct struct2 *global_pointer_struct_struct2_1 = NULL; +static unsigned char (**global_pointer_pointer_array_8_unsigned_char_1)[8] = NULL; +static unsigned long int *global_pointer_unsigned_long_int_1 = NULL; +static unsigned long int global_array_6_unsigned_long_int_1[6]; +static float global_float_1 = /* LITERAL */ (float) 3.3060160755169525e+38; +static signed char *global_pointer_signed_char_1 = "ymo2SSl0SzUao"; +static unsigned char **global_array_1_pointer_pointer_unsigned_char_1[1]; +static unsigned int global_array_3_unsigned_int_1[3] = { 4280423329U, 4221351715U, 693396179U }; +static struct struct5 global_struct_struct5_1; +static struct struct3 *global_pointer_struct_struct3_1 = NULL; +static signed char global_array_6_signed_char_1[6]; +static struct struct0 global_array_9_struct_struct0_1[9]; +static struct struct9 *global_pointer_struct_struct9_1 = NULL; +static struct struct10 *global_pointer_struct_struct10_1 = NULL; +static double *global_pointer_double_1 = NULL; +static signed long int *global_pointer_signed_long_int_1 = NULL; +static unsigned short int (*global_pointer_array_6_unsigned_short_int_1)[6] = NULL; +static unsigned char (*global_pointer_array_2_unsigned_char_1)[2] = NULL; +static signed char (*global_pointer_array_8_signed_char_1)[8] = NULL; +static bool *global_pointer_bool_1 = NULL; +static struct struct7 global_struct_struct7_1 = { .pointer_double_0 = NULL }; +static struct struct6 **global_pointer_pointer_struct_struct6_1 = NULL; +static struct struct0 **global_pointer_pointer_struct_struct0_1 = NULL; +static signed long int global_array_4_signed_long_int_1[4]; +static struct struct11 global_struct_struct11_1; +static unsigned short int (*global_pointer_array_3_unsigned_short_int_1)[3] = NULL; +static unsigned int *global_pointer_unsigned_int_1 = NULL; +static struct struct12 global_struct_struct12_1; +static 
float global_array_8_float_1[8]; +static bool global_array_5_bool_1[5]; +static signed int *(*global_pointer_array_9_pointer_signed_int_1)[9] = NULL; +static struct struct0 *global_array_2_pointer_struct_struct0_1[2]; +static unsigned char global_array_1_unsigned_char_1[1]; +static unsigned long int global_array_5_unsigned_long_int_1[5] = { 3674003780UL, 499859140UL, 2852414259UL, 1924627370UL, 1355005978UL }; +static struct struct12 *global_pointer_struct_struct12_1 = NULL; +static struct struct14 global_array_5_struct_struct14_1[5]; +static struct struct6 global_struct_struct6_1 = {.pointer_signed_short_int_0 = NULL, .pointer_struct_struct0_1 = NULL, + .array_4_signed_long_int_2 = { -64516496L, 900633063L, 513604919L, 1440464860L }, + .signed_char_3 = /* LITERAL */ (signed char) 19 }; +static unsigned int global_array_3_array_3_unsigned_int_1[3][3]; +static double global_array_6_double_1[6]; +static struct struct3 global_struct_struct3_1; +static signed char global_array_9_signed_char_1[9]; +static struct struct2 (*global_pointer_array_8_struct_struct2_1)[8] = NULL; + +#endif diff --git a/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/main.c b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/main.c new file mode 100644 index 0000000000000000000000000000000000000000..6d02a433c6d62cf5a84c7098fb6c8d997192f94f --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/main.c @@ -0,0 +1,17 @@ +#define bool _Bool +#define false 0 +#define NULL 0 +#include"structs.h" +#include"prototypes.h" +#include"global_vars.h" + +long a() { + struct struct2 *b; + global_struct_struct3_1.struct_struct12_6.bool_2 = 0; + return func36(global_unsigned_int_1, (*b).pointer_pointer_unsigned_long_long_int_7); +} + +int main() { + a(); + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/prototypes.h b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/prototypes.h new file mode 100644 
index 0000000000000000000000000000000000000000..5d5d922cd301bb802611f2985e0c26f3704ba60e --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/prototypes.h @@ -0,0 +1,255 @@ +#ifndef __prototypes_h__ +#define __prototypes_h__ + +/* + * return type: unsigned long int + * param_0 type: float + */ +unsigned long int func0(float param_0); +/* + * return type: bool + * param_0 type: pointer unsigned char + * param_1 type: array 4 signed long int + * param_2 type: float + */ +bool func1(unsigned char *param_0, signed long int param_1[4], float param_2); +/* + * return type: unsigned char + * param_0 type: unsigned long int + */ +unsigned char func2(unsigned long int param_0); +/* + * return type: unsigned char + */ +unsigned char func3(); +/* + * return type: pointer struct struct0 + * param_0 type: unsigned char + */ +struct struct0 * func4(unsigned char param_0); +/* + * return type: array 1 double + * param_0 type: unsigned int + */ +double * func5(unsigned int param_0); +/* + * return type: unsigned short int + * param_0 type: pointer float + * param_1 type: signed char + * param_2 type: signed short int + */ +unsigned short int func6(float *param_0, signed char param_1, signed short int param_2); +/* + * return type: struct struct0 + * param_0 type: double + * param_1 type: unsigned long int + * param_2 type: signed short int + */ +struct struct0 func7(double param_0, unsigned long int param_1, signed short int param_2); +/* + * return type: signed short int + */ +signed short int func8(); +/* + * return type: double + * param_0 type: array 8 signed char + * param_1 type: signed long int + * param_2 type: pointer bool + * param_3 type: unsigned char + */ +double func9(signed char param_0[8], signed long int param_1, bool *param_2, unsigned char param_3); +/* + * return type: signed long int + * param_0 type: float + * param_1 type: unsigned short int + * param_2 type: signed short int + */ +signed long int func10(float param_0, unsigned 
short int param_1, signed short int param_2); +/* + * return type: pointer unsigned char + * param_0 type: signed char + * param_1 type: signed long long int + * param_2 type: signed char + */ +unsigned char * func11(signed char param_0, signed long long int param_1, signed char param_2); +/* + * return type: float + * param_0 type: double + * param_1 type: signed char + * param_2 type: unsigned short int + * param_3 type: unsigned int + * param_4 type: double + * param_5 type: double + */ +float func12(double param_0, signed char param_1, unsigned short int param_2, unsigned int param_3, double param_4, double param_5); +/* + * return type: signed long long int + * param_0 type: signed long int + * param_1 type: unsigned int + * param_2 type: unsigned int + * param_3 type: unsigned char + */ +signed long long int func13(signed long int param_0, unsigned int param_1, unsigned int param_2, unsigned char param_3); +/* + * return type: array 1 bool + * param_0 type: double + */ +bool * func14(double param_0); +/* + * return type: void + * param_0 type: signed long long int + * param_1 type: bool + * param_2 type: signed int + * param_3 type: pointer pointer float + */ +void func15(signed long long int param_0, bool param_1, signed int param_2, float **param_3); +/* + * return type: signed int + * param_0 type: double + * param_1 type: unsigned int + * param_2 type: signed long int + * param_3 type: array 6 bool + */ +signed int func16(double param_0, unsigned int param_1, signed long int param_2, bool param_3[6]); +/* + * return type: unsigned int + * param_0 type: unsigned char + */ +unsigned int func17(unsigned char param_0); +/* + * return type: array 8 signed char + * param_0 type: signed long long int + */ +signed char * func18(signed long long int param_0); +/* + * return type: array 4 unsigned long int + * param_0 type: array 8 unsigned char + * param_1 type: unsigned char + */ +unsigned long int * func19(unsigned char param_0[8], unsigned char param_1); +/* + 
* return type: signed char + * param_0 type: signed short int + * param_1 type: double + * param_2 type: signed char + * param_3 type: pointer unsigned int + */ +signed char func20(signed short int param_0, double param_1, signed char param_2, unsigned int *param_3); +/* + * return type: array 4 unsigned long int + * param_0 type: unsigned int + * param_1 type: unsigned long int + * param_2 type: unsigned long long int + * param_3 type: float + */ +unsigned long int * func21(unsigned int param_0, unsigned long int param_1, unsigned long long int param_2, float param_3); +/* + * return type: unsigned long long int + * param_0 type: signed long long int + * param_1 type: signed long long int + * param_2 type: unsigned short int + * param_3 type: unsigned long int + */ +unsigned long long int func22(signed long long int param_0, signed long long int param_1, unsigned short int param_2, unsigned long int param_3); +/* + * return type: pointer pointer unsigned short int + * param_0 type: unsigned long int + * param_1 type: unsigned long long int + * param_2 type: pointer signed char + */ +unsigned short int ** func23(unsigned long int param_0, unsigned long long int param_1, signed char *param_2); +/* + * return type: array 9 signed short int + * param_0 type: signed long int + * param_1 type: unsigned long int + * param_2 type: pointer float + * param_3 type: signed long long int + */ +signed short int * func24(signed long int param_0, unsigned long int param_1, float *param_2, signed long long int param_3); +/* + * return type: pointer pointer float + * param_0 type: signed char + * param_1 type: signed short int + */ +float ** func25(signed char param_0, signed short int param_1); +/* + * return type: pointer unsigned char + * param_0 type: struct struct8 + * param_1 type: float + * param_2 type: float + * param_3 type: struct struct1 + */ +unsigned char * func26(struct struct8 param_0, float param_1, float param_2, struct struct1 param_3); +/* + * return type: 
struct struct1 + */ +struct struct1 func27(); +/* + * return type: array 6 bool + * param_0 type: unsigned char + * param_1 type: unsigned char + * param_2 type: signed short int + */ +bool * func28(unsigned char param_0, unsigned char param_1, signed short int param_2); +/* + * return type: signed short int + * param_0 type: unsigned int + * param_1 type: signed long int + */ +signed short int func29(unsigned int param_0, signed long int param_1); +/* + * return type: array 4 signed long int + * param_0 type: signed long int + * param_1 type: double + */ +signed long int * func30(signed long int param_0, double param_1); +/* + * return type: struct struct6 + * param_0 type: signed int + * param_1 type: pointer unsigned char + * param_2 type: signed short int + * param_3 type: float + * param_4 type: double + * param_5 type: struct struct6 + */ +struct struct6 func31(signed int param_0, unsigned char *param_1, signed short int param_2, float param_3, double param_4, struct struct6 param_5); +/* + * return type: struct struct12 + * param_0 type: unsigned char + */ +struct struct12 func32(unsigned char param_0); +/* + * return type: pointer signed long int + * param_0 type: unsigned long long int + */ +signed long int * func33(unsigned long long int param_0); +/* + * return type: pointer unsigned long int + * param_0 type: unsigned long int + * param_1 type: signed int + * param_2 type: unsigned long int + * param_3 type: signed int + * param_4 type: unsigned short int + */ +unsigned long int * func34(unsigned long int param_0, signed int param_1, unsigned long int param_2, signed int param_3, unsigned short int param_4); +/* + * return type: pointer unsigned int + * param_0 type: bool + * param_1 type: unsigned long int + */ +unsigned int * func35(bool param_0, unsigned long int param_1); +/* + * return type: unsigned long int + * param_0 type: unsigned int + * param_1 type: pointer pointer unsigned long long int + */ +unsigned long int func36(unsigned int param_0, 
unsigned long long int **param_1); +/* + * return type: pointer float + * param_0 type: unsigned long long int + * param_1 type: double + * param_2 type: unsigned int + */ +float * func37(unsigned long long int param_0, double param_1, unsigned int param_2); + +#endif diff --git a/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/structs.h b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/structs.h new file mode 100644 index 0000000000000000000000000000000000000000..c04e42378db94681045bd0a700d8a474a16c927d --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/structs.h @@ -0,0 +1,92 @@ +#ifndef __structs_h__ +#define __structs_h__ + +struct struct0 { + bool bool_0; + float float_1; + bool *pointer_bool_2; + signed int signed_int_3; + signed long int signed_long_int_4; + signed int *array_8_pointer_signed_int_5[8]; + unsigned long int unsigned_long_int_6; + float **pointer_pointer_float_7; + unsigned int *(*pointer_array_4_pointer_unsigned_int_8)[4]; +}; +struct struct2 { + signed long long int signed_long_long_int_0; + unsigned char unsigned_char_1; + unsigned short int unsigned_short_int_2; + float *pointer_float_3; + unsigned char (*pointer_array_9_unsigned_char_4)[9]; + signed char *pointer_signed_char_5; + signed short int signed_short_int_6; + unsigned long long int **pointer_pointer_unsigned_long_long_int_7; +}; +struct struct5 { + bool array_6_bool_0[6]; + unsigned int unsigned_int_1; +}; +struct struct6 { + signed short int *pointer_signed_short_int_0; + struct struct0 *pointer_struct_struct0_1; + signed long int array_4_signed_long_int_2[4]; + signed char signed_char_3; + unsigned long int array_3_unsigned_long_int_4[3]; +}; +struct struct7 { + double *pointer_double_0; +}; +struct struct9 { + struct struct2 *pointer_struct_struct2_0; +}; +struct struct10 { + unsigned long long int *pointer_unsigned_long_long_int_0; + signed int array_4_signed_int_1[4]; +}; +struct struct11 { + struct struct6 
*pointer_struct_struct6_0; + double (*pointer_array_1_double_1)[1]; +}; +struct struct12 { + signed long long int *pointer_signed_long_long_int_0; + unsigned int *pointer_unsigned_int_1; + bool bool_2; +}; +struct struct14 { + signed char array_8_signed_char_0[8]; +}; +struct struct15 { + unsigned long long int array_4_unsigned_long_long_int_0[4]; +}; +struct struct1 { + struct struct0 struct_struct0_0; + unsigned long int *pointer_unsigned_long_int_1; + struct struct0 array_2_struct_struct0_2[2]; + unsigned char *pointer_unsigned_char_3; + double double_4; + struct struct2 struct_struct2_5; + unsigned long int array_4_unsigned_long_int_6[4]; + signed short int array_9_signed_short_int_7[9]; + bool (*pointer_array_6_bool_8)[6]; +}; +struct struct8 { + struct struct0 array_3_struct_struct0_0[3]; +}; +struct struct13 { + struct struct2 struct_struct2_0; +}; +struct struct3 { + unsigned long long int unsigned_long_long_int_0; + struct struct1 array_9_struct_struct1_1[9]; + unsigned short int **pointer_pointer_unsigned_short_int_2; + struct struct8 array_5_struct_struct8_3[5]; + struct struct6 struct_struct6_4; + struct struct0 struct_struct0_5; + struct struct12 struct_struct12_6; +}; +struct struct4 { + struct struct1 struct_struct1_0; + struct struct3 **pointer_pointer_struct_struct3_1; +}; + +#endif diff --git a/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/test.cfg b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f0543eed7908473955bf0cb46e932625ac0cde4e --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0110-prepeephole-verify-immopnd/test.cfg @@ -0,0 +1 @@ +compile(APP=main.c,OPTION="-O2 -S -freg-struct-return -ffunction-sections -fstack-protector-strong -fPIE -fno-plt -Wl,-Bsymbolic -g -fPIC -fPIE -freg-struct-return -ffunction-sections -Wl,--copy-dt-needed-entries -Wl,-z,noexecstack -fcommon -w -I structs.h -I prototypes.h -I global_vars.h") diff --git 
a/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/AggCopyMemory.c b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/AggCopyMemory.c new file mode 100644 index 0000000000000000000000000000000000000000..c16d07fbc629b9b6443e990781d74d2e76407f31 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/AggCopyMemory.c @@ -0,0 +1,48 @@ +#include +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif +#ifndef MAP_ANON +#define MAP_ANON 0 +#endif +#ifndef MAP_FAILED +#define MAP_FAILED ((void *)-1) +#endif + +typedef struct { + unsigned char dmac[6]; + unsigned char smac[6]; + unsigned short ethType; +} HpfEthHdr; + +int test_1() { + char *p = mmap((void *)0, 131072, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (p == MAP_FAILED) return 0; + char *endp = p + 65536; + if (munmap(endp, 65536) < 0) return 0; + + HpfEthHdr *s1 = (HpfEthHdr *)endp - 1; + HpfEthHdr s2; + HpfEthHdr *s3 = &s2; + *s3 = *s1; + + return 0; +} + +int test_2() { + char *p = mmap((void *)0, 131072, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (p == MAP_FAILED) return 0; + char *endp = p + 65536; + if (munmap(endp, 65536) < 0) return 0; + + HpfEthHdr *s1 = (HpfEthHdr *)endp; + HpfEthHdr s2; + HpfEthHdr *s3 = &s2; + *s3 = *s1; + + return 0; +} + +int main() { + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/expected.txt b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/test.cfg b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6d34a947a708180f33f83bc1d2087b39415a003f --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheck/test.cfg @@ -0,0 +1,2 @@ +compile(AggCopyMemory) 
+run(AggCopyMemory) diff --git a/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/AggCopyMemoryPacked.c b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/AggCopyMemoryPacked.c new file mode 100644 index 0000000000000000000000000000000000000000..75ae6ec347069b8f4d16a104f901e01d41d44221 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/AggCopyMemoryPacked.c @@ -0,0 +1,22 @@ +typedef struct { + unsigned char r; + unsigned char g; + unsigned char b; +} __attribute__((packed)) S0; + +typedef struct { + unsigned char r; + S0 s0; + unsigned char b; +} __attribute__((packed)) S1; + +S1 s1 = {'a', {'b', 'c', 'd'}, 'e'}; + +int main() { + S0 *s0 = &s1.s0; + S0 tmp = {'r', 'f', 's'}; + *s0 = tmp; + printf("%c,%c,%c,%c,%c\n", s1.b, s1.r, s1.s0.b, s1.s0.g, s1.s0.r); + + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/expected.txt b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..86d425a28c48abb08684b1508798cd377bbbf7a9 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/expected.txt @@ -0,0 +1 @@ +e,a,s,f,r diff --git a/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/test.cfg b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..dac010e9bc752dfe3b51d6b6cdc521f28e74e5b7 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0111-AggCopy-MemoryCheckPacked/test.cfg @@ -0,0 +1,2 @@ +compile(AggCopyMemoryPacked) +run(AggCopyMemoryPacked) diff --git a/testsuite/c_test/unit_test/UNIT0111-AggCopy-OffsetVerification/AggCopy.c b/testsuite/c_test/unit_test/UNIT0111-AggCopy-OffsetVerification/AggCopy.c new file mode 100644 index 0000000000000000000000000000000000000000..1b2af4f95b3253e6b5dd034293441e26d5664146 --- /dev/null +++ 
b/testsuite/c_test/unit_test/UNIT0111-AggCopy-OffsetVerification/AggCopy.c @@ -0,0 +1,167 @@ +#include + +typedef struct { + uint8_t a[31]; +} S31; + +void foo_31(S31 *dst, S31 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr x{{.*}}#16 + // CHECK-NEXT: str x{{.*}}#16 + // CHECK-NEXT: ldr x{{.*}}#23 + // CHECK-NEXT: str x{{.*}}#23 + *dst = *src; +} + +typedef struct { + uint8_t a[30]; +} S30; + +void foo_30(S30 *dst, S30 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr x{{.*}}#16 + // CHECK-NEXT: str x{{.*}}#16 + // CHECK-NEXT: ldr x{{.*}}#22 + // CHECK-NEXT: str x{{.*}}#22 + *dst = *src; +} + +typedef struct { + uint8_t a[29]; +} S29; + +void foo_29(S29 *dst, S29 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr x{{.*}}#16 + // CHECK-NEXT: str x{{.*}}#16 + // CHECK-NEXT: ldr x{{.*}}#21 + // CHECK-NEXT: str x{{.*}}#21 + *dst = *src; +} + +typedef struct { + uint8_t a[27]; +} S27; + +void foo_27(S27 *dst, S27 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr x{{.*}}#16 + // CHECK-NEXT: str x{{.*}}#16 + // CHECK-NEXT: ldr w{{.*}}#23 + // CHECK-NEXT: str w{{.*}}#23 + *dst = *src; +} + +typedef struct { + uint8_t a[23]; +} S23; + +void foo_23(S23 *dst, S23 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr x{{.*}}#15 + // CHECK-NEXT: str x{{.*}}#15 + *dst = *src; +} + +typedef struct { + uint8_t a[22]; +} S22; + +void foo_22(S22 *dst, S22 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr x{{.*}}#14 + // CHECK-NEXT: str x{{.*}}#14 + *dst = *src; +} + +typedef struct { + uint8_t a[21]; +} S21; + +void foo_21(S21 *dst, S21 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr x{{.*}}#13 + // CHECK-NEXT: str x{{.*}}#13 + *dst = *src; +} + +typedef struct { + uint8_t a[19]; +} S19; + +void foo_19(S19 *dst, S19 *src) { + // CHECK: ldp + // CHECK-NEXT: stp + // CHECK-NEXT: ldr w{{.*}}#15 + // CHECK-NEXT: str w{{.*}}#15 + *dst = *src; +} + +typedef struct { + uint8_t 
a[15]; +} S15; + +void foo_15(S15 *dst, S15 *src) { + // CHECK: ldr x + // CHECK-NEXT: str x + // CHECK-NEXT: ldr x{{.*}}#7 + // CHECK-NEXT: str x{{.*}}#7 + *dst = *src; +} + +typedef struct { + uint8_t a[14]; +} S14; + +void foo_14(S14 *dst, S14 *src) { + // CHECK: ldr x + // CHECK-NEXT: str x + // CHECK-NEXT: ldr x{{.*}}#6 + // CHECK-NEXT: str x{{.*}}#6 + *dst = *src; +} + +typedef struct { + uint8_t a[13]; +} S13; + +void foo_13(S13 *dst, S13 *src) { + // CHECK: ldr x + // CHECK-NEXT: str x + // CHECK-NEXT: ldr x{{.*}}#5 + // CHECK-NEXT: str x{{.*}}#5 + *dst = *src; +} + +typedef struct { + uint8_t a[11]; +} S11; + +void foo_11(S11 *dst, S11 *src) { + // CHECK: ldr x + // CHECK-NEXT: str x + // CHECK-NEXT: ldr w{{.*}}#7 + // CHECK-NEXT: str w{{.*}}#7 + *dst = *src; +} + +typedef struct { + uint8_t a[7]; +} S7; + +void foo_7(S7 *dst, S7 *src) { + // CHECK: ldr w + // CHECK-NEXT: str w + *dst = *src; +} + +int main() { + return 0; +} \ No newline at end of file diff --git a/testsuite/c_test/unit_test/UNIT0111-AggCopy-OffsetVerification/test.cfg b/testsuite/c_test/unit_test/UNIT0111-AggCopy-OffsetVerification/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..519297d6cc7cd2b51150f5138adbc0051e0fc024 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0111-AggCopy-OffsetVerification/test.cfg @@ -0,0 +1,6 @@ +CO2: +${MAPLE_BUILD_OUTPUT}/bin/maple AggCopy.c -o AggCopy.out -isystem ${MAPLE_BUILD_OUTPUT}/lib/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include -isystem ../lib/include -isystem ${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86 -O2 -fPIC -lpthread -lm ${option} --save-temps +cat AggCopy.s | ${OUT_ROOT}/tools/bin/FileCheck AggCopy.c +COS: +${MAPLE_BUILD_OUTPUT}/bin/maple AggCopy.c -o AggCopy.out -isystem ${MAPLE_BUILD_OUTPUT}/lib/include -isystem 
${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include -isystem ../lib/include -isystem ${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86 -Os -fPIC -lpthread -lm ${option} --save-temps +cat AggCopy.s | ${OUT_ROOT}/tools/bin/FileCheck AggCopy.c diff --git a/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/expected.txt b/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/expected.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/main.c b/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/main.c new file mode 100644 index 0000000000000000000000000000000000000000..d90adc6919a9ee2005a67a26a9dda214d3756476 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/main.c @@ -0,0 +1,15 @@ +int a[10]; + +int main() { + int i = 0; + a[0] = 1; + // can not compute trip count + while ((double) 8.584 >= i) { + a[i]++; + i++; + } + if (a[0] != 2) { + abort(); + } + return 0; +} diff --git a/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/test.cfg b/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/test.cfg new file mode 100644 index 0000000000000000000000000000000000000000..12276add4f3d98bc0bad2447fc23a652d1a88401 --- /dev/null +++ b/testsuite/c_test/unit_test/UNIT0112-lfo-floatcond/test.cfg @@ -0,0 +1,3 @@ +CO2: +compile(main) +run(main) diff --git a/testsuite/driver/config/aarch64-clang-debug.conf b/testsuite/driver/config/aarch64-clang-debug.conf index a8d5ac1f1b5f87b25fe4b36a3c1c1e5298150078..3d139ada4ea62d0c4a0208cb529f2d08d37c4fc0 100644 --- a/testsuite/driver/config/aarch64-clang-debug.conf +++ b/testsuite/driver/config/aarch64-clang-debug.conf @@ -1,15 +1,17 @@ [MODE_SET] java_common_mode_set: O0, O2, GC_O0, GC_O2 c_common_mode_set: CO0, CO2, CMBCO2 - spec_mode_set: SCO0_TEST, SCO2_TEST, SCO0_TRAIN, SCO2_TRAIN, SAFEO2_TEST, 
SAFEO2_TRAIN, SCO0_TEST_MERGE, SCO2_TEST_MERGE, SCO0_TRAIN_MERGE, SCO2_TRAIN_MERGE, SCMBCO2_TEST, SCMBCO2_TRAIN + spec_mode_set: SCO0_TEST, SCO2_TEST, SCO0_TRAIN, SCO2_TRAIN, SCOS_TRAIN, SAFEO2_TEST, SAFEO2_TRAIN, SCO0_TEST_MERGE, SCO2_TEST_MERGE, SCO0_TRAIN_MERGE, SCO2_TRAIN_MERGE, SCOS_TRAIN_MERGE, SCMBCO2_TEST, SCMBCO2_TRAIN co3_set: CO3, CO3_NOINLINE c_driver_mode_set: DRIVER [DEFAULT_TEST_SUITE] c_test/atomic_test: CO0 c_test/ast_test: ASTO0, ASTO2, ASTMBC + c_test/lto_test: LTO_TEST c_test/enhancec_test: ENCO2_N_D, ENCO2_B_D, ENCO2_S_D, ENCO2_N_D_ALL, ENCO2_B_D_A_C c_test/sanity_test: c_common_mode_set + c_test/struct_test: c_common_mode_set c_test/gtorture_test: c_common_mode_set, co3_set c_test/unit_test: CO2, CMBCO2, FORTIFY_O2, FORTIFY_O1 c_test/noinline_test: CO2, ENCO2_N_D_NO_LINK, CMBCO2 @@ -39,6 +41,23 @@ c_test/csmith_test: CO2, COS c_test/unit_test/UNIT0064-parameter-validbit-muxed: CO2_MUXED c_test/unit_test/UNIT0066-os-merge-string-secctions: COS + c_test/unit_test/UNIT0091-HomogeneousAggregates: CO0 + c_test/unit_test/UNIT0097-ParamWithLargeStack: CO0, CO2, CMBCO2, FORTIFY_O2, FORTIFY_O1 + c_test/unit_test/UNIT0098-ParamWithLargeStack2: CO0, CO2, CMBCO2, FORTIFY_O2, FORTIFY_O1 + c_test/unit_test/UNIT0106-ebo-verify-imm: DRIVER + c_test/unit_test/UNIT0109-reach-def-for-undef-behavior: DRIVER + c_test/unit_test/UNIT0110-prepeephole-verify-immopnd: DRIVER + c_test/aapcs64_test: AAPCS64_O0, AAPCS64_O2 + c_test/lto_test/LTO0001-OneHeadFile: LTOASTO0 + c_test/lto_test/LTO0002-DiffCFile: LTOASTO0 + c_test/lto_test/LTO0003-DiffStruct: LTOASTO0 + c_test/lto_test/LTO0004-DiffMarco: LTOASTO0 + c_test/lto_test/LTO0005-UnionNest: LTOASTO0 + c_test/lto_test/LTO0006-ArrayLTO: LTOASTO0 + c_test/lto_test/LTO0007-UnnamedStruct: LTOASTO0 + c_test/lto_test/LTO0008-StructNest: LTOASTO0 + c_test/lto_test/LTO0009-TypedefStruct: LTOASTO0 + c_test/lto_test/LTO0010-TypeDiffRecord: LTOASTO0 [BAN_TEST_SUITE] @@ -412,4 +431,28 @@ c_test/neon_test/NEON02123-vsrid_n_u64: 
NEONO0 #libc_enhanced + c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic: DRIVER + c_test/driver_test/DRIVER0018-ftlsmodel-initialexec: DRIVER + #undefined reference to `inline_me' + c_test/lto_test/LTO0033-gcc.dg-materialize-1: LTO_TEST + + #O2 core dumped + c_test/lto_test/LTO0030-gcc.dg-ipacp: LTO_TEST + c_test/lto_test/LTO0031-gcc.dg-ipareference: LTO_TEST + c_test/lto_test/LTO0032-gcc.dg-ipareference2: LTO_TEST + + #O3 core dumped + c_test/lto_test/LTO0028-gcc.dg-c-compatible-types-1: LTO_TEST + + #The function callmealias has unsupported attribute(s): alias + c_test/lto_test/LTO0027-gcc.dg-attr-weakref-1: LTO_TEST + + #undefined reference to `cabs' + c_test/lto_test/LTO0022-gcc.dg-20110201-1: LTO_TEST + + #clang error function definition is not allowed here + c_test/lto_test/LTO0037-gcc.dg-pr55703: LTO_TEST + + #relay on QEMU env + c_test/driver_test/DRIVER0021-fnosemanticinterposition2: DRIVER \ No newline at end of file diff --git a/testsuite/driver/config/aarch64-clang-release.conf b/testsuite/driver/config/aarch64-clang-release.conf index b08bf3e686320d0e85de499f2fbde5355895e510..ddf40f9353fdcfafe7c3285139774941df5e73f0 100644 --- a/testsuite/driver/config/aarch64-clang-release.conf +++ b/testsuite/driver/config/aarch64-clang-release.conf @@ -1,7 +1,7 @@ [MODE_SET] java_common_mode_set: O0, O2, GC_O0, GC_O2 c_common_mode_set: CO0, CO2, CMBCO2 - spec_mode_set: SCO0_TEST, SCO2_TEST, SCO0_TRAIN, SCO2_TRAIN, SAFEO2_TEST, SAFEO2_TRAIN, SCO0_TEST_MERGE, SCO2_TEST_MERGE, SCO0_TRAIN_MERGE, SCO2_TRAIN_MERGE, SCMBCO2_TEST, SCMBCO2_TRAIN + spec_mode_set: SCO0_TEST, SCO2_TEST, SCO0_TRAIN, SCO2_TRAIN, SCOS_TRAIN, SAFEO2_TEST, SAFEO2_TRAIN, SCO0_TEST_MERGE, SCO2_TEST_MERGE, SCO0_TRAIN_MERGE, SCO2_TRAIN_MERGE, SCOS_TRAIN_MERGE, SCMBCO2_TEST, SCMBCO2_TRAIN co3_set: CO3, CO3_NOINLINE c_driver_mode_set: DRIVER x64_common_mode_set: X64_O0, X64_LITECG, X64_LITECG_ME_O2, X64_LITECG_MPL2MPL_O2 @@ -9,18 +9,19 @@ [DEFAULT_TEST_SUITE] c_test/ast_test: ASTO0, ASTO2, ASTMBC + 
c_test/lto_test: LTO_TEST c_test/enhancec_test: ENCO2_N_D, ENCO2_B_D, ENCO2_S_D, ENCO2_N_D_ALL, ENCO2_B_D_A_C c_test/sanity_test: c_common_mode_set - c_test/gtorture_test: c_common_mode_set, co3_set, + c_test/gtorture_test: c_common_mode_set, co3_set c_test/unit_test: CO2, CMBCO2, FORTIFY_O2, FORTIFY_O1, COS - c_test/noinline_test: CO2, ENCO2_N_D_NO_LINK, CMBCO2 + c_test/noinline_test: CO2, ENCO2_N_D, CMBCO2 c_test/driver_test: c_driver_mode_set c_test/stackprotect_test: SP_STRONG, SP_ALL c_test/tsvc_test: TSVO2 c_test/struct_test: c_common_mode_set c_test/gdb_test: DEJAO0 c_test/gnu_test: CO0, CO2 - c_test/gnu_test/builtin_function_test: CO0, CO2 + c_test/gnu_test/arm_builtin_function_test: CO0, CO2 java_test/app_test: java_common_mode_set java_test/arrayboundary_test: java_common_mode_set java_test/clinit_test: java_common_mode_set @@ -45,6 +46,24 @@ c_test/csmith_test: CO2, COS c_test/unit_test/UNIT0064-parameter-validbit-muxed: CO2_MUXED c_test/unit_test/UNIT0066-os-merge-string-secctions: COS + c_test/unit_test/UNIT0091-HomogeneousAggregates: CO0 + c_test/unit_test/UNIT0097-ParamWithLargeStack: CO0, CO2, CMBCO2, FORTIFY_O2, FORTIFY_O1 + c_test/unit_test/UNIT0098-ParamWithLargeStack2: CO0, CO2, CMBCO2, FORTIFY_O2, FORTIFY_O1 + c_test/unit_test/UNIT0104-dead-loop-exit-bb: DRIVER + c_test/unit_test/UNIT0106-ebo-verify-imm: DRIVER + c_test/unit_test/UNIT0109-reach-def-for-undef-behavior: DRIVER + c_test/unit_test/UNIT0110-prepeephole-verify-immopnd: DRIVER + c_test/aapcs64_test: AAPCS64_O0, AAPCS64_O2 + c_test/lto_test/LTO0001-OneHeadFile: LTOASTO0 + c_test/lto_test/LTO0002-DiffCFile: LTOASTO0 + c_test/lto_test/LTO0003-DiffStruct: LTOASTO0 + c_test/lto_test/LTO0004-DiffMarco: LTOASTO0 + c_test/lto_test/LTO0005-UnionNest: LTOASTO0 + c_test/lto_test/LTO0006-ArrayLTO: LTOASTO0 + c_test/lto_test/LTO0007-UnnamedStruct: LTOASTO0 + c_test/lto_test/LTO0008-StructNest: LTOASTO0 + c_test/lto_test/LTO0009-TypedefStruct: LTOASTO0 + c_test/lto_test/LTO0010-TypeDiffRecord: 
LTOASTO0 [BAN_TEST_SUITE] java_test/compact_test/RT0194-rt-compact-AnnotationStaticFieldSetTest: GC_O0, GC_O2 @@ -439,9 +458,6 @@ #maple compiler error c_test/gtorture_test/GCC01074-g.torture.execute-pr42248: CO0, CO2,CMBCO2 - #cg compiler error - c_test/gtorture_test/GCC01042-g.torture.execute-pr38533: CO0 - #linker error c_test/gtorture_test/GCC00094-g.torture.execute-20010122-1: CO0, CO2,CMBCO2 c_test/gtorture_test/GCC00167-g.torture.execute-20020418-1: CO0, CO2,CMBCO2 @@ -1031,13 +1047,66 @@ c_test/neon_test/NEON02089-vqrshrnd_n_u64: NEONO0 c_test/neon_test/NEON02122-vsrid_n_s64: NEONO0 c_test/neon_test/NEON02123-vsrid_n_u64: NEONO0 + # will be add back when local dynamic is fully supported + c_test/driver_test/DRIVER0017-ftlsmodel-localdynamic: DRIVER + c_test/driver_test/DRIVER0018-ftlsmodel-initialexec: DRIVER - #libc_enhanced + #-boundary-check-dynamic --npe-check-dynamic #driver me --quiet - c_test/enhancec_test/ENC0095-boundary_string: ENCO2_B_D_OLD + c_test/enhancec_test/ENC0095-boundary_string: ENCO2_B_D - #-boundary-check-dynamic --npe-check-dynamic + #echo $? 
47 - # is empty - c_test/enhancec_test/ENC0095-boundary_string: ENCO2_B_D + #cg compiler error + c_test/gtorture_test/GCC01042-g.torture.execute-pr38533: CO0 + + #undefined reference to `inline_me' + c_test/lto_test/LTO0033-gcc.dg-materialize-1: LTO_TEST + + #O2 core dumped + c_test/lto_test/LTO0030-gcc.dg-ipacp: LTO_TEST + c_test/lto_test/LTO0031-gcc.dg-ipareference: LTO_TEST + c_test/lto_test/LTO0032-gcc.dg-ipareference2: LTO_TEST + + #O3 core dumped + c_test/lto_test/LTO0028-gcc.dg-c-compatible-types-1: LTO_TEST + + #The function callmealias has unsupported attribute(s): alias + c_test/lto_test/LTO0027-gcc.dg-attr-weakref-1: LTO_TEST + + #undefined reference to `cabs' + c_test/lto_test/LTO0022-gcc.dg-20110201-1: LTO_TEST + + #clang error function definition is not allowed here + c_test/lto_test/LTO0037-gcc.dg-pr55703: LTO_TEST + + #result not same + c_test/lto_test/LTO0098-gcc.dg-pr55525: LTO_TEST + c_test/lto_test/LTO0100-gcc.dg-pr60449: LTO_TEST + c_test/lto_test/LTO0077-gcc.dg-20100227-1: LTO_TEST + + #unsupportedVarAttrs.empty() + c_test/lto_test/LTO0096-gcc.dg-pr52634: LTO_TEST + c_test/lto_test/LTO0090-gcc.dg-pr46940: LTO_TEST + c_test/lto_test/LTO0089-gcc.dg-pr45721: LTO_TEST + c_test/lto_test/LTO0056-gcc.dg-20081222: LTO_TEST + + #clang error + c_test/lto_test/LTO0074-gcc.dg-20091209-1: LTO_TEST + c_test/lto_test/LTO0062-gcc.dg-20090706-1: LTO_TEST + + #link error + c_test/lto_test/LTO0069-gcc.dg-20091006-1: LTO_TEST + c_test/lto_test/LTO0051-gcc.dg-20081201-1: LTO_TEST + c_test/lto_test/LTO0049-gcc.dg-20081118: LTO_TEST + c_test/lto_test/LTO0048-gcc.dg-20081115: LTO_TEST + + #compile error + c_test/lto_test/LTO0065-gcc.dg-20090729: LTO_TEST + + #core dumped + c_test/lto_test/LTO0057-gcc.dg-20090120: LTO_TEST + + #relay on QEMU env + c_test/driver_test/DRIVER0021-fnosemanticinterposition2: DRIVER diff --git a/testsuite/driver/config/x86_64-clang-debug.conf b/testsuite/driver/config/x86_64-clang-debug.conf index 
a2d0ae02774a0373db71b5845e39927a2525d112..e1ff7c6327394f6ddb5ec62807f559205ba8eb9a 100644 --- a/testsuite/driver/config/x86_64-clang-debug.conf +++ b/testsuite/driver/config/x86_64-clang-debug.conf @@ -114,8 +114,13 @@ c_test/x64_copy_test/SUP04129-misra2004-generic-C90-rule12_7-trule_12_7v0: X64_LITECG_ME_O2 c_test/x64_copy_test/GCC00924-g.torture.execute-pr19606: X64_LITECG_MPL2MPL_O2,X64_O2,X64_LITECG_ME_O2 - # struct test - c_test/struct_test/STRUCT0005-UseStructArgsWithBitField: X64_O2,X64_O0,X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2 + # unsupported intrinsicop + c_test/x64_copy_test/GCC00223-g.torture.execute-20030323-1: X64_O0,X64_LITECG,X64_LITECG_ME_O2,X64_LITECG_MPL2MPL_O2,X64_O2 + c_test/x64_copy_test/GCC00238-g.torture.execute-20030811-1: X64_O0,X64_LITECG,X64_LITECG_ME_O2,X64_LITECG_MPL2MPL_O2,X64_O2 + # struct test + c_test/struct_test/STRUCT0005-UseStructArgsWithBitField: X64_O2,X64_O0,X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2 + c_test/struct_test/STRUCT0012-2023030708359: X64_O2,X64_O0,X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2 + c_test/struct_test/STRUCT0013-2023042404640: X64_O2,X64_O0,X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2 # sudo ln -f /usr/include/asm-generic /usr/include/asm c_test/x64_copy_test/SUP03985-misra2004-cconform-C90-rule20_5-txrule_20_5: X64_O0, X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2,X64_O2 diff --git a/testsuite/driver/config/x86_64-clang-release.conf b/testsuite/driver/config/x86_64-clang-release.conf index af08fe3182ff5b1562b6e89d64a21f7e30390bcb..e17a58805ea35e0a2f467ee55eeabb4cde19fe68 100644 --- a/testsuite/driver/config/x86_64-clang-release.conf +++ b/testsuite/driver/config/x86_64-clang-release.conf @@ -112,8 +112,14 @@ c_test/x64_copy_test/SUP04129-misra2004-generic-C90-rule12_7-trule_12_7v0: X64_LITECG_ME_O2 c_test/x64_copy_test/GCC00924-g.torture.execute-pr19606: X64_LITECG_MPL2MPL_O2,X64_O2,X64_LITECG_ME_O2 + # unsupported intrinsicop + 
c_test/x64_copy_test/GCC00223-g.torture.execute-20030323-1: X64_O0,X64_LITECG,X64_LITECG_ME_O2,X64_LITECG_MPL2MPL_O2,X64_O2 + c_test/x64_copy_test/GCC00238-g.torture.execute-20030811-1: X64_O0,X64_LITECG,X64_LITECG_ME_O2,X64_LITECG_MPL2MPL_O2,X64_O2 + # struct test c_test/struct_test/STRUCT0005-UseStructArgsWithBitField: X64_O2,X64_O0,X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2 + c_test/struct_test/STRUCT0012-2023030708359: X64_O2,X64_O0,X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2 + c_test/struct_test/STRUCT0013-2023042404640: X64_O2,X64_O0,X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2 # sudo ln -f /usr/include/asm-generic /usr/include/asm c_test/x64_copy_test/SUP03985-misra2004-cconform-C90-rule20_5-txrule_20_5: X64_O0, X64_LITECG_ME_O2,X64_LITECG,X64_LITECG_MPL2MPL_O2,X64_O2 diff --git a/testsuite/driver/src/api/.api/__init__.py b/testsuite/driver/src/api/.api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3a055c3126768375451e1e962ccf8ba049192afc --- /dev/null +++ b/testsuite/driver/src/api/.api/__init__.py @@ -0,0 +1,32 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +import os + +names = locals() + +my_dir = os.path.dirname(__file__) +for py in os.listdir(my_dir): + if py == '__init__.py': + continue + + if py.endswith('.py'): + name = py[:-3] + + clsn = name.capitalize() + while clsn.find('_') > 0: + h = clsn.index('_') + clsn = clsn[0:h] + clsn[h + 1:].capitalize() + api = __import__("api." 
+ name, fromlist=[clsn]) + names[clsn] = getattr(api, clsn) diff --git a/testsuite/driver/src/api/c2ast.py b/testsuite/driver/src/api/.api/c2ast.py similarity index 100% rename from testsuite/driver/src/api/c2ast.py rename to testsuite/driver/src/api/.api/c2ast.py diff --git a/testsuite/driver/src/api/c_linker.py b/testsuite/driver/src/api/.api/c_linker.py similarity index 100% rename from testsuite/driver/src/api/c_linker.py rename to testsuite/driver/src/api/.api/c_linker.py diff --git a/testsuite/driver/src/api/c_linker_all.py b/testsuite/driver/src/api/.api/c_linker_all.py similarity index 100% rename from testsuite/driver/src/api/c_linker_all.py rename to testsuite/driver/src/api/.api/c_linker_all.py diff --git a/testsuite/driver/src/api/.api/check_file_equal.py b/testsuite/driver/src/api/.api/check_file_equal.py new file mode 100644 index 0000000000000000000000000000000000000000..4741025b7152e178b985b09cab043c004b751052 --- /dev/null +++ b/testsuite/driver/src/api/.api/check_file_equal.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class CheckFileEqual(ShellOperator): + + def __init__(self, file1, file2, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.file1 = file1 + self.file2 = file2 + + def get_command(self, variables): + self.command = "diff " + self.file1 + " " + self.file2 + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/.api/check_reg_contain.py b/testsuite/driver/src/api/.api/check_reg_contain.py new file mode 100644 index 0000000000000000000000000000000000000000..c9964972a293f2224cf4397c1266911fd8c4ade4 --- /dev/null +++ b/testsuite/driver/src/api/.api/check_reg_contain.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class CheckRegContain(ShellOperator): + + def __init__(self, reg, file, return_value_list=None): + super().__init__(return_value_list) + self.reg = reg + self.file = file + + def get_command(self, variables): + self.command = "python3 ${OUT_ROOT}/script/check.py --check=contain --str=\"" + self.reg + "\" --result=" + self.file + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/clang_linker.py b/testsuite/driver/src/api/.api/clang_linker.py similarity index 100% rename from testsuite/driver/src/api/clang_linker.py rename to testsuite/driver/src/api/.api/clang_linker.py diff --git a/testsuite/driver/src/api/driver.py b/testsuite/driver/src/api/.api/driver.py similarity index 100% rename from testsuite/driver/src/api/driver.py rename to testsuite/driver/src/api/.api/driver.py diff --git a/testsuite/driver/src/api/gen_bin.py b/testsuite/driver/src/api/.api/gen_bin.py similarity index 100% rename from testsuite/driver/src/api/gen_bin.py rename to testsuite/driver/src/api/.api/gen_bin.py diff --git a/testsuite/driver/src/api/hir2mpl.py b/testsuite/driver/src/api/.api/hir2mpl.py similarity index 100% rename from testsuite/driver/src/api/hir2mpl.py rename to testsuite/driver/src/api/.api/hir2mpl.py diff --git a/testsuite/driver/src/api/.api/irbuild.py b/testsuite/driver/src/api/.api/irbuild.py new file mode 100644 index 0000000000000000000000000000000000000000..b71f7424491b906a3ee1fb229b19723ac8ca7c95 --- /dev/null +++ b/testsuite/driver/src/api/.api/irbuild.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api.shell_operator import ShellOperator + + +class Irbuild(ShellOperator): + + def __init__(self, irbuild, infile, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.irbuild = irbuild + self.infile = infile + + def get_command(self, variables): + self.command = self.irbuild + " " + self.infile + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/.api/java2dex.py b/testsuite/driver/src/api/.api/java2dex.py new file mode 100644 index 0000000000000000000000000000000000000000..43c61e6a0f1dda6e5dbdcba3a909bc1cd71ae3c0 --- /dev/null +++ b/testsuite/driver/src/api/.api/java2dex.py @@ -0,0 +1,28 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Java2dex(ShellOperator): + + def __init__(self, jar_file, outfile, infile, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.jar_file = jar_file + self.outfile = outfile + self.infile = infile + + def get_command(self, variables): + self.command = "bash ${OUT_ROOT}/tools/bin/java2dex -o " + self.outfile + " -p " + ":".join(self.jar_file) + " -i " + ":".join(self.infile) + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/.api/linker.py b/testsuite/driver/src/api/.api/linker.py new file mode 100644 index 0000000000000000000000000000000000000000..300cc265711d9ff009ce2377f2a9e99cf354c9ba --- /dev/null +++ b/testsuite/driver/src/api/.api/linker.py @@ -0,0 +1,26 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Linker(ShellOperator): + + def __init__(self, lib, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.lib = lib + + def get_command(self, variables): + self.command = "${OUT_ROOT}/tools/bin/clang++ -g3 -O2 -x assembler-with-cpp -march=armv8-a -target aarch64-linux-gnu -c ${APP}.VtableImpl.s && ${OUT_ROOT}/tools/bin/clang++ ${APP}.VtableImpl.o -L${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/" + self.lib + " -g3 -O2 -march=armv8-a -target aarch64-linux-gnu -fPIC -shared -o ${APP}.so ${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/mrt_module_init.o -fuse-ld=lld -rdynamic -lcore-all -lcommon-bridge -Wl,-z,notext -Wl,-T${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/linker/maplelld.so.lds" + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/.api/maple.py b/testsuite/driver/src/api/.api/maple.py new file mode 100644 index 0000000000000000000000000000000000000000..d55873013fa2bd245314aed1aa30d4484d69991b --- /dev/null +++ b/testsuite/driver/src/api/.api/maple.py @@ -0,0 +1,41 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Maple(ShellOperator): + + def __init__(self, maple, run, option, global_option, infiles, return_value_list=None, redirection=None, outfile=""): + super().__init__(return_value_list, redirection) + self.maple = maple + self.run = run + self.option_dict = option + self.global_option = global_option + self.infiles = infiles + self.outfile = outfile + + def get_command(self, variables): + self.command = self.maple + " " + self.command += "--infile " + " ".join(self.infiles) + if self.run: + self.command += " --run=" + ":".join(self.run) + " " + option = [] + for cmd in self.run: + option.append(self.option_dict[cmd]) + self.command += "--option=\"" + ":".join(option) + "\" " + self.command += " " + self.global_option + " " + if self.outfile: + self.command += " -o " + self.outfile + " " + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/maple_cg.py b/testsuite/driver/src/api/.api/maple_cg.py similarity index 100% rename from testsuite/driver/src/api/maple_cg.py rename to testsuite/driver/src/api/.api/maple_cg.py diff --git a/testsuite/driver/src/api/maple_driver.py b/testsuite/driver/src/api/.api/maple_driver.py similarity index 74% rename from testsuite/driver/src/api/maple_driver.py rename to testsuite/driver/src/api/.api/maple_driver.py index f7bbedd04d19bcecdd7a9b0473e9da468bdf47f6..55e1969ba1b7ceaaee4977695b7060fa760676b7 100644 --- a/testsuite/driver/src/api/maple_driver.py +++ b/testsuite/driver/src/api/.api/maple_driver.py @@ -17,7 +17,7 @@ from api.shell_operator import ShellOperator class MapleDriver(ShellOperator): - def __init__(self, maple, infiles, outfile, option, return_value_list=None, redirection=None, include_path="", extra_opt=""): + def __init__(self, maple, infiles, option, return_value_list=None, redirection=None, outfile="", include_path="", extra_opt=""): super().__init__(return_value_list, redirection) self.maple = maple self.include_path = 
include_path @@ -29,8 +29,12 @@ class MapleDriver(ShellOperator): def get_command(self, variables): include_path_str = " ".join(["-isystem " + path for path in self.include_path]) self.command = self.maple + " " - self.command += " ".join(self.infiles) - self.command += " -o " + self.outfile + if isinstance(self.infiles,list): + self.command += " ".join(self.infiles) + if isinstance(self.infiles,str): + self.command += self.infiles + " " + if self.outfile != "": + self.command += " -o " + self.outfile self.command += " " + include_path_str self.command += " " + self.option + " " + self.extra_opt return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/.api/mplsh.py b/testsuite/driver/src/api/.api/mplsh.py new file mode 100644 index 0000000000000000000000000000000000000000..ec379177b9716c738a8c0f0356e3df8402d77fff --- /dev/null +++ b/testsuite/driver/src/api/.api/mplsh.py @@ -0,0 +1,47 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Mplsh(ShellOperator): + + def __init__(self, mplsh, xbootclasspath, infile, garbage_collection_kind, return_value_list=None, env=None, qemu=None, qemu_libc=None, qemu_ld_lib=None, main="${APP}", args=None, redirection=None): + super().__init__(return_value_list, redirection) + self.env = env + self.qemu = qemu + self.qemu_libc = qemu_libc + self.qemu_ld_lib = qemu_ld_lib + self.mplsh = mplsh + self.garbage_collection_kind = garbage_collection_kind + self.xbootclasspath = xbootclasspath + self.infile = infile + self.main = main + self.args = args + + def get_command(self, variables): + self.command = "" + if self.env is not None: + for env_var in self.env.keys(): + self.command += env_var + "=" + self.env[env_var] + " " + if self.qemu is not None: + self.command += self.qemu + " -L " + self.qemu_libc + " -E LD_LIBRARY_PATH=" + ":".join(self.qemu_ld_lib) + " " + self.command += self.mplsh + " " + if self.garbage_collection_kind == "GC": + self.command += "-Xgconly " + if self.args is not None: + self.command +="-Xbootclasspath:" + self.xbootclasspath + " -cp " + self.infile + " " + self.main + " " + self.args + else: + self.command += "-Xbootclasspath:" + self.xbootclasspath + " -cp " + self.infile + " " + self.main + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/qemu.py b/testsuite/driver/src/api/.api/qemu.py similarity index 100% rename from testsuite/driver/src/api/qemu.py rename to testsuite/driver/src/api/.api/qemu.py diff --git a/testsuite/driver/src/api/.api/shell.py b/testsuite/driver/src/api/.api/shell.py new file mode 100644 index 0000000000000000000000000000000000000000..cb3a626da731dac7ea3388a06803fcc14cb5df1b --- /dev/null +++ b/testsuite/driver/src/api/.api/shell.py @@ -0,0 +1,25 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api.shell_operator import ShellOperator + + +class Shell(ShellOperator): + + def __init__(self, command, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.command = command + + def get_command(self, variables): + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/.api/shell_operator.py b/testsuite/driver/src/api/.api/shell_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..08938546b98aa9ab34424b47dec53d0d236eec93 --- /dev/null +++ b/testsuite/driver/src/api/.api/shell_operator.py @@ -0,0 +1,60 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +class ShellOperator(object): + + def __init__(self, return_value_list, redirection=None): + self.command = "" + if return_value_list is None: + self.return_value_list = [0] + else: + self.return_value_list = return_value_list + self.redirection = redirection + + def val_replace(self, cmd, variables): + if variables is not None: + for variable in variables.keys(): + cmd = cmd.replace("${" + variable + "}", variables[variable]) + return cmd + + def get_redirection(self): + save="cat tmp.log | tee -a %s && rm tmp.log"%(self.redirection) + red = ">tmp.log 2>&1 && (%s) || (%s && exit 1)"%(save, save) + if self.redirection is not None: + return red + else: + return "" + + def get_check_command(self, variables): + if type(self.return_value_list) == str and self.return_value_list.startswith("${"): + cmd = self.val_replace(self.return_value_list, variables) + return " || [ $? -eq %s ]"%(cmd) + elif len(self.return_value_list) == 1: + if 0 in self.return_value_list: + return "" + else: + return " || [ $? -eq " + str(self.return_value_list[0]) + " ]" + elif len(self.return_value_list) == 0: + return " || true" + else: + return_value_check_str_list = [] + for return_value in self.return_value_list: + return_value_check_str_list.append("[ ${return_value} -eq " + str(return_value) + " ]") + return " || (return_value=$? 
&& (" + " || ".join(return_value_check_str_list) + "))" + + def get_final_command(self, variables): + final_command = self.val_replace(self.command, variables) + final_command += self.get_redirection() + final_command += self.get_check_command(variables) + return final_command diff --git a/testsuite/driver/src/api/simple_maple.py b/testsuite/driver/src/api/.api/simple_maple.py similarity index 100% rename from testsuite/driver/src/api/simple_maple.py rename to testsuite/driver/src/api/.api/simple_maple.py diff --git a/testsuite/driver/src/api/benchmark_native.py b/testsuite/driver/src/api/benchmark_native.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c970cd97afda13a01466b8ac1b4de1d243e45f --- /dev/null +++ b/testsuite/driver/src/api/benchmark_native.py @@ -0,0 +1,34 @@ +import os +import re +from api.shell_operator import ShellOperator + + +class BenchmarkNative(ShellOperator): + + def __init__(self, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.command = "" + self.native_lib_name = "" + self.native_src = "" + self.native_include = "-I${MAPLE_ROOT}/../libnativehelper/include_jni" + self.native_linker = "" + + def get_command(self, variables): + if "NATIVE_LIB_NAME" in variables: + self.native_lib_name = variables["NATIVE_LIB_NAME"] + if "NATIVE_SRC" in variables: + srcs = variables["NATIVE_SRC"].split(":") + for src in srcs: + self.native_src += " " + src + if "NATIVE_INCLUDE" in variables: + includes = variables["NATIVE_INCLUDE"].split(":") + for include in includes: + if include != "": + self.native_include += " -I${MAPLE_ROOT}/../" + include + if "NATIVE_LINKE" in variables: + links = variables["NATIVE_LINKE"].split(":") + for link in links: + if link != "": + self.native_linker += " ${MAPLE_ROOT}/../" + link + self.command = "${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin/clang++ " + self.native_include + " " + self.native_src + " 
${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libc/crtbegin_so/android_arm64_armv8-a_core/crtbegin_so.o ${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/lib64/clang/9.0.3/lib/linux/libclang_rt.builtins-aarch64-android.a ${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/lib64/libatomic.a ${MAPLE_ROOT}/../out/soong/.intermediates/build/soong/libgcc_stripped/android_arm64_armv8-a_core_static/libgcc_stripped.a " + self.native_linker + " ${MAPLE_ROOT}/../out/soong/.intermediates/external/libcxx/libc++/android_arm64_armv8-a_core_shared/libc++.so ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libc/libc/android_arm64_armv8-a_core_shared_10000/libc.so ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libm/libm/android_arm64_armv8-a_core_shared_10000/libm.so ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libdl/libdl/android_arm64_armv8-a_core_shared_10000/libdl.so ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libc/crtend_so/android_arm64_armv8-a_core/obj/bionic/libc/arch-common/bionic/crtend_so.o -o ${BENCHMARK_ACTION}/lib" + self.native_lib_name + ".so -nostdlib -Wl,--gc-sections -shared -Wl,-soname,lib" + self.native_lib_name + ".so -target aarch64-linux-android -B${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/bin -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--build-id=md5 -Wl,--warn-shared-textrel -Wl,--fatal-warnings -Wl,--no-undefined-version -Wl,--exclude-libs,libgcc.a -Wl,--exclude-libs,libgcc_stripped.a -fuse-ld=lld -Wl,--pack-dyn-relocs=android+relr -Wl,--use-android-relr-tags -Wl,--no-undefined -Wl,--hash-style=gnu -Wl,--icf=safe -Wl,-z,max-page-size=4096 ${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/lib64/clang/9.0.3/lib/linux/libclang_rt.ubsan_minimal-aarch64-android.a -Wl,--exclude-libs,libclang_rt.ubsan_minimal-aarch64-android.a -Wl,-execute-only -fPIC" + return super().get_final_command(variables) \ No newline at end of 
file diff --git a/testsuite/driver/src/api/benchmark_vogar.py b/testsuite/driver/src/api/benchmark_vogar.py new file mode 100644 index 0000000000000000000000000000000000000000..67e55bccc483f74f6b92ead011b819dc2695513e --- /dev/null +++ b/testsuite/driver/src/api/benchmark_vogar.py @@ -0,0 +1,18 @@ +import os +import re +from api.shell_operator import ShellOperator + + +class BenchmarkVogar(ShellOperator): + + def __init__(self, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.command = "" + self.vogar_deps_dir = "${MAPLE_ROOT}/zeiss/prebuilt/tools/mygote_script/benchmark_scripts" + self.sourcepath = "" + + def get_command(self, variables): + if "SOURCEPATH" in variables: + self.sourcepath = "--sourcepath " + variables["SOURCEPATH"] + self.command = "PATH=${MAPLE_ROOT}/../out/soong/host/linux-x86/bin:/$PATH ANDROID_BUILD_TOP=${MAPLE_ROOT}/.. VOGAR_DEPS_DIR=" + self.vogar_deps_dir + " java -classpath " + self.vogar_deps_dir + "/vogar.jar vogar.Vogar --results-dir . 
--toolchain D8 --mode DEVICE --variant X64 " + self.sourcepath + " --benchmark " + variables["BENCHMARK_CASE"] + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/check_file_equal.py b/testsuite/driver/src/api/check_file_equal.py index 4741025b7152e178b985b09cab043c004b751052..d4472a1981e5486e0e8c0828f9adbccc9ffe520c 100644 --- a/testsuite/driver/src/api/check_file_equal.py +++ b/testsuite/driver/src/api/check_file_equal.py @@ -17,11 +17,11 @@ from api.shell_operator import ShellOperator class CheckFileEqual(ShellOperator): - def __init__(self, file1, file2, return_value_list=None, redirection=None): - super().__init__(return_value_list, redirection) + def __init__(self, file1, file2, return_value_list=None): + super().__init__(return_value_list) self.file1 = file1 self.file2 = file2 def get_command(self, variables): self.command = "diff " + self.file1 + " " + self.file2 - return super().get_final_command(variables) + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/check_reg_contain.py b/testsuite/driver/src/api/check_reg_contain.py index c9964972a293f2224cf4397c1266911fd8c4ade4..49007a19c3576f65e3f955c1b8f2b60966392359 100644 --- a/testsuite/driver/src/api/check_reg_contain.py +++ b/testsuite/driver/src/api/check_reg_contain.py @@ -17,11 +17,15 @@ from api.shell_operator import ShellOperator class CheckRegContain(ShellOperator): - def __init__(self, reg, file, return_value_list=None): + def __init__(self, reg, file, choice=None, return_value_list=None): super().__init__(return_value_list) self.reg = reg self.file = file + self.choice = choice def get_command(self, variables): - self.command = "python3 ${OUT_ROOT}/script/check.py --check=contain --str=\"" + self.reg + "\" --result=" + self.file + if self.choice is None: + self.command = "python3 ${OUT_ROOT}/target/product/public/bin/check.py --check=contain --str=\"" + self.reg + "\" --result=" 
+ self.file + elif self.choice == "num": + self.command = "python3 ${OUT_ROOT}/target/product/public/bin/check.py --check=num --result=" + self.file + " --str=" + self.reg + " --num=${EXPECTNUM}" return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/class2panda.py b/testsuite/driver/src/api/class2panda.py new file mode 100644 index 0000000000000000000000000000000000000000..bdc94ccfb69a76b853dc2e3842023876c6f8d2cb --- /dev/null +++ b/testsuite/driver/src/api/class2panda.py @@ -0,0 +1,28 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api.shell_operator import ShellOperator + + +class Class2panda(ShellOperator): + + def __init__(self, class2panda, infile, outfile, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.class2panda = class2panda + self.infile = infile + self.outfile = outfile + + def get_command(self, variables): + self.command = "LD_LIBRARY_PATH=${OUT_ROOT}/target/product/public/lib:$LD_LIBRARY_PATH " + self.class2panda + " " + self.infile + " " + self.outfile + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/dex2mpl.py b/testsuite/driver/src/api/dex2mpl.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5715b92c5debc8298cdc926ba893010364e3b2 --- /dev/null +++ b/testsuite/driver/src/api/dex2mpl.py @@ -0,0 +1,28 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api.shell_operator import ShellOperator + + +class Dex2mpl(ShellOperator): + + def __init__(self, dex2mpl, option, infile, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.dex2mpl = dex2mpl + self.option = option + self.infile = infile + + def get_command(self, variables): + self.command = self.dex2mpl + " " + self.option + " " + self.infile + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/gendeps.py b/testsuite/driver/src/api/gendeps.py new file mode 100644 index 0000000000000000000000000000000000000000..44dc09c5035e51046e8c9c65c7a44e594effc33c --- /dev/null +++ b/testsuite/driver/src/api/gendeps.py @@ -0,0 +1,34 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Gendeps(ShellOperator): + + def __init__(self, gendeps, apk, emui, infile, return_value_list=None, redirection=None, extra_option=""): + super().__init__(return_value_list, redirection) + self.gendeps = gendeps + self.apk = apk + self.emui = emui + self.infile = infile + self.extra_option = extra_option + + def get_command(self, variables): + self.command = self.gendeps + " -emui-map ${OUT_ROOT}/target/product/public/lib/dex_module_map_sdk.list -classpath libmaplecore-all -verbose -out-module-name " + self.command += "-apk " + self.apk + " " + self.command += "-emui " + self.emui + " " + self.command += self.extra_option + " " + self.command +="-in-dex " + self.infile + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/irbuild.py b/testsuite/driver/src/api/irbuild.py index b71f7424491b906a3ee1fb229b19723ac8ca7c95..3688c00e9387ef3a2ac29eba163737133a2e00d3 100644 --- a/testsuite/driver/src/api/irbuild.py +++ b/testsuite/driver/src/api/irbuild.py @@ -24,4 +24,4 @@ class Irbuild(ShellOperator): def get_command(self, variables): self.command = self.irbuild + " " + self.infile - return super().get_final_command(variables) + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/jar2dex.py b/testsuite/driver/src/api/jar2dex.py new file mode 100644 index 0000000000000000000000000000000000000000..327a2dbd552f01efaa4cd66f065b05f5ab93e204 --- /dev/null +++ b/testsuite/driver/src/api/jar2dex.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api.shell_operator import ShellOperator + + +class Jar2dex(ShellOperator): + + def __init__(self, jar_file, infile, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.jar_file = jar_file + self.infile = infile + + def get_command(self, variables): + self.command = "${OUT_ROOT}/target/product/public/bin/jar2dex " + " -p " + ":".join(self.jar_file) + " -i " + self.infile + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/jasm2jar.py b/testsuite/driver/src/api/jasm2jar.py new file mode 100644 index 0000000000000000000000000000000000000000..252119e227628dacb9f1cedecbbb057d01d875c3 --- /dev/null +++ b/testsuite/driver/src/api/jasm2jar.py @@ -0,0 +1,26 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Jasm2jar(ShellOperator): + + def __init__(self, file, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.file = file + + def get_command(self, variables): + self.command = "java -jar ${OUT_ROOT}/target/product/public/bin/asmtools.jar jasm " + " ".join(self.file) + ";jar -cvf ${APP}.jar *.class" + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/java2dex.py b/testsuite/driver/src/api/java2dex.py index 43c61e6a0f1dda6e5dbdcba3a909bc1cd71ae3c0..fcc25c966ecb50c54ce343ac6d3f9c6341e0fa09 100644 --- a/testsuite/driver/src/api/java2dex.py +++ b/testsuite/driver/src/api/java2dex.py @@ -17,12 +17,23 @@ from api.shell_operator import ShellOperator class Java2dex(ShellOperator): - def __init__(self, jar_file, outfile, infile, return_value_list=None, redirection=None): + def __init__(self, jar_file, outfile, infile, usesimplejava=False, return_value_list=None, redirection=None): super().__init__(return_value_list, redirection) self.jar_file = jar_file self.outfile = outfile self.infile = infile + self.usesimplejava = usesimplejava + + def java2dex_i_output(self, variables): + if 'EXTRA_JAVA_FILE' in variables.keys(): + variables['EXTRA_JAVA_FILE'] = variables['EXTRA_JAVA_FILE'].replace('[','').replace(']','').replace(',',':') + return ':'.join(self.infile) + return self.infile[0] def get_command(self, variables): - self.command = "bash ${OUT_ROOT}/tools/bin/java2dex -o " + self.outfile + " -p " + ":".join(self.jar_file) + " -i " + ":".join(self.infile) + if not self.usesimplejava: + self.command = "${OUT_ROOT}/target/product/public/bin/java2dex -o " + self.outfile + " -p " + ":".join(self.jar_file) + " -i " + self.java2dex_i_output(variables) + else: + self.command = "${OUT_ROOT}/target/product/public/bin/java2dex -o " + self.outfile + " -p " + ":".join(self.jar_file) + " -i " + 
self.java2dex_i_output(variables) + " -s useSimpleJava" + # print(super().get_final_command(variables)) return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/jcod2jar.py b/testsuite/driver/src/api/jcod2jar.py new file mode 100644 index 0000000000000000000000000000000000000000..bd066a69ca5db331ecf1980e4c9c8b6ee950bb18 --- /dev/null +++ b/testsuite/driver/src/api/jcod2jar.py @@ -0,0 +1,26 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api.shell_operator import ShellOperator + + +class Jcod2jar(ShellOperator): + + def __init__(self, file, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.file = file + + def get_command(self, variables): + self.command = "java -jar ${OUT_ROOT}/target/product/public/bin/asmtools.jar jcoder " + " ".join(self.file) + ";jar -cvf ${APP}.jar *.class" + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/linker.py b/testsuite/driver/src/api/linker.py index 300cc265711d9ff009ce2377f2a9e99cf354c9ba..f912c36ba93c1d61ffb7763e47fa03f9ea3d355b 100644 --- a/testsuite/driver/src/api/linker.py +++ b/testsuite/driver/src/api/linker.py @@ -17,10 +17,16 @@ from api.shell_operator import ShellOperator class Linker(ShellOperator): - def __init__(self, lib, return_value_list=None, redirection=None): + def __init__(self, infile, lib, model, native_src=None, return_value_list=None, redirection=None): super().__init__(return_value_list, redirection) 
+ self.model = model self.lib = lib + self.native_src = native_src + self.infile = infile def get_command(self, variables): - self.command = "${OUT_ROOT}/tools/bin/clang++ -g3 -O2 -x assembler-with-cpp -march=armv8-a -target aarch64-linux-gnu -c ${APP}.VtableImpl.s && ${OUT_ROOT}/tools/bin/clang++ ${APP}.VtableImpl.o -L${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/" + self.lib + " -g3 -O2 -march=armv8-a -target aarch64-linux-gnu -fPIC -shared -o ${APP}.so ${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/mrt_module_init.o -fuse-ld=lld -rdynamic -lcore-all -lcommon-bridge -Wl,-z,notext -Wl,-T${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/linker/maplelld.so.lds" - return super().get_final_command(variables) + if self.native_src: + self.command = "${OUT_ROOT}/target/product/public/bin/linker -m " + self.model + " -l " + self.lib + " -i " + self.infile + " -n " + self.native_src + else: + self.command = "${OUT_ROOT}/target/product/public/bin/linker -m " + self.model + " -l " + self.lib + " -i " + self.infile + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/maple.py b/testsuite/driver/src/api/maple.py index d55873013fa2bd245314aed1aa30d4484d69991b..6417addd2d0e0e6cf97d12717e6e0eb5f2fb6996 100644 --- a/testsuite/driver/src/api/maple.py +++ b/testsuite/driver/src/api/maple.py @@ -17,25 +17,20 @@ from api.shell_operator import ShellOperator class Maple(ShellOperator): - def __init__(self, maple, run, option, global_option, infiles, return_value_list=None, redirection=None, outfile=""): + def __init__(self, maple, run, option, global_option, infile, return_value_list=None, redirection=None): super().__init__(return_value_list, redirection) self.maple = maple self.run = run self.option_dict = option self.global_option = global_option - self.infiles = infiles - self.outfile = outfile + self.infile = infile def get_command(self, variables): - self.command = self.maple + " " - self.command += "--infile " + " ".join(self.infiles) - if self.run: - 
self.command += " --run=" + ":".join(self.run) + " " - option = [] - for cmd in self.run: - option.append(self.option_dict[cmd]) - self.command += "--option=\"" + ":".join(option) + "\" " - self.command += " " + self.global_option + " " - if self.outfile: - self.command += " -o " + self.outfile + " " + self.command = self.maple + " --run=" + ":".join(self.run) + " " + option = [] + for cmd in self.run: + option.append(self.option_dict[cmd]) + self.command += "--option=\"" + ":".join(option) + "\" " + self.command += self.global_option + " " + self.command += "--infile " + self.infile return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/mplme.py b/testsuite/driver/src/api/mplme.py new file mode 100644 index 0000000000000000000000000000000000000000..627343c0ca18e1a7974ee340155f44220af317fd --- /dev/null +++ b/testsuite/driver/src/api/mplme.py @@ -0,0 +1,28 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Mplme(ShellOperator): + + def __init__(self, mplme, option, infile, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.mplme = mplme + self.option = option + self.infile = infile + + def get_command(self, variables): + self.command = self.mplme + " " + self.option + " " + self.infile + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/mplverf.py b/testsuite/driver/src/api/mplverf.py new file mode 100644 index 0000000000000000000000000000000000000000..00297886b87372664c10ddf992d14c73ecc77af2 --- /dev/null +++ b/testsuite/driver/src/api/mplverf.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Mplverf(ShellOperator): + + def __init__(self, mplverf, infile, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.mplverf = mplverf + self.infile = infile + + def get_command(self, variables): + self.command = self.mplverf + " " + self.infile + return super().get_final_command(variables) \ No newline at end of file diff --git a/testsuite/driver/src/api/native_compile.py b/testsuite/driver/src/api/native_compile.py new file mode 100644 index 0000000000000000000000000000000000000000..68cc06b8152501d9a781b2b319ed5d06718e947b --- /dev/null +++ b/testsuite/driver/src/api/native_compile.py @@ -0,0 +1,75 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class NativeCompile(ShellOperator): + + def __init__(self, mpldep, infile, model, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.mpldep = mpldep + self.infile = infile + self.model =model + + def get_command(self, variables): + if self.model == "arm32_hard": + self.command = "/usr/bin/clang++-9 -O2 -g3 -c -fPIC -march=armv7-a -mfpu=vfpv4 -mfloat-abi=hard -target armv7a-linux-gnueabihf -c " + for file in self.mpldep: + self.command += "-I" + file + " " + self.command += " -isystem /usr/arm-linux-gnueabihf/include/c++/5 -isystem /usr/arm-linux-gnueabihf/include/c++/5/arm-linux-gnueabihf -isystem /usr/arm-linux-gnueabihf/include/c++/5/backward -isystem /usr/lib/gcc-cross/arm-linux-gnueabihf/5/include -isystem /usr/lib/gcc-cross/arm-linux-gnueabihf/5/include-fixed -isystem /usr/arm-linux-gnueabihf/include" + self.command += " " + self.infile + ".cpp; " + self.command += " /usr/bin/clang++-9 " + self.command += " " + self.infile + ".o" + self.command += " -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv7-a -mfpu=vfpv4 -mfloat-abi=hard -target armv7a-linux-gnueabihf -fPIC -shared -fuse-ld=lld -rdynamic" + self.command += " -L" + self.mpldep[0] + " -lcore-all -lcommon-bridge" + self.command += " -Wl,-z,notext -o" + self.command += " lib" + self.infile + ".so;" + if self.model == "arm32_softfp": + self.command = "/usr/bin/clang++-9 -O2 -g3 -c -fPIC -march=armv7-a -mfpu=vfpv4 -mfloat-abi=softfp -target armv7a-linux-gnueabi -c " + for file in self.mpldep: + self.command += "-I" + file + " " + self.command += " -isystem /usr/arm-linux-gnueabi/include/c++/5 -isystem /usr/arm-linux-gnueabi/include/c++/5/arm-linux-gnueabi -isystem /usr/arm-linux-gnueabi/include/c++/5/backward -isystem /usr/lib/gcc-cross/arm-linux-gnueabi/5/include -isystem /usr/lib/gcc-cross/arm-linux-gnueabi/5/include-fixed -isystem 
/usr/arm-linux-gnueabi/include" + self.command += " " + self.infile + ".cpp; " + self.command += " /usr/bin/clang++-9 " + self.command += " " + self.infile + ".o" + self.command += " -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv7-a -mfpu=vfpv4 -mfloat-abi=softfp -target armv7a-linux-gnueabi -fPIC -shared -fuse-ld=lld -rdynamic" + self.command += " -L" + self.mpldep[0] + " -lcore-all -lcommon-bridge" + self.command += " -Wl,-z,notext -o" + self.command += " lib" + self.infile + ".so;" + if self.model == "arm64": + self.command = " /usr/bin/clang++-9 -O2 -g3 -c -fPIC -march=armv8-a -target aarch64-linux-gnu " + for file in self.mpldep: + self.command += "-I" + file + " " + self.command += " -isystem /usr/aarch64-linux-gnu/include/c++/5 -isystem /usr/aarch64-linux-gnu/include/c++/5/aarch64-linux-gnu -isystem /usr/aarch64-linux-gnu/include/c++/5/backward -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/include -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/include-fixed -isystem /usr/aarch64-linux-gnu/include" + self.command += " " + self.infile + ".cpp; " + self.command += " /usr/bin/clang++-9 " + self.command += " " + self.infile + ".o" + self.command += " -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv8-a -target aarch64-linux-gnu -fPIC -shared -fuse-ld=lld -rdynamic" + self.command += " -L" + self.mpldep[0] + " -lcore-all -lcommon-bridge" + self.command += " -Wl,-z,notext -o" + self.command += " lib" + self.infile + ".so;" + if self.model == "arm64_ifile": + self.command = " /usr/bin/clang++-9 -O2 -g3 -c -fPIC -march=armv8-a -target aarch64-linux-gnu " + for file in self.mpldep: + self.command += "-I" + file + " " + self.command += " -isystem /usr/aarch64-linux-gnu/include/c++/5 -isystem /usr/aarch64-linux-gnu/include/c++/5/aarch64-linux-gnu -isystem /usr/aarch64-linux-gnu/include/c++/5/backward -isystem 
/usr/lib/gcc-cross/aarch64-linux-gnu/5/include -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/include-fixed -isystem /usr/aarch64-linux-gnu/include" + self.command += " " + self.infile + ".cpp; " + self.command += " /usr/bin/clang++-9 " + self.command += " " + self.infile + ".o" + self.command += " -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv8-a -target aarch64-linux-gnu -fPIC -shared -fuse-ld=lld -rdynamic" + self.command += " -L" + self.mpldep[0] + " -lmrt -lcommon-bridge" + self.command += " -Wl,-z,notext -o" + self.command += " lib" + self.infile + ".so;" + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/qemu_linker_arm32.py b/testsuite/driver/src/api/qemu_linker_arm32.py new file mode 100644 index 0000000000000000000000000000000000000000..429fe3b2bea976659ce454dafa1d3ad2e412afc8 --- /dev/null +++ b/testsuite/driver/src/api/qemu_linker_arm32.py @@ -0,0 +1,38 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class QemuLinkerArm32(ShellOperator): + + def __init__(self, lib, model, parse=None, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.lib = lib + self.model = model + if parse is None: + self.parse = '${APP}' + else: + self.parse = parse + + def get_command(self, variables): + if self.model == "hard": + self.command = "/usr/bin/clang++-9 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=hard -g3 -O2 -x assembler-with-cpp -target armv7a-linux-gnueabihf -c " + self.parse + ".VtableImpl.s -o " + self.parse + ".VtableImpl.qemu.o;/usr/bin/clang++-9 -g3 -O2 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=hard -target armv7a-linux-gnueabihf -fPIC -shared -o " + self.parse + ".so ${OUT_ROOT}/target/product/maple_arm32/lib/hard/mrt_module_init.o -I${OUT_ROOT}/target/product/maple_arm32/lib/nativehelper " + self.parse + ".VtableImpl.qemu.o -fuse-ld=lld -rdynamic -L${OUT_ROOT}/target/product/maple_arm32/lib/" + self.lib + "/ -lcore-all -lcommon-bridge -Wl\,-z\,notext -Wl\,-T ${OUT_ROOT}/target/product/public/lib/linker/mapleArm32lld.so.lds" + if self.model == "native_hard": + self.command = "/usr/bin/clang++-9 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=hard -g3 -O2 -x assembler-with-cpp -target armv7a-linux-gnueabihf -c " + self.parse + ".VtableImpl.s -o " + self.parse + ".VtableImpl.qemu.o;/usr/bin/clang++-9 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=hard -g3 -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -fPIC -std=c++14 -nostdlibinc -isystem /usr/arm-linux-gnueabihf/include/c++/5 -isystem /usr/arm-linux-gnueabihf/include/c++/5/arm-linux-gnueabihf -isystem /usr/arm-linux-gnueabihf/include/c++/5/backward -isystem /usr/lib/gcc-cross/arm-linux-gnueabihf/5/include -isystem /usr/lib/gcc-cross/arm-linux-gnueabihf/5/include-fixed -isystem /usr/arm-linux-gnueabihf/include -target armv7a-linux-gnueabihf -fPIC -shared -o " + self.parse + ".so 
${OUT_ROOT}/target/product/maple_arm32/lib/hard/mrt_module_init.o ${NATIVE_SRC} -I${OUT_ROOT}/target/product/public/lib " + self.parse + ".VtableImpl.qemu.o -fuse-ld=lld -rdynamic -L${OUT_ROOT}/target/product/maple_arm32/lib/" + self.lib + "/ -lcore-all -lcommon-bridge -Wl\,-z\,notext -Wl\,-T ${OUT_ROOT}/target/product/public/lib/linker/mapleArm32lld.so.lds" + if self.model == "native_softfp": + self.command = "/usr/bin/clang++-9 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=softfp -g3 -O2 -x assembler-with-cpp -target armv7a-linux-gnueabi -c " + self.parse + ".VtableImpl.s -o " + self.parse + ".VtableImpl.qemu.o;/usr/bin/clang++-9 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=softfp -g3 -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -fPIC -std=c++14 -nostdlibinc -isystem /usr/arm-linux-gnueabi/include/c++/5 -isystem /usr/arm-linux-gnueabi/include/c++/5/arm-linux-gnueabi -isystem /usr/arm-linux-gnueabi/include/c++/5/backward -isystem /usr/lib/gcc-cross/arm-linux-gnueabi/5/include -isystem /usr/lib/gcc-cross/arm-linux-gnueabi/5/include-fixed -isystem /usr/arm-linux-gnueabi/include -target armv7a-linux-gnueabi -fPIC -shared -o " + self.parse + ".so ${OUT_ROOT}/target/product/maple_arm32/lib/softfp/mrt_module_init.o ${NATIVE_SRC} -I${OUT_ROOT}/target/product/public/lib " + self.parse + ".VtableImpl.qemu.o -fuse-ld=lld -rdynamic -L${OUT_ROOT}/target/product/maple_arm32/lib/" + self.lib + "/ -lcore-all -lcommon-bridge -Wl\,-z\,notext -Wl\,-T ${OUT_ROOT}/target/product/public/lib/linker/mapleArm32lld.so.lds" + if self.model == "softfp": + self.command = "/usr/bin/clang++-9 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=softfp -g3 -O2 -x assembler-with-cpp -target armv7a-linux-gnueabi -c " + self.parse + ".VtableImpl.s -o " + self.parse + ".VtableImpl.qemu.o;/usr/bin/clang++-9 -g3 -O2 -march=armv7-a -mfpu=vfpv4 -mfloat-abi=softfp -target armv7a-linux-gnueabi -fPIC -shared -o " + self.parse + ".so ${OUT_ROOT}/target/product/maple_arm32/lib/softfp/mrt_module_init.o 
-I${OUT_ROOT}/target/product/maple_arm32/lib/nativehelper " + self.parse + ".VtableImpl.qemu.o -fuse-ld=lld -rdynamic -L${OUT_ROOT}/target/product/maple_arm32/lib/" + self.lib + "/ -lcore-all -lcommon-bridge -Wl\,-z\,notext -Wl\,-T ${OUT_ROOT}/target/product/public/lib/linker/mapleArm32lld.so.lds" + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/qemu_linker_arm64.py b/testsuite/driver/src/api/qemu_linker_arm64.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc7549c24dfac768581b88bef7bc5b2e631859a --- /dev/null +++ b/testsuite/driver/src/api/qemu_linker_arm64.py @@ -0,0 +1,30 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class QemuLinkerArm64(ShellOperator): + + def __init__(self, lib, parse=None, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.lib = lib + if parse is None: + self.parse = '${APP}' + else: + self.parse = parse + + def get_command(self, variables): + self.command = "/usr/bin/clang++-9 -march=armv8-a -g3 -O2 -x assembler-with-cpp -target aarch64-linux-gnu -c " + self.parse + ".VtableImpl.s -o " + self.parse + ".VtableImpl.qemu.o;/usr/bin/clang++-9 -g3 -O2 -march=armv8-a -target aarch64-linux-gnu -fPIC -shared -o " + self.parse + ".so ${OUT_ROOT}/target/product/maple_arm64/lib/mrt_module_init.o -I${OUT_ROOT}/target/product/maple_arm64/lib/nativehelper " + self.parse + ".VtableImpl.qemu.o -fuse-ld=lld -rdynamic -L${OUT_ROOT}/target/product/maple_arm64/lib/" + self.lib + "/ -lcore-all -lcommon-bridge -Wl,-z,notext -Wl,-T ${OUT_ROOT}/target/product/public/lib/linker/maplelld.so.lds" + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/qemu_native_linker.py b/testsuite/driver/src/api/qemu_native_linker.py new file mode 100644 index 0000000000000000000000000000000000000000..7007bff9a2ac8ca03d392925f315c0a51b5a4bc2 --- /dev/null +++ b/testsuite/driver/src/api/qemu_native_linker.py @@ -0,0 +1,25 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class QemuNativeLinker(ShellOperator): + def __init__(self, lib, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.lib = lib + + def get_command(self, variables): + self.command = "/usr/bin/clang++-9 -march=armv8-a -g3 -O2 -x assembler-with-cpp -target aarch64-linux-gnu -c ${APP}.VtableImpl.s -o ${APP}.VtableImpl.qemu.o;/usr/bin/clang++-9 -g3 -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -fPIC -std=c++14 -nostdlibinc -march=armv8-a -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/../../../../aarch64-linux-gnu/include/c++/5 -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/../../../../aarch64-linux-gnu/include/c++/5/aarch64-linux-gnu -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/../../../../aarch64-linux-gnu/include/c++/5/backward -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/include -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/include-fixed -isystem /usr/aarch64-linux-gnu/include -target aarch64-linux-gnu -fPIC -shared -o ${APP}.so ${OUT_ROOT}/target/product/maple_arm64/lib/mrt_module_init.o ${NATIVE_SRC} -I${OUT_ROOT}/target/product/public/lib ${APP}.VtableImpl.qemu.o -fuse-ld=lld -rdynamic -L${OUT_ROOT}/target/product/maple_arm64/lib/" + self.lib + "/ -lcore-all -lcommon-bridge -Wl\,-z\,notext -Wl\,-T ${OUT_ROOT}/target/product/public/lib/linker/maplelld.so.lds" + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/shell.py b/testsuite/driver/src/api/shell.py index cb3a626da731dac7ea3388a06803fcc14cb5df1b..04d6a776103405f7c9c1031e9cfb9276a9d8d82d 100644 --- a/testsuite/driver/src/api/shell.py +++ b/testsuite/driver/src/api/shell.py @@ -22,4 +22,4 @@ class Shell(ShellOperator): self.command = command def get_command(self, variables): - return super().get_final_command(variables) + return super().get_final_command(variables) \ No newline at end of file diff --git 
a/testsuite/driver/src/api/shell_operator.py b/testsuite/driver/src/api/shell_operator.py index 08938546b98aa9ab34424b47dec53d0d236eec93..a33412022d04f367a410e8b4461311a3dd3d60d6 100644 --- a/testsuite/driver/src/api/shell_operator.py +++ b/testsuite/driver/src/api/shell_operator.py @@ -22,25 +22,14 @@ class ShellOperator(object): self.return_value_list = return_value_list self.redirection = redirection - def val_replace(self, cmd, variables): - if variables is not None: - for variable in variables.keys(): - cmd = cmd.replace("${" + variable + "}", variables[variable]) - return cmd - def get_redirection(self): - save="cat tmp.log | tee -a %s && rm tmp.log"%(self.redirection) - red = ">tmp.log 2>&1 && (%s) || (%s && exit 1)"%(save, save) if self.redirection is not None: - return red + return " > " + self.redirection + " 2>&1" else: return "" - def get_check_command(self, variables): - if type(self.return_value_list) == str and self.return_value_list.startswith("${"): - cmd = self.val_replace(self.return_value_list, variables) - return " || [ $? -eq %s ]"%(cmd) - elif len(self.return_value_list) == 1: + def get_check_command(self): + if len(self.return_value_list) == 1: if 0 in self.return_value_list: return "" else: @@ -54,7 +43,11 @@ class ShellOperator(object): return " || (return_value=$? 
&& (" + " || ".join(return_value_check_str_list) + "))" def get_final_command(self, variables): - final_command = self.val_replace(self.command, variables) + final_command = self.command + if variables is not None: + for variable in variables.keys(): + if "${" + variable + "}" in final_command: + final_command = final_command.replace("${" + variable + "}", variables[variable]) final_command += self.get_redirection() - final_command += self.get_check_command(variables) - return final_command + final_command += self.get_check_command() + return final_command \ No newline at end of file diff --git a/testsuite/driver/src/api/smali2dex.py b/testsuite/driver/src/api/smali2dex.py new file mode 100644 index 0000000000000000000000000000000000000000..3e8050ebfd72bca486931b115099f8671248e8bd --- /dev/null +++ b/testsuite/driver/src/api/smali2dex.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Smali2dex(ShellOperator): + + def __init__(self, file, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.file = file + + def get_command(self, variables): + self.command = "java -jar ${OUT_ROOT}/target/product/public/bin/smali-2.2.4.jar ass " + " ".join(self.file) + "; mv out.dex ${APP}.dex" + self.command += super().get_redirection() + return super().get_final_command(variables) diff --git a/testsuite/driver/src/api/unzip.py b/testsuite/driver/src/api/unzip.py new file mode 100644 index 0000000000000000000000000000000000000000..11faffe74b65aec7e71b0e67305023746921400d --- /dev/null +++ b/testsuite/driver/src/api/unzip.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api.shell_operator import ShellOperator + + +class Unzip(ShellOperator): + + def __init__(self, file, target_path, return_value_list=None, redirection=None): + super().__init__(return_value_list, redirection) + self.file = file + self.target_path = target_path + + def get_command(self, variables): + self.command = "unzip -o " + self.file + " -d " + self.target_path + return super().get_final_command(variables) diff --git a/testsuite/driver/src/mode/.mode/AAPCS64_O0.py b/testsuite/driver/src/mode/.mode/AAPCS64_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..3a8e1dfd07c959c2d6da82dbba1f69a4ea93f628 --- /dev/null +++ b/testsuite/driver/src/mode/.mode/AAPCS64_O0.py @@ -0,0 +1,53 @@ +# +# Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +AAPCS64_O0 = { + "compile": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP}.c"], + outfile="${APP}.s", + include_path=[ + "${MAPLE_BUILD_OUTPUT}/lib/include" + ], + option="-O0 -fPIC -g -static -L../../lib/c -lst -lm --save-temps -S", + redirection="compile.log" + ) + ], + "gcc_compile": [ + Shell( + "${OUT_ROOT}/tools/bin/aarch64-linux-gnu-gcc \ + -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include \ + -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include \ + -U __SIZEOF_INT128__ -O0 -S ${APP}.c", + ), + ], + "link_run": [ + CLinker( + infiles=["${APP}"], + front_option="-std=gnu99 -no-pie", + outfile="a.out", + back_option="-lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + ), + Shell( + "${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc a.out > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/.mode/AAPCS64_O2.py b/testsuite/driver/src/mode/.mode/AAPCS64_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..78c58e9b00fc17a4276f8df149b0a22e425c4cd4 --- /dev/null +++ b/testsuite/driver/src/mode/.mode/AAPCS64_O2.py @@ -0,0 +1,53 @@ +# +# Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +AAPCS64_O2 = { + "compile": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP}.c"], + outfile="${APP}.s", + include_path=[ + "${MAPLE_BUILD_OUTPUT}/lib/include" + ], + option="-O2 -fPIC -g -static -L../../lib/c -lst -lm --save-temps -S", + redirection="compile.log" + ) + ], + "gcc_compile": [ + Shell( + "${OUT_ROOT}/tools/bin/aarch64-linux-gnu-gcc \ + -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include \ + -isystem ${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include \ + -U __SIZEOF_INT128__ -O2 -S ${APP}.c", + ), + ], + "link_run": [ + CLinker( + infiles=["${APP}"], + front_option="-std=gnu99 -no-pie", + outfile="a.out", + back_option="-lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + ), + Shell( + "${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc a.out > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/ASTMBC.py b/testsuite/driver/src/mode/.mode/ASTMBC.py similarity index 100% rename from testsuite/driver/src/mode/ASTMBC.py rename to testsuite/driver/src/mode/.mode/ASTMBC.py diff --git a/testsuite/driver/src/mode/ASTO0.py b/testsuite/driver/src/mode/.mode/ASTO0.py similarity index 100% rename from testsuite/driver/src/mode/ASTO0.py rename to testsuite/driver/src/mode/.mode/ASTO0.py diff --git a/testsuite/driver/src/mode/ASTO0_OLD.py b/testsuite/driver/src/mode/.mode/ASTO0_OLD.py similarity index 100% rename from testsuite/driver/src/mode/ASTO0_OLD.py rename to testsuite/driver/src/mode/.mode/ASTO0_OLD.py diff --git a/testsuite/driver/src/mode/ASTO2.py b/testsuite/driver/src/mode/.mode/ASTO2.py similarity index 100% rename from testsuite/driver/src/mode/ASTO2.py rename to testsuite/driver/src/mode/.mode/ASTO2.py diff --git a/testsuite/driver/src/mode/ASTO2_OLD.py b/testsuite/driver/src/mode/.mode/ASTO2_OLD.py 
similarity index 100% rename from testsuite/driver/src/mode/ASTO2_OLD.py rename to testsuite/driver/src/mode/.mode/ASTO2_OLD.py diff --git a/testsuite/driver/src/mode/CMBCO2.py b/testsuite/driver/src/mode/.mode/CMBCO2.py similarity index 95% rename from testsuite/driver/src/mode/CMBCO2.py rename to testsuite/driver/src/mode/.mode/CMBCO2.py index 8d00e15c9eaaa82cd23a8164f4e44b5cd92f474a..2bdb9a0c8176f7ec4c2abb7e759252e130a49013 100644 --- a/testsuite/driver/src/mode/CMBCO2.py +++ b/testsuite/driver/src/mode/.mode/CMBCO2.py @@ -22,7 +22,8 @@ CMBCO2 = { "${MAPLE_BUILD_OUTPUT}/lib/include", "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", - "../lib/include" + "../lib/include", + "${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86" ], option="--target=aarch64 -U __SIZEOF_INT128__", infile="${APP}.c", diff --git a/testsuite/driver/src/mode/CO0.py b/testsuite/driver/src/mode/.mode/CO0.py similarity index 91% rename from testsuite/driver/src/mode/CO0.py rename to testsuite/driver/src/mode/.mode/CO0.py index 62b40461dd365e6186ea2fa7b5207da08efeca37..ca2bd368217e31fd7e0e175feb509217173f29db 100644 --- a/testsuite/driver/src/mode/CO0.py +++ b/testsuite/driver/src/mode/.mode/CO0.py @@ -24,9 +24,10 @@ CO0 = { "${MAPLE_BUILD_OUTPUT}/lib/include", "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", - "../lib/include" + "../lib/include", + "${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86" ], - option="-O0 -fPIC -g -lpthread -lm" + option="-O0 -fPIC -g -lpthread -lm --save-temps" ) ], "compile_err": [ @@ -55,4 +56,4 @@ CO0 = { file2="expected.txt" ) ] -} \ No newline at end of file +} diff --git a/testsuite/driver/src/mode/CO0_OLD.py b/testsuite/driver/src/mode/.mode/CO0_OLD.py similarity index 100% rename from testsuite/driver/src/mode/CO0_OLD.py rename to 
testsuite/driver/src/mode/.mode/CO0_OLD.py diff --git a/testsuite/driver/src/mode/CO0_PIE.py b/testsuite/driver/src/mode/.mode/CO0_PIE.py similarity index 100% rename from testsuite/driver/src/mode/CO0_PIE.py rename to testsuite/driver/src/mode/.mode/CO0_PIE.py diff --git a/testsuite/driver/src/mode/CO0_SHARED.py b/testsuite/driver/src/mode/.mode/CO0_SHARED.py similarity index 100% rename from testsuite/driver/src/mode/CO0_SHARED.py rename to testsuite/driver/src/mode/.mode/CO0_SHARED.py diff --git a/testsuite/driver/src/mode/CO0_SHARED_OLD.py b/testsuite/driver/src/mode/.mode/CO0_SHARED_OLD.py similarity index 100% rename from testsuite/driver/src/mode/CO0_SHARED_OLD.py rename to testsuite/driver/src/mode/.mode/CO0_SHARED_OLD.py diff --git a/testsuite/driver/src/mode/CO2.py b/testsuite/driver/src/mode/.mode/CO2.py similarity index 96% rename from testsuite/driver/src/mode/CO2.py rename to testsuite/driver/src/mode/.mode/CO2.py index a3800269b2f49e7086e40a720010f7f56d651291..1511d60cfc54eba3eca4c5f5c930dc2128ce5091 100644 --- a/testsuite/driver/src/mode/CO2.py +++ b/testsuite/driver/src/mode/.mode/CO2.py @@ -25,7 +25,7 @@ CO2 = { "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", "../lib/include", - "../../csmith_test/runtime_x86" + "${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86" ], option="-O2 -fPIC -lpthread -lm ${option}" ) diff --git a/testsuite/driver/src/mode/CO2_MUXED.py b/testsuite/driver/src/mode/.mode/CO2_MUXED.py similarity index 96% rename from testsuite/driver/src/mode/CO2_MUXED.py rename to testsuite/driver/src/mode/.mode/CO2_MUXED.py index cf60850d0e218eb1bcb6fca9eb4639f71d53f025..6db595c72712a9280a82438bd85fc76f7864a7a8 100644 --- a/testsuite/driver/src/mode/CO2_MUXED.py +++ b/testsuite/driver/src/mode/.mode/CO2_MUXED.py @@ -23,7 +23,7 @@ CO2_MUXED = { "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", 
"${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", "../lib/include", - "../../csmith_test/runtime_x86" + "${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86" ], option="--target=aarch64 -U __SIZEOF_INT128__", infile="${APP}.c", diff --git a/testsuite/driver/src/mode/CO2_OLD.py b/testsuite/driver/src/mode/.mode/CO2_OLD.py similarity index 97% rename from testsuite/driver/src/mode/CO2_OLD.py rename to testsuite/driver/src/mode/.mode/CO2_OLD.py index 0b75ebf4b31622fe07395770b89a289562ec162b..c7ec8bf6a234ae7db4484ca1134494a7ffd7dffd 100644 --- a/testsuite/driver/src/mode/CO2_OLD.py +++ b/testsuite/driver/src/mode/.mode/CO2_OLD.py @@ -23,7 +23,7 @@ CO2_OLD = { "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", "../lib/include", - "../../csmith_test/runtime_x86" + "${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86" ], option="--target=aarch64 -U __SIZEOF_INT128__", infile="${APP}.c", diff --git a/testsuite/driver/src/mode/CO2_PIE.py b/testsuite/driver/src/mode/.mode/CO2_PIE.py similarity index 100% rename from testsuite/driver/src/mode/CO2_PIE.py rename to testsuite/driver/src/mode/.mode/CO2_PIE.py diff --git a/testsuite/driver/src/mode/CO2_SHARED.py b/testsuite/driver/src/mode/.mode/CO2_SHARED.py similarity index 100% rename from testsuite/driver/src/mode/CO2_SHARED.py rename to testsuite/driver/src/mode/.mode/CO2_SHARED.py diff --git a/testsuite/driver/src/mode/CO2_SHARED_OLD.py b/testsuite/driver/src/mode/.mode/CO2_SHARED_OLD.py similarity index 100% rename from testsuite/driver/src/mode/CO2_SHARED_OLD.py rename to testsuite/driver/src/mode/.mode/CO2_SHARED_OLD.py diff --git a/testsuite/driver/src/mode/CO3.py b/testsuite/driver/src/mode/.mode/CO3.py similarity index 100% rename from testsuite/driver/src/mode/CO3.py rename to testsuite/driver/src/mode/.mode/CO3.py diff --git a/testsuite/driver/src/mode/CO3_NOINLINE.py 
b/testsuite/driver/src/mode/.mode/CO3_NOINLINE.py similarity index 100% rename from testsuite/driver/src/mode/CO3_NOINLINE.py rename to testsuite/driver/src/mode/.mode/CO3_NOINLINE.py diff --git a/testsuite/driver/src/mode/CO3_NOINLINE_OLD.py b/testsuite/driver/src/mode/.mode/CO3_NOINLINE_OLD.py similarity index 100% rename from testsuite/driver/src/mode/CO3_NOINLINE_OLD.py rename to testsuite/driver/src/mode/.mode/CO3_NOINLINE_OLD.py diff --git a/testsuite/driver/src/mode/CO3_OLD.py b/testsuite/driver/src/mode/.mode/CO3_OLD.py similarity index 100% rename from testsuite/driver/src/mode/CO3_OLD.py rename to testsuite/driver/src/mode/.mode/CO3_OLD.py diff --git a/testsuite/driver/src/mode/COS.py b/testsuite/driver/src/mode/.mode/COS.py similarity index 94% rename from testsuite/driver/src/mode/COS.py rename to testsuite/driver/src/mode/.mode/COS.py index 053cda2f2d8c222601eaab6b8baa445e80b39a0f..a0ada277b5a9f2677fa9a8da9f63e6c4d6048de4 100644 --- a/testsuite/driver/src/mode/COS.py +++ b/testsuite/driver/src/mode/.mode/COS.py @@ -25,7 +25,7 @@ COS = { "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", "../lib/include", - "../../csmith_test/runtime_x86" + "${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86" ], option="-Os -fPIC -lm --save-temps" ) diff --git a/testsuite/driver/src/mode/COS_OLD.py b/testsuite/driver/src/mode/.mode/COS_OLD.py similarity index 100% rename from testsuite/driver/src/mode/COS_OLD.py rename to testsuite/driver/src/mode/.mode/COS_OLD.py diff --git a/testsuite/driver/src/mode/DEJAO0.py b/testsuite/driver/src/mode/.mode/DEJAO0.py similarity index 100% rename from testsuite/driver/src/mode/DEJAO0.py rename to testsuite/driver/src/mode/.mode/DEJAO0.py diff --git a/testsuite/driver/src/mode/DEJAO0_OLD.py b/testsuite/driver/src/mode/.mode/DEJAO0_OLD.py similarity index 100% rename from testsuite/driver/src/mode/DEJAO0_OLD.py rename to 
testsuite/driver/src/mode/.mode/DEJAO0_OLD.py diff --git a/testsuite/driver/src/mode/DRIVER.py b/testsuite/driver/src/mode/.mode/DRIVER.py similarity index 83% rename from testsuite/driver/src/mode/DRIVER.py rename to testsuite/driver/src/mode/.mode/DRIVER.py index 8d44e4829a6c88cc5adcee6a42ea2bf9424b2933..d9529728fa29948c4e3652a5e47ec882a2aa8a4a 100644 --- a/testsuite/driver/src/mode/DRIVER.py +++ b/testsuite/driver/src/mode/.mode/DRIVER.py @@ -22,6 +22,20 @@ DRIVER = { inputs="${APP}" ) ], + "compileWithCsmith": [ + Driver( + maple="${OUT_ROOT}/${MAPLE_BUILD_TYPE}/bin/maple", + global_option="${OPTION} -I ${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86", + inputs="${APP}" + ) + ], + "compileWithGcc": [ + Driver( + maple="${OUT_ROOT}/tools/gcc-linaro-7.5.0/bin/aarch64-linux-gnu-gcc ", + global_option="${OPTION}", + inputs="${APP}" + ) + ], "mpl2S2out": [ Driver( maple="${OUT_ROOT}/${MAPLE_BUILD_TYPE}/bin/maple", diff --git a/testsuite/driver/src/mode/ENCO2_B_D.py b/testsuite/driver/src/mode/.mode/ENCO2_B_D.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_B_D.py rename to testsuite/driver/src/mode/.mode/ENCO2_B_D.py diff --git a/testsuite/driver/src/mode/ENCO2_B_D_A_C.py b/testsuite/driver/src/mode/.mode/ENCO2_B_D_A_C.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_B_D_A_C.py rename to testsuite/driver/src/mode/.mode/ENCO2_B_D_A_C.py diff --git a/testsuite/driver/src/mode/ENCO2_B_D_A_C_OLD.py b/testsuite/driver/src/mode/.mode/ENCO2_B_D_A_C_OLD.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_B_D_A_C_OLD.py rename to testsuite/driver/src/mode/.mode/ENCO2_B_D_A_C_OLD.py diff --git a/testsuite/driver/src/mode/ENCO2_B_D_OLD.py b/testsuite/driver/src/mode/.mode/ENCO2_B_D_OLD.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_B_D_OLD.py rename to testsuite/driver/src/mode/.mode/ENCO2_B_D_OLD.py diff --git a/testsuite/driver/src/mode/ENCO2_N_D.py 
b/testsuite/driver/src/mode/.mode/ENCO2_N_D.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_N_D.py rename to testsuite/driver/src/mode/.mode/ENCO2_N_D.py diff --git a/testsuite/driver/src/mode/ENCO2_N_D_ALL.py b/testsuite/driver/src/mode/.mode/ENCO2_N_D_ALL.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_N_D_ALL.py rename to testsuite/driver/src/mode/.mode/ENCO2_N_D_ALL.py diff --git a/testsuite/driver/src/mode/ENCO2_N_D_ALL_OLD.py b/testsuite/driver/src/mode/.mode/ENCO2_N_D_ALL_OLD.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_N_D_ALL_OLD.py rename to testsuite/driver/src/mode/.mode/ENCO2_N_D_ALL_OLD.py diff --git a/testsuite/driver/src/mode/ENCO2_N_D_NO_LINK.py b/testsuite/driver/src/mode/.mode/ENCO2_N_D_NO_LINK.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_N_D_NO_LINK.py rename to testsuite/driver/src/mode/.mode/ENCO2_N_D_NO_LINK.py diff --git a/testsuite/driver/src/mode/ENCO2_N_D_NO_LINK_OLD.py b/testsuite/driver/src/mode/.mode/ENCO2_N_D_NO_LINK_OLD.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_N_D_NO_LINK_OLD.py rename to testsuite/driver/src/mode/.mode/ENCO2_N_D_NO_LINK_OLD.py diff --git a/testsuite/driver/src/mode/ENCO2_N_D_OLD.py b/testsuite/driver/src/mode/.mode/ENCO2_N_D_OLD.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_N_D_OLD.py rename to testsuite/driver/src/mode/.mode/ENCO2_N_D_OLD.py diff --git a/testsuite/driver/src/mode/ENCO2_S_D.py b/testsuite/driver/src/mode/.mode/ENCO2_S_D.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_S_D.py rename to testsuite/driver/src/mode/.mode/ENCO2_S_D.py diff --git a/testsuite/driver/src/mode/ENCO2_S_D_OLD.py b/testsuite/driver/src/mode/.mode/ENCO2_S_D_OLD.py similarity index 100% rename from testsuite/driver/src/mode/ENCO2_S_D_OLD.py rename to testsuite/driver/src/mode/.mode/ENCO2_S_D_OLD.py diff --git a/testsuite/driver/src/mode/FORTIFY_O1.py 
b/testsuite/driver/src/mode/.mode/FORTIFY_O1.py similarity index 100% rename from testsuite/driver/src/mode/FORTIFY_O1.py rename to testsuite/driver/src/mode/.mode/FORTIFY_O1.py diff --git a/testsuite/driver/src/mode/FORTIFY_O1_OLD.py b/testsuite/driver/src/mode/.mode/FORTIFY_O1_OLD.py similarity index 100% rename from testsuite/driver/src/mode/FORTIFY_O1_OLD.py rename to testsuite/driver/src/mode/.mode/FORTIFY_O1_OLD.py diff --git a/testsuite/driver/src/mode/FORTIFY_O2.py b/testsuite/driver/src/mode/.mode/FORTIFY_O2.py similarity index 100% rename from testsuite/driver/src/mode/FORTIFY_O2.py rename to testsuite/driver/src/mode/.mode/FORTIFY_O2.py diff --git a/testsuite/driver/src/mode/FORTIFY_O2_OLD.py b/testsuite/driver/src/mode/.mode/FORTIFY_O2_OLD.py similarity index 100% rename from testsuite/driver/src/mode/FORTIFY_O2_OLD.py rename to testsuite/driver/src/mode/.mode/FORTIFY_O2_OLD.py diff --git a/testsuite/driver/src/mode/GC_O0.py b/testsuite/driver/src/mode/.mode/GC_O0.py similarity index 100% rename from testsuite/driver/src/mode/GC_O0.py rename to testsuite/driver/src/mode/.mode/GC_O0.py diff --git a/testsuite/driver/src/mode/GC_O2.py b/testsuite/driver/src/mode/.mode/GC_O2.py similarity index 100% rename from testsuite/driver/src/mode/GC_O2.py rename to testsuite/driver/src/mode/.mode/GC_O2.py diff --git a/testsuite/driver/src/mode/.mode/IR.py b/testsuite/driver/src/mode/.mode/IR.py new file mode 100644 index 0000000000000000000000000000000000000000..918f6630a7b4d025edfce351321443d79277e980 --- /dev/null +++ b/testsuite/driver/src/mode/.mode/IR.py @@ -0,0 +1,32 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +IR = { + "compile": [ + Irbuild( + irbuild="${MAPLE_BUILD_OUTPUT}/bin/irbuild", + infile="${APP}.mpl" + ), + Irbuild( + irbuild="${MAPLE_BUILD_OUTPUT}/bin/irbuild", + infile="${APP}.irb.mpl" + ), + CheckFileEqual( + file1="${APP}.irb.mpl", + file2="${APP}.irb.irb.mpl" + ) + ] +} diff --git a/testsuite/driver/src/mode/.mode/LTOASTO0.py b/testsuite/driver/src/mode/.mode/LTOASTO0.py new file mode 100644 index 0000000000000000000000000000000000000000..340de07a4fc9df88fb60b0f672f1927dc12f7e4e --- /dev/null +++ b/testsuite/driver/src/mode/.mode/LTOASTO0.py @@ -0,0 +1,40 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +LTOASTO0 = { + "c2ast": [ + C2ast( + clang="${OUT_ROOT}/tools/bin/clang", + include_path=[ + "${OUT_ROOT}/aarch64-clang-release/lib/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include" + ], + option="--target=aarch64", + infile="${APP}.c", + outfile="${APP}.ast", + ) + ], + # multiple ast input + "lto2mpl": [ + Hir2mpl( + hir2mpl="${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl", + option="-wpaa", + infile="${APP}", + outfile="${TARGET}" + ) + ] +} diff --git a/testsuite/driver/src/mode/.mode/LTO_TEST.py b/testsuite/driver/src/mode/.mode/LTO_TEST.py new file mode 100644 index 0000000000000000000000000000000000000000..282732e93978026ba9a200dbb24ec9bce4861dbd --- /dev/null +++ b/testsuite/driver/src/mode/.mode/LTO_TEST.py @@ -0,0 +1,52 @@ +# +# Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +LTO_TEST = { + "compile": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles="${APP}", + include_path=[ + "${MAPLE_BUILD_OUTPUT}/lib/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", + ], + option="${option}" + ) + ], + "link": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles="${BPP}", + outfile="a.out", + include_path=[ + "${MAPLE_BUILD_OUTPUT}/lib/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include", + ], + option="${linkoption}" + ) + ], + "run": [ + Shell( + "${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc a.out > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/LVMMBCO2.py b/testsuite/driver/src/mode/.mode/LVMMBCO2.py similarity index 100% rename from testsuite/driver/src/mode/LVMMBCO2.py rename to testsuite/driver/src/mode/.mode/LVMMBCO2.py diff --git a/testsuite/driver/src/mode/LVMO0_DEBUG.py b/testsuite/driver/src/mode/.mode/LVMO0_DEBUG.py similarity index 100% rename from testsuite/driver/src/mode/LVMO0_DEBUG.py rename to testsuite/driver/src/mode/.mode/LVMO0_DEBUG.py diff --git a/testsuite/driver/src/mode/LVMO0_DEBUG_OLD.py b/testsuite/driver/src/mode/.mode/LVMO0_DEBUG_OLD.py similarity index 100% rename from testsuite/driver/src/mode/LVMO0_DEBUG_OLD.py rename to testsuite/driver/src/mode/.mode/LVMO0_DEBUG_OLD.py diff --git a/testsuite/driver/src/mode/LVMO2.py b/testsuite/driver/src/mode/.mode/LVMO2.py similarity index 100% rename from testsuite/driver/src/mode/LVMO2.py rename to testsuite/driver/src/mode/.mode/LVMO2.py diff --git a/testsuite/driver/src/mode/LVMO2_OLD.py b/testsuite/driver/src/mode/.mode/LVMO2_OLD.py similarity index 
100% rename from testsuite/driver/src/mode/LVMO2_OLD.py rename to testsuite/driver/src/mode/.mode/LVMO2_OLD.py diff --git a/testsuite/driver/src/mode/LVMO2_SAFE.py b/testsuite/driver/src/mode/.mode/LVMO2_SAFE.py similarity index 100% rename from testsuite/driver/src/mode/LVMO2_SAFE.py rename to testsuite/driver/src/mode/.mode/LVMO2_SAFE.py diff --git a/testsuite/driver/src/mode/LVMO2_SAFE_OLD.py b/testsuite/driver/src/mode/.mode/LVMO2_SAFE_OLD.py similarity index 100% rename from testsuite/driver/src/mode/LVMO2_SAFE_OLD.py rename to testsuite/driver/src/mode/.mode/LVMO2_SAFE_OLD.py diff --git a/testsuite/driver/src/mode/MPLIR.py b/testsuite/driver/src/mode/.mode/MPLIR.py similarity index 100% rename from testsuite/driver/src/mode/MPLIR.py rename to testsuite/driver/src/mode/.mode/MPLIR.py diff --git a/testsuite/driver/src/mode/NEONO0.py b/testsuite/driver/src/mode/.mode/NEONO0.py similarity index 100% rename from testsuite/driver/src/mode/NEONO0.py rename to testsuite/driver/src/mode/.mode/NEONO0.py diff --git a/testsuite/driver/src/mode/NEONO0_OLD.py b/testsuite/driver/src/mode/.mode/NEONO0_OLD.py similarity index 100% rename from testsuite/driver/src/mode/NEONO0_OLD.py rename to testsuite/driver/src/mode/.mode/NEONO0_OLD.py diff --git a/testsuite/driver/src/mode/NEONO2.py b/testsuite/driver/src/mode/.mode/NEONO2.py similarity index 100% rename from testsuite/driver/src/mode/NEONO2.py rename to testsuite/driver/src/mode/.mode/NEONO2.py diff --git a/testsuite/driver/src/mode/NEONO2_OLD.py b/testsuite/driver/src/mode/.mode/NEONO2_OLD.py similarity index 100% rename from testsuite/driver/src/mode/NEONO2_OLD.py rename to testsuite/driver/src/mode/.mode/NEONO2_OLD.py diff --git a/testsuite/driver/src/mode/.mode/O0.py b/testsuite/driver/src/mode/.mode/O0.py new file mode 100644 index 0000000000000000000000000000000000000000..e566026fe6d1539a1478560e125209e008760099 --- /dev/null +++ b/testsuite/driver/src/mode/.mode/O0.py @@ -0,0 +1,123 @@ +# +# Copyright (c) [2021] 
Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +O0 = { + "compile": [ + Java2dex( + jar_file=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Hir2mpl( + hir2mpl="${MAPLE_BUILD_OUTPUT}/bin/hir2mpl", + option="-mplt ${MAPLE_BUILD_OUTPUT}/libjava-core/host-x86_64-O0/libcore-all.mplt --rc", + infile="${APP}.dex", + outfile="${APP}.mpl" + ), + Maple( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --emitVtableImpl", + "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker" + }, + global_option="", + infiles=["${APP}.mpl"] + ), + Linker( + lib="host-x86_64-O0", + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party", + "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O0", + "./" + ], + mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", 
+ qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party", + "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O0", + "./" + ], + mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1" + }, + qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party", + "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O0", + "./" + ], + mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/.mode/O2.py b/testsuite/driver/src/mode/.mode/O2.py new file mode 100644 index 0000000000000000000000000000000000000000..26e1b99d1ffc429852ced960648b0c13704678b6 --- /dev/null +++ b/testsuite/driver/src/mode/.mode/O2.py @@ -0,0 +1,118 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +O2 = { + "compile": [ + Java2dex( + jar_file=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Hir2mpl( + hir2mpl="${MAPLE_BUILD_OUTPUT}/bin/hir2mpl", + option="-mplt ${MAPLE_BUILD_OUTPUT}/libjava-core/host-x86_64-O2/libcore-all.mplt --rc", + infile="${APP}.dex", + outfile="${APP}.mpl" + ), + Maple( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "--O2 --quiet", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --fPIC --verbose-asm --maplelinker" + }, + global_option="", + infiles=["${APP}.mpl"] + ), + Linker( + lib="host-x86_64-O2", + ) + ], + "run": [ + Mplsh( + qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party", + "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O2", + "./" + ], + mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party", + "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O2", + "./" + ], + mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1" + }, + qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", 
+ qemu_ld_lib=[ + "${MAPLE_BUILD_OUTPUT}/ops/third_party", + "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O2", + "./" + ], + mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/OPTMBCO2.py b/testsuite/driver/src/mode/.mode/OPTMBCO2.py similarity index 100% rename from testsuite/driver/src/mode/OPTMBCO2.py rename to testsuite/driver/src/mode/.mode/OPTMBCO2.py diff --git a/testsuite/driver/src/mode/OPTO2.py b/testsuite/driver/src/mode/.mode/OPTO2.py similarity index 96% rename from testsuite/driver/src/mode/OPTO2.py rename to testsuite/driver/src/mode/.mode/OPTO2.py index 6aacd14f57d053e108575bc3e582d49e1749b043..54d7110de6a0d3e75cc9be7bc053e8c2cb8ce28a 100644 --- a/testsuite/driver/src/mode/OPTO2.py +++ b/testsuite/driver/src/mode/.mode/OPTO2.py @@ -34,7 +34,7 @@ OPTO2 = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}.o","${APP}2.o"], outfile="${APP}.exe", - option="-std=gnu99 --no-pie -L../lib/lib -lst -lm" + option="-std=gnu99 -no-pie -L../lib/lib -lst -lm" ) ], "run": [ diff --git a/testsuite/driver/src/mode/OPTO2_OLD.py b/testsuite/driver/src/mode/.mode/OPTO2_OLD.py similarity index 100% rename from testsuite/driver/src/mode/OPTO2_OLD.py rename to testsuite/driver/src/mode/.mode/OPTO2_OLD.py diff --git a/testsuite/driver/src/mode/OPTO2_SAFE.py b/testsuite/driver/src/mode/.mode/OPTO2_SAFE.py similarity index 96% rename from testsuite/driver/src/mode/OPTO2_SAFE.py rename to testsuite/driver/src/mode/.mode/OPTO2_SAFE.py index 
1c092fdd6743f99f519e1af84410b96c4f524d49..f5236071d286ba699f1b72bb45c8dfb821f05fdf 100644 --- a/testsuite/driver/src/mode/OPTO2_SAFE.py +++ b/testsuite/driver/src/mode/.mode/OPTO2_SAFE.py @@ -34,7 +34,7 @@ OPTO2_SAFE = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}.o","${APP}2.o"], outfile="${APP}.exe", - option="-std=gnu99 --no-pie -L../lib/lib -lst -lm" + option="-std=gnu99 -no-pie -L../lib/lib -lst -lm" ) ], "run": [ diff --git a/testsuite/driver/src/mode/OPTO2_SAFE_OLD.py b/testsuite/driver/src/mode/.mode/OPTO2_SAFE_OLD.py similarity index 100% rename from testsuite/driver/src/mode/OPTO2_SAFE_OLD.py rename to testsuite/driver/src/mode/.mode/OPTO2_SAFE_OLD.py diff --git a/testsuite/driver/src/mode/SAFEO2_TEST.py b/testsuite/driver/src/mode/.mode/SAFEO2_TEST.py similarity index 94% rename from testsuite/driver/src/mode/SAFEO2_TEST.py rename to testsuite/driver/src/mode/.mode/SAFEO2_TEST.py index 880735cf800e69e2be362b1a79412e427a286c80..04e622a1af2a7240d4b353a2f9cce8bf473ad417 100644 --- a/testsuite/driver/src/mode/SAFEO2_TEST.py +++ b/testsuite/driver/src/mode/.mode/SAFEO2_TEST.py @@ -35,7 +35,7 @@ SAFEO2_TEST = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}"], outfile="${EXE}", - option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/SAFEO2_TRAIN.py b/testsuite/driver/src/mode/.mode/SAFEO2_TRAIN.py similarity index 94% rename from testsuite/driver/src/mode/SAFEO2_TRAIN.py rename to testsuite/driver/src/mode/.mode/SAFEO2_TRAIN.py index b79863d9f3264015718fab5bef84f664e07e926f..e6fa037673ed1ae7d48d4249ddf37244dd4035ac 100644 --- a/testsuite/driver/src/mode/SAFEO2_TRAIN.py +++ b/testsuite/driver/src/mode/.mode/SAFEO2_TRAIN.py @@ -35,7 +35,7 @@ SAFEO2_TRAIN = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}"], outfile="${EXE}", - 
option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/SCMBCO2_TEST.py b/testsuite/driver/src/mode/.mode/SCMBCO2_TEST.py similarity index 100% rename from testsuite/driver/src/mode/SCMBCO2_TEST.py rename to testsuite/driver/src/mode/.mode/SCMBCO2_TEST.py diff --git a/testsuite/driver/src/mode/SCMBCO2_TRAIN.py b/testsuite/driver/src/mode/.mode/SCMBCO2_TRAIN.py similarity index 100% rename from testsuite/driver/src/mode/SCMBCO2_TRAIN.py rename to testsuite/driver/src/mode/.mode/SCMBCO2_TRAIN.py diff --git a/testsuite/driver/src/mode/SCO0_TEST.py b/testsuite/driver/src/mode/.mode/SCO0_TEST.py similarity index 100% rename from testsuite/driver/src/mode/SCO0_TEST.py rename to testsuite/driver/src/mode/.mode/SCO0_TEST.py diff --git a/testsuite/driver/src/mode/SCO0_TEST_MERGE.py b/testsuite/driver/src/mode/.mode/SCO0_TEST_MERGE.py similarity index 100% rename from testsuite/driver/src/mode/SCO0_TEST_MERGE.py rename to testsuite/driver/src/mode/.mode/SCO0_TEST_MERGE.py diff --git a/testsuite/driver/src/mode/SCO0_TRAIN.py b/testsuite/driver/src/mode/.mode/SCO0_TRAIN.py similarity index 94% rename from testsuite/driver/src/mode/SCO0_TRAIN.py rename to testsuite/driver/src/mode/.mode/SCO0_TRAIN.py index 23cf65990d949dc4dceaa2eaa235277c608e8853..911eaf0e8aa63f4e8066bc858b65a1233f2ff5da 100644 --- a/testsuite/driver/src/mode/SCO0_TRAIN.py +++ b/testsuite/driver/src/mode/.mode/SCO0_TRAIN.py @@ -35,7 +35,7 @@ SCO0_TRAIN = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}"], outfile="${EXE}", - option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/SCO0_TRAIN_MERGE.py 
b/testsuite/driver/src/mode/.mode/SCO0_TRAIN_MERGE.py similarity index 96% rename from testsuite/driver/src/mode/SCO0_TRAIN_MERGE.py rename to testsuite/driver/src/mode/.mode/SCO0_TRAIN_MERGE.py index 617216bdcad62ad0b598104cf3b1cdcf509018cf..f36837cd7670e266f95cc7b19405e45dc94b9d13 100644 --- a/testsuite/driver/src/mode/SCO0_TRAIN_MERGE.py +++ b/testsuite/driver/src/mode/.mode/SCO0_TRAIN_MERGE.py @@ -77,7 +77,7 @@ SCO0_TRAIN_MERGE = { maple="${OUT_ROOT}/aarch64-clang-release/bin/maple", infiles=["${APP}"], outfile="${EXE}", - option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/SCO2_TEST.py b/testsuite/driver/src/mode/.mode/SCO2_TEST.py similarity index 94% rename from testsuite/driver/src/mode/SCO2_TEST.py rename to testsuite/driver/src/mode/.mode/SCO2_TEST.py index d9eb7d40bd764a33b727ea051e4c697ffa07312e..dfdde3ebb0e7dda26127e52c93fc5d77b268ccd8 100644 --- a/testsuite/driver/src/mode/SCO2_TEST.py +++ b/testsuite/driver/src/mode/.mode/SCO2_TEST.py @@ -35,7 +35,7 @@ SCO2_TEST = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}"], outfile="${EXE}", - option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/SCO2_TEST_MERGE.py b/testsuite/driver/src/mode/.mode/SCO2_TEST_MERGE.py similarity index 96% rename from testsuite/driver/src/mode/SCO2_TEST_MERGE.py rename to testsuite/driver/src/mode/.mode/SCO2_TEST_MERGE.py index 13d35010c22a5f0c5ea0e0421cee18bad9ed8ed0..d352941f4481573dcbc50f40793a1d4262511fe5 100644 --- a/testsuite/driver/src/mode/SCO2_TEST_MERGE.py +++ b/testsuite/driver/src/mode/.mode/SCO2_TEST_MERGE.py @@ -90,7 +90,7 @@ SCO2_TEST_MERGE = { 
maple="${OUT_ROOT}/aarch64-clang-release/bin/maple", infiles=["${APP}"], outfile="${EXE}", - option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/SCO2_TRAIN.py b/testsuite/driver/src/mode/.mode/SCO2_TRAIN.py similarity index 94% rename from testsuite/driver/src/mode/SCO2_TRAIN.py rename to testsuite/driver/src/mode/.mode/SCO2_TRAIN.py index 5010a38889e752dcff06363988bf2b5adfd532ba..9c03bf9ad6a869ee424bed713df0710ae60ebd3f 100644 --- a/testsuite/driver/src/mode/SCO2_TRAIN.py +++ b/testsuite/driver/src/mode/.mode/SCO2_TRAIN.py @@ -35,7 +35,7 @@ SCO2_TRAIN = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}"], outfile="${EXE}", - option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/SCO2_TRAIN_MERGE.py b/testsuite/driver/src/mode/.mode/SCO2_TRAIN_MERGE.py similarity index 96% rename from testsuite/driver/src/mode/SCO2_TRAIN_MERGE.py rename to testsuite/driver/src/mode/.mode/SCO2_TRAIN_MERGE.py index ff4353bbce27b42192e164b0cab14c20ef08ffc7..dc6bf4012042d6a404d2b04b5eef39b51e58c231 100644 --- a/testsuite/driver/src/mode/SCO2_TRAIN_MERGE.py +++ b/testsuite/driver/src/mode/.mode/SCO2_TRAIN_MERGE.py @@ -90,7 +90,7 @@ SCO2_TRAIN_MERGE = { maple="${OUT_ROOT}/aarch64-clang-release/bin/maple", infiles=["${APP}"], outfile="${EXE}", - option="-std=gnu99 --no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" ) ], "cp_data":[ diff --git a/testsuite/driver/src/mode/.mode/SCOS_TRAIN.py b/testsuite/driver/src/mode/.mode/SCOS_TRAIN.py new file mode 100644 index 
0000000000000000000000000000000000000000..2cb08f47c8b151b38769acb10dd90947d0999b8b --- /dev/null +++ b/testsuite/driver/src/mode/.mode/SCOS_TRAIN.py @@ -0,0 +1,56 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + + +SCOS_TRAIN = { + "compile": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP}.c"], + outfile="${APP}.o", + include_path=[ + "${MAPLE_BUILD_OUTPUT}/lib/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include" + ], + option="--Os -fPIC --no-pie -c", + extra_opt="${SPEC_PARAM}" + ) + ], + "link": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP}"], + outfile="${EXE}", + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + ) + ], + "cp_data":[ + Shell( + "cp -r data/train/${APP} ${TARGET}" + ) + ], + "run": [ + Shell( + "${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc ${EXE} ${APP} > output.log" + ) + ], + "compare": [ + Shell( + "${MAPLE_ROOT}/testsuite/c_test/spec_test/specperl ${MAPLE_ROOT}/testsuite/c_test/spec_test/specdiff -m -l 10 ${EXTRA_COMPARE} output.log data/train/${APP}" + ) + ] +} diff --git a/testsuite/driver/src/mode/.mode/SCOS_TRAIN_MERGE.py b/testsuite/driver/src/mode/.mode/SCOS_TRAIN_MERGE.py new file mode 100644 index 
0000000000000000000000000000000000000000..68212b486c1ca4b5afcc30b14e7533e8565fffbb --- /dev/null +++ b/testsuite/driver/src/mode/.mode/SCOS_TRAIN_MERGE.py @@ -0,0 +1,98 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + + +SCOS_TRAIN_MERGE = { + "c2ast": [ + C2ast( + clang="${OUT_ROOT}/tools/bin/clang", + include_path=[ + "${OUT_ROOT}/aarch64-clang-release/lib/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include" + ], + option="--target=aarch64", + infile="${APP}.c", + outfile="${APP}.ast", + extra_opt="${SPEC_PARAM}" + ) + ], + # multiple ast input + "ast2mpl": [ + Hir2mpl( + hir2mpl="${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl", + option="-wpaa", + infile="${APP}", + outfile="${TARGET}" + ) + ], + "c2mpl": [ + C2ast( + clang="${OUT_ROOT}/tools/bin/clang", + include_path=[ + "${OUT_ROOT}/aarch64-clang-release/lib/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include", + "${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include" + ], + option="--target=aarch64", + infile="${APP}.c", + outfile="${APP}.ast", + extra_opt="${SPEC_PARAM}" + ), + Hir2mpl( + hir2mpl="${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl", + option="-enable-variable-array -wpaa", + infile="${APP}.ast", + outfile="${APP}.mpl" + ) + ], + "merge_mpl":[ + Shell( + "cat ${APP} > ${TARGET}" + ) + ], + "mpl2o":[ + MapleDriver( + 
maple="${OUT_ROOT}/aarch64-clang-release/bin/maple", + infiles=["${APP}.mpl"], + outfile="${APP}.o", + option="--Os -fPIC --no-pie -c" + ) + ], + "link": [ + MapleDriver( + maple="${OUT_ROOT}/aarch64-clang-release/bin/maple", + infiles=["${APP}"], + outfile="${EXE}", + option="-std=gnu99 -no-pie -lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/" + ) + ], + "cp_data":[ + Shell( + "cp -r data/train/${APP} ${TARGET}" + ) + ], + "run": [ + Shell( + "${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc ${EXE} ${APP} > output.log" + ) + ], + "compare": [ + Shell( + "${MAPLE_ROOT}/testsuite/c_test/spec_test/specperl ${MAPLE_ROOT}/testsuite/c_test/spec_test/specdiff -m -l 10 ${EXTRA_COMPARE} output.log data/train/${APP}" + ) + ] +} diff --git a/testsuite/driver/src/mode/SP_ALL.py b/testsuite/driver/src/mode/.mode/SP_ALL.py similarity index 100% rename from testsuite/driver/src/mode/SP_ALL.py rename to testsuite/driver/src/mode/.mode/SP_ALL.py diff --git a/testsuite/driver/src/mode/SP_ALL_OLD.py b/testsuite/driver/src/mode/.mode/SP_ALL_OLD.py similarity index 100% rename from testsuite/driver/src/mode/SP_ALL_OLD.py rename to testsuite/driver/src/mode/.mode/SP_ALL_OLD.py diff --git a/testsuite/driver/src/mode/SP_STRONG.py b/testsuite/driver/src/mode/.mode/SP_STRONG.py similarity index 100% rename from testsuite/driver/src/mode/SP_STRONG.py rename to testsuite/driver/src/mode/.mode/SP_STRONG.py diff --git a/testsuite/driver/src/mode/SP_STRONG_OLD.py b/testsuite/driver/src/mode/.mode/SP_STRONG_OLD.py similarity index 100% rename from testsuite/driver/src/mode/SP_STRONG_OLD.py rename to testsuite/driver/src/mode/.mode/SP_STRONG_OLD.py diff --git a/testsuite/driver/src/mode/SUPMBCO2.py b/testsuite/driver/src/mode/.mode/SUPMBCO2.py similarity index 100% rename from testsuite/driver/src/mode/SUPMBCO2.py rename to testsuite/driver/src/mode/.mode/SUPMBCO2.py diff --git a/testsuite/driver/src/mode/SUPO0.py 
b/testsuite/driver/src/mode/.mode/SUPO0.py similarity index 100% rename from testsuite/driver/src/mode/SUPO0.py rename to testsuite/driver/src/mode/.mode/SUPO0.py diff --git a/testsuite/driver/src/mode/SUPO0_OLD.py b/testsuite/driver/src/mode/.mode/SUPO0_OLD.py similarity index 100% rename from testsuite/driver/src/mode/SUPO0_OLD.py rename to testsuite/driver/src/mode/.mode/SUPO0_OLD.py diff --git a/testsuite/driver/src/mode/SUPO2.py b/testsuite/driver/src/mode/.mode/SUPO2.py similarity index 100% rename from testsuite/driver/src/mode/SUPO2.py rename to testsuite/driver/src/mode/.mode/SUPO2.py diff --git a/testsuite/driver/src/mode/SUPO2_OLD.py b/testsuite/driver/src/mode/.mode/SUPO2_OLD.py similarity index 100% rename from testsuite/driver/src/mode/SUPO2_OLD.py rename to testsuite/driver/src/mode/.mode/SUPO2_OLD.py diff --git a/testsuite/driver/src/mode/SUPO2_SAFE.py b/testsuite/driver/src/mode/.mode/SUPO2_SAFE.py similarity index 100% rename from testsuite/driver/src/mode/SUPO2_SAFE.py rename to testsuite/driver/src/mode/.mode/SUPO2_SAFE.py diff --git a/testsuite/driver/src/mode/SUPO2_SAFE_OLD.py b/testsuite/driver/src/mode/.mode/SUPO2_SAFE_OLD.py similarity index 100% rename from testsuite/driver/src/mode/SUPO2_SAFE_OLD.py rename to testsuite/driver/src/mode/.mode/SUPO2_SAFE_OLD.py diff --git a/testsuite/driver/src/mode/.mode/TEST_O2.py b/testsuite/driver/src/mode/.mode/TEST_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..d00e909f87713e1180c8d9ee880d893ee5192cd4 --- /dev/null +++ b/testsuite/driver/src/mode/.mode/TEST_O2.py @@ -0,0 +1,54 @@ +# +# Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + + +TEST_O2 = { + "generate_shared_lib": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP1}.c"], + outfile="lib${LIB}.so", + include_path=["${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86"], + option="-shared -fPIC -I. -w -O2 -g" + ) + ], + "compile": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP2}.c"], + outfile="${APP2}.o", + include_path=["${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86"], + option="-O2 -c -w -g" + ) + ], + "link": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP2}.o","${APP3}.o"], + outfile="a.out", + option="-w -L. -l${LIB} -Wl,-rpath=." + ) + ], + "run": [ + Shell( + "${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc a.out > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/.mode/TEST_OS.py b/testsuite/driver/src/mode/.mode/TEST_OS.py new file mode 100644 index 0000000000000000000000000000000000000000..b0527e8480bc58563379d81ce178bc268cc4cbe4 --- /dev/null +++ b/testsuite/driver/src/mode/.mode/TEST_OS.py @@ -0,0 +1,54 @@ +# +# Copyright (c) [2023] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. 
+# See the Mulan PSL v2 for more details. +# + +from api import * + + +TEST_OS = { + "generate_shared_lib": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP1}.c"], + outfile="lib${LIB}.so", + include_path=["${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86"], + option="-shared -fPIC -I. -w -Os -g" + ) + ], + "compile": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP2}.c"], + outfile="${APP2}.o", + include_path=["${MAPLE_ROOT}/testsuite/c_test/csmith_test/runtime_x86"], + option="-Os -c -w -g" + ) + ], + "link": [ + MapleDriver( + maple="${MAPLE_BUILD_OUTPUT}/bin/maple", + infiles=["${APP2}.o","${APP3}.o"], + outfile="a.out", + option="-w -L. -l${LIB} -Wl,-rpath=." + ) + ], + "run": [ + Shell( + "${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc a.out > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/TSVO2.py b/testsuite/driver/src/mode/.mode/TSVO2.py similarity index 96% rename from testsuite/driver/src/mode/TSVO2.py rename to testsuite/driver/src/mode/.mode/TSVO2.py index 0802dfdae9875b8c4db7af0459380ae2553ee75d..f9eac5aebddf6500b56faa4df51085928c4dd1f5 100644 --- a/testsuite/driver/src/mode/TSVO2.py +++ b/testsuite/driver/src/mode/.mode/TSVO2.py @@ -34,7 +34,7 @@ TSVO2 = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", infiles=["${APP}"], outfile="a.exe", - option="-std=gnu99 --no-pie -std=c99 -lm", + option="-std=gnu99 -no-pie -std=c99 -lm", ) ], "run": [ diff --git a/testsuite/driver/src/mode/TSVO2_OLD.py b/testsuite/driver/src/mode/.mode/TSVO2_OLD.py similarity index 100% rename from testsuite/driver/src/mode/TSVO2_OLD.py rename to testsuite/driver/src/mode/.mode/TSVO2_OLD.py diff --git a/testsuite/driver/src/mode/X64_LITECG.py b/testsuite/driver/src/mode/.mode/X64_LITECG.py similarity index 100% rename from testsuite/driver/src/mode/X64_LITECG.py rename to 
testsuite/driver/src/mode/.mode/X64_LITECG.py diff --git a/testsuite/driver/src/mode/X64_LITECG_ME_O2.py b/testsuite/driver/src/mode/.mode/X64_LITECG_ME_O2.py similarity index 96% rename from testsuite/driver/src/mode/X64_LITECG_ME_O2.py rename to testsuite/driver/src/mode/.mode/X64_LITECG_ME_O2.py index 2be61a06321a8baa99fd64f396c572722e47e571..336f50580b63c5fff9b43aa7e3a675a072951653 100644 --- a/testsuite/driver/src/mode/X64_LITECG_ME_O2.py +++ b/testsuite/driver/src/mode/.mode/X64_LITECG_ME_O2.py @@ -36,7 +36,7 @@ X64_LITECG_ME_O2 = { maple="${MAPLE_BUILD_OUTPUT}/bin/maple", run=["me", "mplcg"], option={ - "me": "-O2 --skip-phases=slp --no-mergestmts --quiet", + "me": "-O2 --skip-phases=slp,sra --no-mergestmts --quiet", "mplcg": "-Olitecg --verbose-asm --verbose-cg --fPIC" }, global_option="--genVtableImpl --save-temps", diff --git a/testsuite/driver/src/mode/X64_LITECG_MPL2MPL_O2.py b/testsuite/driver/src/mode/.mode/X64_LITECG_MPL2MPL_O2.py similarity index 100% rename from testsuite/driver/src/mode/X64_LITECG_MPL2MPL_O2.py rename to testsuite/driver/src/mode/.mode/X64_LITECG_MPL2MPL_O2.py diff --git a/testsuite/driver/src/mode/X64_O0.py b/testsuite/driver/src/mode/.mode/X64_O0.py similarity index 100% rename from testsuite/driver/src/mode/X64_O0.py rename to testsuite/driver/src/mode/.mode/X64_O0.py diff --git a/testsuite/driver/src/mode/X64_O2.py b/testsuite/driver/src/mode/.mode/X64_O2.py similarity index 100% rename from testsuite/driver/src/mode/X64_O2.py rename to testsuite/driver/src/mode/.mode/X64_O2.py diff --git a/testsuite/driver/src/mode/.mode/__init__.py b/testsuite/driver/src/mode/.mode/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4784970c2c08e3d28962337eb9eed451c6d888ac --- /dev/null +++ b/testsuite/driver/src/mode/.mode/__init__.py @@ -0,0 +1,26 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +import os + +mode_dict = {} +my_dir = os.path.dirname(__file__) +for py in os.listdir(my_dir): + if py == '__init__.py': + continue + + if py.endswith('.py'): + name = py[:-3] + mode = __import__(__name__, globals(), locals(), ['%s' % name]) + mode_dict[name] = getattr(getattr(mode, name), name) \ No newline at end of file diff --git a/testsuite/driver/src/mode/AOT.py b/testsuite/driver/src/mode/AOT.py new file mode 100644 index 0000000000000000000000000000000000000000..9a33481bf58de4e786b5a3c816a5891b3efab867 --- /dev/null +++ b/testsuite/driver/src/mode/AOT.py @@ -0,0 +1,116 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +AOT = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_AOT/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-AOT/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps --aot", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-AOT", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-AOT", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + 
infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-AOT", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-AOT", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O0HD.py b/testsuite/driver/src/mode/ARM32O0HD.py new file mode 100644 index 0000000000000000000000000000000000000000..c60923aba8f4d0824750d588a957747011e14318 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O0HD.py @@ -0,0 +1,120 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32O0HD = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + 
infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O0NATIVEHD.py b/testsuite/driver/src/mode/ARM32O0NATIVEHD.py new file mode 100644 index 0000000000000000000000000000000000000000..031639fa111e9a92a0f11c68c23b57cbf1301c09 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O0NATIVEHD.py @@ -0,0 +1,131 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32O0NATIVEHD = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm32_hard" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + 
qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O0NATIVESFP.py 
b/testsuite/driver/src/mode/ARM32O0NATIVESFP.py new file mode 100644 index 0000000000000000000000000000000000000000..4b72c675e549d88d0ac9099e33d8536bdfc830f4 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O0NATIVESFP.py @@ -0,0 +1,131 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32O0NATIVESFP = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm32_softfp" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list 
--nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", 
+ infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O0RCHD.py b/testsuite/driver/src/mode/ARM32O0RCHD.py new file mode 100644 index 0000000000000000000000000000000000000000..204f4d1413ff2eb1c8be5594101d21eb9e05303a --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O0RCHD.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32O0RCHD = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + 
file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O0RCSFP.py b/testsuite/driver/src/mode/ARM32O0RCSFP.py new file mode 100644 index 0000000000000000000000000000000000000000..58655cbb47c91896da7a58035547b7943df37f24 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O0RCSFP.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32O0RCSFP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + 
infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O0SFP.py b/testsuite/driver/src/mode/ARM32O0SFP.py new file mode 100644 index 0000000000000000000000000000000000000000..6506189a0f13ca253ed4aee2b5e3af87fa06a915 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O0SFP.py @@ -0,0 +1,120 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32O0SFP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + 
qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O2HD.py b/testsuite/driver/src/mode/ARM32O2HD.py new file mode 100644 index 0000000000000000000000000000000000000000..011adf0a490748d1494a74fad39887a16751d70c --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O2HD.py @@ -0,0 +1,116 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32O2HD = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl","mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg":"--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-hard_O2", + model="arm32_hard", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + 
"${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O2NATIVEHD.py b/testsuite/driver/src/mode/ARM32O2NATIVEHD.py new file mode 100644 index 0000000000000000000000000000000000000000..01eb181bfe4ae47aa695928ebb294fae31f60858 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O2NATIVEHD.py @@ -0,0 +1,129 @@ +# +# Copyright (c) 
[2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32O2NATIVEHD = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm32_hard" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl","mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg":"--O2 
--quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-hard_O2", + model="arm32_hard", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC 
Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O2NATIVESFP.py b/testsuite/driver/src/mode/ARM32O2NATIVESFP.py new file mode 100644 index 0000000000000000000000000000000000000000..8cf5ff48a70fdaaa34e20faa772b86efd6005030 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O2NATIVESFP.py @@ -0,0 +1,129 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32O2NATIVESFP = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm32_softfp" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 
-litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --float-abi=softfp --no-pie --verbose-asm --fPIC --gen-c-macro-def --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O2", + model="arm32_softfp", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": 
"1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O2RCHD.py b/testsuite/driver/src/mode/ARM32O2RCHD.py new file mode 100644 index 0000000000000000000000000000000000000000..4afbf9dc569f2e03d8c34683cd2a2d7bef725d59 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O2RCHD.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32O2RCHD = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-hard_O2", + model="arm32_hard", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + 
redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O2RCSFP.py b/testsuite/driver/src/mode/ARM32O2RCSFP.py new file mode 100644 index 0000000000000000000000000000000000000000..e8fa848d2b8dc04e944b691e34984f7654c5950f --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O2RCSFP.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32O2RCSFP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --float-abi=softfp --no-pie --verbose-asm --fPIC --gen-c-macro-def --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O2", + model="arm32_softfp", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + 
xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32O2SFP.py b/testsuite/driver/src/mode/ARM32O2SFP.py new file mode 100644 index 0000000000000000000000000000000000000000..71adc72007452b7cea00a39c7d9e2ff25cefeef7 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32O2SFP.py @@ -0,0 +1,116 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32O2SFP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --float-abi=softfp --no-pie --verbose-asm --fPIC --gen-c-macro-def --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O2", + model="arm32_softfp", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + 
CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32ZTERPCLASSLOADERHD.py b/testsuite/driver/src/mode/ARM32ZTERPCLASSLOADERHD.py new file mode 100644 index 0000000000000000000000000000000000000000..7d499e3af035eb1bd19b852808bd5a6a5129b01c --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPCLASSLOADERHD.py @@ -0,0 +1,299 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32ZTERPCLASSLOADERHD = { + "java2dex": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "compile": [ + Shell( + 'cp ../lib/child.jar ./ ' + ), + Shell( + 'cp ../lib/parent.jar ./ ' + ), + Shell( + 'cp ../lib/inject.jar ./ ' + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="child.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm 
--maplelinker --fPIC" + }, + global_option="--save-temps", + infile="child.dex" + ), + Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="child" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="parent.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="parent.dex" + ), + Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="parent" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="inject.jar" + ), + Maple( 
+ maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="inject.dex" + ), + Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="inject" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + 
Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + 
CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)' + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)' + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)' + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="rcverify.log" + ), + CheckRegContain( + 
reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32ZTERPCLASSLOADERSFP.py b/testsuite/driver/src/mode/ARM32ZTERPCLASSLOADERSFP.py new file mode 100644 index 0000000000000000000000000000000000000000..d934d314a87b3d7651abedc7d922f06893c894f0 --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPCLASSLOADERSFP.py @@ -0,0 +1,299 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32ZTERPCLASSLOADERSFP = { + "java2dex": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "compile": [ + Shell( + 'cp ../lib/child.jar ./ ' + ), + Shell( + 'cp ../lib/parent.jar ./ ' + ), + Shell( + 'cp ../lib/inject.jar ./ ' + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="child.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="child.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="child" + ), + Jar2dex( + jar_file=[ + 
"${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="parent.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="parent.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="parent" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="inject.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt 
${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="inject.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="inject" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="${APP}" + ) + ], + 
"run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + 
file="rcverify.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)' + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)' + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)' + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + 
file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32ZTERPDEXSOHD.py b/testsuite/driver/src/mode/ARM32ZTERPDEXSOHD.py new file mode 100644 index 0000000000000000000000000000000000000000..911b6f7577b5c5d8050dd1a2d7ab2bc26cb6e57a --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPDEXSOHD.py @@ -0,0 +1,144 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32ZTERPDEXSOHD = { + "java2dex":[ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "java2dex_simplejava":[ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"], + usesimplejava=True + ) + ], + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-hard_O0", + model="arm32_hard", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true" + }, + 
qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32ZTERPDEXSOSFP.py 
b/testsuite/driver/src/mode/ARM32ZTERPDEXSOSFP.py new file mode 100644 index 0000000000000000000000000000000000000000..36106bbe43cab8fa093d2ee8bfe23a39992050fe --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPDEXSOSFP.py @@ -0,0 +1,144 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ARM32ZTERPDEXSOSFP = { + "java2dex": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "java2dex_simplejava": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"], + usesimplejava=True + ) + ], + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm32/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0/libcore-all.mplt 
-litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --float-abi=softfp --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-softfp_O0", + model="arm32_softfp", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-arm", + 
qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32ZTERPHD.py b/testsuite/driver/src/mode/ARM32ZTERPHD.py new file mode 100644 index 0000000000000000000000000000000000000000..e6af20039d714de87a1e62a4aad6f0f575fc701f --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPHD.py @@ -0,0 +1,106 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32ZTERPHD = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", 
+ infile="${APP}.dex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ARM32ZTERPRCHD.py b/testsuite/driver/src/mode/ARM32ZTERPRCHD.py new file mode 100644 index 0000000000000000000000000000000000000000..fa64e949b73b2b867c98a8e19626e3486db77bfd --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPRCHD.py @@ -0,0 +1,63 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32ZTERPRCHD = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabihf", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/hard", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-hard_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_hard", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log", + choice="num" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/ARM32ZTERPRCSFP.py b/testsuite/driver/src/mode/ARM32ZTERPRCSFP.py new file mode 100644 index 0000000000000000000000000000000000000000..2908566504105d97bba34c9b0550c8c81974dc9b --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPRCSFP.py @@ -0,0 +1,63 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32ZTERPRCSFP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log", + choice="num" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/ARM32ZTERPSFP.py b/testsuite/driver/src/mode/ARM32ZTERPSFP.py new file mode 100644 index 0000000000000000000000000000000000000000..ef76cd430b22f03e08a3c8283b88a50acd61ef7a --- /dev/null +++ b/testsuite/driver/src/mode/ARM32ZTERPSFP.py @@ -0,0 +1,106 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ARM32ZTERPSFP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-arm", + qemu_libc="/usr/arm-linux-gnueabi", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm32/third-party/softfp", + "${OUT_ROOT}/target/product/maple_arm32/lib/host-x86_64-softfp_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm32/bin/mplsh_arm_softfp", + garbage_collection_kind="RC", + 
xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/COMPACT.py b/testsuite/driver/src/mode/COMPACT.py new file mode 100644 index 0000000000000000000000000000000000000000..ee855cadc204bcc6d8a7aa9ea1ab7433a6ec4d88 --- /dev/null +++ b/testsuite/driver/src/mode/COMPACT.py @@ -0,0 +1,123 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +COMPACT = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "mplipa": "--quiet --effectipa", + "me": "-O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker --compact-meta --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "-O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Shell( + "python3 ${OUT_ROOT}/target/product/public/bin/check_compact.py ${APP}.VtableImpl.s > output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + 
"${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/CORT.py b/testsuite/driver/src/mode/CORT.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed57913e8e1452fe24161e9f6d84a4da5850d6b --- /dev/null +++ b/testsuite/driver/src/mode/CORT.py @@ -0,0 +1,30 @@ +# +# Copyright (c) [2021] Huawei Technologies 
Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +CORT = { + "compile": [ + Shell("/usr/bin/clang-9 -O2 -g3 -c -fPIC -march=armv8-a -target aarch64-linux-gnu -I${MAPLE_ROOT}/mrt/coroutine/api/ ${APP}.c;/usr/bin/clang++-9 -s -fuse-ld=lld -O2 -g -Wall -fstack-protector-strong -fPIC -Werror -fPIE -rdynamic -pie -W -Wno-macro-redefined -Wno-inconsistent-missing-override -Wno-deprecated -Wno-unused-command-line-argument -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/../../../../aarch64-linux-gnu/include/c++/5 -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/../../../../aarch64-linux-gnu/include/c++/5/aarch64-linux-gnu -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/../../../../aarch64-linux-gnu/include/c++/5/backward -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/include -isystem /usr/lib/gcc-cross/aarch64-linux-gnu/5/include-fixed -isystem /usr/aarch64-linux-gnu/include -target aarch64-linux-gnu -Wl,-z,relro -Wl,-z,now -Wl,-z,noexecstack -fPIE -o test.out -Wl,--start-group test.o -L${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/host-x86_64-O2/ -lcoroutine -ldl -lhuawei_secure_c -Wl,--end-group") + ], + "run": [ + Shell( + "/usr/bin/qemu-aarch64 -L /usr/aarch64-linux-gnu -E LD_LIBRARY_PATH=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2:./ ./${APP}.out > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/DEOPT.py b/testsuite/driver/src/mode/DEOPT.py new file mode 100644 index 
0000000000000000000000000000000000000000..02e5d552144384a9b0078104025bec991173c265 --- /dev/null +++ b/testsuite/driver/src/mode/DEOPT.py @@ -0,0 +1,60 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +DEOPT = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE/libcore-all.mplt", + "me": "--gconly --O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--gconly --O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--gconly --O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --with-ra-linear-scan --no-ico 
--no-cfgo --no-prepeep --no-peep --no-ebo --no-storeloadopt --no-globalopt --no-schedule --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --aot --deopt --ifile", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/DEPENDENCE.py b/testsuite/driver/src/mode/DEPENDENCE.py new file mode 100644 index 0000000000000000000000000000000000000000..03e887b1c5159b2e653f6e245ffe8bacc0eba563 --- /dev/null +++ b/testsuite/driver/src/mode/DEPENDENCE.py @@ -0,0 +1,27 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +DEPENDENCE = { + "gendeps": [ + Gendeps( + gendeps="${OUT_ROOT}/target/product/maple_arm64/bin/gendeps", + apk="${APK}", + emui="${EMUI}", + extra_option="${EXTRA_OPTION}", + infile="${APP}.dex" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/DEX.py b/testsuite/driver/src/mode/DEX.py new file mode 100644 index 0000000000000000000000000000000000000000..b01e76a088b5ed314b02c9de48b9335324ae083c --- /dev/null +++ b/testsuite/driver/src/mode/DEX.py @@ -0,0 +1,39 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +DEX = { + "compile": [ + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + infile="${APP}.dex" + ), + Irbuild( + irbuild="${OUT_ROOT}/target/product/maple_arm64/bin/irbuild", + infile="${APP}.mpl" + ), + Mplverf( + mplverf="${OUT_ROOT}/target/product/maple_arm64/bin/mplverf", + infile="${APP}.mpl" + ), + Mplme( + mplme="${OUT_ROOT}/target/product/maple_arm64/bin/mplme", + option="-O2", + infile="${APP}.mpl" + ) + ], + "run": [] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/GCO0.py b/testsuite/driver/src/mode/GCO0.py new file mode 100644 index 0000000000000000000000000000000000000000..e8ec39d377fa7d937cb88f965283264f30a3c2ad --- /dev/null +++ b/testsuite/driver/src/mode/GCO0.py @@ -0,0 +1,65 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +GCO0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO0/libcore-all.mplt -anti-proguard-auto -dexcatch -gconly -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=32 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -opt-switch-disable -refine-catch -staticstringcheck", + "me": "--quiet --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-GCO0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/GCO0NATIVE.py 
b/testsuite/driver/src/mode/GCO0NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..d0f748dcea65e45885615f81875a25c760a08628 --- /dev/null +++ b/testsuite/driver/src/mode/GCO0NATIVE.py @@ -0,0 +1,76 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +GCO0NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO0", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO0/libcore-all.mplt -anti-proguard-auto -dexcatch -gconly -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=32 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -opt-switch-disable -refine-catch -staticstringcheck", + "me": "--quiet --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal 
--check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-GCO0", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO0", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/GCO2.py b/testsuite/driver/src/mode/GCO2.py new file mode 100644 index 0000000000000000000000000000000000000000..a98c34d5947088614aa78c960189788be0293337 --- /dev/null +++ b/testsuite/driver/src/mode/GCO2.py @@ -0,0 +1,66 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +GCO2 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gconly -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=32 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -opt-switch-disable -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-GCO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + 
xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/GCO2NATIVE.py b/testsuite/driver/src/mode/GCO2NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..7285ce51e1fcb1de7bdb825ad6880c4dca11fe8a --- /dev/null +++ b/testsuite/driver/src/mode/GCO2NATIVE.py @@ -0,0 +1,77 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +GCO2NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gconly -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=32 -j100 
-litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -opt-switch-disable -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-GCO2", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2", + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/GCONLY.py b/testsuite/driver/src/mode/GCONLY.py new file mode 100644 index 0000000000000000000000000000000000000000..9a06ec9bc521c2648b7ec4eda0128f5bdd6772eb --- /dev/null +++ b/testsuite/driver/src/mode/GCONLY.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * +from env_var import EnvVar + +GCONLY = { + "compile": [ + BenchmarkVogar(), + Shell( + "mv ${BENCHMARK_ACTION}/${BENCHMARK_ACTION}.dex.jar ${BENCHMARK_ACTION}/${BENCHMARK_ACTION}.jar;" + "if [ -d \"${BENCHMARK_ACTION}/dex\" ]; then" + " rm -rf ${BENCHMARK_ACTION}/dex;" + "fi;" + "unzip -q ${BENCHMARK_ACTION}/${BENCHMARK_ACTION}.jar -d ${BENCHMARK_ACTION}/dex" + ), + Maple( + maple="${MAPLE_ROOT}/../out/soong/host/linux-x86/bin/maple", + run=["dex2mpl"], + option={ + "dex2mpl": "-checktool -check-invoke -invoke-checklist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/classloaderInvocation.list -check-incomplete -incomplete-whitelist=${MAPLE_ROOT}/mrt/codetricks/compile/incomplete.list -incomplete-detail -staticstringcheck --inlinefunclist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/to_inline.list -dexcatch -litprofile=${MAPLE_ROOT}/mrt/codetricks/profile.pv/meta.list -gconly -output=${BENCHMARK_ACTION}/dex/ -mplt=${MAPLE_ROOT}/../out/soong/.intermediates/vendor/huawei/maple/Lib/core/libmaplecore-all/android_arm64_armv8-a_core_shared/obj/classes.mplt" + }, + global_option="", + infile="${BENCHMARK_ACTION}/dex/classes.dex" + ), + Maple( + maple="${MAPLE_ROOT}/../out/soong/host/linux-x86/bin/maple", + run=["mplipa"], + option={ + "mplipa": "--effectipa --quiet --inlinefunclist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/inline_funcs.list" + }, + global_option="", + infile="${BENCHMARK_ACTION}/dex/classes.mpl > /dev/null" + ), + Maple( + maple="${MAPLE_ROOT}/../out/soong/host/linux-x86/bin/maple", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "--inlinefunclist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/inline_funcs.list -O2 --quiet --no-ignoreipa --gconly", + 
"mpl2mpl": "-regnativefunc --quiet -O2 --usewhiteclass --maplelinker --dump-muid --check_cl_invocation=${MAPLE_ROOT}/mrt/codetricks/profile.pv/classloaderInvocation.list --regnative-dynamic-only", + "mplcg": "-O2 --quiet --no-pie --nativeopt --verbose-asm --gen-c-macro-def --maplelinker --gsrc --duplicate_asm_list2=${MAPLE_ROOT}/mrt/compiler-rt/src/arch/arm64/fastFuncs.S --gconly --fPIC" + }, + global_option="--genVtableImpl", + infile="${BENCHMARK_ACTION}/dex/classes.mpl" + ), + Shell( + "${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin/clang -target aarch64-linux-android -g -c -x assembler-with-cpp -D__ASSEMBLY__ -DUSE_32BIT_REF -DGCONLY=1 -MD -MF ${BENCHMARK_ACTION}/dex/classes.d -o ${BENCHMARK_ACTION}/dex/classes.o ${BENCHMARK_ACTION}/dex/classes.VtableImpl.s" + ), + Shell( + "${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin/llvm-objcopy --rename-section .debug_info=.maple_java_debug_info --rename-section .debug_abbrev=.maple_java_debug_abbrev --rename-section .debug_line=.maple_java_debug_line --rename-section .debug_aranges=.maple_java_debug_aranges --rename-section .debug_ranges=.maple_java_debug_ranges ${BENCHMARK_ACTION}/dex/classes.o" + ), + Shell( + "${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin/clang++ -nostdlib -Wl,-soname,libmaple${BENCHMARK_ACTION}.so -Wl,--gc-sections -shared ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libc/crtbegin_so/android_arm64_armv8-a_core/crtbegin_so.o ${BENCHMARK_ACTION}/dex/classes.o -Wl,--whole-archive ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/mrt_module_init_intermediates/mrt_module_init.a -Wl,--no-whole-archive ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/libclang_rt.ubsan_minimal-aarch64-android_intermediates/libclang_rt.ubsan_minimal-aarch64-android.a ${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/lib64/clang/9.0.3/lib/linux//libclang_rt.builtins-aarch64-android.a 
${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/libatomic_intermediates/libatomic.a ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/libgcc_intermediates/libgcc.a -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--build-id=md5 -Wl,--warn-shared-textrel -Wl,--fatal-warnings -Wl,--no-undefined-version -Wl,--exclude-libs,libgcc.a -Wl,--exclude-libs,libgcc_stripped.a -fuse-ld=lld -Wl,--hash-style=gnu -Wl,--icf=safe -Wl,-z,max-page-size=4096 -target aarch64-linux-android -B${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/bin -Wl,-T,${MAPLE_ROOT}/mrt/maplert/linker/maplelld.so.lds -Wl,-execute-only -Wl,--exclude-libs,libclang_rt.ubsan_minimal-aarch64-android.a -Wl,--no-undefined ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libmaplecore-all_intermediates/libmaplecore-all.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libmrt_intermediates/libmrt.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libcommon_bridge_intermediates/libcommon_bridge.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libc++_intermediates/libc++.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libc_intermediates/libc.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libm_intermediates/libm.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libdl_intermediates/libdl.so -o ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}Symbol.so ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libc/crtend_so/android_arm64_armv8-a_core/obj/bionic/libc/arch-common/bionic/crtend_so.o" + ), + Shell( + "CLANG_BIN=${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin CROSS_COMPILE=${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android- XZ=${MAPLE_ROOT}/../prebuilts/build-tools/linux-x86/bin/xz 
${MAPLE_ROOT}/../build/soong/scripts/strip.sh -i ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}Symbol.so -o ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so -d ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.d --keep-mini-debug-info" + ), + Shell( + "(${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android-readelf -d ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so | grep SONAME || echo \"No SONAME for ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so\") > ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc.tmp;" + "${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android-readelf --dyn-syms ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so | awk '{$2=\"\"; $3=\"\"; print}' >> ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc.tmp;" + "mv ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc.tmp ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc;" + "cp ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so ${BENCHMARK_ACTION}" + ), + ], + "native_compile": [ + BenchmarkNative() + ] +} diff --git a/testsuite/driver/src/mode/GC_IFILE.py b/testsuite/driver/src/mode/GC_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..3968771ed853e699190e28d414a1f7b26f9691ce --- /dev/null +++ b/testsuite/driver/src/mode/GC_IFILE.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +GC_IFILE = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_IFILE/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GC_IFILE/libcore-all.mplt -dexcatch -gconly -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --threads=4 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl --gconly", + "mplcg": "--O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold --gconly" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GC_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + 
garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/GC_IFILE_NATIVE.py b/testsuite/driver/src/mode/GC_IFILE_NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..a26247648ffa65bca0a8dd457f99c340c099e083 --- /dev/null +++ b/testsuite/driver/src/mode/GC_IFILE_NATIVE.py @@ -0,0 +1,72 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +GC_IFILE_NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GC_IFILE", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64_ifile" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_IFILE/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GC_IFILE/libcore-all.mplt -dexcatch -gconly -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --threads=4 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl --gconly", + "mplcg": "--O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold --gconly" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + 
qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GC_IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_COMMON_RC_IFILE.py b/testsuite/driver/src/mode/HIR2MPL_COMMON_RC_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..d12907eed1a22d8d9e70c06823f5ab1c17348487 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_COMMON_RC_IFILE.py @@ -0,0 +1,110 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_COMMON_RC_IFILE = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE/libcore-all.mplt", + "me": "--O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --with-ra-linear-scan --no-ico --no-cfgo --no-prepeep --no-peep --no-ebo --no-storeloadopt --no-globalopt --no-schedule --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + 
file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.ohex", + xbootclasspath="libcore-all.ohex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_COMMON_RC_IFILE_O0.py b/testsuite/driver/src/mode/HIR2MPL_COMMON_RC_IFILE_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc85880e6aa46481f1b2a06b12845515d1a6bd2 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_COMMON_RC_IFILE_O0.py @@ -0,0 +1,109 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +from api import * + +HIR2MPL_COMMON_RC_IFILE_O0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE_O0/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0/libcore-all.mplt", + "me": "--quiet --enable-ea --aot", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + 
CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.ohex", + xbootclasspath="libcore-all.ohex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_CSTO0.py b/testsuite/driver/src/mode/HIR2MPL_CSTO0.py new file mode 100644 index 0000000000000000000000000000000000000000..0fe9e86be82c3e554c9876506a1a1a83b5b30b32 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_CSTO0.py @@ -0,0 +1,68 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_CSTO0 = { + "compile": [ + Shell( + "cp ${OUT_ROOT}/target/product/public/lib/libcore-all.dex ." + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "", + "me": "--quiet --ignore-inferred-ret-type --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal", + "mplcg": "--quiet --no-pie --verbose-asm --gen-c-macro-def --gconly --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Shell( + "mv ${APP}.mpl serial_${APP}.mpl;" + "mv ${APP}.VtableImpl.mpl serial_${APP}.VtableImpl.mpl;" + "mv ${APP}.VtableImpl.s serial_${APP}.VtableImpl.s" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--np 4", + "me": "--quiet --threads=4 --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal", + "mplcg": "--quiet --no-pie --verbose-asm --gen-c-macro-def --gconly --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Shell( + "mv ${APP}.mpl parallel_${APP}.mpl;" + "mv ${APP}.VtableImpl.mpl parallel_${APP}.VtableImpl.mpl;" + "mv ${APP}.VtableImpl.s parallel_${APP}.VtableImpl.s" + ) + ], + "check": [ + Shell( + "diff serial_${APP}.mpl parallel_${APP}.mpl" + ), + Shell( + "diff serial_${APP}.VtableImpl.mpl parallel_${APP}.VtableImpl.mpl" + ), 
+ Shell( + "diff serial_${APP}.VtableImpl.s parallel_${APP}.VtableImpl.s" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_CSTO2.py b/testsuite/driver/src/mode/HIR2MPL_CSTO2.py new file mode 100644 index 0000000000000000000000000000000000000000..961a8777c0b49c0de149ddd1bc0d27f3f8220c8f --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_CSTO2.py @@ -0,0 +1,68 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_CSTO2 = { + "compile": [ + Shell( + "cp ${OUT_ROOT}/target/product/public/lib/libcore-all.dex ." 
+ ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --ignore-inferred-ret-type --gconly --movinggc --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --gconly --movinggc --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Shell( + "mv ${APP}.mpl serial_${APP}.mpl;" + "mv ${APP}.VtableImpl.mpl serial_${APP}.VtableImpl.mpl;" + "mv ${APP}.VtableImpl.s serial_${APP}.VtableImpl.s" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--np 4", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --threads=4 --gconly --movinggc --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --gconly --movinggc --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Shell( + "mv ${APP}.mpl parallel_${APP}.mpl;" + "mv ${APP}.VtableImpl.mpl parallel_${APP}.VtableImpl.mpl;" + "mv ${APP}.VtableImpl.s parallel_${APP}.VtableImpl.s" + ) + ], + "check": [ + Shell( + "diff serial_${APP}.mpl parallel_${APP}.mpl" + ), + Shell( + "diff serial_${APP}.VtableImpl.mpl parallel_${APP}.VtableImpl.mpl" + ), + Shell( + "diff serial_${APP}.VtableImpl.s 
parallel_${APP}.VtableImpl.s" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO0.py b/testsuite/driver/src/mode/HIR2MPL_DEXO0.py new file mode 100644 index 0000000000000000000000000000000000000000..e2e47b7b839306430119b7ca4efa04349e104f7f --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO0.py @@ -0,0 +1,65 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_DEXO0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0/libcore-all.mplt", + "me": "--quiet --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO0", + model="arm64", 
+ infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + xbootclasspath="libcore-all.so", + garbage_collection_kind="GC", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO0_NATIVE.py b/testsuite/driver/src/mode/HIR2MPL_DEXO0_NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..1b44c5e94a16ac9c981e93538746fa1808116df2 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO0_NATIVE.py @@ -0,0 +1,76 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_DEXO0_NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0/libcore-all.mplt", + "me": "--quiet --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO0", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO2.py 
b/testsuite/driver/src/mode/HIR2MPL_DEXO2.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0106ccdea3a1ef3befe4af13aed24099f8403c --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO2.py @@ -0,0 +1,66 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_DEXO2 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc 
--gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO2_JCK.py b/testsuite/driver/src/mode/HIR2MPL_DEXO2_JCK.py new file mode 100644 index 0000000000000000000000000000000000000000..08a0894b401870f62e57b95fb756203659cc6874 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO2_JCK.py @@ -0,0 +1,67 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_DEXO2_JCK = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "../../lib", + ".", + "${OUT_ROOT}/target/product/public/lib" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + 
xbootclasspath="libcore-all.so", + infile="${APP}.so", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO2_JTREG.py b/testsuite/driver/src/mode/HIR2MPL_DEXO2_JTREG.py new file mode 100644 index 0000000000000000000000000000000000000000..488c3d306498ad5ca3d30f3149001652eb717a28 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO2_JTREG.py @@ -0,0 +1,65 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_DEXO2_JTREG = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc 
--no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${CLASSPATH}", + main="${MAIN}", + args="${ARGS}", + return_value_list=[0, 95] + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO2_NATIVE.py b/testsuite/driver/src/mode/HIR2MPL_DEXO2_NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..77284701401605f1af3c9a87e6a2a9a0c9177df0 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO2_NATIVE.py @@ -0,0 +1,77 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_DEXO2_NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + 
garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC.py b/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC.py new file mode 100644 index 0000000000000000000000000000000000000000..725ea71a7c707de4e29e6392c94a10607288b038 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_DEXO2_RC = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2_RC", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + 
CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC_COMMON.py b/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC_COMMON.py new file mode 100644 index 0000000000000000000000000000000000000000..c43c52d24ee7d9d10589be00a784f263c61f469b --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC_COMMON.py @@ -0,0 +1,116 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_DEXO2_RC_COMMON = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid 
--check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2_RC", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + 
), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC_NATIVE.py b/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC_NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..6a2f7f22fe794cb07bfb98dea9a5674e2403af29 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_DEXO2_RC_NATIVE.py @@ -0,0 +1,129 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_DEXO2_RC_NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--dump-comment --dump-LOC --rc --mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2_RC", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC" + ], + 
mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2_RC" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_IFILE.py b/testsuite/driver/src/mode/HIR2MPL_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..488d88f060afe9ecf6a42d9f2621e6f824c437d0 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_IFILE.py @@ -0,0 +1,60 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_IFILE = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE/libcore-all.mplt", + "me": "--gconly --O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--gconly --O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--gconly --O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --with-ra-linear-scan --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + 
"${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_IFILE_NATIVE.py b/testsuite/driver/src/mode/HIR2MPL_IFILE_NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..d923a4efc30dc2ee0f87b671a7d854fb714318c8 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_IFILE_NATIVE.py @@ -0,0 +1,71 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_IFILE_NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64_ifile" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE/libcore-all.mplt", + "me": "--gconly --O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--gconly --O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--gconly --O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", 
+ garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_IFILE_O0.py b/testsuite/driver/src/mode/HIR2MPL_IFILE_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..14b71080826c62e56dd4ca629d40a191ccf3fe04 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_IFILE_O0.py @@ -0,0 +1,60 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_IFILE_O0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_IFILE_O0/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE_O0/libcore-all.mplt", + "me": "--gconly --quiet --enable-ea", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl --gconly", + "mplcg": "--quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold --gconly" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_NATIVE_RC_IFILE.py b/testsuite/driver/src/mode/HIR2MPL_NATIVE_RC_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..420033c50ade805ebe719e5c011ad01a6319bea2 --- 
/dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_NATIVE_RC_IFILE.py @@ -0,0 +1,123 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_NATIVE_RC_IFILE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64_ifile" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--dump-comment --dump-LOC --rc --mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE/libcore-all.mplt", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker 
--duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.ohex", + xbootclasspath="libcore-all.ohex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + 
CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_NATIVE_RC_IFILEEH.py b/testsuite/driver/src/mode/HIR2MPL_NATIVE_RC_IFILEEH.py new file mode 100644 index 0000000000000000000000000000000000000000..679575e0bd77e4067f6d0b51022c16ba377f816d --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_NATIVE_RC_IFILEEH.py @@ -0,0 +1,71 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_NATIVE_RC_IFILEEH = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64_ifile" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--dump-comment --dump-LOC --rc --mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE/libcore-all.mplt", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + 
garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_PANDAO0.py b/testsuite/driver/src/mode/HIR2MPL_PANDAO0.py new file mode 100644 index 0000000000000000000000000000000000000000..27c5a50bfe1a5983a102e572e0c1b278d5e93606 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_PANDAO0.py @@ -0,0 +1,70 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_PANDAO0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java", "${EXTRA_JAVA_FILE}"] + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile=".", + outfile="${APP}.bin" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0/libcore-all.mplt", + "me": "--quiet --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.bin" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + xbootclasspath="libcore-all.so", + garbage_collection_kind="GC", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_PANDAO0_NATIVE.py b/testsuite/driver/src/mode/HIR2MPL_PANDAO0_NATIVE.py new file mode 100644 index 
0000000000000000000000000000000000000000..a6a16f9fe2a3d3fdecc03feb44fdfab620097b4b --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_PANDAO0_NATIVE.py @@ -0,0 +1,81 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +HIR2MPL_PANDAO0_NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile=".", + outfile="${APP}.bin" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0/libcore-all.mplt", + "me": "--quiet --gconly", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker 
--duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.bin" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO0", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_PANDAO2.py b/testsuite/driver/src/mode/HIR2MPL_PANDAO2.py new file mode 100644 index 0000000000000000000000000000000000000000..598468fd4bbd90fbba5f190e446ec707faa98ab2 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_PANDAO2.py @@ -0,0 +1,71 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_PANDAO2 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java", "${EXTRA_JAVA_FILE}"] + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile=".", + outfile="${APP}.bin" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.bin" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + 
file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_PANDAO2_JCK.py b/testsuite/driver/src/mode/HIR2MPL_PANDAO2_JCK.py new file mode 100644 index 0000000000000000000000000000000000000000..7db6156cc2d525075aaf9a075e1e7b7e58fa17d1 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_PANDAO2_JCK.py @@ -0,0 +1,56 @@ +from api import * + +HIR2MPL_PANDAO2_JCK = { + "compile": [ + Unzip( + file="${APP}.jar", + target_path="${APP}" + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile="${APP}", + outfile="${APP}.bin" + ), + Shell( + "rm -rf ${APP}" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.bin" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "../../lib", + ".", + "${OUT_ROOT}/target/product/public/lib" + ], + 
mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_PANDAO2_JTREG.py b/testsuite/driver/src/mode/HIR2MPL_PANDAO2_JTREG.py new file mode 100644 index 0000000000000000000000000000000000000000..2f053d6f3ea06e26f2d7b92036f8e38dead886d9 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_PANDAO2_JTREG.py @@ -0,0 +1,68 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_PANDAO2_JTREG = { + "compile": [ + Unzip( + file="${APP}.jar", + target_path="${APP}" + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile="${APP}", + outfile="${APP}.bin" + ), + Shell( + "rm -rf ${APP}" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.bin" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${CLASSPATH}", + main="${MAIN}", + args="${ARGS}", + return_value_list=[0, 95] + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_PANDAO2_NATIVE.py b/testsuite/driver/src/mode/HIR2MPL_PANDAO2_NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..77141aef90efbed04d890c4c45426a55b8998805 --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_PANDAO2_NATIVE.py @@ -0,0 +1,82 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_PANDAO2_NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile=".", + outfile="${APP}.bin" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL/maple", + run=["hir2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2/libcore-all.mplt", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.bin" + ), + Linker( + lib="host-x86_64-HIR2MPL_DEXO2", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + 
"${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_DEXO2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_RC_IFILE.py b/testsuite/driver/src/mode/HIR2MPL_RC_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..e9a0d1d4a825df140b45b3f4d492e1ddb636ffbd --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_RC_IFILE.py @@ -0,0 +1,73 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +HIR2MPL_RC_IFILE = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE/libcore-all.mplt", + "me": "--O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + 
reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/HIR2MPL_RC_IFILE_O0.py b/testsuite/driver/src/mode/HIR2MPL_RC_IFILE_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a0b366efef174b076962ab8550c25ce81327dc --- /dev/null +++ b/testsuite/driver/src/mode/HIR2MPL_RC_IFILE_O0.py @@ -0,0 +1,72 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +from api import * + +HIR2MPL_RC_IFILE_O0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE_O0/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0/libcore-all.mplt", + "me": "--quiet --enable-ea --aot", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker 
--duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/IFILE.py b/testsuite/driver/src/mode/IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..b5c8965bff51b9f674185a2b8a613f786a18b38e --- /dev/null +++ b/testsuite/driver/src/mode/IFILE.py @@ -0,0 +1,111 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +IFILE = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_IFILE/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --threads=4 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --threads=4 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + 
xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.ohex", + xbootclasspath="libcore-all.ohex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/IFILENATIVE.py b/testsuite/driver/src/mode/IFILENATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..d492fb938e3ff6b3cee68af8e4de79d469aa820b --- /dev/null +++ b/testsuite/driver/src/mode/IFILENATIVE.py @@ -0,0 +1,124 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +IFILENATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64_ifile" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_IFILE/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt 
--fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.ohex", + xbootclasspath="libcore-all.ohex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are 
wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/IFILENATIVEEH.py b/testsuite/driver/src/mode/IFILENATIVEEH.py new file mode 100644 index 0000000000000000000000000000000000000000..9b4f97aa46d0c480297f1ec0a494b79dd9397d7d --- /dev/null +++ b/testsuite/driver/src/mode/IFILENATIVEEH.py @@ -0,0 +1,72 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +IFILENATIVEEH = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64_ifile" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_IFILE/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet 
--inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--save-temps --ifile --aot", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/IR.py b/testsuite/driver/src/mode/IR.py index 918f6630a7b4d025edfce351321443d79277e980..3b2089b5d922a76b868966d8cdee0b5c41ae4985 100644 --- a/testsuite/driver/src/mode/IR.py +++ b/testsuite/driver/src/mode/IR.py @@ -17,11 +17,11 @@ from api import * IR = { "compile": [ Irbuild( - irbuild="${MAPLE_BUILD_OUTPUT}/bin/irbuild", + irbuild="${OUT_ROOT}/target/product/maple_arm64/bin/irbuild", infile="${APP}.mpl" ), Irbuild( - irbuild="${MAPLE_BUILD_OUTPUT}/bin/irbuild", + irbuild="${OUT_ROOT}/target/product/maple_arm64/bin/irbuild", infile="${APP}.irb.mpl" ), CheckFileEqual( @@ -29,4 +29,4 @@ IR = { file2="${APP}.irb.irb.mpl" ) ] -} +} \ No newline at end of file diff --git 
a/testsuite/driver/src/mode/JASMJBC2MPL_LIBCORE.py b/testsuite/driver/src/mode/JASMJBC2MPL_LIBCORE.py new file mode 100644 index 0000000000000000000000000000000000000000..9ac744fd9f085fd744e67de22d40983456b7207f --- /dev/null +++ b/testsuite/driver/src/mode/JASMJBC2MPL_LIBCORE.py @@ -0,0 +1,130 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JASMJBC2MPL_LIBCORE = { + "compile": [ + Shell( + "java -jar ${OUT_ROOT}/target/product/public/bin/asmtools.jar jasm *.jasm" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Shell( + "jar -cvfe Main.jar Main *.class" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="Main.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_JBC2MPL_LIBCORE/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt 
${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE/libcore-all.mplt -use-string-factory", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-JASMJBC2MPL_LIBCORE", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JASMJBC2MPL_LIBCORE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JASMJBC2MPL_LIBCORE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JASMJBC2MPL_LIBCORE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects 
potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JASMO0.py b/testsuite/driver/src/mode/JASMO0.py new file mode 100644 index 0000000000000000000000000000000000000000..ce99eda667d4f4755004bf3b2ccd70460cbf4a78 --- /dev/null +++ b/testsuite/driver/src/mode/JASMO0.py @@ -0,0 +1,135 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JASMO0 = { + "compile": [ + Shell( + "java -jar ${OUT_ROOT}/target/product/public/bin/asmtools.jar jasm *.jasm" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Shell( + "jar -cvfe Main.jar Main *.class" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="Main.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + 
qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/JASMO2.py b/testsuite/driver/src/mode/JASMO2.py new file mode 100644 index 
0000000000000000000000000000000000000000..b739b5a356d0857774f4c4678ae48465f9accedc --- /dev/null +++ b/testsuite/driver/src/mode/JASMO2.py @@ -0,0 +1,131 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JASMO2 = { + "compile": [ + Shell( + "java -jar ${OUT_ROOT}/target/product/public/bin/asmtools.jar jasm *.jasm" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Shell( + "jar -cvfe Main.jar Main *.class" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="Main.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list 
-litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + 
qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/JASM_O0.py b/testsuite/driver/src/mode/JASM_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..8aaf58a66a9cd4dbea076520c4618db2a2a6a1f0 --- /dev/null +++ b/testsuite/driver/src/mode/JASM_O0.py @@ -0,0 +1,115 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JASM_O0 = { + "compile": [ + Jasm2jar( + file=["${APP}.jasm", "../lib/Printer.jasm"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0/libcore-all.mplt -use-string-factory", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-OPS_O0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + 
"${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JASM_O2.py b/testsuite/driver/src/mode/JASM_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..7e2eeee50a8fed040541fe7cdbcdbd27fb719607 --- /dev/null +++ b/testsuite/driver/src/mode/JASM_O2.py @@ -0,0 +1,110 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JASM_O2 = { + "compile": [ + Jasm2jar( + file=["${APP}.jasm", "../lib/Printer.jasm"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2/libcore-all.mplt -use-string-factory", + "me": "-O2 --quiet", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker", + "mplcg": "--quiet -O2 --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-OPS_O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + 
mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JBC2MPL_LIBCORE.py b/testsuite/driver/src/mode/JBC2MPL_LIBCORE.py new file mode 100644 index 0000000000000000000000000000000000000000..fd3d428e0d192d42070f5924e38f4a1ae8fe0fc4 --- /dev/null +++ b/testsuite/driver/src/mode/JBC2MPL_LIBCORE.py @@ -0,0 +1,120 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JBC2MPL_LIBCORE = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_JBC2MPL_LIBCORE/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE/libcore-all.mplt -use-string-factory", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-JBC2MPL_LIBCORE", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + 
CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JBC2MPL_LIBCORENATIVE.py b/testsuite/driver/src/mode/JBC2MPL_LIBCORENATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..ac71634421f55062bc3623ede1f21bac7c025c06 --- /dev/null +++ b/testsuite/driver/src/mode/JBC2MPL_LIBCORENATIVE.py @@ -0,0 +1,132 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JBC2MPL_LIBCORENATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_JBC2MPL_LIBCORE/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE/libcore-all.mplt -use-string-factory", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-JBC2MPL_LIBCORE", + model="arm64", + infile="${APP}", + + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + 
"${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JBC2MPL_LIBCORERC.py b/testsuite/driver/src/mode/JBC2MPL_LIBCORERC.py new file mode 100644 index 0000000000000000000000000000000000000000..99cbd80d1bb864e6473c9db801847e07e43d778b --- /dev/null +++ b/testsuite/driver/src/mode/JBC2MPL_LIBCORERC.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JBC2MPL_LIBCORERC = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_JBC2MPL_LIBCORE/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE/libcore-all.mplt -use-string-factory", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-JBC2MPL_LIBCORE", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-JBC2MPL_LIBCORE", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} 
\ No newline at end of file diff --git a/testsuite/driver/src/mode/JBC_O0.py b/testsuite/driver/src/mode/JBC_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..7a9ba06036d6fe87d4caf8eaaad26d400949a7c8 --- /dev/null +++ b/testsuite/driver/src/mode/JBC_O0.py @@ -0,0 +1,120 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JBC_O0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0/libcore-all.mplt -use-string-factory", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-OPS_O0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + 
"${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JBC_O2.py b/testsuite/driver/src/mode/JBC_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..22a70e961a29e145d73235f5a03b6efe3c8b0416 --- /dev/null +++ b/testsuite/driver/src/mode/JBC_O2.py @@ -0,0 +1,115 @@ +# 
+# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JBC_O2 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2/libcore-all.mplt -use-string-factory", + "me": "-O2 --quiet", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker", + "mplcg": "--quiet -O2 --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-OPS_O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, 
+ qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JCK_AOT.py b/testsuite/driver/src/mode/JCK_AOT.py new file mode 100644 index 0000000000000000000000000000000000000000..11cc888bbedac72ae805947a0cceef7dc30e3ba4 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_AOT.py @@ -0,0 +1,66 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JCK_AOT = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_AOT/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-AOT/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps --aot", + infile="${APP}.dex" + ), + Linker( + 
lib="host-x86_64-AOT", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-AOT", + "../../lib", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_DEOPT.py b/testsuite/driver/src/mode/JCK_DEOPT.py new file mode 100644 index 0000000000000000000000000000000000000000..7d189362ace13088245a22b681a8bb8de0548885 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_DEOPT.py @@ -0,0 +1,60 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_DEOPT = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64-clang-release/bin/bin_HIR2MPL_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE/libcore-all.mplt", + "me": "--gconly --O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--gconly --O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--gconly --O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --with-ra-linear-scan --no-ico --no-cfgo --no-prepeep --no-peep --no-ebo --no-storeloadopt --no-globalopt --no-schedule --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--aot --deopt --ifile --save-temps", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE", + "../../lib", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_GCO2.py b/testsuite/driver/src/mode/JCK_GCO2.py new file mode 100644 index 0000000000000000000000000000000000000000..7c8483b2c034a6626993883e7531a21295bb1245 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_GCO2.py @@ -0,0 +1,66 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_GCO2 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck -checktool -check-incomplete -incomplete-whitelist=${OUT_ROOT}/target/product/public/lib/codetricks/compile/incomplete.list -incomplete-detail -opt-switch-disable -incomplete-whitelist-auto -gconly", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-GCO2", + model="arm64", + 
infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2", + "../../lib", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_GC_IFILE.py b/testsuite/driver/src/mode/JCK_GC_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..aaf1533bf6e71f214dfdd72b50c02822af5aa8c9 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_GC_IFILE.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_GC_IFILE = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_IFILE/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GC_IFILE/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck -gconly", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl --gconly", + "mplcg": "--O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold --gconly" + }, + global_option="--aot --save-temps --ifile", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + 
"${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GC_IFILE", + "../../lib", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_HIR2MPL_IFILE.py b/testsuite/driver/src/mode/JCK_HIR2MPL_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..e3475919676eb0db59e85b48fb3bbffd2d03074f --- /dev/null +++ b/testsuite/driver/src/mode/JCK_HIR2MPL_IFILE.py @@ -0,0 +1,60 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_HIR2MPL_IFILE = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64-clang-release/bin/bin_HIR2MPL_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE/libcore-all.mplt", + "me": "--gconly --O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--gconly --O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--gconly --O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--aot --save-temps --ifile", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE", + "../../lib", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_HIR2MPL_IFILE_O0.py b/testsuite/driver/src/mode/JCK_HIR2MPL_IFILE_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..5612323dd6b7a23f206b8849c2e9e98b085f2c63 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_HIR2MPL_IFILE_O0.py @@ -0,0 +1,60 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_HIR2MPL_IFILE_O0 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64-clang-release/bin/bin_HIR2MPL_IFILE_O0/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE_O0/libcore-all.mplt", + "me": "--gconly --quiet --enable-ea", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl --gconly", + "mplcg": "--quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold --gconly" + }, + global_option="--aot --save-temps --ifile", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_IFILE_O0", + "../../lib", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_HIR2MPL_RC_IFILE.py b/testsuite/driver/src/mode/JCK_HIR2MPL_RC_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..9ec39aadf2958678e143af5c1fec9d06f84af6e4 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_HIR2MPL_RC_IFILE.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_HIR2MPL_RC_IFILE = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE/libcore-all.mplt", + "me": "--O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --with-ra-linear-scan --no-ico --no-cfgo --no-prepeep --no-peep --no-ebo --no-storeloadopt --no-globalopt --no-schedule --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--aot --save-temps --ifile", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE", + "../../lib", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} + diff --git a/testsuite/driver/src/mode/JCK_HIR2MPL_RC_IFILE_O0.py b/testsuite/driver/src/mode/JCK_HIR2MPL_RC_IFILE_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..fc585054217441aecdcf0d54fc900d05b08606bc --- /dev/null +++ b/testsuite/driver/src/mode/JCK_HIR2MPL_RC_IFILE_O0.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_HIR2MPL_RC_IFILE_O0 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_HIR2MPL_RC_IFILE_O0/maple", + run=["hir2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "hir2mpl": "-rc -mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0/libcore-all.mplt", + "me": "--quiet --enable-ea --aot", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--aot --save-temps --ifile", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-HIR2MPL_RC_IFILE_O0", + "../../lib", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} + diff --git a/testsuite/driver/src/mode/JCK_IFILE.py b/testsuite/driver/src/mode/JCK_IFILE.py new file mode 100644 index 0000000000000000000000000000000000000000..0899e54165e49c5d52f7519b72a61790caa2e15b --- /dev/null +++ b/testsuite/driver/src/mode/JCK_IFILE.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_IFILE = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64-clang-release/bin/bin_IFILE/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --threads=4 --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --threads=4 --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/maple_arm64-clang-release/lib/codetricks/arch/arm64/duplicateFunc.s --nativeopt --fPIC --filetype=obj --no-proepilogue --no-prelsra --no-const-fold" + }, + global_option="--aot --save-temps --ifile", + infile="${APP}.dex" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + 
"${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-IFILE", + "../../lib", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.ohex", + infile="${APP}.ohex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_MOVO2.py b/testsuite/driver/src/mode/JCK_MOVO2.py new file mode 100644 index 0000000000000000000000000000000000000000..4c076f213edc15f8c2351b19cb85f7297b8954a8 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_MOVO2.py @@ -0,0 +1,64 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_MOVO2 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck -checktool -check-incomplete -incomplete-whitelist=${OUT_ROOT}/target/product/public/lib/codetricks/compile/incomplete.list -incomplete-detail -opt-switch-disable -incomplete-whitelist-auto -gconly", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly --movinggc", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly --movinggc", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly --movinggc" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + QemuLinkerArm64( + 
lib="host-x86_64-MOVO2" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2", + "../../lib", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_O2.py b/testsuite/driver/src/mode/JCK_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..60a128f241f88d75e5bb2f2ddfbcd6cedca5e1bc --- /dev/null +++ b/testsuite/driver/src/mode/JCK_O2.py @@ -0,0 +1,66 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_O2 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "../../lib", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JCK_TGCO2.py b/testsuite/driver/src/mode/JCK_TGCO2.py new file mode 100644 index 0000000000000000000000000000000000000000..a188999ca16617eb0a570a6b91eda050085b5893 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_TGCO2.py @@ -0,0 +1,58 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCK_TGCO2 = { + "compile": [ + Shell( + "adb shell \"mkdir -p /data/maple/${CASE}/${OPT}\"" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Shell( + "adb push ${APP}.dex /data/maple/${CASE}/${OPT}/" + ), + Shell( + "adb shell \"/data/maple/maple -O2 --gconly --save-temps --hir2mpl-opt=\\\"-Xbootclasspath /apex/com.android.runtime/javalib/core-oj.jar,/apex/com.android.runtime/javalib/core-libart.jar\\\" --mplcg-opt=\\\"--no-ebo --no-cfgo --no-schedule\\\" --infile /data/maple/${CASE}/${OPT}/${APP}.dex\"" + ), + Shell( + "adb pull /data/maple/${CASE}/${OPT}/${APP}.VtableImpl.s ./" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ -O2 -x assembler-with-cpp -march=armv8-a -DUSE_32BIT_REF -c ${APP}.VtableImpl.s" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ ${APP}.VtableImpl.o -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv8-a -fPIC -shared ${MAPLE_ROOT}/out/target/product/maple_arm64/lib/mrt_module_init.cpp -fuse-ld=lld -rdynamic -L${MAPLE_ROOT}/out/target/product/maple_arm64/lib/android -lmaplecore-all -lcommon_bridge -lc++ -lc -lm -ldl -Wl,-T${MAPLE_ROOT}/out/target/product/public/lib/linker/maplelld.so.lds -o ./${APP}.so" + ), + Shell( + "adb push ${APP}.so /data/maple/${CASE}/${OPT}/" + ) + ], + "run": [ + Shell( + "adb shell \"export 
LD_LIBRARY_PATH=/vendor/lib64:/system/lib64:/data/maple;mplsh -Xgconly -cp /data/maple/${CASE}/${OPT}/${APP}.so ${MAIN} ${ARGS}\" || [ $? -eq 95 ]" + ), + Shell( + "adb shell \"rm -rf /data/maple/${CASE}/${OPT}\"" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JCK_ZTERP.py b/testsuite/driver/src/mode/JCK_ZTERP.py new file mode 100644 index 0000000000000000000000000000000000000000..f0aa2cc0b17d6eb2d47b649dc626eb2bd5df07b7 --- /dev/null +++ b/testsuite/driver/src/mode/JCK_ZTERP.py @@ -0,0 +1,48 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JCK_ZTERP = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "../../lib", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + main="${MAIN}", + args="${ARGS}", + return_value_list=[95] + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JCOD_O0.py b/testsuite/driver/src/mode/JCOD_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..6b2915f2eb73fc075d71310fda624f8c608e121c --- /dev/null +++ b/testsuite/driver/src/mode/JCOD_O0.py @@ -0,0 +1,115 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCOD_O0 = { + "compile": [ + Jcod2jar( + file=["${APP}.jcod"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0/libcore-all.mplt -use-string-factory", + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-OPS_O0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + 
"${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JCOD_O2.py b/testsuite/driver/src/mode/JCOD_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..d61868b8d0307e026954a10e354fac0c237ab6b1 --- /dev/null +++ b/testsuite/driver/src/mode/JCOD_O2.py @@ -0,0 +1,110 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JCOD_O2 = { + "compile": [ + Jcod2jar( + file=["${APP}.jcod"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple", + run=["jbc2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "jbc2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2/libcore-all.mplt -use-string-factory", + "me": "-O2 --quiet", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker", + "mplcg": "--quiet -O2 --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.jar" + ), + Linker( + lib="host-x86_64-OPS_O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + 
mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JTREG_GCO2.py b/testsuite/driver/src/mode/JTREG_GCO2.py new file mode 100644 index 0000000000000000000000000000000000000000..e6c3fda76c150309bb70474a9563f3f8e51d3b6f --- /dev/null +++ b/testsuite/driver/src/mode/JTREG_GCO2.py @@ -0,0 +1,65 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JTREG_GCO2 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck -checktool -check-incomplete -incomplete-whitelist=${OUT_ROOT}/target/product/public/lib/codetricks/compile/incomplete.list -incomplete-detail -opt-switch-disable -incomplete-whitelist-auto -gconly", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-GCO2", + model="arm64", + 
infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-GCO2", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${CLASSPATH}", + main="${MAIN}", + args="${ARGS}", + return_value_list=[0, 95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JTREG_MOVO2.py b/testsuite/driver/src/mode/JTREG_MOVO2.py new file mode 100644 index 0000000000000000000000000000000000000000..336d5c4d4774f0fc66946c9d8f1826cfdc9d7201 --- /dev/null +++ b/testsuite/driver/src/mode/JTREG_MOVO2.py @@ -0,0 +1,63 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JTREG_MOVO2 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck -checktool -check-incomplete -incomplete-whitelist=${OUT_ROOT}/target/product/public/lib/codetricks/compile/incomplete.list -incomplete-detail -opt-switch-disable -incomplete-whitelist-auto -gconly", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --threads=2 --gconly --movinggc", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --inlineCache=1 --gen-pgo-report --gconly --movinggc", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly --movinggc" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + QemuLinkerArm64( + 
lib="host-x86_64-MOVO2" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2", + "." + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${CLASSPATH}", + main="${MAIN}", + args="${ARGS}", + return_value_list=[0, 95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JTREG_O2.py b/testsuite/driver/src/mode/JTREG_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..2c44a5662416de47b93842cf6a898f0d655b3bda --- /dev/null +++ b/testsuite/driver/src/mode/JTREG_O2.py @@ -0,0 +1,65 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JTREG_O2 = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CLASSPATH}", + main="${MAIN}", + args="${ARGS}", + return_value_list=[0, 95] + ) + ] +} diff --git a/testsuite/driver/src/mode/JTREG_TGCO2.py b/testsuite/driver/src/mode/JTREG_TGCO2.py new file mode 100644 index 0000000000000000000000000000000000000000..179e59cb791c13e7e81d593c913fbc75b8708719 --- /dev/null +++ b/testsuite/driver/src/mode/JTREG_TGCO2.py @@ -0,0 +1,58 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +JTREG_TGCO2 = { + "compile": [ + Shell( + "adb shell \"mkdir -p /data/maple/${CASE}/${OPT}\"" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Shell( + "adb push ${APP}.dex /data/maple/${CASE}/${OPT}/" + ), + Shell( + "adb shell \"/data/maple/maple -O2 --gconly --save-temps --hir2mpl-opt=\\\"-Xbootclasspath /apex/com.android.runtime/javalib/core-oj.jar,/apex/com.android.runtime/javalib/core-libart.jar\\\" --mplcg-opt=\\\"--no-ebo --no-cfgo --no-schedule\\\" --infile /data/maple/${CASE}/${OPT}/${APP}.dex\"" + ), + Shell( + "adb pull /data/maple/${CASE}/${OPT}/${APP}.VtableImpl.s ./" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ -O2 -x assembler-with-cpp -march=armv8-a -DUSE_32BIT_REF -c ${APP}.VtableImpl.s" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ ${APP}.VtableImpl.o -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv8-a -fPIC -shared ${MAPLE_ROOT}/out/target/product/maple_arm64/lib/mrt_module_init.cpp -fuse-ld=lld -rdynamic -L${MAPLE_ROOT}/out/target/product/maple_arm64/lib/android -lmaplecore-all -lcommon_bridge -lc++ -lc -lm -ldl -Wl,-T${MAPLE_ROOT}/out/target/product/public/lib/linker/maplelld.so.lds -o ./${APP}.so" + ), + Shell( + "adb push ${APP}.so /data/maple/${CASE}/${OPT}/" + ) + ], + "run": [ + Shell( + "adb shell \"export 
LD_LIBRARY_PATH=/vendor/lib64:/system/lib64:/data/maple;mplsh -Xgconly -cp /data/maple/${CASE}/${OPT}/${CLASSPATH} ${MAIN} ${ARGS}\" || [ $? -eq 95 ]" + ), + Shell( + "adb shell \"rm -rf /data/maple/${CASE}/${OPT}\"" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/JTREG_ZTERP.py b/testsuite/driver/src/mode/JTREG_ZTERP.py new file mode 100644 index 0000000000000000000000000000000000000000..f3fdc897e2f382585a00286bc0fcd985cdebdebb --- /dev/null +++ b/testsuite/driver/src/mode/JTREG_ZTERP.py @@ -0,0 +1,47 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +JTREG_ZTERP = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "." 
+ ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${CLASSPATH}", + main="${MAIN}", + args="${ARGS}", + return_value_list=[0, 95] + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/MAPLETI_ZTERP.py b/testsuite/driver/src/mode/MAPLETI_ZTERP.py new file mode 100644 index 0000000000000000000000000000000000000000..092904adb70696c56b414a4a88d415cb943aaf5e --- /dev/null +++ b/testsuite/driver/src/mode/MAPLETI_ZTERP.py @@ -0,0 +1,130 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +MAPLETI_ZTERP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${ZTERPAPP}.dex", + infile=["${ZTERPAPP}.java"], + usesimplejava=True + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-ZTERP", + model="arm64_mapleti", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_ZTERP": "true" + }, + 
qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh -agentpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/dummy-agent.so -pluginpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/mapleti.so", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so ", + infile="./MapleTiTest.dex:./MapleCode.so", #${CP}", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh -agentpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/dummy-agent.so -pluginpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/mapleti.so", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="./MapleTiTest.dex:./MapleCode.so", #${CP}", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh -agentpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/dummy-agent.so 
-pluginpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/mapleti.so", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so ", + infile="./MapleTiTest.dex:./MapleCode.so", #${CP}", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/MAPLETI_ZTERP_PANDA.py b/testsuite/driver/src/mode/MAPLETI_ZTERP_PANDA.py new file mode 100644 index 0000000000000000000000000000000000000000..6b4cec348a60decef5bf369e3d73f254a94ada7b --- /dev/null +++ b/testsuite/driver/src/mode/MAPLETI_ZTERP_PANDA.py @@ -0,0 +1,135 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +MAPLETI_ZTERP_PANDA = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${ZTERPAPP}.dex", + infile=["${ZTERPAPP}.java"], + usesimplejava=True + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile="${ZTERPAPP}.class", + outfile="${ZTERPAPP}.bin" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + 
lib="host-x86_64-ZTERP", + model="arm64_mapleti", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh -agentpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/dummy-agent.so -pluginpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/mapleti.so", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so ", + infile="./MapleTiTest.bin:./MapleCode.so", #${CP}", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh -agentpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/dummy-agent.so -pluginpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/mapleti.so", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="./MapleTiTest.bin:./MapleCode.so", #${CP}", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh 
-agentpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/dummy-agent.so -pluginpath:${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-ZTERP/mapleti.so", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so ", + infile="./MapleTiTest.bin:./MapleCode.so", #${CP}", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/MEMORY_CHECK.py b/testsuite/driver/src/mode/MEMORY_CHECK.py new file mode 100644 index 0000000000000000000000000000000000000000..c38f51f1d33cd5390b115286eb8bb774dc67541f --- /dev/null +++ b/testsuite/driver/src/mode/MEMORY_CHECK.py @@ -0,0 +1,55 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +MEMORY_CHECK = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="valgrind ${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex", + redirection="output.log" + ), + CheckRegContain( + reg="definitely lost: 0 bytes in 0 blocks", + file="output.log" + ), + CheckRegContain( + reg="indirectly lost: 0 bytes in 0 blocks", + file="output.log" + ), + CheckRegContain( + reg="possibly lost: 0 bytes in 0 blocks", + file="output.log" + ), + ], + "run": [] +} diff --git a/testsuite/driver/src/mode/MOVO2.py b/testsuite/driver/src/mode/MOVO2.py new file mode 100644 index 
0000000000000000000000000000000000000000..c75c5e035ef9c28e09137d86fcd44af6636e6c57 --- /dev/null +++ b/testsuite/driver/src/mode/MOVO2.py @@ -0,0 +1,66 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +MOVO2 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gconly -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=32 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -opt-switch-disable -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly --movinggc --no-localvar", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list 
--gen-pgo-report --gconly --movinggc", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly --movinggc --no-localvar" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-MOVO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/MOVO2NATIVE.py b/testsuite/driver/src/mode/MOVO2NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..facaa84d0a289daf765a34b2fb35c3c05e2c2341 --- /dev/null +++ b/testsuite/driver/src/mode/MOVO2NATIVE.py @@ -0,0 +1,77 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +MOVO2NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2/libcore-all.mplt -anti-proguard-auto -dexcatch -gconly -gen-stringfieldvalue -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=32 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -opt-switch-disable -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea --gconly --movinggc --no-localvar", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --gen-pgo-report --gconly --movinggc", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --gconly --movinggc --no-localvar" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-MOVO2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + 
}, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-MOVO2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="GC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/O0.py b/testsuite/driver/src/mode/O0.py index e566026fe6d1539a1478560e125209e008760099..84e2338b54acb68de3f97d034355620d370a66db 100644 --- a/testsuite/driver/src/mode/O0.py +++ b/testsuite/driver/src/mode/O0.py @@ -18,31 +18,28 @@ O0 = { "compile": [ Java2dex( jar_file=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", - "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" ], outfile="${APP}.dex", infile=["${APP}.java","${EXTRA_JAVA_FILE}"] ), - Hir2mpl( - hir2mpl="${MAPLE_BUILD_OUTPUT}/bin/hir2mpl", - option="-mplt ${MAPLE_BUILD_OUTPUT}/libjava-core/host-x86_64-O0/libcore-all.mplt --rc", - infile="${APP}.dex", - outfile="${APP}.mpl" - ), Maple( - maple="${MAPLE_BUILD_OUTPUT}/bin/maple", - run=["me", "mpl2mpl", "mplcg"], + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], option={ - "me": "--quiet", - "mpl2mpl": "--quiet --regnativefunc --maplelinker --emitVtableImpl", - "mplcg": "--quiet --no-pie --fPIC --verbose-asm --maplelinker" + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "", + 
"mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" }, - global_option="", - infiles=["${APP}.mpl"] + global_option="--save-temps", + infile="${APP}.dex" ), Linker( lib="host-x86_64-O0", + model="arm64", + infile="${APP}" ) ], "run": [ @@ -50,14 +47,14 @@ O0 = { env={ "USE_OLD_STACK_SCAN": "1" }, - qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu="/usr/bin/qemu-aarch64", qemu_libc="/usr/aarch64-linux-gnu", qemu_ld_lib=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party", - "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O0", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", "./" ], - mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", garbage_collection_kind="RC", xbootclasspath="libcore-all.so", infile="${APP}.so", @@ -69,17 +66,17 @@ O0 = { ), Mplsh( env={ - "USE_OLD_STACK_SCAN": "1", + "USE_OLD_STACK_SCAN": "1", "MAPLE_REPORT_RC_LEAK": "1" }, - qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu="/usr/bin/qemu-aarch64", qemu_libc="/usr/aarch64-linux-gnu", qemu_ld_lib=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party", - "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O0", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", "./" ], - mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", garbage_collection_kind="RC", xbootclasspath="libcore-all.so", infile="${APP}.so", @@ -91,17 +88,17 @@ O0 = { ), Mplsh( env={ - 
"USE_OLD_STACK_SCAN": "1", + "USE_OLD_STACK_SCAN": "1", "MAPLE_VERIFY_RC": "1" }, - qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu="/usr/bin/qemu-aarch64", qemu_libc="/usr/aarch64-linux-gnu", qemu_ld_lib=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party", - "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O0", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", "./" ], - mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", garbage_collection_kind="RC", xbootclasspath="libcore-all.so", infile="${APP}.so", diff --git a/testsuite/driver/src/mode/O0NATIVE.py b/testsuite/driver/src/mode/O0NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..6196a6d1dc5956486178a98d6ded037c67e56384 --- /dev/null +++ b/testsuite/driver/src/mode/O0NATIVE.py @@ -0,0 +1,131 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +O0NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O0", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", 
+ redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/O0NATIVEEH.py b/testsuite/driver/src/mode/O0NATIVEEH.py new file mode 100644 index 0000000000000000000000000000000000000000..a6c1c9a437b5c109de40e7cc62317241cedfade2 --- /dev/null +++ b/testsuite/driver/src/mode/O0NATIVEEH.py @@ -0,0 +1,77 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +O0NATIVEEH = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O0", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + 
}, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/O0RC.py b/testsuite/driver/src/mode/O0RC.py new file mode 100644 index 0000000000000000000000000000000000000000..33f36188583188f6a2587ea2b527ef3d5dfac42a --- /dev/null +++ b/testsuite/driver/src/mode/O0RC.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +O0RC = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + 
choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/O0SMALI.py b/testsuite/driver/src/mode/O0SMALI.py new file mode 100644 index 0000000000000000000000000000000000000000..c2947526d62583b6ffc46e91bdfdde4b76b94227 --- /dev/null +++ b/testsuite/driver/src/mode/O0SMALI.py @@ -0,0 +1,155 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +O0SMALI = { + "smali2dex": [ + Smali2dex( + file=["${APP}.smali","${EXTRA_SMALI2DEX_FILE_1}"] + ) + ], + "dex2mpl":[ + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ${EXTRA_DEX2MPL_OPTION}", + infile="${APP}.dex", + redirection="dex2mpl.log" + ) + ], + "check_reg_contain": [ + CheckRegContain( + reg="${REG}", + file="${FILE}" + ) + ], + "maple_mplipa_me_mpl2mpl_mplcg": [ + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "mplipa": "--quiet --effectipa", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": 
"--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.mpl" + ) + ], + "qemu_linker": [ + Linker( + lib="host-x86_64-O0", + model="arm64", + infile="${APP}" + ) + ], + "compile": [ + Smali2dex( + file=["${APP}.smali","${EXTRA_SMALI2DEX_FILE_2}","../lib/smali_util_Printer.smali","../lib/smali_util_ArrayI.smali"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + 
"USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/O2.py b/testsuite/driver/src/mode/O2.py index 26e1b99d1ffc429852ced960648b0c13704678b6..c9b1bb5d18ba971de3bc5ff17d666bf672627da1 100644 --- a/testsuite/driver/src/mode/O2.py +++ b/testsuite/driver/src/mode/O2.py @@ -18,43 +18,41 @@ O2 = { "compile": [ Java2dex( jar_file=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", - "${MAPLE_BUILD_OUTPUT}/ops/third_party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + 
"${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" ], outfile="${APP}.dex", infile=["${APP}.java","${EXTRA_JAVA_FILE}"] ), - Hir2mpl( - hir2mpl="${MAPLE_BUILD_OUTPUT}/bin/hir2mpl", - option="-mplt ${MAPLE_BUILD_OUTPUT}/libjava-core/host-x86_64-O2/libcore-all.mplt --rc", - infile="${APP}.dex", - outfile="${APP}.mpl" - ), Maple( - maple="${MAPLE_BUILD_OUTPUT}/bin/maple", - run=["me", "mpl2mpl", "mplcg"], + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], option={ - "me": "--O2 --quiet", - "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --emitVtableImpl", - "mplcg": "--O2 --quiet --no-pie --fPIC --verbose-asm --maplelinker" + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" }, - global_option="", - infiles=["${APP}.mpl"] + global_option="--save-temps", + infile="${APP}.dex" ), Linker( lib="host-x86_64-O2", + model="arm64", + infile="${APP}" ) ], "run": [ Mplsh( - qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu="/usr/bin/qemu-aarch64", 
qemu_libc="/usr/aarch64-linux-gnu", qemu_ld_lib=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party", - "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O2", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", "./" ], - mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", garbage_collection_kind="RC", xbootclasspath="libcore-all.so", infile="${APP}.so", @@ -68,14 +66,14 @@ O2 = { env={ "MAPLE_REPORT_RC_LEAK": "1" }, - qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu="/usr/bin/qemu-aarch64", qemu_libc="/usr/aarch64-linux-gnu", qemu_ld_lib=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party", - "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O2", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", "./" ], - mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", garbage_collection_kind="RC", xbootclasspath="libcore-all.so", infile="${APP}.so", @@ -87,19 +85,19 @@ O2 = { ), Mplsh( env={ - "MAPLE_VERIFY_RC": "1" + "MAPLE_VERIFY_RC": "1", }, - qemu="${OUT_ROOT}/tools/bin/qemu-aarch64", + qemu="/usr/bin/qemu-aarch64", qemu_libc="/usr/aarch64-linux-gnu", qemu_ld_lib=[ - "${MAPLE_BUILD_OUTPUT}/ops/third_party", - "${MAPLE_BUILD_OUTPUT}/ops/host-x86_64-O2", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", "./" ], - mplsh="${MAPLE_BUILD_OUTPUT}/ops/mplsh", + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", garbage_collection_kind="RC", - xbootclasspath="libcore-all.so", infile="${APP}.so", + xbootclasspath="libcore-all.so", redirection="rcverify.log" ), CheckRegContain( diff --git a/testsuite/driver/src/mode/O2NATIVE.py b/testsuite/driver/src/mode/O2NATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..75159c5d90ff502141e2fb86bbcab3caafd8039c --- /dev/null +++ 
b/testsuite/driver/src/mode/O2NATIVE.py @@ -0,0 +1,129 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +O2NATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid 
--check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + 
redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/O2NATIVEEH.py b/testsuite/driver/src/mode/O2NATIVEEH.py new file mode 100644 index 0000000000000000000000000000000000000000..d55bfc050973871ab6b3488681fe53d9446ca12d --- /dev/null +++ b/testsuite/driver/src/mode/O2NATIVEEH.py @@ -0,0 +1,77 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +O2NATIVEEH = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + 
"${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/O2RC.py b/testsuite/driver/src/mode/O2RC.py new file mode 100644 index 0000000000000000000000000000000000000000..6cf4c5510f17ac63b84f5e975b8378db94e154dc --- /dev/null +++ b/testsuite/driver/src/mode/O2RC.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +O2RC = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + 
xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/O2SMALI.py b/testsuite/driver/src/mode/O2SMALI.py new file mode 100644 index 0000000000000000000000000000000000000000..763b9a10bc365ec47209adf881b25133d9b84564 --- /dev/null +++ b/testsuite/driver/src/mode/O2SMALI.py @@ -0,0 +1,151 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +O2SMALI = { + "smali2dex": [ + Smali2dex( + file=["${APP}.smali","${EXTRA_SMALI2DEX_FILE_1}"] + ) + ], + "dex2mpl": [ + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ${EXTRA_DEX2MPL_OPTION}", + infile="${APP}.dex", + redirection="dex2mpl.log" + ) + ], + "check_reg_contain": [ + CheckRegContain( + reg="${REG}", + file="${FILE}" + ) + ], + "maple_mplipa_me_mpl2mpl_mplcg": [ + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "mplipa": "--quiet --effectipa", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.mpl" + ) + ], + "qemu_linker": [ + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "compile": [ + Smali2dex( + file=["${APP}.smali","${EXTRA_SMALI2DEX_FILE_2}","../lib/smali_util_Printer.smali","../lib/smali_util_ArrayI.smali"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list 
-litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + 
qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/O2STMTPRE.py b/testsuite/driver/src/mode/O2STMTPRE.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad70b96adae3980a72833c807722414a551c212 --- /dev/null +++ b/testsuite/driver/src/mode/O2STMTPRE.py @@ -0,0 +1,121 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +O2STMTPRE = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + CheckRegContain( + reg='"callassigned &MCC_GetOrInsertLiteral"', + file="${APP}.VtableImpl.mpl", + choice="num" + ), + Linker( + lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + 
mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/O2SUBSUMERC.py b/testsuite/driver/src/mode/O2SUBSUMERC.py new file mode 100644 index 0000000000000000000000000000000000000000..714d8ecc239d465ffb68038cf7c43ec4a42865d4 --- /dev/null +++ b/testsuite/driver/src/mode/O2SUBSUMERC.py @@ -0,0 +1,121 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +O2SUBSUMERC = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + CheckRegContain( + reg="MCCIncRef", + file="${APP}.VtableImpl.mpl", + choice="num" + ), + Linker( + 
lib="host-x86_64-O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/OPSIR.py b/testsuite/driver/src/mode/OPSIR.py new file mode 100644 index 
0000000000000000000000000000000000000000..6e430ee43c3c5073e65fe91f5071e45b9451e5ec --- /dev/null +++ b/testsuite/driver/src/mode/OPSIR.py @@ -0,0 +1,32 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +OPSIR = { + "compile": [ + Irbuild( + irbuild="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/irbuild", + infile="${APP}.mpl" + ), + Irbuild( + irbuild="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/irbuild", + infile="${APP}.irb.mpl" + ), + CheckFileEqual( + file1="${APP}.irb.mpl", + file2="${APP}.irb.irb.mpl" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/OPS_NATIVE_O0.py b/testsuite/driver/src/mode/OPS_NATIVE_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..58a4ab7cd4252569df9e3c9e5274c4421d89c614 --- /dev/null +++ b/testsuite/driver/src/mode/OPS_NATIVE_O0.py @@ -0,0 +1,136 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +OPS_NATIVE_O0 = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + infile="${APP}.dex" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple --mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0/libcore-all.mplt", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.mpl" + ), + Linker( + lib="host-x86_64-OPS_O0", + model="arm64", + infile="${APP}", + native_src="${NATIVE_SRC}.cpp" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + 
"USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/OPS_NATIVE_O2.py b/testsuite/driver/src/mode/OPS_NATIVE_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..8bc60727c598592911e35c5eb06e57f0c252037b --- /dev/null +++ b/testsuite/driver/src/mode/OPS_NATIVE_O2.py @@ -0,0 +1,133 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +OPS_NATIVE_O2 = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + infile="${APP}.dex" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple --mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2/libcore-all.mplt", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "-O2 --quiet", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker", + "mplcg": "--quiet -O2 --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.mpl" + ), + Linker( + lib="host-x86_64-OPS_O2", + model="arm64", + infile="${APP}", + native_src="${NATIVE_SRC}.cpp" + ) + ], + "run": [ + Mplsh( + env={ + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + 
"${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/OPS_O0.py b/testsuite/driver/src/mode/OPS_O0.py new file mode 100644 index 0000000000000000000000000000000000000000..7c1097023d15083c18e71d62210125374b831f54 --- /dev/null +++ b/testsuite/driver/src/mode/OPS_O0.py @@ -0,0 +1,124 @@ +# +# Copyright (c) 
[2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +OPS_O0 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + infile="${APP}.dex" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple --mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0/libcore-all.mplt", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "--quiet", + "mpl2mpl": "--quiet --regnativefunc --maplelinker", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.mpl" + ), + Linker( + lib="host-x86_64-OPS_O0", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", 
+ garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O0", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/OPS_O2.py b/testsuite/driver/src/mode/OPS_O2.py new file mode 100644 index 0000000000000000000000000000000000000000..a51491069ca158d03207291c981cf047b0a13b89 --- /dev/null +++ b/testsuite/driver/src/mode/OPS_O2.py @@ -0,0 +1,119 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +OPS_O2 = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list", + infile="${APP}.dex" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/bin_OPS/maple --mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2/libcore-all.mplt", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "-O2 --quiet", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker", + "mplcg": "--quiet -O2 --no-pie --verbose-asm --maplelinker --fPIC" + }, + global_option="--save-temps --genVtableImpl", + infile="${APP}.mpl" + ), + Linker( + lib="host-x86_64-OPS_O2", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + 
CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-OPS_O2", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + infile="${APP}.so", + xbootclasspath="libcore-all.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/PANDA_ZTERP.py b/testsuite/driver/src/mode/PANDA_ZTERP.py new file mode 100644 index 0000000000000000000000000000000000000000..bf94cc19339032671073beccd5219a797aec2b0b --- /dev/null +++ b/testsuite/driver/src/mode/PANDA_ZTERP.py @@ -0,0 +1,103 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +PANDA_ZTERP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Class2panda( + class2panda="${OUT_ROOT}/target/product/public/bin/c2p", + infile=".", + outfile="${APP}.bin" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.bin", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.bin", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + 
"${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.bin", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/RC.py b/testsuite/driver/src/mode/RC.py new file mode 100644 index 0000000000000000000000000000000000000000..a5b833493eb0e17ac10b4f1b7893e724aea6ce53 --- /dev/null +++ b/testsuite/driver/src/mode/RC.py @@ -0,0 +1,79 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * +from env_var import EnvVar + +RC = { + "compile": [ + BenchmarkVogar(), + Shell( + "mv ${BENCHMARK_ACTION}/${BENCHMARK_ACTION}.dex.jar ${BENCHMARK_ACTION}/${BENCHMARK_ACTION}.jar;" + "if [ -d \"${BENCHMARK_ACTION}/dex\" ]; then" + " rm -rf ${BENCHMARK_ACTION}/dex;" + "fi;" + "unzip -q ${BENCHMARK_ACTION}/${BENCHMARK_ACTION}.jar -d ${BENCHMARK_ACTION}/dex" + ), + Maple( + maple="${MAPLE_ROOT}/../out/soong/host/linux-x86/bin/maple", + run=["dex2mpl"], + option={ + "dex2mpl": "-checktool -check-invoke -invoke-checklist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/classloaderInvocation.list -check-incomplete -incomplete-whitelist=${MAPLE_ROOT}/mrt/codetricks/compile/incomplete.list -incomplete-detail -staticstringcheck --inlinefunclist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/to_inline.list -dexcatch -litprofile=${MAPLE_ROOT}/mrt/codetricks/profile.pv/meta.list -output=${BENCHMARK_ACTION}/dex/ -mplt=${MAPLE_ROOT}/../out/soong/.intermediates/vendor/huawei/maple/Lib/core/libmaplecore-all/android_arm64_armv8-a_core_shared/obj/classes.mplt" + }, + global_option="", + infile="${BENCHMARK_ACTION}/dex/classes.dex" + ), + Maple( + maple="${MAPLE_ROOT}/../out/soong/host/linux-x86/bin/maple", + run=["mplipa"], + option={ + "mplipa": "--effectipa --quiet --inlinefunclist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/inline_funcs.list" + }, + global_option="", + infile="${BENCHMARK_ACTION}/dex/classes.mpl > /dev/null" + ), + Maple( + maple="${MAPLE_ROOT}/../out/soong/host/linux-x86/bin/maple", + run=["me", "mpl2mpl", "mplcg"], + option={ + "me": "--inlinefunclist=${MAPLE_ROOT}/mrt/codetricks/profile.pv/inline_funcs.list -O2 --quiet --no-ignoreipa", + "mpl2mpl": "-regnativefunc --quiet -O2 --usewhiteclass --maplelinker --dump-muid --check_cl_invocation=${MAPLE_ROOT}/mrt/codetricks/profile.pv/classloaderInvocation.list --regnative-dynamic-only", + "mplcg": "-O2 --quiet --no-pie --nativeopt --verbose-asm --gen-c-macro-def --maplelinker --gsrc 
--duplicate_asm_list2=${MAPLE_ROOT}/mrt/compiler-rt/src/arch/arm64/fastFuncs.S --fPIC" + }, + global_option="--genVtableImpl", + infile="${BENCHMARK_ACTION}/dex/classes.mpl" + ), + Shell( + "${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin/clang -target aarch64-linux-android -g -c -x assembler-with-cpp -D__ASSEMBLY__ -DUSE_32BIT_REF -MD -MF ${BENCHMARK_ACTION}/dex/classes.d -o ${BENCHMARK_ACTION}/dex/classes.o ${BENCHMARK_ACTION}/dex/classes.VtableImpl.s" + ), + Shell( + "${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin/llvm-objcopy --rename-section .debug_info=.maple_java_debug_info --rename-section .debug_abbrev=.maple_java_debug_abbrev --rename-section .debug_line=.maple_java_debug_line --rename-section .debug_aranges=.maple_java_debug_aranges --rename-section .debug_ranges=.maple_java_debug_ranges ${BENCHMARK_ACTION}/dex/classes.o" + ), + Shell( + "${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin/clang++ -nostdlib -Wl,-soname,libmaple${BENCHMARK_ACTION}.so -Wl,--gc-sections -shared ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libc/crtbegin_so/android_arm64_armv8-a_core/crtbegin_so.o ${BENCHMARK_ACTION}/dex/classes.o -Wl,--whole-archive ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/mrt_module_init_intermediates/mrt_module_init.a -Wl,--no-whole-archive ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/libclang_rt.ubsan_minimal-aarch64-android_intermediates/libclang_rt.ubsan_minimal-aarch64-android.a ${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/lib64/clang/9.0.3/lib/linux//libclang_rt.builtins-aarch64-android.a ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/libatomic_intermediates/libatomic.a ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/STATIC_LIBRARIES/libgcc_intermediates/libgcc.a -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--build-id=md5 -Wl,--warn-shared-textrel -Wl,--fatal-warnings -Wl,--no-undefined-version 
-Wl,--exclude-libs,libgcc.a -Wl,--exclude-libs,libgcc_stripped.a -fuse-ld=lld -Wl,--hash-style=gnu -Wl,--icf=safe -Wl,-z,max-page-size=4096 -target aarch64-linux-android -B${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/aarch64-linux-android/bin -Wl,-T,${MAPLE_ROOT}/mrt/maplert/linker/maplelld.so.lds -Wl,-execute-only -Wl,--exclude-libs,libclang_rt.ubsan_minimal-aarch64-android.a -Wl,--no-undefined ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libmaplecore-all_intermediates/libmaplecore-all.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libmrt_intermediates/libmrt.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libcommon_bridge_intermediates/libcommon_bridge.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libc++_intermediates/libc++.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libc_intermediates/libc.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libm_intermediates/libm.so ${MAPLE_ROOT}/../out/target/product/generic_a15/obj/SHARED_LIBRARIES/libdl_intermediates/libdl.so -o ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}Symbol.so ${MAPLE_ROOT}/../out/soong/.intermediates/bionic/libc/crtend_so/android_arm64_armv8-a_core/obj/bionic/libc/arch-common/bionic/crtend_so.o" + ), + Shell( + "CLANG_BIN=${MAPLE_ROOT}/../prebuilts/clang/host/linux-x86/clang-r353983c/bin CROSS_COMPILE=${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android- XZ=${MAPLE_ROOT}/../prebuilts/build-tools/linux-x86/bin/xz ${MAPLE_ROOT}/../build/soong/scripts/strip.sh -i ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}Symbol.so -o ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so -d ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.d --keep-mini-debug-info" + ), + Shell( + 
"(${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android-readelf -d ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so | grep SONAME || echo \"No SONAME for ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so\") > ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc.tmp;" + "${MAPLE_ROOT}/../prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/aarch64-linux-android-readelf --dyn-syms ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so | awk '{$2=\"\"; $3=\"\"; print}' >> ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc.tmp;" + "mv ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc.tmp ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so.toc;" + "cp ${BENCHMARK_ACTION}/dex/libmaple${BENCHMARK_ACTION}.so ${BENCHMARK_ACTION}" + ), + ], + "native_compile": [ + BenchmarkNative() + ] +} diff --git a/testsuite/driver/src/mode/REFINECATCH.py b/testsuite/driver/src/mode/REFINECATCH.py new file mode 100644 index 0000000000000000000000000000000000000000..a7160d71eb004c3e7e6e3738f73c931f75b9b80a --- /dev/null +++ b/testsuite/driver/src/mode/REFINECATCH.py @@ -0,0 +1,29 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +REFINECATCH = { + "compile": [ + Smali2dex( + file=["${APP}.smali"] + ), + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/maple_arm64/lib/codetricks/profile.pv/meta.list -refine-catch -dumpdataflow -func=${FUNC}", + infile="${APP}.dex", + redirection="dex2mpl.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/SELF.py b/testsuite/driver/src/mode/SELF.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b577abaf553e74762852c90c147255dc1488b1 --- /dev/null +++ b/testsuite/driver/src/mode/SELF.py @@ -0,0 +1,17 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +SELF = {} \ No newline at end of file diff --git a/testsuite/driver/src/mode/STATICSTRING.py b/testsuite/driver/src/mode/STATICSTRING.py new file mode 100644 index 0000000000000000000000000000000000000000..36353211ee4e56525fac117df8523a0196999584 --- /dev/null +++ b/testsuite/driver/src/mode/STATICSTRING.py @@ -0,0 +1,34 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +STATICSTRING = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/maple_arm64/lib/codetricks/profile.pv/meta.list -staticstringcheck", + infile="${APP}.dex" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/STATICSTRINGWRONG.py b/testsuite/driver/src/mode/STATICSTRINGWRONG.py new file mode 100644 index 0000000000000000000000000000000000000000..6f7c13fe82a8bfae8ef0c2504cb7720613cefa83 --- /dev/null +++ b/testsuite/driver/src/mode/STATICSTRINGWRONG.py @@ -0,0 +1,40 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +STATICSTRINGWRONG = { + "compile": [ + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="${APP}.jar" + ), + Dex2mpl( + dex2mpl="${OUT_ROOT}/target/product/maple_arm64/bin/dex2mpl", + option="--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-O2/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/maple_arm64/lib/codetricks/profile.pv/meta.list -staticstringcheck", + infile="${APP}.dex", + redirection="dex2mpl.log", + return_value_list=[] + ), + CheckRegContain( + reg="@HiLogConstString Error Usage Occured!!!", + file="dex2mpl.log" + ), + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/TGCO0.py b/testsuite/driver/src/mode/TGCO0.py new file mode 100644 index 0000000000000000000000000000000000000000..622a85f83358751ddf16d276bf87e48aa7ecf9d4 --- /dev/null +++ b/testsuite/driver/src/mode/TGCO0.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +TGCO0 = { + "compile": [ + Shell( + "adb shell \"mkdir -p /data/maple/${CASE}/${OPT}\"" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Shell( + "adb push ${APP}.dex /data/maple/${CASE}/${OPT}/" + ), + Shell( + "adb shell \"/data/maple/maple -O0 --gconly --save-temps --hir2mpl-opt=\\\"-Xbootclasspath /apex/com.android.runtime/javalib/core-oj.jar,/apex/com.android.runtime/javalib/core-libart.jar\\\" --mplcg-opt=\\\"--no-ebo --no-cfgo --no-schedule\\\" --infile /data/maple/${CASE}/${OPT}/${APP}.dex\"" + ), + Shell( + "adb pull /data/maple/${CASE}/${OPT}/${APP}.VtableImpl.s ./" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ -O2 -x assembler-with-cpp -march=armv8-a -DUSE_32BIT_REF -c ${APP}.VtableImpl.s" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ ${APP}.VtableImpl.o -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv8-a -fPIC -shared ${MAPLE_ROOT}/out/target/product/maple_arm64/lib/mrt_module_init.cpp -fuse-ld=lld -rdynamic -L${MAPLE_ROOT}/out/target/product/maple_arm64/lib/android -lmaplecore-all -lcommon_bridge -lc++ -lc -lm -ldl -Wl,-T${MAPLE_ROOT}/out/target/product/public/lib/linker/maplelld.so.lds -o ./${APP}.so" + ), + Shell( + "adb push ${APP}.so /data/maple/${CASE}/${OPT}/" + ) + ], + "run": [ + Shell( + "adb shell \"export LD_LIBRARY_PATH=/vendor/lib64:/system/lib64:/data/maple;mplsh -Xgconly -cp /data/maple/${CASE}/${OPT}/${APP}.so ${APP}\" > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + 
file2="expected.txt" + ), + Shell( + "adb shell \"rm -rf /data/maple/${CASE}/${OPT}\"" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/TGCO2.py b/testsuite/driver/src/mode/TGCO2.py new file mode 100644 index 0000000000000000000000000000000000000000..5974235117a2b41a454cb3e4f6e6ec83abd5057b --- /dev/null +++ b/testsuite/driver/src/mode/TGCO2.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +TGCO2 = { + "compile": [ + Shell( + "adb shell \"mkdir -p /data/maple/${CASE}/${OPT}\"" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Shell( + "adb push ${APP}.dex /data/maple/${CASE}/${OPT}/" + ), + Shell( + "adb shell \"/data/maple/maple -O2 --gconly --save-temps --hir2mpl-opt=\\\"-Xbootclasspath /apex/com.android.runtime/javalib/core-oj.jar,/apex/com.android.runtime/javalib/core-libart.jar\\\" --mplcg-opt=\\\"--no-ebo --no-cfgo --no-schedule\\\" --infile /data/maple/${CASE}/${OPT}/${APP}.dex\"" + ), + Shell( + "adb pull /data/maple/${CASE}/${OPT}/${APP}.VtableImpl.s ./" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ -O2 -x assembler-with-cpp -march=armv8-a -DUSE_32BIT_REF -c 
${APP}.VtableImpl.s" + ), + Shell( + "${MAPLE_ROOT}/zeiss/prebuilt/sdk/android-ndk-r20b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android29-clang++ ${APP}.VtableImpl.o -O2 -Wall -Werror -Wno-unused-command-line-argument -fstack-protector-strong -std=c++14 -nostdlibinc -march=armv8-a -fPIC -shared ${MAPLE_ROOT}/out/target/product/maple_arm64/lib/mrt_module_init.cpp -fuse-ld=lld -rdynamic -L${MAPLE_ROOT}/out/target/product/maple_arm64/lib/android -lmaplecore-all -lcommon_bridge -lc++ -lc -lm -ldl -Wl,-T${MAPLE_ROOT}/out/target/product/public/lib/linker/maplelld.so.lds -o ./${APP}.so" + ), + Shell( + "adb push ${APP}.so /data/maple/${CASE}/${OPT}/" + ) + ], + "run": [ + Shell( + "adb shell \"export LD_LIBRARY_PATH=/vendor/lib64:/system/lib64:/data/maple;mplsh -Xgconly -cp /data/maple/${CASE}/${OPT}/${APP}.so ${APP}\" > output.log 2>&1" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Shell( + "adb shell \"rm -rf /data/maple/${CASE}/${OPT}\"" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/TIME.py b/testsuite/driver/src/mode/TIME.py new file mode 100644 index 0000000000000000000000000000000000000000..b63c5da2689f3423011538161a17d91ffc9b7146 --- /dev/null +++ b/testsuite/driver/src/mode/TIME.py @@ -0,0 +1,107 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +TIME = { + "compile": [ + Shell( + "OUTDIR=output;" + "mkdir -p ${OUTDIR}" + ), + Shell( + "num=1;" + "TIMES=5;" + "cpu_list=(0 1 2 3 4 5 6 7);" + "cpu_governor=\'userspace\';" + "offline_cpu_list=(4 5 6 7);" + "enable_cpu_list=(0 1 2 3);" + "cpu_freq=1805000" + ), + Shell( + "for offline_cpu in ${offline_cpu_list[@]}; do" + " adb shell \"echo 0 > /sys/devices/system/cpu/cpu${offline_cpu}/online\";" + "done" + ), + Shell( + "for enable_cpu in ${enable_cpu_list[@]}; do" + " adb shell \"echo $cpu_freq > /sys/devices/system/cpu/cpu${enable_cpu}/cpufreq/scaling_max_freq\";" + " adb shell \"echo $cpu_freq > /sys/devices/system/cpu/cpu${enable_cpu}/cpufreq/scaling_min_freq\";" + " adb shell \"echo $cpu_governor > /sys/devices/system/cpu/cpu${enable_cpu}/cpufreq/scaling_governor\";" + " adb shell \"echo $cpu_freq > /sys/devices/system/cpu/cpu${enable_cpu}/cpufreq/scaling_setspeed\";" + "done" + ), + Shell( + "adb shell stop;" + "sleep 20s" + ), + Shell( + "while true; do" + " if [ ${num} -gt ${TIMES} ]; then" + " break;" + " fi" + ), + Shell( + "adb shell \"mkdir -p /data/maple/${CASE}/${OPT}\"" + ), + Shell( + "adb push ${APP}.dex /data/maple/${CASE}/${OPT}/" + ), + Shell( + "adb shell \"(time /data/maple/maple --gconly -O2 --no-with-ipa /data/maple/${CASE}/${OPT}/${APP}.dex --hir2mpl-opt=\\\"-dump-time -no-mpl-file -Xbootclasspath 
/data/maple/core-oj.jar,/data/maple/core-libart.jar,/data/maple/bouncycastle.jar,/data/maple/apache-xml.jar,/data/maple/framework.jar,/data/maple/ext.jar,/data/maple/telephony-common.jar,/data/maple/voip-common.jar,/data/maple/ims-common.jar,/data/maple/android.test.base.jar,/data/maple/featurelayer-widget.jar,/data/maple/hwEmui.jar,/data/maple/hwPartBasicplatform.jar,/data/maple/telephony-separated.jar,/data/maple/hwTelephony-common.jar,/data/maple/hwPartTelephony.jar,/data/maple/hwPartTelephonyVSim.jar,/data/maple/hwPartTelephonyCust.jar,/data/maple/hwPartTelephonyTimezoneOpt.jar,/data/maple/hwPartTelephonyOpt.jar,/data/maple/hwPartSecurity.jar,/data/maple/hwIms-common.jar,/data/maple/hwPartMedia.jar,/data/maple/hwPartConnectivity.jar,/data/maple/hwPartPowerOffice.jar,/data/maple/hwPartDeviceVirtualization.jar,/data/maple/hwPartAirSharing.jar,/data/maple/hwPartDefaultDFR.jar,/data/maple/hwPartDFR.jar,/data/maple/hwPartMagicWindow.jar,/data/maple/hwframework.jar,/data/maple/com.huawei.nfc.jar,/data/maple/org.ifaa.android.manager.jar,/data/maple/hwaps.jar,/data/maple/servicehost.jar,/data/maple/hwcustIms-common.jar,/data/maple/hwcustTelephony-common.jar,/data/maple/hwIAwareAL.jar,/data/maple/conscrypt.jar,/data/maple/updatable-media.jar,/data/maple/okhttp.jar --java-staticfield-name=smart\\\" --mplcg-opt=\\\"--no-ico --no-cfgo --no-prepeep --no-ebo --no-storeloadopt --no-globalopt --no-schedule --no-proepilogue --no-peep --no-const-fold --no-lsra-hole --with-ra-linear-scan --no-prelsra --no-prespill --no-lsra-hole\\\" -time-phases)\" >& ${OUTDIR}/maple_${APP}_${num}.txt &" + ), + Shell( + "count=1;" + "mkdir -p ${OUTDIR}/mem_out;" + "while true; do" + " pid=`adb shell pidof maple`;" + " if [[ -z ${pid} ]]; then" + " echo \"compile ${APP} ${num} complete\";" + " break;" + " fi;" + " adb shell showmap ${pid} >> ${OUTDIR}/mem_out/mem_${count}.log;" + " ((count++));" + " sleep 0.5;" + "done" + ), + Shell( + "wait;" + "file_list=`ls ${OUTDIR}/mem_out | grep log | 
uniq`;" + "PSSMAX=0;" + "for file in ${file_list}; do" + " pss=`cat ${OUTDIR}/mem_out/$file | grep TOTAL | awk '{print $3+$9}'`;" + " if [[ ${pss} -ge ${PSSMAX} ]]; then" + " PSSMAX=${pss};" + " fi;" + "done" + ), + Shell( + "echo \"${PSSMAX}\" >> ${OUTDIR}/pss_max_${APP}.txt;" + "rm -rf ${OUTDIR}/mem_out" + ), + Shell( + " adb shell \"rm -rf /data/maple/${CASE}/${OPT}\";" + " num=`expr ${num} + 1`;" + "done" #end while + ), + Shell( + "adb shell start" + ) + ], + "checktime": [ + Shell( + "python3 ${OUT_ROOT}/target/product/public/bin/checker_compiler_time_ci.py -d ${APP} -n 5 -t ${CHECKTIME} -o ${OUTDIR}" #${TIMES} + ) + ] +} diff --git a/testsuite/driver/src/mode/ZRT.py b/testsuite/driver/src/mode/ZRT.py new file mode 100644 index 0000000000000000000000000000000000000000..ca7344656081bf1a9931d3cee3de24ef90fdfe07 --- /dev/null +++ b/testsuite/driver/src/mode/ZRT.py @@ -0,0 +1,115 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ZRT = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/rt.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT/libcore-all.mplt -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -blacklist-invoke=${OUT_ROOT}/target/product/maple_arm64/lib/invoke-black-dex.list", + "mplipa": "--quiet --effectipa", + "me": "-O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "-O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-ZRT", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + 
redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ZRTNATIVE.py b/testsuite/driver/src/mode/ZRTNATIVE.py new file mode 100644 index 0000000000000000000000000000000000000000..d8b8835db8f51b9c66156e8471e58d136887cbc9 --- /dev/null +++ b/testsuite/driver/src/mode/ZRTNATIVE.py @@ -0,0 +1,131 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ZRTNATIVE = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/rt.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl","mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT/libcore-all.mplt -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -blacklist-invoke=${OUT_ROOT}/target/product/maple_arm64/lib/invoke-black-dex.list", + "mplipa": "--quiet --effectipa", + "me": "-O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "-O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + 
global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-ZRT", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_REPORT_RC_LEAK": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "MAPLE_VERIFY_RC": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + 
CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ZRTNATIVEEH.py b/testsuite/driver/src/mode/ZRTNATIVEEH.py new file mode 100644 index 0000000000000000000000000000000000000000..1e00141f4e48677778464c525207251ac8479cb0 --- /dev/null +++ b/testsuite/driver/src/mode/ZRTNATIVEEH.py @@ -0,0 +1,77 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ZRTNATIVEEH = { + "compile": [ + NativeCompile( + mpldep=[ + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT", + "${OUT_ROOT}/target/product/public/lib/libnativehelper/include" + ], + infile="${NATIVE_SRC}", + model="arm64" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/rt.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl","mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT/libcore-all.mplt -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -blacklist-invoke=${OUT_ROOT}/target/product/maple_arm64/lib/invoke-black-dex.list", + "mplipa": "--quiet --effectipa", + "me": "-O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa 
--enable-ea", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "-O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-ZRT", + model="arm64", + infile="${APP}", + ) + ], + "run": [ + Mplsh( + env={ + "USE_OLD_STACK_SCAN": "1", + "JNI_TEST": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "./", + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ) + ] +} diff --git a/testsuite/driver/src/mode/ZRTRC.py b/testsuite/driver/src/mode/ZRTRC.py new file mode 100644 index 0000000000000000000000000000000000000000..3791636612d1a116686b8d43faa24c58101a9a39 --- /dev/null +++ b/testsuite/driver/src/mode/ZRTRC.py @@ -0,0 +1,78 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ZRTRC = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/rt.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT/libcore-all.mplt -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -blacklist-invoke=${OUT_ROOT}/target/product/maple_arm64/lib/invoke-black-dex.list", + "mplipa": "--quiet --effectipa", + "me": "-O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "-O2 --quiet --regnativefunc --no-nativeopt --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "-O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-ZRT", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + 
qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZRT", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ZTERP.py b/testsuite/driver/src/mode/ZTERP.py new file mode 100644 index 0000000000000000000000000000000000000000..1ce3355cd82b7c703d07f555cf5834840b759d0b --- /dev/null +++ b/testsuite/driver/src/mode/ZTERP.py @@ -0,0 +1,98 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ZTERP = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java","${EXTRA_JAVA_FILE}"] + ) + ], + "run": [ + Mplsh( + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] 
total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/ZTERPCLASSLOADER.py b/testsuite/driver/src/mode/ZTERPCLASSLOADER.py new file mode 100644 index 0000000000000000000000000000000000000000..f809cbd6cc32d6ff6500b2c6166e848bc13fef48 --- /dev/null +++ b/testsuite/driver/src/mode/ZTERPCLASSLOADER.py @@ -0,0 +1,365 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ZTERPCLASSLOADER = { + "java2dex": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "compile": [ + Shell( + 'cp ../lib/child.jar ./' + ), + Shell( + 'cp ../lib/parent.jar ./' + ), + Shell( + 'cp ../lib/inject.jar ./' + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="child.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="child.dex" + ), + Linker( + lib="host-x86_64-ZTERP", + model="arm64", + infile="child" + ), + Jar2dex( + jar_file=[ + 
"${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="parent.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="parent.dex" + ), + Linker( + lib="host-x86_64-ZTERP", + model="arm64", + infile="parent" + ), + Jar2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/framework_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/services_intermediates/classes.jar" + ], + infile="inject.jar" + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + 
option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + global_option="--save-temps", + infile="inject.dex" + ), + Linker( + lib="host-x86_64-ZTERP", + model="arm64", + infile="inject" + ), + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": "--mplt ${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP/libcore-all.mplt -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list ", + "me": "", + "mpl2mpl": "--quiet --regnativefunc --maplelinker --FastNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/fastNative.list --CriticalNative=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/criticalNative.list --nativefunc-property-list=${OUT_ROOT}/target/product/public/lib/codetricks/native_binding/native_func_property.list", + "mplcg": "--quiet --no-pie --verbose-asm --maplelinker --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/arch/arm64/duplicateFunc.s --fPIC" + }, + 
global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-ZTERP", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + 
file="rcverify.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)' + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)', + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)', + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "APP_SPECIFY_CLASSPATH": '$(echo ${APP}.so|cut -d "=" -f 2)', + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + 
mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1", + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.so", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects 
weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ZTERPDEXSO.py b/testsuite/driver/src/mode/ZTERPDEXSO.py new file mode 100644 index 0000000000000000000000000000000000000000..927068e9be63141208b902699cce3958242d126f --- /dev/null +++ b/testsuite/driver/src/mode/ZTERPDEXSO.py @@ -0,0 +1,142 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +from api import * + +ZTERPDEXSO = { + "java2dex": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "java2dex_simplejava": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"], + usesimplejava=True + ) + ], + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ), + Maple( + maple="${OUT_ROOT}/target/product/maple_arm64/bin/maple", + run=["dex2mpl", "mplipa", "me", "mpl2mpl", "mplcg"], + option={ + "dex2mpl": 
"-mplt=${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP/libcore-all.mplt -dexcatch -inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/to_inline.list -j=16 -j100 -litprofile=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/meta.list -refine-catch -staticstringcheck", + "mplipa": "--effectipa --quiet", + "me": "--O2 --quiet --inlinefunclist=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/inline_funcs.list --no-nativeopt --no-ignoreipa --enable-ea", + "mpl2mpl": "--O2 --quiet --regnativefunc --no-nativeopt --maplelinker --maplelinker-nolocal --dump-muid --check_cl_invocation=${OUT_ROOT}/target/product/public/lib/codetricks/profile.pv/classloaderInvocation.list --emitVtableImpl", + "mplcg": "--O2 --quiet --no-pie --verbose-asm --fPIC --gen-c-macro-def --duplicate_asm_list=${OUT_ROOT}/target/product/public/lib/codetricks/asm/duplicateFunc.s --maplelinker --gsrc --nativeopt --replaceasm" + }, + global_option="--save-temps", + infile="${APP}.dex" + ), + Linker( + lib="host-x86_64-ZTERP", + model="arm64", + infile="${APP}" + ) + ], + "run": [ + Mplsh( + env={ + "USE_ZTERP": "true" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="output.log" + ), + CheckFileEqual( + file1="output.log", + file2="expected.txt" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_REPORT_RC_LEAK": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + 
garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="leak.log" + ), + CheckRegContain( + reg="Total none-cycle root objects 0", + file="leak.log" + ), + Mplsh( + env={ + "USE_ZTERP": "true", + "MAPLE_VERIFY_RC": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${CP}", + redirection="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential early release", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects potential leak", + file="rcverify.log" + ), + CheckRegContain( + reg="[MS] [RC Verify] total 0 objects weak rc are wrong", + file="rcverify.log" + ) + ] +} diff --git a/testsuite/driver/src/mode/ZTERPRC.py b/testsuite/driver/src/mode/ZTERPRC.py new file mode 100644 index 0000000000000000000000000000000000000000..3bbc98786b9e7564a4d1b40433647c2b3c53823f --- /dev/null +++ b/testsuite/driver/src/mode/ZTERPRC.py @@ -0,0 +1,61 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +from api import * + +ZTERPRC = { + "compile": [ + Java2dex( + jar_file=[ + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar", + "${OUT_ROOT}/target/product/public/third-party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar" + ], + outfile="${APP}.dex", + infile=["${APP}.java"] + ) + ], + "run": [ + Mplsh( + env={ + "MAPLE_REPORT_RC_LEAK": "1", + "PATTERN_FROM_BACKUP_TRACING": "1" + }, + qemu="/usr/bin/qemu-aarch64", + qemu_libc="/usr/aarch64-linux-gnu", + qemu_ld_lib=[ + "${OUT_ROOT}/target/product/maple_arm64/third-party", + "${OUT_ROOT}/target/product/maple_arm64/lib/host-x86_64-ZTERP", + "./" + ], + mplsh="${OUT_ROOT}/target/product/maple_arm64/bin/mplsh", + garbage_collection_kind="RC", + xbootclasspath="libcore-all.so", + infile="${APP}.dex", + redirection="cycle.log" + ), + CheckRegContain( + reg="ExpectResult", + file="cycle.log" + ), + CheckRegContain( + reg="Total Leak Count 0", + file="cycle.log" + ), + CheckRegContain( + choice="num", + reg="ExpectResult", + file="cycle.log" + ) + ] +} \ No newline at end of file diff --git a/testsuite/driver/src/mode/__init__.py b/testsuite/driver/src/mode/__init__.py index 4784970c2c08e3d28962337eb9eed451c6d888ac..b18e0d9c3d2147a0eb8b738444eaaa730a3ae5b7 100644 --- a/testsuite/driver/src/mode/__init__.py +++ b/testsuite/driver/src/mode/__init__.py @@ -23,4 +23,4 @@ for py in os.listdir(my_dir): if py.endswith('.py'): name = py[:-3] mode = __import__(__name__, globals(), locals(), ['%s' % name]) - mode_dict[name] = getattr(getattr(mode, name), name) \ No newline at end of file + mode_dict[name] = getattr(getattr(mode, name), name)