diff --git a/src/mapleall/maple_be/include/be/lower.h b/src/mapleall/maple_be/include/be/lower.h index 3a6157db2e16c4448259d97fcc538355549f4deb..98c6e5d7867e3541ac06a67100df6b7132860098 100644 --- a/src/mapleall/maple_be/include/be/lower.h +++ b/src/mapleall/maple_be/include/be/lower.h @@ -185,7 +185,7 @@ class CGLowerer { void LowerTryCatchBlocks(BlockNode &body); #if TARGARM32 || TARGAARCH64 || TARGRISCV64 - BlockNode *LowerReturnStruct(NaryStmtNode &retNode); + BlockNode *LowerReturnStructUsingFakeParm(NaryStmtNode &retNode); #endif virtual BlockNode *LowerReturn(NaryStmtNode &retNode); void LowerEntry(MIRFunction &func); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h index 8a8fc1dae78de1988f99dc5d96da36a5a7d23d9b..76232802ec40dc2a571a5b40320b9acac6c5b584 100644 --- a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -38,6 +38,7 @@ class LmbcArgInfo { MapleVector lmbcCallArgTypes; MapleVector lmbcCallArgOffsets; MapleVector lmbcCallArgNumOfRegs; // # of regs needed to complete struct + uint32 lmbcTotalStkUsed = -1; // TBD: remove when explicit addr for large agg is available }; class AArch64CGFunc : public CGFunc { @@ -101,7 +102,7 @@ class AArch64CGFunc : public CGFunc { return kRFLAG; } - MIRType *GetAggTyFromCallSite(StmtNode *stmt); + MIRType *LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector **parmList); RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); @@ -125,9 +126,11 @@ class AArch64CGFunc : public CGFunc { bool needLow12 = false); MemOperand *FixLargeMemOpnd(MemOperand &memOpnd, uint32 align); MemOperand *FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx); + uint32 LmbcFindTotalStkUsed(std::vector* paramList); + uint32 LmbcTotalRegsUsed(); void LmbcSelectParmList(ListOperand *srcOpnds, bool isArgReturn); bool LmbcSmallAggForRet(BlkassignoffNode &bNode, Operand *src); - bool LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src); + bool LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src, std::vector **parmList); void SelectAggDassign(DassignNode &stmt) override; void SelectIassign(IassignNode &stmt) override; void SelectIassignoff(IassignoffNode &stmt) override; @@ -135,6 +138,7 @@ class AArch64CGFunc : public CGFunc { void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override; void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) override; void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) override; + void SelectReturnSendOfStructInRegs(BaseNode *x) override; void SelectReturn(Operand *opnd0) override; void SelectIgoto(Operand *opnd0) override; void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; @@ -485,7 +489,10 @@ class AArch64CGFunc : public CGFunc { void GenerateCleanupCodeForExtEpilog(BB &bb) override; uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) override; void AssignLmbcFormalParams() override; - RegOperand *GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType); + void LmbcGenSaveSpForAlloca() override; + MemOperand *GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AArch64reg base = RFP); + RegOperand *GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType, AArch64reg baseRegno = RFP); + RegOperand *LmbcStructReturnLoad(int32 
offset); Operand *GetBaseReg(const AArch64SymbolAlloc &symAlloc); int32 GetBaseOffset(const SymbolAlloc &symAlloc) override; @@ -725,6 +732,22 @@ class AArch64CGFunc : public CGFunc { return lmbcArgInfo->lmbcCallArgNumOfRegs; } + int32 GetLmbcTotalStkUsed() { + return lmbcArgInfo->lmbcTotalStkUsed; + } + + void SetLmbcTotalStkUsed(int32 offset) { + lmbcArgInfo->lmbcTotalStkUsed = offset; + } + + void SetLmbcCallReturnType(MIRType *ty) { + lmbcCallReturnType = ty; + } + + MIRType *GetLmbcCallReturnType() { + return lmbcCallReturnType; + } + private: enum RelationOperator : uint8 { kAND, @@ -792,6 +815,7 @@ class AArch64CGFunc : public CGFunc { regno_t methodHandleVreg = -1; uint32 alignPow = 5; /* function align pow defaults to 5 i.e. 2^5*/ LmbcArgInfo *lmbcArgInfo = nullptr; + MIRType *lmbcCallReturnType = nullptr; void SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, AArch64isa::MemoryOrdering memOrd, bool isDirect); diff --git a/src/mapleall/maple_be/include/cg/call_conv.h b/src/mapleall/maple_be/include/cg/call_conv.h index d7a194022c5d054baef0b5c74807e86a186514fa..4310cd986a5315abae655d17ecf5cdfec50cf66e 100644 --- a/src/mapleall/maple_be/include/cg/call_conv.h +++ b/src/mapleall/maple_be/include/cg/call_conv.h @@ -63,8 +63,8 @@ struct CCLocInfo { class LmbcFormalParamInfo { public: LmbcFormalParamInfo(PrimType pType, uint32 ofst, uint32 sz) : - type(nullptr), primType(pType), offset(ofst), size(sz), regNO(0), vregNO(0), numRegs(0), - fpSize(0), isReturn(false), isPureFloat(false), isOnStack(false) {} + type(nullptr), primType(pType), offset(ofst), onStackOffset(0), size(sz), regNO(0), vregNO(0), numRegs(0), + fpSize(0), isReturn(false), isPureFloat(false), isOnStack(false), hasRegassign(false) {} ~LmbcFormalParamInfo() = default; @@ -86,6 +86,12 @@ class LmbcFormalParamInfo { void SetOffset(uint32 ofs) { offset = ofs; } + uint32 GetOnStackOffset() { + return onStackOffset; + } + void SetOnStackOffset(uint32 ofs) { + onStackOffset = ofs; + } uint32 GetSize() { return size; } @@ -137,10 +143,17 @@ class LmbcFormalParamInfo { void SetIsOnStack() { isOnStack = true; } + bool HasRegassign() { + return hasRegassign; + } + void SetHasRegassign() { + hasRegassign = true; + } private: MIRStructType *type; PrimType primType; uint32 offset; + uint32 onStackOffset; /* stack location if isOnStack */ uint32 size; /* size primtype or struct */ regno_t regNO = 0; /* param reg num or starting reg num if numRegs > 0 */ regno_t vregNO = 0; /* if no explicit regassing from IR, create move from param reg */ @@ -148,7 +161,8 @@ class LmbcFormalParamInfo { uint32 fpSize = 0; /* size of fp param if isPureFloat */ bool isReturn; bool isPureFloat = false; - bool isOnStack; /* small struct with arrays need to be saved onto stack */ + bool isOnStack; /* large struct is passed by a copy on stack */ + bool hasRegassign; }; } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/cg.h b/src/mapleall/maple_be/include/cg/cg.h index 093289e42bb2f0fef39dfe608b3fc285ed5e8494..e1368acb1d885216888b82c680ccaf96b325b211 100644 --- a/src/mapleall/maple_be/include/cg/cg.h +++ b/src/mapleall/maple_be/include/cg/cg.h @@ -121,11 +121,13 @@ class CG { emitter(nullptr), labelOrderCnt(0), cgOption(cgOptions), - instrumentationFunction(nullptr) { + instrumentationFunction(nullptr), + fileGP(nullptr) { const std::string &internalNameLiteral = namemangler::GetInternalNameLiteral(namemangler::kJavaLangObjectStr); GStrIdx strIdxFromName = 
GlobalTables::GetStrTable().GetStrIdxFromName(internalNameLiteral); isLibcore = (GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdxFromName) != nullptr); DefineDebugTraceFunctions(); + isLmbc = (mirModule->GetFlavor() == MIRFlavor::kFlavorLmbc); } virtual ~CG(); @@ -361,6 +363,10 @@ class CG { return isLibcore; } + bool IsLmbc() const { + return isLmbc; + } + MIRSymbol *GetDebugTraceEnterFunction() { return dbgTraceEnter; } @@ -421,6 +427,13 @@ class CG { /* Object map generation helper */ std::vector GetReferenceOffsets64(const BECommon &beCommon, MIRStructType &structType); + void SetGP(MIRSymbol *sym) { + fileGP = sym; + } + MIRSymbol *GetGP() const { + return fileGP; + } + static bool IsInFuncWrapLabels(MIRFunction *func) { return funcWrapLabels.find(func) != funcWrapLabels.end(); } @@ -449,12 +462,14 @@ class CG { LabelIDOrder labelOrderCnt; static CGFunc *currentCGFunction; /* current cg function being compiled */ CGOptions cgOption; - bool isLibcore; MIRSymbol *instrumentationFunction; MIRSymbol *dbgTraceEnter; MIRSymbol *dbgTraceExit; MIRSymbol *dbgFuncProfile; + MIRSymbol *fileGP; /* for lmbc, one local %GP per file */ static std::map> funcWrapLabels; + bool isLibcore; + bool isLmbc; }; /* class CG */ } /* namespace maplebe */ diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h index 1994ece015dfd87005faaca556214a5d5d983d8d..c1b6e7cb296cfc8ffb743734a7335eb47885f2c6 100644 --- a/src/mapleall/maple_be/include/cg/cgfunc.h +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -160,6 +160,7 @@ class CGFunc { virtual uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) = 0; virtual void AssignLmbcFormalParams() = 0; LmbcFormalParamInfo *GetLmbcFormalParamInfo(uint32 offset); + virtual void LmbcGenSaveSpForAlloca() = 0; void GenerateLoc(StmtNode *stmt, unsigned &lastSrcLoc, unsigned &lastMplLoc); int32 GetFreqFromStmt(uint32 stmtId); void GenerateInstruction(); @@ -201,6 +202,7 @@ class CGFunc { virtual void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) = 0; virtual void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) = 0; virtual void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) = 0; + virtual void SelectReturnSendOfStructInRegs(BaseNode *x) = 0; virtual void SelectReturn(Operand *opnd) = 0; virtual void SelectIgoto(Operand *opnd0) = 0; virtual void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) = 0; diff --git a/src/mapleall/maple_be/src/be/lower.cpp b/src/mapleall/maple_be/src/be/lower.cpp index f586b5c8a0522c21c23539c75378e7d7378a45f8..31fb159bb171614619414df185029a7c9a70a9f2 100644 --- a/src/mapleall/maple_be/src/be/lower.cpp +++ b/src/mapleall/maple_be/src/be/lower.cpp @@ -121,6 +121,7 @@ void CGLowerer::RegisterExternalLibraryFunctions() { func->AllocSymTab(); MIRSymbol *funcSym = func->GetFuncSymbol(); funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); /* return type */ MIRType *retTy = GlobalTables::GetTypeTable().GetPrimType(extFnDescrs[i].retType); @@ -876,7 +877,7 @@ void CGLowerer::LowerTypePtr(BaseNode &node) const { #if TARGARM32 || TARGAARCH64 || TARGRISCV64 -BlockNode *CGLowerer::LowerReturnStruct(NaryStmtNode &retNode) { +BlockNode *CGLowerer::LowerReturnStructUsingFakeParm(NaryStmtNode &retNode) { BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); for (size_t i = 0; i < retNode.GetNopndSize(); ++i) { retNode.SetOpnd(LowerExpr(retNode, *retNode.GetNopndAt(i), *blk), i); @@ -1100,7 +1101,7 @@ void 
CGLowerer::LowerCallStmt(StmtNode &stmt, StmtNode *&nextStmt, BlockNode &ne return; } - if ((newStmt->GetOpCode() == OP_call) || (newStmt->GetOpCode() == OP_icall)) { + if (newStmt->GetOpCode() == OP_call || newStmt->GetOpCode() == OP_icall || newStmt->GetOpCode() == OP_icallproto) { newStmt = LowerCall(static_cast(*newStmt), nextStmt, newBlk, retty, uselvar); } newStmt->SetSrcPos(stmt.GetSrcPos()); @@ -1171,7 +1172,12 @@ StmtNode *CGLowerer::GenIntrinsiccallNode(const StmtNode &stmt, PUIdx &funcCalle StmtNode *CGLowerer::GenIcallNode(PUIdx &funcCalled, IcallNode &origCall) { StmtNode *newCall = nullptr; - newCall = mirModule.GetMIRBuilder()->CreateStmtIcall(origCall.GetNopnd()); + if (origCall.GetOpCode() == OP_icallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtIcall(origCall.GetNopnd()); + } else { + newCall = mirModule.GetMIRBuilder()->CreateStmtIcallproto(origCall.GetNopnd()); + static_cast(newCall)->SetRetTyIdx(static_cast(origCall).GetRetTyIdx()); + } newCall->SetSrcPos(origCall.GetSrcPos()); CHECK_FATAL(newCall != nullptr, "nullptr is not expected"); funcCalled = kFuncNotFound; @@ -1372,6 +1378,7 @@ BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { static_cast(newCall)->SetReturnVec(*p2nRets); break; } + case OP_icallprotoassigned: case OP_icallassigned: { auto &origCall = static_cast(stmt); newCall = GenIcallNode(funcCalled, origCall); @@ -1479,6 +1486,15 @@ bool CGLowerer::LowerStructReturn(BlockNode &newBlk, StmtNode *stmt, (*p2nrets)[0].first = dnodeStmt->GetStIdx(); (*p2nrets)[0].second.SetFieldID(dnodeStmt->GetFieldID()); lvar = true; + // set ATTR_firstarg_return for callee + if (stmt->GetOpCode() == OP_callassigned) { + CallNode *callNode = static_cast(stmt); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + f->SetFirstArgReturn(); + f->GetMIRFuncType()->SetFirstArgReturn(); + } else { + // for icall, front-end already set ATTR_firstarg_return + } } else { /* struct <= 16 passed in regs lowered into call &foo regassign u64 %1 (regread u64 %%retval0) @@ -1496,13 +1512,19 @@ bool CGLowerer::LowerStructReturn(BlockNode &newBlk, StmtNode *stmt, CallNode *callStmt = mirModule.GetMIRBuilder()->CreateStmtCall(callNode->GetPUIdx(), callNode->GetNopnd()); callStmt->SetSrcPos(callNode->GetSrcPos()); newBlk.AddStatement(callStmt); - } else if (stmt->GetOpCode() == OP_icallassigned) { + } else if (stmt->GetOpCode() == OP_icallassigned || stmt->GetOpCode() == OP_icallprotoassigned) { auto *icallNode = static_cast(stmt); for (size_t i = 0; i < icallNode->GetNopndSize(); ++i) { BaseNode *newOpnd = LowerExpr(*icallNode, *icallNode->GetNopndAt(i), newBlk); icallNode->SetOpnd(newOpnd, i); } - IcallNode *icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcall(icallNode->GetNopnd()); + IcallNode *icallStmt = nullptr; + if (stmt->GetOpCode() == OP_icallassigned) { + icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcall(icallNode->GetNopnd()); + } else { + icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcallproto(icallNode->GetNopnd()); + icallStmt->SetRetTyIdx(icallNode->GetRetTyIdx()); + } icallStmt->SetSrcPos(icallNode->GetSrcPos()); newBlk.AddStatement(icallStmt); } else { @@ -1767,6 +1789,7 @@ void CGLowerer::LowerAssertBoundary(StmtNode &stmt, BlockNode &block, BlockNode CondGotoNode *brFalseNode = mirBuilder->CreateStmtCondGoto(cond, OP_brfalse, labIdx); MIRFunction *printf = mirBuilder->GetOrCreateFunction("printf", TyIdx(PTY_i32)); + printf->GetFuncSymbol()->SetAppearsInCode(true); 
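// Editor's illustrative sketch (hypothetical types, not part of this patch): why the
// OP_icallproto lowering above keeps the callee prototype on the new node. For an
// indirect call, the caller can only set up the AArch64 aggregate-return protocol if it
// knows the function type at the call site: a >16-byte aggregate comes back through a
// hidden result pointer the caller must allocate and pass, while a <=16-byte aggregate
// comes back in x0/x1.
#include <cstdint>
struct SmallAgg { uint64_t a, b; };     // 16 bytes: returned in x0/x1
struct LargeAgg { uint64_t a, b, c; };  // 24 bytes: returned via hidden pointer (x8)
using SmallFn = SmallAgg (*)(int);
using LargeFn = LargeAgg (*)(int);
SmallAgg CallSmall(SmallFn fn) { return fn(1); }  // register return, no buffer needed
LargeAgg CallLarge(LargeFn fn) { return fn(1); }  // caller allocates result storage and
                                                  // passes its address to fn
// (end of editorial sketch; patch content continues)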
beCommon.UpdateTypeTable(*printf->GetMIRFuncType()); MapleVector argsPrintf(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); @@ -1842,7 +1865,8 @@ BlockNode *CGLowerer::LowerBlock(BlockNode &block) { break; } case OP_callassigned: - case OP_icallassigned: { + case OP_icallassigned: + case OP_icallprotoassigned: { // pass the addr of lvar if this is a struct call assignment bool lvar = false; // nextStmt could be changed by the call to LowerStructReturn @@ -1862,6 +1886,7 @@ BlockNode *CGLowerer::LowerBlock(BlockNode &block) { case OP_intrinsiccall: case OP_call: case OP_icall: + case OP_icallproto: #if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 // nextStmt could be changed by the call to LowerStructReturn LowerCallStmt(*stmt, nextStmt, *newBlk); @@ -1871,8 +1896,8 @@ BlockNode *CGLowerer::LowerBlock(BlockNode &block) { break; case OP_return: { #if TARGARM32 || TARGAARCH64 || TARGRISCV64 - if (GetCurrentFunc()->IsReturnStruct()) { - newBlk->AppendStatementsFromBlock(*LowerReturnStruct(static_cast(*stmt))); + if (GetCurrentFunc()->IsFirstArgReturn() && stmt->NumOpnds() > 0) { + newBlk->AppendStatementsFromBlock(*LowerReturnStructUsingFakeParm(static_cast(*stmt))); } else { #endif NaryStmtNode *retNode = static_cast(stmt); @@ -1970,7 +1995,9 @@ void CGLowerer::SimplifyBlock(BlockNode &block) { } auto *newFunc = theMIRModule->GetMIRBuilder()->GetOrCreateFunction(asmMap.at(oldFunc->GetName()), callStmt->GetTyIdx()); - newFunc->GetFuncSymbol()->SetStorageClass(kScExtern); + MIRSymbol *funcSym = newFunc->GetFuncSymbol(); + funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); callStmt->SetPUIdx(newFunc->GetPuidx()); break; } @@ -2080,6 +2107,7 @@ StmtNode *CGLowerer::LowerCall( if (needCheckStore) { MIRFunction *fn = mirModule.GetMIRBuilder()->GetOrCreateFunction("MCC_Reflect_Check_Arraystore", TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); fn->AllocSymTab(); MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -2106,7 +2134,7 @@ StmtNode *CGLowerer::LowerCall( } MIRType *retType = nullptr; - if (callNode.op == OP_icall) { + if (callNode.op == OP_icall || callNode.op == OP_icallproto) { if (retTy == nullptr) { return &callNode; } else { @@ -2152,7 +2180,7 @@ StmtNode *CGLowerer::LowerCall( addrofNode->SetStIdx(dsgnSt->GetStIdx()); addrofNode->SetFieldID(0); - if (callNode.op == OP_icall) { + if (callNode.op == OP_icall || callNode.op == OP_icallproto) { auto ond = callNode.GetNopnd().begin(); newNopnd.emplace_back(*ond); newNopnd.emplace_back(addrofNode); @@ -2174,7 +2202,27 @@ StmtNode *CGLowerer::LowerCall( } void CGLowerer::LowerEntry(MIRFunction &func) { + // determine if needed to insert fake parameter to return struct for current function if (func.IsReturnStruct()) { + MIRType *retType = func.GetReturnType(); +#if TARGAARCH64 + PrimType pty = IsStructElementSame(retType); + if (pty == PTY_f32 || pty == PTY_f64 || IsPrimitiveVector(pty)) { + func.SetStructReturnedInRegs(); + return; + } +#endif + if (retType->GetPrimType() != PTY_agg) { + return; + } + if (retType->GetSize() > k16ByteSize) { + func.SetFirstArgReturn(); + func.GetMIRFuncType()->SetFirstArgReturn(); + } else { + func.SetStructReturnedInRegs(); + } + } + if (func.IsFirstArgReturn() && func.GetReturnType()->GetPrimType() != PTY_void) { MIRSymbol *retSt = func.GetSymTab()->CreateSymbol(kScopeLocal); 
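// Editor's illustrative sketch (hypothetical helper, not part of this patch): the
// decision LowerEntry applies above, written out on its own. A homogeneous
// floating-point aggregate, or any aggregate of at most 16 bytes, is returned in
// registers; anything larger is returned through a pointer passed as a fake first
// ("firstarg_return") parameter.
#include <cstdint>
enum class AggReturn { kInFpRegs, kInIntRegs, kViaFirstArgPointer };
AggReturn ClassifyAggReturn(uint64_t byteSize, bool isHomogeneousFp) {
  constexpr uint64_t k16ByteSize = 16;
  if (isHomogeneousFp) {
    return AggReturn::kInFpRegs;  // up to four members in v0..v3
  }
  return (byteSize <= k16ByteSize) ? AggReturn::kInIntRegs          // x0/x1
                                   : AggReturn::kViaFirstArgPointer;  // hidden pointer
}
// (end of editorial sketch; patch content continues)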
retSt->SetStorageClass(kScFormal); retSt->SetSKind(kStVar); @@ -2191,12 +2239,14 @@ void CGLowerer::LowerEntry(MIRFunction &func) { auto formal = func.GetFormal(i); formals.emplace_back(formal); } + func.SetFirstArgReturn(); beCommon.AddElementToFuncReturnType(func, func.GetReturnTyIdx()); func.UpdateFuncTypeAndFormalsAndReturnType(formals, TyIdx(PTY_void), true); auto *funcType = func.GetMIRFuncType(); ASSERT(funcType != nullptr, "null ptr check"); + funcType->SetFirstArgReturn(); beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); } } @@ -2374,6 +2424,7 @@ MIRFunction *CGLowerer::RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, con func->AllocSymTab(); MIRSymbol *funcSym = func->GetFuncSymbol(); funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); MIRType *argTy = GlobalTables::GetTypeTable().GetPtr(); MIRSymbol *argSt = func->GetSymTab()->CreateSymbol(kScopeLocal); argSt->SetNameStrIdx(mirBuilder->GetOrCreateStringIndex(paramName)); @@ -2415,6 +2466,7 @@ void CGLowerer::RegisterBuiltIns() { func->AllocSymTab(); MIRSymbol *funcSym = func->GetFuncSymbol(); funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); /* return type */ MIRType *retTy = desc.GetReturnType(); CHECK_FATAL(retTy != nullptr, "retTy should not be nullptr"); @@ -2570,6 +2622,7 @@ void CGLowerer::ProcessArrayExpr(BaseNode &expr, BlockNode &blkNode) { arrayNode.GetNopndAt(1), lenRegreadNode); CondGotoNode *brFalseNode = mirBuilder->CreateStmtCondGoto(cond, OP_brfalse, labIdx); MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Array_Boundary_Check", TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); fn->AllocSymTab(); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -3101,6 +3154,7 @@ BaseNode *CGLowerer::LowerIntrinJavaArrayLength(const BaseNode &parent, Intrinsi MIRFunction *newFunc = mirBuilder->GetOrCreateFunction("MCC_ThrowNullArrayNullPointerException", GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex()); + newFunc->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*newFunc->GetMIRFuncType()); newFunc->AllocSymTab(); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -3472,6 +3526,7 @@ StmtNode *CGLowerer::LowerIntrinsicRCCall(const IntrinsiccallNode &intrincall) { if (intrinFuncIDs.find(intrinDesc) == intrinFuncIDs.end()) { /* add funcid into map */ MIRFunction *fn = mirBuilder->GetOrCreateFunction(intrinDesc->name, TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); fn->AllocSymTab(); intrinFuncIDs[intrinDesc] = fn->GetPuidx(); @@ -3500,6 +3555,7 @@ void CGLowerer::LowerArrayStore(const IntrinsiccallNode &intrincall, BlockNode & if (needCheckStore) { MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Reflect_Check_Arraystore", TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); fn->AllocSymTab(); MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -3589,6 +3645,7 @@ StmtNode *CGLowerer::LowerIntrinsiccall(IntrinsiccallNode &intrincall, BlockNode beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); fn->AllocSymTab(); st->SetFunction(fn); + st->SetAppearsInCode(true); return LowerDefaultIntrinsicCall(intrincall, *st, *fn); } @@ -3659,6 +3716,7 @@ PUIdx CGLowerer::GetBuiltinToUse(BuiltinFunctionID id) const { void CGLowerer::LowerGCMalloc(const 
BaseNode &node, const GCMallocNode &gcmalloc, BlockNode &blkNode, bool perm) { MIRFunction *func = mirBuilder->GetOrCreateFunction((perm ? "MCC_NewPermanentObject" : "MCC_NewObj_fixed_class"), (TyIdx)(LOWERED_PTR_TYPE)); + func->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*func->GetMIRFuncType()); func->AllocSymTab(); /* Get the classinfo */ @@ -3677,6 +3735,7 @@ void CGLowerer::LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcmalloc if (classSym->GetAttr(ATTR_abstract) || classSym->GetAttr(ATTR_interface)) { MIRFunction *funcSecond = mirBuilder->GetOrCreateFunction("MCC_Reflect_ThrowInstantiationError", (TyIdx)(LOWERED_PTR_TYPE)); + funcSecond->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*funcSecond->GetMIRFuncType()); funcSecond->AllocSymTab(); BaseNode *arg = mirBuilder->CreateExprAddrof(0, *classSym); @@ -3795,6 +3854,7 @@ void CGLowerer::LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode & args.emplace_back(mirBuilder->CreateIntConst(0, PTY_u32)); } MIRFunction *func = mirBuilder->GetOrCreateFunction(funcName, (TyIdx)(LOWERED_PTR_TYPE)); + func->GetFuncSymbol()->SetAppearsInCode(true); beCommon.UpdateTypeTable(*func->GetMIRFuncType()); func->AllocSymTab(); CallNode *callAssign = nullptr; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp index e0acbe83e7c79f3e0472fdc8ddf7cb834e8c162c..e3e84d131b64c2dc0d9164024c8f95bbfefa1728 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp @@ -38,7 +38,7 @@ void AArch64MoveRegArgs::CollectRegisterArgs(std::map &argsL uint32 start = 0; if (numFormal) { MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); - if (func->IsReturnStruct()) { + if (func->IsFirstArgReturn()) { TyIdx tyIdx = func->GetFuncRetStructTyIdx(); if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { start = 1; @@ -436,7 +436,7 @@ void AArch64MoveRegArgs::MoveVRegisterArgs() { uint32 start = 0; if (formalCount) { MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); - if (func->IsReturnStruct()) { + if (func->IsFirstArgReturn()) { TyIdx tyIdx = func->GetFuncRetStructTyIdx(); if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16BitSize) { start = 1; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp index 0be11cdb40dfb6cc1b6e063f216d3506b2bff5c8..d9fa548174c2f97713d0641836e5a96d89f0ce64 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp @@ -303,7 +303,7 @@ int32 AArch64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, boo if (isFirst) { MIRFunction *func = tFunc != nullptr ? 
tFunc : const_cast(beCommon.GetMIRModule().CurFunction()); - if (func->IsReturnStruct()) { + if (func->IsFirstArgReturn()) { TyIdx tyIdx = func->GetFuncRetStructTyIdx(); size_t size = beCommon.GetTypeSize(tyIdx); if (size == 0) { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp index 96335c60b4b0f992730820b52f7aa397a78661f9..287c009121f16c1bc9bfffa35bae7afd6eaf4941 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -1410,15 +1410,44 @@ void AArch64CGFunc::SelectAsm(AsmNode &node) { } void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { + if (GetCG()->IsLmbc()) { + PrimType lhsSize = stmt.GetPrimType(); + PrimType rhsSize = stmt.Opnd(0)->GetPrimType(); + if (lhsSize != rhsSize && stmt.Opnd(0)->GetOpCode() == OP_ireadoff) { + Insn *prev = GetCurBB()->GetLastInsn(); + if (prev->GetMachineOpcode() == MOP_wldrsb || prev->GetMachineOpcode() == MOP_wldrsh) { + opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType())); + prev->SetMOP(prev->GetMachineOpcode() == MOP_wldrsb ? MOP_xldrsb : MOP_xldrsh); + } else if (prev->GetMachineOpcode() == MOP_wldr && stmt.GetPrimType() == PTY_i64) { + opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType())); + prev->SetMOP(MOP_xldrsw); + } + } + } RegOperand *regOpnd = nullptr; PregIdx pregIdx = stmt.GetRegIdx(); if (IsSpecialPseudoRegister(pregIdx)) { - regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType()); + if (GetCG()->IsLmbc() && stmt.GetPrimType() == PTY_agg) { + if (static_cast(opnd0).IsOfIntClass()) { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_i64); + } else if (opnd0.GetSize() <= k4ByteSize) { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f32); + } else { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f64); + } + } else { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType()); + } } else { regOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); } /* look at rhs */ PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + if (GetCG()->IsLmbc() && rhsType == PTY_agg) { + /* This occurs when a call returns a small struct */ + /* The subtree should already taken care of the agg type that is in excess of 8 bytes */ + rhsType = PTY_i64; + } PrimType dtype = rhsType; if (GetPrimTypeBitSize(dtype) < k32BitSize) { ASSERT(IsPrimitiveInteger(dtype), ""); @@ -1440,6 +1469,12 @@ void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); uint32 srcBitLength = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; GetCurBB()->AppendInsn(GetCG()->BuildInstruction(PickStInsn(srcBitLength, stype), *regOpnd, *dest)); + } else if (regOpnd->GetRegisterNumber() == R0 || regOpnd->GetRegisterNumber() == R1) { + Insn &pseudo = GetCG()->BuildInstruction(MOP_pseudo_ret_int, *regOpnd); + GetCurBB()->AppendInsn(pseudo); + } else if (regOpnd->GetRegisterNumber() >= V0 || regOpnd->GetRegisterNumber() <= V3) { + Insn &pseudo = GetCG()->BuildInstruction(MOP_pseudo_ret_float, *regOpnd); + GetCurBB()->AppendInsn(pseudo); } } @@ -1919,15 +1954,11 @@ void AArch64CGFunc::SelectIassignoff(IassignoffNode &stmt) { SelectCopy(memOpnd, destType, srcOpnd, destType); } -void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { - int32 offset = stmt.GetOffset(); - PrimType primType = stmt.GetPrimType(); - 
uint32 bitlen = GetPrimTypeSize(primType) * kBitsPerByte; - - Operand &srcOpnd = LoadIntoRegister(opnd, primType); +MemOperand *AArch64CGFunc::GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AArch64reg baseRegno) { MemOperand *memOpnd; - RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); - if (offset < 0) { + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(baseRegno, k64BitSize, kRegTyInt); + uint32 bitlen = byteSize * kBitsPerByte; + if (offset < 0 && offset < -256) { RegOperand *baseOpnd = &CreateRegisterOperandOfType(PTY_a64); ImmOperand &immOpnd = CreateImmOperand(offset, k32BitSize, true); Insn &addInsn = GetCG()->BuildInstruction(MOP_xaddrri12, *baseOpnd, *rfp, immOpnd); @@ -1939,38 +1970,61 @@ void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, nullptr, offsetOpnd, nullptr); } memOpnd->SetStackMem(true); - MOperator mOp = PickStInsn(bitlen, primType); - Insn &store = GetCG()->BuildInstruction(mOp, srcOpnd, *memOpnd); - GetCurBB()->AppendInsn(store); + return memOpnd; } +void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { + int32 offset = stmt.GetOffset(); + PrimType primType = stmt.GetPrimType(); + MIRType *rType = GetLmbcCallReturnType(); + bool isPureFpStruct = false; + uint32 numRegs = 0; + if (rType && rType->GetPrimType() == PTY_agg && opnd.IsRegister() && static_cast(opnd).IsPhysicalRegister()) { + CHECK_FATAL(rType->GetSize() <= 16, "SelectIassignfpoff invalid agg size"); + uint32 fpSize; + numRegs = FloatParamRegRequired(static_cast(rType), fpSize); + if (numRegs) { + primType = (fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; + isPureFpStruct = true; + } + } + uint32 byteSize = GetPrimTypeSize(primType); + uint32 bitlen = byteSize * kBitsPerByte; + if (isPureFpStruct) { + for (int i = 0 ; i < numRegs; ++i) { + MemOperand *memOpnd = GenLmbcFpMemOperand(offset + (i * byteSize), byteSize); + RegOperand &srcOpnd = GetOrCreatePhysicalRegisterOperand(AArch64reg(V0 + i), bitlen, kRegTyFloat); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetCG()->BuildInstruction(mOp, srcOpnd, *memOpnd); + GetCurBB()->AppendInsn(store); + } + } else { + Operand &srcOpnd = LoadIntoRegister(opnd, primType); + MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetCG()->BuildInstruction(mOp, srcOpnd, *memOpnd); + GetCurBB()->AppendInsn(store); + } +} + +/* Load and assign to a new register. 
To be moved to the correct call register OR stack + location in LmbcSelectParmList */ void AArch64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) { if (GetLmbcArgInfo() == nullptr) { LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); SetLmbcArgInfo(p); } - uint32 byteLen = GetPrimTypeSize(pTy); - uint32 bitLen = byteLen * kBitsPerByte; RegType regTy = GetRegTyFromPrimTy(pTy); - int32 curRegArgs = GetLmbcArgsInRegs(regTy); - if (curRegArgs < k8ByteSize) { - RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(regTy, byteLen)); - SelectCopy(*res, pTy, opnd, pTy); - SetLmbcArgInfo(res, pTy, offset, 1); - } - else { - /* Move into allocated space */ - Operand &memOpd = CreateMemOpnd(RSP, offset, byteLen); - Operand ® = LoadIntoRegister(opnd, pTy); - GetCurBB()->AppendInsn(GetCG()->BuildInstruction(PickStInsn(bitLen, pTy), - reg, memOpd)); - } + uint32 byteLen = GetPrimTypeSize(pTy); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(regTy, byteLen)); + SelectCopy(*res, pTy, opnd, pTy); + SetLmbcArgInfo(res, pTy, 0, 1); IncLmbcArgsInRegs(regTy); /* num of args in registers */ IncLmbcTotalArgs(); /* num of args */ } -/* Search for CALL/ICALL/ICALLPROTO node, must be called from a blkassignoff node */ -MIRType *AArch64CGFunc::GetAggTyFromCallSite(StmtNode *stmt) { +/* Search for CALL/ICALLPROTO node, must be called from a blkassignoff node */ +MIRType *AArch64CGFunc::LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector **parmList) { for ( ; stmt != nullptr; stmt = stmt->GetNext()) { if (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto) { break; @@ -1983,25 +2037,20 @@ MIRType *AArch64CGFunc::GetAggTyFromCallSite(StmtNode *stmt) { if (stmt->GetOpCode() == OP_call) { CallNode *callNode = static_cast(stmt); MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); - if (fn->IsReturnStruct()) { - ++nargs; - } if (fn->GetFormalCount() > 0) { - ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetNthParamTyIdx(nargs)); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetFormalDefVec()[nargs].formalTyIdx); } + *parmList = &fn->GetParamTypes(); // would return null if the actual parameter is bogus } else if (stmt->GetOpCode() == OP_icallproto) { IcallNode *icallproto = static_cast(stmt); MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto->GetRetTyIdx()); MIRFuncType *fType = static_cast(type); - MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetRetTyIdx()); - if (retType->GetKind() == kTypeStruct) { - ++nargs; - } ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(nargs)); + *parmList = &fType->GetParamTypeList(); } else { CHECK_FATAL(stmt->GetOpCode() == OP_icallproto, - "GetAggTyFromCallSite:: unexpected call operator"); + "LmbcGetAggTyFromCallSite:: unexpected call operator"); } return ty; } @@ -2078,11 +2127,11 @@ bool AArch64CGFunc::LmbcSmallAggForRet(BlkassignoffNode &bNode, Operand *src) { } /* return true if blkassignoff for return, false otherwise */ -bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src) { +bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src, std::vector **parmList) { AArch64reg regno = static_cast(static_cast(src)->GetRegisterNumber()); if (IsBlkassignForPush(bNode)) { PrimType pTy = PTY_i64; - MIRStructType *ty = static_cast(GetAggTyFromCallSite(&bNode)); + MIRStructType *ty = static_cast(LmbcGetAggTyFromCallSite(&bNode, parmList)); uint32 size = 0; 
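// Editor's illustrative sketch (hypothetical stand-alone helper, not part of this
// patch): what the FloatParamRegRequired query used just below answers. A struct whose
// fields are all the same floating-point type occupies one v register per field, up to
// four; anything else needs no FP argument registers and falls back to integer
// registers or memory.
#include <cstdint>
uint32_t FpRegsRequired(uint32_t fieldCount, bool allFieldsSameFpType) {
  constexpr uint32_t kMaxHfaFields = 4;
  if (!allFieldsSameFpType || fieldCount == 0 || fieldCount > kMaxHfaFields) {
    return 0;          // not a homogeneous FP aggregate
  }
  return fieldCount;   // one of v0..v3 per field
}
// (end of editorial sketch; patch content continues)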
uint32 fpregs = ty ? FloatParamRegRequired(ty, size) : 0; /* fp size determined */ if (fpregs > 0) { @@ -2093,7 +2142,7 @@ bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src) { MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte); RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyFloat, size)); SelectCopy(*res, pTy, mem, pTy); - SetLmbcArgInfo(res, pTy, bNode.offset + s, fpregs); + SetLmbcArgInfo(res, pTy, 0, fpregs); IncLmbcArgsInRegs(kRegTyFloat); } IncLmbcTotalArgs(); @@ -2134,6 +2183,41 @@ bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src) { return false; } +/* This function is incomplete and may be removed when Lmbc IR is changed + to have the lowerer figure out the address at which the large agg resides */ +uint32 AArch64CGFunc::LmbcFindTotalStkUsed(std::vector *paramList) { + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + for (TyIdx tyIdx : *paramList) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + parmlocator.LocateNextParm(*ty, pLoc); + } + return 0; +} + +/* All arguments passed as registers */ +uint32 AArch64CGFunc::LmbcTotalRegsUsed() { + if (GetLmbcArgInfo() == nullptr) { + return 0; /* no arg */ + } + MapleVector &regs = GetLmbcCallArgNumOfRegs(); + MapleVector &types = GetLmbcCallArgTypes(); + uint32 iCnt = 0; + uint32 fCnt = 0; + for (uint32 i = 0; i < regs.size(); i++) { + if (IsPrimitiveInteger(types[i])) { + if ((iCnt + regs[i]) <= k8ByteSize) { + iCnt += regs[i]; + }; + } else { + if ((fCnt + regs[i]) <= k8ByteSize) { + fCnt += regs[i]; + }; + } + } + return iCnt + fCnt; +} + /* If blkassignoff for argument, this function loads the agg arguments into virtual registers, regardless of whether there are sufficient physical call registers.
Argument > 16-bytes are copied to preset space and ptr @@ -2142,29 +2226,40 @@ bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, Operand *src) { void AArch64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) { CHECK_FATAL(src->GetKind() == Operand::kOpdRegister, "blkassign src type not in register"); + std::vector *parmList; if (GetLmbcArgInfo() == nullptr) { LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); SetLmbcArgInfo(p); } if (LmbcSmallAggForRet(bNode, src)) { return; - } else if (LmbcSmallAggForCall(bNode, src)) { + } else if (LmbcSmallAggForCall(bNode, src, &parmList)) { return; } - /* memcpy for agg assign OR large agg for arg/ret */ Operand *dest = HandleExpr(bNode, *bNode.Opnd(0)); RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + /* memcpy for agg assign OR large agg for arg/ret */ + int32 offset = bNode.offset; + if (IsBlkassignForPush(bNode)) { + /* large agg for call, addr to be pushed in SelectCall */ + offset = GetLmbcTotalStkUsed(); + if (offset < 0) { + /* length of ALL stack based args for this call, this location is where the + next large agg resides, its addr will then be passed */ + offset = LmbcFindTotalStkUsed(parmList) + LmbcTotalRegsUsed(); + } + SetLmbcTotalStkUsed(offset + bNode.blockSize); /* next use */ + SetLmbcArgInfo(regResult, PTY_i64, 0, 1); /* 1 reg for ptr */ + IncLmbcArgsInRegs(kRegTyInt); + IncLmbcTotalArgs(); + /* copy large agg arg to offset below */ + } std::vector opndVec; opndVec.push_back(regResult); /* result */ - opndVec.push_back(PrepareMemcpyParamOpnd(bNode.offset, *dest));/* param 0 */ + opndVec.push_back(PrepareMemcpyParamOpnd(offset, *dest)); /* param 0 */ opndVec.push_back(src); /* param 1 */ opndVec.push_back(PrepareMemcpyParamOpnd(bNode.blockSize));/* param 2 */ SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); - if (IsBlkassignForPush(bNode)) { - SetLmbcArgInfo(static_cast(src), PTY_i64, (int32)bNode.offset, 1); - IncLmbcArgsInRegs(kRegTyInt); - IncLmbcTotalArgs(); - } } void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { @@ -2550,6 +2645,159 @@ void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd) { } } +void AArch64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) { + uint32 offset = 0; + if (x->GetOpCode() == OP_dread) { + DreadNode *dread = static_cast(x); + MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = sym->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(dread->GetFieldID()); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, dread->GetFieldID()).first); + } + uint32 typeSize = GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + /* generate move to regs for agg return */ + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + parmlocator.LocateNextParm(*mirType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); + /* aggregates are 8 byte aligned. */ + Operand *rhsmemopnd = nullptr; + RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */ + uint32 loadSize; + uint32 numRegs; + RegType regType; + PrimType retPty; + bool fpParm = false; + if (pLoc.numFpPureRegs) { + loadSize = pLoc.fpSize; + numRegs = pLoc.numFpPureRegs; + fpParm = true; + regType = kRegTyFloat; + retPty = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; + } else { + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? 
kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u64; + } else { + loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u32; + } + } + bool parmCopy = IsParamStructCopy(*sym); + for (uint32 i = 0; i < numRegs; i++) { + if (parmCopy) { + rhsmemopnd = &LoadStructCopyBase(*sym, + (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + static_cast(loadSize * kBitsPerByte)); + } else { + rhsmemopnd = &GetOrCreateMemOpnd(*sym, + (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + (loadSize * kBitsPerByte)); + } + result[i] = &CreateVirtualRegisterOperand(NewVReg(regType, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, retPty); + Insn &ld = GetCG()->BuildInstruction(mop1, *(result[i]), *rhsmemopnd); + GetCurBB()->AppendInsn(ld); + } + AArch64reg regs[kFourRegister]; + regs[0] = static_cast(pLoc.reg0); + regs[1] = static_cast(pLoc.reg1); + regs[2] = static_cast(pLoc.reg2); + regs[3] = static_cast(pLoc.reg3); + RegOperand *dest; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop2; + if (fpParm) { + preg = regs[i]; + mop2 = (loadSize == k4ByteSize) ? MOP_xvmovs : MOP_xvmovd; + } else { + preg = (i == 0 ? R0 : R1); + mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr; + } + dest = &GetOrCreatePhysicalRegisterOperand(preg, (loadSize * kBitsPerByte), regType); + Insn &mov = GetCG()->BuildInstruction(mop2, *dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop3; + if (fpParm) { + preg = regs[i]; + mop3 = MOP_pseudo_ret_float; + } else { + preg = (i == 0 ? R0 : R1); + mop3 = MOP_pseudo_ret_int; + } + dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, regType); + Insn &pseudo = GetCG()->BuildInstruction(mop3, *dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } else if (x->GetOpCode() == OP_iread) { + IreadNode *iread = static_cast(x); + RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*iread, *iread->Opnd(0))); + rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, iread->Opnd(0)->GetPrimType()); + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx())); + MIRType *mirType = static_cast(ptrType->GetPointedType()); + bool isRefField = false; + if (iread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(iread->GetFieldID()); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, iread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*structType, iread->GetFieldID()); + } + uint32 typeSize = GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + /* generate move to regs. */ + RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */ + uint32 loadSize; + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + } else { + loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + } + uint32 numRegs = (typeSize <= k8ByteSize) ? 
kOneRegister : kTwoRegister; + for (uint32 i = 0; i < numRegs; i++) { + OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(offset + i * loadSize, loadSize * kBitsPerByte); + Operand &rhsmemopnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, loadSize * kBitsPerByte, + rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr); + result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32); + Insn &ld = GetCG()->BuildInstruction(mop1, *(result[i]), rhsmemopnd); + ld.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(ld); + } + RegOperand *dest; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? R0 : R1); + dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &mov = GetCG()->BuildInstruction(MOP_xmovrr, *dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? R0 : R1); + dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &pseudo = cg->BuildInstruction(MOP_pseudo_ret_int, *dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } else { // dummy return of 0 inserted by front-end at absence of return + ASSERT(x->GetOpCode() == OP_constval, "SelectReturnSendOfStructInRegs: unexpected return operand"); + uint32 typeSize = GetPrimTypeSize(x->GetPrimType()); + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(R0, typeSize * kBitsPerByte, kRegTyInt); + ImmOperand &src = CreateImmOperand(0, k16BitSize, false); + GetCurBB()->AppendInsn(GetCG()->BuildInstruction(MOP_xmovri32, dest, src)); + return; + } +} + Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); if (symbol->IsEhIndex()) { @@ -2913,31 +3161,50 @@ Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ire return result; } -RegOperand *AArch64CGFunc::GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType) { - MemOperand *memOpnd; - RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); - uint32 bitlen = byteSize * kBitsPerByte; - if (offset < 0) { - RegOperand *baseOpnd = &CreateRegisterOperandOfType(PTY_a64); - ImmOperand &immOpnd = CreateImmOperand(offset, k32BitSize, true); - Insn &addInsn = GetCG()->BuildInstruction(MOP_xaddrri12, *baseOpnd, *rfp, immOpnd); - GetCurBB()->AppendInsn(addInsn); - OfstOperand *offsetOpnd = &CreateOfstOpnd(0, k32BitSize); - memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, baseOpnd, - nullptr, offsetOpnd, nullptr); - } else { - OfstOperand *offsetOpnd = &CreateOfstOpnd(offset, k32BitSize); - memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, - nullptr, offsetOpnd, nullptr); - } - memOpnd->SetStackMem(true); +RegOperand *AArch64CGFunc::GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType, + AArch64reg baseRegno) { + MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize, baseRegno); RegOperand *result = &GetOrCreateVirtualRegisterOperand(NewVReg(regType, byteSize)); - MOperator mOp = PickLdInsn(bitlen, primType); + MOperator mOp = PickLdInsn(byteSize * kBitsPerByte, primType); Insn &load = GetCG()->BuildInstruction(mOp, *result, *memOpnd); GetCurBB()->AppendInsn(load); return result; } +RegOperand *AArch64CGFunc::LmbcStructReturnLoad(int32 offset) { + RegOperand *result = 
nullptr; + MIRFunction &func = GetFunction(); + CHECK_FATAL(func.IsReturnStruct(), "LmbcStructReturnLoad: not struct return"); + MIRType *ty = func.GetReturnType(); + uint32 sz = GetBecommon().GetTypeSize(ty->GetTypeIndex()); + uint32 fpSize; + uint32 numFpRegs = FloatParamRegRequired(static_cast(ty), fpSize); + if (numFpRegs > 0) { + PrimType pType = (fpSize <= k4ByteSize) ? PTY_f32 : PTY_f64; + for (int32 i = (numFpRegs - kOneRegister); i > 0; --i) { + result = GenLmbcParamLoad(offset + (i * fpSize), fpSize, kRegTyFloat, pType); + AArch64reg regNo = static_cast(V0 + i); + RegOperand *reg = &GetOrCreatePhysicalRegisterOperand(regNo, fpSize * kBitsPerByte, kRegTyFloat); + SelectCopy(*reg, pType, *result, pType); + Insn &pseudo = GetCG()->BuildInstruction(MOP_pseudo_ret_float, *reg); + GetCurBB()->AppendInsn(pseudo); + } + result = GenLmbcParamLoad(offset, fpSize, kRegTyFloat, pType); + } else if (sz <= k4ByteSize) { + result = GenLmbcParamLoad(offset, k4ByteSize, kRegTyInt, PTY_u32); + } else if (sz <= k8ByteSize) { + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } else if (sz <= k16ByteSize) { + result = GenLmbcParamLoad(offset + k8ByteSize, k8ByteSize, kRegTyInt, PTY_i64); + RegOperand *r1 = &GetOrCreatePhysicalRegisterOperand(R1, k8ByteSize * kBitsPerByte, kRegTyInt); + SelectCopy(*r1, PTY_i64, *result, PTY_i64); + Insn &pseudo = GetCG()->BuildInstruction(MOP_pseudo_ret_int, *r1); + GetCurBB()->AppendInsn(pseudo); + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } + return result; +} + Operand *AArch64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) { int32 offset = ireadoff.GetOffset(); PrimType primType = ireadoff.GetPrimType(); @@ -2948,19 +3215,32 @@ Operand *AArch64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode if (offset >= 0) { LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(offset); if (info->GetPrimType() == PTY_agg) { - result = GenLmbcParamLoad(offset, bytelen, regty, primType); + if (info->IsOnStack()) { + result = GenLmbcParamLoad(info->GetOnStackOffset(), GetPrimTypeSize(PTY_a64), kRegTyInt, PTY_a64); + regno_t baseRegno = result->GetRegisterNumber(); + result = GenLmbcParamLoad(offset - info->GetOffset(), bytelen, regty, primType, (AArch64reg)baseRegno); + } else if (primType == PTY_agg) { + CHECK_FATAL(parent.GetOpCode() == OP_regassign, "SelectIreadfpoff of agg"); + result = LmbcStructReturnLoad(offset); + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } } else { CHECK_FATAL(primType == info->GetPrimType(), "Incorrect primtype"); CHECK_FATAL(offset == info->GetOffset(), "Incorrect offset"); - if (info->GetRegNO() == 0) { - /* TODO : follow lmbc sp offset for now */ + if (info->GetRegNO() == 0 || info->HasRegassign() == false) { result = GenLmbcParamLoad(offset, bytelen, regty, primType); } else { result = &GetOrCreatePhysicalRegisterOperand((AArch64reg)(info->GetRegNO()), bitlen, regty); } } } else { - result = GenLmbcParamLoad(offset, bytelen, regty, primType); + if (primType == PTY_agg) { + CHECK_FATAL(parent.GetOpCode() == OP_regassign, "SelectIreadfpoff of agg"); + result = LmbcStructReturnLoad(offset); + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } } return result; } @@ -5756,6 +6036,9 @@ Operand *AArch64CGFunc::SelectAlloca(UnaryNode &node, Operand &opnd0) { if (!CGOptions::IsArm64ilp32()) { ASSERT((node.GetPrimType() == PTY_a64), "wrong type"); } + if (GetCG()->IsLmbc()) { + SetHasVLAOrAlloca(true); + } PrimType 
stype = node.Opnd(0)->GetPrimType(); Operand *resOpnd = &opnd0; if (GetPrimTypeBitSize(stype) < GetPrimTypeBitSize(PTY_u64)) { @@ -5771,10 +6054,13 @@ Operand *AArch64CGFunc::SelectAlloca(UnaryNode &node, Operand &opnd0) { SelectShift(aliOp, aliOp, shifOpnd, kShiftLeft, PTY_u64); Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); SelectSub(spOpnd, spOpnd, aliOp, PTY_u64); - int64 argsToStkpassSize = GetMemlayout()->SizeOfArgsToStackPass(); - if (argsToStkpassSize > 0) { + int64 allocaOffset = GetMemlayout()->SizeOfArgsToStackPass(); + if (GetCG()->IsLmbc()) { + allocaOffset -= kDivide2 * k8ByteSize; + } + if (allocaOffset > 0) { RegOperand &resallo = CreateRegisterOperandOfType(PTY_u64); - SelectAdd(resallo, spOpnd, CreateImmOperand(argsToStkpassSize, k64BitSize, true), PTY_u64); + SelectAdd(resallo, spOpnd, CreateImmOperand(allocaOffset, k64BitSize, true), PTY_u64); return &resallo; } else { return &SelectCopy(spOpnd, PTY_u64, PTY_u64); @@ -6216,6 +6502,15 @@ void AArch64CGFunc::AssignLmbcFormalParams() { param->SetRegNO(0); } else { param->SetRegNO(intReg); + if (param->HasRegassign() == false) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(offset, bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetCG()->BuildInstruction(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } intReg++; } } else if (IsPrimitiveFloat(primType)) { @@ -6223,6 +6518,15 @@ void AArch64CGFunc::AssignLmbcFormalParams() { param->SetRegNO(0); } else { param->SetRegNO(fpReg); + if (param->HasRegassign() == false) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(offset, bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(fpReg), bitlen, kRegTyFloat); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetCG()->BuildInstruction(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } fpReg++; } } else if (primType == PTY_agg) { @@ -6241,6 +6545,14 @@ void AArch64CGFunc::AssignLmbcFormalParams() { } else { param->SetRegNO(intReg); param->SetIsOnStack(); + param->SetOnStackOffset((intReg - R0 + fpReg - V0) * k8ByteSize); + uint32 bytelen = GetPrimTypeSize(PTY_a64); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(param->GetOnStackOffset(), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, PTY_a64); + Insn &store = GetCG()->BuildInstruction(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); intReg++; } } else if (param->GetSize() <= k8ByteSize) { @@ -6286,7 +6598,6 @@ void AArch64CGFunc::AssignLmbcFormalParams() { Operand *memOpd = &CreateMemOpnd(RFP, offset + (i * rSize), rSize); GetCurBB()->AppendInsn(GetCG()->BuildInstruction( PickStInsn(rSize * kBitsPerByte, pType), dest, *memOpd)); - param->SetIsOnStack(); } } } else { @@ -6295,6 +6606,22 @@ void AArch64CGFunc::AssignLmbcFormalParams() { } } +void AArch64CGFunc::LmbcGenSaveSpForAlloca() { + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + return; + } + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + RegOperand &spSaveOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, kSizeOfPtr)); + Insn &save = GetCG()->BuildInstruction(MOP_xmovrr, 
spSaveOpnd, spOpnd); + GetFirstBB()->AppendInsn(save); + //save.SetFrameDef(true); + for (auto *retBB : GetExitBBsVec()) { + Insn &restore = GetCG()->BuildInstruction(MOP_xmovrr, spOpnd, spSaveOpnd); + retBB->AppendInsn(restore); + restore.SetFrameDef(true); + } +} + /* if offset < 0, allocation; otherwise, deallocation */ MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, int32 size) { MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size); @@ -7579,6 +7906,10 @@ size_t AArch64CGFunc::SelectParmListGetStructReturnSize(StmtNode &naryNode) { CallNode &callNode = static_cast(naryNode); MIRFunction *callFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); TyIdx retIdx = callFunc->GetReturnTyIdx(); + if (callFunc->IsFirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(callFunc->GetFormalDefVec()[0].formalTyIdx); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } size_t retSize = GetBecommon().GetTypeSize(retIdx.GetIdx()); if ((retSize == 0) && callFunc->IsReturnStruct()) { TyIdx tyIdx = callFunc->GetFuncRetStructTyIdx(); @@ -7595,6 +7926,14 @@ size_t AArch64CGFunc::SelectParmListGetStructReturnSize(StmtNode &naryNode) { return GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()); } } + } else if (naryNode.GetOpCode() == OP_icallproto) { + IcallNode &icallProto = static_cast(naryNode); + MIRFuncType *funcTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallProto.GetRetTyIdx())); + if (funcTy->FirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTy->GetNthParamType(0)); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } + return GetBecommon().GetTypeSize(funcTy->GetRetTyIdx()); } return 0; } @@ -7702,7 +8041,7 @@ void AArch64CGFunc::SelectParmListPreprocess(const StmtNode &naryNode, size_t st */ void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative) { size_t i = 0; - if ((naryNode.GetOpCode() == OP_icall) || isCallNative) { + if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto || isCallNative) { i++; } std::set specialArgs; @@ -7729,8 +8068,11 @@ void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bo BaseNode *argExpr = naryNode.Opnd(i); PrimType primType = argExpr->GetPrimType(); ASSERT(primType != PTY_void, "primType should not be void"); - auto calleePuIdx = static_cast(naryNode).GetPUIdx(); - auto *callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); + MIRFunction *callee = nullptr; + if (dynamic_cast(&naryNode) != nullptr) { + auto calleePuIdx = static_cast(naryNode).GetPUIdx(); + callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); + } if (callee != nullptr && pnum < callee->GetFormalCount() && callee->GetFormal(pnum) != nullptr) { is64x1vec = callee->GetFormal(pnum)->GetAttr(ATTR_oneelem_simd); } @@ -8198,32 +8540,42 @@ void AArch64CGFunc::LmbcSelectParmList(ListOperand *srcOpnds, bool isArgReturn) CHECK_FATAL(GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc, "To be called for Lmbc model only"); MapleVector &args = GetLmbcCallArgs(); MapleVector &types = GetLmbcCallArgTypes(); - MapleVector &offsets = GetLmbcCallArgOffsets(); MapleVector ®s = GetLmbcCallArgNumOfRegs(); int iCnt = 0; int fCnt = 0; - for (int i = isArgReturn ? 1 : 0; i < args.size(); i++) { + uint32 stkOffset = LmbcTotalRegsUsed() * k8ByteSize; + for (int i = isArgReturn ? 
1 : 0; i < args.size(); ) { RegType ty = args[i]->GetRegisterType(); PrimType pTy = types[i]; AArch64reg reg; if (args[i]->IsOfIntClass() && (iCnt + regs[i]) <= k8ByteSize) { - reg = static_cast(R0 + iCnt++); - RegOperand *res = &GetOrCreatePhysicalRegisterOperand( - reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); - SelectCopy(*res, pTy, *args[i], pTy); - srcOpnds->PushOpnd(*res); + for (int regCnt = 0; regCnt < regs[i]; ++regCnt) { + reg = static_cast(R0 + iCnt + regCnt); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand( + reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i + regCnt], pTy); + srcOpnds->PushOpnd(*res); + } + iCnt += regs[i]; + i += regs[i]; } else if (!args[i]->IsOfIntClass() && (fCnt + regs[i]) <= k8ByteSize) { - reg = static_cast(V0 + fCnt++); - RegOperand *res = &GetOrCreatePhysicalRegisterOperand( - reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); - SelectCopy(*res, pTy, *args[i], pTy); - srcOpnds->PushOpnd(*res); + for (int regCnt = 0; regCnt < regs[i]; ++regCnt) { + reg = static_cast(V0 + fCnt + regCnt); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand( + reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i + regCnt], pTy); + srcOpnds->PushOpnd(*res); + } + fCnt += regs[i]; + i += regs[i]; } else { int32 pSize = GetPrimTypeSize(pTy); - Operand &memOpd = CreateMemOpnd(RSP, offsets[i], pSize); + Operand &memOpd = CreateStkTopOpnd(stkOffset, pSize * kBitsPerByte); GetCurBB()->AppendInsn( GetCG()->BuildInstruction(PickStInsn(pSize * kBitsPerByte, pTy), *args[i], memOpd)); + stkOffset += k8ByteSize; + i++; } } /* Load x8 if 1st arg is for agg return */ @@ -8252,7 +8604,18 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { - LmbcSelectParmList(srcOpnds, fn->IsFirstArgReturn()); + SetLmbcCallReturnType(nullptr); + bool largeStructRet = false; + if (fn->IsFirstArgReturn()) { + MIRPtrType *ptrTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetFormalDefVec()[0].formalTyIdx)); + MIRType *sTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTy->GetPointedTyIdx()); + largeStructRet = sTy->GetSize() > k16ByteSize; + SetLmbcCallReturnType(sTy); + } else { + MIRType *ty = fn->GetReturnType(); + SetLmbcCallReturnType(ty); + } + LmbcSelectParmList(srcOpnds, largeStructRet); } bool callNative = false; if ((fsym->GetName() == "MCC_CallFastNative") || (fsym->GetName() == "MCC_CallFastNativeExt") || @@ -8329,7 +8692,17 @@ void AArch64CGFunc::SelectCall(CallNode &callNode) { void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) { ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { - LmbcSelectParmList(srcOpnds, false /*fType->GetRetAttrs().GetAttr(ATTR_firstarg_return)*/); + /* icallproto */ + MIRType *retTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx()); + MIRFuncType *funcTy = static_cast(retTy); + bool largeStructRet = false; + if (funcTy->FirstArgReturn()) { + TyIdx idx = funcTy->GetNthParamType(0); + MIRPtrType *ptrTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(idx)); + MIRType *sTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTy->GetPointedTyIdx()); + largeStructRet = sTy->GetSize() > k16ByteSize; + } + LmbcSelectParmList(srcOpnds, largeStructRet); } else { SelectParmList(icallNode, *srcOpnds); } @@ -8343,7 +8716,7 @@ void 
AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) { RegOperand *regOpnd = static_cast(fptrOpnd); Insn &callInsn = GetCG()->BuildInstruction(MOP_xblr, *regOpnd, *srcOpnds); - MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx()); + MIRType *retType = icallNode.GetCallReturnType(); if (retType != nullptr) { callInsn.SetRetSize(static_cast(retType->GetSize())); callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); @@ -8456,9 +8829,18 @@ RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, Pr case kSregFp: reg = RFP; break; - case kSregGp: - reg = RFP; - break; + case kSregGp: { + MIRSymbol *sym = GetCG()->GetGP(); + if (sym == nullptr) { + sym = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + std::string strBuf("__file__local__GP"); + sym->SetNameStrIdx(GetMirModule().GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + GetCG()->SetGP(sym); + } + RegOperand &result = GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAddrof(result, CreateStImmOperand(*sym, 0, 0)); + return result; + } case kSregThrownval: { /* uses x0 == R0 */ ASSERT(uCatch.regNOCatch > 0, "regNOCatch should greater than 0."); if (Globals::GetInstance()->GetOptimLevel() == 0) { @@ -9194,6 +9576,9 @@ int32 AArch64CGFunc::GetBaseOffset(const SymbolAlloc &sa) { int32 baseOffset = symAlloc->GetOffset(); return baseOffset + sizeofFplr; } else if (sgKind == kMsSpillReg) { + if (GetCG()->IsLmbc()) { + return symAlloc->GetOffset() + memLayout->SizeOfArgsToStackPass(); + } int32 baseOffset = symAlloc->GetOffset() + memLayout->SizeOfArgsRegisterPassed() + memLayout->GetSizeOfLocals() + memLayout->GetSizeOfRefLocals(); return baseOffset + sizeofFplr; @@ -9768,16 +10153,21 @@ void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { Operand &stkOpnd = GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); /* __stack */ - ImmOperand *offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */ + ImmOperand *offsOpnd; + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */ + } else { + offsOpnd = &CreateImmOperand(0, k64BitSize, true); + } ImmOperand *offsOpnd2 = &CreateImmOperand(stkSize, k64BitSize, false); RegOperand &vReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(LOWERED_PTR_TYPE))); if (stkSize) { SelectAdd(vReg, *offsOpnd, *offsOpnd2, LOWERED_PTR_TYPE); SelectAdd(vReg, stkOpnd, vReg, LOWERED_PTR_TYPE); } else { - SelectAdd(vReg, stkOpnd, *offsOpnd, LOWERED_PTR_TYPE); + SelectAdd(vReg, stkOpnd, *offsOpnd, LOWERED_PTR_TYPE); /* stack pointer */ } - OfstOperand *offOpnd = &GetOrCreateOfstOpnd(0, k64BitSize); + OfstOperand *offOpnd = &GetOrCreateOfstOpnd(0, k64BitSize); /* va_list ptr */ /* mem operand in va_list struct (lhs) */ MemOperand *strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, offOpnd, static_cast(nullptr)); @@ -9852,13 +10242,19 @@ void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) { AArch64CallConvImpl parmLocator(GetBecommon()); CCLocInfo pLoc; uint32 stkSize = 0; + uint32 inReg = 0; for (uint32 i = 0; i < GetFunction().GetFormalCount(); i++) { MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetFunction().GetNthParamTyIdx(i)); parmLocator.LocateNextParm(*ty, pLoc); if (pLoc.reg0 == kRinvalid) { /* on stack */ stkSize = 
static_cast(pLoc.memOffset + pLoc.memSize); + } else { + inReg++; } } + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stkSize += (inReg * k8ByteSize); + } if (CGOptions::IsArm64ilp32()) { stkSize = static_cast(RoundUp(stkSize, k8ByteSize)); } else { diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp index 5f2b7f37bab28ad4809f60428662ad62468c5096..60b7462f220c8eb9bf02e62da59f07083bc2118b 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -134,7 +134,7 @@ void AArch64MemLayout::LayoutVarargParams() { if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) { for (uint32 i = 0; i < func->GetFormalCount(); i++) { if (i == 0) { - if (func->IsReturnStruct()) { + if (func->IsFirstArgReturn() && func->GetReturnType()->GetPrimType() != PTY_void) { TyIdx tyIdx = func->GetFuncRetStructTyIdx(); if (be.GetTypeSize(tyIdx.GetIdx()) <= k16ByteSize) { continue; @@ -198,7 +198,7 @@ void AArch64MemLayout::LayoutFormalParams() { * outparmsize - portion of frame size of current function used by call parameters */ segArgsStkPassed.SetSize(mirFunction->GetOutParmSize()); - segArgsRegPassed.SetSize(mirFunction->GetOutParmSize() + kTwoRegister * k8ByteSize); + segArgsRegPassed.SetSize(mirFunction->GetOutParmSize()); return; } @@ -210,7 +210,7 @@ void AArch64MemLayout::LayoutFormalParams() { AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); SetSymAllocInfo(stIndex, *symLoc); if (i == 0) { - if (mirFunction->IsReturnStruct()) { + if (mirFunction->IsFirstArgReturn()) { symLoc->SetMemSegment(GetSegArgsRegPassed()); symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); TyIdx tyIdx = mirFunction->GetFuncRetStructTyIdx(); @@ -224,13 +224,13 @@ void AArch64MemLayout::LayoutFormalParams() { continue; } } - MIRType *ty = mirFunction->GetNthParamType(i); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx); uint32 ptyIdx = ty->GetTypeIndex(); parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction); if (ploc.reg0 != kRinvalid) { /* register */ symLoc->SetRegisters(static_cast(ploc.reg0), static_cast(ploc.reg1), static_cast(ploc.reg2), static_cast(ploc.reg3)); - if (mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + if (mirFunction->GetFormalDefVec()[i].formalAttrs.GetAttr(ATTR_localrefvar)) { symLoc->SetMemSegment(segRefLocals); SetSegmentSize(*symLoc, segRefLocals, ptyIdx); } else if (!sym->IsPreg()) { @@ -272,7 +272,7 @@ void AArch64MemLayout::LayoutFormalParams() { } else { segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), kSizeOfPtr))); } - if (mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + if (mirFunction->GetFormalDefVec()[i].formalAttrs.GetAttr(ATTR_localrefvar)) { SetLocalRegLocInfo(sym->GetStIdx(), *symLoc); AArch64SymbolAlloc *symLoc1 = memAllocator->GetMemPool()->New(); symLoc1->SetMemSegment(segRefLocals); @@ -287,7 +287,7 @@ void AArch64MemLayout::LayoutFormalParams() { } void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays) { - if (be.GetMIRModule().GetFlavor() == kFlavorLmbc && mirFunction->GetFormalCount() == 0) { + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { segLocals.SetSize(mirFunction->GetFrameSize() - mirFunction->GetOutParmSize()); return; } @@ -372,7 +372,7 @@ void AArch64MemLayout::LayoutReturnRef(std::vector &returnDelays, 
segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); } if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { - segArgsToStkPass.SetSize(mirFunction->GetOutParmSize()); + segArgsToStkPass.SetSize(mirFunction->GetOutParmSize() + kDivide2 * k8ByteSize); } else { segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); } @@ -423,7 +423,7 @@ void AArch64MemLayout::LayoutActualParams() { * variables get assigned their respecitve storage, i.e. * CallFrameSize (discounting callee-saved and FP/LR) is known. */ - MIRType *ty = mirFunction->GetNthParamType(i); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx); uint32 ptyIdx = ty->GetTypeIndex(); static_cast(cgFunc)->GetOrCreateMemOpnd(*sym, 0, be.GetTypeAlign(ptyIdx) * kBitsPerByte); } @@ -440,7 +440,7 @@ void AArch64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmSta */ if (CGOptions::IsArm64ilp32()) { segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), k8ByteSize)); - /* we do need this as SP has to be aligned at a 16-bytes bounardy */ + /* we do need this as SP has to be aligned at a 16-bytes bounardy */ segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize + k8ByteSize)); } else { segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), kSizeOfPtr)); @@ -527,11 +527,13 @@ uint64 AArch64MemLayout::StackFrameSize() const { uint64 total = segArgsRegPassed.GetSize() + static_cast(cgFunc)->SizeOfCalleeSaved() + GetSizeOfRefLocals() + locals().GetSize() + GetSizeOfSpillReg(); - if (GetSizeOfGRSaveArea() > 0) { - total += RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); - } - if (GetSizeOfVRSaveArea() > 0) { - total += RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + if (cgFunc->GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + if (GetSizeOfGRSaveArea() > 0) { + total += RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (GetSizeOfVRSaveArea() > 0) { + total += RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } } /* diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp index eaa5726d435a939ffd0706b8ec4e0b37ede60e38..7ef719064537ec0186f231bc2b48cc63eb79655a 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -50,7 +50,7 @@ void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGF if (memBaseReg->GetRegisterNumber() == RFP) { RegOperand &newBaseOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); MemOperand &newMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( - MemOperand::kAddrModeBOi, memOpnd.GetSize(), &newBaseOpnd, memOpnd.GetIndexRegister(), + memOpnd.GetAddrMode(), memOpnd.GetSize(), &newBaseOpnd, memOpnd.GetIndexRegister(), memOpnd.GetOffsetImmediate(), memOpnd.GetSymbol()); insn.SetOperand(i, newMemOpnd); stackBaseOpnd = true; diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp index 66b7c3f44c344a9c4da734900f8ecec45fd61184..61b48c0bfc82176b0a3f040e8317063fb8ebb434 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp @@ -1117,6 +1117,9 @@ void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg ipoint = cgFunc.GetCurBB()->GetLastInsn(); 
cfiOffset = stackFrameSize; (void)InsertCFIDefCfaOffset(cfiOffset, *ipoint); + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + argsToStkPassSize -= (kDivide2 * k8ByteSize); + } ipoint = &CreateAndAppendInstructionForAllocateCallFrame(argsToStkPassSize, reg0, reg1, rty); CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); cfiOffset = GetOffsetFromCFA(); @@ -1250,9 +1253,16 @@ void AArch64GenProEpilog::GeneratePushRegs() { CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); ++it; - auto offset = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - - (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* for FP/LR */) - - cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 offset; + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + offset = static_cast(memLayout->RealStackFrameSize() - + aarchCGFunc.SizeOfCalleeSaved() - memLayout->GetSizeOfLocals()); + } else { + offset = static_cast(memLayout->RealStackFrameSize() - + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* for FP/LR */) - + memLayout->SizeOfArgsToStackPass()); + } if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) { offset -= static_cast(kAarch64StackPtrAlignment); @@ -1312,10 +1322,19 @@ void AArch64GenProEpilog::GeneratePushUnnamedVarargRegs() { size = kSizeOfPtr; } uint32 dataSizeBits = size * kBitsPerByte; - uint32 offset = static_cast(memlayout->GetGRSaveAreaBaseLoc()); - if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { - offset += size; /* End of area should be aligned. Hole between VR and GR area */ + uint32 offset; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetGRSaveAreaBaseLoc()); /* SP reference */ + if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { + offset += size; /* End of area should be aligned. 
Hole between VR and GR area */ + } + } else { + offset = -memlayout->GetSizeOfGRSaveArea(); /* FP reference */ + if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { + offset -= size; + } } + uint32 grSize = -offset; uint32 start_regno = k8BitSize - (memlayout->GetSizeOfGRSaveArea() / size); ASSERT(start_regno <= k8BitSize, "Incorrect starting GR regno for GR Save Area"); for (uint32 i = start_regno + static_cast(R0); i < static_cast(R8); i++) { @@ -1325,16 +1344,25 @@ void AArch64GenProEpilog::GeneratePushUnnamedVarargRegs() { tmpOffset += 8U - (dataSizeBits >> 3); } } - Operand &stackloc = aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + Operand *stackLoc; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + } else { + stackLoc = aarchCGFunc.GenLmbcFpMemOperand(offset, size); + } RegOperand ® = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); Insn &inst = - currCG->BuildInstruction(aarchCGFunc.PickStInsn(dataSizeBits, PTY_i64), reg, stackloc); + currCG->BuildInstruction(aarchCGFunc.PickStInsn(dataSizeBits, PTY_i64), reg, *stackLoc); cgFunc.GetCurBB()->AppendInsn(inst); offset += size; } if (!CGOptions::UseGeneralRegOnly()) { - offset = static_cast(memlayout->GetVRSaveAreaBaseLoc()); + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetVRSaveAreaBaseLoc()); + } else { + offset = -(memlayout->GetSizeOfVRSaveArea() + grSize); + } start_regno = k8BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize)); ASSERT(start_regno <= k8BitSize, "Incorrect starting GR regno for VR Save Area"); for (uint32 i = start_regno + static_cast(V0); i < static_cast(V8); i++) { @@ -1344,11 +1372,16 @@ void AArch64GenProEpilog::GeneratePushUnnamedVarargRegs() { tmpOffset += 16U - (dataSizeBits >> 3); } } - Operand &stackloc = aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + Operand *stackLoc; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + } else { + stackLoc = aarchCGFunc.GenLmbcFpMemOperand(offset, size); + } RegOperand ® = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); Insn &inst = - currCG->BuildInstruction(aarchCGFunc.PickStInsn(dataSizeBits, PTY_f64), reg, stackloc); + currCG->BuildInstruction(aarchCGFunc.PickStInsn(dataSizeBits, PTY_f64), reg, *stackLoc); cgFunc.GetCurBB()->AppendInsn(inst); offset += (size * k2BitSize); } @@ -1648,11 +1681,17 @@ void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg r * ldp/stp's imm should be within -512 and 504; * if ldp's imm > 504, we fall back to the ldp-add version */ - if (cgFunc.HasVLAOrAlloca() || argsToStkPassSize == 0) { - stackFrameSize -= argsToStkPassSize; - if (stackFrameSize > kStpLdpImm64UpperBound) { + bool isLmbc = (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc); + if (cgFunc.HasVLAOrAlloca() || argsToStkPassSize == 0 || isLmbc) { + int lmbcOffset = 0; + if (isLmbc == false) { + stackFrameSize -= argsToStkPassSize; + } else { + lmbcOffset = argsToStkPassSize - (kDivide2 * k8ByteSize); + } + if (stackFrameSize > kStpLdpImm64UpperBound || isLmbc) { Operand *o2; - o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, kSizeOfPtr * kBitsPerByte); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, (isLmbc ? 
lmbcOffset : 0), kSizeOfPtr * kBitsPerByte); Insn &deallocInsn = currCG->BuildInstruction(mOp, o0, o1, *o2); cgFunc.GetCurBB()->AppendInsn(deallocInsn); if (cgFunc.GenCfi()) { @@ -1727,9 +1766,16 @@ void AArch64GenProEpilog::GeneratePopRegs() { CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); ++it; - int32 offset = static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - - (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* for FP/LR */) - - cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 offset; + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + offset = static_cast(memLayout->RealStackFrameSize() - + aarchCGFunc.SizeOfCalleeSaved() - memLayout->GetSizeOfLocals()); + } else { + offset = static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* for FP/LR */) - + memLayout->SizeOfArgsToStackPass(); + } if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) { offset -= static_cast(kAarch64StackPtrAlignment); @@ -1820,7 +1866,7 @@ void AArch64GenProEpilog::GenerateEpilog(BB &bb) { Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); - if (cgFunc.HasVLAOrAlloca()) { + if (cgFunc.HasVLAOrAlloca() && cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { aarchCGFunc.SelectCopy(spOpnd, PTY_u64, fpOpnd, PTY_u64); } diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp index 433cc85754e9cacd716a2b24c8bac1ef5f2ef6a7..5ca154b1aa818855afe920c888e4846ec765b569 100644 --- a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -161,8 +161,7 @@ void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { Insn &retInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_ret_int, regOpnd); bb.AppendInsn(retInsn); pseudoInsns.emplace_back(&retInsn); - } else { - ASSERT(regNO == V0, "CG internal error. 
Return value should be R0 or V0."); + } else if (regNO == V0) { RegOperand ®Opnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyFloat); Insn &retInsn = cgFunc->GetCG()->BuildInstruction(MOP_pseudo_ret_float, regOpnd); diff --git a/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp index be5d446216f24b6c7a4c91aa5239ba08af4de2d0..201335c039b3bf81c164a314431069b8809d62e6 100644 --- a/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp +++ b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp @@ -160,6 +160,7 @@ void RecursiveMarkUsedStaticSymbol(const BaseNode *baseNode) { break; } case OP_addrof: + case OP_addrofoff: case OP_dread: { const AddrofNode *dreadNode = static_cast(baseNode); MarkUsedStaticSymbol(dreadNode->GetStIdx()); diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp index 733660e667ce3a803f264d3e188d494dbf2e1728..98afdfd9b0d7f91223ce55defcf88c8f6fab6afb 100644 --- a/src/mapleall/maple_be/src/cg/cgfunc.cpp +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -1146,7 +1146,11 @@ void HandleReturn(StmtNode &stmt, CGFunc &cgFunc) { ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); Operand *opnd = nullptr; if (retNode.NumOpnds() != 0) { - opnd = cgFunc.HandleExpr(retNode, *retNode.Opnd(0)); + if (!cgFunc.GetFunction().StructReturnedInRegs()) { + opnd = cgFunc.HandleExpr(retNode, *retNode.Opnd(0)); + } else { + cgFunc.SelectReturnSendOfStructInRegs(retNode.Opnd(0)); + } } cgFunc.SelectReturn(opnd); cgFunc.SetCurBBKind(BB::kBBReturn); @@ -1562,14 +1566,15 @@ void CGFunc::CreateLmbcFormalParamInfo() { uint32 offset; uint32 typeSize; MIRFunction &func = GetFunction(); - if (func.GetParamSize() > 0) { + if (func.GetFormalCount() > 0) { + /* Whenever lmbc cannot delete call type info, the prototype is available */ int stackOffset = 0; - for (size_t idx = 0; idx < func.GetParamSize(); ++idx) { + for (size_t idx = 0; idx < func.GetFormalCount(); ++idx) { MIRSymbol *sym = func.GetFormal(idx); MIRType *type; TyIdx tyIdx; if (sym) { - tyIdx = func.GetNthParamTyIdx(idx); + tyIdx = func.GetFormalDefVec()[idx].formalTyIdx; type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); } else { FormalDef vec = const_cast(GetBecommon().GetMIRModule().CurFunction())->GetFormalDefAt(idx); @@ -1616,6 +1621,9 @@ void CGFunc::CreateLmbcFormalParamInfo() { } IreadFPoffNode *ireadNode = static_cast(operand); primType = ireadNode->GetPrimType(); + if (ireadNode->GetOffset() < 0) { + continue; + } offset = ireadNode->GetOffset(); typeSize = GetPrimTypeSize(primType); CHECK_FATAL((offset % k8ByteSize) == 0, ""); /* scalar only, no struct for now */ @@ -1627,6 +1635,31 @@ void CGFunc::CreateLmbcFormalParamInfo() { [] (LmbcFormalParamInfo *x, LmbcFormalParamInfo *y) { return x->GetOffset() < y->GetOffset(); } ); + + /* When a scalar param address is taken, its regassign is not in the 1st block */ + for (StmtNode *stmt = func.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt == nullptr) { + break; + } + if (stmt->GetOpCode() == OP_label) { + continue; + } + if (stmt->GetOpCode() != OP_regassign) { + break; + } + RegassignNode *regAssignNode = static_cast(stmt); + BaseNode *operand = regAssignNode->Opnd(0); + if (operand->GetOpCode() != OP_ireadfpoff) { + break; + } + IreadFPoffNode *ireadNode = static_cast(operand); + if (ireadNode->GetOffset() < 0) { + continue; + } + LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(ireadNode->GetOffset()); + 
info->SetHasRegassign(); + } + AssignLmbcFormalParams(); } @@ -2006,6 +2039,7 @@ void CGFunc::HandleFunction() { ASSERT(exitBBVec.size() <= 1, "there are more than one BB_return in func"); } ProcessExitBBVec(); + LmbcGenSaveSpForAlloca(); if (func.IsJava()) { GenerateCleanupCodeForExtEpilog(*cleanupBB); diff --git a/src/mapleall/maple_be/src/cg/emit.cpp b/src/mapleall/maple_be/src/cg/emit.cpp index 4e09e0969539682ab8154b6545071f99fa44ce93..45c51f1710f70202f46af9f7b312e10f67e0a186 100644 --- a/src/mapleall/maple_be/src/cg/emit.cpp +++ b/src/mapleall/maple_be/src/cg/emit.cpp @@ -2177,6 +2177,11 @@ void Emitter::EmitGlobalVar(const MIRSymbol &globalVar) { } void Emitter::EmitGlobalVars(std::vector> &globalVars) { + if (GetCG()->IsLmbc() && GetCG()->GetGP() != nullptr) { + Emit(asmInfo->GetLocal()).Emit("\t").Emit(GetCG()->GetGP()->GetName()).Emit("\n"); + Emit(asmInfo->GetComm()).Emit("\t").Emit(GetCG()->GetGP()->GetName()); + Emit(", ").Emit(GetCG()->GetMIRModule()->GetGlobalMemSize()).Emit(", ").Emit("8\n"); + } /* load globalVars profile */ if (globalVars.empty()) { return; diff --git a/src/mapleall/maple_be/src/cg/memlayout.cpp b/src/mapleall/maple_be/src/cg/memlayout.cpp index 57e45e8a2d78f3e0bcb06372f088dcff6dd0f1ad..1b636207c67336aa08fb752251ed2eeff7cc952c 100644 --- a/src/mapleall/maple_be/src/cg/memlayout.cpp +++ b/src/mapleall/maple_be/src/cg/memlayout.cpp @@ -38,7 +38,7 @@ uint32 MemLayout::FindLargestActualArea(int32 &aggCopySize) { uint32 maxCopyStackSize = 0; // Size of aggregate param stack copy requirement for (; stmt != nullptr; stmt = stmt->GetNext()) { Opcode opCode = stmt->GetOpCode(); - if (opCode < OP_call || opCode > OP_xintrinsiccallassigned) { + if ((opCode < OP_call || opCode > OP_xintrinsiccallassigned) && opCode != OP_icallproto) { continue; } if (opCode == OP_intrinsiccallwithtypeassigned || opCode == OP_intrinsiccallwithtype || @@ -54,9 +54,9 @@ uint32 MemLayout::FindLargestActualArea(int32 &aggCopySize) { * if the following check fails, most likely dex has invoke-custom etc * that is not supported yet */ - DCHECK((opCode == OP_call || opCode == OP_icall), "Not lowered to call or icall?"); + DCHECK((opCode == OP_call || opCode == OP_icall || opCode == OP_icallproto), "Not lowered to call or icall?"); int32 copySize; - uint32 size = ComputeStackSpaceRequirementForCall(*stmt, copySize, opCode == OP_icall); + uint32 size = ComputeStackSpaceRequirementForCall(*stmt, copySize, opCode == OP_icall || opCode == OP_icallproto); if (size > maxParamStackSize) { maxParamStackSize = size; } diff --git a/src/mapleall/maple_ir/include/bin_mpl_export.h b/src/mapleall/maple_ir/include/bin_mpl_export.h index 09952cb8c7ed32b36a9a88037ee0c79fa5be66df..39294da6940cfbe8f1621bc8a1c8089df45bc26f 100644 --- a/src/mapleall/maple_ir/include/bin_mpl_export.h +++ b/src/mapleall/maple_ir/include/bin_mpl_export.h @@ -68,7 +68,7 @@ enum : uint8 { kBinEaStart = 42, kBinNodeBlock = 43, kBinOpStatement = 44, - kBinOpExpression = 45, +//kBinOpExpression = 45, kBinReturnvals = 46, kBinTypeTabStart = 47, kBinSymStart = 48, @@ -81,9 +81,9 @@ enum : uint8 { kBinTypenameStart = 55, kBinHeaderStart = 56, kBinAliasMapStart = 57, - kBinKindTypeViaTypename = 58, - kBinKindSymViaSymname = 59, - kBinKindFuncViaSymname = 60, +//kBinKindTypeViaTypename = 58, +//kBinKindSymViaSymname = 59, +//kBinKindFuncViaSymname = 60, kBinFunctionBodyStart = 61, kBinFormalWordsTypeTagged = 62, kBinFormalWordsRefCounted = 63, @@ -103,8 +103,7 @@ class BinaryMplExport { void Export(const std::string &fname, std::unordered_set 
*dumpFuncSet); void WriteNum(int64 x); void Write(uint8 b); - void OutputType(TyIdx tyIdx, bool canUseTypename); - void OutputTypeViaTypeName(TyIdx tidx) { OutputType(tidx, true); } + void OutputType(TyIdx tyIdx); void WriteFunctionBodyField(uint64 contentIdx, std::unordered_set *dumpFuncSet); void OutputConst(MIRConst *c); void OutputConstBase(const MIRConst &c); @@ -133,12 +132,11 @@ class BinaryMplExport { void OutputInfoVector(const MIRInfoVector &infoVector, const MapleVector &infoVectorIsString); void OutputFuncIdInfo(MIRFunction *func); void OutputLocalSymbol(MIRSymbol *sym); - void OutputLocalSymTab(const MIRFunction *func); void OutputPregTab(const MIRFunction *func); void OutputLabelTab(const MIRFunction *func); void OutputLocalTypeNameTab(const MIRTypeNameTable *tyNameTab); void OutputFormalsStIdx(MIRFunction *func); - void OutputFuncViaSymName(PUIdx puIdx); + void OutputFuncViaSym(PUIdx puIdx); void OutputExpression(BaseNode *e); void OutputBaseNode(const BaseNode *b); void OutputReturnValues(const CallReturnVector *retv); @@ -182,6 +180,7 @@ class BinaryMplExport { void ExpandFourBuffSize(); MIRModule &mod; + MIRFunction *curFunc = nullptr; size_t bufI = 0; std::vector buf; std::unordered_map gStrMark; @@ -190,6 +189,7 @@ class BinaryMplExport { std::unordered_map uStrMark; std::unordered_map symMark; std::unordered_map typMark; + std::unordered_map localSymMark; friend class UpdateMplt; std::unordered_map callInfoMark; std::map *func2SEMap = nullptr; diff --git a/src/mapleall/maple_ir/include/bin_mpl_import.h b/src/mapleall/maple_ir/include/bin_mpl_import.h index 36976548723633a0d5f415deadff511789c54716..515a66c17a0d207fa10e6ce7262775cb86035b5b 100644 --- a/src/mapleall/maple_ir/include/bin_mpl_import.h +++ b/src/mapleall/maple_ir/include/bin_mpl_import.h @@ -126,15 +126,14 @@ class BinaryMplImport { void ImportInfoVector(MIRInfoVector &infoVector, MapleVector &infoVectorIsString); void ImportLocalTypeNameTable(MIRTypeNameTable *typeNameTab); void ImportFuncIdInfo(MIRFunction *func); - void ImportLocalSymbol(MIRFunction *func); - void ImportLocalSymTab(MIRFunction *func); + MIRSymbol *ImportLocalSymbol(MIRFunction *func); void ImportPregTab(const MIRFunction *func); void ImportLabelTab(MIRFunction *func); void ImportFormalsStIdx(MIRFunction *func); void ImportAliasMap(MIRFunction *func); void ImportSrcPos(SrcPosition &pos); void ImportBaseNode(Opcode &o, PrimType &typ); - PUIdx ImportFuncViaSymName(); + PUIdx ImportFuncViaSym(MIRFunction *func); BaseNode *ImportExpression(MIRFunction *func); void ImportReturnValues(MIRFunction *func, CallReturnVector *retv); BlockNode *ImportBlockNode(MIRFunction *fn); @@ -162,6 +161,7 @@ class BinaryMplImport { std::vector typTab; std::vector funcTab; std::vector symTab; + std::vector localSymTab; std::vector callInfoTab; std::vector eaCgTab; std::vector methodSymbols; diff --git a/src/mapleall/maple_ir/include/ir_safe_cast_traits.def b/src/mapleall/maple_ir/include/ir_safe_cast_traits.def index 1439b49d572f3adfbf906aacd1d3f323e5c6ecee..14ed1a367b4ea5f8b738103b6412af97f7ae2988 100644 --- a/src/mapleall/maple_ir/include/ir_safe_cast_traits.def +++ b/src/mapleall/maple_ir/include/ir_safe_cast_traits.def @@ -224,7 +224,9 @@ REGISTER_SAFE_CAST(CallNode, from.GetOpCode() == OP_call || from.GetOpCode() == OP_virtualicallassigned || instance_of(from)); REGISTER_SAFE_CAST(IcallNode, from.GetOpCode() == OP_icall || - from.GetOpCode() == OP_icallassigned); + from.GetOpCode() == OP_icallassigned || + from.GetOpCode() == OP_icallproto || + 
from.GetOpCode() == OP_icallprotoassigned); REGISTER_SAFE_CAST(IntrinsiccallNode, from.GetOpCode() == OP_intrinsiccall || from.GetOpCode() == OP_intrinsiccallwithtype || from.GetOpCode() == OP_xintrinsiccall || diff --git a/src/mapleall/maple_ir/include/mir_builder.h b/src/mapleall/maple_ir/include/mir_builder.h index ac3215724ed4eb1a5493131c835f4a42ff7fba9d..64f6a2723c813cd7534d3d8200e48e85472af279 100755 --- a/src/mapleall/maple_ir/include/mir_builder.h +++ b/src/mapleall/maple_ir/include/mir_builder.h @@ -262,6 +262,8 @@ class MIRBuilder { IcallNode *CreateStmtIcall(const MapleVector &args); IcallNode *CreateStmtIcallAssigned(const MapleVector &args, const MIRSymbol &ret); + IcallNode *CreateStmtIcallproto(const MapleVector &args); + IcallNode *CreateStmtIcallprotoAssigned(const MapleVector &args, const MIRSymbol &ret); // For Call, VirtualCall, SuperclassCall, InterfaceCall IntrinsiccallNode *CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments, TyIdx tyIdx = TyIdx()); diff --git a/src/mapleall/maple_ir/include/mir_function.h b/src/mapleall/maple_ir/include/mir_function.h index 4f51ceaeca2c78889851711464548595671c4e33..5f8e399d218edacd45a35324e3787742a5ed1b54 100644 --- a/src/mapleall/maple_ir/include/mir_function.h +++ b/src/mapleall/maple_ir/include/mir_function.h @@ -832,24 +832,24 @@ class MIRFunction { callTimes = times; } - uint16 GetFrameSize() const { + uint32 GetFrameSize() const { return frameSize; } - void SetFrameSize(uint16 size) { + void SetFrameSize(uint32 size) { frameSize = size; } - uint16 GetUpFormalSize() const { + uint32 GetUpFormalSize() const { return upFormalSize; } - void SetUpFormalSize(uint16 size) { + void SetUpFormalSize(uint32 size) { upFormalSize = size; } - uint16 GetOutParmSize() const { + uint32 GetOutParmSize() const { return outParmSize; } - void SetOutParmSize(uint16 size) { + void SetOutParmSize(uint32 size) { outParmSize = size; } @@ -1301,9 +1301,9 @@ class MIRFunction { bool isDirty = false; bool fromMpltInline = false; // Whether this function is imported from mplt_inline file or not. 
uint8_t layoutType = kLayoutUnused; - uint16 frameSize = 0; - uint16 upFormalSize = 0; - uint16 outParmSize = 0; + uint32 frameSize = 0; + uint32 upFormalSize = 0; + uint32 outParmSize = 0; uint16 moduleID = 0; uint32 funcSize = 0; // size of code in words uint32 tempCount = 0; diff --git a/src/mapleall/maple_ir/include/mir_lower.h b/src/mapleall/maple_ir/include/mir_lower.h index 332a6de89fd527ea48f34f5bc09ae829666a906f..be479f65a61f4e0b9eeb2172a2d1060001ccc664 100644 --- a/src/mapleall/maple_ir/include/mir_lower.h +++ b/src/mapleall/maple_ir/include/mir_lower.h @@ -86,6 +86,7 @@ class MIRLower { ForeachelemNode *ExpandArrayMrtForeachelemBlock(ForeachelemNode &node); BlockNode *ExpandArrayMrtBlock(BlockNode &block); void AddArrayMrtMpl(BaseNode &exp, BlockNode &newblk); + MIRFuncType *FuncTypeFromFuncPtrExpr(BaseNode *x); void SetLowerME() { lowerPhase |= kShiftLowerMe; } diff --git a/src/mapleall/maple_ir/include/mir_nodes.h b/src/mapleall/maple_ir/include/mir_nodes.h index f782bdc03b6cb34e7d1513a0f3269bf81a5e4c05..de813522e654a449f7ee8da03de2358e82479cef 100755 --- a/src/mapleall/maple_ir/include/mir_nodes.h +++ b/src/mapleall/maple_ir/include/mir_nodes.h @@ -3303,7 +3303,7 @@ class CallNode : public NaryStmtNode { CallReturnVector returnValues; }; -// icall and icallproto +// icall, icallassigned, icallproto and icallprotoassigned class IcallNode : public NaryStmtNode { public: IcallNode(MapleAllocator &allocator, Opcode o) diff --git a/src/mapleall/maple_ir/include/mir_parser.h b/src/mapleall/maple_ir/include/mir_parser.h index d05c844f310ba34bfb2ff7db1a80642ba41fa20b..3b9a688a58e7448c9d90e41f35093763b6376a12 100755 --- a/src/mapleall/maple_ir/include/mir_parser.h +++ b/src/mapleall/maple_ir/include/mir_parser.h @@ -129,6 +129,7 @@ class MIRParser { bool ParseStmtIcall(StmtNodePtr&); bool ParseStmtIcallassigned(StmtNodePtr&); bool ParseStmtIcallproto(StmtNodePtr&); + bool ParseStmtIcallprotoassigned(StmtNodePtr&); bool ParseStmtIntrinsiccall(StmtNodePtr&, bool isAssigned); bool ParseStmtIntrinsiccall(StmtNodePtr&); bool ParseStmtIntrinsiccallassigned(StmtNodePtr&); diff --git a/src/mapleall/maple_ir/include/mir_symbol.h b/src/mapleall/maple_ir/include/mir_symbol.h index 8d7f837cd47402bc44efa0ac914d38d486656881..3f5b03064c80a69381a2882d08d9968af01f521e 100644 --- a/src/mapleall/maple_ir/include/mir_symbol.h +++ b/src/mapleall/maple_ir/include/mir_symbol.h @@ -489,7 +489,7 @@ class MIRSymbol { return true; case kScPstatic: case kScFstatic: - return value.konst == nullptr; + return value.konst == nullptr && !hasPotentialAssignment; default: return false; } diff --git a/src/mapleall/maple_ir/include/mir_type.h b/src/mapleall/maple_ir/include/mir_type.h index d6d32d999b227f92f7db32a8823419030f646c6c..207d80babccd718ca6ae8c10704177d9179b9935 100644 --- a/src/mapleall/maple_ir/include/mir_type.h +++ b/src/mapleall/maple_ir/include/mir_type.h @@ -1928,11 +1928,19 @@ class MIRFuncType : public MIRType { } bool IsVarargs() const { - return isVarArgs; + return funcAttrs.GetAttr(FUNCATTR_varargs); } - void SetVarArgs(bool flag) { - isVarArgs = flag; + void SetVarArgs() { + funcAttrs.SetAttr(FUNCATTR_varargs); + } + + bool FirstArgReturn() const { + return funcAttrs.GetAttr(FUNCATTR_firstarg_return); + } + + void SetFirstArgReturn() { + funcAttrs.SetAttr(FUNCATTR_firstarg_return); } const TypeAttrs &GetRetAttrs() const { @@ -1960,7 +1968,8 @@ class MIRFuncType : public MIRType { std::vector paramTypeList; std::vector paramAttrsList; TypeAttrs retAttrs; - bool isVarArgs = false; + public: + 
FuncAttrs funcAttrs; }; class MIRTypeByName : public MIRType { diff --git a/src/mapleall/maple_ir/include/opcode_info.h b/src/mapleall/maple_ir/include/opcode_info.h index 814f15b2f26ed804a09cafd19cdf9e61b7b0b9b2..5a82646b11d295cbece3c26f3b1a8c4c44c1f09c 100644 --- a/src/mapleall/maple_ir/include/opcode_info.h +++ b/src/mapleall/maple_ir/include/opcode_info.h @@ -117,6 +117,7 @@ class OpcodeTable { bool IsICall(Opcode o) const { ASSERT(o < OP_last, "invalid opcode"); return o == OP_icall || o == OP_icallassigned || + o == OP_icallproto || o == OP_icallprotoassigned || o == OP_virtualicall || o == OP_virtualicallassigned || o == OP_interfaceicall || o == OP_interfaceicallassigned; } diff --git a/src/mapleall/maple_ir/include/opcodes.def b/src/mapleall/maple_ir/include/opcodes.def index 34cfb51c8d11308ece1fbdf08f0616a76146e3ae..0e842141edcdce169dbbb3ac3ffbb357acad5965 100755 --- a/src/mapleall/maple_ir/include/opcodes.def +++ b/src/mapleall/maple_ir/include/opcodes.def @@ -221,3 +221,4 @@ OPCODE(iassignspoff, IassignFPoffNode, OPCODEISSTMT, 8) OPCODE(blkassignoff, BlkassignoffNode, OPCODEISSTMT, 8) OPCODE(icallproto, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(icallprotoassigned, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 8) diff --git a/src/mapleall/maple_ir/include/opcodes.h b/src/mapleall/maple_ir/include/opcodes.h index ea4ed2c1c9395ec13ccabffae749d2dbd3a42a92..b9f5e0f8c5b03cc60b53f30c88963fce5540992a 100644 --- a/src/mapleall/maple_ir/include/opcodes.h +++ b/src/mapleall/maple_ir/include/opcodes.h @@ -50,7 +50,7 @@ inline constexpr bool IsCallAssigned(Opcode code) { code == OP_virtualicallassigned || code == OP_superclasscallassigned || code == OP_interfacecallassigned || code == OP_interfaceicallassigned || code == OP_customcallassigned || code == OP_polymorphiccallassigned || - code == OP_icallassigned || code == OP_intrinsiccallassigned || + code == OP_icallassigned || code == OP_icallprotoassigned || code == OP_intrinsiccallassigned || code == OP_xintrinsiccallassigned || code == OP_intrinsiccallwithtypeassigned); } @@ -113,6 +113,8 @@ constexpr bool IsStmtMustRequire(Opcode opcode) { case OP_polymorphiccallassigned: case OP_icall: case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: case OP_intrinsiccall: case OP_xintrinsiccall: case OP_intrinsiccallassigned: diff --git a/src/mapleall/maple_ir/src/bin_func_export.cpp b/src/mapleall/maple_ir/src/bin_func_export.cpp index 5117ed8ed0fe8f6a5bc5512b01ccc2d9efa7f0f9..575c8fa6c63898c23ecf62b8952e6eb605cf938c 100644 --- a/src/mapleall/maple_ir/src/bin_func_export.cpp +++ b/src/mapleall/maple_ir/src/bin_func_export.cpp @@ -49,8 +49,8 @@ void BinaryMplExport::OutputFuncIdInfo(MIRFunction *func) { } void BinaryMplExport::OutputBaseNode(const BaseNode *b) { - WriteNum(b->GetOpCode()); - WriteNum(b->GetPrimType()); + Write(static_cast(b->GetOpCode())); + Write(static_cast(b->GetPrimType())); } void BinaryMplExport::OutputLocalSymbol(MIRSymbol *sym) { @@ -58,49 +58,37 @@ void BinaryMplExport::OutputLocalSymbol(MIRSymbol *sym) { WriteNum(0); return; } + + std::unordered_map::iterator it = localSymMark.find(sym); + if (it != localSymMark.end()) { + WriteNum(-(it->second)); + return; + } + WriteNum(kBinSymbol); - WriteNum(sym->GetStIndex()); // preserve original st index OutputStr(sym->GetNameStrIdx()); WriteNum(sym->GetSKind()); WriteNum(sym->GetStorageClass()); + size_t mark = 
localSymMark.size(); + localSymMark[sym] = mark; OutputTypeAttrs(sym->GetAttrs()); WriteNum(static_cast(sym->GetIsTmp())); if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { OutputSrcPos(sym->GetSrcPosition()); } - OutputTypeViaTypeName(sym->GetTyIdx()); + OutputType(sym->GetTyIdx()); if (sym->GetSKind() == kStPreg) { WriteNum(sym->GetPreg()->GetPregNo()); } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { OutputConst(sym->GetKonst()); } else if (sym->GetSKind() == kStFunc) { - OutputFuncViaSymName(sym->GetFunction()->GetPuidx()); - OutputTypeViaTypeName(sym->GetTyIdx()); + OutputFuncViaSym(sym->GetFunction()->GetPuidx()); + OutputType(sym->GetTyIdx()); } else { CHECK_FATAL(false, "should not used"); } } -void BinaryMplExport::OutputLocalSymTab(const MIRFunction *func) { - WriteNum(kBinSymStart); - uint64 outsymSizeIdx = buf.size(); - ExpandFourBuffSize(); /// size of OutSym - int32 size = 0; - - for (uint32 i = 1; i < func->GetSymTab()->GetSymbolTableSize(); i++) { - MIRSymbol *s = func->GetSymTab()->GetSymbolFromStIdx(i); - if (s->IsDeleted()) { - OutputLocalSymbol(nullptr); - } else { - OutputLocalSymbol(s); - } - size++; - } - - Fixup(outsymSizeIdx, size); - WriteNum(~kBinSymStart); -} - void BinaryMplExport::OutputPregTab(const MIRFunction *func) { WriteNum(kBinPregStart); uint64 outRegSizeIdx = buf.size(); @@ -117,7 +105,7 @@ void BinaryMplExport::OutputPregTab(const MIRFunction *func) { WriteNum(kBinPreg); WriteNum(mirpreg->GetPregNo()); TyIdx tyIdx = (mirpreg->GetMIRType() == nullptr) ? TyIdx(0) : mirpreg->GetMIRType()->GetTypeIndex(); - OutputTypeViaTypeName(tyIdx); + OutputType(tyIdx); WriteNum(mirpreg->GetPrimType()); size++; } @@ -140,7 +128,7 @@ void BinaryMplExport::OutputLocalTypeNameTab(const MIRTypeNameTable *typeNameTab WriteNum(static_cast(typeNameTab->Size())); for (std::pair it : typeNameTab->GetGStrIdxToTyIdxMap()) { OutputStr(it.first); - OutputTypeViaTypeName(it.second); + OutputType(it.second); } WriteNum(~kBinTypenameStart); } @@ -149,7 +137,7 @@ void BinaryMplExport::OutputFormalsStIdx(MIRFunction *func) { WriteNum(kBinFormalStart); WriteNum(func->GetFormalDefVec().size()); for (FormalDef formalDef : func->GetFormalDefVec()) { - WriteNum(formalDef.formalSym->GetStIndex()); + OutputLocalSymbol(formalDef.formalSym); } WriteNum(~kBinFormalStart); } @@ -160,21 +148,19 @@ void BinaryMplExport::OutputAliasMap(MapleMap &aliasVarMa for (std::pair it : aliasVarMap) { OutputStr(it.first); OutputStr(it.second.memPoolStrIdx); - OutputTypeViaTypeName(it.second.tyIdx); + OutputType(it.second.tyIdx); OutputStr(it.second.sigStrIdx); } WriteNum(~kBinAliasMapStart); } -void BinaryMplExport::OutputFuncViaSymName(PUIdx puIdx) { +void BinaryMplExport::OutputFuncViaSym(PUIdx puIdx) { MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); - WriteNum(kBinKindFuncViaSymname); - OutputStr(funcSt->GetNameStrIdx()); + OutputSymbol(funcSt); } void BinaryMplExport::OutputExpression(BaseNode *e) { - WriteNum(kBinOpExpression); OutputBaseNode(e); switch (e->GetOpCode()) { // leaf @@ -195,12 +181,12 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { } case OP_addroffunc: { AddroffuncNode *addrNode = static_cast(e); - OutputFuncViaSymName(addrNode->GetPUIdx()); + OutputFuncViaSym(addrNode->GetPUIdx()); return; } case OP_sizeoftype: { SizeoftypeNode *sot = static_cast(e); - OutputTypeViaTypeName(sot->GetTyIdx()); + 
OutputType(sot->GetTyIdx()); return; } case OP_addrof: @@ -219,11 +205,9 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { } WriteNum(stIdx.Scope()); if (stIdx.Islocal()) { - WriteNum(stIdx.Idx()); // preserve original st index + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); } else { - MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); - WriteNum(kBinKindSymViaSymname); - OutputStr(sym->GetNameStrIdx()); + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); } return; } @@ -236,7 +220,7 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { case OP_gcpermalloc: case OP_stackmalloc: { GCMallocNode *gcNode = static_cast(e); - OutputTypeViaTypeName(gcNode->GetTyIdx()); + OutputType(gcNode->GetTyIdx()); return; } // unary @@ -245,18 +229,18 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { case OP_floor: case OP_trunc: { TypeCvtNode *typecvtNode = static_cast(e); - WriteNum(typecvtNode->FromType()); + Write(static_cast(typecvtNode->FromType())); break; } case OP_retype: { RetypeNode *retypeNode = static_cast(e); - OutputTypeViaTypeName(retypeNode->GetTyIdx()); + OutputType(retypeNode->GetTyIdx()); break; } case OP_iread: case OP_iaddrof: { IreadNode *irNode = static_cast(e); - OutputTypeViaTypeName(irNode->GetTyIdx()); + OutputType(irNode->GetTyIdx()); WriteNum(irNode->GetFieldID()); break; } @@ -274,20 +258,20 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { case OP_zext: case OP_extractbits: { ExtractbitsNode *extNode = static_cast(e); - WriteNum(extNode->GetBitsOffset()); - WriteNum(extNode->GetBitsSize()); + Write(extNode->GetBitsOffset()); + Write(extNode->GetBitsSize()); break; } case OP_depositbits: { DepositbitsNode *dbNode = static_cast(e); - WriteNum(dbNode->GetBitsOffset()); - WriteNum(dbNode->GetBitsSize()); + Write(dbNode->GetBitsOffset()); + Write(dbNode->GetBitsSize()); break; } case OP_gcmallocjarray: case OP_gcpermallocjarray: { JarrayMallocNode *gcNode = static_cast(e); - OutputTypeViaTypeName(gcNode->GetTyIdx()); + OutputType(gcNode->GetTyIdx()); break; } // binary @@ -320,13 +304,13 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { case OP_cmpl: case OP_cmp: { CompareNode *cmpNode = static_cast(e); - WriteNum(cmpNode->GetOpndType()); + Write(static_cast(cmpNode->GetOpndType())); break; } case OP_resolveinterfacefunc: case OP_resolvevirtualfunc: { ResolveFuncNode *rsNode = static_cast(e); - OutputFuncViaSymName(rsNode->GetPuIdx()); + OutputFuncViaSym(rsNode->GetPuIdx()); break; } // ternary @@ -336,8 +320,8 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { // nary case OP_array: { ArrayNode *arrNode = static_cast(e); - OutputTypeViaTypeName(arrNode->GetTyIdx()); - WriteNum(static_cast(arrNode->GetBoundsCheck())); + OutputType(arrNode->GetTyIdx()); + Write(static_cast(arrNode->GetBoundsCheck())); WriteNum(static_cast(arrNode->NumOpnds())); break; } @@ -350,7 +334,7 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { case OP_intrinsicopwithtype: { IntrinsicopNode *intrnNode = static_cast(e); WriteNum(intrnNode->GetIntrinsic()); - OutputTypeViaTypeName(intrnNode->GetTyIdx()); + OutputType(intrnNode->GetTyIdx()); WriteNum(static_cast(intrnNode->NumOpnds())); break; } @@ -365,6 +349,9 @@ void BinaryMplExport::OutputExpression(BaseNode *e) { static SrcPosition lastOutputSrcPosition; void BinaryMplExport::OutputSrcPos(const SrcPosition &pos) { + if (!mod.IsWithDbgInfo()) { + return; + } if (pos.FileNum() == 0 || pos.LineNum() == 0) { // error case, so output 0 
WriteNum(lastOutputSrcPosition.RawData()); WriteNum(lastOutputSrcPosition.LineNum()); @@ -379,9 +366,12 @@ void BinaryMplExport::OutputReturnValues(const CallReturnVector *retv) { WriteNum(kBinReturnvals); WriteNum(static_cast(retv->size())); for (uint32 i = 0; i < retv->size(); i++) { - WriteNum((*retv)[i].first.Idx()); - WriteNum((*retv)[i].second.GetFieldID()); - WriteNum((*retv)[i].second.GetPregIdx()); + RegFieldPair rfp = (*retv)[i].second; + WriteNum(rfp.GetPregIdx()); + if (!rfp.IsReg()) { + WriteNum((rfp.GetFieldID())); + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol((*retv)[i].first)); + } } } @@ -416,36 +406,34 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { } WriteNum(stIdx.Scope()); if (stIdx.Islocal()) { - WriteNum(stIdx.Idx()); // preserve original st index + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); } else { - MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); - WriteNum(kBinKindSymViaSymname); - OutputStr(sym->GetNameStrIdx()); + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); } break; } case OP_regassign: { RegassignNode *rass = static_cast(s); - WriteNum(rass->GetPrimType()); + Write(static_cast(rass->GetPrimType())); WriteNum(rass->GetRegIdx()); break; } case OP_iassign: { IassignNode *iass = static_cast(s); - OutputTypeViaTypeName(iass->GetTyIdx()); + OutputType(iass->GetTyIdx()); WriteNum(iass->GetFieldID()); break; } case OP_iassignoff: { IassignoffNode *iassoff = static_cast(s); - WriteNum(iassoff->GetPrimType()); + Write(static_cast(iassoff->GetPrimType())); WriteNum(iassoff->GetOffset()); break; } case OP_iassignspoff: case OP_iassignfpoff: { IassignFPoffNode *iassfpoff = static_cast(s); - WriteNum(iassfpoff->GetPrimType()); + Write(static_cast(iassfpoff->GetPrimType())); WriteNum(iassfpoff->GetOffset()); break; } @@ -465,9 +453,9 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { case OP_customcall: case OP_polymorphiccall: { CallNode *callnode = static_cast(s); - OutputFuncViaSymName(callnode->GetPUIdx()); + OutputFuncViaSym(callnode->GetPUIdx()); if (s->GetOpCode() == OP_polymorphiccall) { - OutputTypeViaTypeName(static_cast(callnode)->GetTyIdx()); + OutputType(static_cast(callnode)->GetTyIdx()); } WriteNum(static_cast(s->NumOpnds())); break; @@ -480,15 +468,15 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { case OP_interfaceicallassigned: case OP_customcallassigned: { CallNode *callnode = static_cast(s); - OutputFuncViaSymName(callnode->GetPUIdx()); + OutputFuncViaSym(callnode->GetPUIdx()); OutputReturnValues(&callnode->GetReturnVec()); WriteNum(static_cast(s->NumOpnds())); break; } case OP_polymorphiccallassigned: { CallNode *callnode = static_cast(s); - OutputFuncViaSymName(callnode->GetPUIdx()); - OutputTypeViaTypeName(callnode->GetTyIdx()); + OutputFuncViaSym(callnode->GetPUIdx()); + OutputType(callnode->GetTyIdx()); OutputReturnValues(&callnode->GetReturnVec()); WriteNum(static_cast(s->NumOpnds())); break; @@ -496,13 +484,14 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { case OP_icallproto: case OP_icall: { IcallNode *icallnode = static_cast(s); - OutputTypeViaTypeName(icallnode->GetRetTyIdx()); + OutputType(icallnode->GetRetTyIdx()); WriteNum(static_cast(s->NumOpnds())); break; } + case OP_icallprotoassigned: case OP_icallassigned: { IcallNode *icallnode = static_cast(s); - OutputTypeViaTypeName(icallnode->GetRetTyIdx()); + OutputType(icallnode->GetRetTyIdx()); OutputReturnValues(&icallnode->GetReturnVec()); WriteNum(static_cast(s->NumOpnds())); 
break; @@ -525,14 +514,14 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { case OP_intrinsiccallwithtype: { IntrinsiccallNode *intrnNode = static_cast(s); WriteNum(intrnNode->GetIntrinsic()); - OutputTypeViaTypeName(intrnNode->GetTyIdx()); + OutputType(intrnNode->GetTyIdx()); WriteNum(static_cast(s->NumOpnds())); break; } case OP_intrinsiccallwithtypeassigned: { IntrinsiccallNode *intrnNode = static_cast(s); WriteNum(intrnNode->GetIntrinsic()); - OutputTypeViaTypeName(intrnNode->GetTyIdx()); + OutputType(intrnNode->GetTyIdx()); OutputReturnValues(&intrnNode->GetReturnVec()); WriteNum(static_cast(s->NumOpnds())); break; @@ -621,7 +610,7 @@ void BinaryMplExport::OutputBlockNode(BlockNode *block) { CatchNode *catchNode = static_cast(s); WriteNum(static_cast(catchNode->GetExceptionTyIdxVec().size())); for (TyIdx tidx : catchNode->GetExceptionTyIdxVec()) { - OutputTypeViaTypeName(tidx); + OutputType(tidx); } break; } @@ -713,6 +702,7 @@ void BinaryMplExport::WriteFunctionBodyField(uint64 contentIdx, std::unordered_s if (not2mplt) { for (MIRFunction *func : GetMIRModule().GetFunctionList()) { + curFunc = func; if (func->GetAttr(FUNCATTR_optimized)) { continue; } @@ -733,11 +723,12 @@ void BinaryMplExport::WriteFunctionBodyField(uint64 contentIdx, std::unordered_s continue; } } + localSymMark.clear(); + localSymMark[nullptr] = 0; OutputFunction(func->GetPuidx()); CHECK_FATAL(func->GetBody() != nullptr, "WriteFunctionBodyField: no function body"); OutputFuncIdInfo(func); OutputPregTab(func); - OutputLocalSymTab(func); OutputLabelTab(func); OutputLocalTypeNameTab(func->GetTypeNameTab()); OutputFormalsStIdx(func); diff --git a/src/mapleall/maple_ir/src/bin_func_import.cpp b/src/mapleall/maple_ir/src/bin_func_import.cpp index d18aac9f58351ce2a9b13d6f7ee857492fe0fd04..5d2f8f2ce0370fb72c01ead7392b87181b95e07a 100644 --- a/src/mapleall/maple_ir/src/bin_func_import.cpp +++ b/src/mapleall/maple_ir/src/bin_func_import.cpp @@ -56,20 +56,22 @@ void BinaryMplImport::ImportFuncIdInfo(MIRFunction *func) { } void BinaryMplImport::ImportBaseNode(Opcode &o, PrimType &typ) { - o = (Opcode)ReadNum(); - typ = (PrimType)ReadNum(); + o = (Opcode)Read(); + typ = (PrimType)Read(); } -void BinaryMplImport::ImportLocalSymbol(MIRFunction *func) { +MIRSymbol *BinaryMplImport::ImportLocalSymbol(MIRFunction *func) { int64 tag = ReadNum(); if (tag == 0) { - func->GetSymTab()->PushNullSymbol(); - return; + return nullptr; + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localSymTab.size(), "index out of bounds"); + return localSymTab.at(-tag); } CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol in ImportLocalSymbol()"); - auto indx = static_cast(ReadNum()); - CHECK_FATAL(indx == func->GetSymTab()->GetSymbolTableSize(), "inconsistant local stIdx"); MIRSymbol *sym = func->GetSymTab()->CreateSymbol(kScopeLocal); + localSymTab.push_back(sym); sym->SetNameStrIdx(ImportStr()); (void)func->GetSymTab()->AddToStringSymbolMap(*sym); sym->SetSKind((MIRSymKind)ReadNum()); @@ -91,22 +93,12 @@ void BinaryMplImport::ImportLocalSymbol(MIRFunction *func) { } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { sym->SetKonst(ImportConst(func)); } else if (sym->GetSKind() == kStFunc) { - PUIdx puIdx = ImportFuncViaSymName(); + PUIdx puIdx = ImportFuncViaSym(func); TyIdx tyIdx = ImportType(); sym->SetTyIdx(tyIdx); sym->SetFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx)); } -} - -void BinaryMplImport::ImportLocalSymTab(MIRFunction *func) { - int64 tag = ReadNum(); - CHECK_FATAL(tag == 
kBinSymStart, "kBinSymStart expected in ImportLocalSymTab()"); - int32 size = ReadInt(); - for (int64 i = 0; i < size; ++i) { - ImportLocalSymbol(func); - } - tag = ReadNum(); - CHECK_FATAL(tag == ~kBinSymStart, "pattern mismatch in ImportLocalSymTab()"); + return sym; } void BinaryMplImport::ImportPregTab(const MIRFunction *func) { @@ -161,8 +153,7 @@ void BinaryMplImport::ImportFormalsStIdx(MIRFunction *func) { CHECK_FATAL(tag == kBinFormalStart, "kBinFormalStart expected in ImportFormalsStIdx()"); auto size = ReadNum(); for (int64 i = 0; i < size; ++i) { - uint32 indx = static_cast(ReadNum()); - func->GetFormalDefVec()[static_cast(i)].formalSym = func->GetSymTab()->GetSymbolFromStIdx(indx); + func->GetFormalDefVec()[static_cast(i)].formalSym = ImportLocalSymbol(func); } tag = ReadNum(); CHECK_FATAL(tag == ~kBinFormalStart, "pattern mismatch in ImportFormalsStIdx()"); @@ -184,18 +175,13 @@ void BinaryMplImport::ImportAliasMap(MIRFunction *func) { CHECK_FATAL(tag == ~kBinAliasMapStart, "pattern mismatch in ImportAliasMap()"); } -PUIdx BinaryMplImport::ImportFuncViaSymName() { - int64 tag = ReadNum(); - CHECK_FATAL(tag == kBinKindFuncViaSymname, "kBinKindFuncViaSymname expected"); - GStrIdx strIdx = ImportStr(); - MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); - MIRFunction *func = sym->GetFunction(); - return func->GetPuidx(); +PUIdx BinaryMplImport::ImportFuncViaSym(MIRFunction *func) { + MIRSymbol *sym = InSymbol(func); + MIRFunction *f = sym->GetFunction(); + return f->GetPuidx(); } BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { - int64 tag = ReadNum(); - CHECK_FATAL(tag == kBinOpExpression, "kBinOpExpression expected"); Opcode op; PrimType typ; ImportBaseNode(op, typ); @@ -221,7 +207,9 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { return alabNode; } case OP_addroffunc: { - PUIdx puIdx = ImportFuncViaSymName(); + PUIdx puIdx = ImportFuncViaSym(func); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx]; + f->GetFuncSymbol()->SetAppearsInCode(true); AddroffuncNode *addrNode = mod.CurFuncCodeMemPool()->New(typ, puIdx); return addrNode; } @@ -237,18 +225,18 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { int32 num = static_cast(ReadNum()); StIdx stIdx; stIdx.SetScope(static_cast(ReadNum())); + MIRSymbol *sym = nullptr; if (stIdx.Islocal()) { - stIdx.SetIdx(static_cast(ReadNum())); + sym = ImportLocalSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); } else { - int32 stag = static_cast(ReadNum()); - CHECK_FATAL(stag == kBinKindSymViaSymname, "kBinKindSymViaSymname expected"); - GStrIdx strIdx = ImportStr(); - MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + sym = InSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); if (op == OP_addrof) { sym->SetHasPotentialAssignment(); } - stIdx.SetIdx(sym->GetStIdx().Idx()); } + stIdx.SetIdx(sym->GetStIdx().Idx()); if (op == OP_addrof || op == OP_dread) { AddrofNode *drNode = mod.CurFuncCodeMemPool()->New(op); drNode->SetPrimType(typ); @@ -294,7 +282,7 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { case OP_floor: case OP_trunc: { TypeCvtNode *typecvtNode = mod.CurFuncCodeMemPool()->New(op, typ); - typecvtNode->SetFromType((PrimType)ReadNum()); + typecvtNode->SetFromType((PrimType)Read()); typecvtNode->SetOpnd(ImportExpression(func), 0); return typecvtNode; } @@ -327,15 +315,15 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { case OP_zext: case 
OP_extractbits: { ExtractbitsNode *extNode = mod.CurFuncCodeMemPool()->New(op, typ); - extNode->SetBitsOffset(static_cast(ReadNum())); - extNode->SetBitsSize(static_cast(ReadNum())); + extNode->SetBitsOffset(Read()); + extNode->SetBitsSize(Read()); extNode->SetOpnd(ImportExpression(func), 0); return extNode; } case OP_depositbits: { DepositbitsNode *dbNode = mod.CurFuncCodeMemPool()->New(op, typ); - dbNode->SetBitsOffset(static_cast(ReadNum())); - dbNode->SetBitsSize(static_cast(ReadNum())); + dbNode->SetBitsOffset(ReadNum()); + dbNode->SetBitsSize(ReadNum()); dbNode->SetOpnd(ImportExpression(func), 0); dbNode->SetOpnd(ImportExpression(func), 1); return dbNode; @@ -380,7 +368,7 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { case OP_cmpl: case OP_cmp: { CompareNode *cmpNode = mod.CurFuncCodeMemPool()->New(op, typ); - cmpNode->SetOpndType((PrimType)ReadNum()); + cmpNode->SetOpndType((PrimType)Read()); cmpNode->SetOpnd(ImportExpression(func), 0); cmpNode->SetOpnd(ImportExpression(func), 1); return cmpNode; @@ -388,7 +376,7 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { case OP_resolveinterfacefunc: case OP_resolvevirtualfunc: { ResolveFuncNode *rsNode = mod.CurFuncCodeMemPool()->New(op, typ); - rsNode->SetPUIdx(ImportFuncViaSymName()); + rsNode->SetPUIdx(ImportFuncViaSym(func)); rsNode->SetOpnd(ImportExpression(func), 0); rsNode->SetOpnd(ImportExpression(func), 1); return rsNode; @@ -404,7 +392,7 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { // nary case OP_array: { TyIdx tidx = ImportType(); - auto boundsCheck = static_cast(ReadNum()); + bool boundsCheck = static_cast(Read()); ArrayNode *arrNode = mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), typ, tidx, boundsCheck); auto n = static_cast(ReadNum()); @@ -437,12 +425,15 @@ BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { return intrnNode; } default: - CHECK_FATAL(false, "Unhandled op %d", tag); + CHECK_FATAL(false, "Unhandled op %d", op); break; } } void BinaryMplImport::ImportSrcPos(SrcPosition &pos) { + if (!mod.IsWithDbgInfo()) { + return; + } pos.SetRawData(static_cast(ReadNum())); pos.SetLineNum(static_cast(ReadNum())); } @@ -452,14 +443,16 @@ void BinaryMplImport::ImportReturnValues(MIRFunction *func, CallReturnVector *re CHECK_FATAL(tag == kBinReturnvals, "expecting return values"); auto size = static_cast(ReadNum()); for (uint32 i = 0; i < size; ++i) { - uint32 idx = static_cast(ReadNum()); - FieldID fid = static_cast(ReadNum()); - PregIdx ridx = static_cast(ReadNum()); - retv->push_back(std::make_pair(StIdx(kScopeLocal, idx), RegFieldPair(fid, ridx))); - if (idx == 0) { + RegFieldPair rfp; + rfp.SetPregIdx(static_cast(ReadNum())); + if (rfp.IsReg()) { + retv->push_back(std::make_pair(StIdx(), rfp)); continue; } - MIRSymbol *lsym = func->GetSymTab()->GetSymbolFromStIdx(idx, false); + rfp.SetFieldID(static_cast(ReadNum())); + MIRSymbol *lsym = ImportLocalSymbol(func); + CHECK_FATAL(lsym != nullptr, "null ptr check"); + retv->push_back(std::make_pair(lsym->GetStIdx(), rfp)); if (lsym->GetName().find("L_STR") == 0) { MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lsym->GetTyIdx()); CHECK_FATAL(ty->GetKind() == kTypePointer, "Pointer type expected for L_STR prefix"); @@ -496,16 +489,16 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { int32 num = static_cast(ReadNum()); StIdx stIdx; stIdx.SetScope(static_cast(ReadNum())); + MIRSymbol *sym = nullptr; if (stIdx.Islocal()) { - stIdx.SetIdx(static_cast(ReadNum())); + sym = 
ImportLocalSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); } else { - int32 stag = static_cast(ReadNum()); - CHECK_FATAL(stag == kBinKindSymViaSymname, "kBinKindSymViaSymname expected"); - GStrIdx strIdx = ImportStr(); - MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + sym = InSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); sym->SetHasPotentialAssignment(); - stIdx.SetIdx(sym->GetStIdx().Idx()); } + stIdx.SetIdx(sym->GetStIdx().Idx()); if (op == OP_dassign) { DassignNode *s = func->GetCodeMemPool()->New(); s->SetStIdx(stIdx); @@ -524,7 +517,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { } case OP_regassign: { RegassignNode *s = func->GetCodeMemPool()->New(); - s->SetPrimType((PrimType)ReadNum()); + s->SetPrimType((PrimType)Read()); s->SetRegIdx(static_cast(ReadNum())); s->SetOpnd(ImportExpression(func), 0); stmt = s; @@ -541,7 +534,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { } case OP_iassignoff: { IassignoffNode *s = func->GetCodeMemPool()->New(); - s->SetPrimType((PrimType)ReadNum()); + s->SetPrimType((PrimType)Read()); s->SetOffset(static_cast(ReadNum())); s->SetOpnd(ImportExpression(func), 0); s->SetOpnd(ImportExpression(func), 1); @@ -551,7 +544,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { case OP_iassignspoff: case OP_iassignfpoff: { IassignFPoffNode *s = func->GetCodeMemPool()->New(op); - s->SetPrimType((PrimType)ReadNum()); + s->SetPrimType((PrimType)Read()); s->SetOffset(static_cast(ReadNum())); s->SetOpnd(ImportExpression(func), 0); stmt = s; @@ -576,7 +569,9 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { case OP_interfaceicall: case OP_customcall: { CallNode *s = func->GetCodeMemPool()->New(mod, op); - s->SetPUIdx(ImportFuncViaSymName()); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); numOpr = static_cast(ReadNum()); s->SetNumOpnds(numOpr); for (int32 i = 0; i < numOpr; ++i) { @@ -593,7 +588,9 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { case OP_interfaceicallassigned: case OP_customcallassigned: { CallNode *s = func->GetCodeMemPool()->New(mod, op); - s->SetPUIdx(ImportFuncViaSymName()); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); ImportReturnValues(func, &s->GetReturnVec()); numOpr = static_cast(ReadNum()); s->SetNumOpnds(numOpr); @@ -609,7 +606,9 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { } case OP_polymorphiccall: { CallNode *s = func->GetCodeMemPool()->New(mod, op); - s->SetPUIdx(ImportFuncViaSymName()); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); s->SetTyIdx(ImportType()); numOpr = static_cast(ReadNum()); s->SetNumOpnds(numOpr); @@ -621,7 +620,9 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { } case OP_polymorphiccallassigned: { CallNode *s = func->GetCodeMemPool()->New(mod, op); - s->SetPUIdx(ImportFuncViaSymName()); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + f->GetFuncSymbol()->SetAppearsInCode(true); s->SetTyIdx(ImportType()); ImportReturnValues(func, &s->GetReturnVec()); numOpr = 
static_cast(ReadNum()); @@ -644,6 +645,7 @@ BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { stmt = s; break; } + case OP_icallprotoassigned: case OP_icallassigned: { IcallNode *s = func->GetCodeMemPool()->New(mod, op); s->SetRetTyIdx(ImportType()); @@ -926,6 +928,9 @@ void BinaryMplImport::ReadFunctionBodyField() { PUIdx puIdx = ImportFunction(); MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); mod.SetCurFunction(fn); + fn->GetFuncSymbol()->SetAppearsInCode(true); + localSymTab.clear(); + localSymTab.push_back(nullptr); fn->AllocSymTab(); fn->AllocPregTab(); @@ -934,7 +939,6 @@ void BinaryMplImport::ReadFunctionBodyField() { ImportFuncIdInfo(fn); ImportPregTab(fn); - ImportLocalSymTab(fn); ImportLabelTab(fn); ImportLocalTypeNameTable(fn->GetTypeNameTab()); ImportFormalsStIdx(fn); diff --git a/src/mapleall/maple_ir/src/bin_mpl_export.cpp b/src/mapleall/maple_ir/src/bin_mpl_export.cpp index 879e34074d9e451d9b8bea3588182709c479cb09..15a25c6ed8a76e8ecb11d9450889e2674ea62d9f 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_export.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_export.cpp @@ -26,7 +26,7 @@ namespace { using namespace maple; using OutputConstFactory = FunctionFactory; -using OutputTypeFactory = FunctionFactory; +using OutputTypeFactory = FunctionFactory; void OutputConstInt(const MIRConst &constVal, BinaryMplExport &mplExport) { mplExport.WriteNum(kBinKindConstInt); @@ -141,39 +141,39 @@ static bool InitOutputConstFactory() { return true; } -void OutputTypeScalar(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeScalar(const MIRType &ty, BinaryMplExport &mplExport) { mplExport.WriteNum(kBinKindTypeScalar); mplExport.OutputTypeBase(ty); } -void OutputTypePointer(const MIRType &ty, BinaryMplExport &mplExport, bool canUseTypename) { +void OutputTypePointer(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypePointer); mplExport.OutputTypeBase(type); mplExport.OutputTypeAttrs(type.GetTypeAttrs()); - mplExport.OutputType(type.GetPointedTyIdx(), canUseTypename); + mplExport.OutputType(type.GetPointedTyIdx()); } -void OutputTypeByName(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeByName(const MIRType &ty, BinaryMplExport &mplExport) { mplExport.WriteNum(kBinKindTypeByName); mplExport.OutputTypeBase(ty); } -void OutputTypeFArray(const MIRType &ty, BinaryMplExport &mplExport, bool canUseTypename) { +void OutputTypeFArray(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeFArray); mplExport.OutputTypeBase(type); - mplExport.OutputType(type.GetElemTyIdx(), canUseTypename); + mplExport.OutputType(type.GetElemTyIdx()); } -void OutputTypeJArray(const MIRType &ty, BinaryMplExport &mplExport, bool canUseTypename) { +void OutputTypeJArray(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeJarray); mplExport.OutputTypeBase(type); - mplExport.OutputType(type.GetElemTyIdx(), canUseTypename); + mplExport.OutputType(type.GetElemTyIdx()); } -void OutputTypeArray(const MIRType &ty, BinaryMplExport &mplExport, bool canUseTypename) { +void OutputTypeArray(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeArray); mplExport.OutputTypeBase(type); @@ -181,20 +181,20 @@ void OutputTypeArray(const MIRType &ty, BinaryMplExport &mplExport, bool canUseT for (uint16 i = 0; i 
< type.GetDim(); ++i) { mplExport.WriteNum(type.GetSizeArrayItem(i)); } - mplExport.OutputType(type.GetElemTyIdx(), canUseTypename); + mplExport.OutputType(type.GetElemTyIdx()); mplExport.OutputTypeAttrs(type.GetTypeAttrs()); } -void OutputTypeFunction(const MIRType &ty, BinaryMplExport &mplExport, bool canUseTypename) { +void OutputTypeFunction(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeFunction); mplExport.OutputTypeBase(type); - mplExport.OutputType(type.GetRetTyIdx(), canUseTypename); - mplExport.WriteNum(type.IsVarargs()); + mplExport.OutputType(type.GetRetTyIdx()); + mplExport.WriteNum(type.funcAttrs.GetAttrFlag()); size_t size = type.GetParamTypeList().size(); mplExport.WriteNum(size); for (size_t i = 0; i < size; ++i) { - mplExport.OutputType(type.GetNthParamType(i), canUseTypename); + mplExport.OutputType(type.GetNthParamType(i)); } size = type.GetParamAttrsList().size(); mplExport.WriteNum(size); @@ -203,13 +203,13 @@ void OutputTypeFunction(const MIRType &ty, BinaryMplExport &mplExport, bool canU } } -void OutputTypeParam(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeParam(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeParam); mplExport.OutputTypeBase(type); } -void OutputTypeInstantVector(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeInstantVector(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeInstantVector); mplExport.OutputTypeBase(type); @@ -217,15 +217,15 @@ void OutputTypeInstantVector(const MIRType &ty, BinaryMplExport &mplExport, bool mplExport.OutputTypePairs(type); } -void OutputTypeGenericInstant(const MIRType &ty, BinaryMplExport &mplExport, bool canUseTypename) { +void OutputTypeGenericInstant(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeGenericInstant); mplExport.OutputTypeBase(type); mplExport.OutputTypePairs(type); - mplExport.OutputType(type.GetGenericTyIdx(), canUseTypename); + mplExport.OutputType(type.GetGenericTyIdx()); } -void OutputTypeBitField(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeBitField(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeBitField); mplExport.OutputTypeBase(type); @@ -233,7 +233,7 @@ void OutputTypeBitField(const MIRType &ty, BinaryMplExport &mplExport, bool) { } // for Struct/StructIncomplete/Union -void OutputTypeStruct(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeStruct(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeStruct); mplExport.OutputTypeBase(type); @@ -249,7 +249,7 @@ void OutputTypeStruct(const MIRType &ty, BinaryMplExport &mplExport, bool) { } } -void OutputTypeClass(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeClass(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); mplExport.WriteNum(kBinKindTypeClass); mplExport.OutputTypeBase(type); @@ -264,7 +264,7 @@ void OutputTypeClass(const MIRType &ty, BinaryMplExport &mplExport, bool) { } } -void OutputTypeInterface(const MIRType &ty, BinaryMplExport &mplExport, bool) { +void OutputTypeInterface(const MIRType &ty, BinaryMplExport &mplExport) { const auto &type = static_cast(ty); 
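Side sketch, not part of the patch: after this change every per-kind export handler has the same two-argument shape, because the canUseTypename flag is gone and nested types are always written out structurally through OutputType(). A minimal illustration of that shape, using only names that appear in the hunks above (the cast target and bin-kind constant are as in the pointer handler):

// Illustration only: the new handler signature takes (type, exporter) with no
// canUseTypename flag; the pointed-to type is exported recursively, never by name.
void OutputTypePointerSketch(const MIRType &ty, BinaryMplExport &mplExport) {
  const auto &type = static_cast<const MIRPtrType&>(ty);
  mplExport.WriteNum(kBinKindTypePointer);
  mplExport.OutputTypeBase(type);
  mplExport.OutputTypeAttrs(type.GetTypeAttrs());
  mplExport.OutputType(type.GetPointedTyIdx());  // recurse; no by-name shortcut
}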
mplExport.WriteNum(kBinKindTypeInterface); mplExport.OutputTypeBase(type); @@ -279,7 +279,7 @@ void OutputTypeInterface(const MIRType &ty, BinaryMplExport &mplExport, bool) { } } -void OutputTypeConstString(const MIRType &ty, BinaryMplExport&, bool) { +void OutputTypeConstString(const MIRType &ty, BinaryMplExport&) { ASSERT(false, "Type's kind not yet implemented: %d", ty.GetKind()); (void)ty; } @@ -397,7 +397,7 @@ void BinaryMplExport::DumpBuf(const std::string &name) { void BinaryMplExport::OutputConstBase(const MIRConst &constVal) { WriteNum(constVal.GetKind()); - OutputTypeViaTypeName(constVal.GetType().GetTypeIndex()); + OutputType(constVal.GetType().GetTypeIndex()); } void BinaryMplExport::OutputConst(MIRConst *constVal) { @@ -470,8 +470,8 @@ void BinaryMplExport::OutputPragma(const MIRPragma &p) { WriteNum(p.GetKind()); WriteNum(p.GetVisibility()); OutputStr(p.GetStrIdx()); - OutputType(p.GetTyIdx(), false); - OutputType(p.GetTyIdxEx(), false); + OutputType(p.GetTyIdx()); + OutputType(p.GetTyIdxEx()); WriteNum(p.GetParamNum()); size_t size = p.GetElementVector().size(); WriteNum(size); @@ -488,7 +488,7 @@ void BinaryMplExport::OutputTypeBase(const MIRType &type) { void BinaryMplExport::OutputFieldPair(const FieldPair &fp) { OutputStr(fp.first); // GStrIdx - OutputType(fp.second.first, false); // TyIdx + OutputType(fp.second.first); // TyIdx FieldAttrs fa = fp.second.second; WriteNum(fa.GetAttrFlag()); WriteNum(fa.GetAlignValue()); @@ -511,7 +511,7 @@ void BinaryMplExport::OutputMethodPair(const MethodPair &memPool) { MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(memPool.first.Idx()); CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, can't get symbol! Check it!"); WriteAsciiStr(GlobalTables::GetStrTable().GetStringFromStrIdx(funcSt->GetNameStrIdx())); - OutputType(memPool.second.first, false); // TyIdx + OutputType(memPool.second.first); // TyIdx WriteNum(memPool.second.second.GetAttrFlag()); // FuncAttrs } @@ -539,7 +539,7 @@ void BinaryMplExport::OutputStructTypeData(const MIRStructType &type) { void BinaryMplExport::OutputImplementedInterfaces(const std::vector &interfaces) { WriteNum(interfaces.size()); for (const TyIdx &tyIdx : interfaces) { - OutputType(tyIdx, false); + OutputType(tyIdx); } } @@ -571,7 +571,7 @@ void BinaryMplExport::OutputPragmaVec(const std::vector &pragmaVec) } void BinaryMplExport::OutputClassTypeData(const MIRClassType &type) { - OutputType(type.GetParentTyIdx(), false); + OutputType(type.GetParentTyIdx()); OutputImplementedInterfaces(type.GetInterfaceImplemented()); OutputInfoIsString(type.GetInfoIsString()); if (!inIPA) { @@ -601,6 +601,7 @@ void BinaryMplExport::Init() { symMark[nullptr] = 0; funcMark[nullptr] = 0; eaNodeMark[nullptr] = 0; + curFunc = nullptr; for (uint32 pti = static_cast(PTY_begin); pti < static_cast(PTY_end); ++pti) { typMark[GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(pti))] = pti; } @@ -612,7 +613,7 @@ void BinaryMplExport::OutputSymbol(MIRSymbol *sym) { return; } - auto it = symMark.find(sym); + std::unordered_map::iterator it = symMark.find(sym); if (it != symMark.end()) { WriteNum(-(it->second)); return; @@ -645,7 +646,7 @@ void BinaryMplExport::OutputSymbol(MIRSymbol *sym) { if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { OutputSrcPos(sym->GetSrcPosition()); } - OutputTypeViaTypeName(sym->GetTyIdx()); + OutputType(sym->GetTyIdx()); } void BinaryMplExport::OutputFunction(PUIdx puIdx) { @@ -671,7 +672,7 @@ void BinaryMplExport::OutputFunction(PUIdx puIdx) { MIRSymbol *funcSt = 
GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, cannot get symbol! Check it!"); OutputSymbol(funcSt); - OutputTypeViaTypeName(func->GetMIRFuncType()->GetTypeIndex()); + OutputType(func->GetMIRFuncType()->GetTypeIndex()); WriteNum(func->GetFuncAttrs().GetAttrFlag()); auto &attributes = func->GetFuncAttrs(); @@ -684,12 +685,12 @@ void BinaryMplExport::OutputFunction(PUIdx puIdx) { } WriteNum(func->GetFlag()); - OutputTypeViaTypeName(func->GetClassTyIdx()); + OutputType(func->GetClassTyIdx()); // output formal parameter information WriteNum(static_cast(func->GetFormalDefVec().size())); for (FormalDef formalDef : func->GetFormalDefVec()) { OutputStr(formalDef.formalStrIdx); - OutputType(formalDef.formalTyIdx, false); + OutputType(formalDef.formalTyIdx); WriteNum(static_cast(formalDef.formalAttrs.GetAttrFlag())); } // store Side Effect for each func @@ -751,6 +752,7 @@ void BinaryMplExport::WriteHeaderField(uint64 contentIdx) { WriteNum(mod.GetID()); if (mod.GetFlavor() == kFlavorLmbc) { WriteNum(mod.GetGlobalMemSize()); + WriteNum(mod.IsWithDbgInfo()); } WriteNum(mod.GetNumFuncs()); WriteAsciiStr(mod.GetEntryFuncName()); @@ -795,7 +797,7 @@ void BinaryMplExport::WriteTypeField(uint64 contentIdx, bool useClassList) { auto *structType = static_cast(type); // skip imported class/interface and incomplete types if (!structType->IsImported() && !structType->IsIncomplete()) { - OutputType(curTyidx, false); + OutputType(curTyidx); ++size; } } @@ -803,7 +805,7 @@ void BinaryMplExport::WriteTypeField(uint64 contentIdx, bool useClassList) { } else { uint32 idx = GlobalTables::GetTypeTable().lastDefaultTyIdx.GetIdx(); for (idx = idx + 1; idx < GlobalTables::GetTypeTable().GetTypeTableSize(); idx++) { - OutputType(TyIdx(idx), false); + OutputType(TyIdx(idx)); size++; } } @@ -884,7 +886,7 @@ void BinaryMplExport::WriteSeField() { GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(funcStr)); MIRFunction *func = (funcSymbol != nullptr) ? 
GetMIRModule().GetMIRBuilder()->GetFunctionFromSymbol(*funcSymbol) : nullptr; - OutputType(func->GetReturnTyIdx(), false); + OutputType(func->GetReturnTyIdx()); } ++size; } @@ -1102,7 +1104,7 @@ void BinaryMplExport::WriteSymField(uint64 contentIdx) { MIRSymKind sKind = s->GetSKind(); if (s->IsDeleted() || storageClass == kScUnused || (s->GetIsImported() && !s->GetAppearsInCode()) || - (storageClass == kScExtern && sKind == kStFunc)) { + (sKind == kStFunc && (storageClass == kScExtern || !s->GetAppearsInCode()))) { continue; } OutputSymbol(s); @@ -1213,8 +1215,6 @@ void BinaryMplExport::Export(const std::string &fname, std::unordered_setIsNameIsLocal() && ty->GetNameStrIdx() != GStrIdx(0)) { - WriteNum(kBinKindTypeViaTypename); - OutputStr(ty->GetNameStrIdx()); - return; - } - auto func = CreateProductFunction(ty->GetKind()); if (func != nullptr) { - func(*ty, *this, canUseTypename); + func(*ty, *this); } else { ASSERT(false, "Type's kind not yet implemented: %d", ty->GetKind()); } diff --git a/src/mapleall/maple_ir/src/bin_mpl_import.cpp b/src/mapleall/maple_ir/src/bin_mpl_import.cpp index 0406ce74782727fd4a1abfb2225646a52e47ae87..0e0f4a81b582725008dbd7511ed2fedb572b19e4 100644 --- a/src/mapleall/maple_ir/src/bin_mpl_import.cpp +++ b/src/mapleall/maple_ir/src/bin_mpl_import.cpp @@ -129,6 +129,8 @@ MIRConst *BinaryMplImport::ImportConst(MIRFunction *func) { } case kBinKindConstAddrofFunc: { PUIdx puIdx = ImportFunction(); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx]; + f->GetFuncSymbol()->SetAppearsInCode(true); mod.SetCurFunction(func); return memPool->New(puIdx, *type); } @@ -541,19 +543,6 @@ TyIdx BinaryMplImport::ImportType(bool forPointedType) { CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); return typTab.at(static_cast(-tag)); } - if (tag == kBinKindTypeViaTypename) { - GStrIdx typenameStrIdx = ImportStr(); - TyIdx tyIdx = mod.GetTypeNameTab()->GetTyIdxFromGStrIdx(typenameStrIdx); - if (tyIdx != 0) { - typTab.push_back(tyIdx); - return tyIdx; - } - MIRTypeByName ltype(typenameStrIdx); - ltype.SetNameIsLocal(false); - MIRType *type = GlobalTables::GetTypeTable().GetOrCreateMIRTypeNode(ltype); - typTab.push_back(type->GetTypeIndex()); - return type->GetTypeIndex(); - } PrimType primType = (PrimType)0; GStrIdx strIdx(0); bool nameIsLocal = false; @@ -580,13 +569,6 @@ TyIdx BinaryMplImport::ImportType(bool forPointedType) { } return origType->GetTypeIndex(); } - case kBinKindTypeByName: { - MIRTypeByName type(strIdx); - type.SetNameIsLocal(nameIsLocal); - MIRType *origType = &InsertInTypeTables(type); - typTab.push_back(origType->GetTypeIndex()); - return origType->GetTypeIndex(); - } case kBinKindTypeFArray: { MIRFarrayType type(strIdx); type.SetNameIsLocal(nameIsLocal); @@ -629,7 +611,7 @@ TyIdx BinaryMplImport::ImportType(bool forPointedType) { size_t idx = typTab.size(); typTab.push_back(TyIdx(0)); type.SetRetTyIdx(ImportType()); - type.SetVarArgs(ReadNum()); + type.funcAttrs.SetAttrFlag(ReadNum()); int64 size = ReadNum(); for (int64 i = 0; i < size; ++i) { type.GetParamTypeList().push_back(ImportType()); @@ -737,19 +719,6 @@ TyIdx BinaryMplImport::ImportTypeNonJava() { CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); return typTab[static_cast(-tag)]; } - if (tag == kBinKindTypeViaTypename) { - GStrIdx typenameStrIdx = ImportStr(); - TyIdx tyIdx = mod.GetTypeNameTab()->GetTyIdxFromGStrIdx(typenameStrIdx); - if (tyIdx != 0) { - typTab.push_back(tyIdx); - return tyIdx; - } - MIRTypeByName ltype(typenameStrIdx); 
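Side sketch, not part of the patch: with the kBinKindTypeViaTypename and kBinKindTypeByName escapes deleted on the import side, the only non-structural case left in ImportType is the negative back-reference tag, which indexes the types already materialized during this import. A minimal restatement of that dispatch, assuming typTab is the per-import vector of TyIdx used in the surrounding code:

// Illustration only: negative tag = back-reference into typTab; any other tag
// now always carries a full structural type definition to be decoded per kind.
TyIdx ImportTypeTagSketch(int64 tag, const std::vector<TyIdx> &typTab) {
  if (tag < 0) {
    CHECK_FATAL(static_cast<size_t>(-tag) < typTab.size(), "index out of bounds");
    return typTab.at(static_cast<size_t>(-tag));
  }
  // positive tags: kBinKindTypeScalar / Pointer / FArray / Struct / Function ...
  return TyIdx(0);  // placeholder for the structural branches
}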
- ltype.SetNameIsLocal(false); - MIRType *type = GlobalTables::GetTypeTable().GetOrCreateMIRTypeNode(ltype); - typTab.push_back(type->GetTypeIndex()); - return type->GetTypeIndex(); - } PrimType primType = (PrimType)0; GStrIdx strIdx(0); bool nameIsLocal = false; @@ -771,12 +740,6 @@ TyIdx BinaryMplImport::ImportTypeNonJava() { GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); return tyIdxUsed; } - case kBinKindTypeByName: { - MIRTypeByName type(strIdx); - type.SetNameIsLocal(nameIsLocal); - GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); - return tyIdxUsed; - } case kBinKindTypeFArray: { MIRFarrayType type(strIdx); type.SetNameIsLocal(nameIsLocal); @@ -808,7 +771,7 @@ TyIdx BinaryMplImport::ImportTypeNonJava() { MIRFuncType type(strIdx); type.SetNameIsLocal(nameIsLocal); type.SetRetTyIdx(ImportTypeNonJava()); - type.SetVarArgs(ReadNum()); + type.funcAttrs.SetAttrFlag(ReadNum()); int64 size = ReadNum(); for (int64 i = 0; i < size; ++i) { type.GetParamTypeList().push_back(ImportTypeNonJava()); @@ -1159,6 +1122,7 @@ void BinaryMplImport::ReadHeaderField() { mod.SetID(static_cast(ReadNum())); if (mod.GetFlavor() == kFlavorLmbc) { mod.SetGlobalMemSize(ReadNum()); + mod.SetWithDbgInfo(ReadNum()); } mod.SetNumFuncs(static_cast(ReadNum())); std::string inStr; diff --git a/src/mapleall/maple_ir/src/global_tables.cpp b/src/mapleall/maple_ir/src/global_tables.cpp index c7051c12571991ebe8dbf7e356fecee9c804f2b8..2ee37117607c941ae4c0d75af9936f6a68063914 100644 --- a/src/mapleall/maple_ir/src/global_tables.cpp +++ b/src/mapleall/maple_ir/src/global_tables.cpp @@ -226,7 +226,9 @@ MIRType *TypeTable::GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::ve const std::vector &vecAttrs, bool isVarg, const TypeAttrs &retAttrs) { MIRFuncType funcType(retTyIdx, vecType, vecAttrs, retAttrs); - funcType.SetVarArgs(isVarg); + if (isVarg) { + funcType.SetVarArgs(); + } TyIdx tyIdx = GetOrCreateMIRType(&funcType); ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFunctionType"); return typeTable.at(tyIdx); diff --git a/src/mapleall/maple_ir/src/mir_builder.cpp b/src/mapleall/maple_ir/src/mir_builder.cpp index 20d46306c07873faa43c1284a1b4588873a38ee5..f1aa2583e472d6332eb319104483ad86eae5faa0 100755 --- a/src/mapleall/maple_ir/src/mir_builder.cpp +++ b/src/mapleall/maple_ir/src/mir_builder.cpp @@ -835,6 +835,13 @@ IcallNode *MIRBuilder::CreateStmtIcall(const MapleVector &args) { return stmt; } +IcallNode *MIRBuilder::CreateStmtIcallproto(const MapleVector &args) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallproto); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + return stmt; +} + IcallNode *MIRBuilder::CreateStmtIcallAssigned(const MapleVector &args, const MIRSymbol &ret) { auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallassigned); CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); @@ -852,6 +859,23 @@ IcallNode *MIRBuilder::CreateStmtIcallAssigned(const MapleVector &arg return stmt; } +IcallNode *MIRBuilder::CreateStmtIcallprotoAssigned(const MapleVector &args, const MIRSymbol &ret) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallprotoassigned); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal || + ret.GetStorageClass() == kScExtern || 
ret.GetStorageClass() == kScGlobal), + "unknown classtype! check it!"); + nrets.emplace_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0))); + stmt->SetNumOpnds(args.size()); + stmt->GetNopnd().resize(stmt->GetNumOpnds()); + stmt->SetReturnVec(nrets); + for (size_t i = 0; i < stmt->GetNopndSize(); ++i) { + stmt->SetNOpndAt(i, args.at(i)); + } + stmt->SetRetTyIdx(ret.GetTyIdx()); + return stmt; +} + IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments, TyIdx tyIdx) { auto *stmt = GetCurrentFuncCodeMp()->New( diff --git a/src/mapleall/maple_ir/src/mir_lower.cpp b/src/mapleall/maple_ir/src/mir_lower.cpp index 99dbd04c3f97b07b52ad36b2121c01bf404ee755..aa5cb0131c2902fc4a279c07b5bc4e0512c91679 100644 --- a/src/mapleall/maple_ir/src/mir_lower.cpp +++ b/src/mapleall/maple_ir/src/mir_lower.cpp @@ -530,6 +530,19 @@ BlockNode *MIRLower::LowerBlock(BlockNode &block) { case OP_doloop: newBlock->AppendStatementsFromBlock(*LowerDoloopStmt(static_cast(*stmt))); break; + case OP_icallassigned: + case OP_icall: { + if (mirModule.IsCModule()) { + // convert to icallproto/icallprotoassigned + IcallNode *ic = static_cast(stmt); + ic->SetOpCode(stmt->GetOpCode() == OP_icall ? OP_icallproto : OP_icallprotoassigned); + MIRFuncType *funcType = FuncTypeFromFuncPtrExpr(stmt->Opnd(0)); + CHECK_FATAL(funcType != nullptr, "MIRLower::LowerBlock: cannot find prototype for icall"); + ic->SetRetTyIdx(funcType->GetTypeIndex()); + } + newBlock->AddStatement(stmt); + break; + } case OP_block: tmp = LowerBlock(static_cast(*stmt)); newBlock->AppendStatementsFromBlock(*tmp); @@ -985,6 +998,100 @@ void MIRLower::ExpandArrayMrt(MIRFunction &func) { } } +MIRFuncType *MIRLower::FuncTypeFromFuncPtrExpr(BaseNode *x) { + MIRFuncType *res = nullptr; + MIRFunction *func = mirModule.CurFunction(); + switch (x->GetOpCode()) { + case OP_regread: { + RegreadNode *regread = static_cast(x); + MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(regread->GetRegIdx()); + // see if it is promoted from a symbol + if (preg->GetOp() == OP_dread) { + const MIRSymbol *symbol = preg->rematInfo.sym; + MIRType *mirType = symbol->GetType(); + if (preg->fieldID != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(preg->fieldID); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + if (res != nullptr) { + break; + } + } + // check if a formal promoted to preg + for (FormalDef &formalDef : func->GetFormalDefVec()) { + if (!formalDef.formalSym->IsPreg()) { + continue; + } + if (formalDef.formalSym->GetPreg() == preg) { + MIRType *mirType = formalDef.formalSym->GetType(); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + } + break; + } + case OP_dread: { + DreadNode *dread = static_cast(x); + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = symbol->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(dread->GetFieldID()); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_iread: { + IreadNode *iread = static_cast(x); + MIRPtrType *ptrType = static_cast(iread->GetType()); + 
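Side sketch, not part of the patch: the LowerBlock hunk above rewrites icall/icallassigned in place for C modules, and FuncTypeFromFuncPtrExpr (now a MIRLower member) recovers the callee prototype from the static type of the function-pointer operand. A compact restatement of that rewrite, assuming the helper is callable from this context:

// Illustration only: cache the prototype on the node so later phases
// (including LMBC lowering) no longer have to rediscover it.
void RewriteIcallToProtoSketch(IcallNode &ic, MIRLower &lower) {
  ic.SetOpCode(ic.GetOpCode() == OP_icall ? OP_icallproto : OP_icallprotoassigned);
  // walks regread/dread/iread/addroffunc/retype/select to find the MIRFuncType
  MIRFuncType *proto = lower.FuncTypeFromFuncPtrExpr(ic.Opnd(0));
  CHECK_FATAL(proto != nullptr, "cannot find prototype for icall");
  ic.SetRetTyIdx(proto->GetTypeIndex());  // for icallproto*, retTyIdx holds the MIRFuncType itself
}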
MIRType *mirType = ptrType->GetPointedType(); + if (mirType->GetKind() == kTypeFunction) { + res = static_cast(mirType); + } else if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_addroffunc: { + AddroffuncNode *addrofFunc = static_cast(x); + PUIdx puIdx = addrofFunc->GetPUIdx(); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + res = f->GetMIRFuncType(); + break; + } + case OP_retype: { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + static_cast(x)->GetTyIdx()); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + if (res == nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(0)); + } + break; + } + case OP_select: { + res = FuncTypeFromFuncPtrExpr(x->Opnd(1)); + if (res == nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(2)); + } + break; + } + default: CHECK_FATAL(false, "LMBCLowerer::FuncTypeFromFuncPtrExpr: NYI"); + } + return res; +} + const std::set MIRLower::kSetArrayHotFunc = {}; bool MIRLower::ShouldOptArrayMrt(const MIRFunction &func) { diff --git a/src/mapleall/maple_ir/src/mir_nodes.cpp b/src/mapleall/maple_ir/src/mir_nodes.cpp index 573346e739521dbc61e3dbac4009a4c58958c4ff..30d77f277505ace0448e81074b2578d8880649c6 100755 --- a/src/mapleall/maple_ir/src/mir_nodes.cpp +++ b/src/mapleall/maple_ir/src/mir_nodes.cpp @@ -1198,7 +1198,7 @@ MIRType *IcallNode::GetCallReturnType() { if (op == OP_icall || op == OP_icallassigned) { return GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx); } - // icallproto + // icallproto or icallprotoassigned MIRFuncType *funcType = static_cast( GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx)); return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); @@ -1223,7 +1223,7 @@ const MIRSymbol *IcallNode::GetCallReturnSymbol(const MIRModule &mod) const { void IcallNode::Dump(int32 indent, bool newline) const { StmtNode::DumpBase(indent); - if (op == OP_icallproto) { + if (op == OP_icallproto || op == OP_icallprotoassigned) { LogInfo::MapleLogger() << " "; GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx)->Dump(indent + 1); } diff --git a/src/mapleall/maple_ir/src/mir_parser.cpp b/src/mapleall/maple_ir/src/mir_parser.cpp index b4bf8cf4ce249b9465fdb4d2693c787a61667e6c..39371eed482544f6ec16ef62161444d03b00094e 100755 --- a/src/mapleall/maple_ir/src/mir_parser.cpp +++ b/src/mapleall/maple_ir/src/mir_parser.cpp @@ -894,6 +894,7 @@ bool MIRParser::ParseStmtCall(StmtNodePtr &stmt) { callStmt->SetPUIdx(pIdx); MIRFunction *callee = GlobalTables::GetFunctionTable().GetFuncTable()[pIdx]; + callee->GetFuncSymbol()->SetAppearsInCode(true); if (callee->GetName() == "setjmp") { mod.CurFunction()->SetHasSetjmp(); } @@ -930,9 +931,14 @@ bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt, Opcode op) { // . . . // dassign } // icallproto (, , ..., ) + // icallprotoassigned (, , ..., ) { + // dassign + // dassign + // . . . 
+ // dassign } IcallNode *iCallStmt = mod.CurFuncCodeMemPool()->New(mod, op); lexer.NextToken(); - if (op == OP_icallproto) { + if (op == OP_icallproto || op == OP_icallprotoassigned) { TyIdx tyIdx(0); if (!ParseDerivedType(tyIdx)) { Error("error parsing type in ParseStmtIcall for icallproto at "); @@ -946,7 +952,7 @@ bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt, Opcode op) { } iCallStmt->SetNOpnd(opndsVec); iCallStmt->SetNumOpnds(opndsVec.size()); - if (op == OP_icallassigned) { + if (op == OP_icallassigned || op == OP_icallprotoassigned) { CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); if (!ParseCallReturns(retsVec)) { return false; @@ -970,6 +976,10 @@ bool MIRParser::ParseStmtIcallproto(StmtNodePtr &stmt) { return ParseStmtIcall(stmt, OP_icallproto); } +bool MIRParser::ParseStmtIcallprotoassigned(StmtNodePtr &stmt) { + return ParseStmtIcall(stmt, OP_icallprotoassigned); +} + bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool isAssigned) { Opcode o = !isAssigned ? (lexer.GetTokenKind() == TK_intrinsiccall ? OP_intrinsiccall : OP_xintrinsiccall) : (lexer.GetTokenKind() == TK_intrinsiccallassigned ? OP_intrinsiccallassigned @@ -3230,7 +3240,8 @@ bool MIRParser::ParseConstAddrLeafExpr(MIRConstPtr &cexpr) { } else if (expr->GetOpCode() == OP_addroffunc) { auto *aof = static_cast(expr); MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(aof->GetPUIdx()); - const MIRSymbol *fName = f->GetFuncSymbol(); + MIRSymbol *fName = f->GetFuncSymbol(); + fName->SetAppearsInCode(true); TyIdx ptyIdx = fName->GetTyIdx(); MIRPtrType ptrType(ptyIdx); ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); @@ -3404,6 +3415,7 @@ std::map MIRParser::InitFuncPtrMapForPar funcPtrMap[TK_icall] = &MIRParser::ParseStmtIcall; funcPtrMap[TK_icallassigned] = &MIRParser::ParseStmtIcallassigned; funcPtrMap[TK_icallproto] = &MIRParser::ParseStmtIcallproto; + funcPtrMap[TK_icallprotoassigned] = &MIRParser::ParseStmtIcallprotoassigned; funcPtrMap[TK_intrinsiccall] = &MIRParser::ParseStmtIntrinsiccall; funcPtrMap[TK_intrinsiccallassigned] = &MIRParser::ParseStmtIntrinsiccallassigned; funcPtrMap[TK_xintrinsiccall] = &MIRParser::ParseStmtIntrinsiccall; diff --git a/src/mapleall/maple_ir/src/mir_type.cpp b/src/mapleall/maple_ir/src/mir_type.cpp index 64ee74b164e73a10c60ab09f44293c1019d9f6c5..08180754b73bf60c2f0040b180b346890dfbc38d 100644 --- a/src/mapleall/maple_ir/src/mir_type.cpp +++ b/src/mapleall/maple_ir/src/mir_type.cpp @@ -740,7 +740,9 @@ void MIRFuncType::Dump(int indent, bool dontUseName) const { if (!dontUseName && CheckAndDumpTypeName(nameStrIdx, nameIsLocal)) { return; } - LogInfo::MapleLogger() << "Dump(indent + 1); @@ -751,7 +753,7 @@ void MIRFuncType::Dump(int indent, bool dontUseName) const { LogInfo::MapleLogger() << ","; } } - if (isVarArgs) { + if (IsVarargs()) { LogInfo::MapleLogger() << ", ..."; } LogInfo::MapleLogger() << ") "; @@ -1662,7 +1664,7 @@ bool MIRFuncType::EqualTo(const MIRType &type) const { } const auto &pType = static_cast(type); return (pType.retTyIdx == retTyIdx && pType.paramTypeList == paramTypeList && - pType.isVarArgs == isVarArgs && pType.paramAttrsList == paramAttrsList && + pType.funcAttrs == funcAttrs && pType.paramAttrsList == paramAttrsList && pType.retAttrs == retAttrs); } diff --git a/src/mapleall/maple_ir/src/parser.cpp b/src/mapleall/maple_ir/src/parser.cpp index 66c5098672eb760674bbb44a0f6afd765d1589b0..8fd9956b6c60e483bca2aea3822b45b2bd519d0d 100644 --- a/src/mapleall/maple_ir/src/parser.cpp +++ 
b/src/mapleall/maple_ir/src/parser.cpp @@ -1298,6 +1298,15 @@ bool MIRParser::ParsePointType(TyIdx &tyIdx) { // in function pointer specification and member function prototypes inside // structs and classes bool MIRParser::ParseFuncType(TyIdx &tyIdx) { + // parse function attributes + FuncAttrs fAttrs; + if (lexer.GetTokenKind() != TK_lparen) { + if (!ParseFuncAttrs(fAttrs)) { + Error("bad function attribute specification in function type at "); + return false; + } + } + // parse parameters if (lexer.GetTokenKind() != TK_lparen) { Error("expect ( parse function type parameters but get "); @@ -1358,7 +1367,10 @@ bool MIRParser::ParseFuncType(TyIdx &tyIdx) { return false; } MIRFuncType functype(retTyIdx, vecTyIdx, vecAttrs, retTypeAttrs); - functype.SetVarArgs(varargs); + functype.funcAttrs = fAttrs; + if (varargs) { + functype.SetVarArgs(); + } tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&functype); return true; } diff --git a/src/mapleall/maple_me/include/lmbc_lower.h b/src/mapleall/maple_me/include/lmbc_lower.h index 9462204fd71eaa00f23a878d380b47d379eea12e..ce8ede27c69d4143a5f6becb77515368add56105 100644 --- a/src/mapleall/maple_me/include/lmbc_lower.h +++ b/src/mapleall/maple_me/include/lmbc_lower.h @@ -40,7 +40,6 @@ class LMBCLowerer { void LowerIassign(IassignNode *, BlockNode *); void LowerAggIassign(IassignNode *, MIRType *type, int32 offset, BlockNode *); void LowerReturn(NaryStmtNode *retNode, BlockNode *newblk); - MIRFuncType *FuncTypeFromFuncPtrExpr(BaseNode *x); void LowerCall(NaryStmtNode *callNode, BlockNode *newblk); BlockNode *LowerBlock(BlockNode *); void LoadFormalsAssignedToPregs(); diff --git a/src/mapleall/maple_me/include/lmbc_memlayout.h b/src/mapleall/maple_me/include/lmbc_memlayout.h index 7cea863a221a68fc670c5c458264bc53394a62ff..7755030bc99b17c9f4a3134ddb2029287c5c5e2b 100644 --- a/src/mapleall/maple_me/include/lmbc_memlayout.h +++ b/src/mapleall/maple_me/include/lmbc_memlayout.h @@ -33,6 +33,7 @@ typedef enum { MS_FPbased, // addressed via offset from the frame pointer MS_SPbased, // addressed via offset from the stack pointer MS_GPbased, // addressed via offset from the global pointer + MS_largeStructActual, // for storing large struct actuals passed by value for ARM CPU } MemSegmentKind; class MemSegment; @@ -64,13 +65,14 @@ class MemSegment { class LMBCMemLayout { public: - uint32 FindLargestActualArea(void); - uint32 FindLargestActualArea(StmtNode *, int &); - LMBCMemLayout(MIRFunction *f, MapleAllocator *mallocator) + void FindLargestActualArea(StmtNode *stmt, int32 &maxActualSize, int32 &maxLargeStructActualSize); + LMBCMemLayout(MIRFunction *f, MemSegment *segGP, MapleAllocator *mallocator) : func(f), + seg_GPbased(segGP), seg_upformal(MS_upformal), seg_formal(MS_formal), seg_actual(MS_actual), + seg_largeStructActual(MS_largeStructActual), seg_FPbased(MS_FPbased), seg_SPbased(MS_SPbased), sym_alloc_table(mallocator->Adapter()) { @@ -89,12 +91,15 @@ class LMBCMemLayout { } MIRFunction *func; + MemSegment *seg_GPbased; MemSegment seg_upformal; MemSegment seg_formal; MemSegment seg_actual; + MemSegment seg_largeStructActual; MemSegment seg_FPbased; MemSegment seg_SPbased; MapleVector sym_alloc_table; // index is StIdx + int32 seg_LargeStructActualOffset; // negative offset off FP }; class GlobalMemLayout { diff --git a/src/mapleall/maple_me/src/alias_class.cpp b/src/mapleall/maple_me/src/alias_class.cpp index 788a9a35401facddbe8d2006270bd7eb1a2e8d5e..1a63043e273c1bc00a0a473d685ffb0e3f79d6db 100644 --- a/src/mapleall/maple_me/src/alias_class.cpp 
+++ b/src/mapleall/maple_me/src/alias_class.cpp @@ -1017,7 +1017,9 @@ void AliasClass::ApplyUnionForCopies(StmtNode &stmt) { } case OP_asm: case OP_icall: - case OP_icallassigned: { + case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: { for (uint32 i = 0; i < stmt.NumOpnds(); ++i) { const AliasInfo &ainfo = CreateAliasInfoExpr(*stmt.Opnd(i)); if (stmt.GetOpCode() != OP_asm && i == 0) { @@ -2563,6 +2565,7 @@ void AliasClass::GenericInsertMayDefUse(StmtNode &stmt, BBId bbID) { case OP_customcallassigned: case OP_polymorphiccallassigned: case OP_icallassigned: + case OP_icallprotoassigned: case OP_virtualcall: case OP_virtualicall: case OP_superclasscall: @@ -2570,7 +2573,8 @@ void AliasClass::GenericInsertMayDefUse(StmtNode &stmt, BBId bbID) { case OP_interfaceicall: case OP_customcall: case OP_polymorphiccall: - case OP_icall: { + case OP_icall: + case OP_icallproto: { InsertMayDefUseCall(stmt, bbID, false); return; } diff --git a/src/mapleall/maple_me/src/code_factoring.cpp b/src/mapleall/maple_me/src/code_factoring.cpp index 6cabb59adcaf7db62669268bdbdd5a316a3e359f..2f40e7b8210c6bc7971678f6be3ec23acadd2f77 100644 --- a/src/mapleall/maple_me/src/code_factoring.cpp +++ b/src/mapleall/maple_me/src/code_factoring.cpp @@ -122,7 +122,9 @@ bool FactoringOptimizer::IsIdenticalStmt(StmtNode *stmt1, StmtNode *stmt2) { break; } case OP_icall: - case OP_icallassigned: { + case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: { auto *icall1 = static_cast(stmt1); auto *icall2 = static_cast(stmt2); if (icall1->GetRetTyIdx() != icall2->GetRetTyIdx() || diff --git a/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp b/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp index db340369377b6610abc4bca7d90b6d68420a3367..50a25a6101ca3f070b97861d4d057ec69078db97 100644 --- a/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp +++ b/src/mapleall/maple_me/src/demand_driven_alias_analysis.cpp @@ -641,7 +641,9 @@ void PEGBuilder::BuildPEGNodeInStmt(const StmtNode *stmt) { break; } case OP_icall: - case OP_icallassigned: { + case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: { BuildPEGNodeInIcall(static_cast(stmt)); break; } diff --git a/src/mapleall/maple_me/src/irmap_build.cpp b/src/mapleall/maple_me/src/irmap_build.cpp index 30a3f075bda40be7a091e8263bded76c780a902c..6410f2269a6ca4dba1efe99ca2e5f9f80de45b9d 100755 --- a/src/mapleall/maple_me/src/irmap_build.cpp +++ b/src/mapleall/maple_me/src/irmap_build.cpp @@ -804,7 +804,7 @@ MeStmt *IRMapBuild::BuildCallMeStmt(StmtNode &stmt, AccessSSANodes &ssaPart) { MeStmt *IRMapBuild::BuildNaryMeStmt(StmtNode &stmt, AccessSSANodes &ssaPart) { Opcode op = stmt.GetOpCode(); - NaryMeStmt *naryMeStmt = (op == OP_icall || op == OP_icallassigned) + NaryMeStmt *naryMeStmt = (op == OP_icall || op == OP_icallassigned || op == OP_icallproto || op == OP_icallprotoassigned) ? 
static_cast(irMap->NewInPool(&stmt)) : static_cast(irMap->NewInPool(&stmt)); auto &naryStmtNode = static_cast(stmt); @@ -819,6 +819,9 @@ MeStmt *IRMapBuild::BuildNaryMeStmt(StmtNode &stmt, AccessSSANodes &ssaPart) { if (propagater) { propagater->PropUpdateChiListDef(*naryMeStmt->GetChiList()); } + if (op == OP_icallproto || op == OP_icallprotoassigned) { + static_cast(naryMeStmt)->SetRetTyIdx(static_cast(stmt).GetRetTyIdx()); + } return naryMeStmt; } @@ -919,6 +922,8 @@ void IRMapBuild::InitMeStmtFactory() { RegisterFactoryFunction(OP_polymorphiccallassigned, &IRMapBuild::BuildCallMeStmt); RegisterFactoryFunction(OP_icall, &IRMapBuild::BuildNaryMeStmt); RegisterFactoryFunction(OP_icallassigned, &IRMapBuild::BuildNaryMeStmt); + RegisterFactoryFunction(OP_icallproto, &IRMapBuild::BuildNaryMeStmt); + RegisterFactoryFunction(OP_icallprotoassigned, &IRMapBuild::BuildNaryMeStmt); RegisterFactoryFunction(OP_intrinsiccall, &IRMapBuild::BuildNaryMeStmt); RegisterFactoryFunction(OP_xintrinsiccall, &IRMapBuild::BuildNaryMeStmt); RegisterFactoryFunction(OP_intrinsiccallwithtype, &IRMapBuild::BuildNaryMeStmt); diff --git a/src/mapleall/maple_me/src/irmap_emit.cpp b/src/mapleall/maple_me/src/irmap_emit.cpp index 824e7c7e532cc7bf4e43a8e6324492ec2193ad64..7fcbd3804b9d7709e9c563e3dce4c066c77d4c01 100755 --- a/src/mapleall/maple_me/src/irmap_emit.cpp +++ b/src/mapleall/maple_me/src/irmap_emit.cpp @@ -412,7 +412,7 @@ MIRFunction &CallMeStmt::GetTargetFunction() { } StmtNode &CallMeStmt::EmitStmt(SSATab &ssaTab) { - if (GetOp() != OP_icall && GetOp() != OP_icallassigned) { + if (GetOp() != OP_icall && GetOp() != OP_icallassigned && GetOp() != OP_icallproto && GetOp() != OP_icallprotoassigned) { auto *callNode = ssaTab.GetModule().CurFunction()->GetCodeMempool()->New(ssaTab.GetModule(), Opcode(GetOp())); callNode->SetPUIdx(puIdx); @@ -496,6 +496,9 @@ StmtNode &IcallMeStmt::EmitStmt(SSATab &ssaTab) { } } } + if (GetOp() == OP_icallproto || GetOp() == OP_icallprotoassigned) { + icallNode->SetRetTyIdx(retTyIdx); + } return *icallNode; } diff --git a/src/mapleall/maple_me/src/lfo_dep_test.cpp b/src/mapleall/maple_me/src/lfo_dep_test.cpp index 19a523e138bc402ed79763c09ac2636042634eb2..ac3340e8435da4e46e29def1d08540586b5bebd3 100644 --- a/src/mapleall/maple_me/src/lfo_dep_test.cpp +++ b/src/mapleall/maple_me/src/lfo_dep_test.cpp @@ -66,6 +66,8 @@ void LfoDepInfo::CreateDoloopInfo(BlockNode *block, DoloopInfo *parent) { case OP_callassigned: case OP_icall: case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: case OP_return: case OP_throw: case OP_asm: diff --git a/src/mapleall/maple_me/src/lmbc_lower.cpp b/src/mapleall/maple_me/src/lmbc_lower.cpp index a155b25fa4b492de58dfd92130569d7fe2d70086..8920cc95a8088f95721e8e021217e7a6b59b81e8 100644 --- a/src/mapleall/maple_me/src/lmbc_lower.cpp +++ b/src/mapleall/maple_me/src/lmbc_lower.cpp @@ -99,17 +99,18 @@ BaseNode *LMBCLowerer::LowerDreadoff(DreadoffNode *dreadoff) { if (!symbol->LMBCAllocateOffSpecialReg()) { return dreadoff; } + PrimType symty = symbol->GetType()->GetPrimType(); PregIdx spcreg = GetSpecialRegFromSt(symbol); if (spcreg == -kSregFp) { CHECK_FATAL(symbol->IsLocal(), "load from fp non local?"); - IreadFPoffNode *ireadoff = mirBuilder->CreateExprIreadFPoff( - dreadoff->GetPrimType(), memlayout->sym_alloc_table[symbol->GetStIndex()].offset + dreadoff->offset); + IreadFPoffNode *ireadoff = mirBuilder->CreateExprIreadFPoff(symty, + memlayout->sym_alloc_table[symbol->GetStIndex()].offset + dreadoff->offset); return ireadoff; } else { 
BaseNode *rrn = mirBuilder->CreateExprRegread(LOWERED_PTR_TYPE, spcreg); SymbolAlloc &symalloc = symbol->IsLocal() ? memlayout->sym_alloc_table[symbol->GetStIndex()] : globmemlayout->sym_alloc_table[symbol->GetStIndex()]; - IreadoffNode *ireadoff = mirBuilder->CreateExprIreadoff(dreadoff->GetPrimType(), symalloc.offset + dreadoff->offset, rrn); + IreadoffNode *ireadoff = mirBuilder->CreateExprIreadoff(symty, symalloc.offset + dreadoff->offset, rrn); return ireadoff; } } @@ -129,14 +130,14 @@ static MIRType *GetPointedToType(const MIRPtrType *pointerty) { BaseNode *LMBCLowerer::LowerIread(IreadNode *expr) { int32 offset = 0; + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr->GetTyIdx())); + MIRType *type = ptrType->GetPointedType(); if (expr->GetFieldID() != 0) { - MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr->GetTyIdx()); - MIRStructType *structty = - static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx( - static_cast(type)->GetPointedTyIdx())); + MIRStructType *structty = static_cast(type); offset = becommon->GetFieldOffset(*structty, expr->GetFieldID()).first; + type = structty->GetFieldType(expr->GetFieldID()); } - BaseNode *ireadoff = mirBuilder->CreateExprIreadoff(expr->GetPrimType(), offset, expr->Opnd(0)); + BaseNode *ireadoff = mirBuilder->CreateExprIreadoff(type->GetPrimType(), offset, expr->Opnd(0)); return ireadoff; } @@ -334,6 +335,9 @@ void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { } if (iassign->GetRHS()->GetPrimType() != PTY_agg) { PrimType ptypused = type->GetPrimType(); + if (ptypused == PTY_agg) { + ptypused = iassign->GetRHS()->GetPrimType(); + } IassignoffNode *iassignoff = mirBuilder->CreateStmtIassignoff(ptypused, offset, iassign->addrExpr, @@ -347,15 +351,20 @@ void LMBCLowerer::LowerIassign(IassignNode *iassign, BlockNode *newblk) { // called only if the return has > 1 operand; assume prior lowering already // converted any return of structs to be via fake parameter void LMBCLowerer::LowerReturn(NaryStmtNode *retNode, BlockNode *newblk) { - CHECK_FATAL(retNode->NumOpnds() <= 2, "LMBCLowerer::LowerReturn: more than 2 return values NYI"); - for (int i = 0; i < retNode->NumOpnds(); i++) { - CHECK_FATAL(retNode->Opnd(i)->GetPrimType() != PTY_agg, - "LMBCLowerer::LowerReturn: return of aggregate needs to be handled first"); - // insert regassign for the returned value - BaseNode *rhs = LowerExpr(retNode->Opnd(i)); - RegassignNode *regasgn = mirBuilder->CreateStmtRegassign(rhs->GetPrimType(), - i == 0 ? -kSregRetval0 : -kSregRetval1, - rhs); + if (retNode->Opnd(0)->GetPrimType() != PTY_agg) { + CHECK_FATAL(retNode->NumOpnds() <= 2, "LMBCLowerer::LowerReturn: more than 2 return values NYI"); + for (int i = 0; i < retNode->NumOpnds(); i++) { + // insert regassign for the returned value + PrimType ptyp = retNode->Opnd(i)->GetPrimType(); + BaseNode *rhs = LowerExpr(retNode->Opnd(i)); + RegassignNode *regasgn = mirBuilder->CreateStmtRegassign(ptyp, + i == 0 ? 
-kSregRetval0 : -kSregRetval1, + rhs); + newblk->AddStatement(regasgn); + } + } else { // handle return of small struct using only %%retval0 + BaseNode *rhs = LowerExpr(retNode->Opnd(0)); + RegassignNode *regasgn = mirBuilder->CreateStmtRegassign(PTY_agg, -kSregRetval0, rhs); newblk->AddStatement(regasgn); } retNode->GetNopnd().clear(); // remove the return operands @@ -363,101 +372,11 @@ void LMBCLowerer::LowerReturn(NaryStmtNode *retNode, BlockNode *newblk) { newblk->AddStatement(retNode); } -MIRFuncType *LMBCLowerer::FuncTypeFromFuncPtrExpr(BaseNode *x) { - MIRFuncType *res = nullptr; - switch (x->GetOpCode()) { - case OP_regread: { - RegreadNode *regread = static_cast(x); - MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(regread->GetRegIdx()); - // see if it is promoted from a symbol - if (preg->GetOp() == OP_dread) { - const MIRSymbol *symbol = preg->rematInfo.sym; - MIRType *mirType = symbol->GetType(); - if (preg->fieldID != 0) { - MIRStructType *structty = static_cast(mirType); - FieldPair thepair = structty->TraverseToField(preg->fieldID); - mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); - } - - if (mirType->GetKind() == kTypePointer) { - res = static_cast(mirType)->GetPointedFuncType(); - } - if (res != nullptr) { - break; - } - } - // check if a formal promoted to preg - for (FormalDef &formalDef : func->GetFormalDefVec()) { - if (!formalDef.formalSym->IsPreg()) { - continue; - } - if (formalDef.formalSym->GetPreg() == preg) { - MIRType *mirType = formalDef.formalSym->GetType(); - if (mirType->GetKind() == kTypePointer) { - res = static_cast(mirType)->GetPointedFuncType(); - } - break; - } - } - break; - } - case OP_dread: { - DreadNode *dread = static_cast(x); - MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); - MIRType *mirType = symbol->GetType(); - if (dread->GetFieldID() != 0) { - MIRStructType *structty = static_cast(mirType); - FieldPair thepair = structty->TraverseToField(dread->GetFieldID()); - mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); - } - if (mirType->GetKind() == kTypePointer) { - res = static_cast(mirType)->GetPointedFuncType(); - } - break; - } - case OP_iread: { - IreadNode *iread = static_cast(x); - MIRPtrType *ptrType = static_cast(iread->GetType()); - MIRType *mirType = ptrType->GetPointedType(); - if (mirType->GetKind() == kTypePointer) { - res = static_cast(mirType)->GetPointedFuncType(); - } - break; - } - case OP_addroffunc: { - AddroffuncNode *addrofFunc = static_cast(x); - PUIdx puIdx = addrofFunc->GetPUIdx(); - MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); - res = f->GetMIRFuncType(); - break; - } - case OP_retype: { - MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( - static_cast(x)->GetTyIdx()); - if (mirType->GetKind() == kTypePointer) { - res = static_cast(mirType)->GetPointedFuncType(); - } - if (res == nullptr) { - res = FuncTypeFromFuncPtrExpr(x->Opnd(0)); - } - break; - } - case OP_select: { - res = FuncTypeFromFuncPtrExpr(x->Opnd(1)); - if (res == nullptr) { - res = FuncTypeFromFuncPtrExpr(x->Opnd(2)); - } - break; - } - default: CHECK_FATAL(false, "LMBCLowerer::FuncTypeFromFuncPtrExpr: NYI"); - } - return res; -} - void LMBCLowerer::LowerCall(NaryStmtNode *naryStmt, BlockNode *newblk) { // go through each parameter uint32 i = 0; - if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned) { + if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned || + 
naryStmt->GetOpCode() == OP_icallproto || naryStmt->GetOpCode() == OP_icallprotoassigned) {
     i = 1;
   }
   ParmLocator parmlocator;
@@ -517,14 +436,10 @@ void LMBCLowerer::LowerCall(NaryStmtNode *naryStmt, BlockNode *newblk) {
     }
   }
   BaseNode *opnd0 = nullptr;
-  if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned) {
+  if (naryStmt->GetOpCode() == OP_icall || naryStmt->GetOpCode() == OP_icallassigned ||
+      naryStmt->GetOpCode() == OP_icallproto || naryStmt->GetOpCode() == OP_icallprotoassigned) {
     opnd0 = naryStmt->Opnd(0);
     naryStmt->GetNopnd().clear();  // remove the call operands
-    // convert to OP_icallproto by finding the function prototype and record in stmt
-    naryStmt->SetOpCode(OP_icallproto);
-    MIRFuncType *funcType = FuncTypeFromFuncPtrExpr(opnd0);
-    CHECK_FATAL(funcType != nullptr, "LMBCLowerer::LowerCall: cannot find prototype for icall");
-    static_cast<IcallNode*>(naryStmt)->SetRetTyIdx(funcType->GetTypeIndex());
     // add back the function pointer operand
     naryStmt->GetNopnd().push_back(LowerExpr(opnd0));
     naryStmt->SetNumOpnds(1);
@@ -554,6 +469,15 @@ BlockNode *LMBCLowerer::LowerBlock(BlockNode *block) {
        LowerDassign(static_cast<DassignNode*>(stmt), newblk);
        break;
      }
+     case OP_call:
+     case OP_callassigned:
+     case OP_icall:
+     case OP_icallassigned:
+     case OP_icallproto:
+     case OP_icallprotoassigned: {
+       LowerCall(static_cast<NaryStmtNode*>(stmt), newblk);
+       break;
+     }
      case OP_dassignoff: {
        LowerDassignoff(static_cast<DassignoffNode*>(stmt), newblk);
        break;
@@ -569,10 +493,6 @@ BlockNode *LMBCLowerer::LowerBlock(BlockNode *block) {
        } else {
          LowerReturn(retNode, newblk);
        }
-      }
-     case OP_call:
-     case OP_icall: {
-       LowerCall(static_cast<NaryStmtNode*>(stmt), newblk);
        break;
      }
      default: {
diff --git a/src/mapleall/maple_me/src/lmbc_memlayout.cpp b/src/mapleall/maple_me/src/lmbc_memlayout.cpp
index e740c5d13f320c13d1fecac89b870ab8540373fb..47ff50005289219a8a8fe37e4c4a74fbb4a49d0f 100644
--- a/src/mapleall/maple_me/src/lmbc_memlayout.cpp
+++ b/src/mapleall/maple_me/src/lmbc_memlayout.cpp
@@ -25,43 +25,56 @@
 #include "lmbc_memlayout.h"
 #include "mir_symbol.h"
 
+#define PASSLARGESTRUCTBYREF
+
 namespace maple {
 
-uint32 LMBCMemLayout::FindLargestActualArea(StmtNode *stmt, int &maxActualSize) {
+constexpr size_t kVarargSaveAreaSize = 192;
+
+// go over all outgoing calls in the function body and get the maximum space
+// needed for storing the actuals based on the actual parameters and the ABI;
+void LMBCMemLayout::FindLargestActualArea(StmtNode *stmt, int32 &maxActualSize, int32 &maxLargeStructActualSize) {
   if (!stmt) {
-    return maxActualSize;
+    return;
   }
   Opcode opcode = stmt->op;
   switch (opcode) {
     case OP_block: {
       BlockNode *blcknode = static_cast<BlockNode*>(stmt);
       for (StmtNode &s : blcknode->GetStmtNodes()) {
-        FindLargestActualArea(&s, maxActualSize);
+        FindLargestActualArea(&s, maxActualSize, maxLargeStructActualSize);
       }
       break;
     }
     case OP_if: {
       IfStmtNode *ifnode = static_cast<IfStmtNode*>(stmt);
-      FindLargestActualArea(ifnode->GetThenPart(), maxActualSize);
-      FindLargestActualArea(ifnode->GetElsePart(), maxActualSize);
+      FindLargestActualArea(ifnode->GetThenPart(), maxActualSize, maxLargeStructActualSize);
+      FindLargestActualArea(ifnode->GetElsePart(), maxActualSize, maxLargeStructActualSize);
       break;
     }
     case OP_doloop: {
-      FindLargestActualArea(static_cast<DoloopNode*>(stmt)->GetDoBody(), maxActualSize);
+      FindLargestActualArea(static_cast<DoloopNode*>(stmt)->GetDoBody(), maxActualSize, maxLargeStructActualSize);
      break;
    }
    case OP_dowhile:
    case OP_while:
-      FindLargestActualArea(static_cast<WhileStmtNode*>(stmt)->GetBody(), maxActualSize);
+      FindLargestActualArea(static_cast<WhileStmtNode*>(stmt)->GetBody(), maxActualSize, maxLargeStructActualSize);
      break;
    case OP_call:
    case OP_icall:
+    case OP_icallproto:
    case OP_intrinsiccall: {
      ParmLocator parmlocator;  // instantiate a parm locator
      NaryStmtNode *callstmt = static_cast<NaryStmtNode*>(stmt);
-      for (uint32 i = 0; i < callstmt->NumOpnds(); i++) {
+#ifdef PASSLARGESTRUCTBYREF
+      int32 thisCallLargeStructActualSize = 0;
+#endif
+      uint32 i = 0;
+      if (opcode == OP_icall || opcode == OP_icallproto) {
+        i = 1;
+      }
+      for (; i < callstmt->NumOpnds(); i++) {
        BaseNode *opnd = callstmt->Opnd(i);
-        CHECK_FATAL(opnd->GetPrimType() != PTY_void, "");
        MIRType *ty = nullptr;
        if (opnd->GetPrimType() != PTY_agg) {
          ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(
@@ -97,24 +110,21 @@ uint32 LMBCMemLayout::FindLargestActualArea(StmtNode *stmt, int &maxActualSize)
        parmlocator.LocateNextParm(ty, ploc);
        maxActualSize = std::max(maxActualSize, ploc.memoffset + ploc.memsize);
        maxActualSize = maplebe::RoundUp(maxActualSize, GetPrimTypeSize(PTY_ptr));
+#ifdef PASSLARGESTRUCTBYREF
+        if (opnd->GetPrimType() == PTY_agg && ty->GetSize() > (2*GetPrimTypeSize(PTY_ptr))) {
+          thisCallLargeStructActualSize += ty->GetSize();
+          thisCallLargeStructActualSize = maplebe::RoundUp(thisCallLargeStructActualSize, GetPrimTypeSize(PTY_ptr));
+        }
+#endif
      }
+#ifdef PASSLARGESTRUCTBYREF
+      maxLargeStructActualSize = std::max(maxLargeStructActualSize, thisCallLargeStructActualSize);
+#endif
      break;
    }
-    default:
-      return maxActualSize;
+    default: break;
  }
  maxActualSize = maplebe::RoundUp(maxActualSize, GetPrimTypeSize(PTY_ptr));
-  return maxActualSize;
-}
-
-// go over all outgoing calls in the function body and get the maximum space
-// needed for storing the actuals based on the actual parameters and the ABI;
-// this assumes that all nesting of statements has been removed, so that all
-// the statements are at only one block level
-uint32 LMBCMemLayout::FindLargestActualArea(void) {
-  int32 maxActualSize = 0;
-  FindLargestActualArea(func->GetBody(), maxActualSize);
-  return static_cast<uint32>(maxActualSize);
 }
 
 void LMBCMemLayout::LayoutStackFrame(void) {
@@ -143,6 +153,10 @@ void LMBCMemLayout::LayoutStackFrame(void) {
   seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr));
   seg_formal.how_alloc.offset = seg_FPbased.size;
 
+  if (func->IsVarargs()) {
+    seg_FPbased.size -= kVarargSaveAreaSize;
+  }
+
   // allocate the local variables
   uint32 symtabsize = func->GetSymTab()->GetSymbolTableSize();
   for (uint32 i = 0; i < symtabsize; i++) {
@@ -153,6 +167,13 @@ void LMBCMemLayout::LayoutStackFrame(void) {
     if (sym->IsDeleted()) {
       continue;
     }
+    if (sym->GetStorageClass() == kScPstatic && sym->LMBCAllocateOffSpecialReg()) {
+      uint32 stindex = sym->GetStIndex();
+      sym_alloc_table[stindex].mem_segment = seg_GPbased;
+      seg_GPbased->size = maplebe::RoundUp(seg_GPbased->size, sym->GetType()->GetAlign());
+      sym_alloc_table[stindex].offset = seg_GPbased->size;
+      seg_GPbased->size += sym->GetType()->GetSize();
+    }
     if (sym->GetStorageClass() != kScAuto) {
       continue;
     }
@@ -162,14 +183,19 @@ void LMBCMemLayout::LayoutStackFrame(void) {
     seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, sym->GetType()->GetAlign());
     sym_alloc_table[stindex].offset = seg_FPbased.size;
   }
-  seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr));
 
   // allocate seg_actual for storing the outgoing parameters; this requires
   // going over all outgoing calls and get the maximum space needed for the
   // actuals
-  seg_actual.size = FindLargestActualArea();
+  FindLargestActualArea(func->GetBody(), seg_actual.size, seg_largeStructActual.size);
   func->SetOutParmSize(seg_actual.size);
 
+  // allocate the area for storing large struct actuals passed by value
+  seg_largeStructActual.how_alloc.mem_segment = &seg_FPbased;
+  seg_FPbased.size -= seg_largeStructActual.size;
+  seg_FPbased.size = maplebe::RoundDown(seg_FPbased.size, GetPrimTypeSize(PTY_ptr));
+  seg_LargeStructActualOffset = seg_FPbased.size;
+
   // allocate seg_actual in seg_SPbased
   seg_actual.how_alloc.mem_segment = &seg_SPbased;
   seg_actual.how_alloc.offset = seg_SPbased.size;
@@ -329,6 +355,9 @@ GlobalMemLayout::GlobalMemLayout(maplebe::BECommon *b, MIRModule *mod, MapleAll
     if (sym->GetStorageClass() != kScGlobal && sym->GetStorageClass() != kScFstatic) {
       continue;
     }
+    if (!sym->LMBCAllocateOffSpecialReg()) {
+      continue;
+    }
     if (sym->GetType()->GetAlign() != curalign) {
       continue;
     }
diff --git a/src/mapleall/maple_me/src/me_cfg.cpp b/src/mapleall/maple_me/src/me_cfg.cpp
index ac3cf1b8dbc4dee82d68285f3462b72aae7835c5..c72a09a0deaeb7a4c4427a76119b35ae7e8aa823 100644
--- a/src/mapleall/maple_me/src/me_cfg.cpp
+++ b/src/mapleall/maple_me/src/me_cfg.cpp
@@ -260,6 +260,7 @@ bool MeCFG::FindUse(const StmtNode &stmt, StIdx stIdx) const {
    case OP_customcall:
    case OP_polymorphiccall:
    case OP_icall:
+    case OP_icallproto:
    case OP_intrinsiccall:
    case OP_xintrinsiccall:
    case OP_intrinsiccallwithtype:
@@ -272,6 +273,7 @@ bool MeCFG::FindUse(const StmtNode &stmt, StIdx stIdx) const {
    case OP_customcallassigned:
    case OP_polymorphiccallassigned:
    case OP_icallassigned:
+    case OP_icallprotoassigned:
    case OP_intrinsiccallassigned:
    case OP_xintrinsiccallassigned:
    case OP_intrinsiccallwithtypeassigned:
diff --git a/src/mapleall/maple_me/src/me_function.cpp b/src/mapleall/maple_me/src/me_function.cpp
index 05181df7968e5330ed30981c6661ddf1094918d4..a62f1a89a38bca501cad7af81b4e920e9b8aa89d 100755
--- a/src/mapleall/maple_me/src/me_function.cpp
+++ b/src/mapleall/maple_me/src/me_function.cpp
@@ -303,7 +309,9 @@ void MeFunction::CloneBBMeStmts(BB &srcBB, BB &destBB, std::map(&stmt);
        newStmt = irmap->NewInPool(static_cast(icallStmt), icallStmt->GetRetTyIdx(), icallStmt->GetStmtID());
diff --git a/src/mapleall/maple_me/src/me_lower_globals.cpp b/src/mapleall/maple_me/src/me_lower_globals.cpp
index c14dd5c76478004e501c81e34085d075ae0a7de5..fdf2df8e3f99b197cb7cf84ea217af9daff72fab 100644
--- a/src/mapleall/maple_me/src/me_lower_globals.cpp
+++ b/src/mapleall/maple_me/src/me_lower_globals.cpp
@@ -185,7 +185,12 @@ void MeLowerGlobals::Run() {
      MeExpr *addroffuncExpr = irMap->CreateAddroffuncMeExpr(callee.GetPuidx());
      auto insertpos = callStmt.GetOpnds().begin();
      callStmt.InsertOpnds(insertpos, addroffuncExpr);
-      IcallMeStmt *icallStmt = irMap->NewInPool<IcallMeStmt>(stmt.GetOp() == OP_call ? OP_icall : OP_icallassigned);
+      IcallMeStmt *icallStmt = nullptr;
+      if (func.GetMIRModule().IsCModule()) {
+        icallStmt = irMap->NewInPool<IcallMeStmt>(stmt.GetOp() == OP_call ? OP_icallproto : OP_icallprotoassigned);
+      } else {
+        icallStmt = irMap->NewInPool<IcallMeStmt>(stmt.GetOp() == OP_call ? OP_icall : OP_icallassigned);
+      }
      icallStmt->SetIsLive(callStmt.GetIsLive());
      icallStmt->SetSrcPos(callStmt.GetSrcPosition());
      for (MeExpr *o : callStmt.GetOpnds()) {
@@ -193,7 +198,11 @@ void MeLowerGlobals::Run() {
      }
      icallStmt->GetMuList()->insert(callStmt.GetMuList()->begin(), callStmt.GetMuList()->end());
      icallStmt->GetChiList()->insert(callStmt.GetChiList()->begin(), callStmt.GetChiList()->end());
-      icallStmt->SetRetTyIdx(callee.GetReturnTyIdx());
+      if (func.GetMIRModule().IsCModule()) {
+        icallStmt->SetRetTyIdx(callee.GetMIRFuncType()->GetTypeIndex());
+      } else {
+        icallStmt->SetRetTyIdx(callee.GetReturnTyIdx());
+      }
      if (stmt.GetOp() != OP_call) {
        if (callStmt.NeedDecref()) {
          icallStmt->EnableNeedDecref();
diff --git a/src/mapleall/maple_me/src/me_phase_manager.cpp b/src/mapleall/maple_me/src/me_phase_manager.cpp
index 027294329bbb8a27a705e3d6484d4833ece94f6f..ed63dae0465115fa9d0a15ff0843dc415cbb687d 100644
--- a/src/mapleall/maple_me/src/me_phase_manager.cpp
+++ b/src/mapleall/maple_me/src/me_phase_manager.cpp
@@ -153,7 +153,7 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) {
      cgLower.LowerFunc(*func);
      MemPool *layoutMp = memPoolCtrler.NewMemPool("layout mempool", true);
      MapleAllocator layoutAlloc(layoutMp);
-      LMBCMemLayout localMemLayout(func, &layoutAlloc);
+      LMBCMemLayout localMemLayout(func, &globalMemLayout.seg_GPbased, &layoutAlloc);
      localMemLayout.LayoutStackFrame();
      LMBCLowerer lmbcLowerer(&m, &beCommon, func, &globalMemLayout, &localMemLayout);
      lmbcLowerer.LowerFunction();
@@ -161,6 +161,8 @@ bool MeFuncPM::PhaseRun(maple::MIRModule &m) {
      func->SetUpFormalSize(localMemLayout.UpformalSize());
      memPoolCtrler.DeleteMemPool(layoutMp);
    }
+    globalMemLayout.seg_GPbased.size = maplebe::RoundUp(globalMemLayout.seg_GPbased.size, GetPrimTypeSize(PTY_ptr));
+    m.SetGlobalMemSize(globalMemLayout.seg_GPbased.size);
    // output .lmbc
    BinaryMplt binMplt(m);
    std::string modFileName = m.GetFileName();
diff --git a/src/mapleall/maple_me/src/me_rc_lowering.cpp b/src/mapleall/maple_me/src/me_rc_lowering.cpp
index 8b3897a1c67c8e10147d1936f0bc7cf69d8423c5..edf6f45509c2cdc8ca93b84462340e75a153629a 100644
--- a/src/mapleall/maple_me/src/me_rc_lowering.cpp
+++ b/src/mapleall/maple_me/src/me_rc_lowering.cpp
@@ -1144,6 +1144,7 @@ void RCLowering::HandleArguments() {
      firstBB->InsertMeStmtBefore(firstMeStmt, incCall);
    }
    sym->SetLocalRefVar();
+    mirFunc->GetFormalDefVec()[i].formalAttrs.SetAttr(ATTR_localrefvar);
 
    for (auto *stmt : rets) {
      std::vector<MeExpr*> opnds = { argVar };
diff --git a/src/mapleall/maple_me/src/me_rename2preg.cpp b/src/mapleall/maple_me/src/me_rename2preg.cpp
index 9549dff0045f07a826df5f40109875300b4f4d39..eddc9704b9bf9d070ee78af419a8a66c38495593 100644
--- a/src/mapleall/maple_me/src/me_rename2preg.cpp
+++ b/src/mapleall/maple_me/src/me_rename2preg.cpp
@@ -317,6 +317,7 @@ void SSARename2Preg::Rename2PregStmt(MeStmt *stmt) {
    case OP_customcallassigned:
    case OP_polymorphiccallassigned:
    case OP_icallassigned:
+    case OP_icallprotoassigned:
    case OP_intrinsiccallassigned:
    case OP_xintrinsiccallassigned:
    case OP_intrinsiccallwithtypeassigned: {
diff --git a/src/mapleall/maple_me/src/me_side_effect.cpp b/src/mapleall/maple_me/src/me_side_effect.cpp
index ad493a9e014b82c3411bbfca8b7dc9ca8a24ba55..2df2f0a2c5cc33faecbb0181ce76eec822507e36 100644
--- a/src/mapleall/maple_me/src/me_side_effect.cpp
+++ b/src/mapleall/maple_me/src/me_side_effect.cpp
@@ -932,6 +932,7 @@ bool IpaSideEffect::UpdateSideEffectWithStmt(MeStmt &meStmt,
    case OP_customcallassigned:
    case OP_polymorphiccallassigned:
    case OP_icallassigned:
+    case OP_icallprotoassigned:
    case OP_superclasscallassigned:
    case OP_asm: {
      hasPrivateDef = hasThrException = true;
@@ -958,7 +959,8 @@ bool IpaSideEffect::UpdateSideEffectWithStmt(MeStmt &meStmt,
    case OP_interfaceicall:
    case OP_customcall:
    case OP_polymorphiccall:
-    case OP_icall: {
+    case OP_icall:
+    case OP_icallproto: {
      hasPrivateDef = hasThrException = true;
      SetHasDef();
      for (size_t i = 0; i < meStmt.NumMeStmtOpnds(); ++i) {
diff --git a/src/mapleall/maple_me/src/me_stmt_pre.cpp b/src/mapleall/maple_me/src/me_stmt_pre.cpp
index 9e2325f1425d2b0a3ea353dd4655cfde7f7c57e4..55888c9bcefef7606561df1108a9c0aabc6cb86e 100755
--- a/src/mapleall/maple_me/src/me_stmt_pre.cpp
+++ b/src/mapleall/maple_me/src/me_stmt_pre.cpp
@@ -966,7 +966,9 @@ void MeStmtPre::BuildWorkListBB(BB *bb) {
        break;
      }
      case OP_icall:
-      case OP_icallassigned: {
+      case OP_icallassigned:
+      case OP_icallproto:
+      case OP_icallprotoassigned: {
        auto &icallMeStmt = static_cast<IcallMeStmt&>(stmt);
        VersionStackChiListUpdate(*icallMeStmt.GetChiList());
        break;
diff --git a/src/mapleall/maple_me/src/pme_emit.cpp b/src/mapleall/maple_me/src/pme_emit.cpp
index e880637ccdca2dce8b5a883b72b681ef763474e8..b844ad16d0ee6affaf0ecb3e5971b66238c09982 100755
--- a/src/mapleall/maple_me/src/pme_emit.cpp
+++ b/src/mapleall/maple_me/src/pme_emit.cpp
@@ -485,10 +485,12 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *mestmt, BaseNode *parent) {
      return callnode;
    }
    case OP_icall:
-    case OP_icallassigned: {
+    case OP_icallassigned:
+    case OP_icallproto:
+    case OP_icallprotoassigned: {
      IcallMeStmt *icallMeStmt = static_cast<IcallMeStmt *>(mestmt);
      IcallNode *icallnode =
-        codeMP->New<IcallNode>(*codeMPAlloc, OP_icallassigned, icallMeStmt->GetRetTyIdx());
+        codeMP->New<IcallNode>(*codeMPAlloc, OP_icallprotoassigned, icallMeStmt->GetRetTyIdx());
      for (uint32 i = 0; i < icallMeStmt->GetOpnds().size(); i++) {
        icallnode->GetNopnd().push_back(EmitPreMeExpr(icallMeStmt->GetOpnd(i), icallnode));
      }
@@ -509,6 +511,9 @@ StmtNode* PreMeEmitter::EmitPreMeStmt(MeStmt *mestmt, BaseNode *parent) {
          icallnode->SetRetTyIdx(TyIdx(preg->GetPrimType()));
        }
      }
+      if (mestmt->GetOp() == OP_icallproto || mestmt->GetOp() == OP_icallprotoassigned) {
+        icallnode->SetRetTyIdx(icallMeStmt->GetRetTyIdx());
+      }
      icallnode->CopySafeRegionAttr(mestmt->GetStmtAttr());
      icallnode->SetOriginalID(mestmt->GetOriginalId());
      PreMeStmtExtensionMap[icallnode->GetStmtID()] = pmeExt;
diff --git a/src/mapleall/maple_me/src/ssa_devirtual.cpp b/src/mapleall/maple_me/src/ssa_devirtual.cpp
index 15a609e42709a778b7a947fc51a9ba0141c22e2d..f46e8e06ce4aa7a0effaa7cb0e1582382c6efc2c 100644
--- a/src/mapleall/maple_me/src/ssa_devirtual.cpp
+++ b/src/mapleall/maple_me/src/ssa_devirtual.cpp
@@ -534,7 +534,9 @@ void SSADevirtual::TraversalMeStmt(MeStmt &meStmt) {
      break;
    }
    case OP_icall:
-    case OP_icallassigned: {
+    case OP_icallassigned:
+    case OP_icallproto:
+    case OP_icallprotoassigned: {
      auto *icallMeStmt = static_cast<IcallMeStmt*>(&meStmt);
      const MapleVector<MeExpr*> &opnds = icallMeStmt->GetOpnds();
      for (size_t i = 0; i < opnds.size(); ++i) {
diff --git a/src/mapleall/maple_me/src/ssa_pre.cpp b/src/mapleall/maple_me/src/ssa_pre.cpp
index ceff080daa2a56a5d5a150e86e80677bed2548a6..a082b80b9f248bccd9191f5c7a715c9c675c17aa 100644
--- a/src/mapleall/maple_me/src/ssa_pre.cpp
+++ b/src/mapleall/maple_me/src/ssa_pre.cpp
@@ -1666,6 +1666,7 @@ void SSAPre::BuildWorkListStmt(MeStmt &stmt, uint32 seqStmt, bool isRebuilt, MeE
    case OP_customcall:
    case OP_polymorphiccall:
    case OP_icall:
+    case OP_icallproto:
    case OP_callassigned:
    case OP_virtualcallassigned:
    case OP_virtualicallassigned:
@@ -1675,6 +1676,7 @@ void SSAPre::BuildWorkListStmt(MeStmt &stmt, uint32 seqStmt, bool isRebuilt, MeE
    case OP_customcallassigned:
    case OP_polymorphiccallassigned:
    case OP_icallassigned:
+    case OP_icallprotoassigned:
    case OP_asm: {
      auto *naryMeStmt = static_cast<NaryMeStmt*>(meStmt);
      const MapleVector<MeExpr*> &opnds = naryMeStmt->GetOpnds();
diff --git a/src/mapleall/mpl2mpl/src/constantfold.cpp b/src/mapleall/mpl2mpl/src/constantfold.cpp
index 75d9fbe2e4a15cdea0306c1a9b41a3f31e5fde5b..8859c36d65916c02eb6df8380cd0e46ea3eae278 100644
--- a/src/mapleall/mpl2mpl/src/constantfold.cpp
+++ b/src/mapleall/mpl2mpl/src/constantfold.cpp
@@ -162,6 +162,8 @@ StmtNode *ConstantFold::Simplify(StmtNode *node) {
      return SimplifyNary(static_cast<NaryStmtNode*>(node));
    case OP_icall:
    case OP_icallassigned:
+    case OP_icallproto:
+    case OP_icallprotoassigned:
      return SimplifyIcall(static_cast<IcallNode*>(node));
    case OP_asm:
      return SimplifyAsm(static_cast<AsmNode*>(node));
@@ -2609,8 +2611,8 @@ StmtNode *ConstantFold::SimplifyIcall(IcallNode *node) {
  AddroffuncNode *addrofNode = static_cast<AddroffuncNode*>(node->GetNopndAt(0));
  CallNode *callNode = mirModule->CurFuncCodeMemPool()->New<CallNode>(*mirModule,
-      node->GetOpCode() == OP_icall ? OP_call : OP_callassigned);
-  if (node->GetOpCode() == OP_icallassigned) {
+      (node->GetOpCode() == OP_icall || node->GetOpCode() == OP_icallproto) ? OP_call : OP_callassigned);
+  if (node->GetOpCode() == OP_icallassigned || node->GetOpCode() == OP_icallprotoassigned) {
    callNode->SetReturnVec(node->GetReturnVec());
  }
  callNode->SetPUIdx(addrofNode->GetPUIdx());