diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 0b8e04bd72532..a3b6c3c8bc7e8 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -1263,8 +1263,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1, // └──▌ ADD int // ├──▌ LCL_VAR int V10 tmp6 -> copy propagated to [V35 tmp31] // └──▌ COMMA int - // ├──▌ ASG int - // │ ├──▌ LCL_VAR int V35 tmp31 + // ├──▌ STORE_LCL_VAR int V35 tmp31 // │ └──▌ LCL_FLD int V03 loc1 [+4] if (lclVar2->lvRedefinedInEmbeddedStatement) { @@ -1697,7 +1696,7 @@ void Compiler::optDebugCheckAssertion(AssertionDsc* assertion) case O2K_ZEROOBJ: { - // We only make these assertion for assignments (not control flow). + // We only make these assertion for stores (not control flow). assert(assertion->assertionKind == OAK_EQUAL); // We use "optLocalAssertionIsEqualOrNotEqual" to find these. assert(assertion->op2.u1.iconVal == 0); @@ -2080,8 +2079,8 @@ AssertionInfo Compiler::optAssertionGenJtrue(GenTree* tree) // Look through any CSEs so we see the actual trees providing values, if possible. // This is important for exact type assertions, which need to see the GT_IND. // - GenTree* op1 = relop->AsOp()->gtOp1->gtCommaAssignVal(); - GenTree* op2 = relop->AsOp()->gtOp2->gtCommaAssignVal(); + GenTree* op1 = relop->AsOp()->gtOp1->gtCommaStoreVal(); + GenTree* op2 = relop->AsOp()->gtOp2->gtCommaStoreVal(); // Check for op1 or op2 to be lcl var and if so, keep it in op1. if ((op1->gtOper != GT_LCL_VAR) && (op2->gtOper == GT_LCL_VAR)) @@ -2231,9 +2230,8 @@ AssertionIndex Compiler::optAssertionGenPhiDefn(GenTree* tree) /***************************************************************************** * - * If this statement creates a value assignment or assertion - * then assign an index to the given value assignment by adding - * it to the lookup table, if necessary. 
+ * If this node creates an assertion then assign an index to the assertion + * by adding it to the lookup table, if necessary. */ void Compiler::optAssertionGen(GenTree* tree) { @@ -2258,7 +2256,7 @@ void Compiler::optAssertionGen(GenTree* tree) switch (tree->OperGet()) { case GT_STORE_LCL_VAR: - // VN takes care of non local assertions for assignments and data flow. + // VN takes care of non local assertions for data flow. if (optLocalAssertionProp) { assertionInfo = optCreateAssertion(tree, tree->AsLclVar()->Data(), OAK_EQUAL); @@ -2587,7 +2585,7 @@ GenTree* Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTree* tree) } else { - // Implicit assignment conversion to float or double + // Implicit conversion to float or double assert(varTypeIsFloating(tree->TypeGet())); conValTree = gtNewDconNode(value, tree->TypeGet()); } @@ -2604,7 +2602,7 @@ GenTree* Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTree* tree) } else { - // Implicit assignment conversion to float or double + // Implicit conversion to float or double assert(varTypeIsFloating(tree->TypeGet())); conValTree = gtNewDconNode(value, tree->TypeGet()); } @@ -2631,7 +2629,7 @@ GenTree* Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTree* tree) switch (tree->TypeGet()) { case TYP_INT: - // Implicit assignment conversion to smaller integer + // Implicit conversion to smaller integer conValTree = gtNewIconNode(static_cast(value)); break; @@ -2693,7 +2691,7 @@ GenTree* Compiler::optVNConstantPropOnTree(BasicBlock* block, GenTree* tree) break; case TYP_LONG: - // Implicit assignment conversion to larger integer + // Implicit conversion to larger integer conValTree = gtNewLconNode(value); break; @@ -3486,7 +3484,7 @@ GenTree* Compiler::optAssertionProp_LocalStore(ASSERT_VALARG_TP assertions, GenT } } - // We might have simplified the value but were not able to remove the assignment + // We might have simplified the value but were not able to remove the store. 
// if (madeChanges) { @@ -3959,7 +3957,7 @@ GenTree* Compiler::optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, Gen // Note we can't trust the OAK_EQUAL as the value could end up being a NaN // violating the assertion. However, we create OAK_EQUAL assertions for floating // point only on JTrue nodes, so if the condition held earlier, it will hold - // now. We don't create OAK_EQUAL assertion on floating point from GT_ASG + // now. We don't create OAK_EQUAL assertion on floating point from stores // because we depend on value num which would constant prop the NaN. op1->BashToConst(0.0, op1->TypeGet()); op2->BashToConst(0.0, op2->TypeGet()); @@ -4872,7 +4870,7 @@ void Compiler::optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& ac } } } - // Is curAssertion a constant assignment of a 32-bit integer? + // Is curAssertion a constant store of a 32-bit integer? // (i.e GT_LVL_VAR X == GT_CNS_INT) else if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) && (curAssertion->op2.kind == O2K_CONST_INT)) @@ -5119,7 +5117,7 @@ void Compiler::optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionD return; } - // Is depAssertion a constant assignment of a 32-bit integer? + // Is depAssertion a constant store of a 32-bit integer? // (i.e GT_LVL_VAR X == GT_CNS_INT) bool depIsConstAssertion = ((depAssertion->assertionKind == OAK_EQUAL) && (depAssertion->op1.kind == O1K_LCLVAR) && (depAssertion->op2.kind == O2K_CONST_INT)); @@ -5930,7 +5928,7 @@ PhaseStatus Compiler::optAssertionPropMain() // Assertion prop can speculatively create trees. INDEBUG(const unsigned baseTreeID = compGenTreeID); - // First discover all value assignments and record them in the table. + // First discover all assertions and record them in the table. 
for (BasicBlock* const block : Blocks()) { compCurBB = block; diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 10486bc4d0670..e78c80db41359 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -929,8 +929,8 @@ unsigned JitPtrKeyFuncs::GetHashCode(const BasicBlock* ptr) // isEmpty: check if block is empty or contains only ignorable statements // // Return Value: -// True if block is empty, or contains only PHI assignments, -// or contains zero or more PHI assignments followed by NOPs. +// True if block is empty, or contains only PHI stores, +// or contains zero or more PHI stores followed by NOPs. // bool BasicBlock::isEmpty() const { @@ -982,25 +982,15 @@ bool BasicBlock::isValid() const Statement* BasicBlock::FirstNonPhiDef() const { Statement* stmt = firstStmt(); - if (stmt == nullptr) - { - return nullptr; - } - GenTree* tree = stmt->GetRootNode(); - while ((tree->OperGet() == GT_ASG && tree->AsOp()->gtOp2->OperGet() == GT_PHI) || - (tree->OperGet() == GT_STORE_LCL_VAR && tree->AsOp()->gtOp1->OperGet() == GT_PHI)) + while ((stmt != nullptr) && stmt->IsPhiDefnStmt()) { stmt = stmt->GetNextStmt(); - if (stmt == nullptr) - { - return nullptr; - } - tree = stmt->GetRootNode(); } + return stmt; } -Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg() const +Statement* BasicBlock::FirstNonPhiDefOrCatchArgStore() const { Statement* stmt = FirstNonPhiDef(); if (stmt == nullptr) @@ -1008,8 +998,7 @@ Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg() const return nullptr; } GenTree* tree = stmt->GetRootNode(); - if ((tree->OperGet() == GT_ASG && tree->AsOp()->gtOp2->OperGet() == GT_CATCH_ARG) || - (tree->OperGet() == GT_STORE_LCL_VAR && tree->AsOp()->gtOp1->OperGet() == GT_CATCH_ARG)) + if (tree->OperIs(GT_STORE_LCL_VAR) && tree->AsLclVar()->Data()->OperIs(GT_CATCH_ARG)) { stmt = stmt->GetNextStmt(); } diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 7beab20b4af65..f3ca6e856ae02 100644 --- 
a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -1110,7 +1110,7 @@ struct BasicBlock : private LIR::Range // BAD_IL_OFFSET. #endif // DEBUG - VARSET_TP bbVarUse; // variables used by block (before an assignment) + VARSET_TP bbVarUse; // variables used by block (before a definition) VARSET_TP bbVarDef; // variables assigned by block (before a use) VARSET_TP bbLiveIn; // variables live on entry @@ -1165,17 +1165,17 @@ struct BasicBlock : private LIR::Range union { EXPSET_TP bbCseGen; // CSEs computed by block - ASSERT_TP bbAssertionGen; // value assignments computed by block + ASSERT_TP bbAssertionGen; // assertions computed by block }; union { EXPSET_TP bbCseIn; // CSEs available on entry - ASSERT_TP bbAssertionIn; // value assignments available on entry + ASSERT_TP bbAssertionIn; // assertions available on entry }; union { EXPSET_TP bbCseOut; // CSEs available on exit - ASSERT_TP bbAssertionOut; // value assignments available on exit + ASSERT_TP bbAssertionOut; // assertions available on exit }; void* bbEmitCookie; @@ -1273,9 +1273,9 @@ struct BasicBlock : private LIR::Range bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const; // Returns the first statement in the statement list of "this" that is - // not an SSA definition (a lcl = phi(...) assignment). + // not an SSA definition (a lcl = phi(...) store). 
Statement* FirstNonPhiDef() const; - Statement* FirstNonPhiDefOrCatchArgAsg() const; + Statement* FirstNonPhiDefOrCatchArgStore() const; BasicBlock() : bbStmtList(nullptr), bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal()) { diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis index dc4c254ac9d1a..6d25673d9c970 100644 --- a/src/coreclr/jit/clrjit.natvis +++ b/src/coreclr/jit/clrjit.natvis @@ -57,7 +57,6 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u {gtTreeID, d}: [LngCon={((GenTreeLngCon*)this)->gtLconVal, l}] - {gtTreeID, d}: [{this->gtOp1,na}={this->gtOp2,na}] {gtTreeID, d}: [{((GenTreeCast*)this)->gtCastType,en} <- {((GenTreeUnOp*)this)->gtOp1->gtType,en}] {gtTreeID, d}: [{((GenTreeHWIntrinsic*)this)->gtHWIntrinsicId,en}, {gtType,en}] {gtTreeID, d}: [{gtOper,en}, {gtType,en}] diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index 69bbb274c1c99..e4f4b7a8576b1 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -241,7 +241,6 @@ unsigned argTotalDeferred; unsigned argTotalConst; unsigned argTotalObjPtr; -unsigned argTotalGTF_ASGinArgs; unsigned argMaxTempsPerMethod; @@ -1919,7 +1918,6 @@ void Compiler::compInit(ArenaAllocator* pAlloc, compLocallocUsed = false; compLocallocOptimized = false; compQmarkRationalized = false; - compAssignmentRationalized = false; compQmarkUsed = false; compFloatingPointUsed = false; @@ -4751,8 +4749,6 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl // DoPhase(this, PHASE_PHYSICAL_PROMOTION, &Compiler::PhysicalPromotion); - DoPhase(this, PHASE_RATIONALIZE_ASSIGNMENTS, &Compiler::fgRationalizeAssignments); - // Run a simple forward substitution pass. 
// DoPhase(this, PHASE_FWD_SUB, &Compiler::fgForwardSub); diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index ceb66c5153384..d2c6022e9dd2d 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -236,7 +236,7 @@ class LclSsaVarDsc LclSsaVarDsc(BasicBlock* block, GenTreeLclVarCommon* defNode) : m_block(block) { - SetAssignment(defNode); + SetDefNode(defNode); } BasicBlock* GetBlock() const @@ -249,14 +249,12 @@ class LclSsaVarDsc m_block = block; } - // TODO-ASG: rename to "GetDefNode". - GenTreeLclVarCommon* GetAssignment() const + GenTreeLclVarCommon* GetDefNode() const { return m_defNode; } - // TODO-ASG: rename to "SetDefNode". - void SetAssignment(GenTreeLclVarCommon* defNode) + void SetDefNode(GenTreeLclVarCommon* defNode) { assert((defNode == nullptr) || defNode->OperIsLocalStore()); m_defNode = defNode; @@ -1840,7 +1838,7 @@ struct FuncInfoDsc struct TempInfo { - GenTree* asg; + GenTree* store; GenTree* load; }; @@ -2068,7 +2066,7 @@ class Compiler DWORD expensiveDebugCheckLevel; #endif - GenTree* impAssignMultiRegTypeToVar(GenTree* op, + GenTree* impStoreMultiRegValueToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)); #ifdef TARGET_X86 @@ -2502,7 +2500,13 @@ class Compiler GenTreeLclVar* gtNewStoreLclVarNode(unsigned lclNum, GenTree* data); - GenTreeLclFld* gtNewStoreLclFldNode(unsigned lclNum, var_types type, unsigned offset, GenTree* data); + GenTreeLclFld* gtNewStoreLclFldNode( + unsigned lclNum, var_types type, ClassLayout* layout, unsigned offset, GenTree* data); + + GenTreeLclFld* gtNewStoreLclFldNode(unsigned lclNum, var_types type, unsigned offset, GenTree* data) + { + return gtNewStoreLclFldNode(lclNum, type, (type == TYP_STRUCT) ? data->GetLayout(this) : nullptr, offset, data); + } GenTree* gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg); @@ -2812,8 +2816,6 @@ class Compiler return gtNewFieldAddrNode(varTypeIsGC(obj) ? 
TYP_BYREF : TYP_I_IMPL, fldHnd, obj, offset); } - GenTreeIndir* gtNewFieldIndirNode(var_types type, ClassLayout* layout, GenTreeFieldAddr* addr); - GenTreeIndexAddr* gtNewIndexAddr(GenTree* arrayOp, GenTree* indexOp, var_types elemType, @@ -2836,6 +2838,8 @@ class Compiler GenTreeMDArr* gtNewMDArrLowerBound(GenTree* arrayOp, unsigned dim, unsigned rank, BasicBlock* block); + void gtInitializeStoreNode(GenTree* store, GenTree* data); + void gtInitializeIndirNode(GenTreeIndir* indir, GenTreeFlags indirFlags); GenTreeBlk* gtNewBlkIndir(ClassLayout* layout, GenTree* addr, GenTreeFlags indirFlags = GTF_EMPTY); @@ -2869,19 +2873,22 @@ class Compiler return gtNewStoreValueNode(layout->GetType(), layout, addr, data, indirFlags); } + GenTree* gtNewStoreValueNode(var_types type, GenTree* addr, GenTree* data, GenTreeFlags indirFlags = GTF_EMPTY) + { + return gtNewStoreValueNode(type, nullptr, addr, data, indirFlags); + } + GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); - GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); - - GenTree* gtNewTempAssign(unsigned tmp, - GenTree* val, - unsigned curLevel = CHECK_SPILL_NONE, - Statement** pAfterStmt = nullptr, - const DebugInfo& di = DebugInfo(), - BasicBlock* block = nullptr); + GenTree* gtNewTempStore(unsigned tmp, + GenTree* val, + unsigned curLevel = CHECK_SPILL_NONE, + Statement** pAfterStmt = nullptr, + const DebugInfo& di = DebugInfo(), + BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, @@ -3881,11 +3888,13 @@ class Compiler GenTree* impImportStaticReadOnlyField(CORINFO_FIELD_HANDLE field, CORINFO_CLASS_HANDLE ownerCls); GenTree* impImportCnsTreeFromBuffer(uint8_t* buffer, var_types valueType); - GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, - CORINFO_ACCESS_FLAGS access, - CORINFO_FIELD_INFO* pFieldInfo, 
- var_types lclTyp, - /* OUT */ bool* pIsHoistable = nullptr); + GenTree* impImportStaticFieldAddress(CORINFO_RESOLVED_TOKEN* pResolvedToken, + CORINFO_ACCESS_FLAGS access, + CORINFO_FIELD_INFO* pFieldInfo, + var_types lclTyp, + GenTreeFlags* pIndirFlags, + bool* pIsHoistable = nullptr); + void impAnnotateFieldIndir(GenTreeIndir* indir); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); @@ -4040,24 +4049,23 @@ class Compiler void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); - void impAssignTempGen(unsigned lclNum, - GenTree* val, - unsigned curLevel, - Statement** pAfterStmt = nullptr, - const DebugInfo& di = DebugInfo(), - BasicBlock* block = nullptr); + void impStoreTemp(unsigned lclNum, + GenTree* val, + unsigned curLevel, + Statement** pAfterStmt = nullptr, + const DebugInfo& di = DebugInfo(), + BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); - GenTree* impAssignStruct(GenTree* dest, - GenTree* src, + GenTree* impStoreStruct(GenTree* store, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); - GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, unsigned curLevel); + GenTree* impStoreStructPtr(GenTree* destAddr, GenTree* value, unsigned curLevel); GenTree* impGetStructAddr(GenTree* structVal, unsigned curLevel, bool willDeref); @@ -4840,9 +4848,6 @@ class Compiler void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); - PhaseStatus fgRationalizeAssignments(); - GenTree* fgRationalizeAssignment(GenTreeOp* assignment); - // Do "simple lowering." 
This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. PhaseStatus fgSimpleLowering(); @@ -5887,12 +5892,12 @@ class Compiler unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); - bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); - void impMarkContiguousSIMDFieldAssignments(Statement* stmt); + bool fgMorphCombineSIMDFieldStores(BasicBlock* block, Statement* stmt); + void impMarkContiguousSIMDFieldStores(Statement* stmt); - // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment - // in function: Compiler::impMarkContiguousSIMDFieldAssignments. - Statement* fgPreviousCandidateSIMDFieldAsgStmt; + // fgPreviousCandidateSIMDFieldStoreStmt is only used for tracking previous simd field assignment + // in function: Compiler::impMarkContiguousSIMDFieldStores. + Statement* fgPreviousCandidateSIMDFieldStoreStmt; #endif // FEATURE_SIMD GenTree* fgMorphIndexAddr(GenTreeIndexAddr* tree); @@ -6392,7 +6397,7 @@ class Compiler GenTree* lpIterTree; // The "i = i const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented - genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) + genTreeOps lpIterOper() const; // the type of the operation on the iterator (ADD, SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions @@ -6966,7 +6971,7 @@ class Compiler // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. 
- // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, + // While for ordinary SSA defs it will be available (as a store) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef @@ -9182,7 +9187,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compLocallocOptimized; // Does the method have an optimized localloc bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node. - bool compAssignmentRationalized; // Have the ASG nodes been turned into their store equivalents? bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump? bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index f8f171549940f..18ea98b6f7441 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -990,8 +990,6 @@ inline GenTreeIndir* Compiler::gtNewIndexIndir(GenTreeIndexAddr* indexAddr) index = gtNewIndir(indexAddr->gtElemType, indexAddr); } - index->gtFlags |= GTF_GLOB_REF; - return index; } diff --git a/src/coreclr/jit/compphases.h b/src/coreclr/jit/compphases.h index 40862f14220e8..460b47f9bca85 100644 --- a/src/coreclr/jit/compphases.h +++ b/src/coreclr/jit/compphases.h @@ -91,7 +91,6 @@ CompPhaseNameMacro(PHASE_IF_CONVERSION, "If conversion", CompPhaseNameMacro(PHASE_VN_BASED_DEAD_STORE_REMOVAL,"VN-based dead store removal", false, -1, false) CompPhaseNameMacro(PHASE_OPT_UPDATE_FLOW_GRAPH, "Update flow graph opt pass", false, -1, false) CompPhaseNameMacro(PHASE_COMPUTE_EDGE_WEIGHTS2, "Compute edge weights (2, false)",false, -1, false) 
-CompPhaseNameMacro(PHASE_RATIONALIZE_ASSIGNMENTS, "Rationalize assignments", false, -1, false) CompPhaseNameMacro(PHASE_STRESS_SPLIT_TREE, "Stress gtSplitTree", false, -1, false) CompPhaseNameMacro(PHASE_EXPAND_RTLOOKUPS, "Expand runtime lookups", false, -1, true) CompPhaseNameMacro(PHASE_EXPAND_STATIC_INIT, "Expand static init", false, -1, true) diff --git a/src/coreclr/jit/earlyprop.cpp b/src/coreclr/jit/earlyprop.cpp index 16d79df88b175..b422c79a37ba3 100644 --- a/src/coreclr/jit/earlyprop.cpp +++ b/src/coreclr/jit/earlyprop.cpp @@ -362,7 +362,7 @@ GenTree* Compiler::optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropK // Track along the use-def chain to get the array length LclSsaVarDsc* ssaVarDsc = lvaTable[lclNum].GetPerSsaData(ssaNum); - GenTreeLclVarCommon* ssaDefStore = ssaVarDsc->GetAssignment(); + GenTreeLclVarCommon* ssaDefStore = ssaVarDsc->GetDefNode(); // Incoming parameters or live-in variables don't have actual definition tree node for // their FIRST_SSA_NUM. Definitions induced by calls do not record the store node. 
See @@ -565,7 +565,7 @@ GenTree* Compiler::optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckT return nullptr; } - GenTreeLclVarCommon* defNode = defLoc->GetAssignment(); + GenTreeLclVarCommon* defNode = defLoc->GetDefNode(); if ((defNode == nullptr) || !defNode->OperIs(GT_STORE_LCL_VAR) || (defNode->GetLclNum() != lclNum)) { return nullptr; diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 61f2c846edfac..cb3300fd31949 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -167,7 +167,7 @@ void Compiler::fgInit() #endif // DEBUG #ifdef FEATURE_SIMD - fgPreviousCandidateSIMDFieldAsgStmt = nullptr; + fgPreviousCandidateSIMDFieldStoreStmt = nullptr; #endif fgHasSwitch = false; diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index a6394691a2080..2968d80889593 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -3091,11 +3091,6 @@ void Compiler::fgDebugCheckFlags(GenTree* tree) assert(op1->OperIsCompare() || op1->IsIntegralConst(0) || op1->IsIntegralConst(1)); break; - case GT_ASG: - // Note that this is a weak check - the "op1" location node can be a COMMA. - assert(!op1->CanCSE()); - break; - case GT_IND: // Do we have a constant integer address as op1 that is also a handle? if (op1->IsIconHandle()) @@ -3141,7 +3136,7 @@ void Compiler::fgDebugCheckFlags(GenTree* tree) for (CallArg& arg : call->gtArgs.Args()) { - // TODO-Cleanup: this is a patch for a violation in our GT_ASG propagation. + // TODO-Cleanup: this is a patch for a violation in our GTF_ASG propagation. // see https://github.com/dotnet/runtime/issues/13758 if (arg.GetEarlyNode() != nullptr) { @@ -3230,20 +3225,6 @@ void Compiler::fgDebugCheckFlags(GenTree* tree) } tree->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult { - - // ASGs are nodes that produce no value, but have a type (essentially, the type of the location). 
- // Validate that nodes that parent ASGs do not consume values. This check also ensures that code - // which updates location types ("gsParamsToShadows" replaces small LCL_VARs with TYP_INT ones) - // does not have to worry about propagating the new type "up the tree". - // - // Uncoditionally allowing COMMA here weakens the assert, but is necessary because the compiler - // ("gtExtractSideEffList") can create "typed" "comma lists" with ASGs as second operands. - // - if (operand->OperIs(GT_ASG)) - { - assert(tree->IsCall() || tree->OperIs(GT_COMMA)); - } - fgDebugCheckFlags(operand); expectedFlags |= (operand->gtFlags & GTF_ALL_EFFECT); @@ -3472,11 +3453,8 @@ void Compiler::fgDebugCheckLinkedLocals() GenTree* node = *use; if (ShouldLink(node)) { - if ((user != nullptr) && user->OperIs(GT_ASG) && (node == user->gtGetOp1())) - { - } - else if ((user != nullptr) && user->IsCall() && - (node == m_compiler->gtCallGetDefinedRetBufLclAddr(user->AsCall()))) + if ((user != nullptr) && user->IsCall() && + (node == m_compiler->gtCallGetDefinedRetBufLclAddr(user->AsCall()))) { } else @@ -3485,11 +3463,6 @@ void Compiler::fgDebugCheckLinkedLocals() } } - if (node->OperIs(GT_ASG) && ShouldLink(node->gtGetOp1())) - { - m_locals.Push(node->gtGetOp1()); - } - if (node->IsCall()) { GenTree* defined = m_compiler->gtCallGetDefinedRetBufLclAddr(node->AsCall()); diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index ead90a27dcd29..18d8dd87b87d9 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -111,11 +111,11 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorOperGet() == GT_ASG) + if (tree->OperIsStore()) { - GenTree* value = tree->AsOp()->gtOp2; + GenTree* value = tree->Data(); if (value->OperGet() == GT_COMMA) { @@ -275,21 +275,23 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorOperIs(GT_ASG)) + // See assert below, we only look one level above for a store parent. 
+ if (parent->OperIsStore()) { - // The inlinee can only be the RHS. - assert(parent->gtGetOp2() == *use); - AttachStructInlineeToAsg(parent->AsOp(), retClsHnd); + // The inlinee can only be the value. + assert(parent->Data() == *use); + AttachStructInlineeToStore(parent, retClsHnd); } else { - // Just assign the inlinee to a variable to keep it simple. - *use = AssignStructInlineeToVar(*use, retClsHnd); + // Just store the inlinee to a variable to keep it simple. + *use = StoreStructInlineeToVar(*use, retClsHnd); } m_madeChanges = true; } @@ -316,51 +318,50 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorOperIs(GT_ASG)); - - GenTree* dst = asg->gtGetOp1(); - GenTree* inlinee = asg->gtGetOp2(); + assert(store->OperIsStore()); + GenTree* dst = store; + GenTree* inlinee = store->Data(); // We need to force all assignments from multi-reg nodes into the "lcl = node()" form. if (inlinee->IsMultiRegNode()) { // Special case: we already have a local, the only thing to do is mark it appropriately. Except - // if it may turn into an indirection. - if (dst->OperIs(GT_LCL_VAR) && !m_compiler->lvaIsImplicitByRefLocal(dst->AsLclVar()->GetLclNum())) + // if it may turn into an indirection. TODO-Bug: this does not account for x86 varargs args. + if (store->OperIs(GT_STORE_LCL_VAR) && !m_compiler->lvaIsImplicitByRefLocal(store->AsLclVar()->GetLclNum())) { - m_compiler->lvaGetDesc(dst->AsLclVar())->lvIsMultiRegRet = true; + m_compiler->lvaGetDesc(store->AsLclVar())->lvIsMultiRegRet = true; } else { - // Here, we assign our node into a fresh temp and then use that temp as the new value. - asg->gtOp2 = AssignStructInlineeToVar(inlinee, retClsHnd); + // Here, we store our node into a fresh temp and then use that temp as the new value. + store->Data() = StoreStructInlineeToVar(inlinee, retClsHnd); } } } //------------------------------------------------------------------------ - // AssignStructInlineeToVar: Assign the struct inlinee to a temp local. 
+ // StoreStructInlineeToVar: Store the struct inlinee to a temp local. // // Arguments: // inlinee - The inlinee of the RET_EXPR node // retClsHnd - The struct class handle of the type of the inlinee. // // Return Value: - // Value representing the freshly assigned temp. + // Value representing the freshly defined temp. // - GenTree* AssignStructInlineeToVar(GenTree* inlinee, CORINFO_CLASS_HANDLE retClsHnd) + GenTree* StoreStructInlineeToVar(GenTree* inlinee, CORINFO_CLASS_HANDLE retClsHnd) { assert(!inlinee->OperIs(GT_MKREFANY, GT_RET_EXPR)); @@ -368,7 +369,7 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorlvaGetDesc(lclNum); m_compiler->lvaSetStruct(lclNum, retClsHnd, false); - // Sink the assignment below any COMMAs: this is required for multi-reg nodes. + // Sink the store below any COMMAs: this is required for multi-reg nodes. GenTree* src = inlinee; GenTree* lastComma = nullptr; while (src->OperIs(GT_COMMA)) { lastComma = src; src = src->AsOp()->gtOp2; } - // When assigning a multi-register value to a local var, make sure the variable is marked as lvIsMultiRegRet. + // When storing a multi-register value to a local var, make sure the variable is marked as lvIsMultiRegRet. if (src->IsMultiRegNode()) { varDsc->lvIsMultiRegRet = true; } - GenTree* dst = m_compiler->gtNewLclvNode(lclNum, varDsc->TypeGet()); - GenTree* asg = m_compiler->gtNewAssignNode(dst, src); + GenTree* store = m_compiler->gtNewStoreLclVarNode(lclNum, src); // If inlinee was comma, new inlinee is (, , , lcl = inlinee). 
if (inlinee->OperIs(GT_COMMA)) { - lastComma->AsOp()->gtOp2 = asg; - asg = inlinee; + lastComma->AsOp()->gtOp2 = store; + store = inlinee; } GenTree* lcl = m_compiler->gtNewLclvNode(lclNum, varDsc->TypeGet()); - return m_compiler->gtNewOperNode(GT_COMMA, lcl->TypeGet(), asg, lcl); + return m_compiler->gtNewOperNode(GT_COMMA, lcl->TypeGet(), store, lcl); } #endif // FEATURE_MULTIREG_RET @@ -478,23 +478,22 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorOperGet() == GT_ASG) + else if (tree->OperIs(GT_STORE_LCL_VAR)) { - // If we're assigning to a ref typed local that has one definition, - // we may be able to sharpen the type for the local. - GenTree* const effLhs = tree->gtGetOp1()->gtEffectiveVal(); + const unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); + GenTree* const value = tree->AsLclVarCommon()->Data(); - if ((effLhs->OperGet() == GT_LCL_VAR) && (effLhs->TypeGet() == TYP_REF)) + // If we're storing to a ref typed local that has one definition, + // we may be able to sharpen the type for the local. + if (tree->TypeGet() == TYP_REF) { - const unsigned lclNum = effLhs->AsLclVarCommon()->GetLclNum(); - LclVarDsc* lcl = m_compiler->lvaGetDesc(lclNum); + LclVarDsc* lcl = m_compiler->lvaGetDesc(lclNum); if (lcl->lvSingleDef) { - GenTree* rhs = tree->gtGetOp2(); bool isExact = false; bool isNonNull = false; - CORINFO_CLASS_HANDLE newClass = m_compiler->gtGetClassHandle(rhs, &isExact, &isNonNull); + CORINFO_CLASS_HANDLE newClass = m_compiler->gtGetClassHandle(value, &isExact, &isNonNull); if (newClass != NO_CLASS_HANDLE) { @@ -504,15 +503,10 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorgtGetOp1(); - GenTree* const rhs = tree->gtGetOp2(); - if (lhs->OperIs(GT_LCL_VAR) && GenTree::Compare(lhs, rhs)) + if (value->OperIs(GT_LCL_VAR) && (value->AsLclVar()->GetLclNum() == lclNum)) { - m_compiler->gtUpdateNodeSideEffects(tree); - assert((tree->gtFlags & GTF_SIDE_EFFECT) == GTF_ASG); JITDUMP("... 
removing self-assignment\n"); DISPTREE(tree); tree->gtBashToNOP(); @@ -1647,7 +1641,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) else { // We're going to assign the argument value to the temp we use for it in the inline body. - GenTree* store = gtNewTempAssign(argInfo.argTmpNum, argNode); + GenTree* store = gtNewTempStore(argInfo.argTmpNum, argNode); newStmt = gtNewStmt(store, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); @@ -1845,7 +1839,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) var_types lclTyp = tmpDsc->TypeGet(); noway_assert(lclTyp == lclVarInfo[lclNum + inlineInfo->argCnt].lclTypeInfo); - tree = gtNewTempAssign(tmpNum, (lclTyp == TYP_STRUCT) ? gtNewIconNode(0) : gtNewZeroConNode(lclTyp)); + tree = gtNewTempStore(tmpNum, (lclTyp == TYP_STRUCT) ? gtNewIconNode(0) : gtNewZeroConNode(lclTyp)); newStmt = gtNewStmt(tree, callDI); fgInsertStmtAfter(block, afterStmt, newStmt); @@ -1940,7 +1934,7 @@ void Compiler::fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* bloc } // Assign null to the local. - GenTree* nullExpr = gtNewTempAssign(tmpNum, gtNewZeroConNode(lclTyp)); + GenTree* nullExpr = gtNewTempStore(tmpNum, gtNewZeroConNode(lclTyp)); Statement* nullStmt = gtNewStmt(nullExpr, callDI); if (stmtAfter == nullptr) diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index da2ce15b72dec..480739ea69337 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -1753,12 +1753,12 @@ PhaseStatus Compiler::fgPostImportationCleanup() // Zero the entry state at method entry. // - GenTree* const initEntryState = gtNewTempAssign(entryStateVar, gtNewZeroConNode(TYP_INT)); + GenTree* const initEntryState = gtNewTempStore(entryStateVar, gtNewZeroConNode(TYP_INT)); fgNewStmtAtBeg(fgFirstBB, initEntryState); // Set the state variable once control flow reaches the OSR entry. 
// - GenTree* const setEntryState = gtNewTempAssign(entryStateVar, gtNewOneConNode(TYP_INT)); + GenTree* const setEntryState = gtNewTempStore(entryStateVar, gtNewOneConNode(TYP_INT)); fgNewStmtAtBeg(osrEntry, setEntryState); // Helper method to add flow @@ -3445,9 +3445,8 @@ bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNu while (count < limit) { count++; - unsigned storeLclNum; GenTree* const tree = stmt->GetRootNode(); - if (tree->OperIsStoreLcl(&storeLclNum) && (storeLclNum == lclNum) && !tree->OperIsBlkOp()) + if (tree->OperIsLocalStore() && !tree->OperIsBlkOp() && (tree->AsLclVarCommon()->GetLclNum() == lclNum)) { GenTree* const data = tree->Data(); if (data->OperIsArrLength() || data->OperIsConst() || data->OperIsCompare()) @@ -3606,13 +3605,14 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // Otherwise check the first stmt. // Verify the branch is just a simple local compare. // - unsigned storeLclNum; GenTree* const firstTree = firstStmt->GetRootNode(); - if (!firstTree->OperIsStoreLclVar(&storeLclNum)) + if (!firstTree->OperIs(GT_STORE_LCL_VAR)) { return false; } + unsigned storeLclNum = firstTree->AsLclVar()->GetLclNum(); + if (storeLclNum != *lclNum) { return false; @@ -3620,7 +3620,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // Could allow unary here too... 
// - GenTree* const data = firstTree->Data(); + GenTree* const data = firstTree->AsLclVar()->Data(); if (!data->OperIsBinary()) { return false; diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 7682ec92f42af..2923e04b61903 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -819,14 +819,13 @@ GenTree* BlockCountInstrumentor::CreateCounterIncrement(Compiler* comp, uint8_t* comp->gtNewIndOfIconHandleNode(countType, reinterpret_cast(counterAddr), GTF_ICON_BBC_PTR, false); // Increment value by 1 - GenTree* rhsNode = comp->gtNewOperNode(GT_ADD, countType, valueNode, comp->gtNewIconNode(1, countType)); + GenTree* incValueNode = comp->gtNewOperNode(GT_ADD, countType, valueNode, comp->gtNewIconNode(1, countType)); // Write new Basic-Block count value - GenTree* lhsNode = - comp->gtNewIndOfIconHandleNode(countType, reinterpret_cast(counterAddr), GTF_ICON_BBC_PTR, false); - GenTree* asgNode = comp->gtNewAssignNode(lhsNode, rhsNode); + GenTree* counterAddrNode = comp->gtNewIconHandleNode(reinterpret_cast(counterAddr), GTF_ICON_BBC_PTR); + GenTree* updateNode = comp->gtNewStoreIndNode(countType, counterAddrNode, incValueNode); - return asgNode; + return updateNode; } //------------------------------------------------------------------------ @@ -2037,7 +2036,7 @@ class HandleHistogramProbeInserter // // (CALLVIRT // (COMMA - // (ASG tmp, obj) + // (tmp = obj) // (COMMA // (CALL probe_fn tmp, &probeEntry) // tmp))) @@ -2127,15 +2126,14 @@ class HandleHistogramProbeInserter // Generate the IR... 
// - GenTree* const tmpNode2 = compiler->gtNewLclvNode(tmpNum, TYP_REF); - GenTree* const callCommaNode = compiler->gtNewOperNode(GT_COMMA, TYP_REF, helperCallNode, tmpNode2); - GenTree* const tmpNode3 = compiler->gtNewLclvNode(tmpNum, TYP_REF); - GenTree* const asgNode = compiler->gtNewAssignNode(tmpNode3, objUse->GetNode()); - GenTree* const asgCommaNode = compiler->gtNewOperNode(GT_COMMA, TYP_REF, asgNode, callCommaNode); + GenTree* const tmpNode2 = compiler->gtNewLclvNode(tmpNum, TYP_REF); + GenTree* const callCommaNode = compiler->gtNewOperNode(GT_COMMA, TYP_REF, helperCallNode, tmpNode2); + GenTree* const storeNode = compiler->gtNewStoreLclVarNode(tmpNum, objUse->GetNode()); + GenTree* const storeCommaNode = compiler->gtNewOperNode(GT_COMMA, TYP_REF, storeNode, callCommaNode); // Update the call // - objUse->SetEarlyNode(asgCommaNode); + objUse->SetEarlyNode(storeCommaNode); JITDUMP("Modified call is now\n"); DISPTREE(call); diff --git a/src/coreclr/jit/fgstmt.cpp b/src/coreclr/jit/fgstmt.cpp index 7fa2250633c3e..4651acbdfb247 100644 --- a/src/coreclr/jit/fgstmt.cpp +++ b/src/coreclr/jit/fgstmt.cpp @@ -82,7 +82,7 @@ void Compiler::fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt) } else { - Statement* insertBeforeStmt = block->FirstNonPhiDefOrCatchArgAsg(); + Statement* insertBeforeStmt = block->FirstNonPhiDefOrCatchArgStore(); if (insertBeforeStmt != nullptr) { fgInsertStmtBefore(block, insertBeforeStmt, stmt); diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index cce98db9bc503..3b157483cd75f 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1682,9 +1682,8 @@ void Compiler::fgAddSyncMethodEnterExit() // if (!opts.IsOSR()) { - GenTree* zero = gtNewZeroConNode(genActualType(typeMonAcquired)); - GenTree* varNode = gtNewLclvNode(lvaMonAcquired, typeMonAcquired); - GenTree* initNode = gtNewAssignNode(varNode, zero); + GenTree* zero = gtNewZeroConNode(typeMonAcquired); + GenTree* initNode = 
gtNewStoreLclVarNode(lvaMonAcquired, zero); fgNewStmtAtEnd(fgFirstBB, initNode); @@ -1709,9 +1708,8 @@ void Compiler::fgAddSyncMethodEnterExit() lvaCopyThis = lvaGrabTemp(true DEBUGARG("Synchronized method copy of this for handler")); lvaTable[lvaCopyThis].lvType = TYP_REF; - GenTree* thisNode = gtNewLclvNode(info.compThisArg, TYP_REF); - GenTree* copyNode = gtNewLclvNode(lvaCopyThis, TYP_REF); - GenTree* initNode = gtNewAssignNode(copyNode, thisNode); + GenTree* thisNode = gtNewLclVarNode(info.compThisArg); + GenTree* initNode = gtNewStoreLclVarNode(lvaCopyThis, thisNode); fgNewStmtAtEnd(tryBegBB, initNode); } @@ -1792,7 +1790,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis lclVar->gtFlags |= (retExpr->gtFlags & GTF_DONT_CSE); retExpr = gtNewOperNode(GT_COMMA, lclVar->TypeGet(), tree, lclVar); - retExpr = gtNewOperNode(GT_COMMA, lclVar->TypeGet(), tempInfo.asg, retExpr); + retExpr = gtNewOperNode(GT_COMMA, lclVar->TypeGet(), tempInfo.store, retExpr); retNode->gtOp1 = retExpr; retNode->AddAllEffectsFlags(retExpr); } @@ -2521,32 +2519,15 @@ PhaseStatus Compiler::fgAddInternal() noway_assert(lvaTable[lvaArg0Var].IsAddressExposed() || lvaTable[lvaArg0Var].lvHasILStoreOp || lva0CopiedForGenericsCtxt); - var_types thisType = lvaTable[info.compThisArg].TypeGet(); - - // Now assign the original input "this" to the temp - - GenTree* tree; - - tree = gtNewLclvNode(lvaArg0Var, thisType); - - tree = gtNewAssignNode(tree, // dst - gtNewLclvNode(info.compThisArg, thisType) // src - ); - - /* Create a new basic block and stick the assignment in it */ + // Now assign the original input "this" to the temp. 
+ GenTree* store = gtNewStoreLclVarNode(lvaArg0Var, gtNewLclVarNode(info.compThisArg)); fgEnsureFirstBBisScratch(); + fgNewStmtAtEnd(fgFirstBB, store); - fgNewStmtAtEnd(fgFirstBB, tree); - -#ifdef DEBUG - if (verbose) - { - printf("\nCopy \"this\" to lvaArg0Var in first basic block %s\n", fgFirstBB->dspToString()); - gtDispTree(tree); - printf("\n"); - } -#endif + JITDUMP("\nCopy \"this\" to lvaArg0Var in first basic block %s\n", fgFirstBB->dspToString()); + DISPTREE(store); + JITDUMP("\n"); madeChanges = true; } @@ -2803,155 +2784,6 @@ PhaseStatus Compiler::fgFindOperOrder() return PhaseStatus::MODIFIED_EVERYTHING; } -//------------------------------------------------------------------------ -// fgRationalizeAssignments: Rewrite assignment nodes into stores. -// -// TODO-ASG: delete. -// -PhaseStatus Compiler::fgRationalizeAssignments() -{ - class AssignmentRationalizationVisitor : public GenTreeVisitor - { - public: - enum - { - DoPreOrder = true - }; - - AssignmentRationalizationVisitor(Compiler* compiler) : GenTreeVisitor(compiler) - { - } - - fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) - { - GenTree* node = *use; - - // GTF_ASG is sometimes not propagated from setup arg assignments so we have to check for GTF_CALL too. - if ((node->gtFlags & (GTF_ASG | GTF_CALL)) == 0) - { - return fgWalkResult::WALK_SKIP_SUBTREES; - } - - if (node->OperIs(GT_ASG)) - { - GenTreeFlags lhsRhsFlags = node->gtGetOp1()->gtFlags | node->gtGetOp2()->gtFlags; - *use = m_compiler->fgRationalizeAssignment(node->AsOp()); - - // TP: return early quickly for simple assignments. 
- if ((lhsRhsFlags & (GTF_ASG | GTF_CALL)) == 0) - { - return fgWalkResult::WALK_SKIP_SUBTREES; - } - } - - return fgWalkResult::WALK_CONTINUE; - } - }; - - AssignmentRationalizationVisitor visitor(this); - for (BasicBlock* block : Blocks()) - { - for (Statement* stmt : block->Statements()) - { - GenTree** use = stmt->GetRootNodePointer(); - if (visitor.PreOrderVisit(use, nullptr) == fgWalkResult::WALK_CONTINUE) - { - visitor.WalkTree(use, nullptr); - } - } - } - - compAssignmentRationalized = true; - -#ifdef DEBUG - for (BasicBlock* block : Blocks()) - { - for (Statement* stmt : block->Statements()) - { - assert(!gtTreeContainsOper(stmt->GetRootNode(), GT_ASG)); - } - } -#endif // DEBUG - - return PhaseStatus::MODIFIED_EVERYTHING; -} - -//------------------------------------------------------------------------ -// fgRationalizeAssignment: Rewrite GT_ASG into a store node. -// -// Arguments: -// assignment - The assignment node to rewrite -// -// Return Value: -// Assignment's location, turned into the appropriate store node. -// -GenTree* Compiler::fgRationalizeAssignment(GenTreeOp* assignment) -{ - assert(assignment->OperGet() == GT_ASG); - - bool isReverseOp = assignment->IsReverseOp(); - GenTree* location = assignment->gtGetOp1(); - GenTree* value = assignment->gtGetOp2(); - if (location->OperIsLocal()) - { - assert((location->gtFlags & GTF_VAR_DEF) != 0); - } - else if (value->OperIs(GT_LCL_VAR)) - { - assert((value->gtFlags & GTF_VAR_DEF) == 0); - } - - if (assignment->OperIsInitBlkOp()) - { - // No SIMD types are allowed for InitBlks (including zero-inits). 
- assert(assignment->TypeIs(TYP_STRUCT) && location->TypeIs(TYP_STRUCT)); - } - - genTreeOps storeOp; - switch (location->OperGet()) - { - case GT_LCL_VAR: - storeOp = GT_STORE_LCL_VAR; - break; - case GT_LCL_FLD: - storeOp = GT_STORE_LCL_FLD; - break; - case GT_BLK: - storeOp = GT_STORE_BLK; - break; - case GT_IND: - storeOp = GT_STOREIND; - break; - default: - unreached(); - } - - JITDUMP("Rewriting GT_ASG(%s, X) to %s(X)\n", GenTree::OpName(location->OperGet()), GenTree::OpName(storeOp)); - - GenTree* store = location; - store->SetOperRaw(storeOp); - store->Data() = value; - store->gtFlags |= GTF_ASG; - store->AddAllEffectsFlags(value); - store->AddAllEffectsFlags(assignment->gtFlags & GTF_GLOB_REF); // TODO-ASG: zero-diff quirk, delete. - if (isReverseOp && !store->OperIsLocalStore()) - { - store->SetReverseOp(); - } - store->ClearDoNotCSE(); - store->CopyRawCosts(assignment); - - if (storeOp == GT_STOREIND) - { - store->AsStoreInd()->SetRMWStatusDefault(); - } - - DISPNODE(store); - JITDUMP("\n"); - - return store; -} - //------------------------------------------------------------------------ // fgSimpleLowering: do full walk of all IR, lowering selected operations // and computing lvaOutgoingArgSpaceSize. @@ -2962,13 +2794,6 @@ GenTree* Compiler::fgRationalizeAssignment(GenTreeOp* assignment) // Notes: // Lowers GT_ARR_LENGTH, GT_MDARR_LENGTH, GT_MDARR_LOWER_BOUND, GT_BOUNDS_CHECK. // -// For target ABIs with fixed out args area, computes upper bound on -// the size of this area from the calls in the IR. -// -// Outgoing arg area size is computed here because we want to run it -// after optimization (in case calls are removed) and need to look at -// all possible calls in the method. 
-// PhaseStatus Compiler::fgSimpleLowering() { bool madeChanges = false; diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index 684d374ceb4d1..cb8a80ca10a0b 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -235,22 +235,8 @@ void GenTree::InitNodeSize() } // Now set all of the appropriate entries to 'large' - CLANG_FORMAT_COMMENT_ANCHOR; // clang-format off - if (GlobalJitOptions::compFeatureHfa -#if defined(UNIX_AMD64_ABI) - || true -#endif // defined(UNIX_AMD64_ABI) - ) - { - // On ARM32, ARM64 and System V for struct returning - // there is code that does GT_ASG-tree.CopyObj call. - // CopyObj is a large node and the GT_ASG is small, which triggers an exception. - GenTree::s_gtNodeSizes[GT_ASG] = TREE_NODE_SZ_LARGE; - GenTree::s_gtNodeSizes[GT_RETURN] = TREE_NODE_SZ_LARGE; - } - GenTree::s_gtNodeSizes[GT_CALL] = TREE_NODE_SZ_LARGE; #ifdef TARGET_XARCH GenTree::s_gtNodeSizes[GT_CNS_VEC] = TREE_NODE_SZ_LARGE; @@ -283,8 +269,6 @@ void GenTree::InitNodeSize() #endif // FEATURE_ARG_SPLIT #endif // FEATURE_PUT_STRUCT_ARG_STK - assert(GenTree::s_gtNodeSizes[GT_RETURN] == GenTree::s_gtNodeSizes[GT_ASG]); - // This list of assertions should come to contain all GenTree subtypes that are declared // "small". assert(sizeof(GenTreeLclFld) <= GenTree::s_gtNodeSizes[GT_LCL_FLD]); @@ -5779,7 +5763,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) #endif // !TARGET_64BIT SET_INDIRECT_STORE_ORDER: - // TODO-ASG-Cleanup: this logic emulates the ASG case below. See how of much of it can be deleted. + // TODO-ASG-Cleanup: this logic emulated the ASG case below. See how of much of it can be deleted. 
if (!optValnumCSE_phase || optCSE_canSwap(op1, op2)) { if (op1->IsInvariant()) @@ -5812,71 +5796,6 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) } break; - case GT_ASG: - /* Assignments need a bit of special handling */ - if (gtIsLikelyRegVar(op1)) - { - /* Assignment to an enregistered LCL_VAR */ - costEx = op2->GetCostEx(); - costSz = max(3, op2->GetCostSz()); // 3 is an estimate for a reg-reg assignment - includeOp1Cost = false; - includeOp2Cost = false; - } - - if (!optValnumCSE_phase || optCSE_canSwap(op1, op2)) - { - switch (op1->OperGet()) - { - case GT_IND: - case GT_BLK: - { - // In an ASG(IND(addr), ...), the "IND" is a pure syntactical element, - // the actual indirection will only be realized at the point of the ASG - // itself. As such, we can discard any side effects "induced" by it in - // this logic. - // - GenTree* op1Addr = op1->AsIndir()->Addr(); - - if (op1Addr->IsInvariant()) - { - allowReversal = false; - tree->gtFlags |= GTF_REVERSE_OPS; - break; - } - if (op1Addr->gtFlags & GTF_ALL_EFFECT) - { - break; - } - - // In case op2 assigns to a local var that is used in op1, we have to evaluate op1 first. - if (op2->gtFlags & GTF_ASG) - { - break; - } - - // If op2 is simple then evaluate op1 first - if (op2->OperKind() & GTK_LEAF) - { - break; - } - } - // fall through and set GTF_REVERSE_OPS - FALLTHROUGH; - - case GT_LCL_VAR: - case GT_LCL_FLD: - // Note that for local stores, liveness depends on seeing the defs and - // uses in correct order, and so we MUST reverse the ASG in that case. 
- allowReversal = false; - tree->gtFlags |= GTF_REVERSE_OPS; - break; - - default: - break; - } - } - break; - default: break; } @@ -6697,7 +6616,6 @@ bool GenTree::OperRequiresAsgFlag() case GT_STOREIND: case GT_STORE_BLK: case GT_STORE_DYN_BLK: - case GT_ASG: case GT_XADD: case GT_XORR: case GT_XAND: @@ -7707,13 +7625,17 @@ GenTreeLclVar* Compiler::gtNewStoreLclVarNode(unsigned lclNum, GenTree* data) store->gtFlags |= GTF_GLOB_REF; } + gtInitializeStoreNode(store, data); + return store; } -GenTreeLclFld* Compiler::gtNewStoreLclFldNode(unsigned lclNum, var_types type, unsigned offset, GenTree* data) +GenTreeLclFld* Compiler::gtNewStoreLclFldNode( + unsigned lclNum, var_types type, ClassLayout* layout, unsigned offset, GenTree* data) { - ClassLayout* layout = (type == TYP_STRUCT) ? data->GetLayout(this) : nullptr; - GenTreeLclFld* store = new (this, GT_STORE_LCL_FLD) GenTreeLclFld(type, lclNum, offset, data, layout); + assert((type == TYP_STRUCT) == (layout != nullptr)); + + GenTreeLclFld* store = new (this, GT_STORE_LCL_FLD) GenTreeLclFld(type, lclNum, offset, data, layout); store->gtFlags |= (GTF_VAR_DEF | GTF_ASG); if (store->IsPartialLclFld(this)) { @@ -7724,6 +7646,8 @@ GenTreeLclFld* Compiler::gtNewStoreLclFldNode(unsigned lclNum, var_types type, u store->gtFlags |= GTF_GLOB_REF; } + gtInitializeStoreNode(store, data); + return store; } @@ -7971,37 +7895,42 @@ GenTreeFieldAddr* Compiler::gtNewFieldAddrNode(var_types type, CORINFO_FIELD_HAN } //------------------------------------------------------------------------ -// gtNewFieldIndirNode: Create a new field indirection node. -// -// Arguments: -// type - Indirection's type -// layout - Indirection's struct layout -// add - The field address +// gtInitializeStoreNode: Initialize a store node. // -// Return Value: -// The created node. +// Common initialization for all STORE nodes. Marks SIMD locals as "used in +// a HW intrinsic". // -// Notes: -// This method exists to preserve previous behavior. 
New code should -// use "gtNewIndir"/"gtNewBlkIndir" directly. +// Arguments: +// store - The store node +// data - The value to store // -GenTreeIndir* Compiler::gtNewFieldIndirNode(var_types type, ClassLayout* layout, GenTreeFieldAddr* addr) +void Compiler::gtInitializeStoreNode(GenTree* store, GenTree* data) { - GenTreeIndir* indir = (type == TYP_STRUCT) ? gtNewBlkIndir(layout, addr, GTF_IND_NONFAULTING) - : gtNewIndir(type, addr, GTF_IND_NONFAULTING); + // TODO-ASG: add asserts that the types match here. + assert(store->Data() == data); - if (addr->IsInstance() && addr->GetFldObj()->OperIs(GT_LCL_ADDR)) +#if defined(FEATURE_SIMD) +#ifndef TARGET_X86 + if (varTypeIsSIMD(store)) { - indir->gtFlags &= ~GTF_GLOB_REF; + // TODO-ASG: delete this zero-diff quirk. + if (!data->IsCall() || !data->AsCall()->ShouldHaveRetBufArg()) + { + // We want to track SIMD assignments as being intrinsics since they + // are functionally SIMD `mov` instructions and are more efficient + // when we don't promote, particularly when it occurs due to inlining. + SetOpLclRelatedToSIMDIntrinsic(store); + SetOpLclRelatedToSIMDIntrinsic(data); + } } - else +#else // TARGET_X86 + // TODO-Cleanup: merge into the all-arch. 
+ if (varTypeIsSIMD(data) && data->OperIs(GT_HWINTRINSIC, GT_CNS_VEC)) { - indir->gtFlags |= GTF_GLOB_REF; + SetOpLclRelatedToSIMDIntrinsic(store); } - - addr->gtFlags |= GTF_FLD_DEREFERENCED; - - return indir; +#endif // TARGET_X86 +#endif // FEATURE_SIMD } //------------------------------------------------------------------------ @@ -8015,7 +7944,9 @@ GenTreeIndir* Compiler::gtNewFieldIndirNode(var_types type, ClassLayout* layout, // void Compiler::gtInitializeIndirNode(GenTreeIndir* indir, GenTreeFlags indirFlags) { + assert(varTypeIsI(genActualType(indir->Addr()))); assert((indirFlags & ~GTF_IND_FLAGS) == GTF_EMPTY); + indir->gtFlags |= indirFlags; indir->SetIndirExceptionFlags(this); @@ -8119,6 +8050,8 @@ GenTreeBlk* Compiler::gtNewStoreBlkNode(ClassLayout* layout, GenTree* addr, GenT store->gtFlags |= GTF_ASG; gtInitializeIndirNode(store, indirFlags); + gtInitializeStoreNode(store, data); + return store; } @@ -8142,6 +8075,8 @@ GenTreeStoreInd* Compiler::gtNewStoreIndNode(var_types type, GenTree* addr, GenT store->gtFlags |= GTF_ASG; gtInitializeIndirNode(store, indirFlags); + gtInitializeStoreNode(store, data); + return store; } @@ -8188,62 +8123,11 @@ GenTree* Compiler::gtNewStoreValueNode( return store; } -/***************************************************************************** - * - * Create a node that will assign 'src' to 'dst'. - */ - -GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src) -{ - assert(!src->TypeIs(TYP_VOID) && !compAssignmentRationalized); - /* Mark the target as being assigned */ - - if ((dst->gtOper == GT_LCL_VAR) || (dst->OperGet() == GT_LCL_FLD)) - { - dst->gtFlags |= GTF_VAR_DEF; - if (dst->IsPartialLclFld(this)) - { - // We treat these partial writes as combined uses and defs. 
- dst->gtFlags |= GTF_VAR_USEASG; - } - } - dst->gtFlags |= GTF_DONT_CSE; - -#if defined(FEATURE_SIMD) -#if !defined(TARGET_X86) - if (varTypeIsSIMD(dst)) - { - // We want to track SIMD assignments as being intrinsics since they - // are functionally SIMD `mov` instructions and are more efficient - // when we don't promote, particularly when it occurs due to inlining - SetOpLclRelatedToSIMDIntrinsic(dst); - SetOpLclRelatedToSIMDIntrinsic(src); - } -#else // TARGET_X86 - // TODO-Cleanup: merge with the all-arch logic. - if (varTypeIsSIMD(src) && src->OperIs(GT_HWINTRINSIC, GT_CNS_VEC)) - { - SetOpLclRelatedToSIMDIntrinsic(dst); - } -#endif // TARGET_X86 -#endif // FEATURE_SIMD - - /* Create the assignment node */ - - GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp(); - - /* Mark the expression as containing an assignment */ - - asg->gtFlags |= GTF_ASG; - - return asg; -} - //------------------------------------------------------------------------ // FixupInitBlkValue: Fixup the init value for an initBlk operation // // Arguments: -// asgType - The type of assignment that the initBlk is being transformed into +// type - The type of store that the initBlk is being transformed into // // Return Value: // Modifies the constant value on this node to be the appropriate "fill" @@ -8255,10 +8139,10 @@ GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src) // to an assignment of a primitive type. // This performs the appropriate extension. // -void GenTreeIntCon::FixupInitBlkValue(var_types asgType) +void GenTreeIntCon::FixupInitBlkValue(var_types type) { - assert(varTypeIsIntegralOrI(asgType)); - unsigned size = genTypeSize(asgType); + assert(varTypeIsIntegralOrI(type)); + unsigned size = genTypeSize(type); if (size > 1) { size_t cns = gtIconVal; @@ -8275,10 +8159,10 @@ void GenTreeIntCon::FixupInitBlkValue(var_types asgType) #endif // TARGET_64BIT // Make the type match for evaluation types. 
- gtType = asgType; + gtType = type; // if we are initializing a GC type the value being assigned must be zero (null). - assert(!varTypeIsGC(asgType) || (cns == 0)); + assert(!varTypeIsGC(type) || (cns == 0)); } gtIconVal = cns; @@ -8696,8 +8580,9 @@ GenTree* Compiler::gtClone(GenTree* tree, bool complexOK) #ifdef FEATURE_READYTORUN copy->AsFieldAddr()->gtFieldLookup = addr->gtFieldLookup; #endif - ClassLayout* layout = tree->OperIs(GT_BLK) ? tree->AsBlk()->GetLayout() : nullptr; - copy = gtNewFieldIndirNode(tree->TypeGet(), layout, copy->AsFieldAddr()); + copy = tree->OperIs(GT_BLK) ? gtNewBlkIndir(tree->AsBlk()->GetLayout(), copy) + : gtNewIndir(tree->TypeGet(), copy); + impAnnotateFieldIndir(copy->AsIndir()); } else if (tree->OperIs(GT_ADD, GT_SUB)) { @@ -9052,7 +8937,7 @@ GenTree* Compiler::gtCloneExpr( case GT_BOX: copy = new (this, GT_BOX) - GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtAsgStmtWhenInlinedBoxValue, + GenTreeBox(tree->TypeGet(), tree->AsOp()->gtOp1, tree->AsBox()->gtDefStmtWhenInlinedBoxValue, tree->AsBox()->gtCopyStmtWhenInlinedBoxValue); tree->AsBox()->SetCloned(); copy->AsBox()->SetCloned(); @@ -9114,15 +8999,7 @@ GenTree* Compiler::gtCloneExpr( if (tree->AsOp()->gtOp1) { - if (tree->gtOper == GT_ASG) - { - // Don't replace varNum if it appears as the LHS of an assign. 
- copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, -1, 0, deepVarNum, deepVarVal); - } - else - { - copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal); - } + copy->AsOp()->gtOp1 = gtCloneExpr(tree->AsOp()->gtOp1, addFlags, deepVarNum, deepVarVal); } if (tree->gtGetOp2IfPresent()) @@ -10752,15 +10629,6 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_ } goto DASH; - case GT_ASG: - if (tree->OperIsInitBlkOp()) - { - printf("I"); - --msgLength; - break; - } - goto DASH; - case GT_CALL: if (tree->AsCall()->IsInlineCandidate()) { @@ -12975,17 +12843,6 @@ void Compiler::gtDispLIRNode(GenTree* node, const char* prefixMsg /* = nullptr * displayOperand(operand, "size", operandArc, indentStack, prefixIndent); } } - else if (node->OperIs(GT_ASG)) - { - if (operand == node->gtGetOp1()) - { - displayOperand(operand, "lhs", operandArc, indentStack, prefixIndent); - } - else - { - displayOperand(operand, "rhs", operandArc, indentStack, prefixIndent); - } - } else { displayOperand(operand, "", operandArc, indentStack, prefixIndent); @@ -14089,17 +13946,6 @@ GenTree* Compiler::gtFoldExprSpecial(GenTree* tree) DONE_FOLD: - /* The node has been folded into 'op' */ - - // If there was an assignment update, we just morphed it into - // a use, update the flags appropriately - if (op->gtOper == GT_LCL_VAR) - { - assert(tree->OperIs(GT_ASG) || (op->gtFlags & (GTF_VAR_USEASG | GTF_VAR_DEF)) == 0); - - op->gtFlags &= ~(GTF_VAR_USEASG | GTF_VAR_DEF); - } - JITDUMP("\nFolding binary operator with a constant operand:\n"); DISPTREE(tree); JITDUMP("Transformed into:\n"); @@ -14243,7 +14089,7 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions // grab related parts for the optimization GenTreeBox* box = op->AsBox(); - Statement* allocStmt = box->gtAsgStmtWhenInlinedBoxValue; + Statement* allocStmt = box->gtDefStmtWhenInlinedBoxValue; Statement* copyStmt = 
box->gtCopyStmtWhenInlinedBoxValue; JITDUMP("gtTryRemoveBoxUpstreamEffects: %s to %s of BOX (valuetype)" @@ -14254,7 +14100,7 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions // If we don't recognize the form of the store, bail. GenTree* boxLclDef = allocStmt->GetRootNode(); - if (!boxLclDef->OperIsStoreLclVar()) + if (!boxLclDef->OperIs(GT_STORE_LCL_VAR)) { JITDUMP(" bailing; unexpected alloc def op %s\n", GenTree::OpName(boxLclDef->OperGet())); return nullptr; @@ -14271,7 +14117,7 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions GenTree* boxTypeHandle = nullptr; if ((options == BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE) || (options == BR_DONT_REMOVE_WANT_TYPE_HANDLE)) { - GenTree* defSrc = boxLclDef->Data(); + GenTree* defSrc = boxLclDef->AsLclVar()->Data(); genTreeOps defSrcOper = defSrc->OperGet(); // Allocation may be via AllocObj or via helper call, depending @@ -14307,7 +14153,7 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions // If we don't recognize the form of the copy, bail. GenTree* copy = copyStmt->GetRootNode(); - if (!copy->OperIs(GT_ASG, GT_STOREIND, GT_STORE_BLK)) + if (!copy->OperIs(GT_STOREIND, GT_STORE_BLK)) { // GT_RET_EXPR is a tolerable temporary failure. // The jit will revisit this optimization after @@ -14336,21 +14182,13 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions CORINFO_CLASS_HANDLE boxClass = lvaTable[boxTempLcl].lvClassHnd; assert(boxClass != nullptr); - // Verify that the copyDst has the expected shape - // (blk|obj|ind (add (boxTempLcl, ptr-size))) + // Verify that the copy has the expected shape + // (store_blk|store_ind (add (boxTempLcl, ptr-size))) // // The shape here is constrained to the patterns we produce // over in impImportAndPushBox for the inlined box case. - bool copyIsAsg = copy->OperIs(GT_ASG); - GenTree* copyDst = copyIsAsg ? 
copy->AsOp()->gtOp1 : copy; - - if (copyIsAsg && !copyDst->OperIs(GT_BLK, GT_IND)) - { - JITDUMP("Unexpected copy dest operator %s\n", GenTree::OpName(copyDst->gtOper)); - return nullptr; - } - - GenTree* copyDstAddr = copyDst->AsOp()->gtOp1; + // + GenTree* copyDstAddr = copy->AsIndir()->Addr(); if (copyDstAddr->OperGet() != GT_ADD) { JITDUMP("Unexpected copy dest address tree\n"); @@ -14384,7 +14222,7 @@ GenTree* Compiler::gtTryRemoveBoxUpstreamEffects(GenTree* op, BoxRemovalOptions boxLclDef->gtBashToNOP(); // Update the copy from the value to be boxed to the box temp - copyDst->AsOp()->gtOp1 = gtNewLclVarAddrNode(boxTempLcl, TYP_BYREF); + copy->AsIndir()->Addr() = gtNewLclVarAddrNode(boxTempLcl, TYP_BYREF); // Return the address of the now-struct typed box temp GenTree* retValue = gtNewLclVarAddrNode(boxTempLcl, TYP_BYREF); @@ -14629,10 +14467,10 @@ GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp) } else { - const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp")); - GenTree* thisAsg = gtNewTempAssign(thisTmp, thisVal); - Statement* thisAsgStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue; - thisAsgStmt->SetRootNode(thisAsg); + const unsigned thisTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag this temp")); + GenTree* thisStore = gtNewTempStore(thisTmp, thisVal); + Statement* thisStoreStmt = thisOp->AsBox()->gtCopyStmtWhenInlinedBoxValue; + thisStoreStmt->SetRootNode(thisStore); thisValOpt = gtNewLclvNode(thisTmp, type); } @@ -14645,10 +14483,10 @@ GenTree* Compiler::gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp) } else { - const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp")); - GenTree* flagAsg = gtNewTempAssign(flagTmp, flagVal); - Statement* flagAsgStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue; - flagAsgStmt->SetRootNode(flagAsg); + const unsigned flagTmp = lvaGrabTemp(true DEBUGARG("Enum:HasFlag flag temp")); + GenTree* flagStore = gtNewTempStore(flagTmp, flagVal); + 
Statement* flagStoreStmt = flagOp->AsBox()->gtCopyStmtWhenInlinedBoxValue; + flagStoreStmt->SetRootNode(flagStore); flagValOpt = gtNewLclvNode(flagTmp, type); flagValOptCopy = gtNewLclvNode(flagTmp, type); } @@ -15829,7 +15667,7 @@ GenTree* Compiler::gtFoldIndirConst(GenTreeIndir* indir) } //------------------------------------------------------------------------ -// gtNewTempAssign: Create an assignment of the given value to a temp. +// gtNewTempStore: Create an assignment of the given value to a temp. // // Arguments: // tmp - local number for a compiler temp @@ -15850,7 +15688,7 @@ GenTree* Compiler::gtFoldIndirConst(GenTreeIndir* indir) // // May set compFloatingPointUsed. // -GenTree* Compiler::gtNewTempAssign( +GenTree* Compiler::gtNewTempStore( unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, const DebugInfo& di, BasicBlock* block) { // Self-assignment is a nop. @@ -15915,7 +15753,7 @@ GenTree* Compiler::gtNewTempAssign( if (!ok) { gtDispTree(val); - assert(!"Incompatible types for gtNewTempAssign"); + assert(!"Incompatible types for gtNewTempStore"); } } #endif @@ -15924,7 +15762,7 @@ GenTree* Compiler::gtNewTempAssign( // if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_REF)) { - noway_assert(!"Incompatible types for gtNewTempAssign"); + noway_assert(!"Incompatible types for gtNewTempStore"); } // Floating Point assignments can be created during inlining @@ -15936,26 +15774,14 @@ GenTree* Compiler::gtNewTempAssign( compFloatingPointUsed = true; } - GenTree* store; - if (compAssignmentRationalized) - { - store = gtNewStoreLclVarNode(tmp, val); + GenTree* store = gtNewStoreLclVarNode(tmp, val); -#ifdef UNIX_AMD64_ABI - if (val->IsCall()) - { - // TODO-ASG: delete this zero-diff quirk. 
- varDsc->lvIsMultiRegRet = true; - } -#endif // UNIX_AMD64_ABI - } - else if (varTypeIsStruct(varDsc) && !val->IsInitVal()) - { - store = impAssignStruct(gtNewLclvNode(tmp, dstTyp), val, curLevel, pAfterStmt, di, block); - } - else + // TODO-ASG: delete this zero-diff quirk. Requires some forward substitution work. + store->gtType = dstTyp; + + if (varTypeIsStruct(varDsc) && !val->IsInitVal()) { - store = gtNewAssignNode(gtNewLclvNode(tmp, dstTyp), val); + store = impStoreStruct(store, curLevel, pAfterStmt, di, block); } return store; @@ -16072,34 +15898,32 @@ GenTree* Compiler::gtNewRefCOMfield(GenTree* objPtr, result = gtNewIndir(lclTyp, result); } } - else if (varTypeIsIntegral(lclTyp) && genTypeSize(lclTyp) < genTypeSize(TYP_INT)) + else if (varTypeIsSmall(lclTyp)) { // The helper does not extend the small return types. result = gtNewCastNode(genActualType(lclTyp), result, false, lclTyp); } } } - else + else if ((access & CORINFO_ACCESS_ADDRESS) == 0) // OK, now do the indirection { - // OK, now do the indirection - if (access & CORINFO_ACCESS_GET) - { - ClassLayout* layout; - lclTyp = TypeHandleToVarType(pFieldInfo->fieldType, structType, &layout); - result = (lclTyp == TYP_STRUCT) ? gtNewBlkIndir(layout, result) : gtNewIndir(lclTyp, result); - } - else if (access & CORINFO_ACCESS_SET) + ClassLayout* layout; + lclTyp = TypeHandleToVarType(pFieldInfo->fieldType, structType, &layout); + + if ((access & CORINFO_ACCESS_SET) != 0) { + result = (lclTyp == TYP_STRUCT) ? gtNewStoreBlkNode(layout, result, assg)->AsIndir() + : gtNewStoreIndNode(lclTyp, result, assg); if (varTypeIsStruct(lclTyp)) { - result = impAssignStructPtr(result, assg, CHECK_SPILL_ALL); - } - else - { - result = gtNewIndir(lclTyp, result); - result = gtNewAssignNode(result, assg); + result = impStoreStruct(result, CHECK_SPILL_ALL); } } + else + { + assert((access & CORINFO_ACCESS_GET) != 0); + result = (lclTyp == TYP_STRUCT) ? 
gtNewBlkIndir(layout, result) : gtNewIndir(lclTyp, result); + } } return result; @@ -16368,11 +16192,6 @@ bool Compiler::gtSplitTree( return false; } - if (useInf.User->OperIs(GT_ASG) && (useInf.Use == &useInf.User->AsUnOp()->gtOp1)) - { - return true; - } - if (useInf.User->OperIs(GT_STORE_DYN_BLK) && !(*useInf.Use)->OperIs(GT_CNS_INT, GT_INIT_VAL) && (useInf.Use == &useInf.User->AsStoreDynBlk()->Data())) { @@ -16418,11 +16237,6 @@ bool Compiler::gtSplitTree( return false; } - if (node->OperIs(GT_ASG)) - { - return false; - } - GenTree* user = useInf.User; if (user == nullptr) @@ -16474,7 +16288,7 @@ bool Compiler::gtSplitTree( // CALL // LCL_VAR V00 // CALL - // ASG(V00, ...) (setup) + // STORE_LCL_VAR(...) (setup) // LCL_VAR V00 // return; @@ -16536,10 +16350,10 @@ bool Compiler::gtSplitTree( m_compiler->lvaGetDesc(lclNum)->lvIsMultiRegRet = true; } - GenTree* asg = m_compiler->gtNewTempAssign(lclNum, *use); - stmt = m_compiler->fgNewStmtFromTree(asg, m_splitStmt->GetDebugInfo()); - *use = m_compiler->gtNewLclvNode(lclNum, genActualType(*use)); - MadeChanges = true; + GenTree* store = m_compiler->gtNewTempStore(lclNum, *use); + stmt = m_compiler->fgNewStmtFromTree(store, m_splitStmt->GetDebugInfo()); + *use = m_compiler->gtNewLclvNode(lclNum, genActualType(*use)); + MadeChanges = true; } if (stmt != nullptr) @@ -18879,7 +18693,7 @@ FieldSeq::FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fiel // void Compiler::SetOpLclRelatedToSIMDIntrinsic(GenTree* op) { - if ((op != nullptr) && op->OperIs(GT_LCL_VAR)) + if ((op != nullptr) && op->OperIsScalarLocal()) { setLclRelatedToSIMDIntrinsic(op); } @@ -23732,8 +23546,7 @@ GenTree* Compiler::gtNewSimdStoreNode(GenTree* op1, GenTree* op2, CorInfoType si var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(varTypeIsArithmetic(simdBaseType)); - op1 = gtNewIndir(op2->TypeGet(), op1); - return gtNewAssignNode(op1, op2); + return gtNewStoreIndNode(op2->TypeGet(), op1, op2); } 
//---------------------------------------------------------------------------------------------- @@ -25079,7 +24892,7 @@ bool GenTreeHWIntrinsic::OperRequiresAsgFlag() const { // A MemoryStore operation is an assignment and barriers, while they // don't technically do an assignment are modeled the same as - // GT_MEMORYBARRIER which tracks itself as requiring the GT_ASG flag + // GT_MEMORYBARRIER which tracks itself as requiring the GTF_ASG flag return OperIsMemoryStoreOrBarrier(); } diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index a050f2d6e2bb4..432057a70aa6d 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -1688,9 +1688,6 @@ struct GenTree return OperIs(GT_JCC, GT_SETCC, GT_SELECTCC); } - bool OperIsStoreLclVar(unsigned* pLclNum = nullptr); - bool OperIsStoreLcl(unsigned* pLclNum); - #ifdef DEBUG static const GenTreeDebugOperKind gtDebugOperKindTable[]; @@ -1804,7 +1801,7 @@ struct GenTree inline GenTree* gtEffectiveVal(bool commaOnly = false); - inline GenTree* gtCommaAssignVal(); + inline GenTree* gtCommaStoreVal(); // Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself inline GenTree* gtSkipReloadOrCopy(); @@ -2222,11 +2219,9 @@ struct GenTree // Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn). bool IsPhiNode(); - // Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...)); + // Returns "true" iff "*this" is a store (GT_STORE_LCL_VAR) tree that defines an SSA name (lcl = phi(...)); bool IsPhiDefn(); - // Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...)); - // Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG), // we can't synthesize an assignment operator. 
// TODO-Cleanup: Could change this w/o liveset on tree nodes @@ -2349,14 +2344,11 @@ struct GenTree // the PHI node's type must be the same as the local variable's type. // // The PHI node does not represent a definition by itself, it is always -// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR -// node, that is a definition for the same local variable referenced by -// all the used PHI_ARG nodes: +// the value operand of a STORE_LCL_VAR node. The local store node itself +// is the definition for the same local variable referenced by all the +// used PHI_ARG nodes: // -// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7))) -// -// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the -// ASG node. +// STORE_LCL_VAR(PHI(PHI_ARG(V01), PHI_ARG(V01), PHI_ARG(V01))) // // The order of the PHI_ARG uses is not currently relevant and it may be // the same or not as the order of the predecessor blocks. @@ -3137,7 +3129,7 @@ struct GenTreeIntCon : public GenTreeIntConCommon { } - void FixupInitBlkValue(var_types asgType); + void FixupInitBlkValue(var_types type); #if DEBUGGABLE_GENTREE GenTreeIntCon() : GenTreeIntConCommon() @@ -3844,16 +3836,16 @@ struct GenTreeBox : public GenTreeUnOp } // This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value // type - Statement* gtAsgStmtWhenInlinedBoxValue; + Statement* gtDefStmtWhenInlinedBoxValue; // And this is the statement that copies from the value being boxed to the box payload Statement* gtCopyStmtWhenInlinedBoxValue; GenTreeBox(var_types type, GenTree* boxOp, - Statement* asgStmtWhenInlinedBoxValue, + Statement* defStmtWhenInlinedBoxValue, Statement* copyStmtWhenInlinedBoxValue) : GenTreeUnOp(GT_BOX, type, boxOp) - , gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue) + , gtDefStmtWhenInlinedBoxValue(defStmtWhenInlinedBoxValue) , gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue) { } @@ -8695,7 +8687,7 @@ 
inline bool GenTree::OperIsBlkOp() { return true; } - if (OperIs(GT_ASG) || OperIsStore()) + if (OperIsStore()) { return varTypeIsStruct(this); } @@ -8721,46 +8713,6 @@ inline bool GenTree::OperIsCopyBlkOp() return OperIsBlkOp() && !OperIsInitBlkOp(); } -inline bool GenTree::OperIsStoreLclVar(unsigned* pLclNum) // TODO-ASG: delete. -{ - if (OperIs(GT_STORE_LCL_VAR)) - { - if (pLclNum != nullptr) - { - *pLclNum = AsLclVar()->GetLclNum(); - } - return true; - } - if (OperIs(GT_ASG) && gtGetOp1()->OperIs(GT_LCL_VAR)) - { - if (pLclNum != nullptr) - { - *pLclNum = gtGetOp1()->AsLclVar()->GetLclNum(); - } - return true; - } - - *pLclNum = BAD_VAR_NUM; - return false; -} - -inline bool GenTree::OperIsStoreLcl(unsigned* pLclNum) // TODO-ASG: delete. -{ - if (OperIsLocalStore()) - { - *pLclNum = AsLclVarCommon()->GetLclNum(); - return true; - } - if (OperIs(GT_ASG) && gtGetOp1()->OperIsLocal()) - { - *pLclNum = gtGetOp1()->AsLclVarCommon()->GetLclNum(); - return true; - } - - *pLclNum = BAD_VAR_NUM; - return false; -} - //------------------------------------------------------------------------ // IsIntegralConst: Checks whether this is a constant node with the given value // @@ -9097,7 +9049,6 @@ inline GenTree* GenTree::gtGetOp1() const case GT_RSZ: case GT_ROL: case GT_ROR: - case GT_ASG: case GT_EQ: case GT_NE: case GT_LT: @@ -9144,8 +9095,8 @@ inline GenTree* GenTree::gtGetOp2IfPresent() const inline GenTree*& GenTree::Data() { - assert(OperIsStore() || OperIs(GT_STORE_DYN_BLK, GT_ASG)); - return OperIsLocalStore() ? AsLclVarCommon()->Data() : static_cast(this)->gtOp2; + assert(OperIsStore() || OperIs(GT_STORE_DYN_BLK)); + return OperIsLocalStore() ? 
AsLclVarCommon()->Data() : AsIndir()->Data(); } inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */) @@ -9169,7 +9120,7 @@ inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */) } //------------------------------------------------------------------------- -// gtCommaAssignVal - find value being assigned to a comma wrapped store +// gtCommaStoreVal - find value being assigned to a comma wrapped store // // Returns: // tree representing value being stored if this tree represents a @@ -9177,7 +9128,7 @@ inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */) // // original tree, if not. // -inline GenTree* GenTree::gtCommaAssignVal() +inline GenTree* GenTree::gtCommaStoreVal() { GenTree* result = this; diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h index bfa00180ac86b..972fd9cd19418 100644 --- a/src/coreclr/jit/gtlist.h +++ b/src/coreclr/jit/gtlist.h @@ -57,7 +57,6 @@ GTNODE(NEG , GenTreeOp ,0,GTK_UNOP) GTNODE(INTRINSIC , GenTreeIntrinsic ,0,GTK_BINOP|GTK_EXOP) -GTNODE(ASG , GenTreeOp ,0,GTK_BINOP|DBK_NOTLIR) GTNODE(LOCKADD , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR) GTNODE(XAND , GenTreeOp ,0,GTK_BINOP) GTNODE(XORR , GenTreeOp ,0,GTK_BINOP) diff --git a/src/coreclr/jit/helperexpansion.cpp b/src/coreclr/jit/helperexpansion.cpp index 983cf4d256955..62f4d7e0c1d8c 100644 --- a/src/coreclr/jit/helperexpansion.cpp +++ b/src/coreclr/jit/helperexpansion.cpp @@ -24,11 +24,12 @@ static void* GetConstantPointer(Compiler* comp, GenTree* tree) // Save expression to a local and append it as the last statement in exprBlock static GenTree* SpillExpression(Compiler* comp, GenTree* expr, BasicBlock* exprBlock, DebugInfo& debugInfo) { - unsigned const tmpNum = comp->lvaGrabTemp(true DEBUGARG("spilling expr")); - Statement* asgStmt = comp->fgNewStmtAtEnd(exprBlock, comp->gtNewTempAssign(tmpNum, expr), debugInfo); - comp->gtSetStmtInfo(asgStmt); - comp->fgSetStmtSeq(asgStmt); - return comp->gtNewLclvNode(tmpNum, 
genActualType(expr)); + unsigned const tmpNum = comp->lvaGrabTemp(true DEBUGARG("spilling expr")); + Statement* stmt = comp->fgNewStmtAtEnd(exprBlock, comp->gtNewTempStore(tmpNum, expr), debugInfo); + comp->gtSetStmtInfo(stmt); + comp->fgSetStmtSeq(stmt); + + return comp->gtNewLclVarNode(tmpNum); }; //------------------------------------------------------------------------------ @@ -184,7 +185,7 @@ bool Compiler::fgExpandRuntimeLookupsForCall(BasicBlock** pBlock, Statement* stm GenTreeLclVar* rtLookupLcl = nullptr; - // Mostly for Tier0: if the current statement is ASG(LCL, RuntimeLookup) + // Mostly for Tier0: if the current statement is STORE_LCL_VAR(RuntimeLookup) // we can drop it and use that LCL as the destination if (stmt->GetRootNode()->OperIs(GT_STORE_LCL_VAR) && (stmt->GetRootNode()->AsLclVar()->Data() == *callUse)) { @@ -617,7 +618,7 @@ bool Compiler::fgExpandThreadLocalAccessForCall(BasicBlock** pBlock, Statement* // ... // // maxThreadStaticBlocksCondBB (BBJ_COND): [weight: 1.0] - // asgTlsValue = tls_access_code + // tlsValue = tls_access_code // if (maxThreadStaticBlocks < typeIndex) // goto fallbackBb; // diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index b79c2de163c1b..370c3468605d3 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -1846,7 +1846,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(HWIntrinsicInfo::IsMultiReg(intrinsic)); op1 = gtNewSimdHWIntrinsicNode(retType, op1, intrinsic, simdBaseJitType, simdSize); - retNode = impAssignMultiRegTypeToVar(op1, sig->retTypeSigClass DEBUGARG(CorInfoCallConvExtension::Managed)); + retNode = impStoreMultiRegValueToVar(op1, sig->retTypeSigClass DEBUGARG(CorInfoCallConvExtension::Managed)); break; } case NI_AdvSimd_VectorTableLookup: @@ -1873,7 +1873,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { unsigned tmp = lvaGrabTemp(true DEBUGARG("VectorTableLookup temp 
tree")); - impAssignTempGen(tmp, op1, CHECK_SPILL_NONE); + impStoreTemp(tmp, op1, CHECK_SPILL_NONE); op1 = gtNewLclvNode(tmp, argType); } @@ -1913,7 +1913,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { unsigned tmp = lvaGrabTemp(true DEBUGARG("VectorTableLookupExtension temp tree")); - impAssignTempGen(tmp, op2, CHECK_SPILL_NONE); + impStoreTemp(tmp, op2, CHECK_SPILL_NONE); op2 = gtNewLclvNode(tmp, argType); } diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index 0ad8537a53f6a..e62dc8ee4d246 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -2830,7 +2830,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, // Store the type from signature into SIMD base type for convenience divRemIntrinsic->SetSimdBaseJitType(simdBaseJitType); - retNode = impAssignMultiRegTypeToVar(divRemIntrinsic, + retNode = impStoreMultiRegValueToVar(divRemIntrinsic, sig->retTypeSigClass DEBUGARG(CorInfoCallConvExtension::Managed)); break; } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 6002a39dc7bb1..7f66afcacb1da 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -351,15 +351,13 @@ void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) } } - if (tree->OperIs(GT_ASG)) + if (tree->OperIsStore()) { - // For an assignment to a local variable, all references of that - // variable have to be spilled. If it is aliased, all calls and - // indirect accesses have to be spilled - - if (tree->AsOp()->gtOp1->OperIsLocal()) + // For a store to a local variable, all references of that variable have to be spilled. + // If it is aliased, all calls and indirect accesses have to be spilled. 
+ if (tree->OperIsLocalStore()) { - unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); + unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { GenTree* stkTree = verCurrentState.esStack[level].val; @@ -368,7 +366,7 @@ void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) } } // If the access may be to global memory, all side effects have to be spilled. - else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) + else if ((tree->gtFlags & GTF_GLOB_REF) != 0) { for (unsigned level = 0; level < chkLevel; level++) { @@ -411,7 +409,7 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; - // Assignments to unaliased locals require special handling. Here, we look for trees that + // Stores to unaliased locals require special handling. Here, we look for trees that // can modify them and spill the references. In doing so, we make two assumptions: // // 1. All locals which can be modified indirectly are marked as address-exposed or with @@ -424,9 +422,9 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu // things manually. // LclVarDsc* dstVarDsc = nullptr; - if (expr->OperIs(GT_ASG) && expr->AsOp()->gtOp1->OperIsLocal()) + if (expr->OperIsLocalStore()) { - dstVarDsc = lvaGetDesc(expr->AsOp()->gtOp1->AsLclVarCommon()); + dstVarDsc = lvaGetDesc(expr->AsLclVarCommon()); } else if (expr->OperIs(GT_CALL, GT_RET_EXPR)) // The special case of calls with return buffers. { @@ -459,15 +457,15 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu { impSpillLclRefs(lvaGetLclNum(dstVarDsc), chkLevel); - if (expr->OperIs(GT_ASG)) + if (expr->OperIsLocalStore()) { - // For assignments, limit the checking to what the RHS could modify/interfere with. 
- GenTree* rhs = expr->AsOp()->gtOp2; - flags = rhs->gtFlags & GTF_GLOB_EFFECT; + // For assignments, limit the checking to what the value could modify/interfere with. + GenTree* value = expr->AsLclVarCommon()->Data(); + flags = value->gtFlags & GTF_GLOB_EFFECT; // We don't mark indirections off of "aliased" locals with GLOB_REF, but they must still be // considered as such in the interference checking. - if (((flags & GTF_GLOB_REF) == 0) && !impIsAddressInLocal(rhs) && gtHasLocalsWithAddrOp(rhs)) + if (((flags & GTF_GLOB_REF) == 0) && !impIsAddressInLocal(value) && gtHasLocalsWithAddrOp(value)) { flags |= GTF_GLOB_REF; } @@ -489,7 +487,7 @@ void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsu impAppendStmt(stmt); #ifdef FEATURE_SIMD - impMarkContiguousSIMDFieldAssignments(stmt); + impMarkContiguousSIMDFieldStores(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are @@ -627,27 +625,27 @@ Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const Debug * curLevel is the stack level for which the spill to the temp is being done. 
*/ -void Compiler::impAssignTempGen(unsigned lclNum, - GenTree* val, - unsigned curLevel, - Statement** pAfterStmt, /* = NULL */ - const DebugInfo& di, /* = DebugInfo() */ - BasicBlock* block /* = NULL */ - ) +void Compiler::impStoreTemp(unsigned lclNum, + GenTree* val, + unsigned curLevel, + Statement** pAfterStmt, /* = NULL */ + const DebugInfo& di, /* = DebugInfo() */ + BasicBlock* block /* = NULL */ + ) { - GenTree* asg = gtNewTempAssign(lclNum, val, curLevel, pAfterStmt, di, block); + GenTree* store = gtNewTempStore(lclNum, val, curLevel, pAfterStmt, di, block); - if (!asg->IsNothingNode()) + if (!store->IsNothingNode()) { if (pAfterStmt) { - Statement* asgStmt = gtNewStmt(asg, di); - fgInsertStmtAfter(block, *pAfterStmt, asgStmt); - *pAfterStmt = asgStmt; + Statement* storeStmt = gtNewStmt(store, di); + fgInsertStmtAfter(block, *pAfterStmt, storeStmt); + *pAfterStmt = storeStmt; } else { - impAppendTree(asg, curLevel, impCurStmtDI); + impAppendTree(store, curLevel, impCurStmtDI); } } } @@ -758,36 +756,36 @@ bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nod } //------------------------------------------------------------------------ -// impAssignStruct: Create a struct assignment +// impStoreStruct: Import a struct store. // // Arguments: -// dest - the destination of the assignment -// src - the value to be assigned +// store - the store // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: -// The tree that should be appended to the statement list that represents the assignment. +// The tree that should be appended to the statement list that represents the store. // // Notes: -// Temp assignments may be appended to impStmtList if spilling is necessary. +// Temp stores may be appended to impStmtList if spilling is necessary. 
// -GenTree* Compiler::impAssignStruct(GenTree* dest, - GenTree* src, - unsigned curLevel, - Statement** pAfterStmt, /* = nullptr */ - const DebugInfo& di, /* = DebugInfo() */ - BasicBlock* block /* = nullptr */ - ) +GenTree* Compiler::impStoreStruct(GenTree* store, + unsigned curLevel, + Statement** pAfterStmt, /* = nullptr */ + const DebugInfo& di, /* = DebugInfo() */ + BasicBlock* block /* = nullptr */ + ) { - assert(varTypeIsStruct(dest) && (dest->OperIsLocal() || dest->OperIsIndir())); + assert(varTypeIsStruct(store) && store->OperIsStore()); - assert(dest->TypeGet() == src->TypeGet()); - if (dest->TypeIs(TYP_STRUCT)) + GenTree* src = store->Data(); + + assert(store->TypeGet() == src->TypeGet()); + if (store->TypeIs(TYP_STRUCT)) { - assert(ClassLayout::AreCompatible(dest->GetLayout(this), src->GetLayout(this))); + assert(ClassLayout::AreCompatible(store->GetLayout(this), src->GetLayout(this))); } DebugInfo usedDI = di; @@ -809,7 +807,7 @@ GenTree* Compiler::impAssignStruct(GenTree* dest, WellKnownArg wellKnownArgType = srcCall->ShouldHaveRetBufArg() ? WellKnownArg::RetBuffer : WellKnownArg::None; - GenTree* destAddr = impGetStructAddr(dest, CHECK_SPILL_ALL, /* willDeref */ true); + GenTree* destAddr = impGetStructAddr(store, CHECK_SPILL_ALL, /* willDeref */ true); NewCallArg newArg = NewCallArg::Primitive(destAddr).WellKnown(wellKnownArgType); #if !defined(TARGET_ARM) @@ -896,10 +894,10 @@ GenTree* Compiler::impAssignStruct(GenTree* dest, } #ifdef UNIX_AMD64_ABI - if (dest->OperIs(GT_LCL_VAR)) + if (store->OperIs(GT_STORE_LCL_VAR)) { // TODO-Cleanup: delete this quirk. 
- lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; + lvaGetDesc(store->AsLclVar())->lvIsMultiRegRet = true; } #endif // UNIX_AMD64_ABI } @@ -911,7 +909,7 @@ GenTree* Compiler::impAssignStruct(GenTree* dest, if (call->ShouldHaveRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter after 'this' - GenTree* destAddr = impGetStructAddr(dest, CHECK_SPILL_ALL, /* willDeref */ true); + GenTree* destAddr = impGetStructAddr(store, CHECK_SPILL_ALL, /* willDeref */ true); call->gtArgs.InsertAfterThisOrFirst(this, NewCallArg::Primitive(destAddr).WellKnown(WellKnownArg::RetBuffer)); @@ -928,34 +926,33 @@ GenTree* Compiler::impAssignStruct(GenTree* dest, { // Since we are assigning the result of a GT_MKREFANY, "destAddr" must point to a refany. // TODO-CQ: we can do this without address-exposing the local on the LHS. - GenTree* destAddr = impGetStructAddr(dest, CHECK_SPILL_ALL, /* willDeref */ true); + GenTree* destAddr = impGetStructAddr(store, CHECK_SPILL_ALL, /* willDeref */ true); GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); - GenTree* ptrSlot = gtNewIndir(TYP_I_IMPL, destAddr); - GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); - - GenTree* typeSlot = - gtNewIndir(TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); - - // append the assign of the pointer value - GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); + // Append the store of the pointer value. + // TODO-Bug: the pointer value can be a byref. Use its actual type here instead of TYP_I_IMPL. 
+ GenTree* ptrFieldStore = gtNewStoreIndNode(TYP_I_IMPL, destAddr, src->AsOp()->gtOp1); if (pAfterStmt) { - Statement* newStmt = gtNewStmt(asg, usedDI); + Statement* newStmt = gtNewStmt(ptrFieldStore, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { - impAppendTree(asg, curLevel, usedDI); + impAppendTree(ptrFieldStore, curLevel, usedDI); } - // return the assign of the type value, to be appended - return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); + GenTree* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); + GenTree* typeFieldAddr = gtNewOperNode(GT_ADD, genActualType(destAddr), destAddrClone, typeFieldOffset); + GenTree* typeFieldStore = gtNewStoreIndNode(TYP_I_IMPL, typeFieldAddr, src->AsOp()->gtOp2); + + // Return the store of the type value, to be appended. + return typeFieldStore; } else if (src->OperIs(GT_COMMA)) { @@ -975,53 +972,55 @@ GenTree* Compiler::impAssignStruct(GenTree* dest, { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. - // Instead, we're going to sink the assignment below the COMMA. - src->AsOp()->gtOp2 = impAssignStruct(dest, src->AsOp()->gtOp2, curLevel, pAfterStmt, usedDI, block); - src->AddAllEffectsFlags(src->AsOp()->gtOp2); + // Instead, we're going to sink the store below the COMMA. + store->Data() = src->AsOp()->gtOp2; + src->AsOp()->gtOp2 = impStoreStruct(store, curLevel, pAfterStmt, usedDI, block); + src->SetAllEffectsFlags(src->AsOp()->gtOp1, src->AsOp()->gtOp2); + gtUpdateNodeSideEffects(store); return src; } // Evaluate the second thing using recursion. 
- return impAssignStruct(dest, src->AsOp()->gtOp2, curLevel, pAfterStmt, usedDI, block); + store->Data() = src->AsOp()->gtOp2; + gtUpdateNodeSideEffects(store); + return impStoreStruct(store, curLevel, pAfterStmt, usedDI, block); } - if (dest->OperIs(GT_LCL_VAR) && src->IsMultiRegNode()) + if (store->OperIs(GT_STORE_LCL_VAR) && src->IsMultiRegNode()) { - lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; + lvaGetDesc(store->AsLclVar())->lvIsMultiRegRet = true; } - // Return a store node, to be appended. - GenTree* storeNode = gtNewAssignNode(dest, src); - return storeNode; + return store; } //------------------------------------------------------------------------ -// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. +// impStoreStructPtr: Store (copy) the structure from 'src' to 'destAddr'. // // Arguments: -// destAddr - address of the destination of the assignment -// src - source of the assignment -// curLevel - stack level for which a spill may be being done +// destAddr - address of the destination of the store +// value - value to store +// curLevel - stack level for which a spill may be being done // // Return Value: -// The tree that should be appended to the statement list that represents the assignment. +// The tree that should be appended to the statement list that represents the store. // // Notes: -// Temp assignments may be appended to impStmtList if spilling is necessary. +// Temp stores may be appended to impStmtList if spilling is necessary. // -GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, unsigned curLevel) +GenTree* Compiler::impStoreStructPtr(GenTree* destAddr, GenTree* value, unsigned curLevel) { - var_types type = src->TypeGet(); - ClassLayout* layout = (type == TYP_STRUCT) ? 
src->GetLayout(this) : nullptr; - GenTree* dst = gtNewLoadValueNode(type, layout, destAddr); - GenTree* store = impAssignStruct(dst, src, curLevel); + var_types type = value->TypeGet(); + ClassLayout* layout = (type == TYP_STRUCT) ? value->GetLayout(this) : nullptr; + GenTree* store = gtNewStoreValueNode(type, layout, destAddr, value); + store = impStoreStruct(store, curLevel); return store; } //------------------------------------------------------------------------ -// impGetStructAddr: Get the address of a struct value. +// impGetStructAddr: Get the address of a struct value / location. // // Arguments: // structVal - The value in question @@ -1040,6 +1039,8 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal, unsigned curLevel, bool { case GT_BLK: case GT_IND: + case GT_STOREIND: + case GT_STORE_BLK: if (willDeref) { return structVal->AsIndir()->Addr(); @@ -1047,9 +1048,11 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal, unsigned curLevel, bool break; case GT_LCL_VAR: + case GT_STORE_LCL_VAR: return gtNewLclVarAddrNode(structVal->AsLclVar()->GetLclNum(), TYP_BYREF); case GT_LCL_FLD: + case GT_STORE_LCL_FLD: return gtNewLclAddrNode(structVal->AsLclFld()->GetLclNum(), structVal->AsLclFld()->GetLclOffs(), TYP_BYREF); case GT_COMMA: @@ -1061,7 +1064,7 @@ GenTree* Compiler::impGetStructAddr(GenTree* structVal, unsigned curLevel, bool } unsigned lclNum = lvaGrabTemp(true DEBUGARG("location for address-of(RValue)")); - impAssignTempGen(lclNum, structVal, curLevel); + impStoreTemp(lclNum, structVal, curLevel); // The 'return value' is now address of the temp itself. 
return gtNewLclVarAddrNode(lclNum, TYP_BYREF); @@ -1144,7 +1147,7 @@ GenTree* Compiler::impNormStructVal(GenTree* structVal, unsigned curLevel) case GT_RET_EXPR: { unsigned lclNum = lvaGrabTemp(true DEBUGARG("spilled call-like call argument")); - impAssignTempGen(lclNum, structVal, curLevel); + impStoreTemp(lclNum, structVal, curLevel); // The structVal is now the temp itself structVal = gtNewLclvNode(lclNum, structType); @@ -1566,7 +1569,7 @@ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken // Spilling it to a temp improves CQ (mainly in Tier0) unsigned callLclNum = lvaGrabTemp(true DEBUGARG("spilling helperCall")); - impAssignTempGen(callLclNum, helperCall, CHECK_SPILL_NONE); + impStoreTemp(callLclNum, helperCall, CHECK_SPILL_NONE); return gtNewLclvNode(callLclNum, helperCall->TypeGet()); } @@ -1681,7 +1684,7 @@ bool Compiler::impSpillStackEntry(unsigned level, } /* Assign the spilled entry to the temp */ - impAssignTempGen(tnum, tree, level); + impStoreTemp(tnum, tree, level); if (isNewTemp) { @@ -1707,7 +1710,7 @@ bool Compiler::impSpillStackEntry(unsigned level, } } - // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. + // The tree type may be modified by impStoreTemp, so use the type of the lclVar. 
var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; @@ -1911,10 +1914,9 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); - if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && - (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) + if (tree->OperIs(GT_STORE_LCL_VAR) && tree->AsLclVar()->Data()->OperIs(GT_CATCH_ARG)) { - tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); + tree = gtNewLclvNode(tree->AsLclVar()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(clsHnd)); @@ -1958,7 +1960,7 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H // Spill into a temp. unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; - GenTree* argAsg = gtNewTempAssign(tempNum, arg); + GenTree* argStore = gtNewTempStore(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; @@ -1973,11 +1975,11 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); - argStmt = gtNewStmt(argAsg, impCurStmtDI); + argStmt = gtNewStmt(argStore, impCurStmtDI); } else { - argStmt = gtNewStmt(argAsg); + argStmt = gtNewStmt(argStore); } fgInsertStmtAtEnd(newBlk, argStmt); @@ -2016,12 +2018,12 @@ GenTree* Compiler::impCloneExpr(GenTree* tree, unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); - // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which + // impStoreTemp() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). 
So we will get the type from - // the lclVar AFTER calling impAssignTempGen(). + // the lclVar AFTER calling impStoreTemp(). - impAssignTempGen(temp, tree, curLevel, pAfterStmt, impCurStmtDI); + impStoreTemp(temp, tree, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); @@ -3113,7 +3115,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // push temp // The code paths differ slightly below for structs and primitives because // "cpobj" differs in these cases. In one case you get - // impAssignStructPtr(temp+4, expr, clsHnd) + // impStoreStructPtr(temp+4, expr, clsHnd) // and the other you get // *(temp+4) = expr @@ -3168,13 +3170,13 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // Assign the boxed object to the box temp. // - GenTree* asg = gtNewTempAssign(impBoxTemp, op1); - Statement* asgStmt = impAppendTree(asg, CHECK_SPILL_NONE, impCurStmtDI); + GenTree* allocBoxStore = gtNewTempStore(impBoxTemp, op1); + Statement* allocBoxStmt = impAppendTree(allocBoxStore, CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). // - // We do this because impAssignStructPtr (invoked below) will + // We do this because impStoreStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call // has a hidden buffer pointer, So we need to reorder things to avoid // creating out-of-sequence IR. @@ -3190,7 +3192,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // Walk back through the statements in this block, looking for the one // that has this call as the root node. 
// - // Because gtNewTempAssign (above) may have added statements that + // Because gtNewTempStore (above) may have added statements that // feed into the actual assignment we need to move this set of added // statements as a group. // @@ -3229,8 +3231,8 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // Found the call. Move the statements comprising the assignment. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), - asgStmt->GetID(), insertBeforeStmt->GetID()); - assert(asgStmt == impLastStmt); + allocBoxStmt->GetID(), insertBeforeStmt->GetID()); + assert(allocBoxStmt == impLastStmt); do { Statement* movingStmt = impExtractLastStmt(); @@ -3250,7 +3252,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // if (varTypeIsStruct(exprToBox)) { - op1 = impAssignStructPtr(op1, exprToBox, CHECK_SPILL_ALL); + op1 = impStoreStructPtr(op1, exprToBox, CHECK_SPILL_ALL); } else { @@ -3283,8 +3285,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp); } - op1 = gtNewIndir(dstTyp, op1, GTF_IND_NONFAULTING); - op1 = gtNewAssignNode(op1, exprToBox); + op1 = gtNewStoreIndNode(dstTyp, op1, exprToBox, GTF_IND_NONFAULTING); } // Spill eval stack to flush out any pending side effects. @@ -3296,7 +3297,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) op1 = gtNewLclvNode(impBoxTemp, TYP_REF); // Record that this is a "box" node and keep track of the matching parts. - op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt); + op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, allocBoxStmt, copyStmt); // If it is a value class, mark the "box" node. 
We can use this information // to optimise several cases: @@ -3305,8 +3306,7 @@ void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod" op1->gtFlags |= GTF_BOX_VALUE; - assert(op1->IsBoxedValue()); - assert(asg->gtOper == GT_ASG); + assert(op1->IsBoxedValue() && allocBoxStore->OperIs(GT_STORE_LCL_VAR)); } else { @@ -3395,9 +3395,9 @@ void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORI // into lvaNewObjArrayArgs temp. for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--) { - GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); - GenTree* dest = gtNewLclFldNode(lvaNewObjArrayArgs, TYP_INT, sizeof(INT32) * i); - node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node); + GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); + GenTree* store = gtNewStoreLclFldNode(lvaNewObjArrayArgs, TYP_INT, sizeof(INT32) * i, arg); + node = gtNewOperNode(GT_COMMA, node->TypeGet(), store, node); } CorInfoHelpFunc helper = info.compCompHnd->getArrayRank(pResolvedToken->hClass) == 1 ? CORINFO_HELP_NEW_MDARR_RARE @@ -3554,16 +3554,13 @@ GenTree* Compiler::impImportStaticReadOnlyField(CORINFO_FIELD_HANDLE field, CORI } } - JITDUMP("success! Optimizing to ASG(struct, 0)."); - unsigned structTempNum = lvaGrabTemp(true DEBUGARG("folding static ro fld empty struct")); + JITDUMP("Success! 
Optimizing to STORE_LCL_VAR(0)."); + unsigned structTempNum = lvaGrabTemp(true DEBUGARG("folding static readonly field empty struct")); lvaSetStruct(structTempNum, fieldClsHnd, false); - // realType is either struct or SIMD - var_types realType = lvaGetRealType(structTempNum); - GenTreeLclVar* structLcl = gtNewLclvNode(structTempNum, realType); - impAppendTree(gtNewAssignNode(structLcl, gtNewIconNode(0)), CHECK_SPILL_NONE, impCurStmtDI); + impStoreTemp(structTempNum, gtNewIconNode(0), CHECK_SPILL_NONE); - return gtNewLclvNode(structTempNum, realType); + return gtNewLclVarNode(structTempNum); } JITDUMP("getStaticFieldContent returned false - bail out."); @@ -3602,17 +3599,16 @@ GenTree* Compiler::impImportStaticReadOnlyField(CORINFO_FIELD_HANDLE field, CORI return nullptr; } - unsigned structTempNum = lvaGrabTemp(true DEBUGARG("folding static ro fld struct")); + unsigned structTempNum = lvaGrabTemp(true DEBUGARG("folding static readonly field struct")); lvaSetStruct(structTempNum, fieldClsHnd, false); GenTree* constValTree = impImportCnsTreeFromBuffer(buffer, fieldVarType); assert(constValTree != nullptr); - GenTreeLclFld* fieldTree = gtNewLclFldNode(structTempNum, fieldVarType, fldOffset); - GenTree* fieldAsgTree = gtNewAssignNode(fieldTree, constValTree); - impAppendTree(fieldAsgTree, CHECK_SPILL_NONE, impCurStmtDI); + GenTree* fieldStoreTree = gtNewStoreLclFldNode(structTempNum, fieldVarType, fldOffset, constValTree); + impAppendTree(fieldStoreTree, CHECK_SPILL_NONE, impCurStmtDI); - JITDUMP("Folding 'static readonly %s' field to an ASG(LCL, CNS) node\n", eeGetClassName(fieldClsHnd)); + JITDUMP("Folding 'static readonly %s' field to a STORE_LCL_FLD(CNS) node\n", eeGetClassName(fieldClsHnd)); return impCreateLocalNode(structTempNum DEBUGARG(0)); } @@ -3716,18 +3712,19 @@ GenTree* Compiler::impImportCnsTreeFromBuffer(uint8_t* buffer, var_types valueTy } //------------------------------------------------------------------------ -// impImportStaticFieldAccess: 
Generate an access of a static field +// impImportStaticFieldAddress: Generate an address of a static field // // Arguments: // pResolvedToken - resolved token for the static field to access -// access - type of access to the field, distinguishes address-taken vs load/store -// pFieldInfo - EE instructions for accessing the field -// lclTyp - type of the field -// pIsHoistable - optional out parameter - whether any type initialization side effects -// of the returned tree can be hoisted to occur earlier +// access - type of access to the field, distinguishes address vs load/store +// pFieldInfo - EE instructions for accessing the field +// lclTyp - type of the field +// pIndirFlags - in/out parameter for the field indirection flags (e. g. IND_INITCLASS) +// pIsHoistable - optional out parameter - whether any type initialization side +// effects of the returned tree can be hoisted to occur earlier // // Return Value: -// Tree representing the access to the static field +// Tree representing the field's address. // // Notes: // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're @@ -3737,12 +3734,13 @@ GenTree* Compiler::impImportCnsTreeFromBuffer(uint8_t* buffer, var_types valueTy // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. - -GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, - CORINFO_ACCESS_FLAGS access, - CORINFO_FIELD_INFO* pFieldInfo, - var_types lclTyp, - /* OUT */ bool* pIsHoistable) +// +GenTree* Compiler::impImportStaticFieldAddress(CORINFO_RESOLVED_TOKEN* pResolvedToken, + CORINFO_ACCESS_FLAGS access, + CORINFO_FIELD_INFO* pFieldInfo, + var_types lclTyp, + GenTreeFlags* pIndirFlags, + bool* pIsHoistable) { // For statics that are not "boxed", the initial address tree will contain the field sequence. 
// For those that are, we will attach it later, when adding the indirection for the box, since @@ -3784,10 +3782,9 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT outerFldSeq = nullptr; } - bool isHoistable = false; - bool isStaticReadOnlyInitedRef = false; - GenTreeFlags indirFlags = GTF_EMPTY; - unsigned typeIndex = 0; + bool isHoistable = false; + unsigned typeIndex = 0; + GenTreeFlags indirFlags = GTF_EMPTY; GenTree* op1; switch (pFieldInfo->fieldAccessor) { @@ -3939,9 +3936,12 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT default: { -// TODO-CQ: enable this optimization for 32 bit targets. + bool isStaticReadOnlyInitedRef = false; + #ifdef TARGET_64BIT - if (!isBoxedStatic && (lclTyp == TYP_REF) && ((access & CORINFO_ACCESS_ADDRESS) == 0)) + // TODO-CQ: enable this optimization for 32 bit targets. + if (!isBoxedStatic && (lclTyp == TYP_REF) && ((access & CORINFO_ACCESS_GET) != 0) && + ((*pIndirFlags & GTF_IND_VOLATILE) == 0)) { bool isSpeculative = true; if ((info.compCompHnd->getStaticFieldCurrentClass(pResolvedToken->hField, &isSpeculative) != @@ -3952,8 +3952,6 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT } #endif // TARGET_64BIT - assert(hasConstAddr); - assert(pFieldInfo->fieldLookup.addr != nullptr); assert(pFieldInfo->fieldLookup.accessType == IAT_VALUE); size_t fldAddr = reinterpret_cast(pFieldInfo->fieldLookup.addr); GenTreeFlags handleKind; @@ -3972,40 +3970,63 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT isHoistable = true; op1 = gtNewIconHandleNode(fldAddr, handleKind, innerFldSeq); INDEBUG(op1->AsIntCon()->gtTargetHandle = reinterpret_cast(pResolvedToken->hField)); + if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { indirFlags |= GTF_IND_INITCLASS; } + if (isStaticReadOnlyInitedRef) + { + indirFlags |= (GTF_IND_INVARIANT | GTF_IND_NONNULL); + } break; } } if (isBoxedStatic) { - op1 = 
gtNewIndir(TYP_REF, op1, GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL | indirFlags); - indirFlags = GTF_EMPTY; - op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); + op1 = gtNewIndir(TYP_REF, op1, GTF_IND_NONFAULTING | GTF_IND_INVARIANT | GTF_IND_NONNULL | indirFlags); + op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); + + indirFlags &= ~GTF_IND_INITCLASS; } - if (!(access & CORINFO_ACCESS_ADDRESS)) + *pIndirFlags |= indirFlags; + + if (pIsHoistable != nullptr) { - ClassLayout* layout; - lclTyp = TypeHandleToVarType(pFieldInfo->fieldType, pFieldInfo->structType, &layout); + *pIsHoistable = isHoistable; + } + + return op1; +} - // TODO-CQ: mark the indirections non-faulting. - op1 = (lclTyp == TYP_STRUCT) ? gtNewBlkIndir(layout, op1, indirFlags) : gtNewIndir(lclTyp, op1, indirFlags); +//------------------------------------------------------------------------ +// impAnnotateFieldIndir: Set some flags on a field indirection. +// +// Arguments: +// indir - The field indirection node +// +// Notes: +// Exists to preserve previous behavior. New code should not call this. +// +void Compiler::impAnnotateFieldIndir(GenTreeIndir* indir) +{ + if (indir->Addr()->OperIs(GT_FIELD_ADDR)) + { + GenTreeFieldAddr* addr = indir->Addr()->AsFieldAddr(); - if (isStaticReadOnlyInitedRef) + if (addr->IsInstance() && addr->GetFldObj()->OperIs(GT_LCL_ADDR)) { - op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); + indir->gtFlags &= ~GTF_GLOB_REF; + } + else + { + assert((indir->gtFlags & GTF_GLOB_REF) != 0); } - } - if (pIsHoistable) - { - *pIsHoistable = isHoistable; + addr->gtFlags |= GTF_FLD_DEREFERENCED; } - return op1; } // In general try to call this before most of the verification work. 
Most people expect the access @@ -4195,7 +4216,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. - impAssignTempGen(tmpNum, op, CHECK_SPILL_NONE); + impStoreTemp(tmpNum, op, CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); @@ -4242,7 +4263,7 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op) // The backend does not support other struct-producing nodes (e. g. OBJs) as sources of multi-reg returns. // It also does not support assembling a multi-reg node into one register (for RETURN nodes at least). - return impAssignMultiRegTypeToVar(op, info.compMethodInfo->args.retTypeClass DEBUGARG(info.compCallConv)); + return impStoreMultiRegValueToVar(op, info.compMethodInfo->args.retTypeClass DEBUGARG(info.compCallConv)); } // Not a multi-reg return or value, we can simply use it directly. @@ -5662,7 +5683,7 @@ GenTree* Compiler::impCastClassOrIsInstToTree( // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); - impAssignTempGen(tmp, qmarkNull, CHECK_SPILL_NONE); + impStoreTemp(tmp, qmarkNull, CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // @@ -6550,8 +6571,6 @@ void Compiler::impImportBlockCode(BasicBlock* block) } } - op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); - // Stores to pinned locals can have the implicit side effect of "unpinning", so we must spill // things that could depend on the pin. TODO-Bug: which can actually be anything, including // unpinned unaliased locals, not just side-effecting trees. 
@@ -6560,13 +6579,14 @@ void Compiler::impImportBlockCode(BasicBlock* block) impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("Spill before store to pinned local")); } + op1 = gtNewStoreLclVarNode(lclNum, op1); + + // TODO-ASG: delete this zero-diff quirk. Requires some forward substitution work. + op1->gtType = lclTyp; + if (varTypeIsStruct(lclTyp)) { - op1 = impAssignStruct(op2, op1, CHECK_SPILL_ALL); - } - else - { - op1 = gtNewAssignNode(op2, op1); + op1 = impStoreStruct(op1, CHECK_SPILL_ALL); } goto SPILL_APPEND; @@ -6986,13 +7006,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) goto ARR_ST; ARR_ST: - // TODO-Review: this comment is no longer correct. - /* The strict order of evaluation is LHS-operands, RHS-operands, - range-check, and then assignment. However, codegen currently - does the range-check before evaluation the RHS-operands. So to - maintain strict ordering, we spill the stack. */ - - if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) + { + // The strict order of evaluation is 'array', 'index', 'value', range-check + // and then store. However, the tree we create does the range-check before + // evaluating 'value'. So to maintain strict ordering, we spill the stack. + if ((impStackTop().val->gtFlags & GTF_SIDE_EFFECT) != 0) { impSpillSideEffects(false, CHECK_SPILL_ALL DEBUGARG("Strict ordering of exceptions for Array store")); @@ -7016,20 +7034,19 @@ void Compiler::impImportBlockCode(BasicBlock* block) optMethodFlags |= OMF_HAS_ARRAYREF; } - // Create the index node. + // Create the index address node. op1 = gtNewArrayIndexAddr(op3, op1, lclTyp, stelemClsHnd); - op1 = gtNewIndexIndir(op1->AsIndexAddr()); + op2 = impImplicitR4orR8Cast(op2, lclTyp); - // Create the assignment node and append it. + // Create the store node and append it. + ClassLayout* layout = (lclTyp == TYP_STRUCT) ? typGetObjLayout(stelemClsHnd) : nullptr; + op1 = (lclTyp == TYP_STRUCT) ? 
gtNewStoreBlkNode(layout, op1, op2)->AsIndir() + : gtNewStoreIndNode(lclTyp, op1, op2); if (varTypeIsStruct(op1)) { - op1 = impAssignStruct(op1, op2, CHECK_SPILL_ALL); - } - else - { - op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); - op1 = gtNewAssignNode(op1, op2); + op1 = impStoreStruct(op1, CHECK_SPILL_ALL); } + } goto SPILL_APPEND; case CEE_ADD: @@ -8063,7 +8080,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) else { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); - impAssignTempGen(tmpNum, op1, CHECK_SPILL_ALL); + impStoreTemp(tmpNum, op1, CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); assert(lvaTable[tmpNum].lvSingleDef == 0); @@ -8159,8 +8176,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } #endif - op1 = gtNewIndir(lclTyp, op1, impPrefixFlagsToIndirFlags(prefixFlags)); - op1 = gtNewAssignNode(op1, op2); + op1 = gtNewStoreIndNode(lclTyp, op1, op2, impPrefixFlagsToIndirFlags(prefixFlags)); goto SPILL_APPEND; case CEE_LDIND_I1: @@ -8550,7 +8566,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) GenTree* newObjInit = gtNewZeroConNode((lclDsc->TypeGet() == TYP_STRUCT) ? TYP_INT : lclDsc->TypeGet()); - impAssignTempGen(lclNum, newObjInit, CHECK_SPILL_NONE); + impStoreTemp(lclNum, newObjInit, CHECK_SPILL_NONE); } else { @@ -8601,7 +8617,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. - impAssignTempGen(lclNum, op1, CHECK_SPILL_NONE); + impStoreTemp(lclNum, op1, CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; @@ -8813,8 +8829,9 @@ void Compiler::impImportBlockCode(BasicBlock* block) JITDUMP(" %08X", resolvedToken.token); - int aflags = isLoadAddress ? 
CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; - GenTree* obj = nullptr; + GenTreeFlags indirFlags = impPrefixFlagsToIndirFlags(prefixFlags); + int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; + GenTree* obj = nullptr; if ((opcode == CEE_LDFLD) || (opcode == CEE_LDFLDA)) { @@ -8889,8 +8906,6 @@ void Compiler::impImportBlockCode(BasicBlock* block) tiRetVal = verMakeTypeInfo(fieldInfo.fieldType, clsHnd); } - // Perform this check always to ensure that we get field access exceptions even with - // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field @@ -8944,18 +8959,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1->AsFieldAddr()->gtFldMayOverlap = true; } - if (!isLoadAddress) + if (!isLoadAddress && compIsForInlining() && + impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, + impInlineInfo->inlArgInfo)) { - ClassLayout* layout; - lclTyp = TypeHandleToVarType(fieldInfo.fieldType, clsHnd, &layout); - op1 = gtNewFieldIndirNode(lclTyp, layout, op1->AsFieldAddr()); - - if (compIsForInlining() && - impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, - impInlineInfo->inlArgInfo)) - { - impInlineInfo->thisDereferencedFirst = true; - } + impInlineInfo->thisDereferencedFirst = true; } } break; @@ -8965,14 +8973,6 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Legacy TLS access is implemented as intrinsic on x86 only op1 = gtNewFieldAddrNode(TYP_I_IMPL, resolvedToken.hField, nullptr, fieldInfo.offset); op1->gtFlags |= GTF_FLD_TLS; // fgMorphExpandTlsField will handle the transformation. - - if (!isLoadAddress) - { - ClassLayout* layout; - lclTyp = TypeHandleToVarType(fieldInfo.fieldType, fieldInfo.structType, &layout); - op1 = (lclTyp == TYP_STRUCT) ? 
gtNewBlkIndir(layout, op1, GTF_IND_NONFAULTING) - : gtNewIndir(lclTyp, op1, GTF_IND_NONFAULTING); - } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; @@ -9008,8 +9008,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: case CORINFO_FIELD_STATIC_RELOCATABLE: - op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, - lclTyp); + op1 = impImportStaticFieldAddress(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, + lclTyp, &indirFlags); break; case CORINFO_FIELD_INTRINSIC_ZERO: @@ -9050,27 +9050,19 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!"Unexpected fieldAccessor"); } - if (!isLoadAddress) + if (!isLoadAddress && !usesHelper) { - if (prefixFlags & PREFIX_VOLATILE) + ClassLayout* layout; + lclTyp = TypeHandleToVarType(fieldInfo.fieldType, clsHnd, &layout); + op1 = (lclTyp == TYP_STRUCT) ? gtNewBlkIndir(layout, op1, indirFlags) + : gtNewIndir(lclTyp, op1, indirFlags); + if ((indirFlags & GTF_IND_INVARIANT) != 0) { - op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered - - if (!usesHelper) - { - assert(op1->OperIs(GT_IND, GT_BLK)); - op1->gtFlags |= GTF_IND_VOLATILE; - } + // TODO-ASG: delete this zero-diff quirk. + op1->gtFlags |= GTF_GLOB_REF; } - if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) - { - if (!usesHelper) - { - assert(op1->OperIs(GT_IND, GT_BLK)); - op1->gtFlags |= GTF_IND_UNALIGNED; - } - } + impAnnotateFieldIndir(op1->AsIndir()); } // Check if the class needs explicit initialization. 
@@ -9105,8 +9097,9 @@ void Compiler::impImportBlockCode(BasicBlock* block) JITDUMP(" %08X", resolvedToken.token); - int aflags = CORINFO_ACCESS_SET; - GenTree* obj = nullptr; + GenTreeFlags indirFlags = impPrefixFlagsToIndirFlags(prefixFlags); + int aflags = CORINFO_ACCESS_SET; + GenTree* obj = nullptr; eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); @@ -9181,8 +9174,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) case CORINFO_FIELD_STATIC_READYTORUN_HELPER: case CORINFO_FIELD_STATIC_RELOCATABLE: bool isHoistable; - op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, - lclTyp, &isHoistable); + op1 = impImportStaticFieldAddress(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, + lclTyp, &indirFlags, &isHoistable); if (!isHoistable) { @@ -9248,8 +9241,6 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1->AsFieldAddr()->gtFldMayOverlap = true; } - op1 = gtNewFieldIndirNode(lclTyp, layout, op1->AsFieldAddr()); - if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) @@ -9263,9 +9254,6 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Legacy TLS access is implemented as intrinsic on x86 only. op1 = gtNewFieldAddrNode(TYP_I_IMPL, resolvedToken.hField, nullptr, fieldInfo.offset); op1->gtFlags |= GTF_FLD_TLS; // fgMorphExpandTlsField will handle the transformation. - - op1 = (lclTyp == TYP_STRUCT) ? 
gtNewBlkIndir(layout, op1, GTF_IND_NONFAULTING) - : gtNewIndir(lclTyp, op1, GTF_IND_NONFAULTING); break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; @@ -9294,70 +9282,57 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!"Unexpected fieldAccessor"); } - if (varTypeIsStruct(lclTyp)) - { - op1 = impAssignStruct(op1, op2, CHECK_SPILL_ALL); - } - else - { - assert(op1->OperIs(GT_IND)); + /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full + trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during + importation and reads from the union as if it were a long during code generation. Though this + can potentially read garbage, one can get lucky to have this working correctly. - if (prefixFlags & PREFIX_VOLATILE) - { - op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered - op1->gtFlags |= GTF_IND_VOLATILE; - } - if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) - { - op1->gtFlags |= GTF_IND_UNALIGNED; - } - - // Currently, *all* TYP_REF statics are stored inside an "object[]" array that itself - // resides on the managed heap, and so we can use an unchecked write barrier for this - // store. Likewise if we're storing to a field of an on-heap object. - if ((lclTyp == TYP_REF) && - (((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0) || obj->TypeIs(TYP_REF))) - { - op1->gtFlags |= GTF_IND_TGT_HEAP; - } + This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with + /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a + dependency on it. To be backward compatible, we will explicitly add an upward cast here so that + it works correctly always. - /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full - trust apps). 
The reason this works is that JIT stores an i4 constant in Gentree union during - importation and reads from the union as if it were a long during code generation. Though this - can potentially read garbage, one can get lucky to have this working correctly. - - This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with - /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a - dependency on it. To be backward compatible, we will explicitly add an upward cast here so that - it works correctly always. - - Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT - for V4.0. - */ - CLANG_FORMAT_COMMENT_ANCHOR; + Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT + for V4.0. + */ + CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT - // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be - // generated for ARM as well as x86, so the following IR will be accepted: - // STMTx (IL 0x... ???) - // * ASG long - // +--* LCL_VAR long - // \--* CNS_INT int 2 + // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be + // generated for ARM as well as x86, so the following IR will be accepted: + // STMTx (IL 0x... ???) + // * STORE_LCL_VAR long + // \--* CNS_INT int 2 - if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && - varTypeIsLong(op1->TypeGet())) - { - op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); - } + if ((lclTyp != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && + (lclTyp == TYP_LONG)) + { + op2 = gtNewCastNode(lclTyp, op2, false, lclTyp); + } #endif - // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility. - // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility. 
- op2 = impImplicitIorI4Cast(op2, lclTyp); - op2 = impImplicitR4orR8Cast(op2, lclTyp); + // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility. + // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility. + op2 = impImplicitIorI4Cast(op2, lclTyp); + op2 = impImplicitR4orR8Cast(op2, lclTyp); - op1 = gtNewAssignNode(op1, op2); + // Currently, *all* TYP_REF statics are stored inside an "object[]" array that itself + // resides on the managed heap, and so we can use an unchecked write barrier for this + // store. Likewise if we're storing to a field of an on-heap object. + if ((lclTyp == TYP_REF) && + (((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0) || obj->TypeIs(TYP_REF))) + { + indirFlags |= GTF_IND_TGT_HEAP; } + assert(varTypeIsI(op1)); + op1 = (lclTyp == TYP_STRUCT) ? gtNewStoreBlkNode(layout, op1, op2, indirFlags)->AsIndir() + : gtNewStoreIndNode(lclTyp, op1, op2, indirFlags); + impAnnotateFieldIndir(op1->AsIndir()); + + if (varTypeIsStruct(op1)) + { + op1 = impStoreStruct(op1, CHECK_SPILL_ALL); + } goto SPILL_APPEND; } @@ -9987,8 +9962,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); - op2 = gtNewLclvNode(tmp, TYP_STRUCT); - op1 = impAssignStruct(op2, op1, CHECK_SPILL_ALL); + op1 = gtNewStoreLclVarNode(tmp, op1); + op1 = impStoreStruct(op1, CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. 
op2 = gtNewLclVarAddrNode(tmp, TYP_BYREF); @@ -10023,8 +9998,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); - op2 = gtNewLclvNode(tmp, TYP_STRUCT); - op1 = impAssignStruct(op2, op1, CHECK_SPILL_ALL); + op1 = gtNewStoreLclVarNode(tmp, op1); + op1 = impStoreStruct(op1, CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclVarAddrNode(tmp, TYP_BYREF); @@ -10262,9 +10237,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) } op1 = impPopStack().val; - op1 = gtNewLoadValueNode(layout, op1); op2 = gtNewIconNode(0); - op1 = gtNewAssignNode(op1, op2); + op1 = gtNewStoreValueNode(layout, op1, op2); goto SPILL_APPEND; } @@ -10307,8 +10281,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = gtNewLoadValueNode(layout, op2, indirFlags); } - op1 = gtNewLoadValueNode(layout, op1, indirFlags); - op1 = gtNewAssignNode(op1, op2); + op1 = gtNewStoreValueNode(layout, op1, op2, indirFlags); } else { @@ -10356,9 +10329,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dest addr - op1 = gtNewLoadValueNode(layout, op1); op2 = gtNewLoadValueNode(layout, op2); - op1 = gtNewAssignNode(op1, op2); + op1 = gtNewStoreValueNode(layout, op1, op2); goto SPILL_APPEND; } @@ -10382,8 +10354,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); - op1 = gtNewLoadValueNode(layout, op1, impPrefixFlagsToIndirFlags(prefixFlags)); - op1 = impAssignStruct(op1, op2, CHECK_SPILL_ALL); + op1 = gtNewStoreValueNode(layout, op1, op2, impPrefixFlagsToIndirFlags(prefixFlags)); + op1 = impStoreStruct(op1, CHECK_SPILL_ALL); goto SPILL_APPEND; } @@ -10622,7 +10594,7 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) } 
//------------------------------------------------------------------------ -// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple +// impStoreMultiRegValueToVar: ensure calls that return structs in multiple // registers return values to suitable temps. // // Arguments: @@ -10632,13 +10604,13 @@ void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) // Returns: // Tree with reference to struct local to use as call return value. -GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, +GenTree* Compiler::impStoreMultiRegValueToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); lvaSetStruct(tmpNum, hClass, false); - impAssignTempGen(tmpNum, op, CHECK_SPILL_ALL); + impStoreTemp(tmpNum, op, CHECK_SPILL_ALL); LclVarDsc* varDsc = lvaGetDesc(tmpNum); @@ -10837,7 +10809,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) } } - impAssignTempGen(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); + impStoreTemp(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); @@ -10881,7 +10853,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); - impAssignTempGen(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); + impStoreTemp(lvaInlineeReturnSpillTemp, op2, CHECK_SPILL_ALL); } if (compMethodReturnsMultiRegRetType()) @@ -10913,13 +10885,13 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) if (inlRetExpr->gtSubstExpr == nullptr) { inlRetExpr->gtSubstExpr = - impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), - CHECK_SPILL_ALL); + impStoreStructPtr(dest, 
gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), + CHECK_SPILL_ALL); } } else { - inlRetExpr->gtSubstExpr = impAssignStructPtr(dest, op2, CHECK_SPILL_ALL); + inlRetExpr->gtSubstExpr = impStoreStructPtr(dest, op2, CHECK_SPILL_ALL); } } } @@ -10942,7 +10914,7 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); - op2 = impAssignStructPtr(retBuffAddr, op2, CHECK_SPILL_ALL); + op2 = impStoreStructPtr(retBuffAddr, op2, CHECK_SPILL_ALL); impAppendTree(op2, CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly. @@ -11045,12 +11017,9 @@ void Compiler::impPoisonImplicitByrefsBeforeReturn() return; } - GenTreeLclFld* lhs = - new (this, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_STRUCT, lclNum, start, typGetBlkLayout(count)); - lhs->gtFlags |= GTF_GLOB_REF; - - GenTree* asg = gtNewAssignNode(lhs, gtNewOperNode(GT_INIT_VAL, TYP_INT, gtNewIconNode(0xcd))); - impAppendTree(asg, CHECK_SPILL_NONE, DebugInfo()); + GenTree* initValue = gtNewOperNode(GT_INIT_VAL, TYP_INT, gtNewIconNode(0xcd)); + GenTree* store = gtNewStoreLclFldNode(lclNum, TYP_STRUCT, typGetBlkLayout(count), start, initValue); + impAppendTree(store, CHECK_SPILL_NONE, DebugInfo()); }; unsigned startOffs = 0; @@ -11066,10 +11035,7 @@ void Compiler::impPoisonImplicitByrefsBeforeReturn() poisonBlock(startOffs, offs - startOffs); - GenTree* gcField = gtNewLclFldNode(lclNum, gcPtr, offs); - gcField->gtFlags |= GTF_GLOB_REF; - - GenTree* zeroField = gtNewAssignNode(gcField, gtNewZeroConNode(gcPtr)); + GenTree* zeroField = gtNewStoreLclFldNode(lclNum, gcPtr, offs, gtNewZeroConNode(gcPtr)); impAppendTree(zeroField, CHECK_SPILL_NONE, DebugInfo()); startOffs = offs + TARGET_POINTER_SIZE; @@ -11529,7 +11495,7 @@ void Compiler::impImportBlock(BasicBlock* block) if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned 
temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); - impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); + impStoreTemp(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } @@ -11537,7 +11503,7 @@ void Compiler::impImportBlock(BasicBlock* block) if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); - impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); + impStoreTemp(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } @@ -11547,7 +11513,7 @@ void Compiler::impImportBlock(BasicBlock* block) assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); - impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); + impStoreTemp(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index a676724e4bd5d..01e8e4b133429 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -319,7 +319,7 @@ var_types Compiler::impImportCall(OPCODE opcode, return TYP_UNDEF; } - impAssignTempGen(lclNum, stubAddr, CHECK_SPILL_NONE); + impStoreTemp(lclNum, stubAddr, CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node @@ -413,7 +413,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); - impAssignTempGen(lclNum, fptr, CHECK_SPILL_ALL); + impStoreTemp(lclNum, fptr, CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call->AsCall()->gtCallAddr = fptr; @@ -484,7 
+484,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); - impAssignTempGen(lclNum, fptr, CHECK_SPILL_ALL); + impStoreTemp(lclNum, fptr, CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, di); @@ -1386,8 +1386,8 @@ var_types Compiler::impImportCall(OPCODE opcode, unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); - impAssignTempGen(calliSlot, call, CHECK_SPILL_NONE); - // impAssignTempGen can change src arg list and return type for call that returns struct. + impStoreTemp(calliSlot, call, CHECK_SPILL_NONE); + // impStoreTemp can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } @@ -1560,10 +1560,10 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN if (call->IsUnmanaged()) { // Native ABIs do not allow retbufs to alias anything. - // This is allowed by the managed ABI and impAssignStruct will + // This is allowed by the managed ABI and impStoreStruct will // never introduce copies due to this. 
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Retbuf for unmanaged call")); - impAssignTempGen(tmpNum, call, CHECK_SPILL_ALL); + impStoreTemp(tmpNum, call, CHECK_SPILL_ALL); return gtNewLclvNode(tmpNum, lvaGetDesc(tmpNum)->TypeGet()); } @@ -1595,7 +1595,7 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN // No need to assign a multi-reg struct to a local var if: // - It is a tail call or // - The call is marked for in-lining later - return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); + return impStoreMultiRegValueToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); } return call; #endif // FEATURE_MULTIREG_RET @@ -1823,13 +1823,12 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) } // - // We start by looking at the last statement, making sure it's an assignment, and - // that the target of the assignment is the array passed to InitializeArray. + // We start by looking at the last statement, making sure it's a store, and + // that the target of the store is the array passed to InitializeArray. // - GenTree* arrayAssignment = impLastStmt->GetRootNode(); - if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || - (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != - arrayLocalNode->AsLclVarCommon()->GetLclNum())) + GenTree* arrayLocalStore = impLastStmt->GetRootNode(); + if (!arrayLocalStore->OperIs(GT_STORE_LCL_VAR) || !arrayLocalNode->OperIs(GT_LCL_VAR) || + (arrayLocalStore->AsLclVar()->GetLclNum() != arrayLocalNode->AsLclVar()->GetLclNum())) { return nullptr; } @@ -1838,7 +1837,7 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) // Make sure that the object being assigned is a helper call. 
// - GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; + GenTree* newArrayCall = arrayLocalStore->AsLclVar()->Data(); if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; @@ -1849,24 +1848,25 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) // bool isMDArray = false; - - if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && - newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_MAYBEFROZEN) && - newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && - newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && - newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) + switch (newArrayCall->AsCall()->GetHelperNum()) + { + case CORINFO_HELP_NEWARR_1_DIRECT: + case CORINFO_HELP_NEWARR_1_OBJ: + case CORINFO_HELP_NEWARR_1_MAYBEFROZEN: + case CORINFO_HELP_NEWARR_1_VC: + case CORINFO_HELP_NEWARR_1_ALIGN8: #ifdef FEATURE_READYTORUN - && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) + case CORINFO_HELP_READYTORUN_NEWARR_1: #endif - ) - { - if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR) && - newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_RARE)) - { - return nullptr; - } + break; + + case CORINFO_HELP_NEW_MDARR: + case CORINFO_HELP_NEW_MDARR_RARE: + isMDArray = true; + break; - isMDArray = true; + default: + return nullptr; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; @@ -1946,13 +1946,8 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { - return tree->OperIs(GT_ASG) && IsArgsField(tree->gtGetOp1(), index, lvaNewObjArrayArgs); - } - - static bool 
IsArgsField(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) - { - return tree->OperIs(GT_LCL_FLD) && (tree->AsLclFld()->GetLclNum() == lvaNewObjArrayArgs) && - (tree->AsLclFld()->GetLclOffs() == sizeof(INT32) * index); + return tree->OperIs(GT_STORE_LCL_FLD) && (tree->AsLclFld()->GetLclNum() == lvaNewObjArrayArgs) && + (tree->AsLclFld()->GetLclOffs() == (sizeof(INT32) * index)); } static bool IsComma(GenTree* tree) @@ -1977,9 +1972,9 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) if (rank == 1) { - GenTree* lowerBoundAssign = comma->gtGetOp1(); - assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); - GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); + GenTree* lowerBoundStore = comma->gtGetOp1(); + assert(Match::IsArgsFieldInit(lowerBoundStore, argIndex, lvaNewObjArrayArgs)); + GenTree* lowerBoundNode = lowerBoundStore->AsLclVarCommon()->Data(); if (lowerBoundNode->IsIntegralConst(0)) { @@ -1991,9 +1986,9 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) argIndex++; } - GenTree* lengthNodeAssign = comma->gtGetOp1(); - assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); - GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); + GenTree* lengthNodeStore = comma->gtGetOp1(); + assert(Match::IsArgsFieldInit(lengthNodeStore, argIndex, lvaNewObjArrayArgs)); + GenTree* lengthNode = lengthNodeStore->AsLclVarCommon()->Data(); if (!lengthNode->IsCnsIntOrI()) { @@ -2105,17 +2100,16 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) } ClassLayout* blkLayout = typGetBlkLayout(blkSize); + GenTree* srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_CONST_PTR); + GenTree* src = gtNewBlkIndir(blkLayout, srcAddr); GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); - GenTree* dst = gtNewBlkIndir(blkLayout, dstAddr); - - GenTree* srcAddr = gtNewIconHandleNode((size_t)initData, 
GTF_ICON_CONST_PTR); - GenTree* src = gtNewBlkIndir(blkLayout, srcAddr); + GenTree* store = gtNewStoreBlkNode(blkLayout, dstAddr, src); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_InitializeArrayIntrinsics; #endif - return gtNewAssignNode(dst, src); + return store; } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) @@ -2215,15 +2209,13 @@ GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan for CreateSpan")); lvaSetStruct(spanTempNum, spanHnd, false); - GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, OFFSETOF__CORINFO_Span__reference); - GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); - - GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, OFFSETOF__CORINFO_Span__length); - GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); + GenTree* dataFieldStore = + gtNewStoreLclFldNode(spanTempNum, TYP_BYREF, OFFSETOF__CORINFO_Span__reference, pointerValue); + GenTree* lengthFieldStore = gtNewStoreLclFldNode(spanTempNum, TYP_INT, OFFSETOF__CORINFO_Span__length, lengthValue); // Now append a few statements the initialize the span - impAppendTree(lengthFieldAsg, CHECK_SPILL_NONE, impCurStmtDI); - impAppendTree(pointerFieldAsg, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(lengthFieldStore, CHECK_SPILL_NONE, impCurStmtDI); + impAppendTree(dataFieldStore, CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
return impCreateLocalNode(spanTempNum DEBUGARG(0)); @@ -2807,7 +2799,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); - impAssignTempGen(rawHandleSlot, rawHandle, CHECK_SPILL_NONE); + impStoreTemp(rawHandleSlot, rawHandle, CHECK_SPILL_NONE); GenTree* lclVarAddr = gtNewLclVarAddrNode(rawHandleSlot); var_types resultType = JITtype2varType(sig->retType); @@ -3108,11 +3100,11 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, unsigned structLcl = lvaGrabTemp(true DEBUGARG("RuntimeTypeHandle")); lvaSetStruct(structLcl, sig->retTypeClass, false); - GenTree* realHandle = op1->AsCall()->gtArgs.GetUserArgByIndex(0)->GetNode(); - GenTreeLclFld* handleFld = gtNewLclFldNode(structLcl, realHandle->TypeGet(), 0); - GenTree* asgHandleFld = gtNewAssignNode(handleFld, realHandle); - impAppendTree(asgHandleFld, CHECK_SPILL_NONE, impCurStmtDI); - retNode = impCreateLocalNode(structLcl DEBUGARG(0)); + GenTree* realHandle = op1->AsCall()->gtArgs.GetUserArgByIndex(0)->GetNode(); + GenTree* storeHandleFld = gtNewStoreLclFldNode(structLcl, realHandle->TypeGet(), 0, realHandle); + impAppendTree(storeHandleFld, CHECK_SPILL_NONE, impCurStmtDI); + + retNode = gtNewLclVarNode(structLcl); impPopStack(); } break; @@ -4045,7 +4037,7 @@ GenTree* Compiler::impSRCSUnsafeIntrinsic(NamedIntrinsic intrinsic, // In order to change the class handle of the object we need to spill it to a temp // and update class info for that temp. 
unsigned localNum = lvaGrabTemp(true DEBUGARG("updating class info")); - impAssignTempGen(localNum, op, CHECK_SPILL_ALL); + impStoreTemp(localNum, op, CHECK_SPILL_ALL); // NOTE: we still can't say for sure that it is the exact type of the argument lvaSetClass(localNum, inst, /*isExact*/ false); @@ -4697,7 +4689,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic, result = gtNewQmarkNode(baseType, cond, colon); unsigned tmp = lvaGrabTemp(true DEBUGARG("Grabbing temp for LeadingZeroCount Qmark")); - impAssignTempGen(tmp, result, CHECK_SPILL_NONE); + impStoreTemp(tmp, result, CHECK_SPILL_NONE); result = gtNewLclvNode(tmp, baseType); } #elif defined(TARGET_ARM64) @@ -5028,7 +5020,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic, result = gtNewQmarkNode(baseType, cond, colon); unsigned tmp = lvaGrabTemp(true DEBUGARG("Grabbing temp for TrailingZeroCount Qmark")); - impAssignTempGen(tmp, result, CHECK_SPILL_NONE); + impStoreTemp(tmp, result, CHECK_SPILL_NONE); result = gtNewLclvNode(tmp, baseType); } #elif defined(TARGET_ARM64) @@ -5540,7 +5532,7 @@ class SpillRetExprHelper assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); - comp->impAssignTempGen(tmp, retExpr, Compiler::CHECK_SPILL_NONE); + comp->impStoreTemp(tmp, retExpr, Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); assert(comp->lvaTable[tmp].lvSingleDef == 0); @@ -8917,28 +8909,22 @@ GenTree* Compiler::impArrayAccessIntrinsic( GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast(rank), static_cast(arrayElemSize), &inds[0]); - - if (intrinsicName != NI_Array_Address) + switch (intrinsicName) { - if (elemType == TYP_STRUCT) - { - arrElem = gtNewBlkIndir(elemLayout, arrElem); - } - else - { - arrElem = gtNewIndir(elemType, 
arrElem); - } - } + case NI_Array_Set: + assert(!varTypeIsStruct(elemType)); + arrElem = gtNewStoreIndNode(elemType, arrElem, val); + break; - if (intrinsicName == NI_Array_Set) - { - assert(val != nullptr); - return gtNewAssignNode(arrElem, val); - } - else - { - return arrElem; + case NI_Array_Get: + arrElem = (elemType == TYP_STRUCT) ? gtNewBlkIndir(elemLayout, arrElem) : gtNewIndir(elemType, arrElem); + break; + + default: + break; } + + return arrElem; } //------------------------------------------------------------------------ @@ -8983,10 +8969,10 @@ GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) } else { - boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); - GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); - Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; - boxAsgStmt->SetRootNode(boxTempAsg); + boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); + GenTree* boxTempStore = gtNewTempStore(boxTempNum, boxSrc); + Statement* boxStoreStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; + boxStoreStmt->SetRootNode(boxTempStore); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(LCL_VAR_ADDR V%02u)", boxTempNum); diff --git a/src/coreclr/jit/importervectorization.cpp b/src/coreclr/jit/importervectorization.cpp index 67f4c58570d93..bb11a5ab233d2 100644 --- a/src/coreclr/jit/importervectorization.cpp +++ b/src/coreclr/jit/importervectorization.cpp @@ -688,12 +688,12 @@ GenTree* Compiler::impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO strLenOffset + sizeof(int), cmpMode); if (unrolled != nullptr) { - impAssignTempGen(varStrTmp, varStr, CHECK_SPILL_NONE); + impStoreTemp(varStrTmp, varStr, CHECK_SPILL_NONE); if (unrolled->OperIs(GT_QMARK)) { // QMARK nodes cannot reside on the evaluation stack unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); - impAssignTempGen(rootTmp, unrolled, CHECK_SPILL_NONE); + impStoreTemp(rootTmp, unrolled, 
CHECK_SPILL_NONE); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } @@ -844,14 +844,14 @@ GenTree* Compiler::impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* { if (!spanObj->OperIs(GT_LCL_VAR)) { - impAssignTempGen(spanLclNum, spanObj, CHECK_SPILL_NONE); + impStoreTemp(spanLclNum, spanObj, CHECK_SPILL_NONE); } if (unrolled->OperIs(GT_QMARK)) { // QMARK can't be a root node, spill it to a temp unsigned rootTmp = lvaGrabTemp(true DEBUGARG("spilling unroll qmark")); - impAssignTempGen(rootTmp, unrolled, CHECK_SPILL_NONE); + impStoreTemp(rootTmp, unrolled, CHECK_SPILL_NONE); unrolled = gtNewLclvNode(rootTmp, TYP_INT); } diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index b5183f1586e59..a8ac3f6417523 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -131,9 +131,9 @@ class IndirectCallTransformer bool ContainsFatCalli(Statement* stmt) { GenTree* fatPointerCandidate = stmt->GetRootNode(); - if (fatPointerCandidate->OperIs(GT_ASG)) + if (fatPointerCandidate->OperIs(GT_STORE_LCL_VAR)) { - fatPointerCandidate = fatPointerCandidate->gtGetOp2(); + fatPointerCandidate = fatPointerCandidate->AsLclVar()->Data(); } return fatPointerCandidate->IsCall() && fatPointerCandidate->AsCall()->IsFatPointerCandidate(); } @@ -293,7 +293,7 @@ class IndirectCallTransformer FatPointerCallTransformer(Compiler* compiler, BasicBlock* block, Statement* stmt) : Transformer(compiler, block, stmt) { - doesReturnValue = stmt->GetRootNode()->OperIs(GT_ASG); + doesReturnValue = stmt->GetRootNode()->OperIs(GT_STORE_LCL_VAR); origCall = GetCall(stmt); fptrAddress = origCall->gtCallAddr; pointerType = fptrAddress->TypeGet(); @@ -319,8 +319,8 @@ class IndirectCallTransformer GenTreeCall* call = nullptr; if (doesReturnValue) { - assert(tree->OperIs(GT_ASG)); - call = tree->gtGetOp2()->AsCall(); + assert(tree->OperIs(GT_STORE_LCL_VAR)); + call = tree->AsLclVar()->Data()->AsCall(); } 
else { @@ -636,12 +636,12 @@ class IndirectCallTransformer // void SpillArgToTempBeforeGuard(CallArg* arg) { - unsigned tmpNum = compiler->lvaGrabTemp(true DEBUGARG("guarded devirt arg temp")); - GenTree* asgTree = compiler->gtNewTempAssign(tmpNum, arg->GetNode()); - Statement* asgStmt = compiler->fgNewStmtFromTree(asgTree, stmt->GetDebugInfo()); - compiler->fgInsertStmtAtEnd(checkBlock, asgStmt); + unsigned tmpNum = compiler->lvaGrabTemp(true DEBUGARG("guarded devirt arg temp")); + GenTree* store = compiler->gtNewTempStore(tmpNum, arg->GetNode()); + Statement* storeStmt = compiler->fgNewStmtFromTree(store, stmt->GetDebugInfo()); + compiler->fgInsertStmtAtEnd(checkBlock, storeStmt); - arg->SetEarlyNode(compiler->gtNewLclvNode(tmpNum, genActualType(arg->GetNode()))); + arg->SetEarlyNode(compiler->gtNewLclVarNode(tmpNum)); } //------------------------------------------------------------------------ @@ -759,7 +759,7 @@ class IndirectCallTransformer { newThisObj = clonedObj; } - GenTree* assign = compiler->gtNewTempAssign(thisTemp, newThisObj); + GenTree* store = compiler->gtNewTempStore(thisTemp, newThisObj); if (clsHnd != NO_CLASS_HANDLE) { @@ -771,7 +771,7 @@ class IndirectCallTransformer compiler->info.compCompHnd->getMethodClass(inlineInfo->guardedMethodHandle)); } - compiler->fgNewStmtAtEnd(thenBlock, assign); + compiler->fgNewStmtAtEnd(thenBlock, store); // Clone call for the devirtualized case. Note we must use the // special candidate helper and we need to use the new 'this'. 
@@ -844,8 +844,8 @@ class IndirectCallTransformer if (returnTemp != BAD_VAR_NUM) { - GenTree* const assign = compiler->gtNewTempAssign(returnTemp, call); - compiler->fgNewStmtAtEnd(thenBlock, assign); + GenTree* const store = compiler->gtNewTempStore(returnTemp, call); + compiler->fgNewStmtAtEnd(thenBlock, store); } else { @@ -879,7 +879,7 @@ class IndirectCallTransformer if (returnTemp != BAD_VAR_NUM) { - newRetExpr = compiler->gtNewTempAssign(returnTemp, newRetExpr); + newRetExpr = compiler->gtNewTempStore(returnTemp, newRetExpr); } else { @@ -908,8 +908,8 @@ class IndirectCallTransformer if (returnTemp != BAD_VAR_NUM) { - GenTree* assign = compiler->gtNewTempAssign(returnTemp, call); - newStmt->SetRootNode(assign); + GenTree* store = compiler->gtNewTempStore(returnTemp, call); + newStmt->SetRootNode(store); } compiler->fgInsertStmtAtEnd(elseBlock, newStmt); diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h index 7265028fa6b49..4c637fac0fac5 100644 --- a/src/coreclr/jit/jit.h +++ b/src/coreclr/jit/jit.h @@ -761,7 +761,7 @@ class Histogram #define CLFLG_CSE 0x00004 #define CLFLG_REGVAR 0x00008 #define CLFLG_RNGCHKOPT 0x00010 -#define CLFLG_DEADASGN 0x00020 +#define CLFLG_DEADSTORE 0x00020 #define CLFLG_CODEMOTION 0x00040 #define CLFLG_QMARK 0x00080 #define CLFLG_TREETRANS 0x00100 @@ -780,7 +780,7 @@ class Histogram #endif #define CLFLG_MAXOPT \ - (CLFLG_CSE | CLFLG_REGVAR | CLFLG_RNGCHKOPT | CLFLG_DEADASGN | CLFLG_CODEMOTION | CLFLG_QMARK | CLFLG_TREETRANS | \ + (CLFLG_CSE | CLFLG_REGVAR | CLFLG_RNGCHKOPT | CLFLG_DEADSTORE | CLFLG_CODEMOTION | CLFLG_QMARK | CLFLG_TREETRANS | \ CLFLG_INLINING | CLFLG_STRUCTPROMOTE) #define CLFLG_MINOPT (CLFLG_TREETRANS) diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index 8266313e62ee8..f59d83e6fe26f 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -2360,9 +2360,9 @@ bool Compiler::fgCreateFiltersForGenericExceptions() arg->gtFlags |= GTF_ORDER_SIDEEFF; unsigned tempNum = 
lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; - GenTree* argAsg = gtNewTempAssign(tempNum, arg); + GenTree* argStore = gtNewTempStore(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); - fgInsertStmtAtBeg(filterBb, gtNewStmt(argAsg, handlerBb->firstStmt()->GetDebugInfo())); + fgInsertStmtAtBeg(filterBb, gtNewStmt(argStore, handlerBb->firstStmt()->GetDebugInfo())); // Create "catchArg is TException" tree GenTree* runtimeLookup; diff --git a/src/coreclr/jit/lclmorph.cpp b/src/coreclr/jit/lclmorph.cpp index a2d765e872147..6e37c56d24d81 100644 --- a/src/coreclr/jit/lclmorph.cpp +++ b/src/coreclr/jit/lclmorph.cpp @@ -55,19 +55,14 @@ class LocalSequencer final : public GenTreeVisitor } else { - // In the rare case that the root node becomes part of the linked - // list (i.e. top level local) we get a circular linked list here. - if (firstNode == rootNode) + // Clear the links on the sentinel in case it didn't end up in the list. + if (rootNode != lastNode) { - assert(firstNode == lastNode); - lastNode->gtNext = nullptr; - } - else - { - assert(lastNode->gtNext == nullptr); - assert(lastNode->OperIsAnyLocal()); + assert(rootNode->gtPrev == nullptr); + rootNode->gtNext = nullptr; } + lastNode->gtNext = nullptr; firstNode->gtPrev = nullptr; } @@ -83,11 +78,6 @@ class LocalSequencer final : public GenTreeVisitor SequenceLocal(node->AsLclVarCommon()); } - if (node->OperIs(GT_ASG)) - { - SequenceAssignment(node->AsOp()); - } - if (node->IsCall()) { SequenceCall(node->AsCall()); @@ -105,35 +95,10 @@ class LocalSequencer final : public GenTreeVisitor void SequenceLocal(GenTreeLclVarCommon* lcl) { lcl->gtPrev = m_prevNode; - lcl->gtNext = nullptr; m_prevNode->gtNext = lcl; m_prevNode = lcl; } - //------------------------------------------------------------------- - // SequenceAssignment: Post-process an assignment that may have a local on the LHS. 
- // - // Arguments: - // asg - the assignment - // - // Remarks: - // In execution order the LHS of an assignment is normally visited - // before the RHS. However, for our purposes, we would like to see the - // LHS local which is considered the def after the nodes on the RHS, so - // this function corrects where that local appears in the list. - // - // This is handled in later liveness by guaranteeing GTF_REVERSE_OPS is - // set for assignments with tracked locals on the LHS. - // - void SequenceAssignment(GenTreeOp* asg) - { - if (asg->gtGetOp1()->OperIsLocal()) - { - // Correct the point at which the definition of the local on the LHS appears. - MoveNodeToEnd(asg->gtGetOp1()); - } - } - //------------------------------------------------------------------- // SequenceCall: Post-process a call that may define a local. // @@ -141,8 +106,8 @@ class LocalSequencer final : public GenTreeVisitor // call - the call // // Remarks: - // Like above, but calls may also define a local that we would like to - // see after all other operands of the call have been evaluated. + // calls may also define a local that we would like to see + // after all other operands of the call have been evaluated. 
// void SequenceCall(GenTreeCall* call) { @@ -174,15 +139,13 @@ class LocalSequencer final : public GenTreeVisitor // Arguments: // node - The node // - void MoveNodeToEnd(GenTree* node) + void MoveNodeToEnd(GenTreeLclVarCommon* node) { - if (node->gtNext == nullptr) + if ((m_prevNode == node) || (node->gtNext == nullptr)) { return; } - assert(m_prevNode != node); - GenTree* prev = node->gtPrev; GenTree* next = node->gtNext; @@ -190,10 +153,7 @@ class LocalSequencer final : public GenTreeVisitor prev->gtNext = next; next->gtPrev = prev; - m_prevNode->gtNext = node; - node->gtPrev = m_prevNode; - node->gtNext = nullptr; - m_prevNode = node; + SequenceLocal(node); } }; @@ -459,6 +419,8 @@ class LocalAddressVisitor final : public GenTreeVisitor { case GT_IND: case GT_BLK: + case GT_STOREIND: + case GT_STORE_BLK: if (MorphStructField(node->AsIndir(), user)) { goto LOCAL_NODE; @@ -473,11 +435,13 @@ class LocalAddressVisitor final : public GenTreeVisitor break; case GT_LCL_FLD: - MorphLocalField(node, user); + case GT_STORE_LCL_FLD: + MorphLocalField(node->AsLclVarCommon(), user); goto LOCAL_NODE; case GT_LCL_VAR: case GT_LCL_ADDR: + case GT_STORE_LCL_VAR: LOCAL_NODE: { unsigned const lclNum = node->AsLclVarCommon()->GetLclNum(); @@ -525,10 +489,20 @@ class LocalAddressVisitor final : public GenTreeVisitor switch (node->OperGet()) { - case GT_LCL_VAR: - SequenceLocal(node->AsLclVarCommon()); - break; + case GT_STORE_LCL_FLD: + if (node->IsPartialLclFld(m_compiler)) + { + node->gtFlags |= GTF_VAR_USEASG; + } + FALLTHROUGH; + case GT_STORE_LCL_VAR: + assert(TopValue(0).Node() == node->AsLclVarCommon()->Data()); + EscapeValue(TopValue(0), node); + PopValue(); + FALLTHROUGH; + + case GT_LCL_VAR: case GT_LCL_FLD: SequenceLocal(node->AsLclVarCommon()); break; @@ -611,6 +585,13 @@ class LocalAddressVisitor final : public GenTreeVisitor } break; + case GT_STOREIND: + case GT_STORE_BLK: + assert(TopValue(0).Node() == node->AsIndir()->Data()); + EscapeValue(TopValue(0), node); + 
PopValue(); + FALLTHROUGH; + case GT_BLK: case GT_IND: assert(TopValue(1).Node() == node); @@ -661,16 +642,6 @@ class LocalAddressVisitor final : public GenTreeVisitor } break; - case GT_ASG: - EscapeValue(TopValue(0), node); - PopValue(); - EscapeValue(TopValue(0), node); - PopValue(); - assert(TopValue(0).Node() == node); - - SequenceAssignment(node->AsOp()); - break; - case GT_CALL: while (TopValue(0).Node() != node) { @@ -926,11 +897,11 @@ class LocalAddressVisitor final : public GenTreeVisitor void MorphLocalIndir(GenTree** use, unsigned lclNum, unsigned offset, GenTree* user) { GenTree* indir = *use; - ClassLayout* layout = indir->OperIs(GT_BLK) ? indir->AsBlk()->GetLayout() : nullptr; + ClassLayout* layout = indir->OperIs(GT_BLK, GT_STORE_BLK) ? indir->AsBlk()->GetLayout() : nullptr; IndirTransform transform = SelectLocalIndirTransform(indir->AsIndir(), lclNum, offset, user); LclVarDsc* varDsc = m_compiler->lvaGetDesc(lclNum); GenTreeLclVarCommon* lclNode = nullptr; - bool isDef = (user != nullptr) && user->OperIs(GT_ASG) && (user->AsOp()->gtGetOp1() == indir); + bool isDef = indir->OperIs(GT_STOREIND, GT_STORE_BLK); switch (transform) { @@ -944,6 +915,16 @@ class LocalAddressVisitor final : public GenTreeVisitor lclNode = indir->gtGetOp1()->BashToLclVar(m_compiler, lclNum); break; + case IndirTransform::NarrowCast: + assert(varTypeIsIntegral(indir)); + assert(varTypeIsIntegral(varDsc)); + assert(genTypeSize(varDsc) >= genTypeSize(indir)); + assert(!isDef); + + lclNode = indir->gtGetOp1()->BashToLclVar(m_compiler, lclNum); + *use = m_compiler->gtNewCastNode(genActualType(indir), lclNode, false, indir->TypeGet()); + break; + #ifdef FEATURE_HW_INTRINSICS // We have two cases we want to handle: // 1. 
Vector2/3/4 and Quaternion where we have 4x float fields @@ -976,13 +957,10 @@ class LocalAddressVisitor final : public GenTreeVisitor case IndirTransform::WithElement: { - assert(user->OperIs(GT_ASG) && (user->gtGetOp1() == indir)); - GenTree* hwiNode = nullptr; var_types elementType = indir->TypeGet(); - lclNode = indir->BashToLclVar(m_compiler, lclNum); - GenTree* simdLclNode = m_compiler->gtNewLclvNode(lclNum, varDsc->TypeGet()); - GenTree* elementNode = user->gtGetOp2(); + GenTree* simdLclNode = m_compiler->gtNewLclVarNode(lclNum); + GenTree* elementNode = indir->AsIndir()->Data(); if (elementType == TYP_FLOAT) { @@ -1003,8 +981,11 @@ class LocalAddressVisitor final : public GenTreeVisitor CORINFO_TYPE_FLOAT, 16); } - user->AsOp()->gtOp2 = hwiNode; - user->ChangeType(varDsc->TypeGet()); + indir->ChangeType(varDsc->TypeGet()); + indir->ChangeOper(GT_STORE_LCL_VAR); + indir->AsLclVar()->SetLclNum(lclNum); + indir->AsLclVar()->Data() = hwiNode; + lclNode = indir->AsLclVarCommon(); } break; #endif // FEATURE_HW_INTRINSICS @@ -1016,23 +997,31 @@ class LocalAddressVisitor final : public GenTreeVisitor assert(genTypeSize(indir) == genTypeSize(varDsc)); // BOOL <-> UBYTE. indir->ChangeType(varDsc->lvNormalizeOnLoad() ? 
varDsc->TypeGet() : genActualType(varDsc)); } - indir->ChangeOper(GT_LCL_VAR); + if (isDef) + { + GenTree* data = indir->Data(); + indir->ChangeOper(GT_STORE_LCL_VAR); + indir->AsLclVar()->Data() = data; + } + else + { + indir->ChangeOper(GT_LCL_VAR); + } indir->AsLclVar()->SetLclNum(lclNum); lclNode = indir->AsLclVarCommon(); break; - case IndirTransform::NarrowCast: - assert(varTypeIsIntegral(indir)); - assert(varTypeIsIntegral(varDsc)); - assert(genTypeSize(varDsc) >= genTypeSize(indir)); - assert(!isDef); - - lclNode = indir->gtGetOp1()->BashToLclVar(m_compiler, lclNum); - *use = m_compiler->gtNewCastNode(genActualType(indir), lclNode, false, indir->TypeGet()); - break; - case IndirTransform::LclFld: - indir->ChangeOper(GT_LCL_FLD); + if (isDef) + { + GenTree* data = indir->Data(); + indir->ChangeOper(GT_STORE_LCL_FLD); + indir->AsLclFld()->Data() = data; + } + else + { + indir->ChangeOper(GT_LCL_FLD); + } indir->AsLclFld()->SetLclNum(lclNum); indir->AsLclFld()->SetLclOffs(offset); indir->AsLclFld()->SetLayout(layout); @@ -1055,17 +1044,12 @@ class LocalAddressVisitor final : public GenTreeVisitor if (isDef) { - lclNodeFlags |= (GTF_VAR_DEF | GTF_DONT_CSE); + lclNodeFlags |= (indir->AsLclVarCommon()->Data()->gtFlags & GTF_ALL_EFFECT); + lclNodeFlags |= (GTF_ASG | GTF_VAR_DEF); - if (!indir->OperIs(GT_LCL_VAR)) + if (indir->IsPartialLclFld(m_compiler)) { - unsigned lhsSize = indir->TypeIs(TYP_STRUCT) ? layout->GetSize() : genTypeSize(indir); - unsigned lclSize = m_compiler->lvaLclExactSize(lclNum); - if (lhsSize != lclSize) - { - assert(lhsSize < lclSize); - lclNodeFlags |= GTF_VAR_USEASG; - } + lclNodeFlags |= GTF_VAR_USEASG; } } @@ -1091,7 +1075,8 @@ class LocalAddressVisitor final : public GenTreeVisitor // We don't expect indirections that cannot be turned into local nodes here. 
assert((offset <= UINT16_MAX) && !indir->IsVolatile()); - if (IsUnused(indir, user)) + bool isDef = indir->OperIs(GT_STOREIND, GT_STORE_BLK); + if (!isDef && IsUnused(indir, user)) { return IndirTransform::Nop; } @@ -1112,9 +1097,7 @@ class LocalAddressVisitor final : public GenTreeVisitor return IndirTransform::LclVar; } - bool isDef = user->OperIs(GT_ASG) && (user->gtGetOp1() == indir); - - // For small locals on the LHS we can ignore the signed/unsigned diff. + // For small stores we can ignore the signed/unsigned diff. if (isDef && (varTypeToSigned(indir) == varTypeToSigned(varDsc))) { assert(varTypeIsSmall(indir)); @@ -1193,8 +1176,6 @@ class LocalAddressVisitor final : public GenTreeVisitor // bool MorphStructField(GenTreeIndir* node, GenTree* user) { - assert(node->OperIs(GT_IND, GT_BLK)); - GenTree* addr = node->Addr(); if (node->IsVolatile() && (!addr->OperIs(GT_FIELD_ADDR) || ((addr->gtFlags & GTF_FLD_DEREFERENCED) == 0))) { @@ -1214,17 +1195,21 @@ class LocalAddressVisitor final : public GenTreeVisitor if (node->TypeGet() == fieldType) { - GenTreeFlags lclVarFlags = node->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE); - - if ((user != nullptr) && user->OperIs(GT_ASG) && (user->AsOp()->gtOp1 == node)) + if (node->OperIs(GT_STOREIND, GT_STORE_BLK)) { - lclVarFlags |= GTF_VAR_DEF; + GenTree* data = node->Data(); + node->ChangeOper(GT_STORE_LCL_VAR); + node->AsLclVar()->Data() = data; + node->gtFlags |= GTF_VAR_DEF; + } + else + { + node->ChangeOper(GT_LCL_VAR); + node->gtFlags &= (GTF_NODE_MASK | GTF_DONT_CSE); // TODO-ASG-Cleanup: delete this zero-diff quirk. 
} - - node->ChangeOper(GT_LCL_VAR); node->AsLclVar()->SetLclNum(fieldLclNum); - node->gtType = fieldType; - node->gtFlags = lclVarFlags; + node->gtType = fieldType; + return true; } @@ -1298,26 +1283,66 @@ class LocalAddressVisitor final : public GenTreeVisitor } //------------------------------------------------------------------------ - // MorphLocalField: Replaces a GT_LCL_FLD based promoted struct field access - // with a GT_LCL_VAR that references the struct field. + // MorphLocalField: Replaces a local field-based promoted struct field access + // with a GT_LCL_VAR/GT_STORE_LCL_VAR that references the struct field. // // Arguments: - // node - the GT_LCL_FLD node + // node - the GT_LCL_FLD/GT_STORE_LCL_FLD node // user - the node that uses the field // // Notes: // This does not do anything if the field access does not denote // involved a promoted struct local. - // If the GT_LCL_FLD offset does not have a corresponding promoted struct + // If the local field offset does not have a corresponding promoted struct // field then no transformation is done and struct local's enregistration // is disabled. // - void MorphLocalField(GenTree* node, GenTree* user) + void MorphLocalField(GenTreeLclVarCommon* node, GenTree* user) { - assert(node->OperIs(GT_LCL_FLD)); - // TODO-Cleanup: Move fgMorphLocalField implementation here, it's not used anywhere else. 
- m_compiler->fgMorphLocalField(node, user); - m_stmtModified |= node->OperIs(GT_LCL_VAR); + assert(node->OperIs(GT_LCL_FLD, GT_STORE_LCL_FLD)); + + unsigned lclNum = node->AsLclFld()->GetLclNum(); + LclVarDsc* varDsc = m_compiler->lvaGetDesc(lclNum); + + if (varDsc->lvPromoted) + { + unsigned fldOffset = node->AsLclFld()->GetLclOffs(); + unsigned fieldLclNum = m_compiler->lvaGetFieldLocal(varDsc, fldOffset); + + if (fieldLclNum != BAD_VAR_NUM) + { + LclVarDsc* fldVarDsc = m_compiler->lvaGetDesc(fieldLclNum); + var_types fieldType = fldVarDsc->TypeGet(); + + if (node->TypeGet() == fieldType) + { + // There is an existing sub-field we can use. + node->SetLclNum(fieldLclNum); + + if (node->OperIs(GT_STORE_LCL_FLD)) + { + node->SetOper(GT_STORE_LCL_VAR); + node->gtFlags &= ~GTF_VAR_USEASG; + } + else + { + node->SetOper(GT_LCL_VAR); + } + + JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclNum); + } + } + } + + // If we haven't replaced the field, make sure to set DNER on the local. 
+ if (!node->OperIsScalarLocal()) + { + m_compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); + } + else + { + m_stmtModified = true; + } } public: @@ -1406,14 +1431,6 @@ class LocalAddressVisitor final : public GenTreeVisitor } } - void SequenceAssignment(GenTreeOp* asg) - { - if (m_sequencer != nullptr) - { - m_sequencer->SequenceAssignment(asg); - } - } - void SequenceCall(GenTreeCall* call) { if (m_sequencer != nullptr) @@ -1450,9 +1467,9 @@ PhaseStatus Compiler::fgMarkAddressExposedLocals() { #ifdef FEATURE_SIMD if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeIs(TYP_FLOAT) && - stmt->GetRootNode()->OperIs(GT_ASG)) + stmt->GetRootNode()->OperIsStore()) { - madeChanges |= fgMorphCombineSIMDFieldAssignments(block, stmt); + madeChanges |= fgMorphCombineSIMDFieldStores(block, stmt); } #endif @@ -1478,10 +1495,10 @@ PhaseStatus Compiler::fgMarkAddressExposedLocals() #ifdef FEATURE_SIMD //----------------------------------------------------------------------------------- -// fgMorphCombineSIMDFieldAssignments: -// If the RHS of the input stmt is a read for simd vector X Field, then this +// fgMorphCombineSIMDFieldStores: +// If the store of the input stmt is a read for simd vector X Field, then this // function will keep reading next few stmts based on the vector size(2, 3, 4). -// If the next stmts LHS are located contiguous and RHS are also located +// If the next stmts stores are located contiguous and values are also located // contiguous, then we replace those statements with one store. // // Argument: @@ -1489,91 +1506,93 @@ PhaseStatus Compiler::fgMarkAddressExposedLocals() // stmt - Statement*. the stmt node we want to check // // Return Value: -// Whether the assignments were successfully coalesced. +// Whether the stores were successfully coalesced. 
// -bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt) +bool Compiler::fgMorphCombineSIMDFieldStores(BasicBlock* block, Statement* stmt) { - GenTree* tree = stmt->GetRootNode(); - assert(tree->OperGet() == GT_ASG); + GenTree* store = stmt->GetRootNode(); + assert(store->OperIsStore()); - GenTree* originalLHS = tree->AsOp()->gtOp1; - GenTree* prevLHS = tree->AsOp()->gtOp1; - GenTree* prevRHS = tree->AsOp()->gtOp2; + GenTree* prevValue = store->Data(); unsigned index = 0; - var_types simdBaseType = prevRHS->TypeGet(); + var_types simdBaseType = store->TypeGet(); unsigned simdSize = 0; - GenTree* simdLclAddr = getSIMDStructFromField(prevRHS, &index, &simdSize, true); + GenTree* simdLclAddr = getSIMDStructFromField(prevValue, &index, &simdSize, true); if ((simdLclAddr == nullptr) || (index != 0) || (simdBaseType != TYP_FLOAT)) { - // if the RHS is not from a SIMD vector field X, then there is no need to check further. + // if the value is not from a SIMD vector field X, then there is no need to check further. 
return false; } - var_types simdType = getSIMDTypeForSize(simdSize); - int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1; - int remainingAssignments = assignmentsCount; - Statement* curStmt = stmt->GetNextStmt(); - Statement* lastStmt = stmt; + var_types simdType = getSIMDTypeForSize(simdSize); + int storeCount = simdSize / genTypeSize(simdBaseType) - 1; + int remainingStores = storeCount; + GenTree* prevStore = store; + Statement* curStmt = stmt->GetNextStmt(); + Statement* lastStmt = stmt; - while (curStmt != nullptr && remainingAssignments > 0) + while (curStmt != nullptr && remainingStores > 0) { - GenTree* exp = curStmt->GetRootNode(); - if (exp->OperGet() != GT_ASG) + if (!curStmt->GetRootNode()->OperIsStore()) { break; } - GenTree* curLHS = exp->gtGetOp1(); - GenTree* curRHS = exp->gtGetOp2(); - if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS)) + GenTree* curStore = curStmt->GetRootNode(); + GenTree* curValue = curStore->Data(); + + if (!areArgumentsContiguous(prevStore, curStore) || !areArgumentsContiguous(prevValue, curValue)) { break; } - remainingAssignments--; - prevLHS = curLHS; - prevRHS = curRHS; + remainingStores--; + prevStore = curStore; + prevValue = curValue; lastStmt = curStmt; curStmt = curStmt->GetNextStmt(); } - if (remainingAssignments > 0) + if (remainingStores > 0) { - // if the left assignments number is bigger than zero, then this means - // that the assignments are not assigning to the contiguously memory - // locations from same vector. + // if the left store number is bigger than zero, then this means that the stores + // are not assigning to the contiguous memory locations from same vector. 
return false; } - JITDUMP("\nFound contiguous assignments from a SIMD vector to memory.\n"); + JITDUMP("\nFound contiguous stores from a SIMD vector to memory.\n"); JITDUMP("From " FMT_BB ", " FMT_STMT " to " FMT_STMT "\n", block->bbNum, stmt->GetID(), lastStmt->GetID()); - for (int i = 0; i < assignmentsCount; i++) + for (int i = 0; i < storeCount; i++) { fgRemoveStmt(block, stmt->GetNextStmt()); } - GenTree* dstNode; - - if (originalLHS->OperIs(GT_LCL_FLD)) + GenTree* fullValue = gtNewLclvNode(simdLclAddr->AsLclVarCommon()->GetLclNum(), simdType); + GenTree* fullStore; + if (store->OperIs(GT_STORE_LCL_FLD)) { - dstNode = originalLHS; - dstNode->gtType = simdType; + store->gtType = simdType; + store->AsLclFld()->Data() = fullValue; + if (!store->IsPartialLclFld(this)) + { + store->gtFlags &= ~GTF_VAR_USEASG; + } + + fullStore = store; } else { - GenTree* copyBlkDst = CreateAddressNodeForSimdHWIntrinsicCreate(originalLHS, TYP_FLOAT, simdSize); - dstNode = gtNewIndir(simdType, copyBlkDst); + GenTree* dstAddr = CreateAddressNodeForSimdHWIntrinsicCreate(store, simdBaseType, simdSize); + fullStore = gtNewStoreIndNode(simdType, dstAddr, fullValue); } JITDUMP("\n" FMT_BB " " FMT_STMT " (before):\n", block->bbNum, stmt->GetID()); DISPSTMT(stmt); - tree = gtNewAssignNode(dstNode, gtNewLclvNode(simdLclAddr->AsLclVarCommon()->GetLclNum(), simdType)); - - stmt->SetRootNode(tree); + stmt->SetRootNode(fullStore); JITDUMP("\nReplaced " FMT_BB " " FMT_STMT " (after):\n", block->bbNum, stmt->GetID()); DISPSTMT(stmt); diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 79e4b3d40e973..454d65172543b 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -2599,11 +2599,8 @@ void Compiler::lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReaso // that escape to calls leave the local in question address-exposed. 
For this very special case of // a return buffer, however, it is known that the callee will not do anything with it except write // to it, once. As such, we handle addresses of locals that represent return buffers specially: we -// *do not* mark the local address-exposed, instead saving the address' "Use*" in the call node, and -// treat the call much like an "ASG(IND(addr), ...)" node throughout the compilation. A complicating -// factor here is that the address can be moved to the late args list, and we have to fetch it from -// the ASG setup node in that case. In the future, we should make it such that these addresses do -// not ever need temps (currently they may because of conservative GLOB_REF setting on FIELD nodes). +// *do not* mark the local address-exposed and treat the call much like a local store node throughout +// the compilation. // // TODO-ADDR-Bug: currently, we rely on these locals not being present in call argument lists, // outside of the buffer address argument itself, as liveness - currently - treats the location node @@ -2611,7 +2608,6 @@ void Compiler::lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReaso // rather arbitrarily. We should fix liveness to treat the call as the definition point instead and // enable this optimization for "!lvIsTemp" locals. // - void Compiler::lvaSetHiddenBufferStructArg(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 4119444303b43..e6da6e863b9ee 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -241,11 +241,11 @@ void LIR::Use::ReplaceWith(GenTree* replacement) // lclNum - The local to use for temporary storage. If BAD_VAR_NUM (the // default) is provided, this method will create and use a new // local var. 
-// assign - On return, if non null, contains the created assignment node +// pStore - On return, if non null, contains the created store node // // Return Value: The number of the local var used for temporary storage. // -unsigned LIR::Use::ReplaceWithLclVar(Compiler* compiler, unsigned lclNum, GenTree** assign) +unsigned LIR::Use::ReplaceWithLclVar(Compiler* compiler, unsigned lclNum, GenTree** pStore) { assert(IsInitialized()); assert(compiler != nullptr); @@ -259,7 +259,7 @@ unsigned LIR::Use::ReplaceWithLclVar(Compiler* compiler, unsigned lclNum, GenTre lclNum = compiler->lvaGrabTemp(true DEBUGARG("ReplaceWithLclVar is creating a new local variable")); } - GenTreeLclVar* const store = compiler->gtNewTempAssign(lclNum, node)->AsLclVar(); + GenTreeLclVar* const store = compiler->gtNewTempStore(lclNum, node)->AsLclVar(); assert(store != nullptr); assert(store->gtOp1 == node); @@ -273,9 +273,9 @@ unsigned LIR::Use::ReplaceWithLclVar(Compiler* compiler, unsigned lclNum, GenTre JITDUMP("ReplaceWithLclVar created store :\n"); DISPNODE(store); - if (assign != nullptr) + if (pStore != nullptr) { - *assign = store; + *pStore = store; } return lclNum; } diff --git a/src/coreclr/jit/lir.h b/src/coreclr/jit/lir.h index 7a834ca95b600..5cdead7e9383b 100644 --- a/src/coreclr/jit/lir.h +++ b/src/coreclr/jit/lir.h @@ -74,7 +74,7 @@ class LIR final bool IsDummyUse() const; void ReplaceWith(GenTree* replacement); - unsigned ReplaceWithLclVar(Compiler* compiler, unsigned lclNum = BAD_VAR_NUM, GenTree** assign = nullptr); + unsigned ReplaceWithLclVar(Compiler* compiler, unsigned lclNum = BAD_VAR_NUM, GenTree** pStore = nullptr); }; //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index fb46b08ac2bfe..ed36b02dc331d 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -231,14 +231,6 @@ void Compiler::fgPerNodeLocalVarLiveness(GenTree* tree) fgCurMemoryUse |= 
memoryKindSet(GcHeap, ByrefExposed); break; - case GT_ASG: - // An indirect store defines a memory location. - if (!tree->AsOp()->gtGetOp1()->OperIsLocal()) - { - fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed); - } - break; - // We'll assume these are use-then-defs of memory. case GT_LOCKADD: case GT_XORR: @@ -973,15 +965,13 @@ void Compiler::fgExtendDbgLifetimes() // If we haven't already done this ... if (!fgLocalVarLivenessDone) { - // Create a "zero" node - GenTree* zero = gtNewZeroConNode(type); + // Create the initializer. + GenTree* zero = gtNewZeroConNode(type); + GenTree* initNode = gtNewStoreLclVarNode(varNum, zero); - // Create initialization node + // Insert initialization node. if (!block->IsLIR()) { - GenTree* varNode = gtNewLclvNode(varNum, type); - GenTree* initNode = gtNewAssignNode(varNode, zero); - // Create a statement for the initializer, sequence it, and append it to the current BB. Statement* initStmt = gtNewStmt(initNode); gtSetStmtInfo(initStmt); @@ -990,12 +980,8 @@ void Compiler::fgExtendDbgLifetimes() } else { - GenTree* store = new (this, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, type, varNum); - store->AsOp()->gtOp1 = zero; - store->gtFlags |= (GTF_VAR_DEF | GTF_ASG); - LIR::Range initRange = LIR::EmptyRange(); - initRange.InsertBefore(nullptr, zero, store); + initRange.InsertAfter(nullptr, zero, initNode); #if !defined(TARGET_64BIT) DecomposeLongs::DecomposeRange(this, initRange); @@ -1784,7 +1770,7 @@ bool Compiler::fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVar // GenTree* Compiler::fgTryRemoveDeadStoreEarly(Statement* stmt, GenTreeLclVarCommon* cur) { - if (!stmt->GetRootNode()->OperIs(GT_ASG) || (stmt->GetRootNode()->gtGetOp1() != cur)) + if (!stmt->GetRootNode()->OperIsLocalStore() || (stmt->GetRootNode() != cur)) { return cur->gtPrev; } @@ -1794,7 +1780,7 @@ GenTree* Compiler::fgTryRemoveDeadStoreEarly(Statement* stmt, GenTreeLclVarCommo assert(stmt->GetTreeListEnd() == cur); GenTree* 
sideEffects = nullptr; - gtExtractSideEffList(stmt->GetRootNode()->gtGetOp2(), &sideEffects); + gtExtractSideEffList(stmt->GetRootNode()->AsLclVarCommon()->Data(), &sideEffects); if (sideEffects == nullptr) { diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index d14dbc4be0eb5..0af9216dc4661 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -830,7 +830,7 @@ GenTree* Lowering::LowerSwitch(GenTree* node) ReplaceWithLclVar(use); // GT_SWITCH(indexExpression) is now two statements: - // 1. a statement containing 'asg' (for temp = indexExpression) + // 1. a statement containing temp = indexExpression // 2. and a statement with GT_SWITCH(temp) assert(node->gtOper == GT_SWITCH); @@ -5790,7 +5790,7 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call) unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp")); unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2")); - GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result); + GenTree* lclvNodeStore = comp->gtNewTempStore(lclNumTmp, result); GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet()); tmpTree = Offset(tmpTree, vtabOffsOfIndirection); @@ -5800,7 +5800,7 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call) result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs); GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1); - GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base); + GenTree* lclvNodeStore2 = comp->gtNewTempStore(lclNumTmp2, base); LIR::Range range = LIR::SeqTree(comp, lclvNodeStore); JITDUMP("result of obtaining pointer to virtual table:\n"); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index a6b3d0c1ebaa2..bdb91c52cb5d9 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -9844,11 +9844,6 @@ void LinearScan::lsraDispNode(GenTree* tree, LsraTupleDumpMode mode, bool hasDes printf(" V%02u MEM", 
varNum); } } - else if (tree->OperIs(GT_ASG)) - { - assert(!tree->gtHasReg(compiler)); - printf(" asg%s ", GenTree::OpName(tree->OperGet())); - } else { compiler->gtDispNodeName(tree); diff --git a/src/coreclr/jit/lsraarm.cpp b/src/coreclr/jit/lsraarm.cpp index 86b462560d049..c16cc9162b3d4 100644 --- a/src/coreclr/jit/lsraarm.cpp +++ b/src/coreclr/jit/lsraarm.cpp @@ -333,11 +333,6 @@ int LinearScan::BuildNode(GenTree* tree) assert(srcCount == 2); break; - case GT_ASG: - noway_assert(!"We should never hit any assignment operator in lowering"); - srcCount = 0; - break; - case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp index 19d93b946069c..23be48377907c 100644 --- a/src/coreclr/jit/lsraarm64.cpp +++ b/src/coreclr/jit/lsraarm64.cpp @@ -797,11 +797,6 @@ int LinearScan::BuildNode(GenTree* tree) assert(dstCount == 0); break; - case GT_ASG: - noway_assert(!"We should never hit any assignment operator in lowering"); - srcCount = 0; - break; - case GT_ADD: case GT_SUB: if (varTypeIsFloating(tree->TypeGet())) diff --git a/src/coreclr/jit/lsraloongarch64.cpp b/src/coreclr/jit/lsraloongarch64.cpp index e91bbc5b714ef..7ab333630c046 100644 --- a/src/coreclr/jit/lsraloongarch64.cpp +++ b/src/coreclr/jit/lsraloongarch64.cpp @@ -237,11 +237,6 @@ int LinearScan::BuildNode(GenTree* tree) assert(dstCount == 0); break; - case GT_ASG: - noway_assert(!"We should never hit any assignment operator in lowering"); - srcCount = 0; - break; - case GT_ADD: case GT_SUB: if (varTypeIsFloating(tree->TypeGet())) diff --git a/src/coreclr/jit/lsrariscv64.cpp b/src/coreclr/jit/lsrariscv64.cpp index 01b4a44bd349b..7765d2d635cf9 100644 --- a/src/coreclr/jit/lsrariscv64.cpp +++ b/src/coreclr/jit/lsrariscv64.cpp @@ -237,11 +237,6 @@ int LinearScan::BuildNode(GenTree* tree) assert(dstCount == 0); break; - case GT_ASG: - noway_assert(!"We should never hit any assignment operator in lowering"); - srcCount = 0; - break; - case GT_ADD: 
case GT_SUB: if (varTypeIsFloating(tree->TypeGet())) diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index 3cad2d1e47a34..99c83e5e27e66 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -283,11 +283,6 @@ int LinearScan::BuildNode(GenTree* tree) } break; - case GT_ASG: - noway_assert(!"We should never hit any assignment operator in lowering"); - srcCount = 0; - break; - #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 71469e791b48b..53d161e9d9925 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -270,8 +270,7 @@ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, bool morphAr // types, especially checked ones. Refer to the implementation for // what specific casts need to be handled - it is a complex matrix. // 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via -// assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC -// temporary. +// storing the GC tree to an inline non-GC temporary. // 3. "Pushes down" truncating long -> int casts for some operations: // CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)). // The purpose of this is to allow "optNarrowTree" in the post-order @@ -534,14 +533,14 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) // the gc problem and we allow casts to bytes, longs, etc... 
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; - GenTree* asg = gtNewTempAssign(lclNum, oper); + GenTree* store = gtNewTempStore(lclNum, oper); oper->gtType = srcType; // do the real cast GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType); // Generate the comma tree - oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast); + oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), store, cast); return fgMorphTree(oper); } @@ -1687,7 +1686,7 @@ void CallArgs::EvalArgsToTemps(Compiler* comp, GenTreeCall* call) // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany - setupArg = comp->gtNewTempAssign(tmpVarNum, operand); + setupArg = comp->gtNewTempStore(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. argx->AsOp()->gtOp1 = comp->gtNewLclvNode(tmpVarNum, operand->TypeGet()); @@ -1698,7 +1697,7 @@ void CallArgs::EvalArgsToTemps(Compiler* comp, GenTreeCall* call) // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany - setupArg = comp->gtNewTempAssign(tmpVarNum, operand); + setupArg = comp->gtNewTempStore(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. 
argx->AsOp()->gtOp2 = comp->gtNewLclvNode(tmpVarNum, operand->TypeGet()); @@ -1715,7 +1714,7 @@ void CallArgs::EvalArgsToTemps(Compiler* comp, GenTreeCall* call) } else { - setupArg = comp->gtNewTempAssign(tmpVarNum, argx); + setupArg = comp->gtNewTempStore(tmpVarNum, argx); LclVarDsc* varDsc = comp->lvaGetDesc(tmpVarNum); var_types lclVarType = genActualType(argx->gtType); @@ -1861,12 +1860,12 @@ void CallArgs::SetNeedsTemp(CallArg* arg) TempInfo Compiler::fgMakeTemp(GenTree* rhs) { unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgMakeTemp is creating a new local variable")); - GenTree* asg = gtNewTempAssign(lclNum, rhs); + GenTree* store = gtNewTempStore(lclNum, rhs); GenTree* load = gtNewLclvNode(lclNum, genActualType(rhs)); TempInfo tempInfo{}; - tempInfo.asg = asg; - tempInfo.load = load; + tempInfo.store = store; + tempInfo.load = load; return tempInfo; } @@ -1902,7 +1901,7 @@ GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) //------------------------------------------------------------------------------ // fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree, -// and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl) +// and replace *ppTree with comma(store(newLcl, *ppTree), newLcl) // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that @@ -1917,10 +1916,10 @@ GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree) GenTree* subTree = *ppTree; TempInfo tempInfo = fgMakeTemp(subTree); - GenTree* asg = tempInfo.asg; + GenTree* store = tempInfo.store; GenTree* load = tempInfo.load; - *ppTree = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load); + *ppTree = gtNewOperNode(GT_COMMA, subTree->TypeGet(), store, load); return gtClone(load); } @@ -4444,7 +4443,7 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr) (arrRef->OperIs(GT_LCL_VAR) && lvaIsLocalImplicitlyAccessedByRef(arrRef->AsLclVar()->GetLclNum()))) { unsigned arrRefTmpNum = lvaGrabTemp(true 
DEBUGARG("arr expr")); - arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef); + arrRefDefn = gtNewTempStore(arrRefTmpNum, arrRef); arrRef = gtNewLclvNode(arrRefTmpNum, lvaGetDesc(arrRefTmpNum)->TypeGet()); arrRef2 = gtNewLclvNode(arrRefTmpNum, lvaGetDesc(arrRefTmpNum)->TypeGet()); } @@ -4459,7 +4458,7 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr) (index->OperIs(GT_LCL_VAR) && lvaIsLocalImplicitlyAccessedByRef(index->AsLclVar()->GetLclNum()))) { unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr")); - indexDefn = gtNewTempAssign(indexTmpNum, index); + indexDefn = gtNewTempStore(indexTmpNum, index); index = gtNewLclvNode(indexTmpNum, lvaGetDesc(indexTmpNum)->TypeGet()); index2 = gtNewLclvNode(indexTmpNum, lvaGetDesc(indexTmpNum)->TypeGet()); } @@ -5139,15 +5138,13 @@ GenTree* Compiler::fgMorphExpandInstanceField(GenTree* tree, MorphAddrContext* m DISPTREE(tree); // Create the "comma" subtree. - GenTree* asg = nullptr; + GenTree* store = nullptr; unsigned lclNum; if (!objRef->OperIs(GT_LCL_VAR) || lvaIsLocalImplicitlyAccessedByRef(objRef->AsLclVar()->GetLclNum())) { lclNum = fgGetFieldMorphingTemp(tree->AsFieldAddr()); - - // Create the "asg" node - asg = gtNewTempAssign(lclNum, objRef); + store = gtNewTempStore(lclNum, objRef); } else { @@ -5159,10 +5156,10 @@ GenTree* Compiler::fgMorphExpandInstanceField(GenTree* tree, MorphAddrContext* m nullchk->gtFlags |= GTF_ORDER_SIDEEFF; - if (asg != nullptr) + if (store != nullptr) { // Create the "comma" node. - comma = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullchk); + comma = gtNewOperNode(GT_COMMA, TYP_VOID, store, nullchk); } else { @@ -6386,7 +6383,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We have replaced the root node of this stmt and deleted the rest, // but we still have the deleted, dead nodes on the `fgMorph*` stack - // if the root node was an `ASG`, `RET` or `CAST`. + // if the root node was a store, `RET` or `CAST`. 
// Return a zero con node to exit morphing of the old trees without asserts // and forbid POST_ORDER morphing doing something wrong with our call. var_types zeroType = (origCallType == TYP_STRUCT) ? TYP_INT : genActualType(origCallType); @@ -6643,7 +6640,7 @@ GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); // tmp = "this" - doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp); + doBeforeStoreArgsStub = gtNewTempStore(lclNum, objp); if (callNeedsNullCheck) { @@ -6984,7 +6981,7 @@ GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, } unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); - stmts.Push(gtNewTempAssign(temp, *tree)); + stmts.Push(gtNewTempStore(temp, *tree)); *tree = gtNewLclvNode(temp, lvaGetActualType(temp)); return gtNewLclvNode(temp, lvaGetActualType(temp)); }; @@ -7168,12 +7165,12 @@ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) { // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); - GenTree* asg = gtNewTempAssign(lclNum, objp); + GenTree* store = gtNewTempStore(lclNum, objp); // COMMA(tmp = "this", tmp) var_types vt = objp->TypeGet(); GenTree* tmp = gtNewLclvNode(lclNum, vt); - thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp); + thisPtr = gtNewOperNode(GT_COMMA, vt, store, tmp); objp = thisPtr; } @@ -7193,15 +7190,15 @@ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); - GenTree* asg = gtNewTempAssign(lclNum, objp); + GenTree* store = gtNewTempStore(lclNum, objp); // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, vt); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); - asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck); + store = gtNewOperNode(GT_COMMA, TYP_VOID, store, nullcheck); // COMMA(COMMA(tmp = "this", deref(tmp)), tmp) - thisPtr = 
gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt)); + thisPtr = gtNewOperNode(GT_COMMA, vt, store, gtNewLclvNode(lclNum, vt)); } else { @@ -7976,9 +7973,9 @@ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) // var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] // result = [var2] + var2 // - unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab")); - unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative")); - GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab + unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab")); + unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative")); + GenTree* storeVar1 = gtNewTempStore(varNum1, vtab); // var1 = vtab // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), @@ -7991,8 +7988,8 @@ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL)); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] - tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1); - GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = + tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1); + GenTree* storeVar2 = gtNewTempStore(varNum2, tmpTree2); // var2 = // This last indirection is not invariant, but is non-faulting result = gtNewIndir(TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), GTF_IND_NONFAULTING); // [var2] @@ -8000,8 +7997,8 @@ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2 // Now stitch together the two assignment and the calculation of result into a single tree - GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result); - result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, 
asgVar1, commaTree); + GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, storeVar2, result); + result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, storeVar1, commaTree); } else { @@ -8223,7 +8220,7 @@ GenTree* Compiler::getSIMDStructFromField(GenTree* tree, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic /*false*/) { - if (tree->OperIs(GT_IND)) + if (tree->isIndir()) { GenTree* addr = tree->AsIndir()->Addr(); if (!addr->OperIs(GT_FIELD_ADDR) || !addr->AsFieldAddr()->IsInstance()) @@ -8405,11 +8402,6 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA tree->AddAllEffectsFlags(GTF_GLOB_REF); } - if (tree->IsPartialLclFld(this)) - { - tree->gtFlags |= GTF_VAR_USEASG; - } - GenTree* expandedTree = fgMorphExpandLocal(tree->AsLclVarCommon()); if (expandedTree != nullptr) { @@ -9054,7 +9046,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0)) { // Yes they are different so we have to find the merged set - // Iterate over the copy asgn table removing any entries + // Iterate over the assertion table removing any entries // that do not have an exact match in the thenAssertionTab AssertionIndex index = 1; while (index <= optAssertionCount) @@ -9868,8 +9860,8 @@ void Compiler::fgTryReplaceStructLocalWithField(GenTree* tree) return; } - // With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)` - // and `ASG` will be transformed into field by field copy without parent local referencing if + // With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `STORE_LCL_VAR(src)` + // and `STORE_LCL_VAR` will be transformed into field by field copy without parent local referencing if // possible. 
GenTreeLclVar* lclVar = tree->AsLclVar(); unsigned lclNum = lclVar->GetLclNum(); @@ -11424,7 +11416,7 @@ GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) #endif // TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for - // gtNewTempAssign`. + // gtNewTempStore`. if (canFold && (genReturnBB == nullptr)) { // Fold even if types do not match, lowering will handle it. This allows the local @@ -11761,7 +11753,7 @@ GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp) // Promoted structs after morph must be in one of two states: // a) Fully eliminated from the IR (independent promotion) OR only be - // used by "special" nodes (e. g. LHS of ASGs for multi-reg structs). + // used by "special" nodes (e. g. multi-reg stores). // b) Marked as do-not-enregister (dependent promotion). // // So here we preserve this invariant and mark any promoted structs as do-not-enreg. @@ -11930,8 +11922,7 @@ GenTree* Compiler::fgMorphModToZero(GenTreeOp* tree) // After: // * RETURN int // \--* COMMA int -// +--* ASG int -// | +--* LCL_VAR int V03 tmp1 +// +--* STORE_LCL_VAR int V03 tmp1 // | \--* MUL int // | +--* LCL_VAR int V00 arg0 // | \--* LCL_VAR int V00 arg0 @@ -12023,7 +12014,7 @@ GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree) // within one another for their sequence order. 
for (int i = tempInfoCount - 1; i >= 0; i--) { - result = gtNewOperNode(GT_COMMA, type, tempInfos[i].asg, result); + result = gtNewOperNode(GT_COMMA, type, tempInfos[i].store, result); } #ifdef DEBUG @@ -13762,7 +13753,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) Statement* pAfterStatement = lastStmt; const DebugInfo& di = lastStmt->GetDebugInfo(); GenTree* tree = - gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), CHECK_SPILL_NONE, &pAfterStatement, di, block); + gtNewTempStore(genReturnLocal, ret->gtGetOp1(), CHECK_SPILL_NONE, &pAfterStatement, di, block); if (tree->OperIsCopyBlkOp()) { tree = fgMorphCopyBlock(tree); @@ -13778,7 +13769,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) } else { - // gtNewTempAssign inserted additional statements after last + // gtNewTempStore inserted additional statements after last fgRemoveStmt(block, lastStmt); Statement* newStmt = gtNewStmt(tree, di); fgInsertStmtAfter(block, pAfterStatement, newStmt); @@ -14687,72 +14678,6 @@ PhaseStatus Compiler::fgPromoteStructs() return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } -void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) -{ - noway_assert(tree->OperGet() == GT_LCL_FLD); - - unsigned lclNum = tree->AsLclFld()->GetLclNum(); - LclVarDsc* varDsc = lvaGetDesc(lclNum); - - if (varTypeIsStruct(varDsc)) - { - if (varDsc->lvPromoted) - { - // Promoted struct - unsigned fldOffset = tree->AsLclFld()->GetLclOffs(); - unsigned fieldLclIndex = 0; - LclVarDsc* fldVarDsc = nullptr; - - if (fldOffset != BAD_VAR_NUM) - { - fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); - noway_assert(fieldLclIndex != BAD_VAR_NUM); - fldVarDsc = lvaGetDesc(fieldLclIndex); - } - - var_types treeType = tree->TypeGet(); - var_types fieldType = fldVarDsc->TypeGet(); - if (fldOffset != BAD_VAR_NUM && - ((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1))) - { - // There is an existing sub-field we can use. 
- tree->AsLclFld()->SetLclNum(fieldLclIndex); - - // The field must be an enregisterable type; otherwise it would not be a promoted field. - // The tree type may not match, e.g. for return types that have been morphed, but both - // must be enregisterable types. - assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType)); - - tree->ChangeOper(GT_LCL_VAR); - assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex); - - tree->gtType = fldVarDsc->TypeGet(); - - if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) - { - tree->gtFlags |= GTF_VAR_DEF; - tree->gtFlags |= GTF_DONT_CSE; - } - JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex); - } - else - { -#ifdef DEBUG - // We can't convert this guy to a float because he really does have his - // address taken.. - varDsc->lvKeepType = 1; -#endif // DEBUG - } - } - } - - // If we haven't replaced the field, make sure to set DNER on the local. - if (tree->OperIs(GT_LCL_FLD)) - { - lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); - } -} - //------------------------------------------------------------------------ // fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from // struct to pointer). 
Also choose (based on address-exposed analysis) @@ -15431,9 +15356,9 @@ bool Compiler::fgMorphArrayOpsStmt(MorphMDArrayTempCache* pTempCache, BasicBlock GenTreeMDArr* const mdArrLowerBound = m_compiler->gtNewMDArrLowerBound(m_compiler->gtNewLclvNode(arrLcl, TYP_REF), i, rank, m_block); // unsigned effIdxLcl = m_compiler->lvaGrabTemp(true DEBUGARG("MD array effective index")); - unsigned effIdxLcl = m_pTempCache->GrabTemp(TYP_INT); - GenTree* const effIndex = m_compiler->gtNewOperNode(GT_SUB, TYP_INT, idx, mdArrLowerBound); - GenTree* const asgNode = m_compiler->gtNewTempAssign(effIdxLcl, effIndex); + unsigned effIdxLcl = m_pTempCache->GrabTemp(TYP_INT); + GenTree* const effIndex = m_compiler->gtNewOperNode(GT_SUB, TYP_INT, idx, mdArrLowerBound); + GenTree* const effIdxLclDef = m_compiler->gtNewTempStore(effIdxLcl, effIndex); GenTreeMDArr* const mdArrLength = m_compiler->gtNewMDArrLen(m_compiler->gtNewLclvNode(arrLcl, TYP_REF), i, rank, m_block); GenTreeBoundsChk* const arrBndsChk = new (m_compiler, GT_BOUNDS_CHECK) @@ -15441,7 +15366,7 @@ bool Compiler::fgMorphArrayOpsStmt(MorphMDArrayTempCache* pTempCache, BasicBlock GenTree* const boundsCheckComma = m_compiler->gtNewOperNode(GT_COMMA, TYP_INT, arrBndsChk, m_compiler->gtNewLclvNode(effIdxLcl, TYP_INT)); - GenTree* const idxComma = m_compiler->gtNewOperNode(GT_COMMA, TYP_INT, asgNode, boundsCheckComma); + GenTree* const idxComma = m_compiler->gtNewOperNode(GT_COMMA, TYP_INT, effIdxLclDef, boundsCheckComma); // If it's not the first index, accumulate with the previously created calculation. 
if (i > 0) @@ -15488,10 +15413,10 @@ bool Compiler::fgMorphArrayOpsStmt(MorphMDArrayTempCache* pTempCache, BasicBlock { if (idxToCopy[i - 1] != BAD_VAR_NUM) { - GenTree* const idxLclAsg = - m_compiler->gtNewTempAssign(idxToCopy[i - 1], arrElem->gtArrInds[i - 1]); + GenTree* const idxLclStore = + m_compiler->gtNewTempStore(idxToCopy[i - 1], arrElem->gtArrInds[i - 1]); fullExpansion = - m_compiler->gtNewOperNode(GT_COMMA, fullExpansion->TypeGet(), idxLclAsg, fullExpansion); + m_compiler->gtNewOperNode(GT_COMMA, fullExpansion->TypeGet(), idxLclStore, fullExpansion); } } } @@ -15499,8 +15424,9 @@ bool Compiler::fgMorphArrayOpsStmt(MorphMDArrayTempCache* pTempCache, BasicBlock // If we needed to create a new local for the array object, copy that before everything. if (newArrLcl != BAD_VAR_NUM) { - GenTree* const arrLclAsg = m_compiler->gtNewTempAssign(newArrLcl, arrObj); - fullExpansion = m_compiler->gtNewOperNode(GT_COMMA, fullExpansion->TypeGet(), arrLclAsg, fullExpansion); + GenTree* const arrLclStore = m_compiler->gtNewTempStore(newArrLcl, arrObj); + fullExpansion = + m_compiler->gtNewOperNode(GT_COMMA, fullExpansion->TypeGet(), arrLclStore, fullExpansion); } JITDUMP("fgMorphArrayOpsStmt (before remorph):\n"); diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index 329e9c1cbddb2..074abb1f77cd3 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -39,20 +39,20 @@ class MorphInitBlockHelper GenTree* m_store = nullptr; GenTree* m_src = nullptr; - unsigned m_blockSize = 0; - ClassLayout* m_blockLayout = nullptr; - unsigned m_dstLclNum = BAD_VAR_NUM; - GenTreeLclVarCommon* m_dstLclNode = nullptr; - LclVarDsc* m_dstVarDsc = nullptr; - unsigned m_dstLclOffset = 0; - bool m_dstUseLclFld = false; - bool m_dstSingleLclVarAsg = false; + unsigned m_blockSize = 0; + ClassLayout* m_blockLayout = nullptr; + unsigned m_dstLclNum = BAD_VAR_NUM; + GenTreeLclVarCommon* m_dstLclNode = nullptr; + LclVarDsc* m_dstVarDsc = nullptr; 
+ unsigned m_dstLclOffset = 0; + bool m_dstUseLclFld = false; + bool m_dstSingleStoreLclVar = false; enum class BlockTransformation { Undefined, FieldByField, - OneAsgBlock, + OneStoreBlock, StructBlock, SkipMultiRegSrc, SkipSingleRegCallSrc, @@ -101,9 +101,8 @@ MorphInitBlockHelper::MorphInitBlockHelper(Compiler* comp, GenTree* store, bool } //------------------------------------------------------------------------ -// Morph: transform the asg to a possible better form and changes its children -// to an appropriate form for later phases, for example, adds SIMD_INIT nodes -// or sets lvDoNotEnregister on locals. +// Morph: transform the store to a possible better form and changes its +// operands to an appropriate form for later phases. // // Return Value: // A possibly modified tree to perform the block operation. @@ -173,8 +172,8 @@ GenTree* MorphInitBlockHelper::Morph() } //------------------------------------------------------------------------ -// PrepareDst: Transform the asg destination to an appropriate form and initialize member fields -// with information about it. +// PrepareDst: Initialize member fields with information about the store's +// destination. // void MorphInitBlockHelper::PrepareDst() { @@ -253,15 +252,15 @@ void MorphInitBlockHelper::PropagateExpansionAssertions() { // Consider doing this for FieldByField as well // - if (m_comp->optLocalAssertionProp && (m_transformationDecision == BlockTransformation::OneAsgBlock)) + if (m_comp->optLocalAssertionProp && (m_transformationDecision == BlockTransformation::OneStoreBlock)) { m_comp->optAssertionGen(m_store); } } //------------------------------------------------------------------------ -// PrepareSrc: Transform the asg src to an appropriate form and initialize member fields -// with information about it. +// PrepareSrc: Initialize member fields with information about the store's +// source value. 
// void MorphInitBlockHelper::PrepareSrc() { @@ -278,7 +277,7 @@ void MorphInitBlockHelper::TrySpecialCases() } //------------------------------------------------------------------------ -// MorphStructCases: transforms the asg as field by field init or keeps it as a block init +// MorphStructCases: transforms the store as field by field init or keeps it as a block init // but sets appropriate flags for the involved lclVars. // // Assumptions: @@ -327,29 +326,24 @@ void MorphInitBlockHelper::MorphStructCases() // Notes: // This transforms a single block initialization assignment like: // -// * ASG struct (init) -// +--* BLK(12) struct -// | \--* ADDR long -// | \--* LCL_VAR struct(P) V02 loc0 -// | \--* int V02.a (offs=0x00) -> V06 tmp3 -// | \--* ubyte V02.c (offs=0x04) -> V07 tmp4 -// | \--* float V02.d (offs=0x08) -> V08 tmp5 +// * STORE_BLK struct<12> (init) +// +--* LCL_ADDR byref V02 loc0 +// | \-- int V02.a (offs=0x00) -> V06 tmp3 +// | \-- ubyte V02.c (offs=0x04) -> V07 tmp4 +// | \-- float V02.d (offs=0x08) -> V08 tmp5 // \--* INIT_VAL int // \--* CNS_INT int 42 // -// into a COMMA tree of assignments that initialize each promoted struct +// into a COMMA tree of stores that initialize each promoted struct // field: // // * COMMA void // +--* COMMA void -// | +--* ASG int -// | | +--* LCL_VAR int V06 tmp3 +// | +--* STORE_LCL_VAR int V06 tmp3 // | | \--* CNS_INT int 0x2A2A2A2A -// | \--* ASG ubyte -// | +--* LCL_VAR ubyte V07 tmp4 +// | \--* STORE_LCL_VAR ubyte V07 tmp4 // | \--* CNS_INT int 42 -// \--* ASG float -// +--* LCL_VAR float V08 tmp5 +// \--* STORE_LCL_VAR float V08 tmp5 // \--* CNS_DBL float 1.5113661732714390e-13 // void MorphInitBlockHelper::TryInitFieldByField() @@ -428,7 +422,7 @@ void MorphInitBlockHelper::TryInitFieldByField() var_types fieldType = fieldDesc->TypeGet(); GenTree* src = m_comp->gtNewConWithPattern(fieldType, initPattern); - GenTree* store = m_comp->gtNewTempAssign(fieldLclNum, src); + GenTree* store = 
m_comp->gtNewTempStore(fieldLclNum, src); if (m_comp->optLocalAssertionProp) { @@ -460,7 +454,7 @@ void MorphInitBlockHelper::TryInitFieldByField() // Transforms patterns like "STORE_BLK(LCL_VAR_ADDR, 0)" into simple // stores: "STORE_LCL_VAR(0)". // -// If successful, will set "m_transformationDecision" to "OneAsgBlock". +// If successful, will set "m_transformationDecision" to "OneStoreBlock". // void MorphInitBlockHelper::TryPrimitiveInit() { @@ -482,7 +476,7 @@ void MorphInitBlockHelper::TryPrimitiveInit() m_store->gtFlags |= GTF_VAR_DEF; m_result = m_store; - m_transformationDecision = BlockTransformation::OneAsgBlock; + m_transformationDecision = BlockTransformation::OneStoreBlock; } } @@ -508,7 +502,7 @@ void MorphInitBlockHelper::TryPrimitiveInit() // / \. // C D // -// We'd like downstream code to just see and expand ASG(IND(B), D). +// We'd like downstream code to just see and expand STOREIND(B, D). // We will produce: // // COMMA @@ -569,7 +563,7 @@ GenTree* MorphInitBlockHelper::EliminateCommas(GenTree** commaPool) { unsigned lhsAddrLclNum = m_comp->lvaGrabTemp(true DEBUGARG("Block morph LHS addr")); - addSideEffect(m_comp->gtNewTempAssign(lhsAddrLclNum, addr)); + addSideEffect(m_comp->gtNewTempStore(lhsAddrLclNum, addr)); m_store->AsUnOp()->gtOp1 = m_comp->gtNewLclvNode(lhsAddrLclNum, genActualType(addr)); m_comp->gtUpdateNodeSideEffects(m_store); } @@ -597,7 +591,7 @@ class MorphCopyBlockHelper : public MorphInitBlockHelper static GenTree* MorphCopyBlock(Compiler* comp, GenTree* tree); protected: - MorphCopyBlockHelper(Compiler* comp, GenTree* asg); + MorphCopyBlockHelper(Compiler* comp, GenTree* store); void PrepareSrc() override; @@ -613,15 +607,15 @@ class MorphCopyBlockHelper : public MorphInitBlockHelper } protected: - unsigned m_srcLclNum = BAD_VAR_NUM; - LclVarDsc* m_srcVarDsc = nullptr; - GenTreeLclVarCommon* m_srcLclNode = nullptr; - bool m_srcUseLclFld = false; - unsigned m_srcLclOffset = 0; - bool m_srcSingleLclVarAsg = false; + unsigned 
m_srcLclNum = BAD_VAR_NUM; + LclVarDsc* m_srcVarDsc = nullptr; + GenTreeLclVarCommon* m_srcLclNode = nullptr; + bool m_srcUseLclFld = false; + unsigned m_srcLclOffset = 0; + bool m_srcSingleStoreLclVar = false; - bool m_dstDoFldAsg = false; - bool m_srcDoFldAsg = false; + bool m_dstDoFldStore = false; + bool m_srcDoFldStore = false; private: bool CanReuseAddressForDecomposedStore(GenTree* addr); @@ -659,8 +653,8 @@ MorphCopyBlockHelper::MorphCopyBlockHelper(Compiler* comp, GenTree* store) : Mor } //------------------------------------------------------------------------ -// PrepareSrc: Transform the asg src to an appropriate form and initialize member fields -// with information about it. +// PrepareSrc: Initialize member fields with information about the store's +// source value. // void MorphCopyBlockHelper::PrepareSrc() { @@ -706,7 +700,7 @@ void MorphCopyBlockHelper::TrySpecialCases() } //------------------------------------------------------------------------ -// MorphStructCases: transforms the asg as field by field copy or keeps it as a block init +// MorphStructCases: transforms the store as field by field copy or keeps it as a block init // but sets appropriate flags for the involved lclVars. 
// // Assumptions: @@ -726,9 +720,9 @@ void MorphCopyBlockHelper::MorphStructCases() if (m_blockSize == m_dstVarDsc->lvExactSize()) { - JITDUMP(" (m_dstDoFldAsg=true)"); + JITDUMP(" (m_dstDoFldStore=true)"); // We may decide later that a copyblk is required when this struct has holes - m_dstDoFldAsg = true; + m_dstDoFldStore = true; } else { @@ -746,9 +740,9 @@ void MorphCopyBlockHelper::MorphStructCases() if (m_blockSize == m_srcVarDsc->lvExactSize()) { - JITDUMP(" (m_srcDoFldAsg=true)"); + JITDUMP(" (m_srcDoFldStore=true)"); // We may decide later that a copyblk is required when this struct has holes - m_srcDoFldAsg = true; + m_srcDoFldStore = true; } else { @@ -782,7 +776,7 @@ void MorphCopyBlockHelper::MorphStructCases() } // Can we use field by field assignment for the dest? - if (m_dstDoFldAsg && m_dstVarDsc->lvCustomLayout && m_dstVarDsc->lvContainsHoles) + if (m_dstDoFldStore && m_dstVarDsc->lvCustomLayout && m_dstVarDsc->lvContainsHoles) { JITDUMP(" dest contains custom layout and contains holes"); // C++ style CopyBlock with holes @@ -790,7 +784,7 @@ void MorphCopyBlockHelper::MorphStructCases() } // Can we use field by field assignment for the src? - if (m_srcDoFldAsg && m_srcVarDsc->lvCustomLayout && m_srcVarDsc->lvContainsHoles) + if (m_srcDoFldStore && m_srcVarDsc->lvCustomLayout && m_srcVarDsc->lvContainsHoles) { JITDUMP(" src contains custom layout and contains holes"); // C++ style CopyBlock with holes @@ -838,7 +832,7 @@ void MorphCopyBlockHelper::MorphStructCases() (m_srcVarDsc->HasGCPtr() && (m_dstVarDsc == nullptr)) || (m_srcVarDsc->lvFieldCnt == 1))); // Are both dest and src promoted structs? - if (m_dstDoFldAsg && m_srcDoFldAsg && (dstFldIsProfitable || srcFldIsProfitable)) + if (m_dstDoFldStore && m_srcDoFldStore && (dstFldIsProfitable || srcFldIsProfitable)) { // Both structs should be of the same type, or have the same number of fields of the same type. // If not we will use a copy block. 
@@ -870,7 +864,7 @@ void MorphCopyBlockHelper::MorphStructCases() } } } - else if (m_dstDoFldAsg && dstFldIsProfitable) + else if (m_dstDoFldStore && dstFldIsProfitable) { // Match the following kinds of trees: // fgMorphTree BB01, stmt 9 (before) @@ -910,11 +904,11 @@ void MorphCopyBlockHelper::MorphStructCases() var_types destType = m_comp->lvaGetDesc(fieldLclNum)->TypeGet(); if (m_srcVarDsc->TypeGet() == destType) { - m_srcSingleLclVarAsg = true; + m_srcSingleStoreLclVar = true; } } } - else if (m_srcDoFldAsg && srcFldIsProfitable) + else if (m_srcDoFldStore && srcFldIsProfitable) { // Check for the symmetric case (which happens for the _reference field of promoted spans): // @@ -934,14 +928,14 @@ void MorphCopyBlockHelper::MorphStructCases() var_types srcType = m_comp->lvaGetDesc(fieldLclNum)->TypeGet(); if (m_dstVarDsc->TypeGet() == srcType) { - m_dstSingleLclVarAsg = true; + m_dstSingleStoreLclVar = true; } } } // Are neither dest or src promoted structs? else { - assert(!(m_dstDoFldAsg && dstFldIsProfitable) && !(m_srcDoFldAsg && srcFldIsProfitable)); + assert(!(m_dstDoFldStore && dstFldIsProfitable) && !(m_srcDoFldStore && srcFldIsProfitable)); requiresCopyBlock = true; // Leave as a CopyBlock JITDUMP(" with no promoted structs"); } @@ -951,8 +945,8 @@ void MorphCopyBlockHelper::MorphStructCases() if (requiresCopyBlock) { // If a copy block is required then we won't do field by field assignments - m_dstDoFldAsg = false; - m_srcDoFldAsg = false; + m_dstDoFldStore = false; + m_srcDoFldStore = false; } JITDUMP(requiresCopyBlock ? " this requires a CopyBlock.\n" : " using field by field assignments.\n"); @@ -975,7 +969,7 @@ void MorphCopyBlockHelper::MorphStructCases() // Mark the dest/src structs as DoNotEnreg when they are not being fully referenced as the same type. 
// - if (!m_dstDoFldAsg && (m_dstVarDsc != nullptr) && !m_dstSingleLclVarAsg) + if (!m_dstDoFldStore && (m_dstVarDsc != nullptr) && !m_dstSingleStoreLclVar) { if (m_store->OperIs(GT_STORE_LCL_FLD)) { @@ -988,7 +982,7 @@ void MorphCopyBlockHelper::MorphStructCases() } } - if (!m_srcDoFldAsg && (m_srcVarDsc != nullptr) && !m_srcSingleLclVarAsg) + if (!m_srcDoFldStore && (m_srcVarDsc != nullptr) && !m_srcSingleStoreLclVar) { if (m_src->OperIs(GT_LCL_FLD)) { @@ -1004,7 +998,7 @@ void MorphCopyBlockHelper::MorphStructCases() //------------------------------------------------------------------------ // TryPrimitiveCopy: Attempt to replace a block assignment with a scalar assignment. // -// If successful, will set "m_transformationDecision" to "OneAsgBlock". +// If successful, will set "m_transformationDecision" to "OneStoreBlock". // void MorphCopyBlockHelper::TryPrimitiveCopy() { @@ -1076,7 +1070,7 @@ void MorphCopyBlockHelper::TryPrimitiveCopy() doRetypeNode(m_src, m_srcVarDsc, /* isUse */ true); m_result = m_store; - m_transformationDecision = BlockTransformation::OneAsgBlock; + m_transformationDecision = BlockTransformation::OneStoreBlock; } //------------------------------------------------------------------------ @@ -1100,7 +1094,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() unsigned fieldCnt = 0; unsigned dyingFieldCnt = 0; - if (m_dstDoFldAsg) + if (m_dstDoFldStore) { fieldCnt = m_dstVarDsc->lvFieldCnt; @@ -1112,7 +1106,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() } } - if (m_dstDoFldAsg && m_srcDoFldAsg) + if (m_dstDoFldStore && m_srcDoFldStore) { // To do fieldwise assignments for both sides. 
// The structs do not have to be the same exact types but have to have same field types @@ -1120,7 +1114,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() assert(m_dstLclNum != BAD_VAR_NUM && m_srcLclNum != BAD_VAR_NUM); assert(m_dstVarDsc != nullptr && m_srcVarDsc != nullptr && m_dstVarDsc->lvFieldCnt == m_srcVarDsc->lvFieldCnt); } - else if (m_dstDoFldAsg) + else if (m_dstDoFldStore) { m_srcUseLclFld = m_srcVarDsc != nullptr; @@ -1148,7 +1142,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() } else { - assert(m_srcDoFldAsg); + assert(m_srcDoFldStore); fieldCnt = m_srcVarDsc->lvFieldCnt; m_dstUseLclFld = m_dstVarDsc != nullptr; @@ -1191,7 +1185,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() LclVarDsc* addrSpillDsc = m_comp->lvaGetDesc(addrSpillTemp); addrSpillDsc->lvType = addrSpill->TypeIs(TYP_REF) ? TYP_REF : TYP_BYREF; // TODO-ASG: zero-diff quirk, delete. - addrSpillStore = m_comp->gtNewTempAssign(addrSpillTemp, addrSpill); + addrSpillStore = m_comp->gtNewTempStore(addrSpillTemp, addrSpill); } auto grabAddr = [=, &result](unsigned offs) { @@ -1259,7 +1253,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() // So, beyond this point we cannot rely on the old values of 'm_srcVarDsc' and 'm_dstVarDsc'. 
for (unsigned i = 0; i < fieldCnt; ++i) { - if (m_dstDoFldAsg && m_comp->fgGlobalMorph && m_dstLclNode->IsLastUse(i)) + if (m_dstDoFldStore && m_comp->fgGlobalMorph && m_dstLclNode->IsLastUse(i)) { INDEBUG(unsigned dstFieldLclNum = m_comp->lvaGetDesc(m_dstLclNum)->lvFieldLclStart + i); JITDUMP("Field-by-field copy skipping write to dead field V%02u\n", dstFieldLclNum); @@ -1267,7 +1261,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() } GenTree* srcFld = nullptr; - if (m_srcDoFldAsg) + if (m_srcDoFldStore) { noway_assert((m_srcLclNum != BAD_VAR_NUM) && (m_srcLclNode != nullptr)); unsigned srcFieldLclNum = m_comp->lvaGetDesc(m_srcLclNum)->lvFieldLclStart + i; @@ -1276,11 +1270,11 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() } else { - noway_assert(m_dstDoFldAsg); + noway_assert(m_dstDoFldStore); noway_assert(m_dstLclNum != BAD_VAR_NUM); unsigned dstFieldLclNum = m_comp->lvaGetDesc(m_dstLclNum)->lvFieldLclStart + i; - if (m_srcSingleLclVarAsg) + if (m_srcSingleStoreLclVar) { noway_assert(fieldCnt == 1); noway_assert(m_srcLclNum != BAD_VAR_NUM); @@ -1298,7 +1292,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() { // If this is a full-width use of the src via a different type, we need to create a // GT_LCL_FLD. - // (Note that if it was the same type, 'm_srcSingleLclVarAsg' would be true.) + // (Note that if it was the same type, 'm_srcSingleStoreLclVar' would be true.) 
if (m_srcLclNum != BAD_VAR_NUM) { noway_assert(m_srcLclNode != nullptr); @@ -1339,7 +1333,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() assert(srcFld != nullptr); GenTree* dstFldStore; - if (m_dstDoFldAsg) + if (m_dstDoFldStore) { noway_assert(m_dstLclNum != BAD_VAR_NUM); @@ -1348,9 +1342,9 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField() } else { - noway_assert(m_srcDoFldAsg); + noway_assert(m_srcDoFldStore); - if (m_dstSingleLclVarAsg) + if (m_dstSingleStoreLclVar) { noway_assert(fieldCnt == 1); noway_assert(m_dstVarDsc != nullptr); @@ -1560,7 +1554,7 @@ GenTree* Compiler::fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree) store->AddAllEffectsFlags(tree); INDEBUG(store->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); - JITDUMP("MorphStoreDynBlock: transformed STORE_DYN_BLK into ASG(BLK, Data())\n"); + JITDUMP("MorphStoreDynBlock: transformed STORE_DYN_BLK into STORE_BLK\n"); return tree->OperIsCopyBlkOp() ? fgMorphCopyBlock(store) : fgMorphInitBlock(store); } diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index e37624163c52e..e589bb9f92d85 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -173,7 +173,11 @@ void ObjectAllocator::MarkEscapingVarsAndBuildConnGraph() unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); bool lclEscapes = true; - if (tree->OperIs(GT_LCL_VAR) && tree->TypeIs(TYP_REF, TYP_BYREF, TYP_I_IMPL)) + if (tree->OperIsLocalStore()) + { + lclEscapes = false; + } + else if (tree->OperIs(GT_LCL_VAR) && tree->TypeIs(TYP_REF, TYP_BYREF, TYP_I_IMPL)) { assert(tree == m_ancestors.Top()); @@ -356,15 +360,15 @@ bool ObjectAllocator::MorphAllocObjNodes() for (Statement* const stmt : block->Statements()) { GenTree* stmtExpr = stmt->GetRootNode(); - GenTree* op2 = nullptr; + GenTree* data = nullptr; bool canonicalAllocObjFound = false; - if (stmtExpr->OperGet() == GT_ASG && stmtExpr->TypeGet() == TYP_REF) + if (stmtExpr->OperIs(GT_STORE_LCL_VAR) && stmtExpr->TypeIs(TYP_REF)) { - op2 = 
stmtExpr->gtGetOp2(); + data = stmtExpr->AsLclVar()->Data(); - if (op2->OperGet() == GT_ALLOCOBJ) + if (data->OperGet() == GT_ALLOCOBJ) { canonicalAllocObjFound = true; } @@ -376,22 +380,14 @@ bool ObjectAllocator::MorphAllocObjNodes() //------------------------------------------------------------------------ // We expect the following expression tree at this point // STMTx (IL 0x... ???) - // * ASG ref - // +--* LCL_VAR ref + // * STORE_LCL_VAR ref // \--* ALLOCOBJ ref // \--* CNS_INT(h) long //------------------------------------------------------------------------ - GenTree* op1 = stmtExpr->gtGetOp1(); - - assert(op1->OperGet() == GT_LCL_VAR); - assert(op1->TypeGet() == TYP_REF); - assert(op2 != nullptr); - assert(op2->OperGet() == GT_ALLOCOBJ); - - GenTreeAllocObj* asAllocObj = op2->AsAllocObj(); - unsigned int lclNum = op1->AsLclVar()->GetLclNum(); - CORINFO_CLASS_HANDLE clsHnd = op2->AsAllocObj()->gtAllocObjClsHnd; + GenTreeAllocObj* asAllocObj = data->AsAllocObj(); + unsigned int lclNum = stmtExpr->AsLclVar()->GetLclNum(); + CORINFO_CLASS_HANDLE clsHnd = data->AsAllocObj()->gtAllocObjClsHnd; // Don't attempt to do stack allocations inside basic blocks that may be in a loop. if (IsObjectStackAllocationEnabled() && !basicBlockHasBackwardJump && @@ -416,20 +412,16 @@ bool ObjectAllocator::MorphAllocObjNodes() JITDUMP("Allocating local variable V%02u on the heap\n", lclNum); } - op2 = MorphAllocObjNodeIntoHelperCall(asAllocObj); + data = MorphAllocObjNodeIntoHelperCall(asAllocObj); + stmtExpr->AsLclVar()->Data() = data; + stmtExpr->AddAllEffectsFlags(data); } - - // Propagate flags of op2 to its parent. - stmtExpr->AsOp()->gtOp2 = op2; - stmtExpr->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT; } - #ifdef DEBUG else { - // We assume that GT_ALLOCOBJ nodes are always present in the - // canonical form. - comp->fgWalkTreePre(stmt->GetRootNodePointer(), AssertWhenAllocObjFoundVisitor); + // We assume that GT_ALLOCOBJ nodes are always present in the canonical form. 
+ assert(!comp->gtTreeContainsOper(stmt->GetRootNode(), GT_ALLOCOBJ)); } #endif // DEBUG } @@ -524,17 +516,14 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a { //------------------------------------------------------------------------ // STMTx (IL 0x... ???) - // * ASG struct (init) - // +--* LCL_VAR struct + // * STORE_LCL_VAR struct // \--* CNS_INT int 0 //------------------------------------------------------------------------ - GenTree* tree = comp->gtNewLclvNode(lclNum, TYP_STRUCT); - tree = comp->gtNewAssignNode(tree, comp->gtNewIconNode(0)); - - Statement* newStmt = comp->gtNewStmt(tree); + GenTree* init = comp->gtNewStoreLclVarNode(lclNum, comp->gtNewIconNode(0)); + Statement* initStmt = comp->gtNewStmt(init); - comp->fgInsertStmtBefore(block, stmt, newStmt); + comp->fgInsertStmtBefore(block, stmt, initStmt); } else { @@ -545,18 +534,15 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a //------------------------------------------------------------------------ // STMTx (IL 0x... ???) - // * ASG long - // +--* LCL_FLD long + // * STORE_LCL_FLD long // \--* CNS_INT(h) long //------------------------------------------------------------------------ // Initialize the method table pointer. - GenTree* tree = comp->gtNewLclFldNode(lclNum, TYP_I_IMPL, 0); - tree = comp->gtNewAssignNode(tree, allocObj->gtGetOp1()); + GenTree* init = comp->gtNewStoreLclFldNode(lclNum, TYP_I_IMPL, 0, allocObj->gtGetOp1()); + Statement* initStmt = comp->gtNewStmt(init); - Statement* newStmt = comp->gtNewStmt(tree); - - comp->fgInsertStmtBefore(block, stmt, newStmt); + comp->fgInsertStmtBefore(block, stmt, initStmt); return lclNum; } @@ -575,7 +561,7 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a // Notes: // The method currently treats all locals assigned to a field as escaping. // The can potentially be tracked by special field edges in the connection graph. 
- +// bool ObjectAllocator::CanLclVarEscapeViaParentStack(ArrayStack* parentStack, unsigned int lclNum) { assert(parentStack != nullptr); @@ -599,48 +585,18 @@ bool ObjectAllocator::CanLclVarEscapeViaParentStack(ArrayStack* parent switch (parent->OperGet()) { - case GT_ASG: + // Update the connection graph if we are storing to a local. + // For all other stores we mark the local as escaping. + case GT_STORE_LCL_VAR: { - // Use the following conservative behavior for GT_ASG parent node: - // Consider local variable to be escaping if - // 1. lclVar appears on the rhs of a GT_ASG node - // AND - // 2. The lhs of the GT_ASG is not another lclVar - - GenTree* op1 = parent->AsOp()->gtGetOp1(); - - if (op1 == tree) - { - // Assigning to a local doesn't make it escaping. - // If there is another local variable on the rhs, - // we will update the connection graph when we visit it. - canLclVarEscapeViaParentStack = false; - } - else - { - // lclVar is on the rhs of GT_ASG node - assert(parent->AsOp()->gtGetOp2() == tree); + // Add an edge to the connection graph. + const unsigned int dstLclNum = parent->AsLclVar()->GetLclNum(); + const unsigned int srcLclNum = lclNum; - // Update the connection graph if we are assigning to a local. - // For all other assignments we mark the rhs local as escaping. - // TODO-ObjectStackAllocation: track assignments to fields. - if (op1->OperGet() == GT_LCL_VAR) - { - // We expect the following tree at this point - // /--* GT_LCL_VAR ref rhsLclVar - // --* = ref - // \--* GT_LCL_VAR ref lhsLclVar - - // Add an edge to the connection graph. 
- const unsigned int lhsLclNum = op1->AsLclVar()->GetLclNum(); - const unsigned int rhsLclNum = lclNum; - - AddConnGraphEdge(lhsLclNum, rhsLclNum); - canLclVarEscapeViaParentStack = false; - } - } - break; + AddConnGraphEdge(dstLclNum, srcLclNum); + canLclVarEscapeViaParentStack = false; } + break; case GT_EQ: case GT_NE: @@ -664,6 +620,13 @@ bool ObjectAllocator::CanLclVarEscapeViaParentStack(ArrayStack* parent keepChecking = true; break; + case GT_STOREIND: + if (tree != parent->AsIndir()->Addr()) + { + // TODO-ObjectStackAllocation: track stores to fields. + break; + } + FALLTHROUGH; case GT_IND: // Address of the field/ind is not taken so the local doesn't escape. canLclVarEscapeViaParentStack = false; @@ -723,18 +686,12 @@ void ObjectAllocator::UpdateAncestorTypes(GenTree* tree, ArrayStack* p switch (parent->OperGet()) { - case GT_ASG: - { - GenTree* op2 = parent->AsOp()->gtGetOp2(); - - if ((op2 == tree) && (parent->TypeGet() == TYP_REF)) + case GT_STORE_LCL_VAR: + if (parent->TypeGet() == TYP_REF) { - assert(parent->AsOp()->gtGetOp1()->OperGet() == GT_LCL_VAR); parent->ChangeType(newType); } - break; - } case GT_EQ: case GT_NE: @@ -759,8 +716,9 @@ void ObjectAllocator::UpdateAncestorTypes(GenTree* tree, ArrayStack* p keepChecking = true; break; - case GT_IND: - { + case GT_STOREIND: + assert(tree == parent->AsIndir()->Addr()); + // The new target could be *not* on the heap. parent->gtFlags &= ~GTF_IND_TGT_HEAP; @@ -772,7 +730,9 @@ void ObjectAllocator::UpdateAncestorTypes(GenTree* tree, ArrayStack* p parent->gtFlags |= GTF_IND_TGT_NOT_HEAP; } break; - } + + case GT_IND: + break; default: unreached(); @@ -787,30 +747,6 @@ void ObjectAllocator::UpdateAncestorTypes(GenTree* tree, ArrayStack* p return; } -#ifdef DEBUG -//------------------------------------------------------------------------ -// AssertWhenAllocObjFoundVisitor: Look for a GT_ALLOCOBJ node and assert -// when found one. 
-// -// Arguments: -// pTree - Tree to examine -// data - Walker data -// -// Return Value: -// Always returns fgWalkResult::WALK_CONTINUE - -Compiler::fgWalkResult ObjectAllocator::AssertWhenAllocObjFoundVisitor(GenTree** pTree, Compiler::fgWalkData* data) -{ - GenTree* tree = *pTree; - - assert(tree != nullptr); - assert(tree->OperGet() != GT_ALLOCOBJ); - - return Compiler::fgWalkResult::WALK_CONTINUE; -} - -#endif // DEBUG - //------------------------------------------------------------------------ // RewriteUses: Find uses of the newobj temp for stack-allocated // objects and replace with address of the stack local. @@ -837,7 +773,7 @@ void ObjectAllocator::RewriteUses() Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { GenTree* tree = *use; - assert(tree->IsLocal() || tree->OperIs(GT_LCL_ADDR)); + assert(tree->OperIsAnyLocal()); const unsigned int lclNum = tree->AsLclVarCommon()->GetLclNum(); unsigned int newLclNum = BAD_VAR_NUM; @@ -847,11 +783,12 @@ void ObjectAllocator::RewriteUses() m_allocator->MayLclVarPointToStack(lclNum)) { // Analysis does not handle indirect access to pointer locals. - assert(tree->OperIs(GT_LCL_VAR)); + assert(tree->OperIsScalarLocal()); var_types newType; if (m_allocator->m_HeapLocalToStackLocalMap.TryGetValue(lclNum, &newLclNum)) { + assert(tree->OperIs(GT_LCL_VAR)); // Must be a use. 
newType = TYP_I_IMPL; tree = m_compiler->gtNewLclVarAddrNode(newLclNum); *use = tree; diff --git a/src/coreclr/jit/objectalloc.h b/src/coreclr/jit/objectalloc.h index 7a03320b46ea1..f4a56cb4ca39d 100644 --- a/src/coreclr/jit/objectalloc.h +++ b/src/coreclr/jit/objectalloc.h @@ -66,9 +66,7 @@ class ObjectAllocator final : public Phase struct BuildConnGraphVisitorCallbackData; bool CanLclVarEscapeViaParentStack(ArrayStack* parentStack, unsigned int lclNum); void UpdateAncestorTypes(GenTree* tree, ArrayStack* parentStack, var_types newType); -#ifdef DEBUG - static Compiler::fgWalkResult AssertWhenAllocObjFoundVisitor(GenTree** pTree, Compiler::fgWalkData* data); -#endif // DEBUG + static const unsigned int s_StackAllocMaxSize = 0x2000U; }; diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp index 67df2db2a5ba2..665a5eeb4cecd 100644 --- a/src/coreclr/jit/optcse.cpp +++ b/src/coreclr/jit/optcse.cpp @@ -3131,7 +3131,7 @@ class CSE_Heuristic } /* Create a store of the value to the temp */ - GenTree* store = m_pCompiler->gtNewTempAssign(cseLclVarNum, val); + GenTree* store = m_pCompiler->gtNewTempStore(cseLclVarNum, val); GenTree* origStore = store; if (!store->OperIs(GT_STORE_LCL_VAR)) @@ -3166,11 +3166,11 @@ class CSE_Heuristic // These should not have been set yet, since this is the first and // only def for this CSE. 
assert(ssaVarDsc->GetBlock() == nullptr); - assert(ssaVarDsc->GetAssignment() == nullptr); + assert(ssaVarDsc->GetDefNode() == nullptr); ssaVarDsc->m_vnPair = val->gtVNPair; ssaVarDsc->SetBlock(blk); - ssaVarDsc->SetAssignment(store->AsLclVarCommon()); + ssaVarDsc->SetDefNode(store->AsLclVarCommon()); } /* Create a reference to the CSE temp */ diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index af55e14a4451d..ef10945aca44c 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -382,7 +382,7 @@ bool OptBoolsDsc::FindCompareChain(GenTree* condition, bool* isTestCondition) // * JTRUE (NE c,d) // // ------------ BB03, preds={BB01, BB02} succs={BB04} -// * ASG (x,y) +// * STORE_LCL_VAR(y) // // These operands will be combined into a single AND in the first block (with the first // condition inverted), wrapped by the test condition (NE(...,0)). Giving: @@ -391,7 +391,7 @@ bool OptBoolsDsc::FindCompareChain(GenTree* condition, bool* isTestCondition) // * JTRUE (NE (AND (LE a,b), (NE c,d)), 0) // // ------------ BB03, preds={BB01} succs={BB04} -// * ASG x,y +// * STORE_LCL_VAR(y) // // // Example 2: @@ -403,10 +403,10 @@ bool OptBoolsDsc::FindCompareChain(GenTree* condition, bool* isTestCondition) // * JTRUE (NE (OR (LE a,b), (NE c,d)), 0) // // ------------ BB03, preds={BB01} succs={BB05} -// * ASG x,y +// * STORE_LCL_VAR(y) // // ------------ BB04, preds={BB01} succs={BB05} -// * ASG x,z +// * STORE_LCL_VAR(z) // // // Example 3: @@ -418,7 +418,7 @@ bool OptBoolsDsc::FindCompareChain(GenTree* condition, bool* isTestCondition) // * JTRUE (NE (OR ((NE (OR (NE c,d), (GE e,f)), 0), (LE a,b))), 0) // // ------------ BB03, preds={BB01} succs={BB04} -// * ASG x,y +// * STORE_LCL_VAR(y) // // // This optimization means that every condition within the IF statement is always evaluated, diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index d425f55e3f4b3..1b62867cb1431 100644 --- 
a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -769,9 +769,9 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT // optCheckIterInLoopTest: Check if iter var is used in loop test. // // Arguments: -// loopInd loopIndex -// test "jtrue" tree or an asg of the loop iter termination condition -// iterVar loop iteration variable. +// loopInd - loopIndex +// test - "jtrue" tree or a store of the loop iter termination condition +// iterVar - loop iteration variable. // // Operation: // The test tree is parsed to check if "iterVar" matches the lhs of the condition @@ -891,16 +891,10 @@ bool Compiler::optCheckIterInLoopTest(unsigned loopInd, GenTree* test, unsigned } //---------------------------------------------------------------------------------- -// optIsLoopIncrTree: Check if loop is a tree of form v += 1 or v = v + 1 +// optIsLoopIncrTree: Check if loop is a tree of form v = v op const. // // Arguments: -// incr The incr tree to be checked. Whether incr tree is -// oper-equal(+=, -=...) type nodes or v=v+1 type ASG nodes. -// -// Operation: -// The test tree is parsed to check if "iterVar" matches the lhs of the condition -// and the rhs limit is extracted from the "test" tree. The limit information is -// added to the loop table. +// incr - The incr tree to be checked. // // Return Value: // iterVar local num if the iterVar is found, otherwise BAD_VAR_NUM. @@ -912,7 +906,7 @@ unsigned Compiler::optIsLoopIncrTree(GenTree* incr) unsigned iterVar = incr->IsLclVarUpdateTree(&incrVal, &updateOper); if (iterVar != BAD_VAR_NUM) { - // We have v = v op y type asg node. + // We have v = v op y type node. 
switch (updateOper) { case GT_ADD: @@ -5718,12 +5712,6 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu oper = tree->OperGet(); kind = tree->OperKind(); - if (oper == GT_ASG) - { - noway_assert(doit == false); - return false; - } - ValueNumPair NoVNPair = ValueNumPair(); if (kind & GTK_LEAF) @@ -9302,7 +9290,7 @@ PhaseStatus Compiler::optVNBasedDeadStoreRemoval() for (unsigned defIndex = 1; defIndex < defCount; defIndex++) { LclSsaVarDsc* defDsc = varDsc->lvPerSsaData.GetSsaDefByIndex(defIndex); - GenTreeLclVarCommon* store = defDsc->GetAssignment(); + GenTreeLclVarCommon* store = defDsc->GetDefNode(); if (store != nullptr) { diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index e16af87f6505b..a2d6cb5633537 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -162,12 +162,11 @@ class PatchpointTransformer // // --ppCounter; GenTree* ppCounterBefore = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT); - GenTree* ppCounterAfter = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT); GenTree* one = compiler->gtNewIconNode(1, TYP_INT); GenTree* ppCounterSub = compiler->gtNewOperNode(GT_SUB, TYP_INT, ppCounterBefore, one); - GenTree* ppCounterAsg = compiler->gtNewAssignNode(ppCounterAfter, ppCounterSub); + GenTree* ppCounterUpdate = compiler->gtNewStoreLclVarNode(ppCounterLclNum, ppCounterSub); - compiler->fgNewStmtAtEnd(block, ppCounterAsg); + compiler->fgNewStmtAtEnd(block, ppCounterUpdate); // if (ppCounter > 0), bypass helper call GenTree* ppCounterUpdated = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT); @@ -201,10 +200,9 @@ class PatchpointTransformer } GenTree* initialCounterNode = compiler->gtNewIconNode(initialCounterValue, TYP_INT); - GenTree* ppCounterRef = compiler->gtNewLclvNode(ppCounterLclNum, TYP_INT); - GenTree* ppCounterAsg = compiler->gtNewAssignNode(ppCounterRef, initialCounterNode); + GenTree* ppCounterStore = 
compiler->gtNewStoreLclVarNode(ppCounterLclNum, initialCounterNode); - compiler->fgNewStmtNearEnd(block, ppCounterAsg); + compiler->fgNewStmtNearEnd(block, ppCounterStore); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/promotion.cpp b/src/coreclr/jit/promotion.cpp index 7e62b68058578..9452210abcd04 100644 --- a/src/coreclr/jit/promotion.cpp +++ b/src/coreclr/jit/promotion.cpp @@ -536,7 +536,7 @@ class LocalsUseVisitor : public GenTreeVisitor { GenTree* tree = *use; - if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_LCL_ADDR)) + if (tree->OperIsAnyLocal()) { GenTreeLclVarCommon* lcl = tree->AsLclVarCommon(); LclVarDsc* dsc = m_compiler->lvaGetDesc(lcl); @@ -559,7 +559,7 @@ class LocalsUseVisitor : public GenTreeVisitor { accessType = lcl->TypeGet(); accessLayout = accessType == TYP_STRUCT ? lcl->GetLayout(m_compiler) : nullptr; - accessFlags = ClassifyLocalRead(lcl, user); + accessFlags = ClassifyLocalAccess(lcl, user); } LocalUses* uses = GetOrCreateUses(lcl->GetLclNum()); @@ -594,7 +594,7 @@ class LocalsUseVisitor : public GenTreeVisitor //------------------------------------------------------------------------ // ClassifyLocalAccess: - // Given a local use and its user, classify information about it. + // Given a local node and its user, classify information about it. // // Parameters: // lcl - The local @@ -603,50 +603,36 @@ class LocalsUseVisitor : public GenTreeVisitor // Returns: // Flags classifying the access. 
// - AccessKindFlags ClassifyLocalRead(GenTreeLclVarCommon* lcl, GenTree* user) + AccessKindFlags ClassifyLocalAccess(GenTreeLclVarCommon* lcl, GenTree* user) { - assert(lcl->OperIsLocalRead()); + assert(lcl->OperIsLocalRead() || lcl->OperIsLocalStore()); AccessKindFlags flags = AccessKindFlags::None; - if (user->IsCall()) + if (lcl->OperIsLocalStore()) { - GenTreeCall* call = user->AsCall(); - unsigned argIndex = 0; - for (CallArg& arg : call->gtArgs.Args()) - { - if (arg.GetNode() != lcl) - { - argIndex++; - continue; - } + flags |= AccessKindFlags::IsAssignmentDestination; + } - flags |= AccessKindFlags::IsCallArg; + if (user == nullptr) + { + return flags; + } - unsigned argSize = 0; - if (arg.GetSignatureType() != TYP_STRUCT) - { - argSize = genTypeSize(arg.GetSignatureType()); - } - else + if (user->IsCall()) + { + for (CallArg& arg : user->AsCall()->gtArgs.Args()) + { + if (arg.GetNode() == lcl) { - argSize = m_compiler->typGetObjLayout(arg.GetSignatureClassHandle())->GetSize(); + flags |= AccessKindFlags::IsCallArg; + break; } - - break; } } - if (user->OperIs(GT_ASG)) + if (user->OperIsStore() && (user->Data() == lcl)) { - if (user->gtGetOp1() == lcl) - { - flags |= AccessKindFlags::IsAssignmentDestination; - } - - if (user->gtGetOp2() == lcl) - { - flags |= AccessKindFlags::IsAssignmentSource; - } + flags |= AccessKindFlags::IsAssignmentSource; } if (user->OperIs(GT_RETURN)) @@ -1086,8 +1072,7 @@ StructSegments Promotion::SignificantSegments(Compiler* compiler, // CreateWriteBack: // Create IR that writes a replacement local's value back to its struct local: // -// ASG -// LCL_FLD int V00 [+4] +// STORE_LCL_FLD int V00 [+4] // LCL_VAR int V01 // // Parameters: @@ -1100,18 +1085,16 @@ StructSegments Promotion::SignificantSegments(Compiler* compiler, // GenTree* Promotion::CreateWriteBack(Compiler* compiler, unsigned structLclNum, const Replacement& replacement) { - GenTree* dst = compiler->gtNewLclFldNode(structLclNum, replacement.AccessType, 
replacement.Offset); - GenTree* src = compiler->gtNewLclvNode(replacement.LclNum, genActualType(replacement.AccessType)); - GenTree* asg = compiler->gtNewAssignNode(dst, src); - return asg; + GenTree* value = compiler->gtNewLclVarNode(replacement.LclNum); + GenTree* store = compiler->gtNewStoreLclFldNode(structLclNum, replacement.AccessType, replacement.Offset, value); + return store; } //------------------------------------------------------------------------ // CreateReadBack: // Create IR that reads a replacement local's value back from its struct local: // -// ASG -// LCL_VAR int V01 +// STORE_LCL_VAR int V01 // LCL_FLD int V00 [+4] // // Parameters: @@ -1124,27 +1107,24 @@ GenTree* Promotion::CreateWriteBack(Compiler* compiler, unsigned structLclNum, c // GenTree* Promotion::CreateReadBack(Compiler* compiler, unsigned structLclNum, const Replacement& replacement) { - GenTree* dst = compiler->gtNewLclvNode(replacement.LclNum, genActualType(replacement.AccessType)); - GenTree* src = compiler->gtNewLclFldNode(structLclNum, replacement.AccessType, replacement.Offset); - GenTree* asg = compiler->gtNewAssignNode(dst, src); - return asg; + GenTree* value = compiler->gtNewLclFldNode(structLclNum, replacement.AccessType, replacement.Offset); + GenTree* store = compiler->gtNewStoreLclVarNode(replacement.LclNum, value); + return store; } Compiler::fgWalkResult ReplaceVisitor::PostOrderVisit(GenTree** use, GenTree* user) { GenTree* tree = *use; - if (tree->OperIs(GT_ASG)) + if (tree->OperIsStore()) { - // If LHS of the ASG was a local then we skipped it as we don't - // want to see it until after the RHS. - if (tree->gtGetOp1()->OperIs(GT_LCL_VAR, GT_LCL_FLD)) + if (tree->OperIsLocalStore()) { - ReplaceLocal(&tree->AsOp()->gtOp1, tree); + ReplaceLocal(use, user); } - // Assignments can be decomposed directly into accesses of the replacements. - HandleAssignment(use, user); + // Stores can be decomposed directly into accesses of the replacements. 
+ HandleStore(use, user); return fgWalkResult::WALK_CONTINUE; } @@ -1164,10 +1144,7 @@ Compiler::fgWalkResult ReplaceVisitor::PostOrderVisit(GenTree** use, GenTree* us return fgWalkResult::WALK_CONTINUE; } - // Skip the local on the LHS of ASGs when we see it in the normal tree - // visit; we handle it as part of the parent ASG instead. - if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD) && - ((user == nullptr) || !user->OperIs(GT_ASG) || (user->gtGetOp1() != tree))) + if (tree->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { ReplaceLocal(use, user); return fgWalkResult::WALK_CONTINUE; @@ -1302,7 +1279,10 @@ void ReplaceVisitor::ReplaceLocal(GenTree** use, GenTree* user) #ifdef DEBUG if (accessType == TYP_STRUCT) { - assert((user == nullptr) || user->OperIs(GT_ASG, GT_CALL, GT_RETURN)); + if (lcl->OperIsLocalRead()) + { + assert((user == nullptr) || user->OperIs(GT_CALL, GT_RETURN) || user->OperIsStore()); + } } else { @@ -1335,13 +1315,21 @@ void ReplaceVisitor::ReplaceLocal(GenTree** use, GenTree* user) Replacement& rep = replacements[index]; assert(accessType == rep.AccessType); JITDUMP(" ..replaced with promoted lcl V%02u\n", rep.LclNum); - *use = m_compiler->gtNewLclvNode(rep.LclNum, accessType); + + bool isDef = lcl->OperIsLocalStore(); + if (isDef) + { + *use = m_compiler->gtNewStoreLclVarNode(rep.LclNum, lcl->Data()); + } + else + { + *use = m_compiler->gtNewLclvNode(rep.LclNum, accessType); + } (*use)->gtFlags |= lcl->gtFlags & GTF_VAR_DEATH; - if ((lcl->gtFlags & GTF_VAR_DEF) != 0) + if (isDef) { - (*use)->gtFlags |= GTF_VAR_DEF; // TODO-ASG: delete. 
rep.NeedsWriteBack = true; rep.NeedsReadBack = false; } @@ -1358,9 +1346,9 @@ void ReplaceVisitor::ReplaceLocal(GenTree** use, GenTree* user) // └──▌ ADD int // ├──▌ LCL_VAR int V10 tmp6 -> copy propagated to [V35 tmp31] // └──▌ COMMA int - // ├──▌ ASG int - // │ ├──▌ LCL_VAR int V35 tmp31 + // ├──▌ STORE_LCL_VAR int V35 tmp31 // │ └──▌ LCL_FLD int V03 loc1 [+4] + // // This really ought to be handled by local copy prop, but the way it works during // morph makes it hard to fix there. // @@ -1733,10 +1721,9 @@ void Promotion::ExplicitlyZeroInitReplacementLocals(unsigned continue; } - GenTree* dst = m_compiler->gtNewLclvNode(rep.LclNum, rep.AccessType); - GenTree* src = m_compiler->gtNewZeroConNode(rep.AccessType); - GenTree* asg = m_compiler->gtNewAssignNode(dst, src); - InsertInitStatement(prevStmt, asg); + GenTree* value = m_compiler->gtNewZeroConNode(rep.AccessType); + GenTree* store = m_compiler->gtNewStoreLclVarNode(rep.LclNum, value); + InsertInitStatement(prevStmt, store); } } diff --git a/src/coreclr/jit/promotion.h b/src/coreclr/jit/promotion.h index 5c4e26383b399..895058e73d04b 100644 --- a/src/coreclr/jit/promotion.h +++ b/src/coreclr/jit/promotion.h @@ -281,13 +281,13 @@ class ReplaceVisitor : public GenTreeVisitor void WriteBackBefore(GenTree** use, unsigned lcl, unsigned offs, unsigned size); void MarkForReadBack(unsigned lcl, unsigned offs, unsigned size); - void HandleAssignment(GenTree** use, GenTree* user); + void HandleStore(GenTree** use, GenTree* user); bool OverlappingReplacements(GenTreeLclVarCommon* lcl, Replacement** firstReplacement, Replacement** endReplacement = nullptr); - void EliminateCommasInBlockOp(GenTreeOp* asg, DecompositionStatementList* result); - void InitFields(GenTreeLclVarCommon* dst, Replacement* firstRep, Replacement* endRep, DecompositionPlan* plan); - void CopyBetweenFields(GenTree* dst, + void EliminateCommasInBlockOp(GenTree* store, DecompositionStatementList* result); + void InitFields(GenTreeLclVarCommon* dstStore, 
Replacement* firstRep, Replacement* endRep, DecompositionPlan* plan); + void CopyBetweenFields(GenTree* store, Replacement* dstFirstRep, Replacement* dstEndRep, GenTree* src, diff --git a/src/coreclr/jit/promotiondecomposition.cpp b/src/coreclr/jit/promotiondecomposition.cpp index 165607bb58650..fcfd81a07c97c 100644 --- a/src/coreclr/jit/promotiondecomposition.cpp +++ b/src/coreclr/jit/promotiondecomposition.cpp @@ -52,7 +52,7 @@ class DecompositionPlan Compiler* m_compiler; jitstd::vector& m_aggregates; PromotionLiveness* m_liveness; - GenTree* m_dst; + GenTree* m_store; GenTree* m_src; bool m_dstInvolvesReplacements; bool m_srcInvolvesReplacements; @@ -63,14 +63,14 @@ class DecompositionPlan DecompositionPlan(Compiler* comp, jitstd::vector& aggregates, PromotionLiveness* liveness, - GenTree* dst, + GenTree* store, GenTree* src, bool dstInvolvesReplacements, bool srcInvolvesReplacements) : m_compiler(comp) , m_aggregates(aggregates) , m_liveness(liveness) - , m_dst(dst) + , m_store(store) , m_src(src) , m_dstInvolvesReplacements(dstInvolvesReplacements) , m_srcInvolvesReplacements(srcInvolvesReplacements) @@ -269,7 +269,7 @@ class DecompositionPlan // StructSegments ComputeRemainder() { - ClassLayout* dstLayout = m_dst->GetLayout(m_compiler); + ClassLayout* dstLayout = m_store->GetLayout(m_compiler); // Validate with "obviously correct" but less scalable fixed bit vector implementation. 
INDEBUG(FixedBitVect * segmentBitVect); @@ -356,7 +356,7 @@ class DecompositionPlan unsigned size = segment.End - segment.Start; if ((size == TARGET_POINTER_SIZE) && ((segment.Start % TARGET_POINTER_SIZE) == 0)) { - ClassLayout* dstLayout = m_dst->GetLayout(m_compiler); + ClassLayout* dstLayout = m_store->GetLayout(m_compiler); primitiveType = dstLayout->GetGCPtrType(segment.Start / TARGET_POINTER_SIZE); } else @@ -410,9 +410,9 @@ class DecompositionPlan void FinalizeInit(DecompositionStatementList* statements) { uint8_t initPattern = GetInitPattern(); - StructDeaths deaths = m_liveness->GetDeathsForStructLocal(m_dst->AsLclVarCommon()); + StructDeaths deaths = m_liveness->GetDeathsForStructLocal(m_store->AsLclVarCommon()); - AggregateInfo* agg = m_aggregates[m_dst->AsLclVarCommon()->GetLclNum()]; + AggregateInfo* agg = m_aggregates[m_store->AsLclVarCommon()->GetLclNum()]; assert((agg != nullptr) && (agg->Replacements.size() > 0)); Replacement* firstRep = agg->Replacements.data(); @@ -426,9 +426,9 @@ class DecompositionPlan if (!deaths.IsReplacementDying((unsigned)replacementIndex)) { - GenTree* src = m_compiler->gtNewConWithPattern(entry.Type, initPattern); - GenTree* dst = m_compiler->gtNewLclvNode(entry.ToLclNum, entry.Type); - statements->AddStatement(m_compiler->gtNewAssignNode(dst, src)); + GenTree* value = m_compiler->gtNewConWithPattern(entry.Type, initPattern); + GenTree* store = m_compiler->gtNewStoreLclVarNode(entry.ToLclNum, value); + statements->AddStatement(store); } entry.ToReplacement->NeedsWriteBack = true; @@ -438,17 +438,17 @@ class DecompositionPlan RemainderStrategy remainderStrategy = DetermineRemainderStrategy(deaths); if (remainderStrategy.Type == RemainderStrategy::FullBlock) { - GenTree* asg = m_compiler->gtNewAssignNode(m_dst, m_src); - statements->AddStatement(asg); + statements->AddStatement(m_store); } else if (remainderStrategy.Type == RemainderStrategy::Primitive) { - GenTree* src = 
m_compiler->gtNewConWithPattern(remainderStrategy.PrimitiveType, initPattern); - GenTreeLclVarCommon* dstLcl = m_dst->AsLclVarCommon(); - GenTree* dst = m_compiler->gtNewLclFldNode(dstLcl->GetLclNum(), remainderStrategy.PrimitiveType, - dstLcl->GetLclOffs() + remainderStrategy.PrimitiveOffset); + GenTree* value = m_compiler->gtNewConWithPattern(remainderStrategy.PrimitiveType, initPattern); + GenTreeLclVarCommon* dstLcl = m_store->AsLclVarCommon(); + GenTree* store = + m_compiler->gtNewStoreLclFldNode(dstLcl->GetLclNum(), remainderStrategy.PrimitiveType, + dstLcl->GetLclOffs() + remainderStrategy.PrimitiveOffset, value); m_compiler->lvaSetVarDoNotEnregister(dstLcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); - statements->AddStatement(m_compiler->gtNewAssignNode(dst, src)); + statements->AddStatement(store); } } @@ -461,12 +461,13 @@ class DecompositionPlan // void FinalizeCopy(DecompositionStatementList* statements) { - assert(m_dst->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_BLK) && m_src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_BLK)); + assert(m_store->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD, GT_STORE_BLK) && + m_src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_BLK)); StructDeaths dstDeaths; if (m_dstInvolvesReplacements) { - dstDeaths = m_liveness->GetDeathsForStructLocal(m_dst->AsLclVarCommon()); + dstDeaths = m_liveness->GetDeathsForStructLocal(m_store->AsLclVarCommon()); } RemainderStrategy remainderStrategy = DetermineRemainderStrategy(dstDeaths); @@ -477,41 +478,35 @@ class DecompositionPlan // first instead. 
That is, instead of: // // ▌ COMMA void - // ├──▌ ASG struct (copy) <- write barrier - // │ ├──▌ BLK struct - // │ │ └──▌ LCL_VAR byref V01 arg1 + // ├──▌ STORE_BLK struct <- write barrier + // │ ├──▌ LCL_VAR byref V01 arg1 // │ └──▌ LCL_VAR struct V00 arg0 // └──▌ COMMA void - // ├──▌ ASG ref <- write barrier - // │ ├──▌ IND ref - // │ │ └──▌ ADD byref - // │ │ ├──▌ LCL_VAR byref V01 arg1 - // │ │ └──▌ CNS_INT long 8 + // ├──▌ STOREIND ref <- write barrier + // │ ├───▌ ADD byref + // │ │ ├──▌ LCL_VAR byref V01 arg1 + // │ │ └──▌ CNS_INT long 8 // │ └──▌ LCL_VAR ref V05 tmp3 - // └──▌ ASG ref <- write barrier - // ├──▌ IND ref - // │ └──▌ ADD byref - // │ ├──▌ LCL_VAR byref V01 arg1 - // │ └──▌ CNS_INT long 24 + // └──▌ STOREIND ref <- write barrier + // ├──▌ ADD byref + // │ ├──▌ LCL_VAR byref V01 arg1 + // │ └──▌ CNS_INT long 24 // └──▌ LCL_VAR ref V06 tmp4 // // Produce: // // ▌ COMMA void - // ├──▌ ASG ref <- no write barrier - // │ ├──▌ LCL_FLD ref V00 arg0 [+8] + // ├──▌ STORE_LCL_FLD ref V00 arg0 [+8] <- no write barrier // │ └──▌ LCL_VAR ref V05 tmp3 // └──▌ COMMA void - // ├──▌ ASG ref <- no write barrier - // │ ├──▌ LCL_FLD ref V00 arg0 [+24] + // ├──▌ STORE_LCL_FLD ref V00 arg0 [+24] <- no write barrier // │ └──▌ LCL_VAR ref V06 tmp4 - // └──▌ ASG struct (copy) <- write barrier - // ├──▌ BLK struct - // │ └──▌ LCL_VAR byref V01 arg1 (last use) + // └──▌ STORE_BLK struct <- write barrier + // ├──▌ LCL_VAR byref V01 arg1 (last use) // └──▌ LCL_VAR struct V00 arg0 // - if ((remainderStrategy.Type == RemainderStrategy::FullBlock) && m_dst->OperIs(GT_BLK) && - m_dst->GetLayout(m_compiler)->HasGCPtr()) + if ((remainderStrategy.Type == RemainderStrategy::FullBlock) && m_store->OperIs(GT_STORE_BLK) && + m_store->AsBlk()->GetLayout()->HasGCPtr()) { for (int i = 0; i < m_entries.Height(); i++) { @@ -540,15 +535,15 @@ class DecompositionPlan FieldSeq* addrBaseOffsFldSeq = nullptr; GenTreeFlags indirFlags = GTF_EMPTY; - if (m_dst->OperIs(GT_BLK)) + if 
(m_store->OperIs(GT_STORE_BLK)) { - addr = m_dst->gtGetOp1(); + addr = m_store->AsIndir()->Addr(); indirFlags = - m_dst->gtFlags & (GTF_IND_VOLATILE | GTF_IND_NONFAULTING | GTF_IND_UNALIGNED | GTF_IND_INITCLASS); + m_store->gtFlags & (GTF_IND_VOLATILE | GTF_IND_NONFAULTING | GTF_IND_UNALIGNED | GTF_IND_INITCLASS); } else if (m_src->OperIs(GT_BLK)) { - addr = m_src->gtGetOp1(); + addr = m_src->AsIndir()->Addr(); indirFlags = m_src->gtFlags & (GTF_IND_VOLATILE | GTF_IND_NONFAULTING | GTF_IND_UNALIGNED | GTF_IND_INITCLASS); } @@ -618,7 +613,7 @@ class DecompositionPlan else { unsigned addrLcl = m_compiler->lvaGrabTemp(true DEBUGARG("Spilling address for field-by-field copy")); - statements->AddStatement(m_compiler->gtNewTempAssign(addrLcl, addr)); + statements->AddStatement(m_compiler->gtNewTempStore(addrLcl, addr)); addr = m_compiler->gtNewLclvNode(addrLcl, addr->TypeGet()); } } @@ -653,15 +648,14 @@ class DecompositionPlan if (remainderStrategy.Type == RemainderStrategy::FullBlock) { - // We will reuse the existing block op's operands. Rebase the - // address off of the new local we created. + // We will reuse the existing block op. Rebase the address off of the new local we created. if (m_src->OperIs(GT_BLK)) { - m_src->AsUnOp()->gtOp1 = grabAddr(0); + m_src->AsIndir()->Addr() = grabAddr(0); } - else if (m_dst->OperIs(GT_BLK)) + else if (m_store->OperIs(GT_STORE_BLK)) { - m_dst->AsUnOp()->gtOp1 = grabAddr(0); + m_store->AsIndir()->Addr() = grabAddr(0); } } @@ -671,7 +665,7 @@ class DecompositionPlan // that it's best to do it last. 
if ((remainderStrategy.Type == RemainderStrategy::FullBlock) && m_srcInvolvesReplacements) { - statements->AddStatement(m_compiler->gtNewAssignNode(m_dst, m_src)); + statements->AddStatement(m_store); if (m_src->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { @@ -722,27 +716,6 @@ class DecompositionPlan continue; } - GenTree* dst; - if (entry.ToLclNum != BAD_VAR_NUM) - { - dst = m_compiler->gtNewLclvNode(entry.ToLclNum, entry.Type); - } - else if (m_dst->OperIs(GT_LCL_VAR, GT_LCL_FLD)) - { - unsigned offs = m_dst->AsLclVarCommon()->GetLclOffs() + entry.Offset; - // Local morph ensures we do not see local indirs here that dereference beyond UINT16_MAX. - noway_assert(FitsIn(offs)); - dst = m_compiler->gtNewLclFldNode(m_dst->AsLclVarCommon()->GetLclNum(), entry.Type, offs); - m_compiler->lvaSetVarDoNotEnregister(m_dst->AsLclVarCommon()->GetLclNum() - DEBUGARG(DoNotEnregisterReason::LocalField)); - } - else - { - GenTree* addr = grabAddr(entry.Offset); - dst = m_compiler->gtNewIndir(entry.Type, addr); - PropagateIndirFlags(dst, indirFlags); - } - GenTree* src; if (entry.FromLclNum != BAD_VAR_NUM) { @@ -764,6 +737,7 @@ class DecompositionPlan else if (m_src->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { unsigned offs = m_src->AsLclVarCommon()->GetLclOffs() + entry.Offset; + // Local morph ensures we do not see local indirs here that dereference beyond UINT16_MAX. 
noway_assert(FitsIn(offs)); src = m_compiler->gtNewLclFldNode(m_src->AsLclVarCommon()->GetLclNum(), entry.Type, offs); m_compiler->lvaSetVarDoNotEnregister(m_src->AsLclVarCommon()->GetLclNum() @@ -776,31 +750,37 @@ class DecompositionPlan PropagateIndirFlags(src, indirFlags); } - statements->AddStatement(m_compiler->gtNewAssignNode(dst, src)); + GenTree* store; + if (entry.ToLclNum != BAD_VAR_NUM) + { + store = m_compiler->gtNewStoreLclVarNode(entry.ToLclNum, src); + } + else if (m_store->OperIsLocalStore()) + { + unsigned offs = m_store->AsLclVarCommon()->GetLclOffs() + entry.Offset; + // Local morph ensures we do not see local indirs here that dereference beyond UINT16_MAX. + noway_assert(FitsIn(offs)); + store = m_compiler->gtNewStoreLclFldNode(m_store->AsLclVarCommon()->GetLclNum(), entry.Type, offs, src); + m_compiler->lvaSetVarDoNotEnregister(m_store->AsLclVarCommon()->GetLclNum() + DEBUGARG(DoNotEnregisterReason::LocalField)); + } + else + { + GenTree* addr = grabAddr(entry.Offset); + store = m_compiler->gtNewStoreIndNode(entry.Type, addr, src); + PropagateIndirFlags(store, indirFlags); + } + + statements->AddStatement(store); } if ((remainderStrategy.Type == RemainderStrategy::FullBlock) && !m_srcInvolvesReplacements) { - statements->AddStatement(m_compiler->gtNewAssignNode(m_dst, m_src)); + statements->AddStatement(m_store); } if (remainderStrategy.Type == RemainderStrategy::Primitive) { - GenTree* dst; - if (m_dst->OperIs(GT_LCL_VAR, GT_LCL_FLD)) - { - GenTreeLclVarCommon* dstLcl = m_dst->AsLclVarCommon(); - dst = m_compiler->gtNewLclFldNode(dstLcl->GetLclNum(), remainderStrategy.PrimitiveType, - dstLcl->GetLclOffs() + remainderStrategy.PrimitiveOffset); - m_compiler->lvaSetVarDoNotEnregister(dstLcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); - } - else - { - dst = m_compiler->gtNewIndir(remainderStrategy.PrimitiveType, - grabAddr(remainderStrategy.PrimitiveOffset)); - PropagateIndirFlags(dst, indirFlags); - } - GenTree* src; if 
(m_src->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { @@ -816,7 +796,22 @@ class DecompositionPlan PropagateIndirFlags(src, indirFlags); } - statements->AddStatement(m_compiler->gtNewAssignNode(dst, src)); + GenTree* store; + if (m_store->OperIsLocalStore()) + { + GenTreeLclVarCommon* dstLcl = m_store->AsLclVarCommon(); + store = m_compiler->gtNewStoreLclFldNode(dstLcl->GetLclNum(), remainderStrategy.PrimitiveType, + dstLcl->GetLclOffs() + remainderStrategy.PrimitiveOffset, src); + m_compiler->lvaSetVarDoNotEnregister(dstLcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); + } + else + { + store = m_compiler->gtNewStoreIndNode(remainderStrategy.PrimitiveType, + grabAddr(remainderStrategy.PrimitiveOffset), src); + PropagateIndirFlags(store, indirFlags); + } + + statements->AddStatement(store); } assert(numAddrUses == 0); @@ -838,7 +833,7 @@ class DecompositionPlan // Check if this entry is dying anyway. assert(m_dstInvolvesReplacements); - AggregateInfo* agg = m_aggregates[m_dst->AsLclVarCommon()->GetLclNum()]; + AggregateInfo* agg = m_aggregates[m_store->AsLclVarCommon()->GetLclNum()]; assert((agg != nullptr) && (agg->Replacements.size() > 0)); Replacement* firstRep = agg->Replacements.data(); assert((entry.ToReplacement >= firstRep) && (entry.ToReplacement < (firstRep + agg->Replacements.size()))); @@ -884,14 +879,14 @@ class DecompositionPlan // If we aren't writing a local here then since the address is not // exposed it cannot change. - if (!m_dst->OperIs(GT_LCL_VAR, GT_LCL_FLD)) + if (!m_store->OperIsLocalStore()) { return true; } // Otherwise it could still be possible that the address is part of // the struct we're writing. 
- unsigned dstLclNum = m_dst->AsLclVarCommon()->GetLclNum(); + unsigned dstLclNum = m_store->AsLclVarCommon()->GetLclNum(); if (lclNum == dstLclNum) { return false; @@ -973,29 +968,24 @@ void Compiler::gtPeelOffsets(GenTree** addr, target_ssize_t* offset, FieldSeq** } } -//------------------------------------------------------------------------ -// HandleAssignment: -// Handle an assignment that may be between struct locals with replacements. +// HandleStore: +// Handle a store that may be between struct locals with replacements. // // Parameters: -// asg - The assignment -// user - The user of the assignment. +// use - The store's use +// user - The store's user // -void ReplaceVisitor::HandleAssignment(GenTree** use, GenTree* user) +void ReplaceVisitor::HandleStore(GenTree** use, GenTree* user) { - GenTreeOp* asg = (*use)->AsOp(); + GenTree* store = *use; - if (!asg->gtGetOp1()->TypeIs(TYP_STRUCT)) + if (!store->TypeIs(TYP_STRUCT)) { return; } - GenTree* dst = asg->gtGetOp1(); - assert(!dst->OperIs(GT_COMMA)); - - GenTree* src = asg->gtGetOp2()->gtEffectiveVal(); - - GenTreeLclVarCommon* dstLcl = dst->OperIs(GT_LCL_VAR, GT_LCL_FLD) ? dst->AsLclVarCommon() : nullptr; + GenTree* src = store->Data()->gtEffectiveVal(); + GenTreeLclVarCommon* dstLcl = store->OperIsLocalStore() ? store->AsLclVarCommon() : nullptr; GenTreeLclVarCommon* srcLcl = src->OperIs(GT_LCL_VAR, GT_LCL_FLD) ? 
src->AsLclVarCommon() : nullptr; Replacement* dstFirstRep = nullptr; @@ -1012,14 +1002,14 @@ void ReplaceVisitor::HandleAssignment(GenTree** use, GenTree* user) return; } - JITDUMP("Processing block operation [%06u] that involves replacements\n", Compiler::dspTreeID(asg)); + JITDUMP("Processing block operation [%06u] that involves replacements\n", Compiler::dspTreeID(store)); if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_BLK) || src->IsConstInitVal()) { DecompositionStatementList result; - EliminateCommasInBlockOp(asg, &result); + EliminateCommasInBlockOp(store, &result); - DecompositionPlan plan(m_compiler, m_aggregates, m_liveness, dst, src, dstInvolvesReplacements, + DecompositionPlan plan(m_compiler, m_aggregates, m_liveness, store, src, dstInvolvesReplacements, srcInvolvesReplacements); if (dstInvolvesReplacements) @@ -1107,11 +1097,11 @@ void ReplaceVisitor::HandleAssignment(GenTree** use, GenTree* user) if (src->IsConstInitVal()) { - InitFields(dst->AsLclVarCommon(), dstFirstRep, dstEndRep, &plan); + InitFields(store->AsLclVarCommon(), dstFirstRep, dstEndRep, &plan); } else { - CopyBetweenFields(dst, dstFirstRep, dstEndRep, src, srcFirstRep, srcEndRep, &result, &plan); + CopyBetweenFields(store, dstFirstRep, dstEndRep, src, srcFirstRep, srcEndRep, &result, &plan); } plan.Finalize(&result); @@ -1121,18 +1111,18 @@ void ReplaceVisitor::HandleAssignment(GenTree** use, GenTree* user) } else { - if (asg->gtGetOp2()->OperIs(GT_LCL_VAR, GT_LCL_FLD)) + if (store->Data()->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { - GenTreeLclVarCommon* rhsLcl = asg->gtGetOp2()->AsLclVarCommon(); - unsigned size = rhsLcl->GetLayout(m_compiler)->GetSize(); - WriteBackBefore(&asg->gtOp2, rhsLcl->GetLclNum(), rhsLcl->GetLclOffs(), size); + GenTreeLclVarCommon* srcLcl = store->Data()->AsLclVarCommon(); + unsigned size = srcLcl->GetLayout(m_compiler)->GetSize(); + WriteBackBefore(&store->Data(), srcLcl->GetLclNum(), srcLcl->GetLclOffs(), size); } - if (asg->gtGetOp1()->OperIs(GT_LCL_VAR, GT_LCL_FLD)) 
+ if (store->OperIsLocalStore()) { - GenTreeLclVarCommon* lhsLcl = asg->gtGetOp1()->AsLclVarCommon(); - unsigned size = lhsLcl->GetLayout(m_compiler)->GetSize(); - MarkForReadBack(lhsLcl->GetLclNum(), lhsLcl->GetLclOffs(), size); + GenTreeLclVarCommon* lclStore = store->AsLclVarCommon(); + unsigned size = lclStore->GetLayout(m_compiler)->GetSize(); + MarkForReadBack(lclStore->GetLclNum(), lclStore->GetLclOffs(), size); } } } @@ -1169,61 +1159,57 @@ bool ReplaceVisitor::OverlappingReplacements(GenTreeLclVarCommon* lcl, // Ensure that the sources of a block op are not commas by extracting side effects. // // Parameters: -// asg - The block op -// result - Statement list to add resulting statements to. +// store - The block op +// result - Statement list to add resulting statements to. // // Remarks: // Works similarly to MorphInitBlockHelper::EliminateCommas. // -void ReplaceVisitor::EliminateCommasInBlockOp(GenTreeOp* asg, DecompositionStatementList* result) +void ReplaceVisitor::EliminateCommasInBlockOp(GenTree* store, DecompositionStatementList* result) { bool any = false; - GenTree* lhs = asg->gtGetOp1(); - assert(lhs->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_IND, GT_BLK)); - - GenTree* rhs = asg->gtGetOp2(); + GenTree* src = store->Data(); - if (asg->IsReverseOp()) + if (store->IsReverseOp()) { - while (rhs->OperIs(GT_COMMA)) + while (src->OperIs(GT_COMMA)) { - result->AddStatement(rhs->gtGetOp1()); - rhs = rhs->gtGetOp2(); + result->AddStatement(src->gtGetOp1()); + src = src->gtGetOp2(); any = true; } } else { - if (lhs->OperIsIndir() && rhs->OperIs(GT_COMMA)) + if (store->OperIsIndir() && src->OperIs(GT_COMMA)) { - GenTree* addr = lhs->gtGetOp1(); - // Note that GTF_GLOB_REF is not up to date here, hence we need - // a tree walk to find address exposed locals. 
- if (((addr->gtFlags & GTF_ALL_EFFECT) != 0) || (((rhs->gtFlags & GTF_ASG) != 0) && !addr->IsInvariant()) || + GenTree* addr = store->gtGetOp1(); + // Note that GTF_GLOB_REF is not up to date here, hence we need a tree walk to find address exposed locals. + if (((addr->gtFlags & GTF_ALL_EFFECT) != 0) || (((src->gtFlags & GTF_ASG) != 0) && !addr->IsInvariant()) || m_compiler->gtHasAddressExposedLocals(addr)) { - unsigned lhsAddrLclNum = m_compiler->lvaGrabTemp(true DEBUGARG("Block morph LHS addr")); + unsigned dstAddrLclNum = m_compiler->lvaGrabTemp(true DEBUGARG("Block morph store addr")); - result->AddStatement(m_compiler->gtNewTempAssign(lhsAddrLclNum, addr)); - lhs->AsUnOp()->gtOp1 = m_compiler->gtNewLclvNode(lhsAddrLclNum, genActualType(addr)); - m_compiler->gtUpdateNodeSideEffects(lhs); + result->AddStatement(m_compiler->gtNewTempStore(dstAddrLclNum, addr)); + store->AsIndir()->Addr() = m_compiler->gtNewLclvNode(dstAddrLclNum, genActualType(addr)); + m_compiler->gtUpdateNodeSideEffects(store); m_madeChanges = true; any = true; } } - while (rhs->OperIs(GT_COMMA)) + while (src->OperIs(GT_COMMA)) { - result->AddStatement(rhs->gtGetOp1()); - rhs = rhs->gtGetOp2(); + result->AddStatement(src->gtGetOp1()); + src = src->gtGetOp2(); any = true; } } if (any) { - asg->gtOp2 = rhs; - m_compiler->gtUpdateNodeSideEffects(asg); + store->Data() = src; + m_compiler->gtUpdateNodeSideEffects(store); m_madeChanges = true; } } @@ -1234,12 +1220,12 @@ void ReplaceVisitor::EliminateCommasInBlockOp(GenTreeOp* asg, DecompositionState // directly inited, and mark the other ones as requiring read back. // // Parameters: -// dst - Destination local that involves replacement. +// dstStore - Store into the destination local that involves replacement. // firstRep - The first replacement. // endRep - End of the replacements. // plan - Decomposition plan to add initialization entries into. 
// -void ReplaceVisitor::InitFields(GenTreeLclVarCommon* dst, +void ReplaceVisitor::InitFields(GenTreeLclVarCommon* dstStore, Replacement* firstRep, Replacement* endRep, DecompositionPlan* plan) @@ -1259,7 +1245,7 @@ void ReplaceVisitor::InitFields(GenTreeLclVarCommon* dst, } JITDUMP(" Init V%02u (%s)\n", rep->LclNum, rep->Description); - plan->InitReplacement(rep, rep->Offset - dst->GetLclOffs()); + plan->InitReplacement(rep, rep->Offset - dstStore->GetLclOffs()); } } @@ -1299,7 +1285,7 @@ const char* ReplaceVisitor::LastUseString(GenTreeLclVarCommon* lcl, Replacement* // Copy between two struct locals that may involve replacements. // // Parameters: -// dst - Destination node +// store - Store node // dstFirstRep - First replacement of the destination or nullptr if destination is not a promoted local. // dstEndRep - One past last replacement of the destination. // src - Source node @@ -1308,7 +1294,7 @@ const char* ReplaceVisitor::LastUseString(GenTreeLclVarCommon* lcl, Replacement* // statements - Statement list to add potential "init" statements to. // plan - Data structure that tracks the specific copies to be done. // -void ReplaceVisitor::CopyBetweenFields(GenTree* dst, +void ReplaceVisitor::CopyBetweenFields(GenTree* store, Replacement* dstFirstRep, Replacement* dstEndRep, GenTree* src, @@ -1319,7 +1305,7 @@ void ReplaceVisitor::CopyBetweenFields(GenTree* dst, { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_BLK)); - GenTreeLclVarCommon* dstLcl = dst->OperIs(GT_LCL_VAR, GT_LCL_FLD) ? dst->AsLclVarCommon() : nullptr; + GenTreeLclVarCommon* dstLcl = store->OperIsLocalStore() ? store->AsLclVarCommon() : nullptr; GenTreeLclVarCommon* srcLcl = src->OperIs(GT_LCL_VAR, GT_LCL_FLD) ? src->AsLclVarCommon() : nullptr; unsigned dstBaseOffs = dstLcl != nullptr ? dstLcl->GetLclOffs() : 0; unsigned srcBaseOffs = srcLcl != nullptr ? 
srcLcl->GetLclOffs() : 0; diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp index 7dc6ca1ecd74b..1d10709ec8580 100644 --- a/src/coreclr/jit/rangecheck.cpp +++ b/src/coreclr/jit/rangecheck.cpp @@ -487,8 +487,8 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTree* expr, bool rejectNegativeCon // If the rhs expr is local, then try to find the def of the local. else if (expr->IsLocal()) { - LclSsaVarDsc* ssaDef = GetSsaDefAsg(expr->AsLclVarCommon()); - return (ssaDef != nullptr) && IsMonotonicallyIncreasing(ssaDef->GetAssignment()->Data(), rejectNegativeConst); + LclSsaVarDsc* ssaDef = GetSsaDefStore(expr->AsLclVarCommon()); + return (ssaDef != nullptr) && IsMonotonicallyIncreasing(ssaDef->GetDefNode()->Data(), rejectNegativeConst); } else if (expr->OperIs(GT_ADD, GT_MUL, GT_LSH)) { @@ -520,7 +520,7 @@ bool RangeCheck::IsMonotonicallyIncreasing(GenTree* expr, bool rejectNegativeCon } // Given a lclvar use, try to find the lclvar's defining assignment and its containing block. -LclSsaVarDsc* RangeCheck::GetSsaDefAsg(GenTreeLclVarCommon* lclUse) +LclSsaVarDsc* RangeCheck::GetSsaDefStore(GenTreeLclVarCommon* lclUse) { unsigned ssaNum = lclUse->GetSsaNum(); @@ -534,7 +534,7 @@ LclSsaVarDsc* RangeCheck::GetSsaDefAsg(GenTreeLclVarCommon* lclUse) LclSsaVarDsc* ssaDef = varDsc->GetPerSsaData(ssaNum); // RangeCheck does not care about uninitialized variables. - if (ssaDef->GetAssignment() == nullptr) + if (ssaDef->GetDefNode() == nullptr) { // Parameters are expected to be defined in fgFirstBB if FIRST_SSA_NUM is set if (varDsc->lvIsParam && (ssaNum == SsaConfig::FIRST_SSA_NUM)) @@ -547,7 +547,7 @@ LclSsaVarDsc* RangeCheck::GetSsaDefAsg(GenTreeLclVarCommon* lclUse) // RangeCheck does not understand definitions generated by LCL_FLD nodes // nor definitions generated by indirect stores to local variables, nor // stores through parent structs. 
- GenTreeLclVarCommon* defStore = ssaDef->GetAssignment(); + GenTreeLclVarCommon* defStore = ssaDef->GetDefNode(); if (!defStore->OperIs(GT_STORE_LCL_VAR) || !defStore->HasSsaName()) { return nullptr; @@ -1129,7 +1129,7 @@ Range RangeCheck::ComputeRangeForLocalDef(BasicBlock* block, GenTreeLclVarCommon* lcl, bool monIncreasing DEBUGARG(int indent)) { - LclSsaVarDsc* ssaDef = GetSsaDefAsg(lcl); + LclSsaVarDsc* ssaDef = GetSsaDefStore(lcl); if (ssaDef == nullptr) { return Range(Limit(Limit::keUnknown)); @@ -1138,18 +1138,18 @@ Range RangeCheck::ComputeRangeForLocalDef(BasicBlock* block, if (m_pCompiler->verbose) { JITDUMP("----------------------------------------------------\n"); - m_pCompiler->gtDispTree(ssaDef->GetAssignment()); + m_pCompiler->gtDispTree(ssaDef->GetDefNode()); JITDUMP("----------------------------------------------------\n"); } #endif - Range range = GetRange(ssaDef->GetBlock(), ssaDef->GetAssignment()->Data(), monIncreasing DEBUGARG(indent)); + Range range = GetRange(ssaDef->GetBlock(), ssaDef->GetDefNode()->Data(), monIncreasing DEBUGARG(indent)); if (!BitVecOps::MayBeUninit(block->bbAssertionIn) && (m_pCompiler->GetAssertionCount() > 0)) { JITDUMP("Merge assertions from " FMT_BB ": ", block->bbNum); Compiler::optDumpAssertionIndices(block->bbAssertionIn, " "); - JITDUMP("for definition [%06d]\n", Compiler::dspTreeID(ssaDef->GetAssignment())) + JITDUMP("for definition [%06d]\n", Compiler::dspTreeID(ssaDef->GetDefNode())) - MergeEdgeAssertions(ssaDef->GetAssignment(), block->bbAssertionIn, &range); + MergeEdgeAssertions(ssaDef->GetDefNode(), block->bbAssertionIn, &range); JITDUMP("done merging\n"); } return range; @@ -1284,7 +1284,7 @@ bool RangeCheck::DoesBinOpOverflow(BasicBlock* block, GenTreeOp* binop) // Check if the var definition the rhs involves arithmetic that overflows. 
bool RangeCheck::DoesVarDefOverflow(GenTreeLclVarCommon* lcl) { - LclSsaVarDsc* ssaDef = GetSsaDefAsg(lcl); + LclSsaVarDsc* ssaDef = GetSsaDefStore(lcl); if (ssaDef == nullptr) { if ((lcl->GetSsaNum() == SsaConfig::FIRST_SSA_NUM) && m_pCompiler->lvaIsParameter(lcl->GetLclNum())) @@ -1294,7 +1294,7 @@ bool RangeCheck::DoesVarDefOverflow(GenTreeLclVarCommon* lcl) } return true; } - return DoesOverflow(ssaDef->GetBlock(), ssaDef->GetAssignment()->Data()); + return DoesOverflow(ssaDef->GetBlock(), ssaDef->GetDefNode()->Data()); } bool RangeCheck::DoesPhiOverflow(BasicBlock* block, GenTree* expr) diff --git a/src/coreclr/jit/rangecheck.h b/src/coreclr/jit/rangecheck.h index 1891c0f6fc268..7af435919cbcc 100644 --- a/src/coreclr/jit/rangecheck.h +++ b/src/coreclr/jit/rangecheck.h @@ -689,8 +689,8 @@ class RangeCheck bool IsOverBudget(); private: - // Given a lclvar use, try to find the lclvar's defining assignment and its containing block. - LclSsaVarDsc* GetSsaDefAsg(GenTreeLclVarCommon* lclUse); + // Given a lclvar use, try to find the lclvar's defining store and its containing block. + LclSsaVarDsc* GetSsaDefStore(GenTreeLclVarCommon* lclUse); GenTreeBoundsChk* m_pCurBndsChk; diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index de0dd3de6b9ba..a2dbcdb5e999c 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -1638,21 +1638,18 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // Here's a walkthrough of how this operates. Given a block like // // STMT00388 (IL 0x30D... ???) -// * ASG ref -// +--* LCL_VAR ref V121 tmp97 d:1 +// * STORE_LCL_VAR ref V121 tmp97 d:1 // \--* IND ref // \--* LCL_VAR byref V116 tmp92 u:1 (last use) Zero Fseq[m_task] $18c // // STMT00390 (IL 0x30D... ???) 
-// * ASG bool -// +--* LCL_VAR int V123 tmp99 d:1 +// * STORE_LCL_VAR int V123 tmp99 d:1 // \--* NE int // +--* LCL_VAR ref V121 tmp97 u:1 // \--* CNS_INT ref null $VN.Null // // STMT00391 -// * ASG ref $133 -// +--* LCL_VAR ref V124 tmp100 d:1 $133 +// * STORE_LCL_VAR ref V124 tmp100 d:1 // \--* IND ref $133 // \--* CNS_INT(h) long 0x31BD3020 [ICON_STR_HDL] $34f // @@ -1662,41 +1659,39 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // +--* LCL_VAR int V123 tmp99 u:1 (last use) // \--* CNS_INT int 0 $40 // -// We will first consider STMT00391. It is a local assign but the RHS value number +// We will first consider STMT00391. It is a local store but the value's VN // isn't related to $8ff. So we continue searching and add V124 to the array // of defined locals. // -// Next we consider STMT00390. It is a local assign and the RHS value number is -// the same, $8ff. So this compare is a fwd-sub candidate. We check if any local -// on the RHS is in the defined locals array. The answer is no. So the RHS tree -// can be safely forwarded in place of the compare in STMT00392. We check if V123 is -// live out of the block. The answer is no. So This RHS tree becomes the candidate tree. -// We add V123 to the array of defined locals and keep searching. +// Next we consider STMT00390. It is a local store and the value's VN is the +// same, $8ff. So this compare is a fwd-sub candidate. We check if any local +// in the value tree is in the defined locals array. The answer is no. So the +// value tree can be safely forwarded in place of the compare in STMT00392. +// We check if V123 is live out of the block. The answer is no. So this value +// tree becomes the candidate tree. We add V123 to the array of defined locals +// and keep searching. // -// Next we consider STMT00388, It is a local assign but the RHS value number -// isn't related to $8ff. So we continue searching and add V121 to the array -// of defined locals. 
+// Next we consider STMT00388, It is a local store but the value's VN isn't +// related to $8ff. So we continue searching and add V121 to the array of +// defined locals. // // We reach the end of the block and stop searching. // // Since we found a viable candidate, we clone it and substitute into the jump: // // STMT00388 (IL 0x30D... ???) -// * ASG ref -// +--* LCL_VAR ref V121 tmp97 d:1 +// * STORE_LCL_VAR ref V121 tmp97 d:1 // \--* IND ref // \--* LCL_VAR byref V116 tmp92 u:1 (last use) Zero Fseq[m_task] $18c // // STMT00390 (IL 0x30D... ???) -// * ASG bool -// +--* LCL_VAR int V123 tmp99 d:1 +// * STORE_LCL_VAR int V123 tmp99 d:1 // \--* NE int // +--* LCL_VAR ref V121 tmp97 u:1 // \--* CNS_INT ref null $VN.Null // // STMT00391 -// * ASG ref $133 -// +--* LCL_VAR ref V124 tmp100 d:1 $133 +// * STORE_LCL_VAR ref V124 tmp100 d:1 // \--* IND ref $133 // \--* CNS_INT(h) long 0x31BD3020 [ICON_STR_HDL] $34f // @@ -1811,7 +1806,7 @@ bool Compiler::optRedundantRelop(BasicBlock* const block) break; } - // We are looking for ASG(lcl, ...) + // We are looking for STORE_LCL_VAR(...) // GenTree* const prevTree = prevStmt->GetRootNode(); diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp index 1d66784883aad..97b7d06e51ebd 100644 --- a/src/coreclr/jit/simd.cpp +++ b/src/coreclr/jit/simd.cpp @@ -466,7 +466,7 @@ GenTree* Compiler::impSIMDPopStack() // void Compiler::setLclRelatedToSIMDIntrinsic(GenTree* tree) { - assert(tree->OperIs(GT_LCL_VAR) || tree->IsLclVarAddr()); + assert(tree->OperIsScalarLocal() || tree->IsLclVarAddr()); LclVarDsc* lclVarDsc = lvaGetDesc(tree->AsLclVarCommon()); lclVarDsc->lvUsedInSIMDIntrinsic = true; } @@ -539,7 +539,7 @@ bool areFieldAddressesTheSame(GenTreeFieldAddr* op1, GenTreeFieldAddr* op2) // bool Compiler::areFieldsContiguous(GenTreeIndir* op1, GenTreeIndir* op2) { - assert(op1->OperIs(GT_IND) && op2->OperIs(GT_IND)); + assert(op1->isIndir() && op2->isIndir()); // TODO-1stClassStructs: delete once IND nodes are no more. 
assert(!op1->TypeIs(TYP_STRUCT) && !op2->TypeIs(TYP_STRUCT)); @@ -591,7 +591,7 @@ bool Compiler::areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* sec // bool Compiler::areArrayElementsContiguous(GenTree* op1, GenTree* op2) { - assert(op1->OperIs(GT_IND) && op2->OperIs(GT_IND)); + assert(op1->isIndir() && op2->isIndir()); assert(!op1->TypeIs(TYP_STRUCT) && (op1->TypeGet() == op2->TypeGet())); GenTreeIndexAddr* op1IndexAddr = op1->AsIndir()->Addr()->AsIndexAddr(); @@ -647,7 +647,7 @@ bool Compiler::areArgumentsContiguous(GenTree* op1, GenTree* op2) assert(!op1->TypeIs(TYP_STRUCT)); - if (op1->OperIs(GT_IND) && op2->OperIs(GT_IND)) + if (op1->isIndir() && op2->isIndir()) { GenTree* op1Addr = op1->AsIndir()->Addr(); GenTree* op2Addr = op2->AsIndir()->Addr(); @@ -682,12 +682,12 @@ bool Compiler::areArgumentsContiguous(GenTree* op1, GenTree* op2) // return the address node. // // TODO-CQ: -// Currently just supports GT_IND(GT_INDEX_ADDR / GT_FIELD_ADDR), because we can only verify those nodes -// are located contiguously or not. In future we should support more cases. +// Currently just supports GT_IND/GT_STOREIND(GT_INDEX_ADDR / GT_FIELD_ADDR), because we can only verify +// those nodes are located contiguously or not. In future we should support more cases. // GenTree* Compiler::CreateAddressNodeForSimdHWIntrinsicCreate(GenTree* tree, var_types simdBaseType, unsigned simdSize) { - assert(tree->OperIs(GT_IND)); + assert(tree->isIndir()); GenTree* addr = tree->AsIndir()->Addr(); if (addr->OperIs(GT_FIELD_ADDR)) @@ -730,46 +730,45 @@ GenTree* Compiler::CreateAddressNodeForSimdHWIntrinsicCreate(GenTree* tree, var_ } //------------------------------------------------------------------------------- -// impMarkContiguousSIMDFieldAssignments: Try to identify if there are contiguous +// impMarkContiguousSIMDFieldStores: Try to identify if there are contiguous // assignments from SIMD field to memory. 
If there are, then mark the related // lclvar so that it won't be promoted. // // Arguments: // stmt - GenTree*. Input statement node. // -void Compiler::impMarkContiguousSIMDFieldAssignments(Statement* stmt) +void Compiler::impMarkContiguousSIMDFieldStores(Statement* stmt) { if (opts.OptimizationDisabled()) { return; } GenTree* expr = stmt->GetRootNode(); - if (expr->OperGet() == GT_ASG && expr->TypeGet() == TYP_FLOAT) + if (expr->OperIsStore() && expr->TypeIs(TYP_FLOAT)) { - GenTree* curDst = expr->AsOp()->gtOp1; - GenTree* curSrc = expr->AsOp()->gtOp2; + GenTree* curValue = expr->Data(); unsigned index = 0; - var_types simdBaseType = curSrc->TypeGet(); + var_types simdBaseType = curValue->TypeGet(); unsigned simdSize = 0; - GenTree* srcSimdLclAddr = getSIMDStructFromField(curSrc, &index, &simdSize, true); + GenTree* srcSimdLclAddr = getSIMDStructFromField(curValue, &index, &simdSize, true); if (srcSimdLclAddr == nullptr || simdBaseType != TYP_FLOAT) { - fgPreviousCandidateSIMDFieldAsgStmt = nullptr; + fgPreviousCandidateSIMDFieldStoreStmt = nullptr; } else if (index == 0) { - fgPreviousCandidateSIMDFieldAsgStmt = stmt; + fgPreviousCandidateSIMDFieldStoreStmt = stmt; } - else if (fgPreviousCandidateSIMDFieldAsgStmt != nullptr) + else if (fgPreviousCandidateSIMDFieldStoreStmt != nullptr) { assert(index > 0); - GenTree* prevAsgExpr = fgPreviousCandidateSIMDFieldAsgStmt->GetRootNode(); - GenTree* prevDst = prevAsgExpr->AsOp()->gtOp1; - GenTree* prevSrc = prevAsgExpr->AsOp()->gtOp2; - if (!areArgumentsContiguous(prevDst, curDst) || !areArgumentsContiguous(prevSrc, curSrc)) + GenTree* curStore = expr; + GenTree* prevStore = fgPreviousCandidateSIMDFieldStoreStmt->GetRootNode(); + GenTree* prevValue = prevStore->Data(); + if (!areArgumentsContiguous(prevStore, curStore) || !areArgumentsContiguous(prevValue, curValue)) { - fgPreviousCandidateSIMDFieldAsgStmt = nullptr; + fgPreviousCandidateSIMDFieldStoreStmt = nullptr; } else { @@ -778,9 +777,9 @@ void 
Compiler::impMarkContiguousSIMDFieldAssignments(Statement* stmt) // Successfully found the pattern, mark the lclvar as UsedInSIMDIntrinsic setLclRelatedToSIMDIntrinsic(srcSimdLclAddr); - if (curDst->OperIs(GT_IND) && curDst->AsIndir()->Addr()->OperIs(GT_FIELD_ADDR)) + if (curStore->OperIs(GT_STOREIND) && curStore->AsIndir()->Addr()->OperIs(GT_FIELD_ADDR)) { - GenTreeFieldAddr* addr = curDst->AsIndir()->Addr()->AsFieldAddr(); + GenTreeFieldAddr* addr = curStore->AsIndir()->Addr()->AsFieldAddr(); if (addr->IsInstance()) { GenTree* objRef = addr->GetFldObj(); @@ -793,14 +792,14 @@ void Compiler::impMarkContiguousSIMDFieldAssignments(Statement* stmt) } else { - fgPreviousCandidateSIMDFieldAsgStmt = stmt; + fgPreviousCandidateSIMDFieldStoreStmt = stmt; } } } } else { - fgPreviousCandidateSIMDFieldAsgStmt = nullptr; + fgPreviousCandidateSIMDFieldStoreStmt = nullptr; } } #endif // FEATURE_SIMD diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp index 931891bf4de0e..0cb64b37428e3 100644 --- a/src/coreclr/jit/simdashwintrinsic.cpp +++ b/src/coreclr/jit/simdashwintrinsic.cpp @@ -2277,8 +2277,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, if (copyBlkDst != nullptr) { assert(copyBlkSrc != nullptr); - GenTree* dest = gtNewLoadValueNode(simdType, copyBlkDst); - GenTree* retNode = gtNewAssignNode(dest, copyBlkSrc); + GenTree* retNode = gtNewStoreValueNode(simdType, copyBlkDst, copyBlkSrc); return retNode; } diff --git a/src/coreclr/jit/ssabuilder.h b/src/coreclr/jit/ssabuilder.h index 13f1f49339523..325903dc3e6f0 100644 --- a/src/coreclr/jit/ssabuilder.h +++ b/src/coreclr/jit/ssabuilder.h @@ -27,12 +27,10 @@ class SsaBuilder // Requires stmt nodes to be already sequenced in evaluation order. Analyzes the graph // for introduction of phi-nodes as GT_PHI tree nodes at the beginning of each block. // Each GT_LCL_VAR is given its ssa number through its GetSsaNum() field in the node. 
- // Each GT_PHI node will be under a GT_ASG node with the LHS set to the local node and - // the RHS to the GT_PHI itself. The inputs to the PHI are represented as a linked list - // of GT_PHI_ARG nodes. Each use or def is denoted by the corresponding GT_LCL_VAR - // tree. For example, to get all uses of a particular variable fully defined by its - // lclNum and ssaNum, one would use m_uses and look up all the uses. Similarly, a single - // def of an SSA variable can be looked up similarly using m_defs member. + // Each GT_PHI node will be under a STORE_LCL_VAR node as the store's value operand. + // The inputs to the PHI are represented as a linked list of GT_PHI_ARG nodes. Each + // use or def is denoted by the corresponding local nodes. All defs of a particular + // variable are stored in the "per SSA data" on the local descriptor. void Build(); private: @@ -68,15 +66,14 @@ class SsaBuilder // Requires "postOrder" to hold the blocks of the flowgraph in topologically sorted order. Requires // count to be the valid entries in the "postOrder" array. Inserts GT_PHI nodes at the beginning - // of basic blocks that require them like so: - // GT_ASG(GT_LCL_VAR, GT_PHI(GT_PHI_ARG(ssaNum, Block*), GT_PHI_ARG(ssaNum, Block*), ...)); + // of basic blocks that require them. void InsertPhiFunctions(BasicBlock** postOrder, int count); // Rename all definitions and uses within the compiled method. void RenameVariables(); // Rename all definitions and uses within a block. void BlockRenameVariables(BasicBlock* block); - // Rename a local or memory definition generated by a GT_ASG/GT_CALL node. + // Rename a local or memory definition generated by a local store/GT_CALL node. void RenameDef(GenTree* defNode, BasicBlock* block); unsigned RenamePushDef(GenTree* defNode, BasicBlock* block, unsigned lclNum, bool isFullDef); // Rename a use of a local variable. 
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp index c9123c9b48a35..4b6aa7953981b 100644 --- a/src/coreclr/jit/valuenum.cpp +++ b/src/coreclr/jit/valuenum.cpp @@ -5522,7 +5522,7 @@ void Compiler::fgValueNumberArrayElemLoad(GenTree* loadTree, VNFuncApp* addrFunc // to an array element. // // Arguments: -// storeNode - The ASG node performing the store +// storeNode - The store node // addrFunc - The "VNF_PtrToArrElem" function representing the address // storeSize - The number of bytes being stored // value - (VN of) the value being stored @@ -5640,7 +5640,7 @@ void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, Fiel // a class/static field. // // Arguments: -// storeNode - The ASG node performing the store +// storeNode - The store node // baseAddr - The "base address" of the field (see "GenTree::IsFieldAddr") // fieldSeq - The field sequence representing the address // offset - The offset, relative to the field, of the target location