diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index 3bf17cee35156..f5f0712b55651 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -3285,7 +3285,7 @@ void CodeGen::genCall(GenTreeCall* call) // If there is nothing next, that means the result is thrown away, so this value is not live. // However, for minopts or debuggable code, we keep it live to support managed return value debugging. - if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode) + if ((call->gtNext == nullptr) && !compiler->opts.OptimizationDisabled() && !compiler->opts.compDbgCode) { gcInfo.gcMarkRegSetNpt(RBM_INTRET); } diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index c24f85a7dda5d..de2c1a642a105 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -1715,17 +1715,22 @@ void CodeGen::genGenerateMachineCode() printf("; Emitting "); - if (compiler->compCodeOpt() == Compiler::SMALL_CODE) + switch (compiler->opts.OptLevel()) { - printf("SMALL_CODE"); - } - else if (compiler->compCodeOpt() == Compiler::FAST_CODE) - { - printf("FAST_CODE"); - } - else - { - printf("BLENDED_CODE"); + case Compiler::OPT_MinOpts: + printf("MinOpts code"); + break; + case Compiler::OPT_Quick: + printf("quick and small code"); + break; + case Compiler::OPT_Blended: + printf("blended code"); + break; + case Compiler::OPT_Speed: + printf("fast code"); + break; + default: + unreached(); } printf(" for "); @@ -1799,11 +1804,7 @@ void CodeGen::genGenerateMachineCode() printf("; OSR variant for entry point 0x%x\n", compiler->info.compILEntry); } - if ((compiler->opts.compFlags & CLFLG_MAXOPT) == CLFLG_MAXOPT) - { - printf("; optimized code\n"); - } - else if (compiler->opts.compDbgEnC) + if (compiler->opts.compDbgEnC) { printf("; EnC code\n"); } @@ -1811,14 +1812,6 @@ void CodeGen::genGenerateMachineCode() { printf("; debuggable code\n"); } - else if (compiler->opts.MinOpts()) - { - printf("; MinOpts code\n"); - } - else - { - printf("; unknown optimization flags\n"); - } if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { @@ -1880,12 +1873,12 @@ void CodeGen::genGenerateMachineCode() // layout. This helps us generate smaller code, and allocate, after code generation, a smaller amount of // memory from the VM. 
- genFinalizeFrame(); + genFinalizeFrame(); // GetEmitter()->emitBegFN(isFramePointerUsed() #if defined(DEBUG) , - (compiler->compCodeOpt() != Compiler::SMALL_CODE) && + !compiler->opts.OptLevelIs(Compiler::OPT_Quick) && !compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) #endif ); @@ -5312,7 +5305,7 @@ void CodeGen::genFinalizeFrame() regMaskTP maskPushRegsInt = maskCalleeRegsPushed & ~maskPushRegsFloat; if ((maskPushRegsFloat != RBM_NONE) || - (compiler->opts.MinOpts() && (regSet.rsMaskResvd & maskCalleeRegsPushed & RBM_OPT_RSVD))) + (compiler->opts.OptimizationDisabled() && (regSet.rsMaskResvd & maskCalleeRegsPushed & RBM_OPT_RSVD))) { // Here we try to keep stack double-aligned before the vpush if ((genCountBits(regSet.rsMaskPreSpillRegs(true) | maskPushRegsInt) % 2) != 0) diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index e17dd3d743377..233134de2148d 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -7084,7 +7084,7 @@ void CodeGen::genCall(GenTreeCall* call) // If there is nothing next, that means the result is thrown away, so this value is not live. // However, for minopts or debuggable code, we keep it live to support managed return value debugging. - if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode) + if ((call->gtNext == nullptr) && !compiler->opts.OptimizationDisabled() && !compiler->opts.compDbgCode) { gcInfo.gcMarkRegSetNpt(RBM_INTRET); } diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 6780ab666212a..6ff2560ccd59d 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -9698,7 +9698,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs; gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs; - noway_assert(!compiler->opts.MinOpts() || isFramePointerUsed()); // FPO not allowed with minOpts + noway_assert(!compiler->opts.OptimizationDisabled() || isFramePointerUsed()); // FPO not allowed with minOpts #ifdef DEBUG genInterruptibleUsed = true; diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index bed028e7c2d00..89593e6ce70dd 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -1914,10 +1914,8 @@ void Compiler::compInit(ArenaAllocator* pAlloc, compRationalIRForm = false; #ifdef DEBUG - compCodeGenDone = false; - opts.compMinOptsIsUsed = false; + compCodeGenDone = false; #endif - opts.compMinOptsIsSet = false; // Used by fgFindJumpTargets for inlining heuristics. opts.instrCount = 0; @@ -2346,7 +2344,7 @@ bool Compiler::compShouldThrowOnNoway( // In min opts, we don't want the noway assert to go through the exception // path. Instead we want it to just silently go through codegen for // compat reasons. - return !opts.MinOpts(); + return !opts.OptimizationDisabled(); } // ConfigInteger does not offer an option for decimal flags. Any numbers are interpreted as hex. 
@@ -2391,46 +2389,39 @@ void Compiler::compInitOptions(JitFlags* jitFlags) } opts.jitFlags = jitFlags; - opts.compFlags = CLFLG_MAXOPT; // Default value is for full optimization + opts.compFlags = CLFLG_MINOPT; - if (jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE) || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) || - jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) + //------------------------------------------------------------------------- + + opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); + opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); + opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); + + if (opts.compDbgCode || jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT)) { - opts.compFlags = CLFLG_MINOPT; + // Use MinOpts for an explicit minopts request or for debug-friendly codegen + opts.compOptLevel = OPT_MinOpts; } - // Don't optimize .cctors (except prejit) or if we're an inlinee else if (!jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && ((info.compFlags & FLG_CCTOR) == FLG_CCTOR) && !compIsForInlining()) { - opts.compFlags = CLFLG_MINOPT; + // Don't waste time on static cctors in JIT mode + opts.compOptLevel = OPT_MinOpts; } - - // Default value is to generate a blend of size and speed optimizations - - opts.compCodeOpt = BLENDED_CODE; - - // If the EE sets SIZE_OPT or if we are compiling a Class constructor - // we will optimize for code size at the expense of speed - - if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) + else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT) || jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0)) { - opts.compCodeOpt = SMALL_CODE; + opts.compOptLevel = OPT_Quick; } - // - // If the EE sets SPEED_OPT we will optimize for speed at the expense of code size - // - else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT) || - (jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1) && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))) + else if (jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT)) { - opts.compCodeOpt = FAST_CODE; - assert(!jitFlags->IsSet(JitFlags::JIT_FLAG_SIZE_OPT)); + opts.compOptLevel = OPT_Speed; + opts.compFlags = CLFLG_MAXOPT; + } + else + { + opts.compOptLevel = OPT_Blended; + opts.compFlags = CLFLG_MAXOPT; } - - //------------------------------------------------------------------------- - - opts.compDbgCode = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_CODE); - opts.compDbgInfo = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_INFO); - opts.compDbgEnC = jitFlags->IsSet(JitFlags::JIT_FLAG_DEBUG_EnC); #ifdef DEBUG opts.compJitAlignLoopAdaptive = JitConfig.JitAlignLoopAdaptive() == 1; @@ -3283,11 +3274,23 @@ void Compiler::compInitOptions(JitFlags* jitFlags) printf("OPTIONS: OSR variant with entry point 0x%x\n", info.compILEntry); } - printf("OPTIONS: compCodeOpt = %s\n", - (opts.compCodeOpt == BLENDED_CODE) - ? "BLENDED_CODE" - : (opts.compCodeOpt == SMALL_CODE) ? "SMALL_CODE" - : (opts.compCodeOpt == FAST_CODE) ?
"FAST_CODE" : "UNKNOWN_CODE"); + switch (opts.OptLevel()) + { + case OPT_MinOpts: + printf("OPTIONS: OptLevel() = MinOpts\n"); + break; + case OPT_Quick: + printf("OPTIONS: OptLevel() = Quick\n"); + break; + case OPT_Blended: + printf("OPTIONS: OptLevel() = Blended\n"); + break; + case OPT_Speed: + printf("OPTIONS: OptLevel() = Speed\n"); + break; + default: + unreached(); + } printf("OPTIONS: compDbgCode = %s\n", dspBool(opts.compDbgCode)); printf("OPTIONS: compDbgInfo = %s\n", dspBool(opts.compDbgInfo)); @@ -3640,18 +3643,12 @@ void Compiler::compSetOptimizationLevel() if (compIsForInlining()) { - theMinOptsValue = impInlineInfo->InlinerCompiler->opts.MinOpts(); + theMinOptsValue = impInlineInfo->InlinerCompiler->opts.OptimizationDisabled(); goto _SetMinOpts; } theMinOptsValue = false; - if (opts.compFlags == CLFLG_MINOPT) - { - JITLOG((LL_INFO100, "CLFLG_MINOPT set for method %s\n", info.compFullName)); - theMinOptsValue = true; - } - #ifdef DEBUG jitMinOpts = JitConfig.JitMinOpts(); @@ -3666,11 +3663,8 @@ void Compiler::compSetOptimizationLevel() default: if (jitMinOpts <= methodCount) { - if (verbose) - { - printf(" Optimizations disabled by JitMinOpts and methodCount\n"); - } - theMinOptsValue = true; + compSwitchedToMinOptsReason = "Force JitMinOpts"; + theMinOptsValue = true; } break; case 0xD: @@ -3680,11 +3674,8 @@ void Compiler::compSetOptimizationLevel() if ((firstMinopts == methodCountMask) || (secondMinopts == methodCountMask)) { - if (verbose) - { - printf("0xD: Optimizations disabled by JitMinOpts and methodCountMask\n"); - } - theMinOptsValue = true; + compSwitchedToMinOptsReason = "0xD: Force JitMinOpts"; + theMinOptsValue = true; } } break; @@ -3695,11 +3686,8 @@ void Compiler::compSetOptimizationLevel() if ((startMinopts <= methodCountMask) && (endMinopts >= methodCountMask)) { - if (verbose) - { - printf("0xE: Optimizations disabled by JitMinOpts and methodCountMask\n"); - } - theMinOptsValue = true; + compSwitchedToMinOptsReason = "0xE: Force JitMinOpts"; + theMinOptsValue = true; } } break; @@ -3710,11 +3698,8 @@ void Compiler::compSetOptimizationLevel() if (((methodCountMask & bitsOne) == bitsOne) && ((~methodCountMask & bitsZero) == bitsZero)) { - if (verbose) - { - printf("0xF: Optimizations disabled by JitMinOpts and methodCountMask\n"); - } - theMinOptsValue = true; + compSwitchedToMinOptsReason = "0xF: Force JitMinOpts"; + theMinOptsValue = true; } } break; @@ -3766,31 +3751,33 @@ void Compiler::compSetOptimizationLevel() { if ((unsigned)JitConfig.JitMinOptsCodeSize() < info.compILCodeSize) { - JITLOG((LL_INFO10, "IL Code Size exceeded, using MinOpts for method %s\n", info.compFullName)); - theMinOptsValue = true; + compSwitchedToMinOptsReason = "IL too big"; + theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsInstrCount() < opts.instrCount) { - JITLOG((LL_INFO10, "IL instruction count exceeded, using MinOpts for method %s\n", info.compFullName)); - theMinOptsValue = true; + compSwitchedToMinOptsReason = "IL too many instr"; + theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsBbCount() < fgBBcount) { - JITLOG((LL_INFO10, "Basic Block count exceeded, using MinOpts for method %s\n", info.compFullName)); - theMinOptsValue = true; + compSwitchedToMinOptsReason = "Too many BBs"; + theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvNumCount() < lvaCount) { - JITLOG((LL_INFO10, "Local Variable Num count exceeded, using MinOpts for method %s\n", info.compFullName)); - theMinOptsValue = true; + compSwitchedToMinOptsReason = 
"Too many locals"; + theMinOptsValue = true; } else if ((unsigned)JitConfig.JitMinOptsLvRefCount() < opts.lvRefCount) { - JITLOG((LL_INFO10, "Local Variable Ref count exceeded, using MinOpts for method %s\n", info.compFullName)); - theMinOptsValue = true; + compSwitchedToMinOptsReason = "Too many refs"; + theMinOptsValue = true; } + if (theMinOptsValue == true) { + JITLOG((LL_INFO10, "%s, using MinOpts for method %s\n", compSwitchedToMinOptsReason, info.compFullName)); JITLOG((LL_INFO10000, "IL Code Size,Instr %4d,%4d, Basic Block count %3d, Local Variable Num,Ref count " "%3d,%3d for method %s\n", info.compILCodeSize, opts.instrCount, fgBBcount, lvaCount, opts.lvRefCount, info.compFullName)); @@ -3854,7 +3841,10 @@ void Compiler::compSetOptimizationLevel() _SetMinOpts: // Set the MinOpts value - opts.SetMinOpts(theMinOptsValue); + if (theMinOptsValue) + { + opts.SetMinOpts(); + } // Notify the VM if MinOpts is being used when not requested if (theMinOptsValue && !compIsForInlining() && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && @@ -3940,7 +3930,7 @@ bool Compiler::compRsvdRegCheck(FrameLayoutState curState) " compArgSize = %6d\n", frameSize, compArgSize); - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { // Have a recovery path in case we fail to reserve REG_OPT_RSVD and go // over the limit of SP and FP offset ranges due to large @@ -4110,14 +4100,6 @@ const char* Compiler::compGetTieringName(bool wantShortName) const const bool tier1 = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1); const bool instrumenting = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR); - if (!opts.compMinOptsIsSet) - { - // If 'compMinOptsIsSet' is not set, just return here. Otherwise, if this method is called - // by the assertAbort(), we would recursively call assert while trying to get MinOpts() - // and eventually stackoverflow. - return "Optimization-Level-Not-Yet-Set"; - } - assert(!tier0 || !tier1); // We don't expect multiple TIER flags to be set at one time. if (tier0) @@ -5044,6 +5026,13 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl sprintf_s(osrBuffer, 20, " @0x%x", info.compILEntry); } + char switchToMinOpts[32] = {0}; + if (compSwitchedToMinOpts) + { + assert(compSwitchedToMinOptsReason != nullptr); + sprintf_s(switchToMinOpts, 32, " - %s ", compSwitchedToMinOptsReason); + } + #ifdef DEBUG const char* fullName = info.compFullName; #else @@ -5056,8 +5045,8 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl const bool hasProf = fgHaveProfileData(); printf("%4d: JIT compiled %s [%s%s%s%s, IL size=%u, code size=%u%s]\n", methodsCompiled, fullName, - compGetTieringName(), osrBuffer, hasProf ? " with " : "", hasProf ? compGetPgoSourceName() : "", - info.compILCodeSize, *methodCodeSize, debugPart); + compGetTieringName(), switchToMinOpts, osrBuffer, hasProf ? " with " : "", + hasProf ? compGetPgoSourceName() : "", info.compILCodeSize, *methodCodeSize, debugPart); } compFunctionTraceEnd(*methodCodePtr, *methodCodeSize, false); @@ -6320,8 +6309,9 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr, compInlineContext = m_inlineStrategy->GetRootContext(); } - compSwitchedToOptimized = false; - compSwitchedToMinOpts = false; + compSwitchedToOptimized = false; + compSwitchedToMinOpts = false; + compSwitchedToMinOptsReason = nullptr; // compInitOptions will set the correct verbose flag. 
@@ -8588,7 +8578,7 @@ void JitTimer::PrintCsvMethodStats(Compiler* comp) } fprintf(s_csvFile, "%u,", comp->info.compILCodeSize); fprintf(s_csvFile, "%u,", comp->fgBBcount); - fprintf(s_csvFile, "%u,", comp->opts.MinOpts()); + fprintf(s_csvFile, "%u,", comp->opts.OptimizationDisabled()); fprintf(s_csvFile, "%u,", comp->optLoopCount); fprintf(s_csvFile, "%u,", comp->optLoopsCloned); #if FEATURE_LOOP_ALIGN diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index cf1ce73e5ce1b..513518159d829 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -4660,7 +4660,7 @@ class Compiler bool backendRequiresLocalVarLifetimes() { - return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); + return !opts.OptimizationDisabled() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); @@ -8980,8 +8980,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX bool compHasBackwardJump; // Does the method (or some inlinee) have a lexically backwards jump? bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler? bool compSwitchedToOptimized; // Codegen initially was Tier0 but jit switched to FullOpts - bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts bool compSuppressedZeroInit; // There are vars with lvSuppressedZeroInit set + bool compSwitchedToMinOpts; // Codegen initially was Tier1/FullOpts but jit switched to MinOpts + const char* compSwitchedToMinOptsReason; // Reason why it was switched to MinOpts // NOTE: These values are only reliable after // the importing is completely finished. @@ -9026,13 +9027,13 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX //---------------------------- JITing options ----------------------------- - enum codeOptimize + enum OptLevel { - BLENDED_CODE, - SMALL_CODE, - FAST_CODE, - - COUNT_OPT_CODE + // Order is important + OPT_MinOpts, + OPT_Quick, + OPT_Blended, + OPT_Speed }; struct Options @@ -9059,7 +9060,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX unsigned instrCount; unsigned lvRefCount; - codeOptimize compCodeOpt; // what type of code optimizations + OptLevel compOptLevel; bool compUseCMOV; @@ -9074,47 +9075,34 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // Maximum number of locals before turning off the inlining #define MAX_LV_NUM_COUNT_FOR_INLINING 512 - bool compMinOpts; - bool compMinOptsIsSet; -#ifdef DEBUG - mutable bool compMinOptsIsUsed; - bool MinOpts() const { - assert(compMinOptsIsSet); - compMinOptsIsUsed = true; - return compMinOpts; + return compOptLevel == OPT_MinOpts; } - bool IsMinOptsSet() const + bool OptimizationDisabled() const { - return compMinOptsIsSet; + return compOptLevel < OPT_Blended; } -#else // !DEBUG - bool MinOpts() const + bool OptimizationEnabled() const { - return compMinOpts; + return !OptimizationDisabled(); } - bool IsMinOptsSet() const + OptLevel OptLevel() const { - return compMinOptsIsSet; + return compOptLevel; } -#endif // !DEBUG - - bool OptimizationDisabled() const + bool OptLevelIs(Compiler::OptLevel lvl) const { - return MinOpts() || compDbgCode; + return compOptLevel == lvl; } - bool OptimizationEnabled() const + template <typename... T> + bool OptLevelIs(Compiler::OptLevel lvl, T...
rest) const { - return !OptimizationDisabled(); + return OptLevel() == lvl || OptLevelIs(rest...); } - - void SetMinOpts(bool val) + void SetMinOpts() { - assert(!compMinOptsIsUsed); - assert(!compMinOptsIsSet || (compMinOpts == val)); - compMinOpts = val; - compMinOptsIsSet = true; + compOptLevel = OPT_MinOpts; } // true if the CLFLG_* for an optimization is set. @@ -9567,22 +9555,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX const char* compGetPgoSourceName() const; const char* compGetStressMessage() const; - codeOptimize compCodeOpt() const - { -#if 0 - // Switching between size & speed has measurable throughput impact - // (3.5% on NGen CoreLib when measured). It used to be enabled for - // DEBUG, but should generate identical code between CHK & RET builds, - // so that's not acceptable. - // TODO-Throughput: Figure out what to do about size vs. speed & throughput. - // Investigate the cause of the throughput regression. - - return opts.compCodeOpt; -#else - return BLENDED_CODE; -#endif - } - //--------------------- Info about the procedure -------------------------- struct Info diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index 41c1ae5ebefbb..562be26a0e70f 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -2239,7 +2239,7 @@ inline int encodingLimitLower = isFloatUsage ? -0x3FC : -0xFF; // Use SP-based encoding. During encoding, we'll pick the best encoding for the actual offset we have. - if (opts.MinOpts() || (actualSPOffset <= encodingLimitUpper)) + if (opts.OptimizationDisabled() || (actualSPOffset <= encodingLimitUpper)) { varOffset = spVarOffset; *pBaseReg = compLocallocUsed ? REG_SAVED_LOCALLOC_SP : REG_SPBASE; diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp index f2bae6817f922..3b72581abc324 100644 --- a/src/coreclr/jit/emit.cpp +++ b/src/coreclr/jit/emit.cpp @@ -7579,16 +7579,6 @@ CORINFO_FIELD_HANDLE emitter::emitFltOrDblConst(double constValue, emitAttr attr unsigned cnsSize = (attr == EA_4BYTE) ? 
sizeof(float) : sizeof(double); unsigned cnsAlign = cnsSize; -#ifdef TARGET_XARCH - if (emitComp->compCodeOpt() == Compiler::SMALL_CODE) - { - // Some platforms don't require doubles to be aligned and so - // we can use a smaller alignment to help with smaller code - - cnsAlign = dataSection::MIN_DATA_ALIGN; - } -#endif // TARGET_XARCH - UNATIVE_OFFSET cnum = emitDataConst(cnsAddr, cnsSize, cnsAlign, dataType); return emitComp->eeFindJitDataOffs(cnum); } @@ -7613,13 +7603,6 @@ CORINFO_FIELD_HANDLE emitter::emitSimd8Const(simd8_t constValue) unsigned cnsSize = 8; unsigned cnsAlign = cnsSize; -#ifdef TARGET_XARCH - if (emitComp->compCodeOpt() == Compiler::SMALL_CODE) - { - cnsAlign = dataSection::MIN_DATA_ALIGN; - } -#endif // TARGET_XARCH - UNATIVE_OFFSET cnum = emitDataConst(&constValue, cnsSize, cnsAlign, TYP_SIMD8); return emitComp->eeFindJitDataOffs(cnum); #else @@ -7638,13 +7621,6 @@ CORINFO_FIELD_HANDLE emitter::emitSimd16Const(simd16_t constValue) unsigned cnsSize = 16; unsigned cnsAlign = cnsSize; -#ifdef TARGET_XARCH - if (emitComp->compCodeOpt() == Compiler::SMALL_CODE) - { - cnsAlign = dataSection::MIN_DATA_ALIGN; - } -#endif // TARGET_XARCH - UNATIVE_OFFSET cnum = emitDataConst(&constValue, cnsSize, cnsAlign, TYP_SIMD16); return emitComp->eeFindJitDataOffs(cnum); #else @@ -7663,13 +7639,6 @@ CORINFO_FIELD_HANDLE emitter::emitSimd32Const(simd32_t constValue) unsigned cnsSize = 32; unsigned cnsAlign = cnsSize; -#ifdef TARGET_XARCH - if (emitComp->compCodeOpt() == Compiler::SMALL_CODE) - { - cnsAlign = dataSection::MIN_DATA_ALIGN; - } -#endif // TARGET_XARCH - UNATIVE_OFFSET cnum = emitDataConst(&constValue, cnsSize, cnsAlign, TYP_SIMD32); return emitComp->eeFindJitDataOffs(cnum); #else diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index e9636ab27f903..0b322deb4ee52 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -12894,8 +12894,7 @@ BYTE* emitter::emitOutputCV(BYTE* dst, instrDesc* id, code_t code, CnsVal* addc) // When SMALL_CODE is set, we only expect 4-byte alignment, otherwise // we expect the same alignment as the size of the constant. - assert((emitChkAlign == false) || (ins == INS_lea) || - ((emitComp->compCodeOpt() == Compiler::SMALL_CODE) && (((size_t)addr & 3) == 0)) || + assert((emitChkAlign == false) || (ins == INS_lea) || ((((size_t)addr & 3) == 0)) || (((size_t)addr & (byteSize - 1)) == 0)); #endif // DEBUG } diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index e56843264cd6a..2c9e842f997a8 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -3507,7 +3507,7 @@ void Compiler::fgFindBasicBlocks() #ifndef DEBUG // fgNormalizeEH assumes that this test has been passed. And Ssa assumes that fgNormalizeEHTable // has been run. So do this unless we're in minOpts mode (and always in debug). 
- if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) #endif { fgCheckBasicBlockControlFlow(); diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 3b7b5f67525ee..d3897f0c7094f 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -46,7 +46,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() return PhaseStatus::MODIFIED_NOTHING; } - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { JITDUMP("Method compiled with minOpts, no removal.\n"); return PhaseStatus::MODIFIED_NOTHING; @@ -314,7 +314,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() return PhaseStatus::MODIFIED_NOTHING; } - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { JITDUMP("Method compiled with minOpts, no removal.\n"); return PhaseStatus::MODIFIED_NOTHING; @@ -643,7 +643,7 @@ PhaseStatus Compiler::fgCloneFinally() return PhaseStatus::MODIFIED_NOTHING; } - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { JITDUMP("Method compiled with minOpts, no cloning.\n"); return PhaseStatus::MODIFIED_NOTHING; @@ -1634,7 +1634,7 @@ PhaseStatus Compiler::fgMergeFinallyChains() return PhaseStatus::MODIFIED_NOTHING; } - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { JITDUMP("Method compiled with minOpts, no merging.\n"); return PhaseStatus::MODIFIED_NOTHING; diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 16eb52cf3823e..483559cd1daf0 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -3932,7 +3932,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi // bool Compiler::fgOptimizeBranch(BasicBlock* bJump) { - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { return false; } diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index ae72354b00070..e07667bf187da 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -490,8 +490,7 @@ bool Compiler::fgCanSwitchToOptimized() if (result) { // Ensure that it would be safe to change the opt level - assert(opts.compFlags == CLFLG_MINOPT); - assert(!opts.IsMinOptsSet()); + assert(opts.OptLevel() < OPT_Blended); } return result; @@ -2660,8 +2659,9 @@ PhaseStatus Compiler::fgAddInternal() // We are allowed to have multiple individual exits // However we can still decide to have a single return // - if ((compCodeOpt() == SMALL_CODE) || stressMerging) + if (stressMerging) { + // TODO-OptLevel: opts.OptLevelIs(OPT_Quick) candidate // Under stress or for Small_Code case we always // generate a single return block when we have multiple // return points diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp index fe710f509fcdb..fc5d972a5fee5 100644 --- a/src/coreclr/jit/gcencode.cpp +++ b/src/coreclr/jit/gcencode.cpp @@ -4059,7 +4059,7 @@ void GCInfo::gcMakeRegPtrTable( GCENCODER_WITH_LOGGING(gcInfoEncoderWithLog, gcInfoEncoder); const bool noTrackedGCSlots = - (compiler->opts.MinOpts() && !compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && + (compiler->opts.OptimizationDisabled() && !compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && !JitConfig.JitMinOptsTrackGCrefs()); if (mode == MAKE_REG_PTR_MODE_ASSIGN_SLOTS) diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index ed1e5d24bd28c..4da5d0e31a1f6 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -7922,7 +7922,7 @@ bool GenTreeOp::UsesDivideByConstOptimized(Compiler* comp) // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32 #if defined(TARGET_XARCH) || 
defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) - if (!comp->opts.MinOpts() && ((divisorValue >= 3) || !isSignedDivide)) + if (!comp->opts.OptimizationDisabled() && ((divisorValue >= 3) || !isSignedDivide)) { // All checks pass we can perform the division operation using a reciprocal multiply. return true; diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index 78357b2918cb5..14e9c1ca88b5b 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -1289,7 +1289,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); - if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts @@ -1328,7 +1328,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, { assert(sig->numArgs == 1); - if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { // ARM64 doesn't have aligned loads, but aligned loads are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts @@ -1637,7 +1637,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); - if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts @@ -1660,7 +1660,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, assert(sig->numArgs == 2); var_types simdType = getSIMDTypeForSize(simdSize); - if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { // ARM64 doesn't have aligned stores, but aligned stores are only validated to be // aligned during minopts, so only skip the intrinsic handling if we're minopts diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index 3016b0d498322..709c793a4b54f 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -4318,7 +4318,7 @@ GenTree* Compiler::impTransformThis(GenTree* thisPtr, bool Compiler::impCanPInvokeInline() { - return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) && + return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke ; } @@ -7652,7 +7652,7 @@ GenTree* Compiler::impArrayAccessIntrinsic( assert((intrinsicName == NI_Array_Address) || (intrinsicName == NI_Array_Get) || (intrinsicName == NI_Array_Set)); // If we are generating SMALL_CODE, we don't want to use intrinsics, as it generates fatter code. 
- if (compCodeOpt() == SMALL_CODE) + if (opts.OptimizationDisabled()) { JITDUMP("impArrayAccessIntrinsic: rejecting array intrinsic due to SMALL_CODE\n"); return nullptr; diff --git a/src/coreclr/jit/inlinepolicy.cpp b/src/coreclr/jit/inlinepolicy.cpp index 3b671950b257f..9473698a907bf 100644 --- a/src/coreclr/jit/inlinepolicy.cpp +++ b/src/coreclr/jit/inlinepolicy.cpp @@ -95,7 +95,7 @@ InlinePolicy* InlinePolicy::GetPolicy(Compiler* compiler, bool isPrejitRoot) } const bool isPrejit = compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT); - const bool isSpeedOpt = compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_SPEED_OPT); + const bool isSpeedOpt = compiler->opts.OptLevelIs(Compiler::OPT_Speed); if ((JitConfig.JitExtDefaultPolicy() != 0)) { diff --git a/src/coreclr/jit/jittelemetry.cpp b/src/coreclr/jit/jittelemetry.cpp index dbf350885e029..0e5e21f4b6c27 100644 --- a/src/coreclr/jit/jittelemetry.cpp +++ b/src/coreclr/jit/jittelemetry.cpp @@ -271,7 +271,7 @@ void JitTelemetry::NotifyNowayAssert(const char* filename, unsigned line) if (comp != nullptr) { codeSize = comp->info.compILCodeSize; - minOpts = comp->opts.IsMinOptsSet() ? comp->opts.MinOpts() : -1; + minOpts = comp->opts.IsMinOptsSet() ? comp->opts.OptimizationDisabled() : -1; lastPhase = PhaseNames[comp->mostRecentlyActivePhase]; } diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index f9b1faf7709c1..dfc52388ad434 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -3740,7 +3740,7 @@ void Compiler::lvaSortByRefCount() lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef)); #endif } - if (opts.MinOpts() && !JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())) + if (opts.OptimizationDisabled() && !JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())) { varDsc->lvTracked = 0; lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::MinOptsGC)); @@ -3761,7 +3761,7 @@ void Compiler::lvaSortByRefCount() // Are we not optimizing and we have exception handlers? // if so mark all args and locals "do not enregister". // - if (opts.MinOpts() && compHndBBtabCount > 0) + if (opts.OptimizationDisabled() && compHndBBtabCount > 0) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } @@ -3805,7 +3805,7 @@ void Compiler::lvaSortByRefCount() } // Now sort the tracked variable table by ref-count - if (compCodeOpt() == SMALL_CODE) + if (opts.OptLevelIs(OPT_Quick)) { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_SmallCode_Less(lvaTable DEBUGARG(lvaCount))); } diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index e1f0a9d02658e..943692f9ab2f2 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -1560,7 +1560,7 @@ bool Compiler::fgComputeLifeTrackedLocalDef(VARSET_TP& life, // Dead store node->gtFlags |= GTF_VAR_DEATH; - if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { // keepAliveVars always stay alive noway_assert(!VarSetOps::IsMember(this, keepAliveVars, varIndex)); @@ -1677,7 +1677,7 @@ bool Compiler::fgComputeLifeUntrackedLocal(VARSET_TP& life, if (fieldsAreTracked && VarSetOps::IsEmpty(this, liveFields)) { // None of the fields were live, so this is a dead store. 
- if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { // keepAliveVars always stay alive VARSET_TP keepAliveFields(VarSetOps::Intersection(this, fieldSet, keepAliveVars)); @@ -1887,7 +1887,7 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR // Removing a call does not affect liveness unless it is a tail call in a method with P/Invokes or // is itself a P/Invoke, in which case it may affect the liveness of the frame root variable. - if (!opts.MinOpts() && !opts.ShouldUsePInvokeHelpers() && + if (!opts.OptimizationDisabled() && !opts.ShouldUsePInvokeHelpers() && ((call->IsTailCall() && compMethodRequiresPInvokeFrame()) || (call->IsUnmanaged() && !call->IsSuppressGCTransition())) && lvaTable[info.compLvFrameListRoot].lvTracked) @@ -1914,7 +1914,7 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR DISPNODE(lclVarNode); blockRange.Delete(this, block, node); - if (varDsc.lvTracked && !opts.MinOpts()) + if (varDsc.lvTracked && !opts.OptimizationDisabled()) { fgStmtRemoved = true; } @@ -1939,7 +1939,7 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR const bool isTracked = lvaTable[node->AsLclVarCommon()->GetLclNum()].lvTracked; blockRange.Delete(this, block, node); - if (isTracked && !opts.MinOpts()) + if (isTracked && !opts.OptimizationDisabled()) { fgStmtRemoved = true; } @@ -2159,7 +2159,7 @@ bool Compiler::fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange) // bool Compiler::fgTryRemoveDeadStoreLIR(GenTree* store, GenTreeLclVarCommon* lclNode, BasicBlock* block) { - assert(!opts.MinOpts()); + assert(!opts.OptimizationDisabled()); // We cannot remove stores to (tracked) TYP_STRUCT locals with GC pointers marked as "explicit init", // as said locals will be reported to the GC untracked, and deleting the explicit initializer risks diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp index 9bb98072a2b13..eca6780e12760 100644 --- a/src/coreclr/jit/lowerxarch.cpp +++ b/src/coreclr/jit/lowerxarch.cpp @@ -126,7 +126,7 @@ GenTree* Lowering::TryLowerMulWithConstant(GenTreeOp* node) assert(node->OperIs(GT_MUL)); // Do not do these optimizations when min-opts enabled. 
- if (comp->opts.MinOpts()) + if (comp->opts.OptimizationDisabled()) return nullptr; if (!varTypeIsIntegral(node)) @@ -6231,7 +6231,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre } else { - supportsAlignedSIMDLoads = !comp->opts.MinOpts(); + supportsAlignedSIMDLoads = !comp->opts.OptimizationDisabled(); supportsUnalignedSIMDLoads = true; } @@ -6284,7 +6284,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre // and Sse3.MoveAndDuplicate (MOVDDUP) are exceptions and don't fail for // unaligned inputs as they read mem64 (half the vector width) instead - supportsAlignedSIMDLoads = !comp->opts.MinOpts(); + supportsAlignedSIMDLoads = !comp->opts.OptimizationDisabled(); supportsUnalignedSIMDLoads = true; const unsigned expectedSize = genTypeSize(parentNode->TypeGet()) / 2; @@ -6305,7 +6305,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre } else { - supportsAlignedSIMDLoads = !comp->opts.MinOpts(); + supportsAlignedSIMDLoads = !comp->opts.OptimizationDisabled(); supportsUnalignedSIMDLoads = true; } @@ -6358,7 +6358,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre const unsigned expectedSize = genTypeSize(parentNode->GetSimdBaseType()); const unsigned operandSize = genTypeSize(childNode->TypeGet()); - supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.MinOpts(); + supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.OptimizationDisabled(); supportsUnalignedSIMDLoads = comp->canUseVexEncoding(); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; @@ -6373,7 +6373,7 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre const unsigned expectedSize = 16; const unsigned operandSize = genTypeSize(childNode->TypeGet()); - supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.MinOpts(); + supportsAlignedSIMDLoads = !comp->canUseVexEncoding() || !comp->opts.OptimizationDisabled(); supportsUnalignedSIMDLoads = comp->canUseVexEncoding(); supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize); break; diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 759cc1daaf838..280be38e75f2b 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -1515,7 +1515,7 @@ bool LinearScan::isRegCandidate(LclVarDsc* varDsc) // if so mark all args and locals as volatile, so that they // won't ever get enregistered. // - if (compiler->opts.MinOpts() && compiler->compHndBBtabCount > 0) + if (compiler->opts.OptimizationDisabled() && compiler->compHndBBtabCount > 0) { compiler->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } @@ -1655,7 +1655,7 @@ void LinearScan::identifyCandidates() weight_t refCntWtdStkDbl = 0; // sum of wtd ref counts for stack based doubles doDoubleAlign = false; bool checkDoubleAlign = true; - if (compiler->codeGen->isFramePointerRequired() || compiler->opts.MinOpts()) + if (compiler->codeGen->isFramePointerRequired() || compiler->opts.OptimizationDisabled()) { checkDoubleAlign = false; } diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index c1b21f20315a4..18742b35b3ed0 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -3991,7 +3991,7 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, CallArg* arg) // Attempt to find a local we have already used for an outgoing struct and reuse it. 
// We do not reuse within a statement. - if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { found = ForEachHbvBitSet(*fgAvailableOutgoingArgTemps, [&](indexType lclNum) { LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); @@ -4254,7 +4254,7 @@ void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay) BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay) { - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { delay = false; } @@ -4328,7 +4328,7 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr) // // This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows // for more straightforward bounds-check removal, CSE, etc. - if (opts.MinOpts()) + if (opts.OptimizationDisabled()) { indexAddr->Arr() = fgMorphTree(indexAddr->Arr()); indexAddr->Index() = fgMorphTree(indexAddr->Index()); @@ -11274,13 +11274,6 @@ GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) return mul; } -#ifdef TARGET_XARCH - // Should we try to replace integer multiplication with lea/add/shift sequences? - bool mulShiftOpt = compCodeOpt() != SMALL_CODE; -#else // !TARGET_XARCH - bool mulShiftOpt = false; -#endif // !TARGET_XARCH - size_t abs_mult = (mult >= 0) ? mult : -mult; size_t lowestBit = genFindLowestBit(abs_mult); bool changeToShift = false; @@ -11307,7 +11300,8 @@ GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult)); changeToShift = true; } - else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) +#ifdef TARGET_XARCH + else if ((lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) { int shift = genLog2(lowestBit); ssize_t factor = abs_mult >> shift; @@ -11331,6 +11325,7 @@ GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) changeToShift = true; } } +#endif // TARGET_XARCH if (changeToShift) { diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index 6affd662f6e83..e923fd7b972e1 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -792,7 +792,7 @@ void MorphCopyBlockHelper::MorphStructCases() if (m_dstVarDsc->lvPromoted) { noway_assert(varTypeIsStruct(m_dstVarDsc)); - noway_assert(!m_comp->opts.MinOpts()); + noway_assert(!m_comp->opts.OptimizationDisabled()); if (m_blockSize == m_dstVarDsc->lvExactSize) { @@ -812,7 +812,7 @@ void MorphCopyBlockHelper::MorphStructCases() if (m_srcVarDsc->lvPromoted) { noway_assert(varTypeIsStruct(m_srcVarDsc)); - noway_assert(!m_comp->opts.MinOpts()); + noway_assert(!m_comp->opts.OptimizationDisabled()); if (m_blockSize == m_srcVarDsc->lvExactSize) { diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp index fa21ea5b8facd..581c34da19aec 100644 --- a/src/coreclr/jit/optcse.cpp +++ b/src/coreclr/jit/optcse.cpp @@ -302,7 +302,7 @@ bool Compiler::optCSEcostCmpEx::operator()(const CSEdsc* dsc1, const CSEdsc* dsc /***************************************************************************** * * Compare function passed to jitstd::sort() by CSE_Heuristic::SortCandidates - * when (CodeOptKind() == Compiler::SMALL_CODE) + * when (SmallCode()) */ /* static */ @@ -1724,15 +1724,15 @@ class CSE_Heuristic Compiler* m_pCompiler; unsigned m_addCSEcount; - weight_t aggressiveRefCnt; - weight_t moderateRefCnt; - unsigned enregCount; // count of the number of predicted enregistered variables - bool largeFrame; - bool hugeFrame; - bool madeChanges; - Compiler::codeOptimize codeOptKind; - Compiler::CSEdsc** sortTab; - size_t sortSiz; + weight_t 
aggressiveRefCnt; + weight_t moderateRefCnt; + unsigned enregCount; // count of the number of predicted enregistered variables + bool largeFrame; + bool hugeFrame; + bool madeChanges; + Compiler::OptLevel codeOptLevel; + Compiler::CSEdsc** sortTab; + size_t sortSiz; #ifdef DEBUG CLRRandom m_cseRNG; unsigned m_bias; @@ -1741,12 +1741,12 @@ class CSE_Heuristic public: CSE_Heuristic(Compiler* pCompiler) : m_pCompiler(pCompiler) { - codeOptKind = m_pCompiler->compCodeOpt(); + codeOptLevel = m_pCompiler->opts.OptLevel(); } - Compiler::codeOptimize CodeOptKind() + bool SmallCode() { - return codeOptKind; + return codeOptLevel <= Compiler::OPT_Quick; } bool MadeChanges() const @@ -1961,7 +1961,7 @@ class CSE_Heuristic // if ((aggressiveRefCnt == 0) && (enregCount > aggressiveEnregNum)) { - if (CodeOptKind() == Compiler::SMALL_CODE) + if (SmallCode()) { aggressiveRefCnt = varDsc->lvRefCnt(); } @@ -1973,7 +1973,7 @@ class CSE_Heuristic } if ((moderateRefCnt == 0) && (enregCount > ((CNT_CALLEE_ENREG * 3) + (CNT_CALLEE_TRASH * 2)))) { - if (CodeOptKind() == Compiler::SMALL_CODE) + if (SmallCode()) { moderateRefCnt = varDsc->lvRefCnt(); } @@ -2016,7 +2016,7 @@ class CSE_Heuristic sortSiz = m_pCompiler->optCSECandidateCount * sizeof(*sortTab); memcpy(sortTab, m_pCompiler->optCSEtab, sortSiz); - if (CodeOptKind() == Compiler::SMALL_CODE) + if (SmallCode()) { jitstd::sort(sortTab, sortTab + m_pCompiler->optCSECandidateCount, Compiler::optCSEcostCmpSz()); } @@ -2039,7 +2039,7 @@ class CSE_Heuristic weight_t use; unsigned cost; - if (CodeOptKind() == Compiler::SMALL_CODE) + if (SmallCode()) { def = dsc->csdDefCount; // def count use = dsc->csdUseCount; // use count (excluding the implicit uses at defs) @@ -2213,7 +2213,7 @@ class CSE_Heuristic void InitializeCounts() { m_Size = Expr()->GetCostSz(); // always the GetCostSz() - if (m_context->CodeOptKind() == Compiler::SMALL_CODE) + if (m_context->SmallCode()) { m_Cost = m_Size; // the estimated code size m_defCount = m_CseDsc->csdDefCount; // def count @@ -2403,7 +2403,7 @@ class CSE_Heuristic slotCount = (size + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE; } - if (CodeOptKind() == Compiler::SMALL_CODE) + if (SmallCode()) { // Note that when optimizing for SMALL_CODE we set the cse_def_cost/cse_use_cost based // upon the code size and we use unweighted ref counts instead of weighted ref counts. 
@@ -3536,7 +3536,7 @@ bool Compiler::optIsCSEcandidate(GenTree* tree) } unsigned cost; - if (compCodeOpt() == SMALL_CODE) + if (opts.OptLevel() < OPT_Blended) { cost = tree->GetCostSz(); } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 1b0d6cc450296..57c89a0d78bea 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -153,7 +153,7 @@ void Compiler::optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); noway_assert(fgReachable(begBlk, endBlk)); - noway_assert(!opts.MinOpts()); + noway_assert(!opts.OptimizationDisabled()); #ifdef DEBUG if (verbose) @@ -277,7 +277,7 @@ void Compiler::optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk) { noway_assert(begBlk->bbNum <= endBlk->bbNum); noway_assert(begBlk->isLoopHead()); - noway_assert(!opts.MinOpts()); + noway_assert(!opts.OptimizationDisabled()); unsigned backEdgeCount = 0; @@ -386,7 +386,7 @@ void Compiler::optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmar return; } - noway_assert(!opts.MinOpts()); + noway_assert(!opts.OptimizationDisabled()); bool removeLoop = false; @@ -4018,7 +4018,7 @@ bool Compiler::optComputeLoopRep(int constInit, // PhaseStatus Compiler::optUnrollLoops() { - if (compCodeOpt() == SMALL_CODE) + if (opts.OptLevel() < OPT_Blended) { return PhaseStatus::MODIFIED_NOTHING; } @@ -4049,17 +4049,15 @@ PhaseStatus Compiler::optUnrollLoops() INDEBUG(int unrollCount = 0); // count of loops unrolled INDEBUG(int unrollFailures = 0); // count of loops attempted to be unrolled, but failed - static const unsigned ITER_LIMIT[COUNT_OPT_CODE + 1] = { - 10, // BLENDED_CODE - 0, // SMALL_CODE - 20, // FAST_CODE - 0 // COUNT_OPT_CODE + static const unsigned ITER_LIMIT[4] = { + 0, // OPT_MinOpts + 0, // OPT_Quick + 10, // OPT_Blended + 20, // OPT_Speed }; + assert((int)opts.OptLevel() <= 3); - assert(ITER_LIMIT[SMALL_CODE] == 0); - assert(ITER_LIMIT[COUNT_OPT_CODE] == 0); - - unsigned iterLimit = ITER_LIMIT[compCodeOpt()]; + unsigned iterLimit = ITER_LIMIT[opts.OptLevel()]; #ifdef DEBUG if (compStressCompile(STRESS_UNROLL_LOOPS, 50)) @@ -4068,16 +4066,13 @@ PhaseStatus Compiler::optUnrollLoops() } #endif - static const int UNROLL_LIMIT_SZ[COUNT_OPT_CODE + 1] = { - 300, // BLENDED_CODE - 0, // SMALL_CODE - 600, // FAST_CODE - 0 // COUNT_OPT_CODE + static const unsigned UNROLL_LIMIT_SZ[4] = { + 0, // OPT_MinOpts + 0, // OPT_Quick + 300, // OPT_Blended + 600, // OPT_Speed }; - assert(UNROLL_LIMIT_SZ[SMALL_CODE] == 0); - assert(UNROLL_LIMIT_SZ[COUNT_OPT_CODE] == 0); - // Visit loops from highest to lowest number to visit them in innermost to outermost order. for (unsigned lnum = optLoopCount - 1; lnum != ~0U; --lnum) { @@ -4211,7 +4206,7 @@ PhaseStatus Compiler::optUnrollLoops() continue; } - int unrollLimitSz = UNROLL_LIMIT_SZ[compCodeOpt()]; + int unrollLimitSz = UNROLL_LIMIT_SZ[opts.OptLevel()]; if (INDEBUG(compStressCompile(STRESS_UNROLL_LOOPS, 50) ||) false) { @@ -4772,7 +4767,6 @@ Compiler::OptInvertCountTreeInfoType Compiler::optInvertCountTreeInfo(GenTree* t bool Compiler::optInvertWhileLoop(BasicBlock* block) { assert(opts.OptimizationEnabled()); - assert(compCodeOpt() != SMALL_CODE); // Does the BB end with an unconditional jump? 
@@ -4911,7 +4905,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) unsigned maxDupCostSz = 34; - if ((compCodeOpt() == FAST_CODE) || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) + if ((opts.OptLevel() == OPT_Speed) || compStressCompile(STRESS_DO_WHILE_LOOPS, 30)) { maxDupCostSz *= 4; } @@ -5190,11 +5184,6 @@ PhaseStatus Compiler::optInvertLoops() } #endif // OPT_CONFIG - if (compCodeOpt() == SMALL_CODE) - { - return PhaseStatus::MODIFIED_NOTHING; - } - bool madeChanges = false; // Assume no changes made for (BasicBlock* const block : Blocks()) { diff --git a/src/coreclr/jit/regalloc.cpp b/src/coreclr/jit/regalloc.cpp index aa45c85a070d9..aedcab82748bb 100644 --- a/src/coreclr/jit/regalloc.cpp +++ b/src/coreclr/jit/regalloc.cpp @@ -70,12 +70,6 @@ bool Compiler::shouldDoubleAlign( unsigned bytesUsed = refCntStk + refCntEBP - refCntStkParam + DBL_ALIGN_SETUP_SIZE; unsigned misaligned_weight = 4; - if (compCodeOpt() == Compiler::SMALL_CODE) - misaligned_weight = 0; - - if (compCodeOpt() == Compiler::FAST_CODE) - misaligned_weight *= 4; - JITDUMP("\nDouble alignment:\n"); JITDUMP(" Bytes that could be saved by not using EBP frame: %i\n", bytesUsed); JITDUMP(" Sum of weighted ref counts for EBP enregistered variables: %f\n", refCntWtdEBP); diff --git a/src/coreclr/jit/simdashwintrinsic.cpp b/src/coreclr/jit/simdashwintrinsic.cpp index 347ae5b6d083c..3c7d901824d37 100644 --- a/src/coreclr/jit/simdashwintrinsic.cpp +++ b/src/coreclr/jit/simdashwintrinsic.cpp @@ -666,7 +666,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, case NI_VectorT128_StoreAligned: case NI_VectorT128_StoreAlignedNonTemporal: { - if (!opts.MinOpts()) + if (!opts.OptimizationDisabled()) { // ARM64 doesn't have aligned loads/stores, but aligned simd ops are only validated // to be aligned during minopts, so only skip the intrinsic handling if we're minopts
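For reference, a minimal standalone C++ sketch (not part of the patch) of the new ordered OptLevel enum and the Options accessors added in compiler.h above (MinOpts, OptimizationDisabled, OptimizationEnabled, the variadic OptLevelIs, SetMinOpts). The free-standing Options struct, main() and printf output below are illustrative assumptions that strip away the surrounding Compiler plumbing; only the enum ordering and the accessor bodies mirror the patch.

// Sketch only: simplified stand-in for Compiler::Options as changed by this patch.
#include <cstdio>

enum OptLevel
{
    // Order is important: levels below OPT_Blended count as "optimization disabled".
    OPT_MinOpts,
    OPT_Quick,
    OPT_Blended,
    OPT_Speed
};

struct Options
{
    OptLevel compOptLevel = OPT_Blended;

    bool MinOpts() const
    {
        return compOptLevel == OPT_MinOpts;
    }
    bool OptimizationDisabled() const
    {
        return compOptLevel < OPT_Blended;
    }
    bool OptimizationEnabled() const
    {
        return !OptimizationDisabled();
    }
    bool OptLevelIs(OptLevel lvl) const
    {
        return compOptLevel == lvl;
    }
    template <typename... T>
    bool OptLevelIs(OptLevel lvl, T... rest) const
    {
        // True if the current level matches any of the listed levels;
        // the recursion terminates at the single-argument overload above.
        return OptLevelIs(lvl) || OptLevelIs(rest...);
    }
    void SetMinOpts()
    {
        compOptLevel = OPT_MinOpts;
    }
};

int main()
{
    Options opts;
    opts.compOptLevel = OPT_Quick; // e.g. a Tier0 / SIZE_OPT compilation

    printf("OptLevelIs(OPT_Quick)              -> %d\n", opts.OptLevelIs(OPT_Quick));              // 1
    printf("OptLevelIs(OPT_MinOpts, OPT_Quick) -> %d\n", opts.OptLevelIs(OPT_MinOpts, OPT_Quick)); // 1
    printf("OptimizationDisabled()             -> %d\n", opts.OptimizationDisabled());             // 1, since Quick < Blended
    printf("OptimizationEnabled()              -> %d\n", opts.OptimizationEnabled());              // 0
    return 0;
}

Because the enum is ordered and compInitOptions now folds a debug-code request into OPT_MinOpts, OptimizationDisabled() reduces to a single comparison against OPT_Blended instead of the old MinOpts() || compDbgCode check.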