Skip to content

Commit 479ee51

Browse files
authored
enable osr stress tests for libraries and add fixes (#64116)
* add osr plus stress tests for libraries * (Fix) have tier0 method always report generics context, then OSR method can just use that * Add ability to suppress patchpoints by hash. * Dump IR after patchpoints phase, if any were added * (Fix) Fix bug in recursive tail call detection -- if we devirtualize we need to update the method handle we use. OSR methods rely on getting this detection right so they know if they need to import the method entry block. * (Fix) refine OSR enabling/backoff to opt logic. With OSR enabled, we were forcing too many methods to be optimized. * reorder phase whitelist to more closely match phase order
1 parent 9eb9051 commit 479ee51

File tree

7 files changed

+99
-35
lines changed

7 files changed

+99
-35
lines changed

eng/pipelines/libraries/run-test-job.yml

+4
Original file line numberDiff line numberDiff line change
@@ -191,4 +191,8 @@ jobs:
191191
- fullpgo_random_gdv
192192
- fullpgo_random_edge
193193
- fullpgo_random_gdv_edge
194+
- jitosr
195+
- jitosr_stress
196+
- jitosr_stress_random
197+
- jitosr_pgo
194198

src/coreclr/jit/codegencommon.cpp

+20-5
Original file line numberDiff line numberDiff line change
@@ -6385,16 +6385,31 @@ void CodeGen::genEnregisterOSRArgsAndLocals()
63856385

63866386
void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed)
63876387
{
6388-
// For OSR the original method has set this up for us.
6388+
assert(compiler->compGeneratingProlog);
6389+
6390+
const bool reportArg = compiler->lvaReportParamTypeArg();
6391+
63896392
if (compiler->opts.IsOSR())
63906393
{
6394+
PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo;
6395+
if (reportArg)
6396+
{
6397+
// OSR method will use Tier0 slot to report context arg.
6398+
//
6399+
assert(ppInfo->HasGenericContextArgOffset());
6400+
JITDUMP("OSR method will use Tier0 frame slot for generics context arg.\n");
6401+
}
6402+
else if (compiler->lvaKeepAliveAndReportThis())
6403+
{
6404+
// OSR method will use Tier0 slot to report `this` as context.
6405+
//
6406+
assert(ppInfo->HasKeptAliveThis());
6407+
JITDUMP("OSR method will use Tier0 frame slot for generics context `this`.\n");
6408+
}
6409+
63916410
return;
63926411
}
63936412

6394-
assert(compiler->compGeneratingProlog);
6395-
6396-
bool reportArg = compiler->lvaReportParamTypeArg();
6397-
63986413
// We should report either generic context arg or "this" when used so.
63996414
if (!reportArg)
64006415
{

src/coreclr/jit/compiler.cpp

+25-17
Original file line numberDiff line numberDiff line change
@@ -6396,45 +6396,53 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
63966396
// Honor the config setting that tells the jit to
63976397
// always optimize methods with loops.
63986398
//
6399-
// If that's not set, and OSR is enabled, the jit may still
6399+
// If neither of those apply, and OSR is enabled, the jit may still
64006400
// decide to optimize, if there's something in the method that
6401-
// OSR currently cannot handle.
6401+
// OSR currently cannot handle, or we're optionally suppressing
6402+
// OSR by method hash.
64026403
//
64036404
const char* reason = nullptr;
64046405

64056406
if (compTailPrefixSeen && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR))
64066407
{
64076408
reason = "tail.call and not BBINSTR";
64086409
}
6409-
else if ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)
6410+
else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0))
64106411
{
6411-
if (compHasBackwardJump)
6412-
{
6413-
reason = "loop";
6414-
}
6412+
reason = "loop";
64156413
}
6416-
else if (JitConfig.TC_OnStackReplacement() > 0)
6414+
6415+
if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0))
64176416
{
6418-
const bool patchpointsOK = compCanHavePatchpoints(&reason);
6419-
assert(patchpointsOK || (reason != nullptr));
6417+
const char* noPatchpointReason = nullptr;
6418+
bool canEscapeViaOSR = compCanHavePatchpoints(&reason);
64206419

64216420
#ifdef DEBUG
6422-
// Optionally disable OSR by method hash.
6423-
//
6424-
if (patchpointsOK && compHasBackwardJump)
6421+
if (canEscapeViaOSR)
64256422
{
6423+
// Optionally disable OSR by method hash. This will force any
6424+
// method that might otherwise get trapped in Tier0 to be optimized.
6425+
//
64266426
static ConfigMethodRange JitEnableOsrRange;
64276427
JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange());
64286428
const unsigned hash = impInlineRoot()->info.compMethodHash();
64296429
if (!JitEnableOsrRange.Contains(hash))
64306430
{
6431-
JITDUMP("Disabling OSR -- Method hash 0x%08x not within range ", hash);
6432-
JITDUMPEXEC(JitEnableOsrRange.Dump());
6433-
JITDUMP("\n");
6434-
reason = "OSR disabled by JitEnableOsrRange";
6431+
canEscapeViaOSR = false;
6432+
reason = "OSR disabled by JitEnableOsrRange";
64356433
}
64366434
}
64376435
#endif
6436+
6437+
if (canEscapeViaOSR)
6438+
{
6439+
JITDUMP("\nOSR enabled for this method\n");
6440+
}
6441+
else
6442+
{
6443+
JITDUMP("\nOSR disabled for this method: %s\n", noPatchpointReason);
6444+
assert(reason != nullptr);
6445+
}
64386446
}
64396447

64406448
if (reason != nullptr)

src/coreclr/jit/compiler.hpp

+15-3
Original file line numberDiff line numberDiff line change
@@ -1986,13 +1986,18 @@ inline bool Compiler::lvaKeepAliveAndReportThis()
19861986
// the VM requires us to keep the generics context alive or it is used in a look-up.
19871987
// We keep it alive in the lookup scenario, even when the VM didn't ask us to,
19881988
// because collectible types need the generics context when gc-ing.
1989+
//
1990+
// Methods that can inspire OSR methods must always report context as live
1991+
//
19891992
if (genericsContextIsThis)
19901993
{
1991-
const bool mustKeep = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
1994+
const bool mustKeep = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
1995+
const bool hasPatchpoint = doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints();
19921996

1993-
if (lvaGenericsContextInUse || mustKeep)
1997+
if (lvaGenericsContextInUse || mustKeep || hasPatchpoint)
19941998
{
1995-
JITDUMP("Reporting this as generic context: %s\n", mustKeep ? "must keep" : "referenced");
1999+
JITDUMP("Reporting this as generic context: %s\n",
2000+
mustKeep ? "must keep" : (hasPatchpoint ? "patchpoints" : "referenced"));
19962001
return true;
19972002
}
19982003
}
@@ -2024,6 +2029,13 @@ inline bool Compiler::lvaReportParamTypeArg()
20242029
{
20252030
return true;
20262031
}
2032+
2033+
// Methods that have patchpoints always report context as live
2034+
//
2035+
if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints())
2036+
{
2037+
return true;
2038+
}
20272039
}
20282040

20292041
// Otherwise, we don't need to report it -- the generics context parameter is unused.

src/coreclr/jit/importer.cpp

+22-6
Original file line numberDiff line numberDiff line change
@@ -9540,6 +9540,10 @@ var_types Compiler::impImportCall(OPCODE opcode,
95409540
// Take care to pass raw IL offset here as the 'debug info' might be different for
95419541
// inlinees.
95429542
rawILOffset);
9543+
9544+
// Devirtualization may change which method gets invoked. Update our local cache.
9545+
//
9546+
methHnd = callInfo->hMethod;
95439547
}
95449548

95459549
if (impIsThis(obj))
@@ -11803,9 +11807,21 @@ void Compiler::impImportBlockCode(BasicBlock* block)
1180311807

1180411808
#ifdef FEATURE_ON_STACK_REPLACEMENT
1180511809

11806-
// Is OSR enabled?
11810+
bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0);
11811+
11812+
#ifdef DEBUG
11813+
11814+
// Optionally suppress patchpoints by method hash
1180711815
//
11808-
if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
11816+
static ConfigMethodRange JitEnablePatchpointRange;
11817+
JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange());
11818+
const unsigned hash = impInlineRoot()->info.compMethodHash();
11819+
const bool inRange = JitEnablePatchpointRange.Contains(hash);
11820+
enablePatchpoints &= inRange;
11821+
11822+
#endif // DEBUG
11823+
11824+
if (enablePatchpoints)
1180911825
{
1181011826
// We don't inline at Tier0, if we do, we may need rethink our approach.
1181111827
// Could probably support inlines that don't introduce flow.
@@ -11820,13 +11836,13 @@ void Compiler::impImportBlockCode(BasicBlock* block)
1182011836
//
1182111837
if (!compTailPrefixSeen)
1182211838
{
11823-
assert(compCanHavePatchpoints());
11824-
1182511839
// The normal policy is only to add patchpoints to the targets of lexically
1182611840
// backwards branches.
1182711841
//
1182811842
if (compHasBackwardJump)
1182911843
{
11844+
assert(compCanHavePatchpoints());
11845+
1183011846
// Is the start of this block a suitable patchpoint?
1183111847
//
1183211848
if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0))
@@ -11857,8 +11873,8 @@ void Compiler::impImportBlockCode(BasicBlock* block)
1185711873
const bool tryOffsetOSR = offsetOSR >= 0;
1185811874
const bool tryRandomOSR = randomOSR > 0;
1185911875

11860-
if ((tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() &&
11861-
((block->bbFlags & BBF_PATCHPOINT) == 0))
11876+
if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) &&
11877+
!block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0))
1186211878
{
1186311879
// Block start can have a patchpoint. See if we should add one.
1186411880
//

src/coreclr/jit/jitconfigvalues.h

+10-2
Original file line numberDiff line numberDiff line change
@@ -471,8 +471,16 @@ CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1
471471
#endif // debug
472472

473473
#if defined(DEBUG)
474-
CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) // Enable osr for only some methods
475-
#endif // debug
474+
// EnableOsrRange allows you to limit the set of methods that will rely on OSR to escape
475+
// from Tier0 code. Methods outside the range that would normally be jitted at Tier0
476+
// and have patchpoints will instead be switched to optimized.
477+
CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange"))
478+
// EnablePatchpointRange allows you to limit the set of Tier0 methods that
479+
// will have patchpoints, and hence control which methods will create OSR methods.
480+
// Unlike EnableOsrRange, it will not alter the optimization setting for methods
481+
// outside the enabled range.
482+
CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange"))
483+
#endif
476484

477485
// Profile instrumentation options
478486
CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1)

src/coreclr/jit/phase.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -167,10 +167,11 @@ void Phase::PostPhase(PhaseStatus status)
167167
// clang-format off
168168

169169
static Phases s_allowlist[] = {
170+
PHASE_INCPROFILE,
171+
PHASE_IBCPREP,
170172
PHASE_IMPORTATION,
173+
PHASE_PATCHPOINTS,
171174
PHASE_IBCINSTR,
172-
PHASE_IBCPREP,
173-
PHASE_INCPROFILE,
174175
PHASE_INDXCALL,
175176
PHASE_MORPH_INLINE,
176177
PHASE_ALLOCATE_OBJECTS,

0 commit comments

Comments
 (0)