// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
// Flowgraph Miscellany
//------------------------------------------------------------------------
// blockNeedsGCPoll: Determine whether the block needs a GC poll inserted
//
// Arguments:
// block - the block to check
//
// Notes:
// The GC poll may not be required because of optimizations applied earlier
// or because of a GC poll done implicitly by regular unmanaged calls.
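// For example, a block whose only unmanaged calls carry SuppressGCTransition may need an
// explicit poll, while a block that also contains a regular unmanaged call polls implicitly
// and needs none.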
//
// Returns:
// Whether the GC poll needs to be inserted after the block
//
static bool blockNeedsGCPoll(BasicBlock* block)
{
bool blockMayNeedGCPoll = false;
for (Statement* const stmt : block->NonPhiStatements())
{
if ((stmt->GetRootNode()->gtFlags & GTF_CALL) != 0)
{
for (GenTree* const tree : stmt->TreeList())
{
if (tree->OperGet() == GT_CALL)
{
GenTreeCall* call = tree->AsCall();
if (call->IsUnmanaged())
{
if (!call->IsSuppressGCTransition())
{
// If the block contains a regular unmanaged call, we can depend on it
// to poll for GC. No need to scan further.
return false;
}
blockMayNeedGCPoll = true;
}
}
}
}
}
return blockMayNeedGCPoll;
}
//------------------------------------------------------------------------------
// fgInsertGCPolls : Insert GC polls for basic blocks containing calls to methods
// with SuppressGCTransitionAttribute.
//
// Notes:
// When not optimizing, the method relies on the BBF_HAS_SUPPRESSGC_CALL flag to
// find the basic blocks that require GC polls; when optimizing, the tree nodes
// are scanned to find calls to methods with SuppressGCTransitionAttribute.
//
// This must be done after any transformations that would add control flow between
// calls.
//
// Returns:
// PhaseStatus indicating what, if anything, was changed.
//
PhaseStatus Compiler::fgInsertGCPolls()
{
PhaseStatus result = PhaseStatus::MODIFIED_NOTHING;
if ((optMethodFlags & OMF_NEEDS_GCPOLLS) == 0)
{
return result;
}
bool createdPollBlocks = false;
// Walk through the blocks and hunt for a block that needs a GC Poll
//
for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->Next())
{
compCurBB = block;
// When optimizations are enabled, we can't rely on BBF_HAS_SUPPRESSGC_CALL flag:
// the call could've been moved, e.g., hoisted from a loop, CSE'd, etc.
if (opts.OptimizationDisabled() ? ((block->bbFlags & BBF_HAS_SUPPRESSGC_CALL) == 0) : !blockNeedsGCPoll(block))
{
continue;
}
result = PhaseStatus::MODIFIED_EVERYTHING;
// This block needs a GC poll. We either just insert a callout or we split the block and inline part of
// the test.
// If we're doing GCPOLL_CALL, just insert a GT_CALL node before the last node in the block.
assert(block->KindIs(BBJ_RETURN, BBJ_ALWAYS, BBJ_COND, BBJ_SWITCH, BBJ_NONE, BBJ_THROW, BBJ_CALLFINALLY));
GCPollType pollType = GCPOLL_INLINE;
// We'd like to insert an inline poll. Below is the list of places where we
// can't or don't want to emit an inline poll. Check all of those. If after all of that we still
// have INLINE, then emit an inline check.
if (opts.OptimizationDisabled())
{
// Don't split blocks and create inlined polls unless we're optimizing.
//
JITDUMP("Selecting CALL poll in block " FMT_BB " because of debug/minopts\n", block->bbNum);
pollType = GCPOLL_CALL;
}
else if (genReturnBB == block)
{
// we don't want to split the single return block
//
JITDUMP("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum);
pollType = GCPOLL_CALL;
}
else if (BBJ_SWITCH == block->GetJumpKind())
{
// We don't want to deal with all the outgoing edges of a switch block.
//
JITDUMP("Selecting CALL poll in block " FMT_BB " because it is a SWITCH block\n", block->bbNum);
pollType = GCPOLL_CALL;
}
else if ((block->bbFlags & BBF_COLD) != 0)
{
// We don't want to split a cold block.
//
JITDUMP("Selecting CALL poll in block " FMT_BB " because it is a cold block\n", block->bbNum);
pollType = GCPOLL_CALL;
}
BasicBlock* curBasicBlock = fgCreateGCPoll(pollType, block);
createdPollBlocks |= (block != curBasicBlock);
block = curBasicBlock;
}
// If we split a block to create a GC Poll, then rerun fgReorderBlocks to push the rarely run blocks out
// past the epilog. We should never split blocks unless we're optimizing.
if (createdPollBlocks)
{
noway_assert(opts.OptimizationEnabled());
fgReorderBlocks(/* useProfileData */ false);
fgUpdateChangedFlowGraph(FlowGraphUpdates::COMPUTE_BASICS);
}
return result;
}
//------------------------------------------------------------------------------
// fgCreateGCPoll : Insert a GC poll of the specified type for the given basic block.
//
// Arguments:
// pollType - The type of GC poll to insert
// block - Basic block to insert the poll for
//
// Return Value:
// If new basic blocks are inserted, the last inserted block; otherwise, the input block.
//
BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block)
{
bool createdPollBlocks;
void* addrTrap;
void* pAddrOfCaptureThreadGlobal;
addrTrap = info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal);
// If the trap and address of thread global are null, make the call.
if (addrTrap == nullptr && pAddrOfCaptureThreadGlobal == nullptr)
{
pollType = GCPOLL_CALL;
}
// Create the GC_CALL node
GenTree* call = gtNewHelperCallNode(CORINFO_HELP_POLL_GC, TYP_VOID);
call = fgMorphCall(call->AsCall());
gtSetEvalOrder(call);
BasicBlock* bottom = nullptr;
if (pollType == GCPOLL_CALL)
{
createdPollBlocks = false;
Statement* newStmt = nullptr;
if (block->KindIs(BBJ_ALWAYS, BBJ_CALLFINALLY, BBJ_NONE))
{
// For BBJ_ALWAYS, BBJ_CALLFINALLY, and BBJ_NONE we don't need to insert it before the condition.
// Just append it.
newStmt = fgNewStmtAtEnd(block, call);
}
else
{
newStmt = fgNewStmtNearEnd(block, call);
// For DDB156656, we need to associate the GC Poll with the IL offset (and therefore sequence
// point) of the tree before which we inserted the poll. One example of when this is a
// problem:
// if (...) { //1
// ...
// } //2
// else { //3
// ...
// }
// (gcpoll) //4
// return. //5
//
// If we take the if statement at 1, we encounter a jump at 2. This jumps over the else
// and lands at 4. 4 is where we inserted the gcpoll. However, that is associated with
// the sequence point at 3. Therefore, the debugger displays the wrong source line at the
// gc poll location.
//
// More formally, if control flow targets an instruction, that instruction must be the
// start of a new sequence point.
Statement* nextStmt = newStmt->GetNextStmt();
if (nextStmt != nullptr)
{
// Is it possible for gtNextStmt to be NULL?
newStmt->SetDebugInfo(nextStmt->GetDebugInfo());
}
}
if (fgNodeThreading != NodeThreading::None)
{
gtSetStmtInfo(newStmt);
fgSetStmtSeq(newStmt);
}
block->bbFlags |= BBF_GC_SAFE_POINT;
#ifdef DEBUG
if (verbose)
{
printf("*** creating GC Poll in block " FMT_BB "\n", block->bbNum);
gtDispBlockStmts(block);
}
#endif // DEBUG
}
else // GCPOLL_INLINE
{
assert(pollType == GCPOLL_INLINE);
createdPollBlocks = true;
// if we're doing GCPOLL_INLINE, then:
// 1) Create two new blocks: Poll and Bottom. The original block is called Top.
// I want to create:
// top -> poll -> bottom (lexically)
// so that we jump over poll to get to bottom.
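// The resulting shape (sketch):
//
//   top:    ... original statements ...
//           JTRUE(g_TrapReturningThreads == 0) --> bottom
//   poll:   call CORINFO_HELP_POLL_GC           // rarely run, falls through into bottom
//   bottom: top's original terminator (and its last statement for COND/RETURN/THROW)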
BasicBlock* top = block;
BasicBlock* topFallThrough = nullptr;
unsigned char lpIndexFallThrough = BasicBlock::NOT_IN_LOOP;
if (top->KindIs(BBJ_COND))
{
topFallThrough = top->Next();
lpIndexFallThrough = topFallThrough->bbNatLoopNum;
}
BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true);
bottom = fgNewBBafter(top->GetJumpKind(), poll, true);
BBjumpKinds oldJumpKind = top->GetJumpKind();
unsigned char lpIndex = top->bbNatLoopNum;
// Update block flags
const BasicBlockFlags originalFlags = top->bbFlags | BBF_GC_SAFE_POINT;
// We are allowed to split loops and we need to keep a few other flags...
//
noway_assert((originalFlags & (BBF_SPLIT_NONEXIST &
~(BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_LOOP_PREHEADER |
BBF_RETLESS_CALL))) == 0);
top->bbFlags = originalFlags & (~(BBF_SPLIT_LOST | BBF_LOOP_PREHEADER | BBF_RETLESS_CALL) | BBF_GC_SAFE_POINT);
bottom->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT | BBF_LOOP_PREHEADER |
BBF_RETLESS_CALL);
bottom->inheritWeight(top);
poll->bbFlags |= originalFlags & (BBF_SPLIT_GAINED | BBF_IMPORTED | BBF_GC_SAFE_POINT);
// Mark Poll as rarely run.
poll->bbSetRunRarely();
poll->bbNatLoopNum = lpIndex; // Set the bbNatLoopNum in case we are in a loop
// Bottom gets all the outgoing edges and inherited flags of Original.
bottom->SetJumpDest(top->GetJumpDest());
bottom->bbNatLoopNum = lpIndex; // Set the bbNatLoopNum in case we are in a loop
if (lpIndex != BasicBlock::NOT_IN_LOOP)
{
// Set the new lpBottom in the natural loop table
optLoopTable[lpIndex].lpBottom = bottom;
}
if (lpIndexFallThrough != BasicBlock::NOT_IN_LOOP)
{
// Set the new lpHead in the natural loop table
optLoopTable[lpIndexFallThrough].lpHead = bottom;
}
// Add the GC_CALL node to Poll.
Statement* pollStmt = fgNewStmtAtEnd(poll, call);
if (fgNodeThreading != NodeThreading::None)
{
gtSetStmtInfo(pollStmt);
fgSetStmtSeq(pollStmt);
}
// Remove the last statement from Top and add it to Bottom if necessary.
if ((oldJumpKind == BBJ_COND) || (oldJumpKind == BBJ_RETURN) || (oldJumpKind == BBJ_THROW))
{
Statement* stmt = top->firstStmt();
while (stmt->GetNextStmt() != nullptr)
{
stmt = stmt->GetNextStmt();
}
fgRemoveStmt(top, stmt);
fgInsertStmtAtEnd(bottom, stmt);
}
// For BBJ_ALWAYS blocks, bottom is an empty block.
// Create a GT_EQ node that checks against g_TrapReturningThreads. True jumps to Bottom,
// false falls through to poll. Add this to the end of Top. Top is now BBJ_COND. Bottom is
// now a jump target.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef ENABLE_FAST_GCPOLL_HELPER
// Prefer the fast GC poll helper over the double indirection
noway_assert(pAddrOfCaptureThreadGlobal == nullptr);
#endif
GenTree* value; // The value of g_TrapReturningThreads
if (pAddrOfCaptureThreadGlobal != nullptr)
{
// Use a double indirection
GenTree* addr =
gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pAddrOfCaptureThreadGlobal, GTF_ICON_CONST_PTR, true);
value = gtNewIndir(TYP_INT, addr, GTF_IND_NONFAULTING);
}
else
{
// Use a single indirection
value = gtNewIndOfIconHandleNode(TYP_INT, (size_t)addrTrap, GTF_ICON_GLOBAL_PTR, false);
}
// NOTE: in C++ an equivalent load is done via LoadWithoutBarrier() to ensure that
// program order is preserved (i.e., the load is not hoisted out of a loop or cached in a local).
//
// Here we introduce the read very late, after all major optimizations are done, and the location
// is formally unknown, so no one can optimize the load, thus no special flags are needed.
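// The check built below is JTRUE(EQ(IND(trap address), 0)): branch to bottom when no GC is
// pending, otherwise fall through into the poll block.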
// Compare for equal to zero
GenTree* trapRelop = gtNewOperNode(GT_EQ, TYP_INT, value, gtNewIconNode(0, TYP_INT));
trapRelop->gtFlags |= GTF_RELOP_JMP_USED | GTF_DONT_CSE;
GenTree* trapCheck = gtNewOperNode(GT_JTRUE, TYP_VOID, trapRelop);
gtSetEvalOrder(trapCheck);
Statement* trapCheckStmt = fgNewStmtAtEnd(top, trapCheck);
if (fgNodeThreading != NodeThreading::None)
{
gtSetStmtInfo(trapCheckStmt);
fgSetStmtSeq(trapCheckStmt);
}
#ifdef DEBUG
if (verbose)
{
printf("Adding trapCheck in " FMT_BB "\n", top->bbNum);
gtDispTree(trapCheck);
}
#endif
top->SetJumpKindAndTarget(BBJ_COND, bottom);
// Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor.
fgAddRefPred(bottom, poll);
fgAddRefPred(bottom, top);
fgAddRefPred(poll, top);
// Replace Top with Bottom in the predecessor list of all outgoing edges from Bottom
// (1 for unconditional branches, 2 for conditional branches, N for switches).
switch (oldJumpKind)
{
case BBJ_NONE:
fgReplacePred(bottom->Next(), top, bottom);
break;
case BBJ_RETURN:
case BBJ_THROW:
// no successors
break;
case BBJ_COND:
// replace predecessor in the fall through block.
noway_assert(!bottom->IsLast());
fgReplacePred(bottom->Next(), top, bottom);
// fall through for the jump target
FALLTHROUGH;
case BBJ_ALWAYS:
case BBJ_CALLFINALLY:
fgReplacePred(bottom->GetJumpDest(), top, bottom);
break;
case BBJ_SWITCH:
NO_WAY("SWITCH should be a call rather than an inlined poll.");
break;
default:
NO_WAY("Unknown block type for updating predecessor lists.");
}
if (compCurBB == top)
{
compCurBB = bottom;
}
#ifdef DEBUG
if (verbose)
{
printf("*** creating inlined GC Poll in top block " FMT_BB "\n", top->bbNum);
gtDispBlockStmts(top);
printf(" poll block is " FMT_BB "\n", poll->bbNum);
gtDispBlockStmts(poll);
printf(" bottom block is " FMT_BB "\n", bottom->bbNum);
gtDispBlockStmts(bottom);
printf("\nAfter this change in fgCreateGCPoll the BB graph is:");
fgDispBasicBlocks(false);
}
#endif // DEBUG
}
return createdPollBlocks ? bottom : block;
}
//------------------------------------------------------------------------
// fgCanSwitchToOptimized: Determines if conditions are met to allow switching the opt level to optimized
//
// Return Value:
// True if the opt level may be switched from tier 0 to optimized, false otherwise
//
// Assumptions:
// - compInitOptions() has been called
// - compSetOptimizationLevel() has not been called
//
// Notes:
// This method is to be called at some point before compSetOptimizationLevel() to determine if the opt level may be
// changed based on information gathered in early phases.
bool Compiler::fgCanSwitchToOptimized()
{
bool result = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT) &&
!opts.compDbgCode && !compIsForInlining();
if (result)
{
// Ensure that it would be safe to change the opt level
assert(opts.compFlags == CLFLG_MINOPT);
assert(!opts.IsMinOptsSet());
}
return result;
}
//------------------------------------------------------------------------
// fgSwitchToOptimized: Switch the opt level from tier 0 to optimized
//
// Arguments:
// reason - reason why opt level was switched
//
// Assumptions:
// - fgCanSwitchToOptimized() is true
// - compSetOptimizationLevel() has not been called
//
// Notes:
// This method is to be called at some point before compSetOptimizationLevel() to switch the opt level to optimized
// based on information gathered in early phases.
void Compiler::fgSwitchToOptimized(const char* reason)
{
assert(fgCanSwitchToOptimized());
// Switch to optimized and re-init options
JITDUMP("****\n**** JIT Tier0 jit request switching to Tier1 because: %s\n****\n", reason);
assert(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0));
opts.jitFlags->Clear(JitFlags::JIT_FLAG_TIER0);
opts.jitFlags->Clear(JitFlags::JIT_FLAG_BBINSTR);
opts.jitFlags->Clear(JitFlags::JIT_FLAG_BBINSTR_IF_LOOPS);
opts.jitFlags->Clear(JitFlags::JIT_FLAG_OSR);
opts.jitFlags->Set(JitFlags::JIT_FLAG_BBOPT);
// Leave a note for jit diagnostics
compSwitchedToOptimized = true;
compInitOptions(opts.jitFlags);
// Notify the VM of the change
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_SWITCHED_TO_OPTIMIZED);
}
//------------------------------------------------------------------------
// fgMayExplicitTailCall: Conservatively estimates whether the importer may actually generate a tail call
// for an explicit tail call.
//
// Return Value:
// - False if a tail call will not be generated
// - True if a tail call *may* be generated
//
// Assumptions:
// - compInitOptions() has been called
// - info.compIsVarArgs has been initialized
// - An explicit tail call has been seen
// - compSetOptimizationLevel() has not been called
bool Compiler::fgMayExplicitTailCall()
{
assert(!compIsForInlining());
if (info.compFlags & CORINFO_FLG_SYNCH)
{
// Caller is synchronized
return false;
}
if (opts.IsReversePInvoke())
{
// Reverse P/Invoke
return false;
}
#if !FEATURE_FIXED_OUT_ARGS
if (info.compIsVarArgs)
{
// Caller is varargs
return false;
}
#endif // FEATURE_FIXED_OUT_ARGS
return true;
}
//------------------------------------------------------------------------
// fgFindJumpTargets: walk the IL stream, determining jump target offsets
//
// Arguments:
// codeAddr - base address of the IL code buffer
// codeSize - number of bytes in the IL code buffer
// jumpTarget - [OUT] bit vector for flagging jump targets
//
// Notes:
// If inlining or prejitting the root, this method also makes
// various observations about the method that factor into inline
// decisions.
//
// May throw an exception if the IL is malformed.
//
// jumpTarget[N] is set to 1 if IL offset N is a jump target in the method.
//
// Also sets m_addrExposed and lvHasILStoreOp, ilHasMultipleILStoreOp in lvaTable[].
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
//------------------------------------------------------------------------
// fgImport: read the IL for the method and create jit IR
//
// Returns:
// phase status
//
PhaseStatus Compiler::fgImport()
{
impImport();
// Estimate how much of method IL was actually imported.
//
// Note this includes (to some extent) the impact of importer folded
// branches, provided the folded tree covered the entire block's IL.
unsigned importedILSize = 0;
for (BasicBlock* const block : Blocks())
{
if ((block->bbFlags & BBF_IMPORTED) != 0)
{
// Assume if we generate any IR for the block we generate IR for the entire block.
if (block->firstStmt() != nullptr)
{
IL_OFFSET beginOffset = block->bbCodeOffs;
IL_OFFSET endOffset = block->bbCodeOffsEnd;
if ((beginOffset != BAD_IL_OFFSET) && (endOffset != BAD_IL_OFFSET) && (endOffset > beginOffset))
{
unsigned blockILSize = endOffset - beginOffset;
importedILSize += blockILSize;
}
}
}
}
// Could be tripped up if we ever duplicate blocks
assert(importedILSize <= info.compILCodeSize);
// Leave a note if we only did a partial import.
if (importedILSize != info.compILCodeSize)
{
JITDUMP("\n** Note: %s IL was partially imported -- imported %u of %u bytes of method IL\n",
compIsForInlining() ? "inlinee" : "root method", importedILSize, info.compILCodeSize);
}
// Record this for diagnostics and for the inliner's budget computations
info.compILImportSize = importedILSize;
if (compIsForInlining())
{
compInlineResult->SetImportedILSize(info.compILImportSize);
}
return PhaseStatus::MODIFIED_EVERYTHING;
}
/*****************************************************************************
* This function returns true if tree is a node with a call
* that unconditionally throws an exception
*/
bool Compiler::fgIsThrow(GenTree* tree)
{
if (!tree->IsCall())
{
return false;
}
GenTreeCall* call = tree->AsCall();
if ((call->gtCallType == CT_HELPER) && s_helperCallProperties.AlwaysThrow(eeGetHelperNum(call->gtCallMethHnd)))
{
noway_assert(call->gtFlags & GTF_EXCEPT);
return true;
}
return false;
}
/*****************************************************************************
* This function returns true if the blocks are in different hot-cold regions.
* It returns false when the blocks are both in the same region.
*/
bool Compiler::fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2)
{
noway_assert(blk1 != nullptr);
noway_assert(blk2 != nullptr);
if (fgFirstColdBlock == nullptr)
{
return false;
}
// If one block is Hot and the other is Cold then we are in different regions
return ((blk1->bbFlags & BBF_COLD) != (blk2->bbFlags & BBF_COLD));
}
bool Compiler::fgIsBlockCold(BasicBlock* blk)
{
noway_assert(blk != nullptr);
if (fgFirstColdBlock == nullptr)
{
return false;
}
return ((blk->bbFlags & BBF_COLD) != 0);
}
/*****************************************************************************
* This function returns true if tree is a GT_COMMA node with a call
* that unconditionally throws an exception
*/
bool Compiler::fgIsCommaThrow(GenTree* tree, bool forFolding /* = false */)
{
// Instead of always folding comma throws,
// with stress enabled we only fold half the time
if (forFolding && compStressCompile(STRESS_FOLD, 50))
{
return false; /* Don't fold */
}
/* Check for a GT_COMMA whose first operand is a call that unconditionally throws */
if ((tree->gtOper == GT_COMMA) && (tree->gtFlags & GTF_CALL) && (tree->gtFlags & GTF_EXCEPT))
{
return (fgIsThrow(tree->AsOp()->gtOp1));
}
return false;
}
//------------------------------------------------------------------------
// fgGetStaticsCCtorHelper: Creates a helper call node for the shared statics or class
// constructor helper `helper` for class `cls`.
//
// Arguments:
// cls - The class handle
// helper - The helper function
// typeIndex - The static block type index. Used only for
// CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR_OPTIMIZED or
// CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR_OPTIMIZED to cache
// the static block in an array at index typeIndex.
//
// Return Value:
// The call node corresponding to the helper
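// For example, CORINFO_HELP_GETSHARED_GCSTATIC_BASE produces a TYP_BYREF helper call taking the
// module ID and class ID as arguments, while the *_NOCTOR_OPTIMIZED thread-static helpers take
// only the typeIndex.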
GenTreeCall* Compiler::fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper, uint32_t typeIndex)
{
bool bNeedClassID = true;
GenTreeFlags callFlags = GTF_EMPTY;
var_types type = TYP_BYREF;
// This is sort of ugly, as we have knowledge of what the helper is returning.
// We need the return type.
switch (helper)
{
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR_OPTIMIZED:
bNeedClassID = false;
FALLTHROUGH;
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
callFlags |= GTF_CALL_HOISTABLE;
FALLTHROUGH;
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
// type = TYP_BYREF;
break;
case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR_OPTIMIZED:
case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
bNeedClassID = false;
FALLTHROUGH;
case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
callFlags |= GTF_CALL_HOISTABLE;
FALLTHROUGH;
case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
type = TYP_I_IMPL;
break;
default:
assert(!"unknown shared statics helper");
break;
}
GenTree* opModuleIDArg;
GenTree* opClassIDArg;
// Get the class ID
unsigned clsID;
size_t moduleID;
void* pclsID;
void* pmoduleID;
clsID = info.compCompHnd->getClassDomainID(cls, &pclsID);
moduleID = info.compCompHnd->getClassModuleIdForStatics(cls, nullptr, &pmoduleID);
if (!(callFlags & GTF_CALL_HOISTABLE))
{
if (info.compCompHnd->getClassAttribs(cls) & CORINFO_FLG_BEFOREFIELDINIT)
{
callFlags |= GTF_CALL_HOISTABLE;
}
}
if (pmoduleID)
{
opModuleIDArg = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pmoduleID, GTF_ICON_CIDMID_HDL, true);
}
else
{
opModuleIDArg = gtNewIconNode((size_t)moduleID, TYP_I_IMPL);
}
GenTreeCall* result;
if (bNeedClassID)
{
if (pclsID)
{
opClassIDArg = gtNewIndOfIconHandleNode(TYP_INT, (size_t)pclsID, GTF_ICON_CIDMID_HDL, true);
}
else
{
opClassIDArg = gtNewIconNode(clsID, TYP_INT);
}
result = gtNewHelperCallNode(helper, type, opModuleIDArg, opClassIDArg);
}
else if ((helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR_OPTIMIZED) ||
(helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR_OPTIMIZED))
{
result = gtNewHelperCallNode(helper, type, gtNewIconNode(typeIndex));
result->SetExpTLSFieldAccess();
}
else
{
result = gtNewHelperCallNode(helper, type, opModuleIDArg);
}
if (IsStaticHelperEligibleForExpansion(result))
{
// Keep class handle attached to the helper call since it's difficult to restore it.
result->gtInitClsHnd = cls;
}
result->gtFlags |= callFlags;
// If we're importing the special EqualityComparer<T>.Default or Comparer<T>.Default
// intrinsics, flag the helper call. Later during inlining, we can
// remove the helper call if the associated field lookup is unused.
if ((info.compFlags & CORINFO_FLG_INTRINSIC) != 0)
{
NamedIntrinsic ni = lookupNamedIntrinsic(info.compMethodHnd);
if ((ni == NI_System_Collections_Generic_EqualityComparer_get_Default) ||
(ni == NI_System_Collections_Generic_Comparer_get_Default))
{
JITDUMP("\nmarking helper call [%06u] as special dce...\n", result->gtTreeID);
result->gtCallMoreFlags |= GTF_CALL_M_HELPER_SPECIAL_DCE;
}
}
return result;
}
//------------------------------------------------------------------------------
// fgSetPreferredInitCctor: Set CORINFO_HELP_READYTORUN_NONGCSTATIC_BASE as the
// preferred init cctor helper if it is undefined.
//
void Compiler::fgSetPreferredInitCctor()
{
if (m_preferredInitCctor == CORINFO_HELP_UNDEF)
{
// This is the cheapest helper that triggers the constructor.
m_preferredInitCctor = CORINFO_HELP_READYTORUN_NONGCSTATIC_BASE;
}
}
GenTreeCall* Compiler::fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls)
{
#ifdef FEATURE_READYTORUN
if (opts.IsReadyToRun())
{
CORINFO_RESOLVED_TOKEN resolvedToken;
memset(&resolvedToken, 0, sizeof(resolvedToken));
resolvedToken.hClass = cls;
fgSetPreferredInitCctor();
return impReadyToRunHelperToTree(&resolvedToken, m_preferredInitCctor, TYP_BYREF);
}
#endif
// Call the shared non-GC static helper, as it's the fastest
return fgGetStaticsCCtorHelper(cls, info.compCompHnd->getSharedCCtorHelper(cls));
}
//------------------------------------------------------------------------------
// fgAddrCouldBeNull : Check whether the address tree can represent null.
//
// Arguments:
// addr - Address to check
//
// Return Value:
// True if address could be null; false otherwise
//
bool Compiler::fgAddrCouldBeNull(GenTree* addr)
{
switch (addr->OperGet())
{
case GT_CNS_INT:
return !addr->IsIconHandle();
case GT_CNS_STR:
case GT_FIELD_ADDR:
case GT_LCL_ADDR:
case GT_CLS_VAR_ADDR:
return false;
case GT_IND:
return (addr->gtFlags & GTF_IND_NONNULL) == 0;
case GT_INDEX_ADDR:
return !addr->AsIndexAddr()->IsNotNull();
case GT_ARR_ADDR:
return (addr->gtFlags & GTF_ARR_ADDR_NONNULL) == 0;
case GT_LCL_VAR:
return !lvaIsImplicitByRefLocal(addr->AsLclVar()->GetLclNum());
case GT_COMMA:
return fgAddrCouldBeNull(addr->AsOp()->gtOp2);
case GT_CALL:
return !addr->IsHelperCall() || !s_helperCallProperties.NonNullReturn(addr->AsCall()->GetHelperNum());
case GT_ADD:
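// An ADD is proven non-null in a few cases: a handle plus a small non-handle constant can
// never be null; if one operand is a small non-handle constant, the sum can only be null if
// the other operand could be; anything else conservatively falls through to "could be null".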
if (addr->AsOp()->gtOp1->gtOper == GT_CNS_INT)
{
GenTree* cns1Tree = addr->AsOp()->gtOp1;
if (!cns1Tree->IsIconHandle())
{
if (!fgIsBigOffset(cns1Tree->AsIntCon()->gtIconVal))
{
// Op1 was an ordinary small constant
return fgAddrCouldBeNull(addr->AsOp()->gtOp2);
}
}
else // Op1 was a handle represented as a constant
{
// Is Op2 also a constant?
if (addr->AsOp()->gtOp2->gtOper == GT_CNS_INT)
{
GenTree* cns2Tree = addr->AsOp()->gtOp2;
// Is this an addition of a handle and constant
if (!cns2Tree->IsIconHandle())
{
if (!fgIsBigOffset(cns2Tree->AsIntCon()->gtIconVal))
{
// Op2 was an ordinary small constant
return false; // we can't have a null address
}
}
}
}
}
else
{
// Op1 is not a constant. What about Op2?
if (addr->AsOp()->gtOp2->gtOper == GT_CNS_INT)
{
GenTree* cns2Tree = addr->AsOp()->gtOp2;
// Is this an addition of a small constant
if (!cns2Tree->IsIconHandle())
{
if (!fgIsBigOffset(cns2Tree->AsIntCon()->gtIconVal))
{
// Op2 was an ordinary small constant
return fgAddrCouldBeNull(addr->AsOp()->gtOp1);
}
}
}
}
break;
default:
break;
}
return true; // default result: addr could be null.
}
//------------------------------------------------------------------------------
// fgOptimizeDelegateConstructor: try to optimize construction of a delegate
//
// Arguments:
// call -- call to original delegate constructor
// exactContextHnd -- [out] context handle to update
// ldftnToken -- [in] resolved token for the method the delegate will invoke,
// if known, or nullptr if not known
//
// Return Value:
// Original call tree if no optimization applies.
// Updated call tree if optimized.
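// The pattern match below keys off the shape of the third argument (the delegate's target
// method pointer): a GT_FTN_ADDR (ldftn) or a call to CORINFO_HELP_VIRTUAL_FUNC_PTR
// (ldvirtftn) exposes the target method handle needed to pick a more specific construction path.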
GenTree* Compiler::fgOptimizeDelegateConstructor(GenTreeCall* call,
CORINFO_CONTEXT_HANDLE* ExactContextHnd,
methodPointerInfo* ldftnToken)
{
JITDUMP("\nfgOptimizeDelegateConstructor: ");
noway_assert(call->gtCallType == CT_USER_FUNC);
CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;
CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getMethodClass(methHnd);
assert(call->gtArgs.HasThisPointer());
assert(call->gtArgs.CountArgs() == 3);
assert(!call->gtArgs.AreArgsComplete());
GenTree* targetMethod = call->gtArgs.GetArgByIndex(2)->GetNode();
noway_assert(targetMethod->TypeGet() == TYP_I_IMPL);
genTreeOps oper = targetMethod->OperGet();
CORINFO_METHOD_HANDLE targetMethodHnd = nullptr;
GenTree* qmarkNode = nullptr;
if (oper == GT_FTN_ADDR)
{
GenTreeFptrVal* fptrValTree = targetMethod->AsFptrVal();
fptrValTree->gtFptrDelegateTarget = true;
targetMethodHnd = fptrValTree->gtFptrMethod;
}
else if (oper == GT_CALL && targetMethod->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR))
{
assert(targetMethod->AsCall()->gtArgs.CountArgs() == 3);
GenTree* handleNode = targetMethod->AsCall()->gtArgs.GetArgByIndex(2)->GetNode();