@@ -156,61 +156,6 @@ CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* sl
   return call;
 }

-void PhaseMacroExpand::extract_call_projections(CallNode *call) {
-  _fallthroughproj = NULL;
-  _fallthroughcatchproj = NULL;
-  _ioproj_fallthrough = NULL;
-  _ioproj_catchall = NULL;
-  _catchallcatchproj = NULL;
-  _memproj_fallthrough = NULL;
-  _memproj_catchall = NULL;
-  _resproj = NULL;
-  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
-    ProjNode *pn = call->fast_out(i)->as_Proj();
-    switch (pn->_con) {
-      case TypeFunc::Control:
-      {
-        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
-        _fallthroughproj = pn;
-        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
-        const Node *cn = pn->fast_out(j);
-        if (cn->is_Catch()) {
-          ProjNode *cpn = NULL;
-          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
-            cpn = cn->fast_out(k)->as_Proj();
-            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
-            if (cpn->_con == CatchProjNode::fall_through_index)
-              _fallthroughcatchproj = cpn;
-            else {
-              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
-              _catchallcatchproj = cpn;
-            }
-          }
-        }
-        break;
-      }
-      case TypeFunc::I_O:
-        if (pn->_is_io_use)
-          _ioproj_catchall = pn;
-        else
-          _ioproj_fallthrough = pn;
-        break;
-      case TypeFunc::Memory:
-        if (pn->_is_io_use)
-          _memproj_catchall = pn;
-        else
-          _memproj_fallthrough = pn;
-        break;
-      case TypeFunc::Parms:
-        _resproj = pn;
-        break;
-      default:
-        assert(false, "unexpected projection from allocation node.");
-    }
-  }
-
-}
-
 void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) {
   BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2();
   bs->eliminate_gc_barrier(this, p2x);
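
The eight `_*proj` fields cleared by the deleted walk above move out of `PhaseMacroExpand` and into a single `_callprojs` member. A minimal sketch of the container this diff appears to use (field names are taken from the `_callprojs.` accesses below; the actual declaration lives in `callnode.hpp` and may differ in detail):

```cpp
// Sketch only: a plain holder for a call's outgoing projections, assuming
// the CallProjections helper declared in callnode.hpp.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;       // TypeFunc::Control projection on the call
  Node* fallthrough_catchproj;  // CatchProj taken on normal fall-through
  Node* catchall_catchproj;     // CatchProj taken when an exception is thrown
  Node* fallthrough_ioproj;     // TypeFunc::I_O, normal path
  Node* catchall_ioproj;        // TypeFunc::I_O, exception path
  Node* fallthrough_memproj;    // TypeFunc::Memory, normal path
  Node* catchall_memproj;       // TypeFunc::Memory, exception path
  Node* resproj;                // TypeFunc::Parms result projection, if any
};
```
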
@@ -992,21 +937,21 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
   //
   // Process other users of allocation's projections
   //
-  if (_resproj != NULL && _resproj->outcnt() != 0) {
+  if (_callprojs.resproj != NULL && _callprojs.resproj->outcnt() != 0) {
     // First disconnect stores captured by Initialize node.
     // If Initialize node is eliminated first in the following code,
     // it will kill such stores and DUIterator_Last will assert.
-    for (DUIterator_Fast jmax, j = _resproj->fast_outs(jmax); j < jmax; j++) {
-      Node *use = _resproj->fast_out(j);
+    for (DUIterator_Fast jmax, j = _callprojs.resproj->fast_outs(jmax); j < jmax; j++) {
+      Node* use = _callprojs.resproj->fast_out(j);
       if (use->is_AddP()) {
         // raw memory addresses used only by the initialization
         _igvn.replace_node(use, C->top());
         --j; --jmax;
       }
     }
-    for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) {
-      Node *use = _resproj->last_out(j);
-      uint oc1 = _resproj->outcnt();
+    for (DUIterator_Last jmin, j = _callprojs.resproj->last_outs(jmin); j >= jmin; ) {
+      Node* use = _callprojs.resproj->last_out(j);
+      uint oc1 = _callprojs.resproj->outcnt();
       if (use->is_Initialize()) {
         // Eliminate Initialize node.
         InitializeNode *init = use->as_Initialize();
@@ -1016,44 +961,44 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
           _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control));
 #ifdef ASSERT
           Node* tmp = init->in(TypeFunc::Control);
-          assert(tmp == _fallthroughcatchproj, "allocation control projection");
+          assert(tmp == _callprojs.fallthrough_catchproj, "allocation control projection");
 #endif
         }
         Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory);
         if (mem_proj != NULL) {
           Node *mem = init->in(TypeFunc::Memory);
 #ifdef ASSERT
           if (mem->is_MergeMem()) {
-            assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection");
+            assert(mem->in(TypeFunc::Memory) == _callprojs.fallthrough_memproj, "allocation memory projection");
           } else {
-            assert(mem == _memproj_fallthrough, "allocation memory projection");
+            assert(mem == _callprojs.fallthrough_memproj, "allocation memory projection");
           }
 #endif
           _igvn.replace_node(mem_proj, mem);
         }
       } else {
         assert(false, "only Initialize or AddP expected");
       }
-      j -= (oc1 - _resproj->outcnt());
+      j -= (oc1 - _callprojs.resproj->outcnt());
     }
   }
-  if (_fallthroughcatchproj != NULL) {
-    _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control));
+  if (_callprojs.fallthrough_catchproj != NULL) {
+    _igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control));
   }
-  if (_memproj_fallthrough != NULL) {
-    _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory));
+  if (_callprojs.fallthrough_memproj != NULL) {
+    _igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory));
   }
-  if (_memproj_catchall != NULL) {
-    _igvn.replace_node(_memproj_catchall, C->top());
+  if (_callprojs.catchall_memproj != NULL) {
+    _igvn.replace_node(_callprojs.catchall_memproj, C->top());
   }
-  if (_ioproj_fallthrough != NULL) {
-    _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O));
+  if (_callprojs.fallthrough_ioproj != NULL) {
+    _igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
   }
-  if (_ioproj_catchall != NULL) {
-    _igvn.replace_node(_ioproj_catchall, C->top());
+  if (_callprojs.catchall_ioproj != NULL) {
+    _igvn.replace_node(_callprojs.catchall_ioproj, C->top());
   }
-  if (_catchallcatchproj != NULL) {
-    _igvn.replace_node(_catchallcatchproj, C->top());
+  if (_callprojs.catchall_catchproj != NULL) {
+    _igvn.replace_node(_callprojs.catchall_catchproj, C->top());
   }
 }

@@ -1078,7 +1023,7 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
     return false;
   }

-  extract_call_projections(alloc);
+  alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

   GrowableArray<SafePointNode*> safepoints;
   if (!can_eliminate_allocation(alloc, safepoints)) {
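
Every former `extract_call_projections(...)` call site in this file now delegates to `CallNode::extract_projections`, which performs essentially the projection walk deleted in the first hunk, filling the struct instead of `PhaseMacroExpand` fields. A hedged sketch of the call pattern (flag semantics inferred from their names and the surrounding code; in the patch `_callprojs` is a `PhaseMacroExpand` member, shown here as a local for self-containment):

```cpp
// Usage sketch; 'alloc' stands for any CallNode being expanded.
CallProjections projs;
alloc->extract_projections(&projs,
                           false /*separate_io_proj*/,  // keep one i_o projection
                           false /*do_asserts*/);       // tolerate absent projections
if (projs.resproj != NULL && projs.resproj->outcnt() != 0) {
  // The call's result projection is still used; rewire those uses
  // before the node can be eliminated.
}
```
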
@@ -1133,7 +1078,7 @@ bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) {

   assert(boxing->result_cast() == NULL, "unexpected boxing node result");

-  extract_call_projections(boxing);
+  boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

   const TypeTuple* r = boxing->tf()->range();
   assert(r->cnt() > TypeFunc::Parms, "sanity");
@@ -1463,43 +1408,43 @@ void PhaseMacroExpand::expand_allocate_common(
   //
   // We are interested in the CatchProj nodes.
   //
-  extract_call_projections(call);
+  call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

   // An allocate node has separate memory projections for the uses on
   // the control and i_o paths. Replace the control memory projection with
   // result_phi_rawmem (unless we are only generating a slow call when
   // both memory projections are combined)
-  if (expand_fast_path && _memproj_fallthrough != NULL) {
-    migrate_outs(_memproj_fallthrough, result_phi_rawmem);
-  }
-  // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
-  // _memproj_catchall so we end up with a call that has only 1 memory projection.
-  if (_memproj_catchall != NULL) {
-    if (_memproj_fallthrough == NULL) {
-      _memproj_fallthrough = new ProjNode(call, TypeFunc::Memory);
-      transform_later(_memproj_fallthrough);
+  if (expand_fast_path && _callprojs.fallthrough_memproj != NULL) {
+    migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem);
+  }
+  // Now change uses of catchall_memproj to use fallthrough_memproj and delete
+  // catchall_memproj so we end up with a call that has only 1 memory projection.
+  if (_callprojs.catchall_memproj != NULL) {
+    if (_callprojs.fallthrough_memproj == NULL) {
+      _callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory);
+      transform_later(_callprojs.fallthrough_memproj);
     }
-    migrate_outs(_memproj_catchall, _memproj_fallthrough);
-    _igvn.remove_dead_node(_memproj_catchall);
+    migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj);
+    _igvn.remove_dead_node(_callprojs.catchall_memproj);
   }

   // An allocate node has separate i_o projections for the uses on the control
   // and i_o paths. Always replace the control i_o projection with result i_o
   // otherwise incoming i_o become dead when only a slow call is generated
   // (it is different from memory projections where both projections are
   // combined in such case).
-  if (_ioproj_fallthrough != NULL) {
-    migrate_outs(_ioproj_fallthrough, result_phi_i_o);
-  }
-  // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
-  // _ioproj_catchall so we end up with a call that has only 1 i_o projection.
-  if (_ioproj_catchall != NULL) {
-    if (_ioproj_fallthrough == NULL) {
-      _ioproj_fallthrough = new ProjNode(call, TypeFunc::I_O);
-      transform_later(_ioproj_fallthrough);
+  if (_callprojs.fallthrough_ioproj != NULL) {
+    migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o);
+  }
+  // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete
+  // catchall_ioproj so we end up with a call that has only 1 i_o projection.
+  if (_callprojs.catchall_ioproj != NULL) {
+    if (_callprojs.fallthrough_ioproj == NULL) {
+      _callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O);
+      transform_later(_callprojs.fallthrough_ioproj);
     }
-    migrate_outs(_ioproj_catchall, _ioproj_fallthrough);
-    _igvn.remove_dead_node(_ioproj_catchall);
+    migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj);
+    _igvn.remove_dead_node(_callprojs.catchall_ioproj);
   }

   // if we generated only a slow call, we are done
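
`migrate_outs`, used throughout this hunk, moves every use of one projection onto another. Its body is not part of this diff; a plausible implementation, assuming the helper defined earlier in macro.cpp:

```cpp
// Sketch under assumptions: redirect all out-edges of 'old' to 'target'
// while keeping IGVN bookkeeping consistent.
void PhaseMacroExpand::migrate_outs(Node* old, Node* target) {
  assert(old->outcnt() > 0, "nothing to migrate");
  assert(old != target, "use replace_node instead");
  _igvn.rehash_node_delayed(old); // uses of 'old' are about to change
  old->replace_by(target);        // every user of 'old' now points at 'target'
}
```
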
@@ -1518,21 +1463,21 @@ void PhaseMacroExpand::expand_allocate_common(
     return;
   }

-  if (_fallthroughcatchproj != NULL) {
-    ctrl = _fallthroughcatchproj->clone();
+  if (_callprojs.fallthrough_catchproj != NULL) {
+    ctrl = _callprojs.fallthrough_catchproj->clone();
     transform_later(ctrl);
-    _igvn.replace_node(_fallthroughcatchproj, result_region);
+    _igvn.replace_node(_callprojs.fallthrough_catchproj, result_region);
   } else {
     ctrl = top();
   }
   Node *slow_result;
-  if (_resproj == NULL) {
+  if (_callprojs.resproj == NULL) {
     // no uses of the allocation result
     slow_result = top();
   } else {
-    slow_result = _resproj->clone();
+    slow_result = _callprojs.resproj->clone();
     transform_later(slow_result);
-    _igvn.replace_node(_resproj, result_phi_rawoop);
+    _igvn.replace_node(_callprojs.resproj, result_phi_rawoop);
   }

   // Plug slow-path into result merge point
@@ -1542,7 +1487,7 @@ void PhaseMacroExpand::expand_allocate_common(
     result_phi_rawoop->init_req(slow_result_path, slow_result);
     transform_later(result_phi_rawoop);
   }
-  result_phi_rawmem->init_req(slow_result_path, _memproj_fallthrough);
+  result_phi_rawmem->init_req(slow_result_path, _callprojs.fallthrough_memproj);
   transform_later(result_phi_rawmem);
   transform_later(result_phi_i_o);
   // This completes all paths into the result merge point
@@ -1554,45 +1499,45 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
   Node* mem = alloc->in(TypeFunc::Memory);
   Node* i_o = alloc->in(TypeFunc::I_O);

-  extract_call_projections(alloc);
-  if (_resproj != NULL) {
-    for (DUIterator_Fast imax, i = _resproj->fast_outs(imax); i < imax; i++) {
-      Node* use = _resproj->fast_out(i);
+  alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
+  if (_callprojs.resproj != NULL) {
+    for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) {
+      Node* use = _callprojs.resproj->fast_out(i);
       use->isa_MemBar()->remove(&_igvn);
       --imax;
       --i; // back up iterator
     }
-    assert(_resproj->outcnt() == 0, "all uses must be deleted");
-    _igvn.remove_dead_node(_resproj);
+    assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
+    _igvn.remove_dead_node(_callprojs.resproj);
   }
-  if (_fallthroughcatchproj != NULL) {
-    migrate_outs(_fallthroughcatchproj, ctrl);
-    _igvn.remove_dead_node(_fallthroughcatchproj);
+  if (_callprojs.fallthrough_catchproj != NULL) {
+    migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
+    _igvn.remove_dead_node(_callprojs.fallthrough_catchproj);
   }
-  if (_catchallcatchproj != NULL) {
-    _igvn.rehash_node_delayed(_catchallcatchproj);
-    _catchallcatchproj->set_req(0, top());
+  if (_callprojs.catchall_catchproj != NULL) {
+    _igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
+    _callprojs.catchall_catchproj->set_req(0, top());
   }
-  if (_fallthroughproj != NULL) {
-    Node* catchnode = _fallthroughproj->unique_ctrl_out();
+  if (_callprojs.fallthrough_proj != NULL) {
+    Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
     _igvn.remove_dead_node(catchnode);
-    _igvn.remove_dead_node(_fallthroughproj);
+    _igvn.remove_dead_node(_callprojs.fallthrough_proj);
   }
-  if (_memproj_fallthrough != NULL) {
-    migrate_outs(_memproj_fallthrough, mem);
-    _igvn.remove_dead_node(_memproj_fallthrough);
+  if (_callprojs.fallthrough_memproj != NULL) {
+    migrate_outs(_callprojs.fallthrough_memproj, mem);
+    _igvn.remove_dead_node(_callprojs.fallthrough_memproj);
   }
-  if (_ioproj_fallthrough != NULL) {
-    migrate_outs(_ioproj_fallthrough, i_o);
-    _igvn.remove_dead_node(_ioproj_fallthrough);
+  if (_callprojs.fallthrough_ioproj != NULL) {
+    migrate_outs(_callprojs.fallthrough_ioproj, i_o);
+    _igvn.remove_dead_node(_callprojs.fallthrough_ioproj);
   }
-  if (_memproj_catchall != NULL) {
-    _igvn.rehash_node_delayed(_memproj_catchall);
-    _memproj_catchall->set_req(0, top());
+  if (_callprojs.catchall_memproj != NULL) {
+    _igvn.rehash_node_delayed(_callprojs.catchall_memproj);
+    _callprojs.catchall_memproj->set_req(0, top());
   }
-  if (_ioproj_catchall != NULL) {
-    _igvn.rehash_node_delayed(_ioproj_catchall);
-    _ioproj_catchall->set_req(0, top());
+  if (_callprojs.catchall_ioproj != NULL) {
+    _igvn.rehash_node_delayed(_callprojs.catchall_ioproj);
+    _callprojs.catchall_ioproj->set_req(0, top());
   }
 #ifndef PRODUCT
   if (PrintEliminateAllocations) {
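
Two removal idioms alternate in the hunk above. As I read it: once `migrate_outs` has drained a projection's uses, the projection can be deleted outright, whereas the catch-all projections may still have users at this point, so they are merely pinned to `top()` and left for IGVN to fold away:

```cpp
// Annotated restatement of the two idioms from yank_alloc_node above.
if (_callprojs.fallthrough_memproj != NULL) {
  migrate_outs(_callprojs.fallthrough_memproj, mem);      // no users remain,
  _igvn.remove_dead_node(_callprojs.fallthrough_memproj); // so delete it now
}
if (_callprojs.catchall_memproj != NULL) {
  _igvn.rehash_node_delayed(_callprojs.catchall_memproj); // users may remain
  _callprojs.catchall_memproj->set_req(0, top());         // dead control; IGVN cleans up
}
```
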
@@ -2151,16 +2096,16 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
   Node* ctrl = alock->in(TypeFunc::Control);
   guarantee(ctrl != NULL, "missing control projection, cannot replace_node() with NULL");

-  extract_call_projections(alock);
+  alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
   // There are 2 projections from the lock.  The lock node will
   // be deleted when its last use is subsumed below.
   assert(alock->outcnt() == 2 &&
-         _fallthroughproj != NULL &&
-         _memproj_fallthrough != NULL,
+         _callprojs.fallthrough_proj != NULL &&
+         _callprojs.fallthrough_memproj != NULL,
          "Unexpected projections from Lock/Unlock");

-  Node* fallthroughproj = _fallthroughproj;
-  Node* memproj_fallthrough = _memproj_fallthrough;
+  Node* fallthroughproj = _callprojs.fallthrough_proj;
+  Node* memproj_fallthrough = _callprojs.fallthrough_memproj;

   // The memory projection from a lock/unlock is RawMem
   // The input to a Lock is merged memory, so extract its RawMem input
@@ -2414,31 +2359,31 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
                            OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path,
                            obj, box, NULL);

-  extract_call_projections(call);
+  call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);

   // Slow path can only throw asynchronous exceptions, which are always
   // de-opted.  So the compiler thinks the slow-call can never throw an
   // exception.  If it DOES throw an exception we would need the debug
   // info removed first (since if it throws there is no monitor).
-  assert(_ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
-         _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
+  assert(_callprojs.fallthrough_ioproj == NULL && _callprojs.catchall_ioproj == NULL &&
+         _callprojs.catchall_memproj == NULL && _callprojs.catchall_catchproj == NULL, "Unexpected projection from Lock");

   // Capture slow path
   // disconnect fall-through projection from call and create a new one
   // hook up users of fall-through projection to region
-  Node *slow_ctrl = _fallthroughproj->clone();
+  Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
   transform_later(slow_ctrl);
-  _igvn.hash_delete(_fallthroughproj);
-  _fallthroughproj->disconnect_inputs(C);
+  _igvn.hash_delete(_callprojs.fallthrough_proj);
+  _callprojs.fallthrough_proj->disconnect_inputs(C);
   region->init_req(1, slow_ctrl);
   // region inputs are now complete
   transform_later(region);
-  _igvn.replace_node(_fallthroughproj, region);
+  _igvn.replace_node(_callprojs.fallthrough_proj, region);

   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
   mem_phi->init_req(1, memproj);
   transform_later(mem_phi);
-  _igvn.replace_node(_memproj_fallthrough, mem_phi);
+  _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
 }

 // ------------------------------expand_unlock_node----------------------
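
For reference, the slow-path splice that both `expand_lock_node` (above) and `expand_unlock_node` (below) perform on the fall-through control projection, restated with comments:

```cpp
// The call keeps a fresh control projection; users of the original
// projection are rerouted through the fast/slow merge Region.
Node *slow_ctrl = _callprojs.fallthrough_proj->clone();    // new proj for the slow call
transform_later(slow_ctrl);
_igvn.hash_delete(_callprojs.fallthrough_proj);
_callprojs.fallthrough_proj->disconnect_inputs(C);         // detach the stale projection
region->init_req(1, slow_ctrl);                            // slow path enters the merge
transform_later(region);                                   // region inputs now complete
_igvn.replace_node(_callprojs.fallthrough_proj, region);   // users see merged control
```
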
@@ -2485,29 +2430,28 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {
                                   CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
                                   "complete_monitor_unlocking_C", slow_path, obj, box, thread);

-  extract_call_projections(call);
-
-  assert(_ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
-         _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");
+  call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/);
+  assert(_callprojs.fallthrough_ioproj == NULL && _callprojs.catchall_ioproj == NULL &&
+         _callprojs.catchall_memproj == NULL && _callprojs.catchall_catchproj == NULL, "Unexpected projection from Lock");

   // No exceptions for unlocking
   // Capture slow path
   // disconnect fall-through projection from call and create a new one
   // hook up users of fall-through projection to region
-  Node *slow_ctrl = _fallthroughproj->clone();
+  Node *slow_ctrl = _callprojs.fallthrough_proj->clone();
   transform_later(slow_ctrl);
-  _igvn.hash_delete(_fallthroughproj);
-  _fallthroughproj->disconnect_inputs(C);
+  _igvn.hash_delete(_callprojs.fallthrough_proj);
+  _callprojs.fallthrough_proj->disconnect_inputs(C);
   region->init_req(1, slow_ctrl);
   // region inputs are now complete
   transform_later(region);
-  _igvn.replace_node(_fallthroughproj, region);
+  _igvn.replace_node(_callprojs.fallthrough_proj, region);

   Node *memproj = transform_later(new ProjNode(call, TypeFunc::Memory));
   mem_phi->init_req(1, memproj);
   mem_phi->init_req(2, mem);
   transform_later(mem_phi);
-  _igvn.replace_node(_memproj_fallthrough, mem_phi);
+  _igvn.replace_node(_callprojs.fallthrough_memproj, mem_phi);
 }

 void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) {
0 commit comments