Commit a66629a

Author: Hamlin Li, committed Sep 8, 2021
8254167: G1: Record regions where evacuation failed to provide targeted iteration
Reviewed-by: tschatzl, ayang
1 parent 286a1f6 commit a66629a

16 files changed: +352 -134 lines
 

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

(+40 -8)

@@ -1474,8 +1474,6 @@ G1CollectedHeap::G1CollectedHeap() :
   _cm_thread(NULL),
   _cr(NULL),
   _task_queues(NULL),
-  _num_regions_failed_evacuation(0),
-  _regions_failed_evacuation(mtGC),
   _ref_processor_stw(NULL),
   _is_alive_closure_stw(this),
   _is_subject_to_discovery_stw(this),

@@ -1756,7 +1754,7 @@ jint G1CollectedHeap::initialize() {

   _collection_set.initialize(max_reserved_regions());

-  _regions_failed_evacuation.resize(max_regions());
+  _evac_failure_regions.initialize(max_reserved_regions());

   evac_failure_injector()->reset();

@@ -2316,12 +2314,46 @@ void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
   _collection_set.iterate(cl);
 }

-void G1CollectedHeap::collection_set_par_iterate_all(HeapRegionClosure* cl, HeapRegionClaimer* hr_claimer, uint worker_id) {
-  _collection_set.par_iterate(cl, hr_claimer, worker_id, workers()->active_workers());
+void G1CollectedHeap::collection_set_par_iterate_all(HeapRegionClosure* cl,
+                                                     HeapRegionClaimer* hr_claimer,
+                                                     uint worker_id) {
+  _collection_set.par_iterate(cl, hr_claimer, worker_id);
 }

-void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, HeapRegionClaimer* hr_claimer, uint worker_id) {
-  _collection_set.iterate_incremental_part_from(cl, hr_claimer, worker_id, workers()->active_workers());
+void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl,
+                                                            HeapRegionClaimer* hr_claimer,
+                                                            uint worker_id) {
+  _collection_set.iterate_incremental_part_from(cl, hr_claimer, worker_id);
+}
+
+void G1CollectedHeap::par_iterate_regions_array_part_from(HeapRegionClosure* cl,
+                                                          HeapRegionClaimer* hr_claimer,
+                                                          const uint* regions,
+                                                          size_t offset,
+                                                          size_t length,
+                                                          uint worker_id) const {
+  assert_at_safepoint();
+  if (length == 0) {
+    return;
+  }
+  uint total_workers = workers()->active_workers();
+
+  size_t start_pos = (worker_id * length) / total_workers;
+  size_t cur_pos = start_pos;
+
+  do {
+    uint region_idx = regions[cur_pos + offset];
+    if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
+      HeapRegion* r = region_at(region_idx);
+      bool result = cl->do_heap_region(r);
+      guarantee(!result, "Must not cancel iteration");
+    }
+
+    cur_pos++;
+    if (cur_pos == length) {
+      cur_pos = 0;
+    }
+  } while (cur_pos != start_pos);
 }

 HeapWord* G1CollectedHeap::block_start(const void* addr) const {

@@ -2855,7 +2887,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
   bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();

   // Perform the collection.
-  G1YoungCollector collector(gc_cause(), target_pause_time_ms);
+  G1YoungCollector collector(gc_cause(), target_pause_time_ms, &_evac_failure_regions);
   collector.collect();

   // It should now be safe to tell the concurrent mark thread to start
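The work distribution in the new par_iterate_regions_array_part_from() gives each worker a different starting offset, (worker_id * length) / total_workers, and lets it walk the array with wrap-around, so workers mostly claim disjoint elements while still collectively covering every index. The following is a minimal standalone sketch of that pattern, not JDK code: a vector of std::atomic<bool> flags stands in for HeapRegionClaimer, and all names are illustrative.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    using ClaimFlags = std::vector<std::atomic<bool>>;

    // Each worker starts at an evenly spaced position and wraps around, so workers
    // mostly claim disjoint elements and only overlap near the seams.
    void par_iterate_part(const unsigned* regions, size_t offset, size_t length,
                          unsigned worker_id, unsigned total_workers, ClaimFlags& claimed) {
      if (length == 0) {
        return;
      }
      size_t start_pos = (static_cast<size_t>(worker_id) * length) / total_workers;
      size_t cur_pos = start_pos;
      do {
        unsigned region_idx = regions[cur_pos + offset];
        bool expected = false;
        // Claim the region so exactly one worker processes it.
        if (claimed[region_idx].compare_exchange_strong(expected, true)) {
          std::printf("worker %u processes region %u\n", worker_id, region_idx);
        }
        if (++cur_pos == length) {
          cur_pos = 0;  // wrap around to cover elements before start_pos
        }
      } while (cur_pos != start_pos);
    }

    int main() {
      const unsigned regions[] = {4, 7, 9, 12, 15, 21};
      ClaimFlags claimed(32);  // value-initialized flags, all false
      std::vector<std::thread> workers;
      for (unsigned w = 0; w < 3; w++) {
        workers.emplace_back(par_iterate_part, regions, 0u, 6u, w, 3u, std::ref(claimed));
      }
      for (std::thread& t : workers) {
        t.join();
      }
      return 0;
    }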

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

(+11 -14)

@@ -34,6 +34,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1EdenRegions.hpp"
 #include "gc/g1/g1EvacStats.hpp"
+#include "gc/g1/g1EvacFailureRegions.hpp"
 #include "gc/g1/g1GCPauseType.hpp"
 #include "gc/g1/g1HeapTransition.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"

@@ -830,10 +831,7 @@ class G1CollectedHeap : public CollectedHeap {
   // The parallel task queues
   G1ScannerTasksQueueSet *_task_queues;

-  // Number of regions evacuation failed in the current collection.
-  volatile uint _num_regions_failed_evacuation;
-  // Records for every region on the heap whether evacuation failed for it.
-  CHeapBitMap _regions_failed_evacuation;
+  G1EvacFailureRegions _evac_failure_regions;

   // ("Weak") Reference processing support.
   //

@@ -1052,18 +1050,8 @@ class G1CollectedHeap : public CollectedHeap {

   void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause);

-  inline void reset_evacuation_failed_data();
   // True iff an evacuation has failed in the most-recent collection.
   inline bool evacuation_failed() const;
-  // True iff the given region encountered an evacuation failure in the most-recent
-  // collection.
-  inline bool evacuation_failed(uint region_idx) const;
-
-  inline uint num_regions_failed_evacuation() const;
-  // Notify that the garbage collection encountered an evacuation failure in the
-  // given region. Returns whether this has been the first occurrence of an evacuation
-  // failure in that region.
-  inline bool notify_region_failed_evacuation(uint const region_idx);

   void remove_from_old_gen_sets(const uint old_regions_removed,
                                 const uint archive_regions_removed,

@@ -1167,6 +1155,15 @@ class G1CollectedHeap : public CollectedHeap {
     collection_set_iterate_increment_from(blk, NULL, worker_id);
   }
   void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
+  // Iterate part of an array of region indexes given by offset and length, applying
+  // the given HeapRegionClosure on each region. The worker_id will determine where
+  // in the part to start the iteration to allow for more efficient parallel iteration.
+  void par_iterate_regions_array_part_from(HeapRegionClosure* cl,
+                                           HeapRegionClaimer* hr_claimer,
+                                           const uint* regions,
+                                           size_t offset,
+                                           size_t length,
+                                           uint worker_id) const;

   // Returns the HeapRegion that contains addr. addr must not be NULL.
   template <class T>

src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp

(+2 -22)

@@ -29,6 +29,7 @@

 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CollectorState.hpp"
+#include "gc/g1/g1EvacFailureRegions.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"

@@ -195,29 +196,8 @@ void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
   _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
 }

-void G1CollectedHeap::reset_evacuation_failed_data() {
-  Atomic::store(&_num_regions_failed_evacuation, 0u);
-  _regions_failed_evacuation.clear();
-}
-
 bool G1CollectedHeap::evacuation_failed() const {
-  return num_regions_failed_evacuation() > 0;
-}
-
-bool G1CollectedHeap::evacuation_failed(uint region_idx) const {
-  return _regions_failed_evacuation.par_at(region_idx, memory_order_relaxed);
-}
-
-uint G1CollectedHeap::num_regions_failed_evacuation() const {
-  return Atomic::load(&_num_regions_failed_evacuation);
-}
-
-bool G1CollectedHeap::notify_region_failed_evacuation(uint const region_idx) {
-  bool result = _regions_failed_evacuation.par_set_bit(region_idx, memory_order_relaxed);
-  if (result) {
-    Atomic::inc(&_num_regions_failed_evacuation, memory_order_relaxed);
-  }
-  return result;
+  return _evac_failure_regions.num_regions_failed_evacuation() > 0;
 }

 inline bool G1CollectedHeap::is_in_young(const oop obj) {

src/hotspot/share/gc/g1/g1CollectionSet.cpp

(+6 -29)

@@ -207,9 +207,8 @@ void G1CollectionSet::iterate(HeapRegionClosure* cl) const {

 void G1CollectionSet::par_iterate(HeapRegionClosure* cl,
                                   HeapRegionClaimer* hr_claimer,
-                                  uint worker_id,
-                                  uint total_workers) const {
-  iterate_part_from(cl, hr_claimer, 0, cur_length(), worker_id, total_workers);
+                                  uint worker_id) const {
+  iterate_part_from(cl, hr_claimer, 0, cur_length(), worker_id);
 }

 void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {

@@ -224,38 +223,16 @@ void G1CollectionSet::iterate_optional(HeapRegionClosure* cl) const {

 void G1CollectionSet::iterate_incremental_part_from(HeapRegionClosure* cl,
                                                     HeapRegionClaimer* hr_claimer,
-                                                    uint worker_id,
-                                                    uint total_workers) const {
-  iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id, total_workers);
+                                                    uint worker_id) const {
+  iterate_part_from(cl, hr_claimer, _inc_part_start, increment_length(), worker_id);
 }

 void G1CollectionSet::iterate_part_from(HeapRegionClosure* cl,
                                         HeapRegionClaimer* hr_claimer,
                                         size_t offset,
                                         size_t length,
-                                        uint worker_id,
-                                        uint total_workers) const {
-  assert_at_safepoint();
-  if (length == 0) {
-    return;
-  }
-
-  size_t start_pos = (worker_id * length) / total_workers;
-  size_t cur_pos = start_pos;
-
-  do {
-    uint region_idx = _collection_set_regions[cur_pos + offset];
-    if (hr_claimer == NULL || hr_claimer->claim_region(region_idx)) {
-      HeapRegion* r = _g1h->region_at(region_idx);
-      bool result = cl->do_heap_region(r);
-      guarantee(!result, "Must not cancel iteration");
-    }
-
-    cur_pos++;
-    if (cur_pos == length) {
-      cur_pos = 0;
-    }
-  } while (cur_pos != start_pos);
+                                        uint worker_id) const {
+  _g1h->par_iterate_regions_array_part_from(cl, hr_claimer, _collection_set_regions, offset, length, worker_id);
 }

 void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,

src/hotspot/share/gc/g1/g1CollectionSet.hpp

(+3 -5)

@@ -262,8 +262,7 @@ class G1CollectionSet {
                          HeapRegionClaimer* hr_claimer,
                          size_t offset,
                          size_t length,
-                         uint worker_id,
-                         uint total_workers) const;
+                         uint worker_id) const;
 public:
   G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
   ~G1CollectionSet();

@@ -307,7 +306,7 @@ class G1CollectionSet {

   // Iterate over the current collection set increment applying the given HeapRegionClosure
   // from a starting position determined by the given worker id.
-  void iterate_incremental_part_from(HeapRegionClosure* cl, HeapRegionClaimer* hr_claimer, uint worker_id, uint total_workers) const;
+  void iterate_incremental_part_from(HeapRegionClosure* cl, HeapRegionClaimer* hr_claimer, uint worker_id) const;

   // Returns the length of the current increment in number of regions.
   size_t increment_length() const { return _collection_set_cur_length - _inc_part_start; }

@@ -319,8 +318,7 @@ class G1CollectionSet {
   void iterate(HeapRegionClosure* cl) const;
   void par_iterate(HeapRegionClosure* cl,
                    HeapRegionClaimer* hr_claimer,
-                   uint worker_id,
-                   uint total_workers) const;
+                   uint worker_id) const;

   void iterate_optional(HeapRegionClosure* cl) const;

src/hotspot/share/gc/g1/g1EvacFailure.cpp

(+16 -12)

@@ -27,6 +27,7 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
+#include "gc/g1/g1EvacFailureRegions.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1RedirtyCardsQueue.hpp"

@@ -203,14 +204,19 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   UpdateLogBuffersDeferred _log_buffer_cl;

   uint volatile* _num_failed_regions;
+  G1EvacFailureRegions* _evac_failure_regions;

 public:
-  RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs, uint worker_id, uint volatile* num_failed_regions) :
+  RemoveSelfForwardPtrHRClosure(G1RedirtyCardsQueueSet* rdcqs,
+                                uint worker_id,
+                                uint volatile* num_failed_regions,
+                                G1EvacFailureRegions* evac_failure_regions) :
     _g1h(G1CollectedHeap::heap()),
     _worker_id(worker_id),
     _rdc_local_qset(rdcqs),
     _log_buffer_cl(&_rdc_local_qset),
-    _num_failed_regions(num_failed_regions) {
+    _num_failed_regions(num_failed_regions),
+    _evac_failure_regions(evac_failure_regions) {
   }

   ~RemoveSelfForwardPtrHRClosure() {

@@ -234,7 +240,7 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
     assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
     assert(hr->in_collection_set(), "bad CS");

-    if (_g1h->evacuation_failed(hr->hrm_index())) {
+    if (_evac_failure_regions->contains(hr->hrm_index())) {
       hr->clear_index_in_opt_cset();

       bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();

@@ -259,22 +265,20 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
   }
 };

-G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs) :
+G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs,
+                                                               G1EvacFailureRegions* evac_failure_regions) :
   AbstractGangTask("G1 Remove Self-forwarding Pointers"),
   _g1h(G1CollectedHeap::heap()),
   _rdcqs(rdcqs),
   _hrclaimer(_g1h->workers()->active_workers()),
+  _evac_failure_regions(evac_failure_regions),
   _num_failed_regions(0) { }

 void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
-  RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id, &_num_failed_regions);
-
-  // We need to check all collection set regions whether they need self forward
-  // removals, not only the last collection set increment. The reason is that
-  // reference processing (e.g. finalizers) can make it necessary to resurrect an
-  // otherwise unreachable object at the very end of the collection. That object
-  // might cause an evacuation failure in any region in the collection set.
-  _g1h->collection_set_par_iterate_all(&rsfp_cl, &_hrclaimer, worker_id);
+  RemoveSelfForwardPtrHRClosure rsfp_cl(_rdcqs, worker_id, &_num_failed_regions, _evac_failure_regions);
+
+  // Iterate through all regions that failed evacuation during the entire collection.
+  _evac_failure_regions->par_iterate(&rsfp_cl, &_hrclaimer, worker_id);
 }

 uint G1ParRemoveSelfForwardPtrsTask::num_failed_regions() const {
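The change in G1ParRemoveSelfForwardPtrsTask::work() is the payoff of the new bookkeeping: instead of walking the entire collection set and filtering with a per-region check, the task now walks only the regions that were recorded as failed. A hedged, self-contained C++ sketch of the difference in iteration shape (toy types, not HotSpot code):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Simplified stand-ins; these are not the HotSpot types.
    struct Region {
      uint32_t idx;
      bool failed;
    };

    // Old shape: every collection-set region is visited and filtered per region.
    size_t visit_failed_by_filtering(const std::vector<Region>& cset) {
      size_t visited = 0;
      for (const Region& r : cset) {
        visited++;              // the closure is applied to every region...
        if (r.failed) {
          // ...but only failed regions actually need self-forward-pointer removal
        }
      }
      return visited;
    }

    // New shape: only the recorded failed-region indices are visited at all.
    size_t visit_failed_directly(const std::vector<uint32_t>& failed_idx) {
      size_t visited = 0;
      for (uint32_t idx : failed_idx) {
        visited++;              // the closure runs exactly once per failed region
        (void)idx;
      }
      return visited;
    }

    int main() {
      std::vector<Region> cset(1000);
      for (uint32_t i = 0; i < cset.size(); i++) {
        cset[i] = {i, i % 250 == 0};   // pretend 4 of 1000 regions failed
      }
      std::vector<uint32_t> failed_idx = {0, 250, 500, 750};
      std::printf("filtering visits %zu regions, targeted iteration visits %zu\n",
                  visit_failed_by_filtering(cset), visit_failed_directly(failed_idx));
      return 0;
    }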

src/hotspot/share/gc/g1/g1EvacFailure.hpp

(+3 -1)

@@ -31,6 +31,7 @@
 #include "utilities/globalDefinitions.hpp"

 class G1CollectedHeap;
+class G1EvacFailureRegions;
 class G1RedirtyCardsQueueSet;

 // Task to fixup self-forwarding pointers

@@ -41,10 +42,11 @@ class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
   G1RedirtyCardsQueueSet* _rdcqs;
   HeapRegionClaimer _hrclaimer;

+  G1EvacFailureRegions* _evac_failure_regions;
   uint volatile _num_failed_regions;

 public:
-  G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs);
+  G1ParRemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs, G1EvacFailureRegions* evac_failure_regions);

   void work(uint worker_id);

src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp (new file, +68 lines)

@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1EvacFailureRegions.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+
+
+G1EvacFailureRegions::G1EvacFailureRegions() :
+  _regions_failed_evacuation(mtGC) {
+}
+
+G1EvacFailureRegions::~G1EvacFailureRegions() {
+  FREE_C_HEAP_ARRAY(uint, _evac_failure_regions);
+}
+
+void G1EvacFailureRegions::initialize(uint max_regions) {
+  Atomic::store(&_evac_failure_regions_cur_length, 0u);
+  _max_regions = max_regions;
+  _regions_failed_evacuation.resize(_max_regions);
+  _evac_failure_regions = NEW_C_HEAP_ARRAY(uint, _max_regions, mtGC);
+}
+
+void G1EvacFailureRegions::par_iterate(HeapRegionClosure* closure,
+                                       HeapRegionClaimer* _hrclaimer,
+                                       uint worker_id) {
+  G1CollectedHeap::heap()->par_iterate_regions_array_part_from(closure,
+                                                               _hrclaimer,
+                                                               _evac_failure_regions,
+                                                               0,
+                                                               Atomic::load(&_evac_failure_regions_cur_length),
+                                                               worker_id);
+}
+
+void G1EvacFailureRegions::reset() {
+  Atomic::store(&_evac_failure_regions_cur_length, 0u);
+  _regions_failed_evacuation.clear();
+}
+
+bool G1EvacFailureRegions::contains(uint region_idx) const {
+  assert(region_idx < _max_regions, "must be");
+  return _regions_failed_evacuation.par_at(region_idx, memory_order_relaxed);
+}

src/hotspot/share/gc/g1/g1EvacFailureRegions.hpp (new file, +69 lines)

@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
+#define SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
+
+#include "runtime/atomic.hpp"
+#include "utilities/bitMap.hpp"
+
+class HeapRegionClosure;
+class HeapRegionClaimer;
+
+// This class records for every region on the heap whether evacuation failed for it,
+// and records for every evacuation failure region to speed up iteration of these
+// regions in post evacuation phase.
+class G1EvacFailureRegions {
+  // Records for every region on the heap whether evacuation failed for it.
+  CHeapBitMap _regions_failed_evacuation;
+  // Regions (index) of evacuation failed in the current collection.
+  uint* _evac_failure_regions;
+  // Number of regions evacuation failed in the current collection.
+  volatile uint _evac_failure_regions_cur_length;
+  // Maximum of regions number.
+  uint _max_regions;
+
+public:
+  G1EvacFailureRegions();
+  ~G1EvacFailureRegions();
+  void initialize(uint max_regions);
+
+  void reset();
+
+  bool contains(uint region_idx) const;
+  void par_iterate(HeapRegionClosure* closure,
+                   HeapRegionClaimer* _hrclaimer,
+                   uint worker_id);
+
+  uint num_regions_failed_evacuation() const {
+    return Atomic::load(&_evac_failure_regions_cur_length);
+  }
+
+  // Record that the garbage collection encountered an evacuation failure in the
+  // given region. Returns whether this has been the first occurrence of an evacuation
+  // failure in that region.
+  inline bool record(uint region_idx);
+};
+
+#endif //SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
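G1EvacFailureRegions pairs two structures: the CHeapBitMap answers contains() in O(1), and the uint array plus the atomic length give a dense list of exactly the regions that failed, which is what makes the targeted iteration cheap. Below is a hedged, single-threaded C++ mock of that idea only; it is not the HotSpot class, and the concurrency and NMT allocation details are deliberately left out.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    class EvacFailureRegionsSketch {
      std::vector<bool> _failed_bitmap;   // one entry per heap region
      std::vector<uint32_t> _failed_idx;  // indices of failed regions, in record order

    public:
      explicit EvacFailureRegionsSketch(uint32_t max_regions) : _failed_bitmap(max_regions) {}

      // Returns true only for the first failure recorded against a region.
      bool record(uint32_t region_idx) {
        if (_failed_bitmap[region_idx]) {
          return false;
        }
        _failed_bitmap[region_idx] = true;
        _failed_idx.push_back(region_idx);
        return true;
      }

      bool contains(uint32_t region_idx) const { return _failed_bitmap[region_idx]; }
      uint32_t num_regions_failed() const { return static_cast<uint32_t>(_failed_idx.size()); }

      // Targeted iteration: the closure runs only for regions that actually failed.
      template <typename Closure>
      void iterate(Closure cl) const {
        for (uint32_t idx : _failed_idx) {
          cl(idx);
        }
      }
    };

    int main() {
      EvacFailureRegionsSketch efr(1024);
      efr.record(17);
      efr.record(42);
      efr.record(17);  // duplicate: record() returns false, index stored only once
      efr.iterate([](uint32_t idx) { std::printf("failed region %u\n", idx); });
      return 0;
    }
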

src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp (new file, +43 lines)

@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_INLINE_HPP
+#define SHARE_GC_G1_G1EVACFAILUREREGIONS_INLINE_HPP
+
+#include "gc/g1/g1EvacFailureRegions.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+bool G1EvacFailureRegions::record(uint region_idx) {
+  assert(region_idx < _max_regions, "must be");
+  bool success = _regions_failed_evacuation.par_set_bit(region_idx,
+                                                        memory_order_relaxed);
+  if (success) {
+    size_t offset = Atomic::fetch_and_add(&_evac_failure_regions_cur_length, 1u);
+    _evac_failure_regions[offset] = region_idx;
+  }
+  return success;
+}
+
+#endif //SHARE_GC_G1_G1EVACFAILUREREGIONS_INLINE_HPP
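record() is called concurrently by GC workers when they hit an evacuation failure, so it relies on two lock-free steps: a per-region bit claimed with par_set_bit() decides which thread is first, and Atomic::fetch_and_add() hands that thread a unique slot in the index array. Below is a hedged sketch of the same pattern using std::atomic in place of CHeapBitMap and the HotSpot Atomic wrappers; names and types are illustrative, not JDK code.

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <thread>
    #include <vector>

    class ConcurrentFailureLog {
      std::vector<std::atomic<bool>> _claimed;  // stands in for the per-region bitmap
      std::vector<uint32_t> _regions;           // pre-sized, filled on demand
      std::atomic<uint32_t> _length{0};

    public:
      explicit ConcurrentFailureLog(uint32_t max_regions)
        : _claimed(max_regions), _regions(max_regions) {}

      // Safe to call from many worker threads; returns true only for the first claim
      // of region_idx, mirroring the bool result of G1EvacFailureRegions::record().
      bool record(uint32_t region_idx) {
        bool expected = false;
        if (!_claimed[region_idx].compare_exchange_strong(expected, true,
                                                          std::memory_order_relaxed)) {
          return false;  // another thread already recorded this region
        }
        // Only the claiming thread reaches this point, so each slot is written once.
        uint32_t slot = _length.fetch_add(1, std::memory_order_relaxed);
        _regions[slot] = region_idx;
        return true;
      }

      uint32_t num_failed() const { return _length.load(std::memory_order_relaxed); }
      const uint32_t* failed_regions() const { return _regions.data(); }
    };

    int main() {
      ConcurrentFailureLog log(1024);
      std::thread t1([&] { log.record(7); log.record(9); });
      std::thread t2([&] { log.record(9); log.record(7); });
      t1.join();
      t2.join();
      std::printf("regions recorded: %u\n", log.num_failed());  // prints 2
      return 0;
    }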

src/hotspot/share/gc/g1/g1ParScanThreadState.cpp

(+12 -6)

@@ -26,6 +26,7 @@
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
+#include "gc/g1/g1EvacFailureRegions.inline.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1RootClosures.hpp"

@@ -58,7 +59,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
                                            uint worker_id,
                                            uint n_workers,
                                            size_t young_cset_length,
-                                           size_t optional_cset_length)
+                                           size_t optional_cset_length,
+                                           G1EvacFailureRegions* evac_failure_regions)
   : _g1h(g1h),
     _task_queue(g1h->task_queue(worker_id)),
     _rdc_local_qset(rdcqs),

@@ -84,7 +86,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
     _numa(g1h->numa()),
     _obj_alloc_stat(NULL),
     _preserved_marks(preserved_marks),
-    _evacuation_failed_info()
+    _evacuation_failed_info(),
+    _evac_failure_regions(evac_failure_regions)
 {
   // We allocate number of young gen regions in the collection set plus one
   // entries, since entry 0 keeps track of surviving bytes for non-young regions.

@@ -541,7 +544,8 @@ G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id)
       new G1ParScanThreadState(_g1h, rdcqs(),
                                _preserved_marks_set->get(worker_id),
                                worker_id, _n_workers,
-                               _young_cset_length, _optional_cset_length);
+                               _young_cset_length, _optional_cset_length,
+                               _evac_failure_regions);
   }
   return _states[worker_id];
 }

@@ -595,7 +599,7 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
     // Forward-to-self succeeded. We are the "owner" of the object.
     HeapRegion* r = _g1h->heap_region_containing(old);

-    if (_g1h->notify_region_failed_evacuation(r->hrm_index())) {
+    if (_evac_failure_regions->record(r->hrm_index())) {
      _g1h->hr_printer()->evac_failure(r);
     }

@@ -649,7 +653,8 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                                                  PreservedMarksSet* preserved_marks_set,
                                                  uint n_workers,
                                                  size_t young_cset_length,
-                                                 size_t optional_cset_length) :
+                                                 size_t optional_cset_length,
+                                                 G1EvacFailureRegions* evac_failure_regions) :
   _g1h(g1h),
   _rdcqs(rdcqs),
   _preserved_marks_set(preserved_marks_set),

@@ -658,7 +663,8 @@ G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h,
   _young_cset_length(young_cset_length),
   _optional_cset_length(optional_cset_length),
   _n_workers(n_workers),
-  _flushed(false) {
+  _flushed(false),
+  _evac_failure_regions(evac_failure_regions) {
   for (uint i = 0; i < n_workers; ++i) {
     _states[i] = NULL;
   }

src/hotspot/share/gc/g1/g1ParScanThreadState.hpp

(+6 -2)

@@ -102,6 +102,7 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
   // Per-thread evacuation failure data structures.
   PreservedMarks* _preserved_marks;
   EvacuationFailedInfo _evacuation_failed_info;
+  G1EvacFailureRegions* _evac_failure_regions;

   void handle_evacuation_failure_notifications(oop obj, markWord m, size_t word_sz);

@@ -112,7 +113,8 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
                        uint worker_id,
                        uint n_workers,
                        size_t young_cset_length,
-                       size_t optional_cset_length);
+                       size_t optional_cset_length,
+                       G1EvacFailureRegions* evac_failure_regions);
   virtual ~G1ParScanThreadState();

   void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }

@@ -242,14 +244,16 @@ class G1ParScanThreadStateSet : public StackObj {
   size_t _optional_cset_length;
   uint _n_workers;
   bool _flushed;
+  G1EvacFailureRegions* _evac_failure_regions;

 public:
   G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                           G1RedirtyCardsQueueSet* rdcqs,
                           PreservedMarksSet* preserved_marks_set,
                           uint n_workers,
                           size_t young_cset_length,
-                          size_t optional_cset_length);
+                          size_t optional_cset_length,
+                          G1EvacFailureRegions* evac_failure_regions);
   ~G1ParScanThreadStateSet();

   G1RedirtyCardsQueueSet* rdcqs() { return _rdcqs; }

src/hotspot/share/gc/g1/g1YoungCollector.cpp

(+10 -6)

@@ -498,7 +498,7 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacuationInfo* evacuation_
   // reference processing currently works in G1.
   ref_processor_stw()->start_discovery(false /* always_clear */);

-  _g1h->reset_evacuation_failed_data();
+  _evac_failure_regions->reset();

   _g1h->gc_prologue(false);

@@ -955,7 +955,7 @@ bool G1STWIsAliveClosure::do_object_b(oop p) {
 void G1YoungCollector::post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states) {
   Ticks start = Ticks::now();
   {
-    G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states);
+    G1PostEvacuateCollectionSetCleanupTask1 cl(per_thread_states, _evac_failure_regions);
     _g1h->run_batch_task(&cl);
   }
   phase_times()->record_post_evacuate_cleanup_task_1_time((Ticks::now() - start).seconds() * 1000.0);

@@ -965,7 +965,7 @@ void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thre
                                                G1EvacuationInfo* evacuation_info) {
   Ticks start = Ticks::now();
   {
-    G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info);
+    G1PostEvacuateCollectionSetCleanupTask2 cl(per_thread_states, evacuation_info, _evac_failure_regions);
     _g1h->run_batch_task(&cl);
   }
   phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);

@@ -1024,11 +1024,14 @@ class G1PreservedMarksSet : public PreservedMarksSet {
   }
 };

-G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause, double target_pause_time_ms) :
+G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause,
+                                   double target_pause_time_ms,
+                                   G1EvacFailureRegions* evac_failure_regions) :
   _g1h(G1CollectedHeap::heap()),
   _gc_cause(gc_cause),
   _target_pause_time_ms(target_pause_time_ms),
-  _concurrent_operation_is_full_mark(false)
+  _concurrent_operation_is_full_mark(false),
+  _evac_failure_regions(evac_failure_regions)
 {
 }

@@ -1080,7 +1083,8 @@ void G1YoungCollector::collect() {
                                            &preserved_marks_set,
                                            workers()->active_workers(),
                                            collection_set()->young_region_length(),
-                                           collection_set()->optional_region_length());
+                                           collection_set()->optional_region_length(),
+                                           _evac_failure_regions);
   pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

   bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;

src/hotspot/share/gc/g1/g1YoungCollector.hpp

(+5 -1)

@@ -37,6 +37,7 @@ class G1CollectedHeap;
 class G1CollectionSet;
 class G1CollectorState;
 class G1ConcurrentMark;
+class G1EvacFailureRegions;
 class G1EvacuationInfo;
 class G1GCPhaseTimes;
 class G1HotCardCache;

@@ -123,6 +124,7 @@ class G1YoungCollector {
   void post_evacuate_collection_set(G1EvacuationInfo* evacuation_info,
                                     G1ParScanThreadStateSet* per_thread_states);

+  G1EvacFailureRegions* _evac_failure_regions;

 #if TASKQUEUE_STATS
   uint num_task_queues() const;

@@ -133,7 +135,9 @@ class G1YoungCollector {

 public:

-  G1YoungCollector(GCCause::Cause gc_cause, double target_pause_time_ms);
+  G1YoungCollector(GCCause::Cause gc_cause,
+                   double target_pause_time_ms,
+                   G1EvacFailureRegions* evac_failure_regions);
   void collect();

   bool concurrent_operation_is_full_mark() const { return _concurrent_operation_is_full_mark; }

src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp

(+45 -23)

@@ -39,7 +39,8 @@
 #include "jfr/jfrEvents.hpp"
 #include "utilities/ticks.hpp"

-G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states) :
+G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states,
+                                                                                 G1EvacFailureRegions* evac_failure_regions) :
   G1BatchedGangTask("Post Evacuate Cleanup 1", G1CollectedHeap::heap()->phase_times())
 {
   add_serial_task(new MergePssTask(per_thread_states));

@@ -48,7 +49,7 @@ G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1
     add_serial_task(new SampleCollectionSetCandidatesTask());
   }
   if (RemoveSelfForwardPtrsTask::should_execute()) {
-    add_parallel_task(new RemoveSelfForwardPtrsTask(per_thread_states->rdcqs()));
+    add_parallel_task(new RemoveSelfForwardPtrsTask(per_thread_states->rdcqs(), evac_failure_regions));
   }
   add_parallel_task(G1CollectedHeap::heap()->rem_set()->create_cleanup_after_scan_heap_roots_task());
 }

@@ -100,19 +101,23 @@ bool G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask::should_
   return G1CollectedHeap::heap()->evacuation_failed();
 }

-G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask::RemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs) :
-  G1AbstractSubTask(G1GCPhaseTimes::RemoveSelfForwardingPtr), _task(rdcqs) { }
+G1PostEvacuateCollectionSetCleanupTask1::
+RemoveSelfForwardPtrsTask::RemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs,
+                                                     G1EvacFailureRegions* evac_failure_regions) :
+  G1AbstractSubTask(G1GCPhaseTimes::RemoveSelfForwardingPtr),
+  _task(rdcqs, evac_failure_regions),
+  _evac_failure_regions(evac_failure_regions) { }

 G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask::~RemoveSelfForwardPtrsTask() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert(_task.num_failed_regions() == g1h->num_regions_failed_evacuation(),
+  assert(_task.num_failed_regions() == _evac_failure_regions->num_regions_failed_evacuation(),
          "Removed regions %u inconsistent with expected %u",
-         _task.num_failed_regions(), g1h->num_regions_failed_evacuation());
+         _task.num_failed_regions(), _evac_failure_regions->num_regions_failed_evacuation());
 }

 double G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask::worker_cost() const {
   assert(should_execute(), "Should not call this if not executed");
-  return G1CollectedHeap::heap()->num_regions_failed_evacuation();
+  return _evac_failure_regions->num_regions_failed_evacuation();
 }

 void G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask::do_work(uint worker_id) {

@@ -291,6 +296,7 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
   size_t _num_dirtied;
   G1CollectedHeap* _g1h;
   G1CardTable* _g1_ct;
+  G1EvacFailureRegions* _evac_failure_regions;

   HeapRegion* region_for_card(CardValue* card_ptr) const {
     return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));

@@ -299,12 +305,16 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
   bool will_become_free(HeapRegion* hr) const {
     // A region will be freed by during the FreeCollectionSet phase if the region is in the
     // collection set and has not had an evacuation failure.
-    return _g1h->is_in_cset(hr) && !_g1h->evacuation_failed(hr->hrm_index());
+    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
   }

 public:
-  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
-    _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
+  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
+    G1CardTableEntryClosure(),
+    _num_dirtied(0),
+    _g1h(g1h),
+    _g1_ct(g1h->card_table()),
+    _evac_failure_regions(evac_failure_regions) { }

   void do_card_ptr(CardValue* card_ptr, uint worker_id) {
     HeapRegion* hr = region_for_card(card_ptr);

@@ -319,10 +329,13 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
   size_t num_dirtied() const { return _num_dirtied; }
 };

-G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask::RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* rdcqs) :
+G1PostEvacuateCollectionSetCleanupTask2::
+RedirtyLoggedCardsTask::RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* rdcqs,
+                                               G1EvacFailureRegions* evac_failure_regions) :
   G1AbstractSubTask(G1GCPhaseTimes::RedirtyCards),
   _rdcqs(rdcqs),
-  _nodes(rdcqs->all_completed_buffers()) { }
+  _nodes(rdcqs->all_completed_buffers()),
+  _evac_failure_regions(evac_failure_regions) { }

 G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask::~RedirtyLoggedCardsTask() {
   G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();

@@ -336,7 +349,7 @@ double G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask::worker_c
 }

 void G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask::do_work(uint worker_id) {
-  RedirtyLoggedCardTableEntryClosure cl(G1CollectedHeap::heap());
+  RedirtyLoggedCardTableEntryClosure cl(G1CollectedHeap::heap(), _evac_failure_regions);
   const size_t buffer_size = _rdcqs->buffer_size();
   BufferNode* next = Atomic::load(&_nodes);
   while (next != nullptr) {

@@ -462,6 +475,7 @@ class FreeCSetClosure : public HeapRegionClosure {
   Tickspan _young_time;
   Tickspan _non_young_time;
   FreeCSetStats* _stats;
+  G1EvacFailureRegions* _evac_failure_regions;

   void assert_tracks_surviving_words(HeapRegion* r) {
     assert(r->young_index_in_cset() != 0 &&

@@ -504,14 +518,16 @@ class FreeCSetClosure : public HeapRegionClosure {
 public:
   FreeCSetClosure(const size_t* surviving_young_words,
                   uint worker_id,
-                  FreeCSetStats* stats) :
+                  FreeCSetStats* stats,
+                  G1EvacFailureRegions* evac_failure_regions) :
     HeapRegionClosure(),
     _g1h(G1CollectedHeap::heap()),
     _surviving_young_words(surviving_young_words),
     _worker_id(worker_id),
     _young_time(),
     _non_young_time(),
-    _stats(stats) { }
+    _stats(stats),
+    _evac_failure_regions(evac_failure_regions) { }

   virtual bool do_heap_region(HeapRegion* r) {
     assert(r->in_collection_set(), "Invariant: %u missing from CSet", r->hrm_index());

@@ -525,7 +541,7 @@ class FreeCSetClosure : public HeapRegionClosure {
       r->record_surv_words_in_group(_surviving_young_words[r->young_index_in_cset()]);
     }

-    if (_g1h->evacuation_failed(r->hrm_index())) {
+    if (_evac_failure_regions->contains(r->hrm_index())) {
       handle_failed_region(r);
     } else {
       handle_evacuated_region(r);

@@ -559,15 +575,18 @@ void G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask::report_stat
   total_stats.report(_g1h, _evacuation_info);
 }

-G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask::FreeCollectionSetTask(G1EvacuationInfo* evacuation_info,
-                                                                                      const size_t* surviving_young_words) :
+G1PostEvacuateCollectionSetCleanupTask2::
+FreeCollectionSetTask::FreeCollectionSetTask(G1EvacuationInfo* evacuation_info,
+                                             const size_t* surviving_young_words,
+                                             G1EvacFailureRegions* evac_failure_regions) :
   G1AbstractSubTask(G1GCPhaseTimes::FreeCollectionSet),
   _g1h(G1CollectedHeap::heap()),
   _evacuation_info(evacuation_info),
   _worker_stats(nullptr),
   _claimer(0),
   _surviving_young_words(surviving_young_words),
-  _active_workers(0) {
+  _active_workers(0),
+  _evac_failure_regions(evac_failure_regions) {
   _g1h->clear_eden();
 }

@@ -596,14 +615,15 @@ void G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask::set_max_wor
 }

 void G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask::do_work(uint worker_id) {
-  FreeCSetClosure cl(_surviving_young_words, worker_id, worker_stats(worker_id));
+  FreeCSetClosure cl(_surviving_young_words, worker_id, worker_stats(worker_id), _evac_failure_regions);
   _g1h->collection_set_par_iterate_all(&cl, &_claimer, worker_id);
   // Report per-region type timings.
   cl.report_timing();
 }

 G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
-                                                                                 G1EvacuationInfo* evacuation_info) :
+                                                                                 G1EvacuationInfo* evacuation_info,
+                                                                                 G1EvacFailureRegions* evac_failure_regions) :
   G1BatchedGangTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
 {
   add_serial_task(new ResetHotCardCacheTask());

@@ -618,6 +638,8 @@ G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2
   if (RestorePreservedMarksTask::should_execute()) {
     add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set()));
   }
-  add_parallel_task(new RedirtyLoggedCardsTask(per_thread_states->rdcqs()));
-  add_parallel_task(new FreeCollectionSetTask(evacuation_info, per_thread_states->surviving_young_words()));
+  add_parallel_task(new RedirtyLoggedCardsTask(per_thread_states->rdcqs(), evac_failure_regions));
+  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
+                                              per_thread_states->surviving_young_words(),
+                                              evac_failure_regions));
 }

src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.hpp

(+13 -5)

@@ -31,6 +31,7 @@
 class FreeCSetStats;

 class G1CollectedHeap;
+class G1EvacFailureRegions;
 class G1EvacuationInfo;
 class G1ParScanThreadStateSet;
 class G1RedirtyCardsQueueSet;

@@ -48,7 +49,8 @@ class G1PostEvacuateCollectionSetCleanupTask1 : public G1BatchedGangTask {
   class RemoveSelfForwardPtrsTask;

 public:
-  G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states);
+  G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states,
+                                          G1EvacFailureRegions* evac_failure_regions);
 };

 class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {

@@ -81,9 +83,10 @@ class G1PostEvacuateCollectionSetCleanupTask1::SampleCollectionSetCandidatesTask

 class G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask : public G1AbstractSubTask {
   G1ParRemoveSelfForwardPtrsTask _task;
+  G1EvacFailureRegions* _evac_failure_regions;

 public:
-  RemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs);
+  RemoveSelfForwardPtrsTask(G1RedirtyCardsQueueSet* rdcqs, G1EvacFailureRegions* evac_failure_regions);
   ~RemoveSelfForwardPtrsTask();

   static bool should_execute();

@@ -114,7 +117,8 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedGangTask {

 public:
   G1PostEvacuateCollectionSetCleanupTask2(G1ParScanThreadStateSet* per_thread_states,
-                                          G1EvacuationInfo* evacuation_info);
+                                          G1EvacuationInfo* evacuation_info,
+                                          G1EvacFailureRegions* evac_failure_regions);
 };

 class G1PostEvacuateCollectionSetCleanupTask2::ResetHotCardCacheTask : public G1AbstractSubTask {

@@ -174,9 +178,10 @@ class G1PostEvacuateCollectionSetCleanupTask2::RestorePreservedMarksTask : publi
 class G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask : public G1AbstractSubTask {
   G1RedirtyCardsQueueSet* _rdcqs;
   BufferNode* volatile _nodes;
+  G1EvacFailureRegions* _evac_failure_regions;

 public:
-  RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* rdcqs);
+  RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* rdcqs, G1EvacFailureRegions* evac_failure_regions);
   virtual ~RedirtyLoggedCardsTask();

   double worker_cost() const override;

@@ -190,12 +195,15 @@ class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1
   HeapRegionClaimer _claimer;
   const size_t* _surviving_young_words;
   uint _active_workers;
+  G1EvacFailureRegions* _evac_failure_regions;

   FreeCSetStats* worker_stats(uint worker);
   void report_statistics();

 public:
-  FreeCollectionSetTask(G1EvacuationInfo* evacuation_info, const size_t* surviving_young_words);
+  FreeCollectionSetTask(G1EvacuationInfo* evacuation_info,
+                        const size_t* surviving_young_words,
+                        G1EvacFailureRegions* evac_failure_regions);
   virtual ~FreeCollectionSetTask();

   double worker_cost() const override;
