Skip to content
This repository was archived by the owner on Aug 27, 2022. It is now read-only.
/ lanai Public archive

Commit 9f334a1

Browse files
Kim Barrett and Per Liden
Kim Barrett
and
Per Liden
committed Mar 6, 2020
8240239: Replace ConcurrentGCPhaseManager
Replace ConcurrentGCPhaseManager with ConcurrentGCBreakpoints

Co-authored-by: Per Liden <per.liden@oracle.com>
Reviewed-by: kbarrett, pliden, sangheki
1 parent 9e2ab1e commit 9f334a1

33 files changed

+841
-1196
lines changed
 

‎src/hotspot/share/gc/g1/g1CollectedHeap.cpp

+43-14
Original file line number | Diff line number | Diff line change
@@ -66,6 +66,7 @@
6666
#include "gc/g1/heapRegion.inline.hpp"
6767
#include "gc/g1/heapRegionRemSet.hpp"
6868
#include "gc/g1/heapRegionSet.inline.hpp"
69+
#include "gc/shared/concurrentGCBreakpoints.hpp"
6970
#include "gc/shared/gcBehaviours.hpp"
7071
#include "gc/shared/gcHeapSummary.hpp"
7172
#include "gc/shared/gcId.hpp"
@@ -2003,6 +2004,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
20032004
switch (cause) {
20042005
case GCCause::_g1_humongous_allocation: return true;
20052006
case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
2007+
case GCCause::_wb_breakpoint: return true;
20062008
default: return is_user_requested_concurrent_full_gc(cause);
20072009
}
20082010
}
@@ -2173,24 +2175,42 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
21732175
old_marking_completed_after = _old_marking_cycles_completed;
21742176
}
21752177

2176-
if (!GCCause::is_user_requested_gc(cause)) {
2178+
if (cause == GCCause::_wb_breakpoint) {
2179+
if (op.gc_succeeded()) {
2180+
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
2181+
return true;
2182+
}
2183+
// When _wb_breakpoint there can't be another cycle or deferred.
2184+
assert(!op.cycle_already_in_progress(), "invariant");
2185+
assert(!op.whitebox_attached(), "invariant");
2186+
// Concurrent cycle attempt might have been cancelled by some other
2187+
// collection, so retry. Unlike other cases below, we want to retry
2188+
// even if cancelled by a STW full collection, because we really want
2189+
// to start a concurrent cycle.
2190+
if (old_marking_started_before != old_marking_started_after) {
2191+
LOG_COLLECT_CONCURRENTLY(cause, "ignoring STW full GC");
2192+
old_marking_started_before = old_marking_started_after;
2193+
}
2194+
} else if (!GCCause::is_user_requested_gc(cause)) {
21772195
// For an "automatic" (not user-requested) collection, we just need to
21782196
// ensure that progress is made.
21792197
//
21802198
// Request is finished if any of
21812199
// (1) the VMOp successfully performed a GC,
21822200
// (2) a concurrent cycle was already in progress,
2183-
// (3) a new cycle was started (by this thread or some other), or
2184-
// (4) a Full GC was performed.
2185-
// Cases (3) and (4) are detected together by a change to
2201+
// (3) whitebox is controlling concurrent cycles,
2202+
// (4) a new cycle was started (by this thread or some other), or
2203+
// (5) a Full GC was performed.
2204+
// Cases (4) and (5) are detected together by a change to
21862205
// _old_marking_cycles_started.
21872206
//
2188-
// Note that (1) does not imply (3). If we're still in the mixed
2207+
// Note that (1) does not imply (4). If we're still in the mixed
21892208
// phase of an earlier concurrent collection, the request to make the
21902209
// collection an initial-mark won't be honored. If we don't check for
21912210
// both conditions we'll spin doing back-to-back collections.
21922211
if (op.gc_succeeded() ||
21932212
op.cycle_already_in_progress() ||
2213+
op.whitebox_attached() ||
21942214
(old_marking_started_before != old_marking_started_after)) {
21952215
LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
21962216
return true;
@@ -2244,20 +2264,32 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
22442264
// a new cycle was started.
22452265
assert(!op.gc_succeeded(), "invariant");
22462266

2247-
// If VMOp failed because a cycle was already in progress, it is now
2248-
// complete. But it didn't finish this user-requested GC, so try
2249-
// again.
22502267
if (op.cycle_already_in_progress()) {
2268+
// If VMOp failed because a cycle was already in progress, it
2269+
// is now complete. But it didn't finish this user-requested
2270+
// GC, so try again.
22512271
LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
22522272
continue;
2273+
} else if (op.whitebox_attached()) {
2274+
// If WhiteBox wants control, wait for notification of a state
2275+
// change in the controller, then try again. Don't wait for
2276+
// release of control, since collections may complete while in
2277+
// control. Note: This won't recognize a STW full collection
2278+
// while waiting; we can't wait on multiple monitors.
2279+
LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
2280+
MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
2281+
if (ConcurrentGCBreakpoints::is_controlled()) {
2282+
ml.wait();
2283+
}
2284+
continue;
22532285
}
22542286
}
22552287

22562288
// Collection failed and should be retried.
22572289
assert(op.transient_failure(), "invariant");
22582290

2259-
// If GCLocker is active, wait until clear before retrying.
22602291
if (GCLocker::is_active_and_needs_gc()) {
2292+
// If GCLocker is active, wait until clear before retrying.
22612293
LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall");
22622294
GCLocker::stall_until_clear();
22632295
}
@@ -2453,14 +2485,10 @@ void G1CollectedHeap::verify(VerifyOption vo) {
24532485
_verifier->verify(vo);
24542486
}
24552487

2456-
bool G1CollectedHeap::supports_concurrent_phase_control() const {
2488+
bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
24572489
return true;
24582490
}
24592491

2460-
bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
2461-
return _cm_thread->request_concurrent_phase(phase);
2462-
}
2463-
24642492
bool G1CollectedHeap::is_heterogeneous_heap() const {
24652493
return G1Arguments::is_heterogeneous_heap();
24662494
}
@@ -3178,6 +3206,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
31783206
// Note: of course, the actual marking work will not start until the safepoint
31793207
// itself is released in SuspendibleThreadSet::desynchronize().
31803208
do_concurrent_mark();
3209+
ConcurrentGCBreakpoints::notify_idle_to_active();
31813210
}
31823211
}
31833212

‎src/hotspot/share/gc/g1/g1CollectedHeap.hpp

+2-3
Original file line number | Diff line number | Diff line change
@@ -269,7 +269,7 @@ class G1CollectedHeap : public CollectedHeap {
269269
// (a) cause == _g1_humongous_allocation,
270270
// (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
271271
// (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
272-
// (d) cause == _wb_conc_mark,
272+
// (d) cause == _wb_conc_mark or _wb_breakpoint,
273273
// (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
274274
bool should_do_concurrent_full_gc(GCCause::Cause cause);
275275

@@ -1423,8 +1423,7 @@ class G1CollectedHeap : public CollectedHeap {
14231423
void verify(VerifyOption vo);
14241424

14251425
// WhiteBox testing support.
1426-
virtual bool supports_concurrent_phase_control() const;
1427-
virtual bool request_concurrent_phase(const char* phase);
1426+
virtual bool supports_concurrent_gc_breakpoints() const;
14281427
bool is_heterogeneous_heap() const;
14291428

14301429
virtual WorkGang* get_safepoint_workers() { return _workers; }

0 commit comments

Comments (0)