@@ -66,6 +66,7 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/shared/concurrentGCBreakpoints.hpp"
 #include "gc/shared/gcBehaviours.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"

@@ -2003,6 +2004,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_g1_humongous_allocation: return true;
     case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
+    case GCCause::_wb_breakpoint: return true;
     default: return is_user_requested_concurrent_full_gc(cause);
   }
 }

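Note: with _wb_breakpoint added to this switch, a WhiteBox breakpoint request is classified as a concurrent collection rather than a STW full GC. A minimal sketch of the requesting side, assuming the acquire_control()/release_control() entry points of the new gc/shared/concurrentGCBreakpoints.hpp API (the wrapper function itself is hypothetical):

    // Hypothetical driver: take WhiteBox control, then request a collection
    // with the breakpoint cause.  should_do_concurrent_full_gc() above now
    // answers true for it, routing the request to try_collect_concurrently().
    static void request_breakpoint_cycle() {
      ConcurrentGCBreakpoints::acquire_control();          // test thread takes control
      Universe::heap()->collect(GCCause::_wb_breakpoint);  // starts a concurrent cycle
      ConcurrentGCBreakpoints::release_control();
    }
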
@@ -2173,24 +2175,42 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
       old_marking_completed_after = _old_marking_cycles_completed;
     }
 
-    if (!GCCause::is_user_requested_gc(cause)) {
+    if (cause == GCCause::_wb_breakpoint) {
+      if (op.gc_succeeded()) {
+        LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
+        return true;
+      }
+      // When _wb_breakpoint is the cause there can't be another cycle or a deferral.
+      assert(!op.cycle_already_in_progress(), "invariant");
+      assert(!op.whitebox_attached(), "invariant");
+      // Concurrent cycle attempt might have been cancelled by some other
+      // collection, so retry. Unlike other cases below, we want to retry
+      // even if cancelled by a STW full collection, because we really want
+      // to start a concurrent cycle.
+      if (old_marking_started_before != old_marking_started_after) {
+        LOG_COLLECT_CONCURRENTLY(cause, "ignoring STW full GC");
+        old_marking_started_before = old_marking_started_after;
+      }
+    } else if (!GCCause::is_user_requested_gc(cause)) {
       // For an "automatic" (not user-requested) collection, we just need to
       // ensure that progress is made.
       //
       // Request is finished if any of
       // (1) the VMOp successfully performed a GC,
       // (2) a concurrent cycle was already in progress,
-      // (3) a new cycle was started (by this thread or some other), or
-      // (4) a Full GC was performed.
-      // Cases (3) and (4) are detected together by a change to
+      // (3) whitebox is controlling concurrent cycles,
+      // (4) a new cycle was started (by this thread or some other), or
+      // (5) a Full GC was performed.
+      // Cases (4) and (5) are detected together by a change to
       // _old_marking_cycles_started.
       //
-      // Note that (1) does not imply (3). If we're still in the mixed
+      // Note that (1) does not imply (4). If we're still in the mixed
       // phase of an earlier concurrent collection, the request to make the
       // collection an initial-mark won't be honored. If we don't check for
       // both conditions we'll spin doing back-to-back collections.
       if (op.gc_succeeded() ||
           op.cycle_already_in_progress() ||
+          op.whitebox_attached() ||
           (old_marking_started_before != old_marking_started_after)) {
         LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
         return true;

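The completion test here folds five conditions into one short-circuit expression. A condensed restatement, as a hypothetical free function (VM_G1TryInitiateConcMark and its accessors are the real names used in this hunk; the function itself is illustrative only):

    // Restates the "automatic request is finished" test from the hunk above.
    static bool automatic_request_finished(const VM_G1TryInitiateConcMark& op,
                                           uint started_before,
                                           uint started_after) {
      return op.gc_succeeded() ||               // (1) VMOp performed a GC
             op.cycle_already_in_progress() ||  // (2) cycle already running
             op.whitebox_attached() ||          // (3) whitebox holds control
             (started_before != started_after); // (4)/(5) new cycle or Full GC
    }
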
@@ -2244,20 +2264,32 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
       // a new cycle was started.
       assert(!op.gc_succeeded(), "invariant");
 
-      // If VMOp failed because a cycle was already in progress, it is now
-      // complete. But it didn't finish this user-requested GC, so try
-      // again.
       if (op.cycle_already_in_progress()) {
+        // If VMOp failed because a cycle was already in progress, it
+        // is now complete. But it didn't finish this user-requested
+        // GC, so try again.
         LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
         continue;
+      } else if (op.whitebox_attached()) {
+        // If WhiteBox wants control, wait for notification of a state
+        // change in the controller, then try again. Don't wait for
+        // release of control, since collections may complete while in
+        // control. Note: This won't recognize a STW full collection
+        // while waiting; we can't wait on multiple monitors.
+        LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
+        MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
+        if (ConcurrentGCBreakpoints::is_controlled()) {
+          ml.wait();
+        }
+        continue;
       }
     }
 
     // Collection failed and should be retried.
     assert(op.transient_failure(), "invariant");
 
-    // If GCLocker is active, wait until clear before retrying.
     if (GCLocker::is_active_and_needs_gc()) {
+      // If GCLocker is active, wait until clear before retrying.
       LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall");
       GCLocker::stall_until_clear();
     }

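The whitebox_attached() branch above parks a user-requested collection on ConcurrentGCBreakpoints::monitor() until the controller's state changes. A sketch of the waking side of that handshake, assuming each state transition notifies the shared monitor (simplified shape, not the verbatim implementation; _is_idle is an assumed state flag):

    // Assumed shape of a controller state transition: update state under the
    // shared monitor, then notify_all() so threads stalled in the "whitebox
    // control stall" above wake up, re-check is_controlled(), and retry.
    void ConcurrentGCBreakpoints::notify_active_to_idle() {
      MonitorLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
      _is_idle = true;   // assumed flag tracking collector activity
      ml.notify_all();   // wake stalled requesters and the controller
    }
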
@@ -2453,14 +2485,10 @@ void G1CollectedHeap::verify(VerifyOption vo) {
   _verifier->verify(vo);
 }
 
-bool G1CollectedHeap::supports_concurrent_phase_control() const {
+bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
   return true;
 }
 
-bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
-  return _cm_thread->request_concurrent_phase(phase);
-}
-
 bool G1CollectedHeap::is_heterogeneous_heap() const {
   return G1Arguments::is_heterogeneous_heap();
 }

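supports_concurrent_gc_breakpoints() replaces the old per-phase control query. Presumably the CollectedHeap base class declines by default, so only collectors that opt in, like G1 here, report support; a sketch of that assumed default (not part of this hunk):

    // Assumed base-class default: collectors without breakpoint support
    // inherit "false", letting WhiteBox fail such requests fast.
    virtual bool supports_concurrent_gc_breakpoints() const { return false; }
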
@@ -3178,6 +3206,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
       // Note: of course, the actual marking work will not start until the safepoint
       // itself is released in SuspendibleThreadSet::desynchronize().
       do_concurrent_mark();
+      ConcurrentGCBreakpoints::notify_idle_to_active();
     }
   }
 
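Taken together, the pieces above suggest a controller protocol roughly like the following. This is a hypothetical sequence: run_to(), run_to_idle(), and the breakpoint name "AFTER MARKING STARTED" are assumptions about the new shared API, not code shown in this diff.

    // Hypothetical end-to-end driver for the breakpoint machinery.
    static void drive_marking_to_breakpoint() {
      ConcurrentGCBreakpoints::acquire_control();          // stalls competing user requests
      Universe::heap()->collect(GCCause::_wb_breakpoint);  // concurrent cycle (see first hunk)
      if (ConcurrentGCBreakpoints::run_to("AFTER MARKING STARTED")) {  // assumed breakpoint
        // The concurrent mark thread is parked at the named breakpoint;
        // the heap can be inspected in a known marking state here.
      }
      ConcurrentGCBreakpoints::run_to_idle();              // let the cycle run to completion
      ConcurrentGCBreakpoints::release_control();          // wakes any stalled requesters
    }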