8242847: G1 should not clear mark bitmaps with no marks #5213

Closed
85 changes: 63 additions & 22 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -581,29 +581,65 @@ class G1ClearBitMapTask : public AbstractGangTask {
static size_t chunk_size() { return M; }

private:
// Heap region closure used for clearing the given mark bitmap.
// Heap region closure used for clearing the _next_mark_bitmap.
class G1ClearBitmapHRClosure : public HeapRegionClosure {
private:
G1CMBitMap* _bitmap;
G1ConcurrentMark* _cm;
public:
G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
G1CMBitMap* _bitmap;
bool _suspendible; // If suspendible, do yield checks.

bool suspendible() {
return _suspendible;
}

bool is_clear_concurrent_undo() {
return suspendible() && _cm->cm_thread()->in_undo_mark();
}

bool has_aborted() {
if (suspendible()) {
_cm->do_yield_check();
return _cm->has_aborted();
}
return false;
}

HeapWord* region_clear_limit(HeapRegion* r) {
// During a Concurrent Undo Mark cycle, the _next_mark_bitmap is cleared
// without swapping with the _prev_mark_bitmap. Therefore, the per region
// next_top_at_mark_start and live_words data are current wrt
// _next_mark_bitmap. We use this information to only clear ranges of the
// bitmap that require clearing.
if (is_clear_concurrent_undo()) {
// No need to clear bitmaps for empty regions.
if (_cm->live_words(r->hrm_index()) == 0) {
assert(_bitmap->get_next_marked_addr(r->bottom(), r->end()) == r->end(), "Should not have marked bits");
return r->bottom();
}
assert(_bitmap->get_next_marked_addr(r->next_top_at_mark_start(), r->end()) == r->end(), "Should not have marked bits above ntams");
}
return r->end();
}

public:
G1ClearBitmapHRClosure(G1ConcurrentMark* cm, bool suspendible) :
HeapRegionClosure(),
_cm(cm),
_bitmap(cm->next_mark_bitmap()),
_suspendible(suspendible)
{ }

virtual bool do_heap_region(HeapRegion* r) {
size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
if (has_aborted()) {
return true;
}

HeapWord* cur = r->bottom();
HeapWord* const end = r->end();
HeapWord* const end = region_clear_limit(r);

size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;

while (cur < end) {
// Abort iteration if necessary.
if (_cm != NULL) {
_cm->do_yield_check();
if (_cm->has_aborted()) {
return true;
}
}

MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end));
_bitmap->clear_range(mr);
@@ -614,10 +650,15 @@ class G1ClearBitMapTask : public AbstractGangTask {
// as asserts here to minimize their overhead on the product. However, we
// will have them as guarantees at the beginning / end of the bitmap
// clearing to get some checking in the product.
assert(_cm == NULL || _cm->cm_thread()->in_progress(), "invariant");
assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");
assert(!suspendible() || _cm->cm_thread()->in_progress(), "invariant");
assert(!suspendible() || !G1CollectedHeap::heap()->collector_state()->mark_or_rebuild_in_progress(), "invariant");

// Abort iteration if necessary.
if (has_aborted()) {
return true;
}
}
assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
assert(cur >= end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

return false;
}
@@ -628,9 +669,9 @@ class G1ClearBitMapTask : public AbstractGangTask {
bool _suspendible; // If the task is suspendible, workers must join the STS.

public:
G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
G1ClearBitMapTask(G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
AbstractGangTask("G1 Clear Bitmap"),
_cl(bitmap, suspendible ? cm : NULL),
_cl(cm, suspendible),
_hr_claimer(n_workers),
_suspendible(suspendible)
{ }
@@ -645,15 +686,15 @@ class G1ClearBitMapTask : public AbstractGangTask {
}
};

void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
void G1ConcurrentMark::clear_next_bitmap(WorkGang* workers, bool may_yield) {
assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");

size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
size_t const num_chunks = align_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();

uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());

G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
G1ClearBitMapTask cl(this, num_workers, may_yield);

log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
workers->run_task(&cl, num_workers);
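
For a sense of scale, here is a small worked example of the chunk and worker sizing computed in the hunk above. All concrete numbers are assumptions for illustration (a 1 GiB heap of 1 MiB regions, a heap_map_factor() of 64 heap bytes per bitmap byte, 8 active workers); they are not taken from the PR.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  size_t const M = 1024 * 1024;
  size_t const grain_bytes     = 1 * M;   // stands in for HeapRegion::GrainBytes (assumed)
  size_t const num_regions     = 1024;    // assumed 1 GiB heap / 1 MiB regions
  size_t const heap_map_factor = 64;      // heap bytes covered per bitmap byte (assumed)
  size_t const chunk_size      = M;       // G1ClearBitMapTask::chunk_size()
  size_t const active_workers  = 8;       // assumed

  size_t const num_bytes_to_clear = (grain_bytes * num_regions) / heap_map_factor; // 16 MiB of bitmap
  size_t const num_chunks  = (num_bytes_to_clear + chunk_size - 1) / chunk_size;   // 16, i.e. align_up / chunk_size
  size_t const num_workers = std::min(num_chunks, active_workers);                 // 8

  printf("bitmap bytes: %zu, chunks: %zu, workers: %zu\n",
         num_bytes_to_clear, num_chunks, num_workers);
  return 0;
}

With these assumed numbers each worker claims roughly two 1 MiB chunks of bitmap, and the worker count can never exceed the number of chunks.
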
@@ -671,7 +712,7 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
// is the case.
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");

clear_bitmap(_next_mark_bitmap, _concurrent_workers, true);
clear_next_bitmap(_concurrent_workers, true);

// Repeat the asserts from above.
guarantee(cm_thread()->in_progress(), "invariant");
@@ -685,7 +726,7 @@ void G1ConcurrentMark::clear_next_bitmap(WorkGang* workers) {
// as efficiently as possible the number of active workers are temporarily
// increased to include all currently created workers.
WithUpdatedActiveWorkers update(workers, workers->created_workers());
clear_bitmap(_next_mark_bitmap, workers, false);
clear_next_bitmap(workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
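
To make the clearing strategy above easier to follow in isolation, here is a minimal standalone sketch of the same idea, using simplified stand-in types (Region, Bitmap) rather than the real HeapRegion and G1CMBitMap. It is an illustration only, under the assumption stated in region_clear_limit(): during a Concurrent Undo Mark cycle a region with zero recorded live words cannot have any marks and can be skipped entirely, while everything else is cleared chunk by chunk, which is what lets the real task yield between chunks.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Region {
  size_t bottom;      // first bitmap index covered by this region
  size_t end;         // one past the last bitmap index
  size_t live_words;  // liveness recorded for the (to be undone) mark
};

struct Bitmap {
  std::vector<bool> bits;
  void clear_range(size_t from, size_t to) {
    for (size_t i = from; i < to; i++) bits[i] = false;
  }
};

// Plays the role of region_clear_limit(): in an undo cycle an empty region
// has nothing to clear; otherwise the whole region is cleared.
static size_t clear_limit(const Region& r, bool undo_cycle) {
  if (undo_cycle && r.live_words == 0) {
    return r.bottom;   // skip this region entirely
  }
  return r.end;
}

static void clear_region(Bitmap& bm, const Region& r, bool undo_cycle) {
  size_t const chunk = 4;  // stands in for chunk_size() / HeapWordSize
  size_t cur = r.bottom;
  size_t const end = clear_limit(r, undo_cycle);
  while (cur < end) {
    size_t const next = std::min(cur + chunk, end);  // clear one chunk at a time;
    bm.clear_range(cur, next);                       // the real task can yield or
    cur = next;                                      // abort between chunks
  }
}

int main() {
  Bitmap bm{std::vector<bool>(16, true)};
  Region empty{0, 8, 0};    // no live words: untouched during an undo cycle
  Region live {8, 16, 42};  // has live words: cleared in full
  clear_region(bm, empty, /*undo_cycle=*/true);
  clear_region(bm, live,  /*undo_cycle=*/true);
  printf("bit 0 untouched: %d, bit 8 cleared: %d\n", (int)bm.bits[0], (int)!bm.bits[8]);
  return 0;
}
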
4 changes: 2 additions & 2 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -444,9 +444,9 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
void enter_first_sync_barrier(uint worker_id);
void enter_second_sync_barrier(uint worker_id);

// Clear the given bitmap in parallel using the given WorkGang. If may_yield is
// Clear the next marking bitmap in parallel using the given WorkGang. If may_yield is
// true, periodically insert checks to see if this method should exit prematurely.
void clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield);
void clear_next_bitmap(WorkGang* workers, bool may_yield);

// Region statistics gathered during marking.
G1RegionMarkStats* _region_mark_stats;
2 changes: 2 additions & 0 deletions src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp
@@ -324,6 +324,8 @@ void G1ConcurrentMarkThread::concurrent_undo_cycle_do() {
// some reason.
if (_cm->has_aborted()) { return; }

_cm->flush_all_task_caches();

// Phase 1: Clear bitmap for next mark.
phase_clear_bitmap_for_next_mark();
}
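
A short note on why the flush is added right before the bitmap is cleared: the per-region liveness totals that region_clear_limit() consults are accumulated in per-worker caches during marking and only become visible in the global per-region table once those caches are flushed. The following is a small hypothetical sketch of that flush-before-read ordering with made-up types (RegionStats, TaskCache); it is not the JDK's G1RegionMarkStats code, just an illustration of the dependency the added call satisfies.

#include <cstddef>
#include <cstdio>
#include <vector>

struct RegionStats {
  std::vector<size_t> live_words;              // global, one entry per region
  explicit RegionStats(size_t regions) : live_words(regions, 0) {}
};

struct TaskCache {
  std::vector<size_t> pending;                 // per-worker, not yet published
  explicit TaskCache(size_t regions) : pending(regions, 0) {}
  void add(size_t region, size_t words) { pending[region] += words; }
  void flush(RegionStats& global) {            // plays the role of flush_all_task_caches()
    for (size_t i = 0; i < pending.size(); i++) {
      global.live_words[i] += pending[i];
      pending[i] = 0;
    }
  }
};

int main() {
  RegionStats stats(2);
  TaskCache cache(2);
  cache.add(1, 7);   // marking recorded liveness only in the worker's cache
  // Reading stats.live_words[1] here would report 0 and wrongly treat the
  // region as empty; flushing first makes the undo-cycle skip decision safe.
  cache.flush(stats);
  printf("region 1 live words: %zu\n", stats.live_words[1]);
  return 0;
}
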
2 changes: 2 additions & 0 deletions src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp
@@ -108,6 +108,8 @@ class G1ConcurrentMarkThread: public ConcurrentGCThread {
// marking bitmap has been cleared and in_progress() is
// cleared).
bool in_progress() const;

bool in_undo_mark() const;
};

#endif // SHARE_GC_G1_G1CONCURRENTMARKTHREAD_HPP
4 changes: 4 additions & 0 deletions src/hotspot/share/gc/g1/g1ConcurrentMarkThread.inline.hpp
@@ -60,4 +60,8 @@ inline bool G1ConcurrentMarkThread::in_progress() const {
return !idle();
}

inline bool G1ConcurrentMarkThread::in_undo_mark() const {
return _state == UndoMark;
}

#endif // SHARE_GC_G1_G1CONCURRENTMARKTHREAD_INLINE_HPP