 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"

-class RemoveSelfForwardPtrObjClosure : public ObjectClosure {
+class RemoveSelfForwardPtrObjClosure {
   G1CollectedHeap* _g1h;
   G1ConcurrentMark* _cm;
   HeapRegion* _hr;
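
Why the base class goes away: the new bitmap walk dispatches to the closure through a template parameter rather than through ObjectClosure's virtual do_object(), and each visit now reports the object's size so the walk can skip ahead. Below is a minimal standalone sketch of that pattern; Obj, CountingClosure, and walk are invented stand-ins, not the HotSpot types.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for an object header; illustration only.
struct Obj { size_t size_in_words; };

// Old style: virtual dispatch through an ObjectClosure interface.
struct ObjectClosure {
  virtual void do_object(Obj* obj) = 0;
};

// New style: a plain class with a non-virtual apply() that also returns
// the object size, so the caller can advance past the object without a
// second size lookup. A template walker binds to it statically.
struct CountingClosure {
  size_t live_words = 0;
  size_t apply(Obj* obj) {          // no virtual call, returns the size
    live_words += obj->size_in_words;
    return obj->size_in_words;
  }
};

template <typename Closure>
void walk(Obj** objs, size_t n, Closure* cl) {
  for (size_t i = 0; i < n; i++) {
    cl->apply(objs[i]);             // statically bound, inlinable
  }
}

int main() {
  Obj a{3}, b{5};
  Obj* objs[] = {&a, &b};
  CountingClosure cl;
  walk(objs, 2, &cl);
  std::printf("live words: %zu\n", cl.live_words); // prints 8
}
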
@@ -60,13 +60,13 @@ class RemoveSelfForwardPtrObjClosure : public ObjectClosure {

   size_t marked_bytes() { return _marked_words * HeapWordSize; }

-  // Iterate over the live objects in the region to find self-forwarded objects
+  // Handle the marked objects in the region. These are self-forwarded objects
   // that need to be kept live. We need to update the remembered sets of these
   // objects. Further update the BOT and marks.
   // We can coalesce and overwrite the remaining heap contents with dummy objects
   // as they have either been dead or evacuated (which are unreferenced now, i.e.
   // dead too) already.
-  void do_object(oop obj) {
+  size_t apply(oop obj) {
     HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
     assert(_last_forwarded_object_end <= obj_addr, "should iterate in ascending address order");
     assert(_hr->is_in(obj_addr), "sanity");
@@ -75,12 +75,9 @@ class RemoveSelfForwardPtrObjClosure : public ObjectClosure {
     assert(obj->is_forwarded() && obj->forwardee() == obj, "sanity");

     zap_dead_objects(_last_forwarded_object_end, obj_addr);
-    // We consider all objects that we find self-forwarded to be
-    // live. What we'll do is that we'll update the prev marking
-    // info so that they are all under PTAMS and explicitly marked.
-    if (!_cm->is_marked_in_prev_bitmap(obj)) {
-      _cm->mark_in_prev_bitmap(obj);
-    }
+
+    // Zapping clears the bitmap, make sure it didn't clear too much.
+    assert(_cm->is_marked_in_prev_bitmap(obj), "should be correctly marked");
     if (_during_concurrent_start) {
       // For the next marking info we'll only mark the
       // self-forwarded objects explicitly if we are during
@@ -92,7 +89,7 @@ class RemoveSelfForwardPtrObjClosure : public ObjectClosure {
       // explicitly and all objects in the CSet are considered
       // (implicitly) live. So, we won't mark them explicitly and
       // we'll leave them over NTAMS.
-      _cm->mark_in_next_bitmap(_worker_id, _hr, obj);
+      _cm->mark_in_next_bitmap(_worker_id, obj);
     }
     size_t obj_size = obj->size();

@@ -102,6 +99,7 @@ class RemoveSelfForwardPtrObjClosure : public ObjectClosure {
     HeapWord* obj_end = obj_addr + obj_size;
     _last_forwarded_object_end = obj_end;
     _hr->alloc_block_in_bot(obj_addr, obj_end);
+    return obj_size;
   }

   // Fill the memory area from start to end with filler objects, and update the BOT
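
The fill step relies on a classic GC invariant: a region must stay parseable, so dead gaps between surviving objects are overwritten with filler objects a heap walker can step over in O(1). Here is a toy model of that idea, with an invented one-word-header layout instead of HotSpot's real filler objects and BOT.

#include <cstddef>
#include <cassert>

// Illustrative model of "zapping": the gap between two live objects is
// overwritten with a filler record so the region stays walkable.
// Types and layout are simplified stand-ins, not HotSpot's.
using HeapWord = size_t;

const size_t FILLER_HEADER = ~size_t(0);

// Write a filler "object" covering [start, end): one header word plus
// a length, so a heap walker can skip the dead range in one step.
void fill_range(HeapWord* start, HeapWord* end) {
  assert(end - start >= 2 && "range must fit header + length");
  start[0] = FILLER_HEADER;
  start[1] = (size_t)(end - start);     // size in words, including header
}

// Walker helper: size in words of the (live or filler) block at p.
// In this toy layout a live block stores its own size in word 0.
size_t block_size(const HeapWord* p) {
  return p[0] == FILLER_HEADER ? p[1] : p[0];
}

int main() {
  HeapWord heap[16] = {};
  heap[0] = 4;                          // a live 4-word object
  fill_range(heap + 4, heap + 12);      // zap the dead gap
  heap[12] = 4;                         // next live object
  // Walk the region; every block is parseable.
  size_t total = 0;
  for (HeapWord* p = heap; p < heap + 16; p += block_size(p)) {
    total += block_size(p);
  }
  assert(total == 16);
}
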
@@ -161,8 +159,11 @@ class RemoveSelfForwardPtrHRClosure : public HeapRegionClosure {
     RemoveSelfForwardPtrObjClosure rspc(hr,
                                         during_concurrent_start,
                                         _worker_id);
-    // Iterates evac failure objs which are recorded during evacuation.
-    hr->process_and_drop_evac_failure_objs(&rspc);
+
+    // All objects that failed evacuation have been marked in the prev bitmap.
+    // Use the bitmap to apply the above closure to all failing objects.
+    G1CMBitMap* bitmap = const_cast<G1CMBitMap*>(_g1h->concurrent_mark()->prev_mark_bitmap());
+    hr->apply_to_marked_objects(bitmap, &rspc);
     // Need to zap the remainder area of the processed region.
     rspc.zap_remainder();

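
The removed process_and_drop_evac_failure_objs() iterated a side list of objects recorded during evacuation; apply_to_marked_objects() instead derives the same set from the mark bitmap, visiting set bits in address order and using the size returned by apply() to jump past each object. A simplified sketch of that walk, using std::bitset and word indices in place of G1CMBitMap and heap addresses:

#include <bitset>
#include <cstddef>

// All types here are simplified stand-ins for the HotSpot ones.
const size_t REGION_WORDS = 64;
using MarkBitmap = std::bitset<REGION_WORDS>;

struct Region {
  size_t obj_size[REGION_WORDS];   // size in words at each object start

  // Visit exactly the marked objects, in ascending address order.
  template <typename Closure>
  void apply_to_marked_objects(const MarkBitmap& bm, Closure* cl) {
    for (size_t i = 0; i < REGION_WORDS; ) {
      if (bm.test(i)) {
        size_t consumed = cl->apply(i); // closure returns the object size
        i += consumed;                  // next object can't start inside this one
      } else {
        i++;
      }
    }
  }
};

struct SizeSummingClosure {
  Region* r;
  size_t live_words = 0;
  size_t apply(size_t idx) {
    live_words += r->obj_size[idx];
    return r->obj_size[idx];
  }
};

int main() {
  Region r{};
  MarkBitmap bm;
  r.obj_size[0] = 8;  bm.set(0);   // two "self-forwarded" objects
  r.obj_size[20] = 4; bm.set(20);
  SizeSummingClosure cl{&r};
  r.apply_to_marked_objects(bm, &cl);
  return cl.live_words == 12 ? 0 : 1;
}
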
@@ -172,26 +173,26 @@ class RemoveSelfForwardPtrHRClosure : public HeapRegionClosure {
   bool do_heap_region(HeapRegion* hr) {
     assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
     assert(hr->in_collection_set(), "bad CS");
+    assert(_evac_failure_regions->contains(hr->hrm_index()), "precondition");

-    if (_evac_failure_regions->contains(hr->hrm_index())) {
-      hr->clear_index_in_opt_cset();
+    hr->clear_index_in_opt_cset();

-      bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();
-      bool during_concurrent_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
+    bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();
+    bool during_concurrent_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();

-      hr->note_self_forwarding_removal_start(during_concurrent_start,
-                                             during_concurrent_mark);
-      _g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);
+    hr->note_self_forwarding_removal_start(during_concurrent_start,
+                                           during_concurrent_mark);

-      hr->reset_bot();
+    hr->reset_bot();

-      size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start);
+    size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start);

-      hr->rem_set()->clean_strong_code_roots(hr);
-      hr->rem_set()->clear_locked(true);
+    hr->rem_set()->clean_strong_code_roots(hr);
+    hr->rem_set()->clear_locked(true);
+
+    hr->note_self_forwarding_removal_end(live_bytes);
+    _g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);

-      hr->note_self_forwarding_removal_end(live_bytes);
-    }
     return false;
   }
 };
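
The containment check turning into an assert reflects a shifted contract: previously the closure was applied to every collection-set region and filtered internally; now the caller walks only the recorded evacuation-failure regions, so membership is a precondition. A rough standalone model of that calling convention (names such as iterate_failed_regions are invented for illustration, not the HotSpot API):

#include <cassert>
#include <vector>

// Simplified stand-in for the set of regions that failed evacuation.
struct EvacFailureRegions {
  std::vector<unsigned> indices;
  bool contains(unsigned idx) const {
    for (unsigned i : indices) if (i == idx) return true;
    return false;
  }
};

struct RegionClosure {
  const EvacFailureRegions* failed;
  bool do_heap_region(unsigned idx) {
    // Membership is now asserted, not checked: the caller filters regions.
    assert(failed->contains(idx) && "precondition: caller filters regions");
    // ... remove self-forwards, rebuild BOT, update stats ...
    return false;                     // false == keep iterating
  }
};

// The driver visits only the recorded failure regions.
void iterate_failed_regions(const EvacFailureRegions& failed, RegionClosure* cl) {
  for (unsigned idx : failed.indices) {
    if (cl->do_heap_region(idx)) break;
  }
}

int main() {
  EvacFailureRegions failed{{3, 7}};
  RegionClosure cl{&failed};
  iterate_failed_regions(failed, &cl);
}
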