Skip to content

Commit 6a84ec6

Browse files
author
Kim Barrett
committed Feb 12, 2021
8260044: Parallel GC: Concurrent allocation after heap expansion may cause unnecessary full gc
8260045: Parallel GC: Waiting on ExpandHeap_lock may cause "expansion storm". Loop to retry allocation if expand succeeds. Treat space available after obtaining expand lock as expand success. Reviewed-by: tschatzl, iwalulya, sjohanss
1 parent 92ff891 commit 6a84ec6

File tree

4 files changed

+51
-24
lines changed

4 files changed

+51
-24
lines changed
 

‎src/hotspot/share/gc/parallel/mutableSpace.cpp

+9
Original file line numberDiff line numberDiff line change
@@ -215,6 +215,15 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
215215
return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
216216
}
217217

218+
// Only used by oldgen allocation.
219+
bool MutableSpace::needs_expand(size_t word_size) const {
220+
assert_lock_strong(ExpandHeap_lock);
221+
// Holding the lock means end is stable. So while top may be advancing
222+
// via concurrent allocations, there is no need to order the reads of top
223+
// and end here, unlike in cas_allocate.
224+
return pointer_delta(end(), top()) < word_size;
225+
}
226+
218227
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
219228
HeapWord* obj_addr = bottom();
220229
HeapWord* t = top();

‎src/hotspot/share/gc/parallel/mutableSpace.hpp

+5
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,11 @@ class MutableSpace: public CHeapObj<mtGC> {
142142
virtual HeapWord* cas_allocate(size_t word_size);
143143
// Optional deallocation. Used in NUMA-allocator.
144144
bool cas_deallocate(HeapWord *obj, size_t size);
145+
// Return true if this space needs to be expanded in order to satisfy an
146+
// allocation request of the indicated size. Concurrent allocations and
147+
// resizes may change the result of a later call. Used by oldgen allocator.
148+
// precondition: holding ExpandHeap_lock
149+
bool needs_expand(size_t word_size) const;
145150

146151
// Iteration.
147152
void oop_iterate(OopIterateClosure* cl);

‎src/hotspot/share/gc/parallel/psOldGen.cpp

+29-20
Original file line numberDiff line numberDiff line change
@@ -178,19 +178,31 @@ void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
178178
}
179179
}
180180

181-
HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
182-
expand(word_size*HeapWordSize);
181+
bool PSOldGen::expand_for_allocate(size_t word_size) {
182+
assert(word_size > 0, "allocating zero words?");
183+
bool result = true;
184+
{
185+
MutexLocker x(ExpandHeap_lock);
186+
// Avoid "expand storms" by rechecking available space after obtaining
187+
// the lock, because another thread may have already made sufficient
188+
// space available. If insufficient space available, that will remain
189+
// true until we expand, since we have the lock. Other threads may take
190+
// the space we need before we can allocate it, regardless of whether we
191+
// expand. That's okay, we'll just try expanding again.
192+
if (object_space()->needs_expand(word_size)) {
193+
result = expand(word_size*HeapWordSize);
194+
}
195+
}
183196
if (GCExpandToAllocateDelayMillis > 0) {
184197
os::naked_sleep(GCExpandToAllocateDelayMillis);
185198
}
186-
return cas_allocate_noexpand(word_size);
199+
return result;
187200
}
188201

189-
void PSOldGen::expand(size_t bytes) {
190-
if (bytes == 0) {
191-
return;
192-
}
193-
MutexLocker x(ExpandHeap_lock);
202+
bool PSOldGen::expand(size_t bytes) {
203+
assert_lock_strong(ExpandHeap_lock);
204+
assert_locked_or_safepoint(Heap_lock);
205+
assert(bytes > 0, "precondition");
194206
const size_t alignment = virtual_space()->alignment();
195207
size_t aligned_bytes = align_up(bytes, alignment);
196208
size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
@@ -200,13 +212,11 @@ void PSOldGen::expand(size_t bytes) {
200212
// providing a page per lgroup. Alignment is larger or equal to the page size.
201213
aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
202214
}
203-
if (aligned_bytes == 0){
204-
// The alignment caused the number of bytes to wrap. An expand_by(0) will
205-
// return true with the implication that and expansion was done when it
206-
// was not. A call to expand implies a best effort to expand by "bytes"
207-
// but not a guarantee. Align down to give a best effort. This is likely
208-
// the most that the generation can expand since it has some capacity to
209-
// start with.
215+
if (aligned_bytes == 0) {
216+
// The alignment caused the number of bytes to wrap. A call to expand
217+
// implies a best effort to expand by "bytes" but not a guarantee. Align
218+
// down to give a best effort. This is likely the most that the generation
219+
// can expand since it has some capacity to start with.
210220
aligned_bytes = align_down(bytes, alignment);
211221
}
212222

@@ -224,14 +234,13 @@ void PSOldGen::expand(size_t bytes) {
224234
if (success && GCLocker::is_active_and_needs_gc()) {
225235
log_debug(gc)("Garbage collection disabled, expanded heap instead");
226236
}
237+
return success;
227238
}
228239

229240
bool PSOldGen::expand_by(size_t bytes) {
230241
assert_lock_strong(ExpandHeap_lock);
231242
assert_locked_or_safepoint(Heap_lock);
232-
if (bytes == 0) {
233-
return true; // That's what virtual_space()->expand_by(0) would return
234-
}
243+
assert(bytes > 0, "precondition");
235244
bool result = virtual_space()->expand_by(bytes);
236245
if (result) {
237246
if (ZapUnusedHeapArea) {
@@ -268,7 +277,7 @@ bool PSOldGen::expand_to_reserved() {
268277
assert_lock_strong(ExpandHeap_lock);
269278
assert_locked_or_safepoint(Heap_lock);
270279

271-
bool result = true;
280+
bool result = false;
272281
const size_t remaining_bytes = virtual_space()->uncommitted_size();
273282
if (remaining_bytes > 0) {
274283
result = expand_by(remaining_bytes);
@@ -323,10 +332,10 @@ void PSOldGen::resize(size_t desired_free_space) {
323332
}
324333
if (new_size > current_size) {
325334
size_t change_bytes = new_size - current_size;
335+
MutexLocker x(ExpandHeap_lock);
326336
expand(change_bytes);
327337
} else {
328338
size_t change_bytes = current_size - new_size;
329-
// shrink doesn't grab this lock, expand does. Is that right?
330339
MutexLocker x(ExpandHeap_lock);
331340
shrink(change_bytes);
332341
}

‎src/hotspot/share/gc/parallel/psOldGen.hpp

+8-4
Original file line numberDiff line numberDiff line change
@@ -79,8 +79,8 @@ class PSOldGen : public CHeapObj<mtGC> {
7979
return res;
8080
}
8181

82-
HeapWord* expand_and_cas_allocate(size_t word_size);
83-
void expand(size_t bytes);
82+
bool expand_for_allocate(size_t word_size);
83+
bool expand(size_t bytes);
8484
bool expand_by(size_t bytes);
8585
bool expand_to_reserved();
8686

@@ -135,8 +135,12 @@ class PSOldGen : public CHeapObj<mtGC> {
135135
void resize(size_t desired_free_space);
136136

137137
HeapWord* allocate(size_t word_size) {
138-
HeapWord* res = cas_allocate_noexpand(word_size);
139-
return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
138+
HeapWord* res;
139+
do {
140+
res = cas_allocate_noexpand(word_size);
141+
// Retry failed allocation if expand succeeds.
142+
} while ((res == nullptr) && expand_for_allocate(word_size));
143+
return res;
140144
}
141145

142146
// Iteration.

0 commit comments

Comments (0)
Please sign in to comment.