@@ -178,19 +178,31 @@ void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
   }
 }

-HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
-  expand(word_size*HeapWordSize);
+bool PSOldGen::expand_for_allocate(size_t word_size) {
+  assert(word_size > 0, "allocating zero words?");
+  bool result = true;
+  {
+    MutexLocker x(ExpandHeap_lock);
+    // Avoid "expand storms" by rechecking available space after obtaining
+    // the lock, because another thread may have already made sufficient
+    // space available. If insufficient space available, that will remain
+    // true until we expand, since we have the lock. Other threads may take
+    // the space we need before we can allocate it, regardless of whether we
+    // expand. That's okay, we'll just try expanding again.
+    if (object_space()->needs_expand(word_size)) {
+      result = expand(word_size*HeapWordSize);
+    }
+  }
   if (GCExpandToAllocateDelayMillis > 0) {
     os::naked_sleep(GCExpandToAllocateDelayMillis);
   }
-  return cas_allocate_noexpand(word_size);
+  return result;
 }

-void PSOldGen::expand(size_t bytes) {
-  if (bytes == 0) {
-    return;
-  }
-  MutexLocker x(ExpandHeap_lock);
+bool PSOldGen::expand(size_t bytes) {
+  assert_lock_strong(ExpandHeap_lock);
+  assert_locked_or_safepoint(Heap_lock);
+  assert(bytes > 0, "precondition");
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes = align_up(bytes, alignment);
   size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
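The recheck under ExpandHeap_lock in expand_for_allocate() is a double-checked expansion: whichever thread still sees insufficient space after taking the lock performs the expensive grow, and later arrivals skip it. A minimal standalone sketch of the same pattern, using hypothetical space_available()/grow() helpers in place of needs_expand()/expand():

#include <cstddef>
#include <mutex>

// Minimal sketch of the "expand storm" avoidance above. space_available()
// and grow() are hypothetical stand-ins for needs_expand() and expand().
class Generation {
  std::mutex expand_lock_;
  size_t committed_words_ = 0;
  size_t used_words_ = 0;

  bool space_available(size_t words) const {
    return committed_words_ - used_words_ >= words;
  }
  bool grow(size_t words) {          // best-effort commit of more space
    committed_words_ += words;
    return true;
  }

public:
  bool expand_for_allocate(size_t words) {
    std::lock_guard<std::mutex> guard(expand_lock_);
    // Recheck under the lock: another thread may already have expanded,
    // in which case this thread skips the grow and reports success.
    if (!space_available(words)) {
      return grow(words);
    }
    return true;
  }
};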
@@ -200,13 +212,11 @@ void PSOldGen::expand(size_t bytes) {
     // providing a page per lgroup. Alignment is larger or equal to the page size.
     aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
   }
-  if (aligned_bytes == 0){
-    // The alignment caused the number of bytes to wrap. An expand_by(0) will
-    // return true with the implication that and expansion was done when it
-    // was not. A call to expand implies a best effort to expand by "bytes"
-    // but not a guarantee. Align down to give a best effort. This is likely
-    // the most that the generation can expand since it has some capacity to
-    // start with.
+  if (aligned_bytes == 0) {
+    // The alignment caused the number of bytes to wrap. A call to expand
+    // implies a best effort to expand by "bytes" but not a guarantee. Align
+    // down to give a best effort. This is likely the most that the generation
+    // can expand since it has some capacity to start with.
     aligned_bytes = align_down(bytes, alignment);
   }

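The wrap this comment guards against is plain unsigned overflow: rounding a byte count close to SIZE_MAX up to the next alignment boundary wraps around to zero. A small worked example, assuming the usual power-of-two align-up/align-down arithmetic (hypothetical local helpers, not HotSpot's):

#include <cstddef>
#include <cstdint>

// Power-of-two alignment helpers in the style of align_up()/align_down().
static size_t align_up_sz(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);  // wraps to 0 on overflow
}
static size_t align_down_sz(size_t value, size_t alignment) {
  return value & ~(alignment - 1);
}

int main() {
  const size_t alignment = 0x1000;      // 4 KiB
  const size_t bytes = SIZE_MAX - 100;  // pathological expansion request
  size_t aligned = align_up_sz(bytes, alignment);   // wraps: aligned == 0
  if (aligned == 0) {
    // Same fallback as the patch: align down for a best-effort size.
    aligned = align_down_sz(bytes, alignment);
  }
  return aligned == 0 ? 1 : 0;          // exits 0: the fallback is usable
}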
@@ -224,14 +234,13 @@ void PSOldGen::expand(size_t bytes) {
   if (success && GCLocker::is_active_and_needs_gc()) {
     log_debug(gc)("Garbage collection disabled, expanded heap instead");
   }
+  return success;
 }

 bool PSOldGen::expand_by(size_t bytes) {
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);
-  if (bytes == 0) {
-    return true;  // That's what virtual_space()->expand_by(0) would return
-  }
+  assert(bytes > 0, "precondition");
   bool result = virtual_space()->expand_by(bytes);
   if (result) {
     if (ZapUnusedHeapArea) {
@@ -268,7 +277,7 @@ bool PSOldGen::expand_to_reserved() {
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);

-  bool result = true;
+  bool result = false;
   const size_t remaining_bytes = virtual_space()->uncommitted_size();
   if (remaining_bytes > 0) {
     result = expand_by(remaining_bytes);
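With result initialized to false, expand_to_reserved() now reports success only when it actually committed more memory; previously an already fully committed generation would claim an expansion that never happened. A hypothetical caller (illustrative, not part of this patch) shows why that distinction matters:

// Illustrative last-ditch allocation path, not from this change: retrying
// the allocation only makes sense if the generation actually grew.
HeapWord* PSOldGen::allocate_after_expand_to_reserved(size_t word_size) {
  if (expand_to_reserved()) {
    return cas_allocate_noexpand(word_size);  // existing lock-free allocator
  }
  return nullptr;  // reserve exhausted; caller must fall back to a full GC
}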
@@ -323,10 +332,10 @@ void PSOldGen::resize(size_t desired_free_space) {
   }
   if (new_size > current_size) {
     size_t change_bytes = new_size - current_size;
+    MutexLocker x(ExpandHeap_lock);
     expand(change_bytes);
   } else {
     size_t change_bytes = current_size - new_size;
-    // shrink doesn't grab this lock, expand does. Is that right?
     MutexLocker x(ExpandHeap_lock);
     shrink(change_bytes);
   }