@@ -394,26 +394,42 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   HeapWord* result = NULL;
   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     bool should_try_gc;
+    bool preventive_collection_required = false;
     uint gc_count_before;
 
     {
       MutexLocker x(Heap_lock);
-      result = _allocator->attempt_allocation_locked(word_size);
+
+      // Now that we have the lock, we first retry the allocation in case another
+      // thread changed the region while we were waiting to acquire the lock.
+      size_t actual_size;
+      result = _allocator->attempt_allocation(word_size, word_size, &actual_size);
       if (result != NULL) {
         return result;
       }
 
-      // If the GCLocker is active and we are bound for a GC, try expanding young gen.
-      // This is different to when only GCLocker::needs_gc() is set: try to avoid
-      // waiting because the GCLocker is active to not wait too long.
-      if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
-        // No need for an ergo message here, can_expand_young_list() does this when
-        // it returns true.
-        result = _allocator->attempt_allocation_force(word_size);
+      preventive_collection_required = policy()->preventive_collection_required(1);
+      if (!preventive_collection_required) {
+        // We've already attempted a lock-free allocation above, so we don't want to
+        // do it again. Let's jump straight to replacing the active region.
+        result = _allocator->attempt_allocation_using_new_region(word_size);
         if (result != NULL) {
           return result;
         }
+
+        // If the GCLocker is active and we are bound for a GC, try expanding young gen.
+        // This is different to when only GCLocker::needs_gc() is set: try to avoid
+        // waiting because the GCLocker is active to not wait too long.
+        if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
+          // No need for an ergo message here, can_expand_young_list() does this when
+          // it returns true.
+          result = _allocator->attempt_allocation_force(word_size);
+          if (result != NULL) {
+            return result;
+          }
+        }
       }
+
       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
       // the GCLocker initiated GC has been performed and then retry. This includes
       // the case when the GC Locker is not active but has not been performed.
@@ -423,9 +439,10 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
     }
 
     if (should_try_gc) {
+      GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
+                                                               : GCCause::_g1_inc_collection_pause;
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded,
-                                   GCCause::_g1_inc_collection_pause);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
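
The humongous allocation path below repeats this pattern; the only difference is how many regions the pending allocation will consume, which each call site passes to the policy. A minimal side-by-side sketch of the two calls, using only names that appear in this diff:

    // Slow path: a regular (non-humongous) allocation takes at most one new
    // region, so the policy is asked about a single region.
    bool preventive = policy()->preventive_collection_required(1);

    // Humongous path: the object may span several contiguous regions, so the
    // full footprint is computed first and handed to the same predicate.
    size_t size_in_regions = humongous_obj_size_in_regions(word_size);
    bool preventive_humongous =
        policy()->preventive_collection_required((uint)size_in_regions);
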
@@ -840,21 +857,25 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
   HeapWord* result = NULL;
   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     bool should_try_gc;
+    bool preventive_collection_required = false;
     uint gc_count_before;
 
 
     {
       MutexLocker x(Heap_lock);
 
-      // Given that humongous objects are not allocated in young
-      // regions, we'll first try to do the allocation without doing a
-      // collection hoping that there's enough space in the heap.
-      result = humongous_obj_allocate(word_size);
-      if (result != NULL) {
-        size_t size_in_regions = humongous_obj_size_in_regions(word_size);
-        policy()->old_gen_alloc_tracker()->
-          add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
-        return result;
+      size_t size_in_regions = humongous_obj_size_in_regions(word_size);
+      preventive_collection_required = policy()->preventive_collection_required((uint)size_in_regions);
+      if (!preventive_collection_required) {
+        // Given that humongous objects are not allocated in young
+        // regions, we'll first try to do the allocation without doing a
+        // collection hoping that there's enough space in the heap.
+        result = humongous_obj_allocate(word_size);
+        if (result != NULL) {
+          policy()->old_gen_alloc_tracker()->
+            add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
+          return result;
+        }
       }
 
       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
@@ -866,9 +887,10 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
     }
 
     if (should_try_gc) {
+      GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
+                                                               : GCCause::_g1_humongous_allocation;
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded,
-                                   GCCause::_g1_humongous_allocation);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
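
The policy side of this change is not part of the hunks above: G1Policy::preventive_collection_required() is defined elsewhere. As a rough mental model only, a predicate like this compares the regions the allocation is about to take, plus headroom for the next evacuation, against the current free-region count, so a GC is scheduled before the mutator exhausts the free list. The sketch below is an assumption, not the actual implementation; free_region_count() and predicted_regions_for_evacuation() are hypothetical helpers.

    // Hypothetical sketch of a preventive-collection predicate. The real
    // G1Policy::preventive_collection_required() is not shown in this diff;
    // both helpers below are made up for illustration.
    bool preventive_collection_required_sketch(uint alloc_region_count) {
      // Regions the pending allocation would consume, plus the headroom the
      // next evacuation pause is expected to need for survivor/old copies.
      uint required = alloc_region_count + predicted_regions_for_evacuation();
      // Collect *before* the mutator takes the last free regions, so the
      // evacuation itself cannot run out of space (an evacuation failure).
      return free_region_count() < required;
    }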