
Commit 75001c7

Author: duke
Committed: Jan 25, 2022

Automatic merge of jdk:master into master

2 parents: f9b21a1 + b327746

7 files changed: +23 -49 lines
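This merge pulls in a cleanup of the Parallel GC's per-thread manager arrays. Both ParCompactionManager and PSPromotionManager used to allocate ParallelGCThreads + 1 entries, reserving the extra last slot for the VM thread; after the change the arrays hold exactly ParallelGCThreads entries and the VM thread simply reuses the first one. A minimal standalone sketch of the indexing scheme before and after, using illustrative stand-in names (Manager, kNumWorkers) rather than the HotSpot ones:

// sketch.cpp -- illustrative only, not HotSpot code.
#include <cassert>
#include <cstddef>

struct Manager { int work_done; };        // stands in for ParCompactionManager

const std::size_t kNumWorkers = 4;        // stands in for ParallelGCThreads
Manager g_managers[kNumWorkers];          // before the change: kNumWorkers + 1 slots

Manager* worker_manager(std::size_t i) {
  assert(i < kNumWorkers);                // before: i <= kNumWorkers
  return &g_managers[i];
}

Manager* vm_thread_manager() {
  return &g_managers[0];                  // before: &g_managers[kNumWorkers]
}

int main() {
  // Every whole-array loop now uses an exclusive bound.
  for (std::size_t i = 0; i < kNumWorkers; ++i) {  // before: i <= kNumWorkers
    worker_manager(i)->work_done = 0;
  }
  return vm_thread_manager()->work_done;
}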
 

src/hotspot/share/gc/parallel/psCompactionManager.cpp (+5 -8)

@@ -70,7 +70,7 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
 
   assert(_manager_array == NULL, "Attempt to initialize twice");
-  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
+  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads, mtGC);
 
   _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
   _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
@@ -84,9 +84,6 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
     region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
   }
 
-  // The VMThread gets its own ParCompactionManager, which is not available
-  // for work stealing.
-  _manager_array[parallel_gc_threads] = new ParCompactionManager();
   assert(ParallelScavengeHeap::heap()->workers().max_workers() != 0,
          "Not initialized?");
 
@@ -97,14 +94,14 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
 
 void ParCompactionManager::reset_all_bitmap_query_caches() {
   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
-  for (uint i=0; i<=parallel_gc_threads; i++) {
+  for (uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i]->reset_bitmap_query_cache();
   }
 }
 
 void ParCompactionManager::flush_all_string_dedup_requests() {
   uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
-  for (uint i=0; i<=parallel_gc_threads; i++) {
+  for (uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i]->flush_string_dedup_requests();
   }
 }
@@ -184,14 +181,14 @@ void ParCompactionManager::remove_all_shadow_regions() {
 #ifdef ASSERT
 void ParCompactionManager::verify_all_marking_stack_empty() {
   uint parallel_gc_threads = ParallelGCThreads;
-  for (uint i = 0; i <= parallel_gc_threads; i++) {
+  for (uint i = 0; i < parallel_gc_threads; i++) {
     assert(_manager_array[i]->marking_stacks_empty(), "Marking stack should be empty");
   }
 }
 
 void ParCompactionManager::verify_all_region_stack_empty() {
   uint parallel_gc_threads = ParallelGCThreads;
-  for (uint i = 0; i <= parallel_gc_threads; i++) {
+  for (uint i = 0; i < parallel_gc_threads; i++) {
     assert(_manager_array[i]->region_stack()->is_empty(), "Region stack should be empty");
   }
 }
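The loop edits above follow mechanically from the smaller allocation: with parallel_gc_threads slots the valid indices run from 0 to parallel_gc_threads - 1, so every inclusive <= bound becomes an exclusive <; keeping an inclusive bound against the new array would read one element past its end.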

src/hotspot/share/gc/parallel/psCompactionManager.hpp (+3 -1)

@@ -147,7 +147,9 @@ class ParCompactionManager : public CHeapObj<mtGC> {
 
   RegionTaskQueue* region_stack() { return &_region_stack; }
 
-  static ParCompactionManager* get_vmthread_cm() { return _manager_array[ParallelGCThreads]; }
+  // Get the compaction manager when doing evacuation work from the VM thread.
+  // Simply use the first compaction manager here.
+  static ParCompactionManager* get_vmthread_cm() { return _manager_array[0]; }
 
   ParCompactionManager();
src/hotspot/share/gc/parallel/psParallelCompact.cpp (+5 -25)

@@ -1595,8 +1595,7 @@ void PSParallelCompact::summary_phase_msg(SpaceId dst_space_id,
 }
 #endif // #ifndef PRODUCT
 
-void PSParallelCompact::summary_phase(ParCompactionManager* cm,
-                                      bool maximum_compaction)
+void PSParallelCompact::summary_phase(bool maximum_compaction)
 {
   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
 
@@ -1756,9 +1755,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 
   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
 
-  // Get the compaction manager reserved for the VM thread.
-  ParCompactionManager* const vmthread_cm = ParCompactionManager::get_vmthread_cm();
-
   {
     const uint active_workers =
       WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
@@ -1787,11 +1783,11 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 
     ref_processor()->start_discovery(maximum_heap_compaction);
 
-    marking_phase(vmthread_cm, &_gc_tracer);
+    marking_phase(&_gc_tracer);
 
     bool max_on_system_gc = UseMaximumCompactionOnSystemGC
                             && GCCause::is_user_requested_gc(gc_cause);
-    summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
+    summary_phase(maximum_heap_compaction || max_on_system_gc);
 
 #if COMPILER2_OR_JVMCI
     assert(DerivedPointerTable::is_active(), "Sanity");
@@ -2063,8 +2059,7 @@ class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
   }
 };
 
-void PSParallelCompact::marking_phase(ParCompactionManager* cm,
-                                      ParallelOldTracer *gc_tracer) {
+void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
   // Recursively traverse all live objects and mark them
   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
 
@@ -2125,19 +2120,6 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
   _gc_tracer.report_object_count_after_gc(is_alive_closure());
 }
 
-#ifdef ASSERT
-void PCAdjustPointerClosure::verify_cm(ParCompactionManager* cm) {
-  assert(cm != NULL, "associate ParCompactionManage should not be NULL");
-  auto vmthread_cm = ParCompactionManager::get_vmthread_cm();
-  if (Thread::current()->is_VM_thread()) {
-    assert(cm == vmthread_cm, "VM threads should use ParCompactionManager from get_vmthread_cm()");
-  } else {
-    assert(Thread::current()->is_Worker_thread(), "Must be a GC thread");
-    assert(cm != vmthread_cm, "GC threads should use ParCompactionManager from gc_thread_compaction_manager()");
-  }
-}
-#endif
-
 class PSAdjustTask final : public WorkerTask {
   SubTasksDone _sub_tasks;
   WeakProcessor::Task _weak_proc_task;
@@ -2528,9 +2510,7 @@ void PSParallelCompact::compact() {
 
   {
     GCTraceTime(Trace, gc, phases) tm("Deferred Updates", &_gc_timer);
-    // Update the deferred objects, if any. In principle, any compaction
-    // manager can be used. However, since the current thread is VM thread, we
-    // use the rightful one to keep the verification logic happy.
+    // Update the deferred objects, if any.
     ParCompactionManager* cm = ParCompactionManager::get_vmthread_cm();
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       update_deferred_objects(cm, SpaceId(id));
src/hotspot/share/gc/parallel/psParallelCompact.hpp (+2 -3)

@@ -1062,8 +1062,7 @@ class PSParallelCompact : AllStatic {
   static void post_compact();
 
   // Mark live objects
-  static void marking_phase(ParCompactionManager* cm,
-                            ParallelOldTracer *gc_tracer);
+  static void marking_phase(ParallelOldTracer *gc_tracer);
 
   // Compute the dense prefix for the designated space. This is an experimental
   // implementation currently not used in production.
@@ -1123,7 +1122,7 @@ class PSParallelCompact : AllStatic {
 
   static void summarize_spaces_quick();
   static void summarize_space(SpaceId id, bool maximum_compaction);
-  static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
+  static void summary_phase(bool maximum_compaction);
 
   // Adjust addresses in roots. Does not adjust addresses in heap.
   static void adjust_roots();

src/hotspot/share/gc/parallel/psParallelCompact.inline.hpp (+1 -5)

@@ -126,9 +126,7 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
 
 class PCAdjustPointerClosure: public BasicOopIterateClosure {
 public:
-  PCAdjustPointerClosure(ParCompactionManager* cm) {
-    verify_cm(cm);
-    _cm = cm;
+  PCAdjustPointerClosure(ParCompactionManager* cm) : _cm(cm) {
   }
   template <typename T> void do_oop_nv(T* p) { PSParallelCompact::adjust_pointer(p, _cm); }
   virtual void do_oop(oop* p) { do_oop_nv(p); }
@@ -137,8 +135,6 @@ class PCAdjustPointerClosure: public BasicOopIterateClosure {
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
 private:
   ParCompactionManager* _cm;
-
-  static void verify_cm(ParCompactionManager* cm) NOT_DEBUG_RETURN;
 };
 
 #endif // SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
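With the verify_cm() call gone, the constructor collapses to a plain member-initializer list, the idiomatic C++ form for a field-only constructor, and the NOT_DEBUG_RETURN declaration of verify_cm() goes with it.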

src/hotspot/share/gc/parallel/psPromotionManager.cpp (+6 -6)

@@ -54,7 +54,7 @@ void PSPromotionManager::initialize() {
   _old_gen = heap->old_gen();
   _young_space = heap->young_gen()->to_space();
 
-  const uint promotion_manager_num = ParallelGCThreads + 1;
+  const uint promotion_manager_num = ParallelGCThreads;
 
   // To prevent false sharing, we pad the PSPromotionManagers
   // and make sure that the first instance starts at a cache line.
@@ -95,7 +95,7 @@ PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(uint index)
 
 PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
   assert(_manager_array != NULL, "Sanity");
-  return &_manager_array[ParallelGCThreads];
+  return &_manager_array[0];
 }
 
 void PSPromotionManager::pre_scavenge() {
@@ -104,7 +104,7 @@ void PSPromotionManager::pre_scavenge() {
   _preserved_marks_set->assert_empty();
   _young_space = heap->young_gen()->to_space();
 
-  for(uint i=0; i<ParallelGCThreads+1; i++) {
+  for(uint i=0; i<ParallelGCThreads; i++) {
     manager_array(i)->reset();
   }
 }
@@ -113,7 +113,7 @@ bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
   bool promotion_failure_occurred = false;
 
   TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
-  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
+  for (uint i = 0; i < ParallelGCThreads; i++) {
     PSPromotionManager* manager = manager_array(i);
     assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
     if (manager->_promotion_failed_info.has_failed()) {
@@ -162,7 +162,7 @@ PSPromotionManager::print_taskqueue_stats() {
   TaskQueueStats totals;
   out->print("thr "); TaskQueueStats::print_header(1, out); out->cr();
   out->print("--- "); TaskQueueStats::print_header(2, out); out->cr();
-  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
+  for (uint i = 0; i < ParallelGCThreads; ++i) {
     TaskQueueStats& next = manager_array(i)->_claimed_stack_depth.stats;
     out->print("%3d ", i); next.print(out); out->cr();
     totals += next;
@@ -171,7 +171,7 @@ PSPromotionManager::print_taskqueue_stats() {
 
   const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
   for (uint i = 0; i < hlines; ++i) out->print_cr("%s", pm_stats_hdr[i]);
-  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
+  for (uint i = 0; i < ParallelGCThreads; ++i) {
     manager_array(i)->print_local_stats(out, i);
   }
 }
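PSPromotionManager gets the mirror-image treatment: one manager per GC worker instead of ParallelGCThreads + 1, with vm_thread_promotion_manager() aliasing slot 0. The context lines mention padding the managers to prevent false sharing; a standalone sketch of that idea follows, assuming a 64-byte cache line (HotSpot uses its own padded-array helpers, so this is an illustration, not the actual implementation):

// padding_sketch.cpp -- illustrates the false-sharing padding mentioned in the
// context lines above; not the HotSpot implementation.
#include <cstddef>

const std::size_t kCacheLine = 64;   // assumed cache-line size

struct alignas(kCacheLine) PaddedManager {
  unsigned claimed_depth;            // per-thread promotion state lives here
};

// sizeof is always a multiple of alignof, so each manager occupies whole
// cache lines and neighbouring array elements never share one.
static_assert(sizeof(PaddedManager) % kCacheLine == 0, "padded to cache line");

PaddedManager g_managers[8];         // one per GC worker; slot 0 doubles as the VM thread's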

src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp (+1 -1)

@@ -44,7 +44,7 @@
 
 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
-  assert(index <= ParallelGCThreads, "out of range manager_array access");
+  assert(index < ParallelGCThreads, "out of range manager_array access");
   return &_manager_array[index];
 }
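The tightened bound in this assert is what actually enforces the new sizing: every manager_array() caller must now stay strictly below ParallelGCThreads, which the reworked loops and the slot-0 accessors above all satisfy.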
