/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -71,14 +71,13 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  }
}

-template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
-  assert(verify_ref(ref), "sanity");
-  _refs->push(ref);
+inline void G1ParScanThreadState::push_on_queue(ScannerTask task) {
+  verify_task(task);
+  _task_queue->push(task);
}

-inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
-  assert(has_partial_array_mask(p), "invariant");
-  oop from_obj = clear_partial_array_mask(p);
+inline void G1ParScanThreadState::do_partial_array(PartialArrayScanTask task) {
+  oop from_obj = task.to_source_array();

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
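
Note on the hunk above: it drops the old trick of encoding "partial array" state in spare oop* bits (has_partial_array_mask / clear_partial_array_mask) in favor of an explicit ScannerTask value. A ScannerTask-style type can still fit in one word by tagging the low pointer bits. The following standalone sketch shows the idea only; the names, tag values, and stand-in oop typedefs are assumptions, not the JDK's actual definition:

    #include <cstdint>

    // Stand-ins for the HotSpot types, for illustration only.
    typedef void*    oop;
    typedef uint32_t narrowOop;

    // Hypothetical sketch: a one-word task whose low pointer bits tag it
    // as an oop*, a narrowOop*, or a partial-array task (tags assumed).
    class ScannerTaskSketch {
      static const uintptr_t kNarrowTag  = 1;
      static const uintptr_t kPartialTag = 2;
      static const uintptr_t kTagMask    = 3;
      uintptr_t _value;
      ScannerTaskSketch() : _value(0) {}
    public:
      explicit ScannerTaskSketch(oop* p)       : _value((uintptr_t)p) {}
      explicit ScannerTaskSketch(narrowOop* p) : _value((uintptr_t)p | kNarrowTag) {}

      // A partial-array task stores the source array itself, tagged.
      static ScannerTaskSketch partial_array(oop src) {
        ScannerTaskSketch t;
        t._value = (uintptr_t)src | kPartialTag;
        return t;
      }

      bool is_oop_ptr() const        { return (_value & kTagMask) == 0; }
      bool is_narrow_oop_ptr() const { return (_value & kTagMask) == kNarrowTag; }
      bool is_partial_array() const  { return (_value & kTagMask) == kPartialTag; }

      oop* to_oop_ptr() const              { return (oop*)_value; }
      narrowOop* to_narrow_oop_ptr() const { return (narrowOop*)(_value & ~kTagMask); }
    };

Keeping the task one word wide matters because these values are copied through the GC task queues, which assume cheap by-value entries.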
@@ -105,8 +104,7 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    push_on_queue(from_obj_p);
+    push_on_queue(ScannerTask(PartialArrayScanTask(from_obj)));
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
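
The comment kept in this hunk states the key ordering constraint: the unscanned remainder goes back on the queue before the current range is scanned, so an idle worker can steal the tail instead of waiting. A simplified standalone sketch of that chunked pattern, with a made-up ChunkTask type and queue interface rather than G1's actual PartialArrayScanTask machinery:

    #include <cstddef>

    // Hypothetical chunked array scan: before scanning the current stride,
    // push the unscanned tail so an idle worker can steal it.
    struct ChunkTask {
      int*   array;   // stand-in element type
      size_t start;   // first index of this chunk
      size_t length;  // total array length
    };

    template <typename Queue, typename ScanFn>
    void scan_chunk(ChunkTask task, Queue& queue, ScanFn scan, size_t stride = 64) {
      size_t end = task.start + stride;
      if (end < task.length) {
        // Push the remainder first, mirroring the comment above: another
        // worker may steal it while we scan [start, end).
        queue.push(ChunkTask{task.array, end, task.length});
      } else {
        end = task.length;  // final chunk for this array
      }
      for (size_t i = task.start; i < end; i++) {
        scan(task.array[i]);
      }
    }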
@@ -127,60 +125,50 @@ inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

-inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
-  if (!has_partial_array_mask(ref_to_scan)) {
-    do_oop_evac(ref_to_scan);
+inline void G1ParScanThreadState::dispatch_task(ScannerTask task) {
+  verify_task(task);
+  if (task.is_narrow_oop_ptr()) {
+    do_oop_evac(task.to_narrow_oop_ptr());
+  } else if (task.is_oop_ptr()) {
+    do_oop_evac(task.to_oop_ptr());
  } else {
-    do_oop_partial_array(ref_to_scan);
+    do_partial_array(task.to_partial_array_task());
  }
}

-inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
-  assert(!has_partial_array_mask(ref_to_scan), "NarrowOop* elements should never be partial arrays.");
-  do_oop_evac(ref_to_scan);
-}
-
-inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
-  assert(verify_task(ref), "sanity");
-  if (ref.is_narrow()) {
-    deal_with_reference((narrowOop*)ref);
-  } else {
-    deal_with_reference((oop*)ref);
-  }
-}
-
-void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet* task_queues) {
-  StarTask stolen_task;
+void G1ParScanThreadState::steal_and_trim_queue(ScannerTasksQueueSet* task_queues) {
+  ScannerTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
-    assert(verify_task(stolen_task), "sanity");
-    dispatch_reference(stolen_task);
+    dispatch_task(stolen_task);

-    // We've just processed a reference and we might have made
+    // We've just processed a task and we might have made
    // available new entries on the queues. So we have to make sure
    // we drain the queues as necessary.
    trim_queue();
  }
}

inline bool G1ParScanThreadState::needs_partial_trimming() const {
-  return !_refs->overflow_empty() || _refs->size() > _stack_trim_upper_threshold;
+  return !_task_queue->overflow_empty() ||
+         (_task_queue->size() > _stack_trim_upper_threshold);
}

inline bool G1ParScanThreadState::is_partially_trimmed() const {
-  return _refs->overflow_empty() && _refs->size() <= _stack_trim_lower_threshold;
+  return _task_queue->overflow_empty() &&
+         (_task_queue->size() <= _stack_trim_lower_threshold);
}

inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
-  StarTask ref;
+  ScannerTask task;
  // Drain the overflow stack first, so other threads can potentially steal.
-  while (_refs->pop_overflow(ref)) {
-    if (!_refs->try_push_to_taskqueue(ref)) {
-      dispatch_reference(ref);
+  while (_task_queue->pop_overflow(task)) {
+    if (!_task_queue->try_push_to_taskqueue(task)) {
+      dispatch_task(task);
    }
  }

-  while (_refs->pop_local(ref, threshold)) {
-    dispatch_reference(ref);
+  while (_task_queue->pop_local(task, threshold)) {
+    dispatch_task(task);
  }
}
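
Note on the trimming logic above: needs_partial_trimming and is_partially_trimmed deliberately use two different thresholds as hysteresis. Trimming only starts once the queue grows past _stack_trim_upper_threshold and then drains down to _stack_trim_lower_threshold, so a queue hovering near a single boundary does not trim on every push; the overflow stack is also drained first, preferably back onto the stealable task queue, so other workers can take that work. A minimal sketch of the hysteresis alone, with hypothetical names and a plain std::deque standing in for HotSpot's lock-free task queue:

    #include <cstddef>
    #include <deque>

    // Hypothetical hysteresis trim: start trimming only once the depth
    // exceeds `upper`, then drain down to `lower` before stopping.
    template <typename Task, typename DispatchFn>
    void push_and_trim(std::deque<Task>& queue, Task task,
                       size_t upper, size_t lower, DispatchFn dispatch) {
      queue.push_back(task);
      if (queue.size() <= upper) {
        return;  // below the upper threshold: nothing to do (common case)
      }
      while (queue.size() > lower) {   // drain down to the lower threshold
        Task top = queue.back();
        queue.pop_back();
        dispatch(top);                 // may push more tasks; depth is re-checked
      }
    }

The gap between the two thresholds is what keeps a worker from flip-flopping between scanning and trimming when its queue depth sits right at the trigger point.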
@@ -220,7 +208,7 @@ inline void G1ParScanThreadState::remember_reference_into_optional_region(T* p)
  assert(index < _num_optional_regions,
         "Trying to access optional region idx %u beyond " SIZE_FORMAT, index, _num_optional_regions);
  _oops_into_optional_regions[index].push_oop(p);
-  DEBUG_ONLY(verify_ref(p);)
+  verify_task(p);
}

G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const HeapRegion* hr) {