#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/prefetch.inline.hpp"
inline PSPromotionManager* PSPromotionManager::manager_array (uint index) {
@@ -126,175 +127,186 @@ inline void PSPromotionManager::push_contents(oop obj) {
126
127
obj->oop_iterate_backwards (&pcc);
127
128
}
128
129
}
130
+
131
+ template <bool promote_immediately>
132
+ inline oop PSPromotionManager::copy_to_survivor_space (oop o) {
133
+ assert (should_scavenge (&o), " Sanity" );
134
+
135
+ // NOTE! We must be very careful with any methods that access the mark
136
+ // in o. There may be multiple threads racing on it, and it may be forwarded
137
+ // at any time.
138
+ markWord m = o->mark ();
139
+ if (!m.is_marked ()) {
140
+ return copy_unmarked_to_survivor_space<promote_immediately>(o, m);
141
+ } else {
142
+ // Ensure any loads from the forwardee follow all changes that precede
143
+ // the release-cmpxchg that performed the forwarding, possibly in some
144
+ // other thread.
145
+ OrderAccess::acquire ();
146
+ // Return the already installed forwardee.
147
+ return cast_to_oop (m.decode_pointer ());
148
+ }
149
+ }
150
+
129
151
//
130
152
// This method is pretty bulky. It would be nice to split it up
131
153
// into smaller submethods, but we need to be careful not to hurt
132
154
// performance.
133
155
//
134
156
template <bool promote_immediately>
135
- inline oop PSPromotionManager::copy_to_survivor_space (oop o) {
157
+ inline oop PSPromotionManager::copy_unmarked_to_survivor_space (oop o,
158
+ markWord test_mark) {
136
159
assert (should_scavenge (&o), " Sanity" );
137
160
138
161
oop new_obj = NULL ;
162
+ bool new_obj_is_tenured = false ;
163
+ size_t new_obj_size = o->size ();
139
164
140
- // NOTE! We must be very careful with any methods that access the mark
141
- // in o. There may be multiple threads racing on it, and it may be forwarded
142
- // at any time. Do not use oop methods for accessing the mark!
143
- markWord test_mark = o->mark ();
144
-
145
- // The same test as "o->is_forwarded()"
146
- if (!test_mark.is_marked ()) {
147
- bool new_obj_is_tenured = false ;
148
- size_t new_obj_size = o->size ();
149
-
150
- // Find the objects age, MT safe.
151
- uint age = (test_mark.has_displaced_mark_helper () /* o->has_displaced_mark() */ ) ?
165
+ // Find the objects age, MT safe.
166
+ uint age = (test_mark.has_displaced_mark_helper () /* o->has_displaced_mark() */ ) ?
152
167
test_mark.displaced_mark_helper ().age () : test_mark.age ();
153
168
154
- if (!promote_immediately) {
155
- // Try allocating obj in to-space (unless too old)
156
- if (age < PSScavenge::tenuring_threshold ()) {
157
- new_obj = cast_to_oop (_young_lab.allocate (new_obj_size));
158
- if (new_obj == NULL && !_young_gen_is_full) {
159
- // Do we allocate directly, or flush and refill?
160
- if (new_obj_size > (YoungPLABSize / 2 )) {
161
- // Allocate this object directly
162
- new_obj = cast_to_oop (young_space ()->cas_allocate (new_obj_size));
163
- promotion_trace_event (new_obj, o, new_obj_size, age, false , NULL );
169
+ if (!promote_immediately) {
170
+ // Try allocating obj in to-space (unless too old)
171
+ if (age < PSScavenge::tenuring_threshold ()) {
172
+ new_obj = cast_to_oop (_young_lab.allocate (new_obj_size));
173
+ if (new_obj == NULL && !_young_gen_is_full) {
174
+ // Do we allocate directly, or flush and refill?
175
+ if (new_obj_size > (YoungPLABSize / 2 )) {
176
+ // Allocate this object directly
177
+ new_obj = cast_to_oop (young_space ()->cas_allocate (new_obj_size));
178
+ promotion_trace_event (new_obj, o, new_obj_size, age, false , NULL );
179
+ } else {
180
+ // Flush and fill
181
+ _young_lab.flush ();
182
+
183
+ HeapWord* lab_base = young_space ()->cas_allocate (YoungPLABSize);
184
+ if (lab_base != NULL ) {
185
+ _young_lab.initialize (MemRegion (lab_base, YoungPLABSize));
186
+ // Try the young lab allocation again.
187
+ new_obj = cast_to_oop (_young_lab.allocate (new_obj_size));
188
+ promotion_trace_event (new_obj, o, new_obj_size, age, false , &_young_lab);
164
189
} else {
165
- // Flush and fill
166
- _young_lab.flush ();
167
-
168
- HeapWord* lab_base = young_space ()->cas_allocate (YoungPLABSize);
169
- if (lab_base != NULL ) {
170
- _young_lab.initialize (MemRegion (lab_base, YoungPLABSize));
171
- // Try the young lab allocation again.
172
- new_obj = cast_to_oop (_young_lab.allocate (new_obj_size));
173
- promotion_trace_event (new_obj, o, new_obj_size, age, false , &_young_lab);
174
- } else {
175
- _young_gen_is_full = true ;
176
- }
190
+ _young_gen_is_full = true ;
177
191
}
178
192
}
179
193
}
180
194
}
195
+ }
181
196
182
- // Otherwise try allocating obj tenured
183
- if (new_obj == NULL ) {
197
+ // Otherwise try allocating obj tenured
198
+ if (new_obj == NULL ) {
184
199
#ifndef PRODUCT
185
- if (ParallelScavengeHeap::heap ()->promotion_should_fail ()) {
186
- return oop_promotion_failed (o, test_mark);
187
- }
200
+ if (ParallelScavengeHeap::heap ()->promotion_should_fail ()) {
201
+ return oop_promotion_failed (o, test_mark);
202
+ }
188
203
#endif // #ifndef PRODUCT
189
204
190
- new_obj = cast_to_oop (_old_lab.allocate (new_obj_size));
191
- new_obj_is_tenured = true ;
192
-
193
- if (new_obj == NULL ) {
194
- if (!_old_gen_is_full) {
195
- // Do we allocate directly, or flush and refill?
196
- if (new_obj_size > (OldPLABSize / 2 )) {
197
- // Allocate this object directly
198
- new_obj = cast_to_oop (old_gen ()->allocate (new_obj_size));
199
- promotion_trace_event (new_obj, o, new_obj_size, age, true , NULL );
200
- } else {
201
- // Flush and fill
202
- _old_lab.flush ();
205
+ new_obj = cast_to_oop (_old_lab.allocate (new_obj_size));
206
+ new_obj_is_tenured = true ;
203
207
204
- HeapWord* lab_base = old_gen ()->allocate (OldPLABSize);
205
- if (lab_base != NULL ) {
208
+ if (new_obj == NULL ) {
209
+ if (!_old_gen_is_full) {
210
+ // Do we allocate directly, or flush and refill?
211
+ if (new_obj_size > (OldPLABSize / 2 )) {
212
+ // Allocate this object directly
213
+ new_obj = cast_to_oop (old_gen ()->allocate (new_obj_size));
214
+ promotion_trace_event (new_obj, o, new_obj_size, age, true , NULL );
215
+ } else {
216
+ // Flush and fill
217
+ _old_lab.flush ();
218
+
219
+ HeapWord* lab_base = old_gen ()->allocate (OldPLABSize);
220
+ if (lab_base != NULL ) {
206
221
#ifdef ASSERT
207
- // Delay the initialization of the promotion lab (plab).
208
- // This exposes uninitialized plabs to card table processing.
209
- if (GCWorkerDelayMillis > 0 ) {
210
- os::naked_sleep (GCWorkerDelayMillis);
211
- }
212
- #endif
213
- _old_lab.initialize (MemRegion (lab_base, OldPLABSize));
214
- // Try the old lab allocation again.
215
- new_obj = cast_to_oop (_old_lab.allocate (new_obj_size));
216
- promotion_trace_event (new_obj, o, new_obj_size, age, true , &_old_lab);
222
+ // Delay the initialization of the promotion lab (plab).
223
+ // This exposes uninitialized plabs to card table processing.
224
+ if (GCWorkerDelayMillis > 0 ) {
225
+ os::naked_sleep (GCWorkerDelayMillis);
217
226
}
227
+ #endif
228
+ _old_lab.initialize (MemRegion (lab_base, OldPLABSize));
229
+ // Try the old lab allocation again.
230
+ new_obj = cast_to_oop (_old_lab.allocate (new_obj_size));
231
+ promotion_trace_event (new_obj, o, new_obj_size, age, true , &_old_lab);
218
232
}
219
233
}
234
+ }
220
235
221
- // This is the promotion failed test, and code handling.
222
- // The code belongs here for two reasons. It is slightly
223
- // different than the code below, and cannot share the
224
- // CAS testing code. Keeping the code here also minimizes
225
- // the impact on the common case fast path code.
236
+ // This is the promotion failed test, and code handling.
237
+ // The code belongs here for two reasons. It is slightly
238
+ // different than the code below, and cannot share the
239
+ // CAS testing code. Keeping the code here also minimizes
240
+ // the impact on the common case fast path code.
226
241
227
- if (new_obj == NULL ) {
228
- _old_gen_is_full = true ;
229
- return oop_promotion_failed (o, test_mark);
230
- }
242
+ if (new_obj == NULL ) {
243
+ _old_gen_is_full = true ;
244
+ return oop_promotion_failed (o, test_mark);
231
245
}
232
246
}
247
+ }
233
248
234
- assert (new_obj != NULL , " allocation should have succeeded" );
249
+ assert (new_obj != NULL , " allocation should have succeeded" );
235
250
236
- // Copy obj
237
- Copy::aligned_disjoint_words (cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
251
+ // Copy obj
252
+ Copy::aligned_disjoint_words (cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
238
253
239
- // Now we have to CAS in the header.
240
- // Make copy visible to threads reading the forwardee.
241
- if (o->cas_forward_to (new_obj, test_mark, memory_order_release)) {
242
- // We won any races, we "own" this object.
243
- assert (new_obj == o->forwardee (), " Sanity" );
254
+ // Now we have to CAS in the header.
255
+ // Make copy visible to threads reading the forwardee.
256
+ oop forwardee = o->forward_to_atomic (new_obj, test_mark, memory_order_release);
257
+ if (forwardee == NULL ) { // forwardee is NULL when forwarding is successful
258
+ // We won any races, we "own" this object.
259
+ assert (new_obj == o->forwardee (), " Sanity" );
244
260
245
- // Increment age if obj still in new generation. Now that
246
- // we're dealing with a markWord that cannot change, it is
247
- // okay to use the non mt safe oop methods.
248
- if (!new_obj_is_tenured) {
249
- new_obj->incr_age ();
250
- assert (young_space ()->contains (new_obj), " Attempt to push non-promoted obj" );
251
- }
261
+ // Increment age if obj still in new generation. Now that
262
+ // we're dealing with a markWord that cannot change, it is
263
+ // okay to use the non mt safe oop methods.
264
+ if (!new_obj_is_tenured) {
265
+ new_obj->incr_age ();
266
+ assert (young_space ()->contains (new_obj), " Attempt to push non-promoted obj" );
267
+ }
252
268
253
- // Do the size comparison first with new_obj_size, which we
254
- // already have. Hopefully, only a few objects are larger than
255
- // _min_array_size_for_chunking, and most of them will be arrays.
256
- // So, the is->objArray() test would be very infrequent.
257
- if (new_obj_size > _min_array_size_for_chunking &&
258
- new_obj->is_objArray () &&
259
- PSChunkLargeArrays) {
260
- // we'll chunk it
261
- push_depth (ScannerTask (PartialArrayScanTask (o)));
262
- TASKQUEUE_STATS_ONLY (++_arrays_chunked; ++_array_chunk_pushes);
263
- } else {
264
- // we'll just push its contents
265
- push_contents (new_obj);
266
- }
267
- } else {
268
- // We lost, someone else "owns" this object
269
- guarantee (o->is_forwarded (), " Object must be forwarded if the cas failed." );
270
-
271
- // Try to deallocate the space. If it was directly allocated we cannot
272
- // deallocate it, so we have to test. If the deallocation fails,
273
- // overwrite with a filler object.
274
- if (new_obj_is_tenured) {
275
- if (!_old_lab.unallocate_object (cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
276
- CollectedHeap::fill_with_object (cast_from_oop<HeapWord*>(new_obj), new_obj_size);
277
- }
278
- } else if (!_young_lab.unallocate_object (cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
269
+ log_develop_trace (gc, scavenge)(" {%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}" ,
270
+ new_obj_is_tenured ? " copying" : " tenuring" ,
271
+ new_obj->klass ()->internal_name (),
272
+ p2i ((void *)o), p2i ((void *)new_obj), new_obj->size ());
273
+
274
+ // Do the size comparison first with new_obj_size, which we
275
+ // already have. Hopefully, only a few objects are larger than
276
+ // _min_array_size_for_chunking, and most of them will be arrays.
277
+ // So, the is->objArray() test would be very infrequent.
278
+ if (new_obj_size > _min_array_size_for_chunking &&
279
+ new_obj->is_objArray () &&
280
+ PSChunkLargeArrays) {
281
+ // we'll chunk it
282
+ push_depth (ScannerTask (PartialArrayScanTask (o)));
283
+ TASKQUEUE_STATS_ONLY (++_arrays_chunked; ++_array_chunk_pushes);
284
+ } else {
285
+ // we'll just push its contents
286
+ push_contents (new_obj);
287
+ }
288
+ return new_obj;
289
+ } else {
290
+ // We lost, someone else "owns" this object.
291
+ // Ensure loads from the forwardee follow all changes that preceeded the
292
+ // release-cmpxchg that performed the forwarding in another thread.
293
+ OrderAccess::acquire ();
294
+
295
+ assert (o->is_forwarded (), " Object must be forwarded if the cas failed." );
296
+ assert (o->forwardee () == forwardee, " invariant" );
297
+
298
+ // Try to deallocate the space. If it was directly allocated we cannot
299
+ // deallocate it, so we have to test. If the deallocation fails,
300
+ // overwrite with a filler object.
301
+ if (new_obj_is_tenured) {
302
+ if (!_old_lab.unallocate_object (cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
279
303
CollectedHeap::fill_with_object (cast_from_oop<HeapWord*>(new_obj), new_obj_size);
280
304
}
281
-
282
- // don't update this before the unallocation!
283
- // Using acquire though consume would be accurate for accessing new_obj.
284
- new_obj = o->forwardee_acquire ();
305
+ } else if (!_young_lab.unallocate_object (cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
306
+ CollectedHeap::fill_with_object (cast_from_oop<HeapWord*>(new_obj), new_obj_size);
285
307
}
286
- } else {
287
- assert (o->is_forwarded (), " Sanity" );
288
- new_obj = o->forwardee_acquire ();
308
+ return forwardee;
289
309
}
290
-
291
- // This code must come after the CAS test, or it will print incorrect
292
- // information.
293
- log_develop_trace (gc, scavenge)(" {%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}" ,
294
- should_scavenge (&new_obj) ? " copying" : " tenuring" ,
295
- new_obj->klass ()->internal_name (), p2i ((void *)o), p2i ((void *)new_obj), new_obj->size ());
296
-
297
- return new_obj;
298
310
}
299
311
300
312
// Attempt to "claim" oop at p via CAS, push the new obj if successful
@@ -305,18 +317,7 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
305
317
assert (should_scavenge (p, true ), " revisiting object?" );
306
318
307
319
oop o = RawAccess<IS_NOT_NULL>::oop_load (p);
308
- oop new_obj = o->is_forwarded ()
309
- ? o->forwardee ()
310
- : copy_to_survivor_space<promote_immediately>(o);
311
-
312
- // This code must come after the CAS test, or it will print incorrect
313
- // information.
314
- if (log_develop_is_enabled (Trace, gc, scavenge) && o->is_forwarded ()) {
315
- log_develop_trace (gc, scavenge)(" {%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}" ,
316
- " forwarding" ,
317
- new_obj->klass ()->internal_name (), p2i ((void *)o), p2i ((void *)new_obj), new_obj->size ());
318
- }
319
-
320
+ oop new_obj = copy_to_survivor_space<promote_immediately>(o);
320
321
RawAccess<IS_NOT_NULL>::oop_store (p, new_obj);
321
322
322
323
// We cannot mark without test, as some code passes us pointers
0 commit comments