Skip to content

Commit 83898e6

Browse files
committed Mar 18, 2020
Add possible concurrent GC optimization
1 parent 99b539d commit 83898e6

File tree

1 file changed

+60
-3
lines changed

1 file changed

+60
-3
lines changed
 

‎src/hotspot/cpu/x86/continuationChunk_x86.inline.hpp

+60-3
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,8 @@
2828
#include "memory/iterator.inline.hpp"
2929
#include "runtime/frame.inline.hpp"
3030

31+
#define FIX_DERIVED_POINTERS true
32+
3133
static inline void* reg_to_loc(VMReg reg, intptr_t* sp) {
3234
assert (!reg->is_reg() || reg == rbp->as_VMReg(), "");
3335
return reg->is_reg() ? (void*)(sp - frame::sender_sp_offset) // see frame::update_map_with_saved_link(&map, link_addr);
@@ -77,6 +79,11 @@ static void iterate_derived_pointers(oop chunk, const ImmutableOopMap* oopmap, i
7779
if (base != (oop)NULL) {
7880
assert (!CompressedOops::is_base(base), "");
7981

82+
if (concurrent_gc) {
83+
if (ZAddress::is_good(cast_from_oop<uintptr_t>(base))) // TODO: this is a ZGC-specific optimization
84+
continue;
85+
}
86+
8087
OrderAccess::loadload();
8188
intptr_t derived_int_val = Atomic::load(derived_loc); // *derived_loc;
8289
if (derived_int_val < 0) {
@@ -100,9 +107,50 @@ static void iterate_derived_pointers(oop chunk, const ImmutableOopMap* oopmap, i
100107
OrderAccess::storestore(); // to preserve that we set the offset *before* fixing the base oop
101108
}
102109

110+
// Second pass over a stack chunk's oop map: rewrite derived-pointer slots back
// to absolute pointers after the base oops have been updated by the GC.
//
// iterate_derived_pointers (first pass) encodes each derived-pointer slot as a
// *negative* offset from its base oop before the base is fixed; this function
// decodes that: for every derived_oop_value slot whose stored value is still
// negative, it reconstructs `base + offset` from the (now relocated) base oop.
// The ZAddress::is_good assert below shows this path is ZGC-specific.
// Finally it clears the chunk's gc_mode flag to mark the chunk as fixed.
//
// chunk  - the jdk.internal.misc.StackChunk whose frames are being fixed
// oopmap - the immutable oop map describing oop/derived-oop slots for one frame
// sp     - the frame's stack pointer; slot registers are resolved against it
// cb     - the frame's CodeBlob (currently unused here; kept for signature
//          symmetry with iterate_derived_pointers/iterate_oops)
static void fix_derived_pointers(oop chunk, const ImmutableOopMap* oopmap, intptr_t* sp, CodeBlob* cb) {
  for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    // Only derived-pointer slots need fixing; plain oop slots were handled by
    // the oop closure in iterate_oops.
    if (omv.type() != OopMapValue::derived_oop_value)
      continue;

    // Resolve the stack locations of the derived pointer and of its base oop.
    intptr_t* derived_loc = (intptr_t*)reg_to_loc(omv.reg(), sp);
    intptr_t* base_loc = (intptr_t*)reg_to_loc(omv.content_reg(), sp); // see OopMapDo<OopMapFnT, DerivedOopFnT, ValueFilterT>::walk_derived_pointers1

    // The ordering in the following is crucial
    OrderAccess::loadload();
    oop base = Atomic::load((oop*)base_loc);
    if (base != (oop)NULL) {
      assert (!CompressedOops::is_base(base), "");
      // By the time we run this pass the base must already be a "good" (fixed)
      // ZGC-colored pointer.
      assert (ZAddress::is_good(cast_from_oop<uintptr_t>(base)), "");

      // Re-read the derived slot only after the base load (loadload) so the
      // value we see is consistent with the base we just read.
      OrderAccess::loadload();
      intptr_t offset = Atomic::load(derived_loc); // *derived_loc;
      // Non-negative means the slot already holds an absolute derived pointer
      // (some other thread fixed it, or it was never encoded) — nothing to do.
      if (offset >= 0)
        continue;

      // at this point, we've seen a non-offset value *after* we've read the base, but we write the offset *before* fixing the base,
      // so we are guaranteed that the value in derived_loc is consistent with base (i.e. points into the object).
      if (offset < 0) {
        // Decode: the first pass stored -(derived - base); restore the
        // absolute derived pointer from the relocated base.
        offset = -offset;
        assert (offset >= 0 && offset <= (base->size() << LogHeapWordSize), "");
        Atomic::store((intptr_t*)derived_loc, cast_from_oop<intptr_t>(base) + offset);
      }
#ifdef ASSERT
      else { // DEBUG ONLY
        // NOTE(review): this branch appears unreachable — the `continue` above
        // already skips all offset >= 0 slots, so offset < 0 always holds here.
        // Presumably a leftover from the pre-`continue` structure (compare
        // iterate_derived_pointers); confirm whether the continue or this
        // debug check is the intended form.
        offset = offset - cast_from_oop<intptr_t>(base);
        assert (offset >= 0 && offset <= (base->size() << LogHeapWordSize), "offset: %ld size: %d", offset, (base->size() << LogHeapWordSize));
      }
#endif
    }
  }
  // Publish all derived-slot stores before clearing gc_mode, preserving the
  // invariant that offsets are written *before* the chunk is marked as fixed.
  OrderAccess::storestore(); // to preserve that we set the offset *before* fixing the base oop
  jdk_internal_misc_StackChunk::set_gc_mode(chunk, false);
}
149+
103150
template <class OopClosureType>
104-
static void iterate_oops(OopClosureType* closure, const ImmutableOopMap* oopmap, intptr_t* sp, CodeBlob* cb) {
151+
static bool iterate_oops(OopClosureType* closure, const ImmutableOopMap* oopmap, intptr_t* sp, CodeBlob* cb) {
105152
DEBUG_ONLY(int oops = 0;)
153+
bool mutated = false;
106154
for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) { // see void OopMapDo<OopFnT, DerivedOopFnT, ValueFilterT>::iterate_oops_do
107155
OopMapValue omv = oms.current();
108156
if (omv.type() != OopMapValue::oop_value && omv.type() != OopMapValue::narrowoop_value)
@@ -119,10 +167,13 @@ static void iterate_oops(OopClosureType* closure, const ImmutableOopMap* oopmap,
119167

120168
// if (!SkipNullValue::should_skip(*p))
121169
log_develop_trace(jvmcont)("stack_chunk_iterate_stack narrow: %d reg: %s p: " INTPTR_FORMAT " sp offset: %ld", omv.type() == OopMapValue::narrowoop_value, omv.reg()->name(), p2i(p), (intptr_t*)p - sp);
122-
// DEBUG_ONLY(intptr_t old = *(intptr_t*)p;)
170+
// DEBUG_ONLY(intptr_t old = *(intptr_t*)p;)
171+
intptr_t before = *(intptr_t*)p;
123172
omv.type() == OopMapValue::narrowoop_value ? Devirtualizer::do_oop(closure, (narrowOop*)p) : Devirtualizer::do_oop(closure, (oop*)p);
173+
mutated |= before != *(intptr_t*)p;
124174
}
125175
assert (oops == oopmap->num_oops(), "oops: %d oopmap->num_oops(): %d", oops, oopmap->num_oops());
176+
return mutated;
126177
}
127178

128179
template <class OopClosureType, bool concurrent_gc>
@@ -193,7 +244,13 @@ void Continuation::stack_chunk_iterate_stack(oop chunk, OopClosureType* closure)
193244
iterate_derived_pointers<concurrent_gc>(chunk, oopmap, sp, cb);
194245
}
195246

196-
iterate_oops(closure, oopmap, sp, cb);
247+
bool mutated_oops = iterate_oops(closure, oopmap, sp, cb);
248+
249+
#ifdef FIX_DERIVED_POINTERS
250+
if (concurrent_gc && mutated_oops && jdk_internal_misc_StackChunk::gc_mode(chunk)) { // TODO: this is a ZGC-specific optimization that depends on the one in iterate_derived_pointers
251+
fix_derived_pointers(chunk, oopmap, sp, cb);
252+
}
253+
#endif
197254
}
198255

199256
assert (num_frames >= 0, "");

0 commit comments

Comments
 (0)
Please sign in to comment.