
Commit 430253d

Committed Jan 24, 2022
Deopt-related cleanup
1 parent 6cdfdb1 commit 430253d

6 files changed: +53 -136 lines
 

src/hotspot/share/oops/instanceStackChunkKlass.cpp (+19 -44)

@@ -331,7 +331,7 @@ class BarrierClosure: public OopClosure {
   template <class T> inline void do_oop_work(T* p) {
     oop value = (oop)HeapAccess<>::oop_load(p);
     if (store) HeapAccess<>::oop_store(p, value);
-    log_develop_trace(jvmcont)("barriers_for_oops_in_frame narrow: %d p: " INTPTR_FORMAT " sp offset: " INTPTR_FORMAT, sizeof(T) < sizeof(intptr_t), p2i(p), (intptr_t*)p - _sp);
+    log_develop_trace(jvmcont)("BarrierClosure::do_oop narrow: %d p: " INTPTR_FORMAT " sp offset: " INTPTR_FORMAT, sizeof(T) < sizeof(intptr_t), p2i(p), (intptr_t*)p - _sp);
   }
 };

@@ -396,9 +396,6 @@ class OopOopIterateStackClosure {
     _num_frames++;
     assert (_closure != nullptr, "");

-    assert (mixed || !f.is_deoptimized(), "");
-    if (mixed && f.is_compiled()) f.handle_deopted();
-
     if (Devirtualizer::do_metadata(_closure)) {
       if (f.is_interpreted()) {
         Method* im = f.to_frame().interpreter_frame_method();

@@ -523,14 +520,10 @@ template void InstanceStackChunkKlass::relativize_derived_pointers<true> (const


 template <bool store, bool mixed, typename RegisterMapT>
-void InstanceStackChunkKlass::do_barriers(stackChunkOop chunk, const StackChunkFrameStream<mixed>& f, const RegisterMapT* map) {
+void InstanceStackChunkKlass::do_barriers0(stackChunkOop chunk, const StackChunkFrameStream<mixed>& f, const RegisterMapT* map) {
   // we need to invoke the write barriers so as not to miss oops in old chunks that haven't yet been concurrently scanned
   if (f.is_done()) return;
-  log_develop_trace(jvmcont)("InstanceStackChunkKlass::invoke_barriers sp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT, p2i(f.sp()), p2i(f.pc()));
-
-  if (log_develop_is_enabled(Trace, jvmcont) && !mixed && f.is_interpreted()) f.cb()->print_value_on(tty);
-
-  if (mixed) f.handle_deopted(); // we could freeze deopted frames in slow mode.
+  log_develop_trace(jvmcont)("InstanceStackChunkKlass::do_barriers sp: " INTPTR_FORMAT " pc: " INTPTR_FORMAT, p2i(f.sp()), p2i(f.pc()));

   if (f.is_interpreted()) {
     Method* m = f.to_frame().interpreter_frame_method();

@@ -559,22 +552,17 @@ void InstanceStackChunkKlass::do_barriers(stackChunkOop chunk, const StackChunkF
   }
   OrderAccess::loadload(); // observing the barriers will prevent derived pointers from being derelativized concurrently

-  // if (has_derived) { // we do this in fix_thawed_frame
-  //   derelativize_derived_pointers(f, map);
-  // }
+  // if (has_derived) derelativize_derived_pointers(f, map); // we do this in fix_thawed_frame
 }

-template void InstanceStackChunkKlass::do_barriers<false>(stackChunkOop chunk, const StackChunkFrameStream<true >& f, const RegisterMap* map);
-template void InstanceStackChunkKlass::do_barriers<true> (stackChunkOop chunk, const StackChunkFrameStream<true >& f, const RegisterMap* map);
-template void InstanceStackChunkKlass::do_barriers<false>(stackChunkOop chunk, const StackChunkFrameStream<false>& f, const RegisterMap* map);
-template void InstanceStackChunkKlass::do_barriers<true> (stackChunkOop chunk, const StackChunkFrameStream<false>& f, const RegisterMap* map);
-template void InstanceStackChunkKlass::do_barriers<false>(stackChunkOop chunk, const StackChunkFrameStream<true >& f, const SmallRegisterMap* map);
-template void InstanceStackChunkKlass::do_barriers<true> (stackChunkOop chunk, const StackChunkFrameStream<true >& f, const SmallRegisterMap* map);
-template void InstanceStackChunkKlass::do_barriers<false>(stackChunkOop chunk, const StackChunkFrameStream<false>& f, const SmallRegisterMap* map);
-template void InstanceStackChunkKlass::do_barriers<true> (stackChunkOop chunk, const StackChunkFrameStream<false>& f, const SmallRegisterMap* map);
-
-template void InstanceStackChunkKlass::fix_thawed_frame(stackChunkOop chunk, const frame& f, const RegisterMap* map);
-template void InstanceStackChunkKlass::fix_thawed_frame(stackChunkOop chunk, const frame& f, const SmallRegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<false>(stackChunkOop chunk, const StackChunkFrameStream<true >& f, const RegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<true> (stackChunkOop chunk, const StackChunkFrameStream<true >& f, const RegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<false>(stackChunkOop chunk, const StackChunkFrameStream<false>& f, const RegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<true> (stackChunkOop chunk, const StackChunkFrameStream<false>& f, const RegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<false>(stackChunkOop chunk, const StackChunkFrameStream<true >& f, const SmallRegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<true> (stackChunkOop chunk, const StackChunkFrameStream<true >& f, const SmallRegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<false>(stackChunkOop chunk, const StackChunkFrameStream<false>& f, const SmallRegisterMap* map);
+template void InstanceStackChunkKlass::do_barriers0<true> (stackChunkOop chunk, const StackChunkFrameStream<false>& f, const SmallRegisterMap* map);

 template <bool store>
 class DoBarriersStackClosure {

@@ -584,20 +572,20 @@ class DoBarriersStackClosure {

   template <bool mixed, typename RegisterMapT>
   bool do_frame(const StackChunkFrameStream<mixed>& f, const RegisterMapT* map) {
-    InstanceStackChunkKlass::do_barriers<store>(_chunk, f, map);
+    InstanceStackChunkKlass::do_barriers0<store>(_chunk, f, map);
     return true;
   }
 };

-template void InstanceStackChunkKlass::do_barriers<false>(stackChunkOop chunk);
-template void InstanceStackChunkKlass::do_barriers<true>(stackChunkOop chunk);
-
 template <bool store>
 void InstanceStackChunkKlass::do_barriers(stackChunkOop chunk) {
   DoBarriersStackClosure<store> closure(chunk);
   chunk->iterate_stack(&closure);
 }

+template void InstanceStackChunkKlass::do_barriers<false>(stackChunkOop chunk);
+template void InstanceStackChunkKlass::do_barriers<true>(stackChunkOop chunk);
+
 #ifdef ASSERT
 template<class P>
 static inline oop safe_load(P *addr) {

@@ -698,22 +686,6 @@ void InstanceStackChunkKlass::build_bitmap(stackChunkOop chunk) {
   chunk->set_gc_mode(true); // must be set *after* the above closure
 }

-// template <bool store>
-// class BarriersIterateStackClosure {
-// public:
-//   template <bool mixed, typename RegisterMapT>
-//   bool do_frame(const StackChunkFrameStream<mixed>& f, const RegisterMapT* map) {
-//     InstanceStackChunkKlass::barriers_for_oops_in_frame<mixed, store>(f, map);
-//     return true;
-//   }
-// };
-
-// template <bool store>
-// void InstanceStackChunkKlass::barriers_for_oops_in_chunk(stackChunkOop chunk) {
-//   BarriersIterateStackClosure<store> frame_closure;
-//   chunk->iterate_stack(&frame_closure);
-// }
-
 // NOINLINE void InstanceStackChunkKlass::fix_chunk(stackChunkOop chunk) {
 //   log_develop_trace(jvmcont)("fix_stack_chunk young: %d", !chunk->requires_barriers());
 //   FixChunkIterateStackClosure frame_closure(chunk);

@@ -740,6 +712,9 @@ void InstanceStackChunkKlass::fix_thawed_frame(stackChunkOop chunk, const frame&
   }
 }

+template void InstanceStackChunkKlass::fix_thawed_frame(stackChunkOop chunk, const frame& f, const RegisterMap* map);
+template void InstanceStackChunkKlass::fix_thawed_frame(stackChunkOop chunk, const frame& f, const SmallRegisterMap* map);
+
 #ifdef ASSERT

 template <typename OopT>
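Note on the rename above: do_barriers is split into a public wrapper (now defined inline in instanceStackChunkKlass.inline.hpp) and the out-of-line worker do_barriers0, which keeps the explicit instantiations. Because the worker template is defined in a .cpp file, every combination used by other translation units must be explicitly instantiated, which is why all eight instantiation lines are renamed along with it. Below is a minimal, self-contained sketch of that pattern; the Chunk type and function bodies are hypothetical stand-ins, not HotSpot's:

    #include <cstdio>

    struct Chunk { int frames = 0; };

    template <bool store>
    void do_barriers0(Chunk& c) {            // out-of-line worker (normally in a .cpp)
      if (store) c.frames++;                 // stand-in for the real barrier work
      std::printf("do_barriers0<%s>\n", store ? "true" : "false");
    }

    template <bool store>
    inline void do_barriers(Chunk& c) {      // public inline wrapper (normally in a header)
      // pre-processing shared by every caller would go here
      do_barriers0<store>(c);
    }

    // Explicit instantiations: without these, a caller in another translation
    // unit that sees only the declaration of do_barriers0 would fail to link.
    template void do_barriers0<false>(Chunk&);
    template void do_barriers0<true>(Chunk&);

    int main() {
      Chunk c;
      do_barriers<true>(c);
      do_barriers<false>(c);
      return c.frames;                       // 1
    }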

src/hotspot/share/oops/instanceStackChunkKlass.hpp (+5 -1)

@@ -60,6 +60,7 @@ class InstanceStackChunkKlass: public InstanceKlass {
   friend class FixChunkIterateStackClosure;
   friend class MarkMethodsStackClosure;
   template <bool concurrent_gc, typename OopClosureType> friend class OopOopIterateStackClosure;
+  template <bool store> friend class DoBarriersStackClosure;

 public:
   static const KlassID ID = InstanceStackChunkKlassID;

@@ -145,7 +146,7 @@ class InstanceStackChunkKlass: public InstanceKlass {
   static void do_barriers(stackChunkOop chunk);

   template <bool store, bool mixed, typename RegisterMapT>
-  static void do_barriers(stackChunkOop chunk, const StackChunkFrameStream<mixed>& f, const RegisterMapT* map);
+  inline static void do_barriers(stackChunkOop chunk, const StackChunkFrameStream<mixed>& f, const RegisterMapT* map);

   template <typename RegisterMapT>
   static void fix_thawed_frame(stackChunkOop chunk, const frame& f, const RegisterMapT* map);

@@ -186,6 +187,9 @@ class InstanceStackChunkKlass: public InstanceKlass {
   template <bool mixed, typename RegisterMapT>
   static void derelativize_derived_pointers(const StackChunkFrameStream<mixed>& f, const RegisterMapT* map);

+  template <bool store, bool mixed, typename RegisterMapT>
+  static void do_barriers0(stackChunkOop chunk, const StackChunkFrameStream<mixed>& f, const RegisterMapT* map);
+
   typedef void (*MemcpyFnT)(void* src, void* dst, size_t count);
   static void resolve_memcpy_functions();
   static MemcpyFnT memcpy_fn_from_stack_to_chunk;

src/hotspot/share/oops/instanceStackChunkKlass.inline.hpp (+9 -18)

@@ -114,19 +114,8 @@ StackChunkFrameStream<mixed>::StackChunkFrameStream(stackChunkOop chunk, bool gc
   get_cb();

   if (mixed) {
-    if (!is_done() && is_interpreted()) {
-      _unextended_sp = unextended_sp_for_interpreter_frame();
-    } else {
-      _unextended_sp = _sp;
-    }
+    _unextended_sp = (!is_done() && is_interpreted()) ? unextended_sp_for_interpreter_frame() : _sp;
     assert (_unextended_sp >= _sp - InstanceStackChunkKlass::metadata_words(), "");
-    // else if (is_compiled()) {
-    //   tty->print_cr(">>>>> XXXX"); os::print_location(tty, (intptr_t)nativeCall_before(pc())->destination());
-    //   assert (NativeCall::is_call_before(pc()) && nativeCall_before(pc()) != nullptr && nativeCall_before(pc())->destination() != nullptr, "");
-    //   if (Interpreter::contains(nativeCall_before(pc())->destination())) { // interpreted callee
-    //     _unextended_sp = unextended_sp_for_interpreter_frame_caller();
-    //   }
-    // }
   }
   DEBUG_ONLY(else _unextended_sp = nullptr;)

@@ -480,6 +469,12 @@ inline BitMap::idx_t InstanceStackChunkKlass::bit_offset(size_t stack_size_in_wo
   return (BitMap::idx_t)((BitsPerWord - (bitmap_size_in_bits(stack_size_in_words) & mask)) & mask);
 }

+template <bool store, bool mixed, typename RegisterMapT>
+void InstanceStackChunkKlass::do_barriers(stackChunkOop chunk, const StackChunkFrameStream<mixed>& f, const RegisterMapT* map) {
+  if (mixed) f.handle_deopted(); // we could freeze deopted frames in slow mode.
+  do_barriers0<store>(chunk, f, map);
+}
+
 template <typename T, class OopClosureType>
 void InstanceStackChunkKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
   assert (obj->is_stackChunk(), "");

@@ -618,13 +613,9 @@ inline void InstanceStackChunkKlass::iterate_stack(stackChunkOop obj, StackChunk
   assert (!f.is_done(), "");
   assert (f.is_compiled(), "");

-  // if (f.sp() + f.frame_size() >= l) {
-  //   log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " stub-caller frame: %d", p2i(this), f.index());
-  //   if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);
-
-  should_continue = closure->template do_frame<mixed>((const StackChunkFrameStream<mixed>&)f, &full_map);
-  // }
+  should_continue = closure->template do_frame<mixed>((const StackChunkFrameStream<mixed>&)f, &full_map);
   f.next(map);
+  f.handle_deopted(); // the stub caller might be deoptimized (as it's not at a call)
 }
 assert (!f.is_stub(), "");
src/hotspot/share/runtime/continuation.cpp (+20 -23)

@@ -54,7 +54,6 @@
 #include "oops/weakHandle.inline.hpp"
 #include "prims/jvmtiDeferredUpdates.hpp"
 #include "prims/jvmtiThreadState.hpp"
-#include "runtime/continuation.inline.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.hpp"
 #include "runtime/frame.inline.hpp"

@@ -198,7 +197,7 @@ class ContMirror {
   ContMirror(oop cont);
   ContMirror(const RegisterMap* map);

-  DEBUG_ONLY(intptr_t hash() { return Thread::current()->is_Java_thread() ? _cont->identity_hash() : -1; })
+  NOT_PRODUCT(intptr_t hash() { return Thread::current()->is_Java_thread() ? _cont->identity_hash() : -1; })

   inline void read();
   inline void write();

@@ -791,7 +790,7 @@ class Freeze {

   _cont.write();

-  DEBUG_ONLY(log_develop_trace(jvmcont)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());)
+  log_develop_trace(jvmcont)("FREEZE CHUNK #" INTPTR_FORMAT " (young)", _cont.hash());
   if (log_develop_is_enabled(Trace, jvmcont)) chunk->print_on(true, tty);

   assert (_cont.chunk_invariant(), "");

@@ -815,7 +814,7 @@
   ResourceMark rm;
 #endif

-  DEBUG_ONLY(log_develop_trace(jvmcont)("freeze_slow #" INTPTR_FORMAT, _cont.hash());)
+  log_develop_trace(jvmcont)("freeze_slow #" INTPTR_FORMAT, _cont.hash());

   assert (_thread->thread_state() == _thread_in_vm || _thread->thread_state() == _thread_blocked, "");

@@ -1247,7 +1246,7 @@ class Freeze {
   if (UNLIKELY(senderf.oop_map() == nullptr)) return freeze_pinned_native; // native frame
   if (UNLIKELY(Compiled::is_owning_locks(_cont.thread(), &map, senderf))) return freeze_pinned_monitor;

-  freeze_result result = recurse_freeze_compiled_frame(senderf, caller, 0, 0);
+  freeze_result result = recurse_freeze_compiled_frame(senderf, caller, 0, 0); // This might be deoptimized
   if (UNLIKELY(result > freeze_ok_bottom)) return result;
   assert (result != freeze_ok_bottom, "");
   assert (!caller.is_interpreted_frame(), "");

@@ -1444,7 +1443,7 @@ static inline int freeze_epilog(JavaThread* thread, ContMirror& cont, bool preem

   thread->set_cont_yield(false);

-  DEBUG_ONLY(log_develop_debug(jvmcont)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());)
+  log_develop_debug(jvmcont)("=== End of freeze cont ### #" INTPTR_FORMAT, cont.hash());

   return 0;
 }

@@ -1492,7 +1491,7 @@ int freeze0(JavaThread* current, intptr_t* const sp, bool preempt) {

   assert (verify_continuation<1>(oopCont), "");
   ContMirror cont(current, oopCont);
-  DEBUG_ONLY(log_develop_debug(jvmcont)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));)
+  log_develop_debug(jvmcont)("FREEZE #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));

   if (jdk_internal_vm_Continuation::critical_section(oopCont) > 0) {
     log_develop_debug(jvmcont)("PINNED due to critical section");

@@ -1816,8 +1815,6 @@ class Thaw {
   static inline void derelativize_interpreted_frame_metadata(const frame& hf, const frame& f);
   static inline void set_interpreter_frame_bottom(const frame& f, intptr_t* bottom);

-  bool should_deoptimize() { return true; /* _thread->is_interp_only_mode(); */ } // TODO PERF
-
 public:
   DEBUG_ONLY(int _mode;)
   DEBUG_ONLY(bool barriers() { return _barriers; })

@@ -2024,7 +2021,6 @@ class Thaw {
   bool last_interpreted = false;
   if (chunk->has_mixed_frames()) {
     last_interpreted = Interpreter::contains(chunk->pc());
-    log_develop_trace(jvmcont)("thaw: preempt; last_interpreted: %d", last_interpreted);
   }

   _stream = StackChunkFrameStream<true>(chunk);

@@ -2167,13 +2163,10 @@ class Thaw {

 template<typename FKind, bool bottom>
 inline void patch(frame& f, const frame& caller) {
-  // assert (_cont.is_empty0() == _cont.is_empty(), "is_empty0: %d is_empty: %d", _cont.is_empty0(), _cont.is_empty());
-  if (bottom && !_cont.is_empty()) {
-    log_develop_trace(jvmcont)("Setting return address to return barrier: " INTPTR_FORMAT, p2i(StubRoutines::cont_returnBarrier()));
-    FKind::patch_pc(caller, StubRoutines::cont_returnBarrier());
-  } else if (bottom || should_deoptimize()) {
-    FKind::patch_pc(caller, caller.raw_pc()); // this patches the return address to the deopt handler if necessary
+  if (bottom) {
+    FKind::patch_pc(caller, _cont.is_empty() ? caller.raw_pc() : StubRoutines::cont_returnBarrier());
   }
+
   patch_pd<FKind, bottom>(f, caller); // TODO R: reevaluate if and when this is necessary -- only bottom and interpreted caller?

   if (FKind::interpreted) {

@@ -2282,9 +2275,13 @@ class Thaw {
     f.cb()->as_nmethod()->run_nmethod_entry_barrier();
   }

-  if (f.is_deoptimized_frame()) { // TODO PERF
+  if (f.is_deoptimized_frame()) {
     maybe_set_fastpath(f.sp());
-  } else if (should_deoptimize() && (f.cb()->as_compiled_method()->is_marked_for_deoptimization() || _thread->is_interp_only_mode())) {
+  } else if (_thread->is_interp_only_mode()
+             || (_cont.is_preempted() && f.cb()->as_compiled_method()->is_marked_for_deoptimization())) {
+    // The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
+    // cannot rely on nmethod patching for deopt.
+
     log_develop_trace(jvmcont)("Deoptimizing thawed frame");
     DEBUG_ONLY(Frame::patch_pc(f, nullptr));

@@ -2298,8 +2295,8 @@ class Thaw {
   }

   if (!bottom) {
-    log_develop_trace(jvmcont)("fix thawed caller");
-    InstanceStackChunkKlass::fix_thawed_frame(_cont.tail(), caller, SmallRegisterMap::instance); // can only fix caller once this frame is thawed (due to callee saved regs)
+    // can only fix caller once this frame is thawed (due to callee saved regs)
+    InstanceStackChunkKlass::fix_thawed_frame(_cont.tail(), caller, SmallRegisterMap::instance);
   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
     clear_bitmap_bits(hsp + Compiled::size(hf), added_argsize);
   }

@@ -2324,7 +2321,7 @@ class Thaw {
     assert (!_stream.is_done(), "");
   }

-  recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames);
+  recurse_thaw_compiled_frame(_stream.to_frame(), caller, num_frames); // this could be deoptimized

   DEBUG_ONLY(before_thaw_java_frame(hf, caller, false, num_frames);)

@@ -2441,7 +2438,7 @@ static inline intptr_t* thaw0(JavaThread* thread, const thaw_kind kind) {

   assert (verify_continuation<1>(oopCont), "");
   ContMirror cont(thread, oopCont);
-  DEBUG_ONLY(log_develop_debug(jvmcont)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));)
+  log_develop_debug(jvmcont)("THAW #" INTPTR_FORMAT " " INTPTR_FORMAT, cont.hash(), p2i((oopDesc*)oopCont));

 #ifdef ASSERT
   ContinuationHelper::set_anchor_to_entry(thread, cont.entry());

@@ -2481,7 +2478,7 @@

   // assert (thread->last_continuation()->argsize() == 0 || Continuation::is_return_barrier_entry(*(address*)(thread->last_continuation()->bottom_sender_sp() - SENDER_SP_RET_ADDRESS_OFFSET)), "");
   assert (verify_continuation<3>(cont.mirror()), "");
-  DEBUG_ONLY(log_develop_debug(jvmcont)("=== End of thaw #" INTPTR_FORMAT, cont.hash());)
+  log_develop_debug(jvmcont)("=== End of thaw #" INTPTR_FORMAT, cont.hash());

   return sp;
 }
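Much of the churn in this file drops DEBUG_ONLY(...) wrappers around log_develop_trace/log_develop_debug calls, and ContMirror::hash() correspondingly moves from DEBUG_ONLY to NOT_PRODUCT. The develop-log macros already compile to nothing in product builds, so the extra wrapper was redundant; but whatever the surviving log statements reference, such as hash(), must now also exist in optimized (non-product, non-debug) builds. A simplified model of that macro layering; the build flags and macro bodies here are assumptions for the sketch, not HotSpot's exact definitions:

    #include <cstdio>

    // #define ASSERT            // debug builds only
    #define NOT_PRODUCT_BUILD    // debug *and* optimized builds (assumed flag)

    #ifdef ASSERT
      #define DEBUG_ONLY(code) code
    #else
      #define DEBUG_ONLY(code)
    #endif

    #ifdef NOT_PRODUCT_BUILD
      #define NOT_PRODUCT(code) code
      #define log_develop_trace(...) std::printf(__VA_ARGS__)
    #else
      #define NOT_PRODUCT(code)
      #define log_develop_trace(...) ((void)0)  // compiled out; no wrapper needed
    #endif

    struct Mirror {
      // Were this DEBUG_ONLY, an optimized build would still expand the log
      // call below and fail to compile, since hash() would not exist.
      NOT_PRODUCT(long hash() { return 42; })
    };

    int main() {
      Mirror m;
      log_develop_trace("FREEZE #%ld\n", m.hash());
    }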

src/hotspot/share/runtime/continuation.inline.hpp (-33)

This file was deleted.

src/hotspot/share/runtime/frame_helpers.inline.hpp (-17)

@@ -130,7 +130,6 @@ class Compiled : public NonInterpreted<Compiled> {

   template <typename RegisterMapT>
   static bool is_owning_locks(JavaThread* thread, RegisterMapT* map, const frame& f);
-  static address deopt_original_pc(intptr_t* sp, address pc, CodeBlob* cb);
 };

 DEBUG_ONLY(const char* Compiled::name = "Compiled";)

@@ -292,22 +291,6 @@ inline int NonInterpreted<Self>::num_oops(const frame& f) {
   return f.num_oops() + Self::extra_oops;
 }

-
-address Compiled::deopt_original_pc(intptr_t* sp, address pc, CodeBlob* cb) {
-  // TODO DEOPT: unnecessary in the long term solution of unroll on freeze
-
-  assert (cb != nullptr && cb->is_compiled(), "");
-  CompiledMethod* cm = cb->as_compiled_method();
-  if (cm->is_deopt_pc(pc)) {
-    pc = *(address*)((address)sp + cm->orig_pc_offset());
-    assert(pc != nullptr, "");
-    assert(cm->insts_contains_inclusive(pc), "original PC must be in the main code section of the the compiled method (or must be immediately following it)");
-    assert(!cm->is_deopt_pc(pc), "");
-  }
-
-  return pc;
-}
-
 template<typename RegisterMapT>
 bool Compiled::is_owning_locks(JavaThread* thread, RegisterMapT* map, const frame& f) {
   assert (!f.is_interpreted_frame() && Compiled::is_instance(f), "");
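The deleted Compiled::deopt_original_pc recovered the pre-deoptimization pc of a frame whose pc points at the deopt handler, by reading the value the deopt machinery saved at orig_pc_offset in the frame; with handle_deopted() now applied on the StackChunkFrameStream, the continuation code no longer needs a private copy. A self-contained model of that recovery, with hypothetical types in place of HotSpot's:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct CompiledMethod {
      uintptr_t deopt_handler_pc;
      int       orig_pc_offset;   // byte offset of the saved original pc in the frame
      bool is_deopt_pc(uintptr_t pc) const { return pc == deopt_handler_pc; }
    };

    uintptr_t deopt_original_pc(const unsigned char* sp, uintptr_t pc, const CompiledMethod& cm) {
      if (cm.is_deopt_pc(pc)) {
        std::memcpy(&pc, sp + cm.orig_pc_offset, sizeof pc);  // read the saved pc
        assert(!cm.is_deopt_pc(pc) && "saved pc must be the original, not the handler");
      }
      return pc;
    }

    int main() {
      unsigned char frame[64] = {};
      uintptr_t original = 0x1234;
      std::memcpy(frame + 16, &original, sizeof original);    // deopt saved the pc here
      CompiledMethod cm{/*deopt_handler_pc=*/0x9999, /*orig_pc_offset=*/16};
      assert(deopt_original_pc(frame, 0x9999, cm) == 0x1234); // recovered
      assert(deopt_original_pc(frame, 0x5678, cm) == 0x5678); // untouched if not deopted
    }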
