diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp
index 9003761d00ee1..57132ac24b003 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahGC.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "runtime/os.hpp"
 
@@ -41,7 +42,7 @@ ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
   _implicit_full(0),
   _cycle_counter(0) {
 
-  Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT);
+  Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahGC::_DEGENERATED_LIMIT);
 
   _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
 
@@ -67,8 +68,8 @@ void ShenandoahCollectorPolicy::record_alloc_failure_to_full() {
   _alloc_failure_full++;
 }
 
-void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) {
-  assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity");
+void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahGC::ShenandoahDegenPoint point) {
+  assert(point < ShenandoahGC::_DEGENERATED_LIMIT, "sanity");
   _alloc_failure_degenerated++;
   _degen_points[point]++;
 }
@@ -119,9 +120,9 @@ void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
 
   out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs",                   _success_degenerated_gcs);
   out->print_cr("  " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated);
-  for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) {
+  for (int c = 0; c < ShenandoahGC::_DEGENERATED_LIMIT; c++) {
     if (_degen_points[c] > 0) {
-      const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c);
+      const char* desc = ShenandoahGC::degen_point_to_string((ShenandoahGC::ShenandoahDegenPoint)c);
       out->print_cr("    " SIZE_FORMAT_W(5) " happened at %s",         _degen_points[c], desc);
     }
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
index 2690b872006ce..5dfaf40ad920d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
 #define SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP
 
 #include "gc/shared/gcTrace.hpp"
-#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahGC.hpp"
+#include "gc/shenandoah/shenandoahSharedVariables.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/ostream.hpp"
 
@@ -47,7 +48,7 @@ class ShenandoahCollectorPolicy : public CHeapObj<mtGC> {
   size_t _explicit_full;
   size_t _implicit_concurrent;
   size_t _implicit_full;
-  size_t _degen_points[ShenandoahHeap::_DEGENERATED_LIMIT];
+  size_t _degen_points[ShenandoahGC::_DEGENERATED_LIMIT];
 
   ShenandoahSharedFlag _in_shutdown;
 
@@ -65,7 +66,7 @@ class ShenandoahCollectorPolicy : public CHeapObj<mtGC> {
   void record_success_concurrent();
   void record_success_degenerated();
   void record_success_full();
-  void record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point);
+  void record_alloc_failure_to_degenerated(ShenandoahGC::ShenandoahDegenPoint point);
   void record_alloc_failure_to_full();
   void record_degenerated_upgrade_to_full();
   void record_explicit_to_concurrent();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
new file mode 100644
index 0000000000000..183002d56cc1d
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -0,0 +1,954 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/shared/collectorCounters.hpp"
+#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahLock.hpp"
+#include "gc/shenandoah/shenandoahMark.inline.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+#include "gc/shenandoah/shenandoahWorkGroup.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "prims/jvmtiTagMap.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/events.hpp"
+
+ShenandoahConcurrentGC::ShenandoahConcurrentGC() :
+  _mark(),
+  _degen_point(ShenandoahDegenPoint::_degenerated_unset) {
+}
+
+ShenandoahGC::ShenandoahDegenPoint ShenandoahConcurrentGC::degen_point() const {
+  return _degen_point;
+}
+
+void ShenandoahConcurrentGC::cancel() {
+  ShenandoahConcurrentMark::cancel();
+}
+
+bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+
+  // Reset for upcoming marking
+  entry_reset();
+
+  // Start initial mark under STW
+  vmop_entry_init_mark();
+
+  // Concurrent mark roots
+  entry_mark_roots();
+  if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_outside_cycle)) return false;
+
+  // Continue concurrent mark
+  entry_mark();
+  if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_mark)) return false;
+
+  // Complete marking under STW, and start evacuation
+  vmop_entry_final_mark();
+
+  // Process weak roots that might still point to regions that would be broken by cleanup
+  if (heap->is_concurrent_weak_root_in_progress()) {
+    entry_weak_refs();
+    entry_weak_roots();
+  }
+
+  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
+  // the space. This would be the last action if there is nothing to evacuate.
+  entry_cleanup_early();
+
+  {
+    ShenandoahHeapLocker locker(heap->lock());
+    heap->free_set()->log_status();
+  }
+
+  // Perform concurrent class unloading
+  if (heap->is_concurrent_weak_root_in_progress() &&
+      ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
+    entry_class_unloading();
+  }
+
+  // Processing strong roots
+  // This may be skipped if there is nothing to update/evacuate.
+  // If so, strong_root_in_progress would be unset.
+  if (heap->is_concurrent_strong_root_in_progress()) {
+    entry_strong_roots();
+  }
+
+  // Continue the cycle with evacuation and optional update-refs.
+  // This may be skipped if there is nothing to evacuate.
+  // If so, evac_in_progress would be unset by collection set preparation code.
+  if (heap->is_evacuation_in_progress()) {
+    // Concurrently evacuate
+    entry_evacuate();
+    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_evac)) return false;
+
+    // Perform update-refs phase.
+    vmop_entry_init_updaterefs();
+    entry_updaterefs();
+    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
+
+    // Concurrent update thread roots
+    entry_update_thread_roots();
+    if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) return false;
+
+    vmop_entry_final_updaterefs();
+
+    // Update references freed up collection set, kick the cleanup to reclaim the space.
+    entry_cleanup_complete();
+  } else {
+    // Concurrent weak/strong root flags are unset concurrently. We depend on updateref GC safepoints
+    // to ensure the changes are visible to all mutators before gc cycle is completed.
+    // In case of no evacuation, updateref GC safepoints are skipped. Therefore, we will need
+    // to perform thread handshake to ensure their consistency.
+    entry_rendezvous_roots();
+  }
+
+  return true;
+}
+
+void ShenandoahConcurrentGC::vmop_entry_init_mark() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
+  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
+
+  heap->try_inject_alloc_failure();
+  VM_ShenandoahInitMark op(this);
+  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
+}
+
+void ShenandoahConcurrentGC::vmop_entry_final_mark() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
+  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
+
+  heap->try_inject_alloc_failure();
+  VM_ShenandoahFinalMarkStartEvac op(this);
+  VMThread::execute(&op); // jump to entry_final_mark() under safepoint
+}
+
+void ShenandoahConcurrentGC::vmop_entry_init_updaterefs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
+  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
+
+  heap->try_inject_alloc_failure();
+  VM_ShenandoahInitUpdateRefs op(this);
+  VMThread::execute(&op);
+}
+
+void ShenandoahConcurrentGC::vmop_entry_final_updaterefs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
+  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
+
+  heap->try_inject_alloc_failure();
+  VM_ShenandoahFinalUpdateRefs op(this);
+  VMThread::execute(&op);
+}
+
+void ShenandoahConcurrentGC::entry_init_mark() {
+  const char* msg = init_mark_event_message();
+  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
+                              "init marking");
+
+  op_init_mark();
+}
+
+void ShenandoahConcurrentGC::entry_final_mark() {
+  const char* msg = final_mark_event_message();
+  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
+                              "final marking");
+
+  op_final_mark();
+}
+
+void ShenandoahConcurrentGC::entry_init_updaterefs() {
+  static const char* msg = "Pause Init Update Refs";
+  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
+  EventMark em("%s", msg);
+
+  // No workers used in this phase, no setup required
+  op_init_updaterefs();
+}
+
+void ShenandoahConcurrentGC::entry_final_updaterefs() {
+  static const char* msg = "Pause Final Update Refs";
+  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
+                              "final reference update");
+
+  op_final_updaterefs();
+}
+
+void ShenandoahConcurrentGC::entry_reset() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Concurrent reset";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
+                              "concurrent reset");
+
+  heap->try_inject_alloc_failure();
+  op_reset();
+}
+
+void ShenandoahConcurrentGC::entry_mark_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  const char* msg = "Concurrent marking roots";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
+                              "concurrent marking roots");
+
+  heap->try_inject_alloc_failure();
+  op_mark_roots();
+}
+
+void ShenandoahConcurrentGC::entry_mark() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  const char* msg = conc_mark_event_message();
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
+                              "concurrent marking");
+
+  heap->try_inject_alloc_failure();
+  op_mark();
+}
+
+void ShenandoahConcurrentGC::entry_weak_refs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  static const char* msg = "Concurrent weak references";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
+                              "concurrent weak references");
+
+  heap->try_inject_alloc_failure();
+  op_weak_refs();
+}
+
+void ShenandoahConcurrentGC::entry_weak_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Concurrent weak roots";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
+                              "concurrent weak root");
+
+  heap->try_inject_alloc_failure();
+  op_weak_roots();
+}
+
+void ShenandoahConcurrentGC::entry_class_unloading() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Concurrent class unloading";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
+                              "concurrent class unloading");
+
+  heap->try_inject_alloc_failure();
+  op_class_unloading();
+}
+
+void ShenandoahConcurrentGC::entry_strong_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Concurrent strong roots";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
+  EventMark em("%s", msg);
+
+  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
+                              "concurrent strong root");
+
+  heap->try_inject_alloc_failure();
+  op_strong_roots();
+}
+
+void ShenandoahConcurrentGC::entry_cleanup_early() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Concurrent cleanup";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
+  EventMark em("%s", msg);
+
+  // This phase does not use workers, no need for setup
+  heap->try_inject_alloc_failure();
+  op_cleanup_early();
+}
+
+void ShenandoahConcurrentGC::entry_rendezvous_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Rendezvous roots";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_rendezvous_roots);
+  EventMark em("%s", msg);
+
+  // This phase does not use workers, no need for setup
+  heap->try_inject_alloc_failure();
+  op_rendezvous_roots();
+}
+
+void ShenandoahConcurrentGC::entry_evacuate() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+
+  static const char* msg = "Concurrent evacuation";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
+                              "concurrent evacuation");
+
+  heap->try_inject_alloc_failure();
+  op_evacuate();
+}
+
+void ShenandoahConcurrentGC::entry_update_thread_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+
+  static const char* msg = "Concurrent update thread roots";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
+  EventMark em("%s", msg);
+
+  // No workers used in this phase, no setup required
+  heap->try_inject_alloc_failure();
+  op_update_thread_roots();
+}
+
+void ShenandoahConcurrentGC::entry_updaterefs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Concurrent update references";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
+                              "concurrent reference update");
+
+  heap->try_inject_alloc_failure();
+  op_updaterefs();
+}
+
+void ShenandoahConcurrentGC::entry_cleanup_complete() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
+  static const char* msg = "Concurrent cleanup";
+  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
+  EventMark em("%s", msg);
+
+  // This phase does not use workers, no need for setup
+  heap->try_inject_alloc_failure();
+  op_cleanup_complete();
+}
+
+void ShenandoahConcurrentGC::op_reset() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  if (ShenandoahPacing) {
+    heap->pacer()->setup_for_reset();
+  }
+
+  heap->prepare_gc();
+}
+
+class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahMarkingContext* const _ctx;
+public:
+  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
+
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
+    if (r->is_active()) {
+      // Check if region needs updating its TAMS. We have updated it already during concurrent
+      // reset, so it is very likely we don't need to do another write here.
+      if (_ctx->top_at_mark_start(r) != r->top()) {
+        _ctx->capture_top_at_mark_start(r);
+      }
+    } else {
+      assert(_ctx->top_at_mark_start(r) == r->top(),
+             "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
+    }
+  }
+
+  bool is_thread_safe() { return true; }
+};
+
+void ShenandoahConcurrentGC::op_init_mark() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
+  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
+
+  assert(heap->marking_context()->is_bitmap_clear(), "need clear marking bitmap");
+  assert(!heap->marking_context()->is_complete(), "should not be complete");
+  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
+
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_before_concmark();
+  }
+
+  if (VerifyBeforeGC) {
+    Universe::verify();
+  }
+
+  heap->set_concurrent_mark_in_progress(true);
+
+  // We need to reset all TLABs because they might be below the TAMS, and we need to mark
+  // the objects in them. Do not let mutators allocate any new objects in their current TLABs.
+  // It is also a good place to resize the TLAB sizes for future allocations.
+  if (UseTLAB) {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_manage_tlabs);
+    heap->tlabs_retire(ResizeTLAB);
+  }
+
+  {
+    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
+    ShenandoahInitMarkUpdateRegionStateClosure cl;
+    heap->parallel_heap_region_iterate(&cl);
+  }
+
+  // Weak reference processing
+  ShenandoahReferenceProcessor* rp = heap->ref_processor();
+  rp->reset_thread_locals();
+  rp->set_soft_reference_policy(heap->soft_ref_policy()->should_clear_all_soft_refs());
+
+  // Make above changes visible to worker threads
+  OrderAccess::fence();
+  // Arm nmethods for concurrent marking. When a nmethod is about to be executed,
+  // we need to make sure that all its metadata are marked. The alternative is to remark
+  // thread roots at the final mark pause, but it can be a potential latency killer.
+  if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
+    ShenandoahCodeRoots::arm_nmethods();
+  }
+
+  _mark.mark_stw_roots();
+
+  if (ShenandoahPacing) {
+    heap->pacer()->setup_for_mark();
+  }
+}
+
+void ShenandoahConcurrentGC::op_mark_roots() {
+  _mark.mark_concurrent_roots();
+}
+
+void ShenandoahConcurrentGC::op_mark() {
+  _mark.concurrent_mark();
+}
+
+void ShenandoahConcurrentGC::op_final_mark() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
+  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
+
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_roots_no_forwarded();
+  }
+
+  if (!heap->cancelled_gc()) {
+    _mark.finish_mark();
+    assert(!heap->cancelled_gc(), "STW mark cannot OOM");
+
+    // Notify JVMTI that the tagmap table will need cleaning.
+    JvmtiTagMap::set_needs_cleaning();
+
+    heap->prepare_regions_and_collection_set(true /*concurrent*/);
+
+    // Has to be done after cset selection
+    heap->prepare_concurrent_roots();
+
+    if (!heap->collection_set()->is_empty()) {
+      if (ShenandoahVerify) {
+        heap->verifier()->verify_before_evacuation();
+      }
+
+      heap->set_evacuation_in_progress(true);
+      // From here on, we need to update references.
+      heap->set_has_forwarded_objects(true);
+
+      // Arm nmethods for concurrent processing
+      ShenandoahCodeRoots::arm_nmethods();
+
+      // Should be gone after 8212879 and concurrent stack processing
+      heap->evacuate_and_update_roots();
+
+      // Notify JVMTI that oops are changed.
+      JvmtiTagMap::set_needs_rehashing();
+
+      if (ShenandoahVerify) {
+        heap->verifier()->verify_during_evacuation();
+      }
+
+      if (ShenandoahPacing) {
+        heap->pacer()->setup_for_evac();
+      }
+    } else {
+      if (ShenandoahVerify) {
+        heap->verifier()->verify_after_concmark();
+      }
+
+      if (VerifyAfterGC) {
+        Universe::verify();
+      }
+    }
+  }
+}
+
+void ShenandoahConcurrentGC::op_weak_refs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
+  // Concurrent weak refs processing
+  ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_refs_work);
+  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs_work);
+  heap->ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs_work, heap->workers(), true /* concurrent */);
+}
+
+class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
+private:
+  ShenandoahHeap* const _heap;
+  ShenandoahMarkingContext* const _mark_context;
+  bool  _evac_in_progress;
+  Thread* const _thread;
+
+public:
+  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
+  void do_oop(oop* p);
+  void do_oop(narrowOop* p);
+};
+
+ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
+  _heap(ShenandoahHeap::heap()),
+  _mark_context(ShenandoahHeap::heap()->marking_context()),
+  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
+  _thread(Thread::current()) {
+}
+
+void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
+  const oop obj = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(obj)) {
+    if (!_mark_context->is_marked(obj)) {
+      shenandoah_assert_correct(p, obj);
+      Atomic::cmpxchg(p, obj, oop(NULL));
+    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
+      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+      if (resolved == obj) {
+        resolved = _heap->evacuate_object(obj, _thread);
+      }
+      Atomic::cmpxchg(p, obj, resolved);
+      assert(_heap->cancelled_gc() ||
+             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
+             "Sanity");
+    }
+  }
+}
+
+void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
+  ShouldNotReachHere();
+}
+
+class ShenandoahIsCLDAliveClosure : public CLDClosure {
+public:
+  void do_cld(ClassLoaderData* cld) {
+    cld->is_alive();
+  }
+};
+
+class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
+public:
+  void do_nmethod(nmethod* n) {
+    n->is_unloading();
+  }
+};
+
+// This task not only evacuates/updates marked weak roots, but also "NULL"
+// dead weak roots.
+class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
+private:
+  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;
+
+  // Roots related to concurrent class unloading
+  ShenandoahClassLoaderDataRoots<true /* concurrent */, true /* single thread*/>
+                                             _cld_roots;
+  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
+  ShenandoahConcurrentStringDedupRoots       _dedup_roots;
+  ShenandoahPhaseTimings::Phase              _phase;
+  bool                                       _concurrent_class_unloading;
+
+public:
+  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
+    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
+    _vm_roots(phase),
+    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
+    _nmethod_itr(ShenandoahCodeRoots::table()),
+    _dedup_roots(phase),
+    _phase(phase),
+    _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
+    if (_concurrent_class_unloading) {
+      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      _nmethod_itr.nmethods_do_begin();
+    }
+
+    _dedup_roots.prologue();
+  }
+
+  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
+    _dedup_roots.epilogue();
+
+    if (_concurrent_class_unloading) {
+      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      _nmethod_itr.nmethods_do_end();
+    }
+    // Notify runtime data structures of potentially dead oops
+    _vm_roots.report_num_dead();
+  }
+
+  void work(uint worker_id) {
+    ShenandoahConcurrentWorkerSession worker_session(worker_id);
+    {
+      ShenandoahEvacOOMScope oom;
+      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
+      // may race against OopStorage::release() calls.
+      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
+      _vm_roots.oops_do(&cl, worker_id);
+
+      // String dedup weak roots
+      ShenandoahForwardedIsAliveClosure is_alive;
+      ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
+      _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
+    }
+
+    // If we are going to perform concurrent class unloading later on, we need to
+    // cleanup the weak oops in CLD and determine the nmethod's unloading state, so that we
+    // can cleanup immediate garbage sooner.
+    if (_concurrent_class_unloading) {
+      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the
+      // CLD's holder or evacuate it.
+      {
+        ShenandoahIsCLDAliveClosure is_cld_alive;
+        _cld_roots.cld_do(&is_cld_alive, worker_id);
+      }
+
+      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
+      // The closure calls nmethod->is_unloading(). The is_unloading
+      // state is cached, therefore, during concurrent class unloading phase,
+      // we will not touch the metadata of unloading nmethods
+      {
+        ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+        ShenandoahIsNMethodAliveClosure is_nmethod_alive;
+        _nmethod_itr.nmethods_do(&is_nmethod_alive);
+      }
+    }
+  }
+};
+
+void ShenandoahConcurrentGC::op_weak_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
+  // Concurrent weak root processing
+  {
+    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
+    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
+    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
+    heap->workers()->run_task(&task);
+  }
+
+  // Perform handshake to flush out dead oops
+  {
+    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
+    heap->rendezvous_threads();
+  }
+
+  if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
+    heap->set_concurrent_weak_root_in_progress(false);
+  }
+}
+
+void ShenandoahConcurrentGC::op_class_unloading() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert (heap->is_concurrent_weak_root_in_progress() &&
+          ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
+          "Checked by caller");
+  heap->do_class_unloading();
+  heap->set_concurrent_weak_root_in_progress(false);
+}
+
+class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
+private:
+  BarrierSetNMethod* const               _bs;
+  ShenandoahEvacuateUpdateRootsClosure<> _cl;
+
+public:
+  ShenandoahEvacUpdateCodeCacheClosure() :
+    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
+    _cl() {
+  }
+
+  void do_nmethod(nmethod* n) {
+    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
+    ShenandoahReentrantLocker locker(data->lock());
+    // Setup EvacOOM scope below reentrant lock to avoid deadlock with
+    // nmethod_entry_barrier
+    ShenandoahEvacOOMScope oom;
+    data->oops_do(&_cl, true/*fix relocation*/);
+    _bs->disarm(n);
+  }
+};
+
+class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
+private:
+  ShenandoahPhaseTimings::Phase                 _phase;
+  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
+  ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
+  ShenandoahConcurrentNMethodIterator           _nmethod_itr;
+  bool                                          _process_codecache;
+
+public:
+  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
+    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
+    _phase(phase),
+    _vm_roots(phase),
+    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
+    _nmethod_itr(ShenandoahCodeRoots::table()),
+    _process_codecache(!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
+    if (_process_codecache) {
+      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      _nmethod_itr.nmethods_do_begin();
+    }
+  }
+
+  ~ShenandoahConcurrentRootsEvacUpdateTask() {
+    if (_process_codecache) {
+      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      _nmethod_itr.nmethods_do_end();
+    }
+  }
+
+  void work(uint worker_id) {
+    ShenandoahConcurrentWorkerSession worker_session(worker_id);
+    {
+      ShenandoahEvacOOMScope oom;
+      {
+        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
+        // may race against OopStorage::release() calls.
+        ShenandoahEvacUpdateOopStorageRootsClosure cl;
+        _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
+      }
+
+      {
+        ShenandoahEvacuateUpdateRootsClosure<> cl;
+        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
+        _cld_roots.cld_do(&clds, worker_id);
+      }
+    }
+
+    // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
+    if (_process_codecache) {
+      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
+      ShenandoahEvacUpdateCodeCacheClosure cl;
+      _nmethod_itr.nmethods_do(&cl);
+    }
+  }
+};
+
+void ShenandoahConcurrentGC::op_strong_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(heap->is_concurrent_strong_root_in_progress(), "Checked by caller");
+  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
+  heap->workers()->run_task(&task);
+  heap->set_concurrent_strong_root_in_progress(false);
+}
+
+void ShenandoahConcurrentGC::op_cleanup_early() {
+  ShenandoahHeap::heap()->free_set()->recycle_trash();
+}
+
+void ShenandoahConcurrentGC::op_rendezvous_roots() {
+  ShenandoahHeap::heap()->rendezvous_threads();
+}
+
+void ShenandoahConcurrentGC::op_evacuate() {
+  ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
+}
+
+void ShenandoahConcurrentGC::op_init_updaterefs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  heap->set_evacuation_in_progress(false);
+  heap->prepare_update_heap_references(true /*concurrent*/);
+  heap->set_update_refs_in_progress(true);
+
+  if (ShenandoahPacing) {
+    heap->pacer()->setup_for_updaterefs();
+  }
+}
+
+void ShenandoahConcurrentGC::op_updaterefs() {
+  ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
+}
+
+class ShenandoahUpdateThreadClosure : public HandshakeClosure {
+private:
+  ShenandoahUpdateRefsClosure _cl;
+public:
+  ShenandoahUpdateThreadClosure();
+  void do_thread(Thread* thread);
+};
+
+ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
+  HandshakeClosure("Shenandoah Update Thread Roots") {
+}
+
+void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
+  if (thread->is_Java_thread()) {
+    JavaThread* jt = thread->as_Java_thread();
+    ResourceMark rm;
+    jt->oops_do(&_cl, NULL);
+  }
+}
+
+void ShenandoahConcurrentGC::op_update_thread_roots() {
+  ShenandoahUpdateThreadClosure cl;
+  Handshake::execute(&cl);
+}
+
+void ShenandoahConcurrentGC::op_final_updaterefs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
+  assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references");
+
+  heap->finish_concurrent_roots();
+
+  // Clear cancelled GC, if set. On cancellation path, the block before would handle
+  // everything.
+  if (heap->cancelled_gc()) {
+    heap->clear_cancelled_gc();
+  }
+
+  // Has to be done before cset is cleared
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_roots_in_to_space();
+  }
+
+  heap->update_heap_region_states(true /*concurrent*/);
+
+  heap->set_update_refs_in_progress(false);
+  heap->set_has_forwarded_objects(false);
+
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_after_updaterefs();
+  }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
+
+  heap->rebuild_free_set(true /*concurrent*/);
+}
+
+void ShenandoahConcurrentGC::op_cleanup_complete() {
+  ShenandoahHeap::heap()->free_set()->recycle_trash();
+}
+
+bool ShenandoahConcurrentGC::check_cancellation_and_abort(ShenandoahDegenPoint point) {
+  if (ShenandoahHeap::heap()->cancelled_gc()) {
+    _degen_point = point;
+    return true;
+  }
+  return false;
+}
+
+const char* ShenandoahConcurrentGC::init_mark_event_message() const {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
+  if (heap->unload_classes()) {
+    return "Pause Init Mark (unload classes)";
+  } else {
+    return "Pause Init Mark";
+  }
+}
+
+const char* ShenandoahConcurrentGC::final_mark_event_message() const {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
+  if (heap->unload_classes()) {
+    return "Pause Final Mark (unload classes)";
+  } else {
+    return "Pause Final Mark";
+  }
+}
+
+const char* ShenandoahConcurrentGC::conc_mark_event_message() const {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  assert(!heap->has_forwarded_objects(), "Should not have forwarded objects here");
+  if (heap->unload_classes()) {
+    return "Concurrent marking (unload classes)";
+  } else {
+    return "Concurrent marking";
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
new file mode 100644
index 0000000000000..36a61ca58f140
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTGC_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTGC_HPP
+
+#include "gc/shared/gcCause.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc/shenandoah/shenandoahGC.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+
+class VM_ShenandoahInitMark;
+class VM_ShenandoahFinalMarkStartEvac;
+class VM_ShenandoahInitUpdateRefs;
+class VM_ShenandoahFinalUpdateRefs;
+
+class ShenandoahConcurrentGC : public ShenandoahGC {
+  friend class VM_ShenandoahInitMark;
+  friend class VM_ShenandoahFinalMarkStartEvac;
+  friend class VM_ShenandoahInitUpdateRefs;
+  friend class VM_ShenandoahFinalUpdateRefs;
+
+private:
+  ShenandoahConcurrentMark  _mark;
+  ShenandoahDegenPoint      _degen_point;
+
+public:
+  ShenandoahConcurrentGC();
+  bool collect(GCCause::Cause cause);
+  ShenandoahDegenPoint degen_point() const;
+
+  // Cancel ongoing concurrent GC
+  static void cancel();
+private:
+  // Entry points to STW GC operations, these cause a related safepoint, that then
+  // call the entry method below
+  void vmop_entry_init_mark();
+  void vmop_entry_final_mark();
+  void vmop_entry_init_updaterefs();
+  void vmop_entry_final_updaterefs();
+
+  // Entry methods to normally STW GC operations. These set up logging, monitoring
+  // and workers for net VM operation
+  void entry_init_mark();
+  void entry_final_mark();
+  void entry_init_updaterefs();
+  void entry_final_updaterefs();
+
+  // Entry methods to normally concurrent GC operations. These set up logging, monitoring
+  // for concurrent operation.
+  void entry_reset();
+  void entry_mark_roots();
+  void entry_mark();
+  void entry_weak_refs();
+  void entry_weak_roots();
+  void entry_class_unloading();
+  void entry_strong_roots();
+  void entry_cleanup_early();
+  void entry_rendezvous_roots();
+  void entry_evacuate();
+  void entry_update_thread_roots();
+  void entry_updaterefs();
+  void entry_cleanup_complete();
+
+  // Actual work for the phases
+  void op_reset();
+  void op_init_mark();
+  void op_mark_roots();
+  void op_mark();
+  void op_final_mark();
+  void op_weak_refs();
+  void op_weak_roots();
+  void op_class_unloading();
+  void op_strong_roots();
+  void op_cleanup_early();
+  void op_rendezvous_roots();
+  void op_evacuate();
+  void op_init_updaterefs();
+  void op_updaterefs();
+  void op_update_thread_roots();
+  void op_final_updaterefs();
+  void op_cleanup_complete();
+
+  // Messages for GC trace events, they have to be immortal for
+  // passing around the logging/tracing systems
+  const char* init_mark_event_message() const;
+  const char* final_mark_event_message() const;
+  const char* conc_mark_event_message() const;
+
+  // Check GC cancellation and abort concurrent GC
+  bool check_cancellation_and_abort(ShenandoahDegenPoint point);
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTGC_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
index bbe1434c2effb..ccad0e12163cd 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -223,72 +223,6 @@ void ShenandoahConcurrentMark::mark_stw_roots() {
   workers->run_task(&mark_roots);
 }
 
-void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
-  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
-  assert(root_phase == ShenandoahPhaseTimings::full_gc_update_roots ||
-         root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
-         "Only for these phases");
-
-  ShenandoahGCPhase phase(root_phase);
-
-  bool check_alive = root_phase == ShenandoahPhaseTimings::degen_gc_update_roots;
-
-#if COMPILER2_OR_JVMCI
-  DerivedPointerTable::clear();
-#endif
-
-  ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  WorkGang* workers = heap->workers();
-  uint nworkers = workers->active_workers();
-
-  ShenandoahRootUpdater root_updater(nworkers, root_phase);
-  ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
-  workers->run_task(&update_roots);
-
-#if COMPILER2_OR_JVMCI
-  DerivedPointerTable::update_pointers();
-#endif
-}
-
-class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
-private:
-  ShenandoahThreadRoots           _thread_roots;
-  ShenandoahPhaseTimings::Phase   _phase;
-  ShenandoahGCWorkerPhase         _worker_phase;
-public:
-  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
-    AbstractGangTask("Shenandoah Update Thread Roots"),
-    _thread_roots(phase, is_par),
-    _phase(phase),
-    _worker_phase(phase) {}
-
-  void work(uint worker_id) {
-    ShenandoahParallelWorkerSession worker_session(worker_id);
-    ShenandoahUpdateRefsClosure cl;
-    _thread_roots.oops_do(&cl, NULL, worker_id);
-  }
-};
-
-void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
-  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
-
-  ShenandoahGCPhase phase(root_phase);
-
-#if COMPILER2_OR_JVMCI
-  DerivedPointerTable::clear();
-#endif
-  ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  WorkGang* workers = heap->workers();
-  bool is_par = workers->active_workers() > 1;
-
-  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
-  workers->run_task(&task);
-
-#if COMPILER2_OR_JVMCI
-  DerivedPointerTable::update_pointers();
-#endif
-}
-
 // Mark concurrent roots during concurrent phases
 class ShenandoahMarkConcurrentRootsTask : public AbstractGangTask {
 private:
@@ -357,6 +291,10 @@ void ShenandoahConcurrentMark::finish_mark() {
   assert(task_queues()->is_empty(), "Should be empty");
   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
+
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  heap->set_concurrent_mark_in_progress(false);
+  heap->mark_complete_marking_context();
 }
 
 void ShenandoahConcurrentMark::finish_mark_work() {
@@ -368,7 +306,7 @@ void ShenandoahConcurrentMark::finish_mark_work() {
   //   root scan, and completes the closure, thus marking through all live objects
   // The implementation is the same, so it's shared here.
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  ShenandoahGCPhase phase(ShenandoahPhaseTimings::finish_queues);
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::finish_mark);
   uint nworkers = heap->workers()->active_workers();
   task_queues()->reserve(nworkers);
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp
index e8e5cc6acc0b8..2a5bc7dea6b78 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp
@@ -53,10 +53,6 @@ class ShenandoahConcurrentMark: public ShenandoahMark {
 
   static void cancel();
 
-  // TODO: where to put them
-  static void update_roots(ShenandoahPhaseTimings::Phase root_phase);
-  static void update_thread_roots(ShenandoahPhaseTimings::Phase root_phase);
-
 private:
   void finish_mark_work();
 };
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
index 4747e415dc3a2..0d895bac62632 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
@@ -24,13 +24,15 @@
 
 #include "precompiled.hpp"
 
-#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 #include "gc/shenandoah/shenandoahControlThread.hpp"
+#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahMark.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
@@ -47,7 +49,7 @@ ShenandoahControlThread::ShenandoahControlThread() :
   _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
   _periodic_task(this),
   _requested_gc_cause(GCCause::_no_cause_specified),
-  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
+  _degen_point(ShenandoahGC::_degenerated_outside_cycle),
   _allocs_seen(0) {
 
   reset_gc_id();
@@ -110,7 +112,7 @@ void ShenandoahControlThread::run_service() {
     // Choose which GC mode to run in. The block below should select a single mode.
     GCMode mode = none;
     GCCause::Cause cause = GCCause::_last_gc_cause;
-    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;
+    ShenandoahGC::ShenandoahDegenPoint degen_point = ShenandoahGC::_degenerated_unset;
 
     if (alloc_failure_pending) {
       // Allocation failure takes precedence: we have to deal with it first thing
@@ -120,7 +122,7 @@ void ShenandoahControlThread::run_service() {
 
       // Consume the degen point, and seed it with default value
       degen_point = _degen_point;
-      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;
+      _degen_point = ShenandoahGC::_degenerated_outside_cycle;
 
       if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
         heuristics->record_allocation_failure_gc();
@@ -384,100 +386,31 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau
   //                                      Full GC  --------------------------/
   //
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-
-  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;
+  if (check_cancellation_or_degen(ShenandoahGC::_degenerated_outside_cycle)) return;
 
   GCIdMark gc_id_mark;
   ShenandoahGCSession session(cause);
 
   TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
 
-  // Reset for upcoming marking
-  heap->entry_reset();
-
-  // Start initial mark under STW
-  heap->vmop_entry_init_mark();
-
-  // Concurrent mark roots
-  heap->entry_mark_roots();
-  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;
-
-  // Continue concurrent mark
-  heap->entry_mark();
-  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;
-
-  // Complete marking under STW, and start evacuation
-  heap->vmop_entry_final_mark();
-
-  // Process weak roots that might still point to regions that would be broken by cleanup
-  if (heap->is_concurrent_weak_root_in_progress()) {
-    heap->entry_weak_refs();
-    heap->entry_weak_roots();
-  }
-
-  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
-  // the space. This would be the last action if there is nothing to evacuate.
-  heap->entry_cleanup_early();
-
-  {
-    ShenandoahHeapLocker locker(heap->lock());
-    heap->free_set()->log_status();
-  }
-
-  // Perform concurrent class unloading
-  if (heap->is_concurrent_weak_root_in_progress() &&
-      ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
-    heap->entry_class_unloading();
-  }
-
-  // Processing strong roots
-  // This may be skipped if there is nothing to update/evacuate.
-  // If so, strong_root_in_progress would be unset.
-  if (heap->is_concurrent_strong_root_in_progress()) {
-    heap->entry_strong_roots();
-  }
-
-  // Continue the cycle with evacuation and optional update-refs.
-  // This may be skipped if there is nothing to evacuate.
-  // If so, evac_in_progress would be unset by collection set preparation code.
-  if (heap->is_evacuation_in_progress()) {
-    // Concurrently evacuate
-    heap->entry_evac();
-    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;
-
-    // Perform update-refs phase.
-    heap->vmop_entry_init_updaterefs();
-    heap->entry_updaterefs();
-    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;
-
-    // Concurrent update thread roots
-    heap->entry_update_thread_roots();
-    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;
-
-    heap->vmop_entry_final_updaterefs();
-
-    // Update references freed up collection set, kick the cleanup to reclaim the space.
-    heap->entry_cleanup_complete();
+  ShenandoahConcurrentGC gc;
+  if (gc.collect(cause)) {
+    // Cycle is complete
+    heap->heuristics()->record_success_concurrent();
+    heap->shenandoah_policy()->record_success_concurrent();
   } else {
-    // Concurrent weak/strong root flags are unset concurrently. We depend on updateref GC safepoints
-    // to ensure the changes are visible to all mutators before gc cycle is completed.
-    // In case of no evacuation, updateref GC safepoints are skipped. Therefore, we will need
-    // to perform thread handshake to ensure their consistences.
-    heap->entry_rendezvous_roots();
+    assert(heap->cancelled_gc(), "Must have been cancelled");
+    check_cancellation_or_degen(gc.degen_point());
   }
-
-  // Cycle is complete
-  heap->heuristics()->record_success_concurrent();
-  heap->shenandoah_policy()->record_success_concurrent();
 }
 
-bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
+bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   if (heap->cancelled_gc()) {
     assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");
     if (!in_graceful_shutdown()) {
-      assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
-              "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
+      assert (_degen_point == ShenandoahGC::_degenerated_outside_cycle,
+              "Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
       _degen_point = point;
     }
     return true;
@@ -493,22 +426,24 @@ void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
   GCIdMark gc_id_mark;
   ShenandoahGCSession session(cause);
 
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  heap->vmop_entry_full(cause);
+  ShenandoahMarkCompact gc;
+  gc.collect(cause);
 
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
   heap->heuristics()->record_success_full();
   heap->shenandoah_policy()->record_success_full();
 }
 
-void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
-  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");
+void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point) {
+  assert (point != ShenandoahGC::_degenerated_unset, "Degenerated point should be set");
 
   GCIdMark gc_id_mark;
   ShenandoahGCSession session(cause);
 
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  heap->vmop_degenerated(point);
+  ShenandoahDegenGC gc(point);
+  gc.collect(cause);
 
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
   heap->heuristics()->record_success_degenerated();
   heap->shenandoah_policy()->record_success_degenerated();
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp
index 634194a170b4e..794c873d12d7a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "gc/shared/gcCause.hpp"
 #include "gc/shared/concurrentGCThread.hpp"
+#include "gc/shenandoah/shenandoahGC.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahPadding.hpp"
 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
@@ -91,7 +92,7 @@ class ShenandoahControlThread: public ConcurrentGCThread {
   ShenandoahSharedFlag _do_counters_update;
   ShenandoahSharedFlag _force_counters_update;
   GCCause::Cause       _requested_gc_cause;
-  ShenandoahHeap::ShenandoahDegenPoint _degen_point;
+  ShenandoahGC::ShenandoahDegenPoint _degen_point;
 
   shenandoah_padding(0);
   volatile size_t _allocs_seen;
@@ -99,10 +100,10 @@ class ShenandoahControlThread: public ConcurrentGCThread {
   volatile size_t _gc_id;
   shenandoah_padding(2);
 
-  bool check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point);
+  bool check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point);
   void service_concurrent_normal_cycle(GCCause::Cause cause);
   void service_stw_full_cycle(GCCause::Cause cause);
-  void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point);
+  void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point);
   void service_uncommit(double shrink_before, size_t shrink_until);
 
   bool try_set_alloc_failure_gc();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
new file mode 100644
index 0000000000000..2619b1ec5635e
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/collectorCounters.hpp"
+#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahMetrics.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahSTWMark.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+#include "gc/shenandoah/shenandoahVerifier.hpp"
+#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
+#include "gc/shenandoah/shenandoahVMOperations.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/events.hpp"
+
+ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point) :
+  ShenandoahGC(),
+  _degen_point(degen_point) {
+}
+
+bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
+  vmop_degenerated();
+  return true;
+}
+
+void ShenandoahDegenGC::vmop_degenerated() {
+  TraceCollectorStats tcs(ShenandoahHeap::heap()->monitoring_support()->full_stw_collection_counters());
+  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
+  VM_ShenandoahDegeneratedGC degenerated_gc(this);
+  VMThread::execute(&degenerated_gc);
+}
+
+void ShenandoahDegenGC::entry_degenerated() {
+  const char* msg = degen_event_message(_degen_point);
+  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
+  EventMark em("%s", msg);
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+
+  ShenandoahWorkerScope scope(heap->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
+                              "stw degenerated gc");
+
+  heap->set_degenerated_gc_in_progress(true);
+  op_degenerated();
+  heap->set_degenerated_gc_in_progress(false);
+}
+
+void ShenandoahDegenGC::op_degenerated() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  // Degenerated GC is STW, but it can also fail. Current mechanics communicate
+  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
+  // some phase, we have to upgrade the Degenerate GC to Full GC.
+  heap->clear_cancelled_gc();
+
+  ShenandoahMetricsSnapshot metrics;
+  metrics.snap_before();
+
+  switch (_degen_point) {
+    // The cases below form the Duff's-like device: it describes the actual GC cycle,
+    // but enters it at different points, depending on which concurrent phase had
+    // degenerated.
+
+    case _degenerated_outside_cycle:
+      // We have degenerated from outside the cycle, which means something is bad with
+      // the heap, most probably heavy humongous fragmentation, or we are very low on free
+      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
+      // we can do the most aggressive degen cycle, which includes processing references and
+      // class unloading, unless those features are explicitly disabled.
+      //
+
+      // Degenerated from concurrent root mark, reset the flag for STW mark
+      if (heap->is_concurrent_mark_in_progress()) {
+        ShenandoahConcurrentMark::cancel();
+        heap->set_concurrent_mark_in_progress(false);
+      }
+
+      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
+      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
+      heap->set_unload_classes(heap->heuristics()->can_unload_classes());
+
+      op_reset();
+
+      // STW mark
+      op_mark();
+
+    case _degenerated_mark:
+      // Only when degenerating exactly at mark (not via fallthrough from above): finish the handed-over concurrent mark
+      if (_degen_point == ShenandoahDegenPoint::_degenerated_mark) {
+        op_finish_mark();
+      }
+      assert(!heap->cancelled_gc(), "STW mark can not OOM");
+
+      // Degenerated GC: select the collection set, etc.
+      op_prepare_evacuation();
+
+      op_cleanup_early();
+
+    case _degenerated_evac:
+      // If heuristics thinks we should do the cycle, this flag would be set,
+      // and we can do evacuation. Otherwise, it would be the shortcut cycle.
+      if (heap->is_evacuation_in_progress()) {
+
+        // Degeneration under oom-evac protocol might have left some objects in
+        // collection set un-evacuated. Restart evacuation from the beginning to
+        // capture all objects. For all the objects that are already evacuated,
+        // it would be a simple check, which is supposed to be fast. This is also
+        // safe to do even without degeneration, as CSet iterator is at beginning
+        // in preparation for evacuation anyway.
+        //
+        // Before doing that, we need to make sure we never had any cset-pinned
+        // regions. This may happen if allocation failure happened when evacuating
+        // the about-to-be-pinned object, oom-evac protocol left the object in
+        // the collection set, and then the pin reached the cset region. If we continue
+        // the cycle here, we would trash the cset and alive objects in it. To avoid
+        // it, we fail degeneration right away and slide into Full GC to recover.
+
+        {
+          heap->sync_pinned_region_status();
+          heap->collection_set()->clear_current_index();
+
+          ShenandoahHeapRegion* r;
+          while ((r = heap->collection_set()->next()) != NULL) {
+            if (r->is_pinned()) {
+              heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
+              op_degenerated_fail();
+              return;
+            }
+          }
+
+          heap->collection_set()->clear_current_index();
+        }
+        op_evacuate();
+        if (heap->cancelled_gc()) {
+          op_degenerated_fail();
+          return;
+        }
+      }
+
+      // If heuristics thinks we should do the cycle, this flag would be set,
+      // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
+      if (heap->has_forwarded_objects()) {
+        op_init_updaterefs();
+        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
+      }
+
+    case _degenerated_updaterefs:
+      if (heap->has_forwarded_objects()) {
+        op_updaterefs();
+        op_update_roots();
+        assert(!heap->cancelled_gc(), "STW reference update can not OOM");
+      }
+
+      if (ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
+         // Disarm nmethods that were armed in the concurrent cycle.
+         // In the case above, updating roots should have disarmed them already.
+         ShenandoahCodeRoots::disarm_nmethods();
+      }
+
+      op_cleanup_complete();
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_after_degenerated();
+  }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
+
+  metrics.snap_after();
+
+  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
+  // because that probably means the heap is overloaded and/or fragmented.
+  if (!metrics.is_good_progress()) {
+    heap->notify_gc_no_progress();
+    heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
+    op_degenerated_futile();
+  } else {
+    heap->notify_gc_progress();
+  }
+}
+
+void ShenandoahDegenGC::op_reset() {
+  ShenandoahHeap::heap()->prepare_gc();
+}
+
+void ShenandoahDegenGC::op_mark() {
+  assert(!ShenandoahHeap::heap()->is_concurrent_mark_in_progress(), "Should be reset");
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_mark);
+  ShenandoahSTWMark mark(false /*full gc*/);
+  mark.clear();
+  mark.mark();
+}
+
+void ShenandoahDegenGC::op_finish_mark() {
+  ShenandoahConcurrentMark mark;
+  mark.finish_mark();
+}
+
+void ShenandoahDegenGC::op_prepare_evacuation() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_roots_no_forwarded();
+  }
+
+  // STW cleanup weak roots and unload classes
+  heap->parallel_cleaning(false /*full gc*/);
+  // Prepare regions and collection set
+  heap->prepare_regions_and_collection_set(false /*concurrent*/);
+
+  if (!heap->collection_set()->is_empty()) {
+    heap->set_evacuation_in_progress(true);
+    heap->set_has_forwarded_objects(true);
+
+    if (ShenandoahVerify) {
+      heap->verifier()->verify_during_evacuation();
+    }
+  } else {
+    if (ShenandoahVerify) {
+      heap->verifier()->verify_after_concmark();
+    }
+
+    if (VerifyAfterGC) {
+      Universe::verify();
+    }
+  }
+}
+
+void ShenandoahDegenGC::op_cleanup_early() {
+  ShenandoahHeap::heap()->recycle_trash();
+}
+
+void ShenandoahDegenGC::op_evacuate() {
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
+  ShenandoahHeap::heap()->evacuate_collection_set(false /* concurrent*/);
+}
+
+void ShenandoahDegenGC::op_init_updaterefs() {
+  // Evacuation has completed
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  heap->set_evacuation_in_progress(false);
+  heap->set_concurrent_weak_root_in_progress(false);
+  heap->set_concurrent_strong_root_in_progress(false);
+
+  heap->prepare_update_heap_references(false /*concurrent*/);
+  heap->set_update_refs_in_progress(true);
+}
+
+void ShenandoahDegenGC::op_updaterefs() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_updaterefs);
+  // Handed over from concurrent update references phase
+  heap->update_heap_references(false /*concurrent*/);
+
+  heap->set_update_refs_in_progress(false);
+  heap->set_has_forwarded_objects(false);
+}
+
+void ShenandoahDegenGC::op_update_roots() {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+
+  update_roots(false /*full_gc*/);
+
+  heap->update_heap_region_states(false /*concurrent*/);
+
+  if (ShenandoahVerify) {
+    heap->verifier()->verify_after_updaterefs();
+  }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
+
+  heap->rebuild_free_set(false /*concurrent*/);
+}
+
+void ShenandoahDegenGC::op_cleanup_complete() {
+  ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_cleanup_complete);
+  ShenandoahHeap::heap()->recycle_trash();
+}
+
+void ShenandoahDegenGC::op_degenerated_fail() {
+  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
+  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
+
+  ShenandoahMarkCompact full_gc;
+  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
+}
+
+void ShenandoahDegenGC::op_degenerated_futile() {
+  ShenandoahHeap::heap()->shenandoah_policy()->record_degenerated_upgrade_to_full();
+  ShenandoahMarkCompact full_gc;
+  full_gc.op_full(GCCause::_shenandoah_upgrade_to_full_gc);
+}
+
+const char* ShenandoahDegenGC::degen_event_message(ShenandoahDegenPoint point) const {
+  switch (point) {
+    case _degenerated_unset:
+      return "Pause Degenerated GC (<UNSET>)";
+    case _degenerated_outside_cycle:
+      return "Pause Degenerated GC (Outside of Cycle)";
+    case _degenerated_mark:
+      return "Pause Degenerated GC (Mark)";
+    case _degenerated_evac:
+      return "Pause Degenerated GC (Evacuation)";
+    case _degenerated_updaterefs:
+      return "Pause Degenerated GC (Update Refs)";
+    default:
+      ShouldNotReachHere();
+      return "ERROR";
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp
new file mode 100644
index 0000000000000..8f6f71d52c253
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHDEGENERATEDGC_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHDEGENERATEDGC_HPP
+
+#include "gc/shenandoah/shenandoahGC.hpp"
+
+class VM_ShenandoahDegeneratedGC;
+
+class ShenandoahDegenGC : public ShenandoahGC {
+  friend class VM_ShenandoahDegeneratedGC;
+private:
+  const ShenandoahDegenPoint  _degen_point;
+
+public:
+  ShenandoahDegenGC(ShenandoahDegenPoint degen_point);
+  bool collect(GCCause::Cause cause);
+
+private:
+  void vmop_degenerated();
+  void entry_degenerated();
+  void op_degenerated();
+
+  void op_reset();
+  void op_mark();
+  void op_finish_mark();
+  void op_prepare_evacuation();
+  void op_cleanup_early();
+  void op_evacuate();
+  void op_init_updaterefs();
+  void op_updaterefs();
+  void op_update_roots();
+  void op_cleanup_complete();
+
+  // Fail handling
+  void op_degenerated_futile();
+  void op_degenerated_fail();
+
+  const char* degen_event_message(ShenandoahDegenPoint point) const;
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHDEGENERATEDGC_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
new file mode 100644
index 0000000000000..d0a3820ac3136
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGC.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/workgroup.hpp"
+#include "gc/shenandoah/shenandoahClosures.inline.hpp"
+#include "gc/shenandoah/shenandoahGC.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
+#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+
+const char* ShenandoahGC::degen_point_to_string(ShenandoahDegenPoint point) {
+  switch(point) {
+    case _degenerated_unset:
+      return "<UNSET>";
+    case _degenerated_outside_cycle:
+      return "Outside of Cycle";
+    case _degenerated_mark:
+      return "Mark";
+    case _degenerated_evac:
+      return "Evacuation";
+    case _degenerated_updaterefs:
+      return "Update References";
+    default:
+      ShouldNotReachHere();
+      return "ERROR";
+   }
+}
+
+class ShenandoahUpdateRootsTask : public AbstractGangTask {
+private:
+  ShenandoahRootUpdater*  _root_updater;
+  bool                    _check_alive;
+public:
+  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater, bool check_alive) :
+    AbstractGangTask("Shenandoah Update Roots"),
+    _root_updater(root_updater),
+    _check_alive(check_alive) {
+  }
+
+  void work(uint worker_id) {
+    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    ShenandoahUpdateRefsClosure cl;
+    if (_check_alive) {
+      ShenandoahForwardedIsAliveClosure is_alive;
+      _root_updater->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>(worker_id, &is_alive, &cl);
+    } else {
+      AlwaysTrueClosure always_true;
+      _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
+    }
+  }
+};
+
+void ShenandoahGC::update_roots(bool full_gc) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
+         "Only for degenerated GC and full GC");
+
+  bool check_alive = !full_gc;
+  ShenandoahPhaseTimings::Phase p = full_gc ?
+                                    ShenandoahPhaseTimings::full_gc_update_roots :
+                                    ShenandoahPhaseTimings::degen_gc_update_roots;
+
+  ShenandoahGCPhase phase(p);
+#if COMPILER2_OR_JVMCI
+  DerivedPointerTable::clear();
+#endif
+
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  WorkGang* workers = heap->workers();
+  uint nworkers = workers->active_workers();
+
+  ShenandoahRootUpdater root_updater(nworkers, p);
+  ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
+  workers->run_task(&update_roots);
+
+#if COMPILER2_OR_JVMCI
+  DerivedPointerTable::update_pointers();
+#endif
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp
new file mode 100644
index 0000000000000..e0d3724723a29
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHGC_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHGC_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/shared/gcCause.hpp"
+
+/*
+ * Base class of three Shenandoah GC modes
+ *
+ * The relationship of the GCs:
+ *
+ * ("normal" mode) ----> Concurrent GC ----> (finish)
+ *                            |
+ *                            | <upgrade>
+ *                            v
+ * ("passive" mode) ---> Degenerated GC ---> (finish)
+ *                            |
+ *                            | <upgrade>
+ *                            v
+ *                         Full GC --------> (finish)
+ */
+
+class ShenandoahGC : public StackObj {
+public:
+  // Fail point from concurrent GC
+  enum ShenandoahDegenPoint {
+    _degenerated_unset,
+    _degenerated_outside_cycle,
+    _degenerated_mark,
+    _degenerated_evac,
+    _degenerated_updaterefs,
+    _DEGENERATED_LIMIT
+  };
+
+  virtual bool collect(GCCause::Cause cause) = 0;
+  static const char* degen_point_to_string(ShenandoahDegenPoint point);
+
+protected:
+  static void update_roots(bool full_gc);
+};
+
+#endif  // SHARE_GC_SHENANDOAH_SHENANDOAHGC_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index a3715b819fe6e..419afbdcdb0cd 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -993,6 +993,11 @@ class ShenandoahEvacuationTask : public AbstractGangTask {
   }
 };
 
+void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
+  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
+  workers()->run_task(&task);
+}
+
 void ShenandoahHeap::trash_cset_regions() {
   ShenandoahHeapLocker locker(lock());
 
@@ -1569,69 +1574,38 @@ class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionCl
   bool is_thread_safe() { return true; }
 };
 
-void ShenandoahHeap::op_init_mark() {
-  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
-  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
-
-  assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
-  assert(!marking_context()->is_complete(), "should not be complete");
-  assert(!has_forwarded_objects(), "No forwarded objects on this path");
-
-  if (ShenandoahVerify) {
-    verifier()->verify_before_concmark();
-  }
-
-  if (VerifyBeforeGC) {
-    Universe::verify();
-  }
-
-  set_concurrent_mark_in_progress(true);
-
-  // We need to reset all TLABs because they might be below the TAMS, and we need to mark
-  // the objects in them. Do not let mutators allocate any new objects in their current TLABs.
-  // It is also a good place to resize the TLAB sizes for future allocations.
-  if (UseTLAB) {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_manage_tlabs);
-    tlabs_retire(ResizeTLAB);
-  }
-
-  {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
-    ShenandoahInitMarkUpdateRegionStateClosure cl;
-    parallel_heap_region_iterate(&cl);
-  }
-
-  // Weak reference processing
-  ShenandoahReferenceProcessor* rp = ref_processor();
-  rp->reset_thread_locals();
-  rp->set_soft_reference_policy(soft_ref_policy()->should_clear_all_soft_refs());
+void ShenandoahHeap::rendezvous_threads() {
+  ShenandoahRendezvousClosure cl;
+  Handshake::execute(&cl);
+}
 
-  // Make above changes visible to worker threads
-  OrderAccess::fence();
+void ShenandoahHeap::recycle_trash() {
+  free_set()->recycle_trash();
+}
 
-  ShenandoahConcurrentMark mark;
-  mark.mark_stw_roots();
+class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
+private:
+  ShenandoahMarkingContext* const _ctx;
+public:
+  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 
-  if (ShenandoahPacing) {
-    pacer()->setup_for_mark();
+  void heap_region_do(ShenandoahHeapRegion* r) {
+    if (r->is_active()) {
+      // Reset live data and set TAMS optimistically. We would recheck these under the pause
+      // anyway to capture any updates that happened since now.
+      r->clear_live_data();
+      _ctx->capture_top_at_mark_start(r);
+    }
   }
 
-  // Arm nmethods for concurrent marking. When a nmethod is about to be executed,
-  // we need to make sure that all its metadata are marked. alternative is to remark
-  // thread roots at final mark pause, but it can be potential latency killer.
-  if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
-    ShenandoahCodeRoots::arm_nmethods();
-  }
-}
+  bool is_thread_safe() { return true; }
+};
 
-void ShenandoahHeap::op_mark_roots() {
-  ShenandoahConcurrentMark mark;
-  mark.mark_concurrent_roots();
-}
+void ShenandoahHeap::prepare_gc() {
+  reset_mark_bitmap();
 
-void ShenandoahHeap::op_mark() {
-  ShenandoahConcurrentMark mark;
-  mark.concurrent_mark();
+  ShenandoahResetUpdateRegionStateClosure cl;
+  parallel_heap_region_iterate(&cl);
 }
 
 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
@@ -1681,92 +1655,11 @@ class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionC
   bool is_thread_safe() { return true; }
 };
 
-void ShenandoahHeap::op_final_mark() {
-  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
-  assert(!has_forwarded_objects(), "No forwarded objects on this path");
-
-  if (!cancelled_gc()) {
-    finish_mark();
-    prepare_evacuation();
-  } else {
-    // If this cycle was updating references, we need to keep the has_forwarded_objects
-    // flag on, for subsequent phases to deal with it.
-    ShenandoahConcurrentMark::cancel();
-    set_concurrent_mark_in_progress(false);
-  }
-}
-
-void ShenandoahHeap::op_conc_evac() {
-  ShenandoahEvacuationTask task(this, _collection_set, true);
-  workers()->run_task(&task);
-}
-
-class ShenandoahUpdateThreadClosure : public HandshakeClosure {
-private:
-  ShenandoahUpdateRefsClosure _cl;
-public:
-  ShenandoahUpdateThreadClosure();
-  void do_thread(Thread* thread);
-};
-
-ShenandoahUpdateThreadClosure::ShenandoahUpdateThreadClosure() :
-  HandshakeClosure("Shenandoah Update Thread Roots") {
-}
-
-void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
-  if (thread->is_Java_thread()) {
-    JavaThread* jt = thread->as_Java_thread();
-    ResourceMark rm;
-    jt->oops_do(&_cl, NULL);
-  }
-}
-
-void ShenandoahHeap::op_update_thread_roots() {
-  ShenandoahUpdateThreadClosure cl;
-  Handshake::execute(&cl);
-}
-
-void ShenandoahHeap::op_stw_evac() {
-  ShenandoahEvacuationTask task(this, _collection_set, false);
-  workers()->run_task(&task);
-}
-
-void ShenandoahHeap::op_updaterefs() {
-  update_heap_references(true);
-}
-
-void ShenandoahHeap::op_cleanup_early() {
-  free_set()->recycle_trash();
-}
-
-void ShenandoahHeap::op_cleanup_complete() {
-  free_set()->recycle_trash();
-}
-
-// Helpers
-void ShenandoahHeap::finish_mark() {
-  assert(!cancelled_gc(), "Should not continue");
-  ShenandoahConcurrentMark mark;
-  mark.finish_mark();
-  // Marking is completed, deactivate SATB barrier
-  set_concurrent_mark_in_progress(false);
-  mark_complete_marking_context();
-}
-
-void ShenandoahHeap::prepare_evacuation() {
-  // Notify JVMTI that the tagmap table will need cleaning.
-  JvmtiTagMap::set_needs_cleaning();
-
-  if (is_degenerated_gc_in_progress()) {
-    parallel_cleaning(false /* full gc*/);
-  }
-
-  if (ShenandoahVerify) {
-    verifier()->verify_roots_no_forwarded();
-  }
-
+void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
+  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
   {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_region_states);
+    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
+                                         ShenandoahPhaseTimings::degen_gc_final_update_region_states);
     ShenandoahFinalMarkUpdateRegionStateClosure cl;
     parallel_heap_region_iterate(&cl);
 
@@ -1779,287 +1672,30 @@ void ShenandoahHeap::prepare_evacuation() {
   // Weaker one: new allocations would happen past update watermark, and so less work would
   // be needed for reference updates (would update the large filler instead).
   if (UseTLAB) {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_manage_labs);
+    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_manage_labs :
+                                         ShenandoahPhaseTimings::degen_gc_final_manage_labs);
     tlabs_retire(false);
   }
 
   {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::choose_cset);
+    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
+                                         ShenandoahPhaseTimings::degen_gc_choose_cset);
     ShenandoahHeapLocker locker(lock());
     _collection_set->clear();
     heuristics()->choose_collection_set(_collection_set);
   }
 
   {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_rebuild_freeset);
+    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
+                                         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
     ShenandoahHeapLocker locker(lock());
     _free_set->rebuild();
   }
-
-  if (!is_degenerated_gc_in_progress()) {
-    prepare_concurrent_roots();
-    prepare_concurrent_unloading();
-  }
-
-  // If collection set has candidates, start evacuation.
-  // Otherwise, bypass the rest of the cycle.
-  if (!collection_set()->is_empty()) {
-    ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
-
-    if (ShenandoahVerify) {
-      verifier()->verify_before_evacuation();
-    }
-
-    set_evacuation_in_progress(true);
-    // From here on, we need to update references.
-    set_has_forwarded_objects(true);
-
-    if (!is_degenerated_gc_in_progress()) {
-      // Arm nmethods for concurrent codecache processing.
-      ShenandoahCodeRoots::arm_nmethods();
-      evacuate_and_update_roots();
-    }
-
-    // Notify JVMTI that oops are changed.
-    JvmtiTagMap::set_needs_rehashing();
-
-    if (ShenandoahPacing) {
-      pacer()->setup_for_evac();
-    }
-
-    if (ShenandoahVerify) {
-      // If OOM while evacuating/updating of roots, there is no guarantee of their consistencies
-      if (!cancelled_gc()) {
-        // We only evacuate/update thread at this pause
-        verifier()->verify_roots_no_forwarded(ShenandoahRootVerifier::ThreadRoots);
-      }
-      verifier()->verify_during_evacuation();
-    }
-  } else {
-    if (ShenandoahVerify) {
-      verifier()->verify_after_concmark();
-    }
-
-    if (VerifyAfterGC) {
-      Universe::verify();
-    }
-  }
 }
 
-class ShenandoahEvacUpdateCodeCacheClosure : public NMethodClosure {
-private:
-  BarrierSetNMethod* const               _bs;
-  ShenandoahEvacuateUpdateRootsClosure<> _cl;
-
-public:
-  ShenandoahEvacUpdateCodeCacheClosure() :
-    _bs(BarrierSet::barrier_set()->barrier_set_nmethod()),
-    _cl() {
-  }
-
-  void do_nmethod(nmethod* n) {
-    ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
-    ShenandoahReentrantLocker locker(data->lock());
-    // Setup EvacOOM scope below reentrant lock to avoid deadlock with
-    // nmethod_entry_barrier
-    ShenandoahEvacOOMScope oom;
-    data->oops_do(&_cl, true/*fix relocation*/);
-    _bs->disarm(n);
-  }
-};
-
-class ShenandoahConcurrentRootsEvacUpdateTask : public AbstractGangTask {
-private:
-  ShenandoahPhaseTimings::Phase                 _phase;
-  ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
-  ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
-  ShenandoahConcurrentNMethodIterator           _nmethod_itr;
-  bool                                          _process_codecache;
-
-public:
-  ShenandoahConcurrentRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
-    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Strong Roots"),
-    _phase(phase),
-    _vm_roots(phase),
-    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
-    _nmethod_itr(ShenandoahCodeRoots::table()),
-    _process_codecache(!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
-    if (_process_codecache) {
-      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      _nmethod_itr.nmethods_do_begin();
-    }
-  }
-
-  ~ShenandoahConcurrentRootsEvacUpdateTask() {
-    if (_process_codecache) {
-      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      _nmethod_itr.nmethods_do_end();
-    }
-  }
-
-  void work(uint worker_id) {
-    ShenandoahConcurrentWorkerSession worker_session(worker_id);
-    {
-      ShenandoahEvacOOMScope oom;
-      {
-        // vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
-        // may race against OopStorage::release() calls.
-        ShenandoahEvacUpdateOopStorageRootsClosure cl;
-        _vm_roots.oops_do<ShenandoahEvacUpdateOopStorageRootsClosure>(&cl, worker_id);
-      }
-
-      {
-        ShenandoahEvacuateUpdateRootsClosure<> cl;
-        CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
-        _cld_roots.cld_do(&clds, worker_id);
-      }
-    }
-
-    // Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
-    if (_process_codecache) {
-      ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
-      ShenandoahEvacUpdateCodeCacheClosure cl;
-      _nmethod_itr.nmethods_do(&cl);
-    }
-  }
-};
-
-class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
-private:
-  ShenandoahHeap* const _heap;
-  ShenandoahMarkingContext* const _mark_context;
-  bool  _evac_in_progress;
-  Thread* const _thread;
-
-public:
-  ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
-  void do_oop(oop* p);
-  void do_oop(narrowOop* p);
-};
-
-ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
-  _heap(ShenandoahHeap::heap()),
-  _mark_context(ShenandoahHeap::heap()->marking_context()),
-  _evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
-  _thread(Thread::current()) {
-}
-
-void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
-  const oop obj = RawAccess<>::oop_load(p);
-  if (!CompressedOops::is_null(obj)) {
-    if (!_mark_context->is_marked(obj)) {
-      shenandoah_assert_correct(p, obj);
-      Atomic::cmpxchg(p, obj, oop(NULL));
-    } else if (_evac_in_progress && _heap->in_collection_set(obj)) {
-      oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
-      if (resolved == obj) {
-        resolved = _heap->evacuate_object(obj, _thread);
-      }
-      Atomic::cmpxchg(p, obj, resolved);
-      assert(_heap->cancelled_gc() ||
-             _mark_context->is_marked(resolved) && !_heap->in_collection_set(resolved),
-             "Sanity");
-    }
-  }
-}
-
-void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(narrowOop* p) {
-  ShouldNotReachHere();
-}
-
-class ShenandoahIsCLDAliveClosure : public CLDClosure {
-public:
-  void do_cld(ClassLoaderData* cld) {
-    cld->is_alive();
-  }
-};
-
-class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
-public:
-  void do_nmethod(nmethod* n) {
-    n->is_unloading();
-  }
-};
-
-// This task not only evacuates/updates marked weak roots, but also "NULL"
-// dead weak roots.
-class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
-private:
-  ShenandoahVMWeakRoots<true /*concurrent*/> _vm_roots;
-
-  // Roots related to concurrent class unloading
-  ShenandoahClassLoaderDataRoots<true /* concurrent */, true /* single thread*/>
-                                             _cld_roots;
-  ShenandoahConcurrentNMethodIterator        _nmethod_itr;
-  ShenandoahConcurrentStringDedupRoots       _dedup_roots;
-  bool                                       _concurrent_class_unloading;
-
-public:
-  ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
-    AbstractGangTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
-    _vm_roots(phase),
-    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()),
-    _nmethod_itr(ShenandoahCodeRoots::table()),
-    _dedup_roots(phase),
-    _concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
-    if (_concurrent_class_unloading) {
-      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      _nmethod_itr.nmethods_do_begin();
-    }
-
-    _dedup_roots.prologue();
-  }
-
-  ~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
-    _dedup_roots.epilogue();
-
-    if (_concurrent_class_unloading) {
-      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-      _nmethod_itr.nmethods_do_end();
-    }
-    // Notify runtime data structures of potentially dead oops
-    _vm_roots.report_num_dead();
-  }
-
-  void work(uint worker_id) {
-    ShenandoahConcurrentWorkerSession worker_session(worker_id);
-    {
-      ShenandoahEvacOOMScope oom;
-      // jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
-      // may race against OopStorage::release() calls.
-      ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
-      _vm_roots.oops_do(&cl, worker_id);
-
-      // String dedup weak roots
-      ShenandoahForwardedIsAliveClosure is_alive;
-      ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
-      _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
-    }
-
-    // If we are going to perform concurrent class unloading later on, we need to
-    // cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we
-    // can cleanup immediate garbage sooner.
-    if (_concurrent_class_unloading) {
-      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the
-      // CLD's holder or evacuate it.
-      ShenandoahIsCLDAliveClosure is_cld_alive;
-      _cld_roots.cld_do(&is_cld_alive, worker_id);
-
-      // Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
-      // The closure calls nmethod->is_unloading(). The is_unloading
-      // state is cached, therefore, during concurrent class unloading phase,
-      // we will not touch the metadata of unloading nmethods
-      ShenandoahIsNMethodAliveClosure is_nmethod_alive;
-      _nmethod_itr.nmethods_do(&is_nmethod_alive);
-    }
-  }
-};
-
-void ShenandoahHeap::op_weak_refs() {
-  // Concurrent weak refs processing
-  ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_refs_work);
-  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_refs_work);
-  ref_processor()->process_references(ShenandoahPhaseTimings::conc_weak_refs_work, workers(), true /* concurrent */);
+void ShenandoahHeap::do_class_unloading() {
+  _unloader.unload();
+  set_concurrent_weak_root_in_progress(false);
 }
 
 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
@@ -2071,259 +1707,20 @@ void ShenandoahHeap::stw_weak_refs(bool full_gc) {
   ref_processor()->process_references(phase, workers(), false /* concurrent */);
 }
 
-void ShenandoahHeap::op_weak_roots() {
-  if (is_concurrent_weak_root_in_progress()) {
-    // Concurrent weak root processing
-    {
-      ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
-      ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
-      ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
-      workers()->run_task(&task);
-    }
-
-    // Perform handshake to flush out dead oops
-    {
-      ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
-      rendezvous_threads();
-    }
-
-    if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
-      set_concurrent_weak_root_in_progress(false);
-    }
-  }
-}
-
-void ShenandoahHeap::op_class_unloading() {
-  assert (is_concurrent_weak_root_in_progress() &&
-          ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
-          "Checked by caller");
-  _unloader.unload();
-  set_concurrent_weak_root_in_progress(false);
-}
-
-void ShenandoahHeap::op_strong_roots() {
-  assert(is_concurrent_strong_root_in_progress(), "Checked by caller");
-  ShenandoahConcurrentRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_strong_roots);
-  workers()->run_task(&task);
-  set_concurrent_strong_root_in_progress(false);
-}
-
-void ShenandoahHeap::op_rendezvous_roots() {
-  rendezvous_threads();
-}
-
-void ShenandoahHeap::rendezvous_threads() {
-  ShenandoahRendezvousClosure cl;
-  Handshake::execute(&cl);
-}
-
-class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
-private:
-  ShenandoahMarkingContext* const _ctx;
-public:
-  ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
-
-  void heap_region_do(ShenandoahHeapRegion* r) {
-    if (r->is_active()) {
-      // Reset live data and set TAMS optimistically. We would recheck these under the pause
-      // anyway to capture any updates that happened since now.
-      r->clear_live_data();
-      _ctx->capture_top_at_mark_start(r);
-    }
-  }
-
-  bool is_thread_safe() { return true; }
-};
-
-void ShenandoahHeap::op_reset() {
-  if (ShenandoahPacing) {
-    pacer()->setup_for_reset();
-  }
-  reset_mark_bitmap();
-
-  ShenandoahResetUpdateRegionStateClosure cl;
-  parallel_heap_region_iterate(&cl);
-}
-
-void ShenandoahHeap::op_full(GCCause::Cause cause) {
-  ShenandoahMetricsSnapshot metrics;
-  metrics.snap_before();
-
-  ShenandoahMarkCompact full_gc;
-  full_gc.initialize(_gc_timer);
-  full_gc.do_it(cause);
-
-  metrics.snap_after();
-
-  if (metrics.is_good_progress()) {
-    _progress_last_gc.set();
-  } else {
-    // Nothing to do. Tell the allocation path that we have failed to make
-    // progress, and it can finally fail.
-    _progress_last_gc.unset();
-  }
-}
-
-void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
-  // Degenerated GC is STW, but it can also fail. Current mechanics communicates
-  // GC failure via cancelled_concgc() flag. So, if we detect the failure after
-  // some phase, we have to upgrade the Degenerate GC to Full GC.
-
-  clear_cancelled_gc();
-
-  ShenandoahMetricsSnapshot metrics;
-  metrics.snap_before();
-
-  switch (point) {
-    // The cases below form the Duff's-like device: it describes the actual GC cycle,
-    // but enters it at different points, depending on which concurrent phase had
-    // degenerated.
-
-    case _degenerated_outside_cycle:
-      // We have degenerated from outside the cycle, which means something is bad with
-      // the heap, most probably heavy humongous fragmentation, or we are very low on free
-      // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
-      // we can do the most aggressive degen cycle, which includes processing references and
-      // class unloading, unless those features are explicitly disabled.
-      //
-      // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
-      // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
-
-      // Degenerated from concurrent mark roots, reset for STW mark
-      if (is_concurrent_mark_in_progress()) {
-        ShenandoahConcurrentMark::cancel();
-        set_concurrent_mark_in_progress(false);
-      }
-
-      set_unload_classes(heuristics()->can_unload_classes());
-
-      op_reset();
-
-      // STW root scan
-      {
-        assert(!has_forwarded_objects(), "Should not have forwarded heap");
-        ShenandoahSTWMark mark(false /*full_gc*/);
-        mark.mark();
-        assert(!cancelled_gc(), "STW mark can not OOM");
-      }
-    case _degenerated_mark:
-      if (point == _degenerated_mark) {
-        finish_mark();
-      }
-      prepare_evacuation();
-
-      if (cancelled_gc()) {
-        op_degenerated_fail();
-        return;
-      }
-
-      if (!has_forwarded_objects() && ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
-        // Disarm nmethods that armed for concurrent mark. On normal cycle, it would
-        // be disarmed while conc-roots phase is running.
-        // TODO: Call op_conc_roots() here instead
-        ShenandoahCodeRoots::disarm_nmethods();
-      }
-
-      op_cleanup_early();
-
-    case _degenerated_evac:
-      // If heuristics thinks we should do the cycle, this flag would be set,
-      // and we can do evacuation. Otherwise, it would be the shortcut cycle.
-      if (is_evacuation_in_progress()) {
-
-        // Degeneration under oom-evac protocol might have left some objects in
-        // collection set un-evacuated. Restart evacuation from the beginning to
-        // capture all objects. For all the objects that are already evacuated,
-        // it would be a simple check, which is supposed to be fast. This is also
-        // safe to do even without degeneration, as CSet iterator is at beginning
-        // in preparation for evacuation anyway.
-        //
-        // Before doing that, we need to make sure we never had any cset-pinned
-        // regions. This may happen if allocation failure happened when evacuating
-        // the about-to-be-pinned object, oom-evac protocol left the object in
-        // the collection set, and then the pin reached the cset region. If we continue
-        // the cycle here, we would trash the cset and alive objects in it. To avoid
-        // it, we fail degeneration right away and slide into Full GC to recover.
-
-        {
-          sync_pinned_region_status();
-          collection_set()->clear_current_index();
-
-          ShenandoahHeapRegion* r;
-          while ((r = collection_set()->next()) != NULL) {
-            if (r->is_pinned()) {
-              cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
-              op_degenerated_fail();
-              return;
-            }
-          }
-
-          collection_set()->clear_current_index();
-        }
-
-        op_stw_evac();
-        if (cancelled_gc()) {
-          op_degenerated_fail();
-          return;
-        }
-      }
-
-      // If heuristics thinks we should do the cycle, this flag would be set,
-      // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
-      if (has_forwarded_objects()) {
-        op_init_updaterefs();
-        if (cancelled_gc()) {
-          op_degenerated_fail();
-          return;
-        }
-      }
-
-    case _degenerated_updaterefs:
-      if (has_forwarded_objects()) {
-        op_final_updaterefs();
-        if (cancelled_gc()) {
-          op_degenerated_fail();
-          return;
-        }
-      }
-
-      op_cleanup_complete();
-      break;
-
-    default:
-      ShouldNotReachHere();
-  }
-
-  if (ShenandoahVerify) {
-    verifier()->verify_after_degenerated();
-  }
-
-  if (VerifyAfterGC) {
-    Universe::verify();
-  }
-
-  metrics.snap_after();
+void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
+  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
 
-  // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
-  // because that probably means the heap is overloaded and/or fragmented.
-  if (!metrics.is_good_progress()) {
-    _progress_last_gc.unset();
-    cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
-    op_degenerated_futile();
-  } else {
-    _progress_last_gc.set();
+  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
+  // make them parsable for update code to work correctly. Plus, we can compute new sizes
+  // for future GCLABs here.
+  if (UseTLAB) {
+    ShenandoahGCPhase phase(concurrent ?
+                            ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
+                            ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
+    gclabs_retire(ResizeTLAB);
   }
-}
-
-void ShenandoahHeap::op_degenerated_fail() {
-  log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
-  shenandoah_policy()->record_degenerated_upgrade_to_full();
-  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
-}
 
-void ShenandoahHeap::op_degenerated_futile() {
-  shenandoah_policy()->record_degenerated_upgrade_to_full();
-  op_full(GCCause::_shenandoah_upgrade_to_full_gc);
+  _update_refs_iterator.reset();
 }
 
 void ShenandoahHeap::force_satb_flush_all_threads() {
@@ -2623,21 +2020,17 @@ ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
 
 void ShenandoahHeap::prepare_concurrent_roots() {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
-  if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
-    set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
-    set_concurrent_weak_root_in_progress(true);
-  }
-}
-
-void ShenandoahHeap::prepare_concurrent_unloading() {
-  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
+  set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
+  set_concurrent_weak_root_in_progress(true);
   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
     _unloader.prepare();
   }
 }
 
-void ShenandoahHeap::finish_concurrent_unloading() {
+void ShenandoahHeap::finish_concurrent_roots() {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  assert(!is_stw_gc_in_progress(), "Only concurrent GC");
   if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
     _unloader.finish();
   }
@@ -2720,38 +2113,12 @@ class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
 };
 
 void ShenandoahHeap::update_heap_references(bool concurrent) {
+  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
+
   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
   workers()->run_task(&task);
 }
 
-void ShenandoahHeap::op_init_updaterefs() {
-  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
-
-  set_evacuation_in_progress(false);
-
-  // Evacuation is over, no GCLABs are needed anymore. GCLABs are under URWM, so we need to
-  // make them parsable for update code to work correctly. Plus, we can compute new sizes
-  // for future GCLABs here.
-  if (UseTLAB) {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_manage_gclabs);
-    gclabs_retire(ResizeTLAB);
-  }
-
-  if (ShenandoahVerify) {
-    if (!is_degenerated_gc_in_progress()) {
-      verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
-    }
-    verifier()->verify_before_updaterefs();
-  }
-
-  set_update_refs_in_progress(true);
-
-  _update_refs_iterator.reset();
-
-  if (ShenandoahPacing) {
-    pacer()->setup_for_updaterefs();
-  }
-}
 
 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 private:
@@ -2782,42 +2149,14 @@ class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapR
   bool is_thread_safe() { return true; }
 };
 
-void ShenandoahHeap::op_final_updaterefs() {
-  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
-
-  finish_concurrent_unloading();
-
-  // Check if there is left-over work, and finish it
-  if (_update_refs_iterator.has_next()) {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
-
-    // Finish updating references where we left off.
-    clear_cancelled_gc();
-    update_heap_references(false);
-  }
-
-  // Clear cancelled GC, if set. On cancellation path, the block before would handle
-  // everything. On degenerated paths, cancelled gc would not be set anyway.
-  if (cancelled_gc()) {
-    clear_cancelled_gc();
-  }
-  assert(!cancelled_gc(), "Should have been done right before");
-
-  if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
-    verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
-  }
-
-  if (is_degenerated_gc_in_progress()) {
-    ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
-  }
-
-  // Has to be done before cset is clear
-  if (ShenandoahVerify) {
-    verifier()->verify_roots_in_to_space();
-  }
+void ShenandoahHeap::update_heap_region_states(bool concurrent) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
+  assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
 
   {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_update_region_states);
+    ShenandoahGCPhase phase(concurrent ?
+                            ShenandoahPhaseTimings::final_update_refs_update_region_states :
+                            ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
     parallel_heap_region_iterate(&cl);
 
@@ -2825,23 +2164,18 @@ void ShenandoahHeap::op_final_updaterefs() {
   }
 
   {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_trash_cset);
+    ShenandoahGCPhase phase(concurrent ?
+                            ShenandoahPhaseTimings::final_update_refs_trash_cset :
+                            ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
     trash_cset_regions();
   }
+}
 
-  set_has_forwarded_objects(false);
-  set_update_refs_in_progress(false);
-
-  if (ShenandoahVerify) {
-    verifier()->verify_after_updaterefs();
-  }
-
-  if (VerifyAfterGC) {
-    Universe::verify();
-  }
-
+void ShenandoahHeap::rebuild_free_set(bool concurrent) {
   {
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_rebuild_freeset);
+    ShenandoahGCPhase phase(concurrent ?
+                            ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
+                            ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
     ShenandoahHeapLocker locker(lock());
     _free_set->rebuild();
   }
@@ -2934,302 +2268,6 @@ void ShenandoahHeap::safepoint_synchronize_end() {
   }
 }
 
-void ShenandoahHeap::vmop_entry_init_mark() {
-  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
-  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_mark_gross);
-
-  try_inject_alloc_failure();
-  VM_ShenandoahInitMark op;
-  VMThread::execute(&op); // jump to entry_init_mark() under safepoint
-}
-
-void ShenandoahHeap::vmop_entry_final_mark() {
-  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
-  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_mark_gross);
-
-  try_inject_alloc_failure();
-  VM_ShenandoahFinalMarkStartEvac op;
-  VMThread::execute(&op); // jump to entry_final_mark under safepoint
-}
-
-void ShenandoahHeap::vmop_entry_init_updaterefs() {
-  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
-  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::init_update_refs_gross);
-
-  try_inject_alloc_failure();
-  VM_ShenandoahInitUpdateRefs op;
-  VMThread::execute(&op);
-}
-
-void ShenandoahHeap::vmop_entry_final_updaterefs() {
-  TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
-  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::final_update_refs_gross);
-
-  try_inject_alloc_failure();
-  VM_ShenandoahFinalUpdateRefs op;
-  VMThread::execute(&op);
-}
-
-void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
-  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
-  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
-
-  try_inject_alloc_failure();
-  VM_ShenandoahFullGC op(cause);
-  VMThread::execute(&op);
-}
-
-void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
-  TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
-  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::degen_gc_gross);
-
-  VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
-  VMThread::execute(&degenerated_gc);
-}
-
-void ShenandoahHeap::entry_init_mark() {
-  const char* msg = init_mark_event_message();
-  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_mark);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
-                              "init marking");
-
-  op_init_mark();
-}
-
-void ShenandoahHeap::entry_final_mark() {
-  const char* msg = final_mark_event_message();
-  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_mark);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
-                              "final marking");
-
-  op_final_mark();
-}
-
-void ShenandoahHeap::entry_init_updaterefs() {
-  static const char* msg = "Pause Init Update Refs";
-  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::init_update_refs);
-  EventMark em("%s", msg);
-
-  // No workers used in this phase, no setup required
-
-  op_init_updaterefs();
-}
-
-void ShenandoahHeap::entry_final_updaterefs() {
-  static const char* msg = "Pause Final Update Refs";
-  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::final_update_refs);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
-                              "final reference update");
-
-  op_final_updaterefs();
-}
-
-void ShenandoahHeap::entry_full(GCCause::Cause cause) {
-  static const char* msg = "Pause Full";
-  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
-                              "full gc");
-
-  op_full(cause);
-}
-
-void ShenandoahHeap::entry_degenerated(int point) {
-  ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
-  const char* msg = degen_event_message(dpoint);
-  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::degen_gc, true /* log_heap_usage */);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
-                              "stw degenerated gc");
-
-  set_degenerated_gc_in_progress(true);
-  op_degenerated(dpoint);
-  set_degenerated_gc_in_progress(false);
-}
-
-void ShenandoahHeap::entry_mark_roots() {
-  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
-
-  const char* msg = "Concurrent marking roots";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark_roots);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
-                              "concurrent marking roots");
-
-  try_inject_alloc_failure();
-  op_mark_roots();
-}
-
-void ShenandoahHeap::entry_mark() {
-  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
-
-  const char* msg = conc_mark_event_message();
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_mark);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
-                              "concurrent marking");
-
-  try_inject_alloc_failure();
-  op_mark();
-}
-
-void ShenandoahHeap::entry_evac() {
-  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
-
-  static const char* msg = "Concurrent evacuation";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_evac);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
-                              "concurrent evacuation");
-
-  try_inject_alloc_failure();
-  op_conc_evac();
-}
-
-void ShenandoahHeap::entry_update_thread_roots() {
-  TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
-
-  static const char* msg = "Concurrent update thread roots";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_thread_roots);
-  EventMark em("%s", msg);
-
-  // No workers used in this phase, no setup required
-  try_inject_alloc_failure();
-  op_update_thread_roots();
-}
-
-
-void ShenandoahHeap::entry_updaterefs() {
-  static const char* msg = "Concurrent update references";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_update_refs);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
-                              "concurrent reference update");
-
-  try_inject_alloc_failure();
-  op_updaterefs();
-}
-
-void ShenandoahHeap::entry_weak_refs() {
-  static const char* msg = "Concurrent weak references";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_refs);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing(),
-                              "concurrent weak references");
-
-  try_inject_alloc_failure();
-  op_weak_refs();
-}
-
-void ShenandoahHeap::entry_weak_roots() {
-  static const char* msg = "Concurrent weak roots";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_weak_roots);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
-                              "concurrent weak root");
-
-  try_inject_alloc_failure();
-  op_weak_roots();
-}
-
-void ShenandoahHeap::entry_class_unloading() {
-  static const char* msg = "Concurrent class unloading";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_class_unload);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
-                              "concurrent class unloading");
-
-  try_inject_alloc_failure();
-  op_class_unloading();
-}
-
-void ShenandoahHeap::entry_strong_roots() {
-  static const char* msg = "Concurrent strong roots";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_strong_roots);
-  EventMark em("%s", msg);
-
-  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_strong_roots);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
-                              "concurrent strong root");
-
-  try_inject_alloc_failure();
-  op_strong_roots();
-}
-
-void ShenandoahHeap::entry_cleanup_early() {
-  static const char* msg = "Concurrent cleanup";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_early, true /* log_heap_usage */);
-  EventMark em("%s", msg);
-
-  // This phase does not use workers, no need for setup
-
-  try_inject_alloc_failure();
-  op_cleanup_early();
-}
-
-void ShenandoahHeap::entry_rendezvous_roots() {
-  static const char* msg = "Rendezvous roots";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_rendezvous_roots);
-  EventMark em("%s", msg);
-
-  // This phase does not use workers, no need for setup
-  try_inject_alloc_failure();
-  op_rendezvous_roots();
-}
-
-void ShenandoahHeap::entry_cleanup_complete() {
-  static const char* msg = "Concurrent cleanup";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_cleanup_complete, true /* log_heap_usage */);
-  EventMark em("%s", msg);
-
-  // This phase does not use workers, no need for setup
-
-  try_inject_alloc_failure();
-  op_cleanup_complete();
-}
-
-void ShenandoahHeap::entry_reset() {
-  static const char* msg = "Concurrent reset";
-  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset);
-  EventMark em("%s", msg);
-
-  ShenandoahWorkerScope scope(workers(),
-                              ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
-                              "concurrent reset");
-
-  try_inject_alloc_failure();
-  op_reset();
-}
-
 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
   static const char *msg = "Concurrent uncommit";
   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
@@ -3303,60 +2341,6 @@ void ShenandoahHeap::deduplicate_string(oop str) {
   }
 }
 
-const char* ShenandoahHeap::init_mark_event_message() const {
-  assert(!has_forwarded_objects(), "Should not have forwarded objects here");
-
-  bool unload_cls = unload_classes();
-
-  if (unload_cls) {
-    return "Pause Init Mark (unload classes)";
-  } else {
-    return "Pause Init Mark";
-  }
-}
-
-const char* ShenandoahHeap::final_mark_event_message() const {
-  assert(!has_forwarded_objects(), "Should not have forwarded objects here");
-
-  bool unload_cls = unload_classes();
-
-  if (unload_cls) {
-    return "Pause Final Mark (unload classes)";
-  } else {
-    return "Pause Final Mark";
-  }
-}
-
-const char* ShenandoahHeap::conc_mark_event_message() const {
-  assert(!has_forwarded_objects(), "Should not have forwarded objects here");
-
-  bool unload_cls = unload_classes();
-
-  if (unload_cls) {
-    return "Concurrent marking (unload classes)";
-  } else {
-    return "Concurrent marking";
-  }
-}
-
-const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
-  switch (point) {
-    case _degenerated_unset:
-      return "Pause Degenerated GC (<UNSET>)";
-    case _degenerated_outside_cycle:
-      return "Pause Degenerated GC (Outside of Cycle)";
-    case _degenerated_mark:
-      return "Pause Degenerated GC (Mark)";
-    case _degenerated_evac:
-      return "Pause Degenerated GC (Evacuation)";
-    case _degenerated_updaterefs:
-      return "Pause Degenerated GC (Update Refs)";
-    default:
-      ShouldNotReachHere();
-      return "ERROR";
-  }
-}
-
 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
 #ifdef ASSERT
   assert(_liveness_cache != NULL, "sanity");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index c5ace725797e3..43223ae8d59de 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -55,6 +55,8 @@ class ShenandoahHeapRegion;
 class ShenandoahHeapRegionClosure;
 class ShenandoahCollectionSet;
 class ShenandoahFreeSet;
+class ShenandoahConcurrentMark;
+class ShenandoahMarkCompact;
 class ShenandoahMonitoringSupport;
 class ShenandoahPacer;
 class ShenandoahReferenceProcessor;
@@ -120,6 +122,11 @@ class ShenandoahHeap : public CollectedHeap {
   friend class ShenandoahGCStateResetter;
   friend class ShenandoahParallelObjectIterator;
   friend class ShenandoahSafepoint;
+  // Supported GC
+  friend class ShenandoahConcurrentGC;
+  friend class ShenandoahDegenGC;
+  friend class ShenandoahMarkCompact;
+
 // ---------- Locks that guard important data structures in Heap
 //
 private:
@@ -301,38 +308,6 @@ class ShenandoahHeap : public CollectedHeap {
   inline bool is_concurrent_strong_root_in_progress() const;
   inline bool is_concurrent_weak_root_in_progress() const;
 
-// ---------- GC cancellation and degeneration machinery
-//
-// Cancelled GC flag is used to notify concurrent phases that they should terminate.
-//
-public:
-  enum ShenandoahDegenPoint {
-    _degenerated_unset,
-    _degenerated_outside_cycle,
-    _degenerated_mark,
-    _degenerated_evac,
-    _degenerated_updaterefs,
-    _DEGENERATED_LIMIT
-  };
-
-  static const char* degen_point_to_string(ShenandoahDegenPoint point) {
-    switch (point) {
-      case _degenerated_unset:
-        return "<UNSET>";
-      case _degenerated_outside_cycle:
-        return "Outside of Cycle";
-      case _degenerated_mark:
-        return "Mark";
-      case _degenerated_evac:
-        return "Evacuation";
-      case _degenerated_updaterefs:
-        return "Update Refs";
-      default:
-        ShouldNotReachHere();
-        return "ERROR";
-    }
-  };
-
 private:
   enum CancelState {
     // Normal state. GC has not been cancelled and is open for cancellation.
@@ -362,85 +337,38 @@ class ShenandoahHeap : public CollectedHeap {
 
   void cancel_gc(GCCause::Cause cause);
 
-// ---------- GC operations entry points
-//
 public:
-  // Entry points to STW GC operations, these cause a related safepoint, that then
-  // call the entry method below
-  void vmop_entry_init_mark();
-  void vmop_entry_final_mark();
-  void vmop_entry_init_updaterefs();
-  void vmop_entry_final_updaterefs();
-  void vmop_entry_full(GCCause::Cause cause);
-  void vmop_degenerated(ShenandoahDegenPoint point);
-
-  // Entry methods to normally STW GC operations. These set up logging, monitoring
-  // and workers for net VM operation
-  void entry_init_mark();
-  void entry_final_mark();
-  void entry_init_updaterefs();
-  void entry_final_updaterefs();
-  void entry_full(GCCause::Cause cause);
-  void entry_degenerated(int point);
-
-  // Entry methods to normally concurrent GC operations. These set up logging, monitoring
-  // for concurrent operation.
-  void entry_reset();
-  void entry_mark_roots();
-  void entry_mark();
-  void entry_weak_refs();
-  void entry_weak_roots();
-  void entry_class_unloading();
-  void entry_strong_roots();
-  void entry_cleanup_early();
-  void entry_rendezvous_roots();
-  void entry_evac();
-  void entry_update_thread_roots();
-  void entry_updaterefs();
-  void entry_cleanup_complete();
+  // Elastic heap support
   void entry_uncommit(double shrink_before, size_t shrink_until);
+  void op_uncommit(double shrink_before, size_t shrink_until);
 
 private:
-  // Actual work for the phases
-  void op_init_mark();
-  void op_final_mark();
-  void op_init_updaterefs();
-  void op_final_updaterefs();
-  void op_full(GCCause::Cause cause);
-  void op_degenerated(ShenandoahDegenPoint point);
-  void op_degenerated_fail();
-  void op_degenerated_futile();
-
-  void op_reset();
-  void op_mark_roots();
-  void op_mark();
-  void op_weak_refs();
-  void op_weak_roots();
-  void op_class_unloading();
-  void op_strong_roots();
-  void op_cleanup_early();
-  void op_rendezvous_roots();
-  void op_conc_evac();
-  void op_stw_evac();
-  void op_update_thread_roots();
-  void op_updaterefs();
-  void op_cleanup_complete();
-  void op_uncommit(double shrink_before, size_t shrink_until);
+  // GC support
+  // Reset bitmap, prepare regions for new GC cycle
+  void prepare_gc();
+  void prepare_regions_and_collection_set(bool concurrent);
+  // Evacuation
+  void prepare_evacuation(bool concurrent);
+  void evacuate_collection_set(bool concurrent);
+  // Concurrent root processing
+  void prepare_concurrent_roots();
+  void finish_concurrent_roots();
+  // Concurrent class unloading support
+  void do_class_unloading();
+  // Reference updating
+  void prepare_update_heap_references(bool concurrent);
+  void update_heap_references(bool concurrent);
+  // Final update region states
+  void update_heap_region_states(bool concurrent);
+  void rebuild_free_set(bool concurrent);
 
   void rendezvous_threads();
+  void recycle_trash();
 
-  // Messages for GC trace events, they have to be immortal for
-  // passing around the logging/tracing systems
-  const char* init_mark_event_message() const;
-  const char* final_mark_event_message() const;
-  const char* conc_mark_event_message() const;
-  const char* degen_event_message(ShenandoahDegenPoint point) const;
-
-// Helpers
-  void finish_mark();
-  void prepare_evacuation();
+public:
+  void notify_gc_progress()    { _progress_last_gc.set();   }
+  void notify_gc_no_progress() { _progress_last_gc.unset(); }
 
-// ---------- GC subsystems
 //
 // Mark support
 private:
@@ -517,11 +445,6 @@ class ShenandoahHeap : public CollectedHeap {
   void stw_process_weak_roots(bool full_gc);
   void stw_weak_refs(bool full_gc);
 
-  // Prepare concurrent root processing
-  void prepare_concurrent_roots();
-  // Prepare and finish concurrent unloading
-  void prepare_concurrent_unloading();
-  void finish_concurrent_unloading();
   // Heap iteration support
   void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
   bool prepare_aux_bitmap_for_iteration();
@@ -717,7 +640,6 @@ class ShenandoahHeap : public CollectedHeap {
 
 private:
   void trash_cset_regions();
-  void update_heap_references(bool concurrent);
 
 // ---------- Testing helpers functions
 //
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
index b7f8c65a6830e..83fa0c1efe950 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp
@@ -28,13 +28,14 @@
 #include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/tlab_globals.hpp"
 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
-#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
+#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 #include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
+#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
@@ -42,7 +43,6 @@
 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
 #include "gc/shenandoah/shenandoahSTWMark.hpp"
-#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "gc/shenandoah/shenandoahVerifier.hpp"
 #include "gc/shenandoah/shenandoahVMOperations.hpp"
@@ -55,16 +55,60 @@
 #include "runtime/biasedLocking.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/thread.hpp"
+#include "runtime/vmThread.hpp"
 #include "utilities/copy.hpp"
+#include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
 #include "gc/shared/workgroup.hpp"
 
 ShenandoahMarkCompact::ShenandoahMarkCompact() :
-  _gc_timer(NULL),
+  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
   _preserved_marks(new PreservedMarksSet(true)) {}
 
-void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
-  _gc_timer = gc_timer;
+bool ShenandoahMarkCompact::collect(GCCause::Cause cause) {
+  vmop_entry_full(cause);
+  // Always success
+  return true;
+}
+
+void ShenandoahMarkCompact::vmop_entry_full(GCCause::Cause cause) {
+  ShenandoahHeap* const heap = ShenandoahHeap::heap();
+  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
+  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);
+
+  heap->try_inject_alloc_failure();
+  VM_ShenandoahFullGC op(cause, this);
+  VMThread::execute(&op);
+}
+
+void ShenandoahMarkCompact::entry_full(GCCause::Cause cause) {
+  static const char* msg = "Pause Full";
+  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
+  EventMark em("%s", msg);
+
+  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
+                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
+                              "full gc");
+
+  op_full(cause);
+}
+
+void ShenandoahMarkCompact::op_full(GCCause::Cause cause) {
+  ShenandoahMetricsSnapshot metrics;
+  metrics.snap_before();
+
+  // Perform full GC
+  do_it(cause);
+
+  metrics.snap_after();
+
+  if (metrics.is_good_progress()) {
+    ShenandoahHeap::heap()->notify_gc_progress();
+  } else {
+    // Nothing to do. Tell the allocation path that we have failed to make
+    // progress, and it can finally fail.
+    ShenandoahHeap::heap()->notify_gc_no_progress();
+  }
 }
 
 void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
@@ -116,14 +160,14 @@ void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
 
     // b. Cancel concurrent mark, if in progress
     if (heap->is_concurrent_mark_in_progress()) {
-      ShenandoahConcurrentMark::cancel();
+      ShenandoahConcurrentGC::cancel();
       heap->set_concurrent_mark_in_progress(false);
     }
     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
 
     // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
     if (has_forwarded_objects) {
-      ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::full_gc_update_roots);
+      update_roots(true /*full_gc*/);
     }
 
     // d. Reset the bitmaps for new marking
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.hpp
index 135c25b722269..13cb5fb77c689 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.hpp
@@ -26,8 +26,10 @@
 #define SHARE_GC_SHENANDOAH_SHENANDOAHMARKCOMPACT_HPP
 
 #include "gc/shared/gcTimer.hpp"
+#include "gc/shenandoah/shenandoahGC.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahMetrics.hpp"
 
 /**
  * This implements Full GC (e.g. when invoking System.gc()) using a mark-compact algorithm.
@@ -51,9 +53,14 @@
  */
 
 class PreservedMarksSet;
+class VM_ShenandoahFullGC;
+class ShenandoahDegenGC;
 
-class ShenandoahMarkCompact : public StackObj {
+class ShenandoahMarkCompact : public ShenandoahGC {
   friend class ShenandoahPrepareForCompactionObjectClosure;
+  friend class VM_ShenandoahFullGC;
+  friend class ShenandoahDegenGC;
+
 private:
   GCTimer* _gc_timer;
 
@@ -61,11 +68,16 @@ class ShenandoahMarkCompact : public StackObj {
 
 public:
   ShenandoahMarkCompact();
-  void initialize(GCTimer* gc_timer);
+  bool collect(GCCause::Cause cause);
+
+private:
+  // GC entries
+  void vmop_entry_full(GCCause::Cause cause);
+  void entry_full(GCCause::Cause cause);
+  void op_full(GCCause::Cause cause);
 
   void do_it(GCCause::Cause gc_cause);
 
-private:
   void phase1_mark_heap();
   void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices);
   void phase3_update_references();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
index fa320c0bc1a56..38246ca981672 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
@@ -98,8 +98,8 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) {
   switch (phase) {
     case init_evac:
     case scan_roots:
-    case update_roots:
-    case final_update_refs_roots:
+    case finish_mark:
+    case purge_weak_par:
     case full_gc_mark:
     case full_gc_update_roots:
     case full_gc_adjust_roots:
@@ -126,9 +126,8 @@ bool ShenandoahPhaseTimings::is_worker_phase(Phase phase) {
 bool ShenandoahPhaseTimings::is_root_work_phase(Phase phase) {
   switch (phase) {
     case scan_roots:
-    case update_roots:
+    case finish_mark:
     case init_evac:
-    case final_update_refs_roots:
     case degen_gc_update_roots:
     case full_gc_mark:
     case full_gc_update_roots:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
index 47eeed2080f32..60ab163c81f4c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp
@@ -56,17 +56,18 @@ class outputStream;
   f(scan_roots,                                     "  Scan Roots")                    \
   SHENANDOAH_PAR_PHASE_DO(scan_,                    "    S: ", f)                      \
                                                                                        \
-  f(conc_mark,                                      "Concurrent Marking")              \
   f(conc_mark_roots,                                "  Roots ")                        \
   SHENANDOAH_PAR_PHASE_DO(conc_mark_roots,          "    CM: ", f)                     \
+  f(conc_mark,                                      "Concurrent Marking")              \
                                                                                        \
   f(final_mark_gross,                               "Pause Final Mark (G)")            \
   f(final_mark,                                     "Pause Final Mark (N)")            \
-  f(update_roots,                                   "  Update Roots")                  \
-  SHENANDOAH_PAR_PHASE_DO(update_,                  "    U: ", f)                      \
-  f(finish_queues,                                  "  Finish Queues")                 \
-  f(weakrefs,                                       "  Weak References")               \
-  f(weakrefs_process,                               "    Process")                     \
+  f(finish_mark,                                    "  Finish Mark")                   \
+  SHENANDOAH_PAR_PHASE_DO(finish_mark_,             "    FM: ", f)                     \
+  f(purge,                                          "  System Purge")                  \
+  SHENANDOAH_PAR_PHASE_DO(purge_cu_par_,            "      CU: ", f)                   \
+  f(purge_weak_par,                                 "    Weak Roots")                  \
+  SHENANDOAH_PAR_PHASE_DO(purge_weak_par_,          "      WR: ", f)                   \
   f(final_update_region_states,                     "  Update Region States")          \
   f(final_manage_labs,                              "  Manage GC/TLABs")               \
   f(choose_cset,                                    "  Choose Collection Set")         \
@@ -101,14 +102,12 @@ class outputStream;
   f(init_update_refs,                               "Pause Init  Update Refs (N)")     \
   f(init_update_refs_manage_gclabs,                 "  Manage GCLABs")                 \
                                                                                        \
-  f(conc_update_thread_roots,                       "Concurrent Update Thread Roots")  \
   f(conc_update_refs,                               "Concurrent Update Refs")          \
+  f(conc_update_thread_roots,                       "Concurrent Update Thread Roots")  \
                                                                                        \
   f(final_update_refs_gross,                        "Pause Final Update Refs (G)")     \
   f(final_update_refs,                              "Pause Final Update Refs (N)")     \
   f(final_update_refs_finish_work,                  "  Finish Work")                   \
-  f(final_update_refs_roots,                        "  Update Roots")                  \
-  SHENANDOAH_PAR_PHASE_DO(final_update_,            "    UR: ", f)                     \
   f(final_update_refs_update_region_states,         "  Update Region States")          \
   f(final_update_refs_trash_cset,                   "  Trash Collection Set")          \
   f(final_update_refs_rebuild_freeset,              "  Rebuild Free Set")              \
@@ -129,8 +128,20 @@ class outputStream;
   f(degen_gc_purge_weak_par,                        "     Weak Roots")                 \
   SHENANDOAH_PAR_PHASE_DO(degen_gc_purge_weak_p_,   "       DWR: ", f)                 \
   f(degen_gc_purge_cldg,                            "     CLDG")                       \
+  f(degen_gc_final_update_region_states,            "  Update Region States")          \
+  f(degen_gc_final_manage_labs,                     "  Manage GC/TLABs")               \
+  f(degen_gc_choose_cset,                           "  Choose Collection Set")         \
+  f(degen_gc_final_rebuild_freeset,                 "  Rebuild Free Set")              \
+  f(degen_gc_stw_evac,                              "  Evacuation")                    \
+  f(degen_gc_init_update_refs_manage_gclabs,        "  Manage GCLABs")                 \
+  f(degen_gc_updaterefs,                            "  Update References")             \
+  f(degen_gc_final_update_refs_finish_work,         "  Finish Work")                   \
+  f(degen_gc_final_update_refs_update_region_states,"  Update Region States")          \
+  f(degen_gc_final_update_refs_trash_cset,          "  Trash Collection Set")          \
+  f(degen_gc_final_update_refs_rebuild_freeset,     "  Rebuild Free Set")              \
   f(degen_gc_update_roots,                          "  Degen Update Roots")            \
   SHENANDOAH_PAR_PHASE_DO(degen_gc_update_,         "    DU: ", f)                     \
+  f(degen_gc_cleanup_complete,                      "  Cleanup")                       \
                                                                                        \
   f(full_gc_gross,                                  "Pause Full GC (G)")               \
   f(full_gc,                                        "Pause Full GC (N)")               \
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index 0fd3bc9d9c960..4af89f7465dbc 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,11 @@
 
 #include "precompiled.hpp"
 
+#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
+#include "gc/shenandoah/shenandoahDegeneratedGC.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahMark.inline.hpp"
+#include "gc/shenandoah/shenandoahMarkCompact.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "gc/shenandoah/shenandoahVMOperations.hpp"
 #include "memory/universe.hpp"
@@ -43,30 +47,30 @@ void VM_ShenandoahReferenceOperation::doit_epilogue() {
 
 void VM_ShenandoahInitMark::doit() {
   ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
-  ShenandoahHeap::heap()->entry_init_mark();
+  _gc->entry_init_mark();
 }
 
 void VM_ShenandoahFinalMarkStartEvac::doit() {
   ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
-  ShenandoahHeap::heap()->entry_final_mark();
+  _gc->entry_final_mark();
 }
 
 void VM_ShenandoahFullGC::doit() {
   ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::FULL);
-  ShenandoahHeap::heap()->entry_full(_gc_cause);
+  _full_gc->entry_full(_gc_cause);
 }
 
 void VM_ShenandoahDegeneratedGC::doit() {
   ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
-  ShenandoahHeap::heap()->entry_degenerated(_point);
+  _gc->entry_degenerated();
 }
 
 void VM_ShenandoahInitUpdateRefs::doit() {
   ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
-  ShenandoahHeap::heap()->entry_init_updaterefs();
+  _gc->entry_init_updaterefs();
 }
 
 void VM_ShenandoahFinalUpdateRefs::doit() {
   ShenandoahGCPauseMark mark(_gc_id, SvcGCMarker::CONCURRENT);
-  ShenandoahHeap::heap()->entry_final_updaterefs();
+  _gc->entry_final_updaterefs();
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
index b3a4aef6426d3..72f38cba01970 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,10 @@
 
 #include "gc/shared/gcVMOperations.hpp"
 
+class ShenandoahConcurrentGC;
+class ShenandoahDegenGC;
+class ShenandoahMarkCompact;
+
 // VM_operations for the Shenandoah Collector.
 //
 // VM_ShenandoahOperation
@@ -52,16 +56,24 @@ class VM_ShenandoahReferenceOperation : public VM_ShenandoahOperation {
 };
 
 class VM_ShenandoahInitMark: public VM_ShenandoahOperation {
+private:
+  ShenandoahConcurrentGC* const _gc;
 public:
-  VM_ShenandoahInitMark() : VM_ShenandoahOperation() {};
+  VM_ShenandoahInitMark(ShenandoahConcurrentGC* gc) :
+    VM_ShenandoahOperation(),
+    _gc(gc) {};
   VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitMark; }
   const char* name()             const { return "Shenandoah Init Marking"; }
   virtual void doit();
 };
 
 class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahOperation {
+private:
+  ShenandoahConcurrentGC* const _gc;
 public:
-  VM_ShenandoahFinalMarkStartEvac() : VM_ShenandoahOperation() {};
+  VM_ShenandoahFinalMarkStartEvac(ShenandoahConcurrentGC* gc) :
+    VM_ShenandoahOperation(),
+    _gc(gc) {};
   VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalMarkStartEvac; }
   const char* name()             const { return "Shenandoah Final Mark and Start Evacuation"; }
   virtual  void doit();
@@ -69,11 +81,12 @@ class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahOperation {
 
 class VM_ShenandoahDegeneratedGC: public VM_ShenandoahReferenceOperation {
 private:
-  // Really the ShenandoahHeap::ShenandoahDegenerationPoint, but casted to int here
-  // in order to avoid dependency on ShenandoahHeap
-  int _point;
+  ShenandoahDegenGC* const _gc;
 public:
-  VM_ShenandoahDegeneratedGC(int point) : VM_ShenandoahReferenceOperation(), _point(point) {};
+  VM_ShenandoahDegeneratedGC(ShenandoahDegenGC* gc) :
+    VM_ShenandoahReferenceOperation(),
+    _gc(gc) {};
+
   VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahDegeneratedGC; }
   const char* name()             const { return "Shenandoah Degenerated GC"; }
   virtual  void doit();
@@ -81,25 +94,35 @@ class VM_ShenandoahDegeneratedGC: public VM_ShenandoahReferenceOperation {
 
 class VM_ShenandoahFullGC : public VM_ShenandoahReferenceOperation {
 private:
-  GCCause::Cause _gc_cause;
+  GCCause::Cause                _gc_cause;
+  ShenandoahMarkCompact* const  _full_gc;
 public:
-  VM_ShenandoahFullGC(GCCause::Cause gc_cause) : VM_ShenandoahReferenceOperation(), _gc_cause(gc_cause) {};
+  VM_ShenandoahFullGC(GCCause::Cause gc_cause, ShenandoahMarkCompact* full_gc) :
+    VM_ShenandoahReferenceOperation(),
+    _gc_cause(gc_cause),
+    _full_gc(full_gc) {};
   VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFullGC; }
   const char* name()             const { return "Shenandoah Full GC"; }
   virtual void doit();
 };
 
 class VM_ShenandoahInitUpdateRefs: public VM_ShenandoahOperation {
+  ShenandoahConcurrentGC* const _gc;
 public:
-  VM_ShenandoahInitUpdateRefs() : VM_ShenandoahOperation() {};
+  VM_ShenandoahInitUpdateRefs(ShenandoahConcurrentGC* gc) :
+    VM_ShenandoahOperation(),
+    _gc(gc) {};
   VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitUpdateRefs; }
   const char* name()             const { return "Shenandoah Init Update References"; }
   virtual void doit();
 };
 
 class VM_ShenandoahFinalUpdateRefs: public VM_ShenandoahOperation {
+  ShenandoahConcurrentGC* const _gc;
 public:
-  VM_ShenandoahFinalUpdateRefs() : VM_ShenandoahOperation() {};
+  VM_ShenandoahFinalUpdateRefs(ShenandoahConcurrentGC* gc) :
+    VM_ShenandoahOperation(),
+    _gc(gc) {};
   VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalUpdateRefs; }
   const char* name()             const { return "Shenandoah Final Update References"; }
   virtual void doit();