Commit 45fa5aa

Author: Markus Grönlund
Committed: Jun 5, 2020

8242088: Replace mutually exclusive lists with concurrent alternatives
Reviewed-by: egahlin

1 parent 4de4200; commit 45fa5aa
37 files changed: +2324 -1162 lines
 

‎src/hotspot/share/jfr/jfr.cpp

+1 -1 lines changed

@@ -25,14 +25,14 @@
 #include "precompiled.hpp"
 #include "jfr/jfr.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
-#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
 #include "jfr/recorder/repository/jfrEmergencyDump.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/repository/jfrRepository.hpp"
 #include "jfr/support/jfrThreadLocal.hpp"
 #include "runtime/java.hpp"
+#include "runtime/thread.hpp"

 bool Jfr::is_enabled() {
   return JfrRecorder::is_enabled();

‎src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp

+98 -96 lines changed

@@ -38,6 +38,7 @@
 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
 #include "jfr/utilities/jfrBigEndian.hpp"
 #include "jfr/utilities/jfrIterator.hpp"
+#include "jfr/utilities/jfrLinkedList.inline.hpp"
 #include "jfr/utilities/jfrThreadIterator.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 #include "jfr/writers/jfrJavaEventWriter.hpp"
@@ -50,9 +51,7 @@
 #include "runtime/os.inline.hpp"
 #include "runtime/safepoint.hpp"

-typedef JfrCheckpointManager::Buffer* BufferPtr;
-
-static JfrCheckpointManager* _instance = NULL;
+typedef JfrCheckpointManager::BufferPtr BufferPtr;

 static volatile bool constant_pending = false;

@@ -70,6 +69,8 @@ static void set_constant_pending() {
   }
 }

+static JfrCheckpointManager* _instance = NULL;
+
 JfrCheckpointManager& JfrCheckpointManager::instance() {
   return *_instance;
 }
@@ -89,7 +90,6 @@ void JfrCheckpointManager::destroy() {
 JfrCheckpointManager::JfrCheckpointManager(JfrChunkWriter& cw) :
   _free_list_mspace(NULL),
   _epoch_transition_mspace(NULL),
-  _lock(NULL),
   _service_thread(NULL),
   _chunkwriter(cw),
   _checkpoint_epoch_state(JfrTraceIdEpoch::epoch()) {}
@@ -101,9 +101,6 @@ JfrCheckpointManager::~JfrCheckpointManager() {
   if (_epoch_transition_mspace != NULL) {
     delete _epoch_transition_mspace;
   }
-  if (_lock != NULL) {
-    delete _lock;
-  }
   JfrTypeManager::destroy();
 }

@@ -126,40 +123,22 @@ bool JfrCheckpointManager::initialize() {
   if (_epoch_transition_mspace == NULL) {
     return false;
   }
-  assert(_lock == NULL, "invariant");
-  _lock = new Mutex(Monitor::leaf - 1, "Checkpoint mutex", Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
-  if (_lock == NULL) {
-    return false;
-  }
   return JfrTypeManager::initialize();
 }

 void JfrCheckpointManager::register_service_thread(const Thread* thread) {
   _service_thread = thread;
 }

-void JfrCheckpointManager::register_full(BufferPtr t, Thread* thread) {
+void JfrCheckpointManager::register_full(BufferPtr buffer, Thread* thread) {
   // nothing here at the moment
-  assert(t != NULL, "invariant");
-  assert(t->acquired_by(thread), "invariant");
-  assert(t->retired(), "invariant");
-}
-
-void JfrCheckpointManager::lock() {
-  assert(!_lock->owned_by_self(), "invariant");
-  _lock->lock_without_safepoint_check();
-}
-
-void JfrCheckpointManager::unlock() {
-  _lock->unlock();
+  assert(buffer != NULL, "invariant");
+  assert(buffer->acquired_by(thread), "invariant");
+  assert(buffer->retired(), "invariant");
 }

 #ifdef ASSERT
-bool JfrCheckpointManager::is_locked() const {
-  return _lock->owned_by_self();
-}
-
-static void assert_free_lease(const BufferPtr buffer) {
+static void assert_lease(const BufferPtr buffer) {
   assert(buffer != NULL, "invariant");
   assert(buffer->acquired_by_self(), "invariant");
   assert(buffer->lease(), "invariant");
@@ -172,45 +151,46 @@ static void assert_release(const BufferPtr buffer) {
 }
 #endif // ASSERT

-static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) {
+bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
+  return _service_thread != thread && Atomic::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
+}
+
+static const size_t lease_retry = 10;
+
+BufferPtr JfrCheckpointManager::lease(JfrCheckpointMspace* mspace, Thread* thread, size_t size /* 0 */) {
+  assert(mspace != NULL, "invariant");
   static const size_t max_elem_size = mspace->min_elem_size(); // min is max
   BufferPtr buffer;
   if (size <= max_elem_size) {
-    BufferPtr buffer = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
+    buffer = mspace_get_free_lease_with_retry(size, mspace, lease_retry, thread);
     if (buffer != NULL) {
-      DEBUG_ONLY(assert_free_lease(buffer);)
+      DEBUG_ONLY(assert_lease(buffer);)
      return buffer;
    }
  }
-  buffer = mspace_allocate_transient_lease_to_free(size, mspace, thread);
-  DEBUG_ONLY(assert_free_lease(buffer);)
+  buffer = mspace_allocate_transient_lease_to_full(size, mspace, thread);
+  DEBUG_ONLY(assert_lease(buffer);)
   return buffer;
 }

-bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const {
-  return _service_thread != thread && Atomic::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch();
-}
-
-static const size_t lease_retry = 10;
-
-BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) {
+BufferPtr JfrCheckpointManager::lease(Thread* thread, size_t size /* 0 */) {
   JfrCheckpointManager& manager = instance();
-  if (manager.use_epoch_transition_mspace(thread)) {
-    return lease_free(size, manager._epoch_transition_mspace, lease_retry, thread);
-  }
-  return lease_free(size, manager._free_list_mspace, lease_retry, thread);
+  JfrCheckpointMspace* const mspace = manager.use_epoch_transition_mspace(thread) ?
+                                      manager._epoch_transition_mspace :
+                                      manager._free_list_mspace;
+  return lease(mspace, thread, size);
 }

 JfrCheckpointMspace* JfrCheckpointManager::lookup(BufferPtr old) const {
   assert(old != NULL, "invariant");
-  return _free_list_mspace->in_free_list(old) ? _free_list_mspace : _epoch_transition_mspace;
+  return _free_list_mspace->in_mspace(old) ? _free_list_mspace : _epoch_transition_mspace;
 }

-BufferPtr JfrCheckpointManager::lease_buffer(BufferPtr old, Thread* thread, size_t size /* 0 */) {
+BufferPtr JfrCheckpointManager::lease(BufferPtr old, Thread* thread, size_t size /* 0 */) {
   assert(old != NULL, "invariant");
   JfrCheckpointMspace* mspace = instance().lookup(old);
   assert(mspace != NULL, "invariant");
-  return lease_free(size, mspace, lease_retry, thread);
+  return lease(mspace, thread, size);
 }

 /*
@@ -219,10 +199,14 @@ BufferPtr JfrCheckpointManager::lease_buffer(BufferPtr old, Thread* thread, size
  * The buffer is effectively invalidated for the thread post-return,
  * and the caller should take means to ensure that it is not referenced.
  */
-static void release(BufferPtr const buffer, Thread* thread) {
+static void release(BufferPtr buffer, Thread* thread) {
   DEBUG_ONLY(assert_release(buffer);)
   buffer->clear_lease();
-  buffer->release();
+  if (buffer->transient()) {
+    buffer->set_retired();
+  } else {
+    buffer->release();
+  }
 }

 BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
@@ -235,7 +219,7 @@ BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t request
     return NULL;
   }
   // migration of in-flight information
-  BufferPtr const new_buffer = lease_buffer(old, thread, used + requested);
+  BufferPtr const new_buffer = lease(old, thread, used + requested);
   if (new_buffer != NULL) {
     migrate_outstanding_writes(old, new_buffer, used, requested);
   }
@@ -335,18 +319,22 @@ class CheckpointWriteOp {
   size_t processed() const { return _processed; }
 };

-typedef CheckpointWriteOp<JfrCheckpointMspace::Type> WriteOperation;
-typedef ReleaseOp<JfrCheckpointMspace> CheckpointReleaseOperation;
+typedef CheckpointWriteOp<JfrCheckpointManager::Buffer> WriteOperation;
+typedef ReleaseOp<JfrCheckpointMspace> CheckpointReleaseFreeOperation;
+typedef ScavengingReleaseOp<JfrCheckpointMspace> CheckpointReleaseFullOperation;

-template <template <typename> class WriterHost, template <typename, typename, typename> class CompositeOperation>
+template <template <typename> class WriterHost>
 static size_t write_mspace(JfrCheckpointMspace* mspace, JfrChunkWriter& chunkwriter) {
   assert(mspace != NULL, "invariant");
   WriteOperation wo(chunkwriter);
   WriterHost<WriteOperation> wh(wo);
-  CheckpointReleaseOperation cro(mspace, Thread::current(), false);
-  CompositeOperation<WriterHost<WriteOperation>, CheckpointReleaseOperation, CompositeOperationAnd> co(&wh, &cro);
-  assert(mspace->is_full_empty(), "invariant");
-  process_free_list(co, mspace);
+  CheckpointReleaseFreeOperation free_release_op(mspace);
+  CompositeOperation<WriterHost<WriteOperation>, CheckpointReleaseFreeOperation> free_op(&wh, &free_release_op);
+  process_free_list(free_op, mspace);
+  CheckpointReleaseFullOperation full_release_op(mspace);
+  MutexedWriteOp<WriteOperation> full_write_op(wo);
+  CompositeOperation<MutexedWriteOp<WriteOperation>, CheckpointReleaseFullOperation> full_op(&full_write_op, &full_release_op);
+  process_full_list(full_op, mspace);
   return wo.processed();
 }

@@ -369,52 +357,66 @@ void JfrCheckpointManager::synchronize_checkpoint_manager_with_current_epoch() {
 }

 size_t JfrCheckpointManager::write() {
-  const size_t processed = write_mspace<MutexedWriteOp, CompositeOperation>(_free_list_mspace, _chunkwriter);
+  const size_t processed = write_mspace<MutexedWriteOp>(_free_list_mspace, _chunkwriter);
   synchronize_checkpoint_manager_with_current_epoch();
   return processed;
 }

 size_t JfrCheckpointManager::write_epoch_transition_mspace() {
-  return write_mspace<ExclusiveOp, CompositeOperation>(_epoch_transition_mspace, _chunkwriter);
+  return write_mspace<ExclusiveOp>(_epoch_transition_mspace, _chunkwriter);
 }

-typedef DiscardOp<DefaultDiscarder<JfrBuffer> > DiscardOperation;
+typedef DiscardOp<DefaultDiscarder<JfrCheckpointManager::Buffer> > DiscardOperation;
+typedef ExclusiveDiscardOp<DefaultDiscarder<JfrCheckpointManager::Buffer> > DiscardOperationEpochTransitionMspace;
+typedef CompositeOperation<DiscardOperation, CheckpointReleaseFreeOperation> DiscardFreeOperation;
+typedef CompositeOperation<DiscardOperation, CheckpointReleaseFullOperation> DiscardFullOperation;
+typedef CompositeOperation<DiscardOperationEpochTransitionMspace, CheckpointReleaseFreeOperation> DiscardEpochTransMspaceFreeOperation;
+typedef CompositeOperation<DiscardOperationEpochTransitionMspace, CheckpointReleaseFullOperation> DiscardEpochTransMspaceFullOperation;
+
 size_t JfrCheckpointManager::clear() {
   clear_type_set();
-  DiscardOperation discarder(mutexed); // mutexed discard mode
-  process_free_list(discarder, _free_list_mspace);
-  process_free_list(discarder, _epoch_transition_mspace);
+  DiscardOperation mutex_discarder(mutexed);
+  CheckpointReleaseFreeOperation free_release_op(_free_list_mspace);
+  DiscardFreeOperation free_op(&mutex_discarder, &free_release_op);
+  process_free_list(free_op, _free_list_mspace);
+  CheckpointReleaseFullOperation full_release_op(_free_list_mspace);
+  DiscardFullOperation full_op(&mutex_discarder, &full_release_op);
+  process_full_list(full_op, _free_list_mspace);
+  DiscardOperationEpochTransitionMspace epoch_transition_discarder(mutexed);
+  CheckpointReleaseFreeOperation epoch_free_release_op(_epoch_transition_mspace);
+  DiscardEpochTransMspaceFreeOperation epoch_free_op(&epoch_transition_discarder, &epoch_free_release_op);
+  process_free_list(epoch_free_op, _epoch_transition_mspace);
+  CheckpointReleaseFullOperation epoch_full_release_op(_epoch_transition_mspace);
+  DiscardEpochTransMspaceFullOperation epoch_full_op(&epoch_transition_discarder, &epoch_full_release_op);
+  process_full_list(epoch_full_op, _epoch_transition_mspace);
   synchronize_checkpoint_manager_with_current_epoch();
-  return discarder.elements();
+  return mutex_discarder.elements() + epoch_transition_discarder.elements();
 }

 // Optimization for write_static_type_set() and write_threads() is to write
 // directly into the epoch transition mspace because we will immediately
 // serialize and reset this mspace post-write.
-static JfrBuffer* get_epoch_transition_buffer(JfrCheckpointMspace* mspace, Thread* t) {
-  assert(mspace != NULL, "invariant");
-  JfrBuffer* const buffer = mspace->free_head();
-  assert(buffer != NULL, "invariant");
-  buffer->acquire(t);
-  buffer->set_lease();
-  DEBUG_ONLY(assert_free_lease(buffer);)
+BufferPtr JfrCheckpointManager::epoch_transition_buffer(Thread* thread) {
+  assert(_epoch_transition_mspace->free_list_is_nonempty(), "invariant");
+  BufferPtr const buffer = lease(_epoch_transition_mspace, thread, _epoch_transition_mspace->min_elem_size());
+  DEBUG_ONLY(assert_lease(buffer);)
   return buffer;
 }

 size_t JfrCheckpointManager::write_static_type_set() {
-  Thread* const t = Thread::current();
-  ResourceMark rm(t);
-  HandleMark hm(t);
-  JfrCheckpointWriter writer(t, get_epoch_transition_buffer(_epoch_transition_mspace, t), STATICS);
+  Thread* const thread = Thread::current();
+  ResourceMark rm(thread);
+  HandleMark hm(thread);
+  JfrCheckpointWriter writer(thread, epoch_transition_buffer(thread), STATICS);
   JfrTypeManager::write_static_types(writer);
   return writer.used_size();
 }

 size_t JfrCheckpointManager::write_threads() {
-  Thread* const t = Thread::current();
-  ResourceMark rm(t);
-  HandleMark hm(t);
-  JfrCheckpointWriter writer(t, get_epoch_transition_buffer(_epoch_transition_mspace, t), THREADS);
+  Thread* const thread = Thread::current();
+  ResourceMark rm(thread);
+  HandleMark hm(thread);
+  JfrCheckpointWriter writer(thread, epoch_transition_buffer(thread), THREADS);
   JfrTypeManager::write_threads(writer);
   return writer.used_size();
 }
@@ -442,20 +444,20 @@ void JfrCheckpointManager::clear_type_set() {

 void JfrCheckpointManager::write_type_set() {
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
+  Thread* const thread = Thread::current();
   if (LeakProfiler::is_running()) {
-    Thread* const t = Thread::current();
     // can safepoint here
-    MutexLocker cld_lock(t, ClassLoaderDataGraph_lock);
-    MutexLocker module_lock(t, Module_lock);
-    JfrCheckpointWriter leakp_writer(t);
-    JfrCheckpointWriter writer(t);
+    MutexLocker cld_lock(thread, ClassLoaderDataGraph_lock);
+    MutexLocker module_lock(thread, Module_lock);
+    JfrCheckpointWriter leakp_writer(thread);
+    JfrCheckpointWriter writer(thread);
     JfrTypeSet::serialize(&writer, &leakp_writer, false, false);
     ObjectSampleCheckpoint::on_type_set(leakp_writer);
   } else {
     // can safepoint here
     MutexLocker cld_lock(ClassLoaderDataGraph_lock);
     MutexLocker module_lock(Module_lock);
-    JfrCheckpointWriter writer(Thread::current());
+    JfrCheckpointWriter writer(thread);
     JfrTypeSet::serialize(&writer, NULL, false, false);
   }
   write();
@@ -489,27 +491,27 @@ size_t JfrCheckpointManager::flush_type_set() {
   if (is_constant_pending()) {
     WriteOperation wo(_chunkwriter);
     FlushOperation fo(wo);
-    assert(_free_list_mspace->is_full_empty(), "invariant");
     process_free_list(fo, _free_list_mspace);
+    process_full_list(fo, _free_list_mspace);
   }
   return elements;
 }

-void JfrCheckpointManager::create_thread_blob(Thread* t) {
-  JfrTypeManager::create_thread_blob(t);
+void JfrCheckpointManager::create_thread_blob(Thread* thread) {
+  JfrTypeManager::create_thread_blob(thread);
 }

-void JfrCheckpointManager::write_thread_checkpoint(Thread* t) {
-  JfrTypeManager::write_thread_checkpoint(t);
+void JfrCheckpointManager::write_thread_checkpoint(Thread* thread) {
+  JfrTypeManager::write_thread_checkpoint(thread);
 }

 class JfrNotifyClosure : public ThreadClosure {
  public:
-  void do_thread(Thread* t) {
-    assert(t != NULL, "invariant");
-    assert(t->is_Java_thread(), "invariant");
+  void do_thread(Thread* thread) {
+    assert(thread != NULL, "invariant");
+    assert(thread->is_Java_thread(), "invariant");
     assert_locked_or_safepoint(Threads_lock);
-    JfrJavaEventWriter::notify((JavaThread*)t);
+    JfrJavaEventWriter::notify((JavaThread*)thread);
   }
 };
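
The reworked static release() above now distinguishes transient leases from pooled ones: a transient buffer is retired so that the full-list processing reclaims it, while a pooled buffer goes straight back to the identity-released state. A minimal, self-contained sketch of just that rule, using toy types rather than the real JfrBuffer API (not part of the commit):

#include <cassert>
#include <cstddef>

// Toy buffer with only the flags the new release() path inspects.
struct ToyBuffer {
  bool transient;  // allocated outside the memory space for a one-shot lease
  bool leased;
  bool retired;
  bool released;
};

static void release(ToyBuffer* buffer) {
  assert(buffer != NULL);
  buffer->leased = false;          // clear_lease()
  if (buffer->transient) {
    buffer->retired = true;        // set_retired(): reclaimed by full-list processing later
  } else {
    buffer->released = true;       // release(): identity dropped, buffer reusable immediately
  }
}

int main() {
  ToyBuffer pooled  = { false, true, false, false };
  ToyBuffer oneshot = { true,  true, false, false };
  release(&pooled);
  release(&oneshot);
  assert(pooled.released && !pooled.retired);
  assert(oneshot.retired && !oneshot.released);
  return 0;
}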

‎src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.hpp

+20 -21 lines changed

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "jfr/recorder/storage/jfrBuffer.hpp"
 #include "jfr/recorder/storage/jfrMemorySpace.hpp"
 #include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
+#include "jfr/utilities/jfrLinkedList.hpp"

 class JfrCheckpointManager;
 class JfrChunkWriter;
@@ -44,7 +45,7 @@ struct JfrCheckpointEntry {
   juint nof_segments;
 };

-typedef JfrMemorySpace<JfrBuffer, JfrMspaceSequentialRetrieval, JfrCheckpointManager> JfrCheckpointMspace;
+typedef JfrMemorySpace<JfrCheckpointManager, JfrMspaceRetrieval, JfrLinkedList<JfrBuffer> > JfrCheckpointMspace;

 //
 // Responsible for maintaining checkpoints and by implication types.
@@ -53,28 +54,29 @@ typedef JfrMemorySpace<JfrBuffer, JfrMspaceSequentialRetrieval, JfrCheckpointMan
 //
 class JfrCheckpointManager : public JfrCHeapObj {
  public:
-  typedef JfrCheckpointMspace::Type Buffer;
+  size_t flush_type_set();
+  static void create_thread_blob(Thread* thread);
+  static void write_thread_checkpoint(Thread* thread);
+  void register_service_thread(const Thread* thread);
+  typedef JfrCheckpointMspace::Node Buffer;
+  typedef JfrCheckpointMspace::NodePtr BufferPtr;
+
  private:
   JfrCheckpointMspace* _free_list_mspace;
   JfrCheckpointMspace* _epoch_transition_mspace;
-  Mutex* _lock;
   const Thread* _service_thread;
   JfrChunkWriter& _chunkwriter;
   bool _checkpoint_epoch_state;

-  // mspace callback
-  void register_full(Buffer* t, Thread* thread);
-  void lock();
-  void unlock();
-  DEBUG_ONLY(bool is_locked() const;)
-
-  JfrCheckpointMspace* lookup(Buffer* old) const;
-  bool use_epoch_transition_mspace(const Thread* t) const;
+  JfrCheckpointMspace* lookup(BufferPtr old) const;
+  bool use_epoch_transition_mspace(const Thread* thread) const;
   size_t write_epoch_transition_mspace();
+  BufferPtr epoch_transition_buffer(Thread* thread);

-  static Buffer* lease_buffer(Thread* t, size_t size = 0);
-  static Buffer* lease_buffer(Buffer* old, Thread* t, size_t size = 0);
-  static Buffer* flush(Buffer* old, size_t used, size_t requested, Thread* t);
+  static BufferPtr lease(Thread* thread, size_t size = 0);
+  static BufferPtr lease(BufferPtr old, Thread* thread, size_t size = 0);
+  static BufferPtr lease(JfrCheckpointMspace* mspace, Thread* thread, size_t size = 0);
+  static BufferPtr flush(BufferPtr old, size_t used, size_t requested, Thread* thread);

   size_t clear();
   size_t write();
@@ -102,11 +104,8 @@ class JfrCheckpointManager : public JfrCHeapObj {
   void on_rotation();
   static void destroy();

- public:
-  size_t flush_type_set();
-  static void create_thread_blob(Thread* t);
-  static void write_thread_checkpoint(Thread* t);
-  void register_service_thread(const Thread* t);
+  // mspace callback
+  void register_full(BufferPtr buffer, Thread* thread);

   friend class Jfr;
   friend class JfrRecorder;
@@ -115,7 +114,7 @@ class JfrCheckpointManager : public JfrCHeapObj {
   friend class JfrCheckpointWriter;
   friend class JfrSerializer;
   friend class JfrStackTraceRepository;
-  template <typename, template <typename> class, typename>
+  template <typename, template <typename> class, typename, typename>
   friend class JfrMemorySpace;
 };

‎src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp

+3 -3 lines changed

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@ JfrCheckpointFlush::JfrCheckpointFlush(Type* old, size_t used, size_t requested,
   _result(JfrCheckpointManager::flush(old, used, requested, t)) {}

 JfrCheckpointWriter::JfrCheckpointWriter(JfrCheckpointType type /* GENERIC */) :
-  JfrCheckpointWriterBase(JfrCheckpointManager::lease_buffer(Thread::current()), Thread::current()),
+  JfrCheckpointWriterBase(JfrCheckpointManager::lease(Thread::current()), Thread::current()),
   _time(JfrTicks::now()),
   _offset(0),
   _count(0),
@@ -46,7 +46,7 @@ JfrCheckpointWriter::JfrCheckpointWriter(JfrCheckpointType type /* GENERIC */) :
 }

 JfrCheckpointWriter::JfrCheckpointWriter(Thread* t, bool header /* true */, JfrCheckpointType type /* GENERIC */) :
-  JfrCheckpointWriterBase(JfrCheckpointManager::lease_buffer(t), t),
+  JfrCheckpointWriterBase(JfrCheckpointManager::lease(t), t),
   _time(JfrTicks::now()),
   _offset(0),
   _count(0),

‎src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp

+47 -40 lines changed

@@ -28,8 +28,8 @@
 #include "jfr/recorder/checkpoint/types/jfrType.hpp"
 #include "jfr/recorder/checkpoint/types/jfrTypeManager.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
-#include "jfr/utilities/jfrDoublyLinkedList.hpp"
 #include "jfr/utilities/jfrIterator.hpp"
+#include "jfr/utilities/jfrLinkedList.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -38,38 +38,20 @@
 #include "utilities/exceptions.hpp"

 class JfrSerializerRegistration : public JfrCHeapObj {
+ public:
+  JfrSerializerRegistration* _next; // list support
  private:
-  JfrSerializerRegistration* _next;
-  JfrSerializerRegistration* _prev;
   JfrSerializer* _serializer;
   mutable JfrBlobHandle _cache;
   JfrTypeId _id;
   bool _permit_cache;
-
  public:
   JfrSerializerRegistration(JfrTypeId id, bool permit_cache, JfrSerializer* serializer) :
-    _next(NULL), _prev(NULL), _serializer(serializer), _cache(), _id(id), _permit_cache(permit_cache) {}
-
+    _next(NULL), _serializer(serializer), _cache(), _id(id), _permit_cache(permit_cache) {}
   ~JfrSerializerRegistration() {
     delete _serializer;
   }

-  JfrSerializerRegistration* next() const {
-    return _next;
-  }
-
-  void set_next(JfrSerializerRegistration* next) {
-    _next = next;
-  }
-
-  JfrSerializerRegistration* prev() const {
-    return _prev;
-  }
-
-  void set_prev(JfrSerializerRegistration* prev) {
-    _prev = prev;
-  }
-
   JfrTypeId id() const {
     return _id;
   }
@@ -155,34 +137,50 @@ class SerializerRegistrationGuard : public StackObj {

 Semaphore SerializerRegistrationGuard::_mutex_semaphore(1);

-typedef JfrDoublyLinkedList<JfrSerializerRegistration> List;
-typedef StopOnNullIterator<const List> Iterator;
+typedef JfrLinkedList<JfrSerializerRegistration> List;
 static List types;

 void JfrTypeManager::destroy() {
   SerializerRegistrationGuard guard;
-  Iterator iter(types);
   JfrSerializerRegistration* registration;
-  while (iter.has_next()) {
-    registration = types.remove(iter.next());
+  while (types.is_nonempty()) {
+    registration = types.remove();
     assert(registration != NULL, "invariant");
     delete registration;
   }
 }

-void JfrTypeManager::on_rotation() {
-  const Iterator iter(types);
-  while (iter.has_next()) {
-    iter.next()->on_rotation();
+class InvokeOnRotation {
+ public:
+  bool process(const JfrSerializerRegistration* r) {
+    assert(r != NULL, "invariant");
+    r->on_rotation();
+    return true;
   }
+};
+
+void JfrTypeManager::on_rotation() {
+  InvokeOnRotation ior;
+  types.iterate(ior);
 }

 #ifdef ASSERT
-static void assert_not_registered_twice(JfrTypeId id, List& list) {
-  const Iterator iter(list);
-  while (iter.has_next()) {
-    assert(iter.next()->id() != id, "invariant");
+
+class Diversity {
+ private:
+  const JfrTypeId _id;
+ public:
+  Diversity(JfrTypeId id) : _id(id) {}
+  bool process(const JfrSerializerRegistration* r) {
+    assert(r != NULL, "invariant");
+    assert(r->id() != _id, "invariant");
+    return true;
   }
+};
+
+static void assert_not_registered_twice(JfrTypeId id, List& list) {
+  Diversity d(id);
+  types.iterate(d);
 }
 #endif
@@ -199,7 +197,7 @@ static bool register_static_type(JfrTypeId id, bool permit_cache, JfrSerializer*
     JfrCheckpointWriter writer(STATICS);
     registration->invoke(writer);
   }
-  types.prepend(registration);
+  types.add(registration);
   return true;
 }

@@ -229,11 +227,20 @@ bool JfrSerializer::register_serializer(JfrTypeId id, bool permit_cache, JfrSeri
   return register_static_type(id, permit_cache, serializer);
 }

+class InvokeSerializer {
+ private:
+  JfrCheckpointWriter& _writer;
+ public:
+  InvokeSerializer(JfrCheckpointWriter& writer) : _writer(writer) {}
+  bool process(const JfrSerializerRegistration* r) {
+    assert(r != NULL, "invariant");
+    r->invoke(_writer);
+    return true;
+  }
+};

 void JfrTypeManager::write_static_types(JfrCheckpointWriter& writer) {
+  InvokeSerializer is(writer);
   SerializerRegistrationGuard guard;
-  const Iterator iter(types);
-  while (iter.has_next()) {
-    iter.next()->invoke(writer);
-  }
+  types.iterate(is);
 }
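
The jfrTypeManager.cpp changes above replace hand-rolled iterator loops with small functors handed to List::iterate(), each exposing a bool process(const JfrSerializerRegistration*) that returns true to keep walking. A stand-alone sketch of that contract follows; TinyList and Registration are hypothetical stand-ins, since the real JfrLinkedList is a lock-free list defined elsewhere in this commit:

#include <cassert>
#include <cstddef>

struct Registration {
  int id;
  Registration* _next;  // intrusive link, as in the diff above
};

struct TinyList {
  Registration* _head;
  TinyList() : _head(NULL) {}
  void add(Registration* r) { r->_next = _head; _head = r; }
  template <typename Callback>
  void iterate(Callback& cb) {
    // visit every node until the callback asks to stop
    for (Registration* cur = _head; cur != NULL; cur = cur->_next) {
      if (!cb.process(cur)) return;
    }
  }
};

// Analogue of the Diversity functor: every visited id must differ from _id.
class NotRegisteredTwice {
  const int _id;
 public:
  explicit NotRegisteredTwice(int id) : _id(id) {}
  bool process(const Registration* r) {
    assert(r != NULL && r->id != _id);
    return true;  // keep walking
  }
};

int main() {
  TinyList list;
  Registration a = { 1, NULL };
  Registration b = { 2, NULL };
  list.add(&a);
  list.add(&b);
  NotRegisteredTwice check(3);
  list.iterate(check);
  return 0;
}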

‎src/hotspot/share/jfr/recorder/service/jfrPostBox.hpp

+2 -4 lines changed

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,6 @@ enum JFR_Msg {
   MSG_WAKEUP,
   MSG_SHUTDOWN,
   MSG_VM_ERROR,
-  MSG_DEADBUFFER,
   MSG_FLUSHPOINT,
   MSG_NO_OF_MSGS
 };
@@ -55,15 +54,14 @@
 * MSG_STOP (2) ; MSGBIT(MSG_STOP) == (1 << 0x2) == 0x4
 * MSG_ROTATE (3) ; MSGBIT(MSG_ROTATE) == (1 << 0x3) == 0x8
 * MSG_VM_ERROR (8) ; MSGBIT(MSG_VM_ERROR) == (1 << 0x8) == 0x100
-* MSG_FLUSHPOINT (10) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0xa) == 0x400
+* MSG_FLUSHPOINT (9) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0x9) == 0x200
 *
 * Asynchronous messages (posting thread returns immediately upon deposit):
 *
 * MSG_FULLBUFFER (4) ; MSGBIT(MSG_FULLBUFFER) == (1 << 0x4) == 0x10
 * MSG_CHECKPOINT (5) ; MSGBIT(CHECKPOINT) == (1 << 0x5) == 0x20
 * MSG_WAKEUP (6) ; MSGBIT(WAKEUP) == (1 << 0x6) == 0x40
 * MSG_SHUTDOWN (7) ; MSGBIT(MSG_SHUTDOWN) == (1 << 0x7) == 0x80
-* MSG_DEADBUFFER (9) ; MSGBIT(MSG_DEADBUFFER) == (1 << 0x9) == 0x200
 */

 class JfrPostBox : public JfrCHeapObj {
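
With MSG_DEADBUFFER gone, every later message slides down one position, which is why the documented mask for MSG_FLUSHPOINT changes from (1 << 0xa) == 0x400 to (1 << 0x9) == 0x200. A small check of that bit arithmetic; the two leading enumerators below are shown schematically and are an assumption, only positions 2 through 9 are taken from the comment above:

#include <cassert>

enum JFR_Msg {
  MSG_CLONE_IN_MEMORY, // 0 (leading entries schematic)
  MSG_START,           // 1
  MSG_STOP,            // 2
  MSG_ROTATE,          // 3
  MSG_FULLBUFFER,      // 4
  MSG_CHECKPOINT,      // 5
  MSG_WAKEUP,          // 6
  MSG_SHUTDOWN,        // 7
  MSG_VM_ERROR,        // 8
  MSG_FLUSHPOINT,      // 9 (was 10 before MSG_DEADBUFFER was removed)
  MSG_NO_OF_MSGS
};

#define MSGBIT(e) (1 << (e))

int main() {
  assert(MSGBIT(MSG_STOP) == 0x4);
  assert(MSGBIT(MSG_VM_ERROR) == 0x100);
  assert(MSGBIT(MSG_FLUSHPOINT) == 0x200);  // matches the updated comment
  return 0;
}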

‎src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp

-4 lines changed

@@ -657,10 +657,6 @@ void JfrRecorderService::process_full_buffers() {
   }
 }

-void JfrRecorderService::scavenge() {
-  _storage.scavenge();
-}
-
 void JfrRecorderService::evaluate_chunk_size_for_rotation() {
   JfrChunkRotation::evaluate(_chunkwriter);
 }

‎src/hotspot/share/jfr/recorder/service/jfrRecorderThreadLoop.cpp

+1 -5 lines changed

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,6 @@ void recorderthread_entry(JavaThread* thread, Thread* unused) {
 #define ROTATE (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)))
 #define FLUSHPOINT (msgs & (MSGBIT(MSG_FLUSHPOINT)))
 #define PROCESS_FULL_BUFFERS (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)|MSGBIT(MSG_FULLBUFFER)))
-#define SCAVENGE (msgs & (MSGBIT(MSG_DEADBUFFER)))

   JfrPostBox& post_box = JfrRecorderThread::post_box();
   log_debug(jfr, system)("Recorder thread STARTED");
@@ -63,9 +62,6 @@ void recorderthread_entry(JavaThread* thread, Thread* unused) {
       if (PROCESS_FULL_BUFFERS) {
         service.process_full_buffers();
       }
-      if (SCAVENGE) {
-        service.scavenge();
-      }
       // Check amount of data written to chunk already
       // if it warrants asking for a new chunk
       service.evaluate_chunk_size_for_rotation();

‎src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp

+8 -5 lines changed

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
 static const u1* const TOP_CRITICAL_SECTION = NULL;

 JfrBuffer::JfrBuffer() : _next(NULL),
-                         _prev(NULL),
                          _identity(NULL),
                          _pos(NULL),
                          _top(NULL),
@@ -54,7 +53,6 @@ bool JfrBuffer::initialize(size_t header_size, size_t size) {
 void JfrBuffer::reinitialize(bool exclusion /* false */) {
   acquire_critical_section_top();
   assert(!lease(), "invariant");
-  assert(!transient(), "invariant");
   if (exclusion != excluded()) {
     // update
     if (exclusion) {
@@ -124,6 +122,13 @@ bool JfrBuffer::try_acquire(const void* id) {
   return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id;
 }

+void JfrBuffer::set_identity(const void* id) {
+  assert(id != NULL, "invariant");
+  assert(_identity == NULL, "invariant");
+  OrderAccess::storestore();
+  _identity = id;
+}
+
 void JfrBuffer::release() {
   assert(identity() != NULL, "invariant");
   Atomic::release_store(&_identity, (const void*)NULL);
@@ -260,13 +265,11 @@ bool JfrBuffer::retired() const {
 }

 void JfrBuffer::set_retired() {
-  assert(acquired_by_self(), "invariant");
   set(&_flags, RETIRED);
 }

 void JfrBuffer::clear_retired() {
   if (retired()) {
-    assert(identity() != NULL, "invariant");
     clear(&_flags, RETIRED);
   }
 }

‎src/hotspot/share/jfr/recorder/storage/jfrBuffer.hpp

+5 -31 lines changed

@@ -60,9 +60,9 @@
 //

 class JfrBuffer {
+ public:
+  JfrBuffer* _next; // list support
  private:
-  JfrBuffer* _next;
-  JfrBuffer* _prev;
   const void* _identity;
   u1* _pos;
   mutable const u1* _top;
@@ -77,22 +77,6 @@
   bool initialize(size_t header_size, size_t size);
   void reinitialize(bool exclusion = false);

-  JfrBuffer* next() const {
-    return _next;
-  }
-
-  JfrBuffer* prev() const {
-    return _prev;
-  }
-
-  void set_next(JfrBuffer* next) {
-    _next = next;
-  }
-
-  void set_prev(JfrBuffer* prev) {
-    _prev = prev;
-  }
-
   const u1* start() const {
     return ((const u1*)this) + _header_size;
   }
@@ -157,6 +141,9 @@
     return Atomic::load_acquire(&_identity);
   }

+  // use only if implied owner already
+  void set_identity(const void* id);
+
   void acquire(const void* id);
   bool try_acquire(const void* id);
   bool acquired_by(const void* id) const;
@@ -183,17 +170,4 @@
   void clear_excluded();
 };

-class JfrAgeNode : public JfrBuffer {
- private:
-  JfrBuffer* _retired;
- public:
-  JfrAgeNode() : _retired(NULL) {}
-  void set_retired_buffer(JfrBuffer* retired) {
-    _retired = retired;
-  }
-  JfrBuffer* retired_buffer() const {
-    return _retired;
-  }
-};
-
 #endif // SHARE_JFR_RECORDER_STORAGE_JFRBUFFER_HPP
src/hotspot/share/jfr/recorder/storage/jfrFullStorage.hpp (new file, 68 lines)

@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_RECORDER_STORAGE_JFRFULLSTORAGE_HPP
+#define SHARE_JFR_RECORDER_STORAGE_JFRFULLSTORAGE_HPP
+
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrConcurrentQueue.hpp"
+
+class JfrStorageControl;
+
+/*
+ * For full storage management.
+ *
+ * In essence, full storage is added to a FIFO queue, where the insertion order
+ * is used to represent the "is older" relation. Removes oldest data first.
+ *
+ * FullType      the type of the data value to be stored in the list.
+ *
+ * NodeType      template class for the node to store a value of FullType.
+ *
+ * AllocPolicy   memory allocation.
+ */
+template <typename FullType, template <typename> class NodeType, typename AllocPolicy = JfrCHeapObj>
+class JfrFullStorage : public AllocPolicy {
+ public:
+  typedef FullType Value;
+  typedef NodeType<Value>* NodePtr;
+  typedef NodeType<Value> Node;
+  JfrFullStorage(JfrStorageControl& control);
+  ~JfrFullStorage();
+  bool initialize(size_t free_list_prealloc_count);
+  bool is_empty() const;
+  bool is_nonempty() const;
+  bool add(Value value);
+  Value remove();
+  template <typename Callback>
+  void iterate(Callback& cb);
+ private:
+  JfrStorageControl& _control;
+  JfrConcurrentQueue<Node, AllocPolicy>* _free_node_list;
+  JfrConcurrentQueue<Node, AllocPolicy>* _queue;
+  NodePtr acquire();
+  void release(NodePtr node);
+};
+
+#endif // SHARE_JFR_RECORDER_STORAGE_JFRFULLSTORAGE_HPP
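
The header comment above describes full storage as a FIFO of buffers in which insertion order encodes the "is older" relation, so remove() always hands back the oldest entry first. A toy model of just that ordering property; std::queue stands in for the real lock-free JfrConcurrentQueue and the node recycling is omitted:

#include <cassert>
#include <cstddef>
#include <queue>

struct FakeBuffer { int id; };

class ToyFullStorage {
  std::queue<FakeBuffer*> _queue;  // insertion order == age order
 public:
  void add(FakeBuffer* b) { _queue.push(b); }
  FakeBuffer* remove() {
    if (_queue.empty()) return NULL;
    FakeBuffer* oldest = _queue.front();  // oldest data first
    _queue.pop();
    return oldest;
  }
};

int main() {
  FakeBuffer a = { 1 };
  FakeBuffer b = { 2 };
  ToyFullStorage full;
  full.add(&a);
  full.add(&b);
  assert(full.remove() == &a);  // a was inserted first, so it is "older"
  assert(full.remove() == &b);
  assert(full.remove() == NULL);
  return 0;
}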
src/hotspot/share/jfr/recorder/storage/jfrFullStorage.inline.hpp (new file, 121 lines)

@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEFULLLIST_INLINE_HPP
+#define SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEFULLLIST_INLINE_HPP
+
+#include "jfr/recorder/storage/jfrStorageControl.hpp"
+#include "jfr/recorder/storage/jfrFullStorage.hpp"
+#include "jfr/utilities/jfrConcurrentQueue.inline.hpp"
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+JfrFullStorage<ValueType, NodeType, AllocPolicy>
+::JfrFullStorage(JfrStorageControl& control) : _control(control), _free_node_list(NULL), _queue(NULL) {}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+JfrFullStorage<ValueType, NodeType, AllocPolicy>::~JfrFullStorage() {
+  NodePtr node;
+  while (_free_node_list->is_nonempty()) {
+    node = _free_node_list->remove();
+    delete node;
+  }
+  while (_queue->is_nonempty()) {
+    node = _queue->remove();
+    delete node;
+  }
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+bool JfrFullStorage<ValueType, NodeType, AllocPolicy>::initialize(size_t free_list_prealloc_count) {
+  assert(_free_node_list == NULL, "invariant");
+  _free_node_list = new JfrConcurrentQueue<Node>();
+  if (_free_node_list == NULL || !_free_node_list->initialize()) {
+    return false;
+  }
+  for (size_t i = 0; i < free_list_prealloc_count; ++i) {
+    NodePtr node = new (ResourceObj::C_HEAP, mtTracing) Node();
+    if (node == NULL) {
+      return false;
+    }
+    _free_node_list->add(node);
+  }
+  assert(_queue == NULL, "invariant");
+  _queue = new JfrConcurrentQueue<Node>();
+  return _queue != NULL && _queue->initialize();
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+inline bool JfrFullStorage<ValueType, NodeType, AllocPolicy>::is_empty() const {
+  return _queue->is_empty();
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+inline bool JfrFullStorage<ValueType, NodeType, AllocPolicy>::is_nonempty() const {
+  return !is_empty();
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+inline typename JfrFullStorage<ValueType, NodeType, AllocPolicy>::NodePtr
+JfrFullStorage<ValueType, NodeType, AllocPolicy>::acquire() {
+  NodePtr node = _free_node_list->remove();
+  return node != NULL ? node : new (ResourceObj::C_HEAP, mtTracing) Node();
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+inline void JfrFullStorage<ValueType, NodeType, AllocPolicy>
+::release(typename JfrFullStorage<ValueType, NodeType, AllocPolicy>::NodePtr node) {
+  assert(node != NULL, "invariant");
+  _free_node_list->add(node);
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+inline bool JfrFullStorage<ValueType, NodeType, AllocPolicy>::add(ValueType value) {
+  assert(value != NULL, "invariant");
+  NodePtr node = acquire();
+  assert(node != NULL, "invariant");
+  node->set_value(value);
+  const bool notify = _control.increment_full();
+  _queue->add(node);
+  return notify;
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+inline ValueType JfrFullStorage<ValueType, NodeType, AllocPolicy>::remove() {
+  Value value = NULL;
+  NodePtr node = _queue->remove();
+  if (node != NULL) {
+    _control.decrement_full();
+    value = node->value();
+    release(node);
+  }
+  return value;
+}
+
+template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
+template <typename Callback>
+void JfrFullStorage<ValueType, NodeType, AllocPolicy>::iterate(Callback& cb) {
+  _queue->iterate(cb);
+}
+
+#endif // SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEFULLLIST_INLINE_HPP
src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,82 +25,68 @@
 #define SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP

 #include "jfr/utilities/jfrAllocation.hpp"
-#include "jfr/utilities/jfrDoublyLinkedList.hpp"
-#include "jfr/utilities/jfrIterator.hpp"

-template <typename T, template <typename> class RetrievalType, typename Callback>
+template <typename Callback, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType = FreeListType>
 class JfrMemorySpace : public JfrCHeapObj {
  public:
-  typedef T Type;
-  typedef RetrievalType<JfrMemorySpace<T, RetrievalType, Callback> > Retrieval;
-  typedef JfrDoublyLinkedList<Type> List;
-  typedef StopOnNullIterator<List> Iterator;
+  typedef FreeListType FreeList;
+  typedef FullListType FullList;
+  typedef typename FreeListType::Node Node;
+  typedef typename FreeListType::NodePtr NodePtr;
  private:
-  List _free;
-  List _full;
-  size_t _min_elem_size;
-  size_t _limit_size;
-  size_t _cache_count;
+  FreeList _free_list;
+  FullList _full_list;
+  const size_t _min_elem_size;
+  const size_t _limit_size;
+  const size_t _free_list_cache_count;
+  size_t _free_list_count;
   Callback* _callback;

-  bool should_populate_cache() const { return _free.count() < _cache_count; }
+  bool should_populate_free_list() const;

  public:
-  JfrMemorySpace(size_t min_elem_size, size_t limit_size, size_t cache_count, Callback* callback);
+  JfrMemorySpace(size_t min_elem_size, size_t limit_size, size_t free_list_cache_count, Callback* callback);
   ~JfrMemorySpace();
   bool initialize();

-  size_t min_elem_size() const { return _min_elem_size; }
-  size_t limit_size() const { return _limit_size; }
-
-  bool has_full() const { return _full.head() != NULL; }
-  bool has_free() const { return _free.head() != NULL; }
-  bool is_full_empty() const { return !has_full(); }
-  bool is_free_empty() const { return !has_free(); }
-
-  size_t full_count() const { return _full.count(); }
-  size_t free_count() const { return _free.count(); }
-
-  List& full() { return _full; }
-  const List& full() const { return _full; }
-  List& free() { return _free; }
-  const List& free() const { return _free; }
-
-  Type* full_head() { return _full.head(); }
-  Type* full_tail() { return _full.tail(); }
-  Type* free_head() { return _free.head(); }
-  Type* free_tail() { return _free.tail(); }
-
-  void insert_free_head(Type* t) { _free.prepend(t); }
-  void insert_free_tail(Type* t) { _free.append(t); }
-  void insert_free_tail(Type* t, Type* tail, size_t count) { _free.append_list(t, tail, count); }
-  void insert_full_head(Type* t) { _full.prepend(t); }
-  void insert_full_tail(Type* t) { _full.append(t); }
-  void insert_full_tail(Type* t, Type* tail, size_t count) { _full.append_list(t, tail, count); }
-
-  Type* remove_free(Type* t) { return _free.remove(t); }
-  Type* remove_full(Type* t) { return _full.remove(t); }
-  Type* remove_free_tail() { _free.remove(_free.tail()); }
-  Type* remove_full_tail() { return _full.remove(_full.tail()); }
-  Type* clear_full(bool return_tail = false) { return _full.clear(return_tail); }
-  Type* clear_free(bool return_tail = false) { return _free.clear(return_tail); }
-  void release_full(Type* t);
-  void release_free(Type* t);
-
-  void register_full(Type* t, Thread* thread) { _callback->register_full(t, thread); }
-  void lock() { _callback->lock(); }
-  void unlock() { _callback->unlock(); }
-  DEBUG_ONLY(bool is_locked() const { return _callback->is_locked(); })
-
-  Type* allocate(size_t size);
-  void deallocate(Type* t);
-  Type* get(size_t size, Thread* thread) { return Retrieval::get(size, this, thread); }
-
-  template <typename IteratorCallback, typename IteratorType>
-  void iterate(IteratorCallback& callback, bool full = true, jfr_iter_direction direction = forward);
-
-  bool in_full_list(const Type* t) const { return _full.in_list(t); }
-  bool in_free_list(const Type* t) const { return _free.in_list(t); }
+  size_t min_elem_size() const;
+  size_t limit_size() const;
+
+  NodePtr allocate(size_t size);
+  void deallocate(NodePtr node);
+
+  NodePtr acquire(Thread* thread, size_t size = 0);
+  void release(NodePtr node);
+
+  FreeList& free_list();
+  const FreeList& free_list() const;
+
+  FullList& full_list();
+  const FullList& full_list() const;
+
+  bool free_list_is_empty() const;
+  bool full_list_is_empty() const;
+  bool free_list_is_nonempty() const;
+  bool full_list_is_nonempty() const;
+  bool in_free_list(const Node* node) const;
+  bool in_full_list(const Node* node) const;
+  bool in_mspace(const Node* node) const;
+
+  void add_to_free_list(NodePtr node);
+  void add_to_full_list(NodePtr node);
+
+  NodePtr remove_from_free_list();
+  NodePtr remove_from_full_list();
+
+  NodePtr clear_free_list();
+  NodePtr clear_full_list();
+
+  template <typename Processor>
+  void iterate(Processor& processor, bool full_list = true);
+
+  void decrement_free_list_count();
+
+  void register_full(NodePtr node, Thread* thread);
 };

 #endif // SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP

‎src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp

+329 -283 lines changed (large diff not rendered by default)
src/hotspot/share/jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,89 +25,51 @@
 #ifndef SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACERETRIEVAL_HPP
 #define SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACERETRIEVAL_HPP

-#include "memory/allocation.hpp"
-#include "jfr/recorder/repository/jfrChunkWriter.hpp"
-#include "jfr/recorder/storage/jfrBuffer.hpp"
-#include "jfr/utilities/jfrAllocation.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
+#include "jfr/utilities/jfrIterator.hpp"

-/*
- * Some policy classes for getting mspace memory
- */
+/* Some policy classes for getting mspace memory. */

 template <typename Mspace>
-class JfrMspaceRetrieval : AllStatic {
+class JfrMspaceRetrieval {
  public:
-  typedef typename Mspace::Type Type;
-  static Type* get(size_t size, Mspace* mspace, typename Mspace::Iterator& iterator, Thread* thread) {
+  typedef typename Mspace::Node Node;
+  static Node* acquire(Mspace* mspace, Thread* thread, size_t size) {
+    StopOnNullCondition<typename Mspace::FreeList> iterator(mspace->free_list());
     while (iterator.has_next()) {
-      Type* const t = iterator.next();
-      if (t->retired()) continue;
-      if (t->try_acquire(thread)) {
-        assert(!t->retired(), "invariant");
-        if (t->free_size() >= size) {
-          return t;
+      Node* const node = iterator.next();
+      if (node->retired()) continue;
+      if (node->try_acquire(thread)) {
+        assert(!node->retired(), "invariant");
+        if (node->free_size() >= size) {
+          return node;
         }
-        t->set_retired();
-        mspace->register_full(t, thread);
+        node->set_retired();
+        mspace->register_full(node, thread);
       }
     }
     return NULL;
   }
 };

 template <typename Mspace>
-class JfrMspaceAlternatingRetrieval {
- private:
-  // provides stochastic distribution over "deque" endpoints; racy is ok here
-  static bool _last_access;
+class JfrMspaceRemoveRetrieval : AllStatic {
  public:
-  typedef typename Mspace::Type Type;
-  static Type* get(size_t size, Mspace* mspace, Thread* thread) {
-    typename Mspace::Iterator iterator(mspace->free(), (_last_access = !_last_access) ? forward : backward);
-    return JfrMspaceRetrieval<Mspace>::get(size, mspace, iterator, thread);
-  }
-};
-
-template <typename Mspace>
-bool JfrMspaceAlternatingRetrieval<Mspace>::_last_access = false;
-
-template <typename Mspace>
-class JfrMspaceSequentialRetrieval {
- public:
-  typedef typename Mspace::Type Type;
-  static Type* get(size_t size, Mspace* mspace, Thread* thread) {
-    typename Mspace::Iterator iterator(mspace->free());
-    return JfrMspaceRetrieval<Mspace>::get(size, mspace, iterator, thread);
-  }
-};
-
-template <typename Mspace>
-class JfrExclusiveRetrieval : AllStatic {
- public:
-  typedef typename Mspace::Type Type;
-  static Type* get(size_t size, Mspace* mspace, typename Mspace::Iterator& iterator, Thread* thread) {
-    assert(mspace->is_locked(), "invariant");
-    if (iterator.has_next()) {
-      Type* const t = iterator.next();
-      assert(!t->retired(), "invariant");
-      assert(t->identity() == NULL, "invariant");
-      assert(t->free_size() >= size, "invariant");
-      t->acquire(thread);
-      return t;
+  typedef typename Mspace::Node Node;
+  static Node* acquire(Mspace* mspace, Thread* thread, size_t size) {
+    StopOnNullConditionRemoval<typename Mspace::FreeList> iterator(mspace->free_list());
+    // it is the iterator that removes the nodes
+    while (iterator.has_next()) {
+      Node* const node = iterator.next();
+      if (node == NULL) return NULL;
+      mspace->decrement_free_list_count();
+      assert(node->free_size() >= size, "invariant");
+      assert(!node->retired(), "invariant");
+      assert(node->identity() == NULL, "invariant");
+      node->set_identity(thread);
+      return node;
     }
     return NULL;
   }
 };

-template <typename Mspace>
-class JfrThreadLocalRetrieval {
- public:
-  typedef typename Mspace::Type Type;
-  static Type* get(size_t size, Mspace* mspace, Thread* thread) {
-    typename Mspace::Iterator iterator(mspace->free(), forward);
-    return JfrExclusiveRetrieval<Mspace>::get(size, mspace, iterator, thread);
-  }
-};
-
 #endif // SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACERETRIEVAL_HPP
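
The retrieval classes above are compile-time policies: a memory space delegates node acquisition to whichever policy it was instantiated with, so the same mspace code can either scan the free list or consume it destructively. A reduced sketch of that shape; FirstFitRetrieval, ToyMspace and ToyNode are made up for the illustration, and the real policies additionally handle acquisition races and retirement:

#include <cstddef>

template <typename Mspace>
struct FirstFitRetrieval {
  typedef typename Mspace::Node Node;
  static Node* acquire(Mspace* mspace, size_t size) {
    // hand back the first free node with enough room
    for (Node* n = mspace->head(); n != NULL; n = n->next) {
      if (n->free >= size) return n;
    }
    return NULL;
  }
};

struct ToyNode { ToyNode* next; size_t free; };

template <template <typename> class RetrievalPolicy>
class ToyMspace {
 public:
  typedef ToyNode Node;
  explicit ToyMspace(Node* head) : _head(head) {}
  Node* head() const { return _head; }
  // the policy decides how a node is selected
  Node* acquire(size_t size) { return RetrievalPolicy<ToyMspace>::acquire(this, size); }
 private:
  Node* _head;
};

int main() {
  ToyNode b = { NULL, 64 };
  ToyNode a = { &b, 16 };
  ToyMspace<FirstFitRetrieval> mspace(&a);
  return mspace.acquire(32) == &b ? 0 : 1;  // a is too small, b fits
}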

‎src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp

+87 -306 lines changed (large diff not rendered by default)

‎src/hotspot/share/jfr/recorder/storage/jfrStorage.hpp

+29 -28 lines changed

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,52 +25,51 @@
 #define SHARE_JFR_RECORDER_STORAGE_JFRSTORAGE_HPP

 #include "jfr/recorder/storage/jfrBuffer.hpp"
+#include "jfr/recorder/storage/jfrFullStorage.hpp"
 #include "jfr/recorder/storage/jfrMemorySpace.hpp"
 #include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
+#include "jfr/utilities/jfrConcurrentQueue.hpp"
+#include "jfr/utilities/jfrLinkedList.hpp"
+#include "jfr/utilities/jfrNode.hpp"
+#include "jfr/utilities/jfrRelation.hpp"

 class JfrChunkWriter;
 class JfrPostBox;
 class JfrStorage;
 class JfrStorageControl;

-typedef JfrMemorySpace<JfrBuffer, JfrMspaceAlternatingRetrieval, JfrStorage> JfrStorageMspace;
-typedef JfrMemorySpace<JfrBuffer, JfrThreadLocalRetrieval, JfrStorage> JfrThreadLocalMspace;
-typedef JfrMemorySpace<JfrAgeNode, JfrMspaceSequentialRetrieval, JfrStorage> JfrStorageAgeMspace;
+typedef JfrMemorySpace<JfrStorage, JfrMspaceRetrieval, JfrLinkedList<JfrBuffer> > JfrStorageMspace;
+typedef JfrMemorySpace<JfrStorage, JfrMspaceRemoveRetrieval, JfrConcurrentQueue<JfrBuffer>, JfrLinkedList<JfrBuffer> > JfrThreadLocalMspace;
+typedef JfrFullStorage<JfrBuffer*, JfrValueNode> JfrFullList;

 //
 // Responsible for providing backing storage for writing events.
 //
 class JfrStorage : public JfrCHeapObj {
  public:
-  typedef JfrStorageMspace::Type Buffer;
+  typedef JfrStorageMspace::Node Buffer;
+  typedef JfrStorageMspace::NodePtr BufferPtr;
+
  private:
   JfrStorageControl* _control;
   JfrStorageMspace* _global_mspace;
   JfrThreadLocalMspace* _thread_local_mspace;
-  JfrStorageMspace* _transient_mspace;
-  JfrStorageAgeMspace* _age_mspace;
+  JfrFullList* _full_list;
   JfrChunkWriter& _chunkwriter;
   JfrPostBox& _post_box;

-  // mspace callbacks
-  void register_full(Buffer* t, Thread* thread);
-  void lock();
-  void unlock();
-  DEBUG_ONLY(bool is_locked() const;)
-
-  Buffer* acquire_large(size_t size, Thread* t);
-  Buffer* acquire_transient(size_t size, Thread* thread);
-  bool flush_regular_buffer(Buffer* const buffer, Thread* t);
-  Buffer* flush_regular(Buffer* cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* t);
-  Buffer* flush_large(Buffer* cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* t);
-  Buffer* provision_large(Buffer* cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* t);
-  void release(Buffer* buffer, Thread* t);
+  BufferPtr acquire_large(size_t size, Thread* thread);
+  BufferPtr acquire_transient(size_t size, Thread* thread);
+  bool flush_regular_buffer(BufferPtr buffer, Thread* thread);
+  BufferPtr flush_regular(BufferPtr cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* thread);
+  BufferPtr flush_large(BufferPtr cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* thread);
+  BufferPtr provision_large(BufferPtr cur, const u1* cur_pos, size_t used, size_t req, bool native, Thread* thread);
+  void release(BufferPtr buffer, Thread* thread);

   size_t clear();
   size_t clear_full();
   size_t write_full();
   size_t write_at_safepoint();
-  size_t scavenge();

   JfrStorage(JfrChunkWriter& cw, JfrPostBox& post_box);
   ~JfrStorage();
@@ -80,19 +79,21 @@ class JfrStorage : public JfrCHeapObj {
   bool initialize();
   static void destroy();

+  // mspace callback
+  void register_full(BufferPtr buffer, Thread* thread);
+
  public:
-  static Buffer* acquire_thread_local(Thread* t, size_t size = 0);
-  static void release_thread_local(Buffer* buffer, Thread* t);
-  void release_large(Buffer* const buffer, Thread* t);
-  static Buffer* flush(Buffer* cur, size_t used, size_t req, bool native, Thread* t);
-  void discard_oldest(Thread* t);
+  static BufferPtr acquire_thread_local(Thread* thread, size_t size = 0);
+  static void release_thread_local(BufferPtr buffer, Thread* thread);
+  void release_large(BufferPtr buffer, Thread* thread);
+  static BufferPtr flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* thread);
+  void discard_oldest(Thread* thread);
   static JfrStorageControl& control();
-
   size_t write();

   friend class JfrRecorder;
   friend class JfrRecorderService;
-  template <typename, template <typename> class, typename>
+  template <typename, template <typename> class, typename, typename>
   friend class JfrMemorySpace;
 };

Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -25,40 +25,15 @@
2525
#include "precompiled.hpp"
2626
#include "jfr/recorder/storage/jfrStorageControl.hpp"
2727
#include "runtime/atomic.hpp"
28-
#include "runtime/mutexLocker.hpp"
29-
30-
// returns the updated value
31-
static jlong atomic_add(size_t value, size_t volatile* const dest) {
32-
size_t compare_value;
33-
size_t exchange_value;
34-
do {
35-
compare_value = *dest;
36-
exchange_value = compare_value + value;
37-
} while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
38-
return exchange_value;
39-
}
40-
41-
static jlong atomic_dec(size_t volatile* const dest) {
42-
size_t compare_value;
43-
size_t exchange_value;
44-
do {
45-
compare_value = *dest;
46-
assert(compare_value >= 1, "invariant");
47-
exchange_value = compare_value - 1;
48-
} while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value);
49-
return exchange_value;
50-
}
5128

5229
const size_t max_lease_factor = 2;
5330
JfrStorageControl::JfrStorageControl(size_t global_count_total, size_t in_memory_discard_threshold) :
5431
_global_count_total(global_count_total),
5532
_full_count(0),
5633
_global_lease_count(0),
57-
_dead_count(0),
5834
_to_disk_threshold(0),
5935
_in_memory_discard_threshold(in_memory_discard_threshold),
6036
_global_lease_threshold(global_count_total / max_lease_factor),
61-
_scavenge_threshold(0),
6237
_to_disk(false) {}
6338

6439
bool JfrStorageControl::to_disk() const {
@@ -73,21 +48,24 @@ size_t JfrStorageControl::full_count() const {
7348
return _full_count;
7449
}
7550

76-
// mutexed access
77-
size_t JfrStorageControl::increment_full() {
78-
assert(JfrBuffer_lock->owned_by_self(), "invariant");
79-
return ++_full_count;
51+
bool JfrStorageControl::increment_full() {
52+
const size_t result = Atomic::add(&_full_count, (size_t)1);
53+
return to_disk() && result > _to_disk_threshold;
8054
}
8155

8256
size_t JfrStorageControl::decrement_full() {
83-
assert(JfrBuffer_lock->owned_by_self(), "invariant");
8457
assert(_full_count > 0, "invariant");
85-
return --_full_count;
58+
size_t current;
59+
size_t exchange;
60+
do {
61+
current = _full_count;
62+
exchange = current - 1;
63+
} while (Atomic::cmpxchg(&_full_count, current, exchange) != current);
64+
return exchange;
8665
}
8766

8867
void JfrStorageControl::reset_full() {
89-
assert(JfrBuffer_lock->owned_by_self(), "invariant");
90-
_full_count = 0;
68+
Atomic::store(&_full_count, (size_t)0);
9169
}
9270

9371
bool JfrStorageControl::should_post_buffer_full_message() const {
@@ -98,42 +76,24 @@ bool JfrStorageControl::should_discard() const {
9876
return !to_disk() && full_count() >= _in_memory_discard_threshold;
9977
}
10078

101-
// concurrent with accuracy requirement
102-
10379
size_t JfrStorageControl::global_lease_count() const {
10480
return Atomic::load(&_global_lease_count);
10581
}
10682

10783
size_t JfrStorageControl::increment_leased() {
108-
return atomic_add(1, &_global_lease_count);
84+
return Atomic::add(&_global_lease_count, (size_t)1);
10985
}
11086

11187
size_t JfrStorageControl::decrement_leased() {
112-
return atomic_dec(&_global_lease_count);
88+
size_t current;
89+
size_t exchange;
90+
do {
91+
current = _global_lease_count;
92+
exchange = current - 1;
93+
} while (Atomic::cmpxchg(&_global_lease_count, current, exchange) != current);
94+
return exchange;
11395
}
11496

11597
bool JfrStorageControl::is_global_lease_allowed() const {
11698
return global_lease_count() <= _global_lease_threshold;
11799
}
118-
119-
// concurrent with lax requirement
120-
121-
size_t JfrStorageControl::dead_count() const {
122-
return _dead_count;
123-
}
124-
125-
size_t JfrStorageControl::increment_dead() {
126-
return atomic_add(1, &_dead_count);
127-
}
128-
129-
size_t JfrStorageControl::decrement_dead() {
130-
return atomic_dec(&_dead_count);
131-
}
132-
133-
bool JfrStorageControl::should_scavenge() const {
134-
return dead_count() >= _scavenge_threshold;
135-
}
136-
137-
void JfrStorageControl::set_scavenge_threshold(size_t number_of_dead_buffers) {
138-
_scavenge_threshold = number_of_dead_buffers;
139-
}
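
A minimal standalone sketch (illustrative only, not part of this changeset, which uses the VM's Atomic class) of the lock-free counter pattern the rewritten JfrStorageControl relies on: increment_full() becomes a fetch-and-add whose result is checked against a threshold, and decrement_full() becomes a compare-and-swap loop. The threshold value below is hypothetical.

#include <atomic>
#include <cassert>
#include <cstddef>

static std::atomic<std::size_t> full_count{0};
static const std::size_t to_disk_threshold = 8;  // hypothetical value for illustration

// Returns true when the updated count exceeds the threshold (mirrors the new bool return).
static bool increment_full_sketch() {
  const std::size_t result = full_count.fetch_add(1) + 1;
  return result > to_disk_threshold;
}

// Lock-free decrement via a CAS loop; compare_exchange_weak reloads 'current' on failure.
static std::size_t decrement_full_sketch() {
  std::size_t current = full_count.load();
  std::size_t exchange;
  do {
    assert(current > 0);
    exchange = current - 1;
  } while (!full_count.compare_exchange_weak(current, exchange));
  return exchange;
}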

‎src/hotspot/share/jfr/recorder/storage/jfrStorageControl.hpp

+2-11
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,9 @@ class JfrStorageControl : public JfrCHeapObj {
3232
size_t _global_count_total;
3333
size_t _full_count;
3434
volatile size_t _global_lease_count;
35-
volatile size_t _dead_count;
3635
size_t _to_disk_threshold;
3736
size_t _in_memory_discard_threshold;
3837
size_t _global_lease_threshold;
39-
size_t _scavenge_threshold;
4038
bool _to_disk;
4139

4240
public:
@@ -46,7 +44,7 @@ class JfrStorageControl : public JfrCHeapObj {
4644
bool to_disk() const;
4745

4846
size_t full_count() const;
49-
size_t increment_full();
47+
bool increment_full();
5048
size_t decrement_full();
5149
void reset_full();
5250
bool should_post_buffer_full_message() const;
@@ -56,13 +54,6 @@ class JfrStorageControl : public JfrCHeapObj {
5654
size_t increment_leased();
5755
size_t decrement_leased();
5856
bool is_global_lease_allowed() const;
59-
60-
size_t dead_count() const;
61-
size_t increment_dead();
62-
size_t decrement_dead();
63-
64-
void set_scavenge_threshold(size_t number_of_dead_buffers);
65-
bool should_scavenge() const;
6657
};
6758

6859
#endif // SHARE_JFR_RECORDER_STORAGE_JFRSTORAGECONTROL_HPP

‎src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.hpp

+12-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -189,4 +189,15 @@ class DiscardOp {
189189
size_t size() const { return _operation.size(); }
190190
};
191191

192+
template <typename Operation>
193+
class ExclusiveDiscardOp : private DiscardOp<Operation> {
194+
public:
195+
typedef typename Operation::Type Type;
196+
ExclusiveDiscardOp(jfr_operation_mode mode = concurrent) : DiscardOp<Operation>(mode) {}
197+
bool process(Type* t);
198+
size_t processed() const { return DiscardOp<Operation>::processed(); }
199+
size_t elements() const { return DiscardOp<Operation>::elements(); }
200+
size_t size() const { return DiscardOp<Operation>::size(); }
201+
};
202+
192203
#endif // SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_HPP
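
The ExclusiveDiscardOp added above reuses DiscardOp through private inheritance, substituting its own process() and re-exporting the accessors. A hedged sketch of that delegation idiom, with hypothetical names rather than the JFR types:

#include <cstddef>

template <typename Operation>
class ExclusiveWrapperSketch : private Operation {
 public:
  template <typename T>
  bool process(T* t) {
    // A real implementation would first acquire 't' exclusively here,
    // then delegate to the wrapped operation.
    return Operation::process(t);
  }
  std::size_t processed() const { return Operation::processed(); }
};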

‎src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp

+21-4
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -52,15 +52,24 @@ inline size_t get_unflushed_size(const u1* top, Type* t) {
5252

5353
template <typename Operation>
5454
inline bool ConcurrentWriteOp<Operation>::process(typename Operation::Type* t) {
55+
const bool is_retired = t->retired();
5556
// acquire_critical_section_top() must be read before pos() for stable access
56-
const u1* const top = t->acquire_critical_section_top();
57+
const u1* const top = is_retired ? t->top() : t->acquire_critical_section_top();
5758
const size_t unflushed_size = get_unflushed_size(top, t);
5859
if (unflushed_size == 0) {
59-
t->release_critical_section_top(top);
60+
if (is_retired) {
61+
t->set_top(top);
62+
} else {
63+
t->release_critical_section_top(top);
64+
}
6065
return true;
6166
}
6267
const bool result = _operation.write(t, top, unflushed_size);
63-
t->release_critical_section_top(top + unflushed_size);
68+
if (is_retired) {
69+
t->set_top(top + unflushed_size);
70+
} else {
71+
t->release_critical_section_top(top + unflushed_size);
72+
}
6473
return result;
6574
}
6675

@@ -119,4 +128,12 @@ inline bool DiscardOp<Operation>::process(typename Operation::Type* t) {
119128
return result;
120129
}
121130

131+
template <typename Operation>
132+
inline bool ExclusiveDiscardOp<Operation>::process(typename Operation::Type* t) {
133+
retired_sensitive_acquire(t);
134+
assert(t->acquired_by_self() || t->retired(), "invariant");
135+
// User is required to ensure proper release of the acquisition
136+
return DiscardOp<Operation>::process(t);
137+
}
138+
122139
#endif // SHARE_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP
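
A self-contained sketch (stand-in types, not JfrBuffer) of why the write path above branches on retired(): a retired buffer has no active writer, so its top can be read and advanced directly, while a live buffer must go through the acquire/release critical-section protocol to obtain a stable top.

#include <cstddef>
#include <cstdint>

struct BufferSketch {                       // hypothetical stand-in for JfrBuffer
  const std::uint8_t* _pos;
  const std::uint8_t* _top;
  bool _retired;
  const std::uint8_t* top() const { return _top; }
  void set_top(const std::uint8_t* t) { _top = t; }
  // The real buffer uses an atomic protocol here; trivial bodies keep the sketch self-contained.
  const std::uint8_t* acquire_critical_section_top() { return _top; }
  void release_critical_section_top(const std::uint8_t* t) { _top = t; }
};

template <typename Operation>
bool write_process_sketch(BufferSketch* b, Operation& op) {
  const bool retired = b->_retired;
  const std::uint8_t* top = retired ? b->top() : b->acquire_critical_section_top();
  const std::size_t unflushed = static_cast<std::size_t>(b->_pos - top);
  if (unflushed == 0) {
    if (retired) { b->set_top(top); } else { b->release_critical_section_top(top); }
    return true;
  }
  const bool result = op.write(b, top, unflushed);
  if (retired) {
    b->set_top(top + unflushed);
  } else {
    b->release_critical_section_top(top + unflushed);
  }
  return result;
}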

‎src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp

+41-52
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -31,14 +31,14 @@
3131
#include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
3232
#include "jfr/recorder/stringpool/jfrStringPool.hpp"
3333
#include "jfr/recorder/stringpool/jfrStringPoolWriter.hpp"
34+
#include "jfr/utilities/jfrLinkedList.inline.hpp"
3435
#include "jfr/utilities/jfrTypes.hpp"
3536
#include "logging/log.hpp"
3637
#include "runtime/atomic.hpp"
37-
#include "runtime/mutexLocker.hpp"
3838
#include "runtime/safepoint.hpp"
3939
#include "runtime/thread.inline.hpp"
4040

41-
typedef JfrStringPool::Buffer* BufferPtr;
41+
typedef JfrStringPool::BufferPtr BufferPtr;
4242

4343
static JfrStringPool* _instance = NULL;
4444
static uint64_t store_generation = 0;
@@ -48,6 +48,7 @@ inline void set_generation(uint64_t value, uint64_t* const dest) {
4848
assert(dest != NULL, "invariant");
4949
Atomic::release_store(dest, value);
5050
}
51+
5152
static void increment_store_generation() {
5253
const uint64_t current_serialized = Atomic::load_acquire(&serialized_generation);
5354
const uint64_t current_stored = Atomic::load_acquire(&store_generation);
@@ -88,14 +89,11 @@ void JfrStringPool::destroy() {
8889
_instance = NULL;
8990
}
9091

91-
JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _free_list_mspace(NULL), _lock(NULL), _chunkwriter(cw) {}
92+
JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _mspace(NULL), _chunkwriter(cw) {}
9293

9394
JfrStringPool::~JfrStringPool() {
94-
if (_free_list_mspace != NULL) {
95-
delete _free_list_mspace;
96-
}
97-
if (_lock != NULL) {
98-
delete _lock;
95+
if (_mspace != NULL) {
96+
delete _mspace;
9997
}
10098
}
10199

@@ -104,14 +102,9 @@ static const size_t string_pool_cache_count = 2;
104102
static const size_t string_pool_buffer_size = 512 * K;
105103

106104
bool JfrStringPool::initialize() {
107-
assert(_free_list_mspace == NULL, "invariant");
108-
_free_list_mspace = new JfrStringPoolMspace(string_pool_buffer_size, unlimited_mspace_size, string_pool_cache_count, this);
109-
if (_free_list_mspace == NULL || !_free_list_mspace->initialize()) {
110-
return false;
111-
}
112-
assert(_lock == NULL, "invariant");
113-
_lock = new Mutex(Monitor::leaf - 1, "Checkpoint mutex", Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
114-
return _lock != NULL;
105+
assert(_mspace == NULL, "invariant");
106+
_mspace = create_mspace<JfrStringPoolMspace>(string_pool_buffer_size, unlimited_mspace_size, string_pool_cache_count, this);
107+
return _mspace != NULL;
115108
}
116109

117110
/*
@@ -125,7 +118,11 @@ static void release(BufferPtr buffer, Thread* thread) {
125118
assert(buffer->lease(), "invariant");
126119
assert(buffer->acquired_by_self(), "invariant");
127120
buffer->clear_lease();
128-
buffer->release();
121+
if (buffer->transient()) {
122+
buffer->set_retired();
123+
} else {
124+
buffer->release();
125+
}
129126
}
130127

131128
BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
@@ -137,7 +134,7 @@ BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thr
137134
return NULL;
138135
}
139136
// migration of in-flight information
140-
BufferPtr const new_buffer = lease_buffer(thread, used + requested);
137+
BufferPtr const new_buffer = lease(thread, used + requested);
141138
if (new_buffer != NULL) {
142139
migrate_outstanding_writes(old, new_buffer, used, requested);
143140
}
@@ -147,10 +144,10 @@ BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thr
147144

148145
static const size_t lease_retry = 10;
149146

150-
BufferPtr JfrStringPool::lease_buffer(Thread* thread, size_t size /* 0 */) {
151-
BufferPtr buffer = mspace_get_free_lease_with_retry(size, instance()._free_list_mspace, lease_retry, thread);
147+
BufferPtr JfrStringPool::lease(Thread* thread, size_t size /* 0 */) {
148+
BufferPtr buffer = mspace_get_free_lease_with_retry(size, instance()._mspace, lease_retry, thread);
152149
if (buffer == NULL) {
153-
buffer = mspace_allocate_transient_lease_to_free(size, instance()._free_list_mspace, thread);
150+
buffer = mspace_allocate_transient_lease_to_full(size, instance()._mspace, thread);
154151
}
155152
assert(buffer->acquired_by_self(), "invariant");
156153
assert(buffer->lease(), "invariant");
@@ -210,18 +207,23 @@ typedef StringPoolOp<UnBufferedWriteToChunk> WriteOperation;
210207
typedef StringPoolOp<StringPoolDiscarderStub> DiscardOperation;
211208
typedef ExclusiveOp<WriteOperation> ExclusiveWriteOperation;
212209
typedef ExclusiveOp<DiscardOperation> ExclusiveDiscardOperation;
213-
typedef ReleaseOp<JfrStringPoolMspace> StringPoolReleaseOperation;
214-
typedef CompositeOperation<ExclusiveWriteOperation, StringPoolReleaseOperation> StringPoolWriteOperation;
215-
typedef CompositeOperation<ExclusiveDiscardOperation, StringPoolReleaseOperation> StringPoolDiscardOperation;
210+
typedef ReleaseOp<JfrStringPoolMspace> StringPoolReleaseFreeOperation;
211+
typedef ScavengingReleaseOp<JfrStringPoolMspace> StringPoolReleaseFullOperation;
212+
typedef CompositeOperation<ExclusiveWriteOperation, StringPoolReleaseFreeOperation> StringPoolWriteFreeOperation;
213+
typedef CompositeOperation<ExclusiveWriteOperation, StringPoolReleaseFullOperation> StringPoolWriteFullOperation;
214+
typedef CompositeOperation<ExclusiveDiscardOperation, StringPoolReleaseFreeOperation> StringPoolDiscardFreeOperation;
215+
typedef CompositeOperation<ExclusiveDiscardOperation, StringPoolReleaseFullOperation> StringPoolDiscardFullOperation;
216216

217217
size_t JfrStringPool::write() {
218218
Thread* const thread = Thread::current();
219219
WriteOperation wo(_chunkwriter, thread);
220220
ExclusiveWriteOperation ewo(wo);
221-
StringPoolReleaseOperation spro(_free_list_mspace, thread, false);
222-
StringPoolWriteOperation spwo(&ewo, &spro);
223-
assert(_free_list_mspace->is_full_empty(), "invariant");
224-
process_free_list(spwo, _free_list_mspace);
221+
StringPoolReleaseFreeOperation free_release_op(_mspace);
222+
StringPoolWriteFreeOperation free_op(&ewo, &free_release_op);
223+
process_free_list(free_op, _mspace);
224+
StringPoolReleaseFullOperation full_release_op(_mspace);
225+
StringPoolWriteFullOperation full_op(&ewo, &full_release_op);
226+
process_full_list(full_op, _mspace);
225227
return wo.processed();
226228
}
227229

@@ -234,31 +236,18 @@ size_t JfrStringPool::clear() {
234236
increment_serialized_generation();
235237
DiscardOperation discard_operation;
236238
ExclusiveDiscardOperation edo(discard_operation);
237-
StringPoolReleaseOperation spro(_free_list_mspace, Thread::current(), false);
238-
StringPoolDiscardOperation spdo(&edo, &spro);
239-
assert(_free_list_mspace->is_full_empty(), "invariant");
240-
process_free_list(spdo, _free_list_mspace);
239+
StringPoolReleaseFreeOperation free_release_op(_mspace);
240+
StringPoolDiscardFreeOperation free_op(&edo, &free_release_op);
241+
process_free_list(free_op, _mspace);
242+
StringPoolReleaseFullOperation full_release_op(_mspace);
243+
StringPoolDiscardFullOperation full_op(&edo, &full_release_op);
244+
process_full_list(full_op, _mspace);
241245
return discard_operation.processed();
242246
}
243247

244-
void JfrStringPool::register_full(BufferPtr t, Thread* thread) {
248+
void JfrStringPool::register_full(BufferPtr buffer, Thread* thread) {
245249
// nothing here at the moment
246-
assert(t != NULL, "invariant");
247-
assert(t->acquired_by(thread), "invariant");
248-
assert(t->retired(), "invariant");
249-
}
250-
251-
void JfrStringPool::lock() {
252-
assert(!_lock->owned_by_self(), "invariant");
253-
_lock->lock_without_safepoint_check();
254-
}
255-
256-
void JfrStringPool::unlock() {
257-
_lock->unlock();
258-
}
259-
260-
#ifdef ASSERT
261-
bool JfrStringPool::is_locked() const {
262-
return _lock->owned_by_self();
250+
assert(buffer != NULL, "invariant");
251+
assert(buffer->acquired_by(thread), "invariant");
252+
assert(buffer->retired(), "invariant");
263253
}
264-
#endif
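
A small sketch (hypothetical flag type, not the JFR buffer) of the release logic introduced above: a leased transient buffer is retired rather than released, so it is reclaimed later by the full-list pass in write()/clear().

struct LeaseBufferSketch {              // hypothetical stand-in
  bool _lease = true;
  bool _transient = false;
  bool _retired = false;
  bool _acquired = true;
  void clear_lease() { _lease = false; }
  void set_retired() { _retired = true; }
  void release() { _acquired = false; }
};

static void release_sketch(LeaseBufferSketch* buffer) {
  buffer->clear_lease();
  if (buffer->_transient) {
    buffer->set_retired();   // handed over to the full-list processing
  } else {
    buffer->release();       // returned to the free list
  }
}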

‎src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.hpp

+12-15
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -29,12 +29,12 @@
2929
#include "jfr/recorder/storage/jfrMemorySpace.hpp"
3030
#include "jfr/recorder/storage/jfrMemorySpaceRetrieval.hpp"
3131
#include "jfr/recorder/stringpool/jfrStringPoolBuffer.hpp"
32+
#include "jfr/utilities/jfrLinkedList.hpp"
3233

3334
class JfrChunkWriter;
3435
class JfrStringPool;
35-
class Mutex;
3636

37-
typedef JfrMemorySpace<JfrStringPoolBuffer, JfrMspaceSequentialRetrieval, JfrStringPool> JfrStringPoolMspace;
37+
typedef JfrMemorySpace<JfrStringPool, JfrMspaceRetrieval, JfrLinkedList<JfrStringPoolBuffer> > JfrStringPoolMspace;
3838

3939
//
4040
// Although called JfrStringPool, a more succinct description would be
@@ -48,21 +48,15 @@ class JfrStringPool : public JfrCHeapObj {
4848
size_t write();
4949
size_t write_at_safepoint();
5050
size_t clear();
51+
typedef JfrStringPoolMspace::Node Buffer;
52+
typedef JfrStringPoolMspace::NodePtr BufferPtr;
5153

52-
typedef JfrStringPoolMspace::Type Buffer;
5354
private:
54-
JfrStringPoolMspace* _free_list_mspace;
55-
Mutex* _lock;
55+
JfrStringPoolMspace* _mspace;
5656
JfrChunkWriter& _chunkwriter;
5757

58-
// mspace callback
59-
void register_full(Buffer* t, Thread* thread);
60-
void lock();
61-
void unlock();
62-
DEBUG_ONLY(bool is_locked() const;)
63-
64-
static Buffer* lease_buffer(Thread* thread, size_t size = 0);
65-
static Buffer* flush(Buffer* old, size_t used, size_t requested, Thread* t);
58+
static BufferPtr lease(Thread* thread, size_t size = 0);
59+
static BufferPtr flush(BufferPtr old, size_t used, size_t requested, Thread* thread);
6660

6761
JfrStringPool(JfrChunkWriter& cw);
6862
~JfrStringPool();
@@ -73,11 +67,14 @@ class JfrStringPool : public JfrCHeapObj {
7367
static void destroy();
7468
static bool is_modified();
7569

70+
// mspace callback
71+
void register_full(BufferPtr buffer, Thread* thread);
72+
7673
friend class JfrRecorder;
7774
friend class JfrRecorderService;
7875
friend class JfrStringPoolFlush;
7976
friend class JfrStringPoolWriter;
80-
template <typename, template <typename> class, typename>
77+
template <typename, template <typename> class, typename, typename>
8178
friend class JfrMemorySpace;
8279
};
8380

‎src/hotspot/share/jfr/recorder/stringpool/jfrStringPoolBuffer.hpp

+6-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,11 @@ class JfrStringPoolBuffer : public JfrBuffer {
4141
void increment(uint64_t value);
4242
void set_string_pos(uint64_t value);
4343
void set_string_top(uint64_t value);
44+
45+
template <typename, typename>
46+
friend class JfrLinkedList;
47+
template <typename, typename>
48+
friend class JfrConcurrentLinkedList;
4449
};
4550

4651
#endif // SHARE_JFR_RECORDER_STRINGPOOL_JFRSTRINGPOOLBUFFER_HPP

‎src/hotspot/share/jfr/recorder/stringpool/jfrStringPoolWriter.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -28,11 +28,11 @@
2828
#include "jfr/writers/jfrEventWriterHost.inline.hpp"
2929
#include "jfr/writers/jfrMemoryWriterHost.inline.hpp"
3030

31-
JfrStringPoolFlush::JfrStringPoolFlush(Type* old, size_t used, size_t requested, Thread* t) :
32-
_result(JfrStringPool::flush(old, used, requested, t)) {}
31+
JfrStringPoolFlush::JfrStringPoolFlush(Type* old, size_t used, size_t requested, Thread* thread) :
32+
_result(JfrStringPool::flush(old, used, requested, thread)) {}
3333

3434
JfrStringPoolWriter::JfrStringPoolWriter(Thread* thread) :
35-
JfrStringPoolWriterBase(JfrStringPool::lease_buffer(thread), thread), _nof_strings(0) {}
35+
JfrStringPoolWriterBase(JfrStringPool::lease(thread), thread), _nof_strings(0) {}
3636

3737
JfrStringPoolWriter::~JfrStringPoolWriter() {
3838
assert(this->is_acquired(), "invariant");

‎src/hotspot/share/jfr/support/jfrFlush.cpp

+12-12
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,8 @@
3131
#include "runtime/thread.inline.hpp"
3232
#include "utilities/debug.hpp"
3333

34-
JfrFlush::JfrFlush(JfrStorage::Buffer* old, size_t used, size_t requested, Thread* t) :
35-
_result(JfrStorage::flush(old, used, requested, true, t)) {
34+
JfrFlush::JfrFlush(JfrStorage::BufferPtr old, size_t used, size_t requested, Thread* thread) :
35+
_result(JfrStorage::flush(old, used, requested, true, thread)) {
3636
}
3737

3838
template <typename T>
@@ -61,24 +61,24 @@ bool jfr_has_stacktrace_enabled(JfrEventId id) {
6161
return JfrEventSetting::has_stacktrace(id);
6262
}
6363

64-
void jfr_conditional_flush(JfrEventId id, size_t size, Thread* t) {
65-
if (t->jfr_thread_local()->has_native_buffer()) {
66-
JfrStorage::Buffer* const buffer = t->jfr_thread_local()->native_buffer();
64+
void jfr_conditional_flush(JfrEventId id, size_t size, Thread* thread) {
65+
if (thread->jfr_thread_local()->has_native_buffer()) {
66+
JfrStorage::BufferPtr buffer = thread->jfr_thread_local()->native_buffer();
6767
if (LessThanSize<JfrStorage::Buffer>::evaluate(buffer, size)) {
68-
JfrFlush f(buffer, 0, 0, t);
68+
JfrFlush f(buffer, 0, 0, thread);
6969
}
7070
}
7171
}
7272

73-
bool jfr_save_stacktrace(Thread* t) {
74-
JfrThreadLocal* const tl = t->jfr_thread_local();
73+
bool jfr_save_stacktrace(Thread* thread) {
74+
JfrThreadLocal* const tl = thread->jfr_thread_local();
7575
if (tl->has_cached_stack_trace()) {
7676
return false; // no ownership
7777
}
78-
tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(t));
78+
tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(thread));
7979
return true;
8080
}
8181

82-
void jfr_clear_stacktrace(Thread* t) {
83-
t->jfr_thread_local()->clear_cached_stack_trace();
82+
void jfr_clear_stacktrace(Thread* thread) {
83+
thread->jfr_thread_local()->clear_cached_stack_trace();
8484
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
/*
2+
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_UTILITIES_JFRCONCURRENTLINKEDLISTHOST_HPP
26+
#define SHARE_JFR_UTILITIES_JFRCONCURRENTLINKEDLISTHOST_HPP
27+
28+
#include "jfr/utilities/jfrAllocation.hpp"
29+
30+
/*
31+
* This implementation is a derivation from Harris
32+
* https://www.cl.cam.ac.uk/research/srg/netos/papers/2001-caslists.pdf
33+
*
34+
* A concurrent LIFO structure can be built using the pair:
35+
*
36+
* insert_head() and remove()
37+
*
38+
* The LIFO algorithm is non-blocking, more specifically wait-free.
39+
* When combined with a system for safe memory reclamation, where a thread needs
40+
* to know if other threads are possibly reading the memory that is to be reclaimed (more below),
41+
* a potential wait point is introduced, so technically, we are no longer wait-free.
42+
* The combination is still lock-free, but since it is no longer pure non-blocking,
43+
* we instead say the solution is concurrent.
44+
*
45+
* It is also possible to build a FIFO structure using the pair:
46+
*
47+
* insert_tail() and remove()
48+
*
49+
* To allow FIFO, the solution extends support to mark, or reserve a node, not only as part of deletions
50+
* as with the LIFO case, but also, to enable tail insertions.
51+
*
52+
* Compared to the LIFO algorithm, the FIFO algorithm is not non-blocking, because inserts to the tail block,
53+
* making it not lock-free. remove() is lock-free up until the last node in the list. In practice, the FIFO
54+
* solution can be used in certain ways that very closely approximate non-blocking, for example, situations
55+
* involving a single producer and multiple consumers.
56+
*
57+
* Although the FIFO algorithm is not non-blocking, it includes an optimization for remove() that is attractive:
58+
* In the LIFO case, a slow path taken as the result of a failed excision would have to re-traverse the list
59+
* to find the updated adjacent node pair for the already marked node. However, that node might already have
60+
* been excised by some other thread, letting the thread potentially traverse the entire list just to discover
61+
* it is no longer present (not an issue if the list is ordered by a key, then traversal is only to node >= key).
62+
* In the FIFO case, premised on the invariant that inserts only come in from the tail, it is possible to prove
63+
* that a failed cas cannot be the result of a new node having been inserted, as it can be in the LIFO case. With FIFO, there is only a single
64+
* failure mode, i.e. some other thread excised the node already. Therefore, in the FIFO case, we skip the slow-path search pass.
65+
*
66+
* We say that the FIFO solution is "mostly" concurrent, in certain situations.
67+
*
68+
* Safe memory reclamation is based on a reference tracking scheme based on versions, implemented using JfrVersion.
69+
* An access to the list is "version controlled", with clients checking out the latest version of the list.
70+
* Destructive modifications made by clients, i.e. deletions, are committed to describe new versions of the list.
71+
* Before reclamation, a client inspects the versioning system to ensure checkouts for versions strictly
72+
* less than the version of the modification have all been relinquished. See utilities/JfrVersion.hpp.
73+
*
74+
* Insertions can only take place from one end of the list, head or tail, exclusively.
75+
* Specializations, a.k.a clients, must ensure this requirement.
76+
*/
77+
78+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy = JfrCHeapObj>
79+
class JfrConcurrentLinkedListHost : public AllocPolicy {
80+
private:
81+
Client* _client;
82+
typedef typename Client::Node Node;
83+
typedef Node* NodePtr;
84+
typedef const Node* ConstNodePtr;
85+
typedef typename Client::VersionSystem::Type VersionType;
86+
typedef typename Client::VersionSystem::Handle VersionHandle;
87+
public:
88+
JfrConcurrentLinkedListHost(Client* client);
89+
bool initialize();
90+
void insert_head(NodePtr node, NodePtr head, ConstNodePtr tail) const;
91+
void insert_tail(NodePtr node, NodePtr head, NodePtr last, ConstNodePtr tail) const;
92+
NodePtr remove(NodePtr head, ConstNodePtr tail, NodePtr last = NULL, bool insert_is_head = true);
93+
template <typename Callback>
94+
void iterate(NodePtr head, ConstNodePtr tail, Callback& cb);
95+
bool in_list(ConstNodePtr node, NodePtr head, ConstNodePtr tail) const;
96+
};
97+
98+
#endif // SHARE_JFR_UTILITIES_JFRCONCURRENTLINKEDLISTHOST_HPP
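
The header above documents the Harris-derived algorithm in prose; the core trick is tagging a node's next pointer with a mark bit and using CAS both to set the mark (logical excision) and to unlink the node (physical excision). A minimal standalone sketch, using std::atomic rather than the VM's primitives; all names are illustrative and not part of this changeset:

#include <atomic>
#include <cstdint>

struct NodeSketch {                               // hypothetical minimal node
  std::atomic<std::uintptr_t> next{0};            // low bit doubles as the excision mark
};

static NodeSketch* unmask(std::uintptr_t v) {
  return reinterpret_cast<NodeSketch*>(v & ~static_cast<std::uintptr_t>(1));
}

static bool is_marked(std::uintptr_t v) { return (v & 1) != 0; }

// Logical excision: CAS the mark bit into node->next; fails if the node is
// already marked or its successor changed underneath us.
static NodeSketch* mark_for_removal_sketch(NodeSketch* node) {
  std::uintptr_t next = node->next.load(std::memory_order_acquire);
  if (is_marked(next)) return nullptr;
  return node->next.compare_exchange_strong(next, next | 1) ? unmask(next) : nullptr;
}

// Physical excision: swing the predecessor past the marked node.
static bool excise_sketch(NodeSketch* predecessor, NodeSketch* node, NodeSketch* successor) {
  std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(node);
  return predecessor->next.compare_exchange_strong(expected,
                                                   reinterpret_cast<std::uintptr_t>(successor));
}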
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,299 @@
1+
/*
2+
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_UTILITIES_JFRCONCURRENTLINKEDLISTHOST_INLINE_HPP
26+
#define SHARE_JFR_UTILITIES_JFRCONCURRENTLINKEDLISTHOST_INLINE_HPP
27+
28+
#include "jfr/utilities/jfrConcurrentLinkedListHost.hpp"
29+
#include "jfr/utilities/jfrRelation.hpp"
30+
#include "jfr/utilities/jfrTypes.hpp"
31+
#include "runtime/atomic.hpp"
32+
#include "runtime/os.inline.hpp"
33+
#include "utilities/globalDefinitions.hpp"
34+
35+
/*
36+
* The removal marker (i.e. the excision bit) is represented by '( )' as part of state description comments:
37+
* node --> next becomes (node) --> next, when node is logically deleted.
38+
*/
39+
template <typename Node>
40+
inline Node* mark_for_removal(Node* node) {
41+
assert(node != NULL, "invariant");
42+
const Node* next = node->_next;
43+
assert(next != NULL, "invariant");
44+
Node* const unmasked_next = unmask(next);
45+
return next == unmasked_next && cas(&node->_next, unmasked_next, set_excision_bit(unmasked_next)) ? unmasked_next : NULL;
46+
}
47+
48+
/*
49+
* The insertion marker (i.e. the insertion bit) is represented by '[ ]' as part of state description comments:
50+
* "node --> next" becomes "[node} --> next", in an attempt to convey node as being exlusively reserved.
51+
*/
52+
template <typename Node>
53+
inline bool mark_for_insertion(Node* node, const Node* tail) {
54+
assert(node != NULL, "invariant");
55+
return node->_next == tail && cas(&node->_next, const_cast<Node*>(tail), set_insertion_bit(tail));
56+
}
57+
58+
/*
59+
* Find a predecessor and successor node pair where successor covers predecessor (adjacency).
60+
*/
61+
template <typename Node, typename VersionHandle, template <typename> class SearchPolicy>
62+
Node* find_adjacent(Node* head, const Node* tail, Node** predecessor, VersionHandle& version_handle, SearchPolicy<Node>& predicate) {
63+
assert(head != NULL, "invariant");
64+
assert(tail != NULL, "invariant");
65+
assert(head != tail, "invariant");
66+
while (true) {
67+
Node* predecessor_next;
68+
Node* current = head;
69+
version_handle.checkout();
70+
assert(version_handle.is_tracked(), "invariant");
71+
Node* next = Atomic::load_acquire(&current->_next);
72+
do {
73+
assert(next != NULL, "invariant");
74+
Node* const unmasked_next = unmask(next);
75+
// 1A: Locate the first node to keep as predecessor.
76+
if (!is_marked_for_removal(next)) {
77+
*predecessor = current;
78+
predecessor_next = unmasked_next;
79+
}
80+
// 1B: Locate the next node to keep as successor.
81+
current = unmasked_next;
82+
if (current == tail) break;
83+
next = current->_next;
84+
} while (predicate(current, next));
85+
// current represents the successor node from here on out.
86+
// 2: Check predecessor and successor node pair for adjacency.
87+
if (predecessor_next == current) {
88+
// Invariant: predecessor --> successor
89+
return current;
90+
}
91+
// 3: Successor does not (yet) cover predecessor.
92+
// Invariant: predecessor --> (logically excised nodes) --> successor
93+
// Physically excise one or more logically excised nodes in-between.
94+
if (cas(&(*predecessor)->_next, predecessor_next, current)) {
95+
// Invariant: predecessor --> successor
96+
return current;
97+
}
98+
}
99+
}
100+
101+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
102+
JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::JfrConcurrentLinkedListHost(Client* client) : _client(client) {}
103+
104+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
105+
bool JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::initialize() {
106+
return true;
107+
}
108+
109+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
110+
void JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::insert_head(typename Client::Node* node,
111+
typename Client::Node* head,
112+
const typename Client::Node* tail) const {
113+
Node* predecessor;
114+
Node* successor;
115+
HeadNode<Node> predicate(node);
116+
VersionHandle version_handle = _client->get_version_handle();
117+
while (true) {
118+
// Find an adjacent predecessor and successor node pair.
119+
successor = find_adjacent<Node, VersionHandle, HeadNode>(head, tail, &predecessor, version_handle, predicate);
120+
assert(version_handle.is_tracked(), "invariant");
121+
// Invariant (adjacency): predecessor --> successor
122+
// Invariant (optional: key-based total order): predecessor->key() < key && key <= successor->key().
123+
// We can now attempt to insert the new node in-between.
124+
node->_next = successor;
125+
if (cas(&predecessor->_next, successor, node)) {
126+
// Invariant: predecessor --> node --> successor
127+
// An insert to head is a benign modification and will not need to be committed to the version control system.
128+
return;
129+
}
130+
}
131+
}
132+
133+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
134+
void JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::insert_tail(typename Client::Node* node,
135+
typename Client::Node* head,
136+
typename Client::Node* last,
137+
const typename Client::Node* tail) const {
138+
assert(node != NULL, "invariant");
139+
assert(head != NULL, "invariant");
140+
assert(last != NULL, "invariant");
141+
assert(tail != NULL, "invariant");
142+
// Mark the new node to be inserted with the insertion marker already.
143+
node->_next = set_insertion_bit(const_cast<NodePtr>(tail));
144+
// Invariant: [node]--> tail
145+
assert(is_marked_for_insertion(node->_next), "invariant");
146+
NodePtr predecessor;
147+
LastNode<Node> predicate;
148+
VersionHandle version_handle = _client->get_version_handle();
149+
while (true) {
150+
// Find an adjacent predecessor and successor node pair, where the successor == tail
151+
const NodePtr successor = find_adjacent<Node, VersionHandle, LastNode>(last, tail, &predecessor, version_handle, predicate);
152+
assert(version_handle.is_tracked(), "invariant");
153+
assert(successor == tail, "invariant");
154+
// Invariant: predecessor --> successor
155+
// We first attempt to mark the predecessor node to signal our intent of performing an insertion.
156+
if (mark_for_insertion(predecessor, tail)) {
157+
break;
158+
}
159+
}
160+
// Predecessor node is claimed for insertion.
161+
// Invariant: [predecessor] --> tail
162+
assert(is_marked_for_insertion(predecessor->_next), "invariant");
163+
assert(predecessor != head, "invariant");
164+
if (Atomic::load_acquire(&last->_next) == predecessor) {
165+
/* Even after we store the new node into the last->_next field, there is no race
166+
because it is also marked with the insertion bit. */
167+
last->_next = node;
168+
// Invariant: last --> [node] --> tail
169+
OrderAccess::storestore();
170+
// Perform the link with the predecessor node, which by this store becomes visible for removal.
171+
predecessor->_next = node;
172+
// Invariant: predecessor --> [node] --> tail
173+
} else {
174+
assert(last == predecessor, "invariant");
175+
last->_next = node;
176+
// Invariant: last --> [node] --> tail
177+
OrderAccess::storestore();
178+
/* This implies the list is logically empty from the removal perspective.
179+
cas is not needed here because inserts must not come in from the head side
180+
concurrently with inserts from tail which are currently blocked by us.
181+
Invariant (logical): head --> tail. */
182+
head->_next = node;
183+
// Invariant: head --> [node] --> tail
184+
}
185+
version_handle.release(); // release_store_fence
186+
// Publish the inserted node by removing the insertion marker.
187+
node->_next = const_cast<NodePtr>(tail);
188+
// Invariant: last --> node --> tail (possibly also head --> node --> tail)
189+
}
190+
191+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
192+
typename Client::Node* JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::remove(typename Client::Node* head,
193+
const typename Client::Node* tail,
194+
typename Client::Node* last /* NULL */,
195+
bool insert_is_head /* true */) {
196+
assert(head != NULL, "invariant");
197+
assert(tail != NULL, "invariant");
198+
assert(head != tail, "invariant");
199+
NodePtr predecessor;
200+
NodePtr successor;
201+
NodePtr successor_next;
202+
SearchPolicy<Node> predicate;
203+
VersionHandle version_handle = _client->get_version_handle();
204+
while (true) {
205+
// Find an adjacent predecessor and successor node pair.
206+
successor = find_adjacent<Node, VersionHandle, SearchPolicy>(head, tail, &predecessor, version_handle, predicate);
207+
assert(version_handle.is_tracked(), "invariant");
208+
if (successor == tail) {
209+
return NULL;
210+
}
211+
// Invariant: predecessor --> successor
212+
// Invariant (optional: key-based total order): predecessor->key() < key && key <= successor->key()
213+
// It is the successor node that is to be removed.
214+
// We first attempt to reserve (logically excise) the successor node.
215+
successor_next = mark_for_removal(successor);
216+
if (successor_next != NULL) {
217+
break;
218+
}
219+
}
220+
// Invariant: predecessor --> (successor) --> successor_next
221+
// Successor node now logically excised.
222+
assert(is_marked_for_removal(successor->_next), "invariant");
223+
// Now attempt to physically excise the successor node.
224+
// If the cas fails, we can optimize for the slow path if we know we are not performing
225+
// insertions from the head. Then a failed cas results not from a new node being inserted,
226+
// but only because another thread excised us already.
227+
if (!cas(&predecessor->_next, successor, successor_next) && insert_is_head) {
228+
// Physically excise using slow path, can be completed asynchronously by other threads.
229+
Identity<Node> excise(successor);
230+
find_adjacent<Node, VersionHandle, Identity>(head, tail, &predecessor, version_handle, excise);
231+
assert(version_handle.is_tracked(), "invariant");
232+
}
233+
if (last != NULL && Atomic::load_acquire(&last->_next) == successor) {
234+
guarantee(!insert_is_head, "invariant");
235+
guarantee(successor_next == tail, "invariant");
236+
LastNode<Node> excise;
237+
find_adjacent<Node, VersionHandle, LastNode>(last, tail, &predecessor, version_handle, excise);
238+
// Invariant: successor excised from last list
239+
}
240+
// Increment the current version so we can track when other threads have seen this update.
241+
VersionType version = version_handle.increment();
242+
version_handle.release(); // release_store_fence
243+
// Rendezvous with checkouts for versions less than this version.
244+
version_handle.await(version);
245+
// At this point we know there can be no references to the excised node. It is safe, enjoy it.
246+
return successor;
247+
}
248+
249+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
250+
bool JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::in_list(const typename Client::Node* node,
251+
typename Client::Node* head,
252+
const typename Client::Node* tail) const {
253+
assert(head != NULL, "invariant");
254+
assert(tail != NULL, "invariant");
255+
assert(head != tail, "invariant");
256+
VersionHandle version_handle = _client->get_version_handle();
257+
const Node* current = head;
258+
version_handle.checkout();
259+
assert(version_handle.is_tracked(), "invariant");
260+
const Node* next = Atomic::load_acquire(&current->_next);
261+
while (true) {
262+
if (!is_marked_for_removal(next)) {
263+
if (current == node) {
264+
return true;
265+
}
266+
}
267+
current = unmask(next);
268+
if (current == tail) break;
269+
next = current->_next;
270+
}
271+
return false;
272+
}
273+
274+
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
275+
template <typename Callback>
276+
inline void JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::iterate(typename Client::Node* head,
277+
const typename Client::Node* tail,
278+
Callback& cb) {
279+
assert(head != NULL, "invariant");
280+
assert(tail != NULL, "invariant");
281+
assert(head != tail, "invariant");
282+
VersionHandle version_handle = _client->get_version_handle();
283+
NodePtr current = head;
284+
version_handle.checkout();
285+
assert(version_handle.is_tracked(), "invariant");
286+
NodePtr next = Atomic::load_acquire(&current->_next);
287+
while (true) {
288+
if (!is_marked_for_removal(next)) {
289+
if (!cb.process(current)) {
290+
return;
291+
}
292+
}
293+
current = unmask(next);
294+
if (current == tail) break;
295+
next = current->_next;
296+
}
297+
}
298+
299+
#endif // SHARE_JFR_UTILITIES_JFRCONCURRENTLINKEDLISTHOST_INLINE_HPP
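
A hedged sketch of the rendezvous that remove() above performs through JfrVersionSystem (the real handle/await API differs; everything below is an illustrative stand-in): readers publish the version they traverse under, and a remover bumps the global version and waits until no reader still holds a strictly older one before reclaiming the excised node.

#include <atomic>
#include <cstdint>
#include <thread>

static const int max_readers = 8;                             // hypothetical bound
static std::atomic<std::uint64_t> global_version{1};
static std::atomic<std::uint64_t> checked_out[max_readers];   // 0 means "not reading"

static void reader_checkout(int slot) {
  checked_out[slot].store(global_version.load(std::memory_order_acquire),
                          std::memory_order_release);
}

static void reader_release(int slot) {
  checked_out[slot].store(0, std::memory_order_release);
}

// Remover side: publish a new version, then rendezvous with older readers.
static void await_older_readers() {
  const std::uint64_t version = global_version.fetch_add(1) + 1;
  for (int i = 0; i < max_readers; ++i) {
    while (true) {
      const std::uint64_t v = checked_out[i].load(std::memory_order_acquire);
      if (v == 0 || v >= version) break;                      // released or moved to a newer version
      std::this_thread::yield();
    }
  }
}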
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
/*
2+
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_UTILITIES_JFRCONCURRENTQUEUE_HPP
26+
#define SHARE_JFR_UTILITIES_JFRCONCURRENTQUEUE_HPP
27+
28+
#include "jfr/utilities/jfrAllocation.hpp"
29+
#include "jfr/utilities/jfrConcurrentLinkedListHost.hpp"
30+
#include "jfr/utilities/jfrRelation.hpp"
31+
#include "jfr/utilities/jfrVersionSystem.hpp"
32+
33+
/*
34+
* This is a thread-safe FIFO structure.
35+
* Although not non-blocking, in certain scenarios
36+
* it can act as a close approximation, "mostly" concurrent.
37+
* For a more detailed description of its properties,
38+
* please see JfrConcurrentLinkedListHost.hpp.
39+
*/
40+
template <typename NodeType, typename AllocPolicy = JfrCHeapObj>
41+
class JfrConcurrentQueue : public AllocPolicy {
42+
public:
43+
typedef NodeType Node;
44+
typedef NodeType* NodePtr;
45+
typedef const NodeType* ConstNodePtr;
46+
typedef JfrVersionSystem VersionSystem;
47+
JfrConcurrentQueue();
48+
bool initialize();
49+
bool is_empty() const;
50+
bool is_nonempty() const;
51+
void add(NodePtr node);
52+
NodePtr remove();
53+
template <typename Callback>
54+
void iterate(Callback& cb);
55+
bool in_list(const Node* node) const;
56+
private:
57+
JfrConcurrentLinkedListHost<JfrConcurrentQueue<Node, AllocPolicy>, HeadNode>* _list;
58+
Node _head;
59+
Node _last;
60+
const Node _tail;
61+
JfrVersionSystem _version_system;
62+
// callback for JfrConcurrentLinkedListHost
63+
typename VersionSystem::Handle get_version_handle();
64+
template <typename, template <typename> class, typename>
65+
friend class JfrConcurrentLinkedListHost;
66+
};
67+
68+
#endif // SHARE_JFR_UTILITIES_JFRCONCURRENTQUEUE_HPP
69+
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
/*
2+
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_UTILITIES_JFRCONCURRENTQUEUE_INLINE_HPP
26+
#define SHARE_JFR_UTILITIES_JFRCONCURRENTQUEUE_INLINE_HPP
27+
28+
#include "jfr/utilities/jfrConcurrentQueue.hpp"
29+
#include "jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp"
30+
#include "jfr/utilities/jfrVersionSystem.inline.hpp"
31+
32+
template <typename NodeType, typename AllocPolicy>
33+
JfrConcurrentQueue<NodeType, AllocPolicy>::JfrConcurrentQueue() : _list(NULL), _head(), _last(), _tail(), _version_system() {
34+
_head._next = const_cast<NodePtr>(&_tail);
35+
_last._next = const_cast<NodePtr>(&_tail);
36+
}
37+
38+
template <typename NodeType, typename AllocPolicy>
39+
bool JfrConcurrentQueue<NodeType, AllocPolicy>::initialize() {
40+
assert(_list == NULL, "invariant");
41+
_list = new JfrConcurrentLinkedListHost<JfrConcurrentQueue<NodeType, AllocPolicy>, HeadNode, AllocPolicy>(this);
42+
return _list != NULL && _list->initialize();
43+
}
44+
45+
template <typename NodeType, typename AllocPolicy>
46+
inline bool JfrConcurrentQueue<NodeType, AllocPolicy>::is_empty() const {
47+
return Atomic::load_acquire(&_head._next) == &_tail;
48+
}
49+
50+
template <typename NodeType, typename AllocPolicy>
51+
inline bool JfrConcurrentQueue<NodeType, AllocPolicy>::is_nonempty() const {
52+
return !is_empty();
53+
}
54+
55+
template <typename NodeType, typename AllocPolicy>
56+
void JfrConcurrentQueue<NodeType, AllocPolicy>::add(typename JfrConcurrentQueue<NodeType, AllocPolicy>::NodePtr node) {
57+
_list->insert_tail(node, &_head, &_last, &_tail);
58+
}
59+
60+
template <typename NodeType, typename AllocPolicy>
61+
typename JfrConcurrentQueue<NodeType, AllocPolicy>::NodePtr JfrConcurrentQueue<NodeType, AllocPolicy>::remove() {
62+
return _list->remove(&_head, &_tail, &_last, false);
63+
}
64+
65+
template <typename NodeType, typename AllocPolicy>
66+
template <typename Callback>
67+
void JfrConcurrentQueue<NodeType, AllocPolicy>::iterate(Callback& cb) {
68+
_list->iterate(&_head, &_tail, cb);
69+
}
70+
71+
template <typename NodeType, typename AllocPolicy>
72+
inline JfrVersionSystem::Handle JfrConcurrentQueue<NodeType, AllocPolicy>::get_version_handle() {
73+
return _version_system.get_handle();
74+
}
75+
76+
template <typename NodeType, typename AllocPolicy>
77+
bool JfrConcurrentQueue<NodeType, AllocPolicy>::in_list(const NodeType* node) const {
78+
assert(node != NULL, "invariant");
79+
return _list->in_list(node, const_cast<NodePtr>(&_head), &_tail);
80+
}
81+
82+
#endif // SHARE_JFR_UTILITIES_JFRCONCURRENTQUEUE_INLINE_HPP
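
A tiny, single-threaded sketch (illustrative types only) of the sentinel layout the constructor above establishes: both the head and the "last" cursor initially point at the tail sentinel, so is_empty() reduces to comparing head._next against the tail.

struct QueueNodeSketch { QueueNodeSketch* _next = nullptr; };

struct SentinelLayoutSketch {
  QueueNodeSketch _head;
  QueueNodeSketch _last;
  QueueNodeSketch _tail;
  SentinelLayoutSketch() {
    _head._next = &_tail;   // empty: head links straight to the tail sentinel
    _last._next = &_tail;   // tail-insertion cursor starts at the same place
  }
  bool is_empty() const { return _head._next == &_tail; }
};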
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -27,81 +27,93 @@
2727

2828
#include "memory/allocation.hpp"
2929

30-
enum jfr_iter_direction {
31-
forward = 1,
32-
backward
30+
template <typename List>
31+
class StopOnNullCondition {
32+
typedef typename List::Node Node;
33+
private:
34+
List& _list;
35+
mutable Node* _node;
36+
public:
37+
StopOnNullCondition(List& list) : _list(list), _node(list.head()) {}
38+
bool has_next() const {
39+
return _node != NULL;
40+
}
41+
Node* next() const {
42+
assert(_node != NULL, "invariant");
43+
Node* temp = _node;
44+
_node = (Node*)_node->_next;
45+
return temp;
46+
}
3347
};
3448

35-
template <typename Node>
36-
class StopOnNullCondition : public AllStatic {
49+
template <typename List>
50+
class StopOnNullConditionRemoval {
51+
typedef typename List::Node Node;
52+
private:
53+
List& _list;
54+
mutable Node* _node;
3755
public:
38-
static bool has_next(const Node* node) {
39-
return node != NULL;
56+
StopOnNullConditionRemoval(List& list) : _list(list), _node(NULL) {}
57+
bool has_next() const {
58+
_node = _list.remove();
59+
return _node != NULL;
60+
}
61+
Node* next() const {
62+
assert(_node != NULL, "invariant");
63+
return _node;
4064
}
4165
};
4266

4367
template <typename List, template <typename> class ContinuationPredicate>
4468
class Navigator {
4569
public:
4670
typedef typename List::Node Node;
47-
typedef jfr_iter_direction Direction;
48-
Navigator(List& list, Direction direction) :
49-
_list(list), _node(direction == forward ? list.head() : list.tail()), _direction(direction) {}
71+
Navigator(List& list) : _continuation(list) {}
5072
bool has_next() const {
51-
return ContinuationPredicate<Node>::has_next(_node);
52-
}
53-
54-
bool direction_forward() const {
55-
return _direction == forward;
73+
return _continuation.has_next();
5674
}
57-
5875
Node* next() const {
59-
assert(_node != NULL, "invariant");
60-
Node* temp = _node;
61-
_node = direction_forward() ? (Node*)_node->next() : (Node*)_node->prev();
62-
return temp;
63-
}
64-
65-
void set_direction(Direction direction) {
66-
_direction = direction;
67-
}
68-
69-
void reset(Direction direction) {
70-
set_direction(direction);
71-
_node = direction_forward() ? _list.head() : _list.tail();
76+
return _continuation.next();
7277
}
73-
7478
private:
75-
List& _list;
79+
ContinuationPredicate<List> _continuation;
7680
mutable Node* _node;
77-
Direction _direction;
7881
};
7982

8083
template <typename List>
8184
class NavigatorStopOnNull : public Navigator<List, StopOnNullCondition> {
8285
public:
83-
NavigatorStopOnNull(List& list, jfr_iter_direction direction = forward) : Navigator<List, StopOnNullCondition>(list, direction) {}
86+
NavigatorStopOnNull(List& list) : Navigator<List, StopOnNullCondition>(list) {}
87+
};
88+
89+
template <typename List>
90+
class NavigatorStopOnNullRemoval : public Navigator<List, StopOnNullConditionRemoval> {
91+
public:
92+
NavigatorStopOnNullRemoval(List& list) : Navigator<List, StopOnNullConditionRemoval>(list) {}
8493
};
8594

8695
template<typename List, template <typename> class Navigator, typename AP = StackObj>
8796
class IteratorHost : public AP {
8897
private:
8998
Navigator<List> _navigator;
90-
9199
public:
92-
typedef typename List::Node Node;
93-
typedef jfr_iter_direction Direction;
94-
IteratorHost(List& list, Direction direction = forward) : AP(), _navigator(list, direction) {}
95-
void reset(Direction direction = forward) { _navigator.reset(direction); }
100+
typedef typename List::NodePtr NodePtr;
101+
IteratorHost(List& list) : AP(), _navigator(list) {}
102+
void reset() { _navigator.reset(); }
96103
bool has_next() const { return _navigator.has_next(); }
97-
Node* next() const { return _navigator.next(); }
98-
void set_direction(Direction direction) { _navigator.set_direction(direction); }
104+
NodePtr next() const { return _navigator.next(); }
99105
};
100106

101107
template<typename List, typename AP = StackObj>
102108
class StopOnNullIterator : public IteratorHost<List, NavigatorStopOnNull, AP> {
103109
public:
104-
StopOnNullIterator(List& list, jfr_iter_direction direction = forward) : IteratorHost<List, NavigatorStopOnNull, AP>(list, direction) {}
110+
StopOnNullIterator(List& list) : IteratorHost<List, NavigatorStopOnNull, AP>(list) {}
111+
};
112+
113+
template<typename List, typename AP = StackObj>
114+
class StopOnNullIteratorRemoval : public IteratorHost<List, NavigatorStopOnNullRemoval, AP> {
115+
public:
116+
StopOnNullIteratorRemoval(List& list) : IteratorHost<List, NavigatorStopOnNullRemoval, AP>(list) {}
105117
};
106118

107119
#endif // SHARE_JFR_UTILITIES_JFRITERATOR_HPP
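
A hedged sketch (stand-in list and node types, not the JFR templates) of the new removal navigator's behaviour: has_next() consumes the next node from the underlying list, so iterating a full list also drains it.

struct IterNodeSketch { IterNodeSketch* _next; };

struct IterListSketch {                  // single-consumer stand-in list
  IterNodeSketch* _head = nullptr;
  IterNodeSketch* remove() {             // pop from the head
    IterNodeSketch* node = _head;
    if (node != nullptr) {
      _head = node->_next;
    }
    return node;
  }
};

class ConsumingIteratorSketch {
  IterListSketch& _list;
  IterNodeSketch* _node = nullptr;
 public:
  explicit ConsumingIteratorSketch(IterListSketch& list) : _list(list) {}
  bool has_next() { _node = _list.remove(); return _node != nullptr; }
  IterNodeSketch* next() const { return _node; }
};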
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
/*
2+
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_UTILITIES_JFRLINKEDLIST_HPP
26+
#define SHARE_JFR_UTILITIES_JFRLINKEDLIST_HPP
27+
28+
#include "jfr/utilities/jfrAllocation.hpp"
29+
30+
/*
31+
 * This linked list is thread-safe for add() only;
32+
 * remove(), iterate(), excise() and in_list() are not.
33+
 * Intended for multiple producers and a single consumer.
34+
 */
35+
36+
template <typename NodeType, typename AllocPolicy = JfrCHeapObj>
37+
class JfrLinkedList : public AllocPolicy {
38+
public:
39+
typedef NodeType Node;
40+
typedef NodeType* NodePtr;
41+
JfrLinkedList();
42+
bool initialize();
43+
bool is_empty() const;
44+
bool is_nonempty() const;
45+
void add(NodePtr node);
46+
NodePtr remove();
47+
template <typename Callback>
48+
void iterate(Callback& cb);
49+
NodePtr head() const;
50+
NodePtr excise(NodePtr prev, NodePtr node);
51+
bool in_list(const NodeType* node) const;
52+
private:
53+
NodePtr _head;
54+
};
55+
56+
#endif // SHARE_JFR_UTILITIES_JFRLINKEDLIST_HPP
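A usage sketch of the declared interface, under the stated contract (concurrent add, single-threaded consume). MyNode, CountingCallback and example() are illustrative names, not part of the patch; JfrLinkedList threads the list through a public _next field on the node type.

// Sketch only: MyNode, CountingCallback and example() are hypothetical.
#include "jfr/utilities/jfrLinkedList.inline.hpp"

struct MyNode {
  MyNode* _next;   // JfrLinkedList links nodes through this field
  int _payload;
  MyNode(int p = 0) : _next(NULL), _payload(p) {}
};

struct CountingCallback {
  size_t _count;
  CountingCallback() : _count(0) {}
  bool process(MyNode*) {   // called once per node; return false to stop early
    ++_count;
    return true;
  }
};

static void example(JfrLinkedList<MyNode>& list, MyNode* node) {
  list.add(node);                   // safe to call from multiple producer threads
  CountingCallback cb;
  list.iterate(cb);                 // consumer side only
  MyNode* removed = list.remove();  // pops the most recently added node, or NULL
  (void)removed;
}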
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,120 @@
1+
/*
2+
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_UTILITIES_JFRLINKEDLIST_INLINE_HPP
26+
#define SHARE_JFR_UTILITIES_JFRLINKEDLIST_INLINE_HPP
27+
28+
#include "jfr/utilities/jfrLinkedList.hpp"
29+
#include "runtime/atomic.hpp"
30+
31+
template <typename NodeType, typename AllocPolicy>
32+
JfrLinkedList<NodeType, AllocPolicy>::JfrLinkedList() : _head(NULL) {}
33+
34+
template <typename NodeType, typename AllocPolicy>
35+
bool JfrLinkedList<NodeType, AllocPolicy>::initialize() {
36+
return true;
37+
}
38+
39+
template <typename NodeType, typename AllocPolicy>
40+
inline NodeType* JfrLinkedList<NodeType, AllocPolicy>::head() const {
41+
return (NodeType*)Atomic::load_acquire(&_head);
42+
}
43+
44+
template <typename NodeType, typename AllocPolicy>
45+
inline bool JfrLinkedList<NodeType, AllocPolicy>::is_empty() const {
46+
return NULL == head();
47+
}
48+
49+
template <typename NodeType, typename AllocPolicy>
50+
inline bool JfrLinkedList<NodeType, AllocPolicy>::is_nonempty() const {
51+
return !is_empty();
52+
}
53+
54+
template <typename NodeType, typename AllocPolicy>
55+
inline void JfrLinkedList<NodeType, AllocPolicy>::add(NodeType* node) {
56+
assert(node != NULL, "invariant");
57+
NodePtr next;
58+
do {
59+
next = head();
60+
node->_next = next;
61+
} while (Atomic::cmpxchg(&_head, next, node) != next);
62+
}
63+
64+
template <typename NodeType, typename AllocPolicy>
65+
inline NodeType* JfrLinkedList<NodeType, AllocPolicy>::remove() {
66+
NodePtr node;
67+
NodePtr next;
68+
do {
69+
node = head();
70+
if (node == NULL) break;
71+
next = (NodePtr)node->_next;
72+
} while (Atomic::cmpxchg(&_head, node, next) != node);
73+
return node;
74+
}
75+
76+
template <typename NodeType, typename AllocPolicy>
77+
template <typename Callback>
78+
void JfrLinkedList<NodeType, AllocPolicy>::iterate(Callback& cb) {
79+
NodePtr current = head();
80+
while (current != NULL) {
81+
NodePtr next = (NodePtr)current->_next;
82+
if (!cb.process(current)) {
83+
return;
84+
}
85+
current = next;
86+
}
87+
}
88+
89+
template <typename NodeType, typename AllocPolicy>
90+
NodeType* JfrLinkedList<NodeType, AllocPolicy>::excise(NodeType* prev, NodeType* node) {
91+
NodePtr next = (NodePtr)node->_next;
92+
if (prev == NULL) {
93+
prev = Atomic::cmpxchg(&_head, node, next);
94+
if (prev == node) {
95+
return NULL;
96+
}
97+
}
98+
assert(prev != NULL, "invariant");
99+
while (prev->_next != node) {
100+
prev = (NodePtr)prev->_next;
101+
}
102+
assert(prev->_next == node, "invariant");
103+
prev->_next = next;
104+
return prev;
105+
}
106+
107+
template <typename NodeType, typename AllocPolicy>
108+
bool JfrLinkedList<NodeType, AllocPolicy>::in_list(const NodeType* node) const {
109+
assert(node != NULL, "invariant");
110+
const NodeType* current = head();
111+
while (current != NULL) {
112+
if (current == node) {
113+
return true;
114+
}
115+
current = (NodeType*)current->_next;
116+
}
117+
return false;
118+
}
119+
120+
#endif // SHARE_JFR_UTILITIES_JFRLINKEDLIST_INLINE_HPP
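The add() and remove() loops above follow the classic lock-free "push onto a singly linked head" shape. As an illustration only, the same shape expressed with std::atomic outside HotSpot (HotSpot itself uses the Atomic:: wrappers seen above; Node, push and pop here are hypothetical):

#include <atomic>

struct Node {
  Node* next;
  int value;
};

static std::atomic<Node*> g_head{nullptr};

// Safe for multiple concurrent producers.
static void push(Node* node) {
  Node* observed = g_head.load(std::memory_order_acquire);
  do {
    node->next = observed;                    // link before publishing
  } while (!g_head.compare_exchange_weak(observed, node,
                                          std::memory_order_release,
                                          std::memory_order_acquire));
}

// Safe for a single consumer only: there is no ABA or reclamation handling,
// mirroring the "multiple producers, single consumer" contract of JfrLinkedList.
static Node* pop() {
  Node* observed = g_head.load(std::memory_order_acquire);
  while (observed != nullptr &&
         !g_head.compare_exchange_weak(observed, observed->next,
                                       std::memory_order_acquire)) {
    // compare_exchange_weak refreshes 'observed' on failure; just retry
  }
  return observed;
}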
+114
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
/*
2+
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_UTILITIES_JFRNODE_HPP
26+
#define SHARE_JFR_UTILITIES_JFRNODE_HPP
27+
28+
#include "jfr/utilities/jfrTypes.hpp"
29+
#include "memory/allocation.hpp"
30+
#include "runtime/atomic.hpp"
31+
32+
const uint64_t JFR_NODE_LOGICAL_EXCISION_BIT = 1;
33+
const uint64_t JFR_NODE_LOGICAL_INSERTION_BIT = 2;
34+
const uint64_t JFR_NODE_MASK = ~(JFR_NODE_LOGICAL_INSERTION_BIT | JFR_NODE_LOGICAL_EXCISION_BIT);
35+
36+
template <typename Node>
37+
inline bool cas(Node** address, Node* current, Node* exchange) {
38+
return Atomic::cmpxchg(address, current, exchange) == current;
39+
}
40+
41+
template <typename Node>
42+
inline bool is_marked_for_removal(const Node* ptr) {
43+
return ((uint64_t)ptr & JFR_NODE_LOGICAL_EXCISION_BIT) == JFR_NODE_LOGICAL_EXCISION_BIT;
44+
}
45+
46+
template <typename Node>
47+
inline bool is_marked_for_insertion(const Node* ptr) {
48+
return ((uint64_t)ptr & JFR_NODE_LOGICAL_INSERTION_BIT) == JFR_NODE_LOGICAL_INSERTION_BIT;
49+
}
50+
51+
template <typename Node>
52+
inline Node* set_excision_bit(const Node* ptr) {
53+
return (Node*)(((uint64_t)ptr) | JFR_NODE_LOGICAL_EXCISION_BIT);
54+
}
55+
56+
template <typename Node>
57+
inline Node* set_insertion_bit(const Node* ptr) {
58+
return (Node*)(((uint64_t)ptr) | JFR_NODE_LOGICAL_INSERTION_BIT);
59+
}
60+
61+
template <typename Node>
62+
inline Node* unmask(const Node* ptr) {
63+
return (Node*)(((uint64_t)ptr) & JFR_NODE_MASK);
64+
}
65+
66+
template <typename Derived, typename Version = traceid>
67+
class JfrLinkedNode : public ResourceObj {
68+
public:
69+
typedef Version VersionType;
70+
Derived* _next;
71+
JfrLinkedNode() : _next(NULL) {}
72+
JfrLinkedNode(JfrLinkedNode<Derived, VersionType>* next) : _next(next) {}
73+
};
74+
75+
template <typename V>
76+
class JfrKeyIsThisNode : public JfrLinkedNode<JfrKeyIsThisNode<V> > {
77+
private:
78+
V _value;
79+
public:
80+
typedef V Value;
81+
typedef const JfrKeyIsThisNode<V>* Key;
82+
JfrKeyIsThisNode(const Value value = NULL) : JfrLinkedNode<JfrKeyIsThisNode<V> >(), _value(value) {}
83+
Key key() const { return this; }
84+
Value value() const { return _value; }
85+
void set_value(Value value) { _value = value; }
86+
};
87+
88+
template <typename V>
89+
class JfrValueNode : public JfrLinkedNode<JfrValueNode<V> > {
90+
private:
91+
V _value;
92+
public:
93+
typedef V Value;
94+
typedef Value Key;
95+
JfrValueNode(const Value value = NULL) : JfrLinkedNode<JfrValueNode<V> >(), _value(value) {}
96+
Key key() const { return value(); }
97+
Value value() const { return _value; }
98+
void set_value(Value value) { _value = value; }
99+
};
100+
101+
template <typename V>
102+
class JfrKeyIsFreeSizeNode : public JfrLinkedNode<JfrKeyIsFreeSizeNode<V> > {
103+
private:
104+
V _value;
105+
public:
106+
typedef V Value;
107+
typedef size_t Key;
108+
JfrKeyIsFreeSizeNode(const Value value = NULL) : JfrLinkedNode<JfrKeyIsFreeSizeNode<V> >(), _value(value) {}
109+
Key key() const { return value()->free_size(); }
110+
Value value() const { return _value; }
111+
void set_value(Value value) { _value = value; }
112+
};
113+
114+
#endif // SHARE_JFR_UTILITIES_JFRNODE_HPP
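The excision/insertion bits rely on node addresses being at least four-byte aligned, so the two lowest bits of a next pointer are always zero and can carry per-link state. A small self-contained sketch of that tagging idea (TaggedNode and the test are illustrative, not the JFR types):

#include <cassert>
#include <stdint.h>

struct TaggedNode { TaggedNode* _next; };     // pointer-sized, so at least 4-byte aligned

const uint64_t EXCISION_BIT  = 1;
const uint64_t INSERTION_BIT = 2;
const uint64_t PTR_MASK      = ~(EXCISION_BIT | INSERTION_BIT);

static TaggedNode* mark_excised(const TaggedNode* ptr) {
  return (TaggedNode*)((uint64_t)ptr | EXCISION_BIT);
}

static bool is_excised(const TaggedNode* ptr) {
  return ((uint64_t)ptr & EXCISION_BIT) != 0;
}

static TaggedNode* unmask_ptr(const TaggedNode* ptr) {
  return (TaggedNode*)((uint64_t)ptr & PTR_MASK);
}

int main() {
  TaggedNode node = { NULL };
  TaggedNode* marked = mark_excised(&node);
  assert(is_excised(marked));           // the mark is observable on the tagged pointer
  assert(unmask_ptr(marked) == &node);  // stripping it recovers the real address
  return 0;
}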
