|
| 1 | +/* |
| 2 | + * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. |
| 3 | + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | + * |
| 5 | + * This code is free software; you can redistribute it and/or modify it |
| 6 | + * under the terms of the GNU General Public License version 2 only, as |
| 7 | + * published by the Free Software Foundation. |
| 8 | + * |
| 9 | + * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | + * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | + * accompanied this code). |
| 14 | + * |
| 15 | + * You should have received a copy of the GNU General Public License version |
| 16 | + * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | + * |
| 19 | + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | + * or visit www.oracle.com if you need additional information or have any |
| 21 | + * questions. |
| 22 | + * |
| 23 | + */ |
| 24 | + |
| 25 | +#include "precompiled.hpp" |
| 26 | + |
| 27 | +#include "gc/g1/g1SegmentedArray.inline.hpp" |
| 28 | +#include "memory/allocation.hpp" |
| 29 | +#include "runtime/atomic.hpp" |
| 30 | +#include "utilities/globalCounter.inline.hpp" |
| 31 | + |
// Constructs a segment header in place. The slot payload area begins directly
// behind the header; create_segment() over-allocates the backing buffer to
// make room for it (see size_in_bytes()).
G1SegmentedArraySegment::G1SegmentedArraySegment(uint slot_size, uint num_slots, G1SegmentedArraySegment* next, MEMFLAGS flag) :
  _slot_size(slot_size),
  _num_slots(num_slots),
  _mem_flag(flag),
  _next(next),
  _next_allocate(0) {
  // First slot starts right after this header within the same allocation.
  _bottom = ((char*) this) + header_size();
}
| 40 | + |
| 41 | +G1SegmentedArraySegment* G1SegmentedArraySegment::create_segment(uint slot_size, |
| 42 | + uint num_slots, |
| 43 | + G1SegmentedArraySegment* next, |
| 44 | + MEMFLAGS mem_flag) { |
| 45 | + size_t block_size = size_in_bytes(slot_size, num_slots); |
| 46 | + char* alloc_block = NEW_C_HEAP_ARRAY(char, block_size, mem_flag); |
| 47 | + return new (alloc_block) G1SegmentedArraySegment(slot_size, num_slots, next, mem_flag); |
| 48 | +} |
| 49 | + |
| 50 | +void G1SegmentedArraySegment::delete_segment(G1SegmentedArraySegment* segment) { |
| 51 | + segment->~G1SegmentedArraySegment(); |
| 52 | + FREE_C_HEAP_ARRAY(_mem_flag, segment); |
| 53 | +} |
| 54 | + |
// Prepends the segment chain [first, last] to the free list and adds its
// totals to the statistics counters. num and mem_size must describe exactly
// that chain.
void G1SegmentedArrayFreeList::bulk_add(G1SegmentedArraySegment& first,
                                        G1SegmentedArraySegment& last,
                                        size_t num,
                                        size_t mem_size) {
  _list.prepend(first, last);
  // Counters are statistics only, so relaxed ordering suffices. They are
  // updated after the prepend; concurrent readers may briefly observe counts
  // that lag behind the actual list contents.
  Atomic::add(&_num_segments, num, memory_order_relaxed);
  Atomic::add(&_mem_size, mem_size, memory_order_relaxed);
}
| 63 | + |
| 64 | +void G1SegmentedArrayFreeList::print_on(outputStream* out, const char* prefix) { |
| 65 | + out->print_cr("%s: segments %zu size %zu", |
| 66 | + prefix, Atomic::load(&_num_segments), Atomic::load(&_mem_size)); |
| 67 | +} |
| 68 | + |
// Detaches the entire free list and returns its head segment (nullptr if the
// list was empty). The detached chain's segment count and memory size are
// returned through num_segments and mem_size.
G1SegmentedArraySegment* G1SegmentedArrayFreeList::get_all(size_t& num_segments,
                                                           size_t& mem_size) {
  // The global-counter critical section keeps segments popped here alive for
  // concurrent readers that may still be traversing them.
  GlobalCounter::CriticalSection cs(Thread::current());

  G1SegmentedArraySegment* result = _list.pop_all();
  // NOTE(review): counters are sampled after pop_all(); a concurrent
  // bulk_add() in between could make them overstate what was actually popped.
  // These values look like best-effort statistics — confirm callers tolerate
  // slight imprecision.
  num_segments = Atomic::load(&_num_segments);
  mem_size = Atomic::load(&_mem_size);

  if (result != nullptr) {
    Atomic::sub(&_num_segments, num_segments, memory_order_relaxed);
    Atomic::sub(&_mem_size, mem_size, memory_order_relaxed);
  }
  return result;
}
| 83 | + |
| 84 | +void G1SegmentedArrayFreeList::free_all() { |
| 85 | + size_t num_freed = 0; |
| 86 | + size_t mem_size_freed = 0; |
| 87 | + G1SegmentedArraySegment* cur; |
| 88 | + |
| 89 | + while ((cur = _list.pop()) != nullptr) { |
| 90 | + mem_size_freed += cur->mem_size(); |
| 91 | + num_freed++; |
| 92 | + G1SegmentedArraySegment::delete_segment(cur); |
| 93 | + } |
| 94 | + |
| 95 | + Atomic::sub(&_num_segments, num_freed, memory_order_relaxed); |
| 96 | + Atomic::sub(&_mem_size, mem_size_freed, memory_order_relaxed); |
| 97 | +} |
| 98 | + |
// Provides a fresh allocation segment: reuses one from the free list if
// possible, otherwise allocates a new one, then races to install it as the
// new head (_first) of this array's segment list. Several threads may attempt
// this concurrently with the same prev; exactly one wins the cmpxchg, and
// losers delete their candidate and use the winner's segment instead.
G1SegmentedArraySegment* G1SegmentedArray::create_new_segment(G1SegmentedArraySegment* const prev) {
  // Take an existing segment if available.
  G1SegmentedArraySegment* next = _free_segment_list->get();
  if (next == nullptr) {
    // Segment sizing policy lives in the alloc options; it is fed the size of
    // the previous segment (0 if this is the first one).
    uint prev_num_slots = (prev != nullptr) ? prev->num_slots() : 0;
    uint num_slots = _alloc_options->next_num_slots(prev_num_slots);

    next = G1SegmentedArraySegment::create_segment(slot_size(), num_slots, prev, _alloc_options->mem_flag());
  } else {
    assert(slot_size() == next->slot_size() ,
           "Mismatch %d != %d", slot_size(), next->slot_size());
    // Reused segment: re-link it in front of prev (reset() presumably also
    // clears its allocation state — declared in the header; verify there).
    next->reset(prev);
  }

  // Install it as current allocation segment.
  G1SegmentedArraySegment* old = Atomic::cmpxchg(&_first, prev, next);
  if (old != prev) {
    // Somebody else installed the segment, use that one.
    G1SegmentedArraySegment::delete_segment(next);
    return old;
  } else {
    // Did we install the first segment in the list? If so, this is also the last.
    if (prev == nullptr) {
      _last = next;
    }
    // Successfully installed the segment into the list.
    Atomic::inc(&_num_segments, memory_order_relaxed);
    Atomic::add(&_mem_size, next->mem_size(), memory_order_relaxed);
    Atomic::add(&_num_available_slots, next->num_slots(), memory_order_relaxed);
    return next;
  }
}
| 131 | + |
// Both alloc_options and free_segment_list are referenced, not copied; they
// must outlive this array.
G1SegmentedArray::G1SegmentedArray(const G1SegmentedArrayAllocOptions* alloc_options,
                                   G1SegmentedArrayFreeList* free_segment_list) :
  _alloc_options(alloc_options),
  _first(nullptr),
  _last(nullptr),
  _num_segments(0),
  _mem_size(0),
  _free_segment_list(free_segment_list),
  _num_available_slots(0),
  _num_allocated_slots(0) {
  assert(_free_segment_list != nullptr, "precondition!");
}
| 144 | + |
// Returns all segments of this array to the associated free list (they are
// not freed here; see G1SegmentedArrayFreeList::free_all for that).
G1SegmentedArray::~G1SegmentedArray() {
  drop_all();
}
| 148 | + |
// Size in bytes of a single slot, as configured in the alloc options.
uint G1SegmentedArray::slot_size() const {
  return _alloc_options->slot_size();
}
| 152 | + |
// Hands the whole segment chain of this array over to the free list and
// resets this array to the empty state. Assumes no concurrent allocate()
// calls while this runs.
void G1SegmentedArray::drop_all() {
  G1SegmentedArraySegment* cur = Atomic::load_acquire(&_first);

  if (cur != nullptr) {
    assert(_last != nullptr, "If there is at least one segment, there must be a last one.");

    G1SegmentedArraySegment* first = cur;
#ifdef ASSERT
    // Check list consistency.
    G1SegmentedArraySegment* last = cur;
    uint num_segments = 0;
    size_t mem_size = 0;
    while (cur != nullptr) {
      mem_size += cur->mem_size();
      num_segments++;

      G1SegmentedArraySegment* next = cur->next();
      last = cur;
      cur = next;
    }
#endif
    // The locals referenced below exist only in ASSERT builds; the asserts
    // themselves compile away in product builds, so this is well-formed.
    assert(num_segments == _num_segments, "Segment count inconsistent %u %u", num_segments, _num_segments);
    assert(mem_size == _mem_size, "Memory size inconsistent");
    assert(last == _last, "Inconsistent last segment");

    // Return the whole chain [first, _last] to the free list in one bulk op.
    _free_segment_list->bulk_add(*first, *_last, _num_segments, _mem_size);
  }

  _first = nullptr;
  _last = nullptr;
  _num_segments = 0;
  _mem_size = 0;
  _num_available_slots = 0;
  _num_allocated_slots = 0;
}
| 188 | + |
// Returns a new slot, lock-free. Tries the current head segment first; when
// it is full (or no segment exists yet), installs a fresh segment via
// create_new_segment() and retries. Never returns nullptr.
void* G1SegmentedArray::allocate() {
  assert(slot_size() > 0, "instance size not set.");

  G1SegmentedArraySegment* cur = Atomic::load_acquire(&_first);
  if (cur == nullptr) {
    cur = create_new_segment(cur);
  }

  while (true) {
    void* slot = cur->get_new_slot();
    if (slot != nullptr) {
      Atomic::inc(&_num_allocated_slots, memory_order_relaxed);
      // Alignment is a hard invariant of the returned slots, hence guarantee
      // (checked in product builds too) rather than assert.
      guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
                "result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
      return slot;
    }
    // The segment is full. Next round.
    assert(cur->is_full(), "must be");
    cur = create_new_segment(cur);
  }
}
| 210 | + |
// Current number of segments in this array (statistics; plain atomic read).
uint G1SegmentedArray::num_segments() const {
  return Atomic::load(&_num_segments);
}
| 214 | + |
| 215 | +#ifdef ASSERT |
| 216 | +class LengthClosure { |
| 217 | + uint _total; |
| 218 | +public: |
| 219 | + LengthClosure() : _total(0) {} |
| 220 | + void do_segment(G1SegmentedArraySegment* segment, uint limit) { |
| 221 | + _total += limit; |
| 222 | + } |
| 223 | + uint length() const { |
| 224 | + return _total; |
| 225 | + } |
| 226 | +}; |
| 227 | + |
| 228 | +uint G1SegmentedArray::calculate_length() const { |
| 229 | + LengthClosure closure; |
| 230 | + iterate_segments(closure); |
| 231 | + return closure.length(); |
| 232 | +} |
| 233 | +#endif |
| 234 | + |
| 235 | +template <typename SegmentClosure> |
| 236 | +void G1SegmentedArray::iterate_segments(SegmentClosure& closure) const { |
| 237 | + G1SegmentedArraySegment* cur = Atomic::load_acquire(&_first); |
| 238 | + |
| 239 | + assert((cur != nullptr) == (_last != nullptr), |
| 240 | + "If there is at least one segment, there must be a last one"); |
| 241 | + |
| 242 | + while (cur != nullptr) { |
| 243 | + closure.do_segment(cur, cur->length()); |
| 244 | + cur = cur->next(); |
| 245 | + } |
| 246 | +} |
0 commit comments