|
1 | 1 | /*
|
2 |
| - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
| 2 | + * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. |
3 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
4 | 4 | *
|
5 | 5 | * This code is free software; you can redistribute it and/or modify it
|
|
42 | 42 | //------------------------------ResourceArea-----------------------------------
|
43 | 43 | // A ResourceArea is an Arena that supports safe usage of ResourceMark.
|
// A ResourceArea is an Arena whose allocation state can be captured and
// rolled back by a ResourceMark.  In debug builds it also tracks the
// number of currently active (nested) marks.
class ResourceArea: public Arena {
  friend class VMStructs;

#ifdef ASSERT
  int _nesting;                 // current # of nested ResourceMarks
  void verify_has_resource_mark();
#endif // ASSERT

public:
  ResourceArea(MEMFLAGS flags = mtThread) :
    Arena(flags) DEBUG_ONLY(COMMA _nesting(0)) {}

  ResourceArea(size_t init_size, MEMFLAGS flags = mtThread) :
    Arena(flags, init_size) DEBUG_ONLY(COMMA _nesting(0)) {}

  char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Bias this resource area to specific memory type
  // (by default, ResourceArea is tagged as mtThread, per-thread general purpose storage)
  void bias_to(MEMFLAGS flags);

  // Debug-only accessor for the current mark nesting depth.
  DEBUG_ONLY(int nesting() const { return _nesting; })

  // Capture the state of a ResourceArea needed by a ResourceMark for
  // rollback to that mark.
  class SavedState {
    friend class ResourceArea;
    Chunk* _chunk;              // current chunk at capture time
    char* _hwm;                 // high-water mark within that chunk
    char* _max;                 // end of that chunk's usable space
    size_t _size_in_bytes;      // total arena size at capture time
    DEBUG_ONLY(int _nesting;)   // nesting depth at capture time

  public:
    SavedState(ResourceArea* area) :
      _chunk(area->_chunk),
      _hwm(area->_hwm),
      _max(area->_max),
      _size_in_bytes(area->_size_in_bytes)
      DEBUG_ONLY(COMMA _nesting(area->_nesting))
    {}
  };

  // Check and adjust debug-only nesting level.
  // Precondition: state was captured from this area and no other mark
  // has been activated since (nesting levels must match).
  void activate_state(const SavedState& state) {
    assert(_nesting == state._nesting, "precondition");
    assert(_nesting >= 0, "precondition");
    assert(_nesting < INT_MAX, "nesting overflow");
    DEBUG_ONLY(++_nesting;)     // no-op in product builds, like the asserts above
  }

  // Check and adjust debug-only nesting level.
  void deactivate_state(const SavedState& state) {
    assert(_nesting > state._nesting, "deactivating inactive mark");
    assert((_nesting - state._nesting) == 1, "deactivating across another mark");
    DEBUG_ONLY(--_nesting;)
  }

  // Roll back the allocation state to the indicated state values.
  // The state must be the current state for this thread.
  void rollback_to(const SavedState& state) {
    assert(_nesting > state._nesting, "rollback to inactive mark");
    assert((_nesting - state._nesting) == 1, "rollback across another mark");

    // Under UseMallocOnly each allocation is malloc'ed individually;
    // release everything allocated after the saved high-water mark.
    if (UseMallocOnly) {
      free_malloced_objects(state._chunk, state._hwm, state._max, _hwm);
    }

    if (state._chunk->next() != nullptr) { // Delete later chunks.
      // Reset size before deleting chunks.  Otherwise, the total
      // size could exceed the total chunk size.
      assert(size_in_bytes() > state._size_in_bytes,
             "size: " SIZE_FORMAT ", saved size: " SIZE_FORMAT,
             size_in_bytes(), state._size_in_bytes);
      set_size_in_bytes(state._size_in_bytes);
      state._chunk->next_chop();
    } else {
      // No chunks were added since the save, so the size must be unchanged.
      assert(size_in_bytes() == state._size_in_bytes, "Sanity check");
    }
    _chunk = state._chunk;      // Roll back to saved chunk.
    _hwm = state._hwm;
    _max = state._max;

    // Clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) {
      memset(state._hwm, badResourceValue, state._max - state._hwm);
    }
  }
};
|
68 | 133 |
|
69 | 134 |
|
70 | 135 | //------------------------------ResourceMark-----------------------------------
|
71 | 136 | // A resource mark releases all resources allocated after it was constructed
|
72 | 137 | // when the destructor is called. Typically used as a local variable.
|
| 138 | + |
| 139 | +// Shared part of implementation for ResourceMark and DeoptResourceMark. |
// Shared part of implementation for ResourceMark and DeoptResourceMark.
// Captures the area's state on construction and rolls back to it on
// destruction, maintaining the debug-only nesting count along the way.
class ResourceMarkImpl {
  ResourceArea* _area;          // Resource area to stack allocate
  ResourceArea::SavedState _saved_state;  // state to roll back to; must be
                                          // initialized after _area

  NONCOPYABLE(ResourceMarkImpl);

public:
  explicit ResourceMarkImpl(ResourceArea* area) :
    _area(area),
    _saved_state(area)
  {
    _area->activate_state(_saved_state);
  }

  explicit ResourceMarkImpl(Thread* thread)
    : ResourceMarkImpl(thread->resource_area()) {}

  ~ResourceMarkImpl() {
    // Roll back first, then drop the nesting level; deactivate_state
    // asserts this mark is still the innermost active one.
    reset_to_mark();
    _area->deactivate_state(_saved_state);
  }

  // Release everything allocated in the area since this mark was taken.
  // Safe to call repeatedly; the destructor calls it again.
  void reset_to_mark() const {
    _area->rollback_to(_saved_state);
  }
};
| 166 | + |
class ResourceMark: public StackObj {
  const ResourceMarkImpl _impl; // does the actual save/rollback work
#ifdef ASSERT
  Thread* _thread;              // thread whose mark chain we joined, or null
  ResourceMark* _previous_resource_mark;  // saved head of that chain
#endif // ASSERT

  NONCOPYABLE(ResourceMark);

  // Helper providing common constructor implementation.
#ifndef ASSERT
  // Product build: the thread argument is unused; only the area matters.
  ResourceMark(ResourceArea* area, Thread* thread) : _impl(area) {}
#else
  // Debug build: additionally link this mark into the thread's chain of
  // current resource marks so misuse can be detected.  thread may be null
  // (see the ResourceArea* constructor below), in which case no linking
  // is done.
  ResourceMark(ResourceArea* area, Thread* thread) :
    _impl(area),
    _thread(thread),
    _previous_resource_mark(nullptr)
  {
    if (_thread != nullptr) {
      assert(_thread == Thread::current(), "not the current thread");
      _previous_resource_mark = _thread->current_resource_mark();
      _thread->set_current_resource_mark(this);
    }
  }
#endif // ASSERT

public:

  // Mark the current thread's resource area.
  ResourceMark() : ResourceMark(Thread::current()) {}

  // Mark the given thread's resource area; must be the current thread
  // (asserted in the debug constructor above).
  explicit ResourceMark(Thread* thread)
    : ResourceMark(thread->resource_area(), thread) {}

  // Mark an explicit area.  In debug builds we still try to record the
  // current thread (if any) for mark-chain bookkeeping; in product
  // builds the thread argument is ignored anyway.
  explicit ResourceMark(ResourceArea* area)
    : ResourceMark(area, DEBUG_ONLY(Thread::current_or_null()) NOT_DEBUG(nullptr)) {}

#ifdef ASSERT
  // Unlink this mark from the thread's chain.  Rollback itself happens
  // in _impl's destructor (which runs after this body).
  ~ResourceMark() {
    if (_thread != nullptr) {
      _thread->set_current_resource_mark(_previous_resource_mark);
    }
  }
#endif // ASSERT

  // Release everything allocated since this mark was taken.
  void reset_to_mark() { _impl.reset_to_mark(); }
};
|
165 | 213 |
|
166 | 214 | //------------------------------DeoptResourceMark-----------------------------------
|
@@ -190,75 +238,18 @@ class ResourceMark: public StackObj {
|
// special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj
// then existing ResourceMarks would work fine since no one uses new to allocate them
// and they would be stack allocated. This leaves open the possibility of accidental
// misuse so we duplicate the ResourceMark functionality via a shared implementation
// class.
|
195 | 244 | class DeoptResourceMark: public CHeapObj<mtInternal> {
|
196 |
| -protected: |
197 |
| - ResourceArea *_area; // Resource area to stack allocate |
198 |
| - Chunk *_chunk; // saved arena chunk |
199 |
| - char *_hwm, *_max; |
200 |
| - size_t _size_in_bytes; |
201 |
| - |
202 |
| - void initialize(Thread *thread) { |
203 |
| - _area = thread->resource_area(); |
204 |
| - _chunk = _area->_chunk; |
205 |
| - _hwm = _area->_hwm; |
206 |
| - _max= _area->_max; |
207 |
| - _size_in_bytes = _area->size_in_bytes(); |
208 |
| - debug_only(_area->_nesting++;) |
209 |
| - assert( _area->_nesting > 0, "must stack allocate RMs" ); |
210 |
| - } |
| 245 | + const ResourceMarkImpl _impl; |
211 | 246 |
|
212 |
| - public: |
213 |
| - |
214 |
| -#ifndef ASSERT |
215 |
| - DeoptResourceMark(Thread *thread) { |
216 |
| - assert(thread == Thread::current(), "not the current thread"); |
217 |
| - initialize(thread); |
218 |
| - } |
219 |
| -#else |
220 |
| - DeoptResourceMark(Thread *thread); |
221 |
| -#endif // ASSERT |
222 |
| - |
223 |
| - DeoptResourceMark() { initialize(Thread::current()); } |
224 |
| - |
225 |
| - DeoptResourceMark( ResourceArea *r ) : |
226 |
| - _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) { |
227 |
| - _size_in_bytes = _area->size_in_bytes(); |
228 |
| - debug_only(_area->_nesting++;) |
229 |
| - assert( _area->_nesting > 0, "must stack allocate RMs" ); |
230 |
| - } |
231 |
| - |
232 |
| - void reset_to_mark() { |
233 |
| - if (UseMallocOnly) free_malloced_objects(); |
234 |
| - |
235 |
| - if( _chunk->next() ) { // Delete later chunks |
236 |
| - // reset arena size before delete chunks. Otherwise, the total |
237 |
| - // arena size could exceed total chunk size |
238 |
| - assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check"); |
239 |
| - _area->set_size_in_bytes(size_in_bytes()); |
240 |
| - _chunk->next_chop(); |
241 |
| - } else { |
242 |
| - assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check"); |
243 |
| - } |
244 |
| - _area->_chunk = _chunk; // Roll back arena to saved chunk |
245 |
| - _area->_hwm = _hwm; |
246 |
| - _area->_max = _max; |
247 |
| - |
248 |
| - // clear out this chunk (to detect allocation bugs) |
249 |
| - if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm); |
250 |
| - } |
251 |
| - |
252 |
| - ~DeoptResourceMark() { |
253 |
| - assert( _area->_nesting > 0, "must stack allocate RMs" ); |
254 |
| - debug_only(_area->_nesting--;) |
255 |
| - reset_to_mark(); |
256 |
| - } |
| 247 | + NONCOPYABLE(DeoptResourceMark); |
257 | 248 |
|
| 249 | +public: |
| 250 | + explicit DeoptResourceMark(Thread* thread) : _impl(thread) {} |
258 | 251 |
|
259 |
| - private: |
260 |
| - void free_malloced_objects() PRODUCT_RETURN; |
261 |
| - size_t size_in_bytes() { return _size_in_bytes; }; |
| 252 | + void reset_to_mark() { _impl.reset_to_mark(); } |
262 | 253 | };
|
263 | 254 |
|
264 | 255 | #endif // SHARE_MEMORY_RESOURCEAREA_HPP
|
0 commit comments