/home/travis/build/MoarVM/MoarVM/src/core/frame.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "moar.h" |
2 | | |
3 | | /* This allows the dynlex cache to be disabled when bug hunting, if needed. */ |
4 | | #define MVM_DYNLEX_CACHE_ENABLED 1 |
5 | | |
6 | | /* Check spesh candidate pre-selections match the guards. */ |
7 | | #define MVM_SPESH_CHECK_PRESELECTION 0 |
8 | | |
9 | | /* Computes the initial work area for a frame or a specialization of a frame. */ |
10 | | MVMRegister * MVM_frame_initial_work(MVMThreadContext *tc, MVMuint16 *local_types, |
11 | 117k | MVMuint16 num_locals) { |
12 | 117k | MVMuint16 i; |
13 | 117k | MVMRegister *work_initial = MVM_calloc(num_locals, sizeof(MVMRegister)); |
14 | 1.98M | for (i = 0; i < num_locals; i++) |
15 | 1.87M | if (local_types[i] == MVM_reg_obj) |
16 | 1.36M | work_initial[i].o = tc->instance->VMNull; |
17 | 117k | return work_initial; |
18 | 117k | } |
19 | | |
20 | | /* Takes a static frame and does various one-off calculations about what |
21 | | * space it shall need. Also triggers bytecode verification of the frame's |
22 | | * bytecode. */ |
23 | 117k | static void prepare_and_verify_static_frame(MVMThreadContext *tc, MVMStaticFrame *static_frame) { |
24 | 117k | MVMStaticFrameBody *static_frame_body = &static_frame->body; |
25 | 117k | MVMCompUnit *cu = static_frame_body->cu; |
26 | 117k | |
27 | 117k | /* Ensure the frame is fully deserialized. */ |
28 | 117k | if (!static_frame_body->fully_deserialized) |
29 | 117k | MVM_bytecode_finish_frame(tc, cu, static_frame, 0); |
30 | 117k | |
31 | 117k | /* If we never invoked this compilation unit before, and we have spesh |
32 | 117k | * enabled, we might either have no spesh log or a nearly full one. This |
33 | 117k | * will cause problems with gathering data to OSR hot loops. */ |
34 | 117k | if (!cu->body.invoked) { |
35 | 2.80k | cu->body.invoked = 1; |
36 | 2.80k | if (tc->instance->spesh_enabled) |
37 | 1.21k | MVM_spesh_log_new_compunit(tc); |
38 | 2.80k | } |
39 | 117k | |
40 | 117k | /* Take compilation unit lock, to make sure we don't race to do the |
41 | 117k | * frame preparation/verification work. */ |
42 | 117k | MVMROOT(tc, cu, { |
43 | 117k | MVMROOT(tc, static_frame, { |
44 | 117k | MVM_reentrantmutex_lock(tc, (MVMReentrantMutex *)cu->body.deserialize_frame_mutex); |
45 | 117k | }); |
46 | 117k | }); |
47 | 117k | |
48 | 117k | if (static_frame->body.instrumentation_level == 0) { |
49 | 117k | /* Work size is number of locals/registers plus size of the maximum |
50 | 117k | * call site argument list. */ |
51 | 117k | static_frame_body->work_size = sizeof(MVMRegister) * |
52 | 117k | (static_frame_body->num_locals + static_frame_body->cu->body.max_callsite_size); |
53 | 117k | |
54 | 117k | /* Validate the bytecode. */ |
55 | 117k | MVM_validate_static_frame(tc, static_frame); |
56 | 117k | |
57 | 117k | /* Compute work area initial state that we can memcpy into place each |
58 | 117k | * time. */ |
59 | 117k | if (static_frame_body->num_locals) |
60 | 117k | static_frame_body->work_initial = MVM_frame_initial_work(tc, |
61 | 117k | static_frame_body->local_types, |
62 | 117k | static_frame_body->num_locals); |
63 | 117k | |
64 | 117k | /* Check if we have any state var lexicals. */ |
65 | 117k | if (static_frame_body->static_env_flags) { |
66 | 117k | MVMuint8 *flags = static_frame_body->static_env_flags; |
67 | 117k | MVMint64 numlex = static_frame_body->num_lexicals; |
68 | 117k | MVMint64 i; |
69 | 248k | for (i = 0; i < numlex; i++) |
70 | 130k | if (flags[i] == 2) { |
71 | 2 | static_frame_body->has_state_vars = 1; |
72 | 2 | break; |
73 | 2 | } |
74 | 117k | } |
75 | 117k | |
76 | 117k | /* Allocate the frame's spesh data structure; do it in gen2, both for |
77 | 117k | * the sake of not triggering GC here to avoid a deadlock risk, but |
78 | 117k | * also because then it can be ssigned into the gen2 static frame |
79 | 117k | * without causing it to become an inter-gen root. */ |
80 | 117k | MVM_gc_allocate_gen2_default_set(tc); |
81 | 117k | MVM_ASSIGN_REF(tc, &(static_frame->common.header), static_frame_body->spesh, |
82 | 117k | MVM_repr_alloc_init(tc, tc->instance->StaticFrameSpesh)); |
83 | 117k | MVM_gc_allocate_gen2_default_clear(tc); |
84 | 117k | } |
85 | 117k | |
86 | 117k | /* Unlock, now we're finished. */ |
87 | 117k | MVM_reentrantmutex_unlock(tc, (MVMReentrantMutex *)cu->body.deserialize_frame_mutex); |
88 | 117k | } |
89 | | |
90 | | /* When we don't match the current instrumentation level, we hit this. It may |
91 | | * simply be that we never invoked the frame, in which case we prepare and |
92 | | * verify it. It may also be because we need to instrument the code for |
93 | | * profiling. */ |
94 | 117k | static void instrumentation_level_barrier(MVMThreadContext *tc, MVMStaticFrame *static_frame) { |
95 | 117k | /* Prepare and verify if needed. */ |
96 | 117k | if (static_frame->body.instrumentation_level == 0) |
97 | 117k | prepare_and_verify_static_frame(tc, static_frame); |
98 | 117k | |
99 | 117k | /* Mark frame as being at the current instrumentation level. */ |
100 | 117k | static_frame->body.instrumentation_level = tc->instance->instrumentation_level; |
101 | 117k | |
102 | 117k | /* Add profiling instrumentation if needed. */ |
103 | 117k | if (tc->instance->profiling) |
104 | 0 | MVM_profile_instrument(tc, static_frame); |
105 | 117k | else if (tc->instance->cross_thread_write_logging) |
106 | 0 | MVM_cross_thread_write_instrument(tc, static_frame); |
107 | 117k | else if (tc->instance->coverage_logging) |
108 | 0 | MVM_line_coverage_instrument(tc, static_frame); |
109 | 117k | else if (tc->instance->debugserver) |
110 | 0 | MVM_breakpoint_instrument(tc, static_frame); |
111 | 117k | else |
112 | 117k | /* XXX uninstrumenting is currently turned off, due to multithreading |
113 | 117k | * woes. If you add an instrumentation that has to be "turned off" |
114 | 117k | * again at some point, a solution for this problem must be found. */ |
115 | 117k | MVM_profile_ensure_uninstrumented(tc, static_frame); |
116 | 117k | } |
117 | | |
118 | | /* Called when the GC destroys a frame. Since the frame may have been alive as |
119 | | * part of a continuation that was taken but never invoked, we should check |
120 | | * things normally cleaned up on return don't need cleaning up also. */ |
121 | 511k | void MVM_frame_destroy(MVMThreadContext *tc, MVMFrame *frame) { |
122 | 511k | if (frame->work) { |
123 | 0 | MVM_args_proc_cleanup(tc, &frame->params); |
124 | 0 | MVM_fixed_size_free(tc, tc->instance->fsa, frame->allocd_work, |
125 | 0 | frame->work); |
126 | 0 | if (frame->extra) { |
127 | 0 | MVMFrameExtra *e = frame->extra; |
128 | 0 | if (e->continuation_tags) |
129 | 0 | MVM_continuation_free_tags(tc, frame); |
130 | 0 | MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMFrameExtra), e); |
131 | 0 | } |
132 | 0 | } |
133 | 511k | if (frame->env) |
134 | 260k | MVM_fixed_size_free(tc, tc->instance->fsa, frame->allocd_env, frame->env); |
135 | 511k | } |
136 | | |
137 | | /* Creates a frame for usage as a context only, possibly forcing all of the |
138 | | * static lexicals to be deserialized if it's used for auto-close purposes. */ |
139 | | static MVMFrame * create_context_only(MVMThreadContext *tc, MVMStaticFrame *static_frame, |
140 | 1.46k | MVMObject *code_ref, MVMint32 autoclose) { |
141 | 1.46k | MVMFrame *frame; |
142 | 1.46k | |
143 | 1.46k | MVMROOT2(tc, static_frame, code_ref, { |
144 | 1.46k | /* If the frame was never invoked before, need initial calculations |
145 | 1.46k | * and verification. */ |
146 | 1.46k | if (static_frame->body.instrumentation_level == 0) |
147 | 1.46k | instrumentation_level_barrier(tc, static_frame); |
148 | 1.46k | |
149 | 1.46k | frame = MVM_gc_allocate_frame(tc); |
150 | 1.46k | }); |
151 | 1.46k | |
152 | 1.46k | /* Set static frame and code ref. */ |
153 | 1.46k | MVM_ASSIGN_REF(tc, &(frame->header), frame->static_info, static_frame); |
154 | 1.46k | MVM_ASSIGN_REF(tc, &(frame->header), frame->code_ref, code_ref); |
155 | 1.46k | |
156 | 1.46k | /* Allocate space for lexicals, copying the default lexical environment |
157 | 1.46k | * into place and, if we're auto-closing, making sure anything we'd clone |
158 | 1.46k | * is vivified to prevent the clone (which is what creates the correct |
159 | 1.46k | * BEGIN/INIT semantics). */ |
160 | 1.46k | if (static_frame->body.env_size) { |
161 | 1.46k | frame->env = MVM_fixed_size_alloc(tc, tc->instance->fsa, static_frame->body.env_size); |
162 | 1.46k | frame->allocd_env = static_frame->body.env_size; |
163 | 1.46k | if (autoclose) { |
164 | 5 | MVMROOT2(tc, frame, static_frame, { |
165 | 5 | MVMuint16 i; |
166 | 5 | MVMuint16 num_lexicals = static_frame->body.num_lexicals; |
167 | 5 | for (i = 0; i < num_lexicals; i++) { |
168 | 5 | if (!static_frame->body.static_env[i].o && static_frame->body.static_env_flags[i] == 1) { |
169 | 5 | MVMint32 scid; |
170 | 5 | MVMint32 objid; |
171 | 5 | if (MVM_bytecode_find_static_lexical_scref(tc, static_frame->body.cu, |
172 | 5 | static_frame, i, &scid, &objid)) { |
173 | 5 | MVMObject *resolved; |
174 | 5 | MVMSerializationContext *sc = MVM_sc_get_sc(tc, static_frame->body.cu, scid); |
175 | 5 | |
176 | 5 | if (sc == NULL) |
177 | 5 | MVM_exception_throw_adhoc(tc, |
178 | 5 | "SC not yet resolved; lookup failed"); |
179 | 5 | |
180 | 5 | resolved = MVM_sc_get_object(tc, sc, objid); |
181 | 5 | |
182 | 5 | MVM_ASSIGN_REF(tc, &(static_frame->common.header), |
183 | 5 | static_frame->body.static_env[i].o, |
184 | 5 | resolved); |
185 | 5 | } |
186 | 5 | } |
187 | 5 | } |
188 | 5 | }); |
189 | 5 | } |
190 | 1.46k | memcpy(frame->env, static_frame->body.static_env, static_frame->body.env_size); |
191 | 1.46k | } |
192 | 1.46k | |
193 | 1.46k | return frame; |
194 | 1.46k | } |
195 | | |
196 | | /* Creates a frame that is suitable for deserializing a context into. Starts |
197 | | * with a ref count of 1 due to being held by an SC. */ |
198 | | MVMFrame * MVM_frame_create_context_only(MVMThreadContext *tc, MVMStaticFrame *static_frame, |
199 | 1.45k | MVMObject *code_ref) { |
200 | 1.45k | return create_context_only(tc, static_frame, code_ref, 0); |
201 | 1.45k | } |
202 | | |
203 | | /* Provides auto-close functionality, for the handful of cases where we have |
204 | | * not ever been in the outer frame of something we're invoking. In this case, |
205 | | * we fake up a frame based on the static lexical environment. */ |
206 | 28 | static MVMFrame * autoclose(MVMThreadContext *tc, MVMStaticFrame *needed) { |
207 | 28 | MVMFrame *result; |
208 | 28 | |
209 | 28 | /* First, see if we can find one on the call stack; return it if so. */ |
210 | 28 | MVMFrame *candidate = tc->cur_frame; |
211 | 88 | while (candidate) { |
212 | 83 | if (candidate->static_info->body.bytecode == needed->body.bytecode) |
213 | 23 | return candidate; |
214 | 60 | candidate = candidate->caller; |
215 | 60 | } |
216 | 28 | |
217 | 28 | /* If not, fake up a frame See if it also needs an outer. */ |
218 | 5 | MVMROOT(tc, needed, { |
219 | 5 | result = create_context_only(tc, needed, (MVMObject *)needed->body.static_code, 1); |
220 | 5 | }); |
221 | 5 | if (needed->body.outer) { |
222 | 4 | /* See if the static code object has an outer. */ |
223 | 4 | MVMCode *outer_code = needed->body.outer->body.static_code; |
224 | 4 | if (outer_code->body.outer && |
225 | 3 | outer_code->body.outer->static_info->body.bytecode == needed->body.bytecode) { |
226 | 0 | /* Yes, just take it. */ |
227 | 0 | MVM_ASSIGN_REF(tc, &(result->header), result->outer, outer_code->body.outer); |
228 | 0 | } |
229 | 4 | else { |
230 | 4 | /* Otherwise, recursively auto-close. */ |
231 | 4 | MVMROOT(tc, result, { |
232 | 4 | MVMFrame *ac = autoclose(tc, needed->body.outer); |
233 | 4 | MVM_ASSIGN_REF(tc, &(result->header), result->outer, ac); |
234 | 4 | }); |
235 | 4 | } |
236 | 4 | } |
237 | 5 | return result; |
238 | 28 | } |
239 | | |
240 | | /* Obtains memory for a frame on the thread-local call stack. */ |
241 | | static MVMFrame * allocate_frame(MVMThreadContext *tc, MVMStaticFrame *static_frame, |
242 | 17.6M | MVMSpeshCandidate *spesh_cand, MVMint32 heap) { |
243 | 17.6M | MVMFrame *frame; |
244 | 17.6M | MVMint32 env_size, work_size, num_locals; |
245 | 17.6M | MVMStaticFrameBody *static_frame_body; |
246 | 17.6M | MVMJitCode *jitcode; |
247 | 17.6M | |
248 | 17.6M | if (heap) { |
249 | 309k | /* Allocate frame on the heap. We know it's already zeroed. */ |
250 | 309k | MVMROOT(tc, static_frame, { |
251 | 309k | if (tc->cur_frame) |
252 | 309k | MVM_frame_force_to_heap(tc, tc->cur_frame); |
253 | 309k | frame = MVM_gc_allocate_frame(tc); |
254 | 309k | }); |
255 | 309k | } |
256 | 17.3M | else { |
257 | 17.3M | /* Allocate the frame on the call stack. */ |
258 | 17.3M | MVMCallStackRegion *stack = tc->stack_current; |
259 | 17.3M | if (stack->alloc + sizeof(MVMFrame) >= stack->alloc_limit) |
260 | 0 | stack = MVM_callstack_region_next(tc); |
261 | 17.3M | frame = (MVMFrame *)stack->alloc; |
262 | 17.3M | stack->alloc += sizeof(MVMFrame); |
263 | 17.3M | |
264 | 17.3M | /* Ensure collectable header flags and owner are zeroed, which means we'll |
265 | 17.3M | * never try to mark or root the frame. */ |
266 | 17.3M | frame->header.flags = 0; |
267 | 17.3M | frame->header.owner = 0; |
268 | 17.3M | |
269 | 17.3M | /* Current arguments callsite must be NULL as it's used in GC. Extra must |
270 | 17.3M | * be NULL so we know we don't have it. Flags should be zeroed. */ |
271 | 17.3M | frame->cur_args_callsite = NULL; |
272 | 17.3M | frame->extra = NULL; |
273 | 17.3M | frame->flags = 0; |
274 | 17.3M | } |
275 | 17.6M | |
276 | 17.6M | /* Allocate space for lexicals and work area. */ |
277 | 17.6M | static_frame_body = &(static_frame->body); |
278 | 9.86M | env_size = spesh_cand ? spesh_cand->env_size : static_frame_body->env_size; |
279 | 17.6M | |
280 | 9.86M | jitcode = spesh_cand ? spesh_cand->jitcode : NULL; |
281 | 9.80M | num_locals = jitcode && jitcode->local_types ? jitcode->num_locals : |
282 | 8.52M | (spesh_cand ? spesh_cand->num_locals : static_frame_body->num_locals); |
283 | 17.6M | if (env_size) { |
284 | 1.25M | frame->env = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, env_size); |
285 | 1.25M | frame->allocd_env = env_size; |
286 | 1.25M | } |
287 | 16.4M | else { |
288 | 16.4M | frame->env = NULL; |
289 | 16.4M | frame->allocd_env = 0; |
290 | 16.4M | } |
291 | 9.86M | work_size = spesh_cand ? spesh_cand->work_size : static_frame_body->work_size; |
292 | 17.6M | if (work_size) { |
293 | 17.6M | if (spesh_cand) { |
294 | 9.86M | /* Allocate zeroed memory. Spesh makes sure we have VMNull setup in |
295 | 9.86M | * the places we need it. */ |
296 | 9.86M | frame->work = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, work_size); |
297 | 9.86M | } |
298 | 7.81M | else { |
299 | 7.81M | /* Copy frame template with VMNulls in to place. */ |
300 | 7.81M | frame->work = MVM_fixed_size_alloc(tc, tc->instance->fsa, work_size); |
301 | 7.81M | memcpy(frame->work, static_frame_body->work_initial, |
302 | 7.81M | sizeof(MVMRegister) * static_frame_body->num_locals); |
303 | 7.81M | } |
304 | 17.6M | frame->allocd_work = work_size; |
305 | 17.6M | |
306 | 17.6M | /* Calculate args buffer position. */ |
307 | 17.6M | frame->args = frame->work + num_locals; |
308 | 17.6M | } |
309 | 0 | else { |
310 | 0 | frame->work = NULL; |
311 | 0 | frame->allocd_work = 0; |
312 | 0 | } |
313 | 17.6M | |
314 | 17.6M | /* Set static frame and caller before we let this frame escape and the GC |
315 | 17.6M | * see it. */ |
316 | 17.6M | frame->static_info = static_frame; |
317 | 17.6M | frame->caller = tc->cur_frame; |
318 | 17.6M | |
319 | 17.6M | /* Assign a sequence nr */ |
320 | 17.6M | frame->sequence_nr = tc->next_frame_nr++; |
321 | 17.6M | |
322 | 17.6M | return frame; |
323 | 17.6M | } |
324 | | |
325 | | /* Obtains memory for a frame on the heap. */ |
326 | | static MVMFrame * allocate_heap_frame(MVMThreadContext *tc, MVMStaticFrame *static_frame, |
327 | 24 | MVMSpeshCandidate *spesh_cand) { |
328 | 24 | MVMFrame *frame; |
329 | 24 | MVMint32 env_size, work_size; |
330 | 24 | MVMStaticFrameBody *static_frame_body; |
331 | 24 | |
332 | 24 | /* Allocate the frame. */ |
333 | 24 | MVMROOT(tc, static_frame, { |
334 | 24 | frame = MVM_gc_allocate_frame(tc); |
335 | 24 | }); |
336 | 24 | |
337 | 24 | /* Allocate space for lexicals and work area. */ |
338 | 24 | static_frame_body = &(static_frame->body); |
339 | 24 | env_size = spesh_cand ? spesh_cand->env_size : static_frame_body->env_size; |
340 | 24 | if (env_size) { |
341 | 0 | frame->env = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, env_size); |
342 | 0 | frame->allocd_env = env_size; |
343 | 0 | } |
344 | 24 | work_size = spesh_cand ? spesh_cand->work_size : static_frame_body->work_size; |
345 | 24 | if (work_size) { |
346 | 24 | /* Fill up all object registers with a pointer to our VMNull object */ |
347 | 24 | if (spesh_cand && spesh_cand->local_types) { |
348 | 0 | MVMuint32 num_locals = spesh_cand->num_locals; |
349 | 0 | MVMuint16 *local_types = spesh_cand->local_types; |
350 | 0 | MVMuint32 i; |
351 | 0 | frame->work = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, work_size); |
352 | 0 | for (i = 0; i < num_locals; i++) |
353 | 0 | if (local_types[i] == MVM_reg_obj) |
354 | 0 | frame->work[i].o = tc->instance->VMNull; |
355 | 0 | } |
356 | 24 | else { |
357 | 24 | frame->work = MVM_fixed_size_alloc(tc, tc->instance->fsa, work_size); |
358 | 24 | memcpy(frame->work, static_frame_body->work_initial, |
359 | 24 | sizeof(MVMRegister) * static_frame_body->num_locals); |
360 | 24 | } |
361 | 24 | frame->allocd_work = work_size; |
362 | 24 | |
363 | 24 | /* Calculate args buffer position. */ |
364 | 24 | frame->args = frame->work + (spesh_cand |
365 | 0 | ? spesh_cand->num_locals |
366 | 24 | : static_frame_body->num_locals); |
367 | 24 | } |
368 | 24 | |
369 | 24 | return frame; |
370 | 24 | } |
371 | | |
372 | | /* This exists to reduce the amount of pointer-fiddling that has to be |
373 | | * done by the JIT */ |
374 | | void MVM_frame_invoke_code(MVMThreadContext *tc, MVMCode *code, |
375 | 2.58M | MVMCallsite *callsite, MVMint32 spesh_cand) { |
376 | 2.58M | MVM_frame_invoke(tc, code->body.sf, callsite, tc->cur_frame->args, |
377 | 2.58M | code->body.outer, (MVMObject*)code, spesh_cand); |
378 | 2.58M | } |
379 | | |
380 | | /* Takes a static frame and a thread context. Invokes the static frame. */ |
381 | | void MVM_frame_invoke(MVMThreadContext *tc, MVMStaticFrame *static_frame, |
382 | | MVMCallsite *callsite, MVMRegister *args, |
383 | 17.6M | MVMFrame *outer, MVMObject *code_ref, MVMint32 spesh_cand) { |
384 | 17.6M | MVMFrame *frame; |
385 | 17.6M | MVMuint8 *chosen_bytecode; |
386 | 17.6M | MVMStaticFrameSpesh *spesh; |
387 | 17.6M | |
388 | 17.6M | /* If the frame was never invoked before, or never before at the current |
389 | 17.6M | * instrumentation level, we need to trigger the instrumentation level |
390 | 17.6M | * barrier. */ |
391 | 17.6M | if (static_frame->body.instrumentation_level != tc->instance->instrumentation_level) { |
392 | 116k | MVMROOT3(tc, static_frame, code_ref, outer, { |
393 | 116k | instrumentation_level_barrier(tc, static_frame); |
394 | 116k | }); |
395 | 116k | } |
396 | 17.6M | |
397 | 17.6M | /* Ensure we have an outer if needed. This is done ahead of allocating the |
398 | 17.6M | * new frame, since an autoclose will force the callstack on to the heap. */ |
399 | 17.6M | if (outer) { |
400 | 17.6M | /* We were provided with an outer frame and it will already have had |
401 | 17.6M | * its reference count incremented; just ensure that it is based on the |
402 | 17.6M | * correct static frame (compare on bytecode address to cope with |
403 | 17.6M | * nqp::freshcoderef). */ |
404 | 17.6M | if (MVM_UNLIKELY(outer->static_info->body.orig_bytecode != static_frame->body.outer->body.orig_bytecode)) { |
405 | 0 | char *frame_cuuid = MVM_string_utf8_encode_C_string(tc, static_frame->body.cuuid); |
406 | 0 | char *frame_name; |
407 | 0 | char *outer_cuuid = MVM_string_utf8_encode_C_string(tc, outer->static_info->body.cuuid); |
408 | 0 | char *outer_name; |
409 | 0 | char *frame_outer_cuuid = MVM_string_utf8_encode_C_string(tc, static_frame->body.outer->body.cuuid); |
410 | 0 | char *frame_outer_name; |
411 | 0 |
|
412 | 0 | char *waste[7] = { frame_cuuid, outer_cuuid, frame_outer_cuuid, NULL, NULL, NULL, NULL }; |
413 | 0 | int waste_counter = 3; |
414 | 0 |
|
415 | 0 | if (static_frame->body.name) { |
416 | 0 | frame_name = MVM_string_utf8_encode_C_string(tc, static_frame->body.name); |
417 | 0 | waste[waste_counter++] = frame_name; |
418 | 0 | } |
419 | 0 | else { |
420 | 0 | frame_name = "<anonymous static frame>"; |
421 | 0 | } |
422 | 0 |
|
423 | 0 | if (outer->static_info->body.name) { |
424 | 0 | outer_name = MVM_string_utf8_encode_C_string(tc, outer->static_info->body.name); |
425 | 0 | waste[waste_counter++] = outer_name; |
426 | 0 | } |
427 | 0 | else { |
428 | 0 | outer_name = "<anonymous static frame>"; |
429 | 0 | } |
430 | 0 |
|
431 | 0 | if (static_frame->body.outer->body.name) { |
432 | 0 | frame_outer_name = MVM_string_utf8_encode_C_string(tc, static_frame->body.outer->body.name); |
433 | 0 | waste[waste_counter++] = frame_outer_name; |
434 | 0 | } |
435 | 0 | else { |
436 | 0 | frame_outer_name = "<anonymous static frame>"; |
437 | 0 | } |
438 | 0 |
|
439 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
440 | 0 | "When invoking %s '%s', provided outer frame %p (%s '%s') does not match expected static frame %p (%s '%s')", |
441 | 0 | frame_cuuid, |
442 | 0 | frame_name, |
443 | 0 | outer->static_info, |
444 | 0 | outer_cuuid, |
445 | 0 | outer_name, |
446 | 0 | static_frame->body.outer, |
447 | 0 | frame_outer_cuuid, |
448 | 0 | frame_outer_name); |
449 | 0 | } |
450 | 17.6M | } |
451 | 5.78k | else if (static_frame->body.static_code) { |
452 | 5.78k | MVMCode *static_code = static_frame->body.static_code; |
453 | 5.78k | if (static_code->body.outer) { |
454 | 1 | /* We're lacking an outer, but our static code object may have one. |
455 | 1 | * This comes up in the case of cloned protoregexes, for example. */ |
456 | 1 | outer = static_code->body.outer; |
457 | 1 | } |
458 | 5.78k | else if (static_frame->body.outer) { |
459 | 24 | /* Auto-close, and cache it in the static frame. */ |
460 | 24 | MVMROOT2(tc, static_frame, code_ref, { |
461 | 24 | MVM_frame_force_to_heap(tc, tc->cur_frame); |
462 | 24 | outer = autoclose(tc, static_frame->body.outer); |
463 | 24 | MVM_ASSIGN_REF(tc, &(static_code->common.header), |
464 | 24 | static_code->body.outer, outer); |
465 | 24 | }); |
466 | 24 | } |
467 | 5.76k | else { |
468 | 5.76k | outer = NULL; |
469 | 5.76k | } |
470 | 5.78k | } |
471 | 17.6M | |
472 | 17.6M | /* See if any specializations apply. */ |
473 | 17.6M | spesh = static_frame->body.spesh; |
474 | 17.6M | if (spesh_cand < 0) |
475 | 15.0M | spesh_cand = MVM_spesh_arg_guard_run(tc, spesh->body.spesh_arg_guard, |
476 | 15.0M | callsite, args, NULL); |
477 | 17.6M | #if MVM_SPESH_CHECK_PRESELECTION |
478 | | else { |
479 | | MVMint32 certain = -1; |
480 | | MVMint32 correct = MVM_spesh_arg_guard_run(tc, spesh->body.spesh_arg_guard, |
481 | | callsite, args, &certain); |
482 | | if (spesh_cand != correct && spesh_cand != certain) { |
483 | | fprintf(stderr, "Inconsistent spesh preselection of '%s' (%s): got %d, not %d\n", |
484 | | MVM_string_utf8_encode_C_string(tc, static_frame->body.name), |
485 | | MVM_string_utf8_encode_C_string(tc, static_frame->body.cuuid), |
486 | | spesh_cand, correct); |
487 | | MVM_dump_backtrace(tc); |
488 | | } |
489 | | } |
490 | | #endif |
491 | 17.6M | if (spesh_cand >= 0) { |
492 | 9.86M | MVMSpeshCandidate *chosen_cand = spesh->body.spesh_candidates[spesh_cand]; |
493 | 9.86M | if (static_frame->body.allocate_on_heap) { |
494 | 158k | MVMROOT3(tc, static_frame, code_ref, outer, { |
495 | 158k | frame = allocate_frame(tc, static_frame, chosen_cand, 1); |
496 | 158k | }); |
497 | 158k | } |
498 | 9.70M | else { |
499 | 9.70M | frame = allocate_frame(tc, static_frame, chosen_cand, 0); |
500 | 9.70M | frame->spesh_correlation_id = 0; |
501 | 9.70M | } |
502 | 9.86M | if (chosen_cand->jitcode) { |
503 | 9.80M | chosen_bytecode = chosen_cand->jitcode->bytecode; |
504 | 9.80M | frame->jit_entry_label = chosen_cand->jitcode->labels[0]; |
505 | 9.80M | } |
506 | 65.1k | else { |
507 | 65.1k | chosen_bytecode = chosen_cand->bytecode; |
508 | 65.1k | } |
509 | 9.86M | frame->effective_spesh_slots = chosen_cand->spesh_slots; |
510 | 9.86M | frame->spesh_cand = chosen_cand; |
511 | 9.86M | } |
512 | 7.81M | else { |
513 | 7.81M | MVMint32 on_heap = static_frame->body.allocate_on_heap; |
514 | 7.81M | if (on_heap) { |
515 | 150k | MVMROOT3(tc, static_frame, code_ref, outer, { |
516 | 150k | frame = allocate_frame(tc, static_frame, NULL, 1); |
517 | 150k | }); |
518 | 150k | } |
519 | 7.65M | else { |
520 | 7.65M | frame = allocate_frame(tc, static_frame, NULL, 0); |
521 | 7.65M | frame->spesh_cand = NULL; |
522 | 7.65M | frame->effective_spesh_slots = NULL; |
523 | 7.65M | frame->spesh_correlation_id = 0; |
524 | 7.65M | } |
525 | 7.81M | chosen_bytecode = static_frame->body.bytecode; |
526 | 7.81M | |
527 | 7.81M | /* If we should be spesh logging, set the correlation ID. */ |
528 | 7.81M | if (tc->instance->spesh_enabled && tc->spesh_log && static_frame->body.bytecode_size < MVM_SPESH_MAX_BYTECODE_SIZE) { |
529 | 5.72M | if (spesh->body.spesh_entries_recorded++ < MVM_SPESH_LOG_LOGGED_ENOUGH) { |
530 | 3.66M | MVMint32 id = ++tc->spesh_cid; |
531 | 3.66M | frame->spesh_correlation_id = id; |
532 | 3.66M | MVMROOT3(tc, static_frame, code_ref, outer, { |
533 | 3.66M | if (on_heap) { |
534 | 3.66M | MVMROOT(tc, frame, { |
535 | 3.66M | MVM_spesh_log_entry(tc, id, static_frame, callsite); |
536 | 3.66M | }); |
537 | 3.66M | } |
538 | 3.66M | else { |
539 | 3.66M | MVMROOT2(tc, frame->caller, frame->static_info, { |
540 | 3.66M | MVM_spesh_log_entry(tc, id, static_frame, callsite); |
541 | 3.66M | }); |
542 | 3.66M | } |
543 | 3.66M | }); |
544 | 3.66M | } |
545 | 5.72M | } |
546 | 7.81M | } |
547 | 17.6M | |
548 | 17.6M | /* Store the code ref (NULL at the top-level). */ |
549 | 17.6M | frame->code_ref = code_ref; |
550 | 17.6M | |
551 | 17.6M | /* Outer. */ |
552 | 17.6M | frame->outer = outer; |
553 | 17.6M | |
554 | 17.6M | /* Initialize argument processing. */ |
555 | 17.6M | MVM_args_proc_init(tc, &frame->params, callsite, args); |
556 | 17.6M | |
557 | 17.6M | MVM_jit_code_trampoline(tc); |
558 | 17.6M | |
559 | 17.6M | /* Update interpreter and thread context, so next execution will use this |
560 | 17.6M | * frame. */ |
561 | 17.6M | tc->cur_frame = frame; |
562 | 17.6M | tc->current_frame_nr = frame->sequence_nr; |
563 | 17.6M | *(tc->interp_cur_op) = chosen_bytecode; |
564 | 17.6M | *(tc->interp_bytecode_start) = chosen_bytecode; |
565 | 17.6M | *(tc->interp_reg_base) = frame->work; |
566 | 17.6M | *(tc->interp_cu) = static_frame->body.cu; |
567 | 17.6M | |
568 | 17.6M | /* If we need to do so, make clones of things in the lexical environment |
569 | 17.6M | * that need it. Note that we do this after tc->cur_frame became the |
570 | 17.6M | * current frame, to make sure these new objects will certainly get |
571 | 17.6M | * marked if GC is triggered along the way. */ |
572 | 17.6M | if (static_frame->body.has_state_vars) { |
573 | 18 | /* Drag everything out of static_frame_body before we start, |
574 | 18 | * as GC action may invalidate it. */ |
575 | 18 | MVMRegister *env = static_frame->body.static_env; |
576 | 18 | MVMuint8 *flags = static_frame->body.static_env_flags; |
577 | 18 | MVMint64 numlex = static_frame->body.num_lexicals; |
578 | 18 | MVMRegister *state = NULL; |
579 | 18 | MVMint64 state_act = 0; /* 0 = none so far, 1 = first time, 2 = later */ |
580 | 18 | MVMint64 i; |
581 | 18 | MVMROOT(tc, frame, { |
582 | 18 | for (i = 0; i < numlex; i++) { |
583 | 18 | if (flags[i] == 2) { |
584 | 18 | redo_state: |
585 | 18 | switch (state_act) { |
586 | 18 | case 0: |
587 | 18 | if (MVM_UNLIKELY(!frame->code_ref)) |
588 | 18 | MVM_exception_throw_adhoc(tc, |
589 | 18 | "Frame must have code-ref to have state variables"); |
590 | 18 | state = ((MVMCode *)frame->code_ref)->body.state_vars; |
591 | 18 | if (state) { |
592 | 18 | /* Already have state vars; pull them from this. */ |
593 | 18 | state_act = 2; |
594 | 18 | } |
595 | 18 | else { |
596 | 18 | /* Allocate storage for state vars. */ |
597 | 18 | state = (MVMRegister *)MVM_calloc(1, frame->static_info->body.env_size); |
598 | 18 | ((MVMCode *)frame->code_ref)->body.state_vars = state; |
599 | 18 | state_act = 1; |
600 | 18 | |
601 | 18 | /* Note that this frame should run state init code. */ |
602 | 18 | frame->flags |= MVM_FRAME_FLAG_STATE_INIT; |
603 | 18 | } |
604 | 18 | goto redo_state; |
605 | 18 | case 1: { |
606 | 18 | MVMObject *cloned = MVM_repr_clone(tc, env[i].o); |
607 | 18 | frame->env[i].o = cloned; |
608 | 18 | MVM_ASSIGN_REF(tc, &(frame->code_ref->header), state[i].o, cloned); |
609 | 18 | break; |
610 | 18 | } |
611 | 18 | case 2: |
612 | 18 | frame->env[i].o = state[i].o; |
613 | 18 | break; |
614 | 18 | } |
615 | 18 | } |
616 | 18 | } |
617 | 18 | }); |
618 | 18 | } |
619 | 17.6M | } |
620 | | |
621 | | /* Moves the specified frame from the stack and on to the heap. Must only |
622 | | * be called if the frame is not already there. Use MVM_frame_force_to_heap |
623 | | * when not sure. */ |
624 | 310k | MVMFrame * MVM_frame_move_to_heap(MVMThreadContext *tc, MVMFrame *frame) { |
625 | 310k | /* To keep things simple, we'll promote the entire stack. */ |
626 | 310k | MVMFrame *cur_to_promote = tc->cur_frame; |
627 | 310k | MVMFrame *new_cur_frame = NULL; |
628 | 310k | MVMFrame *update_caller = NULL; |
629 | 310k | MVMFrame *result = NULL; |
630 | 310k | MVM_CHECK_CALLER_CHAIN(tc, cur_to_promote); |
631 | 310k | MVMROOT3(tc, new_cur_frame, update_caller, result, { |
632 | 310k | while (cur_to_promote) { |
633 | 310k | /* Allocate a heap frame. */ |
634 | 310k | MVMFrame *promoted = MVM_gc_allocate_frame(tc); |
635 | 310k | |
636 | 310k | /* Bump heap promotion counter, to encourage allocating this kind |
637 | 310k | * of frame directly on the heap in the future. If the frame was |
638 | 310k | * entered at least 50 times, and over 80% of the entries lead to |
639 | 310k | * an eventual heap promotion, them we'll mark it to be allocated |
640 | 310k | * right away on the heap. Note that entries is only bumped when |
641 | 310k | * spesh logging is taking place, so we only bump the number of |
642 | 310k | * heap promotions in that case too. */ |
643 | 310k | MVMStaticFrame *sf = cur_to_promote->static_info; |
644 | 310k | if (!sf->body.allocate_on_heap && cur_to_promote->spesh_correlation_id) { |
645 | 310k | MVMuint32 promos = sf->body.spesh->body.num_heap_promotions++; |
646 | 310k | MVMuint32 entries = sf->body.spesh->body.spesh_entries_recorded; |
647 | 310k | if (entries > 50 && promos > (4 * entries) / 5) |
648 | 310k | sf->body.allocate_on_heap = 1; |
649 | 310k | } |
650 | 310k | |
651 | 310k | /* Copy current frame's body to it. */ |
652 | 310k | memcpy( |
653 | 310k | (char *)promoted + sizeof(MVMCollectable), |
654 | 310k | (char *)cur_to_promote + sizeof(MVMCollectable), |
655 | 310k | sizeof(MVMFrame) - sizeof(MVMCollectable)); |
656 | 310k | |
657 | 310k | /* Update caller of previously promoted frame, if any. This is the |
658 | 310k | * only reference that might point to a non-heap frame. */ |
659 | 310k | if (update_caller) { |
660 | 310k | MVM_ASSIGN_REF(tc, &(update_caller->header), |
661 | 310k | update_caller->caller, promoted); |
662 | 310k | } |
663 | 310k | |
664 | 310k | /* If we're the first time through the lopo, then we're instead |
665 | 310k | * replacing the current stack top. Note we do it at the end, |
666 | 310k | * so that the GC can still walk unpromoted frames if it runs |
667 | 310k | * in this loop. */ |
668 | 310k | else { |
669 | 310k | new_cur_frame = promoted; |
670 | 310k | } |
671 | 310k | |
672 | 310k | /* If the frame we're promoting was in the active handlers list, |
673 | 310k | * update the address there. */ |
674 | 310k | if (tc->active_handlers) { |
675 | 310k | MVMActiveHandler *ah = tc->active_handlers; |
676 | 310k | while (ah) { |
677 | 310k | if (ah->frame == cur_to_promote) |
678 | 310k | ah->frame = promoted; |
679 | 310k | ah = ah->next_handler; |
680 | 310k | } |
681 | 310k | } |
682 | 310k | |
683 | 310k | /* If we're replacing the frame we were asked to promote, that will |
684 | 310k | * become our result. */ |
685 | 310k | if (cur_to_promote == frame) |
686 | 310k | result = promoted; |
687 | 310k | |
688 | 310k | /* Check if there's a caller, or if we reached the end of the |
689 | 310k | * chain. */ |
690 | 310k | if (cur_to_promote->caller) { |
691 | 310k | /* If the caller is on the stack then it needs promotion too. |
692 | 310k | * If not, we're done. */ |
693 | 310k | if (MVM_FRAME_IS_ON_CALLSTACK(tc, cur_to_promote->caller)) { |
694 | 310k | /* Clear caller in promoted frame, to avoid a heap -> stack |
695 | 310k | * reference if we GC during this loop. */ |
696 | 310k | promoted->caller = NULL; |
697 | 310k | update_caller = promoted; |
698 | 310k | cur_to_promote = cur_to_promote->caller; |
699 | 310k | } |
700 | 310k | else { |
701 | 310k | if (cur_to_promote == tc->thread_entry_frame) |
702 | 310k | tc->thread_entry_frame = promoted; |
703 | 310k | cur_to_promote = NULL; |
704 | 310k | } |
705 | 310k | } |
706 | 310k | else { |
707 | 310k | /* End of caller chain; check if we promoted the entry |
708 | 310k | * frame */ |
709 | 310k | if (cur_to_promote == tc->thread_entry_frame) |
710 | 310k | tc->thread_entry_frame = promoted; |
711 | 310k | cur_to_promote = NULL; |
712 | 310k | } |
713 | 310k | } |
714 | 310k | }); |
715 | 310k | MVM_CHECK_CALLER_CHAIN(tc, new_cur_frame); |
716 | 310k | |
717 | 310k | /* All is promoted. Update thread's current frame and reset the thread |
718 | 310k | * local callstack. */ |
719 | 310k | tc->cur_frame = new_cur_frame; |
720 | 310k | MVM_callstack_reset(tc); |
721 | 310k | |
722 | 310k | /* Hand back new location of promoted frame. */ |
723 | 310k | if (!result) |
724 | 0 | MVM_panic(1, "Failed to find frame to promote on call stack"); |
725 | 310k | return result; |
726 | 310k | } |
727 | | |
728 | | /* This function is to be used by the debugserver if a thread is currently |
729 | | * blocked. */ |
730 | 0 | MVMFrame * MVM_frame_debugserver_move_to_heap(MVMThreadContext *tc, MVMThreadContext *owner, MVMFrame *frame) { |
731 | 0 | /* To keep things simple, we'll promote the entire stack. */ |
732 | 0 | MVMFrame *cur_to_promote = owner->cur_frame; |
733 | 0 | MVMFrame *new_cur_frame = NULL; |
734 | 0 | MVMFrame *update_caller = NULL; |
735 | 0 | MVMFrame *result = NULL; |
736 | 0 | MVM_CHECK_CALLER_CHAIN(tc, cur_to_promote); |
737 | 0 | MVMROOT3(tc, new_cur_frame, update_caller, result, { |
738 | 0 | while (cur_to_promote) { |
739 | 0 | /* Allocate a heap frame. */ |
740 | 0 | MVMFrame *promoted = MVM_gc_allocate_frame(tc); |
741 | 0 |
|
742 | 0 | /* Bump heap promotion counter, to encourage allocating this kind |
743 | 0 | * of frame directly on the heap in the future. If the frame was |
744 | 0 | * entered at least 50 times, and over 80% of the entries lead to |
745 | 0 | * an eventual heap promotion, them we'll mark it to be allocated |
746 | 0 | * right away on the heap. Note that entries is only bumped when |
747 | 0 | * spesh logging is taking place, so we only bump the number of |
748 | 0 | * heap promotions in that case too. */ |
749 | 0 | MVMStaticFrame *sf = cur_to_promote->static_info; |
750 | 0 | if (!sf->body.allocate_on_heap && cur_to_promote->spesh_correlation_id) { |
751 | 0 | MVMuint32 promos = sf->body.spesh->body.num_heap_promotions++; |
752 | 0 | MVMuint32 entries = sf->body.spesh->body.spesh_entries_recorded; |
753 | 0 | if (entries > 50 && promos > (4 * entries) / 5) |
754 | 0 | sf->body.allocate_on_heap = 1; |
755 | 0 | } |
756 | 0 |
|
757 | 0 | /* Copy current frame's body to it. */ |
758 | 0 | memcpy( |
759 | 0 | (char *)promoted + sizeof(MVMCollectable), |
760 | 0 | (char *)cur_to_promote + sizeof(MVMCollectable), |
761 | 0 | sizeof(MVMFrame) - sizeof(MVMCollectable)); |
762 | 0 |
|
763 | 0 | /* Update caller of previously promoted frame, if any. This is the |
764 | 0 | * only reference that might point to a non-heap frame. */ |
765 | 0 | if (update_caller) { |
766 | 0 | MVM_ASSIGN_REF(tc, &(update_caller->header), |
767 | 0 | update_caller->caller, promoted); |
768 | 0 | } |
769 | 0 |
|
770 | 0 | /* If we're the first time through the lopo, then we're instead |
771 | 0 | * replacing the current stack top. Note we do it at the end, |
772 | 0 | * so that the GC can still walk unpromoted frames if it runs |
773 | 0 | * in this loop. */ |
774 | 0 | else { |
775 | 0 | new_cur_frame = promoted; |
776 | 0 | } |
777 | 0 |
|
778 | 0 | /* If the frame we're promoting was in the active handlers list, |
779 | 0 | * update the address there. */ |
780 | 0 | if (owner->active_handlers) { |
781 | 0 | MVMActiveHandler *ah = owner->active_handlers; |
782 | 0 | while (ah) { |
783 | 0 | if (ah->frame == cur_to_promote) |
784 | 0 | ah->frame = promoted; |
785 | 0 | ah = ah->next_handler; |
786 | 0 | } |
787 | 0 | } |
788 | 0 |
|
789 | 0 | /* If we're replacing the frame we were asked to promote, that will |
790 | 0 | * become our result. */ |
791 | 0 | if (cur_to_promote == frame) |
792 | 0 | result = promoted; |
793 | 0 |
|
794 | 0 | /* Check if there's a caller, or if we reached the end of the |
795 | 0 | * chain. */ |
796 | 0 | if (cur_to_promote->caller) { |
797 | 0 | /* If the caller is on the stack then it needs promotion too. |
798 | 0 | * If not, we're done. */ |
799 | 0 | if (MVM_FRAME_IS_ON_CALLSTACK(tc, cur_to_promote->caller)) { |
800 | 0 | /* Clear caller in promoted frame, to avoid a heap -> stack |
801 | 0 | * reference if we GC during this loop. */ |
802 | 0 | promoted->caller = NULL; |
803 | 0 | update_caller = promoted; |
804 | 0 | cur_to_promote = cur_to_promote->caller; |
805 | 0 | } |
806 | 0 | else { |
807 | 0 | if (cur_to_promote == owner->thread_entry_frame) |
808 | 0 | owner->thread_entry_frame = promoted; |
809 | 0 | cur_to_promote = NULL; |
810 | 0 | } |
811 | 0 | } |
812 | 0 | else { |
813 | 0 | /* End of caller chain; check if we promoted the entry |
814 | 0 | * frame */ |
815 | 0 | if (cur_to_promote == owner->thread_entry_frame) |
816 | 0 | owner->thread_entry_frame = promoted; |
817 | 0 | cur_to_promote = NULL; |
818 | 0 | } |
819 | 0 | } |
820 | 0 | }); |
821 | 0 | MVM_CHECK_CALLER_CHAIN(tc, new_cur_frame); |
822 | 0 |
|
823 | 0 | /* All is promoted. Update thread's current frame and reset the thread |
824 | 0 | * local callstack. */ |
825 | 0 | owner->cur_frame = new_cur_frame; |
826 | 0 | MVM_callstack_reset(owner); |
827 | 0 |
|
828 | 0 | /* Hand back new location of promoted frame. */ |
829 | 0 | if (!result) |
830 | 0 | MVM_panic(1, "Failed to find frame to promote on foreign thread's call stack"); |
831 | 0 | return result; |
832 | 0 | } |
833 | | |
834 | | /* Creates a frame for de-optimization purposes. */ |
835 | | MVMFrame * MVM_frame_create_for_deopt(MVMThreadContext *tc, MVMStaticFrame *static_frame, |
836 | 24 | MVMCode *code_ref) { |
837 | 24 | MVMFrame *frame; |
838 | 24 | MVMROOT2(tc, static_frame, code_ref, { |
839 | 24 | frame = allocate_heap_frame(tc, static_frame, NULL); |
840 | 24 | }); |
841 | 24 | MVM_ASSIGN_REF(tc, &(frame->header), frame->static_info, static_frame); |
842 | 24 | MVM_ASSIGN_REF(tc, &(frame->header), frame->code_ref, code_ref); |
843 | 24 | MVM_ASSIGN_REF(tc, &(frame->header), frame->outer, code_ref->body.outer); |
844 | 24 | return frame; |
845 | 24 | } |
846 | | |
847 | | /* Removes a single frame, as part of a return or unwind. Done after any exit |
848 | | * handler has already been run. */ |
849 | 17.6M | static MVMuint64 remove_one_frame(MVMThreadContext *tc, MVMuint8 unwind) { |
850 | 17.6M | MVMFrame *returner = tc->cur_frame; |
851 | 17.6M | MVMFrame *caller = returner->caller; |
852 | 17.6M | MVM_ASSERT_NOT_FROMSPACE(tc, caller); |
853 | 17.6M | |
854 | 17.6M | /* Clear up any extra frame data. */ |
855 | 17.6M | if (returner->extra) { |
856 | 456k | MVMFrameExtra *e = returner->extra; |
857 | 456k | if (e->continuation_tags) |
858 | 1 | MVM_continuation_free_tags(tc, returner); |
859 | 456k | MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMFrameExtra), e); |
860 | 456k | returner->extra = NULL; |
861 | 456k | } |
862 | 17.6M | |
863 | 17.6M | /* Clean up frame working space. */ |
864 | 17.6M | if (returner->work) { |
865 | 17.6M | MVM_args_proc_cleanup(tc, &returner->params); |
866 | 17.6M | MVM_fixed_size_free(tc, tc->instance->fsa, returner->allocd_work, |
867 | 17.6M | returner->work); |
868 | 17.6M | } |
869 | 17.6M | |
870 | 17.6M | /* If it's a call stack frame, remove it from the stack. */ |
871 | 17.6M | if (MVM_FRAME_IS_ON_CALLSTACK(tc, returner)) { |
872 | 16.7M | MVMCallStackRegion *stack = tc->stack_current; |
873 | 16.7M | stack->alloc = (char *)returner; |
874 | 16.7M | if ((char *)stack->alloc - sizeof(MVMCallStackRegion) == (char *)stack) |
875 | 3.93M | MVM_callstack_region_prev(tc); |
876 | 16.7M | if (returner->env) |
877 | 760k | MVM_fixed_size_free(tc, tc->instance->fsa, returner->allocd_env, returner->env); |
878 | 16.7M | } |
879 | 17.6M | |
880 | 17.6M | /* Otherwise, NULL out ->work, to indicate the frame is no longer in |
881 | 17.6M | * dynamic scope. This is used by the GC to avoid marking stuff (this is |
882 | 17.6M | * needed for safety as otherwise we'd read freed memory), as well as by |
883 | 17.6M | * exceptions to ensure the target of an exception throw is indeed still |
884 | 17.6M | * in dynamic scope. */ |
885 | 959k | else { |
886 | 959k | returner->work = NULL; |
887 | 959k | } |
888 | 17.6M | |
889 | 17.6M | /* Switch back to the caller frame if there is one. */ |
890 | 17.6M | if (caller && returner != tc->thread_entry_frame) { |
891 | 17.6M | |
892 | 17.6M | if (tc->jit_return_address != NULL) { |
893 | 9.78M | /* on a JIT frame, exit to interpreter afterwards */ |
894 | 9.78M | MVMJitCode *jitcode = returner->spesh_cand->jitcode; |
895 | 9.78M | MVM_jit_code_set_current_position(tc, jitcode, returner, jitcode->exit_label); |
896 | 9.78M | /* given that we might throw in the special-return, act as if we've |
897 | 9.78M | * left the current frame (which is true) */ |
898 | 9.78M | tc->jit_return_address = NULL; |
899 | 9.78M | } |
900 | 17.6M | |
901 | 17.6M | tc->cur_frame = caller; |
902 | 17.6M | tc->current_frame_nr = caller->sequence_nr; |
903 | 17.6M | |
904 | 17.6M | *(tc->interp_cur_op) = caller->return_address; |
905 | 17.6M | *(tc->interp_bytecode_start) = MVM_frame_effective_bytecode(caller); |
906 | 17.6M | *(tc->interp_reg_base) = caller->work; |
907 | 17.6M | *(tc->interp_cu) = caller->static_info->body.cu; |
908 | 17.6M | |
909 | 17.6M | /* Handle any special return hooks. */ |
910 | 17.6M | if (caller->extra) { |
911 | 2.11M | MVMFrameExtra *e = caller->extra; |
912 | 2.11M | if (e->special_return || e->special_unwind) { |
913 | 58.5k | MVMSpecialReturn sr = e->special_return; |
914 | 58.5k | MVMSpecialReturn su = e->special_unwind; |
915 | 58.5k | void *srd = e->special_return_data; |
916 | 58.5k | e->special_return = NULL; |
917 | 58.5k | e->special_unwind = NULL; |
918 | 58.5k | e->special_return_data = NULL; |
919 | 58.5k | e->mark_special_return_data = NULL; |
920 | 58.5k | if (unwind && su) |
921 | 10 | su(tc, srd); |
922 | 58.5k | else if (!unwind && sr) |
923 | 58.5k | sr(tc, srd); |
924 | 58.5k | } |
925 | 2.11M | } |
926 | 17.6M | |
927 | 17.6M | return 1; |
928 | 17.6M | } |
929 | 312 | else { |
930 | 312 | tc->cur_frame = NULL; |
931 | 312 | return 0; |
932 | 312 | } |
933 | 17.6M | } |
934 | | |
935 | | /* Attempt to return from the current frame. Returns non-zero if we can, |
936 | | * and zero if there is nowhere to return to (which would signal the exit |
937 | | * of the interpreter). */ |
938 | 2 | static void remove_after_handler(MVMThreadContext *tc, void *sr_data) { |
939 | 2 | remove_one_frame(tc, 0); |
940 | 2 | } |
941 | 17.6M | MVMuint64 MVM_frame_try_return(MVMThreadContext *tc) { |
942 | 17.6M | MVMFrame *cur_frame = tc->cur_frame; |
943 | 17.6M | |
944 | 17.6M | if (cur_frame->static_info->body.has_exit_handler && |
945 | 2 | !(cur_frame->flags & MVM_FRAME_FLAG_EXIT_HAND_RUN)) { |
946 | 2 | /* Set us up to run exit handler, and make it so we'll really exit the |
947 | 2 | * frame when that has been done. */ |
948 | 2 | MVMFrame *caller = cur_frame->caller; |
949 | 2 | MVMHLLConfig *hll = MVM_hll_current(tc); |
950 | 2 | MVMObject *handler; |
951 | 2 | MVMObject *result; |
952 | 2 | MVMCallsite *two_args_callsite; |
953 | 2 | |
954 | 2 | if (!caller) |
955 | 0 | MVM_exception_throw_adhoc(tc, "Entry point frame cannot have an exit handler"); |
956 | 2 | if (tc->cur_frame == tc->thread_entry_frame) |
957 | 0 | MVM_exception_throw_adhoc(tc, "Thread entry point frame cannot have an exit handler"); |
958 | 2 | |
959 | 2 | if (caller->return_type == MVM_RETURN_OBJ) { |
960 | 2 | result = caller->return_value->o; |
961 | 2 | if (!result) |
962 | 0 | result = tc->instance->VMNull; |
963 | 2 | } |
964 | 0 | else { |
965 | 0 | MVMROOT(tc, cur_frame, { |
966 | 0 | switch (caller->return_type) { |
967 | 0 | case MVM_RETURN_INT: |
968 | 0 | result = MVM_repr_box_int(tc, hll->int_box_type, caller->return_value->i64); |
969 | 0 | break; |
970 | 0 | case MVM_RETURN_NUM: |
971 | 0 | result = MVM_repr_box_num(tc, hll->num_box_type, caller->return_value->n64); |
972 | 0 | break; |
973 | 0 | case MVM_RETURN_STR: |
974 | 0 | result = MVM_repr_box_str(tc, hll->str_box_type, caller->return_value->s); |
975 | 0 | break; |
976 | 0 | default: |
977 | 0 | result = tc->instance->VMNull; |
978 | 0 | } |
979 | 0 | }); |
980 | 0 | } |
981 | 2 | |
982 | 2 | handler = MVM_frame_find_invokee(tc, hll->exit_handler, NULL); |
983 | 2 | two_args_callsite = MVM_callsite_get_common(tc, MVM_CALLSITE_ID_TWO_OBJ); |
984 | 2 | MVM_args_setup_thunk(tc, NULL, MVM_RETURN_VOID, two_args_callsite); |
985 | 2 | cur_frame->args[0].o = cur_frame->code_ref; |
986 | 2 | cur_frame->args[1].o = result; |
987 | 2 | MVM_frame_special_return(tc, cur_frame, remove_after_handler, NULL, NULL, NULL); |
988 | 2 | cur_frame->flags |= MVM_FRAME_FLAG_EXIT_HAND_RUN; |
989 | 2 | STABLE(handler)->invoke(tc, handler, two_args_callsite, cur_frame->args); |
990 | 2 | return 1; |
991 | 2 | } |
992 | 17.6M | else { |
993 | 17.6M | /* No exit handler, so a straight return. */ |
994 | 17.6M | return remove_one_frame(tc, 0); |
995 | 17.6M | } |
996 | 17.6M | } |
997 | | |
998 | | /* Try a return from the current frame; skip running any exit handlers. */ |
999 | 0 | MVMuint64 MVM_frame_try_return_no_exit_handlers(MVMThreadContext *tc) { |
1000 | 0 | return remove_one_frame(tc, 0); |
1001 | 0 | } |
1002 | | |
1003 | | /* Unwinds execution state to the specified frame, placing control flow at either |
1004 | | * an absolute or relative (to start of target frame) address and optionally |
1005 | | * setting a returned result. */ |
1006 | | typedef struct { |
1007 | | MVMFrame *frame; |
1008 | | MVMuint8 *abs_addr; |
1009 | | MVMuint32 rel_addr; |
1010 | | void *jit_return_label; |
1011 | | } MVMUnwindData; |
1012 | 0 | static void mark_unwind_data(MVMThreadContext *tc, MVMFrame *frame, MVMGCWorklist *worklist) { |
1013 | 0 | MVMUnwindData *ud = (MVMUnwindData *)frame->extra->special_return_data; |
1014 | 0 | MVM_gc_worklist_add(tc, worklist, &(ud->frame)); |
1015 | 0 | } |
1016 | 1 | static void continue_unwind(MVMThreadContext *tc, void *sr_data) { |
1017 | 1 | MVMUnwindData *ud = (MVMUnwindData *)sr_data; |
1018 | 1 | MVMFrame *frame = ud->frame; |
1019 | 1 | MVMuint8 *abs_addr = ud->abs_addr; |
1020 | 1 | MVMuint32 rel_addr = ud->rel_addr; |
1021 | 1 | void *jit_return_label = ud->jit_return_label; |
1022 | 1 | MVM_free(sr_data); |
1023 | 1 | MVM_frame_unwind_to(tc, frame, abs_addr, rel_addr, NULL, jit_return_label); |
1024 | 1 | } |
1025 | | void MVM_frame_unwind_to(MVMThreadContext *tc, MVMFrame *frame, MVMuint8 *abs_addr, |
1026 | 464k | MVMuint32 rel_addr, MVMObject *return_value, void *jit_return_label) { |
1027 | 468k | while (tc->cur_frame != frame) { |
1028 | 4.08k | MVMFrame *cur_frame = tc->cur_frame; |
1029 | 4.08k | if (cur_frame->static_info->body.has_exit_handler && |
1030 | 2 | !(cur_frame->flags & MVM_FRAME_FLAG_EXIT_HAND_RUN)) { |
1031 | 1 | /* We're unwinding a frame with an exit handler. Thus we need to |
1032 | 1 | * pause the unwind, run the exit handler, and keep enough info |
1033 | 1 | * around in order to finish up the unwind afterwards. */ |
1034 | 1 | MVMHLLConfig *hll = MVM_hll_current(tc); |
1035 | 1 | MVMFrame *caller; |
1036 | 1 | MVMObject *handler; |
1037 | 1 | MVMCallsite *two_args_callsite; |
1038 | 1 | |
1039 | 1 | /* Force the frame onto the heap, since we'll reference it from the |
1040 | 1 | * unwind data. */ |
1041 | 1 | MVMROOT3(tc, frame, cur_frame, return_value, { |
1042 | 1 | frame = MVM_frame_force_to_heap(tc, frame); |
1043 | 1 | cur_frame = tc->cur_frame; |
1044 | 1 | }); |
1045 | 1 | |
1046 | 1 | caller = cur_frame->caller; |
1047 | 1 | if (!caller) |
1048 | 0 | MVM_exception_throw_adhoc(tc, "Entry point frame cannot have an exit handler"); |
1049 | 1 | if (cur_frame == tc->thread_entry_frame) |
1050 | 0 | MVM_exception_throw_adhoc(tc, "Thread entry point frame cannot have an exit handler"); |
1051 | 1 | |
1052 | 1 | handler = MVM_frame_find_invokee(tc, hll->exit_handler, NULL); |
1053 | 1 | two_args_callsite = MVM_callsite_get_common(tc, MVM_CALLSITE_ID_TWO_OBJ); |
1054 | 1 | MVM_args_setup_thunk(tc, NULL, MVM_RETURN_VOID, two_args_callsite); |
1055 | 1 | cur_frame->args[0].o = cur_frame->code_ref; |
1056 | 1 | cur_frame->args[1].o = tc->instance->VMNull; |
1057 | 1 | { |
1058 | 1 | MVMUnwindData *ud = MVM_malloc(sizeof(MVMUnwindData)); |
1059 | 1 | ud->frame = frame; |
1060 | 1 | ud->abs_addr = abs_addr; |
1061 | 1 | ud->rel_addr = rel_addr; |
1062 | 1 | ud->jit_return_label = jit_return_label; |
1063 | 1 | if (return_value) |
1064 | 0 | MVM_exception_throw_adhoc(tc, "return_value + exit_handler case NYI"); |
1065 | 1 | MVM_frame_special_return(tc, cur_frame, continue_unwind, NULL, ud, |
1066 | 1 | mark_unwind_data); |
1067 | 1 | } |
1068 | 1 | cur_frame->flags |= MVM_FRAME_FLAG_EXIT_HAND_RUN; |
1069 | 1 | STABLE(handler)->invoke(tc, handler, two_args_callsite, cur_frame->args); |
1070 | 1 | return; |
1071 | 1 | } |
1072 | 4.08k | else { |
1073 | 4.08k | /* If we're profiling, log an exit. */ |
1074 | 4.08k | if (tc->instance->profiling) |
1075 | 0 | MVM_profile_log_unwind(tc); |
1076 | 4.08k | |
1077 | 4.08k | /* No exit handler, so just remove the frame. */ |
1078 | 4.08k | if (!remove_one_frame(tc, 1)) |
1079 | 0 | MVM_panic(1, "Internal error: Unwound entire stack and missed handler"); |
1080 | 4.08k | } |
1081 | 4.08k | } |
1082 | 464k | if (abs_addr) |
1083 | 62.1k | *tc->interp_cur_op = abs_addr; |
1084 | 402k | else if (rel_addr) |
1085 | 402k | *tc->interp_cur_op = *tc->interp_bytecode_start + rel_addr; |
1086 | 464k | |
1087 | 464k | if (jit_return_label) { |
1088 | 62.1k | MVM_jit_code_set_current_position(tc, tc->cur_frame->spesh_cand->jitcode, tc->cur_frame, jit_return_label); |
1089 | 62.1k | } |
1090 | 464k | |
1091 | 464k | if (return_value) |
1092 | 0 | MVM_args_set_result_obj(tc, return_value, 1); |
1093 | 464k | } |
1094 | | |
1095 | | /* Gets a code object for a frame, lazily deserializing it if needed. */ |
1096 | 916k | MVMObject * MVM_frame_get_code_object(MVMThreadContext *tc, MVMCode *code) { |
1097 | 916k | if (MVM_UNLIKELY(REPR(code)->ID != MVM_REPR_ID_MVMCode)) |
1098 | 0 | MVM_exception_throw_adhoc(tc, "getcodeobj needs a code ref"); |
1099 | 916k | |
1100 | 916k | if (!code->body.code_object) { |
1101 | 41.9k | MVMStaticFrame *sf = code->body.sf; |
1102 | 41.9k | if (sf->body.code_obj_sc_dep_idx > 0) { |
1103 | 41.9k | MVMObject *resolved; |
1104 | 41.9k | MVMSerializationContext *sc = MVM_sc_get_sc(tc, sf->body.cu, |
1105 | 41.9k | sf->body.code_obj_sc_dep_idx - 1); |
1106 | 41.9k | if (MVM_UNLIKELY(sc == NULL)) |
1107 | 0 | MVM_exception_throw_adhoc(tc, |
1108 | 0 | "SC not yet resolved; lookup failed"); |
1109 | 41.9k | |
1110 | 41.9k | MVMROOT(tc, code, { |
1111 | 41.9k | resolved = MVM_sc_get_object(tc, sc, sf->body.code_obj_sc_idx); |
1112 | 41.9k | }); |
1113 | 41.9k | |
1114 | 41.9k | MVM_ASSIGN_REF(tc, &(code->common.header), code->body.code_object, |
1115 | 41.9k | resolved); |
1116 | 41.9k | } |
1117 | 41.9k | } |
1118 | 916k | return code->body.code_object; |
1119 | 916k | } |
1120 | | |
1121 | | /* Given the specified code object, sets its outer to the current scope. */ |
1122 | 215k | void MVM_frame_capturelex(MVMThreadContext *tc, MVMObject *code) { |
1123 | 215k | MVMCode *code_obj = (MVMCode *)code; |
1124 | 215k | MVMFrame *captured; |
1125 | 215k | if (MVM_UNLIKELY(REPR(code)->ID != MVM_REPR_ID_MVMCode)) |
1126 | 0 | MVM_exception_throw_adhoc(tc, |
1127 | 0 | "Can only perform capturelex on object with representation MVMCode"); |
1128 | 215k | MVMROOT(tc, code, { |
1129 | 215k | captured = MVM_frame_force_to_heap(tc, tc->cur_frame); |
1130 | 215k | }); |
1131 | 215k | MVM_ASSIGN_REF(tc, &(code->header), code_obj->body.outer, captured); |
1132 | 215k | } |
1133 | | |
1134 | | /* This is used for situations in Perl 6 like: |
1135 | | * supply { |
1136 | | * my $x = something(); |
1137 | | * whenever $supply { |
1138 | | * QUIT { $x.foo() } |
1139 | | * } |
1140 | | * } |
1141 | | * Here, the QUIT may be called without an invocation of the whenever ever |
1142 | | * having taken place. At the point we closure-clone the whenever block, we |
1143 | | * will capture_inner the QUIT phaser. This creates a fake outer for the |
1144 | | * QUIT, but makes *its* outer point to the nearest instance of the relevant |
1145 | | * static frame on the call stack, so that the QUIT will disocver the correct |
1146 | | * $x. |
1147 | | */ |
1148 | 0 | void MVM_frame_capture_inner(MVMThreadContext *tc, MVMObject *code) { |
1149 | 0 | MVMCode *code_obj = (MVMCode *)code; |
1150 | 0 | MVMFrame *outer; |
1151 | 0 | MVMROOT(tc, code, { |
1152 | 0 | MVMStaticFrame *sf_outer = code_obj->body.sf->body.outer; |
1153 | 0 | MVMROOT(tc, sf_outer, { |
1154 | 0 | outer = create_context_only(tc, sf_outer, (MVMObject *)sf_outer->body.static_code, 1); |
1155 | 0 | }); |
1156 | 0 | MVMROOT(tc, outer, { |
1157 | 0 | MVMFrame *outer_outer = autoclose(tc, sf_outer->body.outer); |
1158 | 0 | MVM_ASSIGN_REF(tc, &(outer->header), outer->outer, outer_outer); |
1159 | 0 | }); |
1160 | 0 | }); |
1161 | 0 | MVM_ASSIGN_REF(tc, &(code->header), code_obj->body.outer, outer); |
1162 | 0 | } |
1163 | | |
1164 | | /* Given the specified code object, copies it and returns a copy which |
1165 | | * captures a closure over the current scope. */ |
1166 | 424k | MVMObject * MVM_frame_takeclosure(MVMThreadContext *tc, MVMObject *code) { |
1167 | 424k | MVMCode *closure; |
1168 | 424k | MVMFrame *captured; |
1169 | 424k | |
1170 | 424k | if (MVM_UNLIKELY(REPR(code)->ID != MVM_REPR_ID_MVMCode)) |
1171 | 0 | MVM_exception_throw_adhoc(tc, |
1172 | 0 | "Can only perform takeclosure on object with representation MVMCode"); |
1173 | 424k | |
1174 | 424k | MVMROOT(tc, code, { |
1175 | 424k | closure = (MVMCode *)REPR(code)->allocate(tc, STABLE(code)); |
1176 | 424k | MVMROOT(tc, closure, { |
1177 | 424k | captured = MVM_frame_force_to_heap(tc, tc->cur_frame); |
1178 | 424k | }); |
1179 | 424k | }); |
1180 | 424k | |
1181 | 424k | MVM_ASSIGN_REF(tc, &(closure->common.header), closure->body.sf, ((MVMCode *)code)->body.sf); |
1182 | 424k | MVM_ASSIGN_REF(tc, &(closure->common.header), closure->body.name, ((MVMCode *)code)->body.name); |
1183 | 424k | MVM_ASSIGN_REF(tc, &(closure->common.header), closure->body.outer, captured); |
1184 | 424k | |
1185 | 424k | MVM_ASSIGN_REF(tc, &(closure->common.header), closure->body.code_object, |
1186 | 424k | ((MVMCode *)code)->body.code_object); |
1187 | 424k | |
1188 | 424k | return (MVMObject *)closure; |
1189 | 424k | } |
1190 | | |
1191 | | /* Vivifies a lexical in a frame. */ |
1192 | 6.81k | MVMObject * MVM_frame_vivify_lexical(MVMThreadContext *tc, MVMFrame *f, MVMuint16 idx) { |
1193 | 6.81k | MVMuint8 *flags; |
1194 | 6.81k | MVMint16 flag; |
1195 | 6.81k | MVMRegister *static_env; |
1196 | 6.81k | MVMuint16 effective_idx = 0; |
1197 | 6.81k | MVMStaticFrame *effective_sf; |
1198 | 6.81k | if (idx < f->static_info->body.num_lexicals) { |
1199 | 6.81k | flags = f->static_info->body.static_env_flags; |
1200 | 6.81k | static_env = f->static_info->body.static_env; |
1201 | 6.81k | effective_idx = idx; |
1202 | 6.81k | effective_sf = f->static_info; |
1203 | 6.81k | } |
1204 | 0 | else if (f->spesh_cand) { |
1205 | 0 | MVMint32 i; |
1206 | 0 | flags = NULL; |
1207 | 0 | for (i = 0; i < f->spesh_cand->num_inlines; i++) { |
1208 | 0 | MVMStaticFrame *isf = f->spesh_cand->inlines[i].sf; |
1209 | 0 | effective_idx = idx - f->spesh_cand->inlines[i].lexicals_start; |
1210 | 0 | if (effective_idx < isf->body.num_lexicals) { |
1211 | 0 | flags = isf->body.static_env_flags; |
1212 | 0 | static_env = isf->body.static_env; |
1213 | 0 | effective_sf = isf; |
1214 | 0 | break; |
1215 | 0 | } |
1216 | 0 | } |
1217 | 0 | } |
1218 | 0 | else { |
1219 | 0 | flags = NULL; |
1220 | 0 | } |
1221 | 6.81k | flag = flags ? flags[effective_idx] : -1; |
1222 | 6.81k | if (flag != -1 && static_env[effective_idx].o == NULL) { |
1223 | 6.80k | MVMint32 scid, objid; |
1224 | 6.80k | if (MVM_bytecode_find_static_lexical_scref(tc, effective_sf->body.cu, |
1225 | 6.66k | effective_sf, effective_idx, &scid, &objid)) { |
1226 | 6.66k | MVMSerializationContext *sc = MVM_sc_get_sc(tc, effective_sf->body.cu, scid); |
1227 | 6.66k | MVMObject *resolved; |
1228 | 6.66k | if (sc == NULL) |
1229 | 0 | MVM_exception_throw_adhoc(tc, |
1230 | 0 | "SC not yet resolved; lookup failed"); |
1231 | 6.66k | MVMROOT(tc, f, { |
1232 | 6.66k | resolved = MVM_sc_get_object(tc, sc, objid); |
1233 | 6.66k | }); |
1234 | 6.66k | MVM_ASSIGN_REF(tc, &(effective_sf->common.header), |
1235 | 6.66k | effective_sf->body.static_env[effective_idx].o, |
1236 | 6.66k | resolved); |
1237 | 6.66k | } |
1238 | 6.80k | } |
1239 | 6.81k | if (flag == 0) { |
1240 | 6.80k | MVMObject *viv = static_env[effective_idx].o; |
1241 | 6.80k | if (!viv) |
1242 | 145 | viv = tc->instance->VMNull; |
1243 | 6.80k | MVM_ASSIGN_REF(tc, &(f->header), f->env[idx].o, viv); |
1244 | 6.80k | return viv; |
1245 | 6.80k | } |
1246 | 4 | else if (flag == 1) { |
1247 | 4 | MVMObject *viv; |
1248 | 4 | MVMROOT(tc, f, { |
1249 | 4 | viv = MVM_repr_clone(tc, static_env[effective_idx].o); |
1250 | 4 | MVM_ASSIGN_REF(tc, &(f->header), f->env[idx].o, viv); |
1251 | 4 | }); |
1252 | 4 | return viv; |
1253 | 4 | } |
1254 | 0 | else { |
1255 | 0 | return tc->instance->VMNull; |
1256 | 0 | } |
1257 | 6.81k | } |
1258 | | |
1259 | | /* Looks up the address of the lexical with the specified name and the |
1260 | | * specified type. Non-existing object lexicals produce NULL, expected |
1261 | | * (for better or worse) by various things. Otherwise, an error is thrown |
1262 | | * if it does not exist. Incorrect type always throws. */ |
1263 | 4.35M | MVMRegister * MVM_frame_find_lexical_by_name(MVMThreadContext *tc, MVMString *name, MVMuint16 type) { |
1264 | 4.35M | MVMFrame *cur_frame = tc->cur_frame; |
1265 | 8.83M | while (cur_frame != NULL) { |
1266 | 8.83M | MVMLexicalRegistry *lexical_names = cur_frame->static_info->body.lexical_names; |
1267 | 8.83M | if (lexical_names) { |
1268 | 4.64M | /* Indexes were formerly stored off-by-one to avoid semi-predicate issue. */ |
1269 | 4.64M | MVMLexicalRegistry *entry; |
1270 | 4.64M | MVM_HASH_GET(tc, lexical_names, name, entry) |
1271 | 4.64M | if (entry) { |
1272 | 4.35M | if (MVM_LIKELY(cur_frame->static_info->body.lexical_types[entry->value] == type)) { |
1273 | 4.35M | MVMRegister *result = &cur_frame->env[entry->value]; |
1274 | 4.35M | if (type == MVM_reg_obj && !result->o) |
1275 | 145 | MVM_frame_vivify_lexical(tc, cur_frame, entry->value); |
1276 | 4.35M | return result; |
1277 | 4.35M | } |
1278 | 0 | else { |
1279 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1280 | 0 | char *waste[] = { c_name, NULL }; |
1281 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
1282 | 0 | "Lexical with name '%s' has wrong type", |
1283 | 0 | c_name); |
1284 | 0 | } |
1285 | 4.35M | } |
1286 | 4.64M | } |
1287 | 4.48M | cur_frame = cur_frame->outer; |
1288 | 4.48M | } |
1289 | 0 | if (MVM_UNLIKELY(type != MVM_reg_obj)) { |
1290 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1291 | 0 | char *waste[] = { c_name, NULL }; |
1292 | 0 | MVM_exception_throw_adhoc_free(tc, waste, "No lexical found with name '%s'", |
1293 | 0 | c_name); |
1294 | 0 | } |
1295 | 0 | return NULL; |
1296 | 4.35M | } |
1297 | | |
1298 | | /* Binds the specified value to the given lexical, finding it along the static |
1299 | | * chain. */ |
1300 | 0 | MVM_PUBLIC void MVM_frame_bind_lexical_by_name(MVMThreadContext *tc, MVMString *name, MVMuint16 type, MVMRegister *value) { |
1301 | 0 | MVMFrame *cur_frame = tc->cur_frame; |
1302 | 0 | while (cur_frame != NULL) { |
1303 | 0 | MVMLexicalRegistry *lexical_names = cur_frame->static_info->body.lexical_names; |
1304 | 0 | if (lexical_names) { |
1305 | 0 | MVMLexicalRegistry *entry; |
1306 | 0 | MVM_HASH_GET(tc, lexical_names, name, entry) |
1307 | 0 | if (entry) { |
1308 | 0 | if (cur_frame->static_info->body.lexical_types[entry->value] == type) { |
1309 | 0 | if (type == MVM_reg_obj || type == MVM_reg_str) { |
1310 | 0 | MVM_ASSIGN_REF(tc, &(cur_frame->header), |
1311 | 0 | cur_frame->env[entry->value].o, value->o); |
1312 | 0 | } |
1313 | 0 | else { |
1314 | 0 | cur_frame->env[entry->value] = *value; |
1315 | 0 | } |
1316 | 0 | return; |
1317 | 0 | } |
1318 | 0 | else { |
1319 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1320 | 0 | char *waste[] = { c_name, NULL }; |
1321 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
1322 | 0 | "Lexical with name '%s' has wrong type", |
1323 | 0 | c_name); |
1324 | 0 | } |
1325 | 0 | } |
1326 | 0 | } |
1327 | 0 | cur_frame = cur_frame->outer; |
1328 | 0 | } |
1329 | 0 | { |
1330 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1331 | 0 | char *waste[] = { c_name, NULL }; |
1332 | 0 | MVM_exception_throw_adhoc_free(tc, waste, "No lexical found with name '%s'", |
1333 | 0 | c_name); |
1334 | 0 | } |
1335 | 0 | } |
1336 | | |
1337 | | /* Finds a lexical in the outer frame, throwing if it's not there. */ |
1338 | 4 | MVMObject * MVM_frame_find_lexical_by_name_outer(MVMThreadContext *tc, MVMString *name) { |
1339 | 4 | MVMRegister *r = MVM_frame_find_lexical_by_name_rel(tc, name, tc->cur_frame->outer); |
1340 | 4 | if (MVM_LIKELY(r != NULL)) |
1341 | 4 | return r->o; |
1342 | 0 | else { |
1343 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1344 | 0 | char *waste[] = { c_name, NULL }; |
1345 | 0 | MVM_exception_throw_adhoc_free(tc, waste, "No lexical found with name '%s'", |
1346 | 0 | c_name); |
1347 | 0 | } |
1348 | 4 | } |
1349 | | |
1350 | | /* Looks up the address of the lexical with the specified name, starting with |
1351 | | * the specified frame. Only works if it's an object lexical. */ |
1352 | 6 | MVMRegister * MVM_frame_find_lexical_by_name_rel(MVMThreadContext *tc, MVMString *name, MVMFrame *cur_frame) { |
1353 | 11 | while (cur_frame != NULL) { |
1354 | 10 | MVMLexicalRegistry *lexical_names = cur_frame->static_info->body.lexical_names; |
1355 | 10 | if (lexical_names) { |
1356 | 10 | /* Indexes were formerly stored off-by-one to avoid semi-predicate issue. */ |
1357 | 10 | MVMLexicalRegistry *entry; |
1358 | 10 | MVM_HASH_GET(tc, lexical_names, name, entry) |
1359 | 10 | if (entry) { |
1360 | 5 | if (cur_frame->static_info->body.lexical_types[entry->value] == MVM_reg_obj) { |
1361 | 5 | MVMRegister *result = &cur_frame->env[entry->value]; |
1362 | 5 | if (!result->o) |
1363 | 0 | MVM_frame_vivify_lexical(tc, cur_frame, entry->value); |
1364 | 5 | return result; |
1365 | 5 | } |
1366 | 0 | else { |
1367 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1368 | 0 | char *waste[] = { c_name, NULL }; |
1369 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
1370 | 0 | "Lexical with name '%s' has wrong type", |
1371 | 0 | c_name); |
1372 | 0 | } |
1373 | 5 | } |
1374 | 10 | } |
1375 | 5 | cur_frame = cur_frame->outer; |
1376 | 5 | } |
1377 | 1 | return NULL; |
1378 | 6 | } |
1379 | | |
1380 | | /* Looks up the address of the lexical with the specified name, starting with |
1381 | | * the specified frame. It checks all outer frames of the caller frame chain. */ |
1382 | 10 | MVMRegister * MVM_frame_find_lexical_by_name_rel_caller(MVMThreadContext *tc, MVMString *name, MVMFrame *cur_caller_frame) { |
1383 | 37 | while (cur_caller_frame != NULL) { |
1384 | 35 | MVMFrame *cur_frame = cur_caller_frame; |
1385 | 124 | while (cur_frame != NULL) { |
1386 | 97 | MVMLexicalRegistry *lexical_names = cur_frame->static_info->body.lexical_names; |
1387 | 97 | if (lexical_names) { |
1388 | 86 | /* Indexes were formerly stored off-by-one to avoid semi-predicate issue. */ |
1389 | 86 | MVMLexicalRegistry *entry; |
1390 | 86 | MVM_HASH_GET(tc, lexical_names, name, entry) |
1391 | 86 | if (entry) { |
1392 | 8 | if (cur_frame->static_info->body.lexical_types[entry->value] == MVM_reg_obj) { |
1393 | 8 | MVMRegister *result = &cur_frame->env[entry->value]; |
1394 | 8 | if (!result->o) |
1395 | 0 | MVM_frame_vivify_lexical(tc, cur_frame, entry->value); |
1396 | 8 | return result; |
1397 | 8 | } |
1398 | 0 | else { |
1399 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1400 | 0 | char *waste[] = { c_name, NULL }; |
1401 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
1402 | 0 | "Lexical with name '%s' has wrong type", |
1403 | 0 | c_name); |
1404 | 0 | } |
1405 | 8 | } |
1406 | 86 | } |
1407 | 89 | cur_frame = cur_frame->outer; |
1408 | 89 | } |
1409 | 27 | cur_caller_frame = cur_caller_frame->caller; |
1410 | 27 | } |
1411 | 2 | return NULL; |
1412 | 10 | } |
1413 | | |
1414 | | /* Looks up the address of the lexical with the specified name and the |
1415 | | * specified type. Returns null if it does not exist. */ |
1416 | 255k | static void try_cache_dynlex(MVMThreadContext *tc, MVMFrame *from, MVMFrame *to, MVMString *name, MVMRegister *reg, MVMuint16 type, MVMuint32 fcost, MVMuint32 icost) { |
1417 | 255k | #if MVM_DYNLEX_CACHE_ENABLED |
1418 | 255k | MVMint32 next = 0; |
1419 | 255k | MVMint32 frames = 0; |
1420 | 255k | MVMuint32 desperation = 0; |
1421 | 255k | |
1422 | 255k | if (fcost+icost > 20) |
1423 | 17.6k | desperation = 1; |
1424 | 255k | |
1425 | 1.00M | while (from && from != to) { |
1426 | 910k | frames++; |
1427 | 910k | if (frames >= next) { |
1428 | 590k | if (!from->extra || !from->extra->dynlex_cache_name || (desperation && frames > 1)) { |
1429 | 418k | MVMFrameExtra *e = MVM_frame_extra(tc, from); |
1430 | 418k | MVM_ASSIGN_REF(tc, &(from->header), e->dynlex_cache_name, name); |
1431 | 418k | e->dynlex_cache_reg = reg; |
1432 | 418k | e->dynlex_cache_type = type; |
1433 | 418k | if (desperation && next == 3) { |
1434 | 17.6k | next = fcost / 2; |
1435 | 17.6k | } |
1436 | 400k | else { |
1437 | 400k | if (next) |
1438 | 156k | return; |
1439 | 244k | next = 3; |
1440 | 244k | } |
1441 | 418k | } |
1442 | 590k | } |
1443 | 754k | from = from->caller; |
1444 | 754k | } |
1445 | 255k | #endif |
1446 | 255k | } |
1447 | 873k | MVMRegister * MVM_frame_find_contextual_by_name(MVMThreadContext *tc, MVMString *name, MVMuint16 *type, MVMFrame *cur_frame, MVMint32 vivify, MVMFrame **found_frame) { |
1448 | 873k | FILE *dlog = tc->instance->dynvar_log_fh; |
1449 | 873k | MVMuint32 fcost = 0; /* frames traversed */ |
1450 | 873k | MVMuint32 icost = 0; /* inlines traversed */ |
1451 | 873k | MVMuint32 ecost = 0; /* frames traversed with empty cache */ |
1452 | 873k | MVMuint32 xcost = 0; /* frames traversed with wrong name */ |
1453 | 873k | char *c_name; |
1454 | 873k | MVMuint64 start_time; |
1455 | 873k | MVMuint64 last_time; |
1456 | 873k | |
1457 | 873k | MVMFrame *initial_frame = cur_frame; |
1458 | 873k | if (MVM_UNLIKELY(!name)) |
1459 | 0 | MVM_exception_throw_adhoc(tc, "Contextual name cannot be null"); |
1460 | 873k | if (dlog) { |
1461 | 0 | c_name = MVM_string_utf8_encode_C_string(tc, name); |
1462 | 0 | start_time = uv_hrtime(); |
1463 | 0 | last_time = tc->instance->dynvar_log_lasttime; |
1464 | 0 | } |
1465 | 873k | |
1466 | 4.24M | while (cur_frame != NULL) { |
1467 | 4.20M | MVMLexicalRegistry *lexical_names; |
1468 | 4.20M | MVMSpeshCandidate *cand = cur_frame->spesh_cand; |
1469 | 4.20M | MVMFrameExtra *e; |
1470 | 4.20M | /* See if we are inside an inline. Note that this isn't actually |
1471 | 4.20M | * correct for a leaf frame, but those aren't inlined and don't |
1472 | 4.20M | * use getdynlex for their own lexicals since the compiler already |
1473 | 4.20M | * knows where to find them */ |
1474 | 4.20M | if (cand && cand->num_inlines) { |
1475 | 917k | if (cand->jitcode) { |
1476 | 903k | MVMJitCode *jitcode = cand->jitcode; |
1477 | 903k | void * current_position = MVM_jit_code_get_current_position(tc, jitcode, cur_frame); |
1478 | 903k | MVMint32 i; |
1479 | 903k | |
1480 | 903k | for (i = MVM_jit_code_get_active_inlines(tc, jitcode, current_position, 0); |
1481 | 1.06M | i < jitcode->num_inlines; |
1482 | 157k | i = MVM_jit_code_get_active_inlines(tc, jitcode, current_position, i+1)) { |
1483 | 157k | MVMStaticFrame *isf = cand->inlines[i].sf; |
1484 | 157k | icost++; |
1485 | 157k | if ((lexical_names = isf->body.lexical_names)) { |
1486 | 8.47k | MVMLexicalRegistry *entry; |
1487 | 8.47k | MVM_HASH_GET(tc, lexical_names, name, entry); |
1488 | 8.47k | if (entry) { |
1489 | 126 | MVMuint16 lexidx = cand->inlines[i].lexicals_start + entry->value; |
1490 | 126 | MVMRegister *result = &cur_frame->env[lexidx]; |
1491 | 126 | *type = cand->lexical_types[lexidx]; |
1492 | 126 | if (vivify && *type == MVM_reg_obj && !result->o) { |
1493 | 0 | MVMROOT3(tc, cur_frame, initial_frame, name, { |
1494 | 0 | MVM_frame_vivify_lexical(tc, cur_frame, lexidx); |
1495 | 0 | }); |
1496 | 0 | } |
1497 | 126 | if (fcost+icost > 1) |
1498 | 126 | try_cache_dynlex(tc, initial_frame, cur_frame, name, result, *type, fcost, icost); |
1499 | 126 | if (dlog) { |
1500 | 0 | fprintf(dlog, "I %s %d %d %d %d %"PRIu64" %"PRIu64" %"PRIu64"\n", c_name, fcost, icost, ecost, xcost, last_time, start_time, uv_hrtime()); |
1501 | 0 | fflush(dlog); |
1502 | 0 | MVM_free(c_name); |
1503 | 0 | tc->instance->dynvar_log_lasttime = uv_hrtime(); |
1504 | 0 | } |
1505 | 126 | *found_frame = cur_frame; |
1506 | 126 | return result; |
1507 | 126 | } |
1508 | 8.47k | } |
1509 | 157k | } |
1510 | 14.0k | } else { |
1511 | 14.0k | MVMint32 ret_offset = cur_frame->return_address - |
1512 | 14.0k | MVM_frame_effective_bytecode(cur_frame); |
1513 | 14.0k | MVMint32 i; |
1514 | 50.8k | for (i = 0; i < cand->num_inlines; i++) { |
1515 | 38.2k | icost++; |
1516 | 38.2k | if (ret_offset >= cand->inlines[i].start && ret_offset <= cand->inlines[i].end) { |
1517 | 10.4k | MVMStaticFrame *isf = cand->inlines[i].sf; |
1518 | 10.4k | if ((lexical_names = isf->body.lexical_names)) { |
1519 | 7.51k | MVMLexicalRegistry *entry; |
1520 | 7.51k | MVM_HASH_GET(tc, lexical_names, name, entry); |
1521 | 7.51k | if (entry) { |
1522 | 1.49k | MVMuint16 lexidx = cand->inlines[i].lexicals_start + entry->value; |
1523 | 1.49k | MVMRegister *result = &cur_frame->env[lexidx]; |
1524 | 1.49k | *type = cand->lexical_types[lexidx]; |
1525 | 1.49k | if (vivify && *type == MVM_reg_obj && !result->o) { |
1526 | 0 | MVMROOT3(tc, cur_frame, initial_frame, name, { |
1527 | 0 | MVM_frame_vivify_lexical(tc, cur_frame, lexidx); |
1528 | 0 | }); |
1529 | 0 | } |
1530 | 1.49k | if (fcost+icost > 1) |
1531 | 1.49k | try_cache_dynlex(tc, initial_frame, cur_frame, name, result, *type, fcost, icost); |
1532 | 1.49k | if (dlog) { |
1533 | 0 | fprintf(dlog, "I %s %d %d %d %d %"PRIu64" %"PRIu64" %"PRIu64"\n", c_name, fcost, icost, ecost, xcost, last_time, start_time, uv_hrtime()); |
1534 | 0 | fflush(dlog); |
1535 | 0 | MVM_free(c_name); |
1536 | 0 | tc->instance->dynvar_log_lasttime = uv_hrtime(); |
1537 | 0 | } |
1538 | 1.49k | *found_frame = cur_frame; |
1539 | 1.49k | return result; |
1540 | 1.49k | } |
1541 | 7.51k | } |
1542 | 10.4k | } |
1543 | 38.2k | } |
1544 | 14.0k | } |
1545 | 917k | } |
1546 | 4.20M | |
1547 | 4.20M | /* See if we've got it cached at this level. */ |
1548 | 4.20M | e = cur_frame->extra; |
1549 | 4.20M | if (e && e->dynlex_cache_name) { |
1550 | 1.63M | if (MVM_string_equal(tc, name, e->dynlex_cache_name)) { |
1551 | 596k | MVMRegister *result = e->dynlex_cache_reg; |
1552 | 596k | *type = e->dynlex_cache_type; |
1553 | 596k | if (fcost+icost > 5) |
1554 | 88.6k | try_cache_dynlex(tc, initial_frame, cur_frame, name, result, *type, fcost, icost); |
1555 | 596k | if (dlog) { |
1556 | 0 | fprintf(dlog, "C %s %d %d %d %d %"PRIu64" %"PRIu64" %"PRIu64"\n", c_name, fcost, icost, ecost, xcost, last_time, start_time, uv_hrtime()); |
1557 | 0 | fflush(dlog); |
1558 | 0 | MVM_free(c_name); |
1559 | 0 | tc->instance->dynvar_log_lasttime = uv_hrtime(); |
1560 | 0 | } |
1561 | 596k | *found_frame = cur_frame; |
1562 | 596k | return result; |
1563 | 596k | } |
1564 | 1.63M | else |
1565 | 1.03M | xcost++; |
1566 | 1.63M | } |
1567 | 4.20M | else |
1568 | 2.57M | ecost++; |
1569 | 4.20M | |
1570 | 4.20M | /* Now look in the frame itself. */ |
1571 | 3.61M | if ((lexical_names = cur_frame->static_info->body.lexical_names)) { |
1572 | 1.92M | MVMLexicalRegistry *entry; |
1573 | 1.92M | MVM_HASH_GET(tc, lexical_names, name, entry) |
1574 | 1.92M | if (entry) { |
1575 | 244k | MVMRegister *result = &cur_frame->env[entry->value]; |
1576 | 244k | *type = cur_frame->static_info->body.lexical_types[entry->value]; |
1577 | 244k | if (vivify && *type == MVM_reg_obj && !result->o) { |
1578 | 0 | MVMROOT3(tc, cur_frame, initial_frame, name, { |
1579 | 0 | MVM_frame_vivify_lexical(tc, cur_frame, entry->value); |
1580 | 0 | }); |
1581 | 0 | } |
1582 | 244k | if (dlog) { |
1583 | 0 | fprintf(dlog, "F %s %d %d %d %d %"PRIu64" %"PRIu64" %"PRIu64"\n", c_name, fcost, icost, ecost, xcost, last_time, start_time, uv_hrtime()); |
1584 | 0 | fflush(dlog); |
1585 | 0 | MVM_free(c_name); |
1586 | 0 | tc->instance->dynvar_log_lasttime = uv_hrtime(); |
1587 | 0 | } |
1588 | 244k | if (fcost+icost > 1) |
1589 | 164k | try_cache_dynlex(tc, initial_frame, cur_frame, name, result, *type, fcost, icost); |
1590 | 244k | *found_frame = cur_frame; |
1591 | 244k | return result; |
1592 | 244k | } |
1593 | 1.92M | } |
1594 | 3.36M | fcost++; |
1595 | 3.36M | cur_frame = cur_frame->caller; |
1596 | 3.36M | } |
1597 | 30.8k | if (dlog) { |
1598 | 0 | fprintf(dlog, "N %s %d %d %d %d %"PRIu64" %"PRIu64" %"PRIu64"\n", c_name, fcost, icost, ecost, xcost, last_time, start_time, uv_hrtime()); |
1599 | 0 | fflush(dlog); |
1600 | 0 | MVM_free(c_name); |
1601 | 0 | tc->instance->dynvar_log_lasttime = uv_hrtime(); |
1602 | 0 | } |
1603 | 30.8k | *found_frame = NULL; |
1604 | 30.8k | return NULL; |
1605 | 873k | } |
1606 | | |
1607 | 863k | MVMObject * MVM_frame_getdynlex(MVMThreadContext *tc, MVMString *name, MVMFrame *cur_frame) { |
1608 | 863k | MVMuint16 type; |
1609 | 863k | MVMFrame *found_frame; |
1610 | 863k | MVMRegister *lex_reg = MVM_frame_find_contextual_by_name(tc, name, &type, cur_frame, 1, &found_frame); |
1611 | 863k | MVMObject *result = NULL, *result_type = NULL; |
1612 | 863k | if (lex_reg) { |
1613 | 832k | switch (MVM_EXPECT(type, MVM_reg_obj)) { |
1614 | 0 | case MVM_reg_int64: |
1615 | 0 | result_type = (*tc->interp_cu)->body.hll_config->int_box_type; |
1616 | 0 | if (!result_type) |
1617 | 0 | MVM_exception_throw_adhoc(tc, "missing int box type"); |
1618 | 0 | result = REPR(result_type)->allocate(tc, STABLE(result_type)); |
1619 | 0 | MVM_gc_root_temp_push(tc, (MVMCollectable **)&result); |
1620 | 0 | if (REPR(result)->initialize) |
1621 | 0 | REPR(result)->initialize(tc, STABLE(result), result, OBJECT_BODY(result)); |
1622 | 0 | REPR(result)->box_funcs.set_int(tc, STABLE(result), result, |
1623 | 0 | OBJECT_BODY(result), lex_reg->i64); |
1624 | 0 | MVM_gc_root_temp_pop(tc); |
1625 | 0 | break; |
1626 | 0 | case MVM_reg_num64: |
1627 | 0 | result_type = (*tc->interp_cu)->body.hll_config->num_box_type; |
1628 | 0 | if (!result_type) |
1629 | 0 | MVM_exception_throw_adhoc(tc, "missing num box type"); |
1630 | 0 | result = REPR(result_type)->allocate(tc, STABLE(result_type)); |
1631 | 0 | MVM_gc_root_temp_push(tc, (MVMCollectable **)&result); |
1632 | 0 | if (REPR(result)->initialize) |
1633 | 0 | REPR(result)->initialize(tc, STABLE(result), result, OBJECT_BODY(result)); |
1634 | 0 | REPR(result)->box_funcs.set_num(tc, STABLE(result), result, |
1635 | 0 | OBJECT_BODY(result), lex_reg->n64); |
1636 | 0 | MVM_gc_root_temp_pop(tc); |
1637 | 0 | break; |
1638 | 0 | case MVM_reg_str: |
1639 | 0 | result_type = (*tc->interp_cu)->body.hll_config->str_box_type; |
1640 | 0 | if (!result_type) |
1641 | 0 | MVM_exception_throw_adhoc(tc, "missing str box type"); |
1642 | 0 | result = REPR(result_type)->allocate(tc, STABLE(result_type)); |
1643 | 0 | MVM_gc_root_temp_push(tc, (MVMCollectable **)&result); |
1644 | 0 | if (REPR(result)->initialize) |
1645 | 0 | REPR(result)->initialize(tc, STABLE(result), result, OBJECT_BODY(result)); |
1646 | 0 | REPR(result)->box_funcs.set_str(tc, STABLE(result), result, |
1647 | 0 | OBJECT_BODY(result), lex_reg->s); |
1648 | 0 | MVM_gc_root_temp_pop(tc); |
1649 | 0 | break; |
1650 | 832k | case MVM_reg_obj: |
1651 | 832k | result = lex_reg->o; |
1652 | 832k | break; |
1653 | 0 | default: |
1654 | 0 | MVM_exception_throw_adhoc(tc, "invalid register type in getdynlex: %d", type); |
1655 | 832k | } |
1656 | 832k | } |
1657 | 863k | return result ? result : tc->instance->VMNull; |
1658 | 863k | } |
1659 | | |
1660 | 9.65k | void MVM_frame_binddynlex(MVMThreadContext *tc, MVMString *name, MVMObject *value, MVMFrame *cur_frame) { |
1661 | 9.65k | MVMuint16 type; |
1662 | 9.65k | MVMFrame *found_frame; |
1663 | 9.65k | MVMRegister *lex_reg = MVM_frame_find_contextual_by_name(tc, name, &type, cur_frame, 0, &found_frame); |
1664 | 9.65k | if (!lex_reg) { |
1665 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1666 | 0 | char *waste[] = { c_name, NULL }; |
1667 | 0 | MVM_exception_throw_adhoc_free(tc, waste, "No contextual found with name '%s'", |
1668 | 0 | c_name); |
1669 | 0 | } |
1670 | 9.65k | switch (type) { |
1671 | 0 | case MVM_reg_int64: |
1672 | 0 | lex_reg->i64 = REPR(value)->box_funcs.get_int(tc, |
1673 | 0 | STABLE(value), value, OBJECT_BODY(value)); |
1674 | 0 | break; |
1675 | 0 | case MVM_reg_num64: |
1676 | 0 | lex_reg->n64 = REPR(value)->box_funcs.get_num(tc, |
1677 | 0 | STABLE(value), value, OBJECT_BODY(value)); |
1678 | 0 | break; |
1679 | 0 | case MVM_reg_str: |
1680 | 0 | MVM_ASSIGN_REF(tc, &(found_frame->header), lex_reg->s, |
1681 | 0 | REPR(value)->box_funcs.get_str(tc, STABLE(value), value, OBJECT_BODY(value))); |
1682 | 0 | break; |
1683 | 9.65k | case MVM_reg_obj: |
1684 | 9.65k | MVM_ASSIGN_REF(tc, &(found_frame->header), lex_reg->o, value); |
1685 | 9.65k | break; |
1686 | 0 | default: |
1687 | 0 | MVM_exception_throw_adhoc(tc, "invalid register type in binddynlex"); |
1688 | 9.65k | } |
1689 | 9.65k | } |
1690 | | |
1691 | | /* Returns the storage unit for the lexical in the specified frame. Does not |
1692 | | * try to vivify anything - gets exactly what is there. */ |
1693 | 5.08k | MVMRegister * MVM_frame_lexical(MVMThreadContext *tc, MVMFrame *f, MVMString *name) { |
1694 | 5.08k | MVMLexicalRegistry *lexical_names = f->static_info->body.lexical_names; |
1695 | 5.08k | if (MVM_LIKELY(lexical_names != NULL)) { |
1696 | 5.08k | MVMLexicalRegistry *entry; |
1697 | 5.08k | MVM_HASH_GET(tc, lexical_names, name, entry) |
1698 | 5.08k | if (entry) |
1699 | 5.08k | return &f->env[entry->value]; |
1700 | 5.08k | } |
1701 | 0 | { |
1702 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1703 | 0 | char *waste[] = { c_name, NULL }; |
1704 | 0 | MVM_exception_throw_adhoc_free(tc, waste, "Frame has no lexical with name '%s'", |
1705 | 0 | c_name); |
1706 | 0 | } |
1707 | 0 | } |
1708 | | |
1709 | | /* Returns the storage unit for the lexical in the specified frame. */ |
1710 | 0 | MVMRegister * MVM_frame_try_get_lexical(MVMThreadContext *tc, MVMFrame *f, MVMString *name, MVMuint16 type) { |
1711 | 0 | MVMLexicalRegistry *lexical_names = f->static_info->body.lexical_names; |
1712 | 0 | if (lexical_names) { |
1713 | 0 | MVMLexicalRegistry *entry; |
1714 | 0 | MVM_HASH_GET(tc, lexical_names, name, entry) |
1715 | 0 | if (entry && f->static_info->body.lexical_types[entry->value] == type) { |
1716 | 0 | MVMRegister *result = &f->env[entry->value]; |
1717 | 0 | if (type == MVM_reg_obj && !result->o) |
1718 | 0 | MVM_frame_vivify_lexical(tc, f, entry->value); |
1719 | 0 | return result; |
1720 | 0 | } |
1721 | 0 | } |
1722 | 0 | return NULL; |
1723 | 0 | } |
1724 | | |
1725 | | /* Returns the primitive type specification for a lexical. */ |
1726 | 13.3k | MVMuint16 MVM_frame_lexical_primspec(MVMThreadContext *tc, MVMFrame *f, MVMString *name) { |
1727 | 13.3k | MVMLexicalRegistry *lexical_names = f->static_info->body.lexical_names; |
1728 | 13.3k | if (lexical_names) { |
1729 | 13.3k | MVMLexicalRegistry *entry; |
1730 | 13.3k | MVM_HASH_GET(tc, lexical_names, name, entry) |
1731 | 13.3k | if (entry) { |
1732 | 13.3k | switch (MVM_EXPECT(f->static_info->body.lexical_types[entry->value], MVM_reg_obj)) { |
1733 | 0 | case MVM_reg_int64: |
1734 | 0 | return MVM_STORAGE_SPEC_BP_INT; |
1735 | 0 | case MVM_reg_num64: |
1736 | 0 | return MVM_STORAGE_SPEC_BP_NUM; |
1737 | 0 | case MVM_reg_str: |
1738 | 0 | return MVM_STORAGE_SPEC_BP_STR; |
1739 | 13.3k | case MVM_reg_obj: |
1740 | 13.3k | return MVM_STORAGE_SPEC_BP_NONE; |
1741 | 0 | case MVM_reg_int8: |
1742 | 0 | return MVM_STORAGE_SPEC_BP_INT8; |
1743 | 0 | case MVM_reg_int16: |
1744 | 0 | return MVM_STORAGE_SPEC_BP_INT16; |
1745 | 0 | case MVM_reg_int32: |
1746 | 0 | return MVM_STORAGE_SPEC_BP_INT32; |
1747 | 0 | case MVM_reg_uint8: |
1748 | 0 | return MVM_STORAGE_SPEC_BP_UINT8; |
1749 | 0 | case MVM_reg_uint16: |
1750 | 0 | return MVM_STORAGE_SPEC_BP_UINT16; |
1751 | 0 | case MVM_reg_uint32: |
1752 | 0 | return MVM_STORAGE_SPEC_BP_UINT32; |
1753 | 0 | case MVM_reg_uint64: |
1754 | 0 | return MVM_STORAGE_SPEC_BP_UINT64; |
1755 | 0 | default: |
1756 | 0 | { |
1757 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1758 | 0 | char *waste[] = { c_name, NULL }; |
1759 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
1760 | 0 | "Unhandled lexical type '%s' in lexprimspec for '%s'", |
1761 | 0 | MVM_reg_get_debug_name(tc, f->static_info->body.lexical_types[entry->value]), |
1762 | 0 | c_name); |
1763 | 0 | } |
1764 | 13.3k | } |
1765 | 13.3k | } |
1766 | 13.3k | } |
1767 | 0 | { |
1768 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, name); |
1769 | 0 | char *waste[] = { c_name, NULL }; |
1770 | 0 | MVM_exception_throw_adhoc_free(tc, waste, "Frame has no lexical with name '%s'", |
1771 | 0 | c_name); |
1772 | 0 | } |
1773 | 0 | } |
1774 | | |
1775 | 10.1M | static MVMObject * find_invokee_internal(MVMThreadContext *tc, MVMObject *code, MVMCallsite **tweak_cs, MVMInvocationSpec *is) { |
1776 | 10.1M | /* Fast path when we have an offset directly into a P6opaque. */ |
1777 | 10.1M | if (is->code_ref_offset) { |
1778 | 10.1M | if (!IS_CONCRETE(code)) |
1779 | 0 | MVM_exception_throw_adhoc(tc, "Can not invoke a code type object"); |
1780 | 10.1M | code = MVM_p6opaque_read_object(tc, code, is->code_ref_offset); |
1781 | 10.1M | } |
1782 | 10.1M | |
1783 | 10.1M | /* Otherwise, if there is a class handle, fall back to the slow path |
1784 | 10.1M | * lookup, but set up code_ref_offset if applicable. */ |
1785 | 429 | else if (!MVM_is_null(tc, is->class_handle)) { |
1786 | 426 | MVMRegister dest; |
1787 | 426 | if (!IS_CONCRETE(code)) |
1788 | 0 | MVM_exception_throw_adhoc(tc, "Can not invoke a code type object"); |
1789 | 426 | if (code->st->REPR->ID == MVM_REPR_ID_P6opaque) |
1790 | 426 | is->code_ref_offset = MVM_p6opaque_attr_offset(tc, code->st->WHAT, |
1791 | 426 | is->class_handle, is->attr_name); |
1792 | 426 | REPR(code)->attr_funcs.get_attribute(tc, |
1793 | 426 | STABLE(code), code, OBJECT_BODY(code), |
1794 | 426 | is->class_handle, is->attr_name, |
1795 | 426 | is->hint, &dest, MVM_reg_obj); |
1796 | 426 | code = dest.o; |
1797 | 426 | } |
1798 | 429 | |
1799 | 429 | /* Failing that, it must be an invocation handler. */ |
1800 | 3 | else { |
1801 | 3 | /* Need to tweak the callsite and args to include the code object |
1802 | 3 | * being invoked. */ |
1803 | 3 | if (tweak_cs) { |
1804 | 3 | MVMCallsite *orig = *tweak_cs; |
1805 | 3 | if (orig->with_invocant) { |
1806 | 0 | *tweak_cs = orig->with_invocant; |
1807 | 0 | } |
1808 | 3 | else { |
1809 | 3 | MVMCallsite *new = MVM_calloc(1, sizeof(MVMCallsite)); |
1810 | 3 | MVMint32 fsize = orig->flag_count; |
1811 | 3 | new->flag_count = fsize + 1; |
1812 | 3 | new->arg_flags = MVM_malloc(new->flag_count * sizeof(MVMCallsiteEntry)); |
1813 | 3 | new->arg_flags[0] = MVM_CALLSITE_ARG_OBJ; |
1814 | 3 | memcpy(new->arg_flags + 1, orig->arg_flags, fsize); |
1815 | 3 | new->arg_count = orig->arg_count + 1; |
1816 | 3 | new->num_pos = orig->num_pos + 1; |
1817 | 3 | new->has_flattening = orig->has_flattening; |
1818 | 3 | new->is_interned = 0; |
1819 | 3 | new->with_invocant = NULL; |
1820 | 3 | *tweak_cs = orig->with_invocant = new; |
1821 | 3 | } |
1822 | 3 | memmove(tc->cur_frame->args + 1, tc->cur_frame->args, |
1823 | 3 | orig->arg_count * sizeof(MVMRegister)); |
1824 | 3 | tc->cur_frame->args[0].o = code; |
1825 | 3 | tc->cur_frame->cur_args_callsite = *tweak_cs; /* Keep in sync. */ |
1826 | 3 | } |
1827 | 0 | else { |
1828 | 0 | MVM_exception_throw_adhoc(tc, |
1829 | 0 | "Cannot invoke object with invocation handler in this context"); |
1830 | 0 | } |
1831 | 3 | code = is->invocation_handler; |
1832 | 3 | } |
1833 | 10.1M | return code; |
1834 | 10.1M | } |
1835 | | |
1836 | 449k | MVMObject * MVM_frame_find_invokee(MVMThreadContext *tc, MVMObject *code, MVMCallsite **tweak_cs) { |
1837 | 449k | if (MVM_is_null(tc, code)) |
1838 | 0 | MVM_exception_throw_adhoc(tc, "Cannot invoke null object"); |
1839 | 449k | if (STABLE(code)->invoke == MVM_6model_invoke_default) { |
1840 | 448k | MVMInvocationSpec *is = STABLE(code)->invocation_spec; |
1841 | 448k | if (!is) { |
1842 | 0 | MVM_exception_throw_adhoc(tc, "Cannot invoke this object (REPR: %s; %s)", |
1843 | 0 | REPR(code)->name, MVM_6model_get_debug_name(tc, code)); |
1844 | 0 | } |
1845 | 448k | code = find_invokee_internal(tc, code, tweak_cs, is); |
1846 | 448k | } |
1847 | 449k | return code; |
1848 | 449k | } |
1849 | | |
1850 | | MVM_USED_BY_JIT |
1851 | | MVMObject * MVM_frame_find_invokee_multi_ok(MVMThreadContext *tc, MVMObject *code, |
1852 | | MVMCallsite **tweak_cs, MVMRegister *args, |
1853 | 14.7M | MVMuint16 *was_multi) { |
1854 | 14.7M | if (!code) |
1855 | 0 | MVM_exception_throw_adhoc(tc, "Cannot invoke null object"); |
1856 | 14.7M | if (STABLE(code)->invoke == MVM_6model_invoke_default) { |
1857 | 9.82M | MVMInvocationSpec *is = STABLE(code)->invocation_spec; |
1858 | 9.82M | if (!is) { |
1859 | 0 | MVM_exception_throw_adhoc(tc, "Cannot invoke this object (REPR: %s; %s)", REPR(code)->name, MVM_6model_get_debug_name(tc, code)); |
1860 | 0 | } |
1861 | 9.82M | if (is->md_cache_offset && is->md_valid_offset) { |
1862 | 9.10M | if (!IS_CONCRETE(code)) |
1863 | 0 | MVM_exception_throw_adhoc(tc, "Can not invoke a code type object"); |
1864 | 9.10M | if (MVM_p6opaque_read_int64(tc, code, is->md_valid_offset)) { |
1865 | 143k | MVMObject *md_cache = MVM_p6opaque_read_object(tc, code, is->md_cache_offset); |
1866 | 143k | if (was_multi) |
1867 | 112k | *was_multi = 1; |
1868 | 143k | if (!MVM_is_null(tc, md_cache)) { |
1869 | 143k | MVMObject *result = MVM_multi_cache_find_callsite_args(tc, |
1870 | 143k | md_cache, *tweak_cs, args); |
1871 | 143k | if (result) |
1872 | 139k | return MVM_frame_find_invokee(tc, result, tweak_cs); |
1873 | 143k | } |
1874 | 143k | } |
1875 | 9.10M | } |
1876 | 712k | else if (!MVM_is_null(tc, is->md_class_handle)) { |
1877 | 144 | /* We might be able to dig straight into the multi cache and not |
1878 | 144 | * have to invoke the proto. Also on this path set up the offsets |
1879 | 144 | * so we can be faster in the future. */ |
1880 | 144 | MVMRegister dest; |
1881 | 144 | if (!IS_CONCRETE(code)) |
1882 | 0 | MVM_exception_throw_adhoc(tc, "Can not invoke a code type object"); |
1883 | 144 | if (code->st->REPR->ID == MVM_REPR_ID_P6opaque) { |
1884 | 144 | is->md_valid_offset = MVM_p6opaque_attr_offset(tc, code->st->WHAT, |
1885 | 144 | is->md_class_handle, is->md_valid_attr_name); |
1886 | 144 | is->md_cache_offset = MVM_p6opaque_attr_offset(tc, code->st->WHAT, |
1887 | 144 | is->md_class_handle, is->md_cache_attr_name); |
1888 | 144 | } |
1889 | 144 | REPR(code)->attr_funcs.get_attribute(tc, |
1890 | 144 | STABLE(code), code, OBJECT_BODY(code), |
1891 | 144 | is->md_class_handle, is->md_valid_attr_name, |
1892 | 144 | is->md_valid_hint, &dest, MVM_reg_int64); |
1893 | 144 | if (dest.i64) { |
1894 | 0 | if (was_multi) |
1895 | 0 | *was_multi = 1; |
1896 | 0 | REPR(code)->attr_funcs.get_attribute(tc, |
1897 | 0 | STABLE(code), code, OBJECT_BODY(code), |
1898 | 0 | is->md_class_handle, is->md_cache_attr_name, |
1899 | 0 | is->md_cache_hint, &dest, MVM_reg_obj); |
1900 | 0 | if (!MVM_is_null(tc, dest.o)) { |
1901 | 0 | MVMObject *result = MVM_multi_cache_find_callsite_args(tc, |
1902 | 0 | dest.o, *tweak_cs, args); |
1903 | 0 | if (result) |
1904 | 0 | return MVM_frame_find_invokee(tc, result, tweak_cs); |
1905 | 0 | } |
1906 | 0 | } |
1907 | 144 | } |
1908 | 9.68M | code = find_invokee_internal(tc, code, tweak_cs, is); |
1909 | 9.68M | } |
1910 | 14.6M | return code; |
1911 | 14.7M | } |
1912 | | |
1913 | | /* Rapid resolution of an invokee. Used by the specialized resolve code op. */ |
1914 | 12.4M | MVMObject * MVM_frame_resolve_invokee_spesh(MVMThreadContext *tc, MVMObject *invokee) { |
1915 | 12.4M | if (REPR(invokee)->ID == MVM_REPR_ID_MVMCode) { |
1916 | 10.7M | return invokee; |
1917 | 10.7M | } |
1918 | 1.73M | else { |
1919 | 1.73M | MVMInvocationSpec *is = STABLE(invokee)->invocation_spec; |
1920 | 1.73M | if (MVM_LIKELY(is && is->code_ref_offset && IS_CONCRETE(invokee))) |
1921 | 1.73M | return MVM_p6opaque_read_object(tc, invokee, is->code_ref_offset); |
1922 | 1.73M | } |
1923 | 0 | return tc->instance->VMNull; |
1924 | 12.4M | } |
1925 | | |
1926 | | /* Creates a MVMContent wrapper object around an MVMFrame. */ |
1927 | 2.93k | MVMObject * MVM_frame_context_wrapper(MVMThreadContext *tc, MVMFrame *f) { |
1928 | 2.93k | MVMObject *ctx; |
1929 | 2.93k | f = MVM_frame_force_to_heap(tc, f); |
1930 | 2.93k | MVMROOT(tc, f, { |
1931 | 2.93k | ctx = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTContext); |
1932 | 2.93k | MVM_ASSIGN_REF(tc, &(ctx->header), ((MVMContext *)ctx)->body.context, f); |
1933 | 2.93k | }); |
1934 | 2.93k | return ctx; |
1935 | 2.93k | } |
1936 | | |
1937 | | /* Gets, allocating if needed, the frame extra data structure for the given |
1938 | | * frame. This is used to hold data that only a handful of frames need. */ |
1939 | 481k | MVMFrameExtra * MVM_frame_extra(MVMThreadContext *tc, MVMFrame *f) { |
1940 | 481k | if (!f->extra) |
1941 | 456k | f->extra = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMFrameExtra)); |
1942 | 481k | return f->extra; |
1943 | 481k | } |
1944 | | |
1945 | | /* Set up special return data on a frame. */ |
1946 | | void MVM_frame_special_return(MVMThreadContext *tc, MVMFrame *f, |
1947 | | MVMSpecialReturn special_return, |
1948 | | MVMSpecialReturn special_unwind, |
1949 | | void *special_return_data, |
1950 | 58.5k | MVMSpecialReturnDataMark mark_special_return_data) { |
1951 | 58.5k | MVMFrameExtra *e = MVM_frame_extra(tc, f); |
1952 | 58.5k | e->special_return = special_return; |
1953 | 58.5k | e->special_unwind = special_unwind; |
1954 | 58.5k | e->special_return_data = special_return_data; |
1955 | 58.5k | e->mark_special_return_data = mark_special_return_data; |
1956 | 58.5k | } |
1957 | | |
1958 | | /* Clears any special return data on a frame. */ |
1959 | 33 | void MVM_frame_clear_special_return(MVMThreadContext *tc, MVMFrame *f) { |
1960 | 33 | if (f->extra) { |
1961 | 33 | f->extra->special_return = NULL; |
1962 | 33 | f->extra->special_unwind = NULL; |
1963 | 33 | f->extra->special_return_data = NULL; |
1964 | 33 | f->extra->mark_special_return_data = NULL; |
1965 | 33 | } |
1966 | 33 | } |