/home/travis/build/MoarVM/MoarVM/src/6model/sc.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "moar.h" |
2 | | |
/* Creates a new serialization context with the specified handle. If any
 * compilation units are waiting for an SC with this handle, removes it from
 * their to-resolve list after installing itself in the appropriate slot. */
MVMObject * MVM_sc_create(MVMThreadContext *tc, MVMString *handle) {
    MVMSerializationContext *sc;
    MVMSerializationContextBody *scb = NULL;

    /* Allocate. Both `handle` and then `sc` are rooted, since the
     * allocation and repr_init below may run the GC and move them. */
    MVMROOT(tc, handle, {
        sc = (MVMSerializationContext *)REPR(tc->instance->SCRef)->allocate(tc, STABLE(tc->instance->SCRef));
        MVMROOT(tc, sc, {
            /* Add to weak lookup hash. The registry mutex guards both the
             * weakhash and the test/set of scb->sc in the branches below. */
            uv_mutex_lock(&tc->instance->mutex_sc_registry);
            MVM_HASH_GET(tc, tc->instance->sc_weakhash, handle, scb);
            if (!scb) {
                /* No body registered under this handle yet: create one,
                 * attach it to the new SC, and register it. */
                sc->body = scb = MVM_calloc(1, sizeof(MVMSerializationContextBody));
                MVM_ASSIGN_REF(tc, &(sc->common.header), scb->handle, handle);
                MVM_HASH_BIND(tc, tc->instance->sc_weakhash, handle, scb);
                /* Calling repr_init will allocate, BUT if it does so, and we
                 * get unlucky, the GC will try to acquire mutex_sc_registry.
                 * This deadlocks. Thus, we force allocation in gen2, which
                 * can never trigger GC. Note that releasing the mutex early
                 * is not a good way to fix this, as it leaves a race to
                 * test/set scb->sc (between the line doing it in this block,
                 * and in the else clauses beneath it). */
                MVM_gc_allocate_gen2_default_set(tc);
                MVM_repr_init(tc, (MVMObject *)sc);
                MVM_gc_allocate_gen2_default_clear(tc);
                scb->sc = sc;
                MVM_sc_add_all_scs_entry(tc, scb);
            }
            else if (scb->sc) {
                /* we lost a race to create it! Use the winner's SC and
                 * abandon our freshly allocated one. */
                sc = scb->sc;
            }
            else {
                /* A body exists but no SC object is attached to it yet
                 * (presumably registered by a compilation unit waiting to
                 * resolve this handle — TODO confirm); adopt the body, and
                 * initialize in gen2 for the same deadlock-avoidance reason
                 * explained above. */
                scb->sc = sc;
                sc->body = scb;
                MVM_ASSIGN_REF(tc, &(sc->common.header), scb->handle, handle);
                MVM_gc_allocate_gen2_default_set(tc);
                MVM_repr_init(tc, (MVMObject *)sc);
                MVM_gc_allocate_gen2_default_clear(tc);
            }
            uv_mutex_unlock(&tc->instance->mutex_sc_registry);
        });
    });

    return (MVMObject *)sc;
}
52 | | |
53 | | /* Makes an entry in all SCs list, the index of which is used to refer to |
54 | | * SCs in object headers. This must only be called while holding the SC |
55 | | * registry mutex. However, the all SCs list is read without the lock. |
56 | | * Thus we allocate memory using the FSA and free it at a safepoint. */ |
57 | 3.06k | void MVM_sc_add_all_scs_entry(MVMThreadContext *tc, MVMSerializationContextBody *scb) { |
58 | 3.06k | if (tc->instance->all_scs_next_idx == tc->instance->all_scs_alloc) { |
59 | 181 | if (tc->instance->all_scs_next_idx == 0) { |
60 | 144 | /* First time; allocate, and NULL first slot as it is |
61 | 144 | * the "no SC" sentinel value. */ |
62 | 144 | tc->instance->all_scs_alloc = 32; |
63 | 144 | tc->instance->all_scs = MVM_fixed_size_alloc(tc, tc->instance->fsa, |
64 | 144 | tc->instance->all_scs_alloc * sizeof(MVMSerializationContextBody *)); |
65 | 144 | tc->instance->all_scs[0] = NULL; |
66 | 144 | tc->instance->all_scs_next_idx++; |
67 | 144 | } |
68 | 37 | else { |
69 | 37 | MVMuint32 orig_alloc = tc->instance->all_scs_alloc; |
70 | 37 | tc->instance->all_scs_alloc += 32; |
71 | 37 | tc->instance->all_scs = MVM_fixed_size_realloc_at_safepoint(tc, |
72 | 37 | tc->instance->fsa, tc->instance->all_scs, |
73 | 37 | orig_alloc * sizeof(MVMSerializationContextBody *), |
74 | 37 | tc->instance->all_scs_alloc * sizeof(MVMSerializationContextBody *)); |
75 | 37 | } |
76 | 181 | } |
77 | 3.06k | scb->sc_idx = tc->instance->all_scs_next_idx; |
78 | 3.06k | tc->instance->all_scs[tc->instance->all_scs_next_idx] = scb; |
79 | 3.06k | tc->instance->all_scs_next_idx++; |
80 | 3.06k | } |
81 | | |
82 | | /* Given an SC, returns its unique handle. */ |
83 | 9.19k | MVMString * MVM_sc_get_handle(MVMThreadContext *tc, MVMSerializationContext *sc) { |
84 | 9.19k | return sc->body->handle; |
85 | 9.19k | } |
86 | | |
87 | | /* Given an SC, returns its description. */ |
88 | 178 | MVMString * MVM_sc_get_description(MVMThreadContext *tc, MVMSerializationContext *sc) { |
89 | 178 | return sc->body->description; |
90 | 178 | } |
91 | | |
92 | | /* Given an SC, sets its description. */ |
93 | 2.71k | void MVM_sc_set_description(MVMThreadContext *tc, MVMSerializationContext *sc, MVMString *desc) { |
94 | 2.71k | MVM_ASSIGN_REF(tc, &(sc->common.header), sc->body->description, desc); |
95 | 2.71k | } |
96 | | |
97 | | /* Given an SC, looks up the index of an object that is in its root set. */ |
98 | 9.46k | MVMint64 MVM_sc_find_object_idx(MVMThreadContext *tc, MVMSerializationContext *sc, MVMObject *obj) { |
99 | 9.46k | MVMObject **roots; |
100 | 9.46k | MVMint64 i, count; |
101 | 9.46k | MVMuint32 cached = MVM_sc_get_idx_in_sc(&obj->header); |
102 | 9.46k | if (cached != ~0 && MVM_sc_get_collectable_sc(tc, &obj->header) == sc) |
103 | 5.31k | return cached; |
104 | 4.14k | roots = sc->body->root_objects; |
105 | 4.14k | count = sc->body->num_objects; |
106 | 234k | for (i = 0; i < count; i++) |
107 | 234k | if (roots[i] == obj) |
108 | 4.14k | return i; |
109 | 0 | MVM_exception_throw_adhoc(tc, |
110 | 0 | "Object does not exist in serialization context"); |
111 | 0 | } |
112 | | |
113 | | /* Calls MVM_sc_find_object_idx, but first checks if the sc is actually an SCRef. */ |
114 | 3.62k | MVMint64 MVM_sc_find_object_idx_jit(MVMThreadContext *tc, MVMObject *sc, MVMObject *obj) { |
115 | 3.62k | if (REPR(sc)->ID != MVM_REPR_ID_SCRef) |
116 | 0 | MVM_exception_throw_adhoc(tc, |
117 | 0 | "Must provide an SCRef operand to scgetobjidx"); |
118 | 3.62k | return MVM_sc_find_object_idx(tc, (MVMSerializationContext *)sc, obj); |
119 | 3.62k | } |
120 | | |
121 | | /* Given an SC, looks up the index of an STable that is in its root set. */ |
122 | 350 | MVMint64 MVM_sc_find_stable_idx(MVMThreadContext *tc, MVMSerializationContext *sc, MVMSTable *st) { |
123 | 350 | MVMuint64 i; |
124 | 350 | MVMuint32 cached = MVM_sc_get_idx_in_sc(&st->header); |
125 | 350 | if (cached != ~0 && MVM_sc_get_collectable_sc(tc, &st->header) == sc) |
126 | 0 | return cached; |
127 | 5.75k | for (i = 0; i < sc->body->num_stables; i++) |
128 | 5.75k | if (sc->body->root_stables[i] == st) |
129 | 350 | return i; |
130 | 0 | MVM_exception_throw_adhoc(tc, |
131 | 0 | "STable %s does not exist in serialization context", MVM_6model_get_stable_debug_name(tc, st)); |
132 | 0 | } |
133 | | |
134 | | /* Given an SC, looks up the index of a code ref that is in its root set. */ |
135 | 29 | MVMint64 MVM_sc_find_code_idx(MVMThreadContext *tc, MVMSerializationContext *sc, MVMObject *obj) { |
136 | 29 | MVMObject *roots; |
137 | 29 | MVMint64 i, count; |
138 | 29 | MVMuint32 cached = MVM_sc_get_idx_in_sc(&obj->header); |
139 | 29 | if (cached != ~0 && MVM_sc_get_collectable_sc(tc, &obj->header) == sc) |
140 | 23 | return cached; |
141 | 6 | roots = sc->body->root_codes; |
142 | 6 | count = MVM_repr_elems(tc, roots); |
143 | 18 | for (i = 0; i < count; i++) { |
144 | 18 | MVMObject *test = MVM_repr_at_pos_o(tc, roots, i); |
145 | 18 | if (test == obj) |
146 | 6 | return i; |
147 | 18 | } |
148 | 6 | |
149 | 0 | if (REPR(obj)->ID == MVM_REPR_ID_MVMCode) { |
150 | 0 | char *c_name = MVM_string_utf8_encode_C_string(tc, ((MVMCode *)obj)->body.name); |
151 | 0 | char *waste[] = { c_name, NULL }; |
152 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
153 | 0 | "Code ref '%s' does not exist in serialization context", |
154 | 0 | c_name); |
155 | 0 | } |
156 | 0 | else { |
157 | 0 | MVM_exception_throw_adhoc(tc, |
158 | 0 | "Code ref '<NOT A CODE OBJECT>' does not exist in serialization context"); |
159 | 0 | } |
160 | 0 | } |
161 | | |
162 | | /* Given a compilation unit and dependency index, returns that SC. Slow path |
163 | | * for when the SC may be NULL. */ |
164 | 2.97k | MVMSerializationContext * MVM_sc_get_sc_slow(MVMThreadContext *tc, MVMCompUnit *cu, MVMint16 dep) { |
165 | 2.97k | MVMSerializationContext *sc = cu->body.scs[dep]; |
166 | 2.97k | if (sc == NULL) { |
167 | 2.97k | MVMSerializationContextBody *scb = cu->body.scs_to_resolve[dep]; |
168 | 2.97k | if (!scb) |
169 | 0 | MVM_exception_throw_adhoc(tc, |
170 | 0 | "SC resolution: internal error"); |
171 | 2.97k | sc = scb->sc; |
172 | 2.97k | if (sc == NULL) |
173 | 0 | return NULL; |
174 | 2.97k | MVM_ASSIGN_REF(tc, &(cu->common.header), cu->body.scs[dep], sc); |
175 | 2.97k | scb->claimed = 1; |
176 | 2.97k | } |
177 | 2.97k | return sc; |
178 | 2.97k | } |
179 | | |
180 | | /* Checks if an SC is currently in the process of doing deserialization work. */ |
181 | 33.6M | MVM_STATIC_INLINE MVMint64 sc_working(MVMSerializationContext *sc) { |
182 | 33.6M | MVMSerializationReader *sr = sc->body->sr; |
183 | 31.6M | return sr && sr->working; |
184 | 33.6M | } |
185 | | |
186 | 440 | MVMuint8 MVM_sc_is_object_immediately_available(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx) { |
187 | 440 | MVMObject **roots = sc->body->root_objects; |
188 | 440 | MVMint64 count = sc->body->num_objects; |
189 | 440 | if (idx >= 0 && idx < count) { |
190 | 440 | if (roots[idx] && !sc_working(sc)) { |
191 | 439 | return 1; |
192 | 439 | } |
193 | 440 | } |
194 | 1 | return 0; |
195 | 440 | } |
196 | | |
197 | | /* Given an SC and an index, fetch the object stored there. */ |
198 | 32.4M | MVMObject * MVM_sc_get_object(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx) { |
199 | 32.4M | MVMObject **roots = sc->body->root_objects; |
200 | 32.4M | MVMint64 count = sc->body->num_objects; |
201 | 32.4M | if (MVM_LIKELY(idx >= 0 && idx < count)) |
202 | 32.4M | return roots[idx] && !sc_working(sc) |
203 | 31.2M | ? roots[idx] |
204 | 1.29M | : MVM_serialization_demand_object(tc, sc, idx); |
205 | 1 | else { |
206 | 1 | char *c_description = MVM_string_utf8_encode_C_string(tc, sc->body->description); |
207 | 1 | char *waste[] = { c_description, NULL }; |
208 | 1 | MVM_exception_throw_adhoc_free(tc, waste, |
209 | 1 | "Probable version skew in pre-compiled '%s' (cause: no object at index %"PRId64")", |
210 | 1 | c_description, idx); |
211 | 1 | } |
212 | 32.4M | } |
213 | | |
214 | | /* Given an SC and an index, fetch the object stored there, or return NULL if |
215 | | * there is none. Does not cause lazy deserialization. */ |
216 | 966k | MVMObject * MVM_sc_try_get_object(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx) { |
217 | 966k | MVMObject **roots = sc->body->root_objects; |
218 | 966k | MVMint64 count = sc->body->num_objects; |
219 | 966k | if (idx > 0 && idx < count && !sc_working(sc)) |
220 | 49.7k | return roots[idx]; |
221 | 966k | else |
222 | 916k | return NULL; |
223 | 966k | } |
224 | | |
225 | | /* Given an SC, an index, and an object, store the object at that index. */ |
226 | 919k | void MVM_sc_set_object(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx, MVMObject *obj) { |
227 | 919k | if (idx < 0) |
228 | 0 | MVM_exception_throw_adhoc(tc, "Invalid (negative) object root index %"PRId64"", idx); |
229 | 919k | if (idx < sc->body->num_objects) { |
230 | 913k | /* Just updating an existing one. */ |
231 | 913k | MVM_ASSIGN_REF(tc, &(sc->common.header), sc->body->root_objects[idx], obj); |
232 | 913k | } |
233 | 5.48k | else { |
234 | 5.48k | if (idx >= sc->body->alloc_objects) { |
235 | 2.79k | MVMint64 orig_size = sc->body->alloc_objects; |
236 | 2.79k | sc->body->alloc_objects *= 2; |
237 | 2.79k | if (sc->body->alloc_objects < idx + 1) |
238 | 1.34k | sc->body->alloc_objects = idx + 1; |
239 | 2.79k | sc->body->root_objects = MVM_recalloc(sc->body->root_objects, |
240 | 2.79k | orig_size * sizeof(MVMObject *), sc->body->alloc_objects * sizeof(MVMObject *)); |
241 | 2.79k | } |
242 | 5.48k | MVM_ASSIGN_REF(tc, &(sc->common.header), sc->body->root_objects[idx], obj); |
243 | 5.48k | sc->body->num_objects = idx + 1; |
244 | 5.48k | } |
245 | 919k | MVM_sc_set_idx_in_sc(&obj->header, idx); |
246 | 919k | } |
247 | | |
248 | | /* Given an SC and an index, fetch the STable stored there. */ |
249 | 944k | MVMSTable * MVM_sc_get_stable(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx) { |
250 | 944k | if (MVM_LIKELY(idx >= 0 && idx < sc->body->num_stables)) { |
251 | 944k | MVMSTable *got = sc->body->root_stables[idx]; |
252 | 918k | return got && !sc_working(sc) ? got : MVM_serialization_demand_stable(tc, sc, idx); |
253 | 944k | } |
254 | 0 | else { |
255 | 0 | char *c_description = MVM_string_utf8_encode_C_string(tc, sc->body->description); |
256 | 0 | char *waste[] = { c_description, NULL }; |
257 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
258 | 0 | "Probable version skew in pre-compiled '%s' (cause: no STable at index %"PRId64")", |
259 | 0 | c_description, idx); |
260 | 0 | } |
261 | 944k | } |
262 | | |
263 | | /* Given an SC and an index, fetch the STable stored there, or return NULL if there |
264 | | * is none. Does not cause lazy deserialization. */ |
265 | 25.4k | MVMSTable * MVM_sc_try_get_stable(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx) { |
266 | 25.4k | if (idx >= 0 && idx < sc->body->num_stables) |
267 | 25.4k | return sc->body->root_stables[idx]; |
268 | 25.4k | else |
269 | 0 | return NULL; |
270 | 25.4k | } |
271 | | |
272 | | /* Given an SC, an index, and an STable, store the STable at the index. */ |
273 | 26.8k | void MVM_sc_set_stable(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx, MVMSTable *st) { |
274 | 26.8k | if (MVM_UNLIKELY(idx < 0)) |
275 | 0 | MVM_exception_throw_adhoc(tc, |
276 | 0 | "Invalid (negative) STable index %"PRId64, idx); |
277 | 26.8k | if (idx < sc->body->num_stables) { |
278 | 25.4k | /* Just updating an existing one. */ |
279 | 25.4k | MVM_ASSIGN_REF(tc, &(sc->common.header), sc->body->root_stables[idx], st); |
280 | 25.4k | } |
281 | 1.44k | else { |
282 | 1.44k | if (idx >= sc->body->alloc_stables) { |
283 | 144 | MVMint64 orig_size = sc->body->alloc_stables; |
284 | 144 | sc->body->alloc_stables += 32; |
285 | 144 | if (sc->body->alloc_stables < idx + 1) |
286 | 0 | sc->body->alloc_stables = idx + 1; |
287 | 144 | sc->body->root_stables = MVM_realloc(sc->body->root_stables, |
288 | 144 | sc->body->alloc_stables * sizeof(MVMSTable *)); |
289 | 144 | memset(sc->body->root_stables + orig_size, 0, |
290 | 144 | (sc->body->alloc_stables - orig_size) * sizeof(MVMSTable *)); |
291 | 144 | } |
292 | 1.44k | MVM_ASSIGN_REF(tc, &(sc->common.header), sc->body->root_stables[idx], st); |
293 | 1.44k | sc->body->num_stables = idx + 1; |
294 | 1.44k | } |
295 | 26.8k | } |
296 | | |
297 | | |
298 | | /* Given an SC and an STable, pushes the STable to the end of the root list. */ |
299 | 676 | void MVM_sc_push_stable(MVMThreadContext *tc, MVMSerializationContext *sc, MVMSTable *st) { |
300 | 676 | MVMint64 idx = sc->body->num_stables; |
301 | 676 | if (idx == sc->body->alloc_stables) { |
302 | 169 | sc->body->alloc_stables += 16; |
303 | 169 | sc->body->root_stables = MVM_realloc(sc->body->root_stables, |
304 | 169 | sc->body->alloc_stables * sizeof(MVMSTable *)); |
305 | 169 | } |
306 | 676 | MVM_ASSIGN_REF(tc, &(sc->common.header), sc->body->root_stables[idx], st); |
307 | 676 | sc->body->num_stables++; |
308 | 676 | } |
309 | | |
310 | | /* Given an SC and an index, fetch the code ref stored there. */ |
311 | 242k | MVMObject * MVM_sc_get_code(MVMThreadContext *tc, MVMSerializationContext *sc, MVMint64 idx) { |
312 | 242k | MVMObject *roots = sc->body->root_codes; |
313 | 242k | MVMuint64 count = MVM_repr_elems(tc, roots); |
314 | 242k | if (idx < count) { |
315 | 242k | MVMObject *found = MVM_repr_at_pos_o(tc, roots, idx); |
316 | 217k | return MVM_is_null(tc, found) || sc_working(sc) |
317 | 242k | ? MVM_serialization_demand_code(tc, sc, idx) |
318 | 576 | : found; |
319 | 242k | } |
320 | 0 | else { |
321 | 0 | char *c_description = MVM_string_utf8_encode_C_string(tc, sc->body->description); |
322 | 0 | char *waste[] = { c_description, NULL }; |
323 | 0 | MVM_exception_throw_adhoc_free(tc, waste, |
324 | 0 | "Probable version skew in pre-compiled '%s' (cause: no code ref at index %"PRId64")", |
325 | 0 | c_description, idx); |
326 | 0 | } |
327 | 242k | } |
328 | | |
329 | | /* Resolves an SC handle using the SC weakhash. */ |
330 | 7.38k | MVMSerializationContext * MVM_sc_find_by_handle(MVMThreadContext *tc, MVMString *handle) { |
331 | 7.38k | MVMSerializationContextBody *scb; |
332 | 7.38k | uv_mutex_lock(&tc->instance->mutex_sc_registry); |
333 | 7.38k | MVM_HASH_GET(tc, tc->instance->sc_weakhash, handle, scb); |
334 | 7.38k | uv_mutex_unlock(&tc->instance->mutex_sc_registry); |
335 | 7.38k | return scb && scb->sc ? scb->sc : NULL; |
336 | 7.38k | } |
337 | | |
/* Marks all objects, stables and codes that belong to this SC as free to be
 * taken by another. Clears the SC index recorded in each collectable's
 * header and empties the SC's root sets. */
void MVM_sc_disclaim(MVMThreadContext *tc, MVMSerializationContext *sc) {
    MVMObject **root_objects, *root_codes, *obj;
    MVMSTable **root_stables, *stable;
    MVMint64 i, count;
    MVMCollectable *col;
    if (REPR(sc)->ID != MVM_REPR_ID_SCRef)
        MVM_exception_throw_adhoc(tc,
            "Must provide an SCRef operand to scdisclaim");

    /* Clear ownership of every object root. */
    root_objects = sc->body->root_objects;
    count = sc->body->num_objects;
    for (i = 0; i < count; i++) {
        obj = root_objects[i];
        col = &obj->header;
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
        /* Free any overflow serialization index record before clearing the
         * inline sc_idx/idx pair. */
        if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
            struct MVMSerializationIndex *const sci = col->sc_forward_u.sci;
            col->sc_forward_u.sci = NULL;
            MVM_free(sci);
        }
        col->sc_forward_u.sc.sc_idx = 0;
        col->sc_forward_u.sc.idx = 0;
#else
        col->sc_forward_u.sc.sc_idx = 0;
        col->sc_forward_u.sc.idx = 0;
#endif
    }
    sc->body->num_objects = 0;

    /* Clear ownership of every STable root. */
    root_stables = sc->body->root_stables;
    count = sc->body->num_stables;
    for (i = 0; i < count; i++) {
        stable = root_stables[i];
        col = &stable->header;
        col->sc_forward_u.sc.sc_idx = 0;
    }
    sc->body->num_stables = 0;

    /* Clear ownership of every code root; a null slot means the code ref
     * was never deserialized, so demand it first in order to clear it. */
    root_codes = sc->body->root_codes;
    count = MVM_repr_elems(tc, root_codes);
    for (i = 0; i < count; i++) {
        obj = MVM_repr_at_pos_o(tc, root_codes, i);
        if (MVM_is_null(tc, obj))
            obj = MVM_serialization_demand_code(tc, sc, i);
        col = &obj->header;
        col->sc_forward_u.sc.sc_idx = 0;
    }
    sc->body->root_codes = NULL;
}
388 | | |
389 | | /* SC repossession barrier. */ |
390 | 25.6M | void MVM_SC_WB_OBJ(MVMThreadContext *tc, MVMObject *obj) { |
391 | 25.6M | assert(!(obj->header.flags & MVM_CF_FORWARDER_VALID)); |
392 | 25.6M | assert(MVM_sc_get_idx_of_sc(&obj->header) != ~0); |
393 | 25.6M | if (MVM_sc_get_idx_of_sc(&obj->header) > 0) |
394 | 21.0k | MVM_sc_wb_hit_obj(tc, obj); |
395 | 25.6M | } |
396 | | |
/* Called when an object triggers the SC repossession write barrier: decides
 * whether the object (or the object that really owns it) must be
 * repossessed into the SC currently being compiled, and if so records the
 * repossession. */
void MVM_sc_wb_hit_obj(MVMThreadContext *tc, MVMObject *obj) {
    MVMSerializationContext *comp_sc;

    /* If the WB is disabled or we're not compiling, can exit quickly. */
    if (tc->sc_wb_disable_depth)
        return;
    if (!tc->compiling_scs || !MVM_repr_elems(tc, tc->compiling_scs))
        return;

    /* Same if the object is flagged as one to never repossess. */
    if (obj->header.flags & MVM_CF_NEVER_REPOSSESS)
        return;

    /* Otherwise, check that the object's SC is different from the SC
     * of the compilation we're currently in. Repossess if so. */
    comp_sc = (MVMSerializationContext *)MVM_repr_at_pos_o(tc, tc->compiling_scs, 0);
    if (MVM_sc_get_obj_sc(tc, obj) != comp_sc) {
        /* Get new slot ID. */
        MVMint64 new_slot = comp_sc->body->num_objects;

        /* See if the object is actually owned by another, and it's the
         * owner we need to repossess. BOOTArray/BOOTHash objects appear in
         * their SC's owned_objects list as (object, owner) pairs. */
        if (obj->st->WHAT == tc->instance->boot_types.BOOTArray ||
                obj->st->WHAT == tc->instance->boot_types.BOOTHash) {
            MVMObject *owned_objects = MVM_sc_get_obj_sc(tc, obj)->body->owned_objects;
            MVMint64 n = MVM_repr_elems(tc, owned_objects);
            MVMint64 found = 0;
            MVMint64 i;
            for (i = 0; i < n; i += 2) {
                if (MVM_repr_at_pos_o(tc, owned_objects, i) == obj) {
                    /* Switch to repossessing the owner instead. */
                    MVMSerializationContext *real_sc;
                    obj = MVM_repr_at_pos_o(tc, owned_objects, i + 1);
                    real_sc = MVM_sc_get_obj_sc(tc, obj);
                    if (!real_sc)
                        return; /* Probably disclaimed. */
                    if (real_sc == comp_sc)
                        return; /* Owner already belongs to this compilation. */
                    found = 1;
                    break;
                }
            }
            if (!found)
                return;
        }

        /* Add to root set. */
        MVM_sc_set_object(tc, comp_sc, new_slot, obj);

        /* Add repossession entry (shifted index; low bit 0 marks an object,
         * vs. 1 for an STable in MVM_sc_wb_hit_st). */
        MVM_repr_push_i(tc, comp_sc->body->rep_indexes, new_slot << 1);
        MVM_repr_push_o(tc, comp_sc->body->rep_scs, (MVMObject *)MVM_sc_get_obj_sc(tc, obj));

        /* Update SC of the object, claiming it, and update index too. */
        MVM_sc_set_obj_sc(tc, obj, comp_sc);
        MVM_sc_set_idx_in_sc(&(obj->header), new_slot);
    }
}
455 | | |
/* Called when an STable triggers the SC repossession write barrier:
 * repossesses the STable into the SC currently being compiled if it belongs
 * to a different SC. */
void MVM_sc_wb_hit_st(MVMThreadContext *tc, MVMSTable *st) {
    MVMSerializationContext *comp_sc;

    /* If the WB is disabled or we're not compiling, can exit quickly. */
    if (tc->sc_wb_disable_depth)
        return;
    if (!tc->compiling_scs || !MVM_repr_elems(tc, tc->compiling_scs))
        return;

    /* Otherwise, check that the STable's SC is different from the SC
     * of the compilation we're currently in. Repossess if so. */
    comp_sc = (MVMSerializationContext *)MVM_repr_at_pos_o(tc, tc->compiling_scs, 0);
    if (MVM_sc_get_stable_sc(tc, st) != comp_sc) {
        /* Add to root set. */
        MVMint64 new_slot = comp_sc->body->num_stables;
        MVM_sc_push_stable(tc, comp_sc, st);

        /* Add repossession entry (low bit 1 tags this as an STable, vs. 0
         * for an object in MVM_sc_wb_hit_obj). */
        MVM_repr_push_i(tc, comp_sc->body->rep_indexes, (new_slot << 1) | 1);
        MVM_repr_push_o(tc, comp_sc->body->rep_scs, (MVMObject *)MVM_sc_get_stable_sc(tc, st));

        /* Update SC of the STable, claiming it. */
        MVM_sc_set_stable_sc(tc, st, comp_sc);
        MVM_sc_set_idx_in_sc(&(st->header), new_slot);
    }
}