/home/travis/build/MoarVM/MoarVM/src/gc/orchestrate.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "moar.h" |
2 | | #include <platform/threads.h> |
3 | | |
4 | | /* If we have the job of doing GC for a thread, we add it to our work |
5 | | * list. */ |
6 | 750 | static void add_work(MVMThreadContext *tc, MVMThreadContext *stolen) { |
7 | 750 | MVMint32 i; |
8 | 1.16k | for (i = 0; i < tc->gc_work_count; i++) |
9 | 413 | if (tc->gc_work[i].tc == stolen) |
10 | 0 | return; |
11 | 750 | if (tc->gc_work == NULL) { |
12 | 84 | tc->gc_work_size = 16; |
13 | 84 | tc->gc_work = MVM_malloc(tc->gc_work_size * sizeof(MVMWorkThread)); |
14 | 84 | } |
15 | 666 | else if (tc->gc_work_count == tc->gc_work_size) { |
16 | 0 | tc->gc_work_size *= 2; |
17 | 0 | tc->gc_work = MVM_realloc(tc->gc_work, tc->gc_work_size * sizeof(MVMWorkThread)); |
18 | 0 | } |
19 | 750 | tc->gc_work[tc->gc_work_count++].tc = stolen; |
20 | 750 | } |
21 | | |
22 | | /* Goes through all threads but the current one and notifies them that a |
23 | | * GC run is starting. Those that are blocked are considered excluded from |
24 | | * the run, and are not counted. Returns the count of threads that should be |
25 | | * added to the finished countdown. */ |
static MVMuint32 signal_one_thread(MVMThreadContext *tc, MVMThreadContext *to_signal) {
    /* Loop here since we may not succeed first time (e.g. the status of the
     * thread may change between the two ways we try to twiddle it). */
    int had_suspend_request = 0;
    while (1) {
        /* Snapshot the target's status; each case below CASes against what
         * we saw, retrying from the top if the target raced us. */
        AO_t current = MVM_load(&to_signal->gc_status);
        switch (current) {
            case MVMGCStatus_NONE:
                /* Try to set it from running to interrupted - the common case.
                 * On success the target will join the run itself, so it counts
                 * towards the finish countdown (return 1). */
                if (MVM_cas(&to_signal->gc_status, MVMGCStatus_NONE,
                        MVMGCStatus_INTERRUPT) == MVMGCStatus_NONE) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Signalled thread %d to interrupt\n", to_signal->thread_id);
                    return 1;
                }
                break;
            case MVMGCStatus_INTERRUPT | MVMSuspendState_SUSPEND_REQUEST:
            case MVMGCStatus_INTERRUPT:
                /* Someone else (or an earlier pass) already signalled it; it
                 * is accounted for elsewhere, so contribute nothing. */
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : thread %d already interrupted\n", to_signal->thread_id);
                return 0;
            case MVMGCStatus_UNABLE | MVMSuspendState_SUSPEND_REQUEST:
            case MVMGCStatus_UNABLE | MVMSuspendState_SUSPENDED:
                /* Remember the suspend bits so the CAS below preserves them. */
                had_suspend_request = current & MVMSUSPENDSTATUS_MASK;
                /* fallthrough */
            case MVMGCStatus_UNABLE:
                /* Otherwise, it's blocked; try to set it to work Stolen. We
                 * then do its GC work ourselves, so it does not vote to
                 * finish (return 0). */
                if (MVM_cas(&to_signal->gc_status, MVMGCStatus_UNABLE | had_suspend_request,
                        MVMGCStatus_STOLEN | had_suspend_request) == (MVMGCStatus_UNABLE | had_suspend_request)) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : A blocked thread %d spotted; work stolen\n", to_signal->thread_id);
                    add_work(tc, to_signal);
                    return 0;
                }
                break;
            /* this case occurs if a child thread is Stolen by its parent
             * before we get to it in the chain. */
            case MVMGCStatus_STOLEN | MVMSuspendState_SUSPEND_REQUEST:
            case MVMGCStatus_STOLEN | MVMSuspendState_SUSPENDED:
            case MVMGCStatus_STOLEN:
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : thread %d already stolen (it was a spawning child)\n", to_signal->thread_id);
                return 0;
            default:
                MVM_panic(MVM_exitcode_gcorch, "invalid status %"MVM_PRSz" in GC orchestrate\n", MVM_load(&to_signal->gc_status));
                return 0;
        }
    }
}
71 | 338 | static MVMuint32 signal_all(MVMThreadContext *tc, MVMThread *threads) { |
72 | 338 | MVMThread *t = threads; |
73 | 338 | MVMuint32 count = 0; |
74 | 1.18k | while (t) { |
75 | 850 | switch (MVM_load(&t->body.stage)) { |
76 | 702 | case MVM_thread_stage_starting: |
77 | 702 | case MVM_thread_stage_waiting: |
78 | 702 | case MVM_thread_stage_started: |
79 | 702 | /* Don't signal ourself. */ |
80 | 702 | if (t->body.tc != tc) |
81 | 364 | count += signal_one_thread(tc, t->body.tc); |
82 | 702 | break; |
83 | 25 | case MVM_thread_stage_exited: |
84 | 25 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : queueing to clear nursery of thread %d\n", t->body.tc->thread_id); |
85 | 25 | add_work(tc, t->body.tc); |
86 | 25 | break; |
87 | 23 | case MVM_thread_stage_clearing_nursery: |
88 | 23 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : queueing to destroy thread %d\n", t->body.tc->thread_id); |
89 | 23 | /* last GC run for this thread */ |
90 | 23 | add_work(tc, t->body.tc); |
91 | 23 | break; |
92 | 100 | case MVM_thread_stage_destroyed: |
93 | 100 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : found a destroyed thread\n"); |
94 | 100 | /* will be cleaned up (removed from the lists) shortly */ |
95 | 100 | break; |
96 | 0 | default: |
97 | 0 | MVM_panic(MVM_exitcode_gcorch, "Corrupted MVMThread or running threads list: invalid thread stage %"MVM_PRSz"", MVM_load(&t->body.stage)); |
98 | 850 | } |
99 | 850 | t = t->body.next; |
100 | 850 | } |
101 | 338 | return count; |
102 | 338 | } |
103 | | |
104 | | /* Does work in a thread's in-tray, if any. Returns a non-zero value if work |
105 | | * was found and done, and zero otherwise. */ |
106 | 2.32k | static int process_in_tray(MVMThreadContext *tc, MVMuint8 gen) { |
107 | 2.32k | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Considering extra work\n"); |
108 | 2.32k | if (MVM_load(&tc->gc_in_tray)) { |
109 | 31 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, |
110 | 31 | "Thread %d run %d : Was given extra work by another thread; doing it\n"); |
111 | 31 | MVM_gc_collect(tc, MVMGCWhatToDo_InTray, gen); |
112 | 31 | return 1; |
113 | 31 | } |
114 | 2.29k | return 0; |
115 | 2.32k | } |
116 | | |
117 | | /* Called by a thread when it thinks it is done with GC. It may get some more |
118 | | * work yet, though. */ |
119 | 676 | static void clear_intrays(MVMThreadContext *tc, MVMuint8 gen) { |
120 | 676 | MVMuint32 did_work = 1; |
121 | 1.37k | while (did_work) { |
122 | 701 | MVMThread *cur_thread; |
123 | 701 | did_work = 0; |
124 | 701 | cur_thread = (MVMThread *)MVM_load(&tc->instance->threads); |
125 | 2.45k | while (cur_thread) { |
126 | 1.75k | if (cur_thread->body.tc) |
127 | 1.55k | did_work += process_in_tray(cur_thread->body.tc, gen); |
128 | 1.75k | cur_thread = cur_thread->body.next; |
129 | 1.75k | } |
130 | 701 | } |
131 | 676 | } |
/* Runs the end-of-collection protocol for this thread: drain in-trays for
 * the threads whose work we own, vote on the gc_finish countdown, let the
 * coordinator settle global state, then reset per-thread GC status (and
 * destroy threads whose last collection this was) before acknowledging
 * via gc_ack. */
static void finish_gc(MVMThreadContext *tc, MVMuint8 gen, MVMuint8 is_coordinator) {
    MVMuint32 i, did_work;

    /* Do any extra work that we have been passed. Loop until a full pass
     * over our work list finds all in-trays empty. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
        "Thread %d run %d : doing any work in thread in-trays\n");
    did_work = 1;
    while (did_work) {
        did_work = 0;
        for (i = 0; i < tc->gc_work_count; i++)
            did_work += process_in_tray(tc->gc_work[i].tc, gen);
    }

    /* Decrement gc_finish to say we're done, and wait for termination.
     * All participants block here until the counter hits zero. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Voting to finish\n");
    uv_mutex_lock(&tc->instance->mutex_gc_orchestrate);
    MVM_decr(&tc->instance->gc_finish);
    uv_cond_broadcast(&tc->instance->cond_gc_finish);
    while (MVM_load(&tc->instance->gc_finish))
        uv_cond_wait(&tc->instance->cond_gc_finish, &tc->instance->mutex_gc_orchestrate);
    uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate);
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Termination agreed\n");

    /* Co-ordinator should do final check over all the in-trays, and trigger
     * collection until all is settled. Rest should wait. Additionally, after
     * in-trays are settled, coordinator walks threads looking for anything
     * that needs adding to the finalize queue. It then will make another
     * iteration over in-trays to handle cross-thread references to objects
     * needing finalization. For full collections, collected objects are then
     * cleaned from all inter-generational sets, and finally any objects to
     * be freed at the fixed size allocator's next safepoint are freed. */
    if (is_coordinator) {
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator handling in-tray clearing completion\n");
        clear_intrays(tc, gen);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator handling finalizers\n");
        MVM_finalize_walk_queues(tc, gen);
        clear_intrays(tc, gen);

        if (gen == MVMGCGenerations_Both) {
            /* Full collection: drop collected objects from every thread's
             * inter-generational root set. */
            MVMThread *cur_thread = (MVMThread *)MVM_load(&tc->instance->threads);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : Co-ordinator handling inter-gen root cleanup\n");
            while (cur_thread) {
                if (cur_thread->body.tc)
                    MVM_gc_root_gen2_cleanup(cur_thread->body.tc);
                cur_thread = cur_thread->body.next;
            }
        }

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator handling fixed-size allocator safepoint frees\n");
        MVM_fixed_size_safepoint(tc, tc->instance->fsa);

        MVM_profile_dump_instrumented_data(tc);
        MVM_profile_heap_take_snapshot(tc);

        /* Release the non-coordinator threads waiting in the else branch. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator signalling in-trays clear\n");
        uv_mutex_lock(&tc->instance->mutex_gc_orchestrate);
        MVM_store(&tc->instance->gc_intrays_clearing, 0);
        uv_cond_broadcast(&tc->instance->cond_gc_intrays_clearing);
        uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate);
    }
    else {
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Waiting for in-tray clearing completion\n");
        uv_mutex_lock(&tc->instance->mutex_gc_orchestrate);
        while (MVM_load(&tc->instance->gc_intrays_clearing))
            uv_cond_wait(&tc->instance->cond_gc_intrays_clearing, &tc->instance->mutex_gc_orchestrate);
        uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Got in-tray clearing complete notice\n");
    }

    /* Reset GC status flags. This is also where thread destruction happens,
     * and it needs to happen before we acknowledge this GC run is finished. */
    for (i = 0; i < tc->gc_work_count; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;
        MVMThread *thread_obj = other->thread_obj;
        if (MVM_load(&thread_obj->body.stage) == MVM_thread_stage_clearing_nursery) {
            /* Final collection for this thread: move its surviving gen2
             * objects to us, then tear the context down entirely. */
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : transferring gen2 of thread %d\n", other->thread_id);
            MVM_gc_gen2_transfer(other, tc);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : destroying thread %d\n", other->thread_id);
            MVM_tc_destroy(other);
            tc->gc_work[i].tc = thread_obj->body.tc = NULL;
            MVM_store(&thread_obj->body.stage, MVM_thread_stage_destroyed);
        }
        else {
            /* Free gen2 unmarked if full collection. */
            if (gen == MVMGCGenerations_Both) {
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                    "Thread %d run %d : freeing gen2 of thread %d\n",
                    other->thread_id);
                MVM_gc_collect_free_gen2_unmarked(other, 0);
            }

            /* Contribute this thread's promoted bytes. */
            MVM_add(&tc->instance->gc_promoted_bytes_since_last_full, other->gc_promoted_bytes);

            /* Collect nursery. */
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : collecting nursery uncopied of thread %d\n",
                other->thread_id);
            MVM_gc_collect_free_nursery_uncopied(other, tc->gc_work[i].limit);

            /* Handle exited threads. */
            if (MVM_load(&thread_obj->body.stage) == MVM_thread_stage_exited) {
                /* Don't bother freeing gen2; we'll do it next time */
                MVM_store(&thread_obj->body.stage, MVM_thread_stage_clearing_nursery);
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                    "Thread %d run %d : set thread %d clearing nursery stage to %d\n",
                    other->thread_id, (int)MVM_load(&thread_obj->body.stage));
            }

            /* Mark thread free to continue. Exactly one of these CASes takes
             * effect, depending on whether its work was stolen or it
             * participated itself. */
            MVM_cas(&other->gc_status, MVMGCStatus_STOLEN, MVMGCStatus_UNABLE);
            MVM_cas(&other->gc_status, MVMGCStatus_INTERRUPT, MVMGCStatus_NONE);
        }
    }

    /* Signal acknowledgement of completing the cleanup,
     * except for STables, and if we're the final to do
     * so, free the STables, which have been linked. */
    if (MVM_decr(&tc->instance->gc_ack) == 2) {
        /* Set it to zero (we're guaranteed the only ones trying to write to
         * it here). Actual STable free in MVM_gc_enter_from_allocator. */
        MVM_store(&tc->instance->gc_ack, 0);

        /* Also clear in GC flag. */
        uv_mutex_lock(&tc->instance->mutex_gc_orchestrate);
        tc->instance->in_gc = 0;
        uv_cond_broadcast(&tc->instance->cond_blocked_can_continue);
        uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate);
    }
}
272 | | |
273 | | /* Called by a thread to indicate it is about to enter a blocking operation. |
274 | | * This tells any thread that is coordinating a GC run that this thread will |
275 | | * be unable to participate. */ |
void MVM_gc_mark_thread_blocked(MVMThreadContext *tc) {
    /* This may need more than one attempt. */
    while (1) {
        /* Try to set it from running to unable - the common case. */
        if (MVM_cas(&tc->gc_status, MVMGCStatus_NONE,
                MVMGCStatus_UNABLE) == MVMGCStatus_NONE)
            return;

        /* If the debug server asked us to suspend, blocking doubles as the
         * suspension: become UNABLE with the SUSPENDED bit set. */
        if (MVM_cas(&tc->gc_status, MVMGCStatus_INTERRUPT | MVMSuspendState_SUSPEND_REQUEST,
                MVMGCStatus_UNABLE | MVMSuspendState_SUSPENDED) == (MVMGCStatus_INTERRUPT | MVMSuspendState_SUSPEND_REQUEST))
            return;

        /* The only way this can fail is if another thread just decided we're to
         * participate in a GC run. Do the GC, then loop to retry blocking. */
        if (MVM_load(&tc->gc_status) == MVMGCStatus_INTERRUPT)
            MVM_gc_enter_from_interrupt(tc);
        else
            MVM_panic(MVM_exitcode_gcorch,
                "Invalid GC status observed while blocking thread; aborting");
    }
}
297 | | |
/* Called by a thread to indicate it has completed a blocking operation and is
 * thus able to participate in a GC run again. Note that this case needs some
 * special handling if it comes out of this mode when a GC run is taking place. */
void MVM_gc_mark_thread_unblocked(MVMThreadContext *tc) {
    /* Try to set it from unable to running. */
    while (MVM_cas(&tc->gc_status, MVMGCStatus_UNABLE,
            MVMGCStatus_NONE) != MVMGCStatus_UNABLE) {
        /* We can't, presumably because a GC run is going on. We should wait
         * for that to finish before we go on; try using a condvar for it. */
        uv_mutex_lock(&tc->instance->mutex_gc_orchestrate);
        if (tc->instance->in_gc) {
            uv_cond_wait(&tc->instance->cond_blocked_can_continue,
                &tc->instance->mutex_gc_orchestrate);
            uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate);
        }
        else {
            uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate);
            if ((MVM_load(&tc->gc_status) & MVMSUSPENDSTATUS_MASK) == MVMSuspendState_SUSPEND_REQUEST) {
                while (1) {
                    /* Let's try to unblock into INTERRUPT mode and keep the
                     * suspend request, then immediately enter_from_interrupt,
                     * so we actually wait to be woken up. */
                    if (MVM_cas(&tc->gc_status, MVMGCStatus_UNABLE | MVMSuspendState_SUSPEND_REQUEST,
                            MVMGCStatus_INTERRUPT | MVMSuspendState_SUSPEND_REQUEST) ==
                            (MVMGCStatus_UNABLE | MVMSuspendState_SUSPEND_REQUEST)) {
                        MVM_gc_enter_from_interrupt(tc);
                        break;
                    }
                    /* If we're being resumed while trying to unblock into
                     * suspend request, we'd block forever. Therefore we have
                     * to check if we've been un-requested. */
                    if (MVM_cas(&tc->gc_status, MVMGCStatus_UNABLE,
                            MVMGCStatus_NONE) ==
                            MVMGCStatus_UNABLE) {
                        return;
                    }
                    /* Neither CAS hit: status is mid-transition; retry. */
                }
            } else if (MVM_load(&tc->gc_status) == MVMGCStatus_NONE) {
                /* Unexpected: someone else already flipped us to NONE. Log
                 * it and treat the unblock as done. */
                fprintf(stderr, "marking thread %d unblocked, but its status is already NONE.\n", tc->thread_id);
                break;
            } else {
                /* Status in flux (e.g. STOLEN work still in progress); give
                 * up the CPU and retry the outer CAS. */
                MVM_platform_thread_yield();
            }
        }
    }
}
344 | | |
345 | | /* Checks if a thread has marked itself as blocked. Considers that the GC may |
346 | | * have stolen its work and marked it as such also. So what this really |
347 | | * answers is, "did this thread mark itself blocked, and since then not mark |
348 | | * itself unblocked", which is useful if you need to conditionally unblock |
349 | | * or re-block. If the status changes from blocked to stolen or stolen to |
350 | | * blocked between checking this and calling unblock, it's safe anyway since |
351 | | * these cases are handled in MVM_gc_mark_thread_unblocked. Note that this |
352 | | * relies on a thread itself only ever calling block/unblock. */ |
353 | 0 | MVMint32 MVM_gc_is_thread_blocked(MVMThreadContext *tc) { |
354 | 0 | AO_t gc_status = MVM_load(&(tc->gc_status)) & MVMGCSTATUS_MASK; |
355 | 0 | return gc_status == MVMGCStatus_UNABLE || |
356 | 0 | gc_status == MVMGCStatus_STOLEN; |
357 | 0 | } |
358 | | |
359 | 338 | static MVMint32 is_full_collection(MVMThreadContext *tc) { |
360 | 338 | MVMuint64 percent_growth, promoted; |
361 | 338 | size_t rss; |
362 | 338 | |
363 | 338 | /* If it's below the absolute minimum, quickly return. */ |
364 | 338 | promoted = (MVMuint64)MVM_load(&tc->instance->gc_promoted_bytes_since_last_full); |
365 | 338 | if (promoted < MVM_GC_GEN2_THRESHOLD_MINIMUM) |
366 | 338 | return 0; |
367 | 338 | |
368 | 338 | /* If we're heap profiling then don't consider the resident set size, as |
369 | 338 | * it will be hugely distorted by the profile data we record. */ |
370 | 0 | if (MVM_profile_heap_profiling(tc)) |
371 | 0 | return 1; |
372 | 0 |
|
373 | 0 | /* Otherwise, consider percentage of resident set size. */ |
374 | 0 | if (uv_resident_set_memory(&rss) < 0 || rss == 0) |
375 | 0 | rss = 50 * 1024 * 1024; |
376 | 0 | percent_growth = (100 * promoted) / (MVMuint64)rss; |
377 | 0 |
|
378 | 0 | return percent_growth >= MVM_GC_GEN2_THRESHOLD_PERCENT; |
379 | 0 | } |
380 | | |
/* Performs the collection proper for this thread and every thread whose
 * work it stole, then runs the finish/acknowledge protocol. what_to_do is
 * MVMGCWhatToDo_All for the coordinator, which also collects
 * instance-level roots. */
static void run_gc(MVMThreadContext *tc, MVMuint8 what_to_do) {
    MVMuint8 gen;
    MVMuint32 i, n;

    unsigned int interval_id;

#if MVM_GC_DEBUG
    if (tc->in_spesh)
        MVM_panic(1, "Must not GC when in the specializer/JIT\n");
#endif

    /* Decide nursery or full collection. */
    gen = tc->instance->gc_full_collect ? MVMGCGenerations_Both : MVMGCGenerations_Nursery;

    if (tc->instance->gc_full_collect) {
        interval_id = MVM_telemetry_interval_start(tc, "start full collection");
    } else {
        interval_id = MVM_telemetry_interval_start(tc, "start minor collection");
    }

    /* Do GC work for ourselves and any work threads. The limit snapshot is
     * used later by finish_gc when freeing uncopied nursery objects. */
    for (i = 0, n = tc->gc_work_count ; i < n; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;
        tc->gc_work[i].limit = other->nursery_alloc;
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : starting collection for thread %d\n",
            other->thread_id);
        other->gc_promoted_bytes = 0;
        /* Only the thread entry owning the run processes instance roots. */
        MVM_gc_collect(other, (other == tc ? what_to_do : MVMGCWhatToDo_NoInstance), gen);
    }

    /* Wait for everybody to agree we're done. */
    finish_gc(tc, gen, what_to_do == MVMGCWhatToDo_All);

    MVM_telemetry_interval_stop(tc, interval_id, "finished run_gc");
}
416 | | |
417 | | /* This is called when the allocator finds it has run out of memory and wants |
418 | | * to trigger a GC run. In this case, it's possible (probable, really) that it |
419 | | * will need to do that triggering, notifying other running threads that the |
420 | | * time has come to GC. */ |
421 | 338 | void MVM_gc_enter_from_allocator(MVMThreadContext *tc) { |
422 | 338 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from allocate\n"); |
423 | 338 | |
424 | 338 | MVM_telemetry_timestamp(tc, "gc_enter_from_allocator"); |
425 | 338 | |
426 | 338 | /* Try to start the GC run. */ |
427 | 338 | if (MVM_trycas(&tc->instance->gc_start, 0, 1)) { |
428 | 338 | MVMThread *last_starter = NULL; |
429 | 338 | MVMuint32 num_threads = 0; |
430 | 338 | |
431 | 338 | /* Stash us as the thread to blame for this GC run (used to give it a |
432 | 338 | * potential nursery size boost). */ |
433 | 338 | tc->instance->thread_to_blame_for_gc = tc; |
434 | 338 | |
435 | 338 | /* Need to wait for other threads to reset their gc_status. */ |
436 | 0 | while (MVM_load(&tc->instance->gc_ack)) { |
437 | 0 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, |
438 | 0 | "Thread %d run %d : waiting for other thread's gc_ack\n"); |
439 | 0 | MVM_platform_thread_yield(); |
440 | 0 | } |
441 | 338 | |
442 | 338 | /* We are the winner of the GC starting race. This gives us some |
443 | 338 | * extra responsibilities as well as doing the usual things. |
444 | 338 | * First, increment GC sequence number. */ |
445 | 338 | MVM_incr(&tc->instance->gc_seq_number); |
446 | 338 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, |
447 | 338 | "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n", |
448 | 0 | (int)MVM_load(&tc->instance->gc_seq_number)); |
449 | 338 | |
450 | 338 | /* Decide if it will be a full collection. */ |
451 | 338 | tc->instance->gc_full_collect = is_full_collection(tc); |
452 | 338 | |
453 | 338 | MVM_telemetry_timestamp(tc, "won the gc starting race"); |
454 | 338 | |
455 | 338 | /* If profiling, record that GC is starting. */ |
456 | 338 | if (tc->instance->profiling) |
457 | 0 | MVM_profiler_log_gc_start(tc, tc->instance->gc_full_collect, 1); |
458 | 338 | |
459 | 338 | /* Ensure our stolen list is empty. */ |
460 | 338 | tc->gc_work_count = 0; |
461 | 338 | |
462 | 338 | /* Flag that we didn't agree on this run that all the in-trays are |
463 | 338 | * cleared (a responsibility of the co-ordinator. */ |
464 | 338 | MVM_store(&tc->instance->gc_intrays_clearing, 1); |
465 | 338 | |
466 | 338 | /* We'll take care of our own work. */ |
467 | 338 | add_work(tc, tc); |
468 | 338 | |
469 | 338 | /* Find other threads, and signal or steal. Also set in GC flag. */ |
470 | 338 | uv_mutex_lock(&tc->instance->mutex_threads); |
471 | 338 | tc->instance->in_gc = 1; |
472 | 338 | num_threads = signal_all(tc, tc->instance->threads); |
473 | 338 | uv_mutex_unlock(&tc->instance->mutex_threads); |
474 | 338 | |
475 | 338 | /* Bump the thread count and signal any threads waiting for that. */ |
476 | 338 | uv_mutex_lock(&tc->instance->mutex_gc_orchestrate); |
477 | 338 | MVM_add(&tc->instance->gc_start, num_threads); |
478 | 338 | uv_cond_broadcast(&tc->instance->cond_gc_start); |
479 | 338 | uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate); |
480 | 338 | |
481 | 338 | /* If there's an event loop thread, wake it up to participate. */ |
482 | 338 | if (tc->instance->event_loop_wakeup) |
483 | 0 | uv_async_send(tc->instance->event_loop_wakeup); |
484 | 338 | |
485 | 338 | /* Wait for other threads to be ready. */ |
486 | 338 | uv_mutex_lock(&tc->instance->mutex_gc_orchestrate); |
487 | 437 | while (MVM_load(&tc->instance->gc_start) > 1) |
488 | 99 | uv_cond_wait(&tc->instance->cond_gc_start, &tc->instance->mutex_gc_orchestrate); |
489 | 338 | uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate); |
490 | 338 | |
491 | 338 | /* Sanity check finish votes. */ |
492 | 338 | if (MVM_load(&tc->instance->gc_finish) != 0) |
493 | 0 | MVM_panic(MVM_exitcode_gcorch, "Finish votes was %"MVM_PRSz"\n", |
494 | 0 | MVM_load(&tc->instance->gc_finish)); |
495 | 338 | |
496 | 338 | /* gc_ack gets an extra so the final acknowledger |
497 | 338 | * can also free the STables. */ |
498 | 338 | MVM_store(&tc->instance->gc_finish, num_threads + 1); |
499 | 338 | MVM_store(&tc->instance->gc_ack, num_threads + 2); |
500 | 338 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : finish votes is %d\n", |
501 | 0 | (int)MVM_load(&tc->instance->gc_finish)); |
502 | 338 | |
503 | 338 | /* Now we're ready to start, zero promoted since last full collection |
504 | 338 | * counter if this is a full collect. */ |
505 | 338 | if (tc->instance->gc_full_collect) |
506 | 338 | MVM_store(&tc->instance->gc_promoted_bytes_since_last_full, 0); |
507 | 338 | |
508 | 338 | /* This is a safe point for us to free any STables that have been marked |
509 | 338 | * for deletion in the previous collection (since we let finalization - |
510 | 338 | * which appends to this list - happen after we set threads on their |
511 | 338 | * way again, it's not safe to do it in the previous collection). */ |
512 | 338 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Freeing STables if needed\n"); |
513 | 338 | MVM_gc_collect_free_stables(tc); |
514 | 338 | |
515 | 338 | /* Signal to the rest to start */ |
516 | 338 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator signalling start\n"); |
517 | 338 | uv_mutex_lock(&tc->instance->mutex_gc_orchestrate); |
518 | 338 | if (MVM_decr(&tc->instance->gc_start) != 1) |
519 | 0 | MVM_panic(MVM_exitcode_gcorch, "Start votes was %"MVM_PRSz"\n", MVM_load(&tc->instance->gc_start)); |
520 | 338 | uv_cond_broadcast(&tc->instance->cond_gc_start); |
521 | 338 | uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate); |
522 | 338 | |
523 | 338 | /* Start collecting. */ |
524 | 338 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator entering run_gc\n"); |
525 | 338 | run_gc(tc, MVMGCWhatToDo_All); |
526 | 338 | |
527 | 338 | /* If profiling, record that GC is over. */ |
528 | 338 | if (tc->instance->profiling) |
529 | 0 | MVM_profiler_log_gc_end(tc); |
530 | 338 | |
531 | 338 | MVM_telemetry_timestamp(tc, "gc finished"); |
532 | 338 | |
533 | 338 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete (coordinator)\n"); |
534 | 338 | } |
535 | 0 | else { |
536 | 0 | /* Another thread beat us to starting the GC sync process. Thus, act as |
537 | 0 | * if we were interrupted to GC. */ |
538 | 0 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Lost coordinator election\n"); |
539 | 0 | MVM_gc_enter_from_interrupt(tc); |
540 | 0 | } |
541 | 338 | } |
542 | | |
543 | | /* This is called when a thread hits an interrupt at a GC safe point. |
544 | | * |
545 | | * There are two interpretations for this: |
546 | | * * That another thread is already trying to start a GC run, so we don't need |
547 | | * to try and do that, just enlist in the run. |
548 | | * * The debug remote is asking this thread to suspend execution. |
549 | | * |
550 | | * Those cases can be distinguished by the gc state masked with |
551 | | * MVMSUSPENDSTATUS_MASK. |
552 | | * */ |
553 | 99 | void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) { |
554 | 99 | AO_t curr; |
555 | 99 | |
556 | 99 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from interrupt\n"); |
557 | 99 | |
558 | 99 | |
559 | 99 | if ((MVM_load(&tc->gc_status) & MVMSUSPENDSTATUS_MASK) == MVMSuspendState_SUSPEND_REQUEST) { |
560 | 0 | if (tc->instance->debugserver && tc->instance->debugserver->debugspam_protocol) |
561 | 0 | fprintf(stderr, "thread %d reacting to suspend request\n", tc->thread_id); |
562 | 0 | MVM_gc_mark_thread_blocked(tc); |
563 | 0 | while (1) { |
564 | 0 | uv_cond_wait(&tc->instance->debugserver->tell_threads, &tc->instance->debugserver->mutex_cond); |
565 | 0 | if ((MVM_load(&tc->gc_status) & MVMSUSPENDSTATUS_MASK) == MVMSuspendState_NONE) { |
566 | 0 | if (tc->instance->debugserver && tc->instance->debugserver->debugspam_protocol) |
567 | 0 | fprintf(stderr, "thread %d got un-suspended\n", tc->thread_id); |
568 | 0 | break; |
569 | 0 | } else { |
570 | 0 | if (tc->instance->debugserver && tc->instance->debugserver->debugspam_protocol) |
571 | 0 | fprintf(stderr, "something happened, but we're still suspended.\n"); |
572 | 0 | } |
573 | 0 | } |
574 | 0 | MVM_gc_mark_thread_unblocked(tc); |
575 | 0 | return; |
576 | 99 | } else if (MVM_load(&tc->gc_status) == (MVMGCStatus_UNABLE | MVMSuspendState_SUSPENDED)) { |
577 | 0 | /* The thread that the tc belongs to is already waiting in that loop |
578 | 0 | * up there. If we reach this piece of code the active thread must be |
579 | 0 | * the debug remote using a suspended thread's ThreadContext. */ |
580 | 0 | return; |
581 | 0 | } |
582 | 99 | |
583 | 99 | MVM_telemetry_timestamp(tc, "gc_enter_from_interrupt"); |
584 | 99 | |
585 | 99 | /* If profiling, record that GC is starting. */ |
586 | 99 | if (tc->instance->profiling) |
587 | 0 | MVM_profiler_log_gc_start(tc, is_full_collection(tc), 0); |
588 | 99 | |
589 | 99 | /* We'll certainly take care of our own work. */ |
590 | 99 | tc->gc_work_count = 0; |
591 | 99 | add_work(tc, tc); |
592 | 99 | |
593 | 99 | /* Indicate that we're ready to GC. Only want to decrement it if it's 2 or |
594 | 99 | * greater (0 should never happen; 1 means the coordinator is still counting |
595 | 99 | * up how many threads will join in, so we should wait until it decides to |
596 | 99 | * decrement.) */ |
597 | 99 | uv_mutex_lock(&tc->instance->mutex_gc_orchestrate); |
598 | 99 | while (MVM_load(&tc->instance->gc_start) < 2) |
599 | 0 | uv_cond_wait(&tc->instance->cond_gc_start, &tc->instance->mutex_gc_orchestrate); |
600 | 99 | MVM_decr(&tc->instance->gc_start); |
601 | 99 | uv_cond_broadcast(&tc->instance->cond_gc_start); |
602 | 99 | uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate); |
603 | 99 | |
604 | 99 | /* Wait for all threads to indicate readiness to collect. */ |
605 | 99 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Waiting for other threads\n"); |
606 | 99 | uv_mutex_lock(&tc->instance->mutex_gc_orchestrate); |
607 | 99 | while (MVM_load(&tc->instance->gc_start)) |
608 | 99 | uv_cond_wait(&tc->instance->cond_gc_start, &tc->instance->mutex_gc_orchestrate); |
609 | 99 | uv_mutex_unlock(&tc->instance->mutex_gc_orchestrate); |
610 | 99 | |
611 | 99 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entering run_gc\n"); |
612 | 99 | run_gc(tc, MVMGCWhatToDo_NoInstance); |
613 | 99 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete\n"); |
614 | 99 | |
615 | 99 | /* If profiling, record that GC is over. */ |
616 | 99 | if (tc->instance->profiling) |
617 | 0 | MVM_profiler_log_gc_end(tc); |
618 | 99 | } |
619 | | |
620 | | /* Run the global destruction phase. */ |
621 | 0 | void MVM_gc_global_destruction(MVMThreadContext *tc) { |
622 | 0 | char *nursery_tmp; |
623 | 0 |
|
624 | 0 | MVMInstance *vm = tc->instance; |
625 | 0 | MVMThread *cur_thread = 0; |
626 | 0 |
|
627 | 0 | /* Ask all threads to suspend on the next chance they get */ |
628 | 0 | uv_mutex_lock(&vm->mutex_threads); |
629 | 0 |
|
630 | 0 | cur_thread = vm->threads; |
631 | 0 | while (cur_thread) { |
632 | 0 | if (cur_thread->body.tc != tc) { |
633 | 0 | while (1) { |
634 | 0 | /* Is the thread currently doing completely ordinary code execution? */ |
635 | 0 | if (MVM_cas(&tc->gc_status, MVMGCStatus_NONE, MVMGCStatus_INTERRUPT | MVMSuspendState_SUSPEND_REQUEST) |
636 | 0 | == MVMGCStatus_NONE) { |
637 | 0 | break; |
638 | 0 | } |
639 | 0 | /* Is the thread in question currently blocked, i.e. spending time in |
640 | 0 | * some long-running piece of C code, waiting for I/O, etc.? |
641 | 0 | * If so, just store the suspend request bit so when it unblocks itself |
642 | 0 | * it'll suspend execution. */ |
643 | 0 | if (MVM_cas(&tc->gc_status, MVMGCStatus_UNABLE, MVMGCStatus_UNABLE | MVMSuspendState_SUSPEND_REQUEST) |
644 | 0 | == MVMGCStatus_UNABLE) { |
645 | 0 | break; |
646 | 0 | } |
647 | 0 | /* Was the thread faster than us? For example by running into |
648 | 0 | * a breakpoint, completing a step, or encountering an |
649 | 0 | * unhandled exception? If so, we're done here. */ |
650 | 0 | if ((MVM_load(&tc->gc_status) & MVMSUSPENDSTATUS_MASK) == MVMSuspendState_SUSPEND_REQUEST) { |
651 | 0 | break; |
652 | 0 | } |
653 | 0 | MVM_platform_thread_yield(); |
654 | 0 | } |
655 | 0 | } |
656 | 0 | cur_thread = cur_thread->body.next; |
657 | 0 | } |
658 | 0 |
|
659 | 0 | uv_mutex_unlock(&vm->mutex_threads); |
660 | 0 |
|
661 | 0 | /* Allow other threads to do a little more work before we continue here */ |
662 | 0 | MVM_platform_thread_yield(); |
663 | 0 |
|
664 | 0 | /* Fake a nursery collection run by swapping the semi- |
665 | 0 | * space nurseries. */ |
666 | 0 | nursery_tmp = tc->nursery_fromspace; |
667 | 0 | tc->nursery_fromspace = tc->nursery_tospace; |
668 | 0 | tc->nursery_tospace = nursery_tmp; |
669 | 0 |
|
670 | 0 | /* Run the objects' finalizers */ |
671 | 0 | MVM_gc_collect_free_nursery_uncopied(tc, tc->nursery_alloc); |
672 | 0 | MVM_gc_root_gen2_cleanup(tc); |
673 | 0 | MVM_gc_collect_free_gen2_unmarked(tc, 1); |
674 | 0 | MVM_gc_collect_free_stables(tc); |
675 | 0 | } |