/home/travis/build/MoarVM/MoarVM/src/gc/orchestrate.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "moar.h" |
2 | | #include <platform/threads.h> |
3 | | |
4 | | /* If we have the job of doing GC for a thread, we add it to our work |
5 | | * list. */ |
6 | 146 | static void add_work(MVMThreadContext *tc, MVMThreadContext *stolen) { |
7 | 146 | MVMint32 i; |
8 | 146 | for (i = 0; i < tc->gc_work_count; i++) |
9 | 0 | if (tc->gc_work[i].tc == stolen) |
10 | 0 | return; |
11 | 146 | if (tc->gc_work == NULL) { |
12 | 36 | tc->gc_work_size = 16; |
13 | 36 | tc->gc_work = MVM_malloc(tc->gc_work_size * sizeof(MVMWorkThread)); |
14 | 36 | } |
15 | 110 | else if (tc->gc_work_count == tc->gc_work_size) { |
16 | 0 | tc->gc_work_size *= 2; |
17 | 0 | tc->gc_work = MVM_realloc(tc->gc_work, tc->gc_work_size * sizeof(MVMWorkThread)); |
18 | 0 | } |
19 | 146 | tc->gc_work[tc->gc_work_count++].tc = stolen; |
20 | 146 | } |
21 | | |
/* Goes through all threads but the current one and notifies them that a
 * GC run is starting. Those that are blocked are considered excluded from
 * the run, and are not counted. Returns the count of threads that should be
 * added to the finished countdown. */
static MVMuint32 signal_one_thread(MVMThreadContext *tc, MVMThreadContext *to_signal) {
    /* Loop here since we may not succeed first time (e.g. the status of the
     * thread may change between the two ways we try to twiddle it). */
    while (1) {
        switch (MVM_load(&to_signal->gc_status)) {
            case MVMGCStatus_NONE:
                /* Try to set it from running to interrupted - the common case.
                 * On success, the thread will join the run itself, so it
                 * counts towards the finish countdown (return 1). If the CAS
                 * loses a race, fall through and re-read the status. */
                if (MVM_cas(&to_signal->gc_status, MVMGCStatus_NONE,
                        MVMGCStatus_INTERRUPT) == MVMGCStatus_NONE) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Signalled thread %d to interrupt\n", to_signal->thread_id);
                    return 1;
                }
                break;
            case MVMGCStatus_INTERRUPT:
                /* Somebody else already signalled it; don't count it twice. */
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : thread %d already interrupted\n", to_signal->thread_id);
                return 0;
            case MVMGCStatus_UNABLE:
                /* Otherwise, it's blocked; try to set it to work Stolen.
                 * A stolen thread does not participate, so it contributes
                 * nothing to the countdown; we do its GC work ourselves. */
                if (MVM_cas(&to_signal->gc_status, MVMGCStatus_UNABLE,
                        MVMGCStatus_STOLEN) == MVMGCStatus_UNABLE) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : A blocked thread %d spotted; work stolen\n", to_signal->thread_id);
                    add_work(tc, to_signal);
                    return 0;
                }
                break;
            /* this case occurs if a child thread is Stolen by its parent
             * before we get to it in the chain. */
            case MVMGCStatus_STOLEN:
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : thread %d already stolen (it was a spawning child)\n", to_signal->thread_id);
                return 0;
            default:
                MVM_panic(MVM_exitcode_gcorch, "invalid status %"MVM_PRSz" in GC orchestrate\n", MVM_load(&to_signal->gc_status));
                return 0;
        }
    }
}
/* Walks a chain of threads (which the coordinator has atomically detached
 * from the instance's threads list), signalling or stealing each according
 * to its stage, then splices `tail` (the previously-processed chain) back
 * onto the end. Returns the number of threads that will actively join the
 * GC run (for the start/finish countdowns). */
static MVMuint32 signal_all_but(MVMThreadContext *tc, MVMThread *t, MVMThread *tail) {
    MVMuint32 count = 0;
    MVMThread *next;
    if (!t) {
        return 0;
    }
    do {
        next = t->body.next;
        switch (MVM_load(&t->body.stage)) {
            case MVM_thread_stage_starting:
            case MVM_thread_stage_waiting:
            case MVM_thread_stage_started:
                /* A live thread (other than ourselves) gets signalled. */
                if (t->body.tc != tc) {
                    count += signal_one_thread(tc, t->body.tc);
                }
                break;
            case MVM_thread_stage_exited:
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : queueing to clear nursery of thread %d\n", t->body.tc->thread_id);
                add_work(tc, t->body.tc);
                break;
            case MVM_thread_stage_clearing_nursery:
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : queueing to destroy thread %d\n", t->body.tc->thread_id);
                /* last GC run for this thread */
                add_work(tc, t->body.tc);
                break;
            case MVM_thread_stage_destroyed:
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : found a destroyed thread\n");
                /* will be cleaned up (removed from the lists) shortly */
                break;
            default:
                MVM_panic(MVM_exitcode_gcorch, "Corrupted MVMThread or running threads list: invalid thread stage %"MVM_PRSz"", MVM_load(&t->body.stage));
        }
    } while (next && (t = next));
    /* t now points at the last node of the chain; attach tail after it.
     * The write barrier is needed since this is a cross-object reference
     * update on collectables. */
    if (tail)
        MVM_gc_write_barrier(tc, (MVMCollectable *)t, (MVMCollectable *)tail);
    t->body.next = tail;
    return count;
}
100 | | |
101 | | /* Does work in a thread's in-tray, if any. Returns a non-zero value if work |
102 | | * was found and done, and zero otherwise. */ |
103 | 438 | static int process_in_tray(MVMThreadContext *tc, MVMuint8 gen) { |
104 | 438 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Considering extra work\n"); |
105 | 438 | if (MVM_load(&tc->gc_in_tray)) { |
106 | 0 | GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, |
107 | 0 | "Thread %d run %d : Was given extra work by another thread; doing it\n"); |
108 | 0 | MVM_gc_collect(tc, MVMGCWhatToDo_InTray, gen); |
109 | 0 | return 1; |
110 | 0 | } |
111 | 438 | return 0; |
112 | 438 | } |
113 | | |
114 | | /* Called by a thread when it thinks it is done with GC. It may get some more |
115 | | * work yet, though. */ |
116 | 292 | static void clear_intrays(MVMThreadContext *tc, MVMuint8 gen) { |
117 | 292 | MVMuint32 did_work = 1; |
118 | 584 | while (did_work) { |
119 | 292 | MVMThread *cur_thread; |
120 | 292 | did_work = 0; |
121 | 292 | cur_thread = (MVMThread *)MVM_load(&tc->instance->threads); |
122 | 584 | while (cur_thread) { |
123 | 292 | if (cur_thread->body.tc) |
124 | 292 | did_work += process_in_tray(cur_thread->body.tc, gen); |
125 | 292 | cur_thread = cur_thread->body.next; |
126 | 292 | } |
127 | 292 | } |
128 | 292 | } |
/* Runs the finishing protocol of a GC run for this thread and all the
 * threads it did work for (its gc_work list): drains in-trays, votes on
 * the gc_finish countdown, waits for global agreement, lets the
 * coordinator settle cross-thread work and finalization, resets each
 * worked-on thread's gc_status, destroys threads whose final collection
 * this was, and acknowledges completion via gc_ack. */
static void finish_gc(MVMThreadContext *tc, MVMuint8 gen, MVMuint8 is_coordinator) {
    MVMuint32 i, did_work;

    /* Do any extra work that we have been passed. Loop until one full pass
     * over our work list turns up nothing, since emptying one in-tray may
     * fill another. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
        "Thread %d run %d : doing any work in thread in-trays\n");
    did_work = 1;
    while (did_work) {
        did_work = 0;
        for (i = 0; i < tc->gc_work_count; i++)
            did_work += process_in_tray(tc->gc_work[i].tc, gen);
    }

    /* Decrement gc_finish to say we're done, and wait for termination. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Voting to finish\n");
    MVM_decr(&tc->instance->gc_finish);
    while (MVM_load(&tc->instance->gc_finish)) {
        for (i = 0; i < 1000; i++)
            ; /* XXX Something HT-efficienter. */
        /* XXX Here we can look to see if we got passed any work, and if so
         * try to un-vote. */
    }
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Termination agreed\n");

    /* Co-ordinator should do final check over all the in-trays, and trigger
     * collection until all is settled. Rest should wait. Additionally, after
     * in-trays are settled, coordinator walks threads looking for anything
     * that needs adding to the finalize queue. It then will make another
     * iteration over in-trays to handle cross-thread references to objects
     * needing finalization. For full collections, collected objects are then
     * cleaned from all inter-generational sets, and finally any objects to
     * be freed at the fixed size allocator's next safepoint are freed. */
    if (is_coordinator) {
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator handling in-tray clearing completion\n");
        clear_intrays(tc, gen);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator handling finalizers\n");
        MVM_finalize_walk_queues(tc, gen);
        clear_intrays(tc, gen);

        if (gen == MVMGCGenerations_Both) {
            MVMThread *cur_thread = (MVMThread *)MVM_load(&tc->instance->threads);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : Co-ordinator handling inter-gen root cleanup\n");
            while (cur_thread) {
                if (cur_thread->body.tc)
                    MVM_gc_root_gen2_cleanup(cur_thread->body.tc);
                cur_thread = cur_thread->body.next;
            }
        }

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator handling fixed-size allocator safepoint frees\n");
        MVM_fixed_size_safepoint(tc, tc->instance->fsa);

        MVM_profile_heap_take_snapshot(tc);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Co-ordinator signalling in-trays clear\n");
        MVM_store(&tc->instance->gc_intrays_clearing, 0);
    }
    else {
        /* Non-coordinators spin until the coordinator declares the
         * in-trays settled. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Waiting for in-tray clearing completion\n");
        while (MVM_load(&tc->instance->gc_intrays_clearing))
            for (i = 0; i < 1000; i++)
                ; /* XXX Something HT-efficienter. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : Got in-tray clearing complete notice\n");
    }

    /* Reset GC status flags. This is also where thread destruction happens,
     * and it needs to happen before we acknowledge this GC run is finished. */
    for (i = 0; i < tc->gc_work_count; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;
        MVMThread *thread_obj = other->thread_obj;
        if (MVM_load(&thread_obj->body.stage) == MVM_thread_stage_clearing_nursery) {
            /* Second collection after the thread exited: move its gen2 into
             * ours, then tear down its context for good. */
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : transferring gen2 of thread %d\n", other->thread_id);
            MVM_gc_gen2_transfer(other, tc);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : destroying thread %d\n", other->thread_id);
            MVM_tc_destroy(other);
            tc->gc_work[i].tc = thread_obj->body.tc = NULL;
            MVM_store(&thread_obj->body.stage, MVM_thread_stage_destroyed);
        }
        else {
            if (gen == MVMGCGenerations_Both) {
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                    "Thread %d run %d : freeing gen2 of thread %d\n",
                    other->thread_id);
                MVM_gc_collect_free_gen2_unmarked(other, 0);
            }
            if (MVM_load(&thread_obj->body.stage) == MVM_thread_stage_exited) {
                /* Don't bother freeing gen2; we'll do it next time */
                MVM_store(&thread_obj->body.stage, MVM_thread_stage_clearing_nursery);
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                    "Thread %d run %d : set thread %d clearing nursery stage to %d\n",
                    other->thread_id, (int)MVM_load(&thread_obj->body.stage));
            }
            /* Exactly one of these CASes applies, restoring the status the
             * thread had before the run: STOLEN goes back to blocked
             * (UNABLE), INTERRUPT back to running (NONE). */
            MVM_cas(&other->gc_status, MVMGCStatus_STOLEN, MVMGCStatus_UNABLE);
            MVM_cas(&other->gc_status, MVMGCStatus_INTERRUPT, MVMGCStatus_NONE);
        }
    }

    /* Signal acknowledgement of completing the cleanup,
     * except for STables, and if we're the final to do
     * so, free the STables, which have been linked. */
    if (MVM_decr(&tc->instance->gc_ack) == 2) {
        /* Set it to zero (we're guaranteed the only ones trying to write to
         * it here). Actual STable free in MVM_gc_enter_from_allocator. */
        MVM_store(&tc->instance->gc_ack, 0);
    }
}
245 | | |
246 | | /* Called by a thread to indicate it is about to enter a blocking operation. |
247 | | * This tells any thread that is coordinating a GC run that this thread will |
248 | | * be unable to participate. */ |
249 | 259k | void MVM_gc_mark_thread_blocked(MVMThreadContext *tc) { |
250 | 259k | /* This may need more than one attempt. */ |
251 | 259k | while (1) { |
252 | 259k | /* Try to set it from running to unable - the common case. */ |
253 | 259k | if (MVM_cas(&tc->gc_status, MVMGCStatus_NONE, |
254 | 259k | MVMGCStatus_UNABLE) == MVMGCStatus_NONE) |
255 | 259k | return; |
256 | 259k | |
257 | 259k | /* The only way this can fail is if another thread just decided we're to |
258 | 259k | * participate in a GC run. */ |
259 | 0 | if (MVM_load(&tc->gc_status) == MVMGCStatus_INTERRUPT) |
260 | 0 | MVM_gc_enter_from_interrupt(tc); |
261 | 0 | else |
262 | 0 | MVM_panic(MVM_exitcode_gcorch, |
263 | 0 | "Invalid GC status observed while blocking thread; aborting"); |
264 | 0 | } |
265 | 259k | } |
266 | | |
267 | | /* Called by a thread to indicate it has completed a block operation and is |
268 | | * thus able to particpate in a GC run again. Note that this case needs some |
269 | | * special handling if it comes out of this mode when a GC run is taking place. */ |
270 | 259k | void MVM_gc_mark_thread_unblocked(MVMThreadContext *tc) { |
271 | 259k | /* Try to set it from unable to running. */ |
272 | 259k | while (MVM_cas(&tc->gc_status, MVMGCStatus_UNABLE, |
273 | 0 | MVMGCStatus_NONE) != MVMGCStatus_UNABLE) { |
274 | 0 | /* We can't, presumably because a GC run is going on. We should wait |
275 | 0 | * for that to finish before we go on, but without chewing CPU. */ |
276 | 0 | MVM_platform_thread_yield(); |
277 | 0 | } |
278 | 259k | } |
279 | | |
280 | | /* Checks if a thread has marked itself as blocked. Considers that the GC may |
281 | | * have stolen its work and marked it as such also. So what this really |
282 | | * answers is, "did this thread mark itself blocked, and since then not mark |
283 | | * itself unblocked", which is useful if you need to conditionally unblock |
284 | | * or re-block. If the status changes from blocked to stolen or stolen to |
285 | | * blocked between checking this and calling unblock, it's safe anyway since |
286 | | * these cases are handled in MVM_gc_mark_thread_unblocked. Note that this |
287 | | * relies on a thread itself only ever calling block/unblock. */ |
288 | 0 | MVMint32 MVM_gc_is_thread_blocked(MVMThreadContext *tc) { |
289 | 0 | AO_t gc_status = MVM_load(&(tc->gc_status)); |
290 | 0 | return gc_status == MVMGCStatus_UNABLE || |
291 | 0 | gc_status == MVMGCStatus_STOLEN; |
292 | 0 | } |
293 | | |
294 | 146 | static MVMint32 is_full_collection(MVMThreadContext *tc) { |
295 | 146 | MVMuint64 percent_growth, promoted; |
296 | 146 | size_t rss; |
297 | 146 | |
298 | 146 | /* If it's below the absolute minimum, quickly return. */ |
299 | 146 | promoted = (MVMuint64)MVM_load(&tc->instance->gc_promoted_bytes_since_last_full); |
300 | 146 | if (promoted < MVM_GC_GEN2_THRESHOLD_MINIMUM) |
301 | 146 | return 0; |
302 | 146 | |
303 | 146 | /* If we're heap profiling then don't consider the resident set size, as |
304 | 146 | * it will be hugely distorted by the profile data we record. */ |
305 | 0 | if (MVM_profile_heap_profiling(tc)) |
306 | 0 | return 1; |
307 | 0 |
|
308 | 0 | /* Otherwise, consider percentage of resident set size. */ |
309 | 0 | if (uv_resident_set_memory(&rss) < 0 || rss == 0) |
310 | 0 | rss = 50 * 1024 * 1024; |
311 | 0 | percent_growth = (100 * promoted) / (MVMuint64)rss; |
312 | 0 |
|
313 | 0 | return percent_growth >= MVM_GC_GEN2_THRESHOLD_PERCENT; |
314 | 0 | } |
315 | | |
/* Performs the collection work for this thread and every thread on its
 * gc_work list (itself plus any stolen/exited threads), then runs the
 * finishing protocol and frees uncopied nursery objects. what_to_do is
 * MVMGCWhatToDo_All for the coordinator, which also determines the
 * is_coordinator flag passed to finish_gc. */
static void run_gc(MVMThreadContext *tc, MVMuint8 what_to_do) {
    MVMuint8 gen;
    MVMuint32 i, n;

#if MVM_GC_DEBUG
    if (tc->in_spesh)
        MVM_panic(1, "Must not GC when in the specializer/JIT\n");
#endif

    /* Decide nursery or full collection. */
    gen = tc->instance->gc_full_collect ? MVMGCGenerations_Both : MVMGCGenerations_Nursery;

    /* Do GC work for ourselves and any work threads. Remember each thread's
     * nursery allocation pointer (limit) so we later know which objects
     * existed at the start of this run. */
    for (i = 0, n = tc->gc_work_count ; i < n; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;
        tc->gc_work[i].limit = other->nursery_alloc;
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : starting collection for thread %d\n",
            other->thread_id);
        other->gc_promoted_bytes = 0;
        MVM_gc_collect(other, (other == tc ? what_to_do : MVMGCWhatToDo_NoInstance), gen);
    }

    /* Wait for everybody to agree we're done. */
    finish_gc(tc, gen, what_to_do == MVMGCWhatToDo_All);

    /* Now we're all done, it's safe to finalize any objects that need it. */
    /* XXX TODO explore the feasability of doing this in a background
     * finalizer/destructor thread and letting the main thread(s) continue
     * on their merry way(s). */
    for (i = 0, n = tc->gc_work_count ; i < n; i++) {
        MVMThreadContext *other = tc->gc_work[i].tc;

        /* The thread might've been destroyed (finish_gc NULLs its entry). */
        if (!other)
            continue;

        /* Contribute this thread's promoted bytes. */
        MVM_add(&tc->instance->gc_promoted_bytes_since_last_full, other->gc_promoted_bytes);

        /* Collect nursery. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : collecting nursery uncopied of thread %d\n",
            other->thread_id);
        MVM_gc_collect_free_nursery_uncopied(other, tc->gc_work[i].limit);
    }
}
362 | | |
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC. Exactly one thread wins the gc_start CAS and becomes
 * coordinator; the losers behave as if interrupted. */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from allocate\n");

    /* Try to start the GC run. */
    if (MVM_trycas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32 num_threads = 0;

        /* Need to wait for other threads to reset their gc_status (i.e. for
         * the previous run's gc_ack countdown to drain). */
        while (MVM_load(&tc->instance->gc_ack)) {
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
                "Thread %d run %d : waiting for other thread's gc_ack\n");
            MVM_platform_thread_yield();
        }

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        MVM_incr(&tc->instance->gc_seq_number);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n",
            (int)MVM_load(&tc->instance->gc_seq_number));

        /* Decide if it will be a full collection. */
        tc->instance->gc_full_collect = is_full_collection(tc);

        /* If profiling, record that GC is starting. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_start(tc, tc->instance->gc_full_collect);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* Flag that we didn't agree on this run that all the in-trays are
         * cleared (a responsibility of the co-ordinator. */
        MVM_store(&tc->instance->gc_intrays_clearing, 1);

        /* We'll take care of our own work. */
        add_work(tc, tc);

        /* Find other threads, and signal or steal. We detach the whole
         * threads list with a CAS swap to NULL, signal everything on it,
         * and repeat until no new threads appeared while we were busy
         * (gc_start stays > 1 while signalled threads have yet to vote). */
        do {
            MVMThread *threads = (MVMThread *)MVM_load(&tc->instance->threads);
            if (threads && threads != last_starter) {
                MVMThread *head = threads;
                MVMuint32 add;
                while ((threads = (MVMThread *)MVM_casptr(&tc->instance->threads, head, NULL)) != head) {
                    head = threads;
                }

                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Found %d other threads\n", add);
                    MVM_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }

            /* If there's an event loop thread, wake it up to participate. */
            if (tc->instance->event_loop_wakeup)
                uv_async_send(tc->instance->event_loop_wakeup);
        } while (MVM_load(&tc->instance->gc_start) > 1);

        /* Sanity checks. */
        if (!MVM_trycas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");
        if (MVM_load(&tc->instance->gc_finish) != 0)
            MVM_panic(MVM_exitcode_gcorch, "Finish votes was %"MVM_PRSz"\n", MVM_load(&tc->instance->gc_finish));

        /* gc_ack gets an extra so the final acknowledger
         * can also free the STables. */
        MVM_store(&tc->instance->gc_finish, num_threads + 1);
        MVM_store(&tc->instance->gc_ack, num_threads + 2);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : finish votes is %d\n",
            (int)MVM_load(&tc->instance->gc_finish));

        /* Now we're ready to start, zero promoted since last full collection
         * counter if this is a full collect. */
        if (tc->instance->gc_full_collect)
            MVM_store(&tc->instance->gc_promoted_bytes_since_last_full, 0);

        /* This is a safe point for us to free any STables that have been marked
         * for deletion in the previous collection (since we let finalization -
         * which appends to this list - happen after we set threads on their
         * way again, it's not safe to do it in the previous collection). */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Freeing STables if needed\n");
        MVM_gc_collect_free_stables(tc);

        /* Signal to the rest to start */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator signalling start\n");
        if (MVM_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "Start votes was %"MVM_PRSz"\n", MVM_load(&tc->instance->gc_start));

        /* Start collecting. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator entering run_gc\n");
        run_gc(tc, MVMGCWhatToDo_All);

        /* If profiling, record that GC is over. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_end(tc);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete (cooridnator)\n");
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}
478 | | |
/* This is called when a thread hits an interrupt at a GC safe point. This means
 * that another thread is already trying to start a GC run, so we don't need to
 * try and do that, just enlist in the run. */
void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) {
    AO_t curr;

    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from interrupt\n");

    /* If profiling, record that GC is starting. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_start(tc, is_full_collection(tc));

    /* We'll certainly take care of our own work. */
    tc->gc_work_count = 0;
    add_work(tc, tc);

    /* Indicate that we're ready to GC. Only want to decrement it if it's 2 or
     * greater (0 should never happen; 1 means the coordinator is still counting
     * up how many threads will join in, so we should wait until it decides to
     * decrement.) */
    while ((curr = MVM_load(&tc->instance->gc_start)) < 2
            || !MVM_trycas(&tc->instance->gc_start, curr, curr - 1)) {
        /* MVM_platform_thread_yield();*/
    }

    /* Wait for all threads to indicate readiness to collect (the
     * coordinator makes the final decrement to zero). */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Waiting for other threads\n");
    while (MVM_load(&tc->instance->gc_start)) {
        /* MVM_platform_thread_yield();*/
    }

    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entering run_gc\n");
    run_gc(tc, MVMGCWhatToDo_NoInstance);
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete\n");

    /* If profiling, record that GC is over. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_end(tc);
}
518 | | |
519 | | /* Run the global destruction phase. */ |
520 | 0 | void MVM_gc_global_destruction(MVMThreadContext *tc) { |
521 | 0 | char *nursery_tmp; |
522 | 0 |
|
523 | 0 | /* Fake a nursery collection run by swapping the semi- |
524 | 0 | * space nurseries. */ |
525 | 0 | nursery_tmp = tc->nursery_fromspace; |
526 | 0 | tc->nursery_fromspace = tc->nursery_tospace; |
527 | 0 | tc->nursery_tospace = nursery_tmp; |
528 | 0 |
|
529 | 0 | /* Run the objects' finalizers */ |
530 | 0 | MVM_gc_collect_free_nursery_uncopied(tc, tc->nursery_alloc); |
531 | 0 | MVM_gc_root_gen2_cleanup(tc); |
532 | 0 | MVM_gc_collect_free_gen2_unmarked(tc, 1); |
533 | 0 | MVM_gc_collect_free_stables(tc); |
534 | 0 | } |