Coverage Report

Created: 2018-07-03 15:31

/home/travis/build/MoarVM/MoarVM/src/gc/collect.c
Line
Count
Source (jump to first uncovered line)
1
#include "moar.h"
2
3
/* Combines a piece of work that will be passed to another thread with the
4
 * ID of the target thread to pass it to. */
5
typedef struct {
6
    MVMuint32        target;
7
    MVMGCPassedWork *work;
8
} ThreadWork;
9
10
/* Current chunks of work we're building up to pass. */
11
typedef struct {
12
    MVMuint32   num_target_threads;
13
    ThreadWork *target_work;
14
} WorkToPass;
15
16
/* Forward decls. */
17
static void process_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist, WorkToPass *wtp, MVMuint8 gen);
18
static void pass_work_item(MVMThreadContext *tc, WorkToPass *wtp, MVMCollectable **item_ptr);
19
static void pass_leftover_work(MVMThreadContext *tc, WorkToPass *wtp);
20
static void add_in_tray_to_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist);
21
22
/* The size of the nursery that a new thread should get. The main thread will
23
 * get a full-size one right away. */
24
317
MVMuint32 MVM_gc_new_thread_nursery_size(MVMInstance *i) {
25
317
    return i->main_thread != NULL
26
173
        ? (MVM_NURSERY_SIZE < MVM_NURSERY_THREAD_START
27
0
            ? MVM_NURSERY_SIZE
28
173
            : MVM_NURSERY_THREAD_START)
29
144
        : MVM_NURSERY_SIZE;
30
317
}
31
32
/* Does a garbage collection run. Exactly what it does is configured by the
33
 * couple of arguments that it takes.
34
 *
35
 * The what_to_do argument specifies where it should look for things to add
36
 * to the worklist: everywhere, just at thread local stuff, or just in the
37
 * thread's in-tray.
38
 *
39
 * The gen argument specifies whether to collect the nursery or both of the
40
 * generations. Nursery collection is done by semi-space copying. Once an
41
 * object is seen/copied once in the nursery (may be tuned in the future to
42
 * twice or so - we'll see) then it is not copied to tospace, but instead
43
 * promoted to the second generation. If we are collecting generation 2 also,
44
 * then objects that are alive in the second generation are simply marked.
45
 * Since the second generation is managed as a set of sized pools, there is
46
 * much less motivation for any kind of copying/compaction; the internal
47
 * fragmentation that makes finding a right-sized gap problematic will not
48
 * happen.
49
 *
50
 * Note that it adds the roots and processes them in phases, to try to avoid
51
 * building up a huge worklist. */
52
779
void MVM_gc_collect(MVMThreadContext *tc, MVMuint8 what_to_do, MVMuint8 gen) {
53
779
    /* Create a GC worklist. */
54
779
    MVMGCWorklist *worklist = MVM_gc_worklist_create(tc, gen != MVMGCGenerations_Nursery);
55
779
56
779
    /* Initialize work passing data structure. */
57
779
    WorkToPass wtp;
58
779
    wtp.num_target_threads = 0;
59
779
    wtp.target_work = NULL;
60
779
61
779
    /* See what we need to work on this time. */
62
779
    if (what_to_do == MVMGCWhatToDo_InTray) {
63
31
        /* We just need to process anything in the in-tray. */
64
31
        add_in_tray_to_worklist(tc, worklist);
65
31
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from in tray \n", worklist->items);
66
31
        process_worklist(tc, worklist, &wtp, gen);
67
31
    }
68
748
    else if (what_to_do == MVMGCWhatToDo_Finalizing) {
69
0
        /* Need to process the finalizing queue. */
70
0
        MVMuint32 i;
71
0
        for (i = 0; i < tc->num_finalizing; i++)
72
0
            MVM_gc_worklist_add(tc, worklist, &(tc->finalizing[i]));
73
0
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from finalizing \n", worklist->items);
74
0
        process_worklist(tc, worklist, &wtp, gen);
75
0
    }
76
748
    else {
77
748
        /* Main collection run. The current tospace becomes fromspace, with
78
748
         * the size of the current tospace becoming stashed as the size of
79
748
         * that fromspace. */
80
748
        void *old_fromspace = tc->nursery_fromspace;
81
748
        MVMuint32 old_fromspace_size = tc->nursery_fromspace_size;
82
748
        tc->nursery_fromspace = tc->nursery_tospace;
83
748
        tc->nursery_fromspace_size = tc->nursery_tospace_size;
84
748
85
748
        /* Decide on this threads's tospace size. If fromspace was already at
86
748
         * the maximum nursery size, then that is the new tospace size. If
87
748
         * not, then see if this thread caused the current GC run, and grant
88
748
         * it a bigger tospace. Otherwise, new tospace size is left as the
89
748
         * last tospace size. */
90
748
        if (tc->nursery_tospace_size < MVM_NURSERY_SIZE) {
91
406
            if (tc->instance->thread_to_blame_for_gc == tc)
92
10
                tc->nursery_tospace_size *= 2;
93
406
        }
94
748
95
748
        /* If the old fromspace matches the target size, just re-use it. If
96
748
         * not, free it and allocate a new tospace. */
97
748
        if (old_fromspace_size == tc->nursery_tospace_size) {
98
624
            tc->nursery_tospace = old_fromspace;
99
624
        }
100
124
        else {
101
124
            MVM_free(old_fromspace);
102
124
            tc->nursery_tospace = MVM_calloc(1, tc->nursery_tospace_size);
103
124
        }
104
748
105
748
        /* Reset nursery allocation pointers to the new tospace. */
106
748
        tc->nursery_alloc       = tc->nursery_tospace;
107
748
        tc->nursery_alloc_limit = (char *)tc->nursery_tospace + tc->nursery_tospace_size;
108
748
109
748
        /* Add permanent roots and process them; only one thread will do
110
748
        * this, since they are instance-wide. */
111
748
        if (what_to_do != MVMGCWhatToDo_NoInstance) {
112
338
            MVM_gc_root_add_permanents_to_worklist(tc, worklist, NULL);
113
338
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from instance permanents\n", worklist->items);
114
338
            process_worklist(tc, worklist, &wtp, gen);
115
338
            MVM_gc_root_add_instance_roots_to_worklist(tc, worklist, NULL);
116
338
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from instance roots\n", worklist->items);
117
338
            process_worklist(tc, worklist, &wtp, gen);
118
338
        }
119
748
120
748
        /* Add per-thread state to worklist and process it. */
121
748
        MVM_gc_root_add_tc_roots_to_worklist(tc, worklist, NULL);
122
748
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from TC objects\n", worklist->items);
123
748
        process_worklist(tc, worklist, &wtp, gen);
124
748
125
748
        /* Walk current call stack, following caller chain until we reach a
126
748
         * heap-allocated frame. Note that tc->cur_frame may itself be a heap
127
748
         * frame, in which case we put it directly on the worklist as it can
128
748
         * move. */
129
748
        if (tc->cur_frame && MVM_FRAME_IS_ON_CALLSTACK(tc, tc->cur_frame)) {
130
297
            MVMFrame *cur_frame = tc->cur_frame;
131
960
            while (cur_frame && MVM_FRAME_IS_ON_CALLSTACK(tc, cur_frame)) {
132
663
                MVM_gc_root_add_frame_roots_to_worklist(tc, worklist, cur_frame);
133
663
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from a stack frame\n", worklist->items);
134
663
                process_worklist(tc, worklist, &wtp, gen);
135
663
                cur_frame = cur_frame->caller;
136
663
            }
137
297
        }
138
451
        else {
139
451
            MVM_gc_worklist_add(tc, worklist, &tc->cur_frame);
140
451
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from current frame\n", worklist->items);
141
451
            process_worklist(tc, worklist, &wtp, gen);
142
451
        }
143
748
144
748
        /* Add temporary roots and process them (these are per-thread). */
145
748
        MVM_gc_root_add_temps_to_worklist(tc, worklist, NULL);
146
748
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from thread temps\n", worklist->items);
147
748
        process_worklist(tc, worklist, &wtp, gen);
148
748
149
748
        /* Add things that are roots for the first generation because they are
150
748
        * pointed to by objects in the second generation and process them
151
748
        * (also per-thread). Note we need not do this if we're doing a full
152
748
        * collection anyway (in fact, we must not for correctness, otherwise
153
748
        * the gen2 rooting keeps them alive forever). */
154
750
        if (gen == MVMGCGenerations_Nursery) {
155
750
            MVM_gc_root_add_gen2s_to_worklist(tc, worklist);
156
750
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from gen2 \n", worklist->items);
157
750
            process_worklist(tc, worklist, &wtp, gen);
158
750
        }
159
748
160
748
        /* Process anything in the in-tray. */
161
748
        add_in_tray_to_worklist(tc, worklist);
162
748
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from in tray \n", worklist->items);
163
748
        process_worklist(tc, worklist, &wtp, gen);
164
748
165
748
        /* At this point, we have probably done most of the work we will
166
748
         * need to (only get more if another thread passes us more); zero
167
748
         * out the remaining tospace. */
168
748
        memset(tc->nursery_alloc, 0, (char *)tc->nursery_alloc_limit - (char *)tc->nursery_alloc);
169
748
    }
170
779
171
779
    /* Destroy the worklist. */
172
779
    MVM_gc_worklist_destroy(tc, worklist);
173
779
174
779
    /* Pass any work for other threads we accumulated but that didn't trigger
175
779
     * the work passing threshold, then cleanup work passing list. */
176
779
    if (wtp.num_target_threads) {
177
153
        pass_leftover_work(tc, &wtp);
178
153
        MVM_free(wtp.target_work);
179
153
    }
180
779
}
181
182
/* Processes the current worklist. */
183
4.82k
static void process_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist, WorkToPass *wtp, MVMuint8 gen) {
184
4.82k
    MVMGen2Allocator  *gen2;
185
4.82k
    MVMCollectable   **item_ptr;
186
4.82k
    MVMCollectable    *new_addr;
187
4.82k
    MVMuint32          gen2count;
188
4.82k
189
4.82k
    /* Grab the second generation allocator; we may move items into the
190
4.82k
     * old generation. */
191
4.82k
    gen2 = tc->gen2;
192
4.82k
193
4.40M
    while ((item_ptr = MVM_gc_worklist_get(tc, worklist))) {
194
4.40M
        /* Dereference the object we're considering. */
195
4.40M
        MVMCollectable *item = *item_ptr;
196
4.40M
        MVMuint8 item_gen2;
197
4.40M
        MVMuint8 to_gen2 = 0;
198
4.40M
199
4.40M
        /* If the item is NULL, that's fine - it's just a null reference and
200
4.40M
         * thus we've no object to consider. */
201
4.40M
        if (item == NULL)
202
0
            continue;
203
4.40M
204
4.40M
        /* If it's in the second generation and we're only doing a nursery
205
4.40M
         * collection, we have nothing to do. */
206
4.40M
        item_gen2 = item->flags & MVM_CF_SECOND_GEN;
207
4.40M
        if (item_gen2) {
208
37.9k
            if (gen == MVMGCGenerations_Nursery)
209
37.9k
                continue;
210
0
            if (item->flags & MVM_CF_GEN2_LIVE) {
211
0
                /* gen2 and marked as live. */
212
0
                continue;
213
0
            }
214
4.36M
        } else if (item->flags & MVM_CF_FORWARDER_VALID) {
215
1.98M
            /* If the item was already seen and copied, then it will have a
216
1.98M
             * forwarding address already. Just update this pointer to the
217
1.98M
             * new address and we're done. */
218
1.98M
            assert(*item_ptr != item->sc_forward_u.forwarder);
219
1.98M
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT)) {
220
0
                if (*item_ptr != item->sc_forward_u.forwarder) {
221
0
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : updating handle %p from %p to forwarder %p\n", item_ptr, item, item->sc_forward_u.forwarder);
222
0
                }
223
0
                else {
224
0
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : already visited handle %p to forwarder %p\n", item_ptr, item->sc_forward_u.forwarder);
225
0
                }
226
0
            }
227
1.98M
            *item_ptr = item->sc_forward_u.forwarder;
228
1.98M
            continue;
229
2.38M
        } else {
230
2.38M
            /* If the pointer is already into tospace (the bit we've already
231
2.38M
               copied into), we already updated it, so we're done. */
232
2.38M
            if (item >= (MVMCollectable *)tc->nursery_tospace && item < (MVMCollectable *)tc->nursery_alloc) {
233
83.7k
                continue;
234
83.7k
            }
235
2.38M
        }
236
4.40M
237
4.40M
        /* If it's owned by a different thread, we need to pass it over to
238
4.40M
         * the owning thread. */
239
2.30M
        if (item->owner != tc->thread_id) {
240
12.1k
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : sending a handle %p to object %p to thread %d\n", item_ptr, item, item->owner);
241
12.1k
            pass_work_item(tc, wtp, item_ptr);
242
12.1k
            continue;
243
12.1k
        }
244
2.30M
245
2.30M
        /* If it's in to-space but *ahead* of our copy offset then it's an
246
2.30M
           out-of-date pointer and we have some kind of corruption. */
247
2.28M
        if (item >= (MVMCollectable *)tc->nursery_alloc && item < (MVMCollectable *)tc->nursery_alloc_limit)
248
0
            MVM_panic(1, "Heap corruption detected: pointer %p to past fromspace", item);
249
2.28M
250
2.28M
        /* At this point, we didn't already see the object, which means we
251
2.28M
         * need to take some action. Go on the generation... */
252
2.28M
        if (item_gen2) {
253
0
            assert(!(item->flags & MVM_CF_FORWARDER_VALID));
254
0
            /* It's in the second generation. We'll just mark it. */
255
0
            new_addr = item;
256
0
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT)) {
257
0
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : handle %p was already %p\n", item_ptr, new_addr);
258
0
            }
259
0
            item->flags |= MVM_CF_GEN2_LIVE;
260
0
            assert(*item_ptr == new_addr);
261
2.28M
        } else {
262
2.28M
            /* Catch NULL stable (always sign of trouble) in debug mode. */
263
2.28M
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT) && !STABLE(item)) {
264
0
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : found a zeroed handle %p to object %p\n", item_ptr, item);
265
0
            }
266
2.28M
267
2.28M
            /* Did we see it in the nursery before, or should we move it to
268
2.28M
             * gen2 anyway since a persistent ID was requested? */
269
2.28M
            if (item->flags & (MVM_CF_NURSERY_SEEN | MVM_CF_HAS_OBJECT_ID)) {
270
922k
                /* Yes; we should move it to the second generation. Allocate
271
922k
                 * space in the second generation. */
272
922k
                to_gen2 = 1;
273
922k
                new_addr = item->flags & MVM_CF_HAS_OBJECT_ID
274
145
                    ? MVM_gc_object_id_use_allocation(tc, item)
275
922k
                    : MVM_gc_gen2_allocate(gen2, item->size);
276
922k
277
922k
                /* Add on to the promoted amount (used both to decide when to do
278
922k
                 * the next full collection, as well as for profiling). Note we
279
922k
                 * add unmanaged size on for objects below. */
280
922k
                tc->gc_promoted_bytes += item->size;
281
922k
282
922k
                /* Copy the object to the second generation and mark it as
283
922k
                 * living there. */
284
922k
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : copying an object %p of size %d to gen2 %p\n",
285
922k
                    item, item->size, new_addr);
286
922k
                memcpy(new_addr, item, item->size);
287
922k
                if (new_addr->flags & MVM_CF_NURSERY_SEEN)
288
922k
                    new_addr->flags ^= MVM_CF_NURSERY_SEEN;
289
922k
                new_addr->flags |= MVM_CF_SECOND_GEN;
290
922k
291
922k
                /* If it's a frame with an active work area, we need to keep
292
922k
                 * on visiting it. Also add on object's unmanaged size. */
293
922k
                if (new_addr->flags & MVM_CF_FRAME) {
294
48.9k
                    if (((MVMFrame *)new_addr)->work)
295
371
                        MVM_gc_root_gen2_add(tc, (MVMCollectable *)new_addr);
296
48.9k
                }
297
873k
                else if (!(new_addr->flags & (MVM_CF_TYPE_OBJECT | MVM_CF_STABLE))) {
298
872k
                    MVMObject *new_obj_addr = (MVMObject *)new_addr;
299
872k
                    if (REPR(new_obj_addr)->unmanaged_size)
300
473k
                        tc->gc_promoted_bytes += REPR(new_obj_addr)->unmanaged_size(tc,
301
473k
                            STABLE(new_obj_addr), OBJECT_BODY(new_obj_addr));
302
872k
                }
303
922k
304
922k
                /* If we're going to sweep the second generation, also need
305
922k
                 * to mark it as live. */
306
922k
                if (gen == MVMGCGenerations_Both)
307
0
                    new_addr->flags |= MVM_CF_GEN2_LIVE;
308
922k
            }
309
1.36M
            else {
310
1.36M
                /* No, so it will live in the nursery for another GC
311
1.36M
                 * iteration. Allocate space in the nursery. */
312
1.36M
                new_addr = (MVMCollectable *)tc->nursery_alloc;
313
1.36M
                tc->nursery_alloc = (char *)tc->nursery_alloc + MVM_ALIGN_SIZE(item->size);
314
1.36M
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : copying an object %p (reprid %d) of size %d to tospace %p\n",
315
0
                    item, REPR(item)->ID, item->size, new_addr);
316
1.36M
317
1.36M
                /* Copy the object to tospace and mark it as seen in the
318
1.36M
                 * nursery (so the next time around it will move to the
319
1.36M
                 * older generation, if it survives). */
320
1.36M
                memcpy(new_addr, item, item->size);
321
1.36M
                new_addr->flags |= MVM_CF_NURSERY_SEEN;
322
1.36M
            }
323
2.28M
324
2.28M
            /* Store the forwarding pointer and update the original
325
2.28M
             * reference. */
326
2.28M
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT) && new_addr != item) {
327
0
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : updating handle %p from referent %p (reprid %d) to %p\n", item_ptr, item, REPR(item)->ID, new_addr);
328
0
            }
329
2.28M
            *item_ptr = new_addr;
330
2.28M
            item->sc_forward_u.forwarder = new_addr;
331
2.28M
            /* Set the flag on the copy of item *in fromspace* to mark that the
332
2.28M
               forwarder pointer is valid. */
333
2.28M
            item->flags |= MVM_CF_FORWARDER_VALID;
334
2.28M
        }
335
2.28M
336
2.28M
        /* Finally, we need to mark the collectable (at its moved address).
337
2.28M
         * Track how many items we had before we mark it, in case we need
338
2.28M
         * to write barrier them post-move to uphold the generational
339
2.28M
         * invariant. */
340
2.28M
        gen2count = worklist->items;
341
2.28M
        MVM_gc_mark_collectable(tc, worklist, new_addr);
342
2.28M
343
2.28M
        /* In moving an object to generation 2, we may have left it pointing
344
2.28M
         * to nursery objects. If so, make sure it's in the gen2 roots. */
345
2.28M
        if (to_gen2) {
346
922k
            MVMCollectable **j;
347
922k
            MVMuint32 max = worklist->items, k;
348
922k
349
2.43M
            for (k = gen2count; k < max; k++) {
350
1.51M
                j = worklist->list[k];
351
1.51M
                if (*j)
352
1.51M
                    MVM_gc_write_barrier(tc, new_addr, *j);
353
1.51M
            }
354
922k
        }
355
2.28M
    }
356
4.82k
}
357
358
/* Marks a collectable item (object, type object, STable). */
359
3.10M
void MVM_gc_mark_collectable(MVMThreadContext *tc, MVMGCWorklist *worklist, MVMCollectable *new_addr) {
360
3.10M
    MVMuint16 i;
361
3.10M
    MVMuint32 sc_idx;
362
3.10M
363
3.10M
    assert(!(new_addr->flags & MVM_CF_FORWARDER_VALID));
364
3.10M
    /*assert(REPR(new_addr));*/
365
3.10M
    sc_idx = MVM_sc_get_idx_of_sc(new_addr);
366
3.10M
    if (sc_idx > 0)
367
152k
        MVM_gc_worklist_add(tc, worklist, &(tc->instance->all_scs[sc_idx]->sc));
368
3.10M
369
3.10M
    if (new_addr->flags & MVM_CF_TYPE_OBJECT) {
370
1.59k
        /* Add the STable to the worklist. */
371
1.59k
        MVM_gc_worklist_add(tc, worklist, &((MVMObject *)new_addr)->st);
372
1.59k
    }
373
3.10M
    else if (new_addr->flags & MVM_CF_STABLE) {
374
15.9k
        /* Add all references in the STable to the work list. */
375
15.9k
        MVMSTable *new_addr_st = (MVMSTable *)new_addr;
376
15.9k
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->method_cache);
377
46.5k
        for (i = 0; i < new_addr_st->type_check_cache_length; i++)
378
30.5k
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->type_check_cache[i]);
379
15.9k
        if (new_addr_st->container_spec)
380
0
            if (new_addr_st->container_spec->gc_mark_data)
381
0
                new_addr_st->container_spec->gc_mark_data(tc, new_addr_st, worklist);
382
15.9k
        if (new_addr_st->boolification_spec)
383
7.69k
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->boolification_spec->method);
384
15.9k
        if (new_addr_st->invocation_spec) {
385
332
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->class_handle);
386
332
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->attr_name);
387
332
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->invocation_handler);
388
332
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->md_class_handle);
389
332
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->md_cache_attr_name);
390
332
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->md_valid_attr_name);
391
332
        }
392
15.9k
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->WHO);
393
15.9k
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->WHAT);
394
15.9k
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->HOW);
395
15.9k
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->HOW_sc);
396
15.9k
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->method_cache_sc);
397
15.9k
        if (new_addr_st->mode_flags & MVM_PARAMETRIC_TYPE) {
398
0
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.ric.parameterizer);
399
0
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.ric.lookup);
400
0
        }
401
15.9k
        else if (new_addr_st->mode_flags & MVM_PARAMETERIZED_TYPE) {
402
0
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.erized.parametric_type);
403
0
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.erized.parameters);
404
0
        }
405
15.9k
406
15.9k
        /* If it needs to have its REPR data marked, do that. */
407
15.9k
        if (new_addr_st->REPR->gc_mark_repr_data)
408
12.0k
            new_addr_st->REPR->gc_mark_repr_data(tc, new_addr_st, worklist);
409
15.9k
    }
410
3.08M
    else if (new_addr->flags & MVM_CF_FRAME) {
411
181k
        MVM_gc_root_add_frame_roots_to_worklist(tc, worklist, (MVMFrame *)new_addr);
412
181k
    }
413
2.90M
    else {
414
2.90M
        /* Need to view it as an object in here. */
415
2.90M
        MVMObject *new_addr_obj = (MVMObject *)new_addr;
416
2.90M
417
2.90M
        /* Add the STable to the worklist. */
418
2.90M
        MVM_gc_worklist_add(tc, worklist, &new_addr_obj->st);
419
2.90M
420
2.90M
        /* If needed, mark it. This will add addresses to the worklist
421
2.90M
         * that will need updating. Note that we are passing the address
422
2.90M
         * of the object *after* copying it since those are the addresses
423
2.90M
         * we care about updating; the old chunk of memory is now dead! */
424
2.90M
        if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT) && !STABLE(new_addr_obj))
425
0
            MVM_panic(MVM_exitcode_gcnursery, "Found an outdated reference to address %p", new_addr);
426
2.90M
        if (REPR(new_addr_obj)->gc_mark)
427
2.88M
            REPR(new_addr_obj)->gc_mark(tc, STABLE(new_addr_obj), OBJECT_BODY(new_addr_obj), worklist);
428
2.90M
    }
429
3.10M
}
430
431
/* Adds a chunk of work to another thread's in-tray. */
432
319
static void push_work_to_thread_in_tray(MVMThreadContext *tc, MVMuint32 target, MVMGCPassedWork *work) {
433
319
    MVMGCPassedWork * volatile *target_tray;
434
319
435
319
    /* Locate the thread to pass the work to. */
436
319
    MVMThreadContext *target_tc = NULL;
437
319
    if (target == 1) {
438
246
        /* It's going to the main thread. */
439
246
        target_tc = tc->instance->main_thread;
440
246
    }
441
73
    else {
442
73
        MVMThread *t = (MVMThread *)MVM_load(&tc->instance->threads);
443
97
        do {
444
97
            if (t->body.tc && t->body.tc->thread_id == target) {
445
73
                target_tc = t->body.tc;
446
73
                break;
447
73
            }
448
24
        } while ((t = t->body.next));
449
73
        if (!target_tc)
450
0
            MVM_panic(MVM_exitcode_gcnursery, "Internal error: invalid thread ID %d in GC work pass", target);
451
73
    }
452
319
453
319
    /* Pass the work, chaining any other in-tray entries for the thread
454
319
     * after us. */
455
319
    target_tray = &target_tc->gc_in_tray;
456
319
    while (1) {
457
319
        MVMGCPassedWork *orig = *target_tray;
458
319
        work->next = orig;
459
319
        if (MVM_casptr(target_tray, orig, work) == orig)
460
319
            return;
461
319
    }
462
319
}
463
464
/* Adds work to list of items to pass over to another thread, and if we
465
 * reach the pass threshold then does the passing. */
466
12.1k
static void pass_work_item(MVMThreadContext *tc, WorkToPass *wtp, MVMCollectable **item_ptr) {
467
12.1k
    ThreadWork *target_info = NULL;
468
12.1k
    MVMuint32   target      = (*item_ptr)->owner;
469
12.1k
    MVMuint32   j;
470
12.1k
471
12.1k
    /* Find any existing thread work passing list for the target. */
472
12.1k
    if (target == 0)
473
0
        MVM_panic(MVM_exitcode_gcnursery, "Internal error: zeroed target thread ID in work pass");
474
12.2k
    for (j = 0; j < wtp->num_target_threads; j++) {
475
12.0k
        if (wtp->target_work[j].target == target) {
476
12.0k
            target_info = &wtp->target_work[j];
477
12.0k
            break;
478
12.0k
        }
479
12.0k
    }
480
12.1k
481
12.1k
    /* If there's no entry for this target, create one. */
482
12.1k
    if (target_info == NULL) {
483
159
        wtp->num_target_threads++;
484
159
        wtp->target_work = MVM_realloc(wtp->target_work,
485
159
            wtp->num_target_threads * sizeof(ThreadWork));
486
159
        target_info = &wtp->target_work[wtp->num_target_threads - 1];
487
159
        target_info->target = target;
488
159
        target_info->work   = NULL;
489
159
    }
490
12.1k
491
12.1k
    /* See if there's a currently active list; create it if not. */
492
12.1k
    if (!target_info->work) {
493
319
        target_info->work = MVM_calloc(1, sizeof(MVMGCPassedWork));
494
319
    }
495
12.1k
496
12.1k
    /* Add this item to the work list. */
497
12.1k
    target_info->work->items[target_info->work->num_items] = item_ptr;
498
12.1k
    target_info->work->num_items++;
499
12.1k
500
12.1k
    /* If we've hit the limit, pass this work to the target thread. */
501
12.1k
    if (target_info->work->num_items == MVM_GC_PASS_WORK_SIZE) {
502
161
        push_work_to_thread_in_tray(tc, target, target_info->work);
503
161
        target_info->work = NULL;
504
161
    }
505
12.1k
}
506
507
/* Passes all work for other threads that we've got left in our to-pass list. */
508
153
static void pass_leftover_work(MVMThreadContext *tc, WorkToPass *wtp) {
509
153
    MVMuint32 j;
510
312
    for (j = 0; j < wtp->num_target_threads; j++)
511
159
        if (wtp->target_work[j].work)
512
158
            push_work_to_thread_in_tray(tc, wtp->target_work[j].target,
513
158
                wtp->target_work[j].work);
514
153
}
515
516
/* Takes work in a thread's in-tray, if any, and adds it to the worklist. */
517
781
static void add_in_tray_to_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist) {
518
781
    MVMGCPassedWork * volatile *in_tray = &tc->gc_in_tray;
519
781
    MVMGCPassedWork *head;
520
781
521
781
    /* Get work to process. */
522
781
    while (1) {
523
781
        /* See if there's anything in the in-tray; if not, we're done. */
524
781
        head = *in_tray;
525
781
        if (head == NULL)
526
626
            return;
527
781
528
781
        /* Otherwise, try to take it. */
529
155
        if (MVM_casptr(in_tray, head, NULL) == head)
530
155
            break;
531
155
    }
532
781
533
781
    /* Go through list, adding to worklist. */
534
474
    while (head) {
535
319
        MVMGCPassedWork *next = head->next;
536
319
        MVMuint32 i;
537
12.5k
        for (i = 0; i < head->num_items; i++)
538
12.1k
            MVM_gc_worklist_add(tc, worklist, head->items[i]);
539
319
        MVM_free(head);
540
319
        head = next;
541
319
    }
542
155
}
543
544
/* Save dead STable pointers to delete later. */
545
0
static void MVM_gc_collect_enqueue_stable_for_deletion(MVMThreadContext *tc, MVMSTable *st) {
546
0
    MVMSTable *old_head;
547
0
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
548
    assert(!(st->header.flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED));
549
#endif
550
0
    do {
551
0
        old_head = tc->instance->stables_to_free;
552
0
        st->header.sc_forward_u.st = old_head;
553
0
    } while (!MVM_trycas(&tc->instance->stables_to_free, old_head, st));
554
0
}
555
556
/* Some objects, having been copied, need no further attention. Others
 * need to do some additional freeing, however. This goes through the
 * fromspace and does any needed work to free uncopied things (this may
 * run in parallel with the mutator, which will be operating on tospace).
 *
 * tc    - thread whose nursery fromspace is being swept
 * limit - end of the allocated portion of fromspace; scanning stops
 *         once the scan pointer reaches this address */
void MVM_gc_collect_free_nursery_uncopied(MVMThreadContext *tc, void *limit) {
    /* We start scanning the fromspace, and keep going until we hit
     * the end of the area allocated in it. */
    void *scan = tc->nursery_fromspace;
    while (scan < limit) {
        /* The object here is dead if it never got a forwarding pointer
         * written in to it. */
        MVMCollectable *item = (MVMCollectable *)scan;
        MVMuint8 dead = !(item->flags & MVM_CF_FORWARDER_VALID);

        /* Survivors must carry a valid forwarding pointer into tospace/gen2;
         * a NULL forwarder with the flag set would indicate corruption. */
        if (!dead)
            assert(item->sc_forward_u.forwarder != NULL);

        /* Now go by collectable type. */
        if (item->flags & MVM_CF_TYPE_OBJECT) {
            /* Type object */
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
            /* Release the overflow serialization index record, if this dead
             * type object allocated one (sc_forward_u is a union; the sci
             * member is only valid when this flag is set). */
            if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                MVM_free(item->sc_forward_u.sci);
#endif
            /* Drop any entry in the object ID table for the dead object. */
            if (dead && item->flags & MVM_CF_HAS_OBJECT_ID)
                MVM_gc_object_id_clear(tc, item);
        }
        else if (item->flags & MVM_CF_STABLE) {
            MVMSTable *st = (MVMSTable *)item;
            if (dead) {
/*            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : enqueuing an STable %d in the nursery to be freed\n", item);*/
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                if (item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
                    MVM_free(item->sc_forward_u.sci);
                    /* Arguably we don't need to do this, if we're always
                       consistent about what we put on the stable queue. */
                    item->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED;
                }
#endif
                /* STables are not freed immediately; they go on a queue that
                 * is drained later by MVM_gc_collect_free_stables. */
                MVM_gc_collect_enqueue_stable_for_deletion(tc, st);
            }
        }
        else if (item->flags & MVM_CF_FRAME) {
            /* Heap-allocated call frame; destroy it if it died. */
            if (dead)
                MVM_frame_destroy(tc, (MVMFrame *)item);
        }
        else {
            /* Object instance. If dead, call gc_free if needed. Scan is
             * incremented by object size. */
            MVMObject *obj = (MVMObject *)item;
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : collecting an object %p in the nursery with reprid %d\n", item, REPR(obj)->ID);
            /* Let the REPR free any out-of-band storage (bodies, buffers). */
            if (dead && REPR(obj)->gc_free)
                REPR(obj)->gc_free(tc, obj);
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
            if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                MVM_free(item->sc_forward_u.sci);
#endif
            if (dead && item->flags & MVM_CF_HAS_OBJECT_ID)
                MVM_gc_object_id_clear(tc, item);
        }

        /* Go to the next item. */
        scan = (char *)scan + MVM_ALIGN_SIZE(item->size);
    }
}
621
622
/* Free STables (in any thread/generation!) queued to be freed. */
623
338
void MVM_gc_collect_free_stables(MVMThreadContext *tc) {
624
338
    MVMSTable *st = tc->instance->stables_to_free;
625
338
    while (st) {
626
0
        MVMSTable *st_to_free = st;
627
0
        st = st_to_free->header.sc_forward_u.st;
628
0
        st_to_free->header.sc_forward_u.st = NULL;
629
0
        MVM_6model_stable_gc_free(tc, st_to_free);
630
0
    }
631
338
    tc->instance->stables_to_free = NULL;
632
338
}
633
634
/* Goes through the unmarked objects in the second generation heap and builds
 * free lists out of them. Also does any required finalization.
 *
 * tc                 - thread whose gen2 allocator is swept
 * global_destruction - non-zero when the VM is shutting down; affects how
 *                      dead STables are handled (enqueued immediately rather
 *                      than deferred one more GC run) */
void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc, MVMint32 global_destruction) {
    /* Visit each of the size class bins. */
    MVMGen2Allocator *gen2 = tc->gen2;
    MVMuint32 bin, obj_size, page, i;
    char ***freelist_insert_pos;
    for (bin = 0; bin < MVM_GEN2_BINS; bin++) {
        /* If we've nothing allocated in this size class, skip it. */
        if (gen2->size_classes[bin].pages == NULL)
            continue;

        /* Calculate object size for this bin. */
        obj_size = (bin + 1) << MVM_GEN2_BIN_BITS;

        /* freelist_insert_pos is a pointer to a memory location that
         * stores the address of the last traversed free list node (char **). */
        /* Initialize freelist insertion position to free list head. */
        freelist_insert_pos = &gen2->size_classes[bin].free_list;

        /* Visit each page. */
        for (page = 0; page < gen2->size_classes[bin].num_pages; page++) {
            /* Visit all the objects, looking for dead ones and reset the
             * mark for each of them. */
            char *cur_ptr = gen2->size_classes[bin].pages[page];
            /* Only the final page may be partially filled; scan up to its
             * allocation position rather than the full page extent. */
            char *end_ptr = page + 1 == gen2->size_classes[bin].num_pages
                ? gen2->size_classes[bin].alloc_pos
                : cur_ptr + obj_size * MVM_GEN2_PAGE_ITEMS;
            while (cur_ptr < end_ptr) {
                MVMCollectable *col = (MVMCollectable *)cur_ptr;

                /* Is this already a free list slot? If so, it becomes the
                 * new free list insert position. */
                if (*freelist_insert_pos == (char **)cur_ptr) {
                    freelist_insert_pos = (char ***)cur_ptr;
                }

                /* Otherwise, it must be a collectable of some kind. Is it
                 * live? */
                else if (col->flags & MVM_CF_GEN2_LIVE) {
                    /* Yes; clear the mark. */
                    col->flags &= ~MVM_CF_GEN2_LIVE;
                }
                else {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : collecting an object %p in the gen2\n", col);
                    /* No, it's dead. Do any cleanup. */
#if MVM_GC_DEBUG
                    /* Tag the slot so debug builds can catch use of objects
                     * that now live on the free list. */
                    col->flags |= MVM_CF_DEBUG_IN_GEN2_FREE_LIST;
#endif
                    if (col->flags & MVM_CF_TYPE_OBJECT) {
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                        /* Release the overflow serialization index, if any. */
                        if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                            MVM_free(col->sc_forward_u.sci);
#endif
                    }
                    else if (col->flags & MVM_CF_STABLE) {
                        /* Dead STables get a two-phase death: the first sweep
                         * marks them with the sentinel below; only an STable
                         * already carrying the sentinel is actually freed. */
                        if (
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                            !(col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) &&
#endif
                            col->sc_forward_u.sc.sc_idx == 0
                            && col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL) {
                            /* We marked it dead last time, kill it. */
                            MVM_6model_stable_gc_free(tc, (MVMSTable *)col);
                        }
                        else {
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                            if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
                                /* Whatever happens next, we can free this
                                   memory immediately, because no-one will be
                                   serializing a dead STable. */
                                assert(!(col->sc_forward_u.sci->sc_idx == 0
                                         && col->sc_forward_u.sci->idx
                                         == MVM_DIRECT_SC_IDX_SENTINEL));
                                MVM_free(col->sc_forward_u.sci);
                                col->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED;
                            }
#endif
                            if (global_destruction) {
                                /* We're in global destruction, so enqueue to the end
                                 * like we do in the nursery */
                                MVM_gc_collect_enqueue_stable_for_deletion(tc, (MVMSTable *)col);
                            } else {
                                /* There will definitely be another gc run, so mark it as "died last time". */
                                col->sc_forward_u.sc.sc_idx = 0;
                                col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL;
                            }
                            /* Skip the freelist updating. */
                            cur_ptr += obj_size;
                            continue;
                        }
                    }
                    else if (col->flags & MVM_CF_FRAME) {
                        MVM_frame_destroy(tc, (MVMFrame *)col);
                    }
                    else {
                        /* Object instance; call gc_free if needed. */
                        MVMObject *obj = (MVMObject *)col;
                        if (STABLE(obj) && REPR(obj)->gc_free)
                            REPR(obj)->gc_free(tc, obj);
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                        if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                            MVM_free(col->sc_forward_u.sci);
#endif
                    }

                    /* Chain in to the free list. */
                    *((char **)cur_ptr) = (char *)*freelist_insert_pos;
                    *freelist_insert_pos = (char **)cur_ptr;

                    /* Update the pointer to the insert position to point to us */
                    freelist_insert_pos = (char ***)cur_ptr;
                }

                /* Move to the next object. */
                cur_ptr += obj_size;
            }
        }
    }
    
    /* Also need to consider overflows. */
    for (i = 0; i < gen2->num_overflows; i++) {
        if (gen2->overflows[i]) {
            MVMCollectable *col = gen2->overflows[i];
            if (col->flags & MVM_CF_GEN2_LIVE) {
                /* A living over-sized object; just clear the mark. */
                col->flags &= ~MVM_CF_GEN2_LIVE;
            }
            else {
                /* Dead over-sized object. We know if it's this big it cannot
                 * be a type object or STable, so only need handle the simple
                 * object case. */
                if (!(col->flags & (MVM_CF_TYPE_OBJECT | MVM_CF_STABLE | MVM_CF_FRAME))) {
                    MVMObject *obj = (MVMObject *)col;
                    if (REPR(obj)->gc_free)
                        REPR(obj)->gc_free(tc, obj);
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                    if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                        MVM_free(col->sc_forward_u.sci);
#endif
                }
                else {
                    MVM_panic(MVM_exitcode_gcnursery, "Internal error: gen2 overflow contains non-object");
                }
                /* Overflow slots are individually malloc'd, so free directly
                 * rather than via the size-class free lists. */
                MVM_free(col);
                gen2->overflows[i] = NULL;
            }
        }
    }
    /* And finally compact the overflow list */
    MVM_gc_gen2_compact_overflows(gen2);
}