Coverage Report

Created: 2017-04-15 07:07

/home/travis/build/MoarVM/MoarVM/src/gc/collect.c
Line
Count
Source (jump to first uncovered line)
1
#include "moar.h"
2
3
/* Combines a piece of work that will be passed to another thread with the
 * ID of the target thread to pass it to. */
typedef struct {
    /* Thread ID of the thread that owns the objects in `work`. */
    MVMuint32        target;
    /* Batch of collectable handles being accumulated for that thread;
     * NULL when no batch is currently open. */
    MVMGCPassedWork *work;
} ThreadWork;
9
10
/* Current chunks of work we're building up to pass. */
typedef struct {
    /* Number of distinct target threads we have a ThreadWork entry for. */
    MVMuint32   num_target_threads;
    /* Dynamically grown array of per-target work batches (MVM_realloc'd in
     * pass_work_item, MVM_free'd by the caller in MVM_gc_collect). */
    ThreadWork *target_work;
} WorkToPass;
15
16
/* Forward decls. */
17
static void process_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist, WorkToPass *wtp, MVMuint8 gen);
18
static void pass_work_item(MVMThreadContext *tc, WorkToPass *wtp, MVMCollectable **item_ptr);
19
static void pass_leftover_work(MVMThreadContext *tc, WorkToPass *wtp);
20
static void add_in_tray_to_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist);
21
22
/* Does a garbage collection run. Exactly what it does is configured by the
 * couple of arguments that it takes.
 *
 * The what_to_do argument specifies where it should look for things to add
 * to the worklist: everywhere, just at thread local stuff, or just in the
 * thread's in-tray.
 *
 * The gen argument specifies whether to collect the nursery or both of the
 * generations. Nursery collection is done by semi-space copying. Once an
 * object is seen/copied once in the nursery (may be tuned in the future to
 * twice or so - we'll see) then it is not copied to tospace, but instead
 * promoted to the second generation. If we are collecting generation 2 also,
 * then objects that are alive in the second generation are simply marked.
 * Since the second generation is managed as a set of sized pools, there is
 * much less motivation for any kind of copying/compaction; the internal
 * fragmentation that makes finding a right-sized gap problematic will not
 * happen.
 *
 * Note that it adds the roots and processes them in phases, to try to avoid
 * building up a huge worklist. */
void MVM_gc_collect(MVMThreadContext *tc, MVMuint8 what_to_do, MVMuint8 gen) {
    /* Create a GC worklist. */
    MVMGCWorklist *worklist = MVM_gc_worklist_create(tc, gen != MVMGCGenerations_Nursery);

    /* Initialize work passing data structure. */
    WorkToPass wtp;
    wtp.num_target_threads = 0;
    wtp.target_work = NULL;

    /* See what we need to work on this time. */
    if (what_to_do == MVMGCWhatToDo_InTray) {
        /* We just need to process anything in the in-tray. */
        add_in_tray_to_worklist(tc, worklist);
        /* NOTE(review): only the trailing "%d items" argument is passed here;
         * GCDEBUG_LOG presumably injects the thread/run values for the first
         * two %d itself -- confirm against the macro definition. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from in tray \n", worklist->items);
        process_worklist(tc, worklist, &wtp, gen);
    }
    else if (what_to_do == MVMGCWhatToDo_Finalizing) {
        /* Need to process the finalizing queue. */
        MVMuint32 i;
        for (i = 0; i < tc->num_finalizing; i++)
            MVM_gc_worklist_add(tc, worklist, &(tc->finalizing[i]));
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from finalizing \n", worklist->items);
        process_worklist(tc, worklist, &wtp, gen);
    }
    else {
        /* Main collection run. Swap fromspace and tospace, allocating the
         * new tospace if that didn't yet happen (we don't allocate it at
         * startup, to cut memory use for threads that quit before a GC).
         * Note the deliberate cross-assignment: the previous tospace (full
         * of live data) becomes the new fromspace to evacuate from, and the
         * previous fromspace is recycled as the new, empty tospace. */
        void *fromspace = tc->nursery_tospace;
        void *tospace   = tc->nursery_fromspace;
        if (!tospace)
            tospace = MVM_calloc(1, MVM_NURSERY_SIZE);
        tc->nursery_fromspace = fromspace;
        tc->nursery_tospace   = tospace;

        /* Reset nursery allocation pointers to the new tospace. */
        tc->nursery_alloc       = tospace;
        tc->nursery_alloc_limit = (char *)tc->nursery_alloc + MVM_NURSERY_SIZE;

        /* Add permanent roots and process them; only one thread will do
        * this, since they are instance-wide. */
        if (what_to_do != MVMGCWhatToDo_NoInstance) {
            MVM_gc_root_add_permanents_to_worklist(tc, worklist, NULL);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from instance permanents\n", worklist->items);
            process_worklist(tc, worklist, &wtp, gen);
            MVM_gc_root_add_instance_roots_to_worklist(tc, worklist, NULL);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from instance roots\n", worklist->items);
            process_worklist(tc, worklist, &wtp, gen);
        }

        /* Add per-thread state to worklist and process it. */
        MVM_gc_root_add_tc_roots_to_worklist(tc, worklist, NULL);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from TC objects\n", worklist->items);
        process_worklist(tc, worklist, &wtp, gen);

        /* Walk current call stack, following caller chain until we reach a
         * heap-allocated frame. Note that tc->cur_frame may itself be a heap
         * frame, in which case we put it directly on the worklist as it can
         * move. */
        if (tc->cur_frame && MVM_FRAME_IS_ON_CALLSTACK(tc, tc->cur_frame)) {
            MVMFrame *cur_frame = tc->cur_frame;
            while (cur_frame && MVM_FRAME_IS_ON_CALLSTACK(tc, cur_frame)) {
                MVM_gc_root_add_frame_roots_to_worklist(tc, worklist, cur_frame);
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from a stack frame\n", worklist->items);
                process_worklist(tc, worklist, &wtp, gen);
                cur_frame = cur_frame->caller;
            }
        }
        else {
            /* Heap frame (or NULL, which the worklist tolerates): add the
             * handle itself so the reference is updated if the frame moves. */
            MVM_gc_worklist_add(tc, worklist, &tc->cur_frame);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from current frame\n", worklist->items);
            process_worklist(tc, worklist, &wtp, gen);
        }

        /* Add temporary roots and process them (these are per-thread). */
        MVM_gc_root_add_temps_to_worklist(tc, worklist, NULL);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from thread temps\n", worklist->items);
        process_worklist(tc, worklist, &wtp, gen);

        /* Add things that are roots for the first generation because they are
        * pointed to by objects in the second generation and process them
        * (also per-thread). Note we need not do this if we're doing a full
        * collection anyway (in fact, we must not for correctness, otherwise
        * the gen2 rooting keeps them alive forever). */
        if (gen == MVMGCGenerations_Nursery) {
            MVM_gc_root_add_gen2s_to_worklist(tc, worklist);
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from gen2 \n", worklist->items);
            process_worklist(tc, worklist, &wtp, gen);
        }

        /* Process anything in the in-tray. */
        add_in_tray_to_worklist(tc, worklist);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : processing %d items from in tray \n", worklist->items);
        process_worklist(tc, worklist, &wtp, gen);

        /* At this point, we have probably done most of the work we will
         * need to (only get more if another thread passes us more); zero
         * out the remaining tospace. */
        memset(tc->nursery_alloc, 0, (char *)tc->nursery_alloc_limit - (char *)tc->nursery_alloc);
    }

    /* Destroy the worklist. */
    MVM_gc_worklist_destroy(tc, worklist);

    /* Pass any work for other threads we accumulated but that didn't trigger
     * the work passing threshold, then cleanup work passing list. */
    if (wtp.num_target_threads) {
        pass_leftover_work(tc, &wtp);
        MVM_free(wtp.target_work);
    }
}
153
154
/* Processes the current worklist: drains it, copying/promoting/marking each
 * collectable, updating the handle through which it was reached, and
 * recursively feeding newly discovered references back onto the worklist. */
static void process_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist, WorkToPass *wtp, MVMuint8 gen) {
    MVMGen2Allocator  *gen2;
    MVMCollectable   **item_ptr;
    MVMCollectable    *new_addr;
    MVMuint32          gen2count;

    /* Grab the second generation allocator; we may move items into the
     * old generation. */
    gen2 = tc->gen2;

    while ((item_ptr = MVM_gc_worklist_get(tc, worklist))) {
        /* Dereference the object we're considering. */
        MVMCollectable *item = *item_ptr;
        MVMuint8 item_gen2;
        MVMuint8 to_gen2 = 0;

        /* If the item is NULL, that's fine - it's just a null reference and
         * thus we've no object to consider. */
        if (item == NULL)
            continue;

        /* If it's in the second generation and we're only doing a nursery,
         * collection, we have nothing to do. */
        item_gen2 = item->flags & MVM_CF_SECOND_GEN;
        if (item_gen2) {
            if (gen == MVMGCGenerations_Nursery)
                continue;
            if (item->flags & MVM_CF_GEN2_LIVE) {
                /* gen2 and marked as live. */
                continue;
            }
        } else if (item->flags & MVM_CF_FORWARDER_VALID) {
            /* If the item was already seen and copied, then it will have a
             * forwarding address already. Just update this pointer to the
             * new address and we're done. */
            assert(*item_ptr != item->sc_forward_u.forwarder);
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT)) {
                if (*item_ptr != item->sc_forward_u.forwarder) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : updating handle %p from %p to forwarder %p\n", item_ptr, item, item->sc_forward_u.forwarder);
                }
                else {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : already visited handle %p to forwarder %p\n", item_ptr, item->sc_forward_u.forwarder);
                }
            }
            *item_ptr = item->sc_forward_u.forwarder;
            continue;
        } else {
            /* If the pointer is already into tospace (the bit we've already
               copied into), we already updated it, so we're done. */
            if (item >= (MVMCollectable *)tc->nursery_tospace && item < (MVMCollectable *)tc->nursery_alloc) {
                continue;
            }
        }

        /* If it's owned by a different thread, we need to pass it over to
         * the owning thread. */
        if (item->owner != tc->thread_id) {
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : sending a handle %p to object %p to thread %d\n", item_ptr, item, item->owner);
            pass_work_item(tc, wtp, item_ptr);
            continue;
        }

        /* If it's in to-space but *ahead* of our copy offset then it's an
           out-of-date pointer and we have some kind of corruption. */
        if (item >= (MVMCollectable *)tc->nursery_alloc && item < (MVMCollectable *)tc->nursery_alloc_limit)
            MVM_panic(1, "Heap corruption detected: pointer %p to past fromspace", item);

        /* At this point, we didn't already see the object, which means we
         * need to take some action. Go on the generation... */
        if (item_gen2) {
            assert(!(item->flags & MVM_CF_FORWARDER_VALID));
            /* It's in the second generation. We'll just mark it. */
            new_addr = item;
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT)) {
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : handle %p was already %p\n", item_ptr, new_addr);
            }
            item->flags |= MVM_CF_GEN2_LIVE;
            assert(*item_ptr == new_addr);
        } else {
            /* Catch NULL stable (always sign of trouble) in debug mode. */
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT) && !STABLE(item)) {
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : found a zeroed handle %p to object %p\n", item_ptr, item);
                /* Deliberate dereference of an invalid address (1) so that a
                 * debugger traps right here at the point of detection. */
                printf("%d", ((MVMCollectable *)1)->owner);
            }

            /* Did we see it in the nursery before, or should we move it to
             * gen2 anyway since it a persistent ID was requested? */
            if (item->flags & (MVM_CF_NURSERY_SEEN | MVM_CF_HAS_OBJECT_ID)) {
                /* Yes; we should move it to the second generation. Allocate
                 * space in the second generation. */
                to_gen2 = 1;
                new_addr = item->flags & MVM_CF_HAS_OBJECT_ID
                    ? MVM_gc_object_id_use_allocation(tc, item)
                    : MVM_gc_gen2_allocate(gen2, item->size);

                /* Add on to the promoted amount (used both to decide when to do
                 * the next full collection, as well as for profiling). Note we
                 * add unmanaged size on for objects below. */
                tc->gc_promoted_bytes += item->size;

                /* Copy the object to the second generation and mark it as
                 * living there. */
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : copying an object %p of size %d to gen2 %p\n",
                    item, item->size, new_addr);
                memcpy(new_addr, item, item->size);
                /* NOTE(review): XOR clears NURSERY_SEEN when it was set, but
                 * would *set* it on an object promoted solely via
                 * MVM_CF_HAS_OBJECT_ID that was never seen -- presumably that
                 * cannot occur or is harmless in gen2; confirm. */
                new_addr->flags ^= MVM_CF_NURSERY_SEEN;
                new_addr->flags |= MVM_CF_SECOND_GEN;

                /* If it's a frame with an active work area, we need to keep
                 * on visiting it. Also add on object's unmanaged size. */
                if (new_addr->flags & MVM_CF_FRAME) {
                    if (((MVMFrame *)new_addr)->work)
                        MVM_gc_root_gen2_add(tc, (MVMCollectable *)new_addr);
                }
                else if (!(new_addr->flags & (MVM_CF_TYPE_OBJECT | MVM_CF_STABLE))) {
                    MVMObject *new_obj_addr = (MVMObject *)new_addr;
                    if (REPR(new_obj_addr)->unmanaged_size)
                        tc->gc_promoted_bytes += REPR(new_obj_addr)->unmanaged_size(tc,
                            STABLE(new_obj_addr), OBJECT_BODY(new_obj_addr));
                }

                /* If we're going to sweep the second generation, also need
                 * to mark it as live. */
                if (gen == MVMGCGenerations_Both)
                    new_addr->flags |= MVM_CF_GEN2_LIVE;
            }
            else {
                /* No, so it will live in the nursery for another GC
                 * iteration. Allocate space in the nursery. */
                new_addr = (MVMCollectable *)tc->nursery_alloc;
                tc->nursery_alloc = (char *)tc->nursery_alloc + item->size;
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : copying an object %p (reprid %d) of size %d to tospace %p\n",
                    item, REPR(item)->ID, item->size, new_addr);

                /* Copy the object to tospace and mark it as seen in the
                 * nursery (so the next time around it will move to the
                 * older generation, if it survives). */
                memcpy(new_addr, item, item->size);
                new_addr->flags |= MVM_CF_NURSERY_SEEN;
            }

            /* Store the forwarding pointer and update the original
             * reference. */
            if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT) && new_addr != item) {
                GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : updating handle %p from referent %p (reprid %d) to %p\n", item_ptr, item, REPR(item)->ID, new_addr);
            }
            *item_ptr = new_addr;
            item->sc_forward_u.forwarder = new_addr;
            /* Set the flag on the copy of item *in fromspace* to mark that the
               forwarder pointer is valid. */
            item->flags |= MVM_CF_FORWARDER_VALID;
        }

        /* Finally, we need to mark the collectable (at its moved address).
         * Track how many items we had before we mark it, in case we need
         * to write barrier them post-move to uphold the generational
         * invariant. */
        gen2count = worklist->items;
        MVM_gc_mark_collectable(tc, worklist, new_addr);

        /* In moving an object to generation 2, we may have left it pointing
         * to nursery objects. If so, make sure it's in the gen2 roots. */
        if (to_gen2) {
            MVMCollectable **j;
            MVMuint32 max = worklist->items, k;

            for (k = gen2count; k < max; k++) {
                j = worklist->list[k];
                if (*j)
                    MVM_gc_write_barrier(tc, new_addr, *j);
            }
        }
    }
}
329
330
/* Marks a collectable item (object, type object, STable). Dispatches on the
 * item's flags and pushes every outgoing reference onto the worklist so it
 * gets visited (and its handle updated) in turn. Expects new_addr to be the
 * post-move address: its forwarder must no longer be flagged valid. */
void MVM_gc_mark_collectable(MVMThreadContext *tc, MVMGCWorklist *worklist, MVMCollectable *new_addr) {
    MVMuint16 i;
    MVMuint32 sc_idx;

    assert(!(new_addr->flags & MVM_CF_FORWARDER_VALID));
    /*assert(REPR(new_addr));*/
    /* Keep the serialization context this item belongs to (if any) alive. */
    sc_idx = MVM_sc_get_idx_of_sc(new_addr);
    if (sc_idx > 0)
        MVM_gc_worklist_add(tc, worklist, &(tc->instance->all_scs[sc_idx]->sc));

    if (new_addr->flags & MVM_CF_TYPE_OBJECT) {
        /* Add the STable to the worklist. */
        MVM_gc_worklist_add(tc, worklist, &((MVMObject *)new_addr)->st);
    }
    else if (new_addr->flags & MVM_CF_STABLE) {
        /* Add all references in the STable to the work list. */
        MVMSTable *new_addr_st = (MVMSTable *)new_addr;
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->method_cache);
        for (i = 0; i < new_addr_st->type_check_cache_length; i++)
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->type_check_cache[i]);
        /* Container/boolification/invocation specs are optional; only follow
         * the ones that are present. */
        if (new_addr_st->container_spec)
            if (new_addr_st->container_spec->gc_mark_data)
                new_addr_st->container_spec->gc_mark_data(tc, new_addr_st, worklist);
        if (new_addr_st->boolification_spec)
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->boolification_spec->method);
        if (new_addr_st->invocation_spec) {
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->class_handle);
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->attr_name);
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->invocation_handler);
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->md_class_handle);
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->md_cache_attr_name);
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->invocation_spec->md_valid_attr_name);
        }
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->WHO);
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->WHAT);
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->HOW);
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->HOW_sc);
        MVM_gc_worklist_add(tc, worklist, &new_addr_st->method_cache_sc);
        /* paramet is a union: which member is live depends on the mode flag. */
        if (new_addr_st->mode_flags & MVM_PARAMETRIC_TYPE) {
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.ric.parameterizer);
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.ric.lookup);
        }
        else if (new_addr_st->mode_flags & MVM_PARAMETERIZED_TYPE) {
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.erized.parametric_type);
            MVM_gc_worklist_add(tc, worklist, &new_addr_st->paramet.erized.parameters);
        }

        /* If it needs to have its REPR data marked, do that. */
        if (new_addr_st->REPR->gc_mark_repr_data)
            new_addr_st->REPR->gc_mark_repr_data(tc, new_addr_st, worklist);
    }
    else if (new_addr->flags & MVM_CF_FRAME) {
        MVM_gc_root_add_frame_roots_to_worklist(tc, worklist, (MVMFrame *)new_addr);
    }
    else {
        /* Need to view it as an object in here. */
        MVMObject *new_addr_obj = (MVMObject *)new_addr;

        /* Add the STable to the worklist. */
        MVM_gc_worklist_add(tc, worklist, &new_addr_obj->st);

        /* If needed, mark it. This will add addresses to the worklist
         * that will need updating. Note that we are passing the address
         * of the object *after* copying it since those are the addresses
         * we care about updating; the old chunk of memory is now dead! */
        if (MVM_GC_DEBUG_ENABLED(MVM_GC_DEBUG_COLLECT) && !STABLE(new_addr_obj))
            MVM_panic(MVM_exitcode_gcnursery, "Found an outdated reference to address %p", new_addr);
        if (REPR(new_addr_obj)->gc_mark)
            REPR(new_addr_obj)->gc_mark(tc, STABLE(new_addr_obj), OBJECT_BODY(new_addr_obj), worklist);
    }
}
402
403
/* Adds a chunk of work to another thread's in-tray. The in-tray is a
 * lock-free singly linked stack; insertion is done with a CAS retry loop
 * so multiple threads may push concurrently. */
static void push_work_to_thread_in_tray(MVMThreadContext *tc, MVMuint32 target, MVMGCPassedWork *work) {
    MVMGCPassedWork * volatile *target_tray;

    /* Locate the thread to pass the work to. */
    MVMThreadContext *target_tc = NULL;
    if (target == 1) {
        /* It's going to the main thread. */
        target_tc = tc->instance->main_thread;
    }
    else {
        /* Walk the instance's thread list looking for the target ID. */
        MVMThread *t = (MVMThread *)MVM_load(&tc->instance->threads);
        do {
            if (t->body.tc && t->body.tc->thread_id == target) {
                target_tc = t->body.tc;
                break;
            }
        } while ((t = t->body.next));
        if (!target_tc)
            MVM_panic(MVM_exitcode_gcnursery, "Internal error: invalid thread ID %d in GC work pass", target);
    }

    /* Pass the work, chaining any other in-tray entries for the thread
     * after us. Retry the CAS until no other pusher races us. */
    target_tray = &target_tc->gc_in_tray;
    while (1) {
        MVMGCPassedWork *orig = *target_tray;
        work->next = orig;
        if (MVM_casptr(target_tray, orig, work) == orig)
            return;
    }
}
435
436
/* Adds work to list of items to pass over to another thread, and if we
437
 * reach the pass threshold then does the passing. */
438
0
static void pass_work_item(MVMThreadContext *tc, WorkToPass *wtp, MVMCollectable **item_ptr) {
439
0
    ThreadWork *target_info = NULL;
440
0
    MVMuint32   target      = (*item_ptr)->owner;
441
0
    MVMuint32   j;
442
0
443
0
    /* Find any existing thread work passing list for the target. */
444
0
    if (target == 0)
445
0
        MVM_panic(MVM_exitcode_gcnursery, "Internal error: zeroed target thread ID in work pass");
446
0
    for (j = 0; j < wtp->num_target_threads; j++) {
447
0
        if (wtp->target_work[j].target == target) {
448
0
            target_info = &wtp->target_work[j];
449
0
            break;
450
0
        }
451
0
    }
452
0
453
0
    /* If there's no entry for this target, create one. */
454
0
    if (target_info == NULL) {
455
0
        wtp->num_target_threads++;
456
0
        wtp->target_work = MVM_realloc(wtp->target_work,
457
0
            wtp->num_target_threads * sizeof(ThreadWork));
458
0
        target_info = &wtp->target_work[wtp->num_target_threads - 1];
459
0
        target_info->target = target;
460
0
        target_info->work   = NULL;
461
0
    }
462
0
463
0
    /* See if there's a currently active list; create it if not. */
464
0
    if (!target_info->work) {
465
0
        target_info->work = MVM_calloc(1, sizeof(MVMGCPassedWork));
466
0
    }
467
0
468
0
    /* Add this item to the work list. */
469
0
    target_info->work->items[target_info->work->num_items] = item_ptr;
470
0
    target_info->work->num_items++;
471
0
472
0
    /* If we've hit the limit, pass this work to the target thread. */
473
0
    if (target_info->work->num_items == MVM_GC_PASS_WORK_SIZE) {
474
0
        push_work_to_thread_in_tray(tc, target, target_info->work);
475
0
        target_info->work = NULL;
476
0
    }
477
0
}
478
479
/* Passes all work for other threads that we've got left in our to-pass list. */
480
0
static void pass_leftover_work(MVMThreadContext *tc, WorkToPass *wtp) {
481
0
    MVMuint32 j;
482
0
    for (j = 0; j < wtp->num_target_threads; j++)
483
0
        if (wtp->target_work[j].work)
484
0
            push_work_to_thread_in_tray(tc, wtp->target_work[j].target,
485
0
                wtp->target_work[j].work);
486
0
}
487
488
/* Takes work in a thread's in-tray, if any, and adds it to the worklist. */
489
146
static void add_in_tray_to_worklist(MVMThreadContext *tc, MVMGCWorklist *worklist) {
490
146
    MVMGCPassedWork * volatile *in_tray = &tc->gc_in_tray;
491
146
    MVMGCPassedWork *head;
492
146
493
146
    /* Get work to process. */
494
146
    while (1) {
495
146
        /* See if there's anything in the in-tray; if not, we're done. */
496
146
        head = *in_tray;
497
146
        if (head == NULL)
498
146
            return;
499
146
500
146
        /* Otherwise, try to take it. */
501
0
        if (MVM_casptr(in_tray, head, NULL) == head)
502
0
            break;
503
0
    }
504
146
505
146
    /* Go through list, adding to worklist. */
506
0
    while (head) {
507
0
        MVMGCPassedWork *next = head->next;
508
0
        MVMuint32 i;
509
0
        for (i = 0; i < head->num_items; i++)
510
0
            MVM_gc_worklist_add(tc, worklist, head->items[i]);
511
0
        MVM_free(head);
512
0
        head = next;
513
0
    }
514
0
}
515
516
/* Save dead STable pointers to delete later. Pushes onto the instance-wide
 * stables_to_free list with a CAS retry loop, since any thread's GC run may
 * enqueue concurrently; the STable's sc_forward_u.st field is reused as the
 * list's next pointer. */
static void MVM_gc_collect_enqueue_stable_for_deletion(MVMThreadContext *tc, MVMSTable *st) {
    MVMSTable *old_head;
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
    /* An allocated overflow index must have been freed (and the flag
     * cleared) before the header's union is repurposed as a list link. */
    assert(!(st->header.flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED));
#endif
    do {
        old_head = tc->instance->stables_to_free;
        st->header.sc_forward_u.st = old_head;
    } while (!MVM_trycas(&tc->instance->stables_to_free, old_head, st));
}
527
528
/* Some objects, having been copied, need no further attention. Others
 * need to do some additional freeing, however. This goes through the
 * fromspace and does any needed work to free uncopied things (this may
 * run in parallel with the mutator, which will be operating on tospace). */
void MVM_gc_collect_free_nursery_uncopied(MVMThreadContext *tc, void *limit) {
    /* We start scanning the fromspace, and keep going until we hit
     * the end of the area allocated in it. */
    void *scan = tc->nursery_fromspace;
    while (scan < limit) {
        /* The object here is dead if it never got a forwarding pointer
         * written in to it. */
        MVMCollectable *item = (MVMCollectable *)scan;
        MVMuint8 dead = !(item->flags & MVM_CF_FORWARDER_VALID);

        if (!dead)
            assert(item->sc_forward_u.forwarder != NULL);

        /* Now go by collectable type. */
        if (item->flags & MVM_CF_TYPE_OBJECT) {
            /* Type object */
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
            if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                MVM_free(item->sc_forward_u.sci);
#endif
            if (dead && item->flags & MVM_CF_HAS_OBJECT_ID)
                MVM_gc_object_id_clear(tc, item);
        }
        else if (item->flags & MVM_CF_STABLE) {
            /* Dead STables are not freed here; they are queued so they can
             * be destroyed later, after nothing can reference them. */
            MVMSTable *st = (MVMSTable *)item;
            if (dead) {
/*            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : enqueuing an STable %d in the nursery to be freed\n", item);*/
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                if (item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
                    MVM_free(item->sc_forward_u.sci);
                    /* Arguably we don't need to do this, if we're always
                       consistent about what we put on the stable queue. */
                    item->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED;
                }
#endif
                MVM_gc_collect_enqueue_stable_for_deletion(tc, st);
            }
        }
        else if (item->flags & MVM_CF_FRAME) {
            if (dead)
                MVM_frame_destroy(tc, (MVMFrame *)item);
        }
        else {
            /* Object instance. If dead, call gc_free if needed. Scan is
             * incremented by object size. */
            MVMObject *obj = (MVMObject *)item;
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : collecting an object %p in the nursery with reprid %d\n", item, REPR(obj)->ID);
            if (dead && REPR(obj)->gc_free)
                REPR(obj)->gc_free(tc, obj);
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
            if (dead && item->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                MVM_free(item->sc_forward_u.sci);
#endif
            if (dead && item->flags & MVM_CF_HAS_OBJECT_ID)
                MVM_gc_object_id_clear(tc, item);
        }

        /* Go to the next item. */
        scan = (char *)scan + item->size;
    }
}
593
594
/* Free STables (in any thread/generation!) queued to be freed. */
595
146
/* Frees all STables (from any thread or generation) that were queued for
 * deletion on the instance-wide stables_to_free list. The list is threaded
 * through each STable's header.sc_forward_u.st pointer; we clear that link
 * before freeing so no dangling chain pointer survives, then reset the
 * list head to empty. */
void MVM_gc_collect_free_stables(MVMThreadContext *tc) {
    MVMSTable *cur, *next;
    for (cur = tc->instance->stables_to_free; cur; cur = next) {
        /* Grab the chain link first; freeing cur invalidates it. */
        next = cur->header.sc_forward_u.st;
        cur->header.sc_forward_u.st = NULL;
        MVM_6model_stable_gc_free(tc, cur);
    }
    tc->instance->stables_to_free = NULL;
}
605
606
/* Goes through the unmarked objects in the second generation heap and builds
607
 * free lists out of them. Also does any required finalization. */
608
0
/* Goes through the unmarked objects in the second generation heap and builds
 * free lists out of them. Also does any required finalization.
 *
 * tc                 - thread whose gen2 allocator is swept
 * global_destruction - non-zero when the whole VM is being torn down; this
 *                      changes how dead STables are handled (enqueue for
 *                      immediate deletion rather than the usual two-pass
 *                      "died last time" protocol). */
void MVM_gc_collect_free_gen2_unmarked(MVMThreadContext *tc, MVMint32 global_destruction) {
    /* Visit each of the size class bins. */
    MVMGen2Allocator *gen2 = tc->gen2;
    MVMuint32 bin, obj_size, page, i;
    char ***freelist_insert_pos;
    for (bin = 0; bin < MVM_GEN2_BINS; bin++) {
        /* If we've nothing allocated in this size class, skip it. */
        if (gen2->size_classes[bin].pages == NULL)
            continue;

        /* Calculate object size for this bin. */
        obj_size = (bin + 1) << MVM_GEN2_BIN_BITS;

        /* freelist_insert_pos is a pointer to a memory location that
         * stores the address of the last traversed free list node (char **). */
        /* Initialize freelist insertion position to free list head. */
        freelist_insert_pos = &gen2->size_classes[bin].free_list;

        /* Visit each page. */
        for (page = 0; page < gen2->size_classes[bin].num_pages; page++) {
            /* Visit all the objects, looking for dead ones and reset the
             * mark for each of them. */
            char *cur_ptr = gen2->size_classes[bin].pages[page];
            /* Only the final page may be partially filled; sweep it up to
             * alloc_pos rather than the full page extent. */
            char *end_ptr = page + 1 == gen2->size_classes[bin].num_pages
                ? gen2->size_classes[bin].alloc_pos
                : cur_ptr + obj_size * MVM_GEN2_PAGE_ITEMS;
            while (cur_ptr < end_ptr) {
                MVMCollectable *col = (MVMCollectable *)cur_ptr;

                /* Is this already a free list slot? If so, it becomes the
                 * new free list insert position. */
                if (*freelist_insert_pos == (char **)cur_ptr) {
                    freelist_insert_pos = (char ***)cur_ptr;
                }

                /* Otherwise, it must be a collectable of some kind. Is it
                 * live? */
                else if (col->flags & MVM_CF_GEN2_LIVE) {
                    /* Yes; clear the mark. */
                    col->flags &= ~MVM_CF_GEN2_LIVE;
                }
                else {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_COLLECT, "Thread %d run %d : collecting an object %p in the gen2\n", col);
                    /* No, it's dead. Do any cleanup. */
                    if (col->flags & MVM_CF_TYPE_OBJECT) {
                        /* Type objects only need their overflow serialization
                         * index (if any) releasing. */
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                        if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                            MVM_free(col->sc_forward_u.sci);
#endif
                    }
                    else if (col->flags & MVM_CF_STABLE) {
                        /* STables get a two-pass treatment: the sentinel
                         * (sc_idx == 0, idx == MVM_DIRECT_SC_IDX_SENTINEL)
                         * means we already saw it dead on a previous run, so
                         * it is now safe to actually free. */
                        if (
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                            !(col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) &&
#endif
                            col->sc_forward_u.sc.sc_idx == 0
                            && col->sc_forward_u.sc.idx == MVM_DIRECT_SC_IDX_SENTINEL) {
                            /* We marked it dead last time, kill it. */
                            MVM_6model_stable_gc_free(tc, (MVMSTable *)col);
                        }
                        else {
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                            if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED) {
                                /* Whatever happens next, we can free this
                                   memory immediately, because no-one will be
                                   serializing a dead STable. */
                                assert(!(col->sc_forward_u.sci->sc_idx == 0
                                         && col->sc_forward_u.sci->idx
                                         == MVM_DIRECT_SC_IDX_SENTINEL));
                                MVM_free(col->sc_forward_u.sci);
                                col->flags &= ~MVM_CF_SERIALZATION_INDEX_ALLOCATED;
                            }
#endif
                            if (global_destruction) {
                                /* We're in global destruction, so enqueue to the end
                                 * like we do in the nursery */
                                MVM_gc_collect_enqueue_stable_for_deletion(tc, (MVMSTable *)col);
                            } else {
                                /* There will definitely be another gc run, so mark it as "died last time". */
                                col->sc_forward_u.sc.sc_idx = 0;
                                col->sc_forward_u.sc.idx = MVM_DIRECT_SC_IDX_SENTINEL;
                            }
                            /* Skip the freelist updating. */
                            /* NOTE(review): deliberate — the STable's memory
                             * must stay intact until the deferred free, so it
                             * must not be chained onto the free list yet. */
                            cur_ptr += obj_size;
                            continue;
                        }
                    }
                    else if (col->flags & MVM_CF_FRAME) {
                        MVM_frame_destroy(tc, (MVMFrame *)col);
                    }
                    else {
                        /* Object instance; call gc_free if needed. */
                        MVMObject *obj = (MVMObject *)col;
                        if (STABLE(obj) && REPR(obj)->gc_free)
                            REPR(obj)->gc_free(tc, obj);
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                        if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                            MVM_free(col->sc_forward_u.sci);
#endif
                    }

                    /* Chain in to the free list. */
                    /* The first sizeof(char *) bytes of the dead slot are
                     * reused as the free-list link. */
                    *((char **)cur_ptr) = (char *)*freelist_insert_pos;
                    *freelist_insert_pos = (char **)cur_ptr;

                    /* Update the pointer to the insert position to point to us */
                    freelist_insert_pos = (char ***)cur_ptr;
                }

                /* Move to the next object. */
                cur_ptr += obj_size;
            }
        }
    }

    /* Also need to consider overflows. */
    /* Overflow slots hold individually malloc'd over-sized collectables. */
    for (i = 0; i < gen2->num_overflows; i++) {
        if (gen2->overflows[i]) {
            MVMCollectable *col = gen2->overflows[i];
            if (col->flags & MVM_CF_GEN2_LIVE) {
                /* A living over-sized object; just clear the mark. */
                col->flags &= ~MVM_CF_GEN2_LIVE;
            }
            else {
                /* Dead over-sized object. We know if it's this big it cannot
                 * be a type object or STable, so only need handle the simple
                 * object case. */
                if (!(col->flags & (MVM_CF_TYPE_OBJECT | MVM_CF_STABLE | MVM_CF_FRAME))) {
                    MVMObject *obj = (MVMObject *)col;
                    if (REPR(obj)->gc_free)
                        REPR(obj)->gc_free(tc, obj);
#ifdef MVM_USE_OVERFLOW_SERIALIZATION_INDEX
                    if (col->flags & MVM_CF_SERIALZATION_INDEX_ALLOCATED)
                        MVM_free(col->sc_forward_u.sci);
#endif
                }
                else {
                    MVM_panic(MVM_exitcode_gcnursery, "Internal error: gen2 overflow contains non-object");
                }
                /* Over-sized objects are malloc'd individually, so free
                 * directly rather than chaining onto a bin free list. */
                MVM_free(col);
                gen2->overflows[i] = NULL;
            }
        }
    }
    /* And finally compact the overflow list */
    MVM_gc_gen2_compact_overflows(gen2);
}