Coverage Report

Created: 2018-07-03 15:31

/home/travis/build/MoarVM/MoarVM/src/core/fixedsizealloc.c
Line
Count
Source (jump to first uncovered line)
1
#include "moar.h"
2
3
#include "memdebug.h"
4
5
/* The fixed size allocator provides a thread-safe mechanism for getting and
6
 * releasing fixed-size chunks of memory. It requests larger blocks from the
7
 * operating system, and then allocates out of them. Can certainly be further
8
 * improved. The free list works like a stack, so you get the most recently
9
 * freed piece of memory of a given size, which should give good cache
10
 * behavior. */
11
12
/* Turn this on to switch to a mode where we debug by size. */
13
#define FSA_SIZE_DEBUG 0
14
#if FSA_SIZE_DEBUG
15
typedef struct {
16
    MVMuint64 alloc_size;
17
    void *memory;
18
} MVMFixedSizeAllocDebug;
19
#endif
20
21
/* Creates the allocator data structure with bins. */
22
144
MVMFixedSizeAlloc * MVM_fixed_size_create(MVMThreadContext *tc) {
23
144
    int init_stat;
24
144
#ifdef MVM_VALGRIND_SUPPORT
25
    int bin_no;
26
#endif
27
144
    MVMFixedSizeAlloc *al = MVM_malloc(sizeof(MVMFixedSizeAlloc));
28
144
    al->size_classes = MVM_calloc(MVM_FSA_BINS, sizeof(MVMFixedSizeAllocSizeClass));
29
144
    if ((init_stat = uv_mutex_init(&(al->complex_alloc_mutex))) < 0)
30
0
        MVM_exception_throw_adhoc(tc, "Failed to initialize mutex: %s",
31
0
            uv_strerror(init_stat));
32
144
    al->freelist_spin = 0;
33
144
    al->free_at_next_safepoint_overflows = NULL;
34
144
35
144
    /* All other places where we use valgrind macros are very likely
36
144
     * thrown out by dead code elimination. Not 100% sure about this,
37
144
     * so we ifdef it out. */
38
144
#ifdef MVM_VALGRIND_SUPPORT
39
    for (bin_no = 0; bin_no < MVM_FSA_BINS; bin_no++)
40
        VALGRIND_CREATE_MEMPOOL(&al->size_classes[bin_no], MVM_FSA_REDZONE_BYTES, 0);
41
#endif
42
144
43
144
    return al;
44
144
}
45
46
/* Creates the per-thread fixed size allocator state. */
47
317
void MVM_fixed_size_create_thread(MVMThreadContext *tc) {
48
317
    MVMFixedSizeAllocThread *al = MVM_malloc(sizeof(MVMFixedSizeAllocThread));
49
317
    al->size_classes = MVM_calloc(MVM_FSA_BINS, sizeof(MVMFixedSizeAllocThreadSizeClass));
50
317
    tc->thread_fsa = al;
51
317
}
52
53
/* Destroys the global fixed size allocator data structure and all of
54
 * the memory held within it. */
55
0
void MVM_fixed_size_destroy(MVMFixedSizeAlloc *al) {
56
0
    int bin_no;
57
0
58
0
    for (bin_no = 0; bin_no < MVM_FSA_BINS; bin_no++) {
59
0
        int page_no;
60
0
        int num_pages = al->size_classes[bin_no].num_pages;
61
0
62
0
        VALGRIND_DESTROY_MEMPOOL(&al->size_classes[bin_no]);
63
0
64
0
        for (page_no = 0; page_no < num_pages; page_no++) {
65
0
            MVM_free(al->size_classes[bin_no].pages[page_no]);
66
0
        }
67
0
        MVM_free(al->size_classes[bin_no].pages);
68
0
    }
69
0
    uv_mutex_destroy(&(al->complex_alloc_mutex));
70
0
71
0
    MVM_free(al->size_classes);
72
0
    MVM_free(al);
73
0
}
74
75
/* Determine the bin. If we hit a bin exactly then it's off-by-one,
76
 * since the bins list is base-0. Otherwise we've some extra bits,
77
 * which round us up to the next bin, but that's a no-op. */
78
43.1M
static MVMuint32 bin_for(size_t bytes) {
79
43.1M
    MVMuint32 bin = (MVMuint32)(bytes >> MVM_FSA_BIN_BITS);
80
43.1M
    if ((bytes & MVM_FSA_BIN_MASK) == 0)
81
43.1M
        bin--;
82
43.1M
    return bin;
83
43.1M
}
84
85
/* Sets up a size class bin the first time it is needed, allocating its
 * initial page. */
86
12.0k
static void setup_bin(MVMFixedSizeAlloc *al, MVMuint32 bin) {
87
12.0k
    /* Work out page size we want. */
88
12.0k
    MVMuint32 page_size = MVM_FSA_PAGE_ITEMS * ((bin + 1) << MVM_FSA_BIN_BITS) + MVM_FSA_REDZONE_BYTES * 2 * MVM_FSA_PAGE_ITEMS;
89
12.0k
90
12.0k
    /* We'll just allocate a single page to start off with. */
91
12.0k
    al->size_classes[bin].num_pages = 1;
92
12.0k
    al->size_classes[bin].pages     = MVM_malloc(sizeof(void *) * al->size_classes[bin].num_pages);
93
12.0k
    al->size_classes[bin].pages[0]  = MVM_malloc(page_size);
94
12.0k
95
12.0k
    /* Set up allocation position and limit. */
96
12.0k
    al->size_classes[bin].alloc_pos = al->size_classes[bin].pages[0];
97
12.0k
    al->size_classes[bin].alloc_limit = al->size_classes[bin].alloc_pos + page_size;
98
12.0k
}
99
100
/* Adds a new page to a size class bin. */
101
24.1k
static void add_page(MVMFixedSizeAlloc *al, MVMuint32 bin) {
102
24.1k
    /* Work out page size. */
103
24.1k
    MVMuint32 page_size = MVM_FSA_PAGE_ITEMS * ((bin + 1) << MVM_FSA_BIN_BITS) + MVM_FSA_REDZONE_BYTES * 2 * MVM_FSA_PAGE_ITEMS;
104
24.1k
105
24.1k
    /* Add the extra page. */
106
24.1k
    MVMuint32 cur_page = al->size_classes[bin].num_pages;
107
24.1k
    al->size_classes[bin].num_pages++;
108
24.1k
    al->size_classes[bin].pages = MVM_realloc(al->size_classes[bin].pages,
109
24.1k
        sizeof(void *) * al->size_classes[bin].num_pages);
110
24.1k
    al->size_classes[bin].pages[cur_page] = MVM_malloc(page_size);
111
24.1k
112
24.1k
    /* Set up allocation position and limit. */
113
24.1k
    al->size_classes[bin].alloc_pos = al->size_classes[bin].pages[cur_page];
114
24.1k
    al->size_classes[bin].alloc_limit = al->size_classes[bin].alloc_pos + page_size;
115
24.1k
116
24.1k
    /* set the cur_page to a proper value */
117
24.1k
    al->size_classes[bin].cur_page = cur_page;
118
24.1k
}
119
120
/* Allocates a piece of memory of the specified size, using the FSA. */
121
3.22M
static void * alloc_slow_path(MVMThreadContext *tc, MVMFixedSizeAlloc *al, MVMuint32 bin) {
122
3.22M
    void *result;
123
3.22M
124
3.22M
    /* Lock. */
125
3.22M
    uv_mutex_lock(&(al->complex_alloc_mutex));
126
3.22M
127
3.22M
    /* If we've no pages yet, never encountered this bin; set it up. */
128
3.22M
    if (al->size_classes[bin].pages == NULL)
129
12.0k
        setup_bin(al, bin);
130
3.22M
131
3.22M
    /* If we're at the page limit, add a new page. */
132
3.22M
    if (al->size_classes[bin].alloc_pos == al->size_classes[bin].alloc_limit) {
133
24.1k
        add_page(al, bin);
134
24.1k
    }
135
3.22M
136
3.22M
    /* Now we can allocate. */
137
3.22M
    result = (void *)(al->size_classes[bin].alloc_pos + MVM_FSA_REDZONE_BYTES);
138
3.22M
    al->size_classes[bin].alloc_pos += ((bin + 1) << MVM_FSA_BIN_BITS) + 2 * MVM_FSA_REDZONE_BYTES;
139
3.22M
140
3.22M
    VALGRIND_MEMPOOL_ALLOC(&al->size_classes[bin], result, (bin + 1) << MVM_FSA_BIN_BITS);
141
3.22M
142
3.22M
    /* Unlock. */
143
3.22M
    uv_mutex_unlock(&(al->complex_alloc_mutex));
144
3.22M
145
3.22M
    return result;
146
3.22M
}
147
3.54M
static void * alloc_from_global(MVMThreadContext *tc, MVMFixedSizeAlloc *al, MVMuint32 bin) {
148
3.54M
    /* Try and take from the global free list (fast path). */
149
3.54M
    MVMFixedSizeAllocSizeClass     *bin_ptr = &(al->size_classes[bin]);
150
3.54M
    MVMFixedSizeAllocFreeListEntry *fle = NULL;
151
3.54M
    /* Multi-threaded, so take a lock. Note that the lock is needed in
152
3.54M
     * addition to the atomic operations: the atomics allow us to add
153
3.54M
     * to the free list in a lock-free way, and the lock allows us to
154
3.54M
     * avoid the ABA issue we'd have with only the atomics. */
155
3.54M
    while (!MVM_trycas(&(al->freelist_spin), 0, 1)) {
156
134
        MVMint32 i = 0;
157
137k
        while (i < 1024)
158
137k
            i++;
159
134
    }
160
3.54M
    do {
161
3.54M
        fle = bin_ptr->free_list;
162
3.54M
        if (!fle)
163
3.22M
            break;
164
316k
    } while (!MVM_trycas(&(bin_ptr->free_list), fle, fle->next));
165
3.54M
    MVM_barrier();
166
3.54M
    al->freelist_spin = 0;
167
3.54M
    if (fle) {
168
316k
        VALGRIND_MEMPOOL_ALLOC(&al->size_classes[bin], ((void *)fle),
169
316k
                (bin + 1) << MVM_FSA_BIN_BITS);
170
316k
        return (void *)fle;
171
316k
    }
172
3.54M
173
3.54M
    /* Failed to take from free list; slow path with the lock. */
174
3.22M
    return alloc_slow_path(tc, al, bin);
175
3.54M
}
176
23.0M
void * MVM_fixed_size_alloc(MVMThreadContext *tc, MVMFixedSizeAlloc *al, size_t bytes) {
177
23.0M
#if FSA_SIZE_DEBUG
178
    MVMFixedSizeAllocDebug *dbg = MVM_malloc(bytes + sizeof(MVMuint64));
179
    dbg->alloc_size = bytes;
180
    return &(dbg->memory);
181
#else
182
23.0M
    MVMuint32 bin = bin_for(bytes);
183
23.0M
    if (bin < MVM_FSA_BINS) {
184
22.9M
        /* Try and take from the per-thread free list. */
185
22.9M
        MVMFixedSizeAllocThreadSizeClass *bin_ptr = &(tc->thread_fsa->size_classes[bin]);
186
22.9M
        MVMFixedSizeAllocFreeListEntry *fle = bin_ptr->free_list;
187
22.9M
        if (fle) {
188
19.3M
            bin_ptr->free_list = fle->next;
189
19.3M
            bin_ptr->items--;
190
19.3M
            return (void *)fle;
191
19.3M
        }
192
3.54M
        return alloc_from_global(tc, al, bin);
193
22.9M
    }
194
126k
    return MVM_malloc(bytes);
195
23.0M
#endif
196
23.0M
}
197
198
/* Allocates a piece of memory of the specified size, using the FSA. Promises
199
 * it will be zeroed. */
200
12.8M
void * MVM_fixed_size_alloc_zeroed(MVMThreadContext *tc, MVMFixedSizeAlloc *al, size_t bytes) {
201
12.8M
    void *allocd = MVM_fixed_size_alloc(tc, al, bytes);
202
12.8M
    memset(allocd, 0, bytes);
203
12.8M
    return allocd;
204
12.8M
}
205
206
/* Reallocs a piece of memory to the specified size, using the FSA. */
207
0
void * MVM_fixed_size_realloc(MVMThreadContext *tc, MVMFixedSizeAlloc *al, void * p, size_t old_bytes, size_t new_bytes) {
208
0
#if FSA_SIZE_DEBUG
209
    MVMFixedSizeAllocDebug *dbg = MVM_realloc((char *)p - 8, new_bytes + sizeof(MVMuint64));
210
    dbg->alloc_size = new_bytes;
211
    return &(dbg->memory);
212
#else
213
0
    MVMuint32 old_bin = bin_for(old_bytes);
214
0
    MVMuint32 new_bin = bin_for(new_bytes);
215
0
    if (old_bin == new_bin) {
216
0
        return p;
217
0
    }
218
0
    else if (old_bin < MVM_FSA_BINS || new_bin < MVM_FSA_BINS) {
219
0
        void *allocd = MVM_fixed_size_alloc(tc, al, new_bytes);
220
0
        memcpy(allocd, p, new_bin > old_bin ? old_bytes : new_bytes);
221
0
        MVM_fixed_size_free(tc, al, old_bytes, p);
222
0
        return allocd;
223
0
    }
224
0
    else {
225
0
        return MVM_realloc(p, new_bytes);
226
0
    }
227
0
#endif
228
0
}
229
230
/* Reallocs a piece of memory to the specified size, using the FSA; the old
 * memory is not freed immediately, but at the next global safepoint. */
231
37
void * MVM_fixed_size_realloc_at_safepoint(MVMThreadContext *tc, MVMFixedSizeAlloc *al, void * p, size_t old_bytes, size_t new_bytes) {
232
37
#if FSA_SIZE_DEBUG
233
    MVMFixedSizeAllocDebug *new_p = MVM_fixed_size_alloc(tc, al, new_bytes);
234
    MVMFixedSizeAllocDebug *dbg = (MVMFixedSizeAllocDebug *)((char *)new_p - 8);
235
    memcpy(new_p, (char *)p, new_bytes > old_bytes ? old_bytes : new_bytes);
236
    MVM_fixed_size_free_at_safepoint(tc, al, old_bytes, p);
237
    return &(dbg->memory);
238
#else
239
37
    MVMuint32 old_bin = bin_for(old_bytes);
240
37
    MVMuint32 new_bin = bin_for(new_bytes);
241
37
    if (old_bin == new_bin) {
242
0
        return p;
243
0
    }
244
37
    else {
245
37
        void *allocd = MVM_fixed_size_alloc(tc, al, new_bytes);
246
37
        memcpy(allocd, p, new_bin > old_bin ? old_bytes : new_bytes);
247
37
        MVM_fixed_size_free_at_safepoint(tc, al, old_bytes, p);
248
37
        return allocd;
249
37
    }
250
37
#endif
251
37
}
252
253
/* Frees a piece of memory of the specified size, using the FSA. */
254
static void add_to_global_bin_freelist(MVMThreadContext *tc, MVMFixedSizeAlloc *al,
255
400k
                                       MVMint32 bin, void *to_free) {
256
400k
    MVMFixedSizeAllocSizeClass     *bin_ptr = &(al->size_classes[bin]);
257
400k
    MVMFixedSizeAllocFreeListEntry *to_add  = (MVMFixedSizeAllocFreeListEntry *)to_free;
258
400k
    MVMFixedSizeAllocFreeListEntry *orig;
259
400k
260
400k
    VALGRIND_MEMPOOL_FREE(bin_ptr, to_add);
261
400k
    VALGRIND_MAKE_MEM_DEFINED(to_add, sizeof(MVMFixedSizeAllocFreeListEntry));
262
400k
263
400k
    /* Multi-threaded; race to add it. */
264
400k
    do {
265
400k
        orig = bin_ptr->free_list;
266
400k
        to_add->next = orig;
267
400k
    } while (!MVM_trycas(&(bin_ptr->free_list), orig, to_add));
268
400k
}
269
static void add_to_bin_freelist(MVMThreadContext *tc, MVMFixedSizeAlloc *al,
270
19.9M
                                MVMint32 bin, void *to_free) {
271
19.9M
    MVMFixedSizeAllocThreadSizeClass *bin_ptr = &(tc->thread_fsa->size_classes[bin]);
272
19.9M
    if (bin_ptr->items < MVM_FSA_THREAD_FREELIST_LIMIT) {
273
19.5M
        MVMFixedSizeAllocFreeListEntry *to_add  = (MVMFixedSizeAllocFreeListEntry *)to_free;
274
19.5M
        to_add->next = bin_ptr->free_list;
275
19.5M
        bin_ptr->free_list = to_add;
276
19.5M
        bin_ptr->items++;
277
19.5M
    }
278
400k
    else {
279
400k
        add_to_global_bin_freelist(tc, al, bin, to_free);
280
400k
    }
281
19.9M
}
282
20.0M
void MVM_fixed_size_free(MVMThreadContext *tc, MVMFixedSizeAlloc *al, size_t bytes, void *to_free) {
283
20.0M
#if FSA_SIZE_DEBUG
284
    MVMFixedSizeAllocDebug *dbg = (MVMFixedSizeAllocDebug *)((char *)to_free - 8);
285
    if (dbg->alloc_size != bytes) {
286
#ifdef MVM_VALGRIND_SUPPORT
287
        if (RUNNING_ON_VALGRIND) {
288
            char command[128];
289
            snprintf(&command, 128, "check_memory defined %p %d", dbg, bytes + 8);
290
            VALGRIND_MONITOR_COMMAND(command);
291
            VALGRIND_PRINTF_BACKTRACE("Fixed size allocator: wrong size in free: expected %lu, got %lu", dbg->alloc_size, bytes);
292
        }
293
        else {
294
            MVM_panic(1, "Fixed size allocator: wrong size in free: expected %lu, got %lu", dbg->alloc_size, bytes);
295
        }
296
#else
297
        MVM_panic(1, "Fixed size allocator: wrong size in free: expected %lu, got %lu", dbg->alloc_size, bytes);
298
#endif
299
    }
300
    MVM_free(dbg);
301
#else
302
20.0M
    MVMuint32 bin = bin_for(bytes);
303
20.0M
    if (bin < MVM_FSA_BINS) {
304
19.9M
        /* Add to freelist chained through a bin. */
305
19.9M
        add_to_bin_freelist(tc, al, bin, to_free);
306
19.9M
    }
307
121k
    else {
308
121k
        /* Was malloc'd due to being oversize, so just free it. */
309
121k
        MVM_free(to_free);
310
121k
    }
311
20.0M
#endif
312
20.0M
}
313
314
/* Race to add to the "free at next safe point" overflows list. */
315
304
static void add_to_overflows_safepoint_free_list(MVMThreadContext *tc, MVMFixedSizeAlloc *al, void *to_free) {
316
304
    MVMFixedSizeAllocSafepointFreeListEntry *orig;
317
304
    MVMFixedSizeAllocSafepointFreeListEntry *to_add = MVM_fixed_size_alloc(
318
304
        tc, al, sizeof(MVMFixedSizeAllocSafepointFreeListEntry));
319
304
    to_add->to_free = to_free;
320
304
    do {
321
304
        orig = al->free_at_next_safepoint_overflows;
322
304
        to_add->next = orig;
323
304
    } while (!MVM_trycas(&(al->free_at_next_safepoint_overflows), orig, to_add));
324
304
}
325
326
/* Queues a piece of memory of the specified size to be freed at the next
327
 * global safe point, using the FSA. A global safe point is defined as one in
328
 * which all threads, since we requested the freeing of the memory, have
329
 * reached a safe point. */
330
12.3k
void MVM_fixed_size_free_at_safepoint(MVMThreadContext *tc, MVMFixedSizeAlloc *al, size_t bytes, void *to_free) {
331
12.3k
#if FSA_SIZE_DEBUG
332
    MVMFixedSizeAllocDebug *dbg = (MVMFixedSizeAllocDebug *)((char *)to_free - 8);
333
    if (dbg->alloc_size != bytes) {
334
#ifdef MVM_VALGRIND_SUPPORT
335
        if (RUNNING_ON_VALGRIND) {
336
            char command[128]; snprintf(&command, 128, "check_memory defined %p %lu", dbg, bytes + 8); VALGRIND_MONITOR_COMMAND(command);
337
            VALGRIND_PRINTF_BACKTRACE("Fixed size allocator: wrong size in free: expected %lu, got %lu", dbg->alloc_size, bytes);
338
        }
339
        else {
340
            MVM_panic(1, "Fixed size allocator: wrong size in free: expected %lu, got %lu", dbg->alloc_size, bytes);
341
        }
342
#else
343
        MVM_panic(1, "Fixed size allocator: wrong size in free: expected %lu, got %lu", dbg->alloc_size, bytes);
344
#endif
345
        MVM_panic(1, "Fixed size allocator: wrong size in free: expected %lu, got %lu", dbg->alloc_size, bytes);
346
    }
347
    add_to_overflows_safepoint_free_list(tc, al, dbg);
348
#else
349
12.3k
    MVMuint32 bin = bin_for(bytes);
350
12.3k
    if (bin < MVM_FSA_BINS) {
351
12.0k
        /* Came from a bin; race to add it to the "free at next safe point"
352
12.0k
         * list. */
353
12.0k
        MVMFixedSizeAllocSizeClass     *bin_ptr = &(al->size_classes[bin]);
354
12.0k
        MVMFixedSizeAllocSafepointFreeListEntry *orig;
355
12.0k
        MVMFixedSizeAllocSafepointFreeListEntry *to_add = MVM_fixed_size_alloc(
356
12.0k
            tc, al, sizeof(MVMFixedSizeAllocSafepointFreeListEntry));
357
12.0k
        to_add->to_free = to_free;
358
12.0k
        do {
359
12.0k
            orig = bin_ptr->free_at_next_safepoint_list;
360
12.0k
            to_add->next = orig;
361
12.0k
        } while (!MVM_trycas(&(bin_ptr->free_at_next_safepoint_list), orig, to_add));
362
12.0k
    }
363
304
    else {
364
304
        /* Was malloc'd due to being oversize. */
365
304
        add_to_overflows_safepoint_free_list(tc, al, to_free);
366
304
    }
367
12.3k
#endif
368
12.3k
}
369
370
/* Called when we're at a safepoint, to free everything queued up to be freed
371
 * at the next safepoint. Assumes that it is only called on one thread at a
372
 * time, while the world is stopped. */
373
338
void MVM_fixed_size_safepoint(MVMThreadContext *tc, MVMFixedSizeAlloc *al) {
374
338
    /* Go through bins and process any safepoint free lists. */
375
338
    MVMFixedSizeAllocSafepointFreeListEntry *cur, *next;
376
338
    MVMint32 bin;
377
32.7k
    for (bin = 0; bin < MVM_FSA_BINS; bin++) {
378
32.4k
        cur = al->size_classes[bin].free_at_next_safepoint_list;
379
36.9k
        while (cur) {
380
4.53k
            next = cur->next;
381
4.53k
            add_to_bin_freelist(tc, al, bin, cur->to_free);
382
4.53k
            MVM_fixed_size_free(tc, al, sizeof(MVMFixedSizeAllocSafepointFreeListEntry), cur);
383
4.53k
            cur = next;
384
4.53k
        }
385
32.4k
        al->size_classes[bin].free_at_next_safepoint_list = NULL;
386
32.4k
    }
387
338
388
338
    /* Free overflows. */
389
338
    cur = al->free_at_next_safepoint_overflows;
390
505
    while (cur) {
391
167
        next = cur->next;
392
167
        MVM_free(cur->to_free);
393
167
        MVM_fixed_size_free(tc, al, sizeof(MVMFixedSizeAllocSafepointFreeListEntry), cur);
394
167
        cur = next;
395
167
    }
396
338
    al->free_at_next_safepoint_overflows = NULL;
397
338
}
398
399
/* Destroys per-thread fixed size allocator state. All freelists will be
400
 * contributed back to the global freelists for the bin size. */
401
23
void MVM_fixed_size_destroy_thread(MVMThreadContext *tc) {
402
23
    MVMFixedSizeAllocThread *al = tc->thread_fsa;
403
23
    int bin;
404
2.23k
    for (bin = 0; bin < MVM_FSA_BINS; bin++) {
405
2.20k
        MVMFixedSizeAllocThreadSizeClass *bin_ptr = &(al->size_classes[bin]);
406
2.20k
        MVMFixedSizeAllocFreeListEntry *fle = bin_ptr->free_list;
407
2.26k
        while (fle) {
408
55
            MVMFixedSizeAllocFreeListEntry *next = fle->next;
409
55
            add_to_global_bin_freelist(tc, tc->instance->fsa, bin, (void *)fle);
410
55
            fle = next;
411
55
        }
412
2.20k
    }
413
23
    MVM_free(al->size_classes);
414
23
    MVM_free(al);
415
23
}