Coverage Report

Created: 2018-07-03 15:31

/home/travis/build/MoarVM/MoarVM/src/core/threadcontext.h
Line
Count
Source (jump to first uncovered line)
1
0
#define MVMGCSTATUS_MASK 3
2
99
#define MVMSUSPENDSTATUS_MASK 12
3
4
/* Possible values for the thread execution interrupt flag. */
5
typedef enum {
6
    /* Indicates that the thread is currently executing, and should
7
     * continue to do so. */
8
    MVMGCStatus_NONE = 0,
9
10
    /* Set when another thread decides it wants to do a GC run. The
11
     * current thread, on detecting this condition at a safe point,
12
     * should join in with the current GC run. */
13
    MVMGCStatus_INTERRUPT = 1,
14
15
    /* Set by a thread when it is unable to do any GC work because it
16
     * is currently blocked waiting on an operation in the outside
17
     * world (such as, waiting for another thread to join, or for
18
     * some I/O to complete). */
19
    MVMGCStatus_UNABLE = 2,
20
21
    /* Indicates that, while the thread was in unable status, a GC
22
     * run was triggered and the scanning work was stolen. A thread
23
     * that becomes unblocked upon seeing this will wait for the GC
24
     * run to be done. */
25
    MVMGCStatus_STOLEN = 3,
26
} MVMGCStatus;
27
28
typedef enum {
29
    /* Indicates the thread shall continue executing. */
30
    MVMSuspendState_NONE = 0,
31
32
    /* Indicates the thread shall suspend execution as soon as practical. */
33
    MVMSuspendState_SUSPEND_REQUEST = 4,
34
35
    /* Indicates the thread has suspended execution and is waiting for
36
     * a signal to resume execution. */
37
    MVMSuspendState_SUSPENDED = 12,
38
} MVMSuspendStatus;
39
40
typedef enum {
41
    /* Just pass by any line number annotation */
42
    MVMDebugSteppingMode_NONE = 0,
43
44
    /* Step Over:
45
     *  Line annotation: If the line number doesn't match, but the frame does.
46
     *  Return from Frame: If the frame matches. */
47
    MVMDebugSteppingMode_STEP_OVER = 1,
48
49
    /* Step Into:
50
     *  Line annotation: If the line number doesn't match in the same frame,
51
     *    if the frame doesn't match.
52
     *  Return from Frame: If the frame matches. */
53
    MVMDebugSteppingMode_STEP_INTO = 2,
54
55
    /* Step Out:
56
     *  Line annotation: -
57
     *  Return from Frame: If the frame matches. */
58
    MVMDebugSteppingMode_STEP_OUT = 3,
59
} MVMDebugSteppingMode;
60
61
62
/* Information associated with an executing thread. */
63
struct MVMThreadContext {
64
    /************************************************************************
65
     * Information about this thread
66
     ************************************************************************/
67
68
    /* Internal ID of the thread. */
69
    MVMuint32 thread_id;
70
71
    /* Thread object representing the thread. */
72
    MVMThread *thread_obj;
73
74
    /* The VM instance that this thread belongs to. */
75
    MVMInstance *instance;
76
77
    /* The number of locks the thread is holding. */
78
    MVMint64 num_locks;
79
80
    /************************************************************************
81
     * Garbage collection and memory management
82
     ************************************************************************/
83
84
    /* Start of fromspace, the place we're copying objects from during a
85
     * copying collection or processing dead objects that need to do extra
86
     * resource release afterwards. */
87
    void *nursery_fromspace;
88
89
    /* Where we evacuate objects to when collecting this thread's nursery, or
90
     * allocate new ones. */
91
    void *nursery_tospace;
92
93
    /* The current allocation pointer, where the next object to be allocated
94
     * should be placed. */
95
    void *nursery_alloc;
96
97
    /* The end of the space we're allowed to allocate to. */
98
    void *nursery_alloc_limit;
99
100
    /* This thread's GC status. */
101
    AO_t gc_status;
102
103
    /* The second GC generation allocator. */
104
    MVMGen2Allocator *gen2;
105
106
    /* The current sizes of the nursery fromspace/tospace for this thread, in
107
     * bytes. Used to handle growing it over time depending on usage. */
108
    MVMuint32 nursery_fromspace_size;
109
    MVMuint32 nursery_tospace_size;
110
111
    /* Non-zero if we should allocate in gen2; incremented/decremented as we
112
     * enter/leave a region wanting gen2 allocation. */
113
    MVMuint32 allocate_in_gen2;
114
115
    /* Number of bytes promoted to gen2 in current GC run. */
116
    MVMuint32 gc_promoted_bytes;
117
118
    /* Temporarily rooted objects. This is generally used by code written in
119
     * C that wants to keep references to objects. Since those may change
120
     * if the code in question also allocates, there is a need to register
121
     * them; this ensures the GC will not swallow them but also that they
122
     * will get updated if a GC run happens. Note that this is used as a
123
     * stack and is also thread-local, so it's cheap to push/pop. */
124
    MVMuint32             num_temproots;
125
    MVMuint32             mark_temproots;
126
    MVMuint32             alloc_temproots;
127
    MVMCollectable     ***temproots;
128
129
    /* Nursery collectables (maybe STables) rooted because something in
130
     * generation 2 is pointing at them. */
131
    MVMuint32             num_gen2roots;
132
    MVMuint32             alloc_gen2roots;
133
    MVMCollectable      **gen2roots;
134
135
    /* Finalize queue objects, which need to have a finalizer invoked once
136
     * they are no longer referenced from anywhere except this queue. */
137
    MVMuint32             num_finalize;
138
    MVMuint32             alloc_finalize;
139
    MVMObject           **finalize;
140
141
    /* List of objects we're in the process of finalizing. */
142
    MVMuint32             num_finalizing;
143
    MVMuint32             alloc_finalizing;
144
    MVMObject           **finalizing;
145
146
    /* The GC's cross-thread in-tray of processing work. */
147
    MVMGCPassedWork *gc_in_tray;
148
149
    /* Threads we will do GC work for this run (ourself plus any that we stole
150
     * work from because they were blocked). */
151
    MVMWorkThread   *gc_work;
152
    MVMuint32        gc_work_size;
153
    MVMuint32        gc_work_count;
154
155
    /* Per-thread fixed size allocator state. */
156
    MVMFixedSizeAllocThread *thread_fsa;
157
158
    /************************************************************************
159
     * Interpreter state
160
     ************************************************************************/
161
162
    /* Pointer to where the interpreter's current opcode is stored. */
163
    MVMuint8 **interp_cur_op;
164
165
    /* Pointer to where the interpreter's bytecode start pointer is stored. */
166
    MVMuint8 **interp_bytecode_start;
167
168
    /* Pointer to where the interpreter's base of the current register
169
     * set is stored. */
170
    MVMRegister **interp_reg_base;
171
172
    /* Pointer to where the interpreter's current compilation unit pointer
173
     * is stored. */
174
    MVMCompUnit **interp_cu;
175
176
    /* Jump buffer, used when an exception is thrown from C-land and we need
177
     * to fall back into the interpreter. These things are huge, so put it
178
     * near the end to keep the hotter stuff on the same cacheline. */
179
    jmp_buf interp_jump;
180
181
    /************************************************************************
182
     * Frames, call stack, and exception state
183
     ************************************************************************/
184
185
    /* The frame we're currently executing. */
186
    MVMFrame *cur_frame;
187
188
    /* The frame lying at the base of the current thread. */
189
    MVMFrame *thread_entry_frame;
190
191
    /* First call stack memory region, so we can traverse them for cleanup. */
192
    MVMCallStackRegion *stack_first;
193
194
    /* Current call stack region, which the next frame will be allocated in. */
195
    MVMCallStackRegion *stack_current;
196
197
    /* Linked list of exception handlers that we're currently executing, topmost
198
     * one first in the list. */
199
    MVMActiveHandler *active_handlers;
200
201
    /* Result object of the last-run exception handler. */
202
    MVMObject *last_handler_result;
203
204
    /* Last payload made available in a payload-goto exception handler. */
205
    MVMObject *last_payload;
206
207
    /************************************************************************
208
     * Specialization and JIT compilation
209
     ************************************************************************/
210
211
    /* Frame sequence numbers in order to cheaply identify the place of a frame
212
     * in the call stack */
213
    MVMint32 current_frame_nr;
214
    MVMint32 next_frame_nr;
215
216
    /* JIT return address pointer, so we can figure out the current position in
217
     * the code */
218
    void **jit_return_address;
219
220
    /* This thread's current spesh log to write into, if there currently is
221
     * one. */
222
    MVMSpeshLog *spesh_log;
223
224
    /* How many spesh logs we can produce, inclusive of the current one.
225
     * Ensures the spesh worker isn't overwhelmed with data before it has a
226
     * chance to produce some specializations. */
227
    AO_t spesh_log_quota;
228
229
    /* The spesh stack simulation, preserved between processing logs. */
230
    MVMSpeshSimStack *spesh_sim_stack;
231
232
    /* We try to do better at OSR by creating a fresh log when we enter a new
233
     * compilation unit. However, for things that EVAL or do a ton of BEGIN,
234
     * this does more harm than good. Use this to throttle it back. */
235
    MVMuint32 num_compunit_extra_logs;
236
237
    /* The current specialization correlation ID, used in logging. */
238
    MVMuint32 spesh_cid;
239
240
#if MVM_GC_DEBUG
241
    /* Whether we are currently in the specializer. Used to catch GC runs that
242
     * take place at times they never should. */
243
    MVMint32 in_spesh;
244
#endif
245
246
    /* State to cheaply determine if we should look again for the availability
247
     * of optimized code at an OSR point. When the current state seen by the
248
     * interpreter of frame number of spesh candidates matches, we know there
249
     * was no change since the last OSR point. */
250
    MVMint32 osr_hunt_frame_nr;
251
    MVMint32 osr_hunt_num_spesh_candidates;
252
253
    /* If we are currently in a spesh plugin, the current set of guards we
254
     * have recorded. */
255
    MVMSpeshPluginGuard *plugin_guards;
256
    MVMObject *plugin_guard_args;
257
    MVMuint32 num_plugin_guards;
258
259
    /************************************************************************
260
     * Per-thread state held by assorted VM subsystems
261
     ************************************************************************/
262
263
    /* libuv event loop */
264
    uv_loop_t *loop;
265
266
    /* Mutex that must be released if we throw an exception. Used in places
267
     * like I/O, which grab a mutex but may throw an exception. */
268
    uv_mutex_t *ex_release_mutex;
269
270
    /* Memory buffer pointing to the last thing we serialized, intended to go
271
     * into the next compilation unit we write. Also the serialized string
272
     * heap, which will be used to seed the compilation unit string heap. */
273
    MVMint32      serialized_size;
274
    char         *serialized;
275
    MVMObject    *serialized_string_heap;
276
277
    /* Serialization context write barrier disabled depth (anything non-zero
278
     * means disabled). */
279
    MVMint32           sc_wb_disable_depth;
280
281
    /* Any serialization contexts we are compiling. The current one is at
282
     * index 0. */
283
    MVMObject     *compiling_scs;
284
285
    /* Dispatcher for next invocation that matches _for to take. If _for is
286
     * NULL then anything matches. */
287
    MVMObject     *cur_dispatcher;
288
    MVMObject     *cur_dispatcher_for;
289
290
    /* Cache of native code callback data. */
291
    MVMNativeCallbackCacheHead *native_callback_cache;
292
293
    /* Random number generator state. */
294
    MVMuint64 rand_state[2];
295
296
    /* NFA evaluator memory cache, to avoid many allocations; see NFA.c. */
297
    MVMuint32 *nfa_done;
298
    MVMuint32 *nfa_curst;
299
    MVMuint32 *nfa_nextst;
300
    MVMint64   nfa_alloc_states;
301
    MVMint64 *nfa_fates;
302
    MVMint64  nfa_fates_len;
303
    MVMint64 *nfa_longlit;
304
    MVMint64  nfa_longlit_len;
305
306
    /* Memory for doing multi-dim indexing with late-bound dimension counts. */
307
    MVMint64 *multi_dim_indices;
308
    MVMint64  num_multi_dim_indices;
309
310
    /* Profiling data collected for this thread, if profiling is on. */
311
    MVMProfileThreadData *prof_data;
312
313
    /* Debug server stepping mode and settings */
314
    MVMDebugSteppingMode step_mode;
315
    MVMFrame *step_mode_frame;
316
    MVMuint32 step_mode_file_idx;
317
    MVMuint32 step_mode_line_no;
318
    MVMuint64 step_message_id;
319
320
    MVMuint32 cur_file_idx;
321
    MVMuint32 cur_line_no;
322
};
323
324
MVMThreadContext * MVM_tc_create(MVMThreadContext *parent, MVMInstance *instance);
325
void MVM_tc_destroy(MVMThreadContext *tc);
326
void MVM_tc_set_ex_release_mutex(MVMThreadContext *tc, uv_mutex_t *mutex);
327
void MVM_tc_release_ex_release_mutex(MVMThreadContext *tc);
328
void MVM_tc_clear_ex_release_mutex(MVMThreadContext *tc);