/home/travis/build/MoarVM/MoarVM/src/core/threads.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "moar.h" |
2 | | #include <platform/threads.h> |
3 | | |
4 | | /* Temporary structure for passing data to thread start. */ |
5 | | typedef struct { |
6 | | MVMThreadContext *tc; |
7 | | MVMObject *thread_obj; |
8 | | } ThreadStart; |
9 | | |
10 | | /* Creates a new thread handle with the MVMThread representation. Does not |
11 | | * actually start execution of the thread, but does give it its unique ID. */ |
12 | 173 | MVMObject * MVM_thread_new(MVMThreadContext *tc, MVMObject *invokee, MVMint64 app_lifetime) { |
13 | 173 | MVMThread *thread; |
14 | 173 | MVMThreadContext *child_tc; |
15 | 173 | unsigned int interval_id; |
16 | 173 | |
17 | 173 | interval_id = MVM_telemetry_interval_start(tc, "spawning a new thread off of me"); |
18 | 173 | |
19 | 173 | /* Create the Thread object and stash code to run and lifetime. */ |
20 | 173 | MVMROOT(tc, invokee, { |
21 | 173 | thread = (MVMThread *)MVM_repr_alloc_init(tc, tc->instance->Thread); |
22 | 173 | }); |
23 | 173 | thread->body.stage = MVM_thread_stage_unstarted; |
24 | 173 | MVM_ASSIGN_REF(tc, &(thread->common.header), thread->body.invokee, invokee); |
25 | 173 | thread->body.app_lifetime = app_lifetime; |
26 | 173 | |
27 | 173 | /* Try to create the new threadcontext. Can throw if libuv can't |
28 | 173 | * create a loop for it for some reason (i.e. too many open files) */ |
29 | 173 | MVMROOT(tc, thread, { |
30 | 173 | child_tc = MVM_tc_create(tc, tc->instance); |
31 | 173 | }); |
32 | 173 | |
33 | 173 | /* Set up the new threadcontext a little. */ |
34 | 173 | child_tc->thread_obj = thread; |
35 | 173 | child_tc->thread_id = 1 + MVM_incr(&tc->instance->next_user_thread_id); |
36 | 173 | /* Add one, since MVM_incr returns original. */ |
37 | 173 | thread->body.tc = child_tc; |
38 | 173 | |
39 | 173 | MVM_telemetry_interval_stop(child_tc, interval_id, "i'm the newly spawned thread."); |
40 | 173 | |
41 | 173 | /* Also make a copy of the thread ID in the thread object itself, so it |
42 | 173 | * is available once the thread dies and its ThreadContext is gone. */ |
43 | 173 | thread->body.thread_id = child_tc->thread_id; |
44 | 173 | |
45 | 173 | return (MVMObject *)thread; |
46 | 173 | } |
47 | | |
48 | | /* This callback is passed to the interpreter code. It takes care of making |
49 | | * the initial invocation of the thread code. */ |
50 | 173 | static void thread_initial_invoke(MVMThreadContext *tc, void *data) { |
51 | 173 | /* The passed data is simply the code object to invoke. */ |
52 | 173 | ThreadStart *ts = (ThreadStart *)data; |
53 | 173 | MVMThread *thread = (MVMThread *)ts->thread_obj; |
54 | 173 | MVMObject *invokee = thread->body.invokee; |
55 | 173 | thread->body.invokee = NULL; |
56 | 173 | |
57 | 173 | /* Create initial frame, which sets up all of the interpreter state also. */ |
58 | 173 | invokee = MVM_frame_find_invokee(tc, invokee, NULL); |
59 | 173 | STABLE(invokee)->invoke(tc, invokee, MVM_callsite_get_common(tc, MVM_CALLSITE_ID_NULL_ARGS), NULL); |
60 | 173 | |
61 | 173 | /* This frame should be marked as the thread entry frame, so that any |
62 | 173 | * return from it will cause us to drop out of the interpreter and end |
63 | 173 | * the thread. */ |
64 | 173 | tc->thread_entry_frame = tc->cur_frame; |
65 | 173 | } |
66 | | |
67 | | /* This callback handles starting execution of a thread. */ |
68 | 173 | static void start_thread(void *data) { |
69 | 173 | ThreadStart *ts = (ThreadStart *)data; |
70 | 173 | MVMThreadContext *tc = ts->tc; |
71 | 173 | |
72 | 173 | /* wait for the GC to finish if it's not finished stealing us. */ |
73 | 173 | MVM_gc_mark_thread_unblocked(tc); |
74 | 173 | tc->thread_obj->body.stage = MVM_thread_stage_started; |
75 | 173 | |
76 | 173 | /* Stash thread ID. */ |
77 | 173 | tc->thread_obj->body.native_thread_id = MVM_platform_thread_id(); |
78 | 173 | |
79 | 173 | /* Create a spesh log for this thread, unless it's just going to run C |
80 | 173 | * code (and thus it's a VM internal worker). */ |
81 | 173 | if (REPR(tc->thread_obj->body.invokee)->ID != MVM_REPR_ID_MVMCFunction) |
82 | 26 | MVM_spesh_log_initialize_thread(tc, 0); |
83 | 173 | |
84 | 173 | MVM_debugserver_notify_thread_creation(tc); |
85 | 173 | |
86 | 173 | /* Enter the interpreter, to run code. */ |
87 | 173 | MVM_interp_run(tc, thread_initial_invoke, ts); |
88 | 173 | |
89 | 173 | MVM_debugserver_notify_thread_destruction(tc); |
90 | 173 | |
91 | 173 | /* Pop the temp root stack's ts->thread_obj, if it's still there (if we |
92 | 173 | * cleared the temp root stack on exception at some point, it'll already be |
93 | 173 | * gone). */ |
94 | 173 | if (tc->num_temproots != 0) |
95 | 25 | MVM_gc_root_temp_pop_n(tc, tc->num_temproots); |
96 | 173 | MVM_free(ts); |
97 | 173 | |
98 | 173 | /* Mark as exited, so the GC will know to clear our stuff. */ |
99 | 173 | tc->thread_obj->body.stage = MVM_thread_stage_exited; |
100 | 173 | |
101 | 173 | /* Mark ourselves as blocked, so that another thread will take care |
102 | 173 | * of GC-ing our objects and cleaning up our thread context. */ |
103 | 173 | MVM_gc_mark_thread_blocked(tc); |
104 | 173 | |
105 | 173 | /* Exit the thread, now it's completed. */ |
106 | 173 | MVM_platform_thread_exit(NULL); |
107 | 173 | } |
108 | | |
109 | | /* Begins execution of a thread. */ |
110 | 173 | void MVM_thread_run(MVMThreadContext *tc, MVMObject *thread_obj) { |
111 | 173 | MVMThread *child = (MVMThread *)thread_obj; |
112 | 173 | int status, added; |
113 | 173 | ThreadStart *ts; |
114 | 173 | |
115 | 173 | if (REPR(child)->ID == MVM_REPR_ID_MVMThread && IS_CONCRETE(thread_obj)) { |
116 | 173 | MVMThreadContext *child_tc = child->body.tc; |
117 | 173 | |
118 | 173 | /* Mark thread as GC blocked until the thread actually starts. */ |
119 | 173 | MVM_gc_mark_thread_blocked(child_tc); |
120 | 173 | |
121 | 173 | /* Create thread state, to pass to the thread start callback. */ |
122 | 173 | ts = MVM_malloc(sizeof(ThreadStart)); |
123 | 173 | ts->tc = child_tc; |
124 | 173 | |
125 | 173 | /* Push to starting threads list. We may need to retry this if we are |
126 | 173 | * asked to join a GC run at this point (since the GC would already |
127 | 173 | * have taken a snapshot of the thread list, so it's not safe to add |
128 | 173 | * another at this point). */ |
129 | 173 | added = 0; |
130 | 346 | while (!added) { |
131 | 173 | uv_mutex_lock(&tc->instance->mutex_threads); |
132 | 173 | if (MVM_load(&tc->gc_status) == MVMGCStatus_NONE) { |
133 | 173 | /* Insert into list. */ |
134 | 173 | MVM_ASSIGN_REF(tc, &(child->common.header), child->body.next, |
135 | 173 | tc->instance->threads); |
136 | 173 | tc->instance->threads = child; |
137 | 173 | |
138 | 173 | /* Store the thread object in the thread start information and |
139 | 173 | * keep it alive by putting it in the *child* tc's temp roots. */ |
140 | 173 | ts->thread_obj = thread_obj; |
141 | 173 | MVM_gc_root_temp_push(child_tc, (MVMCollectable **)&ts->thread_obj); |
142 | 173 | |
143 | 173 | /* Move thread to starting stage. */ |
144 | 173 | child->body.stage = MVM_thread_stage_starting; |
145 | 173 | |
146 | 173 | /* Mark us done and unlock the mutex; any GC run will now have |
147 | 173 | * a consistent view of the thread list and can safely run. */ |
148 | 173 | added = 1; |
149 | 173 | uv_mutex_unlock(&tc->instance->mutex_threads); |
150 | 173 | } |
151 | 0 | else { |
152 | 0 | /* Another thread decided we'll GC now. Release mutex, and |
153 | 0 | * do the GC, making sure thread_obj and child are marked. */ |
154 | 0 | uv_mutex_unlock(&tc->instance->mutex_threads); |
155 | 0 | MVMROOT2(tc, thread_obj, child, { |
156 | 0 | GC_SYNC_POINT(tc); |
157 | 0 | }); |
158 | 0 | } |
159 | 173 | } |
160 | 173 | |
161 | 173 | /* Do the actual thread creation. */ |
162 | 173 | status = uv_thread_create(&child->body.thread, start_thread, ts); |
163 | 173 | if (status < 0) |
164 | 0 | MVM_panic(MVM_exitcode_compunit, "Could not spawn thread: errorcode %d", status); |
165 | 173 | } |
166 | 0 | else { |
167 | 0 | MVM_exception_throw_adhoc(tc, |
168 | 0 | "Thread handle passed to run must have representation MVMThread"); |
169 | 0 | } |
170 | 173 | } |
171 | | |
172 | | /* Waits for a thread to finish. */ |
173 | 25 | static int try_join(MVMThreadContext *tc, MVMThread *thread) { |
174 | 25 | /* Join the thread, marking ourselves as unable to GC while we wait. */ |
175 | 25 | int status; |
176 | 25 | MVM_gc_root_temp_push(tc, (MVMCollectable **)&thread); |
177 | 25 | MVM_gc_mark_thread_blocked(tc); |
178 | 25 | if (thread->body.stage < MVM_thread_stage_exited) { |
179 | 15 | status = uv_thread_join(&thread->body.thread); |
180 | 15 | } |
181 | 10 | else { |
182 | 10 | /* the target already ended */ |
183 | 10 | status = 0; |
184 | 10 | } |
185 | 25 | MVM_gc_mark_thread_unblocked(tc); |
186 | 25 | MVM_gc_root_temp_pop(tc); |
187 | 25 | |
188 | 25 | /* After a thread has been joined, we trigger a GC run to clean up after |
189 | 25 | * it. This avoids problems where a program spawns threads and joins them |
190 | 25 | * in a loop gobbling a load of memory and other resources because we do |
191 | 25 | * not ever trigger a GC run to clean up the thread. */ |
192 | 25 | MVM_gc_enter_from_allocator(tc); |
193 | 25 | |
194 | 25 | return status; |
195 | 25 | } |
196 | 25 | void MVM_thread_join(MVMThreadContext *tc, MVMObject *thread_obj) { |
197 | 25 | if (REPR(thread_obj)->ID == MVM_REPR_ID_MVMThread && IS_CONCRETE(thread_obj)) { |
198 | 25 | int status = try_join(tc, (MVMThread *)thread_obj); |
199 | 25 | if (status < 0) |
200 | 0 | MVM_panic(MVM_exitcode_compunit, "Could not join thread: errorcode %d", status); |
201 | 25 | } |
202 | 0 | else { |
203 | 0 | MVM_exception_throw_adhoc(tc, |
204 | 0 | "Thread handle passed to join must have representation MVMThread"); |
205 | 0 | } |
206 | 25 | } |
207 | | |
208 | | /* Gets the (VM-level) ID of a thread. */ |
209 | 8 | MVMint64 MVM_thread_id(MVMThreadContext *tc, MVMObject *thread_obj) { |
210 | 8 | if (REPR(thread_obj)->ID == MVM_REPR_ID_MVMThread && IS_CONCRETE(thread_obj)) |
211 | 8 | return ((MVMThread *)thread_obj)->body.thread_id; |
212 | 8 | else |
213 | 0 | MVM_exception_throw_adhoc(tc, |
214 | 0 | "Thread handle passed to threadid must have representation MVMThread"); |
215 | 8 | } |
216 | | |
217 | | /* Gets the native OS ID of a thread. If it's not yet available because |
218 | | * the thread was not yet started, this will return 0. */ |
219 | 0 | MVMint64 MVM_thread_native_id(MVMThreadContext *tc, MVMObject *thread_obj) { |
220 | 0 | if (REPR(thread_obj)->ID == MVM_REPR_ID_MVMThread && IS_CONCRETE(thread_obj)) |
221 | 0 | return ((MVMThread *)thread_obj)->body.native_thread_id; |
222 | 0 | else |
223 | 0 | MVM_exception_throw_adhoc(tc, |
224 | 0 | "Thread handle passed to threadnativeid must have representation MVMThread"); |
225 | 0 | } |
226 | | |
227 | | /* Yields control to another thread. */ |
228 | 2.68k | void MVM_thread_yield(MVMThreadContext *tc) { |
229 | 2.68k | MVM_telemetry_timestamp(tc, "thread yielding"); |
230 | 2.68k | MVM_platform_thread_yield(); |
231 | 2.68k | } |
232 | | |
233 | | /* Gets the object representing the current thread. */ |
234 | 3 | MVMObject * MVM_thread_current(MVMThreadContext *tc) { |
235 | 3 | return (MVMObject *)tc->thread_obj; |
236 | 3 | } |
237 | | |
238 | | /* Gets the number of locks held by a thread. */ |
239 | 0 | MVMint64 MVM_thread_lock_count(MVMThreadContext *tc, MVMObject *thread_obj) { |
240 | 0 | if (REPR(thread_obj)->ID == MVM_REPR_ID_MVMThread && IS_CONCRETE(thread_obj)) { |
241 | 0 | MVMThreadContext *thread_tc = ((MVMThread *)thread_obj)->body.tc; |
242 | 0 | return thread_tc ? thread_tc->num_locks : 0; |
243 | 0 | } |
244 | 0 | else { |
245 | 0 | MVM_exception_throw_adhoc(tc, |
246 | 0 | "Thread handle used with threadlockcount must have representation MVMThread"); |
247 | 0 | } |
248 | 0 | } |
249 | | |
250 | 0 | void MVM_thread_cleanup_threads_list(MVMThreadContext *tc, MVMThread **head) { |
251 | 0 | /* Assumed to be the only thread accessing the list. |
252 | 0 | * must set next on every item. */ |
253 | 0 | MVMThread *new_list = NULL, *this = *head, *next; |
254 | 0 | *head = NULL; |
255 | 0 | while (this) { |
256 | 0 | next = this->body.next; |
257 | 0 | switch (this->body.stage) { |
258 | 0 | case MVM_thread_stage_starting: |
259 | 0 | case MVM_thread_stage_waiting: |
260 | 0 | case MVM_thread_stage_started: |
261 | 0 | case MVM_thread_stage_exited: |
262 | 0 | case MVM_thread_stage_clearing_nursery: |
263 | 0 | /* push it to the new starting list */ |
264 | 0 | this->body.next = new_list; |
265 | 0 | new_list = this; |
266 | 0 | break; |
267 | 0 | case MVM_thread_stage_destroyed: |
268 | 0 | /* don't put in a list */ |
269 | 0 | this->body.next = NULL; |
270 | 0 | break; |
271 | 0 | default: |
272 | 0 | MVM_panic(MVM_exitcode_threads, "Thread in unknown stage: %"MVM_PRSz"\n", this->body.stage); |
273 | 0 | } |
274 | 0 | this = next; |
275 | 0 | } |
276 | 0 | *head = new_list; |
277 | 0 | } |
278 | | |
279 | | /* Goes through all non-app-lifetime threads and joins them. */ |
280 | 143 | void MVM_thread_join_foreground(MVMThreadContext *tc) { |
281 | 143 | MVMint64 work = 1; |
282 | 286 | while (work) { |
283 | 143 | MVMThread *cur_thread = tc->instance->threads; |
284 | 143 | work = 0; |
285 | 458 | while (cur_thread) { |
286 | 315 | if (cur_thread->body.tc != tc->instance->main_thread) { |
287 | 172 | if (!cur_thread->body.app_lifetime) { |
288 | 24 | if (MVM_load(&cur_thread->body.stage) < MVM_thread_stage_exited) { |
289 | 0 | /* Join may trigger GC and invalidate cur_thread, so we |
290 | 0 | * just set work to 1 and do another trip around the main |
291 | 0 | * loop. */ |
292 | 0 | try_join(tc, cur_thread); |
293 | 0 | work = 1; |
294 | 0 | break; |
295 | 0 | } |
296 | 24 | } |
297 | 172 | } |
298 | 315 | cur_thread = cur_thread->body.next; |
299 | 315 | } |
300 | 143 | } |
301 | 143 | } |
302 | | |