/home/travis/build/MoarVM/MoarVM/src/io/procops.c
Line | Count | Source (jump to first uncovered line) |
1 | | #include "moar.h" |
2 | | #include "platform/time.h" |
3 | | #include "tinymt64.h" |
4 | | #include "bithacks.h" |
5 | | |
/* Expands to a (string, length) argument pair for a literal string;
 * concatenating with "" ensures that only literal strings are accepted as
 * argument (a non-literal would fail to concatenate at compile time). */
#define STR_WITH_LEN(str) ("" str ""), (sizeof(str) - 1)
8 | | |
9 | | /* MSVC compilers know about environ, |
10 | | * see http://msdn.microsoft.com/en-us//library/vstudio/stxk41x1.aspx */ |
11 | | #ifndef _WIN32 |
12 | | #include <unistd.h> |
13 | | # ifdef __APPLE_CC__ |
14 | | # include <crt_externs.h> |
15 | | # define environ (*_NSGetEnviron()) |
16 | | # else |
17 | | extern char **environ; |
18 | | # endif |
19 | | #else |
20 | | #include <stdlib.h> |
21 | | #endif |
22 | | |
23 | | #ifdef _WIN32 |
24 | | static wchar_t * ANSIToUnicode(MVMuint16 acp, const char *str) |
25 | | { |
26 | | const int len = MultiByteToWideChar(acp, 0, str, -1, NULL, 0); |
27 | | wchar_t * const result = (wchar_t *)MVM_malloc(len * sizeof(wchar_t)); |
28 | | |
29 | | MultiByteToWideChar(acp, 0, str, -1, (LPWSTR)result, len); |
30 | | |
31 | | return result; |
32 | | } |
33 | | |
34 | | static char * UnicodeToUTF8(const wchar_t *str) |
35 | | { |
36 | | const int len = WideCharToMultiByte(CP_UTF8, 0, str, -1, NULL, 0, NULL, NULL); |
37 | | char * const result = (char *)MVM_malloc(len + 1); |
38 | | |
39 | | WideCharToMultiByte(CP_UTF8, 0, str, -1, result, len, NULL, NULL); |
40 | | |
41 | | return result; |
42 | | } |
43 | | |
44 | | static char * ANSIToUTF8(MVMuint16 acp, const char * str) |
45 | | { |
46 | | wchar_t * const wstr = ANSIToUnicode(acp, str); |
47 | | char * const result = UnicodeToUTF8(wstr); |
48 | | |
49 | | MVM_free(wstr); |
50 | | return result; |
51 | | } |
52 | | |
53 | | MVM_PUBLIC char ** |
54 | | MVM_UnicodeToUTF8_argv(const int argc, wchar_t **wargv) |
55 | | { |
56 | | int i; |
57 | | char **argv = MVM_malloc((argc + 1) * sizeof(*argv)); |
58 | | for (i = 0; i < argc; ++i) |
59 | | { |
60 | | argv[i] = UnicodeToUTF8(wargv[i]); |
61 | | } |
62 | | argv[i] = NULL; |
63 | | return argv; |
64 | | } |
65 | | |
66 | | #endif |
67 | | |
/* Returns a hash mapping environment variable names to their values.
 * The hash is built lazily on first call and cached on the instance;
 * subsequent calls return the cached hash. On Windows, entries are read
 * from _wenviron and converted to UTF-8; elsewhere, from environ. */
MVMObject * MVM_proc_getenvhash(MVMThreadContext *tc) {
    MVMInstance * const instance = tc->instance;
    MVMObject * env_hash;

    if (instance->env_hash) {
        return instance->env_hash;
    }
    else {
        MVMuint32 pos = 0;
        /* "=" separates name from value in each environment entry. */
        MVMString *needle = MVM_string_ascii_decode(tc, instance->VMString, STR_WITH_LEN("="));
#ifndef _WIN32
        char *env;
#else
        wchar_t *env;
        (void) _wgetenv(L"windows"); /* populate _wenviron */
#endif

        /* Root needle and the hash across the allocating loop below. */
        MVM_gc_root_temp_push(tc, (MVMCollectable **)&needle);

        env_hash = MVM_repr_alloc_init(tc, MVM_hll_current(tc)->slurpy_hash_type);
        MVM_gc_root_temp_push(tc, (MVMCollectable **)&env_hash);

#ifndef _WIN32
        while ((env = environ[pos++]) != NULL) {
            MVMString *str = MVM_string_utf8_c8_decode(tc, instance->VMString, env, strlen(env));
#else
        while ((env = _wenviron[pos++]) != NULL) {
            char * const _env = UnicodeToUTF8(env);
            MVMString *str = MVM_string_utf8_c8_decode(tc, instance->VMString, _env, strlen(_env));
#endif

            /* NOTE(review): assumes every entry contains '='; an entry
             * without one would yield a bogus index here — confirm the
             * platform guarantees this. */
            MVMuint32 index = MVM_string_index(tc, str, needle, 0);

            MVMString *key, *val;
            MVMObject *box;

#ifdef _WIN32
            MVM_free(_env);
#endif
            MVM_gc_root_temp_push(tc, (MVMCollectable **)&str);

            /* Split "NAME=VALUE" at the first '='. */
            key = MVM_string_substring(tc, str, 0, index);
            MVM_gc_root_temp_push(tc, (MVMCollectable **)&key);

            val = MVM_string_substring(tc, str, index + 1, -1);
            box = MVM_repr_box_str(tc, MVM_hll_current(tc)->str_box_type, val);
            MVM_repr_bind_key_o(tc, env_hash, key, box);

            MVM_gc_root_temp_pop_n(tc, 2);  /* str, key */
        }

        MVM_gc_root_temp_pop_n(tc, 2);  /* needle, env_hash */

        /* Cache for subsequent calls. */
        instance->env_hash = env_hash;

        return env_hash;
    }
}
126 | | |
/* Builds a NULL-terminated, envp-style array of "KEY=VALUE" UTF-8 C
 * strings from the hash iterator 'iter'. Expects 'iter', '_env', 'size'
 * and 'i' to be in scope at the expansion site; the strings must later be
 * released with FREE_ENV(). */
#define INIT_ENV() do { \
    MVMROOT(tc, iter, { \
        MVMString * const equal = MVM_string_ascii_decode(tc, tc->instance->VMString, STR_WITH_LEN("=")); \
        MVMROOT(tc, equal, { \
            MVMString *env_str = NULL; \
            MVMObject *iterval = NULL; \
            i = 0; \
            while(MVM_iter_istrue(tc, iter)) { \
                MVM_repr_shift_o(tc, (MVMObject *)iter); \
                env_str = MVM_string_concatenate(tc, MVM_iterkey_s(tc, iter), equal); \
                iterval = MVM_iterval(tc, iter); \
                env_str = MVM_string_concatenate(tc, env_str, MVM_repr_get_str(tc, iterval)); \
                _env[i++] = MVM_string_utf8_c8_encode_C_string(tc, env_str); \
            } \
            _env[size] = NULL; \
        }); \
    }); \
} while (0)
145 | | |
/* Frees every string in the NULL-terminated '_env' array built by
 * INIT_ENV(), then the array itself. Expects '_env' and 'i' in scope. */
#define FREE_ENV() do { \
    i = 0;  \
    while (_env[i]) \
        MVM_free(_env[i++]); \
    MVM_free(_env); \
} while (0)
152 | | |
/* Data that we keep for an asynchronous process handle. */
typedef struct {
    /* The libuv handle to the process; set to NULL once the process has
     * exited and the handle has been closed. */
    uv_process_t *handle;

    /* The async task handle, provided we're running. */
    MVMObject *async_task;

    /* The exit signal to send, if any. */
    MVMint64 signal;
} MVMIOAsyncProcessData;
164 | | |
/* Lifecycle states of an asynchronously spawned process. */
typedef enum {
    STATE_UNSTARTED,  /* spawn task queued; uv_spawn not yet performed */
    STATE_STARTED,    /* uv_spawn succeeded; process is running */
    STATE_DONE        /* process exited, or the spawn attempt failed */
} ProcessState;
170 | | |
/* Info we convey about an async spawn task. */
typedef struct {
    /* Thread context the event-loop callbacks run under. */
    MVMThreadContext *tc;
    /* Index of this task in the event loop's active work list. */
    int work_idx;
    /* The MVMOSHandle wrapping the process. */
    MVMObject *handle;
    /* Hash of user-supplied callbacks/config (done, ready, error,
     * stdout_bytes, stderr_bytes, merge_bytes, write, *_fd, buf_type). */
    MVMObject *callbacks;
    /* Program path, working directory, environment and argument vectors
     * handed to uv_spawn. */
    char *prog;
    char *cwd;
    char **env;
    char **args;
    /* Pipe connected to the child's stdin, if opened for writing;
     * NULLed once a close has been queued. */
    uv_stream_t *stdin_handle;
    /* Per-channel sequence numbers, used to order delivered chunks. */
    MVMuint32 seq_stdout;
    MVMuint32 seq_stderr;
    MVMuint32 seq_merge;
    /* Remaining read permits per channel (back-pressure mechanism). */
    MVMint64 permit_stdout;
    MVMint64 permit_stderr;
    MVMint64 permit_merge;
    /* Pipes for reading the child's stdout/stderr, when requested. */
    uv_pipe_t *pipe_stdout;
    uv_pipe_t *pipe_stderr;
    /* Whether we are currently reading each stream. */
    int reading_stdout;
    int reading_stderr;
    /* Where the process is in its lifecycle. */
    ProcessState state;
    /* Count of live users (process exit + each open pipe); when it drops
     * to zero the task is removed from the event loop's active work. */
    int using;
    /* Non-zero when stdout and stderr are merged into one stream. */
    int merge;
    /* Size of the most recent read; on_alloc uses it to size buffers. */
    size_t last_read;
} SpawnInfo;
197 | | |
/* Info we convey about a write task. */
typedef struct {
    /* The async process handle being written to. */
    MVMOSHandle *handle;
    /* VMArray of uint8/int8 holding the bytes to write; kept alive so the
     * uv_buf_t below stays valid until the write completes. */
    MVMObject *buf_data;
    /* The libuv write request; freed when the write completes or fails. */
    uv_write_t *req;
    /* View over buf_data's storage handed to uv_write. */
    uv_buf_t buf;
    /* Thread context and active-work index for delivering the result. */
    MVMThreadContext *tc;
    int work_idx;
} SpawnWriteInfo;
207 | | |
208 | | /* Completion handler for an asynchronous write. */ |
static void on_write(uv_write_t *req, int status) {
    SpawnWriteInfo *wi = (SpawnWriteInfo *)req->data;
    MVMThreadContext *tc = wi->tc;
    /* Result array: [schedulee, bytes-or-Int, errmsg-or-Str]. */
    MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
    MVMAsyncTask *t = MVM_io_eventloop_get_active_work(tc, wi->work_idx);
    MVM_repr_push_o(tc, arr, t->body.schedulee);
    if (status >= 0) {
        /* Success: box the number of bytes written, then a type object
         * (BOOTStr) in the error slot to signal "no error". */
        MVMROOT2(tc, arr, t, {
            MVMObject *bytes_box = MVM_repr_box_int(tc,
                tc->instance->boot_types.BOOTInt,
                wi->buf.len);
            MVM_repr_push_o(tc, arr, bytes_box);
        });
        MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr);
    }
    else {
        /* Failure: type object in the bytes slot, boxed libuv error
         * message in the error slot. */
        MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
        MVMROOT2(tc, arr, t, {
            MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
                tc->instance->VMString, uv_strerror(status));
            MVMObject *msg_box = MVM_repr_box_str(tc,
                tc->instance->boot_types.BOOTStr, msg_str);
            MVM_repr_push_o(tc, arr, msg_box);
        });
    }
    /* Deliver the result and retire this piece of work. */
    MVM_repr_push_o(tc, t->body.queue, arr);
    MVM_io_eventloop_remove_active_work(tc, &(wi->work_idx));
    MVM_free(wi->req);
}
238 | | |
239 | | /* Does setup work for an asynchronous write. */ |
static void write_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task, void *data) {
    MVMIOAsyncProcessData *handle_data;
    MVMAsyncTask *spawn_task;
    MVMArray *buffer;
    SpawnInfo *si;
    char *output;
    int output_size, r;

    /* Add to work in progress. */
    SpawnWriteInfo *wi = (SpawnWriteInfo *)data;
    wi->tc = tc;
    wi->work_idx = MVM_io_eventloop_add_active_work(tc, async_task);

    /* Extract buf data. Points directly into the VMArray's storage;
     * buf_data is GC-marked by write_gc_mark so it stays alive. */
    buffer = (MVMArray *)wi->buf_data;
    output = (char *)(buffer->body.slots.i8 + buffer->body.start);
    output_size = (int)buffer->body.elems;

    /* Create and initialize write request. */
    wi->req = MVM_malloc(sizeof(uv_write_t));
    wi->buf = uv_buf_init(output, output_size);
    wi->req->data = data;
    handle_data = (MVMIOAsyncProcessData *)wi->handle->body.data;
    spawn_task = (MVMAsyncTask *)handle_data->async_task;
    si = spawn_task ? (SpawnInfo *)spawn_task->body.data : NULL;
    /* Fail when there is no spawn info, no open stdin pipe, or uv_write
     * itself errors; note the short-circuit means 'r' is only set (and
     * only read below) in the uv_write case. */
    if (!si || !si->stdin_handle || (r = uv_write(wi->req, si->stdin_handle, &(wi->buf), 1, on_write)) < 0) {
        /* Error; need to notify. */
        MVMROOT(tc, async_task, {
            MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
            MVMAsyncTask *t = (MVMAsyncTask *)async_task;
            MVM_repr_push_o(tc, arr, t->body.schedulee);
            MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
            MVMROOT(tc, arr, {
                MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
                    tc->instance->VMString, (si && si->stdin_handle
                    ? uv_strerror(r)
                    : "This process is not opened for write"));
                MVMObject *msg_box = MVM_repr_box_str(tc,
                    tc->instance->boot_types.BOOTStr, msg_str);
                MVM_repr_push_o(tc, arr, msg_box);
            });
            MVM_repr_push_o(tc, t->body.queue, arr);
        });

        /* Cleanup handle. */
        MVM_free(wi->req);
        wi->req = NULL;
    }
}
289 | | |
290 | | /* Marks objects for a write task. */ |
291 | 0 | static void write_gc_mark(MVMThreadContext *tc, void *data, MVMGCWorklist *worklist) { |
292 | 0 | SpawnWriteInfo *wi = (SpawnWriteInfo *)data; |
293 | 0 | MVM_gc_worklist_add(tc, worklist, &wi->handle); |
294 | 0 | MVM_gc_worklist_add(tc, worklist, &wi->buf_data); |
295 | 0 | } |
296 | | |
297 | | /* Frees info for a write task. */ |
298 | 0 | static void write_gc_free(MVMThreadContext *tc, MVMObject *t, void *data) { |
299 | 0 | if (data) |
300 | 0 | MVM_free(data); |
301 | 0 | } |
302 | | |
/* Operations table for async write task. Slot order follows
 * MVMAsyncTaskOps: setup, then two unused slots (presumably permit and
 * cancel — confirm against the MVMAsyncTaskOps declaration), then the GC
 * mark/free hooks. */
static const MVMAsyncTaskOps write_op_table = {
    write_setup,
    NULL,
    NULL,
    write_gc_mark,
    write_gc_free
};
311 | | |
/* Sets up an asynchronous write of the given byte buffer to the process's
 * stdin. Validates argument REPRs, creates the async task with a
 * SpawnWriteInfo payload, and queues it on the event loop. Returns the
 * task; results are later delivered to 'queue' via 'schedulee'. */
static MVMAsyncTask * write_bytes(MVMThreadContext *tc, MVMOSHandle *h, MVMObject *queue,
                                  MVMObject *schedulee, MVMObject *buffer, MVMObject *async_type) {
    MVMAsyncTask *task;
    SpawnWriteInfo *wi;

    /* Validate REPRs. */
    if (REPR(queue)->ID != MVM_REPR_ID_ConcBlockingQueue)
        MVM_exception_throw_adhoc(tc,
            "asyncwritebytes target queue must have ConcBlockingQueue REPR");
    if (REPR(async_type)->ID != MVM_REPR_ID_MVMAsyncTask)
        MVM_exception_throw_adhoc(tc,
            "asyncwritebytes result type must have REPR AsyncTask");
    if (!IS_CONCRETE(buffer) || REPR(buffer)->ID != MVM_REPR_ID_VMArray)
        MVM_exception_throw_adhoc(tc, "asyncwritebytes requires a native array to read from");
    if (((MVMArrayREPRData *)STABLE(buffer)->REPR_data)->slot_type != MVM_ARRAY_U8
        && ((MVMArrayREPRData *)STABLE(buffer)->REPR_data)->slot_type != MVM_ARRAY_I8)
        MVM_exception_throw_adhoc(tc, "asyncwritebytes requires a native array of uint8 or int8");

    /* Create async task handle; root everything the allocation may move. */
    MVMROOT4(tc, queue, schedulee, h, buffer, {
        task = (MVMAsyncTask *)MVM_repr_alloc_init(tc, async_type);
    });
    MVM_ASSIGN_REF(tc, &(task->common.header), task->body.queue, queue);
    MVM_ASSIGN_REF(tc, &(task->common.header), task->body.schedulee, schedulee);
    task->body.ops = &write_op_table;
    wi = MVM_calloc(1, sizeof(SpawnWriteInfo));
    MVM_ASSIGN_REF(tc, &(task->common.header), wi->handle, h);
    MVM_ASSIGN_REF(tc, &(task->common.header), wi->buf_data, buffer);
    task->body.data = wi;

    /* Hand the task off to the event loop. */
    MVMROOT(tc, task, {
        MVM_io_eventloop_queue_work(tc, (MVMObject *)task);
    });

    return task;
}
349 | | |
350 | | /* Marks an async handle. */ |
351 | 0 | static void proc_async_gc_mark(MVMThreadContext *tc, void *data, MVMGCWorklist *worklist) { |
352 | 0 | MVMIOAsyncProcessData *apd = (MVMIOAsyncProcessData *)data; |
353 | 0 | if (data) |
354 | 0 | MVM_gc_worklist_add(tc, worklist, &(apd->async_task)); |
355 | 0 | } |
356 | | |
357 | | /* Does an asynchronous close (since it must run on the event loop). */ |
358 | 0 | static void close_cb(uv_handle_t *handle) { |
359 | 0 | MVM_free(handle); |
360 | 0 | } |
361 | 0 | static void close_perform(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task, void *data) { |
362 | 0 | uv_close((uv_handle_t *)data, close_cb); |
363 | 0 | } |
364 | | |
/* Operations table for async close task: only the setup slot is used
 * (it performs the close); no permit/cancel/GC hooks are needed since
 * the payload is a raw libuv handle. */
static const MVMAsyncTaskOps close_op_table = {
    close_perform,
    NULL,
    NULL,
    NULL,
    NULL
};
373 | | |
374 | | static void deferred_close_perform(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task, void *data); |
375 | | |
/* Operations table for the deferred-close task used when stdin is closed
 * before the process has been spawned; only the setup slot is used. */
static const MVMAsyncTaskOps deferred_close_op_table = {
    deferred_close_perform,
    NULL,
    NULL,
    NULL,
    NULL
};
383 | | |
/* Closes the stdin stream of an async process, if there is one. If the
 * process has not started yet, queues a deferred-close task so the close
 * happens after the spawn; otherwise queues an async close of the stdin
 * pipe on the event loop. Always returns 0. Callers reaching this via
 * the MVMIOOps table already hold the handle's mutex. */
static MVMint64 close_stdin(MVMThreadContext *tc, MVMOSHandle *h) {
    MVMIOAsyncProcessData *handle_data = (MVMIOAsyncProcessData *)h->body.data;
    MVMAsyncTask *spawn_task = (MVMAsyncTask *)handle_data->async_task;
    SpawnInfo *si = spawn_task ? (SpawnInfo *)spawn_task->body.data : NULL;
    if (si && si->state == STATE_UNSTARTED) {
        /* Not spawned yet: defer by re-queueing on the event loop. */
        MVMAsyncTask *task;
        MVMROOT(tc, h, {
            task = (MVMAsyncTask *)MVM_repr_alloc_init(tc,
                tc->instance->boot_types.BOOTAsync);
        });
        task->body.ops = &deferred_close_op_table;
        task->body.data = si;
        MVM_io_eventloop_queue_work(tc, (MVMObject *)task);
        return 0;
    }
    if (si && si->stdin_handle) {
        /* Queue the close, then drop our reference so a repeated close
         * becomes a no-op. */
        MVMAsyncTask *task;
        MVMROOT(tc, h, {
            task = (MVMAsyncTask *)MVM_repr_alloc_init(tc,
                tc->instance->boot_types.BOOTAsync);
        });
        task->body.ops = &close_op_table;
        task->body.data = si->stdin_handle;
        MVM_io_eventloop_queue_work(tc, (MVMObject *)task);
        si->stdin_handle = NULL;
    }
    return 0;
}
412 | | |
/* Event-loop side of a deferred stdin close: if the process is still
 * unstarted, requeues itself (effectively polling until the spawn task
 * has run); once started, performs the real close via close_stdin. */
static void deferred_close_perform(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task, void *data) {
    SpawnInfo *si = (SpawnInfo *) data;
    MVMOSHandle *h = (MVMOSHandle *) si->handle;

    if (si->state == STATE_UNSTARTED) {
        /* Still not spawned: queue another deferred-close round. */
        MVMAsyncTask *task;
        MVMROOT(tc, h, {
            task = (MVMAsyncTask *)MVM_repr_alloc_init(tc,
                tc->instance->boot_types.BOOTAsync);
        });
        task->body.ops = &deferred_close_op_table;
        task->body.data = si;
        MVM_io_eventloop_queue_work(tc, (MVMObject *)task);
        return;
    }
    if (si->stdin_handle) {
        close_stdin(tc, h);
    }
}
432 | | |
433 | 14 | MVMObject * get_async_task_handle(MVMThreadContext *tc, MVMOSHandle *h) { |
434 | 14 | MVMIOAsyncProcessData *handle_data = (MVMIOAsyncProcessData *)h->body.data; |
435 | 14 | return handle_data->async_task; |
436 | 14 | } |
437 | | |
/* IO ops table, for async process, populated with functions. Only the
 * closable (stdin close), async-writable, async-task-handle and GC-mark
 * slots are provided; the remaining MVMIOOps slots are unused for
 * process handles. NOTE(review): slot order must match the MVMIOOps
 * declaration — confirm there when adding entries. */
static const MVMIOAsyncWritable proc_async_writable = { write_bytes };
static const MVMIOClosable closable = { close_stdin };
static const MVMIOOps proc_op_table = {
    &closable,
    NULL,
    NULL,
    NULL,
    &proc_async_writable,
    NULL,
    NULL,
    NULL,
    get_async_task_handle,
    NULL,
    NULL,
    NULL,
    proc_async_gc_mark,
    NULL
};
457 | | |
458 | 6 | static void spawn_async_close(uv_handle_t *handle) { |
459 | 6 | MVM_free(handle); |
460 | 6 | } |
461 | | |
/* libuv exit callback for a spawned process: delivers the exit status to
 * the 'done' callback (if any), closes stdin, closes and frees the
 * process handle, and retires the task once no pipes remain in use. */
static void async_spawn_on_exit(uv_process_t *req, MVMint64 exit_status, int term_signal) {
    /* Check we've got a callback to fire. */
    SpawnInfo *si = (SpawnInfo *)req->data;
    MVMThreadContext *tc = si->tc;
    MVMObject *done_cb = MVM_repr_at_key_o(tc, si->callbacks,
        tc->instance->str_consts.done);
    MVMOSHandle *os_handle;
    if (!MVM_is_null(tc, done_cb)) {
        MVMROOT(tc, done_cb, {
            /* Get status, encoded wait(2)-style: exit code in the high
             * byte, terminating signal in the low byte. */
            MVMint64 status = (exit_status << 8) | term_signal;

            /* Get what we'll need to build and convey the result. */
            MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
            MVMAsyncTask *t = MVM_io_eventloop_get_active_work(tc, si->work_idx);

            /* Box and send along status. */
            MVM_repr_push_o(tc, arr, done_cb);
            MVMROOT2(tc, arr, t, {
                MVMObject *result_box = MVM_repr_box_int(tc,
                    tc->instance->boot_types.BOOTInt, status);
                MVM_repr_push_o(tc, arr, result_box);
            });
            MVM_repr_push_o(tc, t->body.queue, arr);
        });
    }

    /* when invoked via MVMIOOps, close_stdin is already wrapped in a mutex */
    os_handle = (MVMOSHandle *) si->handle;
    uv_mutex_lock(os_handle->body.mutex);
    si->state = STATE_DONE;
    close_stdin(tc, os_handle);
    uv_mutex_unlock(os_handle->body.mutex);

    /* Close handle; spawn_async_close frees it, and the MVMOSHandle's
     * pointer is cleared so nothing touches the dead uv_process_t. */
    uv_close((uv_handle_t *)req, spawn_async_close);
    ((MVMIOAsyncProcessData *)((MVMOSHandle *)si->handle)->body.data)->handle = NULL;
    if (--si->using == 0)
        MVM_io_eventloop_remove_active_work(tc, &(si->work_idx));
}
502 | | |
503 | | /* Allocates a buffer of the suggested size. */ |
504 | 21 | static void on_alloc(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) { |
505 | 21 | SpawnInfo *si = (SpawnInfo *)handle->data; |
506 | 12 | size_t size = si->last_read ? si->last_read : 64; |
507 | 21 | |
508 | 21 | if (size < 128) { |
509 | 16 | size = 128; |
510 | 16 | } |
511 | 5 | else { |
512 | 5 | size = MVM_bithacks_next_greater_pow2(size + 1); |
513 | 5 | } |
514 | 21 | |
515 | 21 | buf->base = MVM_malloc(size); |
516 | 21 | buf->len = size; |
517 | 21 | } |
518 | | |
519 | | /* Read functions for stdout/stderr/merged. */ |
520 | | static void async_read(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf, SpawnInfo *si, |
521 | 25 | MVMObject *callback, MVMuint32 seq_number, MVMint64 *permit) { |
522 | 25 | MVMThreadContext *tc = si->tc; |
523 | 25 | MVMObject *arr; |
524 | 25 | MVMAsyncTask *t; |
525 | 25 | MVMROOT(tc, callback, { |
526 | 25 | arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray); |
527 | 25 | t = MVM_io_eventloop_get_active_work(tc, si->work_idx); |
528 | 25 | }); |
529 | 25 | MVM_repr_push_o(tc, arr, callback); |
530 | 25 | if (nread >= 0) { |
531 | 11 | MVMROOT2(tc, t, arr, { |
532 | 11 | /* Push the sequence number. */ |
533 | 11 | MVMObject *seq_boxed = MVM_repr_box_int(tc, |
534 | 11 | tc->instance->boot_types.BOOTInt, seq_number); |
535 | 11 | MVM_repr_push_o(tc, arr, seq_boxed); |
536 | 11 | |
537 | 11 | /* Push buffer of data. */ |
538 | 11 | { |
539 | 11 | MVMObject *buf_type = MVM_repr_at_key_o(tc, si->callbacks, |
540 | 11 | tc->instance->str_consts.buf_type); |
541 | 11 | MVMArray *res_buf = (MVMArray *)MVM_repr_alloc_init(tc, buf_type); |
542 | 11 | res_buf->body.slots.i8 = (MVMint8 *)buf->base; |
543 | 11 | res_buf->body.start = 0; |
544 | 11 | res_buf->body.ssize = buf->len; |
545 | 11 | res_buf->body.elems = nread; |
546 | 11 | MVM_repr_push_o(tc, arr, (MVMObject *)res_buf); |
547 | 11 | } |
548 | 11 | |
549 | 11 | /* Finally, no error. */ |
550 | 11 | MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr); |
551 | 11 | |
552 | 11 | /* Update handle with amount read. */ |
553 | 11 | si->last_read = nread; |
554 | 11 | |
555 | 11 | /* Update permit count, stop reading if we run out. */ |
556 | 11 | if (permit > 0) { |
557 | 11 | (*permit)--; |
558 | 11 | if (*permit == 0) { |
559 | 11 | uv_read_stop(handle); |
560 | 11 | if (handle == (uv_stream_t *)si->pipe_stdout) |
561 | 11 | si->reading_stdout = 0; |
562 | 11 | else if (handle == (uv_stream_t *)si->pipe_stderr) |
563 | 11 | si->reading_stderr = 0; |
564 | 11 | else |
565 | 11 | MVM_panic(1, "Confused stopping reading async process handle"); |
566 | 11 | } |
567 | 11 | } |
568 | 11 | }); |
569 | 11 | } |
570 | 14 | else if (nread == UV_EOF) { |
571 | 14 | MVMROOT2(tc, t, arr, { |
572 | 14 | MVMObject *final = MVM_repr_box_int(tc, |
573 | 14 | tc->instance->boot_types.BOOTInt, seq_number); |
574 | 14 | MVM_repr_push_o(tc, arr, final); |
575 | 14 | MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr); |
576 | 14 | MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr); |
577 | 14 | }); |
578 | 14 | if (buf->base) |
579 | 10 | MVM_free(buf->base); |
580 | 14 | uv_close((uv_handle_t *)handle, NULL); |
581 | 14 | if (--si->using == 0) |
582 | 0 | MVM_io_eventloop_remove_active_work(tc, &(si->work_idx)); |
583 | 14 | } |
584 | 0 | else { |
585 | 0 | MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt); |
586 | 0 | MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr); |
587 | 0 | MVMROOT2(tc, t, arr, { |
588 | 0 | MVMString *msg_str = MVM_string_ascii_decode_nt(tc, |
589 | 0 | tc->instance->VMString, uv_strerror(nread)); |
590 | 0 | MVMObject *msg_box = MVM_repr_box_str(tc, |
591 | 0 | tc->instance->boot_types.BOOTStr, msg_str); |
592 | 0 | MVM_repr_push_o(tc, arr, msg_box); |
593 | 0 | }); |
594 | 0 | if (buf->base) |
595 | 0 | MVM_free(buf->base); |
596 | 0 | uv_close((uv_handle_t *)handle, NULL); |
597 | 0 | if (--si->using == 0) |
598 | 0 | MVM_io_eventloop_remove_active_work(tc, &(si->work_idx)); |
599 | 0 | } |
600 | 25 | MVM_repr_push_o(tc, t->body.queue, arr); |
601 | 25 | } |
602 | 10 | static void async_spawn_stdout_bytes_read(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf) { |
603 | 10 | SpawnInfo *si = (SpawnInfo *)handle->data; |
604 | 10 | MVMObject *cb = MVM_repr_at_key_o(si->tc, si->callbacks, |
605 | 10 | si->tc->instance->str_consts.stdout_bytes); |
606 | 10 | async_read(handle, nread, buf, si, cb, si->seq_stdout++, &(si->permit_stdout)); |
607 | 10 | } |
608 | 15 | static void async_spawn_stderr_bytes_read(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf) { |
609 | 15 | SpawnInfo *si = (SpawnInfo *)handle->data; |
610 | 15 | MVMObject *cb = MVM_repr_at_key_o(si->tc, si->callbacks, |
611 | 15 | si->tc->instance->str_consts.stderr_bytes); |
612 | 15 | async_read(handle, nread, buf, si, cb, si->seq_stderr++, &(si->permit_stderr)); |
613 | 15 | } |
614 | 0 | static void async_spawn_merge_bytes_read(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf) { |
615 | 0 | SpawnInfo *si = (SpawnInfo *)handle->data; |
616 | 0 | MVMObject *cb = MVM_repr_at_key_o(si->tc, si->callbacks, |
617 | 0 | si->tc->instance->str_consts.merge_bytes); |
618 | 0 | async_read(handle, nread, buf, si, cb, si->seq_merge++, &(si->permit_merge)); |
619 | 0 | } |
620 | | |
621 | | /* Actually spawns an async task. This runs in the event loop thread. */ |
622 | 14 | static MVMint64 get_pipe_fd(MVMThreadContext *tc, uv_pipe_t *pipe) { |
623 | 14 | uv_os_fd_t fd; |
624 | 14 | if (uv_fileno((uv_handle_t *)pipe, &fd) == 0) |
625 | 14 | return (MVMint64)fd; |
626 | 14 | else |
627 | 0 | return 0; |
628 | 14 | } |
static void spawn_setup(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task, void *data) {
    MVMint64 spawn_result;

    /* Process info setup. */
    uv_process_t *process = MVM_calloc(1, sizeof(uv_process_t));
    uv_process_options_t process_options = {0};
    uv_stdio_container_t process_stdio[3];

    /* Add to work in progress. The process itself counts as one user;
     * each read pipe opened below adds another. */
    SpawnInfo *si = (SpawnInfo *)data;
    si->tc = tc;
    si->work_idx = MVM_io_eventloop_add_active_work(tc, async_task);
    si->using = 1;

    /* Create input/output handles as needed. stdin: a pipe when the user
     * wants to write, an inherited fd when one was given, else fd 0. */
    if (MVM_repr_exists_key(tc, si->callbacks, tc->instance->str_consts.write)) {
        uv_pipe_t *pipe = MVM_malloc(sizeof(uv_pipe_t));
        uv_pipe_init(tc->loop, pipe, 0);
        pipe->data = si;
        process_stdio[0].flags = UV_CREATE_PIPE | UV_READABLE_PIPE;
        process_stdio[0].data.stream = (uv_stream_t *)pipe;
        si->stdin_handle = (uv_stream_t *)pipe;
    }
    else if (MVM_repr_exists_key(tc, si->callbacks, tc->instance->str_consts.stdin_fd)) {
        process_stdio[0].flags = UV_INHERIT_FD;
        process_stdio[0].data.fd = (int)MVM_repr_get_int(tc,
            MVM_repr_at_key_o(tc, si->callbacks, tc->instance->str_consts.stdin_fd));
    }
    else {
        process_stdio[0].flags = UV_INHERIT_FD;
        process_stdio[0].data.fd = 0;
    }
    /* Merged output: both stdout and stderr get pipes feeding the same
     * merge callback. */
    if (MVM_repr_exists_key(tc, si->callbacks, tc->instance->str_consts.merge_bytes)) {
        si->pipe_stdout = MVM_malloc(sizeof(uv_pipe_t));
        uv_pipe_init(tc->loop, si->pipe_stdout, 0);
        si->pipe_stdout->data = si;
        process_stdio[1].flags = UV_CREATE_PIPE | UV_WRITABLE_PIPE;
        process_stdio[1].data.stream = (uv_stream_t *)si->pipe_stdout;
        si->pipe_stderr = MVM_malloc(sizeof(uv_pipe_t));
        uv_pipe_init(tc->loop, si->pipe_stderr, 0);
        si->pipe_stderr->data = si;
        process_stdio[2].flags = UV_CREATE_PIPE | UV_WRITABLE_PIPE;
        process_stdio[2].data.stream = (uv_stream_t *)si->pipe_stderr;
        si->merge = 1;
        si->using += 2;
    }
    else {
        /* Separate streams: pipe, inherited fd, or the default standard
         * descriptor, chosen independently for stdout and stderr. */
        if (MVM_repr_exists_key(tc, si->callbacks, tc->instance->str_consts.stdout_bytes)) {
            si->pipe_stdout = MVM_malloc(sizeof(uv_pipe_t));
            uv_pipe_init(tc->loop, si->pipe_stdout, 0);
            si->pipe_stdout->data = si;
            process_stdio[1].flags = UV_CREATE_PIPE | UV_WRITABLE_PIPE;
            process_stdio[1].data.stream = (uv_stream_t *)si->pipe_stdout;
            si->using++;
        }
        else if (MVM_repr_exists_key(tc, si->callbacks, tc->instance->str_consts.stdout_fd)) {
            process_stdio[1].flags = UV_INHERIT_FD;
            process_stdio[1].data.fd = (int)MVM_repr_get_int(tc,
                MVM_repr_at_key_o(tc, si->callbacks, tc->instance->str_consts.stdout_fd));
        }
        else {
            process_stdio[1].flags = UV_INHERIT_FD;
            process_stdio[1].data.fd = 1;
        }
        if (MVM_repr_exists_key(tc, si->callbacks, tc->instance->str_consts.stderr_bytes)) {
            si->pipe_stderr = MVM_malloc(sizeof(uv_pipe_t));
            uv_pipe_init(tc->loop, si->pipe_stderr, 0);
            si->pipe_stderr->data = si;
            process_stdio[2].flags = UV_CREATE_PIPE | UV_WRITABLE_PIPE;
            process_stdio[2].data.stream = (uv_stream_t *)si->pipe_stderr;
            si->using++;
        }
        else if (MVM_repr_exists_key(tc, si->callbacks, tc->instance->str_consts.stderr_fd)) {
            process_stdio[2].flags = UV_INHERIT_FD;
            process_stdio[2].data.fd = (int)MVM_repr_get_int(tc,
                MVM_repr_at_key_o(tc, si->callbacks, tc->instance->str_consts.stderr_fd));
        }
        else {
            process_stdio[2].flags = UV_INHERIT_FD;
            process_stdio[2].data.fd = 2;
        }
    }

    /* Set up process start info. */
    process_options.stdio = process_stdio;
    process_options.file = si->prog;
    process_options.args = si->args;
    process_options.cwd = si->cwd;
    process_options.flags = UV_PROCESS_WINDOWS_HIDE;
    process_options.env = si->env;
    process_options.stdio_count = 3;
    process_options.exit_cb = async_spawn_on_exit;

    /* Attach data, spawn, report any error. */
    process->data = si;
    spawn_result = uv_spawn(tc->loop, process, &process_options);
    if (spawn_result) {
        /* Spawn failed: mark done and notify the error callback plus any
         * byte-stream subscribers, so nothing waits forever. */
        MVMObject *msg_box = NULL;
        si->state = STATE_DONE;
        MVMROOT2(tc, async_task, msg_box, {
            MVMObject *error_cb;
            MVMString *msg_str = MVM_string_ascii_decode_nt(tc,
                tc->instance->VMString, uv_strerror(spawn_result));
            msg_box = MVM_repr_box_str(tc,
                tc->instance->boot_types.BOOTStr, msg_str);

            error_cb = MVM_repr_at_key_o(tc, si->callbacks,
                tc->instance->str_consts.error);
            if (!MVM_is_null(tc, error_cb)) {
                MVMROOT(tc, error_cb, {
                    MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
                    MVM_repr_push_o(tc, arr, error_cb);
                    MVM_repr_push_o(tc, arr, msg_box);
                    MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
                });
            }

            if (si->pipe_stdout) {
                MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
                MVMObject *cb = MVM_repr_at_key_o(tc, si->callbacks,
                    tc->instance->str_consts.stdout_bytes);
                MVM_repr_push_o(tc, arr, cb);
                MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
                MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr);
                MVM_repr_push_o(tc, arr, msg_box);
                MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
            }

            if (si->pipe_stderr) {
                MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
                MVMObject *cb = MVM_repr_at_key_o(tc, si->callbacks,
                    tc->instance->str_consts.stderr_bytes);
                MVM_repr_push_o(tc, arr, cb);
                MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTInt);
                MVM_repr_push_o(tc, arr, tc->instance->boot_types.BOOTStr);
                MVM_repr_push_o(tc, arr, msg_box);
                MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
            }
        });

        MVM_io_eventloop_remove_active_work(tc, &(si->work_idx));
    }
    else {
        /* Spawn succeeded: record the handle, mark started, and fire the
         * ready callback with [pipe fds, pid] if one was supplied. */
        MVMOSHandle *handle = (MVMOSHandle *)si->handle;
        MVMIOAsyncProcessData *apd = (MVMIOAsyncProcessData *)handle->body.data;
        MVMObject *ready_cb = MVM_repr_at_key_o(tc, si->callbacks,
            tc->instance->str_consts.ready);
        apd->handle = process;
        si->state = STATE_STARTED;

        if (!MVM_is_null(tc, ready_cb)) {
            MVMROOT2(tc, ready_cb, async_task, {
                MVMObject *arr = MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTArray);
                MVMROOT(tc, arr, {
                    MVMObject *pid;
                    MVMObject *handle_arr = MVM_repr_alloc_init(tc,
                        tc->instance->boot_types.BOOTIntArray);
                    MVM_repr_push_i(tc, handle_arr, si->pipe_stdout
                        ? get_pipe_fd(tc, si->pipe_stdout)
                        : -1);
                    MVM_repr_push_i(tc, handle_arr, si->pipe_stderr
                        ? get_pipe_fd(tc, si->pipe_stderr)
                        : -1);
                    MVM_repr_push_o(tc, arr, ready_cb);
                    MVM_repr_push_o(tc, arr, handle_arr);
                    pid = MVM_repr_box_int(tc, tc->instance->boot_types.BOOTInt, process->pid);
                    MVM_repr_push_o(tc, arr, pid);
                    MVM_repr_push_o(tc, ((MVMAsyncTask *)async_task)->body.queue, arr);
                });
            });
        }
    }
}
802 | | |
/* Permits provide the back-pressure mechanism for the readers. The permit
 * count for a channel says how much the consumer is willing to receive;
 * reading from the underlying pipe is only active while the count is
 * non-zero. A negative `permits` request pins the stored count at -1, which
 * presumably means "unlimited" (it is non-zero, so reading stays on) —
 * TODO confirm against the eventloop permit protocol.
 *
 * Channel numbering (as dispatched below): 0 = merged stdout+stderr,
 * 1 = stdout, 2 = stderr. */
static void spawn_permit(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task, void *data,
        MVMint64 channel, MVMint64 permits) {
    SpawnInfo *si = (SpawnInfo *)data;
    /* A negative work index means the task is no longer active; nothing to do. */
    if (si->work_idx < 0)
        return;
    /* Merged-output mode: both pipes exist and feed a single stream, so one
     * permit count governs both, and reads are started/stopped in lockstep. */
    if (channel == 0 && si->pipe_stdout && si->pipe_stderr && si->merge) {
        if (permits < 0)
            si->permit_merge = -1;
        else if (si->permit_merge < 0)
            si->permit_merge = permits;
        else
            si->permit_merge += permits;
        if (!si->reading_stdout && si->permit_merge) {
            uv_read_start((uv_stream_t *)si->pipe_stdout, on_alloc,
                async_spawn_merge_bytes_read);
            uv_read_start((uv_stream_t *)si->pipe_stderr, on_alloc,
                async_spawn_merge_bytes_read);
            si->reading_stdout = 1;
            si->reading_stderr = 1;
        }
        else if (si->reading_stdout && !si->permit_merge) {
            uv_read_stop((uv_stream_t *)si->pipe_stdout);
            uv_read_stop((uv_stream_t *)si->pipe_stderr);
            si->reading_stdout = 0;
            si->reading_stderr = 0;
        }
    }
    /* Separate stdout channel (only when not merging). */
    else if (channel == 1 && si->pipe_stdout && !si->merge) {
        if (permits < 0)
            si->permit_stdout = -1;
        else if (si->permit_stdout < 0)
            si->permit_stdout = permits;
        else
            si->permit_stdout += permits;
        if (!si->reading_stdout && si->permit_stdout) {
            uv_read_start((uv_stream_t *)si->pipe_stdout, on_alloc,
                async_spawn_stdout_bytes_read);
            si->reading_stdout = 1;
        }
        else if (si->reading_stdout && !si->permit_stdout) {
            uv_read_stop((uv_stream_t *)si->pipe_stdout);
            si->reading_stdout = 0;
        }
    }
    /* Separate stderr channel (only when not merging). */
    else if (channel == 2 && si->pipe_stderr && !si->merge) {
        if (permits < 0)
            si->permit_stderr = -1;
        else if (si->permit_stderr < 0)
            si->permit_stderr = permits;
        else
            si->permit_stderr += permits;
        if (!si->reading_stderr && si->permit_stderr) {
            uv_read_start((uv_stream_t *)si->pipe_stderr, on_alloc,
                async_spawn_stderr_bytes_read);
            si->reading_stderr = 1;
        }
        else if (si->reading_stderr && !si->permit_stderr) {
            uv_read_stop((uv_stream_t *)si->pipe_stderr);
            si->reading_stderr = 0;
        }
    }
}
866 | | |
867 | | /* On cancel, kill the process. */ |
868 | 0 | static void spawn_cancel(MVMThreadContext *tc, uv_loop_t *loop, MVMObject *async_task, void *data) { |
869 | 0 | /* Locate handle. */ |
870 | 0 | SpawnInfo *si = (SpawnInfo *)data; |
871 | 0 | MVMOSHandle *handle = (MVMOSHandle *)si->handle; |
872 | 0 | MVMIOAsyncProcessData *apd = (MVMIOAsyncProcessData *)handle->body.data; |
873 | 0 | uv_process_t *phandle = apd->handle; |
874 | 0 |
|
875 | 0 | /* If it didn't already end, try to kill it. exit_cb will clean up phandle |
876 | 0 | * should the signal lead to process exit. */ |
877 | 0 | if (phandle) { |
878 | 0 | #ifdef _WIN32 |
879 | | /* On Windows, make sure we use a signal that will actually work. */ |
880 | | if (apd->signal != SIGTERM && apd->signal != SIGKILL && apd->signal != SIGINT) |
881 | | apd->signal = SIGKILL; |
882 | | #endif |
883 | 0 | uv_process_kill(phandle, (int)apd->signal); |
884 | 0 | } |
885 | 0 | } |
886 | | |
887 | | /* Marks objects for a spawn task. */ |
888 | 0 | static void spawn_gc_mark(MVMThreadContext *tc, void *data, MVMGCWorklist *worklist) { |
889 | 0 | SpawnInfo *si = (SpawnInfo *)data; |
890 | 0 | MVM_gc_worklist_add(tc, worklist, &si->handle); |
891 | 0 | MVM_gc_worklist_add(tc, worklist, &si->callbacks); |
892 | 0 | } |
893 | | |
/* Frees info for a spawn task: the C-side strings and string arrays owned by
 * the SpawnInfo (cwd, environment block, argument vector), then the SpawnInfo
 * itself. Each pointer is NULLed after freeing to guard against double free.
 * The GC-managed references (si->handle, si->callbacks) are not touched here;
 * the collector owns those. */
static void spawn_gc_free(MVMThreadContext *tc, MVMObject *t, void *data) {
    if (data) {
        SpawnInfo *si = (SpawnInfo *)data;
        if (si->cwd) {
            MVM_free(si->cwd);
            si->cwd = NULL;
        }
        if (si->env) {
            /* FREE_ENV() is a macro defined elsewhere in this file; it
             * presumably expands to code that uses the local names `_env`
             * and `i` — do not rename or remove them. */
            MVMuint32 i;
            char **_env = si->env;
            FREE_ENV();
            si->env = NULL;
        }
        if (si->args) {
            /* args is a NULL-terminated vector of heap-allocated strings;
             * note si->prog aliases args[0], so it is freed here too. */
            MVMuint32 i = 0;
            while (si->args[i])
                MVM_free(si->args[i++]);
            MVM_free(si->args);
            si->args = NULL;
        }
        MVM_free(si);
    }
}
918 | | |
/* Operations table for the async spawn task. (Fixed copy-paste comment:
 * this is the spawn task, not the connect task.) */
static const MVMAsyncTaskOps spawn_op_table = {
    spawn_setup,    /* set up pipes and spawn the process on the event loop */
    spawn_permit,   /* back-pressure: grant read permits per channel */
    spawn_cancel,   /* cancellation: signal/kill the process */
    spawn_gc_mark,  /* GC mark of the SpawnInfo's managed references */
    spawn_gc_free   /* free the SpawnInfo's C-side resources */
};
927 | | |
/* Spawn a process asynchronously. Builds a SpawnInfo from the argument list,
 * working directory, and environment, wraps it in an async task tied to an
 * MVMOSHandle, and queues the task on the event loop (which runs spawn_setup).
 * Results and errors are delivered through `callbacks` onto `queue`.
 * Returns the MVMOSHandle representing the (not-yet-started) process. */
MVMObject * MVM_proc_spawn_async(MVMThreadContext *tc, MVMObject *queue, MVMObject *argv,
        MVMString *cwd, MVMObject *env, MVMObject *callbacks) {
    MVMAsyncTask *task;
    MVMOSHandle *handle;
    SpawnInfo *si;
    char *prog, *_cwd, **_env, **args;
    MVMuint64 size, arg_size, i;
    MVMIter *iter;
    MVMRegister reg;

    /* Validate queue REPR. */
    if (REPR(queue)->ID != MVM_REPR_ID_ConcBlockingQueue)
        MVM_exception_throw_adhoc(tc,
            "spawnprocasync target queue must have ConcBlockingQueue REPR");

    /* Encode arguments, taking first as program name. The vector is
     * NULL-terminated as execv-style APIs expect. Ownership of args/_cwd/_env
     * passes to the SpawnInfo below and is released in spawn_gc_free. */
    arg_size = MVM_repr_elems(tc, argv);
    if (arg_size < 1)
        MVM_exception_throw_adhoc(tc, "spawnprocasync must have first arg for program");
    args = MVM_malloc((arg_size + 1) * sizeof(char *));
    for (i = 0; i < arg_size; i++) {
        REPR(argv)->pos_funcs.at_pos(tc, STABLE(argv), argv, OBJECT_BODY(argv), i, &reg, MVM_reg_obj);
        args[i] = MVM_string_utf8_c8_encode_C_string(tc, MVM_repr_get_str(tc, reg.o));
    }
    args[arg_size] = NULL;
    prog = args[0];

    /* Encode CWD. */
    _cwd = MVM_string_utf8_c8_encode_C_string(tc, cwd);

    /* Root the GC-managed arguments across the allocations below. */
    MVMROOT3(tc, queue, env, callbacks, {
        MVMIOAsyncProcessData *data;

        /* Encode environment. INIT_ENV() is a macro defined elsewhere in this
         * file; it presumably relies on the local names size/iter/_env/i/env —
         * do not rename them. */
        size = MVM_repr_elems(tc, env);
        iter = (MVMIter *)MVM_iter(tc, env);
        _env = MVM_malloc((size + 1) * sizeof(char *));
        INIT_ENV();

        /* Create handle. */
        data = MVM_calloc(1, sizeof(MVMIOAsyncProcessData));
        handle = (MVMOSHandle *)MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTIO);
        handle->body.ops = &proc_op_table;
        handle->body.data = data;

        /* Create async task handle. */
        MVMROOT(tc, handle, {
            task = (MVMAsyncTask *)MVM_repr_alloc_init(tc, tc->instance->boot_types.BOOTAsync);
        });
        MVM_ASSIGN_REF(tc, &(task->common.header), task->body.queue, queue);
        task->body.ops = &spawn_op_table;
        si = MVM_calloc(1, sizeof(SpawnInfo));
        si->prog = prog;
        si->cwd = _cwd;
        si->env = _env;
        si->args = args;
        si->state = STATE_UNSTARTED;
        MVM_ASSIGN_REF(tc, &(task->common.header), si->handle, handle);
        MVM_ASSIGN_REF(tc, &(task->common.header), si->callbacks, callbacks);
        task->body.data = si;
        MVM_ASSIGN_REF(tc, &(handle->common.header), data->async_task, task);
    });

    /* Hand the task off to the event loop, which will call spawn_setup. */
    MVMROOT(tc, handle, {
        MVM_io_eventloop_queue_work(tc, (MVMObject *)task);
    });

    return (MVMObject *)handle;
}
999 | | |
1000 | | /* Kills an asynchronously spawned process. */ |
1001 | 0 | void MVM_proc_kill_async(MVMThreadContext *tc, MVMObject *handle_obj, MVMint64 signal) { |
1002 | 0 | /* Ensure it's a handle for a process. */ |
1003 | 0 | if (REPR(handle_obj)->ID == MVM_REPR_ID_MVMOSHandle) { |
1004 | 0 | MVMOSHandle *handle = (MVMOSHandle *)handle_obj; |
1005 | 0 | if (handle->body.ops == &proc_op_table) { |
1006 | 0 | /* It's fine; send the kill by cancelling the task. */ |
1007 | 0 | MVMIOAsyncProcessData *data = (MVMIOAsyncProcessData *)handle->body.data; |
1008 | 0 | data->signal = signal; |
1009 | 0 | MVM_io_eventloop_cancel_work(tc, data->async_task, NULL, NULL); |
1010 | 0 | return; |
1011 | 0 | } |
1012 | 0 | } |
1013 | 0 | MVM_exception_throw_adhoc(tc, "killprocasync requires a process handle"); |
1014 | 0 | } |
1015 | | |
1016 | | /* Get the current process ID. */ |
1017 | 461 | MVMint64 MVM_proc_getpid(MVMThreadContext *tc) { |
1018 | 461 | #ifdef _WIN32 |
1019 | | return _getpid(); |
1020 | | #else |
1021 | 461 | return getpid(); |
1022 | 461 | #endif |
1023 | 461 | } |
1024 | | |
1025 | | /* Get the process ID of the parent process */ |
1026 | 0 | MVMint64 MVM_proc_getppid(MVMThreadContext *tc) { |
1027 | 0 | return uv_os_getppid(); |
1028 | 0 | } |
1029 | | |
1030 | | /* generates a random int64 */ |
1031 | 0 | MVMint64 MVM_proc_rand_i(MVMThreadContext *tc) { |
1032 | 0 | MVMuint64 result = tinymt64_generate_uint64(tc->rand_state); |
1033 | 0 | return *(MVMint64 *)&result; |
1034 | 0 | } |
1035 | | |
1036 | | /* generates a number between 0 and 1 */ |
1037 | 6.31k | MVMnum64 MVM_proc_rand_n(MVMThreadContext *tc) { |
1038 | 6.31k | return tinymt64_generate_double(tc->rand_state); |
1039 | 6.31k | } |
1040 | | |
1041 | 0 | MVMnum64 MVM_proc_randscale_n(MVMThreadContext *tc, MVMnum64 scale) { |
1042 | 0 | return tinymt64_generate_double(tc->rand_state) * scale; |
1043 | 0 | } |
1044 | | |
1045 | | /* seed random number generator */ |
1046 | 319 | void MVM_proc_seed(MVMThreadContext *tc, MVMint64 seed) { |
1047 | 319 | /* Seed our one, plus the normal C srand for libtommath. */ |
1048 | 319 | tinymt64_init(tc->rand_state, (MVMuint64)seed); |
1049 | 319 | /* do not call srand if we are not using rand */ |
1050 | 319 | #ifndef MP_USE_ALT_RAND |
1051 | 319 | srand((MVMuint32)seed); |
1052 | 319 | #endif |
1053 | 319 | } |
1054 | | |
1055 | | /* gets the system time since the epoch truncated to integral seconds */ |
1056 | 2 | MVMint64 MVM_proc_time_i(MVMThreadContext *tc) { |
1057 | 2 | return (MVMint64)(MVM_platform_now() / 1000000000); |
1058 | 2 | } |
1059 | | |
1060 | | /* gets the system time since the epoch as floating point seconds */ |
1061 | 14.4k | MVMnum64 MVM_proc_time_n(MVMThreadContext *tc) { |
1062 | 14.4k | return (MVMnum64)MVM_platform_now() / 1000000000.0; |
1063 | 14.4k | } |
1064 | | |
1065 | 0 | MVMString * MVM_executable_name(MVMThreadContext *tc) { |
1066 | 0 | MVMInstance * const instance = tc->instance; |
1067 | 0 | if (instance->exec_name) |
1068 | 0 | return MVM_string_utf8_c8_decode(tc, |
1069 | 0 | instance->VMString, |
1070 | 0 | instance->exec_name, strlen(instance->exec_name)); |
1071 | 0 | else |
1072 | 0 | return tc->instance->str_consts.empty; |
1073 | 0 | } |
1074 | | |
1075 | 144 | MVMObject * MVM_proc_clargs(MVMThreadContext *tc) { |
1076 | 144 | MVMInstance * const instance = tc->instance; |
1077 | 144 | MVMObject *clargs = instance->clargs; |
1078 | 144 | if (!clargs) { |
1079 | 144 | clargs = MVM_repr_alloc_init(tc, MVM_hll_current(tc)->slurpy_array_type); |
1080 | 144 | #ifndef _WIN32 |
1081 | 144 | MVMROOT(tc, clargs, { |
1082 | 144 | const MVMint64 num_clargs = instance->num_clargs; |
1083 | 144 | MVMint64 count; |
1084 | 144 | |
1085 | 144 | MVMString *prog_string = MVM_string_utf8_c8_decode(tc, |
1086 | 144 | instance->VMString, |
1087 | 144 | instance->prog_name, strlen(instance->prog_name)); |
1088 | 144 | MVMObject *boxed_str = MVM_repr_box_str(tc, |
1089 | 144 | instance->boot_types.BOOTStr, prog_string); |
1090 | 144 | MVM_repr_push_o(tc, clargs, boxed_str); |
1091 | 144 | |
1092 | 144 | for (count = 0; count < num_clargs; count++) { |
1093 | 144 | char *raw_clarg = instance->raw_clargs[count]; |
1094 | 144 | MVMString *string = MVM_string_utf8_c8_decode(tc, |
1095 | 144 | instance->VMString, raw_clarg, strlen(raw_clarg)); |
1096 | 144 | boxed_str = MVM_repr_box_str(tc, |
1097 | 144 | instance->boot_types.BOOTStr, string); |
1098 | 144 | MVM_repr_push_o(tc, clargs, boxed_str); |
1099 | 144 | } |
1100 | 144 | }); |
1101 | 144 | #else |
1102 | | MVMROOT(tc, clargs, { |
1103 | | const MVMint64 num_clargs = instance->num_clargs; |
1104 | | MVMint64 count; |
1105 | | |
1106 | | MVMString *prog_string = MVM_string_utf8_c8_decode(tc, |
1107 | | instance->VMString, |
1108 | | instance->prog_name, strlen(instance->prog_name)); |
1109 | | MVMObject *boxed_str = MVM_repr_box_str(tc, |
1110 | | instance->boot_types.BOOTStr, prog_string); |
1111 | | MVM_repr_push_o(tc, clargs, boxed_str); |
1112 | | |
1113 | | for (count = 0; count < num_clargs; count++) { |
1114 | | char *raw_clarg = instance->raw_clargs[count]; |
1115 | | MVMString *string = MVM_string_utf8_c8_decode(tc, |
1116 | | instance->VMString, raw_clarg, strlen(raw_clarg)); |
1117 | | boxed_str = MVM_repr_box_str(tc, |
1118 | | instance->boot_types.BOOTStr, string); |
1119 | | MVM_repr_push_o(tc, clargs, boxed_str); |
1120 | | } |
1121 | | }); |
1122 | | #endif |
1123 | 144 | |
1124 | 144 | instance->clargs = clargs; |
1125 | 144 | } |
1126 | 144 | return clargs; |
1127 | 144 | } |
1128 | | |
1129 | | /* Gets resource usage statistics, so far as they are portably available (see |
1130 | | * libuv docs) and puts them into an integer array. */ |
1131 | 0 | void MVM_proc_getrusage(MVMThreadContext *tc, MVMObject *result) { |
1132 | 0 | uv_rusage_t usage; |
1133 | 0 | int r; |
1134 | 0 | if ((r = uv_getrusage(&usage)) > 0) |
1135 | 0 | MVM_exception_throw_adhoc(tc, "Unable to getrusage: %s", uv_strerror(r)); |
1136 | 0 | if (REPR(result)->ID != MVM_REPR_ID_VMArray || !IS_CONCRETE(result) || |
1137 | 0 | ((MVMArrayREPRData *)STABLE(result)->REPR_data)->slot_type != MVM_ARRAY_I64) { |
1138 | 0 | MVM_exception_throw_adhoc(tc, "getrusage needs a concrete 64bit int array."); |
1139 | 0 | } |
1140 | 0 | MVM_repr_bind_pos_i(tc, result, 0, usage.ru_utime.tv_sec); |
1141 | 0 | MVM_repr_bind_pos_i(tc, result, 1, usage.ru_utime.tv_usec); |
1142 | 0 | MVM_repr_bind_pos_i(tc, result, 2, usage.ru_stime.tv_sec); |
1143 | 0 | MVM_repr_bind_pos_i(tc, result, 3, usage.ru_stime.tv_usec); |
1144 | 0 | MVM_repr_bind_pos_i(tc, result, 4, usage.ru_maxrss); |
1145 | 0 | MVM_repr_bind_pos_i(tc, result, 5, usage.ru_ixrss); |
1146 | 0 | MVM_repr_bind_pos_i(tc, result, 6, usage.ru_idrss); |
1147 | 0 | MVM_repr_bind_pos_i(tc, result, 7, usage.ru_isrss); |
1148 | 0 | MVM_repr_bind_pos_i(tc, result, 8, usage.ru_minflt); |
1149 | 0 | MVM_repr_bind_pos_i(tc, result, 9, usage.ru_majflt); |
1150 | 0 | MVM_repr_bind_pos_i(tc, result, 10, usage.ru_nswap); |
1151 | 0 | MVM_repr_bind_pos_i(tc, result, 11, usage.ru_inblock); |
1152 | 0 | MVM_repr_bind_pos_i(tc, result, 12, usage.ru_oublock); |
1153 | 0 | MVM_repr_bind_pos_i(tc, result, 13, usage.ru_msgsnd); |
1154 | 0 | MVM_repr_bind_pos_i(tc, result, 14, usage.ru_msgrcv); |
1155 | 0 | MVM_repr_bind_pos_i(tc, result, 15, usage.ru_nsignals); |
1156 | 0 | MVM_repr_bind_pos_i(tc, result, 16, usage.ru_nvcsw); |
1157 | 0 | MVM_repr_bind_pos_i(tc, result, 17, usage.ru_nivcsw); |
1158 | 0 | } |