/home/travis/build/MoarVM/MoarVM/src/profiler/log.h
Line | Count | Source (jump to first uncovered line) |
/* Per-thread profiling data. Each thread being profiled owns one of these;
 * it accumulates the call graph and timing/GC statistics for that thread. */
struct MVMProfileThreadData {
    /* The root of the call graph. */
    MVMProfileCallNode *call_graph;

    /* The current call graph node we're in. */
    MVMProfileCallNode *current_call;

    /* The time we started profiling.
     * NOTE(review): units (presumably nanoseconds) are set by the caller
     * that stamps these fields — confirm in log.c. */
    MVMuint64 start_time;

    /* The time we finished profiling, if we got there already. */
    MVMuint64 end_time;

    /* Garbage collection time measurements: a growable array of per-run
     * records, with num_gcs entries in use out of alloc_gcs allocated. */
    MVMProfileGC *gcs;
    MVMuint32 num_gcs;
    MVMuint32 alloc_gcs;

    /* Amount of time spent in spesh (the bytecode specializer). */
    MVMuint64 spesh_time;

    /* Current spesh work start time, if any (used to compute the
     * interval added to spesh_time when the work ends). */
    MVMuint64 cur_spesh_start_time;

    /* Current GC start time, if any. */
    MVMuint64 cur_gc_start_time;

    /* We have to make sure to not count the newest allocation infinitely
     * often if there's a conditionally-allocating operation (like getlex)
     * that gets called multiple times with no actual allocations in between */
    MVMObject *last_counted_allocation;

    /* Used to pass generated data structure from the gc-orchestrated
     * dumping function back to the dump function that ends the profile */
    MVMObject *collected_data;
};
38 | | |
/* Information collected about a single GC run. One record is appended to
 * MVMProfileThreadData.gcs per collection observed by the thread. */
struct MVMProfileGC {
    /* How long the collection took. */
    MVMuint64 time;

    /* When, relative to program start, did this GC take place? */
    MVMuint64 abstime;

    /* Was it a full collection? (Boolean stored in a 16-bit field.) */
    MVMuint16 full;

    /* Was this thread responsible (i.e. did it initiate the run)? */
    MVMuint16 responsible;

    /* Which GC run does this belong to?
     * (Good to know in multithreaded situations where
     * some threads have their work stolen)
     * AO_t: atomic-ops word, matching the orchestrator's sequence counter. */
    AO_t gc_seq_num;

    /* Nursery statistics, in bytes. */
    MVMuint32 cleared_bytes;
    MVMuint32 retained_bytes;
    MVMuint32 promoted_bytes;

    /* Inter-generation links count (gen2 objects referencing the nursery). */
    MVMuint32 num_gen2roots;
};
66 | | |
/* Call graph node, which is kept per thread. A node represents one
 * (caller-path, callee) combination and aggregates all timings and
 * counters for calls reaching the callee along that path. */
struct MVMProfileCallNode {
    /* The frame this data is for.
     * If this CallNode is for a native call, this is NULL. */
    MVMStaticFrame *sf;

    /* If the static frame is NULL, we're collecting data on a native call;
     * this names the native target. */
    char *native_target_name;

    /* The timestamp when we entered the node. */
    MVMuint64 cur_entry_time;

    /* Time we should skip since cur_entry_time because execution was
     * suspended due to GC or spesh. */
    MVMuint64 cur_skip_time;

    /* The node in the profiling call graph that we came from (caller). */
    MVMProfileCallNode *pred;

    /* Successor nodes so far (callees reached from this node). */
    MVMProfileCallNode **succ;

    /* Number of successors we have, and have allocated space for. */
    MVMuint32 num_succ;
    MVMuint32 alloc_succ;

    /* Allocations of different types, and the number of allocation
     * counts we have so far (num_alloc in use, alloc_alloc allocated). */
    MVMProfileAllocationCount *alloc;
    MVMuint32 num_alloc;
    MVMuint32 alloc_alloc;

    /* The total inclusive time so far spent in this node. */
    MVMuint64 total_time;

    /* The total number of times this node was entered. */
    MVMuint64 total_entries;

    /* Entries that were to specialized bytecode. */
    MVMuint64 specialized_entries;

    /* Entries that were inlined. */
    MVMuint64 inlined_entries;

    /* Entries that were to JITted code. */
    MVMuint64 jit_entries;

    /* Number of times OSR (on-stack replacement) took place. */
    MVMuint64 osr_count;

    /* Number of times deopt_one happened. */
    MVMuint64 deopt_one_count;

    /* Number of times deopt_all happened. */
    MVMuint64 deopt_all_count;

    /* Entry mode, persisted for the sake of continuations.
     * One of the MVM_PROFILE_ENTER_* values below. */
    MVMuint64 entry_mode;
};
126 | | |
/* Allocation counts for a call node, broken down by the execution tier
 * that performed the allocation. One entry per allocated type. */
struct MVMProfileAllocationCount {
    /* The type we're counting allocations of. */
    MVMObject *type;

    /* The number of allocations we've counted. */
    /* a) in regularly interpreted code */
    MVMuint64 allocations_interp;

    /* b) in spesh'd (specialized) code */
    MVMuint64 allocations_spesh;

    /* c) in jitted code */
    MVMuint64 allocations_jit;
};
142 | | |
/* When a continuation is taken, we attach one of these to it. It carries the
 * data needed to restore profiler state if the continuation is invoked. */
struct MVMProfileContinuationData {
    /* List of static frames we should restore, in reverse order. */
    MVMStaticFrame **sfs;

    /* Entry modes (MVM_PROFILE_ENTER_* values) to restore also;
     * parallel to sfs. */
    MVMuint64 *modes;

    /* Number of static frames in the list (and entries in modes). */
    MVMuint64 num_sfs;
};
155 | | |
/* Ways we might enter a frame; stored in MVMProfileCallNode.entry_mode
 * and passed as the mode argument to MVM_profile_log_enter. */
#define MVM_PROFILE_ENTER_NORMAL        0   /* Regular interpreted entry. */
#define MVM_PROFILE_ENTER_SPESH         1   /* Entry to specialized bytecode. */
#define MVM_PROFILE_ENTER_SPESH_INLINE  2   /* Inlined within specialized code. */
#define MVM_PROFILE_ENTER_JIT           3   /* Entry to JIT-compiled code. */
#define MVM_PROFILE_ENTER_JIT_INLINE    4   /* Inlined within JIT-compiled code. */
162 | | |
/* Logging functions.
 * NOTE(review): the prefixes are inconsistent (MVM_profile_log_* vs
 * MVM_profiler_log_*); renaming would break callers, so only noting it. */

/* Logs entry to the frame sf; mode is one of MVM_PROFILE_ENTER_*. */
void MVM_profile_log_enter(MVMThreadContext *tc, MVMStaticFrame *sf, MVMuint64 mode);
/* Logs entry to a native call (call node with sf == NULL). */
void MVM_profile_log_enter_native(MVMThreadContext *tc, MVMObject *nativecallsite);
/* Logs a normal exit from the current call node. */
void MVM_profile_log_exit(MVMThreadContext *tc);
/* Logs an exit caused by stack unwinding (e.g. exception). */
void MVM_profile_log_unwind(MVMThreadContext *tc);
/* Captures profiler state down to root_frame when a continuation is taken;
 * the returned data restores that state on later invocation. */
MVMProfileContinuationData * MVM_profile_log_continuation_control(MVMThreadContext *tc, const MVMFrame *root_frame);
/* Restores profiler state recorded in cd when a continuation is invoked. */
void MVM_profile_log_continuation_invoke(MVMThreadContext *tc, const MVMProfileContinuationData *cd);
/* Records an allocation of obj against the current call node. */
void MVM_profile_log_allocated(MVMThreadContext *tc, MVMObject *obj);
/* Marks the start of a GC run; full and this_thread_responsible are
 * recorded in the MVMProfileGC entry. */
void MVM_profiler_log_gc_start(MVMThreadContext *tc, MVMuint32 full, MVMuint32 this_thread_responsible);
/* Marks the end of the current GC run. */
void MVM_profiler_log_gc_end(MVMThreadContext *tc);
/* Marks the start of a spesh work interval (accumulated into spesh_time). */
void MVM_profiler_log_spesh_start(MVMThreadContext *tc);
/* Marks the end of the current spesh work interval. */
void MVM_profiler_log_spesh_end(MVMThreadContext *tc);
/* Records an OSR event; jitted indicates whether the target was JITted
 * (presumed from the parameter name — confirm in log.c). */
void MVM_profiler_log_osr(MVMThreadContext *tc, MVMuint64 jitted);
/* Records a deopt_one event against the current call node. */
void MVM_profiler_log_deopt_one(MVMThreadContext *tc);
/* Records a deopt_all event against the current call node. */
void MVM_profiler_log_deopt_all(MVMThreadContext *tc);