Ruby  2.7.1p83(2020-03-31revisiona0c7c23c9cec0d0ffcba012279cd652d28ad5bf3)
mjit_compile.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  mjit_compile.c - MRI method JIT compiler
4 
5  Copyright (C) 2017 Takashi Kokubun <takashikkbn@gmail.com>.
6 
7 **********************************************************************/
8 
9 // NOTE: All functions in this file are executed on MJIT worker. So don't
10 // call Ruby methods (C functions that may call rb_funcall) or trigger
11 // GC (using ZALLOC, xmalloc, xfree, etc.) in this file.
12 
13 #include "internal.h"
14 
15 #if USE_MJIT
16 
17 #include "vm_core.h"
18 #include "vm_exec.h"
19 #include "mjit.h"
20 #include "builtin.h"
21 #include "insns.inc"
22 #include "insns_info.inc"
23 #include "vm_insnhelper.h"
24 
25 // Macros to check if a position is already compiled using compile_status.stack_size_for_pos
26 #define NOT_COMPILED_STACK_SIZE -1
27 #define ALREADY_COMPILED_P(status, pos) (status->stack_size_for_pos[pos] != NOT_COMPILED_STACK_SIZE)
28 
29 static size_t
30 call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body)
31 {
32  const struct rb_kwarg_call_data *kw_calls = (const struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
33  const struct rb_kwarg_call_data *kw_cd = (const struct rb_kwarg_call_data *)cd;
34 
35  VM_ASSERT(cd >= body->call_data && kw_cd < (kw_calls + body->ci_kw_size));
36  if (kw_cd < kw_calls) {
37  return cd - body->call_data;
38  }
39  else {
40  return kw_cd - kw_calls + body->ci_size;
41  }
42 }
43 
// For propagating information needed for lazily pushing a frame.
// Captured at inlining time (from the call info / call cache / callee ISeq,
// as noted per field) so that the inlined-cancel handler can reconstruct
// the control frame that inlining skipped.
struct inlined_call_context {
    int orig_argc; // ci->orig_argc
    VALUE me; // cc->me
    int param_size; // def_iseq_ptr(cc->me->def)->body->param.size
    int local_size; // def_iseq_ptr(cc->me->def)->body->local_table_size
};
51 
// Storage to keep compiler's status. This should have information
// which is global during one `mjit_compile` call. Ones conditional
// in each branch should be stored in `compile_branch`.
struct compile_status {
    bool success; // has true if compilation has had no issue
    int *stack_size_for_pos; // stack_size_for_pos[pos] has stack size for the position (otherwise -1)
    // If true, JIT-ed code will use local variables to store pushed values instead of
    // using VM's stack and moving stack pointer.
    bool local_stack_p;
    // Safely-accessible cache entries copied from main thread.
    union iseq_inline_storage_entry *is_entries;
    struct rb_call_cache *cc_entries;
    // Mutated optimization levels
    struct rb_mjit_compile_info *compile_info;
    // If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there.
    // When the whole pointer is NULL, the current ISeq is itself being compiled as an
    // inlined body (see compile_cancel_handler).
    const struct rb_iseq_constant_body **inlined_iseqs;
    // Frame information used by compile_inlined_cancel_handler when this ISeq is inlined.
    struct inlined_call_context inline_context;
};
70 
// Storage to keep data which is consistent in each conditional branch.
// This is created and used for one `compile_insns` call and its values
// should be copied for extra `compile_insns` call.
struct compile_branch {
    unsigned int stack_size; // this simulates sp (stack pointer) of YARV
    bool finish_p; // if true, compilation in this branch should stop and let another branch to be compiled
};
78 
// State threaded through compile_case_dispatch_each while emitting the
// C `switch` cases for a case-dispatch hash.
struct case_dispatch_var {
    FILE *f;               // file the JIT-ed code is written to
    unsigned int base_pos; // added to each FIX2INT(value) offset to form the goto label
    VALUE last_value;      // previously emitted hash value, used to skip duplicates
};
84 
85 // Returns true if call cache is still not obsoleted and cc->me->def->type is available.
86 static bool
87 has_valid_method_type(CALL_CACHE cc)
88 {
89  extern bool mjit_valid_class_serial_p(rb_serial_t class_serial);
92 }
93 
94 // Returns true if iseq can use fastpath for setup, otherwise NULL. This becomes true in the same condition
95 // as CC_SET_FASTPATH (in vm_callee_setup_arg) is called from vm_call_iseq_setup.
96 static bool
97 fastpath_applied_iseq_p(const CALL_INFO ci, const CALL_CACHE cc, const rb_iseq_t *iseq)
98 {
99  extern bool rb_simple_iseq_p(const rb_iseq_t *iseq);
100  return iseq != NULL
101  && !(ci->flag & VM_CALL_KW_SPLAT) && rb_simple_iseq_p(iseq) // Top of vm_callee_setup_arg. In this case, opt_pc is 0.
102  && ci->orig_argc == iseq->body->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns)
103  && vm_call_iseq_optimizable_p(ci, cc); // CC_SET_FASTPATH condition
104 }
105 
106 static int
107 compile_case_dispatch_each(VALUE key, VALUE value, VALUE arg)
108 {
109  struct case_dispatch_var *var = (struct case_dispatch_var *)arg;
110  unsigned int offset;
111 
112  if (var->last_value != value) {
113  offset = FIX2INT(value);
114  var->last_value = value;
115  fprintf(var->f, " case %d:\n", offset);
116  fprintf(var->f, " goto label_%d;\n", var->base_pos + offset);
117  fprintf(var->f, " break;\n");
118  }
119  return ST_CONTINUE;
120 }
121 
// Write the ID's name as a C comment (`/* :"name" */`) to `f`, escaping any
// characters that would terminate or nest the comment.
// Calling rb_id2str in MJIT worker causes random SEGV. So this is disabled by default
// (compiled in only when MJIT_COMMENT_ID is defined).
static void
comment_id(FILE *f, ID id)
{
#ifdef MJIT_COMMENT_ID
    VALUE name = rb_id2str(id);
    const char *p, *e;
    char c, prev = '\0';

    if (!name) return;
    p = RSTRING_PTR(name);
    e = RSTRING_END(name);
    fputs("/* :\"", f);
    for (; p < e; ++p) {
        switch (c = *p) {
          // `c ^ ('/' ^ '*')` maps '*'<->'/', so the test is true exactly when
          // (prev, c) would form "/*" or "*/". In that case fall through and
          // emit a backslash so the pair cannot close/open a C comment.
          case '*': case '/': if (prev != (c ^ ('/' ^ '*'))) break;
          case '\\': case '"': fputc('\\', f);
        }
        fputc(c, f);
        prev = c;
    }
    fputs("\" */", f);
#endif
}
146 
147 static void compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size,
148  unsigned int pos, struct compile_status *status);
149 
// Main function of JIT compilation, vm_exec_core counterpart for JIT. Compile one insn to `f`, may modify
// b->stack_size and return next position.
//
// When you add a new instruction to insns.def, it would be nice to have JIT compilation support here but
// it's optional. This JIT compiler just ignores ISeq which includes unknown instruction, and ISeq which
// does not have it can be compiled as usual.
static unsigned int
compile_insn(FILE *f, const struct rb_iseq_constant_body *body, const int insn, const VALUE *operands,
             const unsigned int pos, struct compile_status *status, struct compile_branch *b)
{
    unsigned int next_pos = pos + insn_len(insn);

/*****************/
    // Generated dispatch over `insn`; may set b->finish_p, b->stack_size, status->success.
 #include "mjit_compile.inc"
/*****************/

    // If next_pos is already compiled and this branch is not finished yet,
    // next instruction won't be compiled in C code next and will need `goto`.
    if (!b->finish_p && next_pos < body->iseq_size && ALREADY_COMPILED_P(status, next_pos)) {
        fprintf(f, "goto label_%d;\n", next_pos);

        // Verify stack size assumption is the same among multiple branches
        if ((unsigned int)status->stack_size_for_pos[next_pos] != b->stack_size) {
            fprintf(stderr, "MJIT warning: JIT stack assumption is not the same between branches (%d != %u)\n",
                    status->stack_size_for_pos[next_pos], b->stack_size);
            status->success = false;
        }
    }

    return next_pos;
}
182 
// Compile one conditional branch. If it has branchXXX insn, this should be
// called multiple times for each branch.
static void
compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size,
              unsigned int pos, struct compile_status *status)
{
    int insn;
    struct compile_branch branch;

    branch.stack_size = stack_size;
    branch.finish_p = false;

    // Stop at the end of the ISeq, at a position an earlier branch already
    // compiled (compile_insn emits a `goto` to it instead), or when this
    // branch is marked finished.
    while (pos < body->iseq_size && !ALREADY_COMPILED_P(status, pos) && !branch.finish_p) {
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
        insn = rb_vm_insn_addr2insn((void *)body->iseq_encoded[pos]);
#else
        insn = (int)body->iseq_encoded[pos];
#endif
        // Record the simulated stack depth; this also marks `pos` as compiled
        // for ALREADY_COMPILED_P.
        status->stack_size_for_pos[pos] = (int)branch.stack_size;

        fprintf(f, "\nlabel_%d: /* %s */\n", pos, insn_name(insn));
        pos = compile_insn(f, body, insn, body->iseq_encoded + (pos+1), pos, status, &branch);
        if (status->success && branch.stack_size > body->stack_max) {
            fprintf(stderr, "MJIT warning: JIT stack size (%d) exceeded its max size (%d)\n", branch.stack_size, body->stack_max);
            status->success = false;
        }
        if (!status->success)
            break;
    }
}
214 
// Print the block to cancel inlined method call. It's supporting only `opt_send_without_block` for now.
// The generated code disables inlining for the original ISeq and requests
// recompilation, lazily pushes the frame that inlining skipped (using
// `inline_context`), and re-enters the interpreter via vm_exec.
static void
compile_inlined_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct inlined_call_context *inline_context)
{
    fprintf(f, "\ncancel:\n");
    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel);\n");
    fprintf(f, " rb_mjit_iseq_compile_info(original_iseq->body)->disable_inlining = true;\n");
    fprintf(f, " rb_mjit_recompile_iseq(original_iseq);\n");

    // Swap pc/sp set on cancel with original pc/sp.
    fprintf(f, " const VALUE current_pc = reg_cfp->pc;\n");
    fprintf(f, " const VALUE current_sp = reg_cfp->sp;\n");
    fprintf(f, " reg_cfp->pc = orig_pc;\n");
    fprintf(f, " reg_cfp->sp = orig_sp;\n\n");

    // Lazily push the current call frame.
    fprintf(f, " struct rb_calling_info calling;\n");
    fprintf(f, " calling.block_handler = VM_BLOCK_HANDLER_NONE;\n"); // assumes `opt_send_without_block`
    fprintf(f, " calling.argc = %d;\n", inline_context->orig_argc);
    fprintf(f, " calling.recv = reg_cfp->self;\n");
    fprintf(f, " reg_cfp->self = orig_self;\n");
    fprintf(f, " vm_call_iseq_setup_normal(ec, reg_cfp, &calling, (const rb_callable_method_entry_t *)0x%"PRIxVALUE", 0, %d, %d);\n\n",
            inline_context->me, inline_context->param_size, inline_context->local_size); // fastpath_applied_iseq_p checks rb_simple_iseq_p, which ensures has_opt == FALSE

    // Start usual cancel from here.
    fprintf(f, " reg_cfp = ec->cfp;\n"); // work on the new frame
    fprintf(f, " reg_cfp->pc = current_pc;\n");
    fprintf(f, " reg_cfp->sp = current_sp;\n");
    // Spill the C-local `stack` back to the VM stack before resuming.
    for (unsigned int i = 0; i < body->stack_max; i++) { // should be always `status->local_stack_p`
        fprintf(f, " *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n", i, i);
    }
    // We're not just returning Qundef here so that caller's normal cancel handler can
    // push back `stack` to `cfp->sp`.
    fprintf(f, " return vm_exec(ec, ec->cfp);\n");
}
250 
// Print the block to cancel JIT execution: one label per cancel reason
// (send cache, ivar cache, generic). Each specific reason disables the
// corresponding optimization and requests recompilation before falling
// through to the generic `cancel` label.
static void
compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct compile_status *status)
{
    if (status->inlined_iseqs == NULL) { // the current ISeq is being inlined
        compile_inlined_cancel_handler(f, body, &status->inline_context);
        return;
    }

    fprintf(f, "\nsend_cancel:\n");
    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_send_inline);\n");
    fprintf(f, " rb_mjit_iseq_compile_info(original_iseq->body)->disable_send_cache = true;\n");
    fprintf(f, " rb_mjit_recompile_iseq(original_iseq);\n");
    fprintf(f, " goto cancel;\n");

    fprintf(f, "\nivar_cancel:\n");
    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel_ivar_inline);\n");
    fprintf(f, " rb_mjit_iseq_compile_info(original_iseq->body)->disable_ivar_cache = true;\n");
    fprintf(f, " rb_mjit_recompile_iseq(original_iseq);\n");
    fprintf(f, " goto cancel;\n");

    fprintf(f, "\ncancel:\n");
    fprintf(f, " RB_DEBUG_COUNTER_INC(mjit_cancel);\n");
    if (status->local_stack_p) {
        // Spill the C-local `stack` back to the VM stack before returning to the VM.
        for (unsigned int i = 0; i < body->stack_max; i++) {
            fprintf(f, " *(vm_base_ptr(reg_cfp) + %d) = stack[%d];\n", i, i);
        }
    }
    fprintf(f, " return Qundef;\n");
}
281 
282 extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries);
283 
// Compile the body of `iseq` to C code and write it to `f`. Only the
// function body is emitted; the surrounding signature is printed by the
// caller (mjit_compile or precompile_inlinable_iseqs). Returns
// `status->success`, which compile_insns may flip to false.
static bool
mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
{
    const struct rb_iseq_constant_body *body = iseq->body;
    status->success = true;
    // With no exception handlers, pushed values can live in C locals instead
    // of the VM stack (no sp motion needed).
    status->local_stack_p = !body->catch_except_p;

    if (status->local_stack_p) {
        fprintf(f, " VALUE stack[%d];\n", body->stack_max);
    }
    else {
        fprintf(f, " VALUE *stack = reg_cfp->sp;\n");
    }
    if (status->inlined_iseqs != NULL) // i.e. compile root
        fprintf(f, " static const rb_iseq_t *original_iseq = (const rb_iseq_t *)0x%"PRIxVALUE";\n", (VALUE)iseq);
    fprintf(f, " static const VALUE *const original_body_iseq = (VALUE *)0x%"PRIxVALUE";\n",
            (VALUE)body->iseq_encoded);

    // Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables
    // are not considered since vm_exec doesn't call mjit_exec for catch tables.
    if (body->param.flags.has_opt) {
        int i;
        fprintf(f, "\n");
        fprintf(f, " switch (reg_cfp->pc - reg_cfp->iseq->body->iseq_encoded) {\n");
        for (i = 0; i <= body->param.opt_num; i++) {
            VALUE pc_offset = body->param.opt_table[i];
            fprintf(f, " case %"PRIdVALUE":\n", pc_offset);
            fprintf(f, " goto label_%"PRIdVALUE";\n", pc_offset);
        }
        fprintf(f, " }\n");
    }

    compile_insns(f, body, 0, 0, status);
    compile_cancel_handler(f, body, status);
    return status->success;
}
320 
321 // Return true if the ISeq can be inlined without pushing a new control frame.
322 static bool
323 inlinable_iseq_p(const struct rb_iseq_constant_body *body)
324 {
325  // 1) If catch_except_p, caller frame should be preserved when callee catches an exception.
326  // Then we need to wrap `vm_exec()` but then we can't inline the call inside it.
327  //
328  // 2) If `body->catch_except_p` is false and `handles_sp?` of an insn is false,
329  // sp is not moved as we assume `status->local_stack_p = !body->catch_except_p`.
330  //
331  // 3) If `body->catch_except_p` is false and `always_leaf?` of an insn is true,
332  // pc is not moved.
333  if (body->catch_except_p)
334  return false;
335 
336  unsigned int pos = 0;
337  while (pos < body->iseq_size) {
338 #if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
339  int insn = rb_vm_insn_addr2insn((void *)body->iseq_encoded[pos]);
340 #else
341  int insn = (int)body->iseq_encoded[pos];
342 #endif
343  // All insns in the ISeq except `leave` (to be overridden in the inlined code)
344  // should meet following strong assumptions:
345  // * Do not require `cfp->sp` motion
346  // * Do not move `cfp->pc`
347  // * Do not read any `cfp->pc`
348  if (insn != BIN(leave) && insn_may_depend_on_sp_or_pc(insn, body->iseq_encoded + (pos + 1)))
349  return false;
350  // At this moment, `cfp->ep` in an inlined method is not working.
351  switch (insn) {
352  case BIN(getlocal):
353  case BIN(getlocal_WC_0):
354  case BIN(getlocal_WC_1):
355  case BIN(setlocal):
356  case BIN(setlocal_WC_0):
357  case BIN(setlocal_WC_1):
358  case BIN(getblockparam):
359  case BIN(getblockparamproxy):
360  case BIN(setblockparam):
361  return false;
362  }
363  pos += insn_len(insn);
364  }
365  return true;
366 }
367 
// Initialize a `struct compile_status` for `body`. All per-position buffers
// are allocated with `alloca`, so they live only as long as the *caller's*
// stack frame — which is why this needs to be macro instead of a function.
// When compile_root_p is false, a throw-away rb_mjit_compile_info is used
// instead of the ISeq's shared one.
#define INIT_COMPILE_STATUS(status, body, compile_root_p) do { \
    status = (struct compile_status){ \
        .stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \
        .inlined_iseqs = compile_root_p ? \
            alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
        .cc_entries = (body->ci_size + body->ci_kw_size) > 0 ? \
            alloca(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size)) : NULL, \
        .is_entries = (body->is_size > 0) ? \
            alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
        .compile_info = compile_root_p ? \
            rb_mjit_iseq_compile_info(body) : alloca(sizeof(struct rb_mjit_compile_info)) \
    }; \
    /* NOT_COMPILED_STACK_SIZE is -1, i.e. all bits set, so byte-wise memset yields -1 ints */ \
    memset(status.stack_size_for_pos, NOT_COMPILED_STACK_SIZE, sizeof(int) * body->iseq_size); \
    if (compile_root_p) \
        memset((void *)status.inlined_iseqs, 0, sizeof(const struct rb_iseq_constant_body *) * body->iseq_size); \
    else \
        memset(status.compile_info, 0, sizeof(struct rb_mjit_compile_info)); \
} while (0)
387 
388 // Compile inlinable ISeqs to C code in `f`. It returns true if it succeeds to compile them.
389 static bool
390 precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
391 {
392  const struct rb_iseq_constant_body *body = iseq->body;
393  unsigned int pos = 0;
394  while (pos < body->iseq_size) {
395 #if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
396  int insn = rb_vm_insn_addr2insn((void *)body->iseq_encoded[pos]);
397 #else
398  int insn = (int)body->iseq_encoded[pos];
399 #endif
400 
401  if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
402  CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
403  CALL_INFO ci = &cd->ci;
404  CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body); // use copy to avoid race condition
405 
406  const rb_iseq_t *child_iseq;
407  if (has_valid_method_type(cc_copy) &&
408  !(ci->flag & VM_CALL_TAILCALL) && // inlining only non-tailcall path
409  cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, child_iseq = def_iseq_ptr(cc_copy->me->def)) && // CC_SET_FASTPATH in vm_callee_setup_arg
410  inlinable_iseq_p(child_iseq->body)) {
411  status->inlined_iseqs[pos] = child_iseq->body;
412 
413  if (mjit_opts.verbose >= 1) // print beforehand because ISeq may be GCed during copy job.
414  fprintf(stderr, "JIT inline: %s@%s:%d => %s@%s:%d\n",
417  RSTRING_PTR(child_iseq->body->location.label),
418  RSTRING_PTR(rb_iseq_path(child_iseq)), FIX2INT(child_iseq->body->location.first_lineno));
419 
420  struct compile_status child_status;
421  INIT_COMPILE_STATUS(child_status, child_iseq->body, false);
422  child_status.inline_context = (struct inlined_call_context){
423  .orig_argc = ci->orig_argc,
424  .me = (VALUE)cc_copy->me,
425  .param_size = child_iseq->body->param.size,
426  .local_size = child_iseq->body->local_table_size
427  };
428  if ((child_status.cc_entries != NULL || child_status.is_entries != NULL)
429  && !mjit_copy_cache_from_main_thread(child_iseq, child_status.cc_entries, child_status.is_entries))
430  return false;
431 
432  fprintf(f, "ALWAYS_INLINE(static VALUE _mjit_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq));\n", pos);
433  fprintf(f, "static inline VALUE\n_mjit_inlined_%d(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE orig_self, const rb_iseq_t *original_iseq)\n{\n", pos);
434  fprintf(f, " const VALUE *orig_pc = reg_cfp->pc;\n");
435  fprintf(f, " const VALUE *orig_sp = reg_cfp->sp;\n");
436  bool success = mjit_compile_body(f, child_iseq, &child_status);
437  fprintf(f, "\n} /* end of _mjit_inlined_%d */\n\n", pos);
438 
439  if (!success)
440  return false;
441  }
442  }
443  pos += insn_len(insn);
444  }
445  return true;
446 }
447 
// Compile ISeq to C code in `f`. It returns true if it succeeds to compile.
// `funcname` names the emitted C function (exported with dllexport on
// Windows). This is the entry point called by the MJIT worker.
bool
mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
{
    // For performance, we verify stack size only on compilation time (mjit_compile.inc.erb) without --jit-debug
    if (!mjit_opts.debug) {
        fprintf(f, "#undef OPT_CHECKED_RUN\n");
        fprintf(f, "#define OPT_CHECKED_RUN 0\n\n");
    }

    struct compile_status status;
    INIT_COMPILE_STATUS(status, iseq->body, true);
    // Snapshot inline caches from the main thread; the worker must not read them directly.
    if ((status.cc_entries != NULL || status.is_entries != NULL)
        && !mjit_copy_cache_from_main_thread(iseq, status.cc_entries, status.is_entries))
        return false;

    if (!status.compile_info->disable_send_cache && !status.compile_info->disable_inlining) {
        if (!precompile_inlinable_iseqs(f, iseq, &status))
            return false;
    }

#ifdef _WIN32
    fprintf(f, "__declspec(dllexport)\n");
#endif
    fprintf(f, "VALUE\n%s(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)\n{\n", funcname);
    bool success = mjit_compile_body(f, iseq, &status);
    fprintf(f, "\n} // end of %s\n", funcname);
    return success;
}
477 
478 #endif // USE_MJIT
rb_kwarg_call_data
Definition: vm_core.h:257
i
uint32_t i
Definition: rb_mjit_min_header-2.7.1.h:5425
ID
unsigned long ID
Definition: ruby.h:103
FIX2INT
#define FIX2INT(x)
Definition: ruby.h:717
rb_iseq_struct
Definition: vm_core.h:456
VM_CALL_TAILCALL
#define VM_CALL_TAILCALL
Definition: vm_core.h:1109
rb_call_cache::me
const struct rb_callable_method_entry_struct * me
Definition: internal.h:2378
PRIxVALUE
#define PRIxVALUE
Definition: ruby.h:164
RSTRING_PTR
#define RSTRING_PTR(str)
Definition: ruby.h:1009
fputc
int fputc(int, FILE *)
VALUE
unsigned long VALUE
Definition: ruby.h:102
rb_iseq_path
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
rb_iseq_constant_body::location
rb_iseq_location_t location
Definition: vm_core.h:399
arg
VALUE arg
Definition: rb_mjit_min_header-2.7.1.h:5562
rb_iseq_location_struct::first_lineno
VALUE first_lineno
Definition: vm_core.h:276
iseq
const rb_iseq_t * iseq
Definition: rb_mjit_min_header-2.7.1.h:13426
fputs
int fputs(const char *__restrict, FILE *__restrict)
me
const rb_callable_method_entry_t * me
Definition: rb_mjit_min_header-2.7.1.h:13151
rb_call_cache::method_state
rb_serial_t method_state
Definition: internal.h:2362
rb_id2str
#define rb_id2str(id)
Definition: vm_backtrace.c:30
rb_vm_insn_addr2insn
int rb_vm_insn_addr2insn(const void *)
Definition: iseq.c:3111
mjit_options::warnings
char warnings
Definition: rb_mjit_min_header-2.7.1.h:11652
NULL
#define NULL
Definition: _sdbm.c:101
VM_ASSERT
#define VM_ASSERT(expr)
Definition: vm_core.h:56
rb_iseq_constant_body
Definition: vm_core.h:311
rb_iseq_constant_body::opt_table
const VALUE * opt_table
Definition: vm_core.h:374
rb_call_cache
Definition: internal.h:2360
if
if((ID)(DISPID) nameid !=nameid)
Definition: win32ole.c:357
rb_iseq_constant_body::param
struct rb_iseq_constant_body::@178 param
parameter information
rb_iseq_constant_body::lead_num
int lead_num
Definition: vm_core.h:367
VM_CALL_KW_SPLAT
#define VM_CALL_KW_SPLAT
Definition: vm_core.h:1108
rb_iseq_location_struct::label
VALUE label
Definition: vm_core.h:275
rb_iseq_constant_body::local_table_size
unsigned int local_table_size
Definition: vm_core.h:435
rb_serial_t
unsigned long rb_serial_t
Definition: internal.h:1014
param_size
rb_control_frame_t struct rb_calling_info const rb_callable_method_entry_t int int param_size
Definition: rb_mjit_min_header-2.7.1.h:14481
rb_simple_iseq_p
MJIT_STATIC bool rb_simple_iseq_p(const rb_iseq_t *iseq)
Definition: vm_insnhelper.c:1919
rb_iseq_constant_body::ci_size
unsigned int ci_size
Definition: vm_core.h:437
rb_mjit_compile_info
Definition: rb_mjit_min_header-2.7.1.h:11660
rb_iseq_constant_body::size
unsigned int size
Definition: vm_core.h:365
mjit.h
vm_core.h
mjit_valid_class_serial_p
bool mjit_valid_class_serial_p(rb_serial_t class_serial)
Definition: mjit_worker.c:475
GET_GLOBAL_METHOD_STATE
#define GET_GLOBAL_METHOD_STATE()
Definition: vm_insnhelper.h:189
rb_call_cache::class_serial
rb_serial_t class_serial[(CACHELINE - sizeof(rb_serial_t) - sizeof(struct rb_callable_method_entry_struct *) - sizeof(uintptr_t) - sizeof(enum method_missing_reason) - sizeof(VALUE(*)(struct rb_execution_context_struct *e, struct rb_control_frame_struct *, struct rb_calling_info *, const struct rb_call_data *)))/sizeof(rb_serial_t)]
Definition: internal.h:2375
rb_iseq_constant_body::flags
struct rb_iseq_constant_body::@178::@180 flags
BIN
#define BIN(n)
Definition: rb_mjit_min_header-2.7.1.h:11893
false
#define false
Definition: stdbool.h:14
key
key
Definition: openssl_missing.h:181
mjit_options::verbose
int verbose
Definition: rb_mjit_min_header-2.7.1.h:11657
internal.h
f
#define f
ST_CONTINUE
@ ST_CONTINUE
Definition: st.h:99
rb_iseq_constant_body::iseq_encoded
VALUE * iseq_encoded
Definition: vm_core.h:325
mjit_opts
struct mjit_options mjit_opts
Definition: mjit_worker.c:174
rb_iseq_constant_body::opt_num
int opt_num
Definition: vm_core.h:368
cc
const struct rb_call_cache * cc
Definition: rb_mjit_min_header-2.7.1.h:13153
mjit_copy_cache_from_main_thread
bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
Definition: mjit_worker.c:1141
local_size
rb_control_frame_t struct rb_calling_info const rb_callable_method_entry_t int int int local_size
Definition: rb_mjit_min_header-2.7.1.h:14481
int
__inline__ int
Definition: rb_mjit_min_header-2.7.1.h:2807
VM_METHOD_TYPE_ISEQ
@ VM_METHOD_TYPE_ISEQ
Ruby method.
Definition: method.h:102
ci
rb_control_frame_t struct rb_calling_info const struct rb_call_info * ci
Definition: rb_mjit_min_header-2.7.1.h:15083
rb_call_data::ci
struct rb_call_info ci
Definition: internal.h:2400
CALL_DATA
struct rb_call_data * CALL_DATA
Definition: vm_core.h:1134
vm_exec.h
PRIdVALUE
#define PRIdVALUE
Definition: ruby.h:161
rb_iseq_struct::body
struct rb_iseq_constant_body * body
Definition: vm_core.h:460
mjit_compile
_Bool mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
rb_iseq_constant_body::iseq_size
unsigned int iseq_size
Definition: vm_core.h:324
rb_iseq_constant_body::ci_kw_size
unsigned int ci_kw_size
Definition: vm_core.h:438
stderr
#define stderr
Definition: rb_mjit_min_header-2.7.1.h:1479
rb_iseq_constant_body::catch_except_p
char catch_except_p
Definition: vm_core.h:441
rb_call_info
Definition: internal.h:2392
builtin.h
rb_iseq_constant_body::call_data
struct rb_call_data * call_data
Definition: vm_core.h:421
vm_insnhelper.h
rb_call_data
Definition: internal.h:2398
fprintf
int fprintf(FILE *__restrict, const char *__restrict,...) __attribute__((__format__(__printf__
__sFILE
Definition: vsnprintf.c:169
rb_iseq_constant_body::stack_max
unsigned int stack_max
Definition: vm_core.h:439
RSTRING_END
#define RSTRING_END(str)
Definition: ruby.h:1013
iseq_inline_storage_entry
Definition: vm_core.h:231
name
const char * name
Definition: nkf.c:208
mjit_options::debug
char debug
Definition: rb_mjit_min_header-2.7.1.h:11653