Ruby 2.7.1p83 (2020-03-31 revision a0c7c23c9cec0d0ffcba012279cd652d28ad5bf3)
gc.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  gc.c -
4 
5  $Author$
6  created at: Tue Oct 5 09:44:46 JST 1993
7 
8  Copyright (C) 1993-2007 Yukihiro Matsumoto
9  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10  Copyright (C) 2000 Information-technology Promotion Agency, Japan
11 
12 **********************************************************************/
13 
14 #define rb_data_object_alloc rb_data_object_alloc
15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc
16 
17 #include "ruby/encoding.h"
18 #include "ruby/io.h"
19 #include "ruby/st.h"
20 #include "ruby/re.h"
21 #include "ruby/thread.h"
22 #include "ruby/util.h"
23 #include "ruby/debug.h"
24 #include "internal.h"
25 #include "eval_intern.h"
26 #include "vm_core.h"
27 #include "builtin.h"
28 #include "gc.h"
29 #include "constant.h"
30 #include "ruby_atomic.h"
31 #include "probes.h"
32 #include "id_table.h"
33 #include "symbol.h"
34 #include <stdio.h>
35 #include <stdarg.h>
36 #include <setjmp.h>
37 #include <sys/types.h>
38 #include "ruby_assert.h"
39 #include "debug_counter.h"
40 #include "transient_heap.h"
41 #include "mjit.h"
42 
43 #undef rb_data_object_wrap
44 
45 #ifndef HAVE_MALLOC_USABLE_SIZE
46 # ifdef _WIN32
47 # define HAVE_MALLOC_USABLE_SIZE
48 # define malloc_usable_size(a) _msize(a)
49 # elif defined HAVE_MALLOC_SIZE
50 # define HAVE_MALLOC_USABLE_SIZE
51 # define malloc_usable_size(a) malloc_size(a)
52 # endif
53 #endif
54 #ifdef HAVE_MALLOC_USABLE_SIZE
55 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
56 # include RUBY_ALTERNATIVE_MALLOC_HEADER
57 # elif HAVE_MALLOC_H
58 # include <malloc.h>
59 # elif defined(HAVE_MALLOC_NP_H)
60 # include <malloc_np.h>
61 # elif defined(HAVE_MALLOC_MALLOC_H)
62 # include <malloc/malloc.h>
63 # endif
64 #endif
65 
66 #ifdef HAVE_SYS_TIME_H
67 #include <sys/time.h>
68 #endif
69 
70 #ifdef HAVE_SYS_RESOURCE_H
71 #include <sys/resource.h>
72 #endif
73 
74 #if defined _WIN32 || defined __CYGWIN__
75 #include <windows.h>
76 #elif defined(HAVE_POSIX_MEMALIGN)
77 #elif defined(HAVE_MEMALIGN)
78 #include <malloc.h>
79 #endif
80 
81 #define rb_setjmp(env) RUBY_SETJMP(env)
82 #define rb_jmp_buf rb_jmpbuf_t
83 
84 #if defined(_MSC_VER) && defined(_WIN64)
85 #include <intrin.h>
86 #pragma intrinsic(_umul128)
87 #endif
88 
/* Small "maybe" pair: `left` is true when the computation overflowed,
 * `right` holds the (possibly wrapped) result.  Expected to be optimized
 * away entirely once the helpers below are inlined. */
struct optional {
    bool left;    /* overflow flag */
    size_t right; /* result value */
};

/* Multiply x * y, reporting overflow instead of silently wrapping.
 * Returns {overflowed, product}; `product` still carries the wrapped
 * value when `overflowed` is true. */
static inline struct optional
size_mul_overflow(size_t x, size_t y)
{
    bool overflowed;
    size_t product;
#if 0

#elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
    /* GCC/Clang intrinsic: multiply plus hardware overflow flag. */
    overflowed = __builtin_mul_overflow(x, y, &product);

#elif defined(DSIZE_T)
    /* Widen to a double-width integer and compare against SIZE_MAX. */
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx * dy;
    overflowed = dz > SIZE_MAX;
    product = (size_t)dz;

#elif defined(_MSC_VER) && defined(_WIN64)
    /* MSVC x64: _umul128 returns the low 64 bits and stores the high
     * 64 bits; a nonzero high part means the product did not fit. */
    unsigned __int64 dp;
    unsigned __int64 dz = _umul128(x, y, &dp);
    overflowed = (bool)dp;
    product = (size_t)dz;

#else
    /* Portable pre-check, per CERT INT30-C:
     * https://wiki.sei.cmu.edu/confluence/display/c/INT30-C.+Ensure+that+unsigned+integer+operations+do+not+wrap */
    overflowed = (y != 0) && (x > SIZE_MAX / y);
    product = x * y;

#endif
    return (struct optional) { overflowed, product, };
}
126 
127 static inline struct optional
128 size_add_overflow(size_t x, size_t y)
129 {
130  size_t z;
131  bool p;
132 #if 0
133 
134 #elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
135  p = __builtin_add_overflow(x, y, &z);
136 
137 #elif defined(DSIZE_T)
138  RB_GNUC_EXTENSION DSIZE_T dx = x;
139  RB_GNUC_EXTENSION DSIZE_T dy = y;
140  RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
141  p = dz > SIZE_MAX;
142  z = (size_t)dz;
143 
144 #else
145  z = x + y;
146  p = z < y;
147 
148 #endif
149  return (struct optional) { p, z, };
150 }
151 
152 static inline struct optional
153 size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
154 {
155  struct optional t = size_mul_overflow(x, y);
156  struct optional u = size_add_overflow(t.right, z);
157  return (struct optional) { t.left || u.left, u.right };
158 }
159 
160 static inline struct optional
161 size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
162 {
163  struct optional t = size_mul_overflow(x, y);
164  struct optional u = size_mul_overflow(z, w);
165  struct optional v = size_add_overflow(t.right, u.right);
166  return (struct optional) { t.left || u.left || v.left, v.right };
167 }
168 
169 PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
170 
171 static inline size_t
172 size_mul_or_raise(size_t x, size_t y, VALUE exc)
173 {
174  struct optional t = size_mul_overflow(x, y);
175  if (LIKELY(!t.left)) {
176  return t.right;
177  }
178  else if (rb_during_gc()) {
179  rb_memerror(); /* or...? */
180  }
181  else {
182  gc_raise(
183  exc,
184  "integer overflow: %"PRIuSIZE
185  " * %"PRIuSIZE
186  " > %"PRIuSIZE,
187  x, y, SIZE_MAX);
188  }
189 }
190 
191 size_t
192 rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
193 {
194  return size_mul_or_raise(x, y, exc);
195 }
196 
197 static inline size_t
198 size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
199 {
200  struct optional t = size_mul_add_overflow(x, y, z);
201  if (LIKELY(!t.left)) {
202  return t.right;
203  }
204  else if (rb_during_gc()) {
205  rb_memerror(); /* or...? */
206  }
207  else {
208  gc_raise(
209  exc,
210  "integer overflow: %"PRIuSIZE
211  " * %"PRIuSIZE
212  " + %"PRIuSIZE
213  " > %"PRIuSIZE,
214  x, y, z, SIZE_MAX);
215  }
216 }
217 
218 size_t
219 rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
220 {
221  return size_mul_add_or_raise(x, y, z, exc);
222 }
223 
224 static inline size_t
225 size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
226 {
227  struct optional t = size_mul_add_mul_overflow(x, y, z, w);
228  if (LIKELY(!t.left)) {
229  return t.right;
230  }
231  else if (rb_during_gc()) {
232  rb_memerror(); /* or...? */
233  }
234  else {
235  gc_raise(
236  exc,
237  "integer overflow: %"PRIdSIZE
238  " * %"PRIdSIZE
239  " + %"PRIdSIZE
240  " * %"PRIdSIZE
241  " > %"PRIdSIZE,
242  x, y, z, w, SIZE_MAX);
243  }
244 }
245 
246 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
247 /* trick the compiler into thinking a external signal handler uses this */
249 volatile VALUE *
251 {
252  rb_gc_guarded_val = val;
253 
254  return ptr;
255 }
256 #endif
257 
258 #ifndef GC_HEAP_INIT_SLOTS
259 #define GC_HEAP_INIT_SLOTS 10000
260 #endif
261 #ifndef GC_HEAP_FREE_SLOTS
262 #define GC_HEAP_FREE_SLOTS 4096
263 #endif
264 #ifndef GC_HEAP_GROWTH_FACTOR
265 #define GC_HEAP_GROWTH_FACTOR 1.8
266 #endif
267 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
268 #define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 is disable */
269 #endif
270 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
271 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
272 #endif
273 
274 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
275 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
276 #endif
277 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
278 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
279 #endif
280 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
281 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
282 #endif
283 
284 #ifndef GC_MALLOC_LIMIT_MIN
285 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
286 #endif
287 #ifndef GC_MALLOC_LIMIT_MAX
288 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
289 #endif
290 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
291 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
292 #endif
293 
294 #ifndef GC_OLDMALLOC_LIMIT_MIN
295 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
296 #endif
297 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
298 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
299 #endif
300 #ifndef GC_OLDMALLOC_LIMIT_MAX
301 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
302 #endif
303 
304 #ifndef PRINT_MEASURE_LINE
305 #define PRINT_MEASURE_LINE 0
306 #endif
307 #ifndef PRINT_ENTER_EXIT_TICK
308 #define PRINT_ENTER_EXIT_TICK 0
309 #endif
310 #ifndef PRINT_ROOT_TICKS
311 #define PRINT_ROOT_TICKS 0
312 #endif
313 
314 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
315 #define TICK_TYPE 1
316 
317 typedef struct {
322 
327 
331 
335 
338 
339 static ruby_gc_params_t gc_params = {
344 
349 
353 
357 
358  FALSE,
359 };
360 
361 /* GC_DEBUG:
362  * enable to embed GC debugging information.
363  */
364 #ifndef GC_DEBUG
365 #define GC_DEBUG 0
366 #endif
367 
368 #if USE_RGENGC
369 /* RGENGC_DEBUG:
370  * 1: basic information
371  * 2: remember set operation
372  * 3: mark
373  * 4:
374  * 5: sweep
375  */
376 #ifndef RGENGC_DEBUG
377 #ifdef RUBY_DEVEL
378 #define RGENGC_DEBUG -1
379 #else
380 #define RGENGC_DEBUG 0
381 #endif
382 #endif
383 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
384 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
385 #else
386 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
387 #endif
389 
390 /* RGENGC_CHECK_MODE
391  * 0: disable all assertions
392  * 1: enable assertions (to debug RGenGC)
393  * 2: enable internal consistency check at each GC (for debugging)
394  * 3: enable internal consistency check at each GC steps (for debugging)
395  * 4: enable liveness check
396  * 5: show all references
397  */
398 #ifndef RGENGC_CHECK_MODE
399 #define RGENGC_CHECK_MODE 0
400 #endif
401 
402 // Note: using RUBY_ASSERT_WHEN() extend a macro in expr (info by nobu).
403 #define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
404 
405 /* RGENGC_OLD_NEWOBJ_CHECK
406  * 0: disable all assertions
407  * >0: make a OLD object when new object creation.
408  *
409  * Make one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB protected objects creation.
410  */
411 #ifndef RGENGC_OLD_NEWOBJ_CHECK
412 #define RGENGC_OLD_NEWOBJ_CHECK 0
413 #endif
414 
415 /* RGENGC_PROFILE
416  * 0: disable RGenGC profiling
417  * 1: enable profiling for basic information
418  * 2: enable profiling for each types
419  */
420 #ifndef RGENGC_PROFILE
421 #define RGENGC_PROFILE 0
422 #endif
423 
424 /* RGENGC_ESTIMATE_OLDMALLOC
425  * Enable/disable to estimate increase size of malloc'ed size by old objects.
426  * If estimation exceeds threshold, then will invoke full GC.
427  * 0: disable estimation.
428  * 1: enable estimation.
429  */
430 #ifndef RGENGC_ESTIMATE_OLDMALLOC
431 #define RGENGC_ESTIMATE_OLDMALLOC 1
432 #endif
433 
434 /* RGENGC_FORCE_MAJOR_GC
435  * Force major/full GC if this macro is not 0.
436  */
437 #ifndef RGENGC_FORCE_MAJOR_GC
438 #define RGENGC_FORCE_MAJOR_GC 0
439 #endif
440 
441 #else /* USE_RGENGC */
442 
443 #ifdef RGENGC_DEBUG
444 #undef RGENGC_DEBUG
445 #endif
446 #define RGENGC_DEBUG 0
447 #ifdef RGENGC_CHECK_MODE
448 #undef RGENGC_CHECK_MODE
449 #endif
450 #define RGENGC_CHECK_MODE 0
451 #define RGENGC_PROFILE 0
452 #define RGENGC_ESTIMATE_OLDMALLOC 0
453 #define RGENGC_FORCE_MAJOR_GC 0
454 
455 #endif /* USE_RGENGC */
456 
457 #ifndef GC_PROFILE_MORE_DETAIL
458 #define GC_PROFILE_MORE_DETAIL 0
459 #endif
460 #ifndef GC_PROFILE_DETAIL_MEMORY
461 #define GC_PROFILE_DETAIL_MEMORY 0
462 #endif
463 #ifndef GC_ENABLE_INCREMENTAL_MARK
464 #define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
465 #endif
466 #ifndef GC_ENABLE_LAZY_SWEEP
467 #define GC_ENABLE_LAZY_SWEEP 1
468 #endif
469 #ifndef CALC_EXACT_MALLOC_SIZE
470 #define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
471 #endif
472 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
473 #ifndef MALLOC_ALLOCATED_SIZE
474 #define MALLOC_ALLOCATED_SIZE 0
475 #endif
476 #else
477 #define MALLOC_ALLOCATED_SIZE 0
478 #endif
479 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
480 #define MALLOC_ALLOCATED_SIZE_CHECK 0
481 #endif
482 
483 #ifndef GC_DEBUG_STRESS_TO_CLASS
484 #define GC_DEBUG_STRESS_TO_CLASS 0
485 #endif
486 
487 #ifndef RGENGC_OBJ_INFO
488 #define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
489 #endif
490 
491 typedef enum {
492  GPR_FLAG_NONE = 0x000,
493  /* major reason */
498 #if RGENGC_ESTIMATE_OLDMALLOC
500 #endif
502 
503  /* gc reason */
507  GPR_FLAG_CAPI = 0x800,
508  GPR_FLAG_STRESS = 0x1000,
509 
510  /* others */
515 
520 
521 typedef struct gc_profile_record {
522  int flags;
523 
524  double gc_time;
526 
530 
531 #if GC_PROFILE_MORE_DETAIL
532  double gc_mark_time;
533  double gc_sweep_time;
534 
535  size_t heap_use_pages;
536  size_t heap_live_objects;
537  size_t heap_free_objects;
538 
539  size_t allocate_increase;
540  size_t allocate_limit;
541 
542  double prepare_time;
543  size_t removing_objects;
544  size_t empty_objects;
545 #if GC_PROFILE_DETAIL_MEMORY
546  long maxrss;
547  long minflt;
548  long majflt;
549 #endif
550 #endif
551 #if MALLOC_ALLOCATED_SIZE
552  size_t allocated_size;
553 #endif
554 
555 #if RGENGC_PROFILE > 0
556  size_t old_objects;
557  size_t remembered_normal_objects;
558  size_t remembered_shady_objects;
559 #endif
561 
562 #if defined(_MSC_VER) || defined(__CYGWIN__)
563 #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
564 #endif
565 
566 typedef struct RVALUE {
567  union {
568  struct {
569  VALUE flags; /* always 0 for freed obj */
570  struct RVALUE *next;
571  } free;
572  struct RMoved moved;
573  struct RBasic basic;
574  struct RObject object;
575  struct RClass klass;
576  struct RFloat flonum;
577  struct RString string;
578  struct RArray array;
579  struct RRegexp regexp;
580  struct RHash hash;
581  struct RData data;
583  struct RStruct rstruct;
584  struct RBignum bignum;
585  struct RFile file;
586  struct RMatch match;
589  union {
591  struct vm_svar svar;
593  struct vm_ifunc ifunc;
594  struct MEMO memo;
600  } imemo;
601  struct {
602  struct RBasic basic;
606  } values;
607  } as;
608 #if GC_DEBUG
609  const char *file;
610  int line;
611 #endif
612 } RVALUE;
613 
614 #if defined(_MSC_VER) || defined(__CYGWIN__)
615 #pragma pack(pop)
616 #endif
617 
619 enum {
620  BITS_SIZE = sizeof(bits_t),
622 };
623 #define popcount_bits rb_popcount_intptr
624 
626  struct heap_page *page;
627 };
628 
631  /* char gap[]; */
632  /* RVALUE values[]; */
633 };
634 
635 struct gc_list {
637  struct gc_list *next;
638 };
639 
640 #define STACK_CHUNK_SIZE 500
641 
642 typedef struct stack_chunk {
644  struct stack_chunk *next;
645 } stack_chunk_t;
646 
647 typedef struct mark_stack {
650  int index;
651  int limit;
652  size_t cache_size;
654 } mark_stack_t;
655 
656 typedef struct rb_heap_struct {
658 
661  struct list_head pages;
662  struct heap_page *sweeping_page; /* iterator for .pages */
663 #if GC_ENABLE_INCREMENTAL_MARK
665 #endif
666  size_t total_pages; /* total page count in a heap */
667  size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
668 } rb_heap_t;
669 
670 enum gc_mode {
674 };
675 
676 typedef struct rb_objspace {
677  struct {
678  size_t limit;
679  size_t increase;
680 #if MALLOC_ALLOCATED_SIZE
681  size_t allocated_size;
682  size_t allocations;
683 #endif
684  } malloc_params;
685 
686  struct {
687  unsigned int mode : 2;
688  unsigned int immediate_sweep : 1;
689  unsigned int dont_gc : 1;
690  unsigned int dont_incremental : 1;
691  unsigned int during_gc : 1;
692  unsigned int during_compacting : 1;
693  unsigned int gc_stressful: 1;
694  unsigned int has_hook: 1;
695 #if USE_RGENGC
696  unsigned int during_minor_gc : 1;
697 #endif
698 #if GC_ENABLE_INCREMENTAL_MARK
699  unsigned int during_incremental_marking : 1;
700 #endif
701  } flags;
702 
706 
708  rb_heap_t tomb_heap; /* heap for zombies and ghosts */
709 
710  struct {
712  } atomic_flags;
713 
715  void *data;
716  void (*mark_func)(VALUE v, void *data);
717  } *mark_func_data;
718 
720  size_t marked_slots;
721 
722  struct {
723  struct heap_page **sorted;
729 
730  /* final */
731  size_t final_slots;
733  } heap_pages;
734 
736 
737  struct {
738  int run;
742  size_t next_index;
743  size_t size;
744 
745 #if GC_PROFILE_MORE_DETAIL
746  double prepare_time;
747 #endif
748  double invoke_time;
749 
750 #if USE_RGENGC
754 #if RGENGC_PROFILE > 0
755  size_t total_generated_normal_object_count;
756  size_t total_generated_shady_object_count;
757  size_t total_shade_operation_count;
758  size_t total_promoted_count;
759  size_t total_remembered_normal_object_count;
760  size_t total_remembered_shady_object_count;
761 
762 #if RGENGC_PROFILE >= 2
763  size_t generated_normal_object_count_types[RUBY_T_MASK];
764  size_t generated_shady_object_count_types[RUBY_T_MASK];
765  size_t shade_operation_count_types[RUBY_T_MASK];
766  size_t promoted_types[RUBY_T_MASK];
767  size_t remembered_normal_object_count_types[RUBY_T_MASK];
768  size_t remembered_shady_object_count_types[RUBY_T_MASK];
769 #endif
770 #endif /* RGENGC_PROFILE */
771 #endif /* USE_RGENGC */
772 
773  /* temporary profiling space */
777 
778  /* basic statistics */
779  size_t count;
783  } profile;
785 
787 
788 #if USE_RGENGC
789  struct {
795  size_t old_objects;
797 
798 #if RGENGC_ESTIMATE_OLDMALLOC
801 #endif
802 
803 #if RGENGC_CHECK_MODE >= 2
804  struct st_table *allrefs_table;
805  size_t error_count;
806 #endif
807  } rgengc;
808 
809  struct {
812  } rcompactor;
813 
814 #if GC_ENABLE_INCREMENTAL_MARK
815  struct {
816  size_t pooled_slots;
817  size_t step_slots;
818  } rincgc;
819 #endif
820 #endif /* USE_RGENGC */
821 
824 
825 #if GC_DEBUG_STRESS_TO_CLASS
827 #endif
828 } rb_objspace_t;
829 
830 
831 /* default tiny heap size: 16KB */
832 #define HEAP_PAGE_ALIGN_LOG 14
833 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
834 enum {
837  REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
839  HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
842  HEAP_PAGE_BITMAP_PLANES = USE_RGENGC ? 4 : 1 /* RGENGC: mark, unprotected, uncollectible, marking */
843 };
844 
845 struct heap_page {
846  short total_slots;
847  short free_slots;
849  short final_slots;
850  struct {
851  unsigned int before_sweep : 1;
852  unsigned int has_remembered_objects : 1;
854  unsigned int in_tomb : 1;
855  } flags;
856 
861 
862 #if USE_RGENGC
864 #endif
865  /* the following three bitmaps are cleared at the beginning of full GC */
867 #if USE_RGENGC
870 #endif
871 
872  /* If set, the object is not movable */
874 };
875 
876 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
877 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
878 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
879 
880 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
881 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
882 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
883 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
884 
885 /* Bitmap Operations */
886 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
887 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
888 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
889 
890 /* getting bitmap */
891 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
892 #define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
893 #if USE_RGENGC
894 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
895 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
896 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
897 #endif
898 
899 /* Aliases */
900 #define rb_objspace (*rb_objspace_of(GET_VM()))
901 #define rb_objspace_of(vm) ((vm)->objspace)
902 
903 #define ruby_initial_gc_stress gc_params.gc_stress
904 
906 
907 #define malloc_limit objspace->malloc_params.limit
908 #define malloc_increase objspace->malloc_params.increase
909 #define malloc_allocated_size objspace->malloc_params.allocated_size
910 #define heap_pages_sorted objspace->heap_pages.sorted
911 #define heap_allocated_pages objspace->heap_pages.allocated_pages
912 #define heap_pages_sorted_length objspace->heap_pages.sorted_length
913 #define heap_pages_lomem objspace->heap_pages.range[0]
914 #define heap_pages_himem objspace->heap_pages.range[1]
915 #define heap_allocatable_pages objspace->heap_pages.allocatable_pages
916 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
917 #define heap_pages_final_slots objspace->heap_pages.final_slots
918 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
919 #define heap_eden (&objspace->eden_heap)
920 #define heap_tomb (&objspace->tomb_heap)
921 #define dont_gc objspace->flags.dont_gc
922 #define during_gc objspace->flags.during_gc
923 #define finalizing objspace->atomic_flags.finalizing
924 #define finalizer_table objspace->finalizer_table
925 #define global_list objspace->global_list
926 #define ruby_gc_stressful objspace->flags.gc_stressful
927 #define ruby_gc_stress_mode objspace->gc_stress_mode
928 #if GC_DEBUG_STRESS_TO_CLASS
929 #define stress_to_class objspace->stress_to_class
930 #else
931 #define stress_to_class 0
932 #endif
933 
934 static inline enum gc_mode
935 gc_mode_verify(enum gc_mode mode)
936 {
937 #if RGENGC_CHECK_MODE > 0
938  switch (mode) {
939  case gc_mode_none:
940  case gc_mode_marking:
941  case gc_mode_sweeping:
942  break;
943  default:
944  rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
945  }
946 #endif
947  return mode;
948 }
949 
950 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
951 #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
952 
953 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
954 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
955 #if USE_RGENGC
956 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
957 #else
958 #define is_full_marking(objspace) TRUE
959 #endif
960 #if GC_ENABLE_INCREMENTAL_MARK
961 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
962 #else
963 #define is_incremental_marking(objspace) FALSE
964 #endif
965 #if GC_ENABLE_INCREMENTAL_MARK
966 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
967 #else
968 #define will_be_incremental_marking(objspace) FALSE
969 #endif
970 #define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
971 #define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
972 
973 #if SIZEOF_LONG == SIZEOF_VOIDP
974 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
975 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
976 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
977 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
978 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
979  ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
980 #else
981 # error not supported
982 #endif
983 
984 #define RANY(o) ((RVALUE*)(o))
985 
986 struct RZombie {
987  struct RBasic basic;
989  void (*dfree)(void *);
990  void *data;
991 };
992 
993 #define RZOMBIE(o) ((struct RZombie *)(o))
994 
995 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
996 
997 #if RUBY_MARK_FREE_DEBUG
998 int ruby_gc_debug_indent = 0;
999 #endif
1002 
1003 void rb_iseq_mark(const rb_iseq_t *iseq);
1005 void rb_iseq_free(const rb_iseq_t *iseq);
1006 size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1007 void rb_vm_update_references(void *ptr);
1008 
1010 
1011 static VALUE define_final0(VALUE obj, VALUE block);
1012 
1013 NORETURN(static void negative_size_allocation_error(const char *));
1014 
1015 static void init_mark_stack(mark_stack_t *stack);
1016 
1017 static int ready_to_gc(rb_objspace_t *objspace);
1018 
1019 static int garbage_collect(rb_objspace_t *, int reason);
1020 
1021 static int gc_start(rb_objspace_t *objspace, int reason);
1022 static void gc_rest(rb_objspace_t *objspace);
1023 static inline void gc_enter(rb_objspace_t *objspace, const char *event);
1024 static inline void gc_exit(rb_objspace_t *objspace, const char *event);
1025 
1026 static void gc_marks(rb_objspace_t *objspace, int full_mark);
1027 static void gc_marks_start(rb_objspace_t *objspace, int full);
1028 static int gc_marks_finish(rb_objspace_t *objspace);
1029 static void gc_marks_rest(rb_objspace_t *objspace);
1030 static void gc_marks_step(rb_objspace_t *objspace, int slots);
1031 static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1032 
1033 static void gc_sweep(rb_objspace_t *objspace);
1034 static void gc_sweep_start(rb_objspace_t *objspace);
1035 static void gc_sweep_finish(rb_objspace_t *objspace);
1036 static int gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap);
1037 static void gc_sweep_rest(rb_objspace_t *objspace);
1038 static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1039 
1040 static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1041 static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1042 static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1043 static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
1044 NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1045 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1046 
1047 static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1048 static int gc_mark_stacked_objects_all(rb_objspace_t *);
1049 static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
1050 
1051 static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1052 NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1053 
1054 static void push_mark_stack(mark_stack_t *, VALUE);
1055 static int pop_mark_stack(mark_stack_t *, VALUE *);
1056 static size_t mark_stack_size(mark_stack_t *stack);
1057 static void shrink_stack_chunk_cache(mark_stack_t *stack);
1058 
1059 static size_t obj_memsize_of(VALUE obj, int use_all_types);
1060 static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1061 static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
1062 static int gc_verify_heap_pages(rb_objspace_t *objspace);
1063 
1064 static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1065 static VALUE gc_disable_no_rest(rb_objspace_t *);
1066 
1067 static double getrusage_time(void);
1068 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
1069 static inline void gc_prof_timer_start(rb_objspace_t *);
1070 static inline void gc_prof_timer_stop(rb_objspace_t *);
1071 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1072 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1073 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1074 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1075 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1076 static inline void gc_prof_set_heap_info(rb_objspace_t *);
1077 
1078 #define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1079  if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1080  *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1081  } \
1082 } while (0)
1083 
1084 #define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
1085 
1086 #define gc_prof_record(objspace) (objspace)->profile.current_record
1087 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1088 
1089 #ifdef HAVE_VA_ARGS_MACRO
1090 # define gc_report(level, objspace, ...) \
1091  if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1092 #else
1093 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1094 #endif
1095 PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1096 static const char *obj_info(VALUE obj);
1097 
1098 #define PUSH_MARK_FUNC_DATA(v) do { \
1099  struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
1100  objspace->mark_func_data = (v);
1101 
1102 #define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
1103 
1104 /*
1105  * 1 - TSC (H/W Time Stamp Counter)
1106  * 2 - getrusage
1107  */
1108 #ifndef TICK_TYPE
1109 #define TICK_TYPE 1
1110 #endif
1111 
1112 #if USE_TICK_T
1113 
1114 #if TICK_TYPE == 1
1115 /* the following code is only for internal tuning. */
1116 
1117 /* Source code to use RDTSC is quoted and modified from
1118  * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
1119  * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1120  */
1121 
1122 #if defined(__GNUC__) && defined(__i386__)
1123 typedef unsigned long long tick_t;
1124 #define PRItick "llu"
static inline tick_t
tick(void)
{
    /* RDTSC on 32-bit x86: the 64-bit time-stamp counter arrives in
     * EDX:EAX, which the "=A" constraint reads as one 64-bit value. */
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
    return x;
}
1132 
1133 #elif defined(__GNUC__) && defined(__x86_64__)
1134 typedef unsigned long long tick_t;
1135 #define PRItick "llu"
1136 
static __inline__ tick_t
tick(void)
{
    /* On x86-64, RDTSC splits the counter across EAX (low 32 bits) and
     * EDX (high 32 bits); recombine into a single 64-bit value. */
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
}
1144 
1145 #elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1146 typedef unsigned long long tick_t;
1147 #define PRItick "llu"
1148 
static __inline__ tick_t
tick(void)
{
    /* POWER time-base register, read via the GCC builtin
     * (available since GCC 4.8 per the guard above). */
    unsigned long long val = __builtin_ppc_get_timebase();
    return val;
}
1155 
1156 #elif defined(_WIN32) && defined(_MSC_VER)
1157 #include <intrin.h>
1158 typedef unsigned __int64 tick_t;
1159 #define PRItick "llu"
1160 
static inline tick_t
tick(void)
{
    /* MSVC intrinsic for the RDTSC instruction (see <intrin.h>). */
    return __rdtsc();
}
1166 
1167 #else /* use clock */
1168 typedef clock_t tick_t;
1169 #define PRItick "llu"
1170 
static inline tick_t
tick(void)
{
    /* Portable fallback when no TSC-style counter is available:
     * processor time via clock(3). */
    return clock();
}
1176 #endif /* TSC */
1177 
1178 #elif TICK_TYPE == 2
1179 typedef double tick_t;
1180 #define PRItick "4.9f"
1181 
static inline tick_t
tick(void)
{
    /* TICK_TYPE == 2: process resource-usage time from the file-local
     * getrusage_time() helper (returns a double). */
    return getrusage_time();
}
1187 #else /* TICK_TYPE */
1188 #error "choose tick type"
1189 #endif /* TICK_TYPE */
1190 
1191 #define MEASURE_LINE(expr) do { \
1192  volatile tick_t start_time = tick(); \
1193  volatile tick_t end_time; \
1194  expr; \
1195  end_time = tick(); \
1196  fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1197 } while (0)
1198 
1199 #else /* USE_TICK_T */
1200 #define MEASURE_LINE(expr) expr
1201 #endif /* USE_TICK_T */
1202 
1203 #define FL_CHECK2(name, x, pred) \
1204  ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1205  (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1206 #define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1207 #define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1208 #define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1209 
1210 #define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1211 #define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1212 #define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1213 
1214 #if USE_RGENGC
1215 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1216 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1217 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1218 
1219 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1220 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1221 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1222 
1223 #define RVALUE_OLD_AGE 3
1224 #define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1225 
1226 static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1227 static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1228 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1229 static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1230 static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1231 
1232 static inline int
1233 RVALUE_FLAGS_AGE(VALUE flags)
1234 {
1235  return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1236 }
1237 
1238 #endif /* USE_RGENGC */
1239 
/* Validate GC invariants of `obj`: heap membership, object type, and the
 * consistency of age/mark/remember/uncollectible bits.  Returns the number
 * of problems found; rb_bug()s instead when `terminate` is true and
 * at least one error was detected. */
static int
check_rvalue_consistency_force(const VALUE obj, int terminate)
{
    rb_objspace_t *objspace = &rb_objspace;
    int err = 0;

    if (SPECIAL_CONST_P(obj)) {
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
        err++;
    }
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
        /* check if it is in tomb_pages */
        struct heap_page *page = NULL;
        list_for_each(&heap_tomb->pages, page, page_node) {
            if (&page->start[0] <= (RVALUE *)obj &&
                (RVALUE *)obj < &page->start[page->total_slots]) {
                fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                        (void *)obj, (void *)page);
                err++;
                goto skip;
            }
        }
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
        err++;
      skip:
        ;
    }
    else {
        const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
        const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
        const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
        /* the remember set shares the marking bitmap */
        const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            err++;
        }
        if (BUILTIN_TYPE(obj) == T_NONE) {
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            err++;
        }
        if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
            err++;
        }

        /* walking the size calculation will crash on badly corrupted objects */
        obj_memsize_of((VALUE)obj, FALSE);

        /* check generation
         *
         * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
         */
        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
            err++;
        }

        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
            err++;
        }

        if (!is_full_marking(objspace)) {
            if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                        obj_info(obj), age);
                err++;
            }
            if (remembered_bit && age != RVALUE_OLD_AGE) {
                fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                        obj_info(obj), age);
                err++;
            }
        }

        /*
         * check coloring
         *
         *               marking:false marking:true
         * marked:false  white         *invalid*
         * marked:true   black         grey
         */
        if (is_incremental_marking(objspace) && marking_bit) {
            if (!is_marking(objspace) && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
                err++;
            }
        }
    }

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
    }

    return err;
}
1337 
/* Debug wrapper: under RGENGC_CHECK_MODE every call validates `obj` and
 * aborts on inconsistency; otherwise it is a no-op pass-through. */
#if RGENGC_CHECK_MODE == 0
static inline VALUE
check_rvalue_consistency(const VALUE obj)
{
    return obj;
}
#else
static VALUE
check_rvalue_consistency(const VALUE obj)
{
    check_rvalue_consistency_force(obj, TRUE);
    return obj;
}
#endif
1352 
/* Return TRUE iff `obj` is a T_MOVED tombstone left behind by compaction.
 * Special constants are never moved.  The slot is temporarily unpoisoned
 * so reading its type does not trip AddressSanitizer. */
static inline int
gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        return FALSE;
    }
    else {
        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);

        int ret = BUILTIN_TYPE(obj) == T_MOVED;
        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            /* NOTE(review): one line (an assertion) appears elided from this
             * listing at this point; verify against the upstream source. */
            asan_poison_object(obj);
        }
        return ret;
    }
}
1372 
1373 static inline int
1374 RVALUE_MARKED(VALUE obj)
1375 {
1376  check_rvalue_consistency(obj);
1377  return RVALUE_MARK_BITMAP(obj) != 0;
1378 }
1379 
1380 static inline int
1381 RVALUE_PINNED(VALUE obj)
1382 {
1383  check_rvalue_consistency(obj);
1384  return RVALUE_PIN_BITMAP(obj) != 0;
1385 }
1386 
1387 #if USE_RGENGC
1388 static inline int
1389 RVALUE_WB_UNPROTECTED(VALUE obj)
1390 {
1391  check_rvalue_consistency(obj);
1392  return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1393 }
1394 
1395 static inline int
1396 RVALUE_MARKING(VALUE obj)
1397 {
1398  check_rvalue_consistency(obj);
1399  return RVALUE_MARKING_BITMAP(obj) != 0;
1400 }
1401 
1402 static inline int
1403 RVALUE_REMEMBERED(VALUE obj)
1404 {
1405  check_rvalue_consistency(obj);
1406  return RVALUE_MARKING_BITMAP(obj) != 0;
1407 }
1408 
1409 static inline int
1410 RVALUE_UNCOLLECTIBLE(VALUE obj)
1411 {
1412  check_rvalue_consistency(obj);
1413  return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1414 }
1415 
1416 static inline int
1417 RVALUE_OLD_P_RAW(VALUE obj)
1418 {
1419  const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1420  return (RBASIC(obj)->flags & promoted) == promoted;
1421 }
1422 
1423 static inline int
1424 RVALUE_OLD_P(VALUE obj)
1425 {
1426  check_rvalue_consistency(obj);
1427  return RVALUE_OLD_P_RAW(obj);
1428 }
1429 
#if RGENGC_CHECK_MODE || GC_DEBUG
/* Debug-only accessor: the object's current generational age (0..3). */
static inline int
RVALUE_AGE(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
}
#endif
1438 
/* Account `obj` (living on `page`) as an old/uncollectible object.
 * NOTE(review): the bitmap-setting and assertion lines appear elided from
 * this listing; only the counter bookkeeping is visible here. */
static inline void
RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    objspace->rgengc.old_objects++;

#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
#endif
}
1451 
1452 static inline void
1453 RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1454 {
1455  RB_DEBUG_COUNTER_INC(obj_promote);
1456  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1457 }
1458 
1459 static inline VALUE
1460 RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1461 {
1463  flags |= (age << RVALUE_AGE_SHIFT);
1464  return flags;
1465 }
1466 
/* set age to age+1 */
static inline void
RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
{
    VALUE flags = RBASIC(obj)->flags;
    int age = RVALUE_FLAGS_AGE(flags);

    /* An object already at RVALUE_OLD_AGE must never be incremented further. */
    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
    }

    age++;
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);

    /* Reaching the threshold promotes the object to the old generation. */
    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    }
    check_rvalue_consistency(obj);
}

/* set age to RVALUE_OLD_AGE (immediate promotion to the old generation) */
static inline void
RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(obj);
}

/* set age to RVALUE_OLD_AGE - 1: the object becomes old at the next GC */
static inline void
RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);

    check_rvalue_consistency(obj);
}

/* Unchecked demotion: reset the age field to 0.
 * NOTE(review): a bitmap-clearing line appears elided from this listing. */
static inline void
RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
{
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
}

/* Demote an old object back to the young generation (checked variant).
 * NOTE(review): the statement inside the remember-set `if` body appears
 * elided from this listing; verify against the upstream source. */
static inline void
RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));

    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
    }

    RVALUE_DEMOTE_RAW(objspace, obj);

    /* old-object accounting only counted marked objects */
    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;
    }

    check_rvalue_consistency(obj);
}
1537 
1538 static inline void
1539 RVALUE_AGE_RESET_RAW(VALUE obj)
1540 {
1541  RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1542 }
1543 
1544 static inline void
1545 RVALUE_AGE_RESET(VALUE obj)
1546 {
1547  check_rvalue_consistency(obj);
1548  GC_ASSERT(!RVALUE_OLD_P(obj));
1549 
1550  RVALUE_AGE_RESET_RAW(obj);
1551  check_rvalue_consistency(obj);
1552 }
1553 
1554 static inline int
1555 RVALUE_BLACK_P(VALUE obj)
1556 {
1557  return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1558 }
1559 
1560 #if 0
1561 static inline int
1562 RVALUE_GREY_P(VALUE obj)
1563 {
1564  return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1565 }
1566 #endif
1567 
1568 static inline int
1569 RVALUE_WHITE_P(VALUE obj)
1570 {
1571  return RVALUE_MARKED(obj) == FALSE;
1572 }
1573 
1574 #endif /* USE_RGENGC */
1575 
1576 /*
1577  --------------------------- ObjectSpace -----------------------------
1578 */
1579 
/* Allocate `n` zero-initialized bytes; a thin calloc wrapper that, like
 * calloc itself, returns NULL on failure. */
static inline void *
calloc1(size_t n)
{
    return calloc(n, 1);
}
1585 
/* Allocate and minimally initialize a fresh object space: zeroed struct,
 * minimum malloc limit, empty eden/tomb page lists, GC disabled.
 * NOTE(review): the function-name line is elided from this listing
 * (upstream: rb_objspace_alloc(void)). */
rb_objspace_t *
{
    rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
    malloc_limit = gc_params.malloc_limit_min;
    list_head_init(&objspace->eden_heap.pages);
    list_head_init(&objspace->tomb_heap.pages);
    dont_gc = TRUE;  /* GC stays off until bootstrap completes */

    return objspace;
}

static void free_stack_chunks(mark_stack_t *);
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1600 
/* Tear down an object space: profiling records, the registered-global
 * list, every heap page, the object-id tables, the mark stack, and
 * finally the objspace struct itself.
 * NOTE(review): the function-name line and the lazy-sweep guard condition
 * are elided from this listing (upstream: rb_objspace_free(rb_objspace_t *);
 * the rb_bug below is guarded by an is_lazy_sweeping check). */
void
{
    rb_bug("lazy sweeping underway when freeing object space");

    if (objspace->profile.records) {
        free(objspace->profile.records);
        objspace->profile.records = 0;
    }

    if (global_list) {
        struct gc_list *list, *next;
        for (list = global_list; list; list = next) {
            next = list->next;
            xfree(list);
        }
    }
    if (heap_pages_sorted) {
        size_t i;
        for (i = 0; i < heap_allocated_pages; ++i) {
            heap_page_free(objspace, heap_pages_sorted[i]);
        }
        /* NOTE(review): lines freeing heap_pages_sorted itself and resetting
         * the allocated/sorted-length counters appear elided here. */
        heap_pages_lomem = 0;
        heap_pages_himem = 0;

        objspace->eden_heap.total_pages = 0;
        objspace->eden_heap.total_slots = 0;
    }
    st_free_table(objspace->id_to_obj_tbl);
    st_free_table(objspace->obj_to_id_tbl);
    free_stack_chunks(&objspace->mark_stack);
    free(objspace);
}
1638 
1639 static void
1640 heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1641 {
1642  struct heap_page **sorted;
1643  size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1644 
1645  gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
1646 
1647  if (heap_pages_sorted_length > 0) {
1648  sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1649  if (sorted) heap_pages_sorted = sorted;
1650  }
1651  else {
1652  sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1653  }
1654 
1655  if (sorted == 0) {
1656  rb_memerror();
1657  }
1658 
1659  heap_pages_sorted_length = next_length;
1660 }
1661 
/* Make sure heap_pages_sorted can index every current and allocatable page. */
static void
heap_pages_expand_sorted(rb_objspace_t *objspace)
{
    /* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length
     * because heap_allocatable_pages contains heap_tomb->total_pages (recycle heap_tomb pages).
     * however, if there are pages which do not have empty slots, then try to create new pages
     * so that the additional allocatable_pages counts (heap_tomb->total_pages) are added.
     */
    size_t next_length = heap_allocatable_pages;
    next_length += heap_eden->total_pages;
    next_length += heap_tomb->total_pages;

    if (next_length > heap_pages_sorted_length) {
        heap_pages_expand_sorted_to(objspace, next_length);
    }
    /* NOTE(review): trailing assertion lines appear elided from this listing. */
}

/* Record how many pages may still be allocated and grow the sorted index
 * accordingly.
 * NOTE(review): the assignment of `s` to heap_allocatable_pages appears
 * elided from this listing; verify against the upstream source. */
static void
heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
{
    heap_pages_expand_sorted(objspace);
}
1688 
1689 
/* Push slot `obj` onto `page`'s freelist.  The freelist pointer and free
 * slots are kept ASan-poisoned while unused, so the code unpoisons around
 * each update and re-poisons afterwards. */
static inline void
heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    RVALUE *p = (RVALUE *)obj;
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

    p->as.free.flags = 0;
    p->as.free.next = page->freelist;
    page->freelist = p;
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

    if (RGENGC_CHECK_MODE &&
        /* obj should belong to page */
        !(&page->start[0] <= (RVALUE *)obj &&
          (RVALUE *)obj < &page->start[page->total_slots] &&
          obj % sizeof(RVALUE) == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    }

    asan_poison_object(obj);

    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
}
1713 
/* Link `page` into the heap's list of pages with free slots; pages whose
 * freelist is empty are left out. */
static inline void
heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    GC_ASSERT(page->free_slots != 0);
    if (page->freelist) {
        page->free_next = heap->free_pages;
        heap->free_pages = page;
    }
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
}

#if GC_ENABLE_INCREMENTAL_MARK
/* Like heap_add_freepage() but for the incremental-marking pool; also
 * accounts the pooled free slots.  Returns TRUE iff the page was added. */
static inline int
heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    if (page->freelist) {
        page->free_next = heap->pooled_pages;
        heap->pooled_pages = page;
        objspace->rincgc.pooled_slots += page->free_slots;
        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

        return TRUE;
    }
    else {
        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

        return FALSE;
    }
}
#endif
1746 
1747 static void
1748 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1749 {
1750  list_del(&page->page_node);
1751  heap->total_pages--;
1752  heap->total_slots -= page->total_slots;
1753 }
1754 
static void rb_aligned_free(void *ptr);

/* Release a heap page: the aligned page body and its descriptor.
 * NOTE(review): the heap_allocated_pages decrement appears elided from
 * this listing; verify against the upstream source. */
static void
heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
    objspace->profile.total_freed_pages++;
    rb_aligned_free(GET_PAGE_BODY(page->start));
    free(page);
}
1765 
/* Free tomb pages that are completely empty, compacting heap_pages_sorted
 * in place (`i` reads, `j` writes kept entries).
 * NOTE(review): the final statement shrinking heap_allocated_pages appears
 * elided from this listing; verify against the upstream source. */
static void
heap_pages_free_unused_pages(rb_objspace_t *objspace)
{
    size_t i, j;

    if (!list_empty(&heap_tomb->pages)) {
        for (i = j = 1; j < heap_allocated_pages; i++) {
            struct heap_page *page = heap_pages_sorted[i];

            /* a tomb page whose every slot is free can be returned to the OS */
            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, heap_tomb, page);
                heap_page_free(objspace, page);
            }
            else {
                if (i != j) {
                    heap_pages_sorted[j] = page;
                }
                j++;
            }
        }
    }
}
1789 
/* Allocate a brand-new heap page: an aligned page body plus a descriptor,
 * insert the descriptor into the address-sorted page index (binary search),
 * and put every slot on the page's freelist.
 * NOTE(review): several lines are elided from this listing (the aligned
 * allocation of page_body, `hi = heap_allocated_pages` before the search,
 * the MEMMOVE shifting the sorted array, the heap_allocated_pages
 * increment, and the guard around the rb_bug near the end); verify
 * against the upstream source. */
static struct heap_page *
heap_page_allocate(rb_objspace_t *objspace)
{
    RVALUE *start, *end, *p;
    struct heap_page *page;
    struct heap_page_body *page_body = 0;
    size_t hi, lo, mid;
    int limit = HEAP_PAGE_OBJ_LIMIT;

    /* assign heap_page body (contains heap_page_header and RVALUEs) */
    if (page_body == 0) {
        rb_memerror();
    }

    /* assign heap_page entry */
    page = calloc1(sizeof(struct heap_page));
    if (page == 0) {
        rb_aligned_free(page_body);
        rb_memerror();
    }

    /* adjust obj_limit (object number available in this page) */
    start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
    if ((VALUE)start % sizeof(RVALUE) != 0) {
        /* round the first slot up to RVALUE alignment and shrink the limit */
        int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
        start = (RVALUE*)((VALUE)start + delta);
        limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
    }
    end = start + limit;

    /* setup heap_pages_sorted: binary search for the insertion index */
    lo = 0;
    while (lo < hi) {
        struct heap_page *mid_page;

        mid = (lo + hi) / 2;
        mid_page = heap_pages_sorted[mid];
        if (mid_page->start < start) {
            lo = mid + 1;
        }
        else if (mid_page->start > start) {
            hi = mid;
        }
        else {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
        }
    }

    if (hi < heap_allocated_pages) {
    }

    heap_pages_sorted[hi] = page;

    GC_ASSERT(heap_eden->total_pages + heap_tomb->total_pages == heap_allocated_pages - 1);

    objspace->profile.total_allocated_pages++;

    rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
    }

    /* track the heap's address range for conservative stack scanning */
    if (heap_pages_himem < end) heap_pages_himem = end;

    page->start = start;
    page->total_slots = limit;
    page_body->header.page = page;

    for (p = start; p != end; p++) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    }
    page->free_slots = limit;

    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    return page;
}
1875 
/* Pull the first tomb page that still has free slots back into service;
 * returns NULL when nothing can be recycled.  Each candidate's freelist
 * word is unpoisoned for inspection; only the returned page's word is
 * re-poisoned here. */
static struct heap_page *
heap_page_resurrect(rb_objspace_t *objspace)
{
    struct heap_page *page = 0, *next;

    list_for_each_safe(&heap_tomb->pages, page, next, page_node) {
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, heap_tomb, page);
            asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
            return page;
        }
    }

    return NULL;
}
1892 
/* Obtain a usable heap page, preferring to recycle one from the tomb heap
 * and falling back to a fresh allocation.
 * NOTE(review): one line (an assertion) appears elided from this listing. */
static struct heap_page *
heap_page_create(rb_objspace_t *objspace)
{
    struct heap_page *page;
    const char *method = "recycle";

    page = heap_page_resurrect(objspace);

    if (page == NULL) {
        page = heap_page_allocate(objspace);
        method = "allocate";
    }
    if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
                   method, (void *)page, (int)heap_pages_sorted_length, (int)heap_allocated_pages, (int)heap_tomb->total_pages);
    return page;
}
1911 
1912 static void
1913 heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1914 {
1915  page->flags.in_tomb = (heap == heap_tomb);
1916  list_add(&heap->pages, &page->page_node);
1917  heap->total_pages++;
1918  heap->total_slots += page->total_slots;
1919 }
1920 
1921 static void
1922 heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
1923 {
1924  struct heap_page *page = heap_page_create(objspace);
1925  heap_add_page(objspace, heap, page);
1926  heap_add_freepage(heap, page);
1927 }
1928 
/* Grow `heap` by `add` pages in one step (boot-time / bulk growth).
 * NOTE(review): a trailing report/assert line appears elided from this
 * listing. */
static void
heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
{
    size_t i;

    heap_allocatable_pages_set(objspace, add);

    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, heap);
    }
}
1942 
/* Decide how many extra pages to allocate after a sweep, driven by the
 * configured free-slot goal ratio and growth factor; returns the number
 * of additional pages (next_used - used).
 * NOTE(review): the declaration/computation of `used` is elided from this
 * listing (upstream derives it from the allocated-page counters). */
static size_t
heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots)
{
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;
    size_t next_used;

    if (goal_ratio == 0.0) {
        /* no goal ratio configured: grow geometrically */
        next_used = (size_t)(used * gc_params.growth_factor);
    }
    else {
        /* Find `f' where free_slots = f * total_slots * goal_ratio
         *  => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
         */
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);

        /* clamp f into [1.1, growth_factor] */
        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;

        next_used = (size_t)(f * used);

        if (0) {
            fprintf(stderr,
                    "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                    " G(%1.2f), f(%1.2f),"
                    " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
                    goal_ratio, f, used, next_used);
        }
    }

    if (gc_params.growth_max_slots > 0) {
        /* honor the configured hard cap on growth */
        size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
        if (next_used > max_used) next_used = max_used;
    }

    return next_used - used;
}
1981 
1982 static void
1983 heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
1984 {
1985  size_t used = heap_eden->total_pages;
1986  size_t next_used_limit = used + additional_pages;
1987 
1988  if (next_used_limit == heap_allocated_pages) next_used_limit++;
1989 
1990  heap_allocatable_pages_set(objspace, next_used_limit - used);
1991 
1992  gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %d\n", (int)heap_allocatable_pages);
1993 }
1994 
/* Add one allocatable page to `heap` if the budget allows; returns TRUE
 * when a page was added.
 * NOTE(review): the gc_report argument lines and the budget decrement are
 * elided from this listing; verify against the upstream source. */
static int
heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
{
    if (heap_allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",

        heap_assign_page(objspace, heap);
        return TRUE;
    }
    return FALSE;
}
2010 
/* Make sure `heap` gets at least one page with free slots: continue lazy
 * sweeping or incremental marking, otherwise try extending the heap, and
 * as a last resort run a full GC.  Raises NoMemoryError when even the GC
 * cannot produce free space. */
static void
heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
{
    GC_ASSERT(heap->free_pages == NULL);

    if (is_lazy_sweeping(heap)) {
        gc_sweep_continue(objspace, heap);
    }
    else if (is_incremental_marking(objspace)) {
        gc_marks_continue(objspace, heap);
    }

    /* still nothing free: extend, then fall back to a GC cycle */
    if (heap->free_pages == NULL &&
        (will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
        gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
        rb_memerror();
    }
}
2029 
/* Take the next free page, detach its entire freelist into the caller's
 * hands and return the head slot.  Loops on heap_prepare() (which may run
 * GC) until a free page exists. */
static RVALUE *
heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page;
    RVALUE *p;

    while (heap->free_pages == NULL) {
        heap_prepare(objspace, heap);
    }
    page = heap->free_pages;
    heap->free_pages = page->free_next;
    heap->using_page = page;

    GC_ASSERT(page->free_slots != 0);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    p = page->freelist;
    page->freelist = NULL;  /* the whole list is now owned by the caller */
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    page->free_slots = 0;
    asan_unpoison_object((VALUE)p, true);
    return p;
}
2052 
/* Fast path: pop the heap's cached freelist head, or return 0 (== Qfalse)
 * when the cache is empty.  Never triggers GC.
 * NOTE(review): `p` may be NULL when the unpoison call runs; assumes
 * asan_unpoison_object tolerates that — verify. */
static inline VALUE
heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
{
    RVALUE *p = heap->freelist;
    if (LIKELY(p != NULL)) {
        heap->freelist = p->as.free.next;
    }
    asan_unpoison_object((VALUE)p, true);
    return (VALUE)p;
}
2063 
2064 static inline VALUE
2065 heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
2066 {
2067  RVALUE *p = heap->freelist;
2068 
2069  while (1) {
2070  if (LIKELY(p != NULL)) {
2071  asan_unpoison_object((VALUE)p, true);
2072  heap->freelist = p->as.free.next;
2073  return (VALUE)p;
2074  }
2075  else {
2076  p = heap_get_freeobj_from_next_freepage(objspace, heap);
2077  }
2078  }
2079 }
2080 
/* Record which internal objspace events (NEWOBJ/FREEOBJ) have active hooks.
 * NOTE(review): the function-name line is elided from this listing; verify
 * the exact signature against the upstream source. */
void
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
    objspace->flags.has_hook = (objspace->hook_events != 0);
}

/* Fire an internal GC event hook, temporarily bumping the PC so the
 * reported source line is correct, then restoring it. */
static void
gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
{
    const VALUE *pc = ec->cfp->pc;
    if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
        /* increment PC because source line is calculated with PC-1 */
        ec->cfp->pc++;
    }
    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
    ec->cfp->pc = pc;  /* restore the saved PC */
}

/* Cheap checks used on allocation fast paths before paying for a hook. */
#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))

#define gc_event_hook(objspace, event, data) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
    } \
} while (0)
2109 
/* Initialize a freshly-acquired slot as an object: write the header flags,
 * class and three payload words in one MEMCPY, then do debug/profiling
 * bookkeeping.  Returns `obj`.
 * NOTE(review): a few lines are elided from this listing (an assertion
 * under the memory-sanitizer guard, the wb-unprotected bitmap set in the
 * UNLIKELY(wb_protected == FALSE) branch, and a remember call in the
 * RGENGC_OLD_NEWOBJ_CHECK block); verify against the upstream source. */
static inline VALUE
newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, rb_objspace_t *objspace, VALUE obj)
{
#if !__has_feature(memory_sanitizer)
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
#endif

    /* OBJSETUP */
    struct RVALUE buf = {
        .as = {
            .values =  {
                .basic = {
                    .flags = flags,
                    .klass = klass,
                },
                .v1 = v1,
                .v2 = v2,
                .v3 = v3,
            },
        },
    };
    MEMCPY(RANY(obj), &buf, RVALUE, 1);

#if RGENGC_CHECK_MODE
    /* a fresh slot must not carry any stale GC state */
    GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
    GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
    GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
    GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);

    if (flags & FL_PROMOTED1) {
        if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
    }
    else {
        if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
    }
    if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
#endif

#if USE_RGENGC
    if (UNLIKELY(wb_protected == FALSE)) {
    }
#endif

#if RGENGC_PROFILE
    if (wb_protected) {
        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
    else {
        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
#endif

#if GC_DEBUG
    RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
    GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif

    objspace->total_allocated_objects++;

    gc_report(5, objspace, "newobj: %s\n", obj_info(obj));

#if RGENGC_OLD_NEWOBJ_CHECK > 0
    {
        /* stress test: periodically create objects directly as OLD */
        static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;

        if (!is_incremental_marking(objspace) &&
            flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
            ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
            if (--newobj_cnt == 0) {
                newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;

                gc_mark_set(objspace, obj);
                RVALUE_AGE_SET_OLD(objspace, obj);
            }
        }
    }
#endif
    check_rvalue_consistency(obj);
    return obj;
}
2200 
/* Slow allocation path: detects allocation during GC, honors stress mode
 * (forced GC per allocation), then pulls a slot via the full freelist path.
 * NOTE(review): the guard line opening the during_gc/stress compound block
 * and a trailing gc_event_hook call appear elided from this listing. */
static inline VALUE
newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected)
{
    VALUE obj;

    if (during_gc) {
        dont_gc = 1;
        during_gc = 0;
        rb_bug("object allocation during garbage collection phase");
    }

    if (ruby_gc_stressful) {
        if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
            rb_memerror();
        }
    }
    }

    obj = heap_get_freeobj(objspace, heap_eden);
    newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
    return obj;
}

NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));

/* Out-of-line slow path for write-barrier protected objects. */
static VALUE
newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
{
    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE);
}

/* Out-of-line slow path for write-barrier unprotected objects. */
static VALUE
newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
{
    return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE);
}
2240 
/* Central allocation entry: take the fast path (cached freelist head, no
 * GC in progress, no hooks) when possible, otherwise the slow path.
 * NOTE(review): the stress_to_class comparison inside the loop and part of
 * the fast-path condition appear elided from this listing. */
static inline VALUE
newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj;

    RB_DEBUG_COUNTER_INC(obj_newobj);
    (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);

#if GC_DEBUG_STRESS_TO_CLASS
    if (UNLIKELY(stress_to_class)) {
        long i, cnt = RARRAY_LEN(stress_to_class);
        for (i = 0; i < cnt; ++i) {
        }
    }
#endif
    if (!(during_gc ||
          gc_event_hook_available_p(objspace)) &&
        (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
        return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
    }
    else {
        RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);

        return wb_protected ?
          newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
          newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
    }
}
2272 
/* Public allocation wrappers.
 * NOTE(review): each function-name line below is elided from this listing
 * (upstream order: rb_wb_unprotected_newobj_of, rb_wb_protected_newobj_of,
 * rb_newobj, rb_newobj_of); code is reproduced as extracted. */
VALUE
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(klass, flags, 0, 0, 0, FALSE);
}

VALUE
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(klass, flags, 0, 0, 0, TRUE);
}

/* for compatibility */

VALUE
{
    return newobj_of(0, T_NONE, 0, 0, 0, FALSE);
}

VALUE
{
    /* FL_WB_PROTECTED is consumed here, not stored in the object header */
    return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
}
2300 
/* Abort when a T_NODE leaks into GC — AST nodes must never reach the heap
 * walker. */
#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

#undef rb_imemo_new

/* Allocate an internal-use (IMEMO) object; the subtype is packed into the
 * flags above FL_USHIFT.
 * NOTE(review): the signature line is elided from this listing (upstream:
 * rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)). */
VALUE
{
    VALUE flags = T_IMEMO | (type << FL_USHIFT);
    return newobj_of(v0, flags, v1, v2, v3, TRUE);
}

/* imemo_tmpbuf constructor, allocated write-barrier UNprotected.
 * NOTE(review): the line computing `flags` is elided from this listing. */
static VALUE
rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
{
    return newobj_of(v0, flags, v1, v2, v3, FALSE);
}

/* Wrap a malloc'ed buffer so the GC marks/frees it with its owner. */
static VALUE
rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
{
    return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
}

/* Chain a parser heap buffer onto an existing tmpbuf list.
 * NOTE(review): the return-type/signature lines are elided from this
 * listing (upstream: rb_imemo_tmpbuf_parser_heap(void *buf,
 * rb_imemo_tmpbuf_t *old_heap, size_t cnt)). */
{
    return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
}
2332 
/* Additional memory owned by an IMEMO object, per subtype (0 when the
 * object owns nothing beyond its slot).
 * NOTE(review): the imemo_iseq case body appears elided from this listing. */
static size_t
imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_ment:
        size += sizeof(RANY(obj)->as.imemo.ment.def);
        break;
      case imemo_iseq:
        break;
      case imemo_env:
        size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
        break;
      case imemo_tmpbuf:
        size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
        break;
      case imemo_ast:
        size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
        break;
      case imemo_cref:
      case imemo_svar:
      case imemo_throw_data:
      case imemo_ifunc:
      case imemo_memo:
      case imemo_parser_strterm:
        break;
      default:
        /* unreachable */
        break;
    }
    return size;
}
2366 
#if IMEMO_DEBUG
/* Debug wrapper for rb_imemo_new(): logs each IMEMO allocation (address,
 * type, and allocation site) to stderr, then returns the new object. */
VALUE
rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
{
    VALUE obj = rb_imemo_new(type, v1, v2, v3, v0);
    fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)obj, imemo_type(obj), file, line);
    return obj;
}
#endif
2376 
/* T_DATA / typed-data allocation and introspection helpers.
 * NOTE(review): several signature lines (original 2378, 2385, 2391, 2399,
 * 2406, 2408, 2411, 2413, 2419, 2432) were lost in this dump; the
 * RUBY_ALIAS_FUNCTION invocations below are therefore visibly incomplete. */
2377 VALUE
2379 {
2380  if (klass) Check_Type(klass, T_CLASS);
2381  return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE);
2382 }
2383 
2384 #undef rb_data_object_alloc
/* Compatibility alias: rb_data_object_alloc forwards to rb_data_object_wrap
 * (opening line of the alias macro invocation is missing here). */
2386  RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree),
2387  rb_data_object_wrap, (klass, datap, dmark, dfree))
2388 
2389 
/* rb_data_object_zalloc: wrap a freshly zero-filled buffer of `size` bytes. */
2390 VALUE
2392 {
2393  VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
2394  DATA_PTR(obj) = xcalloc(1, size);
2395  return obj;
2396 }
2397 
/* rb_data_typed_object_wrap: the (VALUE)1 in the dfree slot marks the
 * object as typed data; WB protection follows the type's flags. */
2398 VALUE
2400 {
2401  if (klass) Check_Type(klass, T_CLASS);
2402  return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED);
2403 }
2404 
2405 #undef rb_data_typed_object_alloc
2407  const rb_data_type_t *type),
2409 
/* rb_data_typed_object_zalloc: typed variant of zalloc above. */
2410 VALUE
2412 {
2414  DATA_PTR(obj) = xcalloc(1, size);
2415  return obj;
2416 }
2417 
/* rb_objspace_data_type_memsize: ask a typed-data object's dsize callback
 * for its out-of-slot memory use; 0 for untyped data or missing callback.
 * NOTE(review): the line declaring `type` (original 2422) is missing here. */
2418 size_t
2420 {
2421  if (RTYPEDDATA_P(obj)) {
2423  const void *ptr = RTYPEDDATA_DATA(obj);
2424  if (ptr && type->function.dsize) {
2425  return type->function.dsize(ptr);
2426  }
2427  }
2428  return 0;
2429 }
2430 
/* rb_objspace_data_type_name: the wrap_struct_name of typed data, else NULL. */
2431 const char *
2433 {
2434  if (RTYPEDDATA_P(obj)) {
2435  return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
2436  }
2437  else {
2438  return 0;
2439  }
2440 }
2441 
2442 PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
/* Conservative-marking predicate: does `ptr` point at a live-eligible RVALUE
 * slot inside a managed (non-tomb) heap page? Used when scanning the machine
 * stack, where any word might be a disguised pointer. */
2443 static inline int
2444 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
2445 {
2446  register RVALUE *p = RANY(ptr);
2447  register struct heap_page *page;
2448  register size_t hi, lo, mid;
2449 
2450  RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2451 
/* Cheap range check against the lowest/highest mapped slot addresses. */
2452  if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2453  RB_DEBUG_COUNTER_INC(gc_isptr_range);
2454 
/* Must be exactly slot-aligned to be a valid object pointer. */
2455  if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2456  RB_DEBUG_COUNTER_INC(gc_isptr_align);
2457 
2458  /* check if p looks like a pointer using bsearch*/
2459  lo = 0;
/* NOTE(review): original line 2460 (presumably `hi = heap_allocated_pages;`,
 * the upper bound of the binary search) is missing from this dump. */
2461  while (lo < hi) {
2462  mid = (lo + hi) / 2;
2463  page = heap_pages_sorted[mid];
2464  if (page->start <= p) {
2465  if (p < page->start + page->total_slots) {
2466  RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2467 
/* Pages in the tomb heap hold no live objects. */
2468  if (page->flags.in_tomb) {
2469  return FALSE;
2470  }
2471  else {
2472  return TRUE;
2473  }
2474  }
2475  lo = mid + 1;
2476  }
2477  else {
2478  hi = mid;
2479  }
2480  }
2481  return FALSE;
2482 }
2483 
2484 static enum rb_id_table_iterator_result
2485 free_const_entry_i(VALUE value, void *data)
2486 {
2487  rb_const_entry_t *ce = (rb_const_entry_t *)value;
2488  xfree(ce);
2489  return ID_TABLE_CONTINUE;
2490 }
2491 
/* Free a class's constant table: release every rb_const_entry_t, then the
 * table itself. NOTE(review): the signature line (original 2493,
 * rb_free_const_table(struct rb_id_table *tbl)) is missing from this dump. */
2492 void
2494 {
2495  rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2496  rb_id_table_free(tbl);
2497 }
2498 
2499 static inline void
2500 make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
2501 {
2502  struct RZombie *zombie = RZOMBIE(obj);
2503  zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
2504  zombie->dfree = dfree;
2505  zombie->data = data;
2506  zombie->next = heap_pages_deferred_final;
2507  heap_pages_deferred_final = (VALUE)zombie;
2508 }
2509 
2510 static inline void
2511 make_io_zombie(rb_objspace_t *objspace, VALUE obj)
2512 {
2513  rb_io_t *fptr = RANY(obj)->as.file.fptr;
2514  make_zombie(objspace, obj, (void (*)(void*))rb_io_fptr_finalize, fptr);
2515 }
2516 
/* Remove a dying object's entries from both object-id mapping tables.
 * Called only for objects whose id was handed out (FL_SEEN_OBJ_ID).
 * NOTE(review): original lines 2522-2523 (presumably GC_ASSERT guards) are
 * missing from this dump. */
2517 static void
2518 obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
2519 {
2520  VALUE id;
2521 
2524 
2525  if (st_delete(objspace->obj_to_id_tbl, (st_data_t *)&obj, &id)) {
2526  GC_ASSERT(id);
2527  st_delete(objspace->id_to_obj_tbl, (st_data_t *)&id, NULL);
2528  }
2529  else {
/* An object flagged FL_SEEN_OBJ_ID must have a table entry; anything else
 * indicates table corruption, so crash loudly. */
2530  rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
2531  }
2532 }
2533 
/* Release all resources owned by `obj` during sweeping. Returns 0 when the
 * slot can be reused immediately, 1 when the object became a zombie and its
 * finalization is deferred (T_DATA without FREE_IMMEDIATELY, T_FILE, or any
 * object with FL_FINALIZE).
 * NOTE(review): this listing is a doxygen dump; numerous original lines are
 * missing (visible as gaps in the embedded line numbers), so several
 * branches below are incomplete — verify against upstream gc.c 2.7.1. */
2534 static int
2535 obj_free(rb_objspace_t *objspace, VALUE obj)
2536 {
2537  RB_DEBUG_COUNTER_INC(obj_free);
2538 
2540 
/* Immediates must never be handed to the sweeper. */
2541  switch (BUILTIN_TYPE(obj)) {
2542  case T_NIL:
2543  case T_FIXNUM:
2544  case T_TRUE:
2545  case T_FALSE:
2546  rb_bug("obj_free() called for broken object");
2547  break;
2548  }
2549 
/* NOTE(review): body of the FL_EXIVAR branch (generic-ivar cleanup,
 * original lines 2551-2552) is missing from this dump. */
2550  if (FL_TEST(obj, FL_EXIVAR)) {
2553  }
2554 
2556  obj_free_object_id(objspace, obj);
2557  }
2558 
2559 #if USE_RGENGC
2560  if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2561 
2562 #if RGENGC_CHECK_MODE
/* Sanity: a freed slot must carry no mark/marking/uncollectible bits. */
2563 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
2564  CHECK(RVALUE_WB_UNPROTECTED);
2565  CHECK(RVALUE_MARKED);
2566  CHECK(RVALUE_MARKING);
2567  CHECK(RVALUE_UNCOLLECTIBLE);
2568 #undef CHECK
2569 #endif
2570 #endif
2571 
/* Per-type teardown. */
2572  switch (BUILTIN_TYPE(obj)) {
2573  case T_OBJECT:
2574  if ((RANY(obj)->as.basic.flags & ROBJECT_EMBED) ||
2575  RANY(obj)->as.object.as.heap.ivptr == NULL) {
2576  RB_DEBUG_COUNTER_INC(obj_obj_embed);
2577  }
2578  else if (ROBJ_TRANSIENT_P(obj)) {
2579  RB_DEBUG_COUNTER_INC(obj_obj_transient);
2580  }
2581  else {
2582  xfree(RANY(obj)->as.object.as.heap.ivptr);
2583  RB_DEBUG_COUNTER_INC(obj_obj_ptr);
2584  }
2585  break;
/* NOTE(review): the T_MODULE/T_CLASS branch below is missing the table-free
 * call lines (originals 2588-2589, 2591, 2594, 2597, 2601, 2604, 2608-2609). */
2586  case T_MODULE:
2587  case T_CLASS:
2590  if (RCLASS_IV_TBL(obj)) {
2592  }
2593  if (RCLASS_CONST_TBL(obj)) {
2595  }
2596  if (RCLASS_IV_INDEX_TBL(obj)) {
2598  }
2599  if (RCLASS_EXT(obj)->subclasses) {
2600  if (BUILTIN_TYPE(obj) == T_MODULE) {
2602  }
2603  else {
2605  }
2606  RCLASS_EXT(obj)->subclasses = NULL;
2607  }
2610  if (RANY(obj)->as.klass.ptr)
2611  xfree(RANY(obj)->as.klass.ptr);
2612  RANY(obj)->as.klass.ptr = NULL;
2613 
2614  (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
2615  (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
2616  break;
2617  case T_STRING:
2618  rb_str_free(obj);
2619  break;
2620  case T_ARRAY:
2621  rb_ary_free(obj);
2622  break;
2623  case T_HASH:
2624 #if USE_DEBUG_COUNTER
2625  switch RHASH_SIZE(obj) {
2626  case 0:
2627  RB_DEBUG_COUNTER_INC(obj_hash_empty);
2628  break;
2629  case 1:
2630  RB_DEBUG_COUNTER_INC(obj_hash_1);
2631  break;
2632  case 2:
2633  RB_DEBUG_COUNTER_INC(obj_hash_2);
2634  break;
2635  case 3:
2636  RB_DEBUG_COUNTER_INC(obj_hash_3);
2637  break;
2638  case 4:
2639  RB_DEBUG_COUNTER_INC(obj_hash_4);
2640  break;
2641  case 5:
2642  case 6:
2643  case 7:
2644  case 8:
2645  RB_DEBUG_COUNTER_INC(obj_hash_5_8);
2646  break;
2647  default:
2648  GC_ASSERT(RHASH_SIZE(obj) > 8);
2649  RB_DEBUG_COUNTER_INC(obj_hash_g8);
2650  }
2651 
2652  if (RHASH_AR_TABLE_P(obj)) {
2653  if (RHASH_AR_TABLE(obj) == NULL) {
2654  RB_DEBUG_COUNTER_INC(obj_hash_null);
2655  }
2656  else {
2657  RB_DEBUG_COUNTER_INC(obj_hash_ar);
2658  }
2659  }
2660  else {
2661  RB_DEBUG_COUNTER_INC(obj_hash_st);
2662  }
2663 #endif
/* ar (array) tables are freed inline unless they live in the transient
 * heap; st tables are freed via st_free_table. */
2664  if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
2665  struct ar_table_struct *tab = RHASH(obj)->as.ar;
2666 
2667  if (tab) {
2668  if (RHASH_TRANSIENT_P(obj)) {
2669  RB_DEBUG_COUNTER_INC(obj_hash_transient);
2670  }
2671  else {
2672  ruby_xfree(tab);
2673  }
2674  }
2675  }
2676  else {
2678  st_free_table(RHASH(obj)->as.st);
2679  }
2680  break;
2681  case T_REGEXP:
2682  if (RANY(obj)->as.regexp.ptr) {
2683  onig_free(RANY(obj)->as.regexp.ptr);
2684  RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
2685  }
2686  break;
/* T_DATA: free immediately only for RUBY_DEFAULT_FREE or typed data marked
 * RUBY_TYPED_FREE_IMMEDIATELY; everything else becomes a zombie so the
 * dfree callback can run outside the sweep step. */
2687  case T_DATA:
2688  if (DATA_PTR(obj)) {
2689  int free_immediately = FALSE;
2690  void (*dfree)(void *);
2691  void *data = DATA_PTR(obj);
2692 
2693  if (RTYPEDDATA_P(obj)) {
2694  free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
2695  dfree = RANY(obj)->as.typeddata.type->function.dfree;
2696  if (0 && free_immediately == 0) {
2697  /* to expose non-free-immediate T_DATA */
2698  fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
2699  }
2700  }
2701  else {
2702  dfree = RANY(obj)->as.data.dfree;
2703  }
2704 
2705  if (dfree) {
2706  if (dfree == RUBY_DEFAULT_FREE) {
2707  xfree(data);
2708  RB_DEBUG_COUNTER_INC(obj_data_xfree);
2709  }
2710  else if (free_immediately) {
2711  (*dfree)(data);
2712  RB_DEBUG_COUNTER_INC(obj_data_imm_free);
2713  }
2714  else {
2715  make_zombie(objspace, obj, dfree, data);
2716  RB_DEBUG_COUNTER_INC(obj_data_zombie);
2717  return 1;
2718  }
2719  }
2720  else {
2721  RB_DEBUG_COUNTER_INC(obj_data_empty);
2722  }
2723  }
2724  break;
2725  case T_MATCH:
2726  if (RANY(obj)->as.match.rmatch) {
2727  struct rmatch *rm = RANY(obj)->as.match.rmatch;
2728 #if USE_DEBUG_COUNTER
2729  if (rm->regs.num_regs >= 8) {
2730  RB_DEBUG_COUNTER_INC(obj_match_ge8);
2731  }
2732  else if (rm->regs.num_regs >= 4) {
2733  RB_DEBUG_COUNTER_INC(obj_match_ge4);
2734  }
2735  else if (rm->regs.num_regs >= 1) {
2736  RB_DEBUG_COUNTER_INC(obj_match_under4);
2737  }
2738 #endif
2739  onig_region_free(&rm->regs, 0);
2740  if (rm->char_offset)
2741  xfree(rm->char_offset);
2742  xfree(rm);
2743 
2744  RB_DEBUG_COUNTER_INC(obj_match_ptr);
2745  }
2746  break;
/* T_FILE is always deferred: closing an fd may block or raise. */
2747  case T_FILE:
2748  if (RANY(obj)->as.file.fptr) {
2749  make_io_zombie(objspace, obj);
2750  RB_DEBUG_COUNTER_INC(obj_file_ptr);
2751  return 1;
2752  }
2753  break;
2754  case T_RATIONAL:
2755  RB_DEBUG_COUNTER_INC(obj_rational);
2756  break;
2757  case T_COMPLEX:
2758  RB_DEBUG_COUNTER_INC(obj_complex);
2759  break;
2760  case T_MOVED:
2761  break;
2762  case T_ICLASS:
2763  /* Basically , T_ICLASS shares table with the module */
/* NOTE(review): the table-free call lines inside these branches
 * (originals 2765, 2768, 2771, 2774-2775) are missing from this dump. */
2764  if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
2766  }
2767  if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
2769  }
2770  if (RCLASS_EXT(obj)->subclasses) {
2772  RCLASS_EXT(obj)->subclasses = NULL;
2773  }
2776  xfree(RANY(obj)->as.klass.ptr);
2777  RANY(obj)->as.klass.ptr = NULL;
2778 
2779  RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
2780  break;
2781 
2782  case T_FLOAT:
2783  RB_DEBUG_COUNTER_INC(obj_float);
2784  break;
2785 
2786  case T_BIGNUM:
2787  if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2789  RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
2790  }
2791  else {
2792  RB_DEBUG_COUNTER_INC(obj_bignum_embed);
2793  }
2794  break;
2795 
2796  case T_NODE:
2797  UNEXPECTED_NODE(obj_free);
2798  break;
2799 
2800  case T_STRUCT:
2801  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
2802  RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
2803  RB_DEBUG_COUNTER_INC(obj_struct_embed);
2804  }
2805  else if (RSTRUCT_TRANSIENT_P(obj)) {
2806  RB_DEBUG_COUNTER_INC(obj_struct_transient);
2807  }
2808  else {
2809  xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
2810  RB_DEBUG_COUNTER_INC(obj_struct_ptr);
2811  }
2812  break;
2813 
2814  case T_SYMBOL:
2815  {
2817  RB_DEBUG_COUNTER_INC(obj_symbol);
2818  }
2819  break;
2820 
2821  case T_IMEMO:
2822  switch (imemo_type(obj)) {
2823  case imemo_ment:
2824  rb_free_method_entry(&RANY(obj)->as.imemo.ment);
2825  RB_DEBUG_COUNTER_INC(obj_imemo_ment);
2826  break;
2827  case imemo_iseq:
2828  rb_iseq_free(&RANY(obj)->as.imemo.iseq);
2829  RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
2830  break;
2831  case imemo_env:
2832  GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
2833  xfree((VALUE *)RANY(obj)->as.imemo.env.env);
2834  RB_DEBUG_COUNTER_INC(obj_imemo_env);
2835  break;
2836  case imemo_tmpbuf:
2837  xfree(RANY(obj)->as.imemo.alloc.ptr);
2838  RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
2839  break;
2840  case imemo_ast:
2841  rb_ast_free(&RANY(obj)->as.imemo.ast);
2842  RB_DEBUG_COUNTER_INC(obj_imemo_ast);
2843  break;
2844  case imemo_cref:
2845  RB_DEBUG_COUNTER_INC(obj_imemo_cref);
2846  break;
2847  case imemo_svar:
2848  RB_DEBUG_COUNTER_INC(obj_imemo_svar);
2849  break;
2850  case imemo_throw_data:
2851  RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
2852  break;
2853  case imemo_ifunc:
2854  RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
2855  break;
2856  case imemo_memo:
2857  RB_DEBUG_COUNTER_INC(obj_imemo_memo);
2858  break;
2859  case imemo_parser_strterm:
2860  RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
2861  break;
2862  default:
2863  /* unreachable */
2864  break;
2865  }
/* IMEMOs never carry Ruby-level finalizers, so skip the FL_FINALIZE check. */
2866  return 0;
2867 
2868  default:
2869  rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
2870  BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
2871  }
2872 
/* Objects with registered finalizers become zombies so the finalizer runs
 * after sweeping. */
2873  if (FL_TEST(obj, FL_FINALIZE)) {
2874  make_zombie(objspace, obj, 0, 0);
2875  return 1;
2876  }
2877  else {
2878  return 0;
2879  }
2880 }
2881 
2882 
/* Monotonic object-id allocation: ids advance by half an RVALUE so they can
 * never collide with real slot addresses; the first handed-out id starts one
 * increment past that. */
2883 #define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
2884 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
2885 
2886 static int
2887 object_id_cmp(st_data_t x, st_data_t y)
2888 {
2889  if (RB_TYPE_P(x, T_BIGNUM)) {
2890  return !rb_big_eql(x, y);
2891  } else {
2892  return x != y;
2893  }
2894 }
2895 
2896 static st_index_t
2897 object_id_hash(st_data_t n)
2898 {
2899  if (RB_TYPE_P(n, T_BIGNUM)) {
2900  return FIX2LONG(rb_big_hash(n));
2901  } else {
2902  return st_numhash(n);
2903  }
2904 }
/* Hash-table vtable for objspace->id_to_obj_tbl (keys are object ids that
 * may be Fixnum or Bignum). */
2905 static const struct st_hash_type object_id_hash_type = {
2906  object_id_cmp,
2907  object_id_hash,
2908 };
2909 
/* One-time heap initialization: object-id tables, initial eden pages, and
 * the mark stack. NOTE(review): the signature line (original 2911,
 * Init_heap(void)) and original line 2927 (presumably the finalizer_table
 * initialization) are missing from this dump. */
2910 void
2912 {
2913  rb_objspace_t *objspace = &rb_objspace;
2914 
2915  objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
2916  objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
2917  objspace->obj_to_id_tbl = st_init_numtable();
2918 
2919 #if RGENGC_ESTIMATE_OLDMALLOC
2920  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
2921 #endif
2922 
2923  heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
2924  init_mark_stack(&objspace->mark_stack);
2925 
2926  objspace->profile.invoke_time = getrusage_time();
2928 }
2929 
/* Apply the GC-stress setting captured from the environment at boot.
 * NOTE(review): signature line (original 2931, Init_gc_stress(void)) is
 * missing from this dump. */
2930 void
2932 {
2933  rb_objspace_t *objspace = &rb_objspace;
2934 
2935  gc_stress_set(objspace, ruby_initial_gc_stress);
2936 }
2937 
/* Callback signature for heap-page iteration: (vstart, vend, stride, data);
 * returning non-zero stops the walk. */
2938 typedef int each_obj_callback(void *, void *, size_t, void *);
2939 
2940 static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data);
2941 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
2942 
/* NOTE(review): head of `struct each_obj_args` (originals 2943-2945: the
 * struct keyword plus objspace/callback fields) is missing from this dump. */
2946  void *data;
2947 };
2948 
/* Walk every allocated heap page in address order, invoking `callback` with
 * the page's slot range. The index-adjustment loops re-synchronize `i` with
 * heap_pages_sorted in case the callback allocated or freed pages; the
 * statement order here is load-bearing, so the code is left untouched. */
2949 static void
2950 objspace_each_objects_without_setup(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
2951 {
2952  size_t i;
2953  struct heap_page *page;
2954  RVALUE *pstart = NULL, *pend;
2955 
2956  i = 0;
2957  while (i < heap_allocated_pages) {
/* Step back while the previous page now starts above our last position,
 * then forward past pages we have already visited. */
2958  while (0 < i && pstart < heap_pages_sorted[i-1]->start) i--;
2959  while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
2960  if (heap_allocated_pages <= i) break;
2961 
2962  page = heap_pages_sorted[i];
2963 
2964  pstart = page->start;
2965  pend = pstart + page->total_slots;
2966 
/* Non-zero return from the callback aborts the iteration. */
2967  if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
2968  break;
2969  }
2970  }
2971 }
2972 
2973 static VALUE
2974 objspace_each_objects_protected(VALUE arg)
2975 {
2976  struct each_obj_args *args = (struct each_obj_args *)arg;
2977  objspace_each_objects_without_setup(args->objspace, args->callback, args->data);
2978  return Qnil;
2979 }
2980 
/* rb_ensure() cleanup: re-enable incremental GC after an object walk.
 * NOTE(review): originals 2984 and 2986 (the objspace lookup and the
 * flags.dont_incremental reset) are missing from this dump. */
2981 static VALUE
2982 incremental_enable(VALUE _)
2983 {
2985 
2987  return Qnil;
2988 }
2989 
2990 /*
2991  * rb_objspace_each_objects() is special C API to walk through
2992  * Ruby object space. This C API is too difficult to use it.
2993  * To be frank, you should not use it. Or you need to read the
2994  * source code of this function and understand what this function does.
2995  *
2996  * 'callback' will be called several times (the number of heap page,
2997  * at current implementation) with:
2998  * vstart: a pointer to the first living object of the heap_page.
2999  * vend: a pointer to next to the valid heap_page area.
3000  * stride: a distance to next VALUE.
3001  *
3002  * If callback() returns non-zero, the iteration will be stopped.
3003  *
3004  * This is a sample callback code to iterate liveness objects:
3005  *
3006  * int
3007  * sample_callback(void *vstart, void *vend, int stride, void *data) {
3008  * VALUE v = (VALUE)vstart;
3009  * for (; v != (VALUE)vend; v += stride) {
3010  * if (RBASIC(v)->flags) { // liveness check
3011  * // do something with live object 'v'
3012  * }
3013  * }
3014  * return 0; // continue to iteration
3015  * }
3015  *
3016  * Note: 'vstart' is not a top of heap_page. This point the first
3017  * living object to grasp at least one object to avoid GC issue.
3018  * This means that you can not walk through all Ruby object page
3019  * including freed object page.
3020  *
3021  * Note: On this implementation, 'stride' is same as sizeof(RVALUE).
3022  * However, there are possibilities to pass variable values with
3023  * 'stride' with some reasons. You must use stride instead of
3024  * use some constant value in the iteration.
3025  */
/* Public entry point for the walk described in the comment above.
 * NOTE(review): signature line (original 3027,
 * rb_objspace_each_objects(each_obj_callback *callback, void *data)) is
 * missing from this dump. */
3026 void
3028 {
3029  objspace_each_objects(&rb_objspace, callback, data);
3030 }
3031 
/* Finish any in-progress GC, disable incremental GC for the duration of the
 * walk, and restore the previous setting afterwards via rb_ensure.
 * NOTE(review): original line 3038 (presumably setting
 * objspace->flags.dont_incremental before the walk) is missing here. */
3032 static void
3033 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
3034 {
3035  int prev_dont_incremental = objspace->flags.dont_incremental;
3036 
3037  gc_rest(objspace);
3039 
3040  if (prev_dont_incremental) {
/* Incremental GC was already off; no cleanup handler needed. */
3041  objspace_each_objects_without_setup(objspace, callback, data);
3042  }
3043  else {
3044  struct each_obj_args args = {objspace, callback, data};
3045  rb_ensure(objspace_each_objects_protected, (VALUE)&args, incremental_enable, Qnil);
3046  }
3047 }
3048 
/* Raw variant: walk pages without pausing/reconfiguring the GC first.
 * NOTE(review): signature line (original 3050) is missing from this dump. */
3049 void
3051 {
3052  objspace_each_objects_without_setup(&rb_objspace, callback, data);
3053 }
3054 
/* NOTE(review): head of `struct os_each_struct` (original 3055) and its
 * second field (original 3057, presumably `VALUE of;`) are missing here. */
3056  size_t num;
3058 };
3059 
/* Return non-zero when `obj` should be hidden from ObjectSpace.each_object:
 * free slots, GC-internal types, hidden (klass == 0) objects, and singleton
 * classes of non-Object things.
 * NOTE(review): original line 3064 (presumably the asan poisoned-state
 * capture into `ptr`) and 3082 (body of the singleton-class branch) are
 * missing from this dump; `ptr` below refers to the lost declaration. */
3060 static int
3061 internal_object_p(VALUE obj)
3062 {
3063  RVALUE *p = (RVALUE *)obj;
3065  asan_unpoison_object(obj, false);
3066  bool used_p = p->as.basic.flags;
3067 
3068  if (used_p) {
3069  switch (BUILTIN_TYPE(p)) {
3070  case T_NODE:
3071  UNEXPECTED_NODE(internal_object_p);
3072  break;
3073  case T_NONE:
3074  case T_MOVED:
3075  case T_IMEMO:
3076  case T_ICLASS:
3077  case T_ZOMBIE:
3078  break;
3079  case T_CLASS:
3080  if (!p->as.basic.klass) break;
3081  if (FL_TEST(obj, FL_SINGLETON)) {
3083  }
3084  return 0;
3085  default:
3086  if (!p->as.basic.klass) break;
3087  return 0;
3088  }
3089  }
/* Re-poison slots that were poisoned on entry or are genuinely unused. */
3090  if (ptr || ! used_p) {
3091  asan_poison_object(obj);
3092  }
3093  return 1;
3094 }
3095 
/* Exported wrapper for internal_object_p().
 * NOTE(review): signature line (original 3097,
 * rb_objspace_internal_object_p(VALUE obj)) is missing from this dump. */
3096 int
3098 {
3099  return internal_object_p(obj);
3100 }
3101 
3102 static int
3103 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3104 {
3105  struct os_each_struct *oes = (struct os_each_struct *)data;
3106  RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
3107 
3108  for (; p != pend; p++) {
3109  volatile VALUE v = (VALUE)p;
3110  if (!internal_object_p(v)) {
3111  if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
3112  rb_yield(v);
3113  oes->num++;
3114  }
3115  }
3116  }
3117 
3118  return 0;
3119 }
3120 
3121 static VALUE
3122 os_obj_of(VALUE of)
3123 {
3124  struct os_each_struct oes;
3125 
3126  oes.num = 0;
3127  oes.of = of;
3128  rb_objspace_each_objects(os_obj_of_i, &oes);
3129  return SIZET2NUM(oes.num);
3130 }
3131 
3132 /*
3133  * call-seq:
3134  * ObjectSpace.each_object([module]) {|obj| ... } -> integer
3135  * ObjectSpace.each_object([module]) -> an_enumerator
3136  *
3137  * Calls the block once for each living, nonimmediate object in this
3138  * Ruby process. If <i>module</i> is specified, calls the block
3139  * for only those classes or modules that match (or are a subclass of)
3140  * <i>module</i>. Returns the number of objects found. Immediate
3141  * objects (<code>Fixnum</code>s, <code>Symbol</code>s
3142  * <code>true</code>, <code>false</code>, and <code>nil</code>) are
3143  * never returned. In the example below, #each_object returns both
3144  * the numbers we defined and several constants defined in the Math
3145  * module.
3146  *
3147  * If no block is given, an enumerator is returned instead.
3148  *
3149  * a = 102.7
3150  * b = 95 # Won't be returned
3151  * c = 12345678987654321
3152  * count = ObjectSpace.each_object(Numeric) {|x| p x }
3153  * puts "Total count: #{count}"
3154  *
3155  * <em>produces:</em>
3156  *
3157  * 12345678987654321
3158  * 102.7
3159  * 2.71828182845905
3160  * 3.14159265358979
3161  * 2.22044604925031e-16
3162  * 1.7976931348623157e+308
3163  * 2.2250738585072e-308
3164  * Total count: 7
3165  *
3166  */
3167 
3168 static VALUE
3169 os_each_obj(int argc, VALUE *argv, VALUE os)
3170 {
3171  VALUE of;
3172 
3173  of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
3174  RETURN_ENUMERATOR(os, 1, &of);
3175  return os_obj_of(of);
3176 }
3177 
3178 /*
3179  * call-seq:
3180  * ObjectSpace.undefine_finalizer(obj)
3181  *
3182  * Removes all finalizers for <i>obj</i>.
3183  *
3184  */
3185 
/* ObjectSpace.undefine_finalizer(obj): thin wrapper over the public API. */
3186 static VALUE
3187 undefine_final(VALUE os, VALUE obj)
3188 {
3189  return rb_undefine_finalizer(obj);
3190 }
3191 
/* Drop every finalizer registered for `obj` and return it.
 * NOTE(review): the signature line (original 3193) and originals 3197/3199
 * (presumably the frozen check and the FL_FINALIZE unset) are missing from
 * this dump. */
3192 VALUE
3194 {
3195  rb_objspace_t *objspace = &rb_objspace;
3196  st_data_t data = obj;
3198  st_delete(finalizer_table, &data, 0);
3200  return obj;
3201 }
3202 
3203 static void
3204 should_be_callable(VALUE block)
3205 {
3206  if (!rb_obj_respond_to(block, idCall, TRUE)) {
3207  rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
3208  rb_obj_class(block));
3209  }
3210 }
3211 
/* Reject objects that cannot carry flags (immediates etc.) as finalizer
 * targets. NOTE(review): originals 3217 and 3219 (the rb_raise argument
 * continuation and, presumably, rb_check_frozen) are missing from this
 * dump. */
3212 static void
3213 should_be_finalizable(VALUE obj)
3214 {
3215  if (!FL_ABLE(obj)) {
3216  rb_raise(rb_eArgError, "cannot define finalizer for %s",
3218  }
3220 }
3221 
3222 /*
3223  * call-seq:
3224  * ObjectSpace.define_finalizer(obj, aProc=proc())
3225  *
3226  * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
3227  * was destroyed. The object ID of the <i>obj</i> will be passed
3228  * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
3229  * method, make sure it can be called with a single argument.
3230  *
3231  */
3232 
3233 static VALUE
3234 define_final(int argc, VALUE *argv, VALUE os)
3235 {
3236  VALUE obj, block;
3237 
3238  rb_scan_args(argc, argv, "11", &obj, &block);
3239  should_be_finalizable(obj);
3240  if (argc == 1) {
3241  block = rb_block_proc();
3242  }
3243  else {
3244  should_be_callable(block);
3245  }
3246 
3247  return define_final0(obj, block);
3248 }
3249 
/* Register `block` as a finalizer for `obj`: mark the object FL_FINALIZE and
 * append a frozen [0, block] pair to its entry in finalizer_table, skipping
 * duplicates. Returns the registered block (or the existing equal one).
 * NOTE(review): original line 3283 (presumably
 * st_add_direct(finalizer_table, obj, table)) is missing from this dump, so
 * the new-table branch appears not to store the table. */
3250 static VALUE
3251 define_final0(VALUE obj, VALUE block)
3252 {
3253  rb_objspace_t *objspace = &rb_objspace;
3254  VALUE table;
3255  st_data_t data;
3256 
3257  RBASIC(obj)->flags |= FL_FINALIZE;
3258 
3259  block = rb_ary_new3(2, INT2FIX(0), block);
3260  OBJ_FREEZE(block);
3261 
3262  if (st_lookup(finalizer_table, obj, &data)) {
3263  table = (VALUE)data;
3264 
3265  /* avoid duplicate block, table is usually small */
3266  {
3267  long len = RARRAY_LEN(table);
3268  long i;
3269 
3270  for (i = 0; i < len; i++) {
3271  VALUE recv = RARRAY_AREF(table, i);
3272  if (rb_funcall(recv, idEq, 1, block)) {
3273  return recv;
3274  }
3275  }
3276  }
3277 
3278  rb_ary_push(table, block);
3279  }
3280  else {
3281  table = rb_ary_new3(1, block);
/* Hide the table array from Ruby by clearing its class. */
3282  RBASIC_CLEAR_CLASS(table);
3284  }
3285  return block;
3286 }
3287 
/* C-level finalizer registration: validates both arguments then delegates
 * to define_final0. NOTE(review): the signature line (original 3289,
 * rb_define_finalizer(VALUE obj, VALUE block)) is missing from this dump. */
3288 VALUE
3290 {
3291  should_be_finalizable(obj);
3292  should_be_callable(block);
3293  return define_final0(obj, block);
3294 }
3295 
/* Copy obj's finalizer registration to dest (used by dup/clone paths).
 * NOTE(review): the signature line (original 3297, presumably
 * rb_gc_copy_finalizer(VALUE dest, VALUE obj)) is missing from this dump.
 * Note the shared `table` is inserted for dest without copying — both
 * objects then reference the same finalizer array. */
3296 void
3298 {
3299  rb_objspace_t *objspace = &rb_objspace;
3300  VALUE table;
3301  st_data_t data;
3302 
3303  if (!FL_TEST(obj, FL_FINALIZE)) return;
3304  if (st_lookup(finalizer_table, obj, &data)) {
3305  table = (VALUE)data;
3306  st_insert(finalizer_table, dest, table);
3307  }
3308  FL_SET(dest, FL_FINALIZE);
3309 }
3310 
3311 static VALUE
3312 run_single_final(VALUE final, VALUE objid)
3313 {
3314  const VALUE cmd = RARRAY_AREF(final, 1);
3315  return rb_check_funcall(cmd, idCall, 1, &objid);
3316 }
3317 
/* Run every finalizer registered for `obj`, isolating each from exceptions:
 * a raising finalizer is skipped and the loop resumes at the next entry with
 * the control frame and errinfo restored. The EC_PUSH_TAG/longjmp structure
 * is order-sensitive, so the code is left untouched.
 * NOTE(review): original line 3326 (presumably the saved `cfp` field
 * declaration referenced below) is missing from this dump. */
3318 static void
3319 run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
3320 {
3321  long i;
3322  enum ruby_tag_type state;
3323  volatile struct {
3324  VALUE errinfo;
3325  VALUE objid;
3327  long finished;
3328  } saved;
3329  rb_execution_context_t * volatile ec = GET_EC();
3330 #define RESTORE_FINALIZER() (\
3331  ec->cfp = saved.cfp, \
3332  rb_set_errinfo(saved.errinfo))
3333 
3334  saved.errinfo = rb_errinfo();
3335  saved.objid = rb_obj_id(obj);
3336  saved.cfp = ec->cfp;
3337  saved.finished = 0;
3338 
3339  EC_PUSH_TAG(ec);
3340  state = EC_EXEC_TAG();
3341  if (state != TAG_NONE) {
3342  ++saved.finished; /* skip failed finalizer */
3343  }
3344  for (i = saved.finished;
3345  RESTORE_FINALIZER(), i<RARRAY_LEN(table);
3346  saved.finished = ++i) {
3347  run_single_final(RARRAY_AREF(table, i), saved.objid);
3348  }
3349  EC_POP_TAG();
3350 #undef RESTORE_FINALIZER
3351 }
3352 
3353 static void
3354 run_final(rb_objspace_t *objspace, VALUE zombie)
3355 {
3356  st_data_t key, table;
3357 
3358  if (RZOMBIE(zombie)->dfree) {
3359  RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
3360  }
3361 
3362  key = (st_data_t)zombie;
3363  if (st_delete(finalizer_table, &key, &table)) {
3364  run_finalizer(objspace, zombie, (VALUE)table);
3365  }
3366 }
3367 
/* Walk a chain of zombies, run each one's finalizers, clean up its object-id
 * entry, and hand the slot back to its heap page as a free object.
 * NOTE(review): original line 3386 (between clearing the flags and the
 * page counters — presumably heap_pages_final_slots--) is missing from this
 * dump. */
3368 static void
3369 finalize_list(rb_objspace_t *objspace, VALUE zombie)
3370 {
3371  while (zombie) {
3372  VALUE next_zombie;
3373  struct heap_page *page;
3374  asan_unpoison_object(zombie, false);
/* Read the link before finalizing — run_final may reuse the slot's fields. */
3375  next_zombie = RZOMBIE(zombie)->next;
3376  page = GET_HEAP_PAGE(zombie);
3377 
3378  run_final(objspace, zombie);
3379 
3380  GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
3381  if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
3382  obj_free_object_id(objspace, zombie);
3383  }
3384 
3385  RZOMBIE(zombie)->basic.flags = 0;
3387  page->final_slots--;
3388  page->free_slots++;
3389  heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
3390 
3391  objspace->profile.total_freed_objects++;
3392 
3393  zombie = next_zombie;
3394  }
3395 }
3396 
3397 static void
3398 finalize_deferred(rb_objspace_t *objspace)
3399 {
3400  VALUE zombie;
3401 
3402  while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
3403  finalize_list(objspace, zombie);
3404  }
3405 }
3406 
3407 static void
3408 gc_finalize_deferred(void *dmy)
3409 {
3410  rb_objspace_t *objspace = dmy;
3411  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3412  finalize_deferred(objspace);
3413  ATOMIC_SET(finalizing, 0);
3414 }
3415 
3416 static void
3417 gc_finalize_deferred_register(rb_objspace_t *objspace)
3418 {
3419  if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
3420  rb_bug("gc_finalize_deferred_register: can't register finalizer.");
3421  }
3422 }
3423 
/* NOTE(review): tail of `struct force_finalize_list`; its field lines
 * (originals 3424-3427: obj, table, next) were lost in extraction — see
 * force_chain_object below for the field usage. */
3428 };
3429 
3430 static int
3431 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
3432 {
3433  struct force_finalize_list **prev = (struct force_finalize_list **)arg;
3434  struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
3435  curr->obj = key;
3436  curr->table = val;
3437  curr->next = *prev;
3438  *prev = curr;
3439  return ST_CONTINUE;
3440 }
3441 
/* Shutdown-time finalization: run all deferred and registered finalizers,
 * then force-finalize remaining T_DATA/T_FILE objects on every heap page.
 * NOTE(review): the signature line (original 3443) and originals 3457, 3471,
 * 3522, 3526 are missing from this dump, so some statements (e.g. the
 * finalizer_table delete inside the force loop) are not visible. */
3442 void
3444 {
3445  RVALUE *p, *pend;
3446  size_t i;
3447 
3448 #if RGENGC_CHECK_MODE >= 2
3449  gc_verify_internal_consistency(objspace);
3450 #endif
3451  gc_rest(objspace);
3452 
/* Bail out if another thread is already finalizing. */
3453  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3454 
3455  /* run finalizers */
3456  finalize_deferred(objspace);
3458 
3459  gc_rest(objspace);
3460  /* prohibit incremental GC */
3461  objspace->flags.dont_incremental = 1;
3462 
3463  /* force to run finalizer */
/* Loop because running finalizers can register new ones. */
3464  while (finalizer_table->num_entries) {
3465  struct force_finalize_list *list = 0;
3466  st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
3467  while (list) {
3468  struct force_finalize_list *curr = list;
3469  st_data_t obj = (st_data_t)curr->obj;
3470  run_finalizer(objspace, curr->obj, curr->table);
3472  list = curr->next;
3473  xfree(curr);
3474  }
3475  }
3476 
3477  /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
3478  dont_gc = 1;
3479 
3480  /* running data/file finalizers are part of garbage collection */
3481  gc_enter(objspace, "rb_objspace_call_finalizer");
3482 
3483  /* run data/file object's finalizers */
3484  for (i = 0; i < heap_allocated_pages; i++) {
3485  p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
3486  while (p < pend) {
3487  void *poisoned = asan_poisoned_object_p((VALUE)p);
3488  asan_unpoison_object((VALUE)p, false);
3489  switch (BUILTIN_TYPE(p)) {
3490  case T_DATA:
3491  if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
/* Threads, mutexes and fibers are torn down elsewhere; skip them here. */
3492  if (rb_obj_is_thread((VALUE)p)) break;
3493  if (rb_obj_is_mutex((VALUE)p)) break;
3494  if (rb_obj_is_fiber((VALUE)p)) break;
3495  p->as.free.flags = 0;
3496  if (RTYPEDDATA_P(p)) {
3497  RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
3498  }
3499  if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
3500  xfree(DATA_PTR(p));
3501  }
3502  else if (RANY(p)->as.data.dfree) {
3503  make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
3504  }
3505  break;
3506  case T_FILE:
3507  if (RANY(p)->as.file.fptr) {
3508  make_io_zombie(objspace, (VALUE)p);
3509  }
3510  break;
3511  }
3512  if (poisoned) {
3513  GC_ASSERT(BUILTIN_TYPE(p) == T_NONE);
3514  asan_poison_object((VALUE)p);
3515  }
3516  p++;
3517  }
3518  }
3519 
3520  gc_exit(objspace, "rb_objspace_call_finalizer");
3521 
/* Flush zombies created above (guard line lost — original 3522). */
3523  finalize_list(objspace, heap_pages_deferred_final);
3524  }
3525 
/* Original 3526 (presumably st_free_table(finalizer_table)) is missing. */
3527  finalizer_table = 0;
3528  ATOMIC_SET(finalizing, 0);
3529 }
3530 
3531 PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr));
3532 static inline int
3533 is_id_value(rb_objspace_t *objspace, VALUE ptr)
3534 {
3535  if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
3536  if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
3537  if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
3538  return TRUE;
3539 }
3540 
3541 static inline int
3542 heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
3543 {
3544  struct heap_page *page = GET_HEAP_PAGE(ptr);
3545  return page->flags.before_sweep ? FALSE : TRUE;
3546 }
3547 
3548 static inline int
3549 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
3550 {
3551  if (heap_is_swept_object(objspace, heap_eden, ptr)) {
3552  return TRUE;
3553  }
3554  else {
3555  return FALSE;
3556  }
3557 }
3558 
3559 /* garbage objects will be collected soon. */
/* An object is "garbage" only during lazy sweeping, on a not-yet-swept page,
 * when it is unmarked. NOTE(review): original line 3565 (presumably the
 * MARKED_IN_BITMAP(...) term completing this condition) is missing from
 * this dump. */
3560 static inline int
3561 is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
3562 {
3563  if (!is_lazy_sweeping(heap_eden) ||
3564  is_swept_object(objspace, ptr) ||
3566 
3567  return FALSE;
3568  }
3569  else {
3570  return TRUE;
3571  }
3572 }
3573 
3574 static inline int
3575 is_live_object(rb_objspace_t *objspace, VALUE ptr)
3576 {
3577  switch (BUILTIN_TYPE(ptr)) {
3578  case T_NONE:
3579  case T_ZOMBIE:
3580  return FALSE;
3581  }
3582 
3583  if (!is_garbage_object(objspace, ptr)) {
3584  return TRUE;
3585  }
3586  else {
3587  return FALSE;
3588  }
3589 }
3590 
3591 static inline int
3592 is_markable_object(rb_objspace_t *objspace, VALUE obj)
3593 {
3594  if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
3595  check_rvalue_consistency(obj);
3596  return TRUE;
3597 }
3598 
/* Exported: markable and currently live?
 * NOTE(review): signature line (original 3600,
 * rb_objspace_markable_object_p(VALUE obj)) is missing from this dump. */
3599 int
3601 {
3602  rb_objspace_t *objspace = &rb_objspace;
3603  return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
3604 }
3605 
/* Exported: will this object be collected by the in-progress lazy sweep?
 * NOTE(review): signature line (original 3607,
 * rb_objspace_garbage_object_p(VALUE obj)) is missing from this dump. */
3606 int
3608 {
3609  rb_objspace_t *objspace = &rb_objspace;
3610  return is_garbage_object(objspace, obj);
3611 }
3612 
3613 static VALUE
3614 id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
3615 {
3616  VALUE orig;
3617  if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
3618  return orig;
3619  }
3620  else {
3621  return Qundef;
3622  }
3623 }
3624 
3625 /*
3626  * call-seq:
3627  * ObjectSpace._id2ref(object_id) -> an_object
3628  *
3629  * Converts an object id to a reference to the object. May not be
3630  * called on an object id passed as a parameter to a finalizer.
3631  *
3632  * s = "I am a string" #=> "I am a string"
3633  * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
3634  * r == s #=> true
3635  *
3636  */
3637 
3638 static VALUE
3639 id2ref(VALUE objid)
3640 {
3641 #if SIZEOF_LONG == SIZEOF_VOIDP
3642 #define NUM2PTR(x) NUM2ULONG(x)
3643 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3644 #define NUM2PTR(x) NUM2ULL(x)
3645 #endif
3646  rb_objspace_t *objspace = &rb_objspace;
3647  VALUE ptr;
3648  VALUE orig;
3649  void *p0;
3650 
3651  if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
3652  ptr = NUM2PTR(objid);
3653  if (ptr == Qtrue) return Qtrue;
3654  if (ptr == Qfalse) return Qfalse;
3655  if (ptr == Qnil) return Qnil;
3656  if (FIXNUM_P(ptr)) return (VALUE)ptr;
3657  if (FLONUM_P(ptr)) return (VALUE)ptr;
3658 
3659  ptr = obj_id_to_ref(objid);
3660  if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
3661  ID symid = ptr / sizeof(RVALUE);
3662  p0 = (void *)ptr;
3663  if (rb_id2str(symid) == 0)
3664  rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
3665  return ID2SYM(symid);
3666  }
3667  }
3668 
3669  if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
3670  is_live_object(objspace, orig)) {
3671  return orig;
3672  }
3673 
3674  if (rb_int_ge(objid, objspace->next_object_id)) {
3675  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
3676  } else {
3677  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
3678  }
3679 }
3680 
3681 static VALUE
3682 os_id2ref(VALUE os, VALUE objid)
3683 {
3684  return id2ref(objid);
3685 }
3686 
3687 static VALUE
3688 rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
3689 {
3690  if (STATIC_SYM_P(obj)) {
3691  return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
3692  }
3693  else if (FLONUM_P(obj)) {
3694 #if SIZEOF_LONG == SIZEOF_VOIDP
3695  return LONG2NUM((SIGNED_VALUE)obj);
3696 #else
3697  return LL2NUM((SIGNED_VALUE)obj);
3698 #endif
3699  }
3700  else if (SPECIAL_CONST_P(obj)) {
3701  return LONG2NUM((SIGNED_VALUE)obj);
3702  }
3703 
3704  return get_heap_object_id(obj);
3705 }
3706 
3707 static VALUE
3708 cached_object_id(VALUE obj)
3709 {
3710  VALUE id;
3711  rb_objspace_t *objspace = &rb_objspace;
3712 
3713  if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
3715  return id;
3716  }
3717  else {
3719 
3720  id = objspace->next_object_id;
3722 
3723  st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
3724  st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
3726 
3727  return id;
3728  }
3729 }
3730 
3731 static VALUE
3732 nonspecial_obj_id_(VALUE obj)
3733 {
3734  return nonspecial_obj_id(obj);
3735 }
3736 
3737 
3738 VALUE
3740 {
3741  return rb_find_object_id(obj, nonspecial_obj_id_);
3742 }
3743 
3744 /*
3745  * Document-method: __id__
3746  * Document-method: object_id
3747  *
3748  * call-seq:
3749  * obj.__id__ -> integer
3750  * obj.object_id -> integer
3751  *
3752  * Returns an integer identifier for +obj+.
3753  *
3754  * The same number will be returned on all calls to +object_id+ for a given
3755  * object, and no two active objects will share an id.
3756  *
3757  * Note: that some objects of builtin classes are reused for optimization.
3758  * This is the case for immediate values and frozen string literals.
3759  *
3760  * BasicObject implements +__id__+, Kernel implements +object_id+.
3761  *
3762  * Immediate values are not passed by reference but are passed by value:
3763  * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
3764  *
3765  * Object.new.object_id == Object.new.object_id # => false
3766  * (21 * 2).object_id == (21 * 2).object_id # => true
3767  * "hello".object_id == "hello".object_id # => false
3768  * "hi".freeze.object_id == "hi".freeze.object_id # => true
3769  */
3770 
3771 VALUE
3773 {
3774  /*
3775  * 32-bit VALUE space
3776  * MSB ------------------------ LSB
3777  * false 00000000000000000000000000000000
3778  * true 00000000000000000000000000000010
3779  * nil 00000000000000000000000000000100
3780  * undef 00000000000000000000000000000110
3781  * symbol ssssssssssssssssssssssss00001110
3782  * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
3783  * fixnum fffffffffffffffffffffffffffffff1
3784  *
3785  * object_id space
3786  * LSB
3787  * false 00000000000000000000000000000000
3788  * true 00000000000000000000000000000010
3789  * nil 00000000000000000000000000000100
3790  * undef 00000000000000000000000000000110
3791  * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
3792  * object oooooooooooooooooooooooooooooo0 o...o % A = 0
3793  * fixnum fffffffffffffffffffffffffffffff1 bignum if required
3794  *
3795  * where A = sizeof(RVALUE)/4
3796  *
3797  * sizeof(RVALUE) is
3798  * 20 if 32-bit, double is 4-byte aligned
3799  * 24 if 32-bit, double is 8-byte aligned
3800  * 40 if 64-bit
3801  */
3802 
3803  return rb_find_object_id(obj, cached_object_id);
3804 }
3805 
3806 #include "regint.h"
3807 
3808 static size_t
3809 obj_memsize_of(VALUE obj, int use_all_types)
3810 {
3811  size_t size = 0;
3812 
3813  if (SPECIAL_CONST_P(obj)) {
3814  return 0;
3815  }
3816 
3817  if (FL_TEST(obj, FL_EXIVAR)) {
3819  }
3820 
3821  switch (BUILTIN_TYPE(obj)) {
3822  case T_OBJECT:
3823  if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
3824  ROBJECT(obj)->as.heap.ivptr) {
3825  size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
3826  }
3827  break;
3828  case T_MODULE:
3829  case T_CLASS:
3830  if (RCLASS_EXT(obj)) {
3831  if (RCLASS_M_TBL(obj)) {
3833  }
3834  if (RCLASS_IV_TBL(obj)) {
3836  }
3837  if (RCLASS_IV_INDEX_TBL(obj)) {
3839  }
3840  if (RCLASS(obj)->ptr->iv_tbl) {
3841  size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
3842  }
3843  if (RCLASS(obj)->ptr->const_tbl) {
3844  size += rb_id_table_memsize(RCLASS(obj)->ptr->const_tbl);
3845  }
3846  size += sizeof(rb_classext_t);
3847  }
3848  break;
3849  case T_ICLASS:
3850  if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
3851  if (RCLASS_M_TBL(obj)) {
3853  }
3854  }
3855  break;
3856  case T_STRING:
3857  size += rb_str_memsize(obj);
3858  break;
3859  case T_ARRAY:
3860  size += rb_ary_memsize(obj);
3861  break;
3862  case T_HASH:
3863  if (RHASH_AR_TABLE_P(obj)) {
3864  if (RHASH_AR_TABLE(obj) != NULL) {
3865  size_t rb_hash_ar_table_size();
3867  }
3868  }
3869  else {
3872  }
3873  break;
3874  case T_REGEXP:
3875  if (RREGEXP_PTR(obj)) {
3877  }
3878  break;
3879  case T_DATA:
3880  if (use_all_types) size += rb_objspace_data_type_memsize(obj);
3881  break;
3882  case T_MATCH:
3883  if (RMATCH(obj)->rmatch) {
3884  struct rmatch *rm = RMATCH(obj)->rmatch;
3885  size += onig_region_memsize(&rm->regs);
3886  size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
3887  size += sizeof(struct rmatch);
3888  }
3889  break;
3890  case T_FILE:
3891  if (RFILE(obj)->fptr) {
3892  size += rb_io_memsize(RFILE(obj)->fptr);
3893  }
3894  break;
3895  case T_RATIONAL:
3896  case T_COMPLEX:
3897  break;
3898  case T_IMEMO:
3899  size += imemo_memsize(obj);
3900  break;
3901 
3902  case T_FLOAT:
3903  case T_SYMBOL:
3904  break;
3905 
3906  case T_BIGNUM:
3907  if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
3908  size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
3909  }
3910  break;
3911 
3912  case T_NODE:
3913  UNEXPECTED_NODE(obj_memsize_of);
3914  break;
3915 
3916  case T_STRUCT:
3917  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
3918  RSTRUCT(obj)->as.heap.ptr) {
3919  size += sizeof(VALUE) * RSTRUCT_LEN(obj);
3920  }
3921  break;
3922 
3923  case T_ZOMBIE:
3924  case T_MOVED:
3925  break;
3926 
3927  default:
3928  rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
3929  BUILTIN_TYPE(obj), (void*)obj);
3930  }
3931 
3932  return size + sizeof(RVALUE);
3933 }
3934 
3935 size_t
3937 {
3938  return obj_memsize_of(obj, TRUE);
3939 }
3940 
3941 static int
3942 set_zero(st_data_t key, st_data_t val, st_data_t arg)
3943 {
3944  VALUE k = (VALUE)key;
3945  VALUE hash = (VALUE)arg;
3946  rb_hash_aset(hash, k, INT2FIX(0));
3947  return ST_CONTINUE;
3948 }
3949 
3950 static VALUE
3951 type_sym(size_t type)
3952 {
3953  switch (type) {
3954 #define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
3955  COUNT_TYPE(T_NONE);
3963  COUNT_TYPE(T_HASH);
3966  COUNT_TYPE(T_FILE);
3967  COUNT_TYPE(T_DATA);
3971  COUNT_TYPE(T_NIL);
3972  COUNT_TYPE(T_TRUE);
3978  COUNT_TYPE(T_NODE);
3982 #undef COUNT_TYPE
3983  default: return INT2NUM(type); break;
3984  }
3985 }
3986 
3987 /*
3988  * call-seq:
3989  * ObjectSpace.count_objects([result_hash]) -> hash
3990  *
3991  * Counts all objects grouped by type.
3992  *
3993  * It returns a hash, such as:
3994  * {
3995  * :TOTAL=>10000,
3996  * :FREE=>3011,
3997  * :T_OBJECT=>6,
3998  * :T_CLASS=>404,
3999  * # ...
4000  * }
4001  *
4002  * The contents of the returned hash are implementation specific.
4003  * It may be changed in future.
4004  *
4005  * The keys starting with +:T_+ means live objects.
4006  * For example, +:T_ARRAY+ is the number of arrays.
4007  * +:FREE+ means object slots which is not used now.
4008  * +:TOTAL+ means sum of above.
4009  *
4010  * If the optional argument +result_hash+ is given,
4011  * it is overwritten and returned. This is intended to avoid probe effect.
4012  *
4013  * h = {}
4014  * ObjectSpace.count_objects(h)
4015  * puts h
4016  * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
4017  *
4018  * This method is only expected to work on C Ruby.
4019  *
4020  */
4021 
4022 static VALUE
4023 count_objects(int argc, VALUE *argv, VALUE os)
4024 {
4025  rb_objspace_t *objspace = &rb_objspace;
4026  size_t counts[T_MASK+1];
4027  size_t freed = 0;
4028  size_t total = 0;
4029  size_t i;
4030  VALUE hash = Qnil;
4031 
4032  if (rb_check_arity(argc, 0, 1) == 1) {
4033  hash = argv[0];
4034  if (!RB_TYPE_P(hash, T_HASH))
4035  rb_raise(rb_eTypeError, "non-hash given");
4036  }
4037 
4038  for (i = 0; i <= T_MASK; i++) {
4039  counts[i] = 0;
4040  }
4041 
4042  for (i = 0; i < heap_allocated_pages; i++) {
4043  struct heap_page *page = heap_pages_sorted[i];
4044  RVALUE *p, *pend;
4045 
4046  p = page->start; pend = p + page->total_slots;
4047  for (;p < pend; p++) {
4048  void *poisoned = asan_poisoned_object_p((VALUE)p);
4049  asan_unpoison_object((VALUE)p, false);
4050  if (p->as.basic.flags) {
4051  counts[BUILTIN_TYPE(p)]++;
4052  }
4053  else {
4054  freed++;
4055  }
4056  if (poisoned) {
4058  asan_poison_object((VALUE)p);
4059  }
4060  }
4061  total += page->total_slots;
4062  }
4063 
4064  if (hash == Qnil) {
4065  hash = rb_hash_new();
4066  }
4067  else if (!RHASH_EMPTY_P(hash)) {
4068  rb_hash_stlike_foreach(hash, set_zero, hash);
4069  }
4070  rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
4071  rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
4072 
4073  for (i = 0; i <= T_MASK; i++) {
4074  VALUE type = type_sym(i);
4075  if (counts[i])
4076  rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
4077  }
4078 
4079  return hash;
4080 }
4081 
4082 /*
4083  ------------------------ Garbage Collection ------------------------
4084 */
4085 
4086 /* Sweeping */
4087 
4088 static size_t
4089 objspace_available_slots(rb_objspace_t *objspace)
4090 {
4091  return heap_eden->total_slots + heap_tomb->total_slots;
4092 }
4093 
4094 static size_t
4095 objspace_live_slots(rb_objspace_t *objspace)
4096 {
4098 }
4099 
4100 static size_t
4101 objspace_free_slots(rb_objspace_t *objspace)
4102 {
4103  return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4104 }
4105 
4106 static void
4107 gc_setup_mark_bits(struct heap_page *page)
4108 {
4109 #if USE_RGENGC
4110  /* copy oldgen bitmap to mark bitmap */
4112 #else
4113  /* clear mark bitmap */
4114  memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
4115 #endif
4116 }
4117 
4118 static inline int
4119 gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
4120 {
4121  int i;
4122  int empty_slots = 0, freed_slots = 0, final_slots = 0;
4123  RVALUE *p, *pend,*offset;
4124  bits_t *bits, bitset;
4125 
4126  gc_report(2, objspace, "page_sweep: start.\n");
4127 
4128  sweep_page->flags.before_sweep = FALSE;
4129 
4130  p = sweep_page->start; pend = p + sweep_page->total_slots;
4131  offset = p - NUM_IN_PAGE(p);
4132  bits = sweep_page->mark_bits;
4133 
4134  /* create guard : fill 1 out-of-range */
4135  bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
4136  bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
4137 
4138  for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4139  bitset = ~bits[i];
4140  if (bitset) {
4141  p = offset + i * BITS_BITLENGTH;
4142  do {
4143  asan_unpoison_object((VALUE)p, false);
4144  if (bitset & 1) {
4145  switch (BUILTIN_TYPE(p)) {
4146  default: { /* majority case */
4147  gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
4148 #if USE_RGENGC && RGENGC_CHECK_MODE
4149  if (!is_full_marking(objspace)) {
4150  if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
4151  if (rgengc_remembered_sweep(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);
4152  }
4153 #endif
4154  if (obj_free(objspace, (VALUE)p)) {
4155  final_slots++;
4156  }
4157  else {
4158  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
4159  heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
4160  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
4161  freed_slots++;
4162  asan_poison_object((VALUE)p);
4163  }
4164  break;
4165  }
4166 
4167  /* minor cases */
4168  case T_ZOMBIE:
4169  /* already counted */
4170  break;
4171  case T_NONE:
4172  empty_slots++; /* already freed */
4173  break;
4174  }
4175  }
4176  p++;
4177  bitset >>= 1;
4178  } while (bitset);
4179  }
4180  }
4181 
4182  gc_setup_mark_bits(sweep_page);
4183 
4184 #if GC_PROFILE_MORE_DETAIL
4185  if (gc_prof_enabled(objspace)) {
4186  gc_profile_record *record = gc_prof_record(objspace);
4187  record->removing_objects += final_slots + freed_slots;
4188  record->empty_objects += empty_slots;
4189  }
4190 #endif
4191  if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
4192  (int)rb_gc_count(),
4193  (int)sweep_page->total_slots,
4194  freed_slots, empty_slots, final_slots);
4195 
4196  sweep_page->free_slots = freed_slots + empty_slots;
4197  objspace->profile.total_freed_objects += freed_slots;
4199  sweep_page->final_slots += final_slots;
4200 
4202  rb_thread_t *th = GET_THREAD();
4203  if (th) {
4204  gc_finalize_deferred_register(objspace);
4205  }
4206  }
4207 
4208  gc_report(2, objspace, "page_sweep: end.\n");
4209 
4210  return freed_slots + empty_slots;
4211 }
4212 
4213 /* allocate additional minimum page to work */
4214 static void
4215 gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
4216 {
4217  if (!heap->free_pages && heap_increment(objspace, heap) == FALSE) {
4218  /* there is no free after page_sweep() */
4219  heap_set_increment(objspace, 1);
4220  if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
4221  rb_memerror();
4222  }
4223  }
4224 }
4225 
4226 static const char *
4227 gc_mode_name(enum gc_mode mode)
4228 {
4229  switch (mode) {
4230  case gc_mode_none: return "none";
4231  case gc_mode_marking: return "marking";
4232  case gc_mode_sweeping: return "sweeping";
4233  default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
4234  }
4235 }
4236 
4237 static void
4238 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
4239 {
4240 #if RGENGC_CHECK_MODE
4241  enum gc_mode prev_mode = gc_mode(objspace);
4242  switch (prev_mode) {
4243  case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
4244  case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
4245  case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
4246  }
4247 #endif
4248  if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
4249  gc_mode_set(objspace, mode);
4250 }
4251 
4252 static void
4253 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
4254 {
4255  heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
4256  heap->free_pages = NULL;
4257 #if GC_ENABLE_INCREMENTAL_MARK
4258  heap->pooled_pages = NULL;
4259  objspace->rincgc.pooled_slots = 0;
4260 #endif
4261  if (heap->using_page) {
4262  struct heap_page *page = heap->using_page;
4263  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
4264 
4265  RVALUE **p = &page->freelist;
4266  while (*p) {
4267  p = &(*p)->as.free.next;
4268  }
4269  *p = heap->freelist;
4270  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
4271  heap->using_page = NULL;
4272  }
4273  heap->freelist = NULL;
4274 }
4275 
4276 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
4277 __attribute__((noinline))
4278 #endif
4279 static void
4280 gc_sweep_start(rb_objspace_t *objspace)
4281 {
4282  gc_mode_transition(objspace, gc_mode_sweeping);
4283  gc_sweep_start_heap(objspace, heap_eden);
4284 }
4285 
4286 static void
4287 gc_sweep_finish(rb_objspace_t *objspace)
4288 {
4289  gc_report(1, objspace, "gc_sweep_finish\n");
4290 
4291  gc_prof_set_heap_info(objspace);
4292  heap_pages_free_unused_pages(objspace);
4293 
4294  /* if heap_pages has unused pages, then assign them to increment */
4295  if (heap_allocatable_pages < heap_tomb->total_pages) {
4296  heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
4297  }
4298 
4300  gc_mode_transition(objspace, gc_mode_none);
4301 
4302 #if RGENGC_CHECK_MODE >= 2
4303  gc_verify_internal_consistency(objspace);
4304 #endif
4305 }
4306 
4307 static int
4308 gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
4309 {
4310  struct heap_page *sweep_page = heap->sweeping_page;
4311  int unlink_limit = 3;
4312 #if GC_ENABLE_INCREMENTAL_MARK
4313  int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
4314 
4315  gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
4316 #else
4317  gc_report(2, objspace, "gc_sweep_step\n");
4318 #endif
4319 
4320  if (sweep_page == NULL) return FALSE;
4321 
4322 #if GC_ENABLE_LAZY_SWEEP
4323  gc_prof_sweep_timer_start(objspace);
4324 #endif
4325 
4326  do {
4327  int free_slots = gc_page_sweep(objspace, heap, sweep_page);
4328  heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
4329 
4330  if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
4332  unlink_limit > 0) {
4334  unlink_limit--;
4335  /* there are no living objects -> move this page to tomb heap */
4336  heap_unlink_page(objspace, heap, sweep_page);
4337  heap_add_page(objspace, heap_tomb, sweep_page);
4338  }
4339  else if (free_slots > 0) {
4340 #if GC_ENABLE_INCREMENTAL_MARK
4341  if (need_pool) {
4342  if (heap_add_poolpage(objspace, heap, sweep_page)) {
4343  need_pool = FALSE;
4344  }
4345  }
4346  else {
4347  heap_add_freepage(heap, sweep_page);
4348  break;
4349  }
4350 #else
4351  heap_add_freepage(heap, sweep_page);
4352  break;
4353 #endif
4354  }
4355  else {
4356  sweep_page->free_next = NULL;
4357  }
4358  } while ((sweep_page = heap->sweeping_page));
4359 
4360  if (!heap->sweeping_page) {
4361  gc_sweep_finish(objspace);
4362  }
4363 
4364 #if GC_ENABLE_LAZY_SWEEP
4365  gc_prof_sweep_timer_stop(objspace);
4366 #endif
4367 
4368  return heap->free_pages != NULL;
4369 }
4370 
4371 static void
4372 gc_sweep_rest(rb_objspace_t *objspace)
4373 {
4374  rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */
4375 
4376  while (has_sweeping_pages(heap)) {
4377  gc_sweep_step(objspace, heap);
4378  }
4379 }
4380 
4381 static void
4382 gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
4383 {
4384  GC_ASSERT(dont_gc == FALSE);
4385  if (!GC_ENABLE_LAZY_SWEEP) return;
4386 
4387  gc_enter(objspace, "sweep_continue");
4388 #if USE_RGENGC
4389  if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
4390  gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
4391  }
4392 #endif
4393  gc_sweep_step(objspace, heap);
4394  gc_exit(objspace, "sweep_continue");
4395 }
4396 
4397 static void
4398 gc_sweep(rb_objspace_t *objspace)
4399 {
4400  const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
4401 
4402  gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4403 
4404  if (immediate_sweep) {
4405 #if !GC_ENABLE_LAZY_SWEEP
4406  gc_prof_sweep_timer_start(objspace);
4407 #endif
4408  gc_sweep_start(objspace);
4409  gc_sweep_rest(objspace);
4410 #if !GC_ENABLE_LAZY_SWEEP
4411  gc_prof_sweep_timer_stop(objspace);
4412 #endif
4413  }
4414  else {
4415  struct heap_page *page = NULL;
4416  gc_sweep_start(objspace);
4417 
4418  list_for_each(&heap_eden->pages, page, page_node) {
4419  page->flags.before_sweep = TRUE;
4420  }
4421  gc_sweep_step(objspace, heap_eden);
4422  }
4423 
4424  gc_heap_prepare_minimum_pages(objspace, heap_eden);
4425 }
4426 
4427 /* Marking - Marking stack */
4428 
4429 static stack_chunk_t *
4430 stack_chunk_alloc(void)
4431 {
4432  stack_chunk_t *res;
4433 
4434  res = malloc(sizeof(stack_chunk_t));
4435  if (!res)
4436  rb_memerror();
4437 
4438  return res;
4439 }
4440 
4441 static inline int
4442 is_mark_stack_empty(mark_stack_t *stack)
4443 {
4444  return stack->chunk == NULL;
4445 }
4446 
4447 static size_t
4448 mark_stack_size(mark_stack_t *stack)
4449 {
4450  size_t size = stack->index;
4451  stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
4452 
4453  while (chunk) {
4454  size += stack->limit;
4455  chunk = chunk->next;
4456  }
4457  return size;
4458 }
4459 
4460 static void
4461 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
4462 {
4463  chunk->next = stack->cache;
4464  stack->cache = chunk;
4465  stack->cache_size++;
4466 }
4467 
4468 static void
4469 shrink_stack_chunk_cache(mark_stack_t *stack)
4470 {
4471  stack_chunk_t *chunk;
4472 
4473  if (stack->unused_cache_size > (stack->cache_size/2)) {
4474  chunk = stack->cache;
4475  stack->cache = stack->cache->next;
4476  stack->cache_size--;
4477  free(chunk);
4478  }
4479  stack->unused_cache_size = stack->cache_size;
4480 }
4481 
4482 static void
4483 push_mark_stack_chunk(mark_stack_t *stack)
4484 {
4485  stack_chunk_t *next;
4486 
4487  GC_ASSERT(stack->index == stack->limit);
4488 
4489  if (stack->cache_size > 0) {
4490  next = stack->cache;
4491  stack->cache = stack->cache->next;
4492  stack->cache_size--;
4493  if (stack->unused_cache_size > stack->cache_size)
4494  stack->unused_cache_size = stack->cache_size;
4495  }
4496  else {
4497  next = stack_chunk_alloc();
4498  }
4499  next->next = stack->chunk;
4500  stack->chunk = next;
4501  stack->index = 0;
4502 }
4503 
4504 static void
4505 pop_mark_stack_chunk(mark_stack_t *stack)
4506 {
4507  stack_chunk_t *prev;
4508 
4509  prev = stack->chunk->next;
4510  GC_ASSERT(stack->index == 0);
4511  add_stack_chunk_cache(stack, stack->chunk);
4512  stack->chunk = prev;
4513  stack->index = stack->limit;
4514 }
4515 
4516 static void
4517 free_stack_chunks(mark_stack_t *stack)
4518 {
4519  stack_chunk_t *chunk = stack->chunk;
4520  stack_chunk_t *next = NULL;
4521 
4522  while (chunk != NULL) {
4523  next = chunk->next;
4524  free(chunk);
4525  chunk = next;
4526  }
4527 }
4528 
4529 static void
4530 push_mark_stack(mark_stack_t *stack, VALUE data)
4531 {
4532  if (stack->index == stack->limit) {
4533  push_mark_stack_chunk(stack);
4534  }
4535  stack->chunk->data[stack->index++] = data;
4536 }
4537 
4538 static int
4539 pop_mark_stack(mark_stack_t *stack, VALUE *data)
4540 {
4541  if (is_mark_stack_empty(stack)) {
4542  return FALSE;
4543  }
4544  if (stack->index == 1) {
4545  *data = stack->chunk->data[--stack->index];
4546  pop_mark_stack_chunk(stack);
4547  }
4548  else {
4549  *data = stack->chunk->data[--stack->index];
4550  }
4551  return TRUE;
4552 }
4553 
4554 #if GC_ENABLE_INCREMENTAL_MARK
4555 static int
4556 invalidate_mark_stack_chunk(stack_chunk_t *chunk, int limit, VALUE obj)
4557 {
4558  int i;
4559  for (i=0; i<limit; i++) {
4560  if (chunk->data[i] == obj) {
4561  chunk->data[i] = Qundef;
4562  return TRUE;
4563  }
4564  }
4565  return FALSE;
4566 }
4567 
4568 static void
4569 invalidate_mark_stack(mark_stack_t *stack, VALUE obj)
4570 {
4571  stack_chunk_t *chunk = stack->chunk;
4572  int limit = stack->index;
4573 
4574  while (chunk) {
4575  if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
4576  chunk = chunk->next;
4577  limit = stack->limit;
4578  }
4579  rb_bug("invalid_mark_stack: unreachable");
4580 }
4581 #endif
4582 
4583 static void
4584 init_mark_stack(mark_stack_t *stack)
4585 {
4586  int i;
4587 
4588  MEMZERO(stack, mark_stack_t, 1);
4589  stack->index = stack->limit = STACK_CHUNK_SIZE;
4590  stack->cache_size = 0;
4591 
4592  for (i=0; i < 4; i++) {
4593  add_stack_chunk_cache(stack, stack_chunk_alloc());
4594  }
4595  stack->unused_cache_size = stack->cache_size;
4596 }
4597 
4598 /* Marking */
4599 
4600 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
4601 
4602 #define STACK_START (ec->machine.stack_start)
4603 #define STACK_END (ec->machine.stack_end)
4604 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
4605 
4606 #ifdef __EMSCRIPTEN__
4607 #undef STACK_GROW_DIRECTION
4608 #define STACK_GROW_DIRECTION 1
4609 #endif
4610 
4611 #if STACK_GROW_DIRECTION < 0
4612 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
4613 #elif STACK_GROW_DIRECTION > 0
4614 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
4615 #else
4616 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
4617  : (size_t)(STACK_END - STACK_START + 1))
4618 #endif
4619 #if !STACK_GROW_DIRECTION
4621 int
4623 {
4624  VALUE *end;
4625  SET_MACHINE_STACK_END(&end);
4626 
4627  if (end > addr) return ruby_stack_grow_direction = 1;
4628  return ruby_stack_grow_direction = -1;
4629 }
4630 #endif
4631 
4632 size_t
4634 {
4636  SET_STACK_END;
4637  if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
4638  return STACK_LENGTH;
4639 }
4640 
4641 #define PREVENT_STACK_OVERFLOW 1
4642 #ifndef PREVENT_STACK_OVERFLOW
4643 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
4644 # define PREVENT_STACK_OVERFLOW 1
4645 #else
4646 # define PREVENT_STACK_OVERFLOW 0
4647 #endif
4648 #endif
#if PREVENT_STACK_OVERFLOW
/* TRUE when the machine stack has fewer than water_mark slots of headroom
 * left before STACK_LEVEL_MAX. */
static int
stack_check(rb_execution_context_t *ec, int water_mark)
{
    SET_STACK_END;

    size_t length = STACK_LENGTH;
    size_t maximum_length = STACK_LEVEL_MAX - water_mark;

    return length > maximum_length;
}
#else
#define stack_check(ec, water_mark) FALSE
#endif
4663 
4664 #define STACKFRAME_FOR_CALL_CFUNC 2048
4665 
4668 {
4669  return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
4670 }
4671 
4672 int
4674 {
4675  return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
4676 }
4677 
4678 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n));
4679 static void
4680 mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
4681 {
4682  VALUE v;
4683  while (n--) {
4684  v = *x;
4685  gc_mark_maybe(objspace, v);
4686  x++;
4687  }
4688 }
4689 
4690 static void
4691 gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end)
4692 {
4693  long n;
4694 
4695  if (end <= start) return;
4696  n = end - start;
4697  mark_locations_array(objspace, start, n);
4698 }
4699 
4700 void
4702 {
4703  gc_mark_locations(&rb_objspace, start, end);
4704 }
4705 
4706 static void
4707 gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
4708 {
4709  long i;
4710 
4711  for (i=0; i<n; i++) {
4712  gc_mark(objspace, values[i]);
4713  }
4714 }
4715 
4716 void
4717 rb_gc_mark_values(long n, const VALUE *values)
4718 {
4719  long i;
4720  rb_objspace_t *objspace = &rb_objspace;
4721 
4722  for (i=0; i<n; i++) {
4723  gc_mark_and_pin(objspace, values[i]);
4724  }
4725 }
4726 
4727 static void
4728 gc_mark_and_pin_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
4729 {
4730  long i;
4731 
4732  for (i=0; i<n; i++) {
4733  /* skip MOVED objects that are on the stack */
4734  if (is_markable_object(objspace, values[i]) && T_MOVED != BUILTIN_TYPE(values[i])) {
4735  gc_mark_and_pin(objspace, values[i]);
4736  }
4737  }
4738 }
4739 
4740 void
4741 rb_gc_mark_vm_stack_values(long n, const VALUE *values)
4742 {
4743  rb_objspace_t *objspace = &rb_objspace;
4744  gc_mark_and_pin_stack_values(objspace, n, values);
4745 }
4746 
4747 static int
4748 mark_value(st_data_t key, st_data_t value, st_data_t data)
4749 {
4750  rb_objspace_t *objspace = (rb_objspace_t *)data;
4751  gc_mark(objspace, (VALUE)value);
4752  return ST_CONTINUE;
4753 }
4754 
4755 static int
4756 mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
4757 {
4758  rb_objspace_t *objspace = (rb_objspace_t *)data;
4759  gc_mark_and_pin(objspace, (VALUE)value);
4760  return ST_CONTINUE;
4761 }
4762 
4763 static void
4764 mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
4765 {
4766  if (!tbl || tbl->num_entries == 0) return;
4767  st_foreach(tbl, mark_value, (st_data_t)objspace);
4768 }
4769 
4770 static void
4771 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
4772 {
4773  if (!tbl || tbl->num_entries == 0) return;
4774  st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
4775 }
4776 
4777 static int
4778 mark_key(st_data_t key, st_data_t value, st_data_t data)
4779 {
4780  rb_objspace_t *objspace = (rb_objspace_t *)data;
4781  gc_mark_and_pin(objspace, (VALUE)key);
4782  return ST_CONTINUE;
4783 }
4784 
4785 static void
4786 mark_set(rb_objspace_t *objspace, st_table *tbl)
4787 {
4788  if (!tbl) return;
4789  st_foreach(tbl, mark_key, (st_data_t)objspace);
4790 }
4791 
4792 static void
4793 mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
4794 {
4795  if (!tbl) return;
4796  st_foreach(tbl, mark_value, (st_data_t)objspace);
4797 }
4798 
4799 void
4801 {
4802  mark_set(&rb_objspace, tbl);
4803 }
4804 
4805 static int
4806 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
4807 {
4808  rb_objspace_t *objspace = (rb_objspace_t *)data;
4809 
4810  gc_mark(objspace, (VALUE)key);
4811  gc_mark(objspace, (VALUE)value);
4812  return ST_CONTINUE;
4813 }
4814 
4815 static int
4816 pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
4817 {
4818  rb_objspace_t *objspace = (rb_objspace_t *)data;
4819 
4820  gc_mark_and_pin(objspace, (VALUE)key);
4821  gc_mark_and_pin(objspace, (VALUE)value);
4822  return ST_CONTINUE;
4823 }
4824 
4825 static int
4826 pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
4827 {
4828  rb_objspace_t *objspace = (rb_objspace_t *)data;
4829 
4830  gc_mark_and_pin(objspace, (VALUE)key);
4831  gc_mark(objspace, (VALUE)value);
4832  return ST_CONTINUE;
4833 }
4834 
4835 static void
4836 mark_hash(rb_objspace_t *objspace, VALUE hash)
4837 {
4838  if (rb_hash_compare_by_id_p(hash)) {
4839  rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
4840  }
4841  else {
4842  rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
4843  }
4844 
4845  if (RHASH_AR_TABLE_P(hash)) {
4846  if (objspace->mark_func_data == NULL && RHASH_TRANSIENT_P(hash)) {
4848  }
4849  }
4850  else {
4851  VM_ASSERT(!RHASH_TRANSIENT_P(hash));
4852  }
4853  gc_mark(objspace, RHASH(hash)->ifnone);
4854 }
4855 
4856 static void
4857 mark_st(rb_objspace_t *objspace, st_table *tbl)
4858 {
4859  if (!tbl) return;
4860  st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
4861 }
4862 
/* Public wrapper marking/pinning all keys and values of an st_table
 * via the global objspace.
 * NOTE(review): the function-name line is missing from this extraction
 * (the body implies a `void rb_...(st_table *tbl)` wrapper around
 * mark_st) -- verify against the full source. */
void
{
    mark_st(&rb_objspace, tbl);
}
4868 
/* Mark the objects reachable from a method entry: owner, defined class,
 * and the definition-specific references.
 * NOTE(review): several `case` labels and at least one statement appear
 * to be missing from this extraction (e.g. the labels that should
 * precede the bmethod.proc and refined.orig_me statements) -- verify
 * against the full source before editing. */
static void
mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
{
    const rb_method_definition_t *def = me->def;

    gc_mark(objspace, me->owner);
    gc_mark(objspace, me->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
            gc_mark(objspace, (VALUE)def->body.iseq.cref);
            break;
          /* NOTE(review): a case label is missing here. */
          case VM_METHOD_TYPE_IVAR:
            gc_mark(objspace, def->body.attr.location);
            break;
          /* NOTE(review): the case label for the bmethod branch is missing. */
            gc_mark(objspace, def->body.bmethod.proc);
            break;
          case VM_METHOD_TYPE_ALIAS:
            gc_mark(objspace, (VALUE)def->body.alias.original_me);
            return;
          /* NOTE(review): the case label for the refined branch is missing. */
            gc_mark(objspace, (VALUE)def->body.refined.orig_me);
            gc_mark(objspace, (VALUE)def->body.refined.owner);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          /* NOTE(review): additional case labels appear to be missing here. */
          case VM_METHOD_TYPE_UNDEF:
            break;
        }
    }
}
4908 
4909 static enum rb_id_table_iterator_result
4910 mark_method_entry_i(VALUE me, void *data)
4911 {
4912  rb_objspace_t *objspace = (rb_objspace_t *)data;
4913 
4914  gc_mark(objspace, me);
4915  return ID_TABLE_CONTINUE;
4916 }
4917 
4918 static void
4919 mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4920 {
4921  if (tbl) {
4922  rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
4923  }
4924 }
4925 
4926 static enum rb_id_table_iterator_result
4927 mark_const_entry_i(VALUE value, void *data)
4928 {
4929  const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
4930  rb_objspace_t *objspace = data;
4931 
4932  gc_mark(objspace, ce->value);
4933  gc_mark(objspace, ce->file);
4934  return ID_TABLE_CONTINUE;
4935 }
4936 
4937 static void
4938 mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4939 {
4940  if (!tbl) return;
4941  rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
4942 }
4943 
/* Normalize machine-stack bounds so that start < end regardless of the
 * direction the stack grows on this platform.  `appendix` widens the
 * upper bound by that many VALUE slots when the stack grows upward.
 * When STACK_GROW_DIRECTION is 0 the direction is decided at run time
 * by comparing STACK_END against STACK_START. */
#if STACK_GROW_DIRECTION < 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
#elif STACK_GROW_DIRECTION > 0
#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
#else
#define GET_STACK_BOUNDS(start, end, appendix) \
    ((STACK_END < STACK_START) ? \
     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
#endif

static void mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
                                 const VALUE *stack_start, const VALUE *stack_end);
4956 
/* Conservatively mark the current machine context: spill the CPU
 * registers into a jmp_buf via setjmp, scan that buffer, then scan the
 * machine stack of the running execution context.
 * NOTE(review): one line is missing from this extraction between the
 * local declarations and the memset (presumably a register-window flush
 * on platforms that need it) -- verify against the full source. */
static void
mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
{
    union {
        rb_jmp_buf j;
        VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
    } save_regs_gc_mark;
    VALUE *stack_start, *stack_end;

    memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
    /* This assumes that all registers are saved into the jmp_buf (and stack) */
    rb_setjmp(save_regs_gc_mark.j);

    /* SET_STACK_END must be called in this function because
     * the stack frame of this function may contain
     * callee save registers and they should be marked. */
    SET_STACK_END;
    GET_STACK_BOUNDS(stack_start, stack_end, 1);

    mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));

    mark_stack_locations(objspace, ec, stack_start, stack_end);
}
4981 
/* Conservatively mark the machine stack of an execution context `ec`
 * (no register scan; appendix 0).
 * NOTE(review): the function-name line is missing from this extraction
 * (the body implies a public `void rb_...(const rb_execution_context_t
 * *ec)` entry point) -- verify against the full source. */
void
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE *stack_start, *stack_end;

    GET_STACK_BOUNDS(stack_start, stack_end, 0);
    mark_stack_locations(objspace, ec, stack_start, stack_end);
}
4991 
/* Conservatively mark every word in [stack_start, stack_end).  On
 * m68k an extra pass offset by 2 bytes catches VALUEs stored at
 * half-word alignment.  `ec` is not used in the visible code. */
static void
mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
                     const VALUE *stack_start, const VALUE *stack_end)
{

    gc_mark_locations(objspace, stack_start, stack_end);

#if defined(__mc68000__)
    gc_mark_locations(objspace,
                      (VALUE*)((char*)stack_start + 2),
                      (VALUE*)((char*)stack_end - 2));
#endif
}
5005 
/* Public wrapper marking/pinning an st_table's contents via the global
 * objspace.
 * NOTE(review): the function-name line is missing from this extraction
 * (the body implies a `void rb_mark_tbl(st_table *tbl)`-style wrapper)
 * -- verify against the full source. */
void
{
    mark_tbl(&rb_objspace, tbl);
}
5011 
/* Public wrapper marking an st_table's contents WITHOUT pinning
 * (entries may be moved by compaction).
 * NOTE(review): the function-name line is missing from this extraction
 * -- verify against the full source. */
void
{
    mark_tbl_no_pin(&rb_objspace, tbl);
}
5017 
/* Mark `obj` only if it actually points into the GC heap; used for
 * values found by conservative (stack) scanning that may or may not be
 * object references.  Anything found this way is pinned, because a
 * stack word cannot be updated by the compactor.
 * NOTE(review): one line is missing from this extraction inside the
 * trailing `if (ptr)` re-poison branch (presumably an assertion about
 * the poisoned slot's type) -- verify against the full source. */
static void
gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
{
    (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        /* remember whether the slot was ASan-poisoned so we can restore it */
        void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
        asan_unpoison_object(obj, false);

        /* Garbage can live on the stack, so do not mark or pin */
        switch (BUILTIN_TYPE(obj)) {
          case T_MOVED:
          case T_ZOMBIE:
          case T_NONE:
            break;
          default:
            gc_mark_and_pin(objspace, obj);
            break;
        }

        if (ptr) {
            asan_poison_object(obj);
        }
    }
}
5044 
/* Public wrapper for conservative "maybe a reference" marking via the
 * global objspace.
 * NOTE(review): the function-name line is missing from this extraction
 * (the body implies `void rb_gc_mark_maybe(VALUE obj)`) -- verify
 * against the full source. */
void
{
    gc_mark_maybe(&rb_objspace, obj);
}
5050 
/* Set OBJ's mark bit.  Returns 0 when OBJ was already marked, 1 when
 * this call is the one that marked it.
 * NOTE(review): the statement that actually sets the bitmap bit appears
 * to be missing from this extraction (between the early return and
 * `return 1`) -- verify against the full source. */
static inline int
gc_mark_set(rb_objspace_t *objspace, VALUE obj)
{
    if (RVALUE_MARKED(obj)) return 0;
    return 1;
}
5058 
#if USE_RGENGC
/* Record a write-barrier-unprotected (shady) object as uncollectible so
 * generational GC keeps re-scanning it.  Returns TRUE when the object
 * was newly remembered, FALSE when it was already recorded.
 * NOTE(review): several lines are missing from this extraction -- the
 * `if` that tests/sets the page's uncollectible bitmap (whose closing
 * brace is visible below) -- verify against the full source. */
static int
gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);

#if RGENGC_PROFILE > 0
        objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif
        return TRUE;
    }
    else {
        return FALSE;
    }
}
#endif
5084 
5085 static void
5086 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
5087 {
5088 #if USE_RGENGC
5089  const VALUE old_parent = objspace->rgengc.parent_object;
5090 
5091  if (old_parent) { /* parent object is old */
5092  if (RVALUE_WB_UNPROTECTED(obj)) {
5093  if (gc_remember_unprotected(objspace, obj)) {
5094  gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5095  }
5096  }
5097  else {
5098  if (!RVALUE_OLD_P(obj)) {
5099  if (RVALUE_MARKED(obj)) {
5100  /* An object pointed from an OLD object should be OLD. */
5101  gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5102  RVALUE_AGE_SET_OLD(objspace, obj);
5103  if (is_incremental_marking(objspace)) {
5104  if (!RVALUE_MARKING(obj)) {
5105  gc_grey(objspace, obj);
5106  }
5107  }
5108  else {
5109  rgengc_remember(objspace, obj);
5110  }
5111  }
5112  else {
5113  gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5114  RVALUE_AGE_SET_CANDIDATE(objspace, obj);
5115  }
5116  }
5117  }
5118  }
5119 
5120  GC_ASSERT(old_parent == objspace->rgengc.parent_object);
5121 #endif
5122 }
5123 
/* Push a freshly marked object onto the mark stack so its children are
 * traversed later ("grey" in tri-color terms).
 * NOTE(review): the body of the incremental-marking branch is missing
 * from this extraction (presumably setting the per-object marking bit)
 * -- verify against the full source. */
static void
gc_grey(rb_objspace_t *objspace, VALUE obj)
{
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
    if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
#endif

#if GC_ENABLE_INCREMENTAL_MARK
    if (is_incremental_marking(objspace)) {
    }
#endif

    push_mark_stack(&objspace->mark_stack, obj);
}
5140 
/* Age a just-marked object: WB-protected objects get their age bumped
 * (promotion toward old generation); already-old objects are flagged
 * uncollectible on their page during a full mark.  Also counts the
 * object in objspace->marked_slots.
 * NOTE(review): one line is missing from this extraction at the start
 * of the full-marking branch (likely an assertion) -- verify against
 * the full source. */
static void
gc_aging(rb_objspace_t *objspace, VALUE obj)
{
#if USE_RGENGC
    struct heap_page *page = GET_HEAP_PAGE(obj);

    GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
    check_rvalue_consistency(obj);

    if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
        if (!RVALUE_OLD_P(obj)) {
            gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
            RVALUE_AGE_INC(objspace, obj);
        }
        else if (is_full_marking(objspace)) {
            RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
        }
    }
    check_rvalue_consistency(obj);
#endif /* USE_RGENGC */

    objspace->marked_slots++;
}
5165 
5166 NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
5167 
5168 static void
5169 gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
5170 {
5171  if (LIKELY(objspace->mark_func_data == NULL)) {
5172  rgengc_check_relation(objspace, obj);
5173  if (!gc_mark_set(objspace, obj)) return; /* already marked */
5174  if (RB_TYPE_P(obj, T_NONE)) rb_bug("try to mark T_NONE object"); /* check here will help debugging */
5175  gc_aging(objspace, obj);
5176  gc_grey(objspace, obj);
5177  }
5178  else {
5179  objspace->mark_func_data->mark_func(obj, objspace->mark_func_data->data);
5180  }
5181 }
5182 
/* Pin OBJ so compaction will not relocate it; only needed while a
 * compacting GC is in progress.
 * NOTE(review): the body of the during_compacting branch is missing
 * from this extraction (presumably setting the pin bitmap bit) --
 * verify against the full source. */
static inline void
gc_pin(rb_objspace_t *objspace, VALUE obj)
{
    GC_ASSERT(is_markable_object(objspace, obj));
    if (UNLIKELY(objspace->flags.during_compacting)) {
    }
}
5191 
5192 static inline void
5193 gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
5194 {
5195  if (!is_markable_object(objspace, obj)) return;
5196  gc_pin(objspace, obj);
5197  gc_mark_ptr(objspace, obj);
5198 }
5199 
5200 static inline void
5201 gc_mark(rb_objspace_t *objspace, VALUE obj)
5202 {
5203  if (!is_markable_object(objspace, obj)) return;
5204  gc_mark_ptr(objspace, obj);
5205 }
5206 
/* Public wrapper marking `ptr` as a movable reference via the global
 * objspace.
 * NOTE(review): the function-name line is missing from this extraction
 * (the gc_mark call suggests the movable-marking entry point) --
 * verify against the full source. */
void
{
    gc_mark(&rb_objspace, ptr);
}
5212 
/* Public wrapper marking `ptr` AND pinning it against compaction via
 * the global objspace.
 * NOTE(review): the function-name line is missing from this extraction
 * -- verify against the full source. */
void
{
    gc_mark_and_pin(&rb_objspace, ptr);
}
5218 
/* CAUTION: THIS FUNCTION ENABLE *ONLY BEFORE* SWEEPING.
 * This function is only for GC_END_MARK timing.
 */

/* Returns TRUE when `obj`'s mark bit is set; only meaningful between
 * the end of marking and the start of sweeping.
 * NOTE(review): the function-name line is missing from this extraction
 * (an `int rb_...(VALUE obj)` predicate) -- verify against the full
 * source. */
int
{
    return RVALUE_MARKED(obj) ? TRUE : FALSE;
}
5228 
5229 static inline void
5230 gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
5231 {
5232 #if USE_RGENGC
5233  if (RVALUE_OLD_P(obj)) {
5234  objspace->rgengc.parent_object = obj;
5235  }
5236  else {
5237  objspace->rgengc.parent_object = Qfalse;
5238  }
5239 #endif
5240 }
5241 
/* Mark the references held by an internal (T_IMEMO) object, dispatching
 * on its imemo subtype.
 * NOTE(review): the statement bodies for imemo_iseq and
 * imemo_parser_strterm are missing from this extraction -- verify
 * against the full source. */
static void
gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_env:
        {
            const rb_env_t *env = (const rb_env_t *)obj;
            GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
            gc_mark_values(objspace, (long)env->env_size, env->env);
            VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
            gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
            gc_mark(objspace, (VALUE)env->iseq);
        }
        return;
      case imemo_cref:
        gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
        gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
        return;
      case imemo_svar:
        gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
        return;
      case imemo_throw_data:
        gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        return;
      case imemo_ifunc:
        /* ifunc data is an opaque pointer that may or may not be a VALUE */
        gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
        return;
      case imemo_memo:
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
        gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
        return;
      case imemo_ment:
        mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        return;
      case imemo_iseq:
        /* NOTE(review): the iseq marking call is missing here. */
        return;
      case imemo_tmpbuf:
        {
            const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
            do {
                rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
            } while ((m = m->next) != NULL);
        }
        return;
      case imemo_ast:
        rb_ast_mark(&RANY(obj)->as.imemo.ast);
        return;
      case imemo_parser_strterm:
        /* NOTE(review): a statement appears to be missing here. */
        return;
#if VM_CHECK_MODE > 0
      default:
        VM_UNREACHABLE(gc_mark_imemo);
#endif
    }
}
5304 
/* Mark everything directly referenced by OBJ, dispatching on its
 * built-in type.  Called for each object popped off the mark stack.
 * NOTE(review): multiple lines are missing from this extraction
 * (generic-ivar marking, T_NODE handling, the array/object element
 * pointer and loop-bound declarations, transient-heap handling, and a
 * GC_DEBUG statement); each gap is flagged inline -- verify against the
 * full source before editing. */
static void
gc_mark_children(rb_objspace_t *objspace, VALUE obj)
{
    register RVALUE *any = RANY(obj);
    gc_mark_set_parent(objspace, obj);

    if (FL_TEST(obj, FL_EXIVAR)) {
        /* NOTE(review): the generic instance-variable marking call is
         * missing here. */
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_FLOAT:
      case T_BIGNUM:
      case T_SYMBOL:
        /* Not immediates, but does not have references and singleton
         * class */
        return;

      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        /* NOTE(review): the T_NODE handling statement is missing here. */
        break;

      case T_IMEMO:
        gc_mark_imemo(objspace, obj);
        return;
    }

    /* every remaining type has a class reference */
    gc_mark(objspace, any->as.basic.klass);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }
        if (!RCLASS_EXT(obj)) break;
        mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        /* ivar table entries may be moved by compaction -> no pin */
        mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
        mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        break;

      case T_ICLASS:
        if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
            mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }
        if (!RCLASS_EXT(obj)) break;
        mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        break;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            /* shared arrays only reference their root */
            VALUE root = any->as.array.as.heap.aux.shared_root;
            gc_mark(objspace, root);
        }
        else {
            long i, len = RARRAY_LEN(obj);
            /* NOTE(review): the declaration of `ptr` (the element
             * pointer) is missing here. */
            for (i=0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (objspace->mark_func_data == NULL) {
                /* NOTE(review): transient-heap handling lines (including
                 * the opening of an inner `if`) are missing here. */
            }
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj);
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
            gc_mark(objspace, any->as.string.as.heap.aux.shared);
        }
        break;

      case T_DATA:
        {
            void *const ptr = DATA_PTR(obj);
            if (ptr) {
                /* typed data supplies dmark via its type struct */
                RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
                    any->as.typeddata.type->function.dmark :
                    any->as.data.dmark;
                if (mark_func) (*mark_func)(ptr);
            }
        }
        break;

      case T_OBJECT:
        {
            const VALUE * const ptr = ROBJECT_IVPTR(obj);

            if (ptr) {
                /* NOTE(review): the declaration of the loop bounds
                 * (`i`, `len`) is missing here. */
                for (i = 0; i < len; i++) {
                    gc_mark(objspace, ptr[i]);
                }

                if (objspace->mark_func_data == NULL &&
                    ROBJ_TRANSIENT_P(obj)) {
                    /* NOTE(review): the transient-heap mark call is
                     * missing here. */
                }
            }
        }
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            gc_mark(objspace, any->as.file.fptr->pathv);
            gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
            gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
            gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            gc_mark(objspace, any->as.file.fptr->encs.ecopts);
            gc_mark(objspace, any->as.file.fptr->write_lock);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, any->as.regexp.src);
        break;

      case T_MATCH:
        gc_mark(objspace, any->as.match.regexp);
        if (any->as.match.str) {
            gc_mark(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, any->as.rational.num);
        gc_mark(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        gc_mark(objspace, any->as.complex.real);
        gc_mark(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            long i;
            const long len = RSTRUCT_LEN(obj);
            const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);

            for (i=0; i<len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (objspace->mark_func_data == NULL &&
                /* NOTE(review): the rest of this condition and the
                 * transient-heap mark call are missing here. */
            }
        }
        break;

      default:
#if GC_DEBUG
        /* NOTE(review): a GC_DEBUG statement is missing here. */
#endif
        if (BUILTIN_TYPE(obj) == T_MOVED)   rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_NONE)   rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)any,
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
    }
}
5484 
/* Drain the mark stack, marking each popped object's children.  In
 * incremental mode, stop after roughly `count` units of work (popped
 * objects plus newly marked slots) so marking can be resumed later.
 * Returns TRUE when the stack was fully drained, FALSE when work
 * remains.
 * NOTE(review): one line is missing from this extraction in the
 * incremental branch (presumably clearing the per-object marking bit
 * after its children are traversed) -- verify against the full source. */
static inline int
gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj;
#if GC_ENABLE_INCREMENTAL_MARK
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;
#endif

    while (pop_mark_stack(mstack, &obj)) {
        if (obj == Qundef) continue; /* skip */

        if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
        }
        gc_mark_children(objspace, obj);

#if GC_ENABLE_INCREMENTAL_MARK
        if (incremental) {
            if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            }
            popped_count++;

            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
                break;
            }
        }
        else {
            /* just ignore marking bits */
        }
#endif
    }

    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
        return TRUE;
    }
    else {
        return FALSE;
    }
}
5535 
5536 static int
5537 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
5538 {
5539  return gc_mark_stacked_objects(objspace, TRUE, count);
5540 }
5541 
5542 static int
5543 gc_mark_stacked_objects_all(rb_objspace_t *objspace)
5544 {
5545  return gc_mark_stacked_objects(objspace, FALSE, 0);
5546 }
5547 
#if PRINT_ROOT_TICKS
/* Per-root-category timing storage; categories are filled in order by
 * gc_mark_roots and printed at process exit. */
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];

/* atexit handler: dump accumulated per-category mark timings; stops at
 * the first empty category slot. */
static void
show_mark_ticks(void)
{
    int i;
    fprintf(stderr, "mark ticks result:\n");
    for (i=0; i<MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];
        if (category) {
            fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
        }
        else {
            break;
        }
    }
}

#endif /* PRINT_ROOT_TICKS */
5570 
/* Mark all GC roots: the VM, finalizer table, machine context, global
 * variable list, end procs, global/object-id tables.  `categoryp`, when
 * non-NULL, receives the name of the root category currently being
 * marked (used for diagnostics and tick profiling).
 * NOTE(review): several lines are missing from this extraction -- the
 * declaration of `ec` used below, the statement under the "global_tbl"
 * checkpoint, and one or two lines before "finish" -- verify against
 * the full source. */
static void
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
{
    struct gc_list *list;
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

#if PRINT_ROOT_TICKS
    tick_t start_tick = tick();
    int tick_count = 0;
    const char *prev_category = 0;

    if (mark_ticks_categories[0] == 0) {
        atexit(show_mark_ticks);
    }
#endif

    if (categoryp) *categoryp = "xxx";

#if USE_RGENGC
    objspace->rgengc.parent_object = Qfalse;
#endif

#if PRINT_ROOT_TICKS
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
        tick_count++; \
    } \
    prev_category = category; \
    start_tick = tick(); \
} while (0)
#else /* PRINT_ROOT_TICKS */
#define MARK_CHECKPOINT_PRINT_TICK(category)
#endif

#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \
} while (0)

    MARK_CHECKPOINT("vm");
    SET_STACK_END;
    rb_vm_mark(vm);
    if (vm->self) gc_mark(objspace, vm->self);

    MARK_CHECKPOINT("finalizers");
    mark_finalizer_tbl(objspace, finalizer_table);

    MARK_CHECKPOINT("machine_context");
    mark_current_machine_context(objspace, ec);

    /* mark protected global variables */
    MARK_CHECKPOINT("global_list");
    for (list = global_list; list; list = list->next) {
        gc_mark_maybe(objspace, *list->varptr);
    }

    MARK_CHECKPOINT("end_proc");
    rb_mark_end_proc();

    MARK_CHECKPOINT("global_tbl");
    /* NOTE(review): the global-table marking call is missing here. */

    MARK_CHECKPOINT("object_id");
    rb_gc_mark(objspace->next_object_id);
    mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */

    /* NOTE(review): one or more lines are missing here. */

    MARK_CHECKPOINT("finish");
#undef MARK_CHECKPOINT
}
5646 
#if RGENGC_CHECK_MODE >= 4

/* A "rootsig" tags a root-category string pointer by setting its low
 * bit, so the same VALUE slot can hold either an object reference or a
 * root-category marker (object pointers are at least 2-byte aligned,
 * so the low bit is free). */
#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj)   ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj)  ((const char *)((VALUE)(obj) >> 1))

/* Growable list of referrers (objects or rootsigs) for one referee. */
struct reflist {
    VALUE *list;  /* referrer entries */
    int pos;      /* number of entries in use */
    int size;     /* allocated capacity */
};
5658 
5659 static struct reflist *
5660 reflist_create(VALUE obj)
5661 {
5662  struct reflist *refs = xmalloc(sizeof(struct reflist));
5663  refs->size = 1;
5664  refs->list = ALLOC_N(VALUE, refs->size);
5665  refs->list[0] = obj;
5666  refs->pos = 1;
5667  return refs;
5668 }
5669 
5670 static void
5671 reflist_destruct(struct reflist *refs)
5672 {
5673  xfree(refs->list);
5674  xfree(refs);
5675 }
5676 
5677 static void
5678 reflist_add(struct reflist *refs, VALUE obj)
5679 {
5680  if (refs->pos == refs->size) {
5681  refs->size *= 2;
5682  SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
5683  }
5684 
5685  refs->list[refs->pos++] = obj;
5686 }
5687 
5688 static void
5689 reflist_dump(struct reflist *refs)
5690 {
5691  int i;
5692  for (i=0; i<refs->pos; i++) {
5693  VALUE obj = refs->list[i];
5694  if (IS_ROOTSIG(obj)) { /* root */
5695  fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
5696  }
5697  else {
5698  fprintf(stderr, "<%s>", obj_info(obj));
5699  }
5700  if (i+1 < refs->pos) fprintf(stderr, ", ");
5701  }
5702 }
5703 
5704 static int
5705 reflist_referred_from_machine_context(struct reflist *refs)
5706 {
5707  int i;
5708  for (i=0; i<refs->pos; i++) {
5709  VALUE obj = refs->list[i];
5710  if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
5711  }
5712  return 0;
5713 }
5714 
/* State for building the whole-heap reverse-reference table.
 * NOTE(review): at least one field (the mark stack used by allrefs_i /
 * allrefs_roots_i below) is missing from this extraction -- verify
 * against the full source. */
struct allrefs {
    rb_objspace_t *objspace;
    /* a -> obj1
     * b -> obj1
     * c -> obj1
     * c -> obj2
     * d -> obj3
     * #=> {obj1 => [a, b, c], obj2 => [c, d]}
     */
    struct st_table *references;  /* referee VALUE => struct reflist* of referrers */
    const char *category;         /* current root category (set by gc_mark_roots) */
    VALUE root_obj;               /* current referrer (object or rootsig) */
};
5729 
5730 static int
5731 allrefs_add(struct allrefs *data, VALUE obj)
5732 {
5733  struct reflist *refs;
5734 
5735  if (st_lookup(data->references, obj, (st_data_t *)&refs)) {
5736  reflist_add(refs, data->root_obj);
5737  return 0;
5738  }
5739  else {
5740  refs = reflist_create(data->root_obj);
5741  st_insert(data->references, obj, (st_data_t)refs);
5742  return 1;
5743  }
5744 }
5745 
5746 static void
5747 allrefs_i(VALUE obj, void *ptr)
5748 {
5749  struct allrefs *data = (struct allrefs *)ptr;
5750 
5751  if (allrefs_add(data, obj)) {
5752  push_mark_stack(&data->mark_stack, obj);
5753  }
5754 }
5755 
5756 static void
5757 allrefs_roots_i(VALUE obj, void *ptr)
5758 {
5759  struct allrefs *data = (struct allrefs *)ptr;
5760  if (strlen(data->category) == 0) rb_bug("!!!");
5761  data->root_obj = MAKE_ROOTSIG(data->category);
5762 
5763  if (allrefs_add(data, obj)) {
5764  push_mark_stack(&data->mark_stack, obj);
5765  }
5766 }
5767 
/* Build and return a table mapping every reachable object to the list
 * of its referrers (objects and root categories), by hooking the root
 * marking pass and then walking outward.  GC is disabled for the
 * duration.
 * NOTE(review): one line is missing from this extraction right after
 * gc_mark_roots (presumably restoring/popping the mark_func_data hook)
 * -- verify against the full source. */
static st_table *
objspace_allrefs(rb_objspace_t *objspace)
{
    struct allrefs data;
    struct mark_func_data_struct mfd;
    VALUE obj;
    int prev_dont_gc = dont_gc;
    dont_gc = TRUE;

    data.objspace = objspace;
    data.references = st_init_numtable();
    init_mark_stack(&data.mark_stack);

    mfd.mark_func = allrefs_roots_i;
    mfd.data = &data;

    /* traverse root objects */
    PUSH_MARK_FUNC_DATA(&mfd);
    objspace->mark_func_data = &mfd;
    gc_mark_roots(objspace, &data.category);

    /* traverse rest objects reachable from root objects */
    while (pop_mark_stack(&data.mark_stack, &obj)) {
        rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
    }
    free_stack_chunks(&data.mark_stack);

    dont_gc = prev_dont_gc;
    return data.references;
}
5799 
5800 static int
5801 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, void *ptr)
5802 {
5803  struct reflist *refs = (struct reflist *)value;
5804  reflist_destruct(refs);
5805  return ST_CONTINUE;
5806 }
5807 
5808 static void
5809 objspace_allrefs_destruct(struct st_table *refs)
5810 {
5811  st_foreach(refs, objspace_allrefs_destruct_i, 0);
5812  st_free_table(refs);
5813 }
5814 
5815 #if RGENGC_CHECK_MODE >= 5
5816 static int
5817 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
5818 {
5819  VALUE obj = (VALUE)k;
5820  struct reflist *refs = (struct reflist *)v;
5821  fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
5822  reflist_dump(refs);
5823  fprintf(stderr, "\n");
5824  return ST_CONTINUE;
5825 }
5826 
5827 static void
5828 allrefs_dump(rb_objspace_t *objspace)
5829 {
5830  fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
5831  st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
5832 }
5833 #endif
5834 
/* Post-mark consistency check: every object in the allrefs table should
 * be marked (or old); otherwise report its referrers.  References found
 * only via the machine stack are tolerated as conservative false
 * positives.
 * NOTE(review): the `if (...)` that guards the error-reporting body
 * (whose closing brace is visible below) is missing from this
 * extraction -- verify against the full source. */
static int
gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
{
    VALUE obj = k;
    struct reflist *refs = (struct reflist *)v;
    rb_objspace_t *objspace = (rb_objspace_t *)ptr;

    /* object should be marked or oldgen */
        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
        reflist_dump(refs);

        if (reflist_referred_from_machine_context(refs)) {
            fprintf(stderr, " (marked from machine stack).\n");
            /* marked from machine context can be false positive */
        }
        else {
            objspace->rgengc.error_count++;
            fprintf(stderr, "\n");
        }
    }
    return ST_CONTINUE;
}
5859 
5860 static void
5861 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
5862 {
5863  size_t saved_malloc_increase = objspace->malloc_params.increase;
5864 #if RGENGC_ESTIMATE_OLDMALLOC
5865  size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
5866 #endif
5867  VALUE already_disabled = rb_objspace_gc_disable(objspace);
5868 
5869  objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
5870 
5871  if (checker_func) {
5872  st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
5873  }
5874 
5875  if (objspace->rgengc.error_count > 0) {
5876 #if RGENGC_CHECK_MODE >= 5
5877  allrefs_dump(objspace);
5878 #endif
5879  if (checker_name) rb_bug("%s: GC has problem.", checker_name);
5880  }
5881 
5882  objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
5883  objspace->rgengc.allrefs_table = 0;
5884 
5885  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
5886  objspace->malloc_params.increase = saved_malloc_increase;
5887 #if RGENGC_ESTIMATE_OLDMALLOC
5888  objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
5889 #endif
5890 }
5891 #endif /* RGENGC_CHECK_MODE >= 4 */
5892 
5898 
5899 #if USE_RGENGC
5903 #endif
5904 };
5905 
5906 #if USE_RGENGC
/* Consistency check: a young object reachable from an old parent must
 * be remembered (by parent or child) or uncollectible; otherwise it is
 * a write-barrier miss (O->Y) and the error count is bumped.
 * NOTE(review): the declaration of `data` (cast of `ptr` to the
 * verify-consistency struct) is missing from this extraction -- verify
 * against the full source. */
static void
check_generation_i(const VALUE child, void *ptr)
{
    const VALUE parent = data->parent;

    if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));

    if (!RVALUE_OLD_P(child)) {
        if (!RVALUE_REMEMBERED(parent) &&
            !RVALUE_REMEMBERED(child) &&
            !RVALUE_UNCOLLECTIBLE(child)) {
            fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
            data->err_count++;
        }
    }
}
5924 
/* Tri-color consistency check: a black (marked, not marking) parent
 * must not point at a white (unmarked) child unless the parent is
 * WB-unprotected; otherwise it is a write-barrier miss (B->W).
 * NOTE(review): the declaration of `data` (cast of `ptr`) is missing
 * from this extraction -- verify against the full source. */
static void
check_color_i(const VALUE child, void *ptr)
{
    const VALUE parent = data->parent;

    if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
        fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
                obj_info(parent), obj_info(child));
        data->err_count++;
    }
}
5937 #endif
5938 
/* Consistency check: verify each reachable child's rvalue state; on
 * failure print both child and parent and a C backtrace, and bump the
 * error count.
 * NOTE(review): the declaration of `data` (cast of `ptr`) is missing
 * from this extraction -- verify against the full source. */
static void
check_children_i(const VALUE child, void *ptr)
{
    if (check_rvalue_consistency_force(child, FALSE) != 0) {
        fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
                obj_info(child), obj_info(data->parent));
        rb_print_backtrace(); /* C backtrace will help to debug */

        data->err_count++;
    }
}
5951 
/* objspace_each_objects callback: walk one heap page, counting live,
 * old, remembered-shady and zombie objects and running the per-child
 * consistency checks (generation and color) on each live object.
 * NOTE(review): a few lines are missing from this extraction -- the
 * declaration of `data`, the condition that guards the RVALUE_BLACK_P
 * block (its closing brace is visible), and an assertion in the
 * poisoned-slot branch -- verify against the full source. */
static int
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
{
    VALUE obj;
    rb_objspace_t *objspace = data->objspace;

    for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);

        if (is_live_object(objspace, obj)) {
            /* count objects */
            data->live_object_count++;
            data->parent = obj;

            /* Normally, we don't expect T_MOVED objects to be in the heap.
             * But they can stay alive on the stack, */
            if (!gc_object_moved_p(objspace, obj)) {
                /* moved slots don't have children */
                rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
            }

#if USE_RGENGC
            /* check health of children */
            if (RVALUE_OLD_P(obj)) data->old_object_count++;
            if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;

            if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
                /* reachable objects from an oldgen object should be old or (young with remember) */
                data->parent = obj;
                rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
            }

            /* NOTE(review): the guarding condition for this block is
             * missing here. */
            if (RVALUE_BLACK_P(obj)) {
                /* reachable objects from black objects should be black or grey objects */
                data->parent = obj;
                rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
            }
            }
#endif
        }
        else {
            if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
                data->zombie_object_count++;
            }
        }
        if (poisoned) {
            asan_poison_object(obj);
        }
    }

    return 0;
}
6009 
/* Verify one heap page's per-page flags against a slot-by-slot scan:
 * remembered (marking) and uncollectible-shady bits found on slots must
 * be reflected in the page flags, and the final_slots count must match
 * the number of zombies.  Returns the number of remembered old objects
 * found (0 when !USE_RGENGC).
 * NOTE(review): the opening of the condition that checks
 * has_remembered_objects (its continuation line is visible below) is
 * missing from this extraction -- verify against the full source. */
static int
gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
#if USE_RGENGC
    int i;
    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;

    for (i=0; i<page->total_slots; i++) {
        VALUE val = (VALUE)&page->start[i];
        void *poisoned = asan_poisoned_object_p(val);
        asan_unpoison_object(val, false);

        if (RBASIC(val) == 0) free_objects++;
        if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
        if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
            has_remembered_shady = TRUE;
        }
        if (RVALUE_PAGE_MARKING(page, val)) {
            has_remembered_old = TRUE;
            remembered_old_objects++;
        }

        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
            asan_poison_object(val);
        }
    }

        /* NOTE(review): the first half of this condition is missing. */
        page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {

        for (i=0; i<page->total_slots; i++) {
            VALUE val = (VALUE)&page->start[i];
            if (RVALUE_PAGE_MARKING(page, val)) {
                fprintf(stderr, "marking -> %s\n", obj_info(val));
            }
        }
        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
    }

    if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? obj_info(obj) : "");
    }

    if (0) {
        /* free_slots may not equal to free_objects */
        if (page->free_slots != free_objects) {
            rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
        }
    }
    if (page->final_slots != zombie_objects) {
        rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
    }

    return remembered_old_objects;
#else
    return 0;
#endif
}
6075 
6076 static int
6077 gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
6078 {
6079  int remembered_old_objects = 0;
6080  struct heap_page *page = 0;
6081 
6082  list_for_each(head, page, page_node) {
6083  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
6084  RVALUE *p = page->freelist;
6085  while (p) {
6086  RVALUE *prev = p;
6087  asan_unpoison_object((VALUE)p, false);
6088  if (BUILTIN_TYPE(p) != T_NONE) {
6089  fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info((VALUE)p));
6090  }
6091  p = p->as.free.next;
6092  asan_poison_object((VALUE)prev);
6093  }
6094  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
6095 
6096  if (page->flags.has_remembered_objects == FALSE) {
6097  remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
6098  }
6099  }
6100 
6101  return remembered_old_objects;
6102 }
6103 
6104 static int
6105 gc_verify_heap_pages(rb_objspace_t *objspace)
6106 {
6107  int remembered_old_objects = 0;
6108  remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
6109  remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
6110  return remembered_old_objects;
6111 }
6112 
/*
 * call-seq:
 *    GC.verify_internal_consistency -> nil
 *
 * Verify internal consistency.
 *
 * This method is implementation specific.
 * Now this method checks generational consistency
 * if RGenGC is supported.
 */
static VALUE
gc_verify_internal_consistency_m(VALUE dummy)
{
    /* `dummy` is the receiver (the GC module); unused. */
    gc_verify_internal_consistency(&rb_objspace);

    return Qnil;
}
6130 
6131 static void
6132 gc_verify_internal_consistency(rb_objspace_t *objspace)
6133 {
6134  struct verify_internal_consistency_struct data = {0};
6135 
6136  data.objspace = objspace;
6137  gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
6138 
6139  /* check relations */
6140 
6141  objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
6142 
6143  if (data.err_count != 0) {
6144 #if RGENGC_CHECK_MODE >= 5
6145  objspace->rgengc.error_count = data.err_count;
6146  gc_marks_check(objspace, NULL, NULL);
6147  allrefs_dump(objspace);
6148 #endif
6149  rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
6150  }
6151 
6152  /* check heap_page status */
6153  gc_verify_heap_pages(objspace);
6154 
6155  /* check counters */
6156 
6158  if (objspace_live_slots(objspace) != data.live_object_count) {
6159  fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
6161  rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace_live_slots(objspace), data.live_object_count);
6162  }
6163  }
6164 
6165 #if USE_RGENGC
6166  if (!is_marking(objspace)) {
6168  rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.old_objects, data.old_object_count);
6169  }
6171  rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
6172  }
6173  }
6174 #endif
6175 
6176  if (!finalizing) {
6177  size_t list_count = 0;
6178 
6179  {
6181  while (z) {
6182  list_count++;
6183  z = RZOMBIE(z)->next;
6184  }
6185  }
6186 
6188  heap_pages_final_slots != list_count) {
6189 
6190  rb_bug("inconsistent finalizing object count:\n"
6191  " expect %"PRIuSIZE"\n"
6192  " but %"PRIuSIZE" zombies\n"
6193  " heap_pages_deferred_final list has %"PRIuSIZE" items.",
6195  data.zombie_object_count,
6196  list_count);
6197  }
6198  }
6199 
6200  gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
6201 }
6202 
6203 void
6205 {
6206  gc_verify_internal_consistency(&rb_objspace);
6207 }
6208 
6209 static VALUE
6210 gc_verify_transient_heap_internal_consistency(VALUE dmy)
6211 {
6213  return Qnil;
6214 }
6215 
6216 /* marks */
6217 
6218 static void
6219 gc_marks_start(rb_objspace_t *objspace, int full_mark)
6220 {
6221  /* start marking */
6222  gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
6223  gc_mode_transition(objspace, gc_mode_marking);
6224 
6225 #if USE_RGENGC
6226  if (full_mark) {
6227 #if GC_ENABLE_INCREMENTAL_MARK
6229 
6230  if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
6232 #endif
6238  objspace->marked_slots = 0;
6239  rgengc_mark_and_rememberset_clear(objspace, heap_eden);
6240  }
6241  else {
6244  objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
6246  rgengc_rememberset_mark(objspace, heap_eden);
6247  }
6248 #endif
6249 
6250  gc_mark_roots(objspace, NULL);
6251 
6252  gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %d\n", full_mark ? "full" : "minor", (int)mark_stack_size(&objspace->mark_stack));
6253 }
6254 
6255 #if GC_ENABLE_INCREMENTAL_MARK
6256 static void
6257 gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
6258 {
6259  struct heap_page *page = 0;
6260 
6261  list_for_each(&heap_eden->pages, page, page_node) {
6262  bits_t *mark_bits = page->mark_bits;
6263  bits_t *wbun_bits = page->wb_unprotected_bits;
6264  RVALUE *p = page->start;
6265  RVALUE *offset = p - NUM_IN_PAGE(p);
6266  size_t j;
6267 
6268  for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
6269  bits_t bits = mark_bits[j] & wbun_bits[j];
6270 
6271  if (bits) {
6272  p = offset + j * BITS_BITLENGTH;
6273 
6274  do {
6275  if (bits & 1) {
6276  gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
6277  GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
6278  GC_ASSERT(RVALUE_MARKED((VALUE)p));
6279  gc_mark_children(objspace, (VALUE)p);
6280  }
6281  p++;
6282  bits >>= 1;
6283  } while (bits);
6284  }
6285  }
6286  }
6287 
6288  gc_mark_stacked_objects_all(objspace);
6289 }
6290 
6291 static struct heap_page *
6292 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
6293 {
6294  struct heap_page *page = heap->pooled_pages;
6295 
6296  if (page) {
6297  heap->pooled_pages = page->free_next;
6298  heap_add_freepage(heap, page);
6299  }
6300 
6301  return page;
6302 }
6303 #endif
6304 
6305 static int
6306 gc_marks_finish(rb_objspace_t *objspace)
6307 {
6308 #if GC_ENABLE_INCREMENTAL_MARK
6309  /* finish incremental GC */
6310  if (is_incremental_marking(objspace)) {
6311  if (heap_eden->pooled_pages) {
6312  heap_move_pooled_pages_to_free_pages(heap_eden);
6313  gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
6314  return FALSE; /* continue marking phase */
6315  }
6316 
6317  if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
6318  rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
6319  }
6320 
6321  gc_mark_roots(objspace, 0);
6322 
6323  if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
6324  gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
6325  return FALSE;
6326  }
6327 
6328 #if RGENGC_CHECK_MODE >= 2
6329  if (gc_verify_heap_pages(objspace) != 0) {
6330  rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
6331  }
6332 #endif
6333 
6335  /* check children of all marked wb-unprotected objects */
6336  gc_marks_wb_unprotected_objects(objspace);
6337  }
6338 #endif /* GC_ENABLE_INCREMENTAL_MARK */
6339 
6340 #if RGENGC_CHECK_MODE >= 2
6341  gc_verify_internal_consistency(objspace);
6342 #endif
6343 
6344 #if USE_RGENGC
6345  if (is_full_marking(objspace)) {
6346  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
6347  const double r = gc_params.oldobject_limit_factor;
6349  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
6350  }
6351 #endif
6352 
6353 #if RGENGC_CHECK_MODE >= 4
6354  gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
6355 #endif
6356 
6357  {
6358  /* decide full GC is needed or not */
6359  rb_heap_t *heap = heap_eden;
6361  size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
6362  size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
6363  size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
6364  int full_marking = is_full_marking(objspace);
6365 
6366  GC_ASSERT(heap->total_slots >= objspace->marked_slots);
6367 
6368  /* setup free-able page counts */
6369  if (max_free_slots < gc_params.heap_init_slots) max_free_slots = gc_params.heap_init_slots;
6370 
6371  if (sweep_slots > max_free_slots) {
6372  heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
6373  }
6374  else {
6376  }
6377 
6378  /* check free_min */
6379  if (min_free_slots < gc_params.heap_free_slots) min_free_slots = gc_params.heap_free_slots;
6380 
6381 #if USE_RGENGC
6382  if (sweep_slots < min_free_slots) {
6383  if (!full_marking) {
6384  if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
6385  full_marking = TRUE;
6386  /* do not update last_major_gc, because full marking is not done. */
6387  goto increment;
6388  }
6389  else {
6390  gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
6392  }
6393  }
6394  else {
6395  increment:
6396  gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6397  heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
6398  heap_increment(objspace, heap);
6399  }
6400  }
6401 
6402  if (full_marking) {
6403  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
6404  const double r = gc_params.oldobject_limit_factor;
6406  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
6407  }
6408 
6411  }
6412  if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
6414  }
6415  if (RGENGC_FORCE_MAJOR_GC) {
6417  }
6418 
6419  gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
6420  (int)objspace->marked_slots, (int)objspace->rgengc.old_objects, (int)heap->total_slots, (int)sweep_slots, (int)heap_allocatable_pages,
6421  objspace->rgengc.need_major_gc ? "major" : "minor");
6422 #else /* USE_RGENGC */
6423  if (sweep_slots < min_free_slots) {
6424  gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6425  heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slot, total_slot));
6426  heap_increment(objspace, heap);
6427  }
6428 #endif
6429  }
6430 
6432 
6434 
6435  return TRUE;
6436 }
6437 
6438 static void
6439 gc_marks_step(rb_objspace_t *objspace, int slots)
6440 {
6441 #if GC_ENABLE_INCREMENTAL_MARK
6442  GC_ASSERT(is_marking(objspace));
6443 
6444  if (gc_mark_stacked_objects_incremental(objspace, slots)) {
6445  if (gc_marks_finish(objspace)) {
6446  /* finish */
6447  gc_sweep(objspace);
6448  }
6449  }
6450  if (0) fprintf(stderr, "objspace->marked_slots: %d\n", (int)objspace->marked_slots);
6451 #endif
6452 }
6453 
/* Run the remainder of the marking phase to completion (non-incrementally),
 * then transition into the sweep phase. */
static void
gc_marks_rest(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_marks_rest\n");

#if GC_ENABLE_INCREMENTAL_MARK
    heap_eden->pooled_pages = NULL;
#endif

    if (is_incremental_marking(objspace)) {
        do {
            /* drain the entire mark stack, then retry until finish succeeds */
            while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
        } while (gc_marks_finish(objspace) == FALSE);
    }
    else {
        gc_mark_stacked_objects_all(objspace);
        gc_marks_finish(objspace);
    }

    /* move to sweep */
    gc_sweep(objspace);
}
6476 
6477 static void
6478 gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
6479 {
6480  GC_ASSERT(dont_gc == FALSE);
6481 #if GC_ENABLE_INCREMENTAL_MARK
6482 
6483  gc_enter(objspace, "marks_continue");
6484 
6486  {
6487  int slots = 0;
6488  const char *from;
6489 
6490  if (heap->pooled_pages) {
6491  while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
6492  struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
6493  slots += page->free_slots;
6494  }
6495  from = "pooled-pages";
6496  }
6497  else if (heap_increment(objspace, heap)) {
6498  slots = heap->free_pages->free_slots;
6499  from = "incremented-pages";
6500  }
6501 
6502  if (slots > 0) {
6503  gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
6504  gc_marks_step(objspace, (int)objspace->rincgc.step_slots);
6505  }
6506  else {
6507  gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
6508  gc_marks_rest(objspace);
6509  }
6510  }
6512 
6513  gc_exit(objspace, "marks_continue");
6514 #endif
6515 }
6516 
6517 static void
6518 gc_marks(rb_objspace_t *objspace, int full_mark)
6519 {
6520  gc_prof_mark_timer_start(objspace);
6521 
6523  {
6524  /* setup marking */
6525 
6526 #if USE_RGENGC
6527  gc_marks_start(objspace, full_mark);
6528  if (!is_incremental_marking(objspace)) {
6529  gc_marks_rest(objspace);
6530  }
6531 
6532 #if RGENGC_PROFILE > 0
6533  if (gc_prof_record(objspace)) {
6534  gc_profile_record *record = gc_prof_record(objspace);
6535  record->old_objects = objspace->rgengc.old_objects;
6536  }
6537 #endif
6538 
6539 #else /* USE_RGENGC */
6540  gc_marks_start(objspace, TRUE);
6541  gc_marks_rest(objspace);
6542 #endif
6543  }
6545  gc_prof_mark_timer_stop(objspace);
6546 }
6547 
6548 /* RGENGC */
6549 
6550 static void
6551 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
6552 {
6553  if (level <= RGENGC_DEBUG) {
6554  char buf[1024];
6555  FILE *out = stderr;
6556  va_list args;
6557  const char *status = " ";
6558 
6559 #if USE_RGENGC
6560  if (during_gc) {
6561  status = is_full_marking(objspace) ? "+" : "-";
6562  }
6563  else {
6564  if (is_lazy_sweeping(heap_eden)) {
6565  status = "S";
6566  }
6567  if (is_incremental_marking(objspace)) {
6568  status = "M";
6569  }
6570  }
6571 #endif
6572 
6573  va_start(args, fmt);
6574  vsnprintf(buf, 1024, fmt, args);
6575  va_end(args);
6576 
6577  fprintf(out, "%s|", status);
6578  fputs(buf, out);
6579  }
6580 }
6581 
6582 #if USE_RGENGC
6583 
6584 /* bit operations */
6585 
/* Return non-zero when `obj` is in the remember set (the remember-set bit
 * is aliased onto the per-page marking bitmap). */
static int
rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
{
    return RVALUE_REMEMBERED(obj);
}
6591 
6592 static int
6593 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
6594 {
6595  struct heap_page *page = GET_HEAP_PAGE(obj);
6596  bits_t *bits = &page->marking_bits[0];
6597 
6598  GC_ASSERT(!is_incremental_marking(objspace));
6599 
6600  if (MARKED_IN_BITMAP(bits, obj)) {
6601  return FALSE;
6602  }
6603  else {
6605  MARK_IN_BITMAP(bits, obj);
6606  return TRUE;
6607  }
6608 }
6609 
6610 /* wb, etc */
6611 
6612 /* return FALSE if already remembered */
6613 static int
6614 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
6615 {
6616  gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
6617  rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
6618 
6619  check_rvalue_consistency(obj);
6620 
6621  if (RGENGC_CHECK_MODE) {
6622  if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
6623  }
6624 
6625 #if RGENGC_PROFILE > 0
6626  if (!rgengc_remembered(objspace, obj)) {
6627  if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6628  objspace->profile.total_remembered_normal_object_count++;
6629 #if RGENGC_PROFILE >= 2
6630  objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
6631 #endif
6632  }
6633  }
6634 #endif /* RGENGC_PROFILE > 0 */
6635 
6636  return rgengc_remembersetbits_set(objspace, obj);
6637 }
6638 
/* Remember-set query used during sweeping: no debug report, but still
 * validates the object's generational flags. */
static int
rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
{
    int result = rgengc_remembersetbits_get(objspace, obj);
    check_rvalue_consistency(obj);
    return result;
}
6646 
/* Remember-set query with a level-6 debug report; delegates to the
 * sweep-time variant for the actual lookup + consistency check. */
static int
rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
{
    gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
    return rgengc_remembered_sweep(objspace, obj);
}
6653 
6654 #ifndef PROFILE_REMEMBERSET_MARK
6655 #define PROFILE_REMEMBERSET_MARK 0
6656 #endif
6657 
6658 static void
6659 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
6660 {
6661  size_t j;
6662  struct heap_page *page = 0;
6663 #if PROFILE_REMEMBERSET_MARK
6664  int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
6665 #endif
6666  gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
6667 
6668  list_for_each(&heap->pages, page, page_node) {
6670  RVALUE *p = page->start;
6671  RVALUE *offset = p - NUM_IN_PAGE(p);
6672  bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
6673  bits_t *marking_bits = page->marking_bits;
6676 #if PROFILE_REMEMBERSET_MARK
6677  if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
6678  else if (page->flags.has_remembered_objects) has_old++;
6679  else if (page->flags.has_uncollectible_shady_objects) has_shady++;
6680 #endif
6681  for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
6682  bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
6683  marking_bits[j] = 0;
6684  }
6686 
6687  for (j=0; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
6688  bitset = bits[j];
6689 
6690  if (bitset) {
6691  p = offset + j * BITS_BITLENGTH;
6692 
6693  do {
6694  if (bitset & 1) {
6695  VALUE obj = (VALUE)p;
6696  gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
6697  GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
6698  GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
6699 
6700  gc_mark_children(objspace, obj);
6701  }
6702  p++;
6703  bitset >>= 1;
6704  } while (bitset);
6705  }
6706  }
6707  }
6708 #if PROFILE_REMEMBERSET_MARK
6709  else {
6710  skip++;
6711  }
6712 #endif
6713  }
6714 
6715 #if PROFILE_REMEMBERSET_MARK
6716  fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
6717 #endif
6718  gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
6719 }
6720 
6721 static void
6722 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
6723 {
6724  struct heap_page *page = 0;
6725 
6726  list_for_each(&heap->pages, page, page_node) {
6727  memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6729  memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6730  memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6733  }
6734 }
6735 
6736 /* RGENGC: APIs */
6737 
6738 NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
6739 
6740 static void
6741 gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
6742 {
6743  if (RGENGC_CHECK_MODE) {
6744  if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
6745  if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
6746  if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
6747  }
6748 
6749 #if 1
6750  /* mark `a' and remember (default behavior) */
6751  if (!rgengc_remembered(objspace, a)) {
6752  rgengc_remember(objspace, a);
6753  gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
6754  }
6755 #else
6756  /* mark `b' and remember */
6758  if (RVALUE_WB_UNPROTECTED(b)) {
6759  gc_remember_unprotected(objspace, b);
6760  }
6761  else {
6762  RVALUE_AGE_SET_OLD(objspace, b);
6763  rgengc_remember(objspace, b);
6764  }
6765 
6766  gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
6767 #endif
6768 
6769  check_rvalue_consistency(a);
6770  check_rvalue_consistency(b);
6771 }
6772 
6773 #if GC_ENABLE_INCREMENTAL_MARK
6774 static void
6775 gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
6776 {
6777  gc_mark_set_parent(objspace, parent);
6778  rgengc_check_relation(objspace, obj);
6779  if (gc_mark_set(objspace, obj) == FALSE) return;
6780  gc_aging(objspace, obj);
6781  gc_grey(objspace, obj);
6782 }
6783 
6784 NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
6785 
6786 static void
6787 gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
6788 {
6789  gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
6790 
6791  if (RVALUE_BLACK_P(a)) {
6792  if (RVALUE_WHITE_P(b)) {
6793  if (!RVALUE_WB_UNPROTECTED(a)) {
6794  gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
6795  gc_mark_from(objspace, b, a);
6796  }
6797  }
6798  else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
6799  if (!RVALUE_WB_UNPROTECTED(b)) {
6800  gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
6801  RVALUE_AGE_SET_OLD(objspace, b);
6802 
6803  if (RVALUE_BLACK_P(b)) {
6804  gc_grey(objspace, b);
6805  }
6806  }
6807  else {
6808  gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
6809  gc_remember_unprotected(objspace, b);
6810  }
6811  }
6812  }
6813 }
6814 #else
6815 #define gc_writebarrier_incremental(a, b, objspace)
6816 #endif
6817 
6818 void
6820 {
6821  rb_objspace_t *objspace = &rb_objspace;
6822 
6823  if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
6824  if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
6825 
6826  if (!is_incremental_marking(objspace)) {
6827  if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
6828  return;
6829  }
6830  else {
6831  gc_writebarrier_generational(a, b, objspace);
6832  }
6833  }
6834  else { /* slow path */
6835  gc_writebarrier_incremental(a, b, objspace);
6836  }
6837 }
6838 
6839 void
6841 {
6842  if (RVALUE_WB_UNPROTECTED(obj)) {
6843  return;
6844  }
6845  else {
6846  rb_objspace_t *objspace = &rb_objspace;
6847 
6848  gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
6849  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
6850 
6851  if (RVALUE_OLD_P(obj)) {
6852  gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
6853  RVALUE_DEMOTE(objspace, obj);
6854  gc_mark_set(objspace, obj);
6855  gc_remember_unprotected(objspace, obj);
6856 
6857 #if RGENGC_PROFILE
6858  objspace->profile.total_shade_operation_count++;
6859 #if RGENGC_PROFILE >= 2
6860  objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
6861 #endif /* RGENGC_PROFILE >= 2 */
6862 #endif /* RGENGC_PROFILE */
6863  }
6864  else {
6865  RVALUE_AGE_RESET(obj);
6866  }
6867 
6868  RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
6870  }
6871 }
6872 
6873 /*
6874  * remember `obj' if needed.
6875  */
6876 MJIT_FUNC_EXPORTED void
6878 {
6879  rb_objspace_t *objspace = &rb_objspace;
6880 
6881  gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
6882 
6883  if (is_incremental_marking(objspace)) {
6884  if (RVALUE_BLACK_P(obj)) {
6885  gc_grey(objspace, obj);
6886  }
6887  }
6888  else {
6889  if (RVALUE_OLD_P(obj)) {
6890  rgengc_remember(objspace, obj);
6891  }
6892  }
6893 }
6894 
6895 static st_table *rgengc_unprotect_logging_table;
6896 
/* st_foreach callback: dump one "location<TAB>count" record to stderr at
 * process exit. */
static int
rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
{
    fprintf(stderr, "%s\t%d\n", (char *)key, (int)val);
    return ST_CONTINUE;
}
6903 
/* atexit handler: dump the entire unprotect-logging table. */
static void
rgengc_unprotect_logging_exit_func(void)
{
    st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
}
6909 
/* Count write-barrier-unprotect events per "obj_info|file:line" site; the
 * table is dumped to stderr at process exit. Debug aid. */
void
rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
{
    VALUE obj = (VALUE)objptr;

    /* lazily create the table and register the at-exit dump */
    if (rgengc_unprotect_logging_table == 0) {
        rgengc_unprotect_logging_table = st_init_strtable();
        atexit(rgengc_unprotect_logging_exit_func);
    }

    if (RVALUE_WB_UNPROTECTED(obj) == 0) {
        char buff[0x100];
        st_data_t cnt = 1;
        char *ptr = buff;

        snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);

        if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
            cnt++;
        }
        else {
            /* first occurrence: the key must outlive this stack frame, so
             * duplicate it; (strdup) defeats any function-like macro */
            ptr = (strdup)(buff);
            if (!ptr) rb_memerror();
        }
        st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
    }
}
6937 #endif /* USE_RGENGC */
6938 
6939 void
6941 {
6942 #if USE_RGENGC
6943  rb_objspace_t *objspace = &rb_objspace;
6944 
6945  if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
6946  if (!RVALUE_OLD_P(dest)) {
6948  RVALUE_AGE_RESET_RAW(dest);
6949  }
6950  else {
6951  RVALUE_DEMOTE(objspace, dest);
6952  }
6953  }
6954 
6955  check_rvalue_consistency(dest);
6956 #endif
6957 }
6958 
6959 /* RGENGC analysis information */
6960 
6961 VALUE
6963 {
6964 #if USE_RGENGC
6965  return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
6966 #else
6967  return Qfalse;
6968 #endif
6969 }
6970 
6971 VALUE
6973 {
6974  return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
6975 }
6976 
6977 size_t
6979 {
6980  size_t n = 0;
6981  static ID ID_marked;
6982 #if USE_RGENGC
6983  static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
6984 #endif
6985 
6986  if (!ID_marked) {
6987 #define I(s) ID_##s = rb_intern(#s);
6988  I(marked);
6989 #if USE_RGENGC
6990  I(wb_protected);
6991  I(old);
6992  I(marking);
6993  I(uncollectible);
6994  I(pinned);
6995 #endif
6996 #undef I
6997  }
6998 
6999 #if USE_RGENGC
7000  if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
7001  if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
7002  if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
7003  if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
7004 #endif
7005  if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
7006  if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
7007  return n;
7008 }
7009 
7010 /* GC */
7011 
7012 void
7014 {
7015  rb_objspace_t *objspace = &rb_objspace;
7016 
7017 #if USE_RGENGC
7018  int is_old = RVALUE_OLD_P(obj);
7019 
7020  gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
7021 
7022  if (is_old) {
7023  if (RVALUE_MARKED(obj)) {
7024  objspace->rgengc.old_objects--;
7025  }
7026  }
7029 
7030 #if GC_ENABLE_INCREMENTAL_MARK
7031  if (is_incremental_marking(objspace)) {
7033  invalidate_mark_stack(&objspace->mark_stack, obj);
7035  }
7037  }
7038  else {
7039 #endif
7040  if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
7042  }
7044 #if GC_ENABLE_INCREMENTAL_MARK
7045  }
7046 #endif
7047 #endif
7048 
7049  objspace->profile.total_freed_objects++;
7050 
7051  heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
7052 
7053  /* Disable counting swept_slots because there are no meaning.
7054  * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
7055  * objspace->heap.swept_slots++;
7056  * }
7057  */
7058 }
7059 
7060 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE
7061 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024
7062 #endif
7063 
7064 void
7066 {
7067  VALUE ary_ary = GET_VM()->mark_object_ary;
7068  VALUE ary = rb_ary_last(0, 0, ary_ary);
7069 
7070  if (ary == Qnil || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
7072  rb_ary_push(ary_ary, ary);
7073  }
7074 
7075  rb_ary_push(ary, obj);
7076 }
7077 
7078 void
7080 {
7081  rb_objspace_t *objspace = &rb_objspace;
7082  struct gc_list *tmp;
7083 
7084  tmp = ALLOC(struct gc_list);
7085  tmp->next = global_list;
7086  tmp->varptr = addr;
7087  global_list = tmp;
7088 }
7089 
7090 void
7092 {
7093  rb_objspace_t *objspace = &rb_objspace;
7094  struct gc_list *tmp = global_list;
7095 
7096  if (tmp->varptr == addr) {
7097  global_list = tmp->next;
7098  xfree(tmp);
7099  return;
7100  }
7101  while (tmp->next) {
7102  if (tmp->next->varptr == addr) {
7103  struct gc_list *t = tmp->next;
7104 
7105  tmp->next = tmp->next->next;
7106  xfree(t);
7107  break;
7108  }
7109  tmp = tmp->next;
7110  }
7111 }
7112 
7113 void
7115 {
7117 }
7118 
7119 #define GC_NOTIFY 0
7120 
/* Bit positions interpreted from a Fixnum GC.stress value.
 * NOTE(review): the members were elided in this extraction; restored —
 * three of the four names are used below (gc_stress_full_mark_after_malloc
 * in the macro that follows, gc_stress_no_major / gc_stress_no_immediate_sweep
 * in gc_start). */
enum {
    gc_stress_no_major,
    gc_stress_no_immediate_sweep,
    gc_stress_full_mark_after_malloc,
    gc_stress_max
};
7127 
7128 #define gc_stress_full_mark_after_malloc_p() \
7129  (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
7130 
7131 static void
7132 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
7133 {
7134  if (!heap->freelist && !heap->free_pages) {
7135  if (!heap_increment(objspace, heap)) {
7136  heap_set_increment(objspace, 1);
7137  heap_increment(objspace, heap);
7138  }
7139  }
7140 }
7141 
7142 static int
7143 ready_to_gc(rb_objspace_t *objspace)
7144 {
7145  if (dont_gc || during_gc || ruby_disable_gc) {
7146  heap_ready_to_gc(objspace, heap_eden);
7147  return FALSE;
7148  }
7149  else {
7150  return TRUE;
7151  }
7152 }
7153 
/*
 * Recompute malloc_limit after a GC cycle from the amount of memory
 * malloc'ed since the previous cycle, then reset the counters.  Also
 * maintains the RGenGC oldmalloc limit used to trigger major GCs.
 */
static void
gc_reset_malloc_info(rb_objspace_t *objspace)
{
    gc_prof_set_malloc_info(objspace);
    {
        /* Atomically take-and-clear the bytes malloc'ed since the last GC. */
        size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
        size_t old_limit = malloc_limit;

        if (inc > malloc_limit) {
            /* Allocation outpaced the limit: grow it, clamped to malloc_limit_max. */
            malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
            if (malloc_limit > gc_params.malloc_limit_max) {
                malloc_limit = gc_params.malloc_limit_max;
            }
        }
        else {
            /* Otherwise decay the limit slowly, clamped to malloc_limit_min. */
            malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
            if (malloc_limit < gc_params.malloc_limit_min) {
                malloc_limit = gc_params.malloc_limit_min;
            }
        }

        if (0) { /* debug tracing, compiled out */
            if (old_limit != malloc_limit) {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
                        rb_gc_count(), old_limit, malloc_limit);
            }
            else {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
                /* NOTE(review): the remainder of this fprintf's argument list
                 * appears dropped from this extract -- confirm upstream. */
            }
        }
    }

    /* reset oldmalloc info */
#if RGENGC_ESTIMATE_OLDMALLOC
    if (!is_full_marking(objspace)) {
        /* Minor GC: if the oldmalloc limit was exceeded, raise it (clamped). */
        if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
            objspace->rgengc.oldmalloc_increase_limit =
            /* NOTE(review): the right-hand side of this assignment appears
             * dropped from this extract -- confirm upstream. */

            if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
            }
        }

        if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%d\n",
                       (int)rb_gc_count(),
                       (int)objspace->rgengc.need_major_gc,
                       (unsigned int)objspace->rgengc.oldmalloc_increase,
                       (unsigned int)objspace->rgengc.oldmalloc_increase_limit,
                       (unsigned int)gc_params.oldmalloc_limit_max);
    }
    else {
        /* major GC */
        objspace->rgengc.oldmalloc_increase = 0;

        if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
            /* Major GC not caused by oldmalloc: shrink the limit (clamped). */
            objspace->rgengc.oldmalloc_increase_limit =
              (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
            if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
            }
        }
    }
#endif
}
7221 
7222 static int
7223 garbage_collect(rb_objspace_t *objspace, int reason)
7224 {
7225 #if GC_PROFILE_MORE_DETAIL
7226  objspace->profile.prepare_time = getrusage_time();
7227 #endif
7228 
7229  gc_rest(objspace);
7230 
7231 #if GC_PROFILE_MORE_DETAIL
7232  objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
7233 #endif
7234 
7235  return gc_start(objspace, reason);
7236 }
7237 
/*
 * Begin a GC cycle: decide minor vs. major marking and immediate vs.
 * lazy/incremental modes from `reason`, GC stress settings and RGenGC
 * bookkeeping, then run the marking phase.  Returns FALSE if the heap is
 * not ready, TRUE otherwise.
 */
static int
gc_start(rb_objspace_t *objspace, int reason)
{
    unsigned int do_full_mark = !!((unsigned)reason & GPR_FLAG_FULL_MARK);
    unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;

    /* reason may be clobbered, later, so keep set immediate_sweep here */
    objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);

    if (!heap_allocated_pages) return FALSE; /* heap is not ready */
    if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */

    GC_ASSERT(gc_mode(objspace) == gc_mode_none);
    GC_ASSERT(!is_incremental_marking(objspace));
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    gc_enter(objspace, "gc_start");

    if (ruby_gc_stressful) {
        /* NOTE(review): the declaration of `flag` appears dropped from this
         * extract -- presumably derived from the GC stress mode setting;
         * confirm against the upstream source. */

        if ((flag & (1<<gc_stress_no_major)) == 0) {
            do_full_mark = TRUE;
        }

        objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
    }
    else {
#if USE_RGENGC
        if (objspace->rgengc.need_major_gc) {
            /* A previous cycle requested a major GC; honor and record why. */
            reason |= objspace->rgengc.need_major_gc;
            do_full_mark = TRUE;
        }
        else if (RGENGC_FORCE_MAJOR_GC) {
            reason = GPR_FLAG_MAJOR_BY_FORCE;
            do_full_mark = TRUE;
        }

        objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
#endif
    }

    if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
        reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
    }

#if GC_ENABLE_INCREMENTAL_MARK
    if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
        /* NOTE(review): this branch's body appears dropped from this extract. */
    }
    else {
        objspace->flags.during_incremental_marking = do_full_mark;
    }
#endif

    if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
        objspace->flags.immediate_sweep = TRUE;
    }

    if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;

    gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
              reason,
              do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);

#if USE_DEBUG_COUNTER
    RB_DEBUG_COUNTER_INC(gc_count);

    if (reason & GPR_FLAG_MAJOR_MASK) {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
#if RGENGC_ESTIMATE_OLDMALLOC
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
#endif
    }
    else {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
    }
#endif

    objspace->profile.count++;
    objspace->profile.latest_gc_info = reason;
    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace);
    rb_transient_heap_start_marking(do_full_mark);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);

    gc_prof_timer_start(objspace);
    {
        gc_marks(objspace, do_full_mark);
    }
    gc_prof_timer_stop(objspace);

    gc_exit(objspace, "gc_start");
    return TRUE;
}
7347 
/*
 * Finish any GC work still pending: drive incremental marking and/or lazy
 * sweeping to completion so no GC phase is left in progress.  No-op when
 * neither phase is active.
 */
static void
gc_rest(rb_objspace_t *objspace)
{
    int marking = is_incremental_marking(objspace);
    int sweeping = is_lazy_sweeping(heap_eden);

    if (marking || sweeping) {
        gc_enter(objspace, "gc_rest");

        if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);

        if (is_incremental_marking(objspace)) {
            /* NOTE(review): lines around this call appear dropped from this extract. */
            gc_marks_rest(objspace);
        }
        if (is_lazy_sweeping(heap_eden)) {
            gc_sweep_rest(objspace);
        }
        gc_exit(objspace, "gc_rest");
    }
}
7370 
7373  int reason;
7374 };
7375 
7376 static void
7377 gc_current_status_fill(rb_objspace_t *objspace, char *buff)
7378 {
7379  int i = 0;
7380  if (is_marking(objspace)) {
7381  buff[i++] = 'M';
7382 #if USE_RGENGC
7383  if (is_full_marking(objspace)) buff[i++] = 'F';
7384 #if GC_ENABLE_INCREMENTAL_MARK
7385  if (is_incremental_marking(objspace)) buff[i++] = 'I';
7386 #endif
7387 #endif
7388  }
7389  else if (is_sweeping(objspace)) {
7390  buff[i++] = 'S';
7391  if (is_lazy_sweeping(heap_eden)) buff[i++] = 'L';
7392  }
7393  else {
7394  buff[i++] = 'N';
7395  }
7396  buff[i] = '\0';
7397 }
7398 
7399 static const char *
7400 gc_current_status(rb_objspace_t *objspace)
7401 {
7402  static char buff[0x10];
7403  gc_current_status_fill(objspace, buff);
7404  return buff;
7405 }
7406 
#if PRINT_ENTER_EXIT_TICK

/* Tick recorded at the most recent gc_exit (when the mutator resumed). */
static tick_t last_exit_tick;
/* Tick recorded at the most recent gc_enter. */
static tick_t enter_tick;
/* Number of gc_enter events seen so far. */
static int enter_count = 0;
/* GC status string captured at the last gc_enter. */
static char last_gc_status[0x10];

/*
 * Debug tracing of GC enter/exit events.  direction == 0 records an
 * enter; any other value records an exit and prints one timing line to
 * stderr ('+' marks a major GC, '-' a minor one).
 */
static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    if (direction == 0) { /* enter */
        enter_count++;
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);
#if 1
        /* [last mutator time] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
        last_exit_tick = exit_tick;
#else
        /* [enter_tick] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
#endif
    }
}
#else /* PRINT_ENTER_EXIT_TICK */
/* Tick tracing disabled: no-op stand-in. */
static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    /* null */
}
#endif /* PRINT_ENTER_EXIT_TICK */
7453 
/*
 * Mark the beginning of a GC critical section: set during_gc, emit
 * reporting/tracing, and fire the RUBY_INTERNAL_EVENT_GC_ENTER hook.
 * Must not be called while another GC section is active.
 */
static inline void
gc_enter(rb_objspace_t *objspace, const char *event)
{
    GC_ASSERT(during_gc == 0);
    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    /* NOTE(review): a line appears dropped here in this extract. */

    during_gc = TRUE;
    gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
    gc_record(objspace, 0, event);
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}
7467 
/*
 * Mark the end of a GC critical section: fire the
 * RUBY_INTERNAL_EVENT_GC_EXIT hook, emit tracing/reporting, and clear
 * during_gc.  Must be paired with a preceding gc_enter().
 */
static inline void
gc_exit(rb_objspace_t *objspace, const char *event)
{
    GC_ASSERT(during_gc != 0);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
    gc_record(objspace, 1, event);
    gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
    during_gc = FALSE;

    /* NOTE(review): a line appears dropped here in this extract. */
}
7480 
7481 static void *
7482 gc_with_gvl(void *ptr)
7483 {
7484  struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
7485  return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
7486 }
7487 
7488 static int
7489 garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
7490 {
7491  if (dont_gc) return TRUE;
7492  if (ruby_thread_has_gvl_p()) {
7493  return garbage_collect(objspace, reason);
7494  }
7495  else {
7496  if (ruby_native_thread_p()) {
7497  struct objspace_and_reason oar;
7498  oar.objspace = objspace;
7499  oar.reason = reason;
7500  return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
7501  }
7502  else {
7503  /* no ruby thread */
7504  fprintf(stderr, "[FATAL] failed to allocate memory\n");
7505  exit(EXIT_FAILURE);
7506  }
7507  }
7508 }
7509 
/*
 * Ruby-level GC trigger (backs GC.start): run a GC with the requested
 * full-mark / immediate-mark / immediate-sweep flags, then run any
 * deferred finalizers.  Always returns nil.
 */
static VALUE
gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep)
{
    int reason = GPR_FLAG_FULL_MARK |
    /* NOTE(review): the remaining flag terms and the `objspace` declaration
     * appear dropped from this extract -- confirm against the upstream source. */

    /* Drop each flag that the caller explicitly passed as false. */
    if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
    if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
    if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}
7528 
/*
 * Return TRUE if `obj` may be relocated by compaction.  Special
 * constants, empty/zombie/moved slots, dynamic symbols whose ID escaped,
 * and pinned objects must stay at their current address.
 */
static int
gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) {
        return FALSE;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_NONE:
      case T_NIL:
      case T_MOVED:
      case T_ZOMBIE:
        return FALSE;
        break;
      case T_SYMBOL:
        /* A dynamic symbol whose ID leaked outside the scope mask is
         * referenced by ID and cannot move. */
        if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
            return FALSE;
        }
        /* fall through */
      case T_STRING:
      case T_OBJECT:
      case T_FLOAT:
      case T_IMEMO:
      case T_ARRAY:
      case T_BIGNUM:
      case T_ICLASS:
      case T_MODULE:
      case T_REGEXP:
      case T_DATA:
      case T_MATCH:
      case T_STRUCT:
      case T_HASH:
      case T_FILE:
      case T_COMPLEX:
      case T_RATIONAL:
      case T_NODE:
      case T_CLASS:
        if (FL_TEST(obj, FL_FINALIZE)) {
            /* NOTE(review): brace balance suggests a nested condition line was
             * dropped from this extract -- confirm upstream. */
                return FALSE;
            }
        }
        return !RVALUE_PINNED(obj);
        break;

      default:
        rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
        break;
    }

    return FALSE;
}
7581 
/*
 * Move the object at `scan` into the empty slot at `free` (one step of
 * the two-finger compaction).  GC bookkeeping bits, generic ivars and the
 * object_id mapping follow the object to its new address.  The vacated
 * source slot becomes a T_MOVED forwarding object pushed onto
 * `moved_list`; returns the new head of that list.
 */
static VALUE
gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, VALUE moved_list)
{
    int marked;
    int wb_unprotected;
    int uncollectible;
    int marking;
    RVALUE *dest = (RVALUE *)free;
    RVALUE *src = (RVALUE *)scan;

    gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);

    GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);

    /* Save off bits for current object. */
    /* NOTE(review): the assignment of `marked` appears dropped from this extract. */
    wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
    uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
    marking = RVALUE_MARKING((VALUE)src);

    /* Clear bits for eventual T_MOVED */
    /* NOTE(review): the bitmap-clearing calls appear dropped from this extract. */

    if (FL_TEST(src, FL_EXIVAR)) {
        /* Keep generic instance variables attached to the new address. */
        rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
    }

    VALUE id;

    /* If the source object's object_id has been seen, we need to update
     * the object to object id mapping. */
    if (st_lookup(objspace->obj_to_id_tbl, (VALUE)src, &id)) {
        gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
        st_insert(objspace->obj_to_id_tbl, (VALUE)dest, id);
    }

    /* Move the object */
    memcpy(dest, src, sizeof(RVALUE));
    memset(src, 0, sizeof(RVALUE));

    /* Set bits for object in new location */
    /* NOTE(review): the MARK_IN_BITMAP/CLEAR_IN_BITMAP statements inside the
     * branches below appear dropped from this extract -- confirm upstream. */
    if (marking) {
    }
    else {
    }

    if (marked) {
    }
    else {
    }

    if (wb_unprotected) {
    }
    else {
    }

    if (uncollectible) {
    }
    else {
    }

    /* Assign forwarding address */
    src->as.moved.flags = T_MOVED;
    src->as.moved.destination = (VALUE)dest;
    src->as.moved.next = moved_list;
    GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);

    return (VALUE)src;
}
7666 
/* Cursor into the eden heap used by the two-finger compaction walk.
 * NOTE(review): some members appear dropped from this extract (code below
 * also accesses `slot` and `objspace` fields) -- confirm upstream. */
struct heap_cursor {
    size_t index;            /* position within the sorted page list */
    struct heap_page *page;  /* page currently under the cursor */
};
7673 
7674 static void
7675 advance_cursor(struct heap_cursor *free, struct heap_page **page_list)
7676 {
7677  if (free->slot == free->page->start + free->page->total_slots - 1) {
7678  free->index++;
7679  free->page = page_list[free->index];
7680  free->slot = free->page->start;
7681  }
7682  else {
7683  free->slot++;
7684  }
7685 }
7686 
7687 static void
7688 retreat_cursor(struct heap_cursor *scan, struct heap_page **page_list)
7689 {
7690  if (scan->slot == scan->page->start) {
7691  scan->index--;
7692  scan->page = page_list[scan->index];
7693  scan->slot = scan->page->start + scan->page->total_slots - 1;
7694  }
7695  else {
7696  scan->slot--;
7697  }
7698 }
7699 
7700 static int
7701 not_met(struct heap_cursor *free, struct heap_cursor *scan)
7702 {
7703  if (free->index < scan->index)
7704  return 1;
7705 
7706  if (free->index > scan->index)
7707  return 0;
7708 
7709  return free->slot < scan->slot;
7710 }
7711 
7712 static void
7713 init_cursors(rb_objspace_t *objspace, struct heap_cursor *free, struct heap_cursor *scan, struct heap_page **page_list)
7714 {
7715  struct heap_page *page;
7716  size_t total_pages = heap_eden->total_pages;
7717  page = page_list[0];
7718 
7719  free->index = 0;
7720  free->page = page;
7721  free->slot = page->start;
7722  free->objspace = objspace;
7723 
7724  page = page_list[total_pages - 1];
7725  scan->index = total_pages - 1;
7726  scan->page = page;
7727  scan->slot = page->start + page->total_slots - 1;
7728  scan->objspace = objspace;
7729 }
7730 
7731 static int
7732 count_pinned(struct heap_page *page)
7733 {
7734  int pinned = 0;
7735  int i;
7736 
7737  for (i = 0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
7738  pinned += popcount_bits(page->pinned_bits[i]);
7739  }
7740 
7741  return pinned;
7742 }
7743 
7744 static int
7745 compare_pinned(const void *left, const void *right, void *dummy)
7746 {
7747  struct heap_page *left_page;
7748  struct heap_page *right_page;
7749 
7750  left_page = *(struct heap_page * const *)left;
7751  right_page = *(struct heap_page * const *)right;
7752 
7753  return right_page->pinned_slots - left_page->pinned_slots;
7754 }
7755 
7756 static int
7757 compare_free_slots(const void *left, const void *right, void *dummy)
7758 {
7759  struct heap_page *left_page;
7760  struct heap_page *right_page;
7761 
7762  left_page = *(struct heap_page * const *)left;
7763  right_page = *(struct heap_page * const *)right;
7764 
7765  return right_page->free_slots - left_page->free_slots;
7766 }
7767 
/* qsort_r-style comparator type used to order heap pages for compaction. */
typedef int page_compare_func_t(const void *, const void *, void *);
7769 
7770 static struct heap_page **
7771 allocate_page_list(rb_objspace_t *objspace, page_compare_func_t *comparator)
7772 {
7773  size_t total_pages = heap_eden->total_pages;
7774  size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
7775  struct heap_page *page = 0, **page_list = malloc(size);
7776  int i = 0;
7777 
7778  list_for_each(&heap_eden->pages, page, page_node) {
7779  page_list[i++] = page;
7780  page->pinned_slots = count_pinned(page);
7781  GC_ASSERT(page != NULL);
7782  }
7783  GC_ASSERT(total_pages > 0);
7784  GC_ASSERT((size_t)i == total_pages);
7785 
7786  ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), comparator, NULL);
7787 
7788  return page_list;
7789 }
7790 
/*
 * Compact the eden heap with a two-finger algorithm: a free cursor walks
 * forward looking for empty (T_NONE) slots while a scan cursor walks
 * backward looking for moveable objects; each moveable object found is
 * relocated into the free slot via gc_move().  Pages are visited in the
 * order produced by `comparator`.  Per-type considered/moved statistics
 * are accumulated in objspace->rcompactor.  Returns the list of T_MOVED
 * forwarding objects (Qfalse when nothing moved).
 */
static VALUE
gc_compact_heap(rb_objspace_t *objspace, page_compare_func_t *comparator)
{
    struct heap_cursor free_cursor;
    struct heap_cursor scan_cursor;
    struct heap_page **page_list;
    VALUE moved_list;

    moved_list = Qfalse;
    /* Reset per-type compaction statistics for this run. */
    memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));

    page_list = allocate_page_list(objspace, comparator);

    init_cursors(objspace, &free_cursor, &scan_cursor, page_list);

    /* Two finger algorithm */
    while (not_met(&free_cursor, &scan_cursor)) {
        /* Free cursor movement */

        /* Unpoison free_cursor slot */
        void *free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
        asan_unpoison_object((VALUE)free_cursor.slot, false);

        /* Advance until an empty slot sits under the free cursor. */
        while (BUILTIN_TYPE(free_cursor.slot) != T_NONE && not_met(&free_cursor, &scan_cursor)) {
            /* Re-poison slot if it's not the one we want */
            if (free_slot_poison) {
                GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) == T_NONE);
                asan_poison_object((VALUE)free_cursor.slot);
            }

            advance_cursor(&free_cursor, page_list);

            /* Unpoison free_cursor slot */
            free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
            asan_unpoison_object((VALUE)free_cursor.slot, false);
        }

        /* Unpoison scan_cursor slot */
        void *scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
        asan_unpoison_object((VALUE)scan_cursor.slot, false);

        /* Scan cursor movement */
        objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;

        /* Retreat until a moveable object sits under the scan cursor. */
        while (!gc_is_moveable_obj(objspace, (VALUE)scan_cursor.slot) && not_met(&free_cursor, &scan_cursor)) {

            /* Re-poison slot if it's not the one we want */
            if (scan_slot_poison) {
                GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) == T_NONE);
                asan_poison_object((VALUE)scan_cursor.slot);
            }

            retreat_cursor(&scan_cursor, page_list);

            /* Unpoison scan_cursor slot */
            scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
            asan_unpoison_object((VALUE)scan_cursor.slot, false);

            objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;
        }

        /* If the cursors have not crossed, perform one move. */
        if (not_met(&free_cursor, &scan_cursor)) {
            objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;

            GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) == T_NONE);
            GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) != T_NONE);
            GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) != T_MOVED);

            moved_list = gc_move(objspace, (VALUE)scan_cursor.slot, (VALUE)free_cursor.slot, moved_list);

            GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) != T_MOVED);
            GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) != T_NONE);
            GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) == T_MOVED);

            advance_cursor(&free_cursor, page_list);
            retreat_cursor(&scan_cursor, page_list);
        }
    }
    free(page_list);

    return moved_list;
}
7874 
/*
 * Rewrite moved (T_MOVED) element references inside the array `v`.
 * Shared arrays are skipped: their buffer belongs to the shared root and
 * is updated there.
 */
static void
gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
{
    long i, len;

    if (FL_TEST(v, ELTS_SHARED))
        return;

    len = RARRAY_LEN(v);
    if (len > 0) {
        /* NOTE(review): the declaration of `ptr` (the element buffer) appears
         * dropped from this extract -- confirm upstream. */
        for (i = 0; i < len; i++) {
            UPDATE_IF_MOVED(objspace, ptr[i]);
        }
    }
}
7891 
/*
 * Rewrite moved (T_MOVED) instance-variable references inside the plain
 * object `v`.
 */
static void
gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
{
    VALUE *ptr = ROBJECT_IVPTR(v);

    if (ptr) {
        /* NOTE(review): the declaration of `i` and `len` appears dropped
         * from this extract -- confirm upstream. */
        for (i = 0; i < len; i++) {
            UPDATE_IF_MOVED(objspace, ptr[i]);
        }
    }
}
7904 
7905 static int
7906 hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
7907 {
7908  rb_objspace_t *objspace = (rb_objspace_t *)argp;
7909 
7910  if (gc_object_moved_p(objspace, (VALUE)*key)) {
7911  *key = rb_gc_location((VALUE)*key);
7912  }
7913 
7914  if (gc_object_moved_p(objspace, (VALUE)*value)) {
7915  *value = rb_gc_location((VALUE)*value);
7916  }
7917 
7918  return ST_CONTINUE;
7919 }
7920 
7921 static int
7922 hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
7923 {
7924  rb_objspace_t *objspace;
7925 
7926  objspace = (rb_objspace_t *)argp;
7927 
7928  if (gc_object_moved_p(objspace, (VALUE)key)) {
7929  return ST_REPLACE;
7930  }
7931 
7932  if (gc_object_moved_p(objspace, (VALUE)value)) {
7933  return ST_REPLACE;
7934  }
7935  return ST_CONTINUE;
7936 }
7937 
7938 static int
7939 hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
7940 {
7941  rb_objspace_t *objspace = (rb_objspace_t *)argp;
7942 
7943  if (gc_object_moved_p(objspace, (VALUE)*value)) {
7944  *value = rb_gc_location((VALUE)*value);
7945  }
7946 
7947  return ST_CONTINUE;
7948 }
7949 
7950 static int
7951 hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
7952 {
7953  rb_objspace_t *objspace;
7954 
7955  objspace = (rb_objspace_t *)argp;
7956 
7957  if (gc_object_moved_p(objspace, (VALUE)value)) {
7958  return ST_REPLACE;
7959  }
7960  return ST_CONTINUE;
7961 }
7962 
7963 static void
7964 gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
7965 {
7966  if (!tbl || tbl->num_entries == 0) return;
7967 
7968  if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
7969  rb_raise(rb_eRuntimeError, "hash modified during iteration");
7970  }
7971 }
7972 
7973 static void
7974 gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
7975 {
7976  if (!tbl || tbl->num_entries == 0) return;
7977 
7978  if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
7979  rb_raise(rb_eRuntimeError, "hash modified during iteration");
7980  }
7981 }
7982 
/* Update MOVED references in an st_table */
/* NOTE(review): the signature line (function name and parameter list)
 * appears dropped from this extract; the body reads a parameter `ptr`
 * of type st_table * -- confirm against the upstream source. */
void
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_update_table_refs(objspace, ptr);
}
7990 
/* Rewrite moved key and value references inside the Ruby Hash `v`,
 * reusing the st-style replace callbacks. */
static void
gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
{
    rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
}
7996 
/*
 * Rewrite moved references held by a method entry: its owner and defined
 * class, plus references specific to the definition type (iseq, attr
 * location, bound proc, alias/refinement targets).
 */
static void
gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
{
    rb_method_definition_t *def = me->def;

    UPDATE_IF_MOVED(objspace, me->owner);
    UPDATE_IF_MOVED(objspace, me->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
            }
            TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
            break;
          /* NOTE(review): a case label appears dropped here in this extract. */
          case VM_METHOD_TYPE_IVAR:
            UPDATE_IF_MOVED(objspace, def->body.attr.location);
            break;
          /* NOTE(review): the bmethod case label appears dropped here. */
            UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
            break;
          case VM_METHOD_TYPE_ALIAS:
            /* NOTE(review): the alias-target update appears dropped here. */
            return;
          /* NOTE(review): the refined case label and related update appear
           * dropped here in this extract. */
            UPDATE_IF_MOVED(objspace, def->body.refined.owner);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          /* NOTE(review): additional no-op case labels appear dropped here. */
          case VM_METHOD_TYPE_UNDEF:
            break;
        }
    }
}
8037 
8038 static void
8039 gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
8040 {
8041  long i;
8042 
8043  for (i=0; i<n; i++) {
8044  UPDATE_IF_MOVED(objspace, values[i]);
8045  }
8046 }
8047 
/*
 * Rewrite moved references held by an IMEMO object, dispatching on its
 * concrete imemo type.
 */
static void
gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_env:
        {
            rb_env_t *env = (rb_env_t *)obj;
            TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
            UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
            gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
        }
        break;
      case imemo_cref:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass);
        TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
        break;
      case imemo_svar:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
        break;
      case imemo_throw_data:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        break;
      case imemo_ifunc:
        break;
      case imemo_memo:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
        break;
      case imemo_ment:
        gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        break;
      case imemo_iseq:
        /* NOTE(review): the iseq-updating call appears dropped from this extract. */
        break;
      case imemo_ast:
        /* NOTE(review): the AST-updating call appears dropped from this extract. */
        break;
      case imemo_parser_strterm:
      case imemo_tmpbuf:
        break;
      default:
        rb_bug("not reachable %d", imemo_type(obj));
        break;
    }
}
8097 
8098 static enum rb_id_table_iterator_result
8099 check_id_table_move(ID id, VALUE value, void *data)
8100 {
8101  rb_objspace_t *objspace = (rb_objspace_t *)data;
8102 
8103  if (gc_object_moved_p(objspace, (VALUE)value)) {
8104  return ID_TABLE_REPLACE;
8105  }
8106 
8107  return ID_TABLE_CONTINUE;
8108 }
8109 
/* Returns the new location of an object, if it moved. Otherwise returns
 * the existing location. */
VALUE
/* NOTE(review): the signature line (function name and parameter) appears
 * dropped from this extract; the body reads a VALUE parameter named
 * `value` -- confirm against the upstream source. */
{

    VALUE destination;

    if (!SPECIAL_CONST_P((void *)value)) {
        void *poisoned = asan_poisoned_object_p(value);
        asan_unpoison_object(value, false);

        if (BUILTIN_TYPE(value) == T_MOVED) {
            /* Follow the forwarding pointer left behind by gc_move(). */
            destination = (VALUE)RMOVED(value)->destination;
            GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
        }
        else {
            destination = value;
        }

        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
            asan_poison_object(value);
        }
    }
    else {
        /* Special constants are immediates and never move. */
        destination = value;
    }

    return destination;
}
8142 
8143 static enum rb_id_table_iterator_result
8144 update_id_table(ID *key, VALUE * value, void *data, int existing)
8145 {
8146  rb_objspace_t *objspace = (rb_objspace_t *)data;
8147 
8148  if (gc_object_moved_p(objspace, (VALUE)*value)) {
8149  *value = rb_gc_location((VALUE)*value);
8150  }
8151 
8152  return ID_TABLE_CONTINUE;
8153 }
8154 
8155 static void
8156 update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
8157 {
8158  if (tbl) {
8159  rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
8160  }
8161 }
8162 
8163 static enum rb_id_table_iterator_result
8164 update_const_table(VALUE value, void *data)
8165 {
8166  rb_const_entry_t *ce = (rb_const_entry_t *)value;
8167  rb_objspace_t * objspace = (rb_objspace_t *)data;
8168 
8169  if (gc_object_moved_p(objspace, ce->value)) {
8170  ce->value = rb_gc_location(ce->value);
8171  }
8172 
8173  if (gc_object_moved_p(objspace, ce->file)) {
8174  ce->file = rb_gc_location(ce->file);
8175  }
8176 
8177  return ID_TABLE_CONTINUE;
8178 }
8179 
8180 static void
8181 update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
8182 {
8183  if (!tbl) return;
8184  rb_id_table_foreach_values(tbl, update_const_table, objspace);
8185 }
8186 
8187 static void
8188 update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
8189 {
8190  while (entry) {
8191  UPDATE_IF_MOVED(objspace, entry->klass);
8192  entry = entry->next;
8193  }
8194 }
8195 
/* Rewrite moved references inside a class's extension struct: the origin
 * iclass, the refined class, and every subclass entry. */
static void
update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
{
    UPDATE_IF_MOVED(objspace, ext->origin_);
    UPDATE_IF_MOVED(objspace, ext->refined_class);
    update_subclass_entries(objspace, ext->subclasses);
}
8203 
/*
 * Rewrite every reference stored inside `obj` that points at a moved
 * (T_MOVED) object with its new address, dispatching on the object's
 * built-in type.  The klass pointer is updated last.
 */
static void
gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
{
    RVALUE *any = RANY(obj);

    gc_report(4, objspace, "update-refs: %p ->", (void *)obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        if (!RCLASS_EXT(obj)) break;
        update_m_tbl(objspace, RCLASS_M_TBL(obj));
        gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
        update_class_ext(objspace, RCLASS_EXT(obj));
        update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        break;

      case T_ICLASS:
        if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
            update_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        if (!RCLASS_EXT(obj)) break;
        if (RCLASS_IV_TBL(obj)) {
            gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
        }
        update_class_ext(objspace, RCLASS_EXT(obj));
        update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        break;

      case T_IMEMO:
        gc_ref_update_imemo(objspace, obj);
        return;

      case T_NIL:
      case T_FIXNUM:
      case T_NODE:
      case T_MOVED:
      case T_NONE:
        /* These can't move */
        return;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
        }
        else {
            gc_ref_update_array(objspace, obj);
        }
        break;

      case T_HASH:
        gc_ref_update_hash(objspace, obj);
        UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
            UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
        }
        break;

      case T_DATA:
        /* Call the compaction callback, if it exists */
        {
            void *const ptr = DATA_PTR(obj);
            if (ptr) {
                if (RTYPEDDATA_P(obj)) {
                    RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
                    if (compact_func) (*compact_func)(ptr);
                }
            }
        }
        break;

      case T_OBJECT:
        gc_ref_update_object(objspace, obj);
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
            /* NOTE(review): one fptr field update appears dropped here in this extract. */
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
        }
        break;
      case T_REGEXP:
        UPDATE_IF_MOVED(objspace, any->as.regexp.src);
        break;

      case T_SYMBOL:
        if (DYNAMIC_SYM_P((VALUE)any)) {
            UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
        }
        break;

      case T_FLOAT:
      case T_BIGNUM:
        break;

      case T_MATCH:
        UPDATE_IF_MOVED(objspace, any->as.match.regexp);

        if (any->as.match.str) {
            UPDATE_IF_MOVED(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        UPDATE_IF_MOVED(objspace, any->as.rational.num);
        UPDATE_IF_MOVED(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        UPDATE_IF_MOVED(objspace, any->as.complex.real);
        UPDATE_IF_MOVED(objspace, any->as.complex.imag);

        break;

      case T_STRUCT:
        {
            long i, len = RSTRUCT_LEN(obj);
            /* NOTE(review): the declaration of `ptr` (the member buffer)
             * appears dropped here in this extract. */

            for (i = 0; i < len; i++) {
                UPDATE_IF_MOVED(objspace, ptr[i]);
            }
        }
        break;
      default:
#if GC_DEBUG
        /* NOTE(review): debug reporting lines appear dropped here in this extract. */
        rb_bug("unreachable");
#endif
        break;

    }

    UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);

    gc_report(4, objspace, "update-refs: %p <-", (void *)obj);
}
8355 
/*
 * Per-page callback for the reference-updating pass: rebuild the page's
 * freelist from T_NONE slots and update the references held by every
 * live object between vstart and vend.  Always returns 0 (continue).
 */
static int
gc_ref_update(void *vstart, void *vend, size_t stride, void * data)
{
    rb_objspace_t * objspace;
    struct heap_page *page;
    short free_slots = 0;

    VALUE v = (VALUE)vstart;
    objspace = (rb_objspace_t *)data;
    page = GET_HEAP_PAGE(v);
    /* Reset the page freelist; it is rebuilt from T_NONE slots below. */
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    page->freelist = NULL;
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    /* NOTE(review): page flag-reset lines appear dropped here in this extract. */

    /* For each object on the page */
    for (; v != (VALUE)vend; v += stride) {
        if (!SPECIAL_CONST_P(v)) {
            void *poisoned = asan_poisoned_object_p(v);
            asan_unpoison_object(v, false);

            switch (BUILTIN_TYPE(v)) {
              case T_NONE:
                /* Empty slot: thread it back onto the page freelist. */
                heap_page_add_freeobj(objspace, page, v);
                free_slots++;
                break;
              case T_MOVED:
                break;
              case T_ZOMBIE:
                break;
              default:
                if (RVALUE_WB_UNPROTECTED(v)) {
                    /* NOTE(review): a bitmap update appears dropped here. */
                }
                if (RVALUE_PAGE_MARKING(page, v)) {
                    /* NOTE(review): a bitmap update appears dropped here. */
                }
                gc_update_object_references(objspace, v);
            }

            if (poisoned) {
                /* NOTE(review): an assertion appears dropped here in this extract. */
                asan_poison_object(v);
            }
        }
    }

    page->free_slots = free_slots;
    return 0;
}
8407 
8409 #define global_symbols ruby_global_symbols
8410 
/*
 * After compaction has moved objects, rewrite every reference in the
 * heap and in the VM-global tables to point at the new locations.
 * NOTE(review): a declaration line is missing from this extraction —
 * `ec` is used below but never declared here (presumably
 * `rb_execution_context_t *ec = GET_EC();`); confirm upstream gc.c.
 */
static void
gc_update_references(rb_objspace_t * objspace)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    /* Fix references inside every heap object (and rebuild freelists). */
    objspace_each_objects_without_setup(objspace, gc_ref_update, objspace);
    /* NOTE(review): lines updating VM/global references appear to be
     * missing here (extraction gap) — confirm upstream. */
    global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
    /* Global lookup tables hold raw VALUEs and must be rewritten too. */
    gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
    gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
    gc_update_table_refs(objspace, global_symbols.str_sym);
    gc_update_table_refs(objspace, finalizer_table);
}
8427 
8428 static VALUE type_sym(size_t type);
8429 
8430 static VALUE
8431 gc_compact_stats(rb_objspace_t *objspace)
8432 {
8433  size_t i;
8434  VALUE h = rb_hash_new();
8435  VALUE considered = rb_hash_new();
8436  VALUE moved = rb_hash_new();
8437 
8438  for (i=0; i<T_MASK; i++) {
8439  rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
8440  }
8441 
8442  for (i=0; i<T_MASK; i++) {
8443  rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
8444  }
8445 
8446  rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
8447  rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
8448 
8449  return h;
8450 }
8451 
8452 static void gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier);
8453 
8454 static void
8455 gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8456 {
8457 
8458  objspace->flags.during_compacting = TRUE;
8459  {
8460  /* pin objects referenced by maybe pointers */
8461  garbage_collect(objspace, GPR_DEFAULT_REASON);
8462  /* compact */
8463  gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);
8464  }
8465  objspace->flags.during_compacting = FALSE;
8466 }
8467 
8468 static VALUE
8469 rb_gc_compact(rb_execution_context_t *ec, VALUE self)
8470 {
8471  rb_objspace_t *objspace = &rb_objspace;
8472  if (dont_gc) return Qnil;
8473 
8474  gc_compact(objspace, FALSE, FALSE, FALSE);
8475  return gc_compact_stats(objspace);
8476 }
8477 
8478 static void
8479 root_obj_check_moved_i(const char *category, VALUE obj, void *data)
8480 {
8481  if (gc_object_moved_p(&rb_objspace, obj)) {
8482  rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
8483  }
8484 }
8485 
8486 static void
8487 reachable_object_check_moved_i(VALUE ref, void *data)
8488 {
8489  VALUE parent = (VALUE)data;
8490  if (gc_object_moved_p(&rb_objspace, ref)) {
8491  rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
8492  }
8493 }
8494 
/*
 * Page iterator used by the compaction verifier: for every live,
 * non-moved slot, walk its outgoing references and assert none of them
 * point at a T_MOVED slot.  Always returns 0 (continue iteration).
 */
static int
heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
{
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (gc_object_moved_p(&rb_objspace, v)) {
            /* Moved object still on the heap, something may have a reference. */
        }
        else {
            void *poisoned = asan_poisoned_object_p(v);
            asan_unpoison_object(v, false);

            switch (BUILTIN_TYPE(v)) {
              case T_NONE:
              case T_ZOMBIE:
                break;
              default:
                /* Check each reference reachable from this object. */
                rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
            }

            if (poisoned) {
                /* NOTE(review): one line is missing from this extraction
                 * here (likely an assertion) — confirm upstream gc.c. */
                asan_poison_object(v);
            }
        }
    }

    return 0;
}
8524 
8525 static VALUE
8526 gc_check_references_for_moved(rb_objspace_t *objspace)
8527 {
8528  objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
8529  objspace_each_objects(objspace, heap_check_moved_i, NULL);
8530  return Qnil;
8531 }
8532 
/*
 * The heart of compaction, run after the pinning GC in gc_compact():
 * moves objects, updates all references, recycles the vacated T_MOVED
 * slots, and rebuilds the eden free-page list.
 *
 * use_toward_empty  - move objects toward the emptiest pages
 * use_double_pages  - double the heap first so everything can move
 * use_verifier      - run internal-consistency checks before and after
 */
static void
gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
{
    if (0) fprintf(stderr, "gc_compact_after_gc: %d,%d,%d\n", use_toward_empty, use_double_pages, use_verifier);

    mjit_gc_start_hook(); // prevent MJIT from running while moving pointers related to ISeq

    objspace->profile.compact_count++;

    if (use_verifier) {
        gc_verify_internal_consistency(objspace);
    }

    if (use_double_pages) {
        /* Double heap size */
        heap_add_pages(objspace, heap_eden, heap_allocated_pages);
    }

    VALUE moved_list_head;
    /* Disable GC while pointers are being rewritten; restore afterwards. */
    VALUE disabled = rb_objspace_gc_disable(objspace);

    if (use_toward_empty) {
        moved_list_head = gc_compact_heap(objspace, compare_free_slots);
    }
    else {
        moved_list_head = gc_compact_heap(objspace, compare_pinned);
    }
    heap_eden->freelist = NULL;

    gc_update_references(objspace);
    if (!RTEST(disabled)) rb_objspace_gc_enable(objspace);

    if (use_verifier) {
        gc_check_references_for_moved(objspace);
    }

    /* NOTE(review): lines missing from this extraction here — confirm
     * upstream gc.c for what ran between the check and these resets. */
    heap_eden->free_pages = NULL;
    heap_eden->using_page = NULL;

    /* For each moved slot */
    while (moved_list_head) {
        VALUE next_moved;
        struct heap_page *page;

        page = GET_HEAP_PAGE(moved_list_head);
        next_moved = RMOVED(moved_list_head)->next;

        /* clear the memory for that moved slot */
        RMOVED(moved_list_head)->flags = 0;
        RMOVED(moved_list_head)->destination = 0;
        RMOVED(moved_list_head)->next = 0;
        page->free_slots++;
        heap_page_add_freeobj(objspace, page, moved_list_head);

        if (page->free_slots == page->total_slots && heap_pages_freeable_pages > 0) {
            /* NOTE(review): one line missing from this extraction here
             * (presumably decrementing heap_pages_freeable_pages) —
             * confirm upstream. */
            heap_unlink_page(objspace, heap_eden, page);
            heap_add_page(objspace, heap_tomb, page);
        }
        objspace->profile.total_freed_objects++;
        moved_list_head = next_moved;
    }

    /* Add any eden pages with free slots back to the free pages list */
    struct heap_page *page = NULL;
    list_for_each(&heap_eden->pages, page, page_node) {
        if (page->free_slots > 0) {
            heap_add_freepage(heap_eden, page);
        } else {
            page->free_next = NULL;
        }
    }

    /* Set up "using_page" if we have any pages with free slots */
    if (heap_eden->free_pages) {
        heap_eden->using_page = heap_eden->free_pages;
        heap_eden->free_pages = heap_eden->free_pages->free_next;
    }

    if (use_verifier) {
        gc_verify_internal_consistency(objspace);
    }

    mjit_gc_exit_hook(); // unlock MJIT here, because `rb_gc()` calls `mjit_gc_start_hook()` again.
}
8620 
8621 /*
8622  * call-seq:
8623  * GC.verify_compaction_references(toward: nil, double_heap: nil) -> nil
8624  *
8625  * Verify compaction reference consistency.
8626  *
8627  * This method is implementation specific. During compaction, objects that
8628  * were moved are replaced with T_MOVED objects. No object should have a
8629  * reference to a T_MOVED object after compaction.
8630  *
8631  * This function doubles the heap to ensure room to move all objects,
8632  * compacts the heap to make sure everything moves, updates all references,
8633  * then performs a full GC. If any object contains a reference to a T_MOVED
8634  * object, that object should be pushed on the mark stack, and will
8635  * make a SEGV.
8636  */
8637 static VALUE
8638 gc_verify_compaction_references(int argc, VALUE *argv, VALUE mod)
8639 {
8640  rb_objspace_t *objspace = &rb_objspace;
8641  int use_toward_empty = FALSE;
8642  int use_double_pages = FALSE;
8643 
8644  if (dont_gc) return Qnil;
8645 
8646  VALUE opt = Qnil;
8647  static ID keyword_ids[2];
8648  VALUE kwvals[2];
8649 
8650  kwvals[1] = Qtrue;
8651 
8652  rb_scan_args(argc, argv, "0:", &opt);
8653 
8654  if (!NIL_P(opt)) {
8655  if (!keyword_ids[0]) {
8656  keyword_ids[0] = rb_intern("toward");
8657  keyword_ids[1] = rb_intern("double_heap");
8658  }
8659 
8660  rb_get_kwargs(opt, keyword_ids, 0, 2, kwvals);
8661  if (kwvals[0] != Qundef && rb_intern("empty") == rb_sym2id(kwvals[0])) {
8662  use_toward_empty = TRUE;
8663  }
8664  if (kwvals[1] != Qundef && RTEST(kwvals[1])) {
8665  use_double_pages = TRUE;
8666  }
8667  }
8668 
8669  gc_compact(objspace, use_toward_empty, use_double_pages, TRUE);
8670  return gc_compact_stats(objspace);
8671 }
8672 
/*
 * Trigger a full GC and return nil.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_gc_start(void)`) — confirm upstream gc.c.
 */
VALUE
{
    rb_gc();
    return Qnil;
}
8679 
8680 void
8681 rb_gc(void)
8682 {
8683  rb_objspace_t *objspace = &rb_objspace;
8684  int reason = GPR_DEFAULT_REASON;
8685  garbage_collect(objspace, reason);
8686 }
8687 
/*
 * Report whether a GC is currently in progress.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_during_gc(void)`) — confirm upstream gc.c.
 */
int
{
    /* during_gc is a macro that reads state through `objspace`. */
    rb_objspace_t *objspace = &rb_objspace;
    return during_gc;
}
8694 
#if RGENGC_PROFILE >= 2

static const char *type_name(int type, VALUE obj);

/*
 * Store a per-object-type count table into +hash+ under the key +name+.
 * NOTE(review): two body lines are missing from this extraction (the
 * declaration of `result` and the per-type insertion into it), so the
 * visible loop appears to discard `type` — confirm upstream gc.c.
 */
static void
gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    int i;
    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);
    }
    rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
}
#endif
8711 
/*
 * Public C API: total number of GCs since process start.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_gc_count(void)`) — confirm upstream gc.c.
 */
size_t
{
    return rb_objspace.profile.count;
}
8717 
8718 static VALUE
8719 gc_count(rb_execution_context_t *ec, VALUE self)
8720 {
8721  return SIZET2NUM(rb_gc_count());
8722 }
8723 
/*
 * Decode a latest_gc_info flags word into Ruby values.
 *
 * hash_or_key - either a Symbol (return just that entry's value) or a
 *               Hash (fill it with every entry); anything else raises
 *               TypeError.
 * orig_flags  - flags to decode; 0 means "use the objspace's latest".
 *
 * Returns the single value for a Symbol key, or the filled hash.
 * Raises ArgumentError for an unknown Symbol key.
 */
static VALUE
gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
{
    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
#endif
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
    VALUE hash = Qnil, key = Qnil;
    VALUE major_by;
    VALUE flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    /* Lazily intern all the result symbols on first call. */
    if (sym_major_by == Qnil) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
        S(major_by);
        S(gc_by);
        S(immediate_sweep);
        S(have_finalizer);
        S(state);

        S(stress);
        S(nofree);
        S(oldgen);
        S(shady);
        S(force);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc);
#endif
        S(newobj);
        S(malloc);
        S(method);
        S(capi);

        S(none);
        S(marking);
        S(sweeping);
#undef S
    }

/* Either return the single requested entry or insert it into the hash. */
#define SET(name, attr) \
    if (key == sym_##name) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));

    major_by =
        (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
        (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
        (flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
        (flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
        (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
        Qnil;
    SET(major_by, major_by);

    SET(gc_by,
        (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
        (flags & GPR_FLAG_MALLOC) ? sym_malloc :
        (flags & GPR_FLAG_METHOD) ? sym_method :
        (flags & GPR_FLAG_CAPI)   ? sym_capi :
        (flags & GPR_FLAG_STRESS) ? sym_stress :
        Qnil
    );

    SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
    SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);

    /* Current GC phase only makes sense for the live (orig_flags == 0) query. */
    if (orig_flags == 0) {
        SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
                   gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
    }
#undef SET

    if (!NIL_P(key)) {/* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

    return hash;
}
8816 
/*
 * Public C API wrapper around gc_info_decode() for the live GC info.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_gc_latest_gc_info(VALUE key)`) — confirm upstream.
 */
VALUE
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_info_decode(objspace, key, 0);
}
8823 
8824 static VALUE
8825 gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
8826 {
8827  rb_objspace_t *objspace = &rb_objspace;
8828 
8829  if (NIL_P(arg)) {
8830  arg = rb_hash_new();
8831  }
8832  else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
8833  rb_raise(rb_eTypeError, "non-hash or symbol given");
8834  }
8835 
8836  return gc_info_decode(objspace, arg, 0);
8837 }
8838 
8857 #if USE_RGENGC
8865 #if RGENGC_ESTIMATE_OLDMALLOC
8868 #endif
8869 #if RGENGC_PROFILE
8870  gc_stat_sym_total_generated_normal_object_count,
8871  gc_stat_sym_total_generated_shady_object_count,
8872  gc_stat_sym_total_shade_operation_count,
8873  gc_stat_sym_total_promoted_count,
8874  gc_stat_sym_total_remembered_normal_object_count,
8875  gc_stat_sym_total_remembered_shady_object_count,
8876 #endif
8877 #endif
8879 };
8880 
8891 #if USE_RGENGC
8896 #endif
8901 #if RGENGC_ESTIMATE_OLDMALLOC
8904 #endif
8906 };
8907 
8908 static VALUE gc_stat_symbols[gc_stat_sym_last];
8909 static VALUE gc_stat_compat_symbols[gc_stat_compat_sym_last];
8910 static VALUE gc_stat_compat_table;
8911 
/*
 * One-time lazy initialization of the GC.stat symbol tables: the
 * current key symbols, the obsolete (Ruby 2.1 era) key symbols, and a
 * frozen hidden hash mapping old keys to new ones.
 * NOTE(review): a few S(...) lines are missing from this extraction
 * (numbering gaps suggest at least heap_allocated_pages and
 * heap_allocatable_pages) — confirm upstream gc.c.
 */
static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(heap_sorted_length);
        S(heap_available_slots);
        S(heap_live_slots);
        S(heap_free_slots);
        S(heap_final_slots);
        S(heap_marked_slots);
        S(heap_eden_pages);
        S(heap_tomb_pages);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);
#if USE_RGENGC
        S(minor_gc_count);
        S(major_gc_count);
        S(compact_count);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);
        S(old_objects);
        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);
#endif
#if RGENGC_PROFILE
        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#endif /* USE_RGENGC */
#undef S
/* Obsolete key names kept for the 2.1 compatibility layer. */
#define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(gc_stat_heap_used);
        S(heap_eden_page_length);
        S(heap_tomb_page_length);
        S(heap_increment);
        S(heap_length);
        S(heap_live_slot);
        S(heap_free_slot);
        S(heap_final_slot);
        S(heap_swept_slot);
#if USE_RGEGC
        S(remembered_shady_object);
        S(remembered_shady_object_limit);
        S(old_object);
        S(old_object_limit);
#endif
        S(total_allocated_object);
        S(total_freed_object);
        S(malloc_increase);
        S(malloc_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase);
        S(oldmalloc_limit);
#endif
#undef S

        {
            /* Hidden, frozen old-key -> new-key translation table. */
            VALUE table = gc_stat_compat_table = rb_hash_new();
            rb_obj_hide(table);

            /* compatibility layer for Ruby 2.1 */
#define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
#define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
            rb_hash_aset(table, OLD_SYM(gc_stat_heap_used), NEW_SYM(heap_allocated_pages));
            rb_hash_aset(table, OLD_SYM(heap_eden_page_length), NEW_SYM(heap_eden_pages));
            rb_hash_aset(table, OLD_SYM(heap_tomb_page_length), NEW_SYM(heap_tomb_pages));
            rb_hash_aset(table, OLD_SYM(heap_increment), NEW_SYM(heap_allocatable_pages));
            rb_hash_aset(table, OLD_SYM(heap_length), NEW_SYM(heap_sorted_length));
            rb_hash_aset(table, OLD_SYM(heap_live_slot), NEW_SYM(heap_live_slots));
            rb_hash_aset(table, OLD_SYM(heap_free_slot), NEW_SYM(heap_free_slots));
            rb_hash_aset(table, OLD_SYM(heap_final_slot), NEW_SYM(heap_final_slots));
#if USE_RGEGC
            rb_hash_aset(table, OLD_SYM(remembered_shady_object), NEW_SYM(remembered_wb_unprotected_objects));
            rb_hash_aset(table, OLD_SYM(remembered_shady_object_limit), NEW_SYM(remembered_wb_unprotected_objects_limit));
            rb_hash_aset(table, OLD_SYM(old_object), NEW_SYM(old_objects));
            rb_hash_aset(table, OLD_SYM(old_object_limit), NEW_SYM(old_objects_limit));
#endif
            rb_hash_aset(table, OLD_SYM(total_allocated_object), NEW_SYM(total_allocated_objects));
            rb_hash_aset(table, OLD_SYM(total_freed_object), NEW_SYM(total_freed_objects));
            rb_hash_aset(table, OLD_SYM(malloc_increase), NEW_SYM(malloc_increase_bytes));
            rb_hash_aset(table, OLD_SYM(malloc_limit), NEW_SYM(malloc_increase_bytes_limit));
#if RGENGC_ESTIMATE_OLDMALLOC
            rb_hash_aset(table, OLD_SYM(oldmalloc_increase), NEW_SYM(oldmalloc_increase_bytes));
            rb_hash_aset(table, OLD_SYM(oldmalloc_limit), NEW_SYM(oldmalloc_increase_bytes_limit));
#endif
#undef OLD_SYM
#undef NEW_SYM
            rb_obj_freeze(table);
        }
    }
}
9018 
9019 static VALUE
9020 compat_key(VALUE key)
9021 {
9022  VALUE new_key = rb_hash_lookup(gc_stat_compat_table, key);
9023 
9024  if (!NIL_P(new_key)) {
9025  static int warned = 0;
9026  if (warned == 0) {
9027  rb_warn("GC.stat keys were changed from Ruby 2.1. "
9028  "In this case, you refer to obsolete `%"PRIsVALUE"' (new key is `%"PRIsVALUE"'). "
9029  "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
9030  key, new_key);
9031  warned = 1;
9032  }
9033  }
9034 
9035  return new_key;
9036 }
9037 
9038 static VALUE
9039 default_proc_for_compat_func(RB_BLOCK_CALL_FUNC_ARGLIST(hash, _))
9040 {
9041  VALUE key, new_key;
9042 
9043  Check_Type(hash, T_HASH);
9044  rb_check_arity(argc, 2, 2);
9045  key = argv[1];
9046 
9047  if ((new_key = compat_key(key)) != Qnil) {
9048  return rb_hash_lookup(hash, new_key);
9049  }
9050 
9051  return Qnil;
9052 }
9053 
/*
 * Core of GC.stat / rb_gc_stat.  With a Symbol argument, returns that
 * single counter; with a Hash, fills it with every counter (installing
 * a default proc that resolves obsolete 2.1-era keys) and returns 0.
 * Unknown Symbol keys are retried once through the compat table, then
 * raise ArgumentError.
 * NOTE(review): two SET(...) lines are missing from this extraction
 * (numbering gaps suggest heap_allocated_pages and
 * heap_allocatable_pages) — confirm upstream gc.c.
 */
static size_t
gc_stat_internal(VALUE hash_or_sym)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;

        if (NIL_P(RHASH_IFNONE(hash))) {
            static VALUE default_proc_for_compat = 0;
            if (default_proc_for_compat == 0) { /* TODO: it should be */
                default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
                rb_gc_register_mark_object(default_proc_for_compat);
            }
            rb_hash_set_default_proc(hash, default_proc_for_compat);
        }
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol argument");
    }

/* Either return the single requested counter or insert it into the hash. */
#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

  again:
    SET(count, objspace->profile.count);

    /* implementation dependent counters */
    SET(heap_sorted_length, heap_pages_sorted_length);
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
    SET(heap_final_slots, heap_pages_final_slots);
    SET(heap_marked_slots, objspace->marked_slots);
    SET(heap_eden_pages, heap_eden->total_pages);
    SET(heap_tomb_pages, heap_tomb->total_pages);
    SET(total_allocated_pages, objspace->profile.total_allocated_pages);
    SET(total_freed_pages, objspace->profile.total_freed_pages);
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_freed_objects, objspace->profile.total_freed_objects);
    SET(malloc_increase_bytes, malloc_increase);
    SET(malloc_increase_bytes_limit, malloc_limit);
#if USE_RGENGC
    SET(minor_gc_count, objspace->profile.minor_gc_count);
    SET(major_gc_count, objspace->profile.major_gc_count);
    SET(compact_count, objspace->profile.compact_count);
    SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
    SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
    SET(old_objects, objspace->rgengc.old_objects);
    SET(old_objects_limit, objspace->rgengc.old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
    SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
#endif

#if RGENGC_PROFILE
    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#endif /* USE_RGENGC */
#undef SET

    if (!NIL_P(key)) { /* matched key should return above */
        VALUE new_key;
        /* Retry once with the obsolete-key translation. */
        if ((new_key = compat_key(key)) != Qnil) {
            key = new_key;
            goto again;
        }
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
    if (hash != Qnil) {
        gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
        gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
        gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
        gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
        gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
        gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
    }
#endif

    return 0;
}
9153 
9154 static VALUE
9155 gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
9156 {
9157  if (NIL_P(arg)) {
9158  arg = rb_hash_new();
9159  }
9160  else if (SYMBOL_P(arg)) {
9161  size_t value = gc_stat_internal(arg);
9162  return SIZET2NUM(value);
9163  }
9164  else if (RB_TYPE_P(arg, T_HASH)) {
9165  // ok
9166  }
9167  else {
9168  rb_raise(rb_eTypeError, "non-hash or symbol given");
9169  }
9170 
9171  gc_stat_internal(arg);
9172  return arg;
9173 }
9174 
/*
 * Public C API counterpart of GC.stat: a Symbol key returns the counter
 * value; a Hash is filled in place and 0 is returned.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_gc_stat(VALUE key)`) — confirm upstream gc.c.
 */
size_t
{
    if (SYMBOL_P(key)) {
        size_t value = gc_stat_internal(key);
        return value;
    }
    else {
        gc_stat_internal(key);
        return 0;
    }
}
9187 
/*
 * GC.stress getter: returns the stored stress-mode value.
 * (ruby_gc_stress_mode is a macro that reads through `objspace`, so the
 * local must keep that name.)
 */
static VALUE
gc_stress_get(rb_execution_context_t *ec, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress_mode;
}
9194 
9195 static void
9196 gc_stress_set(rb_objspace_t *objspace, VALUE flag)
9197 {
9198  objspace->flags.gc_stressful = RTEST(flag);
9199  objspace->gc_stress_mode = flag;
9200 }
9201 
9202 static VALUE
9203 gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
9204 {
9205  rb_objspace_t *objspace = &rb_objspace;
9206  gc_stress_set(objspace, flag);
9207  return flag;
9208 }
9209 
/*
 * Public C API: re-enable GC on the global objspace; returns whether it
 * was previously disabled.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_gc_enable(void)`) — confirm upstream gc.c.
 */
VALUE
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_enable(objspace);
}
9216 
/*
 * Clear the dont_gc flag and report the previous state as Qtrue/Qfalse.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_objspace_gc_enable(rb_objspace_t *objspace)`; dont_gc
 * is a macro reading through `objspace`) — confirm upstream gc.c.
 */
VALUE
{
    int old = dont_gc;

    dont_gc = FALSE;
    return old ? Qtrue : Qfalse;
}
9225 
9226 static VALUE
9227 gc_enable(rb_execution_context_t *ec, VALUE _)
9228 {
9229  return rb_gc_enable();
9230 }
9231 
/*
 * Disable GC on the global objspace without finishing any in-progress
 * (lazy) GC work first.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_gc_disable_no_rest(void)`) — confirm upstream gc.c.
 */
VALUE
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_disable_no_rest(objspace);
}
9238 
9239 static VALUE
9240 gc_disable_no_rest(rb_objspace_t *objspace)
9241 {
9242  int old = dont_gc;
9243  dont_gc = TRUE;
9244  return old ? Qtrue : Qfalse;
9245 }
9246 
/*
 * Public C API: disable GC on the global objspace.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_gc_disable(void)`) — confirm upstream gc.c.
 */
VALUE
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_disable(objspace);
}
9253 
/*
 * Finish any in-progress (lazy) GC work, then disable GC; returns the
 * previous disabled state as Qtrue/Qfalse.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `rb_objspace_gc_disable(rb_objspace_t *objspace)`) —
 * confirm upstream gc.c.
 */
VALUE
{
    gc_rest(objspace);
    return gc_disable_no_rest(objspace);
}
9260 
9261 static VALUE
9262 gc_disable(rb_execution_context_t *ec, VALUE _)
9263 {
9264  return rb_gc_disable();
9265 }
9266 
9267 static int
9268 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
9269 {
9270  char *ptr = getenv(name);
9271  ssize_t val;
9272 
9273  if (ptr != NULL && *ptr) {
9274  size_t unit = 0;
9275  char *end;
9276 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
9277  val = strtoll(ptr, &end, 0);
9278 #else
9279  val = strtol(ptr, &end, 0);
9280 #endif
9281  switch (*end) {
9282  case 'k': case 'K':
9283  unit = 1024;
9284  ++end;
9285  break;
9286  case 'm': case 'M':
9287  unit = 1024*1024;
9288  ++end;
9289  break;
9290  case 'g': case 'G':
9291  unit = 1024*1024*1024;
9292  ++end;
9293  break;
9294  }
9295  while (*end && isspace((unsigned char)*end)) end++;
9296  if (*end) {
9297  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
9298  return 0;
9299  }
9300  if (unit > 0) {
9301  if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
9302  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
9303  return 0;
9304  }
9305  val *= unit;
9306  }
9307  if (val > 0 && (size_t)val > lower_bound) {
9308  if (RTEST(ruby_verbose)) {
9309  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
9310  }
9311  *default_value = (size_t)val;
9312  return 1;
9313  }
9314  else {
9315  if (RTEST(ruby_verbose)) {
9316  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
9317  name, val, *default_value, lower_bound);
9318  }
9319  return 0;
9320  }
9321  }
9322  return 0;
9323 }
9324 
9325 static int
9326 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
9327 {
9328  char *ptr = getenv(name);
9329  double val;
9330 
9331  if (ptr != NULL && *ptr) {
9332  char *end;
9333  val = strtod(ptr, &end);
9334  if (!*ptr || *end) {
9335  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
9336  return 0;
9337  }
9338 
9339  if (accept_zero && val == 0.0) {
9340  goto accept;
9341  }
9342  else if (val <= lower_bound) {
9343  if (RTEST(ruby_verbose)) {
9344  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
9345  name, val, *default_value, lower_bound);
9346  }
9347  }
9348  else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
9349  val > upper_bound) {
9350  if (RTEST(ruby_verbose)) {
9351  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
9352  name, val, *default_value, upper_bound);
9353  }
9354  }
9355  else {
9356  accept:
9357  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
9358  *default_value = val;
9359  return 1;
9360  }
9361  }
9362  return 0;
9363 }
9364 
9365 static void
9366 gc_set_initial_pages(void)
9367 {
9368  size_t min_pages;
9369  rb_objspace_t *objspace = &rb_objspace;
9370 
9371  min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
9372  if (min_pages > heap_eden->total_pages) {
9373  heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
9374  }
9375 }
9376 
9377 /*
9378  * GC tuning environment variables
9379  *
9380  * * RUBY_GC_HEAP_INIT_SLOTS
9381  * - Initial allocation slots.
9382  * * RUBY_GC_HEAP_FREE_SLOTS
9383  * - Prepare at least this amount of slots after GC.
9384  * - Allocate slots if there are not enough slots.
9385  * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
9386  * - Allocate slots by this factor.
9387  * - (next slots number) = (current slots number) * (this factor)
9388  * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
9389  * - Allocation rate is limited to this number of slots.
9390  * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
9391  * - Allocate additional pages when the number of free slots is
9392  * lower than the value (total_slots * (this ratio)).
9393  * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
9394  * - Allocate slots to satisfy this formula:
9395  * free_slots = total_slots * goal_ratio
9396  * - In other words, prepare (total_slots * goal_ratio) free slots.
9397  * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
 *   - Allow pages to be freed when the number of free slots is
 *     greater than (total_slots * (this ratio)).
9401  * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
9402  * - Do full GC when the number of old objects is more than R * N
9403  * where R is this factor and
9404  * N is the number of old objects just after last full GC.
9405  *
9406  * * obsolete
9407  * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
9408  * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
9409  *
9410  * * RUBY_GC_MALLOC_LIMIT
9411  * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
9412  * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
9413  *
9414  * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
9415  * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
9416  * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
9417  */
9418 
/*
 * Load all RUBY_GC_* tuning environment variables into gc_params,
 * warning about obsolete variable names and applying the initial heap
 * sizing when the init-slots parameters change.
 * NOTE(review): the declarator line is missing from this extraction
 * (presumably `ruby_gc_set_params(void)`), and the argument
 * continuation line of the GOAL_RATIO call below is also missing —
 * confirm both against upstream gc.c.
 */
void
{
    /* RUBY_GC_HEAP_FREE_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
        /* ok */
    }
    else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
        rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
    }

    /* RUBY_GC_HEAP_INIT_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
        gc_set_initial_pages();
    }
    else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
        rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
        gc_set_initial_pages();
    }

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
                        0.0, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
                        gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
    get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);

    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
        gc_params.malloc_limit_max = SIZE_MAX;
    }
    get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);

#if RGENGC_ESTIMATE_OLDMALLOC
    if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
        rb_objspace_t *objspace = &rb_objspace;
        objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
    }
    get_envparam_size  ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
    get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
#endif
}
9465 
9466 void
9467 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
9468 {
9469  rb_objspace_t *objspace = &rb_objspace;
9470 
9471  if (is_markable_object(objspace, obj)) {
9472  struct mark_func_data_struct mfd;
9473  mfd.mark_func = func;
9474  mfd.data = data;
9475  PUSH_MARK_FUNC_DATA(&mfd);
9476  gc_mark_children(objspace, obj);
9478  }
9479 }
9480 
9482  const char *category;
9483  void (*func)(const char *category, VALUE, void *);
9484  void *data;
9485 };
9486 
9487 static void
9488 root_objects_from(VALUE obj, void *ptr)
9489 {
9490  const struct root_objects_data *data = (struct root_objects_data *)ptr;
9491  (*data->func)(data->category, obj, data->data);
9492 }
9493 
/* Public API: invoke func(category, obj, data) for every GC root object.
 * Thin wrapper supplying the global objspace. */
void
rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace_reachable_objects_from_root(objspace, func, passing_data);
}
9500 
9501 static void
9502 objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
9503 {
9504  struct root_objects_data data;
9505  struct mark_func_data_struct mfd;
9506 
9507  data.func = func;
9508  data.data = passing_data;
9509 
9510  mfd.mark_func = root_objects_from;
9511  mfd.data = &data;
9512 
9513  PUSH_MARK_FUNC_DATA(&mfd);
9514  gc_mark_roots(objspace, &data.category);
9516 }
9517 
9518 /*
9519  ------------------------ Extended allocator ------------------------
9520 */
9521 
9524  const char *fmt;
9526 };
9527 
9528 static void *
9529 gc_vraise(void *ptr)
9530 {
9531  struct gc_raise_tag *argv = ptr;
9532  rb_vraise(argv->exc, argv->fmt, *argv->ap);
9534 }
9535 
/*
 * Raise `exc` with a printf-style message, from whatever thread state the
 * caller happens to be in:
 *  - GVL held: raise directly (does not return);
 *  - native ruby thread without the GVL: reacquire it, then raise;
 *  - foreign (non-ruby) thread: raising is impossible, so print and abort.
 */
static void
gc_raise(VALUE exc, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    struct gc_raise_tag argv = {
        exc, fmt, &ap,
    };

    if (ruby_thread_has_gvl_p()) {
        gc_vraise(&argv);
        UNREACHABLE;
    }
    else if (ruby_native_thread_p()) {
        rb_thread_call_with_gvl(gc_vraise, &argv);
        UNREACHABLE;
    }
    else {
        /* Not in a ruby thread */
        fprintf(stderr, "%s", "[FATAL] ");
        vfprintf(stderr, fmt, ap);
        abort();
    }

    va_end(ap); /* not reached on the raising paths; kept for symmetry */
}
9562 
9563 static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
9564 
/* Raise NoMemoryError for a request whose size_t, reinterpreted as
 * signed, is negative (i.e. an absurdly large allocation). */
static void
negative_size_allocation_error(const char *msg)
{
    gc_raise(rb_eNoMemError, "%s", msg);
}
9570 
/* rb_thread_call_with_gvl-compatible wrapper around rb_memerror(). */
static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0; /* not reached; rb_memerror raises or exits */
}
9577 
/*
 * Report an out-of-memory condition from any thread state:
 * raise NoMemoryError if we hold (or can take) the GVL, otherwise
 * print a fatal message and exit — a foreign thread cannot raise.
 */
static void
ruby_memerror(void)
{
    if (ruby_thread_has_gvl_p()) {
        rb_memerror();
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(ruby_memerror_body, 0);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}
9595 
9596 void
9598 {
9600  rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
9601  VALUE exc;
9602 
9603  if (0) {
9604  // Print out pid, sleep, so you can attach debugger to see what went wrong:
9605  fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
9606  sleep(60);
9607  }
9608 
9609  if (during_gc) gc_exit(objspace, "rb_memerror");
9610 
9611  exc = nomem_error;
9612  if (!exc ||
9614  fprintf(stderr, "[FATAL] failed to allocate memory\n");
9615  exit(EXIT_FAILURE);
9616  }
9617  if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
9618  rb_ec_raised_clear(ec);
9619  }
9620  else {
9623  }
9624  ec->errinfo = exc;
9625  EC_JUMP_TAG(ec, TAG_RAISE);
9626 }
9627 
9628 void *
9629 rb_aligned_malloc(size_t alignment, size_t size)
9630 {
9631  void *res;
9632 
9633 #if defined __MINGW32__
9634  res = __mingw_aligned_malloc(size, alignment);
9635 #elif defined _WIN32
9636  void *_aligned_malloc(size_t, size_t);
9637  res = _aligned_malloc(size, alignment);
9638 #elif defined(HAVE_POSIX_MEMALIGN)
9639  if (posix_memalign(&res, alignment, size) == 0) {
9640  return res;
9641  }
9642  else {
9643  return NULL;
9644  }
9645 #elif defined(HAVE_MEMALIGN)
9646  res = memalign(alignment, size);
9647 #else
9648  char* aligned;
9649  res = malloc(alignment + size + sizeof(void*));
9650  aligned = (char*)res + alignment + sizeof(void*);
9651  aligned -= ((VALUE)aligned & (alignment - 1));
9652  ((void**)aligned)[-1] = res;
9653  res = (void*)aligned;
9654 #endif
9655 
9656  /* alignment must be a power of 2 */
9657  GC_ASSERT(((alignment - 1) & alignment) == 0);
9658  GC_ASSERT(alignment % sizeof(void*) == 0);
9659  return res;
9660 }
9661 
/* Counterpart of rb_aligned_malloc: each branch mirrors the allocation
 * strategy selected there, so the two must stay in sync. */
static void
rb_aligned_free(void *ptr)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
    free(ptr);
#else
    /* fallback scheme: the raw malloc pointer precedes the aligned block */
    free(((void**)ptr)[-1]);
#endif
}
9675 
/* Actual usable size of an allocation when the platform can report it;
 * otherwise fall back to the caller-supplied hint. */
static inline size_t
objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
{
#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
#else
    return hint;
#endif
}
9685 
9690 };
9691 
/*
 * Atomically subtract `sub` from `*var`, clamping at zero rather than
 * wrapping.  CAS retry loop: if the observed value is smaller than `sub`,
 * only subtract what is actually available.
 */
static inline void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
    }
}
9703 
9704 static void
9705 objspace_malloc_gc_stress(rb_objspace_t *objspace)
9706 {
9710 
9712  reason |= GPR_FLAG_FULL_MARK;
9713  }
9714  garbage_collect_with_gvl(objspace, reason);
9715  }
9716 }
9717 
9718 static void
9719 objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
9720 {
9721  if (new_size > old_size) {
9722  ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
9723 #if RGENGC_ESTIMATE_OLDMALLOC
9724  ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
9725 #endif
9726  }
9727  else {
9728  atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
9729 #if RGENGC_ESTIMATE_OLDMALLOC
9730  atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
9731 #endif
9732  }
9733 
9734  if (type == MEMOP_TYPE_MALLOC) {
9735  retry:
9738  gc_rest(objspace); /* gc_rest can reduce malloc_increase */
9739  goto retry;
9740  }
9741  garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
9742  }
9743  }
9744 
9745 #if MALLOC_ALLOCATED_SIZE
9746  if (new_size >= old_size) {
9747  ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
9748  }
9749  else {
9750  size_t dec_size = old_size - new_size;
9751  size_t allocated_size = objspace->malloc_params.allocated_size;
9752 
9753 #if MALLOC_ALLOCATED_SIZE_CHECK
9754  if (allocated_size < dec_size) {
9755  rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
9756  }
9757 #endif
9758  atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
9759  }
9760 
9761  if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
9762  mem,
9763  type == MEMOP_TYPE_MALLOC ? "malloc" :
9764  type == MEMOP_TYPE_FREE ? "free " :
9765  type == MEMOP_TYPE_REALLOC ? "realloc": "error",
9766  (int)new_size, (int)old_size);
9767 
9768  switch (type) {
9769  case MEMOP_TYPE_MALLOC:
9770  ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
9771  break;
9772  case MEMOP_TYPE_FREE:
9773  {
9774  size_t allocations = objspace->malloc_params.allocations;
9775  if (allocations > 0) {
9776  atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
9777  }
9778 #if MALLOC_ALLOCATED_SIZE_CHECK
9779  else {
9780  GC_ASSERT(objspace->malloc_params.allocations > 0);
9781  }
9782 #endif
9783  }
9784  break;
9785  case MEMOP_TYPE_REALLOC: /* ignore */ break;
9786  }
9787 #endif
9788 }
9789 
/* Bookkeeping header prepended to every xmalloc'ed region when
 * CALC_EXACT_MALLOC_SIZE is enabled; with the DETAILS flag it also
 * records the GC generation and the allocating call site. */
struct malloc_obj_info { /* 4 words */
    size_t size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    size_t gen;
    const char *file;
    size_t line;
#endif
};
9798 
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
/* Call-site bookkeeping filled in by the xmalloc wrapper macros. */
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;
#endif
9803 
9804 static inline size_t
9805 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
9806 {
9807  if (size == 0) size = 1;
9808 
9809 #if CALC_EXACT_MALLOC_SIZE
9810  size += sizeof(struct malloc_obj_info);
9811 #endif
9812 
9813  return size;
9814 }
9815 
/*
 * Post-allocation bookkeeping: record the (usable) size in the malloc
 * accounting, and when exact-size tracking is on, fill in the header and
 * return the user-visible pointer just past it.
 */
static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = objspace->profile.count;
        info->file = ruby_malloc_info_file;
        info->line = info->file ? ruby_malloc_info_line : 0;
#else
        info->file = NULL;
#endif
        mem = info + 1; /* hide the header from the caller */
    }
#endif

    return mem;
}
9839 
/*
 * Attempt `alloc`; if it fails, run a full immediate GC and retry once.
 * Calls ruby_memerror() (which does not return normally) if the retry
 * also fails.  NOTE: `alloc` may be evaluated twice, so it must be a
 * repeatable assignment expression such as `mem = malloc(size)`.
 */
#define TRY_WITH_GC(alloc) do { \
        objspace_malloc_gc_stress(objspace); \
	if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
                GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
                GPR_FLAG_MALLOC) || \
	     !(alloc))) { \
	    ruby_memerror(); \
	} \
    } while (0)
9850 
/* these shouldn't be called directly.
 * objspace_* functions do not check allocation size.
 */
static void *
objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    /* normalize size (0 -> 1, plus header space when accounting) */
    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(mem = malloc(size));
    RB_DEBUG_COUNTER_INC(heap_xmalloc);
    return objspace_malloc_fixup(objspace, mem, size);
}
9864 
/* count * elsize with overflow detection; raises ArgumentError on
 * overflow rather than silently wrapping. */
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    return size_mul_or_raise(count, elsize, rb_eArgError);
}
9870 
/*
 * realloc() counterpart of objspace_xmalloc0: resize `ptr` from `old_size`
 * to `new_size` bytes while keeping the objspace malloc accounting in
 * sync.  Like the other objspace_* functions it does not validate sizes.
 */
static void *
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    void *mem;

    /* realloc(NULL, n) degenerates to malloc(n) */
    if (!ptr) return objspace_xmalloc0(objspace, new_size);

    /*
     * The behavior of realloc(ptr, 0) is implementation defined.
     * Therefore we don't use realloc(ptr, 0) for portability reason.
     * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
     */
    if (new_size == 0) {
        if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
            /*
             * - OpenBSD's malloc(3) man page says that when 0 is passed, it
             *   returns a non-NULL pointer to an access-protected memory page.
             *   The returned pointer cannot be read / written at all, but
             *   still be a valid argument of free().
             *
             *   https://man.openbsd.org/malloc.3
             *
             * - Linux's malloc(3) man page says that it _might_ perhaps return
             *   a non-NULL pointer when its argument is 0.  That return value
             *   is safe (and is expected) to be passed to free().
             *
             *   http://man7.org/linux/man-pages/man3/malloc.3.html
             *
             * - As I read the implementation jemalloc's malloc() returns fully
             *   normal 16 bytes memory region when its argument is 0.
             *
             * - As I read the implementation musl libc's malloc() returns
             *   fully normal 32 bytes memory region when its argument is 0.
             *
             * - Other malloc implementations can also return non-NULL.
             */
            objspace_xfree(objspace, ptr, old_size);
            return mem;
        }
        else {
            /*
             * It is dangerous to return NULL here, because that could lead to
             * RCE.  Fallback to 1 byte instead of zero.
             *
             * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
             */
            new_size = 1;
        }
    }

#if CALC_EXACT_MALLOC_SIZE
    {
        /* operate on the bookkeeping header preceding the user pointer */
        struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
        new_size += sizeof(struct malloc_obj_info);
        ptr = info;
        old_size = info->size;
    }
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);
    TRY_WITH_GC(mem = realloc(ptr, new_size));
    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = new_size;
        mem = info + 1; /* hide the header again */
    }
#endif

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);

    RB_DEBUG_COUNTER_INC(heap_xrealloc);
    return mem;
}
9947 
9948 #if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
9949 
/* Histograms for the malloc_obj_info statistics dumped at process exit:
 * counts/bytes by GC-generation age, counts by power-of-two size bucket,
 * and per-call-site totals keyed by file name. */
#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
static st_table *malloc_info_file_table;
9956 
/* st_foreach callback: print one per-file row as "file<TAB>count<TAB>bytes".
 * data[0] is the allocation count, data[1] the accumulated byte total. */
static int
mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
{
    const char *file = (void *)key;
    const size_t *data = (void *)val;

    fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);

    return ST_CONTINUE;
}
9967 
9968 __attribute__((destructor))
9969 void
9971 {
9972  int i;
9973 
9974  fprintf(stderr, "* malloc_info gen statistics\n");
9975  for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
9976  if (i == MALLOC_INFO_GEN_SIZE-1) {
9977  fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9978  }
9979  else {
9980  fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9981  }
9982  }
9983 
9984  fprintf(stderr, "* malloc_info size statistics\n");
9985  for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
9986  int s = 16 << i;
9987  fprintf(stderr, "%d\t%d\n", (int)s, (int)malloc_info_size[i]);
9988  }
9989  fprintf(stderr, "more\t%d\n", (int)malloc_info_size[i]);
9990 
9991  if (malloc_info_file_table) {
9992  fprintf(stderr, "* malloc_info file statistics\n");
9993  st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
9994  }
9995 }
9996 #else
9997 void
9999 {
10000 }
10001 #endif
10002 
/*
 * free() counterpart of objspace_xmalloc0: release `ptr` and update the
 * malloc accounting.  `old_size` is a hint used only when the platform
 * cannot report usable sizes.  With exact-size tracking enabled, also
 * records age/size/call-site statistics for the exit-time report.
 */
static void
objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
{
    if (!ptr) {
        /*
         * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
         * its first version.  We would better follow.
         */
        return;
    }
#if CALC_EXACT_MALLOC_SIZE
    /* recover the bookkeeping header; it carries the authoritative size */
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
    old_size = info->size;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    {
        /* age of the allocation in GC generations, clamped to the last bucket */
        int gen = (int)(objspace->profile.count - info->gen);
        int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
        int i;

        malloc_info_gen_cnt[gen_index]++;
        malloc_info_gen_size[gen_index] += info->size;

        /* bucket by power-of-two size: 16, 32, ..., then "more" */
        for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
            size_t s = 16 << i;
            if (info->size <= s) {
                malloc_info_size[i]++;
                goto found;
            }
        }
        malloc_info_size[i]++;
      found:;

        {
            /* per-call-site totals: [0] = count, [1] = bytes */
            st_data_t key = (st_data_t)info->file;
            size_t *data;

            if (malloc_info_file_table == NULL) {
                malloc_info_file_table = st_init_numtable_with_size(1024);
            }
            if (st_lookup(malloc_info_file_table, key, (st_data_t *)&data)) {
                /* hit */
            }
            else {
                data = malloc(xmalloc2_size(2, sizeof(size_t)));
                if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
                data[0] = data[1] = 0;
                st_insert(malloc_info_file_table, key, (st_data_t)data);
            }
            data[0] ++;
            data[1] += info->size;
        };
#if 0 /* verbose output */
        if (gen >= 2) {
            if (info->file) {
                fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
            }
            else {
                fprintf(stderr, "free - size:%d, gen:%d\n", (int)info->size, gen);
            }
        }
#endif
    }
#endif
#endif
    old_size = objspace_malloc_size(objspace, ptr, old_size);

    free(ptr);
    RB_DEBUG_COUNTER_INC(heap_xfree);

    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
}
10076 
/* Allocate from the global objspace without any size validation. */
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}
10082 
10083 void *
10085 {
10086  if ((ssize_t)size < 0) {
10087  negative_size_allocation_error("too large allocation size");
10088  }
10089  return ruby_xmalloc0(size);
10090 }
10091 
10092 void
10093 ruby_malloc_size_overflow(size_t count, size_t elsize)
10094 {
10096  "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
10097  count, elsize);
10098 }
10099 
/* Allocate n*size bytes; xmalloc2_size raises on multiplication overflow. */
void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}
10105 
/* calloc() counterpart of objspace_xmalloc0: zero-initialized allocation
 * with the same GC-retry and accounting behavior. */
static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);
}
10115 
/* Zero-initialized n*size allocation; raises on multiplication overflow. */
void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
}
10121 
#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
/*
 * Public xrealloc carrying a hint of the old allocation size (used for
 * accounting when malloc_usable_size is unavailable).  Rejects requests
 * whose signed interpretation is negative.
 */
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}
10134 
/* xrealloc without an old-size hint (0 = unknown). */
void *
ruby_xrealloc_body(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}
10140 
#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
/*
 * Resize an array from `old_n` to `n` elements of `size` bytes each.
 * xmalloc2_size raises on n*size overflow.
 * NOTE(review): `old_n * size` itself is unchecked — presumably callers
 * only pass previously validated element counts; confirm.
 */
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}
10150 
/* Array xrealloc without an old-count hint (0 = unknown). */
void *
ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}
10156 
#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
/* Free `x`, passing the known allocation size to the accounting layer.
 * NULL is a no-op. */
void
ruby_sized_xfree(void *x, size_t size)
{
    if (x) {
        objspace_xfree(&rb_objspace, x, size);
    }
}
10167 
/* Free without a size hint (0 = unknown). */
void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}
10173 
/* Allocate x*y+z bytes, raising ArgumentError on arithmetic overflow. */
void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}
10180 
/* Reallocate p to x*y+z bytes, raising ArgumentError on overflow. */
void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}
10187 
/* Allocate x*y+z*w bytes, raising ArgumentError on overflow. */
void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}
10194 
/* Zero-initialized x*y+z*w bytes, raising ArgumentError on overflow. */
void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
10201 
10202 /* Mimic ruby_xmalloc, but need not rb_objspace.
10203  * should return pointer suitable for ruby_xfree
10204  */
10205 void *
10207 {
10208  void *mem;
10209 #if CALC_EXACT_MALLOC_SIZE
10210  size += sizeof(struct malloc_obj_info);
10211 #endif
10212  mem = malloc(size);
10213 #if CALC_EXACT_MALLOC_SIZE
10214  if (!mem) {
10215  return NULL;
10216  }
10217  else
10218  /* set 0 for consistency of allocated_size/allocations */
10219  {
10220  struct malloc_obj_info *info = mem;
10221  info->size = 0;
10222 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10223  info->gen = 0;
10224  info->file = NULL;
10225  info->line = 0;
10226 #else
10227  info->file = NULL;
10228 #endif
10229  mem = info + 1;
10230  }
10231 #endif
10232  return mem;
10233 }
10234 
10235 void
10237 {
10238 #if CALC_EXACT_MALLOC_SIZE
10239  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10240  ptr = info;
10241 #endif
10242  free(ptr);
10243 }
10244 
/*
 * Allocate a temporary buffer of `size` bytes whose lifetime is tied to
 * the imemo stored in *store (GC frees it when the imemo dies).
 * `cnt` is recorded so the buffer contents can be marked as VALUEs.
 */
void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    VALUE imemo;
    rb_imemo_tmpbuf_t *tmpbuf;

    /* Keep the order; allocate an empty imemo first then xmalloc, to
     * get rid of potential memory leak */
    imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
    *store = imemo;
    ptr = ruby_xmalloc0(size);
    tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;
    return ptr;
}
10262 
/* Temporary buffer of `len` bytes; rejects negative lengths and counts
 * that overflow when rounded up to whole VALUEs. */
void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}
10274 
10275 void
10276 rb_free_tmp_buffer(volatile VALUE *store)
10277 {
10279  if (s) {
10280  void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
10281  s->cnt = 0;
10282  ruby_xfree(ptr);
10283  }
10284 }
10285 
10286 #if MALLOC_ALLOCATED_SIZE
10287 /*
10288  * call-seq:
10289  * GC.malloc_allocated_size -> Integer
10290  *
10291  * Returns the size of memory allocated by malloc().
10292  *
10293  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
10294  */
10295 
/* Implementation of GC.malloc_allocated_size (call-seq documented above). */
static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}
10301 
10302 /*
10303  * call-seq:
10304  * GC.malloc_allocations -> Integer
10305  *
10306  * Returns the number of malloc() allocations.
10307  *
10308  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
10309  */
10310 
/* Implementation of GC.malloc_allocations (call-seq documented above). */
static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}
10316 #endif
10317 
10318 void
10320 {
10321  rb_objspace_t *objspace = &rb_objspace;
10322  if (diff > 0) {
10323  objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
10324  }
10325  else if (diff < 0) {
10326  objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
10327  }
10328 }
10329 
10330 /*
10331  ------------------------------ WeakMap ------------------------------
10332 */
10333 
/* Internal state of an ObjectSpace::WeakMap instance. */
struct weakmap {
    st_table *obj2wmap;		/* obj -> [ref,...] */
    st_table *wmap2obj;		/* ref -> obj */
    VALUE final;                /* finalizer lambda (wmap_finalize) */
};
10339 
/* Optional (compile-time disabled) eager cleanup: prune dead objects from
 * obj2wmap during marking instead of waiting for finalizers. */
#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0

#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
/* st_foreach callback: drop entries whose value object is no longer live. */
static int
wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (!is_live_object(objspace, obj)) return ST_DELETE;
    return ST_CONTINUE;
}
#endif
10352 
10353 static void
10354 wmap_compact(void *ptr)
10355 {
10356  struct weakmap *w = ptr;
10359  w->final = rb_gc_location(w->final);
10360 }
10361 
10362 static void
10363 wmap_mark(void *ptr)
10364 {
10365  struct weakmap *w = ptr;
10366 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10367  if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
10368 #endif
10370 }
10371 
10372 static int
10373 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
10374 {
10375  VALUE *ptr = (VALUE *)val;
10376  ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
10377  return ST_CONTINUE;
10378 }
10379 
/* dfree hook: release every obj2wmap value array, then both tables.
 * (The `final` lambda is a VALUE and is reclaimed by the GC itself.) */
static void
wmap_free(void *ptr)
{
    struct weakmap *w = ptr;
    st_foreach(w->obj2wmap, wmap_free_map, 0);
    st_free_table(w->obj2wmap);
    st_free_table(w->wmap2obj);
}
10388 
/* st_foreach callback: add one value array's footprint to the running
 * total pointed to by arg ([count, ref...] = count+1 VALUEs). */
static int
wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
    return ST_CONTINUE;
}
10396 
/* dsize hook: struct itself + both st_tables + all obj2wmap value arrays. */
static size_t
wmap_memsize(const void *ptr)
{
    size_t size;
    const struct weakmap *w = ptr;
    size = sizeof(*w);
    size += st_memsize(w->obj2wmap);
    size += st_memsize(w->wmap2obj);
    st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
    return size;
}
10408 
10409 static const rb_data_type_t weakmap_type = {
10410  "weakmap",
10411  {
10412  wmap_mark,
10413  wmap_free,
10414  wmap_memsize,
10415  wmap_compact,
10416  },
10418 };
10419 
10420 extern const struct st_hash_type rb_hashtype_ident;
10421 static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
10422 
10423 static VALUE
10424 wmap_allocate(VALUE klass)
10425 {
10426  struct weakmap *w;
10427  VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
10430  w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
10431  return obj;
10432 }
10433 
10434 static int
10435 wmap_live_p(rb_objspace_t *objspace, VALUE obj)
10436 {
10437  if (!FL_ABLE(obj)) return TRUE;
10438  if (!is_id_value(objspace, obj)) return FALSE;
10439  if (!is_live_object(objspace, obj)) return FALSE;
10440  return TRUE;
10441 }
10442 
/*
 * st_update callback used by wmap_finalize: remove every occurrence of
 * the dying weak reference `wmap` (passed via arg) from an obj2wmap
 * value array laid out as [count, ref0, ref1, ...].
 */
static int
wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    VALUE wmap, *ptr, size, i, j;
    if (!existing) return ST_STOP;
    wmap = (VALUE)arg, ptr = (VALUE *)*value;
    /* compact in place, skipping entries equal to wmap */
    for (i = j = 1, size = ptr[0]; i <= size; ++i) {
        if (ptr[i] != wmap) {
            ptr[j++] = ptr[i];
        }
    }
    if (j == 1) {
        /* no refs left: free the array and delete the table entry */
        ruby_sized_xfree(ptr, i * sizeof(VALUE));
        return ST_DELETE;
    }
    if (j < i) {
        /* shrink to the survivors and update the stored count */
        SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
        ptr[0] = j;
        *value = (st_data_t)ptr;
    }
    return ST_CONTINUE;
}
10465 
/* :nodoc: */
/*
 * Finalizer installed (as a lambda) on every object referenced by a
 * WeakMap.  Receives the dying object's id and scrubs both internal
 * tables of entries that mention it, in either role.
 */
static VALUE
wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
{
    st_data_t orig, wmap, data;
    VALUE obj, *rids, i, size;
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    /* Get reference from object id. */
    if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
        rb_bug("wmap_finalize: objid is not found.");
    }

    /* obj is original referenced object and/or weak reference. */
    /* Case 1: obj was a key — drop its ref array and every ref -> obj entry. */
    orig = (st_data_t)obj;
    if (st_delete(w->obj2wmap, &orig, &data)) {
        rids = (VALUE *)data;
        size = *rids++;
        for (i = 0; i < size; ++i) {
            wmap = (st_data_t)rids[i];
            st_delete(w->wmap2obj, &wmap, NULL);
        }
        ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
    }

    /* Case 2: obj was a value — remove it from its key's ref array. */
    wmap = (st_data_t)obj;
    if (st_delete(w->wmap2obj, &wmap, &orig)) {
        wmap = (st_data_t)obj;
        st_update(w->obj2wmap, orig, wmap_final_func, wmap);
    }
    return self;
}
10499 
10503 };
10504 
10505 static int
10506 wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
10507 {
10508  VALUE str = (VALUE)arg;
10509  VALUE k = (VALUE)key, v = (VALUE)val;
10510 
10511  if (RSTRING_PTR(str)[0] == '#') {
10512  rb_str_cat2(str, ", ");
10513  }
10514  else {
10515  rb_str_cat2(str, ": ");
10516  RSTRING_PTR(str)[0] = '#';
10517  }
10518  k = SPECIAL_CONST_P(k) ? rb_inspect(k) : rb_any_to_s(k);
10519  rb_str_append(str, k);
10520  rb_str_cat2(str, " => ");
10522  rb_str_append(str, v);
10523 
10524  return ST_CONTINUE;
10525 }
10526 
/*
 * WeakMap#inspect: builds "#<ClassName:0xaddr: k => v, ...>".
 * The string deliberately starts with '-' so wmap_inspect_i can use the
 * first byte as a "printed a pair yet?" flag (": " vs ", "); it is
 * forced to '#' before returning.
 */
static VALUE
wmap_inspect(VALUE self)
{
    VALUE str;
    VALUE c = rb_class_name(CLASS_OF(self));
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
    if (w->wmap2obj) {
        st_foreach(w->wmap2obj, wmap_inspect_i, str);
    }
    RSTRING_PTR(str)[0] = '#';
    rb_str_cat2(str, ">");
    return str;
}
10543 
/* st_foreach callback for WeakMap#each: yield [key, obj] pairs whose
 * object is still live. */
static int
wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield_values(2, (VALUE)key, obj);
    }
    return ST_CONTINUE;
}
10554 
/* Iterates over key/object pairs in a weakly referenced object */
static VALUE
wmap_each(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
    return self;
}
10566 
/* st_foreach callback for WeakMap#each_key: yield each key whose object
 * is still live. */
static int
wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield((VALUE)key);
    }
    return ST_CONTINUE;
}
10577 
/* Iterates over keys in a weakly referenced object */
static VALUE
wmap_each_key(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
    return self;
}
10589 
/* st_foreach callback for WeakMap#each_value: yield each object that is
 * still live. */
static int
wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield(obj);
    }
    return ST_CONTINUE;
}
10600 
/* Iterates over values in a weakly referenced object */
static VALUE
wmap_each_value(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
    return self;
}
10612 
10613 static int
10614 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
10615 {
10616  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
10617  rb_objspace_t *objspace = argp->objspace;
10618  VALUE ary = argp->value;
10619  VALUE obj = (VALUE)val;
10620  if (wmap_live_p(objspace, obj)) {
10621  rb_ary_push(ary, (VALUE)key);
10622  }
10623  return ST_CONTINUE;
10624 }
10625 
10626 /* Iterates over keys and objects in a weakly referenced object */
10627 static VALUE
10628 wmap_keys(VALUE self)
10629 {
10630  struct weakmap *w;
10631  struct wmap_iter_arg args;
10632 
10633  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10634  args.objspace = &rb_objspace;
10635  args.value = rb_ary_new();
10636  st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
10637  return args.value;
10638 }
10639 
10640 static int
10641 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
10642 {
10643  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
10644  rb_objspace_t *objspace = argp->objspace;
10645  VALUE ary = argp->value;
10646  VALUE obj = (VALUE)val;
10647  if (wmap_live_p(objspace, obj)) {
10648  rb_ary_push(ary, obj);
10649  }
10650  return ST_CONTINUE;
10651 }
10652 
10653 /* Iterates over values and objects in a weakly referenced object */
10654 static VALUE
10655 wmap_values(VALUE self)
10656 {
10657  struct weakmap *w;
10658  struct wmap_iter_arg args;
10659 
10660  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10661  args.objspace = &rb_objspace;
10662  args.value = rb_ary_new();
10663  st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
10664  return args.value;
10665 }
10666 
/* st_update() callback for wmap_aset(): appends `arg` (a weak-ref key) to
 * the per-object VALUE array stored in obj2wmap.
 * Array layout: ptr[0] = entry count, ptr[1..count] = entries. */
static int
wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
{
    VALUE size, *ptr, *optr;
    if (existing) {
        /* grow by one slot; old allocation held `size` elements
         * (count header + size-1 entries), new one needs size+1 */
        size = (ptr = optr = (VALUE *)*val)[0];
        ++size;
        SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
    }
    else {
        /* fresh entry: header + one slot */
        optr = 0;
        size = 1;
        ptr = ruby_xmalloc0(2 * sizeof(VALUE));
    }
    ptr[0] = size;
    ptr[size] = (VALUE)arg;
    /* when realloc kept the block in place, *val is already correct */
    if (ptr == optr) return ST_STOP;
    *val = (st_data_t)ptr;
    return ST_CONTINUE;
}
10687 
10688 /* Creates a weak reference from the given key to the given value */
10689 static VALUE
10690 wmap_aset(VALUE self, VALUE wmap, VALUE orig)
10691 {
10692  struct weakmap *w;
10693 
10694  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10695  if (FL_ABLE(orig)) {
10696  define_final0(orig, w->final);
10697  }
10698  if (FL_ABLE(wmap)) {
10699  define_final0(wmap, w->final);
10700  }
10701 
10702  st_update(w->obj2wmap, (st_data_t)orig, wmap_aset_update, wmap);
10703  st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
10704  return nonspecial_obj_id(orig);
10705 }
10706 
10707 /* Retrieves a weakly referenced object with the given key */
10708 static VALUE
10709 wmap_aref(VALUE self, VALUE wmap)
10710 {
10711  st_data_t data;
10712  VALUE obj;
10713  struct weakmap *w;
10714  rb_objspace_t *objspace = &rb_objspace;
10715 
10716  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10717  if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
10718  obj = (VALUE)data;
10719  if (!wmap_live_p(objspace, obj)) return Qnil;
10720  return obj;
10721 }
10722 
10723 /* Returns +true+ if +key+ is registered */
10724 static VALUE
10725 wmap_has_key(VALUE self, VALUE key)
10726 {
10727  return NIL_P(wmap_aref(self, key)) ? Qfalse : Qtrue;
10728 }
10729 
10730 /* Returns the number of referenced objects */
10731 static VALUE
10732 wmap_size(VALUE self)
10733 {
10734  struct weakmap *w;
10735  st_index_t n;
10736 
10737  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10738  n = w->wmap2obj->num_entries;
10739 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
10740  return ULONG2NUM(n);
10741 #else
10742  return ULL2NUM(n);
10743 #endif
10744 }
10745 
10746 /*
10747  ------------------------------ GC profiler ------------------------------
10748 */
10749 
10750 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
10751 
/* return sec in user time: process CPU time via clock_gettime(),
 * falling back to getrusage(), then GetProcessTimes() on Windows,
 * and finally 0.0 when nothing is available. */
static double
getrusage_time(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    {
        static int try_clock_gettime = 1;
        struct timespec ts;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
            return ts.tv_sec + ts.tv_nsec * 1e-9;
        }
        else {
            /* once it fails, never try clock_gettime() again */
            try_clock_gettime = 0;
        }
    }
#endif

#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        struct timeval time;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            return time.tv_sec + time.tv_usec * 1e-6;
        }
    }
#endif

#ifdef _WIN32
    {
        FILETIME creation_time, exit_time, kernel_time, user_time;
        ULARGE_INTEGER ui;
        LONG_LONG q;
        double t;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            memcpy(&ui, &user_time, sizeof(FILETIME));
            q = ui.QuadPart / 10L;            /* 100ns ticks -> microseconds */
            t = (DWORD)(q % 1000000L) * 1e-6; /* fractional second part */
            q /= 1000000L;
#ifdef __GNUC__
            t += q;
#else
            /* NOTE(review): presumably works around 64bit->double conversion
             * on old compilers — confirm before simplifying */
            t += (double)(DWORD)(q >> 16) * (1 << 16);
            t += (DWORD)q & ~(~0 << 16);
#endif
            return t;
        }
    }
#endif

    return 0.0;
}
10806 
10807 static inline void
10808 gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
10809 {
10810  if (objspace->profile.run) {
10811  size_t index = objspace->profile.next_index;
10812  gc_profile_record *record;
10813 
10814  /* create new record */
10815  objspace->profile.next_index++;
10816 
10817  if (!objspace->profile.records) {
10819  objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
10820  }
10821  if (index >= objspace->profile.size) {
10822  void *ptr;
10823  objspace->profile.size += 1000;
10824  ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
10825  if (!ptr) rb_memerror();
10826  objspace->profile.records = ptr;
10827  }
10828  if (!objspace->profile.records) {
10829  rb_bug("gc_profile malloc or realloc miss");
10830  }
10831  record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
10832  MEMZERO(record, gc_profile_record, 1);
10833 
10834  /* setup before-GC parameter */
10835  record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
10836 #if MALLOC_ALLOCATED_SIZE
10837  record->allocated_size = malloc_allocated_size;
10838 #endif
10839 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
10840 #ifdef RUSAGE_SELF
10841  {
10842  struct rusage usage;
10843  if (getrusage(RUSAGE_SELF, &usage) == 0) {
10844  record->maxrss = usage.ru_maxrss;
10845  record->minflt = usage.ru_minflt;
10846  record->majflt = usage.ru_majflt;
10847  }
10848  }
10849 #endif
10850 #endif
10851  }
10852 }
10853 
/* Starts the whole-GC timer on the current profile record: resets the
 * accumulated gc_time and stamps the invoke time. */
static inline void
gc_prof_timer_start(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
#endif
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();
    }
}
10866 
/* Returns the CPU seconds elapsed since `time`, clamped at zero so a
 * non-monotonic clock can never yield a negative duration. */
static double
elapsed_time_from(double time)
{
    double elapsed = getrusage_time() - time;
    return elapsed > 0 ? elapsed : 0;
}
10878 
/* Stops the whole-GC timer: stores the elapsed GC time and rebases the
 * invoke time relative to profiler start (profile.invoke_time). */
static inline void
gc_prof_timer_stop(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        record->gc_invoke_time -= objspace->profile.invoke_time;
    }
}
10888 
/* Fires the DTrace GC probe `name` iff that probe is enabled. */
#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

/* Marks the beginning of the mark phase (DTrace probe + detail timer). */
static inline void
gc_prof_mark_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();
    }
#endif
}
10901 
/* Marks the end of the mark phase; converts the stored start stamp into
 * an elapsed duration (only with GC_PROFILE_MORE_DETAIL). */
static inline void
gc_prof_mark_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
    }
#endif
}
10913 
/* Marks the beginning of a sweep step.  The start stamp is only taken
 * when the record already has a gc_time (lazy-sweep accumulation) or
 * when detailed profiling is compiled in. */
static inline void
gc_prof_sweep_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
            objspace->profile.gc_sweep_start_time = getrusage_time();
        }
    }
}
10926 
/* Marks the end of a sweep step and accumulates its duration into the
 * record.
 * NOTE(review): one line inside the GC_PROFILE_MORE_DETAIL region (and
 * possibly one more) was lost when this copy was extracted — confirm
 * against upstream gc.c. */
static inline void
gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (gc_prof_enabled(objspace)) {
        double sweep_time;
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
            /* need to accumulate GC time for lazy sweep after gc() */
            record->gc_time += sweep_time;
        }
        else if (GC_PROFILE_MORE_DETAIL) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
        }

#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
#endif
    }
}
10952 
/* Snapshots the malloc growth counters into the current record
 * (detail-profiling builds only). */
static inline void
gc_prof_set_malloc_info(rb_objspace_t *objspace)
{
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;
    }
#endif
}
10964 
/* Records heap occupancy for this GC run, derived from the allocation
 * and free counters taken at GC start. */
static inline void
gc_prof_set_heap_info(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        /* live = objects ever allocated - objects ever freed (at GC start) */
        size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;

#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;
#endif

        record->heap_total_objects = total;
        record->heap_use_size = live * sizeof(RVALUE);
        record->heap_total_size = total * sizeof(RVALUE);
    }
}
10984 
10985 /*
10986  * call-seq:
10987  * GC::Profiler.clear -> nil
10988  *
10989  * Clears the GC profiler data.
10990  *
10991  */
10992 
10993 static VALUE
10994 gc_profile_clear(VALUE _)
10995 {
10996  rb_objspace_t *objspace = &rb_objspace;
10997  void *p = objspace->profile.records;
10998  objspace->profile.records = NULL;
10999  objspace->profile.size = 0;
11000  objspace->profile.next_index = 0;
11001  objspace->profile.current_record = 0;
11002  if (p) {
11003  free(p);
11004  }
11005  return Qnil;
11006 }
11007 
11008 /*
11009  * call-seq:
11010  * GC::Profiler.raw_data -> [Hash, ...]
11011  *
11012  * Returns an Array of individual raw profile data Hashes ordered
11013  * from earliest to latest by +:GC_INVOKE_TIME+.
11014  *
11015  * For example:
11016  *
11017  * [
11018  * {
11019  * :GC_TIME=>1.3000000000000858e-05,
11020  * :GC_INVOKE_TIME=>0.010634999999999999,
11021  * :HEAP_USE_SIZE=>289640,
11022  * :HEAP_TOTAL_SIZE=>588960,
11023  * :HEAP_TOTAL_OBJECTS=>14724,
11024  * :GC_IS_MARKED=>false
11025  * },
11026  * # ...
11027  * ]
11028  *
11029  * The keys mean:
11030  *
11031  * +:GC_TIME+::
11032  * Time elapsed in seconds for this GC run
11033  * +:GC_INVOKE_TIME+::
11034  * Time elapsed in seconds from startup to when the GC was invoked
11035  * +:HEAP_USE_SIZE+::
11036  * Total bytes of heap used
11037  * +:HEAP_TOTAL_SIZE+::
11038  * Total size of heap in bytes
11039  * +:HEAP_TOTAL_OBJECTS+::
11040  * Total number of objects
11041  * +:GC_IS_MARKED+::
11042  * Returns +true+ if the GC is in mark phase
11043  *
11044  * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
11045  * to the following hash keys:
11046  *
11047  * +:GC_MARK_TIME+::
11048  * +:GC_SWEEP_TIME+::
11049  * +:ALLOCATE_INCREASE+::
11050  * +:ALLOCATE_LIMIT+::
11051  * +:HEAP_USE_PAGES+::
11052  * +:HEAP_LIVE_OBJECTS+::
11053  * +:HEAP_FREE_OBJECTS+::
11054  * +:HAVE_FINALIZE+::
11055  *
11056  */
11057 
/* Implementation of GC::Profiler.raw_data: returns an Array with one
 * Hash per recorded GC run, or nil when the profiler is disabled. */
static VALUE
gc_profile_record_get(VALUE _)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i =0; i < objspace->profile.next_index; i++) {
        gc_profile_record *record = &objspace->profile.records[i];

        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
        /* NOTE(review): hard-coded Qtrue — presumably kept for API
         * compatibility; confirm against upstream history */
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
#endif

#if RGENGC_PROFILE > 0
        rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}
11106 
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

/* Encodes the major-GC reason bits of `flags` into `buff` as a string
 * of single-letter codes (first letter of each reason name), or "-"
 * when no major-GC reason bit is set.  Returns `buff`. */
static char *
gc_profile_dump_major_reason(int flags, char *buff)
{
    int reason = flags & GPR_FLAG_MAJOR_MASK;
    int i = 0;

    if (reason == GPR_FLAG_NONE) {
        buff[0] = '-';
        buff[1] = 0;
    }
    else {
#define C(x, s) \
  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
      buff[i++] = #x[0]; \
      if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
      buff[i] = 0; \
  }
        C(NOFREE, N);
        C(OLDGEN, O);
        C(SHADY, S);
#if RGENGC_ESTIMATE_OLDMALLOC
        C(OLDMALLOC, M);
#endif
#undef C
    }
    return buff;
}
#endif
11138 
/* Formats the whole profile table and emits it line-by-line through
 * `append` (rb_str_buf_append for .result, rb_io_write for .report).
 * NOTE(review): several `#if` preprocessor lines inside the
 * GC_PROFILE_MORE_DETAIL section were lost when this copy of the file
 * was extracted, leaving the `#endif`s below unbalanced — reconcile
 * with upstream gc.c before compiling. */
static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    if (objspace->profile.run && count /* > 1 */) {
        size_t i;
        const gc_profile_record *record;

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));

        /* summary table: one line per GC run */
        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
        }

#if GC_PROFILE_MORE_DETAIL
        append(out, rb_str_new_cstr("\n\n" \
                                    "More detail.\n" \
                                    "Prepare Time = Previously GC's rest sweep time\n"
                                    "Index Flags Allocate Inc. Allocate Limit"
        /* NOTE(review): an `#if` line was lost here in extraction */
                                    " Allocated Size"
#endif
                                    " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
#if RGENGC_PROFILE
                                    " OldgenObj RemNormObj RemShadObj"
#endif
        /* NOTE(review): an `#if` line was lost here in extraction */
                                    " MaxRSS(KB) MinorFLT MajorFLT"
#endif
                                    "\n"));

        /* detail table: one line per GC run with per-phase timings */
        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
                                   " %15"PRIuSIZE
#endif
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if RGENGC_PROFILE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#endif
                                   "%11ld %8ld %8ld"
#endif

                                   "\n",
                                   i+1,
                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
                                   record->allocated_size,
#endif
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,

                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects
#if RGENGC_PROFILE
                                   ,
                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#endif
                                   ,
                                   record->maxrss / 1024,
                                   record->minflt,
                                   record->majflt
#endif

                                   ));
        }
#endif
    }
}
11233 
11234 /*
11235  * call-seq:
11236  * GC::Profiler.result -> String
11237  *
11238  * Returns a profile data report such as:
11239  *
11240  * GC 1 invokes.
11241  * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
11242  * 1 0.012 159240 212940 10647 0.00000000000001530000
11243  */
11244 
11245 static VALUE
11246 gc_profile_result(VALUE _)
11247 {
11248  VALUE str = rb_str_buf_new(0);
11249  gc_profile_dump_on(str, rb_str_buf_append);
11250  return str;
11251 }
11252 
11253 /*
11254  * call-seq:
11255  * GC::Profiler.report
11256  * GC::Profiler.report(io)
11257  *
11258  * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
11259  *
11260  */
11261 
11262 static VALUE
11263 gc_profile_report(int argc, VALUE *argv, VALUE self)
11264 {
11265  VALUE out;
11266 
11267  out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
11268  gc_profile_dump_on(out, rb_io_write);
11269 
11270  return Qnil;
11271 }
11272 
11273 /*
11274  * call-seq:
11275  * GC::Profiler.total_time -> float
11276  *
11277  * The total time used for garbage collection in seconds
11278  */
11279 
11280 static VALUE
11281 gc_profile_total_time(VALUE self)
11282 {
11283  double time = 0;
11284  rb_objspace_t *objspace = &rb_objspace;
11285 
11286  if (objspace->profile.run && objspace->profile.next_index > 0) {
11287  size_t i;
11288  size_t count = objspace->profile.next_index;
11289 
11290  for (i = 0; i < count; i++) {
11291  time += objspace->profile.records[i].gc_time;
11292  }
11293  }
11294  return DBL2NUM(time);
11295 }
11296 
11297 /*
11298  * call-seq:
11299  * GC::Profiler.enabled? -> true or false
11300  *
11301  * The current status of GC profile mode.
11302  */
11303 
11304 static VALUE
11305 gc_profile_enable_get(VALUE self)
11306 {
11307  rb_objspace_t *objspace = &rb_objspace;
11308  return objspace->profile.run ? Qtrue : Qfalse;
11309 }
11310 
11311 /*
11312  * call-seq:
11313  * GC::Profiler.enable -> nil
11314  *
11315  * Starts the GC profiler.
11316  *
11317  */
11318 
11319 static VALUE
11320 gc_profile_enable(VALUE _)
11321 {
11322  rb_objspace_t *objspace = &rb_objspace;
11323  objspace->profile.run = TRUE;
11324  objspace->profile.current_record = 0;
11325  return Qnil;
11326 }
11327 
11328 /*
11329  * call-seq:
11330  * GC::Profiler.disable -> nil
11331  *
11332  * Stops the GC profiler.
11333  *
11334  */
11335 
11336 static VALUE
11337 gc_profile_disable(VALUE _)
11338 {
11339  rb_objspace_t *objspace = &rb_objspace;
11340 
11341  objspace->profile.run = FALSE;
11342  objspace->profile.current_record = 0;
11343  return Qnil;
11344 }
11345 
11346 /*
11347  ------------------------------ DEBUG ------------------------------
11348 */
11349 
/* Returns the stringified name of a T_xxx type tag.
 * NOTE(review): several TYPE_NAME entries and the body of the T_DATA
 * special case were lost when this copy was extracted; the switch below
 * is reproduced as found and will not compile as-is — reconcile with
 * upstream gc.c. */
static const char *
type_name(int type, VALUE obj)
{
    switch (type) {
#define TYPE_NAME(t) case (t): return #t;
        TYPE_NAME(T_NONE);
        TYPE_NAME(T_CLASS);
        TYPE_NAME(T_FLOAT);
        TYPE_NAME(T_ARRAY);
        TYPE_NAME(T_HASH);
        TYPE_NAME(T_FILE);
        TYPE_NAME(T_MATCH);
        TYPE_NAME(T_NIL);
        TYPE_NAME(T_TRUE);
        TYPE_NAME(T_FALSE);
        TYPE_NAME(T_UNDEF);
        TYPE_NAME(T_IMEMO);
        TYPE_NAME(T_MOVED);
      case T_DATA:
    }
    return "T_DATA";
#undef TYPE_NAME
    }
    return "unknown";
}
11389 
11390 static const char *
11391 obj_type_name(VALUE obj)
11392 {
11393  return type_name(TYPE(obj), obj);
11394 }
11395 
11396 const char *
11398 {
11399  switch (type) {
11400  case VM_METHOD_TYPE_ISEQ: return "iseq";
11401  case VM_METHOD_TYPE_ATTRSET: return "attrest";
11402  case VM_METHOD_TYPE_IVAR: return "ivar";
11403  case VM_METHOD_TYPE_BMETHOD: return "bmethod";
11404  case VM_METHOD_TYPE_ALIAS: return "alias";
11405  case VM_METHOD_TYPE_REFINED: return "refined";
11406  case VM_METHOD_TYPE_CFUNC: return "cfunc";
11407  case VM_METHOD_TYPE_ZSUPER: return "zsuper";
11408  case VM_METHOD_TYPE_MISSING: return "missing";
11409  case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
11410  case VM_METHOD_TYPE_UNDEF: return "undef";
11411  case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
11412  }
11413  rb_bug("rb_method_type_name: unreachable (type: %d)", type);
11414 }
11415 
/* from array.c: local copies of the array state predicates.  Each one
 * asserts that ELTS_SHARED and RARRAY_EMBED_FLAG are mutually exclusive
 * before testing its flag. */
# define ARY_SHARED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary),ELTS_SHARED)!=0)
# define ARY_EMBED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
11423 
/* Appends " label@path:line" for an iseq into buff.
 * NOTE(review): the declarations of `path` and `n` (and apparently one
 * snprintf argument) were lost when this copy was extracted; this body
 * will not compile as-is — reconcile with upstream gc.c. */
static void
rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
{
    if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
        snprintf(buff, buff_size, " %s@%s:%d",
                 RSTRING_PTR(path),
                 n ? FIX2INT(n) : 0 );
    }
}
11436 
/* Writes a one-line debug description of `obj` into buff (truncating at
 * buff_size) and returns buff.  Used by obj_info() and the dump helpers.
 * NOTE(review): several lines of this function were lost when this copy
 * was extracted (marked inline below); reconcile with upstream gc.c. */
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
    int pos = 0;

/* append into the remaining buffer; bail out to `end` once full */
#define BUFF_ARGS buff + pos, buff_size - pos
#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
    if (SPECIAL_CONST_P(obj)) {
        APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));

        if (FIXNUM_P(obj)) {
            APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
        }
        else if (SYMBOL_P(obj)) {
            APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
        }
    }
    else {
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
        const int type = BUILTIN_TYPE(obj);
#if USE_RGENGC
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            /* NOTE(review): the format has 5 flag slots but only 3 C(...)
             * arguments survive — two arguments lost in extraction */
            APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s] %s ",
                     (void *)obj, age,
                     C(RVALUE_MARK_BITMAP(obj), "M"),
                     C(RVALUE_PIN_BITMAP(obj), "P"),
                     C(RVALUE_MARKING_BITMAP(obj), "R"),
                     obj_type_name(obj)));
        }
        else {
            /* fake */
            APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
                     (void *)obj, age,
                     obj_type_name(obj)));
        }
#else
        APPENDF((BUFF_ARGS, "%p [%s] %s",
                 (void *)obj,
                 C(RVALUE_MARK_BITMAP(obj), "M"),
                 obj_type_name(obj)));
#endif

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            APPENDF((BUFF_ARGS, "(temporary internal)"));
        }
        else {
            if (RTEST(RBASIC(obj)->klass)) {
                VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
                }
            }
        }

#if GC_DEBUG
        APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
#endif

        /* per-type extra detail */
        switch (type) {
          case T_NODE:
            /* NOTE(review): the statement for T_NODE was lost in extraction */
            break;
          case T_ARRAY:
            if (FL_TEST(obj, ELTS_SHARED)) {
                APPENDF((BUFF_ARGS, "shared -> %s",
                         rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
            }
            else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
                APPENDF((BUFF_ARGS, "[%s%s] len: %d (embed)",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         (int)RARRAY_LEN(obj)));
            }
            else {
                APPENDF((BUFF_ARGS, "[%s%s%s] len: %d, capa:%d ptr:%p",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         C(RARRAY_TRANSIENT_P(obj), "T"),
                         (int)RARRAY_LEN(obj),
                         ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
                         (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
            }
            break;
          case T_STRING: {
            APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(obj)));
            break;
          }
          case T_MOVED: {
            APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
            break;
          }
          case T_HASH: {
            APPENDF((BUFF_ARGS, "[%c%c] %d",
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
                     (int)RHASH_SIZE(obj)));
            break;
          }
          case T_CLASS:
          case T_MODULE:
            {
                VALUE class_path = rb_class_path_cached(obj);
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
                }
                break;
            }
          case T_ICLASS:
            {
                VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
                }
                break;
            }
          case T_OBJECT:
            {
                /* NOTE(review): the declaration of `len` was lost in
                 * extraction — presumably the ivar count */

                if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
                    APPENDF((BUFF_ARGS, "(embed) len:%d", len));
                }
                else {
                    VALUE *ptr = ROBJECT_IVPTR(obj);
                    APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
                }
            }
            break;
          case T_DATA: {
            const struct rb_block *block;
            const rb_iseq_t *iseq;
            if (rb_obj_is_proc(obj) &&
                (block = vm_proc_block(obj)) != NULL &&
                (vm_block_type(block) == block_type_iseq) &&
                (iseq = vm_block_iseq(block)) != NULL) {
                rb_raw_iseq_info(BUFF_ARGS, iseq);
            }
            else {
                const char * const type_name = rb_objspace_data_type_name(obj);
                if (type_name) {
                    APPENDF((BUFF_ARGS, "%s", type_name));
                }
            }
            break;
          }
          case T_IMEMO: {
            const char *imemo_name = "\0";
            switch (imemo_type(obj)) {
#define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
                IMEMO_NAME(env);
                IMEMO_NAME(cref);
                IMEMO_NAME(svar);
                IMEMO_NAME(throw_data);
                IMEMO_NAME(ifunc);
                IMEMO_NAME(memo);
                IMEMO_NAME(ment);
                IMEMO_NAME(iseq);
                IMEMO_NAME(tmpbuf);
                IMEMO_NAME(ast);
                IMEMO_NAME(parser_strterm);
#undef IMEMO_NAME
              default: UNREACHABLE;
            }
            APPENDF((BUFF_ARGS, "/%s", imemo_name));

            switch (imemo_type(obj)) {
              case imemo_ment: {
                const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
                if (me->def) {
                    /* NOTE(review): the called_id and type arguments were
                     * lost in extraction — argument list is short */
                    APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
                             me->def->alias_count,
                             obj_info(me->owner),
                             obj_info(me->defined_class)));
                }
                else {
                    APPENDF((BUFF_ARGS, "%s", rb_id2name(me->called_id)));
                }
                break;
              }
              case imemo_iseq: {
                const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
                rb_raw_iseq_info(BUFF_ARGS, iseq);
                break;
              }
              default:
                break;
            }
          }
          default:
            break;
        }
#undef TF
#undef C
    }
  end:
    return buff;
#undef APPENDF
#undef BUFF_ARGS
}
11646 
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM 10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

/* Formats obj into one of a small ring of static buffers so that several
 * results can be referenced at once (e.g. in a single printf call). */
static const char *
obj_info(VALUE obj)
{
    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        /* wrap around the buffer ring */
        obj_info_buffers_index = 0;
    }

    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}
#else
/* Cheap fallback when detailed object info is compiled out. */
static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}
#endif
11672 
11673 MJIT_FUNC_EXPORTED const char *
11675 {
11676  return obj_info(obj);
11677 }
11678 
11679 void
11681 {
11682  char buff[0x100];
11683  fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
11684 }
11685 
11686 void
11687 rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
11688 {
11689  char buff[0x100];
11690  fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
11691 }
11692 
#if GC_DEBUG

/* Dump, to stderr, everything the GC knows about obj: allocation site,
 * heap membership, mark/pin state, generational age and sweep status. */
void
rb_gcdebug_print_obj_condition(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);

    if (BUILTIN_TYPE(obj) == T_MOVED) {
        fprintf(stderr, "moved?: true\n");
    }
    else {
        fprintf(stderr, "moved?: false\n");
    }
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    }
    else {
        /* Not a heap slot: the bitmap queries below would be invalid. */
        fprintf(stderr, "pointer to heap?: false\n");
        return;
    }

    fprintf(stderr, "marked?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
#if USE_RGENGC
    fprintf(stderr, "age?         : %d\n", RVALUE_AGE(obj));
    fprintf(stderr, "old?         : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    /* Note the inversion: the bitmap records UNprotected objects. */
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
#endif

    if (is_lazy_sweeping(heap_eden)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
    }
    else {
        fprintf(stderr, "lazy sweeping?: false\n");
    }
}

/* Finalizer callback used by rb_gcdebug_sentinel(): loudly reports
 * that the watched object was collected. */
static VALUE
gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
{
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
    return Qnil;
}

/* Attach a finalizer to obj that warns (tagged with name) if the
 * object is ever garbage collected. */
void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
}

#endif /* GC_DEBUG */
11748 
#if GC_DEBUG_STRESS_TO_CLASS
/*
 *  call-seq:
 *    GC.add_stress_to_class(class[, ...])
 *
 *  Raises NoMemoryError when allocating an instance of the given classes.
 *
 */
static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
        /* Lazily create the (GC-hidden) array of stressed classes. */
        stress_to_class = rb_ary_tmp_new(argc);
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}

/*
 *  call-seq:
 *    GC.remove_stress_to_class(class[, ...])
 *
 *  No longer raises NoMemoryError when allocating an instance of the
 *  given classes.
 *
 */
static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
        /* Remove by identity, not #==, hence rb_ary_delete_same. */
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        }
        if (RARRAY_LEN(stress_to_class) == 0) {
            /* Drop the array entirely once no classes remain. */
            stress_to_class = 0;
        }
    }
    return Qnil;
}
#endif
11794 
/*
 *  Document-module: ObjectSpace
 *
 *  The ObjectSpace module contains a number of routines
 *  that interact with the garbage collection facility and allow you to
 *  traverse all living objects with an iterator.
 *
 *  ObjectSpace also provides support for object finalizers, procs that will be
 *  called when a specific object is about to be destroyed by garbage
 *  collection.
 *
 *     require 'objspace'
 *
 *     a = "A"
 *     b = "B"
 *
 *     ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *     ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
 *
 *  _produces:_
 *
 *     Finalizer two on 537763470
 *     Finalizer one on 537763480
 */

/*
 *  Document-class: ObjectSpace::WeakMap
 *
 *  An ObjectSpace::WeakMap object holds references to
 *  any objects, but those objects can get garbage collected.
 *
 *  This class is mostly used internally by WeakRef, please use
 *  +lib/weakref.rb+ for the public interface.
 */

/*  Document-class: GC::Profiler
 *
 *  The GC profiler provides access to information on GC runs including time,
 *  length and object space size.
 *
 *  Example:
 *
 *    GC::Profiler.enable
 *
 *    require 'rdoc/rdoc'
 *
 *    GC::Profiler.report
 *
 *    GC::Profiler.disable
 *
 *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
 */
11847 
11848 #include "gc.rbinc"
11849 
11850 void
11851 Init_GC(void)
11852 {
11853 #undef rb_intern
11854  VALUE rb_mObjSpace;
11855  VALUE rb_mProfiler;
11856  VALUE gc_constants;
11857 
11858  rb_mGC = rb_define_module("GC");
11859  load_gc();
11860 
11861  gc_constants = rb_hash_new();
11862  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
11863  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
11864  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
11865  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_PLANES")), SIZET2NUM(HEAP_PAGE_BITMAP_PLANES));
11866  OBJ_FREEZE(gc_constants);
11867  /* internal constants */
11868  rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
11869 
11870  rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
11871  rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
11872  rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
11873  rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
11874  rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
11875  rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
11876  rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
11877  rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
11878  rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
11879 
11880  rb_mObjSpace = rb_define_module("ObjectSpace");
11881 
11882  rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
11883 
11884  rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
11885  rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
11886 
11887  rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
11888 
11890 
11892  rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
11893 
11894  rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
11895 
11896  {
11897  VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
11898  rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
11899  rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
11900  rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
11901  rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
11902  rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
11903  rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
11904  rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
11905  rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
11906  rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
11907  rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
11908  rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
11909  rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
11910  rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
11911  rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
11912  rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
11913  rb_include_module(rb_cWeakMap, rb_mEnumerable);
11914  }
11915 
11916  /* internal methods */
11917  rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
11918  rb_define_singleton_method(rb_mGC, "verify_compaction_references", gc_verify_compaction_references, -1);
11919  rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
11920 #if MALLOC_ALLOCATED_SIZE
11921  rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
11922  rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
11923 #endif
11924 
11925 #if GC_DEBUG_STRESS_TO_CLASS
11926  rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
11927  rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
11928 #endif
11929 
11930  {
11931  VALUE opts;
11932  /* GC build options */
11933  rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
11934 #define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
11935  OPT(GC_DEBUG);
11936  OPT(USE_RGENGC);
11937  OPT(RGENGC_DEBUG);
11947 #undef OPT
11948  OBJ_FREEZE(opts);
11949  }
11950 }
11951 
11952 #ifdef ruby_xmalloc
11953 #undef ruby_xmalloc
11954 #endif
11955 #ifdef ruby_xmalloc2
11956 #undef ruby_xmalloc2
11957 #endif
11958 #ifdef ruby_xcalloc
11959 #undef ruby_xcalloc
11960 #endif
11961 #ifdef ruby_xrealloc
11962 #undef ruby_xrealloc
11963 #endif
11964 #ifdef ruby_xrealloc2
11965 #undef ruby_xrealloc2
11966 #endif
11967 
11968 void *
11970 {
11971 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11972  ruby_malloc_info_file = __FILE__;
11973  ruby_malloc_info_line = __LINE__;
11974 #endif
11975  return ruby_xmalloc_body(size);
11976 }
11977 
11978 void *
11979 ruby_xmalloc2(size_t n, size_t size)
11980 {
11981 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11982  ruby_malloc_info_file = __FILE__;
11983  ruby_malloc_info_line = __LINE__;
11984 #endif
11985  return ruby_xmalloc2_body(n, size);
11986 }
11987 
11988 void *
11989 ruby_xcalloc(size_t n, size_t size)
11990 {
11991 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11992  ruby_malloc_info_file = __FILE__;
11993  ruby_malloc_info_line = __LINE__;
11994 #endif
11995  return ruby_xcalloc_body(n, size);
11996 }
11997 
11998 void *
11999 ruby_xrealloc(void *ptr, size_t new_size)
12000 {
12001 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12002  ruby_malloc_info_file = __FILE__;
12003  ruby_malloc_info_line = __LINE__;
12004 #endif
12005  return ruby_xrealloc_body(ptr, new_size);
12006 }
12007 
12008 void *
12009 ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
12010 {
12011 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12012  ruby_malloc_info_file = __FILE__;
12013  ruby_malloc_info_line = __LINE__;
12014 #endif
12015  return ruby_xrealloc2_body(ptr, n, new_size);
12016 }
ULL2NUM
#define ULL2NUM(v)
Definition: rb_mjit_min_header-2.7.1.h:4210
strcmp
int strcmp(const char *, const char *)
RMatch::regexp
VALUE regexp
Definition: re.h:47
rb_objspace::dont_gc
unsigned int dont_gc
Definition: gc.c:689
rb_objspace::atomic_flags
struct rb_objspace::@84 atomic_flags
FLONUM_P
#define FLONUM_P(x)
Definition: ruby.h:430
global_symbols
#define global_symbols
Definition: gc.c:8409
gc_stat_sym_heap_available_slots
@ gc_stat_sym_heap_available_slots
Definition: gc.c:8844
__attribute__
unsigned int UINT8 __attribute__((__mode__(__QI__)))
Definition: ffi_common.h:110
rb_objspace::mark_func_data_struct::mark_func
void(* mark_func)(VALUE v, void *data)
Definition: gc.c:716
gc_stat_sym_total_allocated_pages
@ gc_stat_sym_total_allocated_pages
Definition: gc.c:8851
rb_io_t::writeconv_pre_ecopts
VALUE writeconv_pre_ecopts
Definition: io.h:99
ATOMIC_VALUE_EXCHANGE
#define ATOMIC_VALUE_EXCHANGE(var, val)
Definition: ruby_atomic.h:216
rb_subclass_entry::next
rb_subclass_entry_t * next
Definition: internal.h:1000
gc_stat_compat_sym_malloc_limit
@ gc_stat_compat_sym_malloc_limit
Definition: gc.c:8900
rb_objspace::count
size_t count
Definition: gc.c:779
RARRAY_TRANSIENT_P
#define RARRAY_TRANSIENT_P(ary)
Definition: ruby.h:1076
rb_get_kwargs
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
Definition: class.c:1886
FL_FINALIZE
#define FL_FINALIZE
Definition: ruby.h:1282
i
uint32_t i
Definition: rb_mjit_min_header-2.7.1.h:5425
rmatch::regs
struct re_registers regs
Definition: re.h:37
heap_eden
#define heap_eden
Definition: gc.c:919
rb_big_eql
VALUE rb_big_eql(VALUE x, VALUE y)
Definition: bignum.c:5544
nonspecial_obj_id
#define nonspecial_obj_id(obj)
Definition: gc.c:974
UNLIKELY
#define UNLIKELY(x)
Definition: ffi_common.h:126
OBJ_ID_INCREMENT
#define OBJ_ID_INCREMENT
Definition: gc.c:2883
ID
unsigned long ID
Definition: ruby.h:103
gc_stat_sym_oldmalloc_increase_bytes_limit
@ gc_stat_sym_oldmalloc_increase_bytes_limit
Definition: gc.c:8867
MEMOP_TYPE_MALLOC
@ MEMOP_TYPE_MALLOC
Definition: gc.c:9687
rb_check_funcall
VALUE rb_check_funcall(VALUE, ID, int, const VALUE *)
Definition: vm_eval.c:505
ruby_xfree
void ruby_xfree(void *x)
Definition: gc.c:10169
list_for_each
#define list_for_each(h, i, member)
Definition: rb_mjit_min_header-2.7.1.h:9021
GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:291
T_FALSE
#define T_FALSE
Definition: ruby.h:537
ruby::backward::cxxanyargs::rb_proc_new
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
Definition: cxxanyargs.hpp:324
BIGNUM_DIGITS
#define BIGNUM_DIGITS(b)
Definition: internal.h:780
MEMOP_TYPE_FREE
@ MEMOP_TYPE_FREE
Definition: gc.c:9688
ruby_stack_length
size_t ruby_stack_length(VALUE **p)
Definition: gc.c:4633
obj
const VALUE VALUE obj
Definition: rb_mjit_min_header-2.7.1.h:5703
ruby_gc_stress_mode
#define ruby_gc_stress_mode
Definition: gc.c:927
STACK_END
#define STACK_END
Definition: gc.c:4603
COUNT_TYPE
#define COUNT_TYPE(t)
rb_objspace::id_to_obj_tbl
st_table * id_to_obj_tbl
Definition: gc.c:822
rb_raw_obj_info
const char * rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
Definition: gc.c:11438
rb_id2name
const char * rb_id2name(ID)
Definition: symbol.c:801
rb_method_bmethod_struct::proc
VALUE proc
Definition: method.h:152
constant.h
bits_t
uintptr_t bits_t
Definition: gc.c:618
gc_stat_sym_heap_free_slots
@ gc_stat_sym_heap_free_slots
Definition: gc.c:8846
STATIC_SYM_P
#define STATIC_SYM_P(x)
Definition: ruby.h:411
TypedData_Make_Struct
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1244
idEq
@ idEq
Definition: id.h:96
Check_Type
#define Check_Type(v, t)
Definition: ruby.h:595
gc_stat_compat_sym_heap_final_slot
@ gc_stat_compat_sym_heap_final_slot
Definition: gc.c:8889
RGENGC_PROFILE
#define RGENGC_PROFILE
Definition: gc.c:421
RVALUE::free
struct RVALUE::@78::@79 free
rb_xcalloc_mul_add_mul
void * rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10196
TRUE
#define TRUE
Definition: nkf.h:175
rb_transient_heap_mark
void rb_transient_heap_mark(VALUE obj, const void *ptr)
Definition: transient_heap.c:529
abort
void abort(void) __attribute__((__noreturn__))
MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK
Definition: gc.c:480
long
#define long
Definition: rb_mjit_min_header-2.7.1.h:2848
st_foreach_with_replace
int st_foreach_with_replace(st_table *tab, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
Definition: st.c:1700
rb_gc_count
size_t rb_gc_count(void)
Definition: gc.c:8713
GPR_FLAG_IMMEDIATE_MARK
@ GPR_FLAG_IMMEDIATE_MARK
Definition: gc.c:513
RFILE
#define RFILE(obj)
Definition: ruby.h:1276
rb_memory_id
VALUE rb_memory_id(VALUE obj)
Definition: gc.c:3739
GPR_FLAG_CAPI
@ GPR_FLAG_CAPI
Definition: gc.c:507
T_FLOAT
#define T_FLOAT
Definition: ruby.h:527
STACK_UPPER
#define STACK_UPPER(x, a, b)
Definition: gc.h:83
RB_DEBUG_COUNTER_INC_IF
#define RB_DEBUG_COUNTER_INC_IF(type, cond)
Definition: debug_counter.h:377
rb_mGC
VALUE rb_mGC
Definition: gc.c:1000
ruby_mimmalloc
void * ruby_mimmalloc(size_t size)
Definition: gc.c:10206
xcalloc
#define xcalloc
Definition: defines.h:213
rb_include_module
void rb_include_module(VALUE klass, VALUE module)
Definition: class.c:869
GPR_FLAG_HAVE_FINALIZE
@ GPR_FLAG_HAVE_FINALIZE
Definition: gc.c:512
RVALUE::v3
VALUE v3
Definition: gc.c:605
rb_mark_generic_ivar
void rb_mark_generic_ivar(VALUE)
Definition: variable.c:973
rb_objspace_call_finalizer
void rb_objspace_call_finalizer(rb_objspace_t *objspace)
Definition: gc.c:3443
strtod
#define strtod(s, e)
Definition: util.h:76
RTypedData::type
const rb_data_type_t * type
Definition: ruby.h:1170
rb_obj_hide
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition: object.c:78
double
double
Definition: rb_mjit_min_header-2.7.1.h:5884
rb_objspace::oldmalloc_increase
size_t oldmalloc_increase
Definition: gc.c:799
RUBY_FL_WB_PROTECTED
@ RUBY_FL_WB_PROTECTED
Definition: ruby.h:842
rb_objspace::dont_incremental
unsigned int dont_incremental
Definition: gc.c:690
rb_ec_raised_clear
#define rb_ec_raised_clear(ec)
Definition: eval_intern.h:261
rb_objspace::total_freed_pages
size_t total_freed_pages
Definition: gc.c:782
rb_objspace::oldmalloc_increase_limit
size_t oldmalloc_increase_limit
Definition: gc.c:800
RVALUE::string
struct RString string
Definition: gc.c:577
rb_objspace_data_type_name
const char * rb_objspace_data_type_name(VALUE obj)
Definition: gc.c:2432
RZombie::basic
struct RBasic basic
Definition: gc.c:987
klass
VALUE klass
Definition: rb_mjit_min_header-2.7.1.h:13179
rb_objspace::total_allocated_objects
size_t total_allocated_objects
Definition: gc.c:704
ruby_gc_params_t::heap_init_slots
size_t heap_init_slots
Definition: gc.c:318
RArray::heap
struct RArray::@97::@98 heap
rb_objspace::malloc_params
struct rb_objspace::@82 malloc_params
st_data_t
unsigned long st_data_t
Definition: rb_mjit_min_header-2.7.1.h:5324
force_finalize_list::table
VALUE table
Definition: gc.c:3426
gc_stat_sym_heap_marked_slots
@ gc_stat_sym_heap_marked_slots
Definition: gc.c:8848
GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY
Definition: gc.c:461
rb_io_t::write_lock
VALUE write_lock
Definition: io.h:101
id
const int id
Definition: nkf.c:209
onig_memsize
size_t onig_memsize(const regex_t *reg)
Definition: regcomp.c:5654
RZOMBIE
#define RZOMBIE(o)
Definition: gc.c:993
st_table::num_entries
st_index_t num_entries
Definition: st.h:86
rb_objspace::during_minor_gc
unsigned int during_minor_gc
Definition: gc.c:696
VM_METHOD_TYPE_REFINED
@ VM_METHOD_TYPE_REFINED
refinement
Definition: method.h:113
rb_objspace::considered_count_table
size_t considered_count_table[T_MASK]
Definition: gc.c:810
env
#define env
FIX2INT
#define FIX2INT(x)
Definition: ruby.h:717
RVALUE::flags
VALUE flags
Definition: gc.c:569
rb_iseq_struct
Definition: vm_core.h:456
rb_hash_new
VALUE rb_hash_new(void)
Definition: hash.c:1523
RFloat
Definition: internal.h:798
ruby_gc_stressful
#define ruby_gc_stressful
Definition: gc.c:926
ATOMIC_PTR_EXCHANGE
#define ATOMIC_PTR_EXCHANGE(var, val)
Definition: ruby_atomic.h:186
RVALUE::bignum
struct RBignum bignum
Definition: gc.c:584
rb_gc_register_mark_object
void rb_gc_register_mark_object(VALUE obj)
Definition: gc.c:7065
RVALUE::env
rb_env_t env
Definition: gc.c:597
ruby_malloc_size_overflow
void ruby_malloc_size_overflow(size_t count, size_t elsize)
Definition: gc.c:10093
rb_define_module_under
VALUE rb_define_module_under(VALUE outer, const char *name)
Definition: class.c:797
gc_mode
#define gc_mode(objspace)
Definition: gc.c:950
mark_stack::chunk
stack_chunk_t * chunk
Definition: gc.c:648
rb_classext_struct::subclasses
rb_subclass_entry_t * subclasses
Definition: internal.h:1028
rb_objspace::next_object_id
VALUE next_object_id
Definition: gc.c:705
RVALUE::complex
struct RComplex complex
Definition: gc.c:588
VM_METHOD_TYPE_OPTIMIZED
@ VM_METHOD_TYPE_OPTIMIZED
Kernel::send, Proc::call, etc.
Definition: method.h:111
rb_objspace::records
gc_profile_record * records
Definition: gc.c:740
rb_str_buf_new
VALUE rb_str_buf_new(long)
Definition: string.c:1315
rb_warn
void rb_warn(const char *fmt,...)
Definition: error.c:313
rb_postponed_job_register_one
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1614
is_incremental_marking
#define is_incremental_marking(objspace)
Definition: gc.c:961
RVALUE::values
struct RVALUE::@78::@81 values
ruby_stack_grow_direction
int ruby_stack_grow_direction
Definition: gc.c:4620
rb_gc_update_tbl_refs
void rb_gc_update_tbl_refs(st_table *ptr)
Definition: gc.c:7985
imemo_memo
@ imemo_memo
Definition: internal.h:1138
RVALUE_PAGE_MARKING
#define RVALUE_PAGE_MARKING(page, obj)
Definition: gc.c:1221
rb_data_type_struct::dmark
void(* dmark)(void *)
Definition: ruby.h:1151
rb_method_definition_struct::attr
rb_method_attr_t attr
Definition: method.h:171
rb_func_lambda_new
VALUE rb_func_lambda_new(rb_block_call_func_t func, VALUE val, int min_argc, int max_argc)
Definition: proc.c:735
gc_stat_compat_sym_old_object
@ gc_stat_compat_sym_old_object
Definition: gc.c:8894
weakmap::wmap2obj
st_table * wmap2obj
Definition: gc.c:10336
CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE
Definition: gc.c:470
rb_singleton_class_internal_p
int rb_singleton_class_internal_p(VALUE sklass)
Definition: class.c:455
gc.h
BITMAP_INDEX
#define BITMAP_INDEX(p)
Definition: gc.c:881
HEAP_PAGE_ALIGN
@ HEAP_PAGE_ALIGN
Definition: gc.c:835
rb_ary_free
void rb_ary_free(VALUE ary)
Definition: array.c:786
RBASIC_CLEAR_CLASS
#define RBASIC_CLEAR_CLASS(obj)
Definition: internal.h:1986
rb_method_iseq_struct::cref
rb_cref_t * cref
class reference, should be marked
Definition: method.h:128
ST_STOP
@ ST_STOP
Definition: st.h:99
gc_stress_no_immediate_sweep
@ gc_stress_no_immediate_sweep
Definition: gc.c:7123
rb_objspace::heap_pages
struct rb_objspace::@85 heap_pages
RRational::num
VALUE num
Definition: internal.h:790
rb_funcall
#define rb_funcall(recv, mid, argc,...)
Definition: rb_mjit_min_header-2.7.1.h:6546
rb_gc_force_recycle
void rb_gc_force_recycle(VALUE obj)
Definition: gc.c:7013
INT2FIX
#define INT2FIX(i)
Definition: ruby.h:263
n
const char size_t n
Definition: rb_mjit_min_header-2.7.1.h:5417
RVALUE::rstruct
struct RStruct rstruct
Definition: gc.c:583
T_MASK
#define T_MASK
Definition: md5.c:131
ruby_gc_params_t::oldobject_limit_factor
double oldobject_limit_factor
Definition: gc.c:326
GPR_FLAG_MAJOR_BY_NOFREE
@ GPR_FLAG_MAJOR_BY_NOFREE
Definition: gc.c:494
st_is_member
#define st_is_member(table, key)
Definition: st.h:97
gc_stat_compat_sym_heap_free_slot
@ gc_stat_compat_sym_heap_free_slot
Definition: gc.c:8888
gc_report
#define gc_report
Definition: gc.c:1093
rb_during_gc
int rb_during_gc(void)
Definition: gc.c:8689
RObject
Definition: ruby.h:922
heap_page::final_slots
short final_slots
Definition: gc.c:849
rb_heap_struct::pages
struct list_head pages
Definition: gc.c:661
rb_mark_tbl_no_pin
void rb_mark_tbl_no_pin(st_table *tbl)
Definition: gc.c:5013
PRIxVALUE
#define PRIxVALUE
Definition: ruby.h:164
ruby_xmalloc
void * ruby_xmalloc(size_t size)
Definition: gc.c:11969
gc_stat_sym_total_allocated_objects
@ gc_stat_sym_total_allocated_objects
Definition: gc.c:8853
rb_int2str
VALUE rb_int2str(VALUE num, int base)
Definition: numeric.c:3562
RHash::ifnone
const VALUE ifnone
Definition: internal.h:893
gc_profile_record::heap_total_size
size_t heap_total_size
Definition: gc.c:529
rb_class_remove_from_module_subclasses
void rb_class_remove_from_module_subclasses(VALUE klass)
Definition: class.c:94
RSTRING_PTR
#define RSTRING_PTR(str)
Definition: ruby.h:1009
rb_objspace::during_incremental_marking
unsigned int during_incremental_marking
Definition: gc.c:699
re.h
rb_gc_mark_locations
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:4701
SIZE_MAX
#define SIZE_MAX
Definition: ruby.h:307
GET_HEAP_MARK_BITS
#define GET_HEAP_MARK_BITS(x)
Definition: gc.c:891
STACKFRAME_FOR_CALL_CFUNC
#define STACKFRAME_FOR_CALL_CFUNC
Definition: gc.c:4664
PRI_PIDT_PREFIX
#define PRI_PIDT_PREFIX
Definition: rb_mjit_min_header-2.7.1.h:103
rb_callable_method_entry_struct::owner
const VALUE owner
Definition: method.h:64
RTYPEDDATA_TYPE
#define RTYPEDDATA_TYPE(v)
Definition: ruby.h:1178
rb_gc_verify_internal_consistency
void rb_gc_verify_internal_consistency(void)
Definition: gc.c:6204
gc_stat_compat_sym_malloc_increase
@ gc_stat_compat_sym_malloc_increase
Definition: gc.c:8899
os_each_struct::num
size_t num
Definition: gc.c:3056
EXEC_EVENT_HOOK
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1935
rb_io_t::pathv
VALUE pathv
Definition: io.h:72
rb_str_new_cstr
#define rb_str_new_cstr(str)
Definition: rb_mjit_min_header-2.7.1.h:6078
ruby_sized_xrealloc
void * ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
Definition: gc.c:10126
malloc_obj_info::size
size_t size
Definition: gc.c:9791
rb_strterm_mark
void rb_strterm_mark(VALUE obj)
Definition: ripper.c:765
rb_objspace::mark_func_data_struct::data
void * data
Definition: gc.c:715
rb_objspace
Definition: gc.c:676
GPR_FLAG_NEWOBJ
@ GPR_FLAG_NEWOBJ
Definition: gc.c:504
FLUSH_REGISTER_WINDOWS
#define FLUSH_REGISTER_WINDOWS
Definition: defines.h:431
GET_HEAP_WB_UNPROTECTED_BITS
#define GET_HEAP_WB_UNPROTECTED_BITS(x)
Definition: gc.c:895
mark_stack
Definition: gc.c:647
BITS_SIZE
@ BITS_SIZE
Definition: gc.c:620
st_init_numtable
st_table * st_init_numtable(void)
Definition: st.c:653
PUSH_MARK_FUNC_DATA
#define PUSH_MARK_FUNC_DATA(v)
Definition: gc.c:1098
ruby_gc_params_t::malloc_limit_min
size_t malloc_limit_min
Definition: gc.c:328
objspace_and_reason::reason
int reason
Definition: gc.c:7373
HEAP_PAGE_OBJ_LIMIT
@ HEAP_PAGE_OBJ_LIMIT
Definition: gc.c:839
RUBY_INTERNAL_EVENT_FREEOBJ
#define RUBY_INTERNAL_EVENT_FREEOBJ
Definition: ruby.h:2269
ruby_xrealloc_body
void * ruby_xrealloc_body(void *ptr, size_t new_size)
Definition: gc.c:10136
heap_page::wb_unprotected_bits
bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:863
gc_stat_compat_sym_heap_swept_slot
@ gc_stat_compat_sym_heap_swept_slot
Definition: gc.c:8890
VALGRIND_MAKE_MEM_DEFINED
#define VALGRIND_MAKE_MEM_DEFINED(p, n)
Definition: zlib.c:24
rb_objspace::has_hook
unsigned int has_hook
Definition: gc.c:694
rb_objspace_of
#define rb_objspace_of(vm)
Definition: gc.c:901
VALUE
unsigned long VALUE
Definition: ruby.h:102
M
#define M
Definition: mt19937.c:53
GET_VM
#define GET_VM()
Definition: vm_core.h:1764
RVALUE_MARKING_BITMAP
#define RVALUE_MARKING_BITMAP(obj)
Definition: gc.c:1217
rb_eArgError
VALUE rb_eArgError
Definition: error.c:923
each_obj_args
Definition: gc.c:2943
rb_clear_method_cache_by_class
void rb_clear_method_cache_by_class(VALUE)
Definition: vm_method.c:93
encoding.h
OPT
#define OPT(o)
ruby_verbose
#define ruby_verbose
Definition: ruby.h:1925
rb_intern
#define rb_intern(str)
gc_stress_max
@ gc_stress_max
Definition: gc.c:7125
os_each_struct::of
VALUE of
Definition: gc.c:3057
RUBY_DATA_FUNC
void(* RUBY_DATA_FUNC)(void *)
Definition: ruby.h:1184
st_delete
int st_delete(st_table *tab, st_data_t *key, st_data_t *value)
Definition: st.c:1418
C
#define C(c, s)
RB_TYPE_P
#define RB_TYPE_P(obj, type)
Definition: ruby.h:560
rb_ary_memsize
RUBY_FUNC_EXPORTED size_t rb_ary_memsize(VALUE ary)
Definition: array.c:816
sleep
unsigned sleep(unsigned int __seconds)
heap_allocatable_pages
#define heap_allocatable_pages
Definition: gc.c:915
RVALUE_PAGE_WB_UNPROTECTED
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj)
Definition: gc.c:1219
NORETURN
NORETURN(static void negative_size_allocation_error(const char *))
rb_xmalloc_mul_add
void * rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
Definition: gc.c:10175
RMatch
Definition: re.h:43
rb_str_memsize
size_t rb_str_memsize(VALUE)
Definition: string.c:1371
rb_gc_location
VALUE rb_gc_location(VALUE value)
Definition: gc.c:8113
fmt
const VALUE int int int int int int VALUE char * fmt
Definition: rb_mjit_min_header-2.7.1.h:6423
TYPE
#define TYPE(x)
Definition: ruby.h:554
rb_const_entry_struct::value
VALUE value
Definition: constant.h:34
st_add_direct
void st_add_direct(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1251
rb_aligned_malloc
void * rb_aligned_malloc(size_t alignment, size_t size)
Definition: gc.c:9629
imemo_iseq
@ imemo_iseq
Definition: internal.h:1140
RUBY_INTERNAL_EVENT_GC_ENTER
#define RUBY_INTERNAL_EVENT_GC_ENTER
Definition: ruby.h:2273
gc_stat_compat_sym_heap_live_slot
@ gc_stat_compat_sym_heap_live_slot
Definition: gc.c:8887
rb_method_definition_struct::type
rb_method_type_t type
Definition: rb_mjit_min_header-2.7.1.h:8804
imemo_env
@ imemo_env
Definition: internal.h:1133
weakmap
Definition: gc.c:10334
rb_objspace::pooled_slots
size_t pooled_slots
Definition: gc.c:816
RVALUE::flonum
struct RFloat flonum
Definition: gc.c:576
posix_memalign
int posix_memalign(void **, size_t, size_t) __attribute__((__nonnull__(1))) __attribute__((__warn_unused_result__))
nomem_error
#define nomem_error
Definition: gc.c:995
GET_STACK_BOUNDS
#define GET_STACK_BOUNDS(start, end, appendix)
Definition: gc.c:4949
RRational
Definition: internal.h:788
rb_objspace::gc_sweep_start_time
double gc_sweep_start_time
Definition: gc.c:774
rb_define_module
VALUE rb_define_module(const char *name)
Definition: class.c:772
rb_id_table_iterator_result
rb_id_table_iterator_result
Definition: id_table.h:8
Init_GC
void Init_GC(void)
Definition: gc.c:11851
ruby_rgengc_debug
int ruby_rgengc_debug
Definition: gc.c:388
SIGNED_VALUE
#define SIGNED_VALUE
Definition: ruby.h:104
rb_ast_mark
void rb_ast_mark(rb_ast_t *ast)
Definition: node.c:1342
rb_objspace
#define rb_objspace
Definition: gc.c:900
rb_iseq_path
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
rb_heap_struct::total_pages
size_t total_pages
Definition: gc.c:666
rb_data_object_wrap
VALUE rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
Definition: gc.c:2378
ID_SCOPE_MASK
#define ID_SCOPE_MASK
Definition: id.h:32
I
#define I(s)
getenv
#define getenv(name)
Definition: win32.c:73
EC_JUMP_TAG
#define EC_JUMP_TAG(ec, st)
Definition: eval_intern.h:184
RSYMBOL
#define RSYMBOL(obj)
Definition: symbol.h:33
rb_int_ge
VALUE rb_int_ge(VALUE x, VALUE y)
Definition: numeric.c:4292
during_gc
#define during_gc
Definition: gc.c:922
rb_objspace::mode
unsigned int mode
Definition: gc.c:687
UINT2NUM
#define UINT2NUM(x)
Definition: ruby.h:1610
rb_objspace::heap_used_at_gc_start
size_t heap_used_at_gc_start
Definition: gc.c:776
rb_free_generic_ivar
void rb_free_generic_ivar(VALUE)
Definition: variable.c:993
FL_SEEN_OBJ_ID
#define FL_SEEN_OBJ_ID
Definition: ruby.h:1285
wmap_iter_arg::value
VALUE value
Definition: gc.c:10502
GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK
Definition: gc.c:464
rb_gc_disable
VALUE rb_gc_disable(void)
Definition: gc.c:9248
rb_iseq_constant_body::location
rb_iseq_location_t location
Definition: vm_core.h:399
rb_inspect
VALUE rb_inspect(VALUE)
Convenient wrapper of Object::inspect.
Definition: object.c:551
DWORD
IUnknown DWORD
Definition: win32ole.c:33
rb_objspace::profile
struct rb_objspace::@86 profile
IMEMO_NAME
#define IMEMO_NAME(x)
rb_id_table
Definition: id_table.c:40
ruby_stack_check
int ruby_stack_check(void)
Definition: gc.c:4673
idCall
@ idCall
Definition: rb_mjit_min_header-2.7.1.h:8654
rb_mark_end_proc
void rb_mark_end_proc(void)
Definition: eval_jump.c:78
rb_obj_rgengc_promoted_p
VALUE rb_obj_rgengc_promoted_p(VALUE obj)
Definition: gc.c:6972
arg
VALUE arg
Definition: rb_mjit_min_header-2.7.1.h:5562
rb_method_definition_struct::refined
rb_method_refined_t refined
Definition: method.h:173
heap_page::start
RVALUE * start
Definition: gc.c:858
verify_internal_consistency_struct::err_count
int err_count
Definition: gc.c:5895
gc_stress_no_major
@ gc_stress_no_major
Definition: gc.c:7122
rb_iseq_location_struct::first_lineno
VALUE first_lineno
Definition: vm_core.h:276
rb_objspace::obj_to_id_tbl
st_table * obj_to_id_tbl
Definition: gc.c:823
rb_str_cat2
#define rb_str_cat2
Definition: intern.h:912
STACK_LEVEL_MAX
#define STACK_LEVEL_MAX
Definition: gc.c:4604
iseq
const rb_iseq_t * iseq
Definition: rb_mjit_min_header-2.7.1.h:13426
rb_objspace::allocatable_pages
size_t allocatable_pages
Definition: gc.c:725
gc_stat_compat_sym_old_object_limit
@ gc_stat_compat_sym_old_object_limit
Definition: gc.c:8895
rb_class_detach_module_subclasses
void rb_class_detach_module_subclasses(VALUE klass)
Definition: class.c:145
BITS_BITLENGTH
@ BITS_BITLENGTH
Definition: gc.c:621
getpid
pid_t getpid(void)
rb_method_refined_struct::orig_me
struct rb_method_entry_struct * orig_me
Definition: method.h:147
rmatch::char_offset
struct rmatch_offset * char_offset
Definition: re.h:39
CEILDIV
#define CEILDIV(i, mod)
Definition: gc.c:833
DYNAMIC_SYM_P
#define DYNAMIC_SYM_P(x)
Definition: ruby.h:412
Qundef
#define Qundef
Definition: ruby.h:470
RVALUE::memo
struct MEMO memo
Definition: gc.c:594
heap_cursor::page
struct heap_page * page
Definition: gc.c:7670
imemo
union @11::@13 imemo
rb_define_singleton_method
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1755
T_RATIONAL
#define T_RATIONAL
Definition: ruby.h:541
CHAR_BIT
#define CHAR_BIT
Definition: ruby.h:227
RHASH_ST_TABLE_FLAG
@ RHASH_ST_TABLE_FLAG
Definition: internal.h:820
EXIT_FAILURE
#define EXIT_FAILURE
Definition: eval_intern.h:32
fputs
int fputs(const char *__restrict, FILE *__restrict)
heap_pages_final_slots
#define heap_pages_final_slots
Definition: gc.c:917
gc_profile_record::heap_use_size
size_t heap_use_size
Definition: gc.c:528
rb_define_method
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1551
GET_EC
#define GET_EC()
Definition: vm_core.h:1766
RUBY_DTRACE_GC_HOOK
#define RUBY_DTRACE_GC_HOOK(name)
Definition: gc.c:10889
RVALUE::throw_data
struct vm_throw_data throw_data
Definition: gc.c:592
INT2NUM
#define INT2NUM(x)
Definition: ruby.h:1609
MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE
Definition: gc.c:477
ptr
struct RIMemo * ptr
Definition: debug.c:74
RVALUE::ment
struct rb_method_entry_struct ment
Definition: gc.c:595
me
const rb_callable_method_entry_t * me
Definition: rb_mjit_min_header-2.7.1.h:13151
vm_ifunc
IFUNC (Internal FUNCtion)
Definition: internal.h:1215
heap_page_body::header
struct heap_page_header header
Definition: gc.c:630
list_next
#define list_next(h, i, member)
Definition: rb_mjit_min_header-2.7.1.h:9025
STACK_START
#define STACK_START
Definition: gc.c:4602
rb_obj_id
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:3772
T_DATA
#define T_DATA
Definition: ruby.h:538
rb_objspace_reachable_objects_from
void rb_objspace_reachable_objects_from(VALUE obj, void(func)(VALUE, void *), void *data)
Definition: gc.c:9467
rb_method_iseq_struct::iseqptr
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition: method.h:127
ruby_xmalloc_body
void * ruby_xmalloc_body(size_t size)
Definition: gc.c:10084
has_sweeping_pages
#define has_sweeping_pages(heap)
Definition: gc.c:970
Qfalse
#define Qfalse
Definition: ruby.h:467
weakmap::obj2wmap
st_table * obj2wmap
Definition: gc.c:10335
is_marking
#define is_marking(objspace)
Definition: gc.c:953
gc_stat_compat_sym_remembered_shady_object
@ gc_stat_compat_sym_remembered_shady_object
Definition: gc.c:8892
uintptr_t
unsigned int uintptr_t
Definition: win32.h:106
rb_stdout
RUBY_EXTERN VALUE rb_stdout
Definition: ruby.h:2090
DBL2NUM
#define DBL2NUM(dbl)
Definition: ruby.h:967
__asan_region_is_poisoned
#define __asan_region_is_poisoned(x, y)
Definition: internal.h:110
GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO
Definition: gc.c:278
ruby_gc_params_t
Definition: gc.c:317
RVALUE::ast
rb_ast_t ast
Definition: gc.c:599
wmap_iter_arg
Definition: gc.c:10500
rb_objspace::minor_gc_count
size_t minor_gc_count
Definition: gc.c:751
RVALUE_WB_UNPROTECTED_BITMAP
#define RVALUE_WB_UNPROTECTED_BITMAP(obj)
Definition: gc.c:1215
rb_method_definition_struct::alias_count
int alias_count
Definition: method.h:165
rb_objspace::major_gc_count
size_t major_gc_count
Definition: gc.c:752
heap_page::flags
struct heap_page::@90 flags
RMATCH
#define RMATCH(obj)
Definition: re.h:50
rb_id2str
#define rb_id2str(id)
Definition: vm_backtrace.c:30
T_NODE
#define T_NODE
Definition: ruby.h:545
each_obj_args::data
void * data
Definition: gc.c:2946
rb_objspace_alloc
rb_objspace_t * rb_objspace_alloc(void)
Definition: gc.c:1587
dp
#define dp(v)
Definition: vm_debug.h:21
SPECIAL_CONST_P
#define SPECIAL_CONST_P(x)
Definition: ruby.h:1313
rb_ary_new3
#define rb_ary_new3
Definition: intern.h:104
st.h
NULL
#define NULL
Definition: _sdbm.c:101
RVALUE::object
struct RObject object
Definition: gc.c:574
T_COMPLEX
#define T_COMPLEX
Definition: ruby.h:542
heap_cursor::objspace
rb_objspace_t * objspace
Definition: gc.c:7671
gc_list
Definition: gc.c:635
rb_print_backtrace
void rb_print_backtrace(void)
Definition: vm_dump.c:750
ST_DELETE
@ ST_DELETE
Definition: st.h:99
gc_raise_tag::exc
VALUE exc
Definition: gc.c:9523
uint32_t
unsigned int uint32_t
Definition: sha2.h:101
SIZEOF_VOIDP
#define SIZEOF_VOIDP
Definition: rb_mjit_min_header-2.7.1.h:90
heap_page::before_sweep
unsigned int before_sweep
Definition: gc.c:851
imemo_svar
@ imemo_svar
special variable
Definition: internal.h:1135
gc_stat_sym_count
@ gc_stat_sym_count
Definition: gc.c:8840
rb_gc_mark_vm_stack_values
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
Definition: gc.c:4741
FL_TEST
#define FL_TEST(x, f)
Definition: ruby.h:1353
rb_class_remove_from_super_subclasses
void rb_class_remove_from_super_subclasses(VALUE klass)
Definition: class.c:76
RVALUE
Definition: gc.c:566
FL_WB_PROTECTED
#define FL_WB_PROTECTED
Definition: ruby.h:1279
HEAP_PAGE_ALIGN_LOG
#define HEAP_PAGE_ALIGN_LOG
Definition: gc.c:832
PRIsVALUE
#define PRIsVALUE
Definition: ruby.h:166
HEAP_PAGE_BITMAP_SIZE
@ HEAP_PAGE_BITMAP_SIZE
Definition: gc.c:841
PRINTF_ARGS
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char *,...)), 2, 3)
stack_chunk::next
struct stack_chunk * next
Definition: gc.c:644
gc_stat_sym_total_freed_objects
@ gc_stat_sym_total_freed_objects
Definition: gc.c:8854
heap_pages_sorted_length
#define heap_pages_sorted_length
Definition: gc.c:912
rb_obj_respond_to
int rb_obj_respond_to(VALUE, ID, int)
Definition: vm_method.c:2180
RUBY_T_MASK
@ RUBY_T_MASK
Definition: ruby.h:518
RVALUE_PAGE_UNCOLLECTIBLE
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)
Definition: gc.c:1220
heap_pages_sorted
#define heap_pages_sorted
Definition: gc.c:910
BUFF_ARGS
#define BUFF_ARGS
FL_SET
#define FL_SET(x, f)
Definition: ruby.h:1359
GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP
Definition: gc.c:467
st_insert
int st_insert(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1171
FIX2LONG
#define FIX2LONG(x)
Definition: ruby.h:394
ID2SYM
#define ID2SYM(x)
Definition: ruby.h:414
popcount_bits
#define popcount_bits
Definition: gc.c:623
heap_cursor::index
size_t index
Definition: gc.c:7669
rb_method_definition_struct::bmethod
rb_method_bmethod_t bmethod
Definition: method.h:174
strlen
size_t strlen(const char *)
TYPED_UPDATE_IF_MOVED
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing)
Definition: gc.c:1078
OBJ_FREEZE
#define OBJ_FREEZE(x)
Definition: ruby.h:1377
rb_gc_enable
VALUE rb_gc_enable(void)
Definition: gc.c:9211
T_SYMBOL
#define T_SYMBOL
Definition: ruby.h:540
T_OBJECT
#define T_OBJECT
Definition: ruby.h:523
rb_free_tmp_buffer
void rb_free_tmp_buffer(volatile VALUE *store)
Definition: gc.c:10276
root_objects_data::data
void * data
Definition: gc.c:9484
rb_alloc_tmp_buffer
void * rb_alloc_tmp_buffer(volatile VALUE *store, long len)
Definition: gc.c:10264
VM_METHOD_TYPE_IVAR
@ VM_METHOD_TYPE_IVAR
attr_reader or attr_accessor
Definition: method.h:105
rb_objspace::rincgc
struct rb_objspace::@89 rincgc
rb_hash_set_default_proc
VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc)
Definition: hash.c:2237
rb_mark_set
void rb_mark_set(st_table *tbl)
Definition: gc.c:4800
gc_stress_full_mark_after_malloc_p
#define gc_stress_full_mark_after_malloc_p()
Definition: gc.c:7128
rb_iseq_update_references
void rb_iseq_update_references(rb_iseq_t *iseq)
Definition: iseq.c:221
RAISED_NOMEMORY
@ RAISED_NOMEMORY
Definition: eval_intern.h:256
VM_ASSERT
#define VM_ASSERT(expr)
Definition: vm_core.h:56
rb_special_const_p
#define rb_special_const_p(obj)
Definition: rb_mjit_min_header-2.7.1.h:5318
ATOMIC_SET
#define ATOMIC_SET(var, val)
Definition: ruby_atomic.h:131
rb_big_hash
VALUE rb_big_hash(VALUE x)
Definition: bignum.c:6726
ruby_gc_params_t::gc_stress
VALUE gc_stress
Definition: gc.c:336
rb_free_method_entry
void rb_free_method_entry(const rb_method_entry_t *me)
Definition: vm_method.c:174
L
#define L(x)
Definition: asm.h:125
imemo_throw_data
@ imemo_throw_data
Definition: internal.h:1136
MEMO
MEMO.
Definition: internal.h:1278
rb_imemo_new
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2308
RB_BLOCK_CALL_FUNC_ARGLIST
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Definition: ruby.h:1964
HEAP_PAGE_BITMAP_PLANES
@ HEAP_PAGE_BITMAP_PLANES
Definition: gc.c:842
rb_check_arity
#define rb_check_arity
Definition: intern.h:347
strdup
char * strdup(const char *) __attribute__((__malloc__)) __attribute__((__warn_unused_result__))
rb_iseq_free
void rb_iseq_free(const rb_iseq_t *iseq)
Definition: iseq.c:89
rb_objspace::current_record
gc_profile_record * current_record
Definition: gc.c:741
RTypedData
Definition: ruby.h:1168
timespec::tv_nsec
long tv_nsec
Definition: missing.h:62
add
#define add(x, y)
Definition: date_strftime.c:23
onig_region_free
ONIG_EXTERN void onig_region_free(OnigRegion *region, int free_self)
Definition: regexec.c:343
rb_hash_new_with_size
MJIT_FUNC_EXPORTED VALUE rb_hash_new_with_size(st_index_t size)
Definition: hash.c:1529
ruby_initial_gc_stress_ptr
VALUE * ruby_initial_gc_stress_ptr
Definition: gc.c:905
VM_METHOD_TYPE_UNDEF
@ VM_METHOD_TYPE_UNDEF
Definition: method.h:109
gc_stat_compat_sym
gc_stat_compat_sym
Definition: gc.c:8881
heap_cursor
Definition: gc.c:7667
GET_HEAP_PINNED_BITS
#define GET_HEAP_PINNED_BITS(x)
Definition: gc.c:892
ALLOC_N
#define ALLOC_N(type, n)
Definition: ruby.h:1663
RVALUE
struct RVALUE RVALUE
GET_HEAP_UNCOLLECTIBLE_BITS
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)
Definition: gc.c:894
rb_vm_register_special_exception
#define rb_vm_register_special_exception(sp, e, m)
Definition: vm_core.h:1726
imemo_ast
@ imemo_ast
Definition: internal.h:1142
rb_gc_writebarrier
void rb_gc_writebarrier(VALUE a, VALUE b)
Definition: gc.c:6819
exc
const rb_iseq_t const VALUE exc
Definition: rb_mjit_min_header-2.7.1.h:13426
RData::dmark
void(* dmark)(void *)
Definition: ruby.h:1141
rb_transient_heap_update_references
void rb_transient_heap_update_references(void)
Definition: transient_heap.c:853
STR_SHARED_P
#define STR_SHARED_P(s)
Definition: internal.h:2163
ruby_global_symbols
rb_symbols_t ruby_global_symbols
Definition: symbol.c:66
rb_iseq_mark
void rb_iseq_mark(const rb_iseq_t *iseq)
Definition: iseq.c:287
gc_profile_record
struct gc_profile_record gc_profile_record
is_full_marking
#define is_full_marking(objspace)
Definition: gc.c:956
GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO
Definition: gc.c:275
ruby_gc_params_t::oldmalloc_limit_max
size_t oldmalloc_limit_max
Definition: gc.c:333
rb_raise
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2669
each_obj_args::objspace
rb_objspace_t * objspace
Definition: gc.c:2944
each_obj_args::callback
each_obj_callback * callback
Definition: gc.c:2945
verify_internal_consistency_struct::zombie_object_count
size_t zombie_object_count
Definition: gc.c:5897
rb_imemo_tmpbuf_struct::cnt
size_t cnt
Definition: internal.h:1236
force_finalize_list
Definition: gc.c:3424
strtoll
long long strtoll(const char *__restrict __n, char **__restrict __end_PTR, int __base)
RESTORE_FINALIZER
#define RESTORE_FINALIZER()
GPR_FLAG_MAJOR_BY_OLDGEN
@ GPR_FLAG_MAJOR_BY_OLDGEN
Definition: gc.c:495
GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS
Definition: gc.c:259
gc_stat_compat_sym_last
@ gc_stat_compat_sym_last
Definition: gc.c:8905
T_FILE
#define T_FILE
Definition: ruby.h:534
rb_execution_context_struct::cfp
rb_control_frame_t * cfp
Definition: vm_core.h:847
rb_eRangeError
VALUE rb_eRangeError
Definition: error.c:926
rb_obj_gc_flags
size_t rb_obj_gc_flags(VALUE obj, ID *flags, size_t max)
Definition: gc.c:6978
ruby_gc_params_t::heap_free_slots
size_t heap_free_slots
Definition: gc.c:319
FL_PROMOTED0
#define FL_PROMOTED0
Definition: ruby.h:1280
rb_objspace::mark_stack
mark_stack_t mark_stack
Definition: gc.c:719
RVALUE::as
union RVALUE::@78 as
imemo_parser_strterm
@ imemo_parser_strterm
Definition: internal.h:1143
rb_callable_method_entry_struct::called_id
ID called_id
Definition: method.h:63
RUBY_DEFAULT_FREE
#define RUBY_DEFAULT_FREE
Definition: ruby.h:1201
gc_stat_sym_heap_sorted_length
@ gc_stat_sym_heap_sorted_length
Definition: gc.c:8842
if
if((ID)(DISPID) nameid !=nameid)
Definition: win32ole.c:357
LONG2NUM
#define LONG2NUM(x)
Definition: ruby.h:1644
heap_pages_freeable_pages
#define heap_pages_freeable_pages
Definition: gc.c:916
rb_heap_struct::sweeping_page
struct heap_page * sweeping_page
Definition: gc.c:662
ATOMIC_SIZE_EXCHANGE
#define ATOMIC_SIZE_EXCHANGE(var, val)
Definition: ruby_atomic.h:140
ruby_gc_params_t::malloc_limit_growth_factor
double malloc_limit_growth_factor
Definition: gc.c:330
heap_page::page_node
struct list_node page_node
Definition: gc.c:860
rb_xrealloc_mul_add
void * rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
Definition: gc.c:10182
void
void
Definition: rb_mjit_min_header-2.7.1.h:13198
rb_objspace_t
struct rb_objspace rb_objspace_t
rb_obj_class
VALUE rb_obj_class(VALUE)
Equivalent to Object#class in Ruby.
Definition: object.c:217
list_for_each_safe
#define list_for_each_safe(h, i, nxt, member)
Definition: rb_mjit_min_header-2.7.1.h:9024
rb_method_definition_struct::body
union rb_method_definition_struct::@118 body
onig_free
ONIG_EXTERN void onig_free(OnigRegex)
ELTS_SHARED
#define ELTS_SHARED
Definition: ruby.h:970
rb_objspace::deferred_final
VALUE deferred_final
Definition: gc.c:732
probes.h
GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN
Definition: gc.c:285
SIZEOF_VALUE
#define SIZEOF_VALUE
Definition: ruby.h:105
OBJ_ID_INITIAL
#define OBJ_ID_INITIAL
Definition: gc.c:2884
RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC
Definition: gc.c:431
rb_obj_is_proc
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
rb_obj_info
const MJIT_FUNC_EXPORTED char * rb_obj_info(VALUE obj)
Definition: gc.c:11674
RICLASS_IS_ORIGIN
#define RICLASS_IS_ORIGIN
Definition: internal.h:1085
rb_data_typed_object_zalloc
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
strtol
long strtol(const char *__restrict __n, char **__restrict __end_PTR, int __base)
PRIuSIZE
#define PRIuSIZE
Definition: ruby.h:208
gc_stat_sym_remembered_wb_unprotected_objects
@ gc_stat_sym_remembered_wb_unprotected_objects
Definition: gc.c:8861
rb_vraise
void rb_vraise(VALUE exc, const char *fmt, va_list ap)
Definition: error.c:2663
T_ICLASS
#define T_ICLASS
Definition: ruby.h:525
rb_transient_heap_finish_marking
void rb_transient_heap_finish_marking(void)
Definition: transient_heap.c:916
ULONG2NUM
#define ULONG2NUM(x)
Definition: ruby.h:1645
GC_PROFILE_RECORD_DEFAULT_SIZE
#define GC_PROFILE_RECORD_DEFAULT_SIZE
Definition: gc.c:10750
FIXNUM_FLAG
#define FIXNUM_FLAG
Definition: ruby.h:472
rb_ast_struct
Definition: node.h:399
gc_profile_record::heap_total_objects
size_t heap_total_objects
Definition: gc.c:527
MARK_OBJECT_ARY_BUCKET_SIZE
#define MARK_OBJECT_ARY_BUCKET_SIZE
Definition: gc.c:7061
rb_method_type_name
const char * rb_method_type_name(rb_method_type_t type)
Definition: gc.c:11397
rb_objspace::marked_slots
size_t marked_slots
Definition: gc.c:720
imemo_type
imemo_type
Definition: internal.h:1132
rb_objspace_markable_object_p
int rb_objspace_markable_object_p(VALUE obj)
Definition: gc.c:3600
mark_stack::unused_cache_size
size_t unused_cache_size
Definition: gc.c:653
rb_method_attr_struct::location
VALUE location
Definition: method.h:139
vm_svar
SVAR (Special VARiable)
Definition: internal.h:1181
RVALUE::moved
struct RMoved moved
Definition: gc.c:572
OLD_SYM
#define OLD_SYM(s)
ST_REPLACE
@ ST_REPLACE
Definition: st.h:99
RVALUE::iseq
const rb_iseq_t iseq
Definition: gc.c:596
rb_objspace_internal_object_p
int rb_objspace_internal_object_p(VALUE obj)
Definition: gc.c:3097
RCLASS_IV_INDEX_TBL
#define RCLASS_IV_INDEX_TBL(c)
Definition: internal.h:1074
rb_objspace::step_slots
size_t step_slots
Definition: gc.c:817
va_start
#define va_start(v, l)
Definition: rb_mjit_min_header-2.7.1.h:3946
rb_gc_mark_maybe
void rb_gc_mark_maybe(VALUE obj)
Definition: gc.c:5046
RFile::fptr
struct rb_io_t * fptr
Definition: ruby.h:1136
VM_ENV_DATA_INDEX_ENV
#define VM_ENV_DATA_INDEX_ENV
Definition: vm_core.h:1196
gc_stat_sym_old_objects
@ gc_stat_sym_old_objects
Definition: gc.c:8863
gc_stat_sym_heap_live_slots
@ gc_stat_sym_heap_live_slots
Definition: gc.c:8845
DATA_PTR
#define DATA_PTR(dta)
Definition: ruby.h:1175
rb_gc_unregister_address
void rb_gc_unregister_address(VALUE *addr)
Definition: gc.c:7091
TYPE_NAME
#define TYPE_NAME(t)
verify_internal_consistency_struct::remembered_shady_count
size_t remembered_shady_count
Definition: gc.c:5902
ruby_error_nomemory
@ ruby_error_nomemory
Definition: vm_core.h:508
TRY_WITH_GC
#define TRY_WITH_GC(alloc)
Definition: gc.c:9840
rb_malloc_info_show_results
void rb_malloc_info_show_results(void)
Definition: gc.c:9998
VM_UNREACHABLE
#define VM_UNREACHABLE(func)
Definition: vm_core.h:57
rb_heap_struct
Definition: gc.c:656
LIKELY
#define LIKELY(x)
Definition: ffi_common.h:125
EC_POP_TAG
#define EC_POP_TAG()
Definition: eval_intern.h:137
verify_internal_consistency_struct::old_object_count
size_t old_object_count
Definition: gc.c:5901
timespec::tv_sec
time_t tv_sec
Definition: missing.h:61
rb_imemo_tmpbuf_struct::ptr
VALUE * ptr
Definition: internal.h:1234
list_top
#define list_top(h, type, member)
Definition: rb_mjit_min_header-2.7.1.h:8997
rb_objspace::moved_count_table
size_t moved_count_table[T_MASK]
Definition: gc.c:811
rb_cBasicObject
RUBY_EXTERN VALUE rb_cBasicObject
Definition: ruby.h:2009
FL_ABLE
#define FL_ABLE(x)
Definition: ruby.h:1351
MARKED_IN_BITMAP
#define MARKED_IN_BITMAP(bits, p)
Definition: gc.c:886
rb_objspace::size
size_t size
Definition: gc.c:743
gc_stat_compat_sym_oldmalloc_limit
@ gc_stat_compat_sym_oldmalloc_limit
Definition: gc.c:8903
RCLASS_M_TBL
#define RCLASS_M_TBL(c)
Definition: internal.h:1069
rb_check_frozen
#define rb_check_frozen(obj)
Definition: intern.h:319
rb_callable_method_entry_struct::defined_class
const VALUE defined_class
Definition: method.h:61
RSTRUCT_TRANSIENT_P
#define RSTRUCT_TRANSIENT_P(st)
Definition: internal.h:933
rb_objspace::run
int run
Definition: gc.c:738
RHASH_SIZE
#define RHASH_SIZE(hsh)
Definition: fbuffer.h:8
rb_method_definition_struct::alias
rb_method_alias_t alias
Definition: method.h:172
cfp
rb_control_frame_t * cfp
Definition: rb_mjit_min_header-2.7.1.h:14481
GPR_FLAG_IMMEDIATE_SWEEP
@ GPR_FLAG_IMMEDIATE_SWEEP
Definition: gc.c:511
rb_objspace::tomb_heap
rb_heap_t tomb_heap
Definition: gc.c:708
RCLASS_CONST_TBL
#define RCLASS_CONST_TBL(c)
Definition: internal.h:1067
ruby_gc_params_t::heap_free_slots_goal_ratio
double heap_free_slots_goal_ratio
Definition: gc.c:324
heap_page::has_remembered_objects
unsigned int has_remembered_objects
Definition: gc.c:852
gc_stat_compat_sym_remembered_shady_object_limit
@ gc_stat_compat_sym_remembered_shady_object_limit
Definition: gc.c:8893
rb_obj_is_thread
VALUE rb_obj_is_thread(VALUE obj)
Definition: vm.c:2655
RVALUE::hash
struct RHash hash
Definition: gc.c:580
UNEXPECTED_NODE
#define UNEXPECTED_NODE(func)
Definition: gc.c:2301
RRegexp
Definition: ruby.h:1112
SET
#define SET(name, attr)
st_init_numtable_with_size
st_table * st_init_numtable_with_size(st_index_t size)
Definition: st.c:660
ATOMIC_SIZE_CAS
#define ATOMIC_SIZE_CAS(var, oldval, val)
Definition: ruby_atomic.h:163
rb_hash_compare_by_id_p
MJIT_FUNC_EXPORTED VALUE rb_hash_compare_by_id_p(VALUE hash)
Definition: hash.c:4261
bool
#define bool
Definition: stdbool.h:13
mark_stack::cache
stack_chunk_t * cache
Definition: gc.c:649
list_head
Definition: rb_mjit_min_header-2.7.1.h:8902
ruby_xrealloc
void * ruby_xrealloc(void *ptr, size_t new_size)
Definition: gc.c:11999
memalign
void * memalign(size_t, size_t)
rb_iseq_location_struct::label
VALUE label
Definition: vm_core.h:275
gc_stat_sym
gc_stat_sym
Definition: gc.c:8839
T_FIXNUM
#define T_FIXNUM
Definition: ruby.h:535
malloc_limit
#define malloc_limit
Definition: gc.c:907
gc_stat_compat_sym_total_freed_object
@ gc_stat_compat_sym_total_freed_object
Definition: gc.c:8898
GPR_FLAG_FULL_MARK
@ GPR_FLAG_FULL_MARK
Definition: gc.c:514
RVALUE::alloc
struct rb_imemo_tmpbuf_struct alloc
Definition: gc.c:598
USE_RGENGC
#define USE_RGENGC
Definition: ruby.h:791
rb_ary_cat
VALUE rb_ary_cat(VALUE ary, const VALUE *argv, long len)
Definition: array.c:1208
rb_objspace::need_major_gc
int need_major_gc
Definition: gc.c:791
st_foreach_callback_func
int st_foreach_callback_func(st_data_t, st_data_t, st_data_t)
Definition: st.h:137
weakmap::final
VALUE final
Definition: gc.c:10337
unsigned
#define unsigned
Definition: rb_mjit_min_header-2.7.1.h:2843
APPENDF
#define APPENDF(f)
RSTRUCT_EMBED_LEN_MASK
#define RSTRUCT_EMBED_LEN_MASK
Definition: internal.h:920
rb_io_memsize
RUBY_FUNC_EXPORTED size_t rb_io_memsize(const rb_io_t *fptr)
Definition: io.c:4760
rb_ary_tmp_new
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:768
rb_hash_ar_table_size
size_t rb_hash_ar_table_size(void)
Definition: hash.c:355
FL_EXIVAR
#define FL_EXIVAR
Definition: ruby.h:1286
rb_ast_memsize
size_t rb_ast_memsize(const rb_ast_t *ast)
Definition: node.c:1375
h
size_t st_index_t h
Definition: rb_mjit_min_header-2.7.1.h:5423
RHASH_EMPTY_P
#define RHASH_EMPTY_P(h)
Definition: ruby.h:1131
gc_list::next
struct gc_list * next
Definition: gc.c:637
RFile
Definition: ruby.h:1134
RZombie
Definition: gc.c:986
rb_clear_constant_cache
void rb_clear_constant_cache(void)
Definition: vm_method.c:87
heap_page::free_slots
short free_slots
Definition: gc.c:847
st_data_t
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: st.h:22
st_numhash
st_index_t st_numhash(st_data_t n)
Definition: st.c:2176
GPR_DEFAULT_REASON
@ GPR_DEFAULT_REASON
Definition: gc.c:516
force_finalize_list::obj
VALUE obj
Definition: gc.c:3425
VM_METHOD_TYPE_CFUNC
@ VM_METHOD_TYPE_CFUNC
C method.
Definition: method.h:103
heap_pages_deferred_final
#define heap_pages_deferred_final
Definition: gc.c:918
ID_TABLE_REPLACE
@ ID_TABLE_REPLACE
Definition: id_table.h:12
GPR_FLAG_METHOD
@ GPR_FLAG_METHOD
Definition: gc.c:506
rb_jmp_buf
#define rb_jmp_buf
Definition: gc.c:82
rb_ast_update_references
void rb_ast_update_references(rb_ast_t *ast)
Definition: node.c:1332
st_init_strtable
st_table * st_init_strtable(void)
Definition: st.c:668
RCLASS
#define RCLASS(obj)
Definition: ruby.h:1269
rb_execution_context_struct::errinfo
VALUE errinfo
Definition: vm_core.h:875
obj_id_to_ref
#define obj_id_to_ref(objid)
Definition: gc.c:975
ruby_xcalloc_body
void * ruby_xcalloc_body(size_t n, size_t size)
Definition: gc.c:10117
rb_ary_last
VALUE rb_ary_last(int argc, const VALUE *argv, VALUE ary)
Definition: array.c:1677
rb_imemo_tmpbuf_struct::next
struct rb_imemo_tmpbuf_struct * next
Definition: internal.h:1235
HEAP_PAGE_ALIGN_MASK
@ HEAP_PAGE_ALIGN_MASK
Definition: gc.c:836
heap_pages_lomem
#define heap_pages_lomem
Definition: gc.c:913
T_REGEXP
#define T_REGEXP
Definition: ruby.h:529
RVALUE_OLD_AGE
#define RVALUE_OLD_AGE
Definition: gc.c:1223
VM_METHOD_TYPE_NOTIMPLEMENTED
@ VM_METHOD_TYPE_NOTIMPLEMENTED
Definition: method.h:110
rb_gc_writebarrier_unprotect
void rb_gc_writebarrier_unprotect(VALUE obj)
Definition: gc.c:6840
rb_objspace::next_index
size_t next_index
Definition: gc.c:742
MARK_IN_BITMAP
#define MARK_IN_BITMAP(bits, p)
Definition: gc.c:887
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n))
FL_UNSET
#define FL_UNSET(x, f)
Definition: ruby.h:1361
GPR_FLAG_MALLOC
@ GPR_FLAG_MALLOC
Definition: gc.c:505
rb_cref_struct
CREF (Class REFerence)
Definition: method.h:41
mjit.h
RCLASS_SERIAL
#define RCLASS_SERIAL(c)
Definition: internal.h:1078
rb_hash_lookup
VALUE rb_hash_lookup(VALUE hash, VALUE key)
Definition: hash.c:2058
rb_errinfo
VALUE rb_errinfo(void)
The current exception in the current thread.
Definition: eval.c:1882
rb_objspace::range
RVALUE * range[2]
Definition: gc.c:727
rb_ary_push
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:1195
ruby_xcalloc
void * ruby_xcalloc(size_t n, size_t size)
Definition: gc.c:11989
GET_HEAP_PAGE
#define GET_HEAP_PAGE(x)
Definition: gc.c:878
st_index_t
st_data_t st_index_t
Definition: st.h:50
RComplex::real
VALUE real
Definition: internal.h:807
st_hash_type
Definition: st.h:61
vm_throw_data
THROW_DATA.
Definition: internal.h:1193
VM_METHOD_TYPE_BMETHOD
@ VM_METHOD_TYPE_BMETHOD
Definition: method.h:106
EC_EXEC_TAG
#define EC_EXEC_TAG()
Definition: eval_intern.h:181
rb_env_t
Definition: vm_core.h:1055
objspace_and_reason::objspace
rb_objspace_t * objspace
Definition: gc.c:7372
cnt
rb_atomic_t cnt[RUBY_NSIG]
Definition: signal.c:503
ruby_sized_xrealloc2
void * ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
Definition: gc.c:10145
transient_heap.h
rb_obj_freeze
VALUE rb_obj_freeze(VALUE)
Make the object unmodifiable.
Definition: object.c:1080
rb_method_definition_struct::iseq
rb_method_iseq_t iseq
Definition: method.h:169
RHASH_IFNONE
#define RHASH_IFNONE(h)
Definition: ruby.h:1129
va_list
__gnuc_va_list va_list
Definition: rb_mjit_min_header-2.7.1.h:831
rb_transient_heap_promote
void rb_transient_heap_promote(VALUE obj)
Definition: transient_heap.c:640
onig_region_memsize
size_t onig_region_memsize(const OnigRegion *regs)
Definition: regcomp.c:5669
rb_classext_struct::refined_class
const VALUE refined_class
Definition: internal.h:1040
vm_core.h
RCLASS_CALLABLE_M_TBL
#define RCLASS_CALLABLE_M_TBL(c)
Definition: internal.h:1073
malloc
void * malloc(size_t) __attribute__((__malloc__)) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(1)))
GPR_FLAG_MAJOR_BY_SHADY
@ GPR_FLAG_MAJOR_BY_SHADY
Definition: gc.c:496
CHECK
#define CHECK(sub)
Definition: compile.c:448
rb_source_location_cstr
const char * rb_source_location_cstr(int *pline)
Definition: vm.c:1376
ruby_xmalloc2_body
void * ruby_xmalloc2_body(size_t n, size_t size)
Definition: gc.c:10101
rb_eTypeError
VALUE rb_eTypeError
Definition: error.c:922
gc_stat_sym_remembered_wb_unprotected_objects_limit
@ gc_stat_sym_remembered_wb_unprotected_objects_limit
Definition: gc.c:8862
rb_gc_mark_machine_stack
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:4983
RBASIC_CLASS
#define RBASIC_CLASS(obj)
Definition: ruby.h:906
SIZED_REALLOC_N
#define SIZED_REALLOC_N(var, type, n, old_n)
Definition: internal.h:1662
RRational::den
VALUE den
Definition: internal.h:791
rb_obj_is_mutex
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread_sync.c:131
rb_heap_t
struct rb_heap_struct rb_heap_t
ALLOC
#define ALLOC(type)
Definition: ruby.h:1664
STACK_CHUNK_SIZE
#define STACK_CHUNK_SIZE
Definition: gc.c:640
T_CLASS
#define T_CLASS
Definition: ruby.h:524
rb_method_alias_struct::original_me
struct rb_method_entry_struct * original_me
Definition: method.h:143
T_MATCH
#define T_MATCH
Definition: ruby.h:539
rb_free_const_table
void rb_free_const_table(struct rb_id_table *tbl)
Definition: gc.c:2493
RARRAY_EMBED_FLAG
@ RARRAY_EMBED_FLAG
Definition: ruby.h:1029
CLOCK_PROCESS_CPUTIME_ID
#define CLOCK_PROCESS_CPUTIME_ID
Definition: rb_mjit_min_header-2.7.1.h:2340
rb_eRuntimeError
VALUE rb_eRuntimeError
Definition: error.c:920
rb_gc_mark_global_tbl
void rb_gc_mark_global_tbl(void)
Definition: variable.c:434
rb_objspace::immediate_sweep
unsigned int immediate_sweep
Definition: gc.c:688
size_t
unsigned int size_t
Definition: rb_mjit_min_header-2.7.1.h:660
gc_mode
gc_mode
Definition: gc.c:670
rb_imemo_tmpbuf_parser_heap
rb_imemo_tmpbuf_t * rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
Definition: gc.c:2328
rb_objspace::total_freed_objects
size_t total_freed_objects
Definition: gc.c:780
rb_size_mul_or_raise
size_t rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
Definition: gc.c:192
rb_objspace::gc_stressful
unsigned int gc_stressful
Definition: gc.c:693
rb_gc_copy_finalizer
void rb_gc_copy_finalizer(VALUE dest, VALUE obj)
Definition: gc.c:3297
RETURN_ENUMERATOR
#define RETURN_ENUMERATOR(obj, argc, argv)
Definition: intern.h:279
mod
#define mod(x, y)
Definition: date_strftime.c:28
RARRAY_AREF
#define RARRAY_AREF(a, i)
Definition: ruby.h:1101
rb_control_frame_struct
Definition: vm_core.h:760
RHASH_TRANSIENT_P
#define RHASH_TRANSIENT_P(hash)
Definition: internal.h:870
rb_mv_generic_ivar
void rb_mv_generic_ivar(VALUE src, VALUE dst)
Definition: variable.c:983
rb_newobj_of
VALUE rb_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2296
FL_USHIFT
#define FL_USHIFT
Definition: ruby.h:1289
ROBJECT_IVPTR
#define ROBJECT_IVPTR(o)
Definition: ruby.h:937
gc_stat_sym_heap_final_slots
@ gc_stat_sym_heap_final_slots
Definition: gc.c:8847
RTYPEDDATA_DATA
#define RTYPEDDATA_DATA(v)
Definition: ruby.h:1179
heap_page::total_slots
short total_slots
Definition: gc.c:846
symbol.h
size
int size
Definition: encoding.c:58
gc_stat_compat_sym_oldmalloc_increase
@ gc_stat_compat_sym_oldmalloc_increase
Definition: gc.c:8902
FALSE
#define FALSE
Definition: nkf.h:174
rb_vm_mark
void rb_vm_mark(void *ptr)
Definition: vm.c:2243
ruby_gc_params_t::oldmalloc_limit_growth_factor
double oldmalloc_limit_growth_factor
Definition: gc.c:334
FIXNUM_P
#define FIXNUM_P(f)
Definition: ruby.h:396
optional::right
size_t right
Definition: gc.c:92
GET_HEAP_MARKING_BITS
#define GET_HEAP_MARKING_BITS(x)
Definition: gc.c:896
RString
Definition: ruby.h:988
rb_gc_adjust_memory_usage
void rb_gc_adjust_memory_usage(ssize_t diff)
Definition: gc.c:10319
rb_ast_free
void rb_ast_free(rb_ast_t *ast)
Definition: node.c:1354
rb_objspace_marked_object_p
int rb_objspace_marked_object_p(VALUE obj)
Definition: gc.c:5224
RCLASS_SUPER
#define RCLASS_SUPER(c)
Definition: classext.h:16
rb_gc
void rb_gc(void)
Definition: gc.c:8681
rb_gc_mark_movable
void rb_gc_mark_movable(VALUE ptr)
Definition: gc.c:5208
gc_stat_sym_malloc_increase_bytes
@ gc_stat_sym_malloc_increase_bytes
Definition: gc.c:8855
rb_objspace_free
void rb_objspace_free(rb_objspace_t *objspace)
Definition: gc.c:1602
gc_stat_sym_heap_tomb_pages
@ gc_stat_sym_heap_tomb_pages
Definition: gc.c:8850
rb_id_table_memsize
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
Definition: id_table.c:123
POP_MARK_FUNC_DATA
#define POP_MARK_FUNC_DATA()
Definition: gc.c:1102
VM_METHOD_TYPE_ZSUPER
@ VM_METHOD_TYPE_ZSUPER
Definition: method.h:107
ruby_xmalloc2
void * ruby_xmalloc2(size_t n, size_t size)
Definition: gc.c:11979
rb_objspace::latest_gc_info
int latest_gc_info
Definition: gc.c:739
verify_internal_consistency_struct::objspace
rb_objspace_t * objspace
Definition: gc.c:5894
rb_gc_writebarrier_remember
MJIT_FUNC_EXPORTED void rb_gc_writebarrier_remember(VALUE obj)
Definition: gc.c:6877
rb_obj_is_fiber
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:1041
mark_stack::cache_size
size_t cache_size
Definition: gc.c:652
rb_gc_free_dsymbol
void rb_gc_free_dsymbol(VALUE)
Definition: symbol.c:678
list
struct rb_encoding_entry * list
Definition: encoding.c:56
MEMZERO
#define MEMZERO(p, type, n)
Definition: ruby.h:1752
GPR_FLAG_NONE
@ GPR_FLAG_NONE
Definition: gc.c:492
gc_raise_tag::fmt
const char * fmt
Definition: gc.c:9524
rb_gc_start
VALUE rb_gc_start(void)
Definition: gc.c:8674
rb_gc_register_address
void rb_gc_register_address(VALUE *addr)
Definition: gc.c:7079
gc_stat_sym_heap_eden_pages
@ gc_stat_sym_heap_eden_pages
Definition: gc.c:8849
RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK
Definition: gc.c:412
RHASH
#define RHASH(obj)
Definition: internal.h:859
rb_iseq_location_struct::pathobj
VALUE pathobj
Definition: vm_core.h:273
RVALUE::cref
rb_cref_t cref
Definition: gc.c:590
rb_gc_stat
size_t rb_gc_stat(VALUE key)
Definition: gc.c:9176
RArray
Definition: ruby.h:1048
NOINLINE
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace))
T_NONE
#define T_NONE
Definition: ruby.h:521
N
#define N
Definition: lgamma_r.c:20
heap_page
Definition: gc.c:845
ROBJECT
#define ROBJECT(obj)
Definition: ruby.h:1268
rb_data_object_zalloc
VALUE rb_data_object_zalloc(VALUE, size_t, RUBY_DATA_FUNC, RUBY_DATA_FUNC)
rb_objspace::sorted
struct heap_page ** sorted
Definition: gc.c:723
LL2NUM
#define LL2NUM(v)
Definition: rb_mjit_min_header-2.7.1.h:4208
rb_objspace::eden_heap
rb_heap_t eden_heap
Definition: gc.c:707
rb_class_path_cached
VALUE rb_class_path_cached(VALUE)
Definition: variable.c:162
rb_data_type_struct::dcompact
void(* dcompact)(void *)
Definition: ruby.h:1154
rb_ec_raised_set
#define rb_ec_raised_set(ec, f)
Definition: eval_intern.h:258
ruby_xrealloc2
void * ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
Definition: gc.c:12009
st_update
int st_update(st_table *tab, st_data_t key, st_update_callback_func *func, st_data_t arg)
Definition: st.c:1510
RGENGC_DEBUG
#define RGENGC_DEBUG
Definition: gc.c:380
NEW_SYM
#define NEW_SYM(s)
index
int index
Definition: rb_mjit_min_header-2.7.1.h:11171
clock_t
unsigned long clock_t
Definition: rb_mjit_min_header-2.7.1.h:1298
heap_page::free_next
struct heap_page * free_next
Definition: gc.c:857
list_del
#define list_del(n)
Definition: rb_mjit_min_header-2.7.1.h:8968
vsnprintf
int int vsnprintf(char *__restrict, size_t, const char *__restrict, __gnuc_va_list) __attribute__((__format__(__printf__
key
key
Definition: openssl_missing.h:181
T_HASH
#define T_HASH
Definition: ruby.h:531
RCLASS_IV_TBL
#define RCLASS_IV_TBL(c)
Definition: internal.h:1066
REQUIRED_SIZE_BY_MALLOC
@ REQUIRED_SIZE_BY_MALLOC
Definition: gc.c:837
heap_page::in_tomb
unsigned int in_tomb
Definition: gc.c:854
path
VALUE path
Definition: rb_mjit_min_header-2.7.1.h:7300
rb_objspace::allocated_pages
size_t allocated_pages
Definition: gc.c:724
rb_objspace::rcompactor
struct rb_objspace::@88 rcompactor
RHASH_ST_TABLE_P
#define RHASH_ST_TABLE_P(h)
Definition: internal.h:861
gc_profile_record_flag
gc_profile_record_flag
Definition: gc.c:491
clock
clock_t clock(void)
SET_STACK_END
#define SET_STACK_END
Definition: gc.c:4600
rb_heap_struct::freelist
RVALUE * freelist
Definition: gc.c:657
rb_global_variable
void rb_global_variable(VALUE *var)
Definition: gc.c:7114
RCLASS_EXT
#define RCLASS_EXT(c)
Definition: classext.h:15
rb_copy_wb_protected_attribute
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
Definition: gc.c:6940
RUBY_INTERNAL_EVENT_OBJSPACE_MASK
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
Definition: ruby.h:2275
RHash
Definition: internal.h:887
CLASS_OF
#define CLASS_OF(v)
Definition: ruby.h:484
rb_str_buf_append
VALUE rb_str_buf_append(VALUE, VALUE)
Definition: string.c:2950
rb_io_t::writeconv_asciicompat
VALUE writeconv_asciicompat
Definition: io.h:96
T_MODULE
#define T_MODULE
Definition: ruby.h:526
ruby_mimfree
void ruby_mimfree(void *ptr)
Definition: gc.c:10236
force_finalize_list::next
struct force_finalize_list * next
Definition: gc.c:3427
rb_yield_values
#define rb_yield_values(argc,...)
Definition: rb_mjit_min_header-2.7.1.h:6545
rb_gcdebug_print_obj_condition
void rb_gcdebug_print_obj_condition(VALUE obj)
RString::as
union RString::@94 as
RVALUE::imemo
union RVALUE::@78::@80 imemo
RString::heap
struct RString::@94::@95 heap
RVALUE::ifunc
struct vm_ifunc ifunc
Definition: gc.c:593
RVALUE::v2
VALUE v2
Definition: gc.c:604
gc_stat_sym_minor_gc_count
@ gc_stat_sym_minor_gc_count
Definition: gc.c:8858
heap_allocated_pages
#define heap_allocated_pages
Definition: gc.c:911
gc_event_hook_available_p
#define gc_event_hook_available_p(objspace)
Definition: gc.c:2101
RMoved
Definition: internal.h:908
TAG_RAISE
#define TAG_RAISE
Definition: vm_core.h:203
RARRAY_LEN
#define RARRAY_LEN(a)
Definition: ruby.h:1070
RUBY_INTERNAL_EVENT_GC_EXIT
#define RUBY_INTERNAL_EVENT_GC_EXIT
Definition: ruby.h:2274
st_foreach
int st_foreach(st_table *tab, st_foreach_callback_func *func, st_data_t arg)
Definition: st.c:1718
RHASH_AR_TABLE
#define RHASH_AR_TABLE(hash)
Definition: internal.h:855
ruby_get_stack_grow_direction
int ruby_get_stack_grow_direction(volatile VALUE *addr)
Definition: gc.c:4622
imemo_ment
@ imemo_ment
Definition: internal.h:1139
rb_define_module_function
void rb_define_module_function(VALUE module, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a module function for module.
Definition: class.c:1771
rb_method_bmethod_struct::hooks
struct rb_hook_list_struct * hooks
Definition: method.h:153
rb_scan_args
#define rb_scan_args(argc, argvp, fmt,...)
Definition: rb_mjit_min_header-2.7.1.h:6333
rb_objspace::finalizer_table
st_table * finalizer_table
Definition: gc.c:735
RSTRUCT_LEN
#define RSTRUCT_LEN(st)
Definition: ruby.h:1255
verify_internal_consistency_struct::parent
VALUE parent
Definition: gc.c:5900
rb_id_table_free
void rb_id_table_free(struct rb_id_table *tbl)
Definition: id_table.c:102
UPDATE_IF_MOVED
#define UPDATE_IF_MOVED(_objspace, _thing)
Definition: gc.c:1084
optional::left
bool left
Definition: gc.c:91
ruby_gc_params_t::oldmalloc_limit_min
size_t oldmalloc_limit_min
Definition: gc.c:332
rb_method_type_t
rb_method_type_t
Definition: method.h:101
rb_hook_list_mark
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:53
FL_TEST_RAW
#define FL_TEST_RAW(x, f)
Definition: ruby.h:1352
rb_method_refined_struct::owner
VALUE owner
Definition: method.h:148
rb_objspace::during_compacting
unsigned int during_compacting
Definition: gc.c:692
heap_page::mark_bits
bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:866
ATOMIC_SIZE_ADD
#define ATOMIC_SIZE_ADD(var, val)
Definition: ruby_atomic.h:138
rb_cObject
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:2010
rb_event_flag_t
uint32_t rb_event_flag_t
Definition: ruby.h:2278
GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR
Definition: gc.c:265
verify_internal_consistency_struct
Definition: gc.c:5893
is_lazy_sweeping
#define is_lazy_sweeping(heap)
Definition: gc.c:971
buf
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4322
RVALUE_AGE_SHIFT
#define RVALUE_AGE_SHIFT
Definition: gc.c:1224
rb_classext_t
struct rb_classext_struct rb_classext_t
Definition: internal.h:1045
list_add
#define list_add(h, n)
Definition: rb_mjit_min_header-2.7.1.h:8931
T_BIGNUM
#define T_BIGNUM
Definition: ruby.h:533
gc_mode_none
@ gc_mode_none
Definition: gc.c:671
TypedData_Get_Struct
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1252
rb_data_typed_object_wrap
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
Definition: gc.c:2399
rb_objspace_each_objects
void rb_objspace_each_objects(each_obj_callback *callback, void *data)
Definition: gc.c:3027
root_objects_data::category
const char * category
Definition: gc.c:9482
rb_objspace_data_type_memsize
size_t rb_objspace_data_type_memsize(VALUE obj)
Definition: gc.c:2419
rb_str_append
VALUE rb_str_append(VALUE, VALUE)
Definition: string.c:2965
mjit_remove_class_serial
void mjit_remove_class_serial(rb_serial_t class_serial)
VM_METHOD_TYPE_MISSING
@ VM_METHOD_TYPE_MISSING
wrapper for method_missing(id)
Definition: method.h:112
rb_bug
void rb_bug(const char *fmt,...)
Definition: error.c:634
rb_control_frame_struct::self
VALUE self
Definition: vm_core.h:764
RVALUE::match
struct RMatch match
Definition: gc.c:586
rb_objspace_reachable_objects_from_root
void rb_objspace_reachable_objects_from_root(void(func)(const char *category, VALUE, void *), void *passing_data)
Definition: gc.c:9495
gc_stat_compat_sym_heap_increment
@ gc_stat_compat_sym_heap_increment
Definition: gc.c:8885
rb_io_t::rb_io_enc_t::ecopts
VALUE ecopts
Definition: io.h:89
rmatch::char_offset_num_allocated
int char_offset_num_allocated
Definition: re.h:40
GC_ASSERT
#define GC_ASSERT(expr)
Definition: gc.c:403
internal.h
dont_gc
#define dont_gc
Definition: gc.c:921
gc_mode_set
#define gc_mode_set(objspace, mode)
Definition: gc.c:951
error
const rb_iseq_t const char * error
Definition: rb_mjit_min_header-2.7.1.h:13428
rb_objspace::last_major_gc
size_t last_major_gc
Definition: gc.c:792
T_ARRAY
#define T_ARRAY
Definition: ruby.h:530
rb_objspace::increase
size_t increase
Definition: gc.c:679
stack_chunk::data
VALUE data[STACK_CHUNK_SIZE]
Definition: gc.c:643
rb_mKernel
RUBY_EXTERN VALUE rb_mKernel
Definition: ruby.h:1998
ROBJ_TRANSIENT_P
#define ROBJ_TRANSIENT_P(obj)
Definition: internal.h:2261
rb_gc_guarded_val
volatile VALUE rb_gc_guarded_val
Definition: gc.c:248
argv
char ** argv
Definition: ruby.c:223
f
#define f
gc_stat_sym_major_gc_count
@ gc_stat_sym_major_gc_count
Definition: gc.c:8859
time
time_t time(time_t *_timer)
RHASH_ST_TABLE
#define RHASH_ST_TABLE(hash)
Definition: internal.h:856
objspace_and_reason
Definition: gc.c:7371
gc_stat_compat_sym_heap_length
@ gc_stat_compat_sym_heap_length
Definition: gc.c:8886
LONG_LONG
#define LONG_LONG
Definition: rb_mjit_min_header-2.7.1.h:3907
os_each_struct
Definition: gc.c:3055
ST_CONTINUE
@ ST_CONTINUE
Definition: st.h:99
RBignum
Definition: internal.h:749
rb_heap_struct::total_slots
size_t total_slots
Definition: gc.c:667
rb_setjmp
#define rb_setjmp(env)
Definition: gc.c:81
RVALUE::rational
struct RRational rational
Definition: gc.c:587
RVALUE::v1
VALUE v1
Definition: gc.c:603
st_init_table
st_table * st_init_table(const struct st_hash_type *type)
Definition: st.c:645
rb_data_typed_object_alloc
#define rb_data_typed_object_alloc
Definition: gc.c:15
xmalloc
#define xmalloc
Definition: defines.h:211
ruby_thread_has_gvl_p
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1705
rb_objspace::hook_events
rb_event_flag_t hook_events
Definition: gc.c:703
UNREACHABLE
#define UNREACHABLE
Definition: ruby.h:63
VM_METHOD_TYPE_ATTRSET
@ VM_METHOD_TYPE_ATTRSET
attr_writer or attr_accessor
Definition: method.h:104
GET_PAGE_BODY
#define GET_PAGE_BODY(x)
Definition: gc.c:876
rb_sprintf
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1197
ARY_SHARED_P
#define ARY_SHARED_P(ary)
Definition: gc.c:11417
rb_objspace_garbage_object_p
int rb_objspace_garbage_object_p(VALUE obj)
Definition: gc.c:3607
gc_profile_record::flags
int flags
Definition: gc.c:522
rb_mark_hash
void rb_mark_hash(st_table *tbl)
Definition: gc.c:4864
rb_wb_protected_newobj_of
VALUE rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2281
rb_gc_guarded_ptr_val
volatile VALUE * rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
Definition: gc.c:250
rb_hashtype_ident
const struct st_hash_type rb_hashtype_ident
Definition: hash.c:322
rb_id_table_foreach_with_replace
void rb_id_table_foreach_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, rb_id_table_update_callback_func_t *replace, void *data)
Definition: id_table.c:270
RARRAY
#define RARRAY(obj)
Definition: ruby.h:1273
rb_xmalloc_mul_add_mul
void * rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10189
gc_event_hook
#define gc_event_hook(objspace, event, data)
Definition: gc.c:2104
GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO
Definition: gc.c:281
pc
rb_control_frame_t const VALUE * pc
Definition: rb_mjit_min_header-2.7.1.h:16860
GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN
Definition: gc.c:295
T_NIL
#define T_NIL
Definition: ruby.h:522
GPR_FLAG_MAJOR_BY_FORCE
@ GPR_FLAG_MAJOR_BY_FORCE
Definition: gc.c:497
rb_subclass_entry::klass
VALUE klass
Definition: internal.h:999
heap_page_body
Definition: gc.c:629
rb_objspace::old_objects_limit
size_t old_objects_limit
Definition: gc.c:796
ruby_gc_params_t::growth_factor
double growth_factor
Definition: gc.c:320
BDIGIT
#define BDIGIT
Definition: bigdecimal.h:48
timeval
Definition: missing.h:53
rb_objspace::total_allocated_pages
size_t total_allocated_pages
Definition: gc.c:781
global_list
#define global_list
Definition: gc.c:925
lo
#define lo
Definition: siphash.c:21
gc_stat_sym_heap_allocatable_pages
@ gc_stat_sym_heap_allocatable_pages
Definition: gc.c:8843
RVALUE::next
struct RVALUE * next
Definition: gc.c:570
T_MOVED
#define T_MOVED
Definition: ruby.h:547
str
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
GET_THREAD
#define GET_THREAD()
Definition: vm_core.h:1765
GPR_FLAG_MAJOR_MASK
@ GPR_FLAG_MAJOR_MASK
Definition: gc.c:501
ssize_t
_ssize_t ssize_t
Definition: rb_mjit_min_header-2.7.1.h:1324
stack_chunk
Definition: gc.c:642
mark_stack::limit
int limit
Definition: gc.c:651
ruby_disable_gc
int ruby_disable_gc
Definition: gc.c:1001
NO_SANITIZE
NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr))
gc_stat_sym_compact_count
@ gc_stat_sym_compact_count
Definition: gc.c:8860
RUBY_TYPED_FREE_IMMEDIATELY
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1207
ruby_native_thread_p
int ruby_native_thread_p(void)
Definition: thread.c:5277
heap_pages_himem
#define heap_pages_himem
Definition: gc.c:914
memset
void * memset(void *, int, size_t)
debug_counter.h
T_ZOMBIE
#define T_ZOMBIE
Definition: ruby.h:546
MEMCPY
#define MEMCPY(p1, p2, type, n)
Definition: ruby.h:1753
MARK_CHECKPOINT
#define MARK_CHECKPOINT(category)
PRIuVALUE
#define PRIuVALUE
Definition: ruby.h:163
RZombie::next
VALUE next
Definition: gc.c:988
types
enum imemo_type types
Definition: debug.c:72
GPR_FLAG_MAJOR_BY_OLDMALLOC
@ GPR_FLAG_MAJOR_BY_OLDMALLOC
Definition: gc.c:499
stress_to_class
#define stress_to_class
Definition: gc.c:931
Init_heap
void Init_heap(void)
Definition: gc.c:2911
rb_hash_aset
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:2847
ruby_gc_params_t::malloc_limit_max
size_t malloc_limit_max
Definition: gc.c:329
int
__inline__ int
Definition: rb_mjit_min_header-2.7.1.h:2807
RComplex
Definition: internal.h:805
ATOMIC_EXCHANGE
#define ATOMIC_EXCHANGE(var, val)
Definition: ruby_atomic.h:135
old
VALUE ID VALUE old
Definition: rb_mjit_min_header-2.7.1.h:16070
RRegexp::src
const VALUE src
Definition: ruby.h:1115
gc_prof_enabled
#define gc_prof_enabled(objspace)
Definition: gc.c:1087
RVALUE::array
struct RArray array
Definition: gc.c:578
clock_gettime
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4612
rb_obj_info_dump
void rb_obj_info_dump(VALUE obj)
Definition: gc.c:11680
heap_tomb
#define heap_tomb
Definition: gc.c:920
NIL_P
#define NIL_P(v)
Definition: ruby.h:482
RDATA
#define RDATA(obj)
Definition: ruby.h:1274
RVALUE_PIN_BITMAP
#define RVALUE_PIN_BITMAP(obj)
Definition: gc.c:1211
PRIdSIZE
#define PRIdSIZE
Definition: ruby.h:205
memcpy
void * memcpy(void *__restrict, const void *__restrict, size_t)
TAG_NONE
#define TAG_NONE
Definition: vm_core.h:197
RUBY_INTERNAL_EVENT_NEWOBJ
#define RUBY_INTERNAL_EVENT_NEWOBJ
Definition: ruby.h:2268
rb_objspace::uncollectible_wb_unprotected_objects_limit
size_t uncollectible_wb_unprotected_objects_limit
Definition: gc.c:794
RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE
Definition: gc.c:399
gc_stat_compat_sym_heap_eden_page_length
@ gc_stat_compat_sym_heap_eden_page_length
Definition: gc.c:8883
snprintf
int snprintf(char *__restrict, size_t, const char *__restrict,...) __attribute__((__format__(__printf__
BIGNUM_EMBED_FLAG
#define BIGNUM_EMBED_FLAG
Definition: internal.h:769
gc_stress_full_mark_after_malloc
@ gc_stress_full_mark_after_malloc
Definition: gc.c:7124
io.h
Init_gc_stress
void Init_gc_stress(void)
Definition: gc.c:2931
rb_objspace_set_event_hook
void rb_objspace_set_event_hook(const rb_event_flag_t event)
Definition: gc.c:2082
BITMAP_BIT
#define BITMAP_BIT(p)
Definition: gc.c:883
argc
int argc
Definition: ruby.c:222
malloc_increase
#define malloc_increase
Definition: gc.c:908
VM_ENV_FLAG_WB_REQUIRED
@ VM_ENV_FLAG_WB_REQUIRED
Definition: vm_core.h:1188
VM_METHOD_TYPE_ISEQ
@ VM_METHOD_TYPE_ISEQ
Ruby method.
Definition: method.h:102
rb_objspace::total_allocated_objects_at_gc_start
size_t total_allocated_objects_at_gc_start
Definition: gc.c:775
rb_objspace::freeable_pages
size_t freeable_pages
Definition: gc.c:728
GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX
Definition: gc.c:301
regint.h
T_IMEMO
#define T_IMEMO
Definition: ruby.h:543
GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX
Definition: gc.c:288
rb_obj_classname
const char * rb_obj_classname(VALUE)
Definition: variable.c:289
memop_type
memop_type
Definition: gc.c:9686
rb_objspace_gc_disable
VALUE rb_objspace_gc_disable(rb_objspace_t *objspace)
Definition: gc.c:9255
rb_iseq_memsize
size_t rb_iseq_memsize(const rb_iseq_t *iseq)
Definition: iseq.c:373
rb_vm_struct::self
VALUE self
Definition: vm_core.h:577
mark_stack::index
int index
Definition: gc.c:650
roomof
#define roomof(x, y)
Definition: internal.h:1298
rb_obj_memsize_of
size_t rb_obj_memsize_of(VALUE obj)
Definition: gc.c:3936
ruby_gc_params_t::growth_max_slots
size_t growth_max_slots
Definition: gc.c:321
list_node
Definition: rb_mjit_min_header-2.7.1.h:8898
GC_DEBUG
#define GC_DEBUG
Definition: gc.c:365
rb_define_const
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2880
free
#define free(x)
Definition: dln.c:52
err
int err
Definition: win32.c:135
will_be_incremental_marking
#define will_be_incremental_marking(objspace)
Definition: gc.c:966
GPR_FLAG_STRESS
@ GPR_FLAG_STRESS
Definition: gc.c:508
rb_objspace::flags
struct rb_objspace::@83 flags
is_sweeping
#define is_sweeping(objspace)
Definition: gc.c:954
gc_stat_sym_malloc_increase_bytes_limit
@ gc_stat_sym_malloc_increase_bytes_limit
Definition: gc.c:8856
STACK_LENGTH
#define STACK_LENGTH
Definition: gc.c:4616
rb_io_write
VALUE rb_io_write(VALUE, VALUE)
Definition: io.c:1804
verify_internal_consistency_struct::live_object_count
size_t live_object_count
Definition: gc.c:5896
gc_raise_tag::ap
va_list * ap
Definition: gc.c:9525
__asm__
#define __asm__
Definition: Context.c:12
rb_data_type_struct
Definition: ruby.h:1148
BUILTIN_TYPE
#define BUILTIN_TYPE(x)
Definition: ruby.h:551
gc_mode_marking
@ gc_mode_marking
Definition: gc.c:672
rb_vm_struct
Definition: vm_core.h:576
xfree
#define xfree
Definition: defines.h:216
heap_page::pinned_slots
short pinned_slots
Definition: gc.c:848
gc_stat_sym_oldmalloc_increase_bytes
@ gc_stat_sym_oldmalloc_increase_bytes
Definition: gc.c:8866
RVALUE::regexp
struct RRegexp regexp
Definition: gc.c:579
RZombie::data
void * data
Definition: gc.c:990
RUBY_INTERNAL_EVENT_GC_END_MARK
#define RUBY_INTERNAL_EVENT_GC_END_MARK
Definition: ruby.h:2271
RMOVED
#define RMOVED(obj)
Definition: ruby.h:1266
ARY_EMBED_P
#define ARY_EMBED_P(ary)
Definition: gc.c:11420
MEMOP_TYPE_REALLOC
@ MEMOP_TYPE_REALLOC
Definition: gc.c:9689
SET_MACHINE_STACK_END
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:13
v
int VALUE v
Definition: rb_mjit_min_header-2.7.1.h:12257
RBASIC
#define RBASIC(obj)
Definition: ruby.h:1267
PUREFUNC
PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr))
CLEAR_IN_BITMAP
#define CLEAR_IN_BITMAP(bits, p)
Definition: gc.c:888
root_objects_data
Definition: gc.c:9481
ruby_gc_params_t::heap_free_slots_max_ratio
double heap_free_slots_max_ratio
Definition: gc.c:325
rb_obj_info_dump_loc
void rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
Definition: gc.c:11687
rb_sym2id
ID rb_sym2id(VALUE)
Definition: symbol.c:748
root_objects_data::func
void(* func)(const char *category, VALUE, void *)
Definition: gc.c:9483
rb_method_definition_struct
Definition: method.h:163
HEAP_PAGE_SIZE
@ HEAP_PAGE_SIZE
Definition: gc.c:838
rb_gc_mark
void rb_gc_mark(VALUE ptr)
Definition: gc.c:5214
gc_stat_compat_sym_total_allocated_object
@ gc_stat_compat_sym_total_allocated_object
Definition: gc.c:8897
GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS
Definition: gc.c:268
imemo_cref
@ imemo_cref
class reference
Definition: internal.h:1134
rb_objspace_each_objects_without_setup
void rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
Definition: gc.c:3050
rb_gc_mark_values
void rb_gc_mark_values(long n, const VALUE *values)
Definition: gc.c:4717
MJIT_FUNC_EXPORTED
#define MJIT_FUNC_EXPORTED
Definition: defines.h:396
rb_undefine_finalizer
VALUE rb_undefine_finalizer(VALUE obj)
Definition: gc.c:3193
rb_size_mul_add_or_raise
size_t rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
Definition: gc.c:219
_
#define _(args)
Definition: dln.h:28
EC_PUSH_TAG
#define EC_PUSH_TAG(ec)
Definition: eval_intern.h:130
rb_objspace::limit
size_t limit
Definition: gc.c:678
ar_table_struct
Definition: hash.c:349
count
int count
Definition: encoding.c:57
st_memsize
size_t st_memsize(const st_table *tab)
Definition: st.c:719
rb_callable_method_entry_struct::def
struct rb_method_definition_struct *const def
Definition: method.h:62
Qtrue
#define Qtrue
Definition: ruby.h:468
heap_page_header
Definition: gc.c:625
DSIZE_T
#define DSIZE_T
Definition: rb_mjit_min_header-2.7.1.h:5006
rb_obj_rgengc_writebarrier_protected_p
VALUE rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
Definition: gc.c:6962
S
#define S(s)
rb_objspace::global_list
struct gc_list * global_list
Definition: gc.c:784
NUM_IN_PAGE
#define NUM_IN_PAGE(p)
Definition: gc.c:880
rb_class_name
VALUE rb_class_name(VALUE)
Definition: variable.c:274
rb_io_fptr_finalize
#define rb_io_fptr_finalize
Definition: internal.h:1733
re_registers::num_regs
int num_regs
Definition: onigmo.h:718
gc_stat_sym_heap_allocated_pages
@ gc_stat_sym_heap_allocated_pages
Definition: gc.c:8841
RArray::as
union RArray::@97 as
exit
void exit(int __status) __attribute__((__noreturn__))
FL_PROMOTED1
#define FL_PROMOTED1
Definition: ruby.h:1281
RANY
#define RANY(o)
Definition: gc.c:984
len
uint8_t len
Definition: escape.c:17
gc_prof_record
#define gc_prof_record(objspace)
Definition: gc.c:1086
SYMBOL_P
#define SYMBOL_P(x)
Definition: ruby.h:413
RB_DEBUG_COUNTER_INC
#define RB_DEBUG_COUNTER_INC(type)
Definition: debug_counter.h:375
rb_heap_struct::using_page
struct heap_page * using_page
Definition: gc.c:660
RVALUE::svar
struct vm_svar svar
Definition: gc.c:591
rb_const_entry_struct::file
VALUE file
Definition: constant.h:35
rb_memerror
void rb_memerror(void)
Definition: gc.c:9597
GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL
Definition: gc.c:458
RVALUE::typeddata
struct RTypedData typeddata
Definition: gc.c:582
rb_method_entry_struct
Definition: method.h:51
rb_iseq_struct::body
struct rb_iseq_constant_body * body
Definition: vm_core.h:460
rb_mark_tbl
void rb_mark_tbl(st_table *tbl)
Definition: gc.c:5007
rb_subclass_entry
Definition: internal.h:998
RStruct
Definition: internal.h:942
rb_control_frame_struct::pc
const VALUE * pc
Definition: vm_core.h:761
MEMMOVE
#define MEMMOVE(p1, p2, type, n)
Definition: ruby.h:1754
rb_gc_unprotect_logging
void rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
Definition: gc.c:6911
rb_data_type_struct::function
struct rb_data_type_struct::@100 function
gc_stat_compat_sym_heap_tomb_page_length
@ gc_stat_compat_sym_heap_tomb_page_length
Definition: gc.c:8884
timespec
Definition: missing.h:60
vfprintf
int int int int int int vfprintf(FILE *__restrict, const char *__restrict, __gnuc_va_list) __attribute__((__format__(__printf__
gc_stat_sym_old_objects_limit
@ gc_stat_sym_old_objects_limit
Definition: gc.c:8864
ruby_atomic.h
rb_io_t::tied_io_for_writing
VALUE tied_io_for_writing
Definition: io.h:77
heap_page::freelist
RVALUE * freelist
Definition: gc.c:859
RTYPEDDATA_P
#define RTYPEDDATA_P(v)
Definition: ruby.h:1177
rb_define_finalizer
VALUE rb_define_finalizer(VALUE obj, VALUE block)
Definition: gc.c:3289
RVALUE::basic
struct RBasic basic
Definition: gc.c:573
rb_str_free
void rb_str_free(VALUE)
Definition: string.c:1349
rb_objspace::rgengc
struct rb_objspace::@87 rgengc
rb_alloc_tmp_buffer_with_count
void * rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
Definition: gc.c:10246
rb_objspace::mark_func_data_struct
Definition: gc.c:714
OBJ_PROMOTED
#define OBJ_PROMOTED(x)
Definition: ruby.h:1494
ROBJECT_NUMIV
#define ROBJECT_NUMIV(o)
Definition: ruby.h:933
rb_class_detach_subclasses
void rb_class_detach_subclasses(VALUE klass)
Definition: class.c:133
T_STRING
#define T_STRING
Definition: ruby.h:528
gc_raise_tag
Definition: gc.c:9522
imemo_tmpbuf
@ imemo_tmpbuf
Definition: internal.h:1141
finalizing
#define finalizing
Definition: gc.c:923
rb_hash_stlike_foreach_with_replace
int rb_hash_stlike_foreach_with_replace(VALUE hash, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
Definition: hash.c:1453
rb_atomic_t
int rb_atomic_t
Definition: ruby_atomic.h:124
RZombie::dfree
void(* dfree)(void *)
Definition: gc.c:989
rb_block
Definition: vm_core.h:751
FL_SINGLETON
#define FL_SINGLETON
Definition: ruby.h:1278
heap_page_header::page
struct heap_page * page
Definition: gc.c:626
RMatch::str
VALUE str
Definition: re.h:45
list_empty
#define list_empty(h)
Definition: rb_mjit_min_header-2.7.1.h:8957
rb_define_class_under
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition: class.c:698
rb_sym2str
VALUE rb_sym2str(VALUE)
Definition: symbol.c:784
atexit
int atexit(void(*__func)(void))
SIZET2NUM
#define SIZET2NUM(v)
Definition: ruby.h:295
rb_objspace::gc_stress_mode
VALUE gc_stress_mode
Definition: gc.c:786
rb_id_table_foreach_values
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
Definition: id_table.c:311
stderr
#define stderr
Definition: rb_mjit_min_header-2.7.1.h:1479
mjit_gc_exit_hook
void mjit_gc_exit_hook(void)
rb_generic_ivar_memsize
size_t rb_generic_ivar_memsize(VALUE)
Definition: variable.c:1010
rb_yield
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1237
RVALUE::klass
struct RClass klass
Definition: gc.c:575
rb_hash_stlike_foreach
int rb_hash_stlike_foreach(VALUE hash, st_foreach_callback_func *func, st_data_t arg)
Definition: hash.c:1442
rb_gc_disable_no_rest
VALUE rb_gc_disable_no_rest(void)
Definition: gc.c:9233
eval_intern.h
gc_list::varptr
VALUE * varptr
Definition: gc.c:636
ruby_initial_gc_stress
#define ruby_initial_gc_stress
Definition: gc.c:903
rb_objspace::mark_func_data
struct rb_objspace::mark_func_data_struct * mark_func_data
rb_ensure
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1115
rb_io_t::encs
struct rb_io_t::rb_io_enc_t encs
RComplex::imag
VALUE imag
Definition: internal.h:808
RARRAY_CONST_PTR_TRANSIENT
#define RARRAY_CONST_PTR_TRANSIENT(a)
Definition: ruby.h:1073
ruby_vm_special_exception_copy
MJIT_STATIC VALUE ruby_vm_special_exception_copy(VALUE)
Definition: rb_mjit_min_header-2.7.1.h:12144
rb_ary_new
VALUE rb_ary_new(void)
Definition: array.c:723
gc_stat_sym_last
@ gc_stat_sym_last
Definition: gc.c:8878
ruby_xrealloc2_body
void * ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
Definition: gc.c:10152
rb_symbols_t
Definition: symbol.h:61
rb_vm_update_references
void rb_vm_update_references(void *ptr)
Definition: vm.c:2234
builtin.h
heap_page::has_uncollectible_shady_objects
unsigned int has_uncollectible_shady_objects
Definition: gc.c:853
ROBJECT_EMBED
@ ROBJECT_EMBED
Definition: ruby.h:917
rb_imemo_tmpbuf_struct
Definition: internal.h:1231
Qnil
#define Qnil
Definition: ruby.h:469
T_STRUCT
#define T_STRUCT
Definition: ruby.h:532
mark_stack_t
struct mark_stack mark_stack_t
GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:298
gc_profile_record::gc_invoke_time
double gc_invoke_time
Definition: gc.c:525
rb_big_size
size_t rb_big_size(VALUE big)
Definition: bignum.c:6778
mjit_gc_start_hook
void mjit_gc_start_hook(void)
rb_objspace::final_slots
size_t final_slots
Definition: gc.c:731
heap_cursor::slot
RVALUE * slot
Definition: gc.c:7668
heap_page::marking_bits
bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:869
gc_stat_sym_total_freed_pages
@ gc_stat_sym_total_freed_pages
Definition: gc.c:8852
RVALUE_UNCOLLECTIBLE_BITMAP
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)
Definition: gc.c:1216
thread.h
rmatch_offset
Definition: re.h:31
rb_mEnumerable
VALUE rb_mEnumerable
Definition: enum.c:20
ruby_qsort
void ruby_qsort(void *, const size_t, const size_t, int(*)(const void *, const void *, void *), void *)
st_lookup
int st_lookup(st_table *tab, st_data_t key, st_data_t *value)
Definition: st.c:1101
util.h
O
#define O(member)
BIGNUM_LEN
#define BIGNUM_LEN(b)
Definition: internal.h:774
stack_chunk_t
struct stack_chunk stack_chunk_t
rb_io_t
Definition: io.h:66
page_compare_func_t
int page_compare_func_t(const void *, const void *, void *)
Definition: gc.c:7768
GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS
Definition: gc.c:262
gc_mode_sweeping
@ gc_mode_sweeping
Definition: gc.c:673
rb_classext_struct
Definition: internal.h:1020
rb_int_plus
VALUE rb_int_plus(VALUE x, VALUE y)
Definition: numeric.c:3610
RUBY_INTERNAL_EVENT_GC_START
#define RUBY_INTERNAL_EVENT_GC_START
Definition: ruby.h:2270
rb_gc_latest_gc_info
VALUE rb_gc_latest_gc_info(VALUE key)
Definition: gc.c:8818
gc_profile_record
Definition: gc.c:521
numberof
#define numberof(array)
Definition: etc.c:618
rb_thread_struct
Definition: vm_core.h:910
rb_ary_delete_same
void rb_ary_delete_same(VALUE ary, VALUE item)
Definition: array.c:3396
UNREACHABLE_RETURN
#define UNREACHABLE_RETURN(val)
Definition: ruby.h:59
rb_objspace::finalizing
rb_atomic_t finalizing
Definition: gc.c:711
rb_classext_struct::origin_
const VALUE origin_
Definition: internal.h:1039
rb_objspace::uncollectible_wb_unprotected_objects
size_t uncollectible_wb_unprotected_objects
Definition: gc.c:793
st_free_table
void st_free_table(st_table *tab)
Definition: st.c:709
rb_eNoMemError
VALUE rb_eNoMemError
Definition: error.c:933
wmap_iter_arg::objspace
rb_objspace_t * objspace
Definition: gc.c:10501
st_table
Definition: st.h:79
realloc
void * realloc(void *, size_t) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(2)))
rb_transient_heap_verify
void rb_transient_heap_verify(void)
Definition: transient_heap.c:219
RHASH_AR_TABLE_P
#define RHASH_AR_TABLE_P(hash)
Definition: internal.h:854
block_type_iseq
@ block_type_iseq
Definition: vm_core.h:745
rb_ec_raised_p
#define rb_ec_raised_p(ec, f)
Definition: eval_intern.h:260
rb_objspace::old_objects
size_t old_objects
Definition: gc.c:795
hi
#define hi
Definition: siphash.c:22
RVALUE::file
struct RFile file
Definition: gc.c:585
INT_MAX
#define INT_MAX
Definition: rb_mjit_min_header-2.7.1.h:4020
RData
Definition: ruby.h:1139
ruby_assert.h
rb_objspace::sorted_length
size_t sorted_length
Definition: gc.c:726
gc_profile_record::gc_time
double gc_time
Definition: gc.c:524
malloc_allocated_size
#define malloc_allocated_size
Definition: gc.c:909
rb_any_to_s
VALUE rb_any_to_s(VALUE)
Default implementation of #to_s.
Definition: object.c:527
RUBY_INTERNAL_EVENT_GC_END_SWEEP
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
Definition: ruby.h:2272
sub
#define sub(x, y)
Definition: date_strftime.c:24
ID_TABLE_CONTINUE
@ ID_TABLE_CONTINUE
Definition: id_table.h:9
rb_heap_struct::free_pages
struct heap_page * free_pages
Definition: gc.c:659
GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR
Definition: gc.c:271
calloc
void * calloc(size_t, size_t) __attribute__((__malloc__)) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(1
rb_data_object_alloc
#define rb_data_object_alloc
Definition: gc.c:14
rb_const_entry_struct
Definition: constant.h:31
each_obj_callback
int each_obj_callback(void *, void *, size_t, void *)
Definition: gc.c:2938
id_table.h
finalizer_table
#define finalizer_table
Definition: gc.c:924
RREGEXP_PTR
#define RREGEXP_PTR(r)
Definition: ruby.h:1118
rb_newobj
VALUE rb_newobj(void)
Definition: gc.c:2290
rb_objspace::invoke_time
double invoke_time
Definition: gc.c:748
gc_stat_compat_sym_gc_stat_heap_used
@ gc_stat_compat_sym_gc_stat_heap_used
Definition: gc.c:8882
T_TRUE
#define T_TRUE
Definition: ruby.h:536
rb_obj_is_kind_of
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Determines if obj is a kind of c.
Definition: object.c:692
ruby_gc_set_params
void ruby_gc_set_params(void)
Definition: gc.c:9420
malloc_obj_info
Definition: gc.c:9790
ruby_tag_type
ruby_tag_type
Definition: vm_core.h:184
rb_define_alloc_func
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
VALGRIND_MAKE_MEM_UNDEFINED
#define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
Definition: zlib.c:25
RUBY_ALIAS_FUNCTION
RUBY_ALIAS_FUNCTION(rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree), rb_data_object_wrap,(klass, datap, dmark, dfree))
Definition: gc.c:2385
RTEST
#define RTEST(v)
Definition: ruby.h:481
imemo_ifunc
@ imemo_ifunc
iterator function
Definition: internal.h:1137
fprintf
int fprintf(FILE *__restrict, const char *__restrict,...) __attribute__((__format__(__printf__
rb_wb_unprotected_newobj_of
VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2274
ruby::backward::cxxanyargs::type
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:39
debug.h
RB_SPECIAL_CONST_P
#define RB_SPECIAL_CONST_P(x)
Definition: ruby.h:1312
RB_GNUC_EXTENSION
#define RB_GNUC_EXTENSION
Definition: defines.h:121
VM_METHOD_TYPE_ALIAS
@ VM_METHOD_TYPE_ALIAS
Definition: method.h:108
heap_page::uncollectible_bits
bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:868
SYM2ID
#define SYM2ID(x)
Definition: ruby.h:415
rb_vm_env_prev_env
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:796
T_UNDEF
#define T_UNDEF
Definition: ruby.h:544
rmatch
Definition: re.h:36
optional
Definition: gc.c:90
ATOMIC_SIZE_INC
#define ATOMIC_SIZE_INC(var)
Definition: ruby_atomic.h:151
__sFILE
Definition: vsnprintf.c:169
RSTRUCT_CONST_PTR
#define RSTRUCT_CONST_PTR(st)
Definition: internal.h:962
heap_page::pinned_bits
bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:873
va_end
#define va_end(v)
Definition: rb_mjit_min_header-2.7.1.h:3947
rb_ec_stack_check
MJIT_FUNC_EXPORTED int rb_ec_stack_check(rb_execution_context_t *ec)
Definition: gc.c:4667
rb_thread_call_with_gvl
RUBY_SYMBOL_EXPORT_BEGIN void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1662
rb_heap_struct::pooled_pages
struct heap_page * pooled_pages
Definition: gc.c:664
HEAP_PAGE_BITMAP_LIMIT
@ HEAP_PAGE_BITMAP_LIMIT
Definition: gc.c:840
RSTRUCT
#define RSTRUCT(obj)
Definition: internal.h:966
rb_objspace::compact_count
size_t compact_count
Definition: gc.c:753
rb_objspace::during_gc
unsigned int during_gc
Definition: gc.c:691
rb_objspace_gc_enable
VALUE rb_objspace_gc_enable(rb_objspace_t *objspace)
Definition: gc.c:9218
RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC
Definition: gc.c:438
RClass
Definition: internal.h:1048
rb_transient_heap_start_marking
void rb_transient_heap_start_marking(int full_marking)
Definition: transient_heap.c:868
RVALUE::data
struct RData data
Definition: gc.c:581
ruby_sized_xfree
void ruby_sized_xfree(void *x, size_t size)
Definition: gc.c:10161
src
__inline__ const void *__restrict src
Definition: rb_mjit_min_header-2.7.1.h:2804
rb_objspace::parent_object
VALUE parent_object
Definition: gc.c:790
name
const char * name
Definition: nkf.c:208
NUM2PTR
#define NUM2PTR(x)
RVALUE_MARK_BITMAP
#define RVALUE_MARK_BITMAP(obj)
Definition: gc.c:1210
rb_execution_context_struct
Definition: vm_core.h:843
ruby_gc_params_t::heap_free_slots_min_ratio
double heap_free_slots_min_ratio
Definition: gc.c:323
rb_block_proc
VALUE rb_block_proc(void)
Definition: proc.c:837