source: EcnlProtoTool/trunk/mruby-2.1.1/src/gc.c@ 460

Last change on this file since 460 was 439, checked in by coas-nagasima, 4 years ago

Updated mruby to 2.1.1

  • Property svn:eol-style set to native
  • Property svn:mime-type set to text/x-csrc;charset=UTF-8
File size: 43.7 KB
Line 
1/*
2** gc.c - garbage collector for mruby
3**
4** See Copyright Notice in mruby.h
5*/
6
7#include <string.h>
8#include <stdlib.h>
9#include <mruby.h>
10#include <mruby/array.h>
11#include <mruby/class.h>
12#include <mruby/data.h>
13#include <mruby/istruct.h>
14#include <mruby/hash.h>
15#include <mruby/proc.h>
16#include <mruby/range.h>
17#include <mruby/string.h>
18#include <mruby/variable.h>
19#include <mruby/gc.h>
20#include <mruby/error.h>
21#include <mruby/throw.h>
22
23/*
24 = Tri-color Incremental Garbage Collection
25
26 mruby's GC is Tri-color Incremental GC with Mark & Sweep.
27 Algorithm details are omitted.
28 Instead, the implementation part is described below.
29
30 == Object's Color
31
32 Each object can be painted in three colors:
33
34 * White - Unmarked.
35 * Gray - Marked, But the child objects are unmarked.
36 * Black - Marked, the child objects are also marked.
37
38 == Two White Types
39
40 There are two white color types in a flip-flop fashion: White-A and White-B,
41 which respectively represent the Current White color (the newly allocated
42 objects in the current GC cycle) and the Sweep Target White color (the
43 dead objects to be swept).
44
45 A and B will be switched just at the beginning of the next GC cycle. At
46 that time, all the dead objects have been swept, while the newly created
47 objects in the current GC cycle which finally remains White are now
48 regarded as dead objects. Instead of traversing all the White-A objects and
49 painting them as White-B, just switch the meaning of White-A and White-B as
50 this will be much cheaper.
51
52 As a result, the objects we sweep in the current GC cycle are always
53 left from the previous GC cycle. This allows us to sweep objects
54 incrementally, without the disturbance of the newly created objects.
55
56 == Execution Timing
57
58 GC Execution Time and Each step interval are decided by live objects count.
59 List of Adjustment API:
60
61 * gc_interval_ratio_set
62 * gc_step_ratio_set
63
64 For details, see the comments for each function.
65
66 == Write Barrier
67
68 mruby implementer and C extension library writer must insert a write
69 barrier when updating a reference from a field of an object.
70 When updating a reference from a field of object A to object B,
71 two different types of write barrier are available:
72
73 * mrb_field_write_barrier - target B object for a mark.
74 * mrb_write_barrier - target A object for a mark.
75
76 == Generational Mode
77
78 mruby's GC offers a Generational Mode while re-using the tri-color GC
79 infrastructure. It will treat the Black objects as Old objects after each
80 sweep phase, instead of painting them White. The key ideas are still the same
81 as traditional generational GC:
82
83 * Minor GC - just traverse the Young objects (Gray objects) in the mark
84 phase, then only sweep the newly created objects, and leave
85 the Old objects live.
86
87 * Major GC - same as a full regular GC cycle.
88
89 The difference from "traditional" generational GC is, that the major GC
90 in mruby is triggered incrementally in a tri-color manner.
91
92
93 For details, see the comments for each function.
94
95*/
96
/* Overlay for a freed heap slot: shares the common object header and
 * chains free slots into the owning page's singly-linked freelist. */
struct free_obj {
  MRB_OBJECT_HEADER;
  struct RBasic *next;  /* next free slot on the same page */
};
101
/* One heap slot.  A slot must be able to hold any mruby object
 * representation, so RVALUE is the union of them all; every slot on a
 * heap page therefore has the same size. */
typedef struct {
  union {
    struct free_obj free;
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RIStruct istruct;
    struct RProc proc;
    struct REnv env;
    struct RFiber fiber;
    struct RException exc;
    struct RBreak brk;
#ifdef MRB_WORD_BOXING
    /* with word boxing, floats and C pointers are boxed in heap slots */
#ifndef MRB_WITHOUT_FLOAT
    struct RFloat floatv;
#endif
    struct RCptr cptr;
#endif
  } as;
} RVALUE;
127
#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

/* Wall-clock timestamps (seconds) used by the GC profiler. */
static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

/* Current wall-clock time in seconds with microsecond resolution. */
static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

/* Report which collection entry point ran and in which mode.
 * NOTE: expands references to a local `gc` — callers must have one. */
#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
} while(0)

/* Stopwatch around one GC run; STOP also dumps statistics. */
#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", gc->state);\
  fprintf(stderr, "live: %zu\n", gc->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
/* Profiling disabled: the reporting macros compile away to nothing. */
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif

/* DEBUG(x) evaluates x only in GC_DEBUG builds. */
#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif
176
/* Number of object slots per heap page (overridable at build time). */
#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif

/* Base work budget for one incremental GC step. */
#define GC_STEP_SIZE 1024

/* white: 001 or 010, black: 100, gray: 000 */
#define GC_GRAY 0
#define GC_WHITE_A 1
#define GC_WHITE_B (1 << 1)
#define GC_BLACK (1 << 2)
#define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
#define GC_COLOR_MASK 7

/* "partial white" paints with whichever white is current this cycle,
 * so freshly created objects are not swept by the ongoing sweep. */
#define paint_gray(o) ((o)->color = GC_GRAY)
#define paint_black(o) ((o)->color = GC_BLACK)
#define paint_white(o) ((o)->color = GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == GC_GRAY)
#define is_white(o) ((o)->color & GC_WHITES)
#define is_black(o) ((o)->color & GC_BLACK)
/* The two whites swap roles at the start of each cycle (state ROOT). */
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
/* dead = painted with the sweep-target white, or already a free slot */
#define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)

#define objects(p) ((RVALUE *)p->objects)
203
204MRB_API void*
205mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
206{
207 void *p2;
208
209 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
210 if (!p2 && len > 0 && mrb->gc.heaps) {
211 mrb_full_gc(mrb);
212 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
213 }
214
215 return p2;
216}
217
218MRB_API void*
219mrb_realloc(mrb_state *mrb, void *p, size_t len)
220{
221 void *p2;
222
223 p2 = mrb_realloc_simple(mrb, p, len);
224 if (len == 0) return p2;
225 if (p2 == NULL) {
226 if (mrb->gc.out_of_memory) {
227 mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
228 /* mrb_panic(mrb); */
229 }
230 else {
231 mrb->gc.out_of_memory = TRUE;
232 mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
233 }
234 }
235 else {
236 mrb->gc.out_of_memory = FALSE;
237 }
238
239 return p2;
240}
241
242MRB_API void*
243mrb_malloc(mrb_state *mrb, size_t len)
244{
245 return mrb_realloc(mrb, 0, len);
246}
247
248MRB_API void*
249mrb_malloc_simple(mrb_state *mrb, size_t len)
250{
251 return mrb_realloc_simple(mrb, 0, len);
252}
253
254MRB_API void*
255mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
256{
257 void *p;
258
259 if (nelem > 0 && len > 0 &&
260 nelem <= SIZE_MAX / len) {
261 size_t size;
262 size = nelem * len;
263 p = mrb_malloc(mrb, size);
264
265 memset(p, 0, size);
266 }
267 else {
268 p = NULL;
269 }
270
271 return p;
272}
273
/* Release p through the user-supplied allocator; a requested size of
 * zero is the allocator's "free" operation. */
MRB_API void
mrb_free(mrb_state *mrb, void *p)
{
  (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
}
279
280MRB_API void*
281mrb_alloca(mrb_state *mrb, size_t size)
282{
283 struct RString *s;
284 s = (struct RString*)mrb_obj_alloc(mrb, MRB_TT_STRING, mrb->string_class);
285 return s->as.heap.ptr = (char*)mrb_malloc(mrb, size);
286}
287
288static mrb_bool
289heap_p(mrb_gc *gc, struct RBasic *object)
290{
291 mrb_heap_page* page;
292
293 page = gc->heaps;
294 while (page) {
295 RVALUE *p;
296
297 p = objects(page);
298 if (&p[0].as.basic <= object && object <= &p[MRB_HEAP_PAGE_SIZE].as.basic) {
299 return TRUE;
300 }
301 page = page->next;
302 }
303 return FALSE;
304}
305
306MRB_API mrb_bool
307mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
308 mrb_gc *gc = &mrb->gc;
309 if (!heap_p(gc, object)) return TRUE;
310 return is_dead(gc, object);
311}
312
313static void
314link_heap_page(mrb_gc *gc, mrb_heap_page *page)
315{
316 page->next = gc->heaps;
317 if (gc->heaps)
318 gc->heaps->prev = page;
319 gc->heaps = page;
320}
321
322static void
323unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
324{
325 if (page->prev)
326 page->prev->next = page->next;
327 if (page->next)
328 page->next->prev = page->prev;
329 if (gc->heaps == page)
330 gc->heaps = page->next;
331 page->prev = NULL;
332 page->next = NULL;
333}
334
335static void
336link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
337{
338 page->free_next = gc->free_heaps;
339 if (gc->free_heaps) {
340 gc->free_heaps->free_prev = page;
341 }
342 gc->free_heaps = page;
343}
344
345static void
346unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
347{
348 if (page->free_prev)
349 page->free_prev->free_next = page->free_next;
350 if (page->free_next)
351 page->free_next->free_prev = page->free_prev;
352 if (gc->free_heaps == page)
353 gc->free_heaps = page->free_next;
354 page->free_prev = NULL;
355 page->free_next = NULL;
356}
357
358static void
359add_heap(mrb_state *mrb, mrb_gc *gc)
360{
361 mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
362 RVALUE *p, *e;
363 struct RBasic *prev = NULL;
364
365 for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
366 p->as.free.tt = MRB_TT_FREE;
367 p->as.free.next = prev;
368 prev = &p->as.basic;
369 }
370 page->freelist = prev;
371
372 link_heap_page(gc, page);
373 link_free_heap_page(gc, page);
374}
375
/* GC pacing defaults (percent): interval_ratio sets the allocation
 * threshold relative to the post-cycle live count; step_ratio scales
 * the work budget of one incremental step. */
#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
/* After a major GC the old-object threshold becomes live * 120%;
 * if that exceeds MAJOR_GC_TOOMANY a full GC is invoked instead
 * (see mrb_incremental_gc). */
#define MAJOR_GC_INC_RATIO 120
#define MAJOR_GC_TOOMANY 10000
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
383
/* Initialize the GC embedded in a fresh mrb_state: allocate the
 * (growable) arena, create the first heap page, and set the default
 * pacing parameters.  Generational mode starts on (in full mode)
 * unless disabled at compile time. */
void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  gc->generational = TRUE;
  gc->full = TRUE;
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
407
408static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);
409
410static void
411free_heap(mrb_state *mrb, mrb_gc *gc)
412{
413 mrb_heap_page *page = gc->heaps;
414 mrb_heap_page *tmp;
415 RVALUE *p, *e;
416
417 while (page) {
418 tmp = page;
419 page = page->next;
420 for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
421 if (p->as.free.tt != MRB_TT_FREE)
422 obj_free(mrb, &p->as.basic, TRUE);
423 }
424 mrb_free(mrb, tmp);
425 }
426}
427
/* Release everything the GC owns: all heap pages (finalizing live
 * objects) and, when the arena is heap-allocated, the arena itself. */
void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
  free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
  mrb_free(mrb, gc->arena);
#endif
}
436
/* Push p onto the GC arena so it survives until the arena index is
 * restored.  With a fixed arena an overflow raises ArenaError — after
 * forcing a few slots of room so that raising itself can allocate.
 * Otherwise the arena grows geometrically (factor 3/2). */
static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
  }
#else
  if (gc->arena_idx >= gc->arena_capa) {
    /* extend arena */
    gc->arena_capa = (int)(gc->arena_capa * 3 / 2);
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
  }
#endif
  gc->arena[gc->arena_idx++] = p;
}
455
456/* mrb_gc_protect() leaves the object in the arena */
457MRB_API void
458mrb_gc_protect(mrb_state *mrb, mrb_value obj)
459{
460 if (mrb_immediate_p(obj)) return;
461 gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
462}
463
464#define GC_ROOT_NAME "_gc_root_"
465
466/* mrb_gc_register() keeps the object from GC.
467
468 Register your object when it's exported to C world,
469 without reference from Ruby world, e.g. callback
470 arguments. Don't forget to remove the object using
471 mrb_gc_unregister, otherwise your object will leak.
472*/
473
474MRB_API void
475mrb_gc_register(mrb_state *mrb, mrb_value obj)
476{
477 mrb_sym root;
478 mrb_value table;
479
480 if (mrb_immediate_p(obj)) return;
481 root = mrb_intern_lit(mrb, GC_ROOT_NAME);
482 table = mrb_gv_get(mrb, root);
483 if (mrb_nil_p(table) || !mrb_array_p(table)) {
484 table = mrb_ary_new(mrb);
485 mrb_gv_set(mrb, root, table);
486 }
487 mrb_ary_push(mrb, table, obj);
488}
489
490/* mrb_gc_unregister() removes the object from GC root. */
491MRB_API void
492mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
493{
494 mrb_sym root;
495 mrb_value table;
496 struct RArray *a;
497 mrb_int i;
498
499 if (mrb_immediate_p(obj)) return;
500 root = mrb_intern_lit(mrb, GC_ROOT_NAME);
501 table = mrb_gv_get(mrb, root);
502 if (mrb_nil_p(table)) return;
503 if (!mrb_array_p(table)) {
504 mrb_gv_set(mrb, root, mrb_nil_value());
505 return;
506 }
507 a = mrb_ary_ptr(table);
508 mrb_ary_modify(mrb, a);
509 for (i = 0; i < ARY_LEN(a); i++) {
510 if (mrb_ptr(ARY_PTR(a)[i]) == mrb_ptr(obj)) {
511 mrb_int len = ARY_LEN(a)-1;
512 mrb_value *ptr = ARY_PTR(a);
513
514 ARY_SET_LEN(a, len);
515 memmove(&ptr[i], &ptr[i + 1], (len - i) * sizeof(mrb_value));
516 break;
517 }
518 }
519}
520
/* Allocate one object of type `ttype` with class `cls`.
 * Validates that `cls` can produce an instance of `ttype`, possibly
 * triggers GC (always under MRB_GC_STRESS, otherwise when the live
 * count passes the threshold), takes a slot from the freelist, zeroes
 * it, and paints it with the current white.  The new object is pushed
 * into the GC arena so it is protected until the arena is restored. */
MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { NULL, NULL, MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

  if (cls) {
    enum mrb_vtype tt;

    /* only class-like objects may serve as an allocation class */
    switch (cls->tt) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
    case MRB_TT_MODULE:
    case MRB_TT_ENV:
      break;
    default:
      mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
    }
    tt = MRB_INSTANCE_TT(cls);
    /* the requested type must match the class's instance type, except
     * for internal types that are allocated with arbitrary classes */
    if (tt != MRB_TT_FALSE &&
        ttype != MRB_TT_SCLASS &&
        ttype != MRB_TT_ICLASS &&
        ttype != MRB_TT_ENV &&
        ttype != tt) {
      mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %C", cls);
    }
  }

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (gc->threshold < gc->live) {
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);  /* guarantees a non-empty freelist below */
  }

  /* pop a slot off the first free page's freelist */
  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    /* page is now full: drop it from the free-pages list */
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);
  *(RVALUE *)p = RVALUE_zero;  /* zero the whole slot */
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);  /* current white: not swept this cycle */
  return p;
}
574
/* Paint obj gray and push it onto the gray list for later traversal
 * by gc_mark_children().  Under MRB_GC_STRESS, abort on an object
 * whose type tag is out of range (corruption check). */
static inline void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
  if (obj->tt > MRB_TT_MAXDEFINE) {
    abort();
  }
#endif
  paint_gray(obj);
  obj->gcnext = gc->gray_list;
  gc->gray_list = obj;
}
587
588static int
589ci_nregs(mrb_callinfo *ci)
590{
591 struct RProc *p = ci->proc;
592 int n = 0;
593
594 if (!p) {
595 if (ci->argc < 0) return 3;
596 return ci->argc+2;
597 }
598 if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
599 n = p->body.irep->nregs;
600 }
601 if (ci->argc < 0) {
602 if (n < 3) n = 3; /* self + args + blk */
603 }
604 if (ci->argc > n) {
605 n = ci->argc + 2; /* self + blk */
606 }
607 return n;
608}
609
/* Mark every live value on a context's VM value stack, then overwrite
 * the unused tail of the stack with nil so stale slots cannot keep
 * dead objects alive. */
static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;
  mrb_value nil;

  if (c->stack == NULL) return;
  /* live region: everything below the stack pointer ... */
  e = c->stack - c->stbase;
  if (c->ci) {
    /* ... plus the registers of the active call frame */
    e += ci_nregs(c->ci);
  }
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;  /* clamp */
  for (i=0; i<e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      mrb_gc_mark(mrb, mrb_basic_ptr(v));
    }
  }
  /* clear the dead region above the live area */
  e = c->stend - c->stbase;
  nil = mrb_nil_value();
  for (; i<e; i++) {
    c->stbase[i] = nil;
  }
}
636
637static void
638mark_context(mrb_state *mrb, struct mrb_context *c)
639{
640 int i;
641 mrb_callinfo *ci;
642
643 start:
644 if (c->status == MRB_FIBER_TERMINATED) return;
645
646 /* mark VM stack */
647 mark_context_stack(mrb, c);
648
649 /* mark call stack */
650 if (c->cibase) {
651 for (ci = c->cibase; ci <= c->ci; ci++) {
652 mrb_gc_mark(mrb, (struct RBasic*)ci->env);
653 mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
654 mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
655 }
656 }
657 /* mark ensure stack */
658 for (i=0; i<c->eidx; i++) {
659 mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
660 }
661 /* mark fibers */
662 mrb_gc_mark(mrb, (struct RBasic*)c->fib);
663 if (c->prev) {
664 c = c->prev;
665 goto start;
666 }
667}
668
/* Core of the mark phase: paint a gray object black, pop it off the
 * gray list (it is always the current head), then mark all objects it
 * directly references, dispatching on its type tag. */
static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  gc->gray_list = obj->gcnext;  /* pop: obj is the list head */
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);  /* class is always reachable */
  switch (obj->tt) {
  case MRB_TT_ICLASS:
    {
      struct RClass *c = (struct RClass*)obj;
      /* only an "origin" iclass owns its method table */
      if (MRB_FLAG_TEST(c, MRB_FL_CLASS_IS_ORIGIN))
        mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    }
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      mrb_gc_mark_mt(mrb, c);                      /* method table */
      mrb_gc_mark(mrb, (struct RBasic*)c->super);  /* superclass chain */
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);  /* instance variables */
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->upper);
      mrb_gc_mark(mrb, (struct RBasic*)p->e.env);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;
      mrb_int i, len;

      /* a stack-shared env keeps its owning fiber alive */
      if (MRB_ENV_STACK_SHARED_P(e) && e->cxt && e->cxt->fib) {
        mrb_gc_mark(mrb, (struct RBasic*)e->cxt->fib);
      }
      len = MRB_ENV_STACK_LEN(e);
      for (i=0; i<len; i++) {
        mrb_gc_mark_value(mrb, e->stack[i]);
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e;

      for (i=0,e=ARY_LEN(a); i<e; i++) {
        mrb_gc_mark_value(mrb, ARY_PTR(a)[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    /* a shared string must keep the buffer-owning string alive */
    if (RSTR_FSHARED_P(obj)) {
      struct RString *s = (struct RString*)obj;
      mrb_gc_mark(mrb, (struct RBasic*)s->as.heap.aux.fshared);
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_mark_range(mrb, (struct RRange*)obj);
    break;

  default:
    /* remaining types carry no traced references */
    break;
  }
}
766
767MRB_API void
768mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
769{
770 if (obj == 0) return;
771 if (!is_white(obj)) return;
772 mrb_assert((obj)->tt != MRB_TT_FREE);
773 add_gray_list(mrb, &mrb->gc, obj);
774}
775
/* Release all memory owned by obj and (for heap slots) leave the slot
 * tagged MRB_TT_FREE.  `end` is TRUE only during interpreter shutdown
 * (free_heap): then fiber contexts are torn down unconditionally and
 * irep cross-references are cut before the decref. */
static void
obj_free(mrb_state *mrb, struct RBasic *obj, int end)
{
  DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt));
  switch (obj->tt) {
  /* immediate - no mark */
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
    /* cannot happen */
    return;

#ifndef MRB_WITHOUT_FLOAT
  case MRB_TT_FLOAT:
#ifdef MRB_WORD_BOXING
    break;   /* boxed float occupies a heap slot: just free the slot */
#else
    return;  /* float is an immediate: never heap-allocated */
#endif
#endif

  case MRB_TT_OBJECT:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    /* drop stale method-cache entries referring to this class */
    mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ICLASS:
    /* only an "origin" iclass owns its method table */
    if (MRB_FLAG_TEST(obj, MRB_FL_CLASS_IS_ORIGIN))
      mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_mc_clear_by_class(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (MRB_ENV_STACK_SHARED_P(e)) {
        /* cannot be freed */
        e->stack = NULL;  /* stack is owned by the context, not us */
        break;
      }
      mrb_free(mrb, e->stack);
      e->stack = NULL;
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c && c != mrb->root_c) {
        if (!end && c->status != MRB_FIBER_TERMINATED) {
          /* the fiber dies mid-flight: detach any envs still sharing
           * its stack before the stack memory goes away */
          mrb_callinfo *ci = c->ci;
          mrb_callinfo *ce = c->cibase;

          while (ce <= ci) {
            struct REnv *e = ci->env;
            if (e && !mrb_object_dead_p(mrb, (struct RBasic*)e) &&
                e->tt == MRB_TT_ENV && MRB_ENV_STACK_SHARED_P(e)) {
              mrb_env_unshare(mrb, e);
            }
            ci--;
          }
        }
        mrb_free_context(mrb, c);
      }
    }
    break;

  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->as.heap.aux.shared);
    else if (!ARY_EMBED_P(obj))
      mrb_free(mrb, ((struct RArray*)obj)->as.heap.ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep *irep = p->body.irep;
        if (end) {
          /* shutdown: break irep cross-references first */
          mrb_irep_cutref(mrb, irep);
        }
        mrb_irep_decref(mrb, irep);
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_gc_free_range(mrb, ((struct RRange*)obj));
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);  /* user-supplied destructor */
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;
}
903
/* Mark all GC roots: the arena, global variables, built-in classes,
 * pre-allocated exceptions, top_self and the live contexts.  In a
 * minor GC the gray lists are kept, so old objects queued by write
 * barriers are not lost. */
static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  if (!is_minor_gc(gc)) {
    gc->gray_list = NULL;
    gc->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);

  /* mark built-in classes */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->range_class);

#ifndef MRB_WITHOUT_FLOAT
  mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
#endif
  mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);

  mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);

  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
  mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif

  /* mark the current and root execution contexts */
  mark_context(mrb, mrb->c);
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->root_c);
  }
}
960
/* Mark one gray object's children and return an estimate of how many
 * child slots were visited — the unit of work used to meter the
 * incremental mark phase.  The counts are heuristics, not exact. */
static size_t
gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  size_t children = 0;

  gc_mark_children(mrb, gc, obj);

  switch (obj->tt) {
  case MRB_TT_ICLASS:
    children++;  /* superclass */
    break;

  case MRB_TT_CLASS:
  case MRB_TT_SCLASS:
  case MRB_TT_MODULE:
    {
      struct RClass *c = (struct RClass*)obj;

      children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
      children += mrb_gc_mark_mt_size(mrb, c);
      children++;  /* superclass */
    }
    break;

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_ENV:
    children += MRB_ENV_STACK_LEN(obj);
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;
      size_t i;
      mrb_callinfo *ci;

      if (!c || c->status == MRB_FIBER_TERMINATED) break;

      /* mark stack */
      i = c->stack - c->stbase;

      if (c->ci) {
        i += ci_nregs(c->ci);
      }
      if (c->stbase + i > c->stend) i = c->stend - c->stbase;
      children += i;

      /* mark ensure stack */
      children += c->eidx;

      /* mark closure */
      if (c->cibase) {
        /* count call frames */
        for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
          ;
      }
      /* NOTE(review): when c->cibase is NULL, `i` still holds the
       * stack size from above and is added a second time here —
       * harmless for a work estimate, but looks unintended; confirm
       * whether cibase can be NULL for a live context. */
      children += i;
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      children += ARY_LEN(a);
    }
    break;

  case MRB_TT_HASH:
    children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
    children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_PROC:
  case MRB_TT_RANGE:
    children+=2;
    break;

  default:
    break;
  }
  return children;
}
1046
1047
1048static void
1049gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
1050 while (gc->gray_list) {
1051 if (is_gray(gc->gray_list))
1052 gc_mark_children(mrb, gc, gc->gray_list);
1053 else
1054 gc->gray_list = gc->gray_list->gcnext;
1055 }
1056}
1057
1058
1059static size_t
1060incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
1061{
1062 size_t tried_marks = 0;
1063
1064 while (gc->gray_list && tried_marks < limit) {
1065 tried_marks += gc_gray_mark(mrb, gc, gc->gray_list);
1066 }
1067
1068 return tried_marks;
1069}
1070
/* Atomic finish of the mark phase: re-scan roots that may have changed
 * since root_scan_phase (arena, globals, contexts, current exception),
 * drain the gray list, then drain the atomic gray list (objects queued
 * by write barriers during incremental marking). */
static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  mrb_gc_mark_gv(mrb);
  mark_context(mrb, mrb->c);
  mark_context(mrb, mrb->root_c);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
  /* now flush the write-barrier backlog */
  gc->gray_list = gc->atomic_gray_list;
  gc->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
}
1091
1092static void
1093prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
1094{
1095 gc->state = MRB_GC_STATE_SWEEP;
1096 gc->sweeps = gc->heaps;
1097 gc->live_after_mark = gc->live;
1098}
1099
/* Sweep up to `limit` slots' worth of heap pages: free dead objects
 * onto their page's freelist, release pages that became entirely
 * empty, and (in minor GC) mark full pages as "old" so they can be
 * skipped next time.  Returns the number of slots examined; 0 means
 * sweeping is finished. */
static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  mrb_heap_page *page = gc->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;            /* page contains only dead/free slots? */
    mrb_bool full = (page->freelist == NULL);  /* page had no free slot before */

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic, FALSE);
          if (p->as.basic.tt == MRB_TT_FREE) {
            /* slot is reusable: push it on the page freelist */
            p->as.free.next = page->freelist;
            page->freelist = (struct RBasic*)p;
            freed++;
          }
          else {
            dead_slot = FALSE;
          }
        }
      }
      else {
        /* survivor: in non-generational mode repaint it white so it
         * becomes a candidate in the next cycle */
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = FALSE;
      }
      p++;
    }

    /* free dead slot */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      /* the whole page is dead (and was not already all-free): give
       * the page back to the system allocator */
      mrb_heap_page *next = page->next;

      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      if (full && freed > 0) {
        /* page regained free slots: put it back on the free list */
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;   /* fully occupied: treat as old next minor GC */
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;  /* resume point for the next increment */
  return tried_sweep;
}
1166
1167static size_t
1168incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
1169{
1170 switch (gc->state) {
1171 case MRB_GC_STATE_ROOT:
1172 root_scan_phase(mrb, gc);
1173 gc->state = MRB_GC_STATE_MARK;
1174 flip_white_part(gc);
1175 return 0;
1176 case MRB_GC_STATE_MARK:
1177 if (gc->gray_list) {
1178 return incremental_marking_phase(mrb, gc, limit);
1179 }
1180 else {
1181 final_marking_phase(mrb, gc);
1182 prepare_incremental_sweep(mrb, gc);
1183 return 0;
1184 }
1185 case MRB_GC_STATE_SWEEP: {
1186 size_t tried_sweep = 0;
1187 tried_sweep = incremental_sweep_phase(mrb, gc, limit);
1188 if (tried_sweep == 0)
1189 gc->state = MRB_GC_STATE_ROOT;
1190 return tried_sweep;
1191 }
1192 default:
1193 /* unknown state */
1194 mrb_assert(0);
1195 return 0;
1196 }
1197}
1198
1199static void
1200incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
1201{
1202 do {
1203 incremental_gc(mrb, gc, SIZE_MAX);
1204 } while (gc->state != to_state);
1205}
1206
1207static void
1208incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
1209{
1210 size_t limit = 0, result = 0;
1211 limit = (GC_STEP_SIZE/100) * gc->step_ratio;
1212 while (result < limit) {
1213 result += incremental_gc(mrb, gc, limit);
1214 if (gc->state == MRB_GC_STATE_ROOT)
1215 break;
1216 }
1217
1218 gc->threshold = gc->live + GC_STEP_SIZE;
1219}
1220
/* Demote every old object back to young by running one complete
 * non-generational cycle: any half-finished major GC is completed
 * first, then a full sweep repaints all survivors white.  The
 * generational flag is restored afterwards. */
static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
  mrb_bool origin_mode = gc->generational;

  mrb_assert(is_generational(gc));
  if (is_major_gc(gc)) {
    /* finish the half baked GC */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  gc->generational = FALSE;
  prepare_incremental_sweep(mrb, gc);
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->generational = origin_mode;

  /* The gray objects have already been painted as white */
  gc->atomic_gray_list = gc->gray_list = NULL;
}
1242
/* Advance garbage collection by one increment (or, for a minor GC,
 * run the whole cycle).  When a cycle completes, recompute the
 * allocation threshold and handle generational transitions: after a
 * major GC set a new old-object threshold (or run a full GC when too
 * much was allocated meanwhile); after a minor GC, trigger a major GC
 * once the live count passes the old-object threshold. */
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    /* minor GC runs to completion in one call */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    /* a full cycle just finished */
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      size_t threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;

      gc->full = FALSE;
      if (threshold < MAJOR_GC_TOOMANY) {
        gc->majorgc_old_threshold = threshold;
      }
      else {
        /* too many objects allocated during incremental GC, */
        /* instead of increasing threshold, invoke full GC. */
        mrb_full_gc(mrb);
      }
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        /* too many old objects accumulated: schedule a major GC */
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}
1290
/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  /* no GC while disabled or while an object iteration is in progress */
  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;  /* run the cycle below as a major GC */
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish half baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* run one complete mark & sweep cycle, then reset the threshold */
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;

  if (is_generational(gc)) {
    /* re-arm the major-GC threshold and go back to minor collections */
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

  GC_TIME_STOP_AND_REPORT;
}
1322
/* Public entry point; simply an alias for mrb_full_gc(). */
MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}
1328
1329/*
1330 * Field write barrier
1331 * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
1332 */
1333
MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
  mrb_gc *gc = &mrb->gc;

  /* the barrier is only needed when a black object points at a white one */
  if (!is_black(obj)) return;
  if (!is_white(value)) return;

  mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);

  if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
    /* still marking: queue the white target so it gets traversed */
    add_gray_list(mrb, gc, value);
  }
  else {
    /* sweeping: repaint obj itself to the current white instead, so no
     * further barrier is needed for it during this cycle */
    mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
    paint_partial_white(gc, obj); /* for never write barriers */
  }
}
1353
1354/*
1355 * Write barrier
1356 * Paint obj(Black) to obj(Gray).
1357 *
1358 * The object that is painted gray will be traversed atomically in final
1359 * mark phase. So you use this write barrier if it's frequency written spot.
1360 * e.g. Set element on Array.
1361 */
1362
MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
  mrb_gc *gc = &mrb->gc;

  /* only a black object needs the barrier */
  if (!is_black(obj)) return;

  mrb_assert(!is_dead(gc, obj));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
  /* demote obj to gray and queue it on the atomic list so it is
   * re-traversed in the final (atomic) mark phase */
  paint_gray(obj);
  obj->gcnext = gc->atomic_gray_list;
  gc->atomic_gray_list = obj;
}
1376
1377/*
1378 * call-seq:
1379 * GC.start -> nil
1380 *
1381 * Initiates full garbage collection.
1382 *
1383 */
1384
/* Ruby binding for GC.start: run a full collection, always return nil. */
static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}
1391
1392/*
1393 * call-seq:
1394 * GC.enable -> true or false
1395 *
1396 * Enables garbage collection, returning <code>true</code> if garbage
1397 * collection was previously disabled.
1398 *
1399 * GC.disable #=> false
1400 * GC.enable #=> true
1401 * GC.enable #=> false
1402 *
1403 */
1404
1405static mrb_value
1406gc_enable(mrb_state *mrb, mrb_value obj)
1407{
1408 mrb_bool old = mrb->gc.disabled;
1409
1410 mrb->gc.disabled = FALSE;
1411
1412 return mrb_bool_value(old);
1413}
1414
1415/*
1416 * call-seq:
1417 * GC.disable -> true or false
1418 *
1419 * Disables garbage collection, returning <code>true</code> if garbage
1420 * collection was already disabled.
1421 *
1422 * GC.disable #=> false
1423 * GC.disable #=> true
1424 *
1425 */
1426
1427static mrb_value
1428gc_disable(mrb_state *mrb, mrb_value obj)
1429{
1430 mrb_bool old = mrb->gc.disabled;
1431
1432 mrb->gc.disabled = TRUE;
1433
1434 return mrb_bool_value(old);
1435}
1436
1437/*
1438 * call-seq:
1439 * GC.interval_ratio -> fixnum
1440 *
1441 * Returns ratio of GC interval. Default value is 200(%).
1442 *
1443 */
1444
1445static mrb_value
1446gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
1447{
1448 return mrb_fixnum_value(mrb->gc.interval_ratio);
1449}
1450
1451/*
1452 * call-seq:
1453 * GC.interval_ratio = fixnum -> nil
1454 *
1455 * Updates ratio of GC interval. Default value is 200(%).
1456 * GC start as soon as after end all step of GC if you set 100(%).
1457 *
1458 */
1459
1460static mrb_value
1461gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
1462{
1463 mrb_int ratio;
1464
1465 mrb_get_args(mrb, "i", &ratio);
1466 mrb->gc.interval_ratio = (int)ratio;
1467 return mrb_nil_value();
1468}
1469
1470/*
1471 * call-seq:
1472 * GC.step_ratio -> fixnum
1473 *
1474 * Returns step span ratio of Incremental GC. Default value is 200(%).
1475 *
1476 */
1477
1478static mrb_value
1479gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
1480{
1481 return mrb_fixnum_value(mrb->gc.step_ratio);
1482}
1483
1484/*
1485 * call-seq:
1486 * GC.step_ratio = fixnum -> nil
1487 *
1488 * Updates step span ratio of Incremental GC. Default value is 200(%).
1489 * 1 step of incrementalGC becomes long if a rate is big.
1490 *
1491 */
1492
1493static mrb_value
1494gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
1495{
1496 mrb_int ratio;
1497
1498 mrb_get_args(mrb, "i", &ratio);
1499 mrb->gc.step_ratio = (int)ratio;
1500 return mrb_nil_value();
1501}
1502
/*
 * Switch between generational and plain incremental GC.  Any in-flight
 * cycle is finished or reset so the collector is in a clean ROOT state
 * before the mode flag flips.  Raises if GC is disabled or iterating.
 */
static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (gc->disabled || gc->iterating) {
    mrb_raise(mrb, E_RUNTIME_ERROR, "generational mode changed when GC disabled");
    /* NOTE(review): mrb_raise performs a non-local jump, so this return
     * appears to be defensive/unreachable — confirm against mruby error.c */
    return;
  }
  if (is_generational(gc) && !enable) {
    /* leaving generational mode: forget all old-generation marks */
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    /* entering generational mode: complete the current cycle first,
     * then arm the major-GC threshold */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }
  gc->generational = enable;
}
1522
1523/*
1524 * call-seq:
1525 * GC.generational_mode -> true or false
1526 *
1527 * Returns generational or normal gc mode.
1528 *
1529 */
1530
1531static mrb_value
1532gc_generational_mode_get(mrb_state *mrb, mrb_value self)
1533{
1534 return mrb_bool_value(mrb->gc.generational);
1535}
1536
1537/*
1538 * call-seq:
1539 * GC.generational_mode = true or false -> true or false
1540 *
1541 * Changes to generational or normal gc mode.
1542 *
1543 */
1544
1545static mrb_value
1546gc_generational_mode_set(mrb_state *mrb, mrb_value self)
1547{
1548 mrb_bool enable;
1549
1550 mrb_get_args(mrb, "b", &enable);
1551 if (mrb->gc.generational != enable)
1552 change_gen_gc_mode(mrb, &mrb->gc, enable);
1553
1554 return mrb_bool_value(enable);
1555}
1556
1557
1558static void
1559gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
1560{
1561 mrb_heap_page* page;
1562
1563 page = gc->heaps;
1564 while (page != NULL) {
1565 RVALUE *p;
1566 int i;
1567
1568 p = objects(page);
1569 for (i=0; i < MRB_HEAP_PAGE_SIZE; i++) {
1570 if ((*callback)(mrb, &p[i].as.basic, data) == MRB_EACH_OBJ_BREAK)
1571 return;
1572 }
1573 page = page->next;
1574 }
1575}
1576
/*
 * Iterate over every heap object, invoking `callback` for each slot.
 * A full GC is run first; gc.iterating is set to block nested GC while
 * the walk is in progress, and restored afterwards even if the
 * callback raises.
 */
void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
  mrb_bool iterating = mrb->gc.iterating;

  mrb_full_gc(mrb);
  mrb->gc.iterating = TRUE;
  if (iterating) {
    /* already inside an iteration: reuse the caller's exception context */
    gc_each_objects(mrb, &mrb->gc, callback, data);
  }
  else {
    /* guard with a local jump buffer so gc.iterating is restored even
     * if the callback raises; the exception is then re-thrown */
    struct mrb_jmpbuf *prev_jmp = mrb->jmp;
    struct mrb_jmpbuf c_jmp;

    MRB_TRY(&c_jmp) {
      mrb->jmp = &c_jmp;
      gc_each_objects(mrb, &mrb->gc, callback, data);
      mrb->jmp = prev_jmp;
      mrb->gc.iterating = iterating;
    } MRB_CATCH(&c_jmp) {
      mrb->gc.iterating = iterating;
      mrb->jmp = prev_jmp;
      MRB_THROW(prev_jmp);
    } MRB_END_EXC(&c_jmp);
  }
}
1603
1604#ifdef GC_TEST
1605#ifdef GC_DEBUG
1606static mrb_value gc_test(mrb_state *, mrb_value);
1607#endif
1608#endif
1609
/* Register the Ruby-level GC module and bind its class methods. */
void
mrb_init_gc(mrb_state *mrb)
{
  struct RClass *gc;

  /* heap pages are sized assuming an object fits in 6 machine words */
  mrb_static_assert(sizeof(RVALUE) <= sizeof(void*) * 6,
                    "RVALUE size must be within 6 words");

  gc = mrb_define_module(mrb, "GC");

  mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
  mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
#ifdef GC_TEST
#ifdef GC_DEBUG
  mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
#endif
#endif
}
1635
1636#ifdef GC_TEST
1637#ifdef GC_DEBUG
/* Unit test: mrb_field_write_barrier must gray a white target during
 * mark, repaint the source during sweep, and be a no-op otherwise. */
void
test_mrb_field_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj, *value;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_field_write_barrier");
  /* run in plain incremental mode */
  gc->generational = FALSE;
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
  paint_black(obj);
  paint_partial_white(gc, value);


  puts("  in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_field_write_barrier(mrb, obj, value);

  /* during mark, the white target must have been grayed */
  mrb_assert(is_gray(value));


  puts("  in MRB_GC_STATE_SWEEP");
  paint_partial_white(gc, value);
  gc->state = MRB_GC_STATE_SWEEP;
  mrb_field_write_barrier(mrb, obj, value);

  /* during sweep, obj itself is repainted to the current white */
  mrb_assert(obj->color & gc->current_white_part);
  mrb_assert(value->color & gc->current_white_part);


  puts("  fail with black");
  gc->state = MRB_GC_STATE_MARK;
  paint_white(obj);
  paint_partial_white(gc, value);
  mrb_field_write_barrier(mrb, obj, value);

  /* no-op: the barrier only fires when the source object is black */
  mrb_assert(obj->color & gc->current_white_part);


  puts("  fail with gray");
  gc->state = MRB_GC_STATE_MARK;
  paint_black(obj);
  paint_gray(value);
  mrb_field_write_barrier(mrb, obj, value);

  /* no-op: the barrier only fires when the target object is white */
  mrb_assert(is_gray(value));


  {
    /* same check through the mrb_value-taking wrapper */
    puts("test_mrb_field_write_barrier_value");
    obj = mrb_basic_ptr(mrb_ary_new(mrb));
    mrb_value value = mrb_str_new_lit(mrb, "value");
    paint_black(obj);
    paint_partial_white(gc, mrb_basic_ptr(value));

    gc->state = MRB_GC_STATE_MARK;
    mrb_field_write_barrier_value(mrb, obj, value);

    mrb_assert(is_gray(mrb_basic_ptr(value)));
  }

  mrb_close(mrb);
}
1702
/* Unit test: mrb_write_barrier must demote a black object to gray and
 * queue it on the atomic gray list; non-black objects are untouched. */
void
test_mrb_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_write_barrier");
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  paint_black(obj);

  puts("  in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_write_barrier(mrb, obj);

  /* the black object is now gray and heads the atomic gray list */
  mrb_assert(is_gray(obj));
  mrb_assert(gc->atomic_gray_list == obj);


  puts("  fail with gray");
  paint_gray(obj);
  mrb_write_barrier(mrb, obj);

  /* no-op: the barrier only fires for black objects */
  mrb_assert(is_gray(obj));

  mrb_close(mrb);
}
1730
/* Unit test: add_gray_list must paint objects gray and push them onto
 * the gray list in LIFO order. */
void
test_add_gray_list(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj1, *obj2;
  mrb_gc *gc = &mrb->gc;

  puts("test_add_gray_list");
  change_gen_gc_mode(mrb, gc, FALSE);
  mrb_assert(gc->gray_list == NULL);
  obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj1);
  /* a newly added object becomes the list head and is painted gray */
  mrb_assert(gc->gray_list == obj1);
  mrb_assert(is_gray(obj1));

  obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj2);
  /* LIFO: obj2 now heads the list, linking back to obj1 */
  mrb_assert(gc->gray_list == obj2);
  mrb_assert(gc->gray_list->gcnext == obj1);
  mrb_assert(is_gray(obj2));

  mrb_close(mrb);
}
1754
/* Unit test: gc_gray_mark must blacken the object it traverses, gray
 * its children, and return how many children were grayed. */
void
test_gc_gray_mark(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value obj_v, value_v;
  struct RBasic *obj;
  size_t gray_num = 0;
  mrb_gc *gc = &mrb->gc;

  puts("test_gc_gray_mark");

  puts("  in MRB_TT_CLASS");
  obj = (struct RBasic*)mrb->object_class;
  paint_gray(obj);
  gray_num = gc_gray_mark(mrb, gc, obj);
  /* marking Object blackens it and grays more than one child */
  mrb_assert(is_black(obj));
  mrb_assert(gray_num > 1);

  puts("  in MRB_TT_ARRAY");
  obj_v = mrb_ary_new(mrb);
  value_v = mrb_str_new_lit(mrb, "test");
  paint_gray(mrb_basic_ptr(obj_v));
  paint_partial_white(gc, mrb_basic_ptr(value_v));
  mrb_ary_push(mrb, obj_v, value_v);
  gray_num = gc_gray_mark(mrb, gc, mrb_basic_ptr(obj_v));
  /* the single array element is the only child grayed */
  mrb_assert(is_black(mrb_basic_ptr(obj_v)));
  mrb_assert(is_gray(mrb_basic_ptr(value_v)));
  mrb_assert(gray_num == 1);

  mrb_close(mrb);
}
1786
1787void
1788test_incremental_gc(void)
1789{
1790 mrb_state *mrb = mrb_open();
1791 size_t max = ~0, live = 0, total = 0, freed = 0;
1792 RVALUE *free;
1793 mrb_heap_page *page;
1794 mrb_gc *gc = &mrb->gc;
1795
1796 puts("test_incremental_gc");
1797 change_gen_gc_mode(mrb, gc, FALSE);
1798
1799 puts(" in mrb_full_gc");
1800 mrb_full_gc(mrb);
1801
1802 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1803 puts(" in MRB_GC_STATE_ROOT");
1804 incremental_gc(mrb, gc, max);
1805 mrb_assert(gc->state == MRB_GC_STATE_MARK);
1806 puts(" in MRB_GC_STATE_MARK");
1807 incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
1808 mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1809
1810 puts(" in MRB_GC_STATE_SWEEP");
1811 page = gc->heaps;
1812 while (page) {
1813 RVALUE *p = objects(page);
1814 RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
1815 while (p<e) {
1816 if (is_black(&p->as.basic)) {
1817 live++;
1818 }
1819 if (is_gray(&p->as.basic) && !is_dead(gc, &p->as.basic)) {
1820 printf("%p\n", &p->as.basic);
1821 }
1822 p++;
1823 }
1824 page = page->next;
1825 total += MRB_HEAP_PAGE_SIZE;
1826 }
1827
1828 mrb_assert(gc->gray_list == NULL);
1829
1830 incremental_gc(mrb, gc, max);
1831 mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1832
1833 incremental_gc(mrb, gc, max);
1834 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1835
1836 free = (RVALUE*)gc->heaps->freelist;
1837 while (free) {
1838 freed++;
1839 free = (RVALUE*)free->as.free.next;
1840 }
1841
1842 mrb_assert(gc->live == live);
1843 mrb_assert(gc->live == total-freed);
1844
1845 puts("test_incremental_gc(gen)");
1846 incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
1847 change_gen_gc_mode(mrb, gc, TRUE);
1848
1849 mrb_assert(gc->full == FALSE);
1850 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1851
1852 puts(" in minor");
1853 mrb_assert(is_minor_gc(gc));
1854 mrb_assert(gc->majorgc_old_threshold > 0);
1855 gc->majorgc_old_threshold = 0;
1856 mrb_incremental_gc(mrb);
1857 mrb_assert(gc->full == TRUE);
1858 mrb_assert(gc->state == MRB_GC_STATE_ROOT);
1859
1860 puts(" in major");
1861 mrb_assert(is_major_gc(gc));
1862 do {
1863 mrb_incremental_gc(mrb);
1864 } while (gc->state != MRB_GC_STATE_ROOT);
1865 mrb_assert(gc->full == FALSE);
1866
1867 mrb_close(mrb);
1868}
1869
/* Unit test: sweeping two pages in one call must release the fully
 * empty extra page, leaving a single (free) heap page. */
void
test_incremental_sweep_phase(void)
{
  mrb_state *mrb = mrb_open();
  mrb_gc *gc = &mrb->gc;

  puts("test_incremental_sweep_phase");

  /* add a second heap page and point the sweep cursor at the start */
  add_heap(mrb, gc);
  gc->sweeps = gc->heaps;

  mrb_assert(gc->heaps->next->next == NULL);
  mrb_assert(gc->free_heaps->next->next == NULL);
  incremental_sweep_phase(mrb, gc, MRB_HEAP_PAGE_SIZE * 3);

  /* the empty extra page should have been unlinked and freed */
  mrb_assert(gc->heaps->next == NULL);
  mrb_assert(gc->heaps == gc->free_heaps);

  mrb_close(mrb);
}
1890
/* Ruby binding for GC.test (GC_TEST + GC_DEBUG builds only): run all
 * internal GC unit tests; returns nil. */
static mrb_value
gc_test(mrb_state *mrb, mrb_value self)
{
  test_mrb_field_write_barrier();
  test_mrb_write_barrier();
  test_add_gray_list();
  test_gc_gray_mark();
  test_incremental_gc();
  test_incremental_sweep_phase();
  return mrb_nil_value();
}
1902#endif /* GC_DEBUG */
1903#endif /* GC_TEST */
Note: See TracBrowser for help on using the repository browser.