source: EcnlProtoTool/trunk/mruby-1.3.0/src/gc.c@ 331

Last change on this file since 331 was 331, checked in by coas-nagasima, 6 years ago

prototoolに関連するプロジェクトをnewlibからmuslを使うよう変更・更新
ntshellをnewlibの下位の実装から、muslのsyscallの実装に変更・更新
以下のOSSをアップデート
・mruby-1.3.0
・musl-1.1.18
・onigmo-6.1.3
・tcc-0.9.27
以下のOSSを追加
・openssl-1.1.0e
・curl-7.57.0
・zlib-1.2.11
以下のmrbgemsを追加
・iij/mruby-digest
・iij/mruby-env
・iij/mruby-errno
・iij/mruby-iijson
・iij/mruby-ipaddr
・iij/mruby-mock
・iij/mruby-require
・iij/mruby-tls-openssl

  • Property svn:eol-style set to native
  • Property svn:mime-type set to text/x-csrc;charset=UTF-8
File size: 42.2 KB
Line 
1/*
2** gc.c - garbage collector for mruby
3**
4** See Copyright Notice in mruby.h
5*/
6
7#include <string.h>
8#include <stdlib.h>
9#include <mruby.h>
10#include <mruby/array.h>
11#include <mruby/class.h>
12#include <mruby/data.h>
13#include <mruby/hash.h>
14#include <mruby/proc.h>
15#include <mruby/range.h>
16#include <mruby/string.h>
17#include <mruby/variable.h>
18#include <mruby/gc.h>
19#include <mruby/error.h>
20#include <mruby/throw.h>
21
22/*
23 = Tri-color Incremental Garbage Collection
24
25 mruby's GC is Tri-color Incremental GC with Mark & Sweep.
26 Algorithm details are omitted.
27 Instead, the implementation part is described below.
28
29 == Object's Color
30
31 Each object can be painted in three colors:
32
33 * White - Unmarked.
34 * Gray - Marked, But the child objects are unmarked.
35 * Black - Marked, the child objects are also marked.
36
37 == Two White Types
38
39 There're two white color types in a flip-flop fashion: White-A and White-B,
40 which respectively represent the Current White color (the newly allocated
41 objects in the current GC cycle) and the Sweep Target White color (the
42 dead objects to be swept).
43
44 A and B will be switched just at the beginning of the next GC cycle. At
45 that time, all the dead objects have been swept, while the newly created
46 objects in the current GC cycle which finally remains White are now
47 regarded as dead objects. Instead of traversing all the White-A objects and
48 painting them as White-B, just switch the meaning of White-A and White-B as
49 this will be much cheaper.
50
51 As a result, the objects we sweep in the current GC cycle are always
52 left from the previous GC cycle. This allows us to sweep objects
53 incrementally, without the disturbance of the newly created objects.
54
55 == Execution Timing
56
57 GC Execution Time and Each step interval are decided by live objects count.
58 List of Adjustment API:
59
60 * gc_interval_ratio_set
61 * gc_step_ratio_set
62
63 For details, see the comments for each function.
64
65 == Write Barrier
66
67 mruby implementer and C extension library writer must insert a write
68 barrier when updating a reference from a field of an object.
69 When updating a reference from a field of object A to object B,
70 two different types of write barrier are available:
71
72 * mrb_field_write_barrier - target B object for a mark.
73 * mrb_write_barrier - target A object for a mark.
74
75 == Generational Mode
76
77 mruby's GC offers an Generational Mode while re-using the tri-color GC
78 infrastructure. It will treat the Black objects as Old objects after each
79 sweep phase, instead of painting them White. The key ideas are still the same
80 as traditional generational GC:
81
82 * Minor GC - just traverse the Young objects (Gray objects) in the mark
83 phase, then only sweep the newly created objects, and leave
84 the Old objects live.
85
86 * Major GC - same as a full regular GC cycle.
87
88 The difference from "traditional" generational GC is, that the major GC
89 in mruby is triggered incrementally in a tri-color manner.
90
91
92 For details, see the comments for each function.
93
94*/
95
/* Layout of an unused heap slot: the common object header (whose tt is
 * MRB_TT_FREE) plus a link to the next free slot on the page's free list. */
struct free_obj {
  MRB_OBJECT_HEADER;
  struct RBasic *next;
};
100
/* One heap slot: a union large enough to hold any mruby object
 * representation, so every object type fits in a fixed-size cell. */
typedef struct {
  union {
    struct free_obj free;
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RProc proc;
    struct REnv env;
    struct RException exc;
    struct RBreak brk;
#ifdef MRB_WORD_BOXING
    /* boxed float/cptr wrappers only exist under word boxing */
    struct RFloat floatv;
    struct RCptr cptr;
#endif
  } as;
} RVALUE;
122
#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

/* wall-clock timestamps used by the GC_* reporting macros below */
static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

/* current wall-clock time in seconds (microsecond resolution) */
static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

/* print which GC entry point ran and the collector's current mode */
#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
} while(0)

#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

/* accumulate and report the duration of one GC invocation */
#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", gc->state);\
  fprintf(stderr, "live: %zu\n", gc->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
/* profiling disabled: the reporting macros compile to nothing */
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif
165
#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif

#ifndef MRB_HEAP_PAGE_SIZE
/* number of object slots per heap page */
#define MRB_HEAP_PAGE_SIZE 1024
#endif

/* work-budget unit for one incremental GC slice */
#define GC_STEP_SIZE 1024

/* white: 011, black: 100, gray: 000 */
#define GC_GRAY 0
#define GC_WHITE_A 1
#define GC_WHITE_B (1 << 1)
#define GC_BLACK (1 << 2)
#define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
#define GC_COLOR_MASK 7

/* color predicates and painters; "partial white" is the current-cycle
 * white, while paint_white sets both white bits at once */
#define paint_gray(o) ((o)->color = GC_GRAY)
#define paint_black(o) ((o)->color = GC_BLACK)
#define paint_white(o) ((o)->color = GC_WHITES)
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == GC_GRAY)
#define is_white(o) ((o)->color & GC_WHITES)
#define is_black(o) ((o)->color & GC_BLACK)
/* the two whites swap meaning at the start of each cycle */
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
/* dead = carries the sweep-target white, or already on the free list */
#define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)

/* first object slot of a heap page */
#define objects(p) ((RVALUE *)p->objects)
198
199MRB_API void*
200mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
201{
202 void *p2;
203
204 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
205 if (!p2 && len > 0 && mrb->gc.heaps) {
206 mrb_full_gc(mrb);
207 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
208 }
209
210 return p2;
211}
212
/* Resize p to len bytes; raises the preallocated NoMemoryError on failure.
 * A gc.out_of_memory flag distinguishes a first failure from a repeated
 * one (the flag survives the longjmp performed by mrb_exc_raise). */
MRB_API void*
mrb_realloc(mrb_state *mrb, void *p, size_t len)
{
  void *p2;

  p2 = mrb_realloc_simple(mrb, p, len);
  if (len == 0) return p2;  /* len==0 acts as free; NULL is not an error */
  if (p2 == NULL) {
    if (mrb->gc.out_of_memory) {
      /* already failed once before: raise again */
      mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
      /* mrb_panic(mrb); */
    }
    else {
      /* record the failure before raising (raise does not return) */
      mrb->gc.out_of_memory = TRUE;
      mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
    }
  }
  else {
    mrb->gc.out_of_memory = FALSE;  /* success clears the sticky flag */
  }

  return p2;
}
236
237MRB_API void*
238mrb_malloc(mrb_state *mrb, size_t len)
239{
240 return mrb_realloc(mrb, 0, len);
241}
242
243MRB_API void*
244mrb_malloc_simple(mrb_state *mrb, size_t len)
245{
246 return mrb_realloc_simple(mrb, 0, len);
247}
248
249MRB_API void*
250mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
251{
252 void *p;
253
254 if (nelem > 0 && len > 0 &&
255 nelem <= SIZE_MAX / len) {
256 size_t size;
257 size = nelem * len;
258 p = mrb_malloc(mrb, size);
259
260 memset(p, 0, size);
261 }
262 else {
263 p = NULL;
264 }
265
266 return p;
267}
268
269MRB_API void
270mrb_free(mrb_state *mrb, void *p)
271{
272 (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
273}
274
275MRB_API mrb_bool
276mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
277 return is_dead(&mrb->gc, object);
278}
279
280static void
281link_heap_page(mrb_gc *gc, mrb_heap_page *page)
282{
283 page->next = gc->heaps;
284 if (gc->heaps)
285 gc->heaps->prev = page;
286 gc->heaps = page;
287}
288
289static void
290unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
291{
292 if (page->prev)
293 page->prev->next = page->next;
294 if (page->next)
295 page->next->prev = page->prev;
296 if (gc->heaps == page)
297 gc->heaps = page->next;
298 page->prev = NULL;
299 page->next = NULL;
300}
301
302static void
303link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
304{
305 page->free_next = gc->free_heaps;
306 if (gc->free_heaps) {
307 gc->free_heaps->free_prev = page;
308 }
309 gc->free_heaps = page;
310}
311
312static void
313unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
314{
315 if (page->free_prev)
316 page->free_prev->free_next = page->free_next;
317 if (page->free_next)
318 page->free_next->free_prev = page->free_prev;
319 if (gc->free_heaps == page)
320 gc->free_heaps = page->free_next;
321 page->free_prev = NULL;
322 page->free_next = NULL;
323}
324
/* Allocate one heap page (header plus MRB_HEAP_PAGE_SIZE object slots)
 * and link it into both the all-pages list and the free-pages list. */
static void
add_heap(mrb_state *mrb, mrb_gc *gc)
{
  mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
  RVALUE *p, *e;
  struct RBasic *prev = NULL;

  /* thread every slot onto the page-local free list; each slot links to
     the previous one, so the list head ends up at the last slot */
  for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
    p->as.free.tt = MRB_TT_FREE;
    p->as.free.next = prev;
    prev = &p->as.basic;
  }
  page->freelist = prev;

  link_heap_page(gc, page);
  link_free_heap_page(gc, page);
}
342
/* pacing defaults, in percent (see interval_ratio / step_ratio use) */
#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
#define DEFAULT_MAJOR_GC_INC_RATIO 200
/* generational-mode predicates: "full" marks a major cycle */
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
349
/* Initialize the collector: allocate the arena (unless it is a fixed-size
 * array), pick the initial white color, create the first heap page, and
 * install default pacing ratios. Generational mode starts enabled (and in
 * a "full" cycle) unless compiled out. */
void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  gc->generational = TRUE;
  gc->full = TRUE;  /* first cycle is a major one */
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
373
374static void obj_free(mrb_state *mrb, struct RBasic *obj, int end);
375
376void
377free_heap(mrb_state *mrb, mrb_gc *gc)
378{
379 mrb_heap_page *page = gc->heaps;
380 mrb_heap_page *tmp;
381 RVALUE *p, *e;
382
383 while (page) {
384 tmp = page;
385 page = page->next;
386 for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
387 if (p->as.free.tt != MRB_TT_FREE)
388 obj_free(mrb, &p->as.basic, TRUE);
389 }
390 mrb_free(mrb, tmp);
391 }
392}
393
/* Tear down the collector: free every object and heap page, then the
 * arena itself (when it was heap-allocated). */
void
mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
{
  free_heap(mrb, gc);
#ifndef MRB_GC_FIXED_ARENA
  mrb_free(mrb, gc->arena);
#endif
}
402
/* Record p in the GC arena so it is treated as a root until the arena
 * index is restored. With a fixed arena, overflow raises ArenaError
 * (after reserving a little room so the raise itself can allocate);
 * otherwise the arena grows by 1.5x. */
static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_exc_raise(mrb, mrb_obj_value(mrb->arena_err));
  }
#else
  if (gc->arena_idx >= gc->arena_capa) {
    /* extend arena */
    gc->arena_capa = (int)(gc->arena_capa * 1.5);
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
  }
#endif
  gc->arena[gc->arena_idx++] = p;
}
421
422/* mrb_gc_protect() leaves the object in the arena */
423MRB_API void
424mrb_gc_protect(mrb_state *mrb, mrb_value obj)
425{
426 if (mrb_immediate_p(obj)) return;
427 gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
428}
429
430#define GC_ROOT_NAME "_gc_root_"
431
432/* mrb_gc_register() keeps the object from GC.
433
434 Register your object when it's exported to C world,
435 without reference from Ruby world, e.g. callback
436 arguments. Don't forget to remove the object using
437 mrb_gc_unregister, otherwise your object will leak.
438*/
439
440MRB_API void
441mrb_gc_register(mrb_state *mrb, mrb_value obj)
442{
443 mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
444 mrb_value table = mrb_gv_get(mrb, root);
445
446 if (mrb_nil_p(table) || mrb_type(table) != MRB_TT_ARRAY) {
447 table = mrb_ary_new(mrb);
448 mrb_gv_set(mrb, root, table);
449 }
450 mrb_ary_push(mrb, table, obj);
451}
452
/* mrb_gc_unregister() removes the object from GC root. */
MRB_API void
mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
{
  mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
  mrb_value table = mrb_gv_get(mrb, root);
  struct RArray *a;
  mrb_int i;

  if (mrb_nil_p(table)) return;
  if (mrb_type(table) != MRB_TT_ARRAY) {
    /* root table was clobbered with a non-array; discard it */
    mrb_gv_set(mrb, root, mrb_nil_value());
    return;
  }
  a = mrb_ary_ptr(table);
  mrb_ary_modify(mrb, a);  /* un-share before mutating in place */
  for (i = 0; i < a->len; i++) {
    if (mrb_obj_eq(mrb, a->ptr[i], obj)) {
      /* len is decremented first so (a->len - i) is exactly the number
         of trailing elements shifted left over the removed slot */
      a->len--;
      memmove(&a->ptr[i], &a->ptr[i + 1], (a->len - i) * sizeof(a->ptr[i]));
      break;  /* remove only the first occurrence */
    }
  }
}
477
/* Allocate a new object of type ttype with class cls.
 * Validates that cls can serve as a class for ttype, runs incremental GC
 * when the live count passes the threshold, grows the heap when no free
 * slot exists, zero-fills the slot, and paints the object the current
 * white. The result is pushed onto the arena so it survives until the
 * caller's arena restore. Raises TypeError on a class/type mismatch. */
MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

  if (cls) {
    enum mrb_vtype tt;

    switch (cls->tt) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
    case MRB_TT_MODULE:
    case MRB_TT_ENV:
      break;
    default:
      /* cls is not something that can act as a class */
      mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
    }
    tt = MRB_INSTANCE_TT(cls);
    /* an instance type declared on the class must match ttype, except
       for the internal types that legitimately differ */
    if (tt != MRB_TT_FALSE &&
        ttype != MRB_TT_SCLASS &&
        ttype != MRB_TT_ICLASS &&
        ttype != MRB_TT_ENV &&
        ttype != tt) {
      mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %S", mrb_obj_value(cls));
    }
  }

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);  /* stress mode: collect on every allocation */
#endif
  if (gc->threshold < gc->live) {
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);
  }

  /* pop a slot from the first page that has free slots */
  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);
  *(RVALUE *)p = RVALUE_zero;  /* zero-initialize the whole slot */
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);  /* newborns are current-cycle white */
  return p;
}
531
/* Paint obj gray and push it onto the gray list for later traversal. */
static inline void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
  /* catch corrupted type tags early under stress testing */
  if (obj->tt > MRB_TT_MAXDEFINE) {
    abort();
  }
#endif
  paint_gray(obj);
  obj->gcnext = gc->gray_list;
  gc->gray_list = obj;
}
544
/* Mark the live portion of a context's VM value stack, then overwrite
 * the unused tail with nil so stale slots cannot keep objects alive. */
static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;
  mrb_value nil;
  int nregs;

  if (c->stack == NULL) return;
  e = c->stack - c->stbase;  /* depth up to the current stack pointer */
  if (c->ci) {
    /* extend by the active frame's register window (at least argc+2) */
    nregs = c->ci->argc + 2;
    if (c->ci->nregs > nregs)
      nregs = c->ci->nregs;
    e += nregs;
  }
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;  /* clamp */
  for (i=0; i<e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      mrb_gc_mark(mrb, mrb_basic_ptr(v));
    }
  }
  /* scrub the region above the live area */
  e = c->stend - c->stbase;
  nil = mrb_nil_value();
  for (; i<e; i++) {
    c->stbase[i] = nil;
  }
}
575
576static void
577mark_context(mrb_state *mrb, struct mrb_context *c)
578{
579 int i;
580 mrb_callinfo *ci;
581
582 /* mark VM stack */
583 mark_context_stack(mrb, c);
584
585 /* mark call stack */
586 if (c->cibase) {
587 for (ci = c->cibase; ci <= c->ci; ci++) {
588 mrb_gc_mark(mrb, (struct RBasic*)ci->env);
589 mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
590 mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
591 }
592 }
593 /* mark ensure stack */
594 for (i=0; i<c->esize; i++) {
595 if (c->ensure[i] == NULL) break;
596 mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
597 }
598 /* mark fibers */
599 mrb_gc_mark(mrb, (struct RBasic*)c->fib);
600 if (c->prev) {
601 mark_context(mrb, c->prev);
602 }
603}
604
/* Pop obj from the gray list, paint it black, and mark every object it
 * references, per type. Assumes obj is currently at the head of the
 * gray list (asserted gray). */
static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  gc->gray_list = obj->gcnext;  /* pop obj off the gray list */
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);  /* every object's class */
  switch (obj->tt) {
  case MRB_TT_ICLASS:
    {
      struct RClass *c = (struct RClass*)obj;
      /* origin iclasses own their method table */
      if (MRB_FLAG_TEST(c, MRB_FLAG_IS_ORIGIN))
        mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    }
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      mrb_gc_mark_mt(mrb, c);   /* method table */
      mrb_gc_mark(mrb, (struct RBasic*)c->super);
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);  /* instance variables */
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->env);
      mrb_gc_mark(mrb, (struct RBasic*)p->target_class);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;
      mrb_int i, len;

      if (MRB_ENV_STACK_SHARED_P(e)) {
        /* stack still lives in a context; mark its fiber instead */
        if (e->cxt.c->fib) {
          mrb_gc_mark(mrb, (struct RBasic*)e->cxt.c->fib);
        }
        break;
      }
      /* detached env: mark its private copy of the stack values */
      len = MRB_ENV_STACK_LEN(e);
      for (i=0; i<len; i++) {
        mrb_gc_mark_value(mrb, e->stack[i]);
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e;

      for (i=0,e=a->len; i<e; i++) {
        mrb_gc_mark_value(mrb, a->ptr[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    /* strings reference no other objects */
    break;

  case MRB_TT_RANGE:
    {
      struct RRange *r = (struct RRange*)obj;

      if (r->edges) {
        mrb_gc_mark_value(mrb, r->edges->beg);
        mrb_gc_mark_value(mrb, r->edges->end);
      }
    }
    break;

  default:
    break;
  }
}
708
709MRB_API void
710mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
711{
712 if (obj == 0) return;
713 if (!is_white(obj)) return;
714 mrb_assert((obj)->tt != MRB_TT_FREE);
715 add_gray_list(mrb, &mrb->gc, obj);
716}
717
/* Release the resources owned by obj and return its slot to MRB_TT_FREE.
 * `end` is true only during interpreter shutdown (free_heap), which
 * changes how fiber contexts are handled. Shared-stack REnv objects are
 * deliberately left alive (early return, tt unchanged). */
static void
obj_free(mrb_state *mrb, struct RBasic *obj, int end)
{
  DEBUG(fprintf(stderr, "obj_free(%p,tt=%d)\n",obj,obj->tt));
  switch (obj->tt) {
    /* immediate - no mark */
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
    /* cannot happen */
    return;

  case MRB_TT_FLOAT:
#ifdef MRB_WORD_BOXING
    break;  /* boxed floats occupy a heap slot; free the slot below */
#else
    return;
#endif

  case MRB_TT_OBJECT:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);  /* method table */
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;
  case MRB_TT_ICLASS:
    /* only origin iclasses own their method table */
    if (MRB_FLAG_TEST(obj, MRB_FLAG_IS_ORIGIN))
      mrb_gc_free_mt(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      if (MRB_ENV_STACK_SHARED_P(e)) {
        /* cannot be freed */
        return;
      }
      mrb_free(mrb, e->stack);
      e->stack = NULL;
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (!end && c && c != mrb->root_c) {
        /* detach every env that still shares this context's stack
           before the stack memory goes away */
        mrb_callinfo *ci = c->ci;
        mrb_callinfo *ce = c->cibase;

        while (ce <= ci) {
          struct REnv *e = ci->env;
          if (e && !is_dead(&mrb->gc, e) &&
              e->tt == MRB_TT_ENV && MRB_ENV_STACK_SHARED_P(e)) {
            mrb_env_unshare(mrb, e);
          }
          ci--;
        }
        mrb_free_context(mrb, c);
      }
    }
    break;

  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->aux.shared);
    else
      mrb_free(mrb, ((struct RArray*)obj)->ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep_decref(mrb, p->body.irep);  /* ireps are refcounted */
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_free(mrb, ((struct RRange*)obj)->edges);
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);  /* user-supplied finalizer */
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;  /* the slot is now reusable */
}
834
/* Mark every GC root: the arena, global variables, the core class
 * hierarchy, pre-allocated error objects, top_self, the pending
 * exception, and all live contexts. In a minor GC the gray lists are
 * kept (old objects recorded by the write barrier stay queued). */
static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  if (!is_minor_gc(gc)) {
    gc->gray_list = NULL;
    gc->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);

  /* mark built-in classes */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->class_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->module_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->proc_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->string_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->array_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->hash_class);

  mrb_gc_mark(mrb, (struct RBasic*)mrb->float_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->fixnum_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->true_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->false_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nil_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->symbol_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->kernel_module);

  mrb_gc_mark(mrb, (struct RBasic*)mrb->eException_class);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->eStandardError_class);

  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->stack_err);
#ifdef MRB_GC_FIXED_ARENA
  mrb_gc_mark(mrb, (struct RBasic*)mrb->arena_err);
#endif

  /* mark the current context chain and (if different) the root context */
  mark_context(mrb, mrb->c);
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->root_c);
  }
}
888
889static size_t
890gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
891{
892 size_t children = 0;
893
894 gc_mark_children(mrb, gc, obj);
895
896 switch (obj->tt) {
897 case MRB_TT_ICLASS:
898 children++;
899 break;
900
901 case MRB_TT_CLASS:
902 case MRB_TT_SCLASS:
903 case MRB_TT_MODULE:
904 {
905 struct RClass *c = (struct RClass*)obj;
906
907 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
908 children += mrb_gc_mark_mt_size(mrb, c);
909 children++;
910 }
911 break;
912
913 case MRB_TT_OBJECT:
914 case MRB_TT_DATA:
915 case MRB_TT_EXCEPTION:
916 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
917 break;
918
919 case MRB_TT_ENV:
920 children += (int)obj->flags;
921 break;
922
923 case MRB_TT_FIBER:
924 {
925 struct mrb_context *c = ((struct RFiber*)obj)->cxt;
926 size_t i;
927 mrb_callinfo *ci;
928
929 if (!c) break;
930 /* mark stack */
931 i = c->stack - c->stbase;
932 if (c->ci) i += c->ci->nregs;
933 if (c->stbase + i > c->stend) i = c->stend - c->stbase;
934 children += i;
935
936 /* mark ensure stack */
937 children += c->eidx;
938
939 /* mark closure */
940 if (c->cibase) {
941 for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
942 ;
943 }
944 children += i;
945 }
946 break;
947
948 case MRB_TT_ARRAY:
949 {
950 struct RArray *a = (struct RArray*)obj;
951 children += a->len;
952 }
953 break;
954
955 case MRB_TT_HASH:
956 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
957 children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
958 break;
959
960 case MRB_TT_PROC:
961 case MRB_TT_RANGE:
962 children+=2;
963 break;
964
965 default:
966 break;
967 }
968 return children;
969}
970
971
972static void
973gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
974 while (gc->gray_list) {
975 if (is_gray(gc->gray_list))
976 gc_mark_children(mrb, gc, gc->gray_list);
977 else
978 gc->gray_list = gc->gray_list->gcnext;
979 }
980}
981
982
983static size_t
984incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
985{
986 size_t tried_marks = 0;
987
988 while (gc->gray_list && tried_marks < limit) {
989 tried_marks += gc_gray_mark(mrb, gc, gc->gray_list);
990 }
991
992 return tried_marks;
993}
994
/* Atomic re-mark at the end of the mark phase: rescan all roots and
 * drain both gray lists (including objects recorded by the write
 * barrier during incremental marking) so nothing reachable is missed
 * before sweeping starts. */
static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
  int i, e;

  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  mrb_gc_mark_gv(mrb);
  mark_context(mrb, mrb->c);
  mark_context(mrb, mrb->root_c);
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
  /* now process objects queued atomically by mrb_write_barrier */
  gc->gray_list = gc->atomic_gray_list;
  gc->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
}
1015
1016static void
1017prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
1018{
1019 gc->state = MRB_GC_STATE_SWEEP;
1020 gc->sweeps = gc->heaps;
1021 gc->live_after_mark = gc->live;
1022}
1023
/* Sweep up to `limit` slots' worth of pages: free dead objects back to
 * their pages' free lists, release pages that became entirely free, and
 * (in minor GC) mark fully-occupied pages as "old" so later minor
 * sweeps can skip them. Returns the number of slots visited. */
static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  mrb_heap_page *page = gc->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;      /* page contains only free slots? */
    mrb_bool full = (page->freelist == NULL);  /* was full before sweeping */

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic, FALSE);
          /* obj_free may refuse (shared-stack env); only then is the
             slot actually reclaimed */
          if (p->as.basic.tt == MRB_TT_FREE) {
            p->as.free.next = page->freelist;
            page->freelist = (struct RBasic*)p;
            freed++;
          }
          else {
            dead_slot = FALSE;
          }
        }
      }
      else {
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = FALSE;
      }
      p++;
    }

    /* free dead slot: release a page that holds no live objects, unless
       every slot was freed just now (freed == MRB_HEAP_PAGE_SIZE) —
       presumably kept as spare capacity; verify against upstream */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      mrb_heap_page *next = page->next;

      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      /* a previously full page regained free slots: relink it */
      if (full && freed > 0) {
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;   /* fully occupied: skip in later minor GCs */
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;  /* resume point for the next sweep slice */
  return tried_sweep;
}
1090
/* Advance the collector by one bounded step of whatever phase it is in
 * (root scan -> mark -> sweep -> back to root). Returns the amount of
 * work performed, 0 on phase transitions. */
static size_t
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  switch (gc->state) {
  case MRB_GC_STATE_ROOT:
    root_scan_phase(mrb, gc);
    gc->state = MRB_GC_STATE_MARK;
    flip_white_part(gc);  /* previous cycle's white becomes sweep target */
    return 0;
  case MRB_GC_STATE_MARK:
    if (gc->gray_list) {
      return incremental_marking_phase(mrb, gc, limit);
    }
    else {
      /* gray list drained: do the atomic re-mark, then start sweeping */
      final_marking_phase(mrb, gc);
      prepare_incremental_sweep(mrb, gc);
      return 0;
    }
  case MRB_GC_STATE_SWEEP: {
     size_t tried_sweep = 0;
     tried_sweep = incremental_sweep_phase(mrb, gc, limit);
     if (tried_sweep == 0)
       gc->state = MRB_GC_STATE_ROOT;  /* all pages swept: cycle done */
     return tried_sweep;
  }
  default:
    /* unknown state */
    mrb_assert(0);
    return 0;
  }
}
1122
1123static void
1124incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
1125{
1126 do {
1127 incremental_gc(mrb, gc, SIZE_MAX);
1128 } while (gc->state != to_state);
1129}
1130
1131static void
1132incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
1133{
1134 size_t limit = 0, result = 0;
1135 limit = (GC_STEP_SIZE/100) * gc->step_ratio;
1136 while (result < limit) {
1137 result += incremental_gc(mrb, gc, limit);
1138 if (gc->state == MRB_GC_STATE_ROOT)
1139 break;
1140 }
1141
1142 gc->threshold = gc->live + GC_STEP_SIZE;
1143}
1144
/* Demote every "old" object back to young by running one full
 * non-generational sweep cycle. Used when switching out of (or
 * resetting) generational mode. */
static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
  mrb_bool origin_mode = gc->generational;

  mrb_assert(is_generational(gc));
  if (is_major_gc(gc)) {
    /* finish the half baked GC */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  gc->generational = FALSE;  /* temporarily act like a plain tri-color GC */
  prepare_incremental_sweep(mrb, gc);
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->generational = origin_mode;

  /* The gray objects have already been painted as white */
  gc->atomic_gray_list = gc->gray_list = NULL;
}
1166
/* Run one increment of garbage collection: a full minor cycle in
 * generational mode, otherwise one bounded incremental step. When a
 * cycle completes, recompute the allocation threshold and manage the
 * minor/major transition. No-op while disabled or iterating. */
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    /* minor cycles are cheap enough to run to completion */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    /* a cycle just finished: set the next trigger point */
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      /* major done: go back to minor collections until the old-object
         population grows past majorgc_old_threshold */
      gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
      gc->full = FALSE;
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        /* too many old objects: demote them and escalate to a major GC */
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}
1205
/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled || gc->iterating) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;  /* run the coming cycle as a major GC */
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish half baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* one complete mark-and-sweep lap */
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;

  if (is_generational(gc)) {
    /* back to minor collections with a fresh old-object budget */
    gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

  GC_TIME_STOP_AND_REPORT;
}
1237
/* Public alias for a complete GC cycle. */
MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}
1243
/* Snapshot the arena index; pass the result to mrb_gc_arena_restore to
 * release objects protected after this point. */
MRB_API int
mrb_gc_arena_save(mrb_state *mrb)
{
  return mrb->gc.arena_idx;
}
1249
/* Roll the arena back to a previously saved index, dropping GC
 * protection for everything pushed since. When the arena is growable
 * and now mostly empty, shrink it (never below MRB_GC_ARENA_SIZE). */
MRB_API void
mrb_gc_arena_restore(mrb_state *mrb, int idx)
{
  mrb_gc *gc = &mrb->gc;

#ifndef MRB_GC_FIXED_ARENA
  int capa = gc->arena_capa;

  if (idx < capa / 2) {
    /* shrink to ~2/3 of the current capacity */
    capa = (int)(capa * 0.66);
    if (capa < MRB_GC_ARENA_SIZE) {
      capa = MRB_GC_ARENA_SIZE;
    }
    if (capa != gc->arena_capa) {
      gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa);
      gc->arena_capa = capa;
    }
  }
#endif
  gc->arena_idx = idx;
}
1271
1272/*
1273 * Field write barrier
1274 * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
1275 */
1276
1277MRB_API void
1278mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
1279{
1280 mrb_gc *gc = &mrb->gc;
1281
1282 if (!is_black(obj)) return;
1283 if (!is_white(value)) return;
1284
1285 mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
1286 mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
1287
1288 if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
1289 add_gray_list(mrb, gc, value);
1290 }
1291 else {
1292 mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
1293 paint_partial_white(gc, obj); /* for never write barriers */
1294 }
1295}
1296
1297/*
1298 * Write barrier
1299 * Paint obj(Black) to obj(Gray).
1300 *
1301 * The object that is painted gray will be traversed atomically in final
1302 * mark phase. So you use this write barrier if it's frequency written spot.
1303 * e.g. Set element on Array.
1304 */
1305
1306MRB_API void
1307mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
1308{
1309 mrb_gc *gc = &mrb->gc;
1310
1311 if (!is_black(obj)) return;
1312
1313 mrb_assert(!is_dead(gc, obj));
1314 mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
1315 paint_gray(obj);
1316 obj->gcnext = gc->atomic_gray_list;
1317 gc->atomic_gray_list = obj;
1318}
1319
1320/*
1321 * call-seq:
1322 * GC.start -> nil
1323 *
1324 * Initiates full garbage collection.
1325 *
1326 */
1327
1328static mrb_value
1329gc_start(mrb_state *mrb, mrb_value obj)
1330{
1331 mrb_full_gc(mrb);
1332 return mrb_nil_value();
1333}
1334
1335/*
1336 * call-seq:
1337 * GC.enable -> true or false
1338 *
1339 * Enables garbage collection, returning <code>true</code> if garbage
1340 * collection was previously disabled.
1341 *
1342 * GC.disable #=> false
1343 * GC.enable #=> true
1344 * GC.enable #=> false
1345 *
1346 */
1347
1348static mrb_value
1349gc_enable(mrb_state *mrb, mrb_value obj)
1350{
1351 mrb_bool old = mrb->gc.disabled;
1352
1353 mrb->gc.disabled = FALSE;
1354
1355 return mrb_bool_value(old);
1356}
1357
1358/*
1359 * call-seq:
1360 * GC.disable -> true or false
1361 *
1362 * Disables garbage collection, returning <code>true</code> if garbage
1363 * collection was already disabled.
1364 *
1365 * GC.disable #=> false
1366 * GC.disable #=> true
1367 *
1368 */
1369
1370static mrb_value
1371gc_disable(mrb_state *mrb, mrb_value obj)
1372{
1373 mrb_bool old = mrb->gc.disabled;
1374
1375 mrb->gc.disabled = TRUE;
1376
1377 return mrb_bool_value(old);
1378}
1379
1380/*
1381 * call-seq:
1382 * GC.interval_ratio -> fixnum
1383 *
1384 * Returns ratio of GC interval. Default value is 200(%).
1385 *
1386 */
1387
1388static mrb_value
1389gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
1390{
1391 return mrb_fixnum_value(mrb->gc.interval_ratio);
1392}
1393
1394/*
1395 * call-seq:
1396 * GC.interval_ratio = fixnum -> nil
1397 *
1398 * Updates ratio of GC interval. Default value is 200(%).
1399 * GC start as soon as after end all step of GC if you set 100(%).
1400 *
1401 */
1402
1403static mrb_value
1404gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
1405{
1406 mrb_int ratio;
1407
1408 mrb_get_args(mrb, "i", &ratio);
1409 mrb->gc.interval_ratio = ratio;
1410 return mrb_nil_value();
1411}
1412
1413/*
1414 * call-seq:
1415 * GC.step_ratio -> fixnum
1416 *
1417 * Returns step span ratio of Incremental GC. Default value is 200(%).
1418 *
1419 */
1420
1421static mrb_value
1422gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
1423{
1424 return mrb_fixnum_value(mrb->gc.step_ratio);
1425}
1426
1427/*
1428 * call-seq:
1429 * GC.step_ratio = fixnum -> nil
1430 *
1431 * Updates step span ratio of Incremental GC. Default value is 200(%).
1432 * 1 step of incrementalGC becomes long if a rate is big.
1433 *
1434 */
1435
1436static mrb_value
1437gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
1438{
1439 mrb_int ratio;
1440
1441 mrb_get_args(mrb, "i", &ratio);
1442 mrb->gc.step_ratio = ratio;
1443 return mrb_nil_value();
1444}
1445
/*
 * Switch between generational and non-generational GC mode.
 * Raises RuntimeError while the GC is disabled or the heap is being
 * iterated, because the transition must run GC work to normalize state.
 */
static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (gc->disabled || gc->iterating) {
    mrb_raise(mrb, E_RUNTIME_ERROR, "generational mode changed when GC disabled");
    return;  /* defensive; mrb_raise normally does not return */
  }
  if (is_generational(gc) && !enable) {
    /* leaving generational mode: demote every old object back to young */
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    /* entering generational mode: finish the in-flight cycle first,
       then seed the minor->major escalation threshold */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }
  gc->generational = enable;
}
1465
1466/*
1467 * call-seq:
1468 * GC.generational_mode -> true or false
1469 *
1470 * Returns generational or normal gc mode.
1471 *
1472 */
1473
1474static mrb_value
1475gc_generational_mode_get(mrb_state *mrb, mrb_value self)
1476{
1477 return mrb_bool_value(mrb->gc.generational);
1478}
1479
1480/*
1481 * call-seq:
1482 * GC.generational_mode = true or false -> true or false
1483 *
1484 * Changes to generational or normal gc mode.
1485 *
1486 */
1487
1488static mrb_value
1489gc_generational_mode_set(mrb_state *mrb, mrb_value self)
1490{
1491 mrb_bool enable;
1492
1493 mrb_get_args(mrb, "b", &enable);
1494 if (mrb->gc.generational != enable)
1495 change_gen_gc_mode(mrb, &mrb->gc, enable);
1496
1497 return mrb_bool_value(enable);
1498}
1499
1500
1501static void
1502gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
1503{
1504 mrb_heap_page* page;
1505
1506 page = gc->heaps;
1507 while (page != NULL) {
1508 RVALUE *p;
1509 int i;
1510
1511 p = objects(page);
1512 for (i=0; i < MRB_HEAP_PAGE_SIZE; i++) {
1513 if ((*callback)(mrb, &p[i].as.basic, data) == MRB_EACH_OBJ_BREAK)
1514 return;
1515 }
1516 page = page->next;
1517 }
1518}
1519
/*
 * Walk the whole object space, calling `callback` for each object slot.
 *
 * The gc.iterating flag suspends GC for the duration (checked in
 * mrb_incremental_gc/mrb_full_gc). Only the outermost call installs a
 * jump buffer so the flag and mrb->jmp are restored even if the
 * callback raises; nested calls reuse the outer invocation's handler.
 */
void
mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
{
  mrb_bool iterating = mrb->gc.iterating;

  mrb->gc.iterating = TRUE;
  if (iterating) {
    /* nested call: outer invocation owns flag restoration */
    gc_each_objects(mrb, &mrb->gc, callback, data);
  }
  else {
    struct mrb_jmpbuf *prev_jmp = mrb->jmp;
    struct mrb_jmpbuf c_jmp;

    MRB_TRY(&c_jmp) {
      mrb->jmp = &c_jmp;
      gc_each_objects(mrb, &mrb->gc, callback, data);
      mrb->jmp = prev_jmp;
      mrb->gc.iterating = iterating;
    } MRB_CATCH(&c_jmp) {
      /* restore state, then re-throw to the previous handler */
      mrb->gc.iterating = iterating;
      mrb->jmp = prev_jmp;
      MRB_THROW(prev_jmp);
    } MRB_END_EXC(&c_jmp);
  }
}
1545
1546#ifdef GC_TEST
1547#ifdef GC_DEBUG
1548static mrb_value gc_test(mrb_state *, mrb_value);
1549#endif
1550#endif
1551
1552void
1553mrb_init_gc(mrb_state *mrb)
1554{
1555 struct RClass *gc;
1556
1557 gc = mrb_define_module(mrb, "GC");
1558
1559 mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
1560 mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
1561 mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
1562 mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
1563 mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
1564 mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
1565 mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
1566 mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
1567 mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
1568#ifdef GC_TEST
1569#ifdef GC_DEBUG
1570 mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
1571#endif
1572#endif
1573}
1574
1575#ifdef GC_TEST
1576#ifdef GC_DEBUG
/* Self-test: exercise mrb_field_write_barrier across GC states and
 * verify the resulting object colors. */
void
test_mrb_field_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj, *value;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_field_write_barrier");
  gc->generational = FALSE;
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
  paint_black(obj);
  paint_partial_white(gc, value);


  /* black->white during mark: child must be re-grayed */
  puts(" in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  /* black->white during sweep: parent is repainted current white */
  puts(" in MRB_GC_STATE_SWEEP");
  paint_partial_white(gc, value);
  gc->state = MRB_GC_STATE_SWEEP;
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & gc->current_white_part);
  mrb_assert(value->color & gc->current_white_part);


  /* non-black parent: barrier must be a no-op */
  puts(" fail with black");
  gc->state = MRB_GC_STATE_MARK;
  paint_white(obj);
  paint_partial_white(gc, value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & gc->current_white_part);


  /* non-white child: barrier must be a no-op */
  puts(" fail with gray");
  gc->state = MRB_GC_STATE_MARK;
  paint_black(obj);
  paint_gray(value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  {
    /* same check through the mrb_value-taking wrapper */
    puts("test_mrb_field_write_barrier_value");
    obj = mrb_basic_ptr(mrb_ary_new(mrb));
    mrb_value value = mrb_str_new_lit(mrb, "value");
    paint_black(obj);
    paint_partial_white(gc, mrb_basic_ptr(value));

    gc->state = MRB_GC_STATE_MARK;
    mrb_field_write_barrier_value(mrb, obj, value);

    mrb_assert(is_gray(mrb_basic_ptr(value)));
  }

  mrb_close(mrb);
}
1641
/* Self-test: mrb_write_barrier grays a black object and pushes it on the
 * atomic gray list; a gray object is left untouched. */
void
test_mrb_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_write_barrier");
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  paint_black(obj);

  puts(" in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));
  mrb_assert(gc->atomic_gray_list == obj);


  /* non-black object: barrier must be a no-op */
  puts(" fail with gray");
  paint_gray(obj);
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));

  mrb_close(mrb);
}
1669
/* Self-test: add_gray_list prepends objects to gc->gray_list and
 * paints them gray. */
void
test_add_gray_list(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj1, *obj2;
  mrb_gc *gc = &mrb->gc;

  puts("test_add_gray_list");
  change_gen_gc_mode(mrb, gc, FALSE);
  mrb_assert(gc->gray_list == NULL);
  obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj1);
  mrb_assert(gc->gray_list == obj1);
  mrb_assert(is_gray(obj1));

  /* second add goes to the head; previous head is linked via gcnext */
  obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj2);
  mrb_assert(gc->gray_list == obj2);
  mrb_assert(gc->gray_list->gcnext == obj1);
  mrb_assert(is_gray(obj2));

  mrb_close(mrb);
}
1693
/* Self-test: gc_gray_mark blackens a gray object, grays its children,
 * and returns the number of children queued. */
void
test_gc_gray_mark(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value obj_v, value_v;
  struct RBasic *obj;
  size_t gray_num = 0;
  mrb_gc *gc = &mrb->gc;

  puts("test_gc_gray_mark");

  /* a class has many reachable children (methods, ivars, superclass) */
  puts(" in MRB_TT_CLASS");
  obj = (struct RBasic*)mrb->object_class;
  paint_gray(obj);
  gray_num = gc_gray_mark(mrb, gc, obj);
  mrb_assert(is_black(obj));
  mrb_assert(gray_num > 1);

  /* an array with a single white element queues exactly that element */
  puts(" in MRB_TT_ARRAY");
  obj_v = mrb_ary_new(mrb);
  value_v = mrb_str_new_lit(mrb, "test");
  paint_gray(mrb_basic_ptr(obj_v));
  paint_partial_white(gc, mrb_basic_ptr(value_v));
  mrb_ary_push(mrb, obj_v, value_v);
  gray_num = gc_gray_mark(mrb, gc, mrb_basic_ptr(obj_v));
  mrb_assert(is_black(mrb_basic_ptr(obj_v)));
  mrb_assert(is_gray(mrb_basic_ptr(value_v)));
  mrb_assert(gray_num == 1);

  mrb_close(mrb);
}
1725
/* Self-test: drive the incremental GC state machine step by step
 * (ROOT -> MARK -> SWEEP -> ROOT), verify live-object accounting, then
 * exercise minor and major collections in generational mode. */
void
test_incremental_gc(void)
{
  mrb_state *mrb = mrb_open();
  /* max = ~0: effectively unlimited per-step work budget */
  size_t max = ~0, live = 0, total = 0, freed = 0;
  RVALUE *free;
  mrb_heap_page *page;
  mrb_gc *gc = &mrb->gc;

  puts("test_incremental_gc");
  change_gen_gc_mode(mrb, gc, FALSE);

  puts(" in mrb_full_gc");
  mrb_full_gc(mrb);

  mrb_assert(gc->state == MRB_GC_STATE_ROOT);
  puts(" in MRB_GC_STATE_ROOT");
  incremental_gc(mrb, gc, max);
  mrb_assert(gc->state == MRB_GC_STATE_MARK);
  puts(" in MRB_GC_STATE_MARK");
  incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
  mrb_assert(gc->state == MRB_GC_STATE_SWEEP);

  /* after marking: count black (live) slots; a gray non-dead object
     here would indicate an incomplete mark phase */
  puts(" in MRB_GC_STATE_SWEEP");
  page = gc->heaps;
  while (page) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    while (p<e) {
      if (is_black(&p->as.basic)) {
        live++;
      }
      if (is_gray(&p->as.basic) && !is_dead(gc, &p->as.basic)) {
        printf("%p\n", &p->as.basic);
      }
      p++;
    }
    page = page->next;
    total += MRB_HEAP_PAGE_SIZE;
  }

  mrb_assert(gc->gray_list == NULL);

  incremental_gc(mrb, gc, max);
  mrb_assert(gc->state == MRB_GC_STATE_SWEEP);

  incremental_gc(mrb, gc, max);
  mrb_assert(gc->state == MRB_GC_STATE_ROOT);

  /* count freed slots on the first heap's freelist */
  free = (RVALUE*)gc->heaps->freelist;
  while (free) {
    freed++;
    free = (RVALUE*)free->as.free.next;
  }

  /* bookkeeping must agree with the manual census above */
  mrb_assert(gc->live == live);
  mrb_assert(gc->live == total-freed);

  puts("test_incremental_gc(gen)");
  incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
  change_gen_gc_mode(mrb, gc, TRUE);

  mrb_assert(gc->full == FALSE);
  mrb_assert(gc->state == MRB_GC_STATE_ROOT);

  /* force a minor GC to escalate by zeroing the major-GC threshold */
  puts(" in minor");
  mrb_assert(is_minor_gc(gc));
  mrb_assert(gc->majorgc_old_threshold > 0);
  gc->majorgc_old_threshold = 0;
  mrb_incremental_gc(mrb);
  mrb_assert(gc->full == TRUE);
  mrb_assert(gc->state == MRB_GC_STATE_ROOT);

  /* run the major GC to completion; full flag clears at cycle end */
  puts(" in major");
  mrb_assert(is_major_gc(gc));
  do {
    mrb_incremental_gc(mrb);
  } while (gc->state != MRB_GC_STATE_ROOT);
  mrb_assert(gc->full == FALSE);

  mrb_close(mrb);
}
1808
/* Self-test: sweeping two empty heap pages with enough budget releases
 * the extra page, leaving a single (free) heap. */
void
test_incremental_sweep_phase(void)
{
  mrb_state *mrb = mrb_open();
  mrb_gc *gc = &mrb->gc;

  puts("test_incremental_sweep_phase");

  add_heap(mrb, gc);
  gc->sweeps = gc->heaps;

  mrb_assert(gc->heaps->next->next == NULL);
  mrb_assert(gc->free_heaps->next->next == NULL);
  incremental_sweep_phase(mrb, gc, MRB_HEAP_PAGE_SIZE * 3);

  mrb_assert(gc->heaps->next == NULL);
  mrb_assert(gc->heaps == gc->free_heaps);

  mrb_close(mrb);
}
1829
/* GC.test — run every GC self-test in sequence; returns nil.
 * Only compiled when both GC_TEST and GC_DEBUG are defined. */
static mrb_value
gc_test(mrb_state *mrb, mrb_value self)
{
  test_mrb_field_write_barrier();
  test_mrb_write_barrier();
  test_add_gray_list();
  test_gc_gray_mark();
  test_incremental_gc();
  test_incremental_sweep_phase();
  return mrb_nil_value();
}
1841#endif /* GC_DEBUG */
1842#endif /* GC_TEST */
Note: See TracBrowser for help on using the repository browser.