source: EcnlProtoTool/trunk/mruby-1.2.0/src/gc.c@ 270

Last change on this file since 270 was 270, checked in by coas-nagasima, 7 years ago

mruby版ECNLプロトタイピング・ツールを追加

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
  • Property svn:mime-type set to text/x-csrc
File size: 38.9 KB
Line 
1/*
2** gc.c - garbage collector for mruby
3**
4** See Copyright Notice in mruby.h
5*/
6
7#include <string.h>
8#include <stdlib.h>
9#include "mruby.h"
10#include "mruby/array.h"
11#include "mruby/class.h"
12#include "mruby/data.h"
13#include "mruby/hash.h"
14#include "mruby/proc.h"
15#include "mruby/range.h"
16#include "mruby/string.h"
17#include "mruby/variable.h"
18#include "mruby/gc.h"
19#include "mruby/error.h"
20
21/*
22 = Tri-color Incremental Garbage Collection
23
24 mruby's GC is Tri-color Incremental GC with Mark & Sweep.
25 Algorithm details are omitted.
26 Instead, the implementation part is described below.
27
28 == Object's Color
29
30 Each object can be painted in three colors:
31
32 * White - Unmarked.
33 * Gray - Marked, But the child objects are unmarked.
34 * Black - Marked, the child objects are also marked.
35
36 == Two White Types
37
38 There're two white color types in a flip-flop fashion: White-A and White-B,
39 which respectively represent the Current White color (the newly allocated
40 objects in the current GC cycle) and the Sweep Target White color (the
41 dead objects to be swept).
42
43 A and B will be switched just at the beginning of the next GC cycle. At
44 that time, all the dead objects have been swept, while the newly created
45 objects in the current GC cycle which finally remains White are now
46 regarded as dead objects. Instead of traversing all the White-A objects and
47 painting them as White-B, just switch the meaning of White-A and White-B as
48 this will be much cheaper.
49
50 As a result, the objects we sweep in the current GC cycle are always
51 left from the previous GC cycle. This allows us to sweep objects
52 incrementally, without the disturbance of the newly created objects.
53
54 == Execution Timing
55
56 GC Execution Time and Each step interval are decided by live objects count.
57 List of Adjustment API:
58
59 * gc_interval_ratio_set
60 * gc_step_ratio_set
61
62 For details, see the comments for each function.
63
64 == Write Barrier
65
66 mruby implementer and C extension library writer must insert a write
67 barrier when updating a reference from a field of an object.
68 When updating a reference from a field of object A to object B,
69 two different types of write barrier are available:
70
71 * mrb_field_write_barrier - target B object for a mark.
72 * mrb_write_barrier - target A object for a mark.
73
74 == Generational Mode
75
 mruby's GC offers a Generational Mode while re-using the tri-color GC
77 infrastructure. It will treat the Black objects as Old objects after each
78 sweep phase, instead of painting them White. The key ideas are still the same
79 as traditional generational GC:
80
81 * Minor GC - just traverse the Young objects (Gray objects) in the mark
82 phase, then only sweep the newly created objects, and leave
83 the Old objects live.
84
85 * Major GC - same as a full regular GC cycle.
86
87 The difference from "traditional" generational GC is, that the major GC
88 in mruby is triggered incrementally in a tri-color manner.
89
90
91 For details, see the comments for each function.
92
93*/
94
/* Header kept at the front of a free heap slot; `next` chains slots into a
 * per-page singly-linked free list. Shares MRB_OBJECT_HEADER with every
 * live object, so `tt` can be tested for MRB_TT_FREE either way. */
struct free_obj {
  MRB_OBJECT_HEADER;
  struct RBasic *next;
};

/* One heap slot: a union large enough to hold any mruby object type.
 * Every GC-managed object occupies exactly one RVALUE in some heap page. */
typedef struct {
  union {
    struct free_obj free;
    struct RBasic basic;
    struct RObject object;
    struct RClass klass;
    struct RString string;
    struct RArray array;
    struct RHash hash;
    struct RRange range;
    struct RData data;
    struct RProc proc;
    struct RException exc;
#ifdef MRB_WORD_BOXING
    /* with word boxing, floats and C pointers are heap-allocated too */
    struct RFloat floatv;
    struct RCptr cptr;
#endif
  } as;
} RVALUE;
119
#ifdef GC_PROFILE
#include <stdio.h>
#include <sys/time.h>

static double program_invoke_time = 0;
static double gc_time = 0;
static double gc_total_time = 0;

/* Wall-clock time in seconds as a double (microsecond resolution). */
static double
gettimeofday_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec * 1e-6;
}

/* Both reporting macros expect a local variable `gc` (mrb_gc*) in scope.
 * FIX: is_major_gc() takes an mrb_gc*, not an mrb_state*; the previous
 * `is_major_gc(mrb)` expanded to members mrb_state does not have and
 * failed to compile whenever GC_PROFILE was defined. */
#define GC_INVOKE_TIME_REPORT(with) do {\
  fprintf(stderr, "%s\n", with);\
  fprintf(stderr, "gc_invoke: %19.3f\n", gettimeofday_time() - program_invoke_time);\
  fprintf(stderr, "is_generational: %d\n", is_generational(gc));\
  fprintf(stderr, "is_major_gc: %d\n", is_major_gc(gc));\
} while(0)

/* Start timing one GC invocation. */
#define GC_TIME_START do {\
  gc_time = gettimeofday_time();\
} while(0)

/* Stop timing and dump collector statistics to stderr. */
#define GC_TIME_STOP_AND_REPORT do {\
  gc_time = gettimeofday_time() - gc_time;\
  gc_total_time += gc_time;\
  fprintf(stderr, "gc_state: %d\n", gc->state);\
  fprintf(stderr, "live: %zu\n", gc->live);\
  fprintf(stderr, "majorgc_old_threshold: %zu\n", gc->majorgc_old_threshold);\
  fprintf(stderr, "gc_threshold: %zu\n", gc->threshold);\
  fprintf(stderr, "gc_time: %30.20f\n", gc_time);\
  fprintf(stderr, "gc_total_time: %30.20f\n\n", gc_total_time);\
} while(0)
#else
#define GC_INVOKE_TIME_REPORT(s)
#define GC_TIME_START
#define GC_TIME_STOP_AND_REPORT
#endif
162
#ifdef GC_DEBUG
#define DEBUG(x) (x)
#else
#define DEBUG(x)
#endif

/* Number of object slots per heap page. */
#ifndef MRB_HEAP_PAGE_SIZE
#define MRB_HEAP_PAGE_SIZE 1024
#endif

/* Base work unit (in visited children) of one incremental GC step. */
#define GC_STEP_SIZE 1024

/* white: 011, black: 100, gray: 000 */
#define GC_GRAY 0
#define GC_WHITE_A 1
#define GC_WHITE_B (1 << 1)
#define GC_BLACK (1 << 2)
#define GC_WHITES (GC_WHITE_A | GC_WHITE_B)
#define GC_COLOR_MASK 7

#define paint_gray(o) ((o)->color = GC_GRAY)
#define paint_black(o) ((o)->color = GC_BLACK)
/* paint_white sets BOTH white bits: white in either GC cycle */
#define paint_white(o) ((o)->color = GC_WHITES)
/* paint with only the current-cycle white (new objects / sweep survivors) */
#define paint_partial_white(s, o) ((o)->color = (s)->current_white_part)
#define is_gray(o) ((o)->color == GC_GRAY)
#define is_white(o) ((o)->color & GC_WHITES)
#define is_black(o) ((o)->color & GC_BLACK)
/* swap the meaning of White-A and White-B at the start of a new cycle */
#define flip_white_part(s) ((s)->current_white_part = other_white_part(s))
#define other_white_part(s) ((s)->current_white_part ^ GC_WHITES)
/* dead = painted with the sweep-target white, or already on a free list */
#define is_dead(s, o) (((o)->color & other_white_part(s) & GC_WHITES) || (o)->tt == MRB_TT_FREE)

/* first object slot of a heap page */
#define objects(p) ((RVALUE *)p->objects)
195
196MRB_API void*
197mrb_realloc_simple(mrb_state *mrb, void *p, size_t len)
198{
199 void *p2;
200
201 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
202 if (!p2 && len > 0 && mrb->gc.heaps) {
203 mrb_full_gc(mrb);
204 p2 = (mrb->allocf)(mrb, p, len, mrb->allocf_ud);
205 }
206
207 return p2;
208}
209
210MRB_API void*
211mrb_realloc(mrb_state *mrb, void *p, size_t len)
212{
213 void *p2;
214
215 p2 = mrb_realloc_simple(mrb, p, len);
216 if (!p2 && len) {
217 if (mrb->gc.out_of_memory) {
218 /* mrb_panic(mrb); */
219 }
220 else {
221 mrb->gc.out_of_memory = TRUE;
222 mrb_exc_raise(mrb, mrb_obj_value(mrb->nomem_err));
223 }
224 }
225 else {
226 mrb->gc.out_of_memory = FALSE;
227 }
228
229 return p2;
230}
231
232MRB_API void*
233mrb_malloc(mrb_state *mrb, size_t len)
234{
235 return mrb_realloc(mrb, 0, len);
236}
237
238MRB_API void*
239mrb_malloc_simple(mrb_state *mrb, size_t len)
240{
241 return mrb_realloc_simple(mrb, 0, len);
242}
243
244MRB_API void*
245mrb_calloc(mrb_state *mrb, size_t nelem, size_t len)
246{
247 void *p;
248
249 if (nelem > 0 && len > 0 &&
250 nelem <= SIZE_MAX / len) {
251 size_t size;
252 size = nelem * len;
253 p = mrb_malloc(mrb, size);
254
255 memset(p, 0, size);
256 }
257 else {
258 p = NULL;
259 }
260
261 return p;
262}
263
264MRB_API void
265mrb_free(mrb_state *mrb, void *p)
266{
267 (mrb->allocf)(mrb, p, 0, mrb->allocf_ud);
268}
269
/* TRUE if `object` is garbage for the current cycle: it carries the
 * sweep-target white color, or already sits on a free list. */
MRB_API mrb_bool
mrb_object_dead_p(mrb_state *mrb, struct RBasic *object) {
  return is_dead(&mrb->gc, object);
}
274
275static void
276link_heap_page(mrb_gc *gc, mrb_heap_page *page)
277{
278 page->next = gc->heaps;
279 if (gc->heaps)
280 gc->heaps->prev = page;
281 gc->heaps = page;
282}
283
284static void
285unlink_heap_page(mrb_gc *gc, mrb_heap_page *page)
286{
287 if (page->prev)
288 page->prev->next = page->next;
289 if (page->next)
290 page->next->prev = page->prev;
291 if (gc->heaps == page)
292 gc->heaps = page->next;
293 page->prev = NULL;
294 page->next = NULL;
295}
296
297static void
298link_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
299{
300 page->free_next = gc->free_heaps;
301 if (gc->free_heaps) {
302 gc->free_heaps->free_prev = page;
303 }
304 gc->free_heaps = page;
305}
306
307static void
308unlink_free_heap_page(mrb_gc *gc, mrb_heap_page *page)
309{
310 if (page->free_prev)
311 page->free_prev->free_next = page->free_next;
312 if (page->free_next)
313 page->free_next->free_prev = page->free_prev;
314 if (gc->free_heaps == page)
315 gc->free_heaps = page->free_next;
316 page->free_prev = NULL;
317 page->free_next = NULL;
318}
319
320static void
321add_heap(mrb_state *mrb, mrb_gc *gc)
322{
323 mrb_heap_page *page = (mrb_heap_page *)mrb_calloc(mrb, 1, sizeof(mrb_heap_page) + MRB_HEAP_PAGE_SIZE * sizeof(RVALUE));
324 RVALUE *p, *e;
325 struct RBasic *prev = NULL;
326
327 for (p = objects(page), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
328 p->as.free.tt = MRB_TT_FREE;
329 p->as.free.next = prev;
330 prev = &p->as.basic;
331 }
332 page->freelist = prev;
333
334 link_heap_page(gc, page);
335 link_free_heap_page(gc, page);
336}
337
/* GC tuning defaults, in percent (see gc_interval_ratio_set / gc_step_ratio_set). */
#define DEFAULT_GC_INTERVAL_RATIO 200
#define DEFAULT_GC_STEP_RATIO 200
/* a major GC triggers once live objects exceed this ratio of live_after_mark */
#define DEFAULT_MAJOR_GC_INC_RATIO 200
#define is_generational(gc) ((gc)->generational)
#define is_major_gc(gc) (is_generational(gc) && (gc)->full)
#define is_minor_gc(gc) (is_generational(gc) && !(gc)->full)
344
/* Initialize the GC embedded in `mrb`: allocate the GC arena (when not
 * fixed-size), create the first heap page, and apply tuning defaults.
 * NOTE(review): fields not set here (live, threshold, gray lists, ...)
 * are presumed zeroed by the mrb_state allocator — confirm at the caller. */
void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  /* start in generational mode; full=TRUE makes the first cycle a major GC */
  gc->generational = TRUE;
  gc->full = TRUE;
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
368
369static void obj_free(mrb_state *mrb, struct RBasic *obj);
370
371void
372free_heap(mrb_state *mrb, mrb_gc *gc)
373{
374 mrb_heap_page *page = gc->heaps;
375 mrb_heap_page *tmp;
376 RVALUE *p, *e;
377
378 while (page) {
379 tmp = page;
380 page = page->next;
381 for (p = objects(tmp), e=p+MRB_HEAP_PAGE_SIZE; p<e; p++) {
382 if (p->as.free.tt != MRB_TT_FREE)
383 obj_free(mrb, &p->as.basic);
384 }
385 mrb_free(mrb, tmp);
386 }
387}
388
389void
390mrb_gc_destroy(mrb_state *mrb, mrb_gc *gc)
391{
392 free_heap(mrb, gc);
393#ifndef MRB_GC_FIXED_ARENA
394 mrb_free(mrb, gc->arena);
395#endif
396}
397
/* Push `p` onto the GC arena so it survives until the arena index is
 * restored. Fixed-arena builds raise RuntimeError on overflow (after
 * forcing a little room so the raise itself can allocate); dynamic
 * builds grow the arena by 1.5x instead. */
static void
gc_protect(mrb_state *mrb, mrb_gc *gc, struct RBasic *p)
{
#ifdef MRB_GC_FIXED_ARENA
  if (gc->arena_idx >= MRB_GC_ARENA_SIZE) {
    /* arena overflow error */
    gc->arena_idx = MRB_GC_ARENA_SIZE - 4; /* force room in arena */
    mrb_raise(mrb, E_RUNTIME_ERROR, "arena overflow error");
  }
#else
  if (gc->arena_idx >= gc->arena_capa) {
    /* extend arena */
    gc->arena_capa = (int)(gc->arena_capa * 1.5);
    gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*gc->arena_capa);
  }
#endif
  gc->arena[gc->arena_idx++] = p;
}
416
417/* mrb_gc_protect() leaves the object in the arena */
418MRB_API void
419mrb_gc_protect(mrb_state *mrb, mrb_value obj)
420{
421 if (mrb_immediate_p(obj)) return;
422 gc_protect(mrb, &mrb->gc, mrb_basic_ptr(obj));
423}
424
425#define GC_ROOT_NAME "_gc_root_"
426
427/* mrb_gc_register() keeps the object from GC.
428
429 Register your object when it's exported to C world,
430 without reference from Ruby world, e.g. callback
 arguments. Don't forget to remove the object using
432 mrb_gc_unregister, otherwise your object will leak.
433*/
434
435MRB_API void
436mrb_gc_register(mrb_state *mrb, mrb_value obj)
437{
438 mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
439 mrb_value table = mrb_gv_get(mrb, root);
440
441 if (mrb_nil_p(table) || mrb_type(table) != MRB_TT_ARRAY) {
442 table = mrb_ary_new(mrb);
443 mrb_gv_set(mrb, root, table);
444 }
445 mrb_ary_push(mrb, table, obj);
446}
447
448/* mrb_gc_unregister() removes the object from GC root. */
449MRB_API void
450mrb_gc_unregister(mrb_state *mrb, mrb_value obj)
451{
452 mrb_sym root = mrb_intern_lit(mrb, GC_ROOT_NAME);
453 mrb_value table = mrb_gv_get(mrb, root);
454 struct RArray *a;
455 mrb_int i, j;
456
457 if (mrb_nil_p(table)) return;
458 if (mrb_type(table) != MRB_TT_ARRAY) {
459 mrb_gv_set(mrb, root, mrb_nil_value());
460 return;
461 }
462 a = mrb_ary_ptr(table);
463 mrb_ary_modify(mrb, a);
464 for (i=j=0; i<a->len; i++) {
465 if (!mrb_obj_eq(mrb, a->ptr[i], obj)) {
466 a->ptr[j++] = a->ptr[i];
467 }
468 }
469 a->len = j;
470}
471
/* Allocate a fresh object of type `ttype` with class `cls`. May run an
 * incremental GC step (or a full GC under MRB_GC_STRESS), grows the heap
 * when no free slot exists, zeroes the slot, pushes the object onto the
 * GC arena, and paints it with the current-cycle white. */
MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (gc->threshold < gc->live) {
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);
  }

  /* pop one slot from the first page that has free slots */
  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    /* page exhausted: drop it from the free-page list */
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);
  return p;
}
503
/* Paint `obj` gray and push it onto the gray list for later traversal. */
static inline void
add_gray_list(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
#ifdef MRB_GC_STRESS
  /* catch corrupted objects early under stress testing */
  if (obj->tt > MRB_TT_MAXDEFINE) {
    abort();
  }
#endif
  paint_gray(obj);
  obj->gcnext = gc->gray_list;
  gc->gray_list = obj;
}
516
/* Mark every live value on a context's VM value stack. The live range is
 * [stbase, stack + current frame's nregs), clamped to stend. Slots that
 * reference already-freed objects are overwritten with nil rather than
 * marked. */
static void
mark_context_stack(mrb_state *mrb, struct mrb_context *c)
{
  size_t i;
  size_t e;

  e = c->stack - c->stbase;
  if (c->ci) e += c->ci->nregs;
  if (c->stbase + e > c->stend) e = c->stend - c->stbase;
  for (i=0; i<e; i++) {
    mrb_value v = c->stbase[i];

    if (!mrb_immediate_p(v)) {
      if (mrb_basic_ptr(v)->tt == MRB_TT_FREE) {
        /* stale reference to a freed object: neutralize it */
        c->stbase[i] = mrb_nil_value();
      }
      else {
        mrb_gc_mark(mrb, mrb_basic_ptr(v));
      }
    }
  }
}
539
/* Mark everything reachable from one execution context: its value stack,
 * each call frame's env/proc/target_class, the active ensure handlers,
 * and the previous fiber in the resume chain. */
static void
mark_context(mrb_state *mrb, struct mrb_context *c)
{
  int i, e = 0;
  mrb_callinfo *ci;

  /* mark stack */
  mark_context_stack(mrb, c);

  /* mark VM stack */
  if (c->cibase) {
    for (ci = c->cibase; ci <= c->ci; ci++) {
      /* remember the deepest ensure index seen across frames */
      if (ci->eidx > e) {
        e = ci->eidx;
      }
      mrb_gc_mark(mrb, (struct RBasic*)ci->env);
      mrb_gc_mark(mrb, (struct RBasic*)ci->proc);
      mrb_gc_mark(mrb, (struct RBasic*)ci->target_class);
    }
  }
  /* mark ensure stack */
  for (i=0; i<e; i++) {
    mrb_gc_mark(mrb, (struct RBasic*)c->ensure[i]);
  }
  /* mark fibers */
  if (c->prev && c->prev->fib) {
    mrb_gc_mark(mrb, (struct RBasic*)c->prev->fib);
  }
}
569
/* Pop `obj` off the gray list, paint it black, and mark (gray) all of
 * its direct children, dispatching on the object's type tag. */
static void
gc_mark_children(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
{
  mrb_assert(is_gray(obj));
  paint_black(obj);
  gc->gray_list = obj->gcnext;
  /* every object references its class */
  mrb_gc_mark(mrb, (struct RBasic*)obj->c);
  switch (obj->tt) {
  case MRB_TT_ICLASS:
    {
      struct RClass *c = (struct RClass*)obj;
      if (MRB_FLAG_TEST(c, MRB_FLAG_IS_ORIGIN))
        mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)((struct RClass*)obj)->super);
    }
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    {
      struct RClass *c = (struct RClass*)obj;

      /* method table and superclass, then ivars via fallthrough */
      mrb_gc_mark_mt(mrb, c);
      mrb_gc_mark(mrb, (struct RBasic*)c->super);
    }
    /* fall through */

  case MRB_TT_OBJECT:
  case MRB_TT_DATA:
  case MRB_TT_EXCEPTION:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      mrb_gc_mark(mrb, (struct RBasic*)p->env);
      mrb_gc_mark(mrb, (struct RBasic*)p->target_class);
    }
    break;

  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      /* only mark a private stack copy; a shared stack is marked with
       * its owning context */
      if (!MRB_ENV_STACK_SHARED_P(e)) {
        mrb_int i, len;

        len = MRB_ENV_STACK_LEN(e);
        for (i=0; i<len; i++) {
          mrb_gc_mark_value(mrb, e->stack[i]);
        }
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      if (c) mark_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    {
      struct RArray *a = (struct RArray*)obj;
      size_t i, e;

      for (i=0,e=a->len; i<e; i++) {
        mrb_gc_mark_value(mrb, a->ptr[i]);
      }
    }
    break;

  case MRB_TT_HASH:
    mrb_gc_mark_iv(mrb, (struct RObject*)obj);
    mrb_gc_mark_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    /* strings hold no object references */
    break;

  case MRB_TT_RANGE:
    {
      struct RRange *r = (struct RRange*)obj;

      if (r->edges) {
        mrb_gc_mark_value(mrb, r->edges->beg);
        mrb_gc_mark_value(mrb, r->edges->end);
      }
    }
    break;

  default:
    break;
  }
}
670
671MRB_API void
672mrb_gc_mark(mrb_state *mrb, struct RBasic *obj)
673{
674 if (obj == 0) return;
675 if (!is_white(obj)) return;
676 mrb_assert((obj)->tt != MRB_TT_FREE);
677 add_gray_list(mrb, &mrb->gc, obj);
678}
679
/* Release all memory owned by `obj` (but not the heap slot itself) and
 * tag the slot MRB_TT_FREE. Dispatches on the object's type. */
static void
obj_free(mrb_state *mrb, struct RBasic *obj)
{
  DEBUG(printf("obj_free(%p,tt=%d)\n",obj,obj->tt));
  switch (obj->tt) {
    /* immediate - no mark */
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
    /* cannot happen */
    return;

  case MRB_TT_FLOAT:
#ifdef MRB_WORD_BOXING
    /* boxed floats live on the heap: fall out to mark the slot free */
    break;
#else
    return;
#endif

  case MRB_TT_OBJECT:
  case MRB_TT_EXCEPTION:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;

  case MRB_TT_CLASS:
  case MRB_TT_MODULE:
  case MRB_TT_SCLASS:
    mrb_gc_free_mt(mrb, (struct RClass*)obj);
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    break;
  case MRB_TT_ICLASS:
    /* only the origin iclass owns the method table */
    if (MRB_FLAG_TEST(obj, MRB_FLAG_IS_ORIGIN))
      mrb_gc_free_mt(mrb, (struct RClass*)obj);
    break;
  case MRB_TT_ENV:
    {
      struct REnv *e = (struct REnv*)obj;

      /* a shared stack belongs to the context, not the env */
      if (!MRB_ENV_STACK_SHARED_P(e)) {
        mrb_free(mrb, e->stack);
        e->stack = NULL;
      }
    }
    break;

  case MRB_TT_FIBER:
    {
      struct mrb_context *c = ((struct RFiber*)obj)->cxt;

      /* NOTE(review): cxt may be NULL for a never-started fiber —
       * assumes mrb_free_context tolerates NULL; verify in state.c */
      if (c != mrb->root_c)
        mrb_free_context(mrb, c);
    }
    break;

  case MRB_TT_ARRAY:
    if (ARY_SHARED_P(obj))
      mrb_ary_decref(mrb, ((struct RArray*)obj)->aux.shared);
    else
      mrb_free(mrb, ((struct RArray*)obj)->ptr);
    break;

  case MRB_TT_HASH:
    mrb_gc_free_iv(mrb, (struct RObject*)obj);
    mrb_gc_free_hash(mrb, (struct RHash*)obj);
    break;

  case MRB_TT_STRING:
    mrb_gc_free_str(mrb, (struct RString*)obj);
    break;

  case MRB_TT_PROC:
    {
      struct RProc *p = (struct RProc*)obj;

      if (!MRB_PROC_CFUNC_P(p) && p->body.irep) {
        mrb_irep_decref(mrb, p->body.irep);
      }
    }
    break;

  case MRB_TT_RANGE:
    mrb_free(mrb, ((struct RRange*)obj)->edges);
    break;

  case MRB_TT_DATA:
    {
      struct RData *d = (struct RData*)obj;
      if (d->type && d->type->dfree) {
        d->type->dfree(mrb, d->data);
      }
      mrb_gc_free_iv(mrb, (struct RObject*)obj);
    }
    break;

  default:
    break;
  }
  obj->tt = MRB_TT_FREE;
}
779
/* Mark all GC roots: globals, the protection arena, the class hierarchy,
 * top_self, pending/preallocated exceptions, and every live context.
 * In a minor GC the gray lists are kept so old objects stay traversed. */
static void
root_scan_phase(mrb_state *mrb, mrb_gc *gc)
{
  size_t i, e;

  if (!is_minor_gc(gc)) {
    gc->gray_list = NULL;
    gc->atomic_gray_list = NULL;
  }

  mrb_gc_mark_gv(mrb);
  /* mark arena */
  for (i=0,e=gc->arena_idx; i<e; i++) {
    mrb_gc_mark(mrb, gc->arena[i]);
  }
  /* mark class hierarchy */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->object_class);
  /* mark top_self */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->top_self);
  /* mark exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->exc);
  /* mark pre-allocated exception */
  mrb_gc_mark(mrb, (struct RBasic*)mrb->nomem_err);

  mark_context(mrb, mrb->root_c);
  if (mrb->root_c->fib) {
    mrb_gc_mark(mrb, (struct RBasic*)mrb->root_c->fib);
  }
  if (mrb->root_c != mrb->c) {
    mark_context(mrb, mrb->c);
  }
}
812
813static size_t
814gc_gray_mark(mrb_state *mrb, mrb_gc *gc, struct RBasic *obj)
815{
816 size_t children = 0;
817
818 gc_mark_children(mrb, gc, obj);
819
820 switch (obj->tt) {
821 case MRB_TT_ICLASS:
822 children++;
823 break;
824
825 case MRB_TT_CLASS:
826 case MRB_TT_SCLASS:
827 case MRB_TT_MODULE:
828 {
829 struct RClass *c = (struct RClass*)obj;
830
831 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
832 children += mrb_gc_mark_mt_size(mrb, c);
833 children++;
834 }
835 break;
836
837 case MRB_TT_OBJECT:
838 case MRB_TT_DATA:
839 case MRB_TT_EXCEPTION:
840 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
841 break;
842
843 case MRB_TT_ENV:
844 children += (int)obj->flags;
845 break;
846
847 case MRB_TT_FIBER:
848 {
849 struct mrb_context *c = ((struct RFiber*)obj)->cxt;
850 size_t i;
851 mrb_callinfo *ci;
852
853 if (!c) break;
854 /* mark stack */
855 i = c->stack - c->stbase;
856 if (c->ci) i += c->ci->nregs;
857 if (c->stbase + i > c->stend) i = c->stend - c->stbase;
858 children += i;
859
860 /* mark ensure stack */
861 children += (c->ci) ? c->ci->eidx : 0;
862
863 /* mark closure */
864 if (c->cibase) {
865 for (i=0, ci = c->cibase; ci <= c->ci; i++, ci++)
866 ;
867 }
868 children += i;
869 }
870 break;
871
872 case MRB_TT_ARRAY:
873 {
874 struct RArray *a = (struct RArray*)obj;
875 children += a->len;
876 }
877 break;
878
879 case MRB_TT_HASH:
880 children += mrb_gc_mark_iv_size(mrb, (struct RObject*)obj);
881 children += mrb_gc_mark_hash_size(mrb, (struct RHash*)obj);
882 break;
883
884 case MRB_TT_PROC:
885 case MRB_TT_RANGE:
886 children+=2;
887 break;
888
889 default:
890 break;
891 }
892 return children;
893}
894
895
896static void
897gc_mark_gray_list(mrb_state *mrb, mrb_gc *gc) {
898 while (gc->gray_list) {
899 if (is_gray(gc->gray_list))
900 gc_mark_children(mrb, gc, gc->gray_list);
901 else
902 gc->gray_list = gc->gray_list->gcnext;
903 }
904}
905
906
907static size_t
908incremental_marking_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
909{
910 size_t tried_marks = 0;
911
912 while (gc->gray_list && tried_marks < limit) {
913 tried_marks += gc_gray_mark(mrb, gc, gc->gray_list);
914 }
915
916 return tried_marks;
917}
918
/* Atomic finish of the mark phase: re-scan the root context's stack,
 * drain the normal gray list, then drain the atomic gray list filled by
 * write barriers since marking began. */
static void
final_marking_phase(mrb_state *mrb, mrb_gc *gc)
{
  mark_context_stack(mrb, mrb->root_c);
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
  gc->gray_list = gc->atomic_gray_list;
  gc->atomic_gray_list = NULL;
  gc_mark_gray_list(mrb, gc);
  mrb_assert(gc->gray_list == NULL);
}
930
931static void
932prepare_incremental_sweep(mrb_state *mrb, mrb_gc *gc)
933{
934 gc->state = MRB_GC_STATE_SWEEP;
935 gc->sweeps = gc->heaps;
936 gc->live_after_mark = gc->live;
937}
938
/* Sweep up to `limit` slots' worth of pages: free dead objects back to
 * their page free lists, release pages that hold no live objects, and
 * (in generational mode) mark fully-live pages as old so minor GCs can
 * skip them. Returns the number of slots examined; 0 means sweeping is
 * complete. */
static size_t
incremental_sweep_phase(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  mrb_heap_page *page = gc->sweeps;
  size_t tried_sweep = 0;

  while (page && (tried_sweep < limit)) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    size_t freed = 0;
    mrb_bool dead_slot = TRUE;
    mrb_bool full = (page->freelist == NULL);

    if (is_minor_gc(gc) && page->old) {
      /* skip a slot which doesn't contain any young object */
      p = e;
      dead_slot = FALSE;
    }
    while (p<e) {
      if (is_dead(gc, &p->as.basic)) {
        if (p->as.basic.tt != MRB_TT_FREE) {
          obj_free(mrb, &p->as.basic);
          /* return the slot to this page's free list */
          p->as.free.next = page->freelist;
          page->freelist = (struct RBasic*)p;
          freed++;
        }
      }
      else {
        if (!is_generational(gc))
          paint_partial_white(gc, &p->as.basic); /* next gc target */
        dead_slot = 0;
      }
      p++;
    }

    /* free dead slot */
    /* release the whole page when it holds no live object (unless every
     * slot was freed in this very pass) */
    if (dead_slot && freed < MRB_HEAP_PAGE_SIZE) {
      mrb_heap_page *next = page->next;

      unlink_heap_page(gc, page);
      unlink_free_heap_page(gc, page);
      mrb_free(mrb, page);
      page = next;
    }
    else {
      /* a previously-full page regained free slots: relink it */
      if (full && freed > 0) {
        link_free_heap_page(gc, page);
      }
      if (page->freelist == NULL && is_minor_gc(gc))
        page->old = TRUE;
      else
        page->old = FALSE;
      page = page->next;
    }
    tried_sweep += MRB_HEAP_PAGE_SIZE;
    gc->live -= freed;
    gc->live_after_mark -= freed;
  }
  gc->sweeps = page;
  return tried_sweep;
}
1000
/* Advance the collector by one bounded step according to its state
 * machine (ROOT -> MARK -> SWEEP -> ROOT); `limit` bounds the work in
 * the mark/sweep phases. Returns the amount of work performed. */
static size_t
incremental_gc(mrb_state *mrb, mrb_gc *gc, size_t limit)
{
  switch (gc->state) {
  case MRB_GC_STATE_ROOT:
    root_scan_phase(mrb, gc);
    gc->state = MRB_GC_STATE_MARK;
    /* new cycle: swap the meaning of the two whites */
    flip_white_part(gc);
    return 0;
  case MRB_GC_STATE_MARK:
    if (gc->gray_list) {
      return incremental_marking_phase(mrb, gc, limit);
    }
    else {
      /* gray list drained: do the atomic finish, then start sweeping */
      final_marking_phase(mrb, gc);
      prepare_incremental_sweep(mrb, gc);
      return 0;
    }
  case MRB_GC_STATE_SWEEP: {
     size_t tried_sweep = 0;
     tried_sweep = incremental_sweep_phase(mrb, gc, limit);
     /* zero swept slots means all pages are done: cycle complete */
     if (tried_sweep == 0)
       gc->state = MRB_GC_STATE_ROOT;
     return tried_sweep;
  }
  default:
    /* unknown state */
    mrb_assert(0);
    return 0;
  }
}
1032
1033static void
1034incremental_gc_until(mrb_state *mrb, mrb_gc *gc, mrb_gc_state to_state)
1035{
1036 do {
1037 incremental_gc(mrb, gc, SIZE_MAX);
1038 } while (gc->state != to_state);
1039}
1040
1041static void
1042incremental_gc_step(mrb_state *mrb, mrb_gc *gc)
1043{
1044 size_t limit = 0, result = 0;
1045 limit = (GC_STEP_SIZE/100) * gc->step_ratio;
1046 while (result < limit) {
1047 result += incremental_gc(mrb, gc, limit);
1048 if (gc->state == MRB_GC_STATE_ROOT)
1049 break;
1050 }
1051
1052 gc->threshold = gc->live + GC_STEP_SIZE;
1053}
1054
/* Revert every old (black) object back to young/white by finishing any
 * in-flight major GC and running one full non-generational cycle.
 * Temporarily flips off generational mode so the sweep repaints
 * survivors white. */
static void
clear_all_old(mrb_state *mrb, mrb_gc *gc)
{
  mrb_bool origin_mode = gc->generational;

  mrb_assert(is_generational(gc));
  if (is_major_gc(gc)) {
    /* finish the half baked GC */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* Sweep the dead objects, then reset all the live objects
   * (including all the old objects, of course) to white. */
  gc->generational = FALSE;
  prepare_incremental_sweep(mrb, gc);
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->generational = origin_mode;

  /* The gray objects have already been painted as white */
  gc->atomic_gray_list = gc->gray_list = NULL;
}
1076
/* Run one GC increment: a complete cycle for a minor GC, or one bounded
 * step otherwise. When a cycle finishes, recompute the allocation
 * threshold and handle generational bookkeeping (schedule or finish a
 * major GC). No-op while GC is disabled. */
MRB_API void
mrb_incremental_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled) return;

  GC_INVOKE_TIME_REPORT("mrb_incremental_gc()");
  GC_TIME_START;

  if (is_minor_gc(gc)) {
    /* minor GC is cheap enough to run to completion */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }
  else {
    incremental_gc_step(mrb, gc);
  }

  if (gc->state == MRB_GC_STATE_ROOT) {
    /* a cycle just completed: set the next trigger point */
    mrb_assert(gc->live >= gc->live_after_mark);
    gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;
    if (gc->threshold < GC_STEP_SIZE) {
      gc->threshold = GC_STEP_SIZE;
    }

    if (is_major_gc(gc)) {
      /* major GC done: back to minor mode with a fresh old threshold */
      gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
      gc->full = FALSE;
    }
    else if (is_minor_gc(gc)) {
      if (gc->live > gc->majorgc_old_threshold) {
        /* too many old objects accumulated: demote them and go major */
        clear_all_old(mrb, gc);
        gc->full = TRUE;
      }
    }
  }

  GC_TIME_STOP_AND_REPORT;
}
1115
/* Perform a full gc cycle */
MRB_API void
mrb_full_gc(mrb_state *mrb)
{
  mrb_gc *gc = &mrb->gc;

  if (gc->disabled) return;

  GC_INVOKE_TIME_REPORT("mrb_full_gc()");
  GC_TIME_START;

  if (is_generational(gc)) {
    /* clear all the old objects back to young */
    clear_all_old(mrb, gc);
    gc->full = TRUE;
  }
  else if (gc->state != MRB_GC_STATE_ROOT) {
    /* finish half baked GC cycle */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  }

  /* run one complete mark & sweep cycle, then reset the threshold */
  incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
  gc->threshold = (gc->live_after_mark/100) * gc->interval_ratio;

  if (is_generational(gc)) {
    /* return to minor-GC mode with a fresh major-GC trigger */
    gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }

  GC_TIME_STOP_AND_REPORT;
}
1147
/* Public alias for mrb_full_gc(): run a complete GC cycle. */
MRB_API void
mrb_garbage_collect(mrb_state *mrb)
{
  mrb_full_gc(mrb);
}
1153
/* Snapshot the current arena index; pass the result to
 * mrb_gc_arena_restore() to release objects protected since the save. */
MRB_API int
mrb_gc_arena_save(mrb_state *mrb)
{
  return mrb->gc.arena_idx;
}
1159
/* Restore the arena index saved by mrb_gc_arena_save(), un-protecting
 * objects pushed since then. In dynamically-sized builds, also shrink an
 * over-grown arena back toward MRB_GC_ARENA_SIZE. */
MRB_API void
mrb_gc_arena_restore(mrb_state *mrb, int idx)
{
  mrb_gc *gc = &mrb->gc;

#ifndef MRB_GC_FIXED_ARENA
  int capa = gc->arena_capa;

  if (idx < capa / 2) {
    /* arena is less than half used: shrink by about a third */
    capa = (int)(capa * 0.66);
    if (capa < MRB_GC_ARENA_SIZE) {
      capa = MRB_GC_ARENA_SIZE;
    }
    if (capa != gc->arena_capa) {
      gc->arena = (struct RBasic**)mrb_realloc(mrb, gc->arena, sizeof(struct RBasic*)*capa);
      gc->arena_capa = capa;
    }
  }
#endif
  gc->arena_idx = idx;
}
1181
1182/*
1183 * Field write barrier
1184 * Paint obj(Black) -> value(White) to obj(Black) -> value(Gray).
1185 */
1186
/* Write barrier for storing white `value` into a field of black `obj`:
 * during marking (or in generational mode) the value is re-grayed;
 * during sweeping the holder is instead repainted with the current
 * white so the reference stays valid without further barriers. */
MRB_API void
mrb_field_write_barrier(mrb_state *mrb, struct RBasic *obj, struct RBasic *value)
{
  mrb_gc *gc = &mrb->gc;

  /* the tri-color invariant is only violated by black -> white edges */
  if (!is_black(obj)) return;
  if (!is_white(value)) return;

  mrb_assert(gc->state == MRB_GC_STATE_MARK || (!is_dead(gc, value) && !is_dead(gc, obj)));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);

  if (is_generational(gc) || gc->state == MRB_GC_STATE_MARK) {
    add_gray_list(mrb, gc, value);
  }
  else {
    mrb_assert(gc->state == MRB_GC_STATE_SWEEP);
    paint_partial_white(gc, obj); /* for never write barriers */
  }
}
1206
1207/*
1208 * Write barrier
1209 * Paint obj(Black) to obj(Gray).
1210 *
1211 * The object that is painted gray will be traversed atomically in final
1212 * mark phase. So you use this write barrier if it's frequency written spot.
1213 * e.g. Set element on Array.
1214 */
1215
/* Write barrier that re-grays a black `obj` and queues it on the atomic
 * gray list, which is re-traversed once in the final (atomic) mark step.
 * Preferred over mrb_field_write_barrier for frequently-updated holders. */
MRB_API void
mrb_write_barrier(mrb_state *mrb, struct RBasic *obj)
{
  mrb_gc *gc = &mrb->gc;

  if (!is_black(obj)) return;

  mrb_assert(!is_dead(gc, obj));
  mrb_assert(is_generational(gc) || gc->state != MRB_GC_STATE_ROOT);
  paint_gray(obj);
  obj->gcnext = gc->atomic_gray_list;
  gc->atomic_gray_list = obj;
}
1229
1230/*
1231 * call-seq:
1232 * GC.start -> nil
1233 *
1234 * Initiates full garbage collection.
1235 *
1236 */
1237
/* Ruby method GC.start: run a full collection, return nil. */
static mrb_value
gc_start(mrb_state *mrb, mrb_value obj)
{
  mrb_full_gc(mrb);
  return mrb_nil_value();
}
1244
1245/*
1246 * call-seq:
1247 * GC.enable -> true or false
1248 *
1249 * Enables garbage collection, returning <code>true</code> if garbage
1250 * collection was previously disabled.
1251 *
1252 * GC.disable #=> false
1253 * GC.enable #=> true
1254 * GC.enable #=> false
1255 *
1256 */
1257
1258static mrb_value
1259gc_enable(mrb_state *mrb, mrb_value obj)
1260{
1261 mrb_bool old = mrb->gc.disabled;
1262
1263 mrb->gc.disabled = FALSE;
1264
1265 return mrb_bool_value(old);
1266}
1267
1268/*
1269 * call-seq:
1270 * GC.disable -> true or false
1271 *
1272 * Disables garbage collection, returning <code>true</code> if garbage
1273 * collection was already disabled.
1274 *
1275 * GC.disable #=> false
1276 * GC.disable #=> true
1277 *
1278 */
1279
1280static mrb_value
1281gc_disable(mrb_state *mrb, mrb_value obj)
1282{
1283 mrb_bool old = mrb->gc.disabled;
1284
1285 mrb->gc.disabled = TRUE;
1286
1287 return mrb_bool_value(old);
1288}
1289
1290/*
1291 * call-seq:
1292 * GC.interval_ratio -> fixnum
1293 *
1294 * Returns ratio of GC interval. Default value is 200(%).
1295 *
1296 */
1297
1298static mrb_value
1299gc_interval_ratio_get(mrb_state *mrb, mrb_value obj)
1300{
1301 return mrb_fixnum_value(mrb->gc.interval_ratio);
1302}
1303
1304/*
1305 * call-seq:
1306 * GC.interval_ratio = fixnum -> nil
1307 *
 * Updates the ratio of the GC interval. Default value is 200(%).
 * GC starts as soon as all GC steps end if you set 100(%).
1310 *
1311 */
1312
1313static mrb_value
1314gc_interval_ratio_set(mrb_state *mrb, mrb_value obj)
1315{
1316 mrb_int ratio;
1317
1318 mrb_get_args(mrb, "i", &ratio);
1319 mrb->gc.interval_ratio = ratio;
1320 return mrb_nil_value();
1321}
1322
1323/*
1324 * call-seq:
1325 * GC.step_ratio -> fixnum
1326 *
1327 * Returns step span ratio of Incremental GC. Default value is 200(%).
1328 *
1329 */
1330
1331static mrb_value
1332gc_step_ratio_get(mrb_state *mrb, mrb_value obj)
1333{
1334 return mrb_fixnum_value(mrb->gc.step_ratio);
1335}
1336
1337/*
1338 * call-seq:
1339 * GC.step_ratio = fixnum -> nil
1340 *
 * Updates the step span ratio of incremental GC. Default value is 200(%).
 * One step of incremental GC becomes longer if the ratio is larger.
1343 *
1344 */
1345
1346static mrb_value
1347gc_step_ratio_set(mrb_state *mrb, mrb_value obj)
1348{
1349 mrb_int ratio;
1350
1351 mrb_get_args(mrb, "i", &ratio);
1352 mrb->gc.step_ratio = ratio;
1353 return mrb_nil_value();
1354}
1355
/*
 * Switches the collector between generational and plain incremental mode.
 * Assumes the collector is at a cycle boundary when leaving generational
 * mode (asserted below); callers go through GC.generational_mode=.
 */
static void
change_gen_gc_mode(mrb_state *mrb, mrb_gc *gc, mrb_bool enable)
{
  if (is_generational(gc) && !enable) {
    /* Leaving generational mode: clear every "old" mark so all objects
       are treated uniformly by the normal incremental collector. */
    clear_all_old(mrb, gc);
    mrb_assert(gc->state == MRB_GC_STATE_ROOT);
    gc->full = FALSE;
  }
  else if (!is_generational(gc) && enable) {
    /* Entering generational mode: drive the current incremental cycle to
       completion first, then derive the threshold that triggers the next
       major (full) collection from the surviving live size. */
    incremental_gc_until(mrb, gc, MRB_GC_STATE_ROOT);
    gc->majorgc_old_threshold = gc->live_after_mark/100 * DEFAULT_MAJOR_GC_INC_RATIO;
    gc->full = FALSE;
  }
  gc->generational = enable;
}
1371
1372/*
1373 * call-seq:
1374 * GC.generational_mode -> true or false
1375 *
1376 * Returns generational or normal gc mode.
1377 *
1378 */
1379
1380static mrb_value
1381gc_generational_mode_get(mrb_state *mrb, mrb_value self)
1382{
1383 return mrb_bool_value(mrb->gc.generational);
1384}
1385
1386/*
1387 * call-seq:
1388 * GC.generational_mode = true or false -> true or false
1389 *
1390 * Changes to generational or normal gc mode.
1391 *
1392 */
1393
1394static mrb_value
1395gc_generational_mode_set(mrb_state *mrb, mrb_value self)
1396{
1397 mrb_bool enable;
1398
1399 mrb_get_args(mrb, "b", &enable);
1400 if (mrb->gc.generational != enable)
1401 change_gen_gc_mode(mrb, &mrb->gc, enable);
1402
1403 return mrb_bool_value(enable);
1404}
1405
1406
1407static void
1408gc_each_objects(mrb_state *mrb, mrb_gc *gc, mrb_each_object_callback *callback, void *data)
1409{
1410 mrb_heap_page* page = gc->heaps;
1411
1412 while (page != NULL) {
1413 RVALUE *p, *pend;
1414
1415 p = objects(page);
1416 pend = p + MRB_HEAP_PAGE_SIZE;
1417 for (;p < pend; p++) {
1418 (*callback)(mrb, &p->as.basic, data);
1419 }
1420
1421 page = page->next;
1422 }
1423}
1424
1425void
1426mrb_objspace_each_objects(mrb_state *mrb, mrb_each_object_callback *callback, void *data)
1427{
1428 gc_each_objects(mrb, &mrb->gc, callback, data);
1429}
1430
1431#ifdef GC_TEST
1432#ifdef GC_DEBUG
1433static mrb_value gc_test(mrb_state *, mrb_value);
1434#endif
1435#endif
1436
1437void
1438mrb_init_gc(mrb_state *mrb)
1439{
1440 struct RClass *gc;
1441
1442 gc = mrb_define_module(mrb, "GC");
1443
1444 mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE());
1445 mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE());
1446 mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE());
1447 mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE());
1448 mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1));
1449 mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE());
1450 mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1));
1451 mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1));
1452 mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE());
1453#ifdef GC_TEST
1454#ifdef GC_DEBUG
1455 mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE());
1456#endif
1457#endif
1458}
1459
1460#ifdef GC_TEST
1461#ifdef GC_DEBUG
/*
 * Exercises mrb_field_write_barrier: the barrier should only act when the
 * container is black and the stored value is white, and only during the
 * mark phase. Uses mrb_assert, so failures abort only in debug builds.
 */
void
test_mrb_field_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj, *value;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_field_write_barrier");
  gc->generational = FALSE;
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  value = mrb_basic_ptr(mrb_str_new_lit(mrb, "value"));
  /* Set up the triggering configuration: black container, white value. */
  paint_black(obj);
  paint_partial_white(gc, value);


  puts("  in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_field_write_barrier(mrb, obj, value);

  /* During mark, the white value must be grayed (queued for traversal). */
  mrb_assert(is_gray(value));


  puts("  in MRB_GC_STATE_SWEEP");
  paint_partial_white(gc, value);
  gc->state = MRB_GC_STATE_SWEEP;
  mrb_field_write_barrier(mrb, obj, value);

  /* During sweep, both objects end up in the current-white partition. */
  mrb_assert(obj->color & gc->current_white_part);
  mrb_assert(value->color & gc->current_white_part);


  puts("  fail with black");
  /* Container not black: the barrier must do nothing. */
  gc->state = MRB_GC_STATE_MARK;
  paint_white(obj);
  paint_partial_white(gc, value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(obj->color & gc->current_white_part);


  puts("  fail with gray");
  /* Value not white: the barrier must leave it as-is (still gray). */
  gc->state = MRB_GC_STATE_MARK;
  paint_black(obj);
  paint_gray(value);
  mrb_field_write_barrier(mrb, obj, value);

  mrb_assert(is_gray(value));


  {
    /* Same check through the mrb_value-taking wrapper. Note this inner
       'value' (an mrb_value) shadows the outer struct RBasic pointer. */
    puts("test_mrb_field_write_barrier_value");
    obj = mrb_basic_ptr(mrb_ary_new(mrb));
    mrb_value value = mrb_str_new_lit(mrb, "value");
    paint_black(obj);
    paint_partial_white(gc, mrb_basic_ptr(value));

    gc->state = MRB_GC_STATE_MARK;
    mrb_field_write_barrier_value(mrb, obj, value);

    mrb_assert(is_gray(mrb_basic_ptr(value)));
  }

  mrb_close(mrb);
}
1526
/*
 * Exercises mrb_write_barrier: a black object written during mark must be
 * grayed and pushed onto the atomic gray list; a non-black object must be
 * left untouched.
 */
void
test_mrb_write_barrier(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj;
  mrb_gc *gc = &mrb->gc;

  puts("test_mrb_write_barrier");
  obj = mrb_basic_ptr(mrb_ary_new(mrb));
  paint_black(obj);

  puts("  in MRB_GC_STATE_MARK");
  gc->state = MRB_GC_STATE_MARK;
  mrb_write_barrier(mrb, obj);

  /* Black object during mark: grayed and queued on the atomic list. */
  mrb_assert(is_gray(obj));
  mrb_assert(gc->atomic_gray_list == obj);


  puts("  fail with gray");
  /* Already gray: the barrier is a no-op. */
  paint_gray(obj);
  mrb_write_barrier(mrb, obj);

  mrb_assert(is_gray(obj));

  mrb_close(mrb);
}
1554
/*
 * Exercises add_gray_list: each added object must become gray and be
 * pushed onto the head of gc->gray_list (LIFO order).
 */
void
test_add_gray_list(void)
{
  mrb_state *mrb = mrb_open();
  struct RBasic *obj1, *obj2;
  mrb_gc *gc = &mrb->gc;

  puts("test_add_gray_list");
  /* Run in plain incremental mode so the gray list starts empty. */
  change_gen_gc_mode(mrb, gc, FALSE);
  mrb_assert(gc->gray_list == NULL);
  obj1 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj1);
  mrb_assert(gc->gray_list == obj1);
  mrb_assert(is_gray(obj1));

  obj2 = mrb_basic_ptr(mrb_str_new_lit(mrb, "test"));
  add_gray_list(mrb, gc, obj2);
  /* The newer object becomes the list head, linked to the older one. */
  mrb_assert(gc->gray_list == obj2);
  mrb_assert(gc->gray_list->gcnext == obj1);
  mrb_assert(is_gray(obj2));

  mrb_close(mrb);
}
1578
/*
 * Exercises gc_gray_mark: marking a gray object must paint it black,
 * gray its white children, and return the number of children grayed.
 */
void
test_gc_gray_mark(void)
{
  mrb_state *mrb = mrb_open();
  mrb_value obj_v, value_v;
  struct RBasic *obj;
  size_t gray_num = 0;
  mrb_gc *gc = &mrb->gc;

  puts("test_gc_gray_mark");

  puts("  in MRB_TT_CLASS");
  /* A class has many reachable children (methods, ivars, superclass...). */
  obj = (struct RBasic*)mrb->object_class;
  paint_gray(obj);
  gray_num = gc_gray_mark(mrb, gc, obj);
  mrb_assert(is_black(obj));
  mrb_assert(gray_num > 1);

  puts("  in MRB_TT_ARRAY");
  /* An array with exactly one white element: exactly one child grayed. */
  obj_v = mrb_ary_new(mrb);
  value_v = mrb_str_new_lit(mrb, "test");
  paint_gray(mrb_basic_ptr(obj_v));
  paint_partial_white(gc, mrb_basic_ptr(value_v));
  mrb_ary_push(mrb, obj_v, value_v);
  gray_num = gc_gray_mark(mrb, gc, mrb_basic_ptr(obj_v));
  mrb_assert(is_black(mrb_basic_ptr(obj_v)));
  mrb_assert(is_gray(mrb_basic_ptr(value_v)));
  mrb_assert(gray_num == 1);

  mrb_close(mrb);
}
1610
/*
 * Drives the incremental collector through a full cycle step by step,
 * checking phase transitions and live-object accounting, then repeats
 * the exercise in generational mode (minor and major collections).
 */
void
test_incremental_gc(void)
{
  mrb_state *mrb = mrb_open();
  /* max = SIZE_MAX: a step budget large enough to finish any single phase. */
  size_t max = ~0, live = 0, total = 0, freed = 0;
  RVALUE *free;
  mrb_heap_page *page;
  mrb_gc *gc = &mrb->gc;

  puts("test_incremental_gc");
  change_gen_gc_mode(mrb, gc, FALSE);

  puts("  in mrb_full_gc");
  mrb_full_gc(mrb);

  /* A full GC leaves the collector back at the cycle boundary. */
  mrb_assert(gc->state == MRB_GC_STATE_ROOT);
  puts("  in MRB_GC_STATE_ROOT");
  incremental_gc(mrb, gc, max);
  mrb_assert(gc->state == MRB_GC_STATE_MARK);
  puts("  in MRB_GC_STATE_MARK");
  incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
  mrb_assert(gc->state == MRB_GC_STATE_SWEEP);

  puts("  in MRB_GC_STATE_SWEEP");
  /* At the start of sweep, count black (live) slots across all pages;
     no reachable object may still be gray at this point. */
  page = gc->heaps;
  while (page) {
    RVALUE *p = objects(page);
    RVALUE *e = p + MRB_HEAP_PAGE_SIZE;
    while (p<e) {
      if (is_black(&p->as.basic)) {
        live++;
      }
      if (is_gray(&p->as.basic) && !is_dead(gc, &p->as.basic)) {
        printf("%p\n", &p->as.basic);
      }
      p++;
    }
    page = page->next;
    total += MRB_HEAP_PAGE_SIZE;
  }

  mrb_assert(gc->gray_list == NULL);

  incremental_gc(mrb, gc, max);
  mrb_assert(gc->state == MRB_GC_STATE_SWEEP);

  incremental_gc(mrb, gc, max);
  mrb_assert(gc->state == MRB_GC_STATE_ROOT);

  /* Count the freelist of the first page and cross-check the collector's
     live counter against (total slots - freed slots). */
  free = (RVALUE*)gc->heaps->freelist;
  while (free) {
   freed++;
    free = (RVALUE*)free->as.free.next;
  }

  mrb_assert(gc->live == live);
  mrb_assert(gc->live == total-freed);

  puts("test_incremental_gc(gen)");
  incremental_gc_until(mrb, gc, MRB_GC_STATE_SWEEP);
  change_gen_gc_mode(mrb, gc, TRUE);

  mrb_assert(gc->full == FALSE);
  mrb_assert(gc->state == MRB_GC_STATE_ROOT);

  puts("  in minor");
  mrb_assert(is_minor_gc(gc));
  mrb_assert(gc->majorgc_old_threshold > 0);
  /* Force the old-object threshold to zero so the next collection is
     promoted to a major (full) one. */
  gc->majorgc_old_threshold = 0;
  mrb_incremental_gc(mrb);
  mrb_assert(gc->full == TRUE);
  mrb_assert(gc->state == MRB_GC_STATE_ROOT);

  puts("  in major");
  mrb_assert(is_major_gc(gc));
  do {
    mrb_incremental_gc(mrb);
  } while (gc->state != MRB_GC_STATE_ROOT);
  /* Completing the major cycle clears the 'full' flag again. */
  mrb_assert(gc->full == FALSE);

  mrb_close(mrb);
}
1693
/*
 * Exercises incremental_sweep_phase: after adding a second (empty) heap
 * page and sweeping with a budget covering all pages, the fully-free
 * extra page must have been released, leaving a single page that is
 * also the sole free page.
 */
void
test_incremental_sweep_phase(void)
{
  mrb_state *mrb = mrb_open();
  mrb_gc *gc = &mrb->gc;

  puts("test_incremental_sweep_phase");

  add_heap(mrb, gc);
  gc->sweeps = gc->heaps;

  /* Precondition: exactly two pages on both lists. */
  mrb_assert(gc->heaps->next->next == NULL);
  mrb_assert(gc->free_heaps->next->next == NULL);
  incremental_sweep_phase(mrb, gc, MRB_HEAP_PAGE_SIZE * 3);

  mrb_assert(gc->heaps->next == NULL);
  mrb_assert(gc->heaps == gc->free_heaps);

  mrb_close(mrb);
}
1714
/*
 * GC.test (debug builds only): runs every GC self-test in sequence.
 * Each test aborts via mrb_assert on failure; returns nil on success.
 */
static mrb_value
gc_test(mrb_state *mrb, mrb_value self)
{
  test_mrb_field_write_barrier();
  test_mrb_write_barrier();
  test_add_gray_list();
  test_gc_gray_mark();
  test_incremental_gc();
  test_incremental_sweep_phase();
  return mrb_nil_value();
}
1726#endif /* GC_DEBUG */
1727#endif /* GC_TEST */
Note: See TracBrowser for help on using the repository browser.