source: EcnlProtoTool/trunk/tcc-0.9.26/lib/bcheck.c@ 321

Last change on this file since 321 was 321, checked in by coas-nagasima, 7 years ago

Set character encoding

  • Property svn:eol-style set to native
  • Property svn:mime-type set to text/x-csrc;charset=UTF-8
File size: 24.2 KB
/*
 * Tiny C Memory and bounds checker
 *
 * Copyright (c) 2002 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
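/*
 * Overview (a summary inferred from how the entry points below are used;
 * the exact compiler behaviour may differ): this file is the runtime support
 * for tcc's `-b` bounds-checking mode.  The compiler is expected to emit
 * calls to __bound_ptr_add()/__bound_ptr_indirN() around pointer arithmetic
 * and dereferences, to call __bound_local_new()/__bound_local_delete() on
 * function entry/exit, and to provide the static __bounds_start table that
 * __bound_init() consumes.
 */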
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) \
    && !defined(__DragonFly__) && !defined(__OpenBSD__)
#include <malloc.h>
#endif
#if !defined(_WIN32)
#include <unistd.h>
#endif

//#define BOUND_DEBUG

/* define so that the bound array is static (faster, but uses memory even if
   bound checking is not used) */
//#define BOUND_STATIC

/* use malloc hooks. Currently the code is not reliable without these hooks */
#define CONFIG_TCC_MALLOC_HOOKS
#define HAVE_MEMALIGN

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__dietlibc__) \
    || defined(__UCLIBC__) || defined(__OpenBSD__) \
    || defined(_WIN32) || defined(TCC_UCLIBC)
#warning Bound checking does not support malloc (etc.) in this environment.
#undef CONFIG_TCC_MALLOC_HOOKS
#undef HAVE_MEMALIGN
#endif

#define BOUND_T1_BITS 13
#define BOUND_T2_BITS 11
#define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)

#define BOUND_T1_SIZE (1 << BOUND_T1_BITS)
#define BOUND_T2_SIZE (1 << BOUND_T2_BITS)
#define BOUND_T3_SIZE (1 << BOUND_T3_BITS)
#define BOUND_E_BITS 4

#define BOUND_T23_BITS (BOUND_T2_BITS + BOUND_T3_BITS)
#define BOUND_T23_SIZE (1 << BOUND_T23_BITS)
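
/*
 * How a 32-bit address is decomposed by the lookups below (derived from the
 * defines above; a sketch, assuming sizeof(BoundEntry) == 1 << BOUND_E_BITS,
 * i.e. 16 bytes on a 32-bit target):
 *
 *   bits 31..19 (BOUND_T1_BITS) index the __bound_t1 page table
 *   bits 18..8  (BOUND_T2_BITS) select a BoundEntry inside that page
 *   bits  7..0  (BOUND_T3_BITS) are the offset inside the chunk the entry covers
 *
 * The T2 index is computed directly as a byte offset into the page:
 *   (addr >> (BOUND_T3_BITS - BOUND_E_BITS)) & ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)
 */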

/* this pointer is returned when a bounds check fails */
#define INVALID_POINTER ((void *)(-2))
/* size of an empty region */
#define EMPTY_SIZE 0xffffffff
/* size of an invalid region */
#define INVALID_SIZE 0

typedef struct BoundEntry {
    unsigned long start;
    unsigned long size;
    struct BoundEntry *next;
    unsigned long is_invalid; /* true if pointers outside region are invalid */
} BoundEntry;
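/*
 * Each BoundEntry caches the region most recently matched for its
 * BOUND_T3_SIZE-byte chunk in 'start'/'size'; other regions falling into the
 * same chunk are chained through 'next' and swapped to the head by
 * __bound_find_region().  'size' is EMPTY_SIZE when no region is attached and
 * INVALID_SIZE inside the invalid zones set up by mark_invalid().
 * (Summary inferred from the code below.)
 */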

/* external interface */
void __bound_init(void);
void __bound_new_region(void *p, unsigned long size);
int __bound_delete_region(void *p);

#define FASTCALL __attribute__((regparm(3)))

void *__bound_malloc(size_t size, const void *caller);
void *__bound_memalign(size_t size, size_t align, const void *caller);
void __bound_free(void *ptr, const void *caller);
void *__bound_realloc(void *ptr, size_t size, const void *caller);
static void *libc_malloc(size_t size);
static void libc_free(void *ptr);
static void install_malloc_hooks(void);
static void restore_malloc_hooks(void);

#ifdef CONFIG_TCC_MALLOC_HOOKS
static void *saved_malloc_hook;
static void *saved_free_hook;
static void *saved_realloc_hook;
static void *saved_memalign_hook;
#endif

/* TCC definitions */
extern char __bounds_start; /* start of static bounds table */
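/*
 * Layout of the static bounds table (inferred from the loop at the end of
 * __bound_init() below): an array of word pairs { address, size }, one pair
 * per statically allocated object, terminated by a zero address.
 */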
/* error message, just for TCC */
const char *__bound_error_msg;

/* runtime error output */
extern void rt_error(unsigned long pc, const char *fmt, ...);

#ifdef BOUND_STATIC
static BoundEntry *__bound_t1[BOUND_T1_SIZE]; /* page table */
#else
static BoundEntry **__bound_t1; /* page table */
#endif
static BoundEntry *__bound_empty_t2; /* empty page, for unused pages */
static BoundEntry *__bound_invalid_t2; /* invalid page, for invalid pointers */

static BoundEntry *__bound_find_region(BoundEntry *e1, void *p)
{
    unsigned long addr, tmp;
    BoundEntry *e;

    e = e1;
    while (e != NULL) {
        addr = (unsigned long)p;
        addr -= e->start;
        if (addr <= e->size) {
            /* put region at the head */
            tmp = e1->start;
            e1->start = e->start;
            e->start = tmp;
            tmp = e1->size;
            e1->size = e->size;
            e->size = tmp;
            return e1;
        }
        e = e->next;
    }
    /* no entry found: return empty entry or invalid entry */
    if (e1->is_invalid)
        return __bound_invalid_t2;
    else
        return __bound_empty_t2;
}

/* print a bound error message */
static void bound_error(const char *fmt, ...)
{
    __bound_error_msg = fmt;
    *(int *)0 = 0; /* force a runtime error */
}

static void bound_alloc_error(void)
{
    bound_error("not enough memory for bound checking code");
}

/* return '(p + offset)' for pointer arithmetic (a pointer can reach
   the end of a region in this case) */
void * FASTCALL __bound_ptr_add(void *p, int offset)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;
#if defined(BOUND_DEBUG)
    printf("add: 0x%x %d\n", (int)p, offset);
#endif

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size) {
        e = __bound_find_region(e, p);
        addr = (unsigned long)p - e->start;
    }
    addr += offset;
    if (addr > e->size)
        return INVALID_POINTER; /* return an invalid pointer */
    return p + offset;
}

/* return '(p + offset)' for pointer indirection (the resulting pointer
   must be strictly inside the region) */
#define BOUND_PTR_INDIR(dsize)                                            \
void * FASTCALL __bound_ptr_indir ## dsize (void *p, int offset)          \
{                                                                         \
    unsigned long addr = (unsigned long)p;                                \
    BoundEntry *e;                                                        \
                                                                          \
    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];              \
    e = (BoundEntry *)((char *)e +                                        \
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &        \
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));          \
    addr -= e->start;                                                     \
    if (addr > e->size) {                                                 \
        e = __bound_find_region(e, p);                                    \
        addr = (unsigned long)p - e->start;                               \
    }                                                                     \
    addr += offset + dsize;                                               \
    if (addr > e->size)                                                   \
        return INVALID_POINTER; /* return an invalid pointer */           \
    return p + offset;                                                    \
}

BOUND_PTR_INDIR(1)
BOUND_PTR_INDIR(2)
BOUND_PTR_INDIR(4)
BOUND_PTR_INDIR(8)
BOUND_PTR_INDIR(12)
BOUND_PTR_INDIR(16)
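
/*
 * Usage sketch (an illustration of how the compiler is expected to use the
 * two entry points above when bounds checking is enabled; the generated code
 * may differ in detail):
 *
 *     int v = tab[i];      // roughly becomes:
 *     int v = *(int *)__bound_ptr_indir4(tab, i * sizeof(int));
 *
 *     int *q = tab + i;    // pointer arithmetic only:
 *     int *q = __bound_ptr_add(tab, i * sizeof(int));
 *
 * Both return INVALID_POINTER when the result leaves the region of 'tab',
 * so a later dereference of the result faults.
 */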

/* return the frame pointer of the caller */
#define GET_CALLER_FP(fp)\
{\
    fp = (unsigned long)__builtin_frame_address(1);\
}

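/*
 * The 'p1' argument of the two functions below points to a table emitted by
 * the compiler for each instrumented function: pairs of words
 * { frame-pointer-relative offset, size }, terminated by a zero offset
 * (format inferred from the loops that walk it).
 */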
/* called when entering a function to add all the local regions */
void FASTCALL __bound_local_new(void *p1)
{
    unsigned long addr, size, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        size = p[1];
        p += 2;
        __bound_new_region((void *)addr, size);
    }
}

/* called when leaving a function to delete all the local regions */
void FASTCALL __bound_local_delete(void *p1)
{
    unsigned long addr, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        p += 2;
        __bound_delete_region((void *)addr);
    }
}

static BoundEntry *__bound_new_page(void)
{
    BoundEntry *page;
    int i;

    page = libc_malloc(sizeof(BoundEntry) * BOUND_T2_SIZE);
    if (!page)
        bound_alloc_error();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put empty entries */
        page[i].start = 0;
        page[i].size = EMPTY_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 0;
    }
    return page;
}

/* currently we use malloc(). Should use bound_new_page() */
static BoundEntry *bound_new_entry(void)
{
    BoundEntry *e;
    e = libc_malloc(sizeof(BoundEntry));
    return e;
}

static void bound_free_entry(BoundEntry *e)
{
    libc_free(e);
}

static inline BoundEntry *get_page(int index)
{
    BoundEntry *page;
    page = __bound_t1[index];
    if (page == __bound_empty_t2 || page == __bound_invalid_t2) {
        /* create a new page if necessary */
        page = __bound_new_page();
        __bound_t1[index] = page;
    }
    return page;
}

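/*
 * mark_invalid() works on a flat "t2 index" space of BOUND_T1_BITS +
 * BOUND_T2_BITS bits, i.e. one index per BOUND_T3_SIZE-byte chunk of the
 * address space; t1_start/t1_end below are the corresponding page indices.
 * (Summary of the code below.)
 */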
/* mark a region as being invalid (can only be used during init) */
static void mark_invalid(unsigned long addr, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page;
    int t1_start, t1_end, i, j, t2_start, t2_end;

    start = addr;
    end = addr + size;

    t2_start = (start + BOUND_T3_SIZE - 1) >> BOUND_T3_BITS;
    if (end != 0)
        t2_end = end >> BOUND_T3_BITS;
    else
        t2_end = 1 << (BOUND_T1_BITS + BOUND_T2_BITS);

#if 0
    printf("mark_invalid: start = %x %x\n", t2_start, t2_end);
#endif

    /* first we handle full pages */
    t1_start = (t2_start + BOUND_T2_SIZE - 1) >> BOUND_T2_BITS;
    t1_end = t2_end >> BOUND_T2_BITS;

    i = t2_start & (BOUND_T2_SIZE - 1);
    j = t2_end & (BOUND_T2_SIZE - 1);

    if (t1_start == t1_end) {
        page = get_page(t2_start >> BOUND_T2_BITS);
        for(; i < j; i++) {
            page[i].size = INVALID_SIZE;
            page[i].is_invalid = 1;
        }
    } else {
        if (i > 0) {
            page = get_page(t2_start >> BOUND_T2_BITS);
            for(; i < BOUND_T2_SIZE; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
        for(i = t1_start; i < t1_end; i++) {
            __bound_t1[i] = __bound_invalid_t2;
        }
        if (j != 0) {
            page = get_page(t1_end);
            for(i = 0; i < j; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
    }
}

void __bound_init(void)
{
    int i;
    BoundEntry *page;
    unsigned long start, size;
    int *p;

    /* save malloc hooks and install bound check hooks */
    install_malloc_hooks();

#ifndef BOUND_STATIC
    __bound_t1 = libc_malloc(BOUND_T1_SIZE * sizeof(BoundEntry *));
    if (!__bound_t1)
        bound_alloc_error();
#endif
    __bound_empty_t2 = __bound_new_page();
    for(i=0;i<BOUND_T1_SIZE;i++) {
        __bound_t1[i] = __bound_empty_t2;
    }

    page = __bound_new_page();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put invalid entries */
        page[i].start = 0;
        page[i].size = INVALID_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 1;
    }
    __bound_invalid_t2 = page;

    /* invalid pointer zone */
    start = (unsigned long)INVALID_POINTER & ~(BOUND_T23_SIZE - 1);
    size = BOUND_T23_SIZE;
    mark_invalid(start, size);

#if defined(CONFIG_TCC_MALLOC_HOOKS)
    /* The malloc zone is also marked invalid. This can only be done when the
     * hooks are used, because all libraries must share the same malloc. The
     * real solution would be to build a dedicated malloc for tcc.
     *
     * Usually the heap (= malloc zone) comes right after bss, i.e. after _end,
     * but not always - if we are running from under `tcc -b -run`, or if
     * address space randomization is turned on(a), the heap start will be
     * separated from the end of bss.
     *
     * So sbrk(0) is a good approximation of start_brk:
     *
     * - if we are a separately compiled program, __bound_init() runs early,
     *   and sbrk(0) should be equal or very close to start_brk(b) (it may
     *   differ slightly if other constructors malloc something), or
     *
     * - if we are running from under `tcc -b -run`, sbrk(0) returns the start
     *   of the heap portion that is under this program's control, and does
     *   not mark as invalid memory that was allocated earlier.
     *
     * (a) /proc/sys/kernel/randomize_va_space = 2 on Linux;
     *     usually turned on by default.
     *
     * (b) on Linux >= v3.3, the alternative is to read
     *     start_brk from /proc/self/stat
     */
    start = (unsigned long)sbrk(0);
    size = 128 * 0x100000;
    mark_invalid(start, size);
#endif

    /* add all static bound check values */
    p = (int *)&__bounds_start;
    while (p[0] != 0) {
        __bound_new_region((void *)p[0], p[1]);
        p += 2;
    }
}

void __bound_exit(void)
{
    restore_malloc_hooks();
}

static inline void add_region(BoundEntry *e,
                              unsigned long start, unsigned long size)
{
    BoundEntry *e1;
    if (e->start == 0) {
        /* no region yet: add it */
        e->start = start;
        e->size = size;
    } else {
        /* there are already regions in the list: add the new one at the head */
        e1 = bound_new_entry();
        e1->start = e->start;
        e1->size = e->size;
        e1->next = e->next;
        e->start = start;
        e->size = size;
        e->next = e1;
    }
}

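/*
 * __bound_new_region() touches every BoundEntry whose BOUND_T3_SIZE-byte
 * chunk overlaps the new region: the entries containing the first and last
 * byte get a real region attached with add_region(), while the fully covered
 * entries in between simply have start/size copied into them, so lookups
 * there hit directly without chaining.  (Summary of the code below.)
 */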
/* create a new region. It should not already exist in the region list */
void __bound_new_region(void *p, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, i, t2_start, t2_end;

    start = (unsigned long)p;
    end = start + size;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);

    /* start */
    page = get_page(t1_start);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
               ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
             ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
#ifdef BOUND_DEBUG
    printf("new %lx %lx %x %x %x %x\n",
           start, end, t1_start, t1_end, t2_start, t2_end);
#endif

    e = (BoundEntry *)((char *)page + t2_start);
    add_region(e, start, size);

    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
            add_region(e, start, size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        /* mark intermediate pages, if any */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        add_region(e, start, size);
    }
}

/* delete a region */
static inline void delete_region(BoundEntry *e,
                                 void *p, unsigned long empty_size)
{
    unsigned long addr;
    BoundEntry *e1;

    addr = (unsigned long)p;
    addr -= e->start;
    if (addr <= e->size) {
        /* region found is first one */
        e1 = e->next;
        if (e1 == NULL) {
            /* no more region: mark it empty */
            e->start = 0;
            e->size = empty_size;
        } else {
            /* copy next region in head */
            e->start = e1->start;
            e->size = e1->size;
            e->next = e1->next;
            bound_free_entry(e1);
        }
    } else {
        /* find the matching region */
        for(;;) {
            e1 = e;
            e = e->next;
            /* region not found: do nothing */
            if (e == NULL)
                break;
            addr = (unsigned long)p - e->start;
            if (addr <= e->size) {
                /* found: remove entry */
                e1->next = e->next;
                bound_free_entry(e);
                break;
            }
        }
    }
}

/* WARNING: 'p' must be the starting point of the region. */
/* return non zero if error */
int __bound_delete_region(void *p)
{
    unsigned long start, end, addr, size, empty_size;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, t2_start, t2_end, i;

    start = (unsigned long)p;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
               ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    /* find region size */
    page = __bound_t1[t1_start];
    e = (BoundEntry *)((char *)page + t2_start);
    addr = start - e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    /* test if invalid region */
    if (e->size == EMPTY_SIZE || (unsigned long)p != e->start)
        return -1;
    /* compute the size we put in invalid regions */
    if (e->is_invalid)
        empty_size = INVALID_SIZE;
    else
        empty_size = EMPTY_SIZE;
    size = e->size;
    end = start + size;

    /* now we can free each entry */
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
             ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    delete_region(e, p, empty_size);
    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
            delete_region(e, p, empty_size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        /* mark intermediate pages, if any */
        /* XXX: should free them */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        delete_region(e, p, empty_size);
    }
    return 0;
}

/* return the size of the region starting at p, or EMPTY_SIZE if no such
   region exists. */
static unsigned long get_region_size(void *p)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    if (e->start != (unsigned long)p)
        return EMPTY_SIZE;
    return e->size;
}

/* patched memory functions */

/* force compiler to perform stores coded up to this point */
#define barrier() __asm__ __volatile__ ("": : : "memory")

static void install_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    saved_malloc_hook = __malloc_hook;
    saved_free_hook = __free_hook;
    saved_realloc_hook = __realloc_hook;
    saved_memalign_hook = __memalign_hook;
    __malloc_hook = __bound_malloc;
    __free_hook = __bound_free;
    __realloc_hook = __bound_realloc;
    __memalign_hook = __bound_memalign;

    barrier();
#endif
}

static void restore_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    __malloc_hook = saved_malloc_hook;
    __free_hook = saved_free_hook;
    __realloc_hook = saved_realloc_hook;
    __memalign_hook = saved_memalign_hook;

    barrier();
#endif
}
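
/*
 * Note: the __malloc_hook family used above is a glibc extension that has
 * since been deprecated and removed in newer glibc releases, so this
 * hook-based interception is only expected to work with an older glibc; on
 * the other environments listed at the top of the file,
 * CONFIG_TCC_MALLOC_HOOKS is already disabled.
 */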

static void *libc_malloc(size_t size)
{
    void *ptr;
    restore_malloc_hooks();
    ptr = malloc(size);
    install_malloc_hooks();
    return ptr;
}

static void libc_free(void *ptr)
{
    restore_malloc_hooks();
    free(ptr);
    install_malloc_hooks();
}
/* XXX: we should use a malloc which ensures that it is unlikely that
   two malloc'ed blocks get the same address if a 'free' is done in
   between. */
void *__bound_malloc(size_t size, const void *caller)
{
    void *ptr;

    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, it may
       in fact not be necessary */
    ptr = libc_malloc(size + 1);

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void *__bound_memalign(size_t size, size_t align, const void *caller)
{
    void *ptr;

    restore_malloc_hooks();

#ifndef HAVE_MEMALIGN
    if (align > 4) {
        /* XXX: handle it ? */
        ptr = NULL;
    } else {
        /* we assume that malloc aligns to at least four bytes */
        ptr = malloc(size + 1);
    }
#else
    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, it may
       in fact not be necessary */
    ptr = memalign(align, size + 1);
#endif

    install_malloc_hooks();

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void __bound_free(void *ptr, const void *caller)
{
    if (ptr == NULL)
        return;
    if (__bound_delete_region(ptr) != 0)
        bound_error("freeing invalid region");

    libc_free(ptr);
}

void *__bound_realloc(void *ptr, size_t size, const void *caller)
{
    void *ptr1;
    int old_size;

    if (size == 0) {
        __bound_free(ptr, caller);
        return NULL;
    } else {
        ptr1 = __bound_malloc(size, caller);
        if (ptr == NULL || ptr1 == NULL)
            return ptr1;
        old_size = get_region_size(ptr);
        if (old_size == EMPTY_SIZE)
            bound_error("realloc'ing invalid pointer");
        memcpy(ptr1, ptr, old_size);
        __bound_free(ptr, caller);
        return ptr1;
    }
}

#ifndef CONFIG_TCC_MALLOC_HOOKS
void *__bound_calloc(size_t nmemb, size_t size)
{
    void *ptr;
    size = size * nmemb;
    ptr = __bound_malloc(size, NULL);
    if (!ptr)
        return NULL;
    memset(ptr, 0, size);
    return ptr;
}
#endif

#if 0
static void bound_dump(void)
{
    BoundEntry *page, *e;
    int i, j;

    printf("region dump:\n");
    for(i=0;i<BOUND_T1_SIZE;i++) {
        page = __bound_t1[i];
        for(j=0;j<BOUND_T2_SIZE;j++) {
            e = page + j;
            /* do not print invalid or empty entries */
            if (e->size != EMPTY_SIZE && e->start != 0) {
                printf("%08x:",
                       (i << (BOUND_T2_BITS + BOUND_T3_BITS)) +
                       (j << BOUND_T3_BITS));
                do {
                    printf(" %08lx:%08lx", e->start, e->start + e->size);
                    e = e->next;
                } while (e != NULL);
                printf("\n");
            }
        }
    }
}
#endif

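/*
 * The wrappers below are checked replacements for a few common libc
 * functions; with bounds checking enabled the compiler is expected to
 * redirect calls to memcpy/memmove/memset/strlen/strcpy to these versions
 * (a summary of their intended use, not something enforced in this file).
 */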
/* some useful checked functions */

/* check that (p ... p + size - 1) lies inside the region of 'p', if any */
static void __bound_check(const void *p, size_t size)
{
    if (size == 0)
        return;
    p = __bound_ptr_add((void *)p, size);
    if (p == INVALID_POINTER)
        bound_error("invalid pointer");
}

void *__bound_memcpy(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    /* also check that the regions do not overlap */
    if (src >= dst && src < dst + size)
        bound_error("overlapping regions in memcpy()");
    return memcpy(dst, src, size);
}

void *__bound_memmove(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    return memmove(dst, src, size);
}

void *__bound_memset(void *dst, int c, size_t size)
{
    __bound_check(dst, size);
    return memset(dst, c, size);
}

/* XXX: could be optimized */
int __bound_strlen(const char *s)
{
    const char *p;
    int len;

    len = 0;
    for(;;) {
        p = __bound_ptr_indir1((char *)s, len);
        if (p == INVALID_POINTER)
            bound_error("bad pointer in strlen()");
        if (*p == '\0')
            break;
        len++;
    }
    return len;
}

char *__bound_strcpy(char *dst, const char *src)
{
    int len;
    len = __bound_strlen(src);
    return __bound_memcpy(dst, src, len + 1);
}