source: UsbWattMeter/trunk/tlsf-3.0/tlsf.c@167

Last change on this file since 167 was 167, checked in by coas-nagasima, 8 years ago

Set the MIME type to SHIFT_JIS

//#include <assert.h>
//#include <limits.h>
//#include <stddef.h>
//#include <stdio.h>
//#include <stdlib.h>
#include <string.h>
#include <stdarg.h>

#include "tlsf.h"
#include "tlsfbits.h"
#include "t_syslog.h"

#ifdef __RX
#define printf(...)
#else
void printf(const char *format, ...)
{
	va_list va;
	va_start(va, format);
	vsyslog(LOG_DEBUG, format, va);
	va_end(va);
}
#endif

/*
** Constants.
*/

/* Public constants: may be modified. */
enum tlsf_public
{
	/* log2 of number of linear subdivisions of block sizes. */
	SL_INDEX_COUNT_LOG2 = 5,
};

/* Private constants: do not modify. */
enum tlsf_private
{
#if defined (TLSF_64BIT)
	/* All allocation sizes and addresses are aligned to 8 bytes. */
	ALIGN_SIZE_LOG2 = 3,
#else
	/* All allocation sizes and addresses are aligned to 4 bytes. */
	ALIGN_SIZE_LOG2 = 2,
#endif
	ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

	/*
	** We support allocations of sizes up to (1 << FL_INDEX_MAX) bytes.
	** However, because we linearly subdivide the second-level lists, and
	** our minimum size granularity is 4 bytes, it doesn't make sense to
	** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
	** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
	** trying to split size ranges into more slots than we have available.
	** Instead, we calculate the minimum threshold size, and place all
	** blocks below that size into the 0th first-level list.
	*/

#if defined (TLSF_64BIT)
	/*
	** TODO: We can increase this to support larger sizes, at the expense
	** of more overhead in the TLSF structure.
	*/
	FL_INDEX_MAX = 32,
#else
	FL_INDEX_MAX = 30,
#endif
	SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
	FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
	FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

	SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
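
/*
** For reference, with the 32-bit defaults above: ALIGN_SIZE = 4,
** SL_INDEX_COUNT = 32, FL_INDEX_SHIFT = 7, FL_INDEX_COUNT = 24 and
** SMALL_BLOCK_SIZE = 128. Requests below 128 bytes all map to the 0th
** first-level list, and the largest supported block is 1 << 30 bytes.
*/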

/*
** Cast and min/max macros.
*/

#define tlsf_cast(t, exp) ((t) (exp))
#define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
#define tlsf_max(a, b) ((a) > (b) ? (a) : (b))

/*
** Set assert macro, if it has not been provided by the user.
*/
#if !defined (tlsf_assert)
#define tlsf_assert assert
#endif

/*
** Static assertion mechanism.
*/

#define _tlsf_glue2(x, y) x ## y
#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
#define tlsf_static_assert(exp) \
	typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]

/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);

/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);

/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);

/*
** Data structures and associated constants.
*/

/*
** Block header structure.
**
** There are several implementation subtleties involved:
** - The prev_phys_block field is only valid if the previous block is free.
** - The prev_phys_block field is actually stored at the end of the
**   previous block. It appears at the beginning of this structure only to
**   simplify the implementation.
** - The next_free / prev_free fields are only valid if the block is free.
*/
typedef struct block_header_t
{
	/* Points to the previous physical block. */
	struct block_header_t* prev_phys_block;

	/* The size of this block, excluding the block header. */
	size_t size;

	/* Next and previous free blocks. */
	struct block_header_t* next_free;
	struct block_header_t* prev_free;
} block_header_t;

/*
** Since block sizes are always at least a multiple of 4, the two least
** significant bits of the size field are used to store the block status:
** - bit 0: whether block is busy or free
** - bit 1: whether previous block is busy or free
*/
static const size_t block_header_free_bit = 1 << 0;
static const size_t block_header_prev_free_bit = 1 << 1;
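
/*
** Example: a free 64-byte block whose previous physical block is in use
** stores size = 64 | block_header_free_bit = 0x41; block_size() masks off
** both status bits and returns 64.
*/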

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
static const size_t block_header_overhead = sizeof(size_t);

/* User data starts directly after the size field in a used block. */
static const size_t block_start_offset =
	offsetof(block_header_t, size) + sizeof(size_t);

/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
*/
static const size_t block_size_min =
	sizeof(block_header_t) - sizeof(block_header_t*);
static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;


/* The TLSF control structure. */
typedef struct control_t
{
	/* Empty lists point at this block to indicate they are free. */
	block_header_t block_null;

	/* Bitmaps for free lists. */
	unsigned int fl_bitmap;
	unsigned int sl_bitmap[FL_INDEX_COUNT];

	/* Head of free lists. */
	block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} control_t;
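
/*
** Back-of-envelope size (an estimate, assuming 4-byte pointers and no
** struct padding): sizeof(control_t) = 16 (block_null) + 4 (fl_bitmap)
** + 24 * 4 (sl_bitmap) + 24 * 32 * 4 (blocks) = 3188 bytes, which is
** what tlsf_size() reports as the per-instance overhead.
*/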

/* A type used for casting when doing pointer arithmetic. */
typedef ptrdiff_t tlsfptr_t;

/*
** block_header_t member functions.
*/

static size_t block_size(const block_header_t* block)
{
	return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
}

static void block_set_size(block_header_t* block, size_t size)
{
	const size_t oldsize = block->size;
	block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
}

static int block_is_last(const block_header_t* block)
{
	return 0 == block_size(block);
}

static int block_is_free(const block_header_t* block)
{
	return tlsf_cast(int, block->size & block_header_free_bit);
}

static void block_set_free(block_header_t* block)
{
	block->size |= block_header_free_bit;
}

static void block_set_used(block_header_t* block)
{
	block->size &= ~block_header_free_bit;
}

static int block_is_prev_free(const block_header_t* block)
{
	return tlsf_cast(int, block->size & block_header_prev_free_bit);
}

static void block_set_prev_free(block_header_t* block)
{
	block->size |= block_header_prev_free_bit;
}

static void block_set_prev_used(block_header_t* block)
{
	block->size &= ~block_header_prev_free_bit;
}

static block_header_t* block_from_ptr(const void* ptr)
{
	return tlsf_cast(block_header_t*,
		tlsf_cast(unsigned char*, ptr) - block_start_offset);
}

static void* block_to_ptr(const block_header_t* block)
{
	return tlsf_cast(void*,
		tlsf_cast(unsigned char*, block) + block_start_offset);
}

/* Return location of next block after block of given size. */
static block_header_t* offset_to_block(const void* ptr, size_t size)
{
	return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
}

/* Return location of previous block. */
static block_header_t* block_prev(const block_header_t* block)
{
	return block->prev_phys_block;
}

/* Return location of next existing block. */
static block_header_t* block_next(const block_header_t* block)
{
	block_header_t* next = offset_to_block(block_to_ptr(block),
		block_size(block) - block_header_overhead);
	tlsf_assert(!block_is_last(block));
	return next;
}

/* Link a new block with its physical neighbor, return the neighbor. */
static block_header_t* block_link_next(block_header_t* block)
{
	block_header_t* next = block_next(block);
	next->prev_phys_block = block;
	return next;
}

static void block_mark_as_free(block_header_t* block)
{
	/* Link the block to the next block, first. */
	block_header_t* next = block_link_next(block);
	block_set_prev_free(next);
	block_set_free(block);
}

static void block_mark_as_used(block_header_t* block)
{
	block_header_t* next = block_next(block);
	block_set_prev_used(next);
	block_set_used(block);
}

static size_t align_up(size_t x, size_t align)
{
	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
	return (x + (align - 1)) & ~(align - 1);
}

static size_t align_down(size_t x, size_t align)
{
	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
	return x - (x & (align - 1));
}

static void* align_ptr(const void* ptr, size_t align)
{
	const tlsfptr_t aligned =
		(tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
	return tlsf_cast(void*, aligned);
}
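
/*
** Example: align_up(13, 8) == 16 and align_down(13, 8) == 8; already
** aligned values pass through unchanged, e.g. align_up(16, 8) == 16.
*/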

/*
** Adjust an allocation size to be aligned to word size, and no smaller
** than internal minimum.
*/
static size_t adjust_request_size(size_t size, size_t align)
{
	size_t adjust = 0;
	if (size && size < block_size_max)
	{
		const size_t aligned = align_up(size, align);
		adjust = tlsf_max(aligned, block_size_min);
	}
	return adjust;
}
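
/*
** Example (32-bit build, where block_size_min is 12): a 1-byte request
** becomes align_up(1, 4) = 4, then tlsf_max(4, 12) = 12. Requests of 0
** or >= block_size_max yield 0, which callers treat as failure.
*/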

/*
** TLSF utility functions. In most cases, these are direct translations of
** the documentation found in the white paper.
*/

static void mapping_insert(size_t size, int* fli, int* sli)
{
	int fl, sl;
	if (size < SMALL_BLOCK_SIZE)
	{
		/* Store small blocks in first list. */
		fl = 0;
		sl = tlsf_cast(int, size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
	}
	else
	{
		fl = tlsf_fls_sizet(size);
		sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
		fl -= (FL_INDEX_SHIFT - 1);
	}
	*fli = fl;
	*sli = sl;
}
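
/*
** Worked example with the 32-bit defaults, size = 460: fls(460) = 8, so
** sl = (460 >> 3) ^ 32 = 57 ^ 32 = 25 and fl = 8 - 6 = 2. That is, 460
** falls in first-level range [256, 512), which is split into 32 steps of
** 8 bytes each, and (460 - 256) / 8 = 25.
*/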

/* This version rounds up to the next block size (for allocations). */
static void mapping_search(size_t size, int* fli, int* sli)
{
	if (size >= (1 << SL_INDEX_COUNT_LOG2))
	{
		const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1;
		size += round;
	}
	mapping_insert(size, fli, sli);
}
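
/*
** Continuing the example, a request for 460 bytes is first rounded up by
** round = (1 << (8 - 5)) - 1 = 7 to 467, which maps to fl = 2, sl = 26.
** List [2][26] holds blocks of [464, 472) bytes, so any block found at or
** above that index is guaranteed to be large enough for the request.
*/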

static block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
{
	int fl = *fli;
	int sl = *sli;

	/*
	** First, search for a block in the list associated with the given
	** fl/sl index.
	*/
	unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
	if (!sl_map)
	{
		/* No block exists. Search in the next largest first-level list. */
		const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
		if (!fl_map)
		{
			/* No free blocks available, memory has been exhausted. */
			return 0;
		}

		fl = tlsf_ffs(fl_map);
		*fli = fl;
		sl_map = control->sl_bitmap[fl];
	}
	tlsf_assert(sl_map && "internal error - second level bitmap is null");
	sl = tlsf_ffs(sl_map);
	*sli = sl;

	/* Return the first block in the free list. */
	return control->blocks[fl][sl];
}

/* Remove a free block from the free list. */
static void remove_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
	block_header_t* prev = block->prev_free;
	block_header_t* next = block->next_free;
	tlsf_assert(prev && "prev_free field can not be null");
	tlsf_assert(next && "next_free field can not be null");
	next->prev_free = prev;
	prev->next_free = next;

	/* If this block is the head of the free list, set new head. */
	if (control->blocks[fl][sl] == block)
	{
		control->blocks[fl][sl] = next;

		/* If the new head is null, clear the bitmap. */
		if (next == &control->block_null)
		{
			control->sl_bitmap[fl] &= ~(1 << sl);

			/* If the second bitmap is now empty, clear the fl bitmap. */
			if (!control->sl_bitmap[fl])
			{
				control->fl_bitmap &= ~(1 << fl);
			}
		}
	}
}

/* Insert a free block into the free block list. */
static void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
	block_header_t* current = control->blocks[fl][sl];
	tlsf_assert(current && "free list cannot have a null entry");
	tlsf_assert(block && "cannot insert a null entry into the free list");
	block->next_free = current;
	block->prev_free = &control->block_null;
	current->prev_free = block;

	tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
		&& "block not aligned properly");
	/*
	** Insert the new block at the head of the list, and mark the first-
	** and second-level bitmaps appropriately.
	*/
	control->blocks[fl][sl] = block;
	control->fl_bitmap |= (1 << fl);
	control->sl_bitmap[fl] |= (1 << sl);
}

/* Remove a given block from the free list. */
static void block_remove(control_t* control, block_header_t* block)
{
	int fl, sl;
	mapping_insert(block_size(block), &fl, &sl);
	remove_free_block(control, block, fl, sl);
}

/* Insert a given block into the free list. */
static void block_insert(control_t* control, block_header_t* block)
{
	int fl, sl;
	mapping_insert(block_size(block), &fl, &sl);
	insert_free_block(control, block, fl, sl);
}

static int block_can_split(block_header_t* block, size_t size)
{
	return block_size(block) >= sizeof(block_header_t) + size;
}

/* Split a block into two, the second of which is free. */
static block_header_t* block_split(block_header_t* block, size_t size)
{
	/* Calculate the amount of space left in the remaining block. */
	block_header_t* remaining =
		offset_to_block(block_to_ptr(block), size - block_header_overhead);

	const size_t remain_size = block_size(block) - (size + block_header_overhead);

	tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
		&& "remaining block not aligned properly");

	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
	block_set_size(remaining, remain_size);
	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

	block_set_size(block, size);
	block_mark_as_free(remaining);

	return remaining;
}
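
/*
** Example (32-bit sizes): splitting a 256-byte free block for a 64-byte
** request leaves remain_size = 256 - (64 + 4) = 188 bytes, the 4 bytes
** being the size field of the newly created remainder block.
*/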

/* Absorb a free block's storage into an adjacent previous free block. */
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
{
	tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
	/* Note: Leaves flags untouched. */
	prev->size += block_size(block) + block_header_overhead;
	block_link_next(prev);
	return prev;
}

/* Merge a just-freed block with an adjacent previous free block. */
static block_header_t* block_merge_prev(control_t* control, block_header_t* block)
{
	if (block_is_prev_free(block))
	{
		block_header_t* prev = block_prev(block);
		tlsf_assert(prev && "prev physical block can't be null");
		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
		block_remove(control, prev);
		block = block_absorb(prev, block);
	}

	return block;
}

/* Merge a just-freed block with an adjacent free block. */
static block_header_t* block_merge_next(control_t* control, block_header_t* block)
{
	block_header_t* next = block_next(block);
	tlsf_assert(next && "next physical block can't be null");

	if (block_is_free(next))
	{
		tlsf_assert(!block_is_last(block) && "previous block can't be last!");
		block_remove(control, next);
		block = block_absorb(block, next);
	}

	return block;
}

/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(control_t* control, block_header_t* block, size_t size)
{
	tlsf_assert(block_is_free(block) && "block must be free");
	if (block_can_split(block, size))
	{
		block_header_t* remaining_block = block_split(block, size);
		block_link_next(block);
		block_set_prev_free(remaining_block);
		block_insert(control, remaining_block);
	}
}

/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(control_t* control, block_header_t* block, size_t size)
{
	tlsf_assert(!block_is_free(block) && "block must be used");
	if (block_can_split(block, size))
	{
		/* If the next block is free, we must coalesce. */
		block_header_t* remaining_block = block_split(block, size);
		block_set_prev_used(remaining_block);

		remaining_block = block_merge_next(control, remaining_block);
		block_insert(control, remaining_block);
	}
}

static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
{
	block_header_t* remaining_block = block;
	if (block_can_split(block, size))
	{
		/* We want the 2nd block. */
		remaining_block = block_split(block, size - block_header_overhead);
		block_set_prev_free(remaining_block);

		block_link_next(block);
		block_insert(control, block);
	}

	return remaining_block;
}

static block_header_t* block_locate_free(control_t* control, size_t size)
{
	int fl = 0, sl = 0;
	block_header_t* block = 0;

	if (size)
	{
		mapping_search(size, &fl, &sl);
		block = search_suitable_block(control, &fl, &sl);
	}

	if (block)
	{
		tlsf_assert(block_size(block) >= size);
		remove_free_block(control, block, fl, sl);
	}

	return block;
}

static void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
{
	void* p = 0;
	if (block)
	{
		block_trim_free(control, block, size);
		block_mark_as_used(block);
		p = block_to_ptr(block);
	}
	return p;
}

/* Clear structure and point all empty lists at the null block. */
static void control_construct(control_t* control)
{
	int i, j;

	control->block_null.next_free = &control->block_null;
	control->block_null.prev_free = &control->block_null;

	control->fl_bitmap = 0;
	for (i = 0; i < FL_INDEX_COUNT; ++i)
	{
		control->sl_bitmap[i] = 0;
		for (j = 0; j < SL_INDEX_COUNT; ++j)
		{
			control->blocks[i][j] = &control->block_null;
		}
	}
}

/*
** Debugging utilities.
*/

typedef struct integrity_t
{
	int prev_status;
	int status;
} integrity_t;

#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }

static void integrity_walker(void* ptr, size_t size, int used, void* user)
{
	block_header_t* block = block_from_ptr(ptr);
	integrity_t* integ = tlsf_cast(integrity_t*, user);
	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
	const int this_status = block_is_free(block) ? 1 : 0;
	const size_t this_block_size = block_size(block);

	int status = 0;
	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
	tlsf_insist(size == this_block_size && "block size incorrect");

	integ->prev_status = this_status;
	integ->status += status;
}

int tlsf_check(tlsf_t tlsf)
{
	int i, j;

	control_t* control = tlsf_cast(control_t*, tlsf);
	int status = 0;

	/* Check that the free lists and bitmaps are accurate. */
	for (i = 0; i < FL_INDEX_COUNT; ++i)
	{
		for (j = 0; j < SL_INDEX_COUNT; ++j)
		{
			const int fl_map = control->fl_bitmap & (1 << i);
			const int sl_list = control->sl_bitmap[i];
			const int sl_map = sl_list & (1 << j);
			const block_header_t* block = control->blocks[i][j];

			/* Check that first- and second-level lists agree. */
			if (!fl_map)
			{
				tlsf_insist(!sl_map && "second-level map must be null");
			}

			if (!sl_map)
			{
				tlsf_insist(block == &control->block_null && "block list must be null");
				continue;
			}

			/* Check that there is at least one free block. */
			tlsf_insist(sl_list && "no free blocks in second-level map");
			tlsf_insist(block != &control->block_null && "block should not be null");

			while (block != &control->block_null)
			{
				int fli, sli;
				tlsf_insist(block_is_free(block) && "block should be free");
				tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
				tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
				tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
				tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

				mapping_insert(block_size(block), &fli, &sli);
				tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
				block = block->next_free;
			}
		}
	}

	return status;
}

#undef tlsf_insist

static void default_walker(void* ptr, size_t size, int used, void* user)
{
	(void)user;
	printf("\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
}

void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
{
	tlsf_walker pool_walker = walker ? walker : default_walker;
	block_header_t* block =
		offset_to_block(pool, -(int)block_header_overhead);

	while (block && !block_is_last(block))
	{
		pool_walker(
			block_to_ptr(block),
			block_size(block),
			!block_is_free(block),
			user);
		block = block_next(block);
	}
}

size_t tlsf_block_size(void* ptr)
{
	size_t size = 0;
	if (ptr)
	{
		const block_header_t* block = block_from_ptr(ptr);
		size = block_size(block);
	}
	return size;
}

int tlsf_check_pool(pool_t pool)
{
	/* Check that the blocks are physically correct. */
	integrity_t integ = { 0, 0 };
	tlsf_walk_pool(pool, integrity_walker, &integ);

	return integ.status;
}

/*
** Size of the TLSF structures in a given memory block passed to
** tlsf_create, equal to the size of a control_t.
*/
size_t tlsf_size()
{
	return sizeof(control_t);
}

size_t tlsf_align_size()
{
	return ALIGN_SIZE;
}

size_t tlsf_block_size_min()
{
	return block_size_min;
}

size_t tlsf_block_size_max()
{
	return block_size_max;
}

/*
** Overhead of the TLSF structures in a given memory block passed to
** tlsf_add_pool, equal to the overhead of a free block and the
** sentinel block.
*/
size_t tlsf_pool_overhead()
{
	return 2 * block_header_overhead;
}

size_t tlsf_alloc_overhead()
{
	return block_header_overhead;
}

pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
{
	block_header_t* block;
	block_header_t* next;

	const size_t pool_overhead = tlsf_pool_overhead();
	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);

	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
	{
		printf("tlsf_add_pool: Memory must be aligned by %u bytes.\n",
			(unsigned int)ALIGN_SIZE);
		return 0;
	}

	if (pool_bytes < block_size_min || pool_bytes > block_size_max)
	{
#if defined (TLSF_64BIT)
		printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
			(unsigned int)(pool_overhead + block_size_min),
			(unsigned int)((pool_overhead + block_size_max) / 256));
#else
		printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
			(unsigned int)(pool_overhead + block_size_min),
			(unsigned int)(pool_overhead + block_size_max));
#endif
		return 0;
	}

	/*
	** Create the main free block. Offset the start of the block slightly
	** so that the prev_phys_block field falls outside of the pool -
	** it will never be used.
	*/
	block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
	block_set_size(block, pool_bytes);
	block_set_free(block);
	block_set_prev_used(block);
	block_insert(tlsf_cast(control_t*, tlsf), block);

	/* Split the block to create a zero-size sentinel block. */
	next = block_link_next(block);
	block_set_size(next, 0);
	block_set_used(next);
	block_set_prev_free(next);

	return mem;
}
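
/*
** Sketch of the resulting pool layout (32-bit sizes; alignment padding
** omitted):
**
**   mem                                                   mem + bytes
**   | size/flags (4B) | pool_bytes of free space | sentinel size = 0 (4B) |
**
** The two 4-byte size fields account for tlsf_pool_overhead(). The main
** block's prev_phys_block field would sit just before mem, but it is
** never read because the block is marked prev-used.
*/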

void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);

	int fl = 0, sl = 0;

	tlsf_assert(block_is_free(block) && "block should be free");
	tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
	tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");

	mapping_insert(block_size(block), &fl, &sl);
	remove_free_block(control, block, fl, sl);
}

/*
** TLSF main interface.
*/

#if _DEBUG
int test_ffs_fls()
{
	/* Verify ffs/fls work properly. */
	int rv = 0;
	rv += (tlsf_ffs(0) == -1) ? 0 : 0x1;
	rv += (tlsf_fls(0) == -1) ? 0 : 0x2;
	rv += (tlsf_ffs(1) == 0) ? 0 : 0x4;
	rv += (tlsf_fls(1) == 0) ? 0 : 0x8;
	rv += (tlsf_ffs(0x80000000) == 31) ? 0 : 0x10;
	rv += (tlsf_ffs(0x80008000) == 15) ? 0 : 0x20;
	rv += (tlsf_fls(0x80000008) == 31) ? 0 : 0x40;
	rv += (tlsf_fls(0x7FFFFFFF) == 30) ? 0 : 0x80;

#if defined (TLSF_64BIT)
	rv += (tlsf_fls_sizet(0x80000000) == 31) ? 0 : 0x100;
	rv += (tlsf_fls_sizet(0x100000000) == 32) ? 0 : 0x200;
	rv += (tlsf_fls_sizet(0xffffffffffffffff) == 63) ? 0 : 0x400;
#endif

	if (rv)
	{
		printf("tlsf_create: %x ffs/fls tests failed!\n", rv);
	}
	return rv;
}
#endif

tlsf_t tlsf_create(void* mem)
{
#if _DEBUG
	if (test_ffs_fls())
	{
		return 0;
	}
#endif

	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
	{
		printf("tlsf_create: Memory must be aligned to %u bytes.\n",
			(unsigned int)ALIGN_SIZE);
		return 0;
	}

	control_construct(tlsf_cast(control_t*, mem));

	return tlsf_cast(tlsf_t, mem);
}

tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
{
	tlsf_t tlsf = tlsf_create(mem);
	tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
	return tlsf;
}
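
/*
** Typical usage (illustrative only; the arena name and size are
** arbitrary, and the backing memory must be ALIGN_SIZE-aligned):
**
**   static char arena[64 * 1024];
**   tlsf_t t = tlsf_create_with_pool(arena, sizeof(arena));
**   void* p = tlsf_malloc(t, 128);
**   tlsf_free(t, p);
**
** The first tlsf_size() bytes of the arena hold the control structure;
** the rest becomes the initial pool.
*/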

void tlsf_destroy(tlsf_t tlsf)
{
	/* Nothing to do. */
	(void)tlsf;
}

pool_t tlsf_get_pool(tlsf_t tlsf)
{
	return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
}

void* tlsf_malloc(tlsf_t tlsf, size_t size)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
	block_header_t* block = block_locate_free(control, adjust);
	return block_prepare_used(control, block, adjust);
}

void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

	/*
	** We must allocate an additional minimum block size bytes so that if
	** our free block leaves an alignment gap that is smaller, we can
	** trim a leading free block and release it back to the pool. We must
	** do this because the previous physical block is in use, therefore
	** the prev_phys_block field is not valid, and we can't simply adjust
	** the size of that block.
	*/
	const size_t gap_minimum = sizeof(block_header_t);
	const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

	/* If alignment is less than or equal to base alignment, we're done. */
	const size_t aligned_size = (align <= ALIGN_SIZE) ? adjust : size_with_gap;

	block_header_t* block = block_locate_free(control, aligned_size);

	/* This can't be a static assert. */
	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

	if (block)
	{
		void* ptr = block_to_ptr(block);
		void* aligned = align_ptr(ptr, align);
		size_t gap = tlsf_cast(size_t,
			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

		/* If gap size is too small, offset to next aligned boundary. */
		if (gap && gap < gap_minimum)
		{
			const size_t gap_remain = gap_minimum - gap;
			const size_t offset = tlsf_max(gap_remain, align);
			const void* next_aligned = tlsf_cast(void*,
				tlsf_cast(tlsfptr_t, aligned) + offset);

			aligned = align_ptr(next_aligned, align);
			gap = tlsf_cast(size_t,
				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
		}

		if (gap)
		{
			tlsf_assert(gap >= gap_minimum && "gap size too small");
			block = block_trim_free_leading(control, block, gap);
		}
	}

	return block_prepare_used(control, block, adjust);
}
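
/*
** Example of the gap logic (32-bit build, gap_minimum = 16): for
** tlsf_memalign(t, 64, 100) the allocator searches for
** align_up(100 + 64 + 16, 64) = 192 bytes. If the block it finds starts,
** say, 8 bytes before a 64-byte boundary, that 8-byte gap is too small to
** hold a free-block header, so the code advances to the next 64-byte
** boundary, making the gap 72 bytes, and splits it off as a leading free
** block.
*/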

void tlsf_free(tlsf_t tlsf, void* ptr)
{
	/* Don't attempt to free a NULL pointer. */
	if (ptr)
	{
		control_t* control = tlsf_cast(control_t*, tlsf);
		block_header_t* block = block_from_ptr(ptr);
		tlsf_assert(!block_is_free(block) && "block already marked as free");
		block_mark_as_free(block);
		block = block_merge_prev(control, block);
		block = block_merge_next(control, block);
		block_insert(control, block);
	}
}

/*
** The TLSF block information provides us with enough information to
** provide a reasonably intelligent implementation of realloc, growing or
** shrinking the currently allocated block as required.
**
** This routine handles the somewhat esoteric edge cases of realloc:
** - a non-zero size with a null pointer will behave like malloc
** - a zero size with a non-null pointer will behave like free
** - a request that cannot be satisfied will leave the original buffer
**   untouched
** - an extended buffer size will leave the newly-allocated area with
**   contents undefined
*/
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	void* p = 0;

	/* Zero-size requests are treated as free. */
	if (ptr && size == 0)
	{
		tlsf_free(tlsf, ptr);
	}
	/* Requests with NULL pointers are treated as malloc. */
	else if (!ptr)
	{
		p = tlsf_malloc(tlsf, size);
	}
	else
	{
		block_header_t* block = block_from_ptr(ptr);
		block_header_t* next = block_next(block);

		const size_t cursize = block_size(block);
		const size_t combined = cursize + block_size(next) + block_header_overhead;
		const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

		tlsf_assert(!block_is_free(block) && "block already marked as free");

		/*
		** If the next block is used, or when combined with the current
		** block, does not offer enough space, we must reallocate and copy.
		*/
		if (adjust > cursize && (!block_is_free(next) || adjust > combined))
		{
			p = tlsf_malloc(tlsf, size);
			if (p)
			{
				const size_t minsize = tlsf_min(cursize, size);
				memcpy(p, ptr, minsize);
				tlsf_free(tlsf, ptr);
			}
		}
		else
		{
			/* Do we need to expand to the next block? */
			if (adjust > cursize)
			{
				block_merge_next(control, block);
				block_mark_as_used(block);
			}

			/* Trim the resulting block and return the original pointer. */
			block_trim_used(control, block, adjust);
			p = ptr;
		}
	}

	return p;
}
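
/*
** The edge cases above in code form (illustrative):
**
**   void* p = tlsf_realloc(t, NULL, 64);  behaves like tlsf_malloc(t, 64)
**   p = tlsf_realloc(t, p, 256);          grows in place when the next
**                                         physical block is free and large
**                                         enough; otherwise allocates,
**                                         copies, and frees
**   p = tlsf_realloc(t, p, 0);            behaves like tlsf_free(t, p)
**                                         and returns NULL
*/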