source: EcnlProtoTool/trunk/ntshell/tlsf/tlsf.c@ 279

Last change on this file since 279 was 279, checked in by coas-nagasima, 7 years ago

ファイルを追加、更新。

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
  • Property svn:mime-type set to text/x-csrc
File size: 32.7 KB
Line 
1#include <assert.h>
2#include <limits.h>
3#include <stddef.h>
4#include <stdio.h>
5#include <stdlib.h>
6#include <string.h>
7#include "syssvc/syslog.h"
8
9#include "tlsf.h"
10
11#undef _MSC_VER
12
13#if defined(__cplusplus)
14#define tlsf_decl inline
15#else
16#define tlsf_decl static
17#endif
18
19/*
20** Architecture-specific bit manipulation routines.
21**
22** TLSF achieves O(1) cost for malloc and free operations by limiting
23** the search for a free block to a free list of guaranteed size
24** adequate to fulfill the request, combined with efficient free list
25** queries using bitmasks and architecture-specific bit-manipulation
26** routines.
27**
28** Most modern processors provide instructions to count leading zeroes
29** in a word, find the lowest and highest set bit, etc. These
30** specific implementations will be used when available, falling back
31** to a reasonably efficient generic implementation.
32**
33** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
34** ffs/fls return 1-32 by default, returning 0 for error.
35*/
36
37/*
38** Detect whether or not we are building for a 32- or 64-bit (LP/LLP)
39** architecture. There is no reliable portable method at compile-time.
40*/
41#if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) \
42 || defined (_WIN64) || defined (__LP64__) || defined (__LLP64__)
43#define TLSF_64BIT
44#endif
45
46/*
47** gcc 3.4 and above have builtin support, specialized for architecture.
48** Some compilers masquerade as gcc; patchlevel test filters them out.
49*/
50#if defined (__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
51 && defined (__GNUC_PATCHLEVEL__)
52
53#if defined (__SNC__)
54/* SNC for Playstation 3. */
55
tlsf_decl int tlsf_ffs(unsigned int word)
{
	/* Find-first-set: isolate the lowest set bit, then locate it with a
	** leading-zero count. Returns the 0-based bit index; word must not be 0.
	*/
	const unsigned int lowest_bit = word & (~word + 1);
	return (32 - __builtin_clz(lowest_bit)) - 1;
}
62
63#else
64
tlsf_decl int tlsf_ffs(unsigned int word)
{
	/* __builtin_ffs is 1-based (0 when no bit is set); shift to the
	** 0-based convention TLSF uses, yielding -1 for a zero word.
	*/
	const int one_based = __builtin_ffs(word);
	return one_based - 1;
}
69
70#endif
71
tlsf_decl int tlsf_fls(unsigned int word)
{
	/* Find-last-set: 0-based index of the highest set bit, -1 for 0. */
	if (!word)
	{
		return -1;
	}
	return 31 - __builtin_clz(word);
}
77
78#elif defined (_MSC_VER) && (_MSC_VER >= 1400) && (defined (_M_IX86) || defined (_M_X64))
79/* Microsoft Visual C++ support on x86/X64 architectures. */
80
81#include <intrin.h>
82
83#pragma intrinsic(_BitScanReverse)
84#pragma intrinsic(_BitScanForward)
85
/* Find-last-set via _BitScanReverse (highest set bit); -1 when word is 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	unsigned long index;
	return _BitScanReverse(&index, word) ? index : -1;
}
91
/* Find-first-set via _BitScanForward (lowest set bit); -1 when word is 0. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	unsigned long index;
	return _BitScanForward(&index, word) ? index : -1;
}
97
98#elif defined (_MSC_VER) && defined (_M_PPC)
99/* Microsoft Visual C++ support on PowerPC architectures. */
100
101#include <ppcintrinsics.h>
102
/* Find-last-set: _CountLeadingZeros(0) yields 32, so a zero word maps to -1. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	const int bit = 32 - _CountLeadingZeros(word);
	return bit - 1;
}
108
/* Find-first-set: isolate the lowest set bit, then count leading zeros. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	/* (word & -word) keeps only the least significant set bit. */
	const unsigned int reverse = word & (~word + 1);
	const int bit = 32 - _CountLeadingZeros(reverse);
	return bit - 1;
}
115
116#elif defined (__ARMCC_VERSION)
117/* RealView Compilation Tools for ARM */
118
/* Find-first-set using the ARM __clz intrinsic on the isolated lowest bit. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	/* (word & -word) keeps only the least significant set bit. */
	const unsigned int reverse = word & (~word + 1);
	const int bit = 32 - __clz(reverse);
	return bit - 1;
}
125
/* Find-last-set using the ARM __clz intrinsic; -1 when word is 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	const int bit = word ? 32 - __clz(word) : 0;
	return bit - 1;
}
131
132#elif defined (__ghs__)
133/* Green Hills support for PowerPC */
134
135#include <ppc_ghs.h>
136
/* Find-first-set using the Green Hills __CLZ32 intrinsic. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	/* (word & -word) keeps only the least significant set bit. */
	const unsigned int reverse = word & (~word + 1);
	const int bit = 32 - __CLZ32(reverse);
	return bit - 1;
}
143
/* Find-last-set using the Green Hills __CLZ32 intrinsic; -1 when word is 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	const int bit = word ? 32 - __CLZ32(word) : 0;
	return bit - 1;
}
149
150#else
151/* Fall back to generic implementation. */
152
tlsf_decl int tlsf_fls_generic(unsigned int word)
{
	/* Portable find-last-set by binary search over halves of the word.
	** Returns the 1-based position of the highest set bit, 0 when none.
	*/
	int bit = word ? 32 : 31;

	if (!(word & 0xffff0000)) { word <<= 16; bit -= 16; }
	if (!(word & 0xff000000)) { word <<= 8; bit -= 8; }
	if (!(word & 0xf0000000)) { word <<= 4; bit -= 4; }
	if (!(word & 0xc0000000)) { word <<= 2; bit -= 2; }
	if (!(word & 0x80000000)) { word <<= 1; bit -= 1; }

	return bit;
}
166
167/* Implement ffs in terms of fls. */
168tlsf_decl int tlsf_ffs(unsigned int word)
169{
170 return tlsf_fls_generic(word & (~word + 1)) - 1;
171}
172
173tlsf_decl int tlsf_fls(unsigned int word)
174{
175 return tlsf_fls_generic(word) - 1;
176}
177
178#endif
179
180/* Possibly 64-bit version of tlsf_fls. */
181#if defined (TLSF_64BIT)
182tlsf_decl int tlsf_fls_sizet(size_t size)
183{
184 int high = (int)(size >> 32);
185 int bits = 0;
186 if (high)
187 {
188 bits = 32 + tlsf_fls(high);
189 }
190 else
191 {
192 bits = tlsf_fls((int)size & 0xffffffff);
193
194 }
195 return bits;
196}
197#else
198#define tlsf_fls_sizet tlsf_fls
199#endif
200
201#undef tlsf_decl
202
203/*
204** Constants.
205*/
206
207/* Public constants: may be modified. */
enum tlsf_public
{
	/* log2 of number of linear subdivisions of block sizes. Larger
	** values require more memory in the control structure. Values of
	** 4 or 5 are typical. SL_INDEX_COUNT (= 1 << SL_INDEX_COUNT_LOG2)
	** second-level lists are kept per first-level bucket.
	*/
	SL_INDEX_COUNT_LOG2 = 5,
};
216
217/* Private constants: do not modify. */
enum tlsf_private
{
#if defined (TLSF_64BIT)
	/* All allocation sizes and addresses are aligned to 8 bytes. */
	ALIGN_SIZE_LOG2 = 3,
#else
	/* All allocation sizes and addresses are aligned to 4 bytes. */
	ALIGN_SIZE_LOG2 = 2,
#endif
	ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

	/*
	** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
	** However, because we linearly subdivide the second-level lists, and
	** our minimum size granularity is 4 bytes, it doesn't make sense to
	** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
	** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
	** trying to split size ranges into more slots than we have available.
	** Instead, we calculate the minimum threshold size, and place all
	** blocks below that size into the 0th first-level list.
	*/

#if defined (TLSF_64BIT)
	/*
	** TODO: We can increase this to support larger sizes, at the expense
	** of more overhead in the TLSF structure.
	*/
	FL_INDEX_MAX = 32,
#else
	FL_INDEX_MAX = 30,
#endif
	SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
	/* First-level buckets below this shift collapse into list 0. */
	FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
	FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

	/* Requests smaller than this share the single "small" first-level list. */
	SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
255
256/*
257** Cast and min/max macros.
258*/
259
260#define tlsf_cast(t, exp) ((t) (exp))
261#define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
262#define tlsf_max(a, b) ((a) > (b) ? (a) : (b))
263
264/*
265** Set assert macro, if it has not been provided by the user.
266*/
267#if !defined (tlsf_assert)
268#define tlsf_assert assert
269#endif
270
271/*
272** Static assertion mechanism.
273*/
274
275#define _tlsf_glue2(x, y) x ## y
276#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
277#define tlsf_static_assert(exp) \
278 typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
279
280/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
281tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
282tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
283tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
284
285/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
286tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
287
288/* Ensure we've properly tuned our sizes. */
289tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
290
291/*
292** Data structures and associated constants.
293*/
294
295/*
296** Block header structure.
297**
298** There are several implementation subtleties involved:
299** - The prev_phys_block field is only valid if the previous block is free.
300** - The prev_phys_block field is actually stored at the end of the
301** previous block. It appears at the beginning of this structure only to
302** simplify the implementation.
303** - The next_free / prev_free fields are only valid if the block is free.
304*/
typedef struct block_header_t
{
	/* Points to the previous physical block. Only valid when the previous
	** block is free; physically stored at the tail of that previous block. */
	struct block_header_t* prev_phys_block;

	/* The size of this block, excluding the block header. The two low bits
	** carry the free/prev-free status flags (see block_header_*_bit). */
	size_t size;

	/* Next and previous free blocks. Only valid while this block is free. */
	struct block_header_t* next_free;
	struct block_header_t* prev_free;
} block_header_t;
317
318/*
319** Since block sizes are always at least a multiple of 4, the two least
320** significant bits of the size field are used to store the block status:
321** - bit 0: whether block is busy or free
322** - bit 1: whether previous block is busy or free
323*/
static const size_t block_header_free_bit = 1 << 0;   /* this block is free */
static const size_t block_header_prev_free_bit = 1 << 1;   /* previous physical block is free */

/*
** The size of the block header exposed to used blocks is the size field.
** The prev_phys_block field is stored *inside* the previous free block.
*/
static const size_t block_header_overhead = sizeof(size_t);

/* User data starts directly after the size field in a used block. */
static const size_t block_start_offset =
	offsetof(block_header_t, size) + sizeof(size_t);
336
/*
** A free block must be large enough to store its header minus the size of
** the prev_phys_block field, and no larger than the number of addressable
** bits for FL_INDEX.
*/
static const size_t block_size_min =
	sizeof(block_header_t) - sizeof(block_header_t*);
static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;
345
346
/* The TLSF control structure: one per allocator instance, placed at the
** start of the memory handed to tlsf_create. */
typedef struct control_t
{
	/* Empty lists point at this block to indicate they are free. */
	block_header_t block_null;

	/* Bitmaps for free lists: fl_bitmap has one bit per first-level bucket,
	** sl_bitmap[fl] one bit per second-level list within that bucket. */
	unsigned int fl_bitmap;
	unsigned int sl_bitmap[FL_INDEX_COUNT];

	/* Head of free lists, indexed by [first-level][second-level]. */
	block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} control_t;
360
361/* A type used for casting when doing pointer arithmetic. */
362typedef ptrdiff_t tlsfptr_t;
363
364/*
365** block_header_t member functions.
366*/
367
368static size_t block_size(const block_header_t* block)
369{
370 return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
371}
372
373static void block_set_size(block_header_t* block, size_t size)
374{
375 const size_t oldsize = block->size;
376 block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
377}
378
379static int block_is_last(const block_header_t* block)
380{
381 return block_size(block) == 0;
382}
383
/* Test bit 0 of the size field: whether this block is free. */
static int block_is_free(const block_header_t* block)
{
	return tlsf_cast(int, block->size & block_header_free_bit);
}
388
/* Mark this block free (set bit 0 of the size field). */
static void block_set_free(block_header_t* block)
{
	block->size |= block_header_free_bit;
}
393
/* Mark this block used (clear bit 0 of the size field). */
static void block_set_used(block_header_t* block)
{
	block->size &= ~block_header_free_bit;
}
398
/* Test bit 1 of the size field: whether the previous physical block is free. */
static int block_is_prev_free(const block_header_t* block)
{
	return tlsf_cast(int, block->size & block_header_prev_free_bit);
}
403
/* Record that the previous physical block is free (set bit 1). */
static void block_set_prev_free(block_header_t* block)
{
	block->size |= block_header_prev_free_bit;
}
408
/* Record that the previous physical block is used (clear bit 1). */
static void block_set_prev_used(block_header_t* block)
{
	block->size &= ~block_header_prev_free_bit;
}
413
414static block_header_t* block_from_ptr(const void* ptr)
415{
416 return tlsf_cast(block_header_t*,
417 tlsf_cast(unsigned char*, ptr) - block_start_offset);
418}
419
420static void* block_to_ptr(const block_header_t* block)
421{
422 return tlsf_cast(void*,
423 tlsf_cast(unsigned char*, block) + block_start_offset);
424}
425
/* Return location of next block after block of given size.
** Note: size may be negative (via tlsfptr_t) to step backwards. */
static block_header_t* offset_to_block(const void* ptr, size_t size)
{
	return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
}
431
/* Return location of previous block. Only legal when the previous block is
** free, because prev_phys_block lives inside that block's storage. */
static block_header_t* block_prev(const block_header_t* block)
{
	tlsf_assert(block_is_prev_free(block) && "previous block must be free");
	return block->prev_phys_block;
}
438
/* Return location of next existing block. The header overhead is subtracted
** because block->size already spans the next block's prev_phys_block slot. */
static block_header_t* block_next(const block_header_t* block)
{
	block_header_t* next = offset_to_block(block_to_ptr(block),
		block_size(block) - block_header_overhead);
	tlsf_assert(!block_is_last(block));
	return next;
}
447
/* Link a new block with its physical neighbor, return the neighbor. */
static block_header_t* block_link_next(block_header_t* block)
{
	block_header_t* next = block_next(block);
	next->prev_phys_block = block;
	return next;
}
455
/* Flag a block free and propagate the prev-free bit to its neighbor. */
static void block_mark_as_free(block_header_t* block)
{
	/* Link the block to the next block, first. */
	block_header_t* next = block_link_next(block);
	block_set_prev_free(next);
	block_set_free(block);
}
463
/* Flag a block used and clear the neighbor's prev-free bit. */
static void block_mark_as_used(block_header_t* block)
{
	block_header_t* next = block_next(block);
	block_set_prev_used(next);
	block_set_used(block);
}
470
static size_t align_up(size_t x, size_t align)
{
	/* Round x up to the nearest multiple of align (a power of two). */
	const size_t mask = align - 1;
	tlsf_assert(0 == (align & mask) && "must align to a power of two");
	return (x + mask) & ~mask;
}
476
static size_t align_down(size_t x, size_t align)
{
	/* Round x down to the nearest multiple of align (a power of two). */
	const size_t mask = align - 1;
	tlsf_assert(0 == (align & mask) && "must align to a power of two");
	return x & ~mask;
}
482
/* Round a pointer up to the nearest multiple of align (a power of two). */
static void* align_ptr(const void* ptr, size_t align)
{
	const tlsfptr_t aligned =
		(tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
	tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
	return tlsf_cast(void*, aligned);
}
490
491/*
492** Adjust an allocation size to be aligned to word size, and no smaller
493** than internal minimum.
494*/
495static size_t adjust_request_size(size_t size, size_t align)
496{
497 size_t adjust = 0;
498 if (size && size < block_size_max)
499 {
500 const size_t aligned = align_up(size, align);
501 adjust = tlsf_max(aligned, block_size_min);
502 }
503 return adjust;
504}
505
506/*
507** TLSF utility functions. In most cases, these are direct translations of
508** the documentation found in the white paper.
509*/
510
/* Map a block size to its exact (fl, sl) free-list indices.
** Small sizes all land in fl 0, linearly subdivided; larger sizes use the
** size's top bit for fl and the next SL_INDEX_COUNT_LOG2 bits for sl. */
static void mapping_insert(size_t size, int* fli, int* sli)
{
	int fl, sl;
	if (size < SMALL_BLOCK_SIZE)
	{
		/* Store small blocks in first list. */
		fl = 0;
		sl = tlsf_cast(int, size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
	}
	else
	{
		fl = tlsf_fls_sizet(size);
		/* XOR strips the implicit leading bit, leaving the sl sub-index. */
		sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
		/* Rebase fl so the first usable bucket is index 1 (0 is "small"). */
		fl -= (FL_INDEX_SHIFT - 1);
	}
	*fli = fl;
	*sli = sl;
}
529
/* This version rounds up to the next block size (for allocations), so any
** block found in the resulting list is guaranteed large enough. */
static void mapping_search(size_t size, int* fli, int* sli)
{
	if (size >= SMALL_BLOCK_SIZE)
	{
		/* Round up to the next sl slot boundary for this fl bucket. */
		const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1;
		size += round;
	}
	mapping_insert(size, fli, sli);
}
540
/* Find a free block at least as large as the class (fl, sl), updating
** *fli/*sli to the class actually found; returns 0 when the heap is
** exhausted. */
static block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
{
	int fl = *fli;
	int sl = *sli;

	/*
	** First, search for a block in the list associated with the given
	** fl/sl index.
	*/
	unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
	if (!sl_map)
	{
		/* No block exists. Search in the next largest first-level list. */
		const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
		if (!fl_map)
		{
			/* No free blocks available, memory has been exhausted. */
			return 0;
		}

		fl = tlsf_ffs(fl_map);
		*fli = fl;
		sl_map = control->sl_bitmap[fl];
	}
	tlsf_assert(sl_map && "internal error - second level bitmap is null");
	sl = tlsf_ffs(sl_map);
	*sli = sl;

	/* Return the first block in the free list. */
	return control->blocks[fl][sl];
}
572
573/* Remove a free block from the free list.*/
574static void remove_free_block(control_t* control, block_header_t* block, int fl, int sl)
575{
576 block_header_t* prev = block->prev_free;
577 block_header_t* next = block->next_free;
578 tlsf_assert(prev && "prev_free field can not be null");
579 tlsf_assert(next && "next_free field can not be null");
580 next->prev_free = prev;
581 prev->next_free = next;
582
583 /* If this block is the head of the free list, set new head. */
584 if (control->blocks[fl][sl] == block)
585 {
586 control->blocks[fl][sl] = next;
587
588 /* If the new head is null, clear the bitmap. */
589 if (next == &control->block_null)
590 {
591 control->sl_bitmap[fl] &= ~(1 << sl);
592
593 /* If the second bitmap is now empty, clear the fl bitmap. */
594 if (!control->sl_bitmap[fl])
595 {
596 control->fl_bitmap &= ~(1 << fl);
597 }
598 }
599 }
600}
601
602/* Insert a free block into the free block list. */
603static void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
604{
605 block_header_t* current = control->blocks[fl][sl];
606 tlsf_assert(current && "free list cannot have a null entry");
607 tlsf_assert(block && "cannot insert a null entry into the free list");
608 block->next_free = current;
609 block->prev_free = &control->block_null;
610 current->prev_free = block;
611
612 tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
613 && "block not aligned properly");
614 /*
615 ** Insert the new block at the head of the list, and mark the first-
616 ** and second-level bitmaps appropriately.
617 */
618 control->blocks[fl][sl] = block;
619 control->fl_bitmap |= (1 << fl);
620 control->sl_bitmap[fl] |= (1 << sl);
621}
622
623/* Remove a given block from the free list. */
624static void block_remove(control_t* control, block_header_t* block)
625{
626 int fl, sl;
627 mapping_insert(block_size(block), &fl, &sl);
628 remove_free_block(control, block, fl, sl);
629}
630
631/* Insert a given block into the free list. */
632static void block_insert(control_t* control, block_header_t* block)
633{
634 int fl, sl;
635 mapping_insert(block_size(block), &fl, &sl);
636 insert_free_block(control, block, fl, sl);
637}
638
639static int block_can_split(block_header_t* block, size_t size)
640{
641 return block_size(block) >= sizeof(block_header_t) + size;
642}
643
/* Split a block into two, the second of which is free. The first keeps
** exactly `size` bytes of payload; the remainder becomes a new free block. */
static block_header_t* block_split(block_header_t* block, size_t size)
{
	/* Calculate the amount of space left in the remaining block. */
	block_header_t* remaining =
		offset_to_block(block_to_ptr(block), size - block_header_overhead);

	const size_t remain_size = block_size(block) - (size + block_header_overhead);

	tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
		&& "remaining block not aligned properly");

	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
	block_set_size(remaining, remain_size);
	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

	block_set_size(block, size);
	block_mark_as_free(remaining);

	return remaining;
}
665
/* Absorb a free block's storage into an adjacent previous free block. */
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
{
	tlsf_assert(!block_is_last(prev) && "previous block can't be last");
	/* Note: Leaves flags untouched. */
	prev->size += block_size(block) + block_header_overhead;
	block_link_next(prev);
	return prev;
}
675
/* Merge a just-freed block with an adjacent previous free block.
** Returns the (possibly relocated) merged block. */
static block_header_t* block_merge_prev(control_t* control, block_header_t* block)
{
	if (block_is_prev_free(block))
	{
		block_header_t* prev = block_prev(block);
		tlsf_assert(prev && "prev physical block can't be null");
		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
		block_remove(control, prev);
		block = block_absorb(prev, block);
	}

	return block;
}
690
/* Merge a just-freed block with an adjacent free block that follows it. */
static block_header_t* block_merge_next(control_t* control, block_header_t* block)
{
	block_header_t* next = block_next(block);
	tlsf_assert(next && "next physical block can't be null");

	if (block_is_free(next))
	{
		tlsf_assert(!block_is_last(block) && "previous block can't be last");
		block_remove(control, next);
		block = block_absorb(block, next);
	}

	return block;
}
706
/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(control_t* control, block_header_t* block, size_t size)
{
	tlsf_assert(block_is_free(block) && "block must be free");
	if (block_can_split(block, size))
	{
		block_header_t* remaining_block = block_split(block, size);
		block_link_next(block);
		block_set_prev_free(remaining_block);
		block_insert(control, remaining_block);
	}
}
719
/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(control_t* control, block_header_t* block, size_t size)
{
	tlsf_assert(!block_is_free(block) && "block must be used");
	if (block_can_split(block, size))
	{
		/* If the next block is free, we must coalesce. */
		block_header_t* remaining_block = block_split(block, size);
		block_set_prev_used(remaining_block);

		remaining_block = block_merge_next(control, remaining_block);
		block_insert(control, remaining_block);
	}
}
734
/* Trim leading space off a free block (used by memalign to discard the
** alignment gap); returns the trailing block that satisfies the request. */
static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
{
	block_header_t* remaining_block = block;
	if (block_can_split(block, size))
	{
		/* We want the 2nd block. */
		remaining_block = block_split(block, size - block_header_overhead);
		block_set_prev_free(remaining_block);

		block_link_next(block);
		block_insert(control, block);
	}

	return remaining_block;
}
750
/* Locate and unlink a free block of at least `size` bytes; 0 if none. */
static block_header_t* block_locate_free(control_t* control, size_t size)
{
	int fl = 0, sl = 0;
	block_header_t* block = 0;

	if (size)
	{
		mapping_search(size, &fl, &sl);
		block = search_suitable_block(control, &fl, &sl);
	}

	if (block)
	{
		tlsf_assert(block_size(block) >= size);
		remove_free_block(control, block, fl, sl);
	}

	return block;
}
770
771static void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
772{
773 void* p = 0;
774 if (block)
775 {
776 tlsf_assert(size && "size must be non-zero");
777 block_trim_free(control, block, size);
778 block_mark_as_used(block);
779 p = block_to_ptr(block);
780 }
781 return p;
782}
783
784/* Clear structure and point all empty lists at the null block. */
785static void control_construct(control_t* control)
786{
787 int i, j;
788
789 control->block_null.next_free = &control->block_null;
790 control->block_null.prev_free = &control->block_null;
791
792 control->fl_bitmap = 0;
793 for (i = 0; i < FL_INDEX_COUNT; ++i)
794 {
795 control->sl_bitmap[i] = 0;
796 for (j = 0; j < SL_INDEX_COUNT; ++j)
797 {
798 control->blocks[i][j] = &control->block_null;
799 }
800 }
801}
802
803/*
804** Debugging utilities.
805*/
806
/* Walker state for tlsf_check_pool: tracks the previous block's free
** status and accumulates a count of failed consistency checks. */
typedef struct integrity_t
{
	int prev_status;   /* free status of the previously-visited block */
	int status;        /* running total of failed checks (0 == healthy) */
} integrity_t;
812
813#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }
814
/* Per-block callback for tlsf_check_pool: verifies that each block's
** prev-free flag matches the previous block's status and that the walked
** size matches the header. Failures accumulate in integ->status. */
static void integrity_walker(void* ptr, size_t size, int used, void* user)
{
	block_header_t* block = block_from_ptr(ptr);
	integrity_t* integ = tlsf_cast(integrity_t*, user);
	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
	const int this_status = block_is_free(block) ? 1 : 0;
	const size_t this_block_size = block_size(block);

	int status = 0;
	(void)used;
	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
	tlsf_insist(size == this_block_size && "block size incorrect");

	integ->prev_status = this_status;
	integ->status += status;
}
831
832int tlsf_check(tlsf_t tlsf)
833{
834 int i, j;
835
836 control_t* control = tlsf_cast(control_t*, tlsf);
837 int status = 0;
838
839 /* Check that the free lists and bitmaps are accurate. */
840 for (i = 0; i < FL_INDEX_COUNT; ++i)
841 {
842 for (j = 0; j < SL_INDEX_COUNT; ++j)
843 {
844 const int fl_map = control->fl_bitmap & (1 << i);
845 const int sl_list = control->sl_bitmap[i];
846 const int sl_map = sl_list & (1 << j);
847 const block_header_t* block = control->blocks[i][j];
848
849 /* Check that first- and second-level lists agree. */
850 if (!fl_map)
851 {
852 tlsf_insist(!sl_map && "second-level map must be null");
853 }
854
855 if (!sl_map)
856 {
857 tlsf_insist(block == &control->block_null && "block list must be null");
858 continue;
859 }
860
861 /* Check that there is at least one free block. */
862 tlsf_insist(sl_list && "no free blocks in second-level map");
863 tlsf_insist(block != &control->block_null && "block should not be null");
864
865 while (block != &control->block_null)
866 {
867 int fli, sli;
868 tlsf_insist(block_is_free(block) && "block should be free");
869 tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
870 tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
871 tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
872 tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
873
874 mapping_insert(block_size(block), &fli, &sli);
875 tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
876 block = block->next_free;
877 }
878 }
879 }
880
881 return status;
882}
883
884#undef tlsf_insist
885
/* Default pool walker: logs each block's user pointer, status, size, and
** header address via syslog. */
static void default_walker(void* ptr, size_t size, int used, void* user)
{
	(void)user;
	syslog(LOG_INFO, "\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
}
891
/* Walk every physical block in a pool, invoking `walker` (or a logging
** default) with each block's user pointer, size, and used flag. Stops at
** the zero-size sentinel block. */
void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
{
	tlsf_walker pool_walker = walker ? walker : default_walker;
	/* The first header sits block_header_overhead bytes before the pool. */
	block_header_t* block =
		offset_to_block(pool, -(int)block_header_overhead);

	while (block && !block_is_last(block))
	{
		pool_walker(
			block_to_ptr(block),
			block_size(block),
			!block_is_free(block),
			user);
		block = block_next(block);
	}
}
908
/* Usable size of an allocation returned by tlsf_malloc, or 0 for NULL. */
size_t tlsf_block_size(void* ptr)
{
	if (!ptr)
	{
		return 0;
	}
	return block_size(block_from_ptr(ptr));
}
919
920int tlsf_check_pool(pool_t pool)
921{
922 /* Check that the blocks are physically correct. */
923 integrity_t integ = { 0, 0 };
924 tlsf_walk_pool(pool, integrity_walker, &integ);
925
926 return integ.status;
927}
928
/*
** Size of the TLSF structures in a given memory block passed to
** tlsf_create, equal to the size of a control_t
*/
size_t tlsf_size(void)
{
	return sizeof(control_t);
}
937
/* Alignment guaranteed for every allocation (4 bytes, 8 on 64-bit). */
size_t tlsf_align_size(void)
{
	return ALIGN_SIZE;
}
942
/* Smallest allocation the allocator will carve out. */
size_t tlsf_block_size_min(void)
{
	return block_size_min;
}
947
/* Largest allocation the allocator can represent (1 << FL_INDEX_MAX). */
size_t tlsf_block_size_max(void)
{
	return block_size_max;
}
952
/*
** Overhead of the TLSF structures in a given memory block passed to
** tlsf_add_pool, equal to the overhead of a free block and the
** sentinel block.
*/
size_t tlsf_pool_overhead(void)
{
	return 2 * block_header_overhead;
}
962
/* Per-allocation bookkeeping cost (the exposed size field). */
size_t tlsf_alloc_overhead(void)
{
	return block_header_overhead;
}
967
/* Add a region of memory as a pool the allocator can serve from.
** `mem` must be ALIGN_SIZE-aligned; `bytes` must leave room for at least
** one minimum-size block plus pool overhead. Returns the pool handle
** (== mem) on success, 0 on failure. */
pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
{
	block_header_t* block;
	block_header_t* next;

	const size_t pool_overhead = tlsf_pool_overhead();
	/* NOTE(review): if bytes < pool_overhead this subtraction wraps, but the
	** huge result is then rejected by the pool_bytes > block_size_max check. */
	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);

	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
	{
		syslog(LOG_INFO, "tlsf_add_pool: Memory must be aligned by %u bytes.\n",
			(unsigned int)ALIGN_SIZE);
		return 0;
	}

	if (pool_bytes < block_size_min || pool_bytes > block_size_max)
	{
#if defined (TLSF_64BIT)
		syslog(LOG_INFO, "tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
			(unsigned int)(pool_overhead + block_size_min),
			(unsigned int)((pool_overhead + block_size_max) / 256));
#else
		syslog(LOG_INFO, "tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
			(unsigned int)(pool_overhead + block_size_min),
			(unsigned int)(pool_overhead + block_size_max));
#endif
		return 0;
	}

	/*
	** Create the main free block. Offset the start of the block slightly
	** so that the prev_phys_block field falls outside of the pool -
	** it will never be used.
	*/
	block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
	block_set_size(block, pool_bytes);
	block_set_free(block);
	block_set_prev_used(block);
	block_insert(tlsf_cast(control_t*, tlsf), block);

	/* Split the block to create a zero-size sentinel block. */
	next = block_link_next(block);
	block_set_size(next, 0);
	block_set_used(next);
	block_set_prev_free(next);

	return mem;
}
1016
/* Detach a pool from the allocator. The entire pool must be free (one
** free block followed by the sentinel), i.e. no outstanding allocations. */
void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);

	int fl = 0, sl = 0;

	tlsf_assert(block_is_free(block) && "block should be free");
	tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
	tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");

	mapping_insert(block_size(block), &fl, &sl);
	remove_free_block(control, block, fl, sl);
}
1031
1032/*
1033** TLSF main interface.
1034*/
1035
1036#if _DEBUG
/* Self-test of the bit-scan primitives; returns 0 on success, else a
** bitmask identifying which checks failed (also logged). Debug builds
** run this from tlsf_create. */
int test_ffs_fls()
{
	/* Verify ffs/fls work properly. */
	int rv = 0;
	rv += (tlsf_ffs(0) == -1) ? 0 : 0x1;
	rv += (tlsf_fls(0) == -1) ? 0 : 0x2;
	rv += (tlsf_ffs(1) == 0) ? 0 : 0x4;
	rv += (tlsf_fls(1) == 0) ? 0 : 0x8;
	rv += (tlsf_ffs(0x80000000) == 31) ? 0 : 0x10;
	rv += (tlsf_ffs(0x80008000) == 15) ? 0 : 0x20;
	rv += (tlsf_fls(0x80000008) == 31) ? 0 : 0x40;
	rv += (tlsf_fls(0x7FFFFFFF) == 30) ? 0 : 0x80;

#if defined (TLSF_64BIT)
	rv += (tlsf_fls_sizet(0x80000000) == 31) ? 0 : 0x100;
	rv += (tlsf_fls_sizet(0x100000000) == 32) ? 0 : 0x200;
	rv += (tlsf_fls_sizet(0xffffffffffffffff) == 63) ? 0 : 0x400;
#endif

	if (rv)
	{
		syslog(LOG_INFO, "test_ffs_fls: %x ffs/fls tests failed.\n", rv);
	}
	return rv;
}
1062#endif
1063
/* Construct an allocator inside `mem` (must be ALIGN_SIZE-aligned and at
** least tlsf_size() bytes). Returns the tlsf handle, or 0 on bad
** alignment / failed debug self-test. No pool is attached yet. */
tlsf_t tlsf_create(void* mem)
{
#if _DEBUG
	if (test_ffs_fls())
	{
		return 0;
	}
#endif

	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
	{
		syslog(LOG_INFO, "tlsf_create: Memory must be aligned to %u bytes.\n",
			(unsigned int)ALIGN_SIZE);
		return 0;
	}

	control_construct(tlsf_cast(control_t*, mem));

	return tlsf_cast(tlsf_t, mem);
}
1084
1085tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
1086{
1087 tlsf_t tlsf = tlsf_create(mem);
1088 tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
1089 return tlsf;
1090}
1091
1092void tlsf_destroy(tlsf_t tlsf)
1093{
1094 /* Nothing to do. */
1095 (void)tlsf;
1096}
1097
1098pool_t tlsf_get_pool(tlsf_t tlsf)
1099{
1100 return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
1101}
1102
1103void* tlsf_malloc(tlsf_t tlsf, size_t size)
1104{
1105 control_t* control = tlsf_cast(control_t*, tlsf);
1106 const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
1107 block_header_t* block = block_locate_free(control, adjust);
1108 return block_prepare_used(control, block, adjust);
1109}
1110
/*
** Allocate size bytes whose returned address is a multiple of align.
** For align <= ALIGN_SIZE this degenerates to a plain tlsf_malloc.
** Otherwise a larger block is located and a leading free fragment is
** trimmed off so the user pointer lands on the requested boundary.
** Returns 0 on failure or for a zero-byte request.
*/
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

	/*
	** We must allocate an additional minimum block size bytes so that if
	** our free block will leave an alignment gap which is smaller, we can
	** trim a leading free block and release it back to the pool. We must
	** do this because the previous physical block is in use, therefore
	** the prev_phys_block field is not valid, and we can't simply adjust
	** the size of that block.
	*/
	const size_t gap_minimum = sizeof(block_header_t);
	const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

	/*
	** If alignment is less than or equals base alignment, we're done.
	** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
	*/
	const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;

	block_header_t* block = block_locate_free(control, aligned_size);

	/* This can't be a static assert. */
	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

	if (block)
	{
		/* Distance from the block's natural payload start to the first
		** align-multiple address at or after it. */
		void* ptr = block_to_ptr(block);
		void* aligned = align_ptr(ptr, align);
		size_t gap = tlsf_cast(size_t,
			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

		/* If gap size is too small, offset to next aligned boundary. */
		if (gap && gap < gap_minimum)
		{
			/* Advance by at least the shortfall (and at least one full
			** align step) so the trimmed-off leading fragment is big
			** enough to stand as a free block on its own. */
			const size_t gap_remain = gap_minimum - gap;
			const size_t offset = tlsf_max(gap_remain, align);
			const void* next_aligned = tlsf_cast(void*,
				tlsf_cast(tlsfptr_t, aligned) + offset);

			aligned = align_ptr(next_aligned, align);
			gap = tlsf_cast(size_t,
				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
		}

		if (gap)
		{
			tlsf_assert(gap >= gap_minimum && "gap size too small");
			/* Split off the leading fragment and return it to the free
			** lists; 'block' now begins at the aligned boundary. */
			block = block_trim_free_leading(control, block, gap);
		}
	}

	/* Trim any trailing excess and mark the block used (no-op if block
	** is null, mirroring tlsf_malloc's failure path). */
	return block_prepare_used(control, block, adjust);
}
1167
1168void tlsf_free(tlsf_t tlsf, void* ptr)
1169{
1170 /* Don't attempt to free a NULL pointer. */
1171 if (ptr)
1172 {
1173 control_t* control = tlsf_cast(control_t*, tlsf);
1174 block_header_t* block = block_from_ptr(ptr);
1175 tlsf_assert(!block_is_free(block) && "block already marked as free");
1176 block_mark_as_free(block);
1177 block = block_merge_prev(control, block);
1178 block = block_merge_next(control, block);
1179 block_insert(control, block);
1180 }
1181}
1182
1183/*
1184** The TLSF block information provides us with enough information to
1185** provide a reasonably intelligent implementation of realloc, growing or
1186** shrinking the currently allocated block as required.
1187**
1188** This routine handles the somewhat esoteric edge cases of realloc:
1189** - a non-zero size with a null pointer will behave like malloc
1190** - a zero size with a non-null pointer will behave like free
1191** - a request that cannot be satisfied will leave the original buffer
1192** untouched
1193** - an extended buffer size will leave the newly-allocated area with
1194** contents undefined
1195*/
1196void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
1197{
1198 control_t* control = tlsf_cast(control_t*, tlsf);
1199 void* p = 0;
1200
1201 /* Zero-size requests are treated as free. */
1202 if (ptr && size == 0)
1203 {
1204 tlsf_free(tlsf, ptr);
1205 }
1206 /* Requests with NULL pointers are treated as malloc. */
1207 else if (!ptr)
1208 {
1209 p = tlsf_malloc(tlsf, size);
1210 }
1211 else
1212 {
1213 block_header_t* block = block_from_ptr(ptr);
1214 block_header_t* next = block_next(block);
1215
1216 const size_t cursize = block_size(block);
1217 const size_t combined = cursize + block_size(next) + block_header_overhead;
1218 const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
1219
1220 tlsf_assert(!block_is_free(block) && "block already marked as free");
1221
1222 /*
1223 ** If the next block is used, or when combined with the current
1224 ** block, does not offer enough space, we must reallocate and copy.
1225 */
1226 if (adjust > cursize && (!block_is_free(next) || adjust > combined))
1227 {
1228 p = tlsf_malloc(tlsf, size);
1229 if (p)
1230 {
1231 const size_t minsize = tlsf_min(cursize, size);
1232 memcpy(p, ptr, minsize);
1233 tlsf_free(tlsf, ptr);
1234 }
1235 }
1236 else
1237 {
1238 /* Do we need to expand to the next block? */
1239 if (adjust > cursize)
1240 {
1241 block_merge_next(control, block);
1242 block_mark_as_used(block);
1243 }
1244
1245 /* Trim the resulting block and return the original pointer. */
1246 block_trim_used(control, block, adjust);
1247 p = ptr;
1248 }
1249 }
1250
1251 return p;
1252}
Note: See TracBrowser for help on using the repository browser.