source: asp3_tinet_ecnl_rx/trunk/ntshell/tlsf/tlsf.c@ 337

Last change on this file since 337 was 337, checked in by coas-nagasima, 6 years ago

ASP3版ECNLを追加

  • Property svn:eol-style set to native
  • Property svn:mime-type set to text/x-csrc;charset=UTF-8
File size: 32.7 KB
Line 
1#include <assert.h>
2#include <limits.h>
3#include <stddef.h>
4#include <stdio.h>
5#include <stdlib.h>
6#include <string.h>
7#include "util/ntstdio.h"
8
9#include "tlsf.h"
10
11#undef _MSC_VER
12
13#if defined(__cplusplus)
14#define tlsf_decl inline
15#else
16#define tlsf_decl static
17#endif
18
19extern ntstdio_t ntstdio;
20
21/*
22** Architecture-specific bit manipulation routines.
23**
24** TLSF achieves O(1) cost for malloc and free operations by limiting
25** the search for a free block to a free list of guaranteed size
26** adequate to fulfill the request, combined with efficient free list
27** queries using bitmasks and architecture-specific bit-manipulation
28** routines.
29**
30** Most modern processors provide instructions to count leading zeroes
31** in a word, find the lowest and highest set bit, etc. These
32** specific implementations will be used when available, falling back
33** to a reasonably efficient generic implementation.
34**
35** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
36** ffs/fls return 1-32 by default, returning 0 for error.
37*/
38
39/*
40** Detect whether or not we are building for a 32- or 64-bit (LP/LLP)
41** architecture. There is no reliable portable method at compile-time.
42*/
43#if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) \
44 || defined (_WIN64) || defined (__LP64__) || defined (__LLP64__)
45#define TLSF_64BIT
46#endif
47
48/*
49** gcc 3.4 and above have builtin support, specialized for architecture.
50** Some compilers masquerade as gcc; patchlevel test filters them out.
51*/
#if defined (__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
	&& defined (__GNUC_PATCHLEVEL__)

#if defined (__SNC__)
/* SNC for Playstation 3. */

/* Find first (least-significant) set bit: returns 0..31, or -1 when word == 0.
** NOTE(review): for word == 0, reverse is 0 and gcc documents __builtin_clz(0)
** as undefined -- confirm SNC defines clz(0) == 32 so that -1 is returned.
*/
tlsf_decl int tlsf_ffs(unsigned int word)
{
	/* word & (~word + 1) isolates the lowest set bit (two's-complement trick). */
	const unsigned int reverse = word & (~word + 1);
	const int bit = 32 - __builtin_clz(reverse);
	return bit - 1;
}

#else

/* Find first (least-significant) set bit: returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	/* __builtin_ffs returns 1..32, or 0 when no bit is set; rebase to 0..31 / -1. */
	return __builtin_ffs(word) - 1;
}

#endif

/* Find last (most-significant) set bit: returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	const int bit = word ? 32 - __builtin_clz(word) : 0;
	return bit - 1;
}

#elif defined (_MSC_VER) && (_MSC_VER >= 1400) && (defined (_M_IX86) || defined (_M_X64))
/* Microsoft Visual C++ support on x86/X64 architectures.
** NOTE(review): _MSC_VER is #undef'd near the top of this file, so this branch
** and the _M_PPC branch below are currently unreachable -- confirm intent.
*/

#include <intrin.h>

#pragma intrinsic(_BitScanReverse)
#pragma intrinsic(_BitScanForward)

/* Find last set bit via BSR; returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	unsigned long index;
	return _BitScanReverse(&index, word) ? index : -1;
}

/* Find first set bit via BSF; returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	unsigned long index;
	return _BitScanForward(&index, word) ? index : -1;
}

#elif defined (_MSC_VER) && defined (_M_PPC)
/* Microsoft Visual C++ support on PowerPC architectures. */

#include <ppcintrinsics.h>

/* Find last set bit: returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	const int bit = 32 - _CountLeadingZeros(word);
	return bit - 1;
}

/* Find first set bit by isolating the lowest bit, then counting leading zeroes. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	const unsigned int reverse = word & (~word + 1);
	const int bit = 32 - _CountLeadingZeros(reverse);
	return bit - 1;
}

#elif defined (__ARMCC_VERSION)
/* RealView Compilation Tools for ARM */

/* Find first set bit: returns 0..31, or -1 when word == 0 (relies on __clz(0) == 32). */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	const unsigned int reverse = word & (~word + 1);
	const int bit = 32 - __clz(reverse);
	return bit - 1;
}

/* Find last set bit: returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	const int bit = word ? 32 - __clz(word) : 0;
	return bit - 1;
}

#elif defined (__ghs__)
/* Green Hills support for PowerPC */

#include <ppc_ghs.h>

/* Find first set bit: returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	const unsigned int reverse = word & (~word + 1);
	const int bit = 32 - __CLZ32(reverse);
	return bit - 1;
}

/* Find last set bit: returns 0..31, or -1 when word == 0. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	const int bit = word ? 32 - __CLZ32(word) : 0;
	return bit - 1;
}

#else
/* Fall back to generic implementation. */

/* Portable fls: returns the 1-based position of the highest set bit, or 0 when word == 0. */
tlsf_decl int tlsf_fls_generic(unsigned int word)
{
	int bit = 32;

	/* Binary search: shift the highest set bit up toward bit 31, tracking its index. */
	if (!word) bit -= 1;
	if (!(word & 0xffff0000)) { word <<= 16; bit -= 16; }
	if (!(word & 0xff000000)) { word <<= 8; bit -= 8; }
	if (!(word & 0xf0000000)) { word <<= 4; bit -= 4; }
	if (!(word & 0xc0000000)) { word <<= 2; bit -= 2; }
	if (!(word & 0x80000000)) { word <<= 1; bit -= 1; }

	return bit;
}

/* Implement ffs in terms of fls: isolate the lowest set bit first. */
tlsf_decl int tlsf_ffs(unsigned int word)
{
	return tlsf_fls_generic(word & (~word + 1)) - 1;
}

/* Rebase the generic 1..32/0 result to the TLSF 0..31/-1 convention. */
tlsf_decl int tlsf_fls(unsigned int word)
{
	return tlsf_fls_generic(word) - 1;
}

#endif
181
182/* Possibly 64-bit version of tlsf_fls. */
183#if defined (TLSF_64BIT)
184tlsf_decl int tlsf_fls_sizet(size_t size)
185{
186 int high = (int)(size >> 32);
187 int bits = 0;
188 if (high)
189 {
190 bits = 32 + tlsf_fls(high);
191 }
192 else
193 {
194 bits = tlsf_fls((int)size & 0xffffffff);
195
196 }
197 return bits;
198}
199#else
200#define tlsf_fls_sizet tlsf_fls
201#endif
202
203#undef tlsf_decl
204
205/*
206** Constants.
207*/
208
/* Public constants: may be modified. */
enum tlsf_public
{
	/* log2 of number of linear subdivisions of block sizes. Larger
	** values require more memory in the control structure. Values of
	** 4 or 5 are typical. With 5, each first-level size range is split
	** into 32 second-level free lists.
	*/
	SL_INDEX_COUNT_LOG2 = 5,
};
218
/* Private constants: do not modify. */
enum tlsf_private
{
#if defined (TLSF_64BIT)
	/* All allocation sizes and addresses are aligned to 8 bytes. */
	ALIGN_SIZE_LOG2 = 3,
#else
	/* All allocation sizes and addresses are aligned to 4 bytes. */
	ALIGN_SIZE_LOG2 = 2,
#endif
	ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),

	/*
	** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits.
	** However, because we linearly subdivide the second-level lists, and
	** our minimum size granularity is 4 bytes, it doesn't make sense to
	** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
	** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
	** trying to split size ranges into more slots than we have available.
	** Instead, we calculate the minimum threshold size, and place all
	** blocks below that size into the 0th first-level list.
	*/

#if defined (TLSF_64BIT)
	/*
	** TODO: We can increase this to support larger sizes, at the expense
	** of more overhead in the TLSF structure.
	*/
	FL_INDEX_MAX = 32,
#else
	FL_INDEX_MAX = 30,
#endif
	SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
	/* Sizes below SMALL_BLOCK_SIZE share first-level list 0 (see comment above). */
	FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
	/* Number of first-level lists actually stored in the control structure. */
	FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),

	SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
257
258/*
259** Cast and min/max macros.
260*/
261
262#define tlsf_cast(t, exp) ((t) (exp))
263#define tlsf_min(a, b) ((a) < (b) ? (a) : (b))
264#define tlsf_max(a, b) ((a) > (b) ? (a) : (b))
265
266/*
267** Set assert macro, if it has not been provided by the user.
268*/
269#if !defined (tlsf_assert)
270#define tlsf_assert assert
271#endif
272
273/*
274** Static assertion mechanism.
275*/
276
277#define _tlsf_glue2(x, y) x ## y
278#define _tlsf_glue(x, y) _tlsf_glue2(x, y)
279#define tlsf_static_assert(exp) \
280 typedef char _tlsf_glue(static_assert, __LINE__) [(exp) ? 1 : -1]
281
282/* This code has been tested on 32- and 64-bit (LP/LLP) architectures. */
283tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
284tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
285tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
286
287/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
288tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
289
290/* Ensure we've properly tuned our sizes. */
291tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
292
293/*
294** Data structures and associated constants.
295*/
296
297/*
298** Block header structure.
299**
300** There are several implementation subtleties involved:
301** - The prev_phys_block field is only valid if the previous block is free.
302** - The prev_phys_block field is actually stored at the end of the
303** previous block. It appears at the beginning of this structure only to
304** simplify the implementation.
305** - The next_free / prev_free fields are only valid if the block is free.
306*/
typedef struct block_header_t
{
	/* Points to the previous physical block (valid only while that block is free). */
	struct block_header_t* prev_phys_block;

	/* The size of this block, excluding the block header.
	** The two low bits carry the free / prev-free status flags (see below). */
	size_t size;

	/* Next and previous free blocks (valid only while this block is free). */
	struct block_header_t* next_free;
	struct block_header_t* prev_free;
} block_header_t;
319
320/*
321** Since block sizes are always at least a multiple of 4, the two least
322** significant bits of the size field are used to store the block status:
323** - bit 0: whether block is busy or free
324** - bit 1: whether previous block is busy or free
325*/
326static const size_t block_header_free_bit = 1 << 0;
327static const size_t block_header_prev_free_bit = 1 << 1;
328
329/*
330** The size of the block header exposed to used blocks is the size field.
331** The prev_phys_block field is stored *inside* the previous free block.
332*/
333static const size_t block_header_overhead = sizeof(size_t);
334
335/* User data starts directly after the size field in a used block. */
336static const size_t block_start_offset =
337 offsetof(block_header_t, size) + sizeof(size_t);
338
339/*
340** A free block must be large enough to store its header minus the size of
341** the prev_phys_block field, and no larger than the number of addressable
342** bits for FL_INDEX.
343*/
344static const size_t block_size_min =
345 sizeof(block_header_t) - sizeof(block_header_t*);
346static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;
347
348
/* The TLSF control structure. */
typedef struct control_t
{
	/* Empty lists point at this block to indicate they are free. */
	block_header_t block_null;

	/* Bitmaps for free lists: a set bit means the corresponding list is non-empty. */
	unsigned int fl_bitmap;
	unsigned int sl_bitmap[FL_INDEX_COUNT];

	/* Head of free lists, indexed by [first-level][second-level]. */
	block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
} control_t;
362
363/* A type used for casting when doing pointer arithmetic. */
364typedef ptrdiff_t tlsfptr_t;
365
366/*
367** block_header_t member functions.
368*/
369
370static size_t block_size(const block_header_t* block)
371{
372 return block->size & ~(block_header_free_bit | block_header_prev_free_bit);
373}
374
375static void block_set_size(block_header_t* block, size_t size)
376{
377 const size_t oldsize = block->size;
378 block->size = size | (oldsize & (block_header_free_bit | block_header_prev_free_bit));
379}
380
381static int block_is_last(const block_header_t* block)
382{
383 return block_size(block) == 0;
384}
385
386static int block_is_free(const block_header_t* block)
387{
388 return tlsf_cast(int, block->size & block_header_free_bit);
389}
390
391static void block_set_free(block_header_t* block)
392{
393 block->size |= block_header_free_bit;
394}
395
396static void block_set_used(block_header_t* block)
397{
398 block->size &= ~block_header_free_bit;
399}
400
401static int block_is_prev_free(const block_header_t* block)
402{
403 return tlsf_cast(int, block->size & block_header_prev_free_bit);
404}
405
406static void block_set_prev_free(block_header_t* block)
407{
408 block->size |= block_header_prev_free_bit;
409}
410
411static void block_set_prev_used(block_header_t* block)
412{
413 block->size &= ~block_header_prev_free_bit;
414}
415
416static block_header_t* block_from_ptr(const void* ptr)
417{
418 return tlsf_cast(block_header_t*,
419 tlsf_cast(unsigned char*, ptr) - block_start_offset);
420}
421
422static void* block_to_ptr(const block_header_t* block)
423{
424 return tlsf_cast(void*,
425 tlsf_cast(unsigned char*, block) + block_start_offset);
426}
427
428/* Return location of next block after block of given size. */
429static block_header_t* offset_to_block(const void* ptr, size_t size)
430{
431 return tlsf_cast(block_header_t*, tlsf_cast(tlsfptr_t, ptr) + size);
432}
433
434/* Return location of previous block. */
435static block_header_t* block_prev(const block_header_t* block)
436{
437 tlsf_assert(block_is_prev_free(block) && "previous block must be free");
438 return block->prev_phys_block;
439}
440
441/* Return location of next existing block. */
442static block_header_t* block_next(const block_header_t* block)
443{
444 block_header_t* next = offset_to_block(block_to_ptr(block),
445 block_size(block) - block_header_overhead);
446 tlsf_assert(!block_is_last(block));
447 return next;
448}
449
450/* Link a new block with its physical neighbor, return the neighbor. */
451static block_header_t* block_link_next(block_header_t* block)
452{
453 block_header_t* next = block_next(block);
454 next->prev_phys_block = block;
455 return next;
456}
457
458static void block_mark_as_free(block_header_t* block)
459{
460 /* Link the block to the next block, first. */
461 block_header_t* next = block_link_next(block);
462 block_set_prev_free(next);
463 block_set_free(block);
464}
465
466static void block_mark_as_used(block_header_t* block)
467{
468 block_header_t* next = block_next(block);
469 block_set_prev_used(next);
470 block_set_used(block);
471}
472
473static size_t align_up(size_t x, size_t align)
474{
475 tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
476 return (x + (align - 1)) & ~(align - 1);
477}
478
479static size_t align_down(size_t x, size_t align)
480{
481 tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
482 return x - (x & (align - 1));
483}
484
485static void* align_ptr(const void* ptr, size_t align)
486{
487 const tlsfptr_t aligned =
488 (tlsf_cast(tlsfptr_t, ptr) + (align - 1)) & ~(align - 1);
489 tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
490 return tlsf_cast(void*, aligned);
491}
492
493/*
494** Adjust an allocation size to be aligned to word size, and no smaller
495** than internal minimum.
496*/
497static size_t adjust_request_size(size_t size, size_t align)
498{
499 size_t adjust = 0;
500 if (size && size < block_size_max)
501 {
502 const size_t aligned = align_up(size, align);
503 adjust = tlsf_max(aligned, block_size_min);
504 }
505 return adjust;
506}
507
508/*
509** TLSF utility functions. In most cases, these are direct translations of
510** the documentation found in the white paper.
511*/
512
/*
** Map a block size to its first/second-level list indices (fl, sl),
** per the TLSF white paper. Small sizes all live under fl == 0,
** linearly subdivided; larger sizes use the position of the top bit
** for fl and the next SL_INDEX_COUNT_LOG2 bits for sl.
*/
static void mapping_insert(size_t size, int* fli, int* sli)
{
	int fl, sl;
	if (size < SMALL_BLOCK_SIZE)
	{
		/* Store small blocks in first list. */
		fl = 0;
		sl = tlsf_cast(int, size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
	}
	else
	{
		fl = tlsf_fls_sizet(size);
		/* sl: the SL_INDEX_COUNT_LOG2 bits just below the top bit (top bit XOR'd off). */
		sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
		/* Rebase fl so the first non-small range lands at index 1. */
		fl -= (FL_INDEX_SHIFT - 1);
	}
	*fli = fl;
	*sli = sl;
}

/* This version rounds up to the next block size (for allocations) */
static void mapping_search(size_t size, int* fli, int* sli)
{
	if (size >= SMALL_BLOCK_SIZE)
	{
		/* Round up so the mapped list is guaranteed to satisfy the request. */
		const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1;
		size += round;
	}
	mapping_insert(size, fli, sli);
}
542
/*
** Find the first non-empty free list at or above (fl, sl), returning its
** head block and updating *fli/*sli to the list actually chosen.
** Returns 0 when no suitable free block exists anywhere.
*/
static block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
{
	int fl = *fli;
	int sl = *sli;

	/*
	** First, search for a block in the list associated with the given
	** fl/sl index.
	*/
	unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
	if (!sl_map)
	{
		/* No block exists. Search in the next largest first-level list. */
		const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
		if (!fl_map)
		{
			/* No free blocks available, memory has been exhausted. */
			return 0;
		}

		/* Lowest set bit = smallest first-level range that can serve the request. */
		fl = tlsf_ffs(fl_map);
		*fli = fl;
		sl_map = control->sl_bitmap[fl];
	}
	tlsf_assert(sl_map && "internal error - second level bitmap is null");
	sl = tlsf_ffs(sl_map);
	*sli = sl;

	/* Return the first block in the free list. */
	return control->blocks[fl][sl];
}
574
/* Remove a free block from list (fl, sl), clearing bitmap bits when the list empties. */
static void remove_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
	block_header_t* prev = block->prev_free;
	block_header_t* next = block->next_free;
	tlsf_assert(prev && "prev_free field can not be null");
	tlsf_assert(next && "next_free field can not be null");
	/* Unlink from the doubly-linked free list. */
	next->prev_free = prev;
	prev->next_free = next;

	/* If this block is the head of the free list, set new head. */
	if (control->blocks[fl][sl] == block)
	{
		control->blocks[fl][sl] = next;

		/* If the new head is null, clear the bitmap. */
		if (next == &control->block_null)
		{
			control->sl_bitmap[fl] &= ~(1 << sl);

			/* If the second bitmap is now empty, clear the fl bitmap. */
			if (!control->sl_bitmap[fl])
			{
				control->fl_bitmap &= ~(1 << fl);
			}
		}
	}
}
603
/* Insert a free block at the head of list (fl, sl) and set the bitmap bits. */
static void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
	block_header_t* current = control->blocks[fl][sl];
	tlsf_assert(current && "free list cannot have a null entry");
	tlsf_assert(block && "cannot insert a null entry into the free list");
	/* Splice the block in ahead of the current head. */
	block->next_free = current;
	block->prev_free = &control->block_null;
	current->prev_free = block;

	tlsf_assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE)
		&& "block not aligned properly");
	/*
	** Insert the new block at the head of the list, and mark the first-
	** and second-level bitmaps appropriately.
	*/
	control->blocks[fl][sl] = block;
	control->fl_bitmap |= (1 << fl);
	control->sl_bitmap[fl] |= (1 << sl);
}
624
625/* Remove a given block from the free list. */
626static void block_remove(control_t* control, block_header_t* block)
627{
628 int fl, sl;
629 mapping_insert(block_size(block), &fl, &sl);
630 remove_free_block(control, block, fl, sl);
631}
632
633/* Insert a given block into the free list. */
634static void block_insert(control_t* control, block_header_t* block)
635{
636 int fl, sl;
637 mapping_insert(block_size(block), &fl, &sl);
638 insert_free_block(control, block, fl, sl);
639}
640
641static int block_can_split(block_header_t* block, size_t size)
642{
643 return block_size(block) >= sizeof(block_header_t) + size;
644}
645
/* Split a block into two, the second of which is free. */
static block_header_t* block_split(block_header_t* block, size_t size)
{
	/* Calculate the amount of space left in the remaining block. */
	block_header_t* remaining =
		offset_to_block(block_to_ptr(block), size - block_header_overhead);

	const size_t remain_size = block_size(block) - (size + block_header_overhead);

	tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
		&& "remaining block not aligned properly");

	tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
	block_set_size(remaining, remain_size);
	tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

	block_set_size(block, size);
	/* The tail piece becomes a free block linked to its physical neighbor. */
	block_mark_as_free(remaining);

	return remaining;
}
667
668/* Absorb a free block's storage into an adjacent previous free block. */
669static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
670{
671 tlsf_assert(!block_is_last(prev) && "previous block can't be last");
672 /* Note: Leaves flags untouched. */
673 prev->size += block_size(block) + block_header_overhead;
674 block_link_next(prev);
675 return prev;
676}
677
/* Merge a just-freed block with an adjacent previous free block. */
static block_header_t* block_merge_prev(control_t* control, block_header_t* block)
{
	if (block_is_prev_free(block))
	{
		block_header_t* prev = block_prev(block);
		tlsf_assert(prev && "prev physical block can't be null");
		tlsf_assert(block_is_free(prev) && "prev block is not free though marked as such");
		/* Detach the neighbor from its free list before growing it. */
		block_remove(control, prev);
		block = block_absorb(prev, block);
	}

	return block;
}

/* Merge a just-freed block with an adjacent free block. */
static block_header_t* block_merge_next(control_t* control, block_header_t* block)
{
	block_header_t* next = block_next(block);
	tlsf_assert(next && "next physical block can't be null");

	if (block_is_free(next))
	{
		tlsf_assert(!block_is_last(block) && "previous block can't be last");
		/* Detach the neighbor from its free list before absorbing it. */
		block_remove(control, next);
		block = block_absorb(block, next);
	}

	return block;
}
708
/* Trim any trailing block space off the end of a block, return to pool. */
static void block_trim_free(control_t* control, block_header_t* block, size_t size)
{
	tlsf_assert(block_is_free(block) && "block must be free");
	if (block_can_split(block, size))
	{
		block_header_t* remaining_block = block_split(block, size);
		block_link_next(block);
		block_set_prev_free(remaining_block);
		/* Return the surplus tail to the free lists. */
		block_insert(control, remaining_block);
	}
}

/* Trim any trailing block space off the end of a used block, return to pool. */
static void block_trim_used(control_t* control, block_header_t* block, size_t size)
{
	tlsf_assert(!block_is_free(block) && "block must be used");
	if (block_can_split(block, size))
	{
		/* If the next block is free, we must coalesce. */
		block_header_t* remaining_block = block_split(block, size);
		block_set_prev_used(remaining_block);

		remaining_block = block_merge_next(control, remaining_block);
		block_insert(control, remaining_block);
	}
}
736
/*
** Split off the leading portion of a free block (used by aligned allocation),
** re-inserting it into the free lists and returning the trailing piece that
** will satisfy the request.
*/
static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
{
	block_header_t* remaining_block = block;
	if (block_can_split(block, size))
	{
		/* We want the 2nd block. */
		remaining_block = block_split(block, size - block_header_overhead);
		block_set_prev_free(remaining_block);

		block_link_next(block);
		block_insert(control, block);
	}

	return remaining_block;
}
752
753static block_header_t* block_locate_free(control_t* control, size_t size)
754{
755 int fl = 0, sl = 0;
756 block_header_t* block = 0;
757
758 if (size)
759 {
760 mapping_search(size, &fl, &sl);
761 block = search_suitable_block(control, &fl, &sl);
762 }
763
764 if (block)
765 {
766 tlsf_assert(block_size(block) >= size);
767 remove_free_block(control, block, fl, sl);
768 }
769
770 return block;
771}
772
773static void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
774{
775 void* p = 0;
776 if (block)
777 {
778 tlsf_assert(size && "size must be non-zero");
779 block_trim_free(control, block, size);
780 block_mark_as_used(block);
781 p = block_to_ptr(block);
782 }
783 return p;
784}
785
786/* Clear structure and point all empty lists at the null block. */
787static void control_construct(control_t* control)
788{
789 int i, j;
790
791 control->block_null.next_free = &control->block_null;
792 control->block_null.prev_free = &control->block_null;
793
794 control->fl_bitmap = 0;
795 for (i = 0; i < FL_INDEX_COUNT; ++i)
796 {
797 control->sl_bitmap[i] = 0;
798 for (j = 0; j < SL_INDEX_COUNT; ++j)
799 {
800 control->blocks[i][j] = &control->block_null;
801 }
802 }
803}
804
805/*
806** Debugging utilities.
807*/
808
/* State carried by integrity_walker across the blocks of one pool. */
typedef struct integrity_t
{
	int prev_status;	/* free/used status of the previous physical block */
	int status;		/* accumulated error count (0 == consistent) */
} integrity_t;

#define tlsf_insist(x) { tlsf_assert(x); if (!(x)) { status--; } }

/* tlsf_walk_pool callback: verifies each block's size and prev-free flag. */
static void integrity_walker(void* ptr, size_t size, int used, void* user)
{
	block_header_t* block = block_from_ptr(ptr);
	integrity_t* integ = tlsf_cast(integrity_t*, user);
	const int this_prev_status = block_is_prev_free(block) ? 1 : 0;
	const int this_status = block_is_free(block) ? 1 : 0;
	const size_t this_block_size = block_size(block);

	int status = 0;
	(void)used;
	tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
	tlsf_insist(size == this_block_size && "block size incorrect");

	/* Carry this block's status forward for the next block's prev-free check. */
	integ->prev_status = this_status;
	integ->status += status;
}
833
/*
** Audit every free list and bitmap in the control structure for mutual
** consistency. Returns 0 when intact; a negative accumulated error count
** otherwise (one decrement per failed tlsf_insist).
*/
int tlsf_check(tlsf_t tlsf)
{
	int i, j;

	control_t* control = tlsf_cast(control_t*, tlsf);
	int status = 0;

	/* Check that the free lists and bitmaps are accurate. */
	for (i = 0; i < FL_INDEX_COUNT; ++i)
	{
		for (j = 0; j < SL_INDEX_COUNT; ++j)
		{
			const int fl_map = control->fl_bitmap & (1 << i);
			const int sl_list = control->sl_bitmap[i];
			const int sl_map = sl_list & (1 << j);
			const block_header_t* block = control->blocks[i][j];

			/* Check that first- and second-level lists agree. */
			if (!fl_map)
			{
				tlsf_insist(!sl_map && "second-level map must be null");
			}

			if (!sl_map)
			{
				tlsf_insist(block == &control->block_null && "block list must be null");
				continue;
			}

			/* Check that there is at least one free block. */
			tlsf_insist(sl_list && "no free blocks in second-level map");
			tlsf_insist(block != &control->block_null && "block should not be null");

			while (block != &control->block_null)
			{
				int fli, sli;
				tlsf_insist(block_is_free(block) && "block should be free");
				tlsf_insist(!block_is_prev_free(block) && "blocks should have coalesced");
				tlsf_insist(!block_is_free(block_next(block)) && "blocks should have coalesced");
				tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
				tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");

				/* Each free block must live in exactly the list its size maps to. */
				mapping_insert(block_size(block), &fli, &sli);
				tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
				block = block->next_free;
			}
		}
	}

	return status;
}
885
886#undef tlsf_insist
887
888static void default_walker(void* ptr, size_t size, int used, void* user)
889{
890 (void)user;
891 ntstdio_printf(&ntstdio, "\t%p %s size: %x (%p)\n", ptr, used ? "used" : "free", (unsigned int)size, block_from_ptr(ptr));
892}
893
894void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
895{
896 tlsf_walker pool_walker = walker ? walker : default_walker;
897 block_header_t* block =
898 offset_to_block(pool, -(int)block_header_overhead);
899
900 while (block && !block_is_last(block))
901 {
902 pool_walker(
903 block_to_ptr(block),
904 block_size(block),
905 !block_is_free(block),
906 user);
907 block = block_next(block);
908 }
909}
910
/* Usable size of an allocation returned by tlsf_malloc, or 0 for a null pointer. */
size_t tlsf_block_size(void* ptr)
{
	if (ptr)
	{
		return block_size(block_from_ptr(ptr));
	}
	return 0;
}
921
922int tlsf_check_pool(pool_t pool)
923{
924 /* Check that the blocks are physically correct. */
925 integrity_t integ = { 0, 0 };
926 tlsf_walk_pool(pool, integrity_walker, &integ);
927
928 return integ.status;
929}
930
931/*
932** Size of the TLSF structures in a given memory block passed to
933** tlsf_create, equal to the size of a control_t
934*/
/* Bytes of bookkeeping (control_t) placed at the start of a tlsf_t. */
size_t tlsf_size(void)
{
	return sizeof(control_t);
}

/* Alignment guaranteed for all allocations (and required of pool memory). */
size_t tlsf_align_size(void)
{
	return ALIGN_SIZE;
}

/* Smallest block payload this allocator will carve out. */
size_t tlsf_block_size_min(void)
{
	return block_size_min;
}

/* Largest single allocation supported: 1 << FL_INDEX_MAX. */
size_t tlsf_block_size_max(void)
{
	return block_size_max;
}

/*
** Overhead of the TLSF structures in a given memory block passed to
** tlsf_add_pool, equal to the overhead of a free block and the
** sentinel block.
*/
size_t tlsf_pool_overhead(void)
{
	return 2 * block_header_overhead;
}

/* Per-allocation bookkeeping overhead (the size field of the header). */
size_t tlsf_alloc_overhead(void)
{
	return block_header_overhead;
}
969
/*
** Add the memory region [mem, mem + bytes) to tlsf as an allocation pool.
** mem must be ALIGN_SIZE aligned, and the usable size (after overhead and
** align-down) must lie within [block_size_min, block_size_max].
** Returns the pool handle (== mem) on success, 0 on failure.
*/
pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
{
	block_header_t* block;
	block_header_t* next;

	/* NOTE(review): if bytes < pool_overhead this subtraction wraps; the
	** wrapped value appears to be rejected by the range check below -- confirm. */
	const size_t pool_overhead = tlsf_pool_overhead();
	const size_t pool_bytes = align_down(bytes - pool_overhead, ALIGN_SIZE);

	if (((ptrdiff_t)mem % ALIGN_SIZE) != 0)
	{
		ntstdio_printf(&ntstdio, "tlsf_add_pool: Memory must be aligned by %u bytes.\n",
			(unsigned int)ALIGN_SIZE);
		return 0;
	}

	if (pool_bytes < block_size_min || pool_bytes > block_size_max)
	{
#if defined (TLSF_64BIT)
		/* Max size exceeds what %x can show: print (max / 256) with "00" appended. */
		ntstdio_printf(&ntstdio, "tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
			(unsigned int)(pool_overhead + block_size_min),
			(unsigned int)((pool_overhead + block_size_max) / 256));
#else
		ntstdio_printf(&ntstdio, "tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
			(unsigned int)(pool_overhead + block_size_min),
			(unsigned int)(pool_overhead + block_size_max));
#endif
		return 0;
	}

	/*
	** Create the main free block. Offset the start of the block slightly
	** so that the prev_phys_block field falls outside of the pool -
	** it will never be used.
	*/
	block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
	block_set_size(block, pool_bytes);
	block_set_free(block);
	block_set_prev_used(block);
	block_insert(tlsf_cast(control_t*, tlsf), block);

	/* Split the block to create a zero-size sentinel block. */
	next = block_link_next(block);
	block_set_size(next, 0);
	block_set_used(next);
	block_set_prev_free(next);

	return mem;
}
1018
/*
** Detach a pool previously added with tlsf_add_pool. The pool must consist
** of a single free block (i.e. every allocation returned) followed by the
** sentinel; the asserts below enforce that precondition.
*/
void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	block_header_t* block = offset_to_block(pool, -(int)block_header_overhead);

	int fl = 0, sl = 0;

	tlsf_assert(block_is_free(block) && "block should be free");
	tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
	tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");

	/* Remove the pool's single free block from the free lists. */
	mapping_insert(block_size(block), &fl, &sl);
	remove_free_block(control, block, fl, sl);
}
1033
1034/*
1035** TLSF main interface.
1036*/
1037
1038#if _DEBUG
1039int test_ffs_fls()
1040{
1041 /* Verify ffs/fls work properly. */
1042 int rv = 0;
1043 rv += (tlsf_ffs(0) == -1) ? 0 : 0x1;
1044 rv += (tlsf_fls(0) == -1) ? 0 : 0x2;
1045 rv += (tlsf_ffs(1) == 0) ? 0 : 0x4;
1046 rv += (tlsf_fls(1) == 0) ? 0 : 0x8;
1047 rv += (tlsf_ffs(0x80000000) == 31) ? 0 : 0x10;
1048 rv += (tlsf_ffs(0x80008000) == 15) ? 0 : 0x20;
1049 rv += (tlsf_fls(0x80000008) == 31) ? 0 : 0x40;
1050 rv += (tlsf_fls(0x7FFFFFFF) == 30) ? 0 : 0x80;
1051
1052#if defined (TLSF_64BIT)
1053 rv += (tlsf_fls_sizet(0x80000000) == 31) ? 0 : 0x100;
1054 rv += (tlsf_fls_sizet(0x100000000) == 32) ? 0 : 0x200;
1055 rv += (tlsf_fls_sizet(0xffffffffffffffff) == 63) ? 0 : 0x400;
1056#endif
1057
1058 if (rv)
1059 {
1060 ntstdio_printf(&ntstdio, "test_ffs_fls: %x ffs/fls tests failed.\n", rv);
1061 }
1062 return rv;
1063}
1064#endif
1065
/*
** Construct a TLSF instance inside mem, which must hold at least
** tlsf_size() bytes and be ALIGN_SIZE aligned. Returns mem as the
** handle on success, 0 on failure.
*/
tlsf_t tlsf_create(void* mem)
{
#if _DEBUG
	/* In debug builds, refuse to run on a broken ffs/fls implementation. */
	if (test_ffs_fls())
	{
		return 0;
	}
#endif

	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
	{
		ntstdio_printf(&ntstdio, "tlsf_create: Memory must be aligned to %u bytes.\n",
			(unsigned int)ALIGN_SIZE);
		return 0;
	}

	control_construct(tlsf_cast(control_t*, mem));

	return tlsf_cast(tlsf_t, mem);
}
1086
1087tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
1088{
1089 tlsf_t tlsf = tlsf_create(mem);
1090 tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
1091 return tlsf;
1092}
1093
1094void tlsf_destroy(tlsf_t tlsf)
1095{
1096 /* Nothing to do. */
1097 (void)tlsf;
1098}
1099
1100pool_t tlsf_get_pool(tlsf_t tlsf)
1101{
1102 return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
1103}
1104
1105void* tlsf_malloc(tlsf_t tlsf, size_t size)
1106{
1107 control_t* control = tlsf_cast(control_t*, tlsf);
1108 const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
1109 block_header_t* block = block_locate_free(control, adjust);
1110 return block_prepare_used(control, block, adjust);
1111}
1112
/*
** Allocate `size` bytes whose address is a multiple of `align`.
** For align <= ALIGN_SIZE this degenerates to a plain tlsf_malloc;
** otherwise a larger block is located and any leading alignment gap
** is trimmed off and returned to the free lists.
*/
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
{
	control_t* control = tlsf_cast(control_t*, tlsf);
	const size_t adjust = adjust_request_size(size, ALIGN_SIZE);

	/*
	** We must allocate an additional minimum block size bytes so that if
	** our free block will leave an alignment gap which is smaller, we can
	** trim a leading free block and release it back to the pool. We must
	** do this because the previous physical block is in use, therefore
	** the prev_phys_block field is not valid, and we can't simply adjust
	** the size of that block.
	*/
	const size_t gap_minimum = sizeof(block_header_t);
	const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);

	/*
	** If alignment is less than or equals base alignment, we're done.
	** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
	*/
	const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;

	block_header_t* block = block_locate_free(control, aligned_size);

	/* This can't be a static assert. */
	tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);

	if (block)
	{
		void* ptr = block_to_ptr(block);
		void* aligned = align_ptr(ptr, align);
		/* Bytes between the block's natural payload and the aligned address. */
		size_t gap = tlsf_cast(size_t,
			tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));

		/* If gap size is too small, offset to next aligned boundary. */
		if (gap && gap < gap_minimum)
		{
			const size_t gap_remain = gap_minimum - gap;
			const size_t offset = tlsf_max(gap_remain, align);
			const void* next_aligned = tlsf_cast(void*,
				tlsf_cast(tlsfptr_t, aligned) + offset);

			/* Re-align after skipping forward; the new gap is >= gap_minimum. */
			aligned = align_ptr(next_aligned, align);
			gap = tlsf_cast(size_t,
				tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
		}

		if (gap)
		{
			tlsf_assert(gap >= gap_minimum && "gap size too small");
			/* Split off the leading gap as its own free block. */
			block = block_trim_free_leading(control, block, gap);
		}
	}

	/* Null block falls through; behavior matches tlsf_malloc's failure path. */
	return block_prepare_used(control, block, adjust);
}
1169
1170void tlsf_free(tlsf_t tlsf, void* ptr)
1171{
1172 /* Don't attempt to free a NULL pointer. */
1173 if (ptr)
1174 {
1175 control_t* control = tlsf_cast(control_t*, tlsf);
1176 block_header_t* block = block_from_ptr(ptr);
1177 tlsf_assert(!block_is_free(block) && "block already marked as free");
1178 block_mark_as_free(block);
1179 block = block_merge_prev(control, block);
1180 block = block_merge_next(control, block);
1181 block_insert(control, block);
1182 }
1183}
1184
1185/*
1186** The TLSF block information provides us with enough information to
1187** provide a reasonably intelligent implementation of realloc, growing or
1188** shrinking the currently allocated block as required.
1189**
1190** This routine handles the somewhat esoteric edge cases of realloc:
1191** - a non-zero size with a null pointer will behave like malloc
1192** - a zero size with a non-null pointer will behave like free
1193** - a request that cannot be satisfied will leave the original buffer
1194** untouched
1195** - an extended buffer size will leave the newly-allocated area with
1196** contents undefined
1197*/
1198void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
1199{
1200 control_t* control = tlsf_cast(control_t*, tlsf);
1201 void* p = 0;
1202
1203 /* Zero-size requests are treated as free. */
1204 if (ptr && size == 0)
1205 {
1206 tlsf_free(tlsf, ptr);
1207 }
1208 /* Requests with NULL pointers are treated as malloc. */
1209 else if (!ptr)
1210 {
1211 p = tlsf_malloc(tlsf, size);
1212 }
1213 else
1214 {
1215 block_header_t* block = block_from_ptr(ptr);
1216 block_header_t* next = block_next(block);
1217
1218 const size_t cursize = block_size(block);
1219 const size_t combined = cursize + block_size(next) + block_header_overhead;
1220 const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
1221
1222 tlsf_assert(!block_is_free(block) && "block already marked as free");
1223
1224 /*
1225 ** If the next block is used, or when combined with the current
1226 ** block, does not offer enough space, we must reallocate and copy.
1227 */
1228 if (adjust > cursize && (!block_is_free(next) || adjust > combined))
1229 {
1230 p = tlsf_malloc(tlsf, size);
1231 if (p)
1232 {
1233 const size_t minsize = tlsf_min(cursize, size);
1234 memcpy(p, ptr, minsize);
1235 tlsf_free(tlsf, ptr);
1236 }
1237 }
1238 else
1239 {
1240 /* Do we need to expand to the next block? */
1241 if (adjust > cursize)
1242 {
1243 block_merge_next(control, block);
1244 block_mark_as_used(block);
1245 }
1246
1247 /* Trim the resulting block and return the original pointer. */
1248 block_trim_used(control, block, adjust);
1249 p = ptr;
1250 }
1251 }
1252
1253 return p;
1254}
Note: See TracBrowser for help on using the repository browser.