#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
void *__mremap(void *, size_t, size_t, int, ...);
int __madvise(void *, size_t, int);

struct chunk {
	size_t psize, csize;
	struct chunk *next, *prev;
};

struct bin {
	volatile int lock[2];
	struct chunk *head;
	struct chunk *tail;
};

static struct {
	volatile uint64_t binmap;
	struct bin bins[64];
	volatile int free_lock[2];
} mal;

#define SIZE_ALIGN (4*sizeof(size_t))
#define SIZE_MASK (-SIZE_ALIGN)
#define OVERHEAD (2*sizeof(size_t))
#define MMAP_THRESHOLD (0x1c00*SIZE_ALIGN)
#define DONTCARE 16
#define RECLAIM 163840

#define CHUNK_SIZE(c) ((c)->csize & -2)
#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define PREV_CHUNK(c) ((struct chunk *)((char *)(c) - CHUNK_PSIZE(c)))
#define NEXT_CHUNK(c) ((struct chunk *)((char *)(c) + CHUNK_SIZE(c)))
#define MEM_TO_CHUNK(p) (struct chunk *)((char *)(p) - OVERHEAD)
#define CHUNK_TO_MEM(c) (void *)((char *)(c) + OVERHEAD)
#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))

#define C_INUSE  ((size_t)1)

#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
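
/* Illustrative sketch of the chunk layout implied by the macros above;
 * not part of the allocator. Figures assume a 64-bit target where
 * sizeof(size_t) == 8, so OVERHEAD == 16 and SIZE_ALIGN == 32:
 *
 *   [ psize | csize | user memory... ][ psize | csize | ... ]
 *     ^-- OVERHEAD-byte header          ^-- next chunk; its psize
 *                                           doubles as this one's footer
 *
 * Bit 0 of psize/csize is C_INUSE, so CHUNK_SIZE/CHUNK_PSIZE mask
 * with -2 to recover the byte length. */
#if 0
/* Hypothetical demo of the header arithmetic (names are illustrative). */
static void chunk_layout_demo(void)
{
	void *p = malloc(100);
	struct chunk *c = MEM_TO_CHUNK(p);  /* header OVERHEAD bytes below p */
	void *q = CHUNK_TO_MEM(c);          /* back to the user pointer: q == p */
	/* NEXT_CHUNK(c) sits CHUNK_SIZE(c) bytes past c, and barring
	 * corruption NEXT_CHUNK(c)->psize == c->csize. */
	free(q);
}
#endif
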
/* Synchronization tools */

static inline void lock(volatile int *lk)
{
	if (libc.threads_minus_1)
		while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
}

static inline void unlock(volatile int *lk)
{
	if (lk[0]) {
		a_store(lk, 0);
		if (lk[1]) __wake(lk, 1, 1);
	}
}

static inline void lock_bin(int i)
{
	lock(mal.bins[i].lock);
	if (!mal.bins[i].head)
		mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i)
{
	unlock(mal.bins[i].lock);
}
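
/* Note on the lock protocol (descriptive, added for clarity): lk[0] is
 * the lock word claimed with a_swap, and lk[1] is the waiter count that
 * __wait maintains so unlock() can skip the __wake syscall when nobody
 * is sleeping. While the process is single-threaded,
 * libc.threads_minus_1 is zero and lock() is a no-op; unlock() still
 * tests lk[0] so a lock taken before a second thread existed is
 * released correctly. */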

static int first_set(uint64_t x)
{
#if 1
	return a_ctz_64(x);
#else
	static const char debruijn64[64] = {
		0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
		62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
		63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
		51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
	};
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	if (sizeof(long) < 8) {
		uint32_t y = x;
		if (!y) {
			y = x>>32;
			return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
		}
		return debruijn32[(y&-y)*0x076be629 >> 27];
	}
	return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
#endif
}
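
/* The disabled branch above is the classic de Bruijn method: x & -x
 * isolates the lowest set bit, multiplying by the de Bruijn constant
 * slides a pattern unique to that bit position into the top 6 (or 5)
 * bits, and the shift turns it into a table index. The live branch
 * simply defers to the platform's a_ctz_64 count-trailing-zeros
 * primitive. */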

static const unsigned char bin_tab[60] = {
	            32,33,34,35,36,36,37,37,38,38,39,39,
	40,40,40,40,41,41,41,41,42,42,42,42,43,43,43,43,
	44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,
	46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,
};

static int bin_index(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	if (x < 512) return bin_tab[x/8-4];
	if (x > 0x1c00) return 63;
	return bin_tab[x/128-4] + 16;
}

static int bin_index_up(size_t x)
{
	x = x / SIZE_ALIGN - 1;
	if (x <= 32) return x;
	x--;
	if (x < 512) return bin_tab[x/8-4] + 1;
	return bin_tab[x/128-4] + 17;
}
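
/* Worked example (assuming a 64-bit target, SIZE_ALIGN == 32): a chunk
 * size of 2048 gives x = 2048/32 - 1 = 63, so bin_index returns
 * bin_tab[63/8 - 4] = bin_tab[3] = 35, and bin_index_up returns
 * bin_tab[62/8 - 4] + 1 = 36. Sizes up to 33*SIZE_ALIGN get one bin
 * per SIZE_ALIGN step (bins 0-32); larger sizes share progressively
 * wider bins, which is why malloc searches upward from bin_index_up
 * while free files chunks under bin_index. */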

#if 0
void __dump_heap(int x)
{
	struct chunk *c;
	int i;
	for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
		fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
			c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
			c->csize & 15,
			NEXT_CHUNK(c)->psize & 15);
	for (i=0; i<64; i++) {
		if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
			fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
			if (!(mal.binmap & 1ULL<<i))
				fprintf(stderr, "missing from binmap!\n");
		} else if (mal.binmap & 1ULL<<i)
			fprintf(stderr, "binmap wrongly contains %d!\n", i);
	}
}
#endif

void *__expand_heap(size_t *);

static struct chunk *expand_heap(size_t n)
{
	static int heap_lock[2];
	static void *end;
	void *p;
	struct chunk *w;

	/* The argument n already accounts for the caller's chunk
	 * overhead needs, but if the heap can't be extended in-place,
	 * we need room for an extra zero-sized sentinel chunk. */
	n += SIZE_ALIGN;

	lock(heap_lock);

	p = __expand_heap(&n);
	if (!p) {
		unlock(heap_lock);
		return 0;
	}

	/* If not just expanding existing space, we need to make a
	 * new sentinel chunk below the allocated space. */
	if (p != end) {
		/* Valid/safe because of the prologue increment. */
		n -= SIZE_ALIGN;
		p = (char *)p + SIZE_ALIGN;
		w = MEM_TO_CHUNK(p);
		w->psize = 0 | C_INUSE;
	}

	/* Record new heap end and fill in footer. */
	end = (char *)p + n;
	w = MEM_TO_CHUNK(end);
	w->psize = n | C_INUSE;
	w->csize = 0 | C_INUSE;

	/* Fill in header, which may be new or may be replacing a
	 * zero-size sentinel header at the old end-of-heap. */
	w = MEM_TO_CHUNK(p);
	w->csize = n | C_INUSE;

	unlock(heap_lock);

	return w;
}
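
/* Sentinel scheme (sketch of the invariant maintained above): the heap
 * always ends in a zero-size chunk whose C_INUSE bit stops forward
 * coalescing, and each discontiguous region gains a zero-size header
 * below it for the same reason in reverse:
 *
 *   [psize=0|INUSE][ new chunk ... ][psize=n|INUSE, csize=0|INUSE]
 *
 * When __expand_heap returns memory contiguous with the previous end,
 * the old zero-size trailer is simply overwritten as the new chunk's
 * header instead of burning SIZE_ALIGN bytes on a fresh sentinel. */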

static int adjust_size(size_t *n)
{
	/* Result of pointer difference must fit in ptrdiff_t. */
	if (*n-1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
		if (*n) {
			errno = ENOMEM;
			return -1;
		} else {
			*n = SIZE_ALIGN;
			return 0;
		}
	}
	*n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
	return 0;
}
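
/* Worked example (64-bit figures): a request of 25 bytes becomes
 * (25 + 16 + 31) & -32 == 64, i.e. OVERHEAD added and the total rounded
 * up to SIZE_ALIGN granularity; malloc(0) is bumped straight to the
 * minimum chunk size SIZE_ALIGN. The PTRDIFF_MAX guard rejects sizes
 * whose rounded form could overflow pointer arithmetic. */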

static void unbin(struct chunk *c, int i)
{
	if (c->prev == c->next)
		a_and_64(&mal.binmap, ~(1ULL<<i));
	c->prev->next = c->next;
	c->next->prev = c->prev;
	c->csize |= C_INUSE;
	NEXT_CHUNK(c)->psize |= C_INUSE;
}

static int alloc_fwd(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->csize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->csize == k) {
			unbin(c, i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}

static int alloc_rev(struct chunk *c)
{
	int i;
	size_t k;
	while (!((k=c->psize) & C_INUSE)) {
		i = bin_index(k);
		lock_bin(i);
		if (c->psize == k) {
			unbin(PREV_CHUNK(c), i);
			unlock_bin(i);
			return 1;
		}
		unlock_bin(i);
	}
	return 0;
}
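
/* Both helpers use an optimistic snapshot: the neighbor's size word is
 * read unlocked, the bin that size implies is locked, and the size is
 * re-checked before unbinning. If another thread consumed or re-binned
 * the chunk in the window, the loop retries with the fresh size, and
 * the C_INUSE test terminates it once the neighbor is allocated. */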

/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk *self, size_t n, int i, int j)
{
	size_t n1;
	struct chunk *next, *split;

	/* We cannot pretrim if it would require re-binning. */
	if (j < 40) return 0;
	if (j < i+3) {
		if (j != 63) return 0;
		n1 = CHUNK_SIZE(self);
		if (n1-n <= MMAP_THRESHOLD) return 0;
	} else {
		n1 = CHUNK_SIZE(self);
	}
	if (bin_index(n1-n) != j) return 0;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->prev = self->prev;
	split->next = self->next;
	split->prev->next = split;
	split->next->prev = split;
	split->psize = n | C_INUSE;
	split->csize = n1-n;
	next->psize = n1-n;
	self->csize = n | C_INUSE;
	return 1;
}
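
/* Why the guards above work (descriptive note): pretrim runs under bin
 * j's lock only, so the leftover piece must land back in bin j itself;
 * j >= 40 restricts this to the coarse bins, j >= i+3 (or j == 63 with
 * more than MMAP_THRESHOLD left over) makes the remainder large enough
 * for that to be possible, and the final bin_index(n1-n) != j test
 * confirms it exactly. */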

static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
	struct chunk *next, *split;

	if (n >= n1 - DONTCARE) return;

	next = NEXT_CHUNK(self);
	split = (void *)((char *)self + n);

	split->psize = n | C_INUSE;
	split->csize = n1-n | C_INUSE;
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;

	free(CHUNK_TO_MEM(split));
}
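
/* trim splits only when more than DONTCARE (16) bytes would be left
 * over; the remainder becomes a properly headed chunk that is pushed
 * back through free(), which re-bins it and coalesces it with any
 * free neighbor via the normal path. */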

void *malloc(size_t n)
{
	struct chunk *c;
	int i, j;

	if (adjust_size(&n) < 0) return 0;

	if (n > MMAP_THRESHOLD) {
		size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
		char *base = __mmap(0, len, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (base == (void *)-1) return 0;
		c = (void *)(base + SIZE_ALIGN - OVERHEAD);
		c->csize = len - (SIZE_ALIGN - OVERHEAD);
		c->psize = SIZE_ALIGN - OVERHEAD;
		return CHUNK_TO_MEM(c);
	}

	i = bin_index_up(n);
	for (;;) {
		uint64_t mask = mal.binmap & -(1ULL<<i);
		if (!mask) {
			c = expand_heap(n);
			if (!c) return 0;
			if (alloc_rev(c)) {
				struct chunk *x = c;
				c = PREV_CHUNK(c);
				NEXT_CHUNK(x)->psize = c->csize =
					x->csize + CHUNK_SIZE(c);
			}
			break;
		}
		j = first_set(mask);
		lock_bin(j);
		c = mal.bins[j].head;
		if (c != BIN_TO_CHUNK(j)) {
			if (!pretrim(c, n, i, j)) unbin(c, j);
			unlock_bin(j);
			break;
		}
		unlock_bin(j);
	}

	/* Now patch up in case we over-allocated */
	trim(c, n);

	return CHUNK_TO_MEM(c);
}
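
/* Notes on the two paths above (illustrative, 64-bit figures): requests
 * over MMAP_THRESHOLD (0x1c00 * 32 == 224 KiB) get a dedicated
 * anonymous mapping; the header is placed SIZE_ALIGN - OVERHEAD bytes
 * into it so the user pointer is SIZE_ALIGN-aligned, psize records that
 * offset, and the cleared C_INUSE bit in csize is exactly what
 * IS_MMAPPED tests. Everything else scans binmap upward from
 * bin_index_up(n), expanding the heap only when no populated bin can
 * satisfy the request. */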

void *__malloc0(size_t n)
{
	void *p = malloc(n);
	if (p && !IS_MMAPPED(MEM_TO_CHUNK(p))) {
		size_t *z;
		n = (n + sizeof *z - 1)/sizeof *z;
		for (z=p; n; n--, z++) if (*z) *z=0;
	}
	return p;
}
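
/* __malloc0 backs calloc. mmapped chunks come straight from fresh
 * anonymous pages and are already zero; for heap chunks, each word is
 * tested before being stored so that pages which are still untouched
 * copy-on-write zero pages are not dirtied just to write zeros over
 * zeros. */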

void *realloc(void *p, size_t n)
{
	struct chunk *self, *next;
	size_t n0, n1;
	void *new;

	if (!p) return malloc(n);

	if (adjust_size(&n) < 0) return 0;

	self = MEM_TO_CHUNK(p);
	n1 = n0 = CHUNK_SIZE(self);

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t oldlen = n0 + extra;
		size_t newlen = n + extra;
		/* Crash on realloc of freed chunk */
		if (extra & 1) a_crash();
		if (newlen < PAGE_SIZE && (new = malloc(n))) {
			memcpy(new, p, n-OVERHEAD);
			free(p);
			return new;
		}
		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
		if (oldlen == newlen) return p;
		base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
		if (base == (void *)-1)
			goto copy_realloc;
		self = (void *)(base + extra);
		self->csize = newlen - extra;
		return CHUNK_TO_MEM(self);
	}

	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	/* Merge adjacent chunks if we need more space. This is not
	 * a waste of time even if we fail to get enough space, because our
	 * subsequent call to free would otherwise have to do the merge. */
	if (n > n1 && alloc_fwd(next)) {
		n1 += CHUNK_SIZE(next);
		next = NEXT_CHUNK(next);
	}
	/* FIXME: find what's wrong here and reenable it..? */
	if (0 && n > n1 && alloc_rev(self)) {
		self = PREV_CHUNK(self);
		n1 += CHUNK_SIZE(self);
	}
	self->csize = n1 | C_INUSE;
	next->psize = n1 | C_INUSE;

	/* If we got enough space, split off the excess and return */
	if (n <= n1) {
		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
		trim(self, n);
		return CHUNK_TO_MEM(self);
	}

copy_realloc:
	/* As a last resort, allocate a new chunk and copy to it. */
	new = malloc(n-OVERHEAD);
	if (!new) return 0;
	memcpy(new, p, n0-OVERHEAD);
	free(CHUNK_TO_MEM(self));
	return new;
}
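
/* Note on the mmapped branch (descriptive): self->psize holds the
 * offset from the mapping base to the header (SIZE_ALIGN - OVERHEAD
 * for chunks created in malloc above), which lets realloc reconstruct
 * base and oldlen for __mremap; an odd value means the mapping was
 * already freed, hence the a_crash(). New sizes below a page fall back
 * to malloc+memcpy rather than keeping a whole mapping alive. */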

void free(void *p)
{
	struct chunk *self, *next;
	size_t final_size, new_size, size;
	int reclaim=0;
	int i;

	if (!p) return;

	self = MEM_TO_CHUNK(p);

	if (IS_MMAPPED(self)) {
		size_t extra = self->psize;
		char *base = (char *)self - extra;
		size_t len = CHUNK_SIZE(self) + extra;
		/* Crash on double free */
		if (extra & 1) a_crash();
		__munmap(base, len);
		return;
	}

	final_size = new_size = CHUNK_SIZE(self);
	next = NEXT_CHUNK(self);

	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();

	for (;;) {
		if (self->psize & next->csize & C_INUSE) {
			self->csize = final_size | C_INUSE;
			next->psize = final_size | C_INUSE;
			i = bin_index(final_size);
			lock_bin(i);
			lock(mal.free_lock);
			if (self->psize & next->csize & C_INUSE)
				break;
			unlock(mal.free_lock);
			unlock_bin(i);
		}

		if (alloc_rev(self)) {
			self = PREV_CHUNK(self);
			size = CHUNK_SIZE(self);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
		}

		if (alloc_fwd(next)) {
			size = CHUNK_SIZE(next);
			final_size += size;
			if (new_size+size > RECLAIM && (new_size+size^size) > size)
				reclaim = 1;
			next = NEXT_CHUNK(next);
		}
	}

	if (!(mal.binmap & 1ULL<<i))
		a_or_64(&mal.binmap, 1ULL<<i);

	self->csize = final_size;
	next->psize = final_size;
	unlock(mal.free_lock);

	self->next = BIN_TO_CHUNK(i);
	self->prev = mal.bins[i].tail;
	self->next->prev = self;
	self->prev->next = self;

	/* Replace middle of large chunks with fresh zero pages */
	if (reclaim) {
		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
		__madvise((void *)a, b-a, MADV_DONTNEED);
#else
		__mmap((void *)a, b-a, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
#endif
	}

	unlock_bin(i);
}
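
/* Reclaim pass (sketch of the intent): once a coalesced free chunk has
 * grown well past RECLAIM (160 KiB), the page-aligned interior between
 * the two retained ends is released with MADV_DONTNEED, letting the
 * kernel drop the physical pages while the chunk stays mapped and
 * linked in its bin; the disabled alternative gets the same effect by
 * overmapping fresh anonymous pages. */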