/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
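
/* A minimal lwipopts.h fragment enabling the pool-based mem_malloc() could
 * look like this (a sketch: the option names are lwIP configuration options,
 * the values are only an example):
 *
 *   #define MEM_LIBC_MALLOC                0
 *   #define MEM_USE_POOLS                  1
 *   #define MEMP_USE_CUSTOM_POOLS          1
 *   // optionally fall back to the next larger pool when one runs empty:
 *   #define MEM_USE_POOLS_TRY_BIGGER_POOL  1
 */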

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"

#if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */

#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_USE_POOLS
/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element;
  memp_t poolnr;
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_sizes[poolnr]) {
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    return NULL;
  }
  element = (struct memp_malloc_helper*)memp_malloc(poolnr);
  if (element == NULL) {
    /* No need to DEBUGF or ASSERT: This error is already
       taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
    /** Try a bigger pool if this one is empty! */
    if (poolnr < MEMP_POOL_LAST) {
      poolnr++;
      goto again;
    }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  return ret;
}
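
/* Layout of a pool-allocated element, as returned by mem_malloc() above
 * (a sketch; struct memp_malloc_helper is defined in memp.h and stores the
 * pool number the element came from):
 *
 *   +-----------------------------+----------------------------+
 *   | struct memp_malloc_helper   | user data ('size' bytes)   |
 *   | (aligned, element->poolnr)  | <- pointer returned        |
 *   +-----------------------------+----------------------------+
 *
 * mem_free() below subtracts the aligned helper size again to recover the
 * helper and thus the pool to return the element to.
 */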

/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool.
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned, since for getting its size
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};

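/* The heap is one contiguous byte array; each block starts with a struct mem
 * header followed by its data. 'next' and 'prev' are byte offsets into that
 * array, not pointers, so the list layout is independent of where the heap
 * lives. A sketch (offsets are only an example):
 *
 *   ram[0]               ram[mem->next]          ram[MEM_SIZE_ALIGNED]
 *   +--------+---------+ +--------+------------+ +----------+
 *   | struct | data    | | struct | data       | | ram_end  |
 *   | mem    |         | | mem    |            | | (used=1) |
 *   +--------+---------+ +--------+------------+ +----------+
 *
 * The size of a block's data region is implicit:
 * mem->next - own_offset - SIZEOF_STRUCT_MEM.
 */
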
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values help prevent very small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE 12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED   LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM  LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED   LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

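/* LWIP_MEM_ALIGN_SIZE() (from mem.h) rounds a size up to the next multiple
 * of MEM_ALIGNMENT. For example, assuming MEM_ALIGNMENT == 4:
 *
 *   LWIP_MEM_ALIGN_SIZE(12) == 12
 *   LWIP_MEM_ALIGN_SIZE(13) == 16
 *
 * so SIZEOF_STRUCT_MEM is sizeof(struct mem) rounded up to the alignment. */
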
/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */

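/* For example, to place the heap in external SRAM one could add something
 * like this to lwipopts.h (a sketch; the address is entirely hypothetical
 * and must provide MEM_SIZE_ALIGNED + 2*SIZEOF_STRUCT_MEM + MEM_ALIGNMENT
 * bytes of usable memory):
 *
 *   #define LWIP_RAM_HEAP_POINTER ((void *)0xA0000000)
 */
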
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using the mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using the mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
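
/* SYS_ARCH_DECL_PROTECT/SYS_ARCH_PROTECT/SYS_ARCH_UNPROTECT are provided by
 * the port (see sys.h and the port's sys_arch): they declare a saved status
 * variable, enter a critical section (typically by disabling interrupts),
 * and leave it again. That short critical section is what allows mem_free()
 * and mem_trim() to be called from interrupt context when
 * LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT is enabled. */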


/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, no empty struct mem should point
 * to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}

/**
 * Initialize the heap: set up the start and end boundary structs and the
 * lowest-free pointer (the heap contents themselves are not zeroed).
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}

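/* Note: mem_init() must run before the first mem_malloc(). In a standard
 * setup it is invoked from the stack's initialization sequence (lwip_init()),
 * so applications normally do not call it directly. */
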
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}

/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment;
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with the mutex OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}

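/* Typical usage pattern for mem_trim() (a sketch, not code from this file):
 * allocate a worst-case sized buffer, fill it, then give back the unused
 * tail. lwIP itself uses this e.g. when shrinking PBUF_RAM pbufs.
 *
 *   char *buf = (char *)mem_malloc(MAX_LEN);
 *   if (buf != NULL) {
 *     mem_size_t used_len = fill_buffer(buf);  // hypothetical helper
 *     buf = (char *)mem_trim(buf, used_len);   // always == old buf here
 *   }
 */
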
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
           * be used at this point: if not, we would have 2 unused structs in a row and plug_holes
           * should have taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation,
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = (struct mem *)(void *)&ram[cur->next];
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
          (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
          ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  return NULL;
}

#endif /* MEM_USE_POOLS */

/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p) {
    /* zero the memory */
    memset(p, 0, count * size);
  }
  return p;
}
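
/* Note: 'count * size' is a mem_size_t multiplication and can silently wrap
 * for large arguments. A caller-side guard could look like this (a sketch;
 * overflow checking is not part of this file):
 *
 *   void *p = NULL;
 *   if (size == 0 || count <= (mem_size_t)-1 / size) {
 *     p = mem_calloc(count, size);
 *   }
 */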

#endif /* !MEM_LIBC_MALLOC */