source: azure_iot_hub_f767zi/trunk/asp_baseplatform/lwip/lwip-2.1.2/src/core/pbuf.c@ 457

Last change on this file since 457 was 457, checked in by coas-nagasima, 4 years ago

Added file

  • Property svn:eol-style set to native
  • Property svn:mime-type set to text/x-csrc;charset=UTF-8
File size: 48.6 KB
1/**
2 * @file
3 * Packet buffer management
4 */
5
6/**
7 * @defgroup pbuf Packet buffers (PBUF)
8 * @ingroup infrastructure
9 *
10 * Packets are built from the pbuf data structure. It supports dynamic
11 * memory allocation for packet contents or can reference externally
12 * managed packet contents both in RAM and ROM. Quick allocation for
13 * incoming packets is provided through pools with fixed sized pbufs.
14 *
15 * A packet may span over multiple pbufs, chained as a singly linked
16 * list. This is called a "pbuf chain".
17 *
18 * Multiple packets may be queued, also using this singly linked list.
19 * This is called a "packet queue".
20 *
21 * So, a packet queue consists of one or more pbuf chains, each of
22 * which consists of one or more pbufs. CURRENTLY, PACKET QUEUES ARE
23 * NOT SUPPORTED!!! Use helper structs to queue multiple packets.
24 *
25 * The difference between a pbuf chain and a packet queue is
26 * well-defined but subtle.
27 *
28 * The last pbuf of a packet has a ->tot_len field that equals the
29 * ->len field. It can be found by traversing the list. If the last
30 * pbuf of a packet has a ->next field other than NULL, more packets
31 * are on the queue.
32 *
33 * Therefore, a loop over the pbufs of a single packet has the
34 * end condition (tot_len == p->len), NOT (next == NULL).
35 *
36 * Example of custom pbuf usage: @ref zerocopyrx
37 */
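/* Illustrative sketch (not part of the original lwIP source): walking the
 * pbufs of a single packet using the loop end condition described above.
 * The function name is hypothetical and the block is never compiled. */
#if 0 /* documentation-only example */
static u16_t
pbuf_example_count_packet_bytes(const struct pbuf *p)
{
  u16_t bytes = 0;
  const struct pbuf *q;
  for (q = p; q != NULL; q = q->next) {
    bytes = (u16_t)(bytes + q->len);
    if (q->tot_len == q->len) {
      break; /* last pbuf of this packet reached (equals p->tot_len now) */
    }
  }
  return bytes;
}
#endif /* documentation-only example */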
38
39/*
40 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without modification,
44 * are permitted provided that the following conditions are met:
45 *
46 * 1. Redistributions of source code must retain the above copyright notice,
47 * this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright notice,
49 * this list of conditions and the following disclaimer in the documentation
50 * and/or other materials provided with the distribution.
51 * 3. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
55 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
56 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
57 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
58 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
59 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
62 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
63 * OF SUCH DAMAGE.
64 *
65 * This file is part of the lwIP TCP/IP stack.
66 *
67 * Author: Adam Dunkels <adam@sics.se>
68 *
69 */
70
71#include "lwip/opt.h"
72
73#include "lwip/pbuf.h"
74#include "lwip/stats.h"
75#include "lwip/def.h"
76#include "lwip/mem.h"
77#include "lwip/memp.h"
78#include "lwip/sys.h"
79#include "lwip/netif.h"
80#if LWIP_TCP && TCP_QUEUE_OOSEQ
81#include "lwip/priv/tcp_priv.h"
82#endif
83#if LWIP_CHECKSUM_ON_COPY
84#include "lwip/inet_chksum.h"
85#endif
86
87#include <string.h>
88
89#define SIZEOF_STRUCT_PBUF LWIP_MEM_ALIGN_SIZE(sizeof(struct pbuf))
90/* Since the pool is created in memp, PBUF_POOL_BUFSIZE will be automatically
91 aligned there. Therefore, PBUF_POOL_BUFSIZE_ALIGNED can be used here. */
92#define PBUF_POOL_BUFSIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE)
93
94static const struct pbuf *
95pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset);
96
97#if !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ
98#define PBUF_POOL_IS_EMPTY()
99#else /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
100
101#if !NO_SYS
102#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
103#include "lwip/tcpip.h"
104#define PBUF_POOL_FREE_OOSEQ_QUEUE_CALL() do { \
105 if (tcpip_try_callback(pbuf_free_ooseq_callback, NULL) != ERR_OK) { \
106 SYS_ARCH_PROTECT(old_level); \
107 pbuf_free_ooseq_pending = 0; \
108 SYS_ARCH_UNPROTECT(old_level); \
109 } } while(0)
110#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
111#endif /* !NO_SYS */
112
113volatile u8_t pbuf_free_ooseq_pending;
114#define PBUF_POOL_IS_EMPTY() pbuf_pool_is_empty()
115
116/**
117 * Attempt to reclaim some memory from queued out-of-sequence TCP segments
118 * if we run out of pool pbufs. It's better to give priority to new packets
119 * if we're running out.
120 *
121 * This must be done in the correct thread context therefore this function
122 * can only be used with NO_SYS=0 and through tcpip_callback.
123 */
124#if !NO_SYS
125static
126#endif /* !NO_SYS */
127void
128pbuf_free_ooseq(void)
129{
130 struct tcp_pcb *pcb;
131 SYS_ARCH_SET(pbuf_free_ooseq_pending, 0);
132
133 for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
134 if (pcb->ooseq != NULL) {
135 /** Free the ooseq pbufs of one PCB only */
136 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
137 tcp_free_ooseq(pcb);
138 return;
139 }
140 }
141}
142
143#if !NO_SYS
144/**
145 * Just a callback function for tcpip_callback() that calls pbuf_free_ooseq().
146 */
147static void
148pbuf_free_ooseq_callback(void *arg)
149{
150 LWIP_UNUSED_ARG(arg);
151 pbuf_free_ooseq();
152}
153#endif /* !NO_SYS */
154
155/** Queue a call to pbuf_free_ooseq if not already queued. */
156static void
157pbuf_pool_is_empty(void)
158{
159#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
160 SYS_ARCH_SET(pbuf_free_ooseq_pending, 1);
161#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
162 u8_t queued;
163 SYS_ARCH_DECL_PROTECT(old_level);
164 SYS_ARCH_PROTECT(old_level);
165 queued = pbuf_free_ooseq_pending;
166 pbuf_free_ooseq_pending = 1;
167 SYS_ARCH_UNPROTECT(old_level);
168
169 if (!queued) {
170 /* queue a call to pbuf_free_ooseq if not already queued */
171 PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
172 }
173#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
174}
175#endif /* !LWIP_TCP || !TCP_QUEUE_OOSEQ || !PBUF_POOL_FREE_OOSEQ */
176
177/* Initialize members of struct pbuf after allocation */
178static void
179pbuf_init_alloced_pbuf(struct pbuf *p, void *payload, u16_t tot_len, u16_t len, pbuf_type type, u8_t flags)
180{
181 p->next = NULL;
182 p->payload = payload;
183 p->tot_len = tot_len;
184 p->len = len;
185 p->type_internal = (u8_t)type;
186 p->flags = flags;
187 p->ref = 1;
188 p->if_idx = NETIF_NO_INDEX;
189}
190
191/**
192 * @ingroup pbuf
193 * Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
194 *
195 * The actual memory allocated for the pbuf is determined by the
196 * layer at which the pbuf is allocated and the requested size
197 * (from the size parameter).
198 *
199 * @param layer header size
200 * @param length size of the pbuf's payload
201 * @param type this parameter decides how and where the pbuf
202 * should be allocated as follows:
203 *
204 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
205 * chunk. This includes protocol headers as well.
206 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
207 * protocol headers. Additional headers must be prepended
208 * by allocating another pbuf and chain in to the front of
209 * the ROM pbuf. It is assumed that the memory used is really
210 * similar to ROM in that it is immutable and will not be
211 * changed. Memory which is dynamic should generally not
212 * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
213 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
214 * protocol headers. It is assumed that the pbuf is only
215 * being used in a single thread. If the pbuf gets queued,
216 * then pbuf_take should be called to copy the buffer.
217 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
218 * the pbuf pool that is allocated during pbuf_init().
219 *
220 * @return the allocated pbuf. If multiple pbufs were allocated, this
221 * is the first pbuf of a pbuf chain.
222 */
223struct pbuf *
224pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
225{
226 struct pbuf *p;
227 u16_t offset = (u16_t)layer;
228 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
229
230 switch (type) {
231 case PBUF_REF: /* fall through */
232 case PBUF_ROM:
233 p = pbuf_alloc_reference(NULL, length, type);
234 break;
235 case PBUF_POOL: {
236 struct pbuf *q, *last;
237 u16_t rem_len; /* remaining length */
238 p = NULL;
239 last = NULL;
240 rem_len = length;
241 do {
242 u16_t qlen;
243 q = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
244 if (q == NULL) {
245 PBUF_POOL_IS_EMPTY();
246 /* free chain so far allocated */
247 if (p) {
248 pbuf_free(p);
249 }
250 /* bail out unsuccessfully */
251 return NULL;
252 }
253 qlen = LWIP_MIN(rem_len, (u16_t)(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)));
254 pbuf_init_alloced_pbuf(q, LWIP_MEM_ALIGN((void *)((u8_t *)q + SIZEOF_STRUCT_PBUF + offset)),
255 rem_len, qlen, type, 0);
256 LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
257 ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
258 LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
259 (PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
260 if (p == NULL) {
261 /* allocated head of pbuf chain (into p) */
262 p = q;
263 } else {
264 /* make previous pbuf point to this pbuf */
265 last->next = q;
266 }
267 last = q;
268 rem_len = (u16_t)(rem_len - qlen);
269 offset = 0;
270 } while (rem_len > 0);
271 break;
272 }
273 case PBUF_RAM: {
274 u16_t payload_len = (u16_t)(LWIP_MEM_ALIGN_SIZE(offset) + LWIP_MEM_ALIGN_SIZE(length));
275 mem_size_t alloc_len = (mem_size_t)(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF) + payload_len);
276
277 /* bug #50040: Check for integer overflow when calculating alloc_len */
278 if ((payload_len < LWIP_MEM_ALIGN_SIZE(length)) ||
279 (alloc_len < LWIP_MEM_ALIGN_SIZE(length))) {
280 return NULL;
281 }
282
283 /* If pbuf is to be allocated in RAM, allocate memory for it. */
284 p = (struct pbuf *)mem_malloc(alloc_len);
285 if (p == NULL) {
286 return NULL;
287 }
288 pbuf_init_alloced_pbuf(p, LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset)),
289 length, length, type, 0);
290 LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
291 ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
292 break;
293 }
294 default:
295 LWIP_ASSERT("pbuf_alloc: erroneous type", 0);
296 return NULL;
297 }
298 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F") == %p\n", length, (void *)p));
299 return p;
300}
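/* Illustrative sketch (not part of the original lwIP source): allocating a
 * PBUF_RAM pbuf with room for transport/IP/link headers, filling the payload
 * and releasing it. The 64-byte payload size is an arbitrary assumption and
 * the block is never compiled. */
#if 0 /* documentation-only example */
static void
pbuf_example_alloc_ram(void)
{
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, 64, PBUF_RAM);
  if (p != NULL) {
    /* a PBUF_RAM payload is one contiguous block of p->len == p->tot_len bytes */
    memset(p->payload, 0, p->len);
    pbuf_free(p); /* drop the initial reference; memory goes back to the heap */
  }
}
#endif /* documentation-only example */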
301
302/**
303 * @ingroup pbuf
304 * Allocates a pbuf for referenced data.
305 * Referenced data can be volatile (PBUF_REF) or long-lived (PBUF_ROM).
306 *
307 * The actual memory allocated for the pbuf is determined by the
308 * layer at which the pbuf is allocated and the requested size
309 * (from the size parameter).
310 *
311 * @param payload referenced payload
312 * @param length size of the pbuf's payload
313 * @param type this parameter decides how and where the pbuf
314 * should be allocated as follows:
315 *
316 * - PBUF_ROM: It is assumed that the memory used is really
317 * similar to ROM in that it is immutable and will not be
318 * changed. Memory which is dynamic should generally not
319 * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
320 * - PBUF_REF: It is assumed that the pbuf is only
321 * being used in a single thread. If the pbuf gets queued,
322 * then pbuf_take should be called to copy the buffer.
323 *
324 * @return the allocated pbuf.
325 */
326struct pbuf *
327pbuf_alloc_reference(void *payload, u16_t length, pbuf_type type)
328{
329 struct pbuf *p;
330 LWIP_ASSERT("invalid pbuf_type", (type == PBUF_REF) || (type == PBUF_ROM));
331 /* only allocate memory for the pbuf structure */
332 p = (struct pbuf *)memp_malloc(MEMP_PBUF);
333 if (p == NULL) {
334 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
335 ("pbuf_alloc_reference: Could not allocate MEMP_PBUF for PBUF_%s.\n",
336 (type == PBUF_ROM) ? "ROM" : "REF"));
337 return NULL;
338 }
339 pbuf_init_alloced_pbuf(p, payload, length, length, type, 0);
340 return p;
341}
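/* Illustrative sketch (not part of the original lwIP source): wrapping an
 * immutable static buffer in a PBUF_ROM pbuf. Only the pbuf struct itself is
 * allocated (from MEMP_PBUF); the payload is referenced, not copied. Names
 * are hypothetical and the block is never compiled. */
#if 0 /* documentation-only example */
static const char pbuf_example_banner[] = "220 service ready\r\n";

static struct pbuf *
pbuf_example_wrap_rom(void)
{
  /* sizeof() - 1: do not send the terminating '\0' */
  return pbuf_alloc_reference(LWIP_CONST_CAST(void *, pbuf_example_banner),
                              (u16_t)(sizeof(pbuf_example_banner) - 1), PBUF_ROM);
}
#endif /* documentation-only example */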
342
343
344#if LWIP_SUPPORT_CUSTOM_PBUF
345/**
346 * @ingroup pbuf
347 * Initialize a custom pbuf (already allocated).
348 * Example of custom pbuf usage: @ref zerocopyrx
349 *
350 * @param l header size
351 * @param length size of the pbuf's payload
352 * @param type type of the pbuf (only used to treat the pbuf accordingly, as
353 * this function allocates no memory)
354 * @param p pointer to the custom pbuf to initialize (already allocated)
355 * @param payload_mem pointer to the buffer that is used for payload and headers,
356 * must be at least big enough to hold 'length' plus the header size,
357 * may be NULL if set later.
358 * ATTENTION: The caller is responsible for correct alignment of this buffer!!
359 * @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
360 * big enough to hold 'length' plus the header size
361 */
362struct pbuf *
363pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
364 void *payload_mem, u16_t payload_mem_len)
365{
366 u16_t offset = (u16_t)l;
367 void *payload;
368 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
369
370 if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
371 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
372 return NULL;
373 }
374
375 if (payload_mem != NULL) {
376 payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
377 } else {
378 payload = NULL;
379 }
380 pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM);
381 return &p->pbuf;
382}
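/* Illustrative sketch (not part of the original lwIP source): a driver-side
 * custom pbuf that hands a DMA buffer to the stack without copying, in the
 * spirit of @ref zerocopyrx. The my_rx_* names and the 1536-byte buffer size
 * are hypothetical assumptions; the block is never compiled. */
#if 0 /* documentation-only example */
struct my_rx_buf {
  struct pbuf_custom pc;   /* must be the first member */
  u8_t data[1536];
};

static void
my_rx_buf_free(struct pbuf *p)
{
  struct my_rx_buf *rx = (struct my_rx_buf *)p;
  /* return 'rx' to the driver's buffer pool here */
  LWIP_UNUSED_ARG(rx);
}

static struct pbuf *
my_rx_buf_to_pbuf(struct my_rx_buf *rx, u16_t frame_len)
{
  rx->pc.custom_free_function = my_rx_buf_free;
  return pbuf_alloced_custom(PBUF_RAW, frame_len, PBUF_REF,
                             &rx->pc, rx->data, (u16_t)sizeof(rx->data));
}
#endif /* documentation-only example */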
383#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
384
385/**
386 * @ingroup pbuf
387 * Shrink a pbuf chain to a desired length.
388 *
389 * @param p pbuf to shrink.
390 * @param new_len desired new length of pbuf chain
391 *
392 * Depending on the desired length, the first few pbufs in a chain might
393 * be skipped and left unchanged. The new last pbuf in the chain will be
394 * resized, and any remaining pbufs will be freed.
395 *
396 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
397 * @note May not be called on a packet queue.
398 *
399 * @note Despite its name, pbuf_realloc cannot grow the size of a pbuf (chain).
400 */
401void
402pbuf_realloc(struct pbuf *p, u16_t new_len)
403{
404 struct pbuf *q;
405 u16_t rem_len; /* remaining length */
406 u16_t shrink;
407
408 LWIP_ASSERT("pbuf_realloc: p != NULL", p != NULL);
409
410 /* desired length larger than current length? */
411 if (new_len >= p->tot_len) {
412 /* enlarging not yet supported */
413 return;
414 }
415
416 /* the pbuf chain shrinks by (p->tot_len - new_len) bytes
417 * (enlarging was rejected by the check above) */
418 shrink = (u16_t)(p->tot_len - new_len);
419
420 /* first, step over any pbufs that should remain in the chain */
421 rem_len = new_len;
422 q = p;
423 /* should this pbuf be kept? */
424 while (rem_len > q->len) {
425 /* decrease remaining length by pbuf length */
426 rem_len = (u16_t)(rem_len - q->len);
427 /* decrease total length indicator */
428 q->tot_len = (u16_t)(q->tot_len - shrink);
429 /* proceed to next pbuf in chain */
430 q = q->next;
431 LWIP_ASSERT("pbuf_realloc: q != NULL", q != NULL);
432 }
433 /* we have now reached the new last pbuf (in q) */
434 /* rem_len == desired length for pbuf q */
435
436 /* shrink allocated memory for PBUF_RAM */
437 /* (other types merely adjust their length fields) */
438 if (pbuf_match_allocsrc(q, PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) && (rem_len != q->len)
439#if LWIP_SUPPORT_CUSTOM_PBUF
440 && ((q->flags & PBUF_FLAG_IS_CUSTOM) == 0)
441#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
442 ) {
443 /* reallocate and adjust the length of the pbuf that will be split */
444 q = (struct pbuf *)mem_trim(q, (mem_size_t)(((u8_t *)q->payload - (u8_t *)q) + rem_len));
445 LWIP_ASSERT("mem_trim returned q == NULL", q != NULL);
446 }
447 /* adjust length fields for new last pbuf */
448 q->len = rem_len;
449 q->tot_len = q->len;
450
451 /* any remaining pbufs in chain? */
452 if (q->next != NULL) {
453 /* free remaining pbufs in chain */
454 pbuf_free(q->next);
455 }
456 /* q is last packet in chain */
457 q->next = NULL;
458
459}
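/* Illustrative sketch (not part of the original lwIP source): trimming a
 * received pbuf chain down to the length reported by a protocol header.
 * 'parsed_len' is a hypothetical value; the block is never compiled. */
#if 0 /* documentation-only example */
static void
pbuf_example_trim(struct pbuf *p, u16_t parsed_len)
{
  if (parsed_len < p->tot_len) {
    /* frees trailing pbufs and shrinks the new last pbuf in place */
    pbuf_realloc(p, parsed_len);
  }
}
#endif /* documentation-only example */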
460
461/**
462 * Adjusts the payload pointer to reveal headers in the payload.
463 * @see pbuf_add_header.
464 *
465 * @param p pbuf to change the header size.
466 * @param header_size_increment Number of bytes to increment header size.
467 * @param force Allow 'header_size_increment > 0' for PBUF_REF/PBUF_ROM types
468 *
469 * @return non-zero on failure, zero on success.
470 *
471 */
472static u8_t
473pbuf_add_header_impl(struct pbuf *p, size_t header_size_increment, u8_t force)
474{
475 u16_t type_internal;
476 void *payload;
477 u16_t increment_magnitude;
478
479 LWIP_ASSERT("p != NULL", p != NULL);
480 if ((p == NULL) || (header_size_increment > 0xFFFF)) {
481 return 1;
482 }
483 if (header_size_increment == 0) {
484 return 0;
485 }
486
487 increment_magnitude = (u16_t)header_size_increment;
488 /* Do not allow tot_len to wrap as a result. */
489 if ((u16_t)(increment_magnitude + p->tot_len) < increment_magnitude) {
490 return 1;
491 }
492
493 type_internal = p->type_internal;
494
495 /* pbuf types containing payloads? */
496 if (type_internal & PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS) {
497 /* set new payload pointer */
498 payload = (u8_t *)p->payload - header_size_increment;
499 /* boundary check fails? */
500 if ((u8_t *)payload < (u8_t *)p + SIZEOF_STRUCT_PBUF) {
501 LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE,
502 ("pbuf_add_header: failed as %p < %p (not enough space for new header size)\n",
503 (void *)payload, (void *)((u8_t *)p + SIZEOF_STRUCT_PBUF)));
504 /* bail out unsuccessfully */
505 return 1;
506 }
507 /* pbuf types referring to external payloads? */
508 } else {
509 /* hide a header in the payload? */
510 if (force) {
511 payload = (u8_t *)p->payload - header_size_increment;
512 } else {
513 /* cannot expand payload to front (yet!)
514 * bail out unsuccessfully */
515 return 1;
516 }
517 }
518 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_add_header: old %p new %p (%"U16_F")\n",
519 (void *)p->payload, (void *)payload, increment_magnitude));
520
521 /* modify pbuf fields */
522 p->payload = payload;
523 p->len = (u16_t)(p->len + increment_magnitude);
524 p->tot_len = (u16_t)(p->tot_len + increment_magnitude);
525
526
527 return 0;
528}
529
530/**
531 * Adjusts the payload pointer to reveal headers in the payload.
532 *
533 * Adjusts the ->payload pointer so that space for a header
534 * appears in the pbuf payload.
535 *
536 * The ->payload, ->tot_len and ->len fields are adjusted.
537 *
538 * @param p pbuf to change the header size.
539 * @param header_size_increment Number of bytes to increment header size which
540 * increases the size of the pbuf. New space is on the front.
541 * If header_size_increment is 0, this function does nothing and returns successful.
542 *
543 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
544 * the call will fail. A check is made that the increase in header size does
545 * not move the payload pointer in front of the start of the buffer.
546 *
547 * @return non-zero on failure, zero on success.
548 *
549 */
550u8_t
551pbuf_add_header(struct pbuf *p, size_t header_size_increment)
552{
553 return pbuf_add_header_impl(p, header_size_increment, 0);
554}
555
556/**
557 * Same as @ref pbuf_add_header but does not check if 'header_size > 0' is allowed.
558 * This is used internally only, to allow PBUF_REF for RX.
559 */
560u8_t
561pbuf_add_header_force(struct pbuf *p, size_t header_size_increment)
562{
563 return pbuf_add_header_impl(p, header_size_increment, 1);
564}
565
566/**
567 * Adjusts the payload pointer to hide headers in the payload.
568 *
569 * Adjusts the ->payload pointer so that space for a header
570 * disappears in the pbuf payload.
571 *
572 * The ->payload, ->tot_len and ->len fields are adjusted.
573 *
574 * @param p pbuf to change the header size.
575 * @param header_size_decrement Number of bytes to decrement header size which
576 * decreases the size of the pbuf.
577 * If header_size_decrement is 0, this function does nothing and returns successful.
578 * @return non-zero on failure, zero on success.
579 *
580 */
581u8_t
582pbuf_remove_header(struct pbuf *p, size_t header_size_decrement)
583{
584 void *payload;
585 u16_t increment_magnitude;
586
587 LWIP_ASSERT("p != NULL", p != NULL);
588 if ((p == NULL) || (header_size_decrement > 0xFFFF)) {
589 return 1;
590 }
591 if (header_size_decrement == 0) {
592 return 0;
593 }
594
595 increment_magnitude = (u16_t)header_size_decrement;
596 /* Check that we aren't going to move off the end of the pbuf */
597 LWIP_ERROR("increment_magnitude <= p->len", (increment_magnitude <= p->len), return 1;);
598
599 /* remember current payload pointer */
600 payload = p->payload;
601 LWIP_UNUSED_ARG(payload); /* only used in LWIP_DEBUGF below */
602
603 /* increase payload pointer (guarded by length check above) */
604 p->payload = (u8_t *)p->payload + header_size_decrement;
605 /* modify pbuf length fields */
606 p->len = (u16_t)(p->len - increment_magnitude);
607 p->tot_len = (u16_t)(p->tot_len - increment_magnitude);
608
609 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_remove_header: old %p new %p (%"U16_F")\n",
610 (void *)payload, (void *)p->payload, increment_magnitude));
611
612 return 0;
613}
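/* Illustrative sketch (not part of the original lwIP source): hiding a
 * protocol header after it has been processed and revealing it again before
 * handing the packet back, as a protocol layer typically does. The 8-byte
 * header size is an arbitrary assumption; the block is never compiled. */
#if 0 /* documentation-only example */
static void
pbuf_example_headers(struct pbuf *p)
{
  if (pbuf_remove_header(p, 8) == 0) {  /* payload now starts after the header */
    /* ... process the payload ... */
    pbuf_add_header(p, 8);              /* expose the header again */
  }
}
#endif /* documentation-only example */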
614
615static u8_t
616pbuf_header_impl(struct pbuf *p, s16_t header_size_increment, u8_t force)
617{
618 if (header_size_increment < 0) {
619 return pbuf_remove_header(p, (size_t) - header_size_increment);
620 } else {
621 return pbuf_add_header_impl(p, (size_t)header_size_increment, force);
622 }
623}
624
625/**
626 * Adjusts the payload pointer to hide or reveal headers in the payload.
627 *
628 * Adjusts the ->payload pointer so that space for a header
629 * (dis)appears in the pbuf payload.
630 *
631 * The ->payload, ->tot_len and ->len fields are adjusted.
632 *
633 * @param p pbuf to change the header size.
634 * @param header_size_increment Number of bytes to increment header size which
635 * increases the size of the pbuf. New space is on the front.
636 * (Using a negative value decreases the header size.)
637 * If header_size_increment is 0, this function does nothing and returns successful.
638 *
639 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
640 * the call will fail. A check is made that the increase in header size does
641 * not move the payload pointer in front of the start of the buffer.
642 * @return non-zero on failure, zero on success.
643 *
644 */
645u8_t
646pbuf_header(struct pbuf *p, s16_t header_size_increment)
647{
648 return pbuf_header_impl(p, header_size_increment, 0);
649}
650
651/**
652 * Same as pbuf_header but does not check if 'header_size > 0' is allowed.
653 * This is used internally only, to allow PBUF_REF for RX.
654 */
655u8_t
656pbuf_header_force(struct pbuf *p, s16_t header_size_increment)
657{
658 return pbuf_header_impl(p, header_size_increment, 1);
659}
660
661/** Similar to pbuf_header(-size) but de-refs header pbufs for (size >= p->len)
662 *
663 * @param q pbufs to operate on
664 * @param size The number of bytes to remove from the beginning of the pbuf list.
665 * While size >= p->len, pbufs are freed.
666 * ATTENTION: this is the opposite direction from @ref pbuf_header, but
667 * takes a u16_t, not an s16_t!
668 * @return the new head pbuf
669 */
670struct pbuf *
671pbuf_free_header(struct pbuf *q, u16_t size)
672{
673 struct pbuf *p = q;
674 u16_t free_left = size;
675 while (free_left && p) {
676 if (free_left >= p->len) {
677 struct pbuf *f = p;
678 free_left = (u16_t)(free_left - p->len);
679 p = p->next;
680 f->next = 0;
681 pbuf_free(f);
682 } else {
683 pbuf_remove_header(p, free_left);
684 free_left = 0;
685 }
686 }
687 return p;
688}
689
690/**
691 * @ingroup pbuf
692 * Dereference a pbuf chain or queue and deallocate any no-longer-used
693 * pbufs at the head of this chain or queue.
694 *
695 * Decrements the pbuf reference count. If it reaches zero, the pbuf is
696 * deallocated.
697 *
698 * For a pbuf chain, this is repeated for each pbuf in the chain,
699 * up to the first pbuf which has a non-zero reference count after
700 * decrementing. So, when all reference counts are one, the whole
701 * chain is free'd.
702 *
703 * @param p The pbuf (chain) to be dereferenced.
704 *
705 * @return the number of pbufs that were de-allocated
706 * from the head of the chain.
707 *
708 * @note MUST NOT be called on a packet queue (Not verified to work yet).
709 * @note the reference counter of a pbuf equals the number of pointers
710 * that refer to the pbuf (or into the pbuf).
711 *
712 * @internal examples:
713 *
714 * Assuming existing chains a->b->c with the following reference
715 * counts, calling pbuf_free(a) results in:
716 *
717 * 1->2->3 becomes ...1->3
718 * 3->3->3 becomes 2->3->3
719 * 1->1->2 becomes ......1
720 * 2->1->1 becomes 1->1->1
721 * 1->1->1 becomes .......
722 *
723 */
724u8_t
725pbuf_free(struct pbuf *p)
726{
727 u8_t alloc_src;
728 struct pbuf *q;
729 u8_t count;
730
731 if (p == NULL) {
732 LWIP_ASSERT("p != NULL", p != NULL);
733 /* if assertions are disabled, proceed with debug output */
734 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
735 ("pbuf_free(p == NULL) was called.\n"));
736 return 0;
737 }
738 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free(%p)\n", (void *)p));
739
740 PERF_START;
741
742 count = 0;
743 /* de-allocate all consecutive pbufs from the head of the chain that
744 * obtain a zero reference count after decrementing*/
745 while (p != NULL) {
746 LWIP_PBUF_REF_T ref;
747 SYS_ARCH_DECL_PROTECT(old_level);
748 /* Since decrementing ref cannot be guaranteed to be a single machine operation,
749 * we must protect it. The new ref value is copied into a local variable so
750 * that the test below needs no further protection. */
751 SYS_ARCH_PROTECT(old_level);
752 /* all pbufs in a chain are referenced at least once */
753 LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
754 /* decrease reference count (number of pointers to pbuf) */
755 ref = --(p->ref);
756 SYS_ARCH_UNPROTECT(old_level);
757 /* this pbuf is no longer referenced to? */
758 if (ref == 0) {
759 /* remember next pbuf in chain for next iteration */
760 q = p->next;
761 LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: deallocating %p\n", (void *)p));
762 alloc_src = pbuf_get_allocsrc(p);
763#if LWIP_SUPPORT_CUSTOM_PBUF
764 /* is this a custom pbuf? */
765 if ((p->flags & PBUF_FLAG_IS_CUSTOM) != 0) {
766 struct pbuf_custom *pc = (struct pbuf_custom *)p;
767 LWIP_ASSERT("pc->custom_free_function != NULL", pc->custom_free_function != NULL);
768 pc->custom_free_function(p);
769 } else
770#endif /* LWIP_SUPPORT_CUSTOM_PBUF */
771 {
772 /* is this a pbuf from the pool? */
773 if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF_POOL) {
774 memp_free(MEMP_PBUF_POOL, p);
775 /* is this a ROM or RAM referencing pbuf? */
776 } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_MEMP_PBUF) {
777 memp_free(MEMP_PBUF, p);
778 /* type == PBUF_RAM */
779 } else if (alloc_src == PBUF_TYPE_ALLOC_SRC_MASK_STD_HEAP) {
780 mem_free(p);
781 } else {
782 /* @todo: support freeing other types */
783 LWIP_ASSERT("invalid pbuf type", 0);
784 }
785 }
786 count++;
787 /* proceed to next pbuf */
788 p = q;
789 /* p->ref > 0, this pbuf is still referenced to */
790 /* (and so the remaining pbufs in chain as well) */
791 } else {
792 LWIP_DEBUGF( PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free: %p has ref %"U16_F", ending here.\n", (void *)p, (u16_t)ref));
793 /* stop walking through the chain */
794 p = NULL;
795 }
796 }
797 PERF_STOP("pbuf_free");
798 /* return number of de-allocated pbufs */
799 return count;
800}
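/* Illustrative sketch (not part of the original lwIP source): keeping a pbuf
 * alive across a queueing operation by taking an extra reference before
 * handing it to code that takes ownership. 'example_tx_enqueue' is a
 * hypothetical consumer; the block is never compiled. */
#if 0 /* documentation-only example */
/* hypothetical consumer that takes ownership of (i.e. eventually frees) the pbuf */
extern void example_tx_enqueue(struct pbuf *p);

static void
pbuf_example_keep_reference(struct pbuf *p)
{
  pbuf_ref(p);            /* ref: 1 -> 2, we still need p after enqueueing */
  example_tx_enqueue(p);  /* consumer will call pbuf_free(p) later */
  /* ... p is still valid here ... */
  pbuf_free(p);           /* drop our own reference */
}
#endif /* documentation-only example */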
801
802/**
803 * Count number of pbufs in a chain
804 *
805 * @param p first pbuf of chain
806 * @return the number of pbufs in a chain
807 */
808u16_t
809pbuf_clen(const struct pbuf *p)
810{
811 u16_t len;
812
813 len = 0;
814 while (p != NULL) {
815 ++len;
816 p = p->next;
817 }
818 return len;
819}
820
821/**
822 * @ingroup pbuf
823 * Increment the reference count of the pbuf.
824 *
825 * @param p pbuf to increase reference counter of
826 *
827 */
828void
829pbuf_ref(struct pbuf *p)
830{
831 /* pbuf given? */
832 if (p != NULL) {
833 SYS_ARCH_SET(p->ref, (LWIP_PBUF_REF_T)(p->ref + 1));
834 LWIP_ASSERT("pbuf ref overflow", p->ref > 0);
835 }
836}
837
838/**
839 * @ingroup pbuf
840 * Concatenate two pbufs (each may be a pbuf chain) and take over
841 * the caller's reference of the tail pbuf.
842 *
843 * @note The caller MAY NOT reference the tail pbuf afterwards.
844 * Use pbuf_chain() for that purpose.
845 *
846 * This function explicitly does not check for tot_len overflow to prevent
847 * failing to queue too long pbufs. This can produce invalid pbufs, so
848 * handle with care!
849 *
850 * @see pbuf_chain()
851 */
852void
853pbuf_cat(struct pbuf *h, struct pbuf *t)
854{
855 struct pbuf *p;
856
857 LWIP_ERROR("(h != NULL) && (t != NULL) (programmer violates API)",
858 ((h != NULL) && (t != NULL)), return;);
859
860 /* proceed to last pbuf of chain */
861 for (p = h; p->next != NULL; p = p->next) {
862 /* add total length of second chain to all totals of first chain */
863 p->tot_len = (u16_t)(p->tot_len + t->tot_len);
864 }
865 /* { p is last pbuf of first h chain, p->next == NULL } */
866 LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
867 LWIP_ASSERT("p->next == NULL", p->next == NULL);
868 /* add total length of second chain to last pbuf total of first chain */
869 p->tot_len = (u16_t)(p->tot_len + t->tot_len);
870 /* chain last pbuf of head (p) with first of tail (t) */
871 p->next = t;
872 /* p->next now references t, but the caller will drop its reference to t,
873 * so the net change to the reference count of t is zero.
874 */
875}
876
877/**
878 * @ingroup pbuf
879 * Chain two pbufs (or pbuf chains) together.
880 *
881 * The caller MUST call pbuf_free(t) once it has stopped
882 * using it. Use pbuf_cat() instead if you no longer use t.
883 *
884 * @param h head pbuf (chain)
885 * @param t tail pbuf (chain)
886 * @note The pbufs MUST belong to the same packet.
887 * @note MAY NOT be called on a packet queue.
888 *
889 * The ->tot_len fields of all pbufs of the head chain are adjusted.
890 * The ->next field of the last pbuf of the head chain is adjusted.
891 * The ->ref field of the first pbuf of the tail chain is adjusted.
892 *
893 */
894void
895pbuf_chain(struct pbuf *h, struct pbuf *t)
896{
897 pbuf_cat(h, t);
898 /* t is now referenced by h */
899 pbuf_ref(t);
900 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
901}
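/* Illustrative sketch (not part of the original lwIP source): the difference
 * between pbuf_cat() and pbuf_chain(). With pbuf_cat() the caller gives up its
 * reference to the tail; with pbuf_chain() it keeps it and must free it.
 * The block is never compiled. */
#if 0 /* documentation-only example */
static void
pbuf_example_concatenate(struct pbuf *header, struct pbuf *payload)
{
  pbuf_chain(header, payload);  /* header->...->payload, payload->ref was bumped */
  /* ... payload may still be used on its own here ... */
  pbuf_free(payload);           /* drop our own reference to the tail */
  pbuf_free(header);            /* frees the whole chain once refs reach zero */
}
#endif /* documentation-only example */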
902
903/**
904 * Dechains the first pbuf from its succeeding pbufs in the chain.
905 *
906 * Makes p->tot_len field equal to p->len.
907 * @param p pbuf to dechain
908 * @return remainder of the pbuf chain, or NULL if it was de-allocated.
909 * @note May not be called on a packet queue.
910 */
911struct pbuf *
912pbuf_dechain(struct pbuf *p)
913{
914 struct pbuf *q;
915 u8_t tail_gone = 1;
916 /* tail */
917 q = p->next;
918 /* pbuf has successor in chain? */
919 if (q != NULL) {
920 /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
921 LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
922 /* enforce invariant if assertion is disabled */
923 q->tot_len = (u16_t)(p->tot_len - p->len);
924 /* decouple pbuf from remainder */
925 p->next = NULL;
926 /* total length of pbuf p is its own length only */
927 p->tot_len = p->len;
928 /* q is no longer referenced by p, free it */
929 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
930 tail_gone = pbuf_free(q);
931 if (tail_gone > 0) {
932 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE,
933 ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
934 }
935 /* return remaining tail or NULL if deallocated */
936 }
937 /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0)) */
938 LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
939 return ((tail_gone > 0) ? NULL : q);
940}
941
942/**
943 * @ingroup pbuf
944 * Create PBUF_RAM copies of pbufs.
945 *
946 * Used to queue packets on behalf of the lwIP stack, such as
947 * ARP based queueing.
948 *
949 * @note You MUST explicitly use p = pbuf_take(p);
950 *
951 * @note Only one packet is copied, no packet queue!
952 *
953 * @param p_to pbuf destination of the copy
954 * @param p_from pbuf source of the copy
955 *
956 * @return ERR_OK if pbuf was copied
957 * ERR_ARG if one of the pbufs is NULL or p_to is not big
958 * enough to hold p_from
959 */
960err_t
961pbuf_copy(struct pbuf *p_to, const struct pbuf *p_from)
962{
963 size_t offset_to = 0, offset_from = 0, len;
964
965 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy(%p, %p)\n",
966 (const void *)p_to, (const void *)p_from));
967
968 /* is the target big enough to hold the source? */
969 LWIP_ERROR("pbuf_copy: target not big enough to hold source", ((p_to != NULL) &&
970 (p_from != NULL) && (p_to->tot_len >= p_from->tot_len)), return ERR_ARG;);
971
972 /* iterate through pbuf chain */
973 do {
974 /* copy one part of the original chain */
975 if ((p_to->len - offset_to) >= (p_from->len - offset_from)) {
976 /* complete current p_from fits into current p_to */
977 len = p_from->len - offset_from;
978 } else {
979 /* current p_from does not fit into current p_to */
980 len = p_to->len - offset_to;
981 }
982 MEMCPY((u8_t *)p_to->payload + offset_to, (u8_t *)p_from->payload + offset_from, len);
983 offset_to += len;
984 offset_from += len;
985 LWIP_ASSERT("offset_to <= p_to->len", offset_to <= p_to->len);
986 LWIP_ASSERT("offset_from <= p_from->len", offset_from <= p_from->len);
987 if (offset_from >= p_from->len) {
988 /* on to next p_from (if any) */
989 offset_from = 0;
990 p_from = p_from->next;
991 }
992 if (offset_to == p_to->len) {
993 /* on to next p_to (if any) */
994 offset_to = 0;
995 p_to = p_to->next;
996 LWIP_ERROR("p_to != NULL", (p_to != NULL) || (p_from == NULL), return ERR_ARG;);
997 }
998
999 if ((p_from != NULL) && (p_from->len == p_from->tot_len)) {
1000 /* don't copy more than one packet! */
1001 LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1002 (p_from->next == NULL), return ERR_VAL;);
1003 }
1004 if ((p_to != NULL) && (p_to->len == p_to->tot_len)) {
1005 /* don't copy more than one packet! */
1006 LWIP_ERROR("pbuf_copy() does not allow packet queues!",
1007 (p_to->next == NULL), return ERR_VAL;);
1008 }
1009 } while (p_from);
1010 LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_copy: end of chain reached.\n"));
1011 return ERR_OK;
1012}
1013
1014/**
1015 * @ingroup pbuf
1016 * Copy (part of) the contents of a packet buffer
1017 * to an application supplied buffer.
1018 *
1019 * @param buf the pbuf from which to copy data
1020 * @param dataptr the application supplied buffer
1021 * @param len length of data to copy (dataptr must be big enough). No more
1022 * than buf->tot_len will be copied, irrespective of len
1023 * @param offset offset into the packet buffer from where to begin copying len bytes
1024 * @return the number of bytes copied, or 0 on failure
1025 */
1026u16_t
1027pbuf_copy_partial(const struct pbuf *buf, void *dataptr, u16_t len, u16_t offset)
1028{
1029 const struct pbuf *p;
1030 u16_t left = 0;
1031 u16_t buf_copy_len;
1032 u16_t copied_total = 0;
1033
1034 LWIP_ERROR("pbuf_copy_partial: invalid buf", (buf != NULL), return 0;);
1035 LWIP_ERROR("pbuf_copy_partial: invalid dataptr", (dataptr != NULL), return 0;);
1036
1037 /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1038 for (p = buf; len != 0 && p != NULL; p = p->next) {
1039 if ((offset != 0) && (offset >= p->len)) {
1040 /* don't copy from this buffer -> on to the next */
1041 offset = (u16_t)(offset - p->len);
1042 } else {
1043 /* copy from this buffer. maybe only partially. */
1044 buf_copy_len = (u16_t)(p->len - offset);
1045 if (buf_copy_len > len) {
1046 buf_copy_len = len;
1047 }
1048 /* copy the necessary parts of the buffer */
1049 MEMCPY(&((char *)dataptr)[left], &((char *)p->payload)[offset], buf_copy_len);
1050 copied_total = (u16_t)(copied_total + buf_copy_len);
1051 left = (u16_t)(left + buf_copy_len);
1052 len = (u16_t)(len - buf_copy_len);
1053 offset = 0;
1054 }
1055 }
1056 return copied_total;
1057}
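/* Illustrative sketch (not part of the original lwIP source): extracting the
 * first bytes of a (possibly chained) packet into a flat buffer, e.g. to
 * inspect a header. The 20-byte size is an arbitrary assumption; the block is
 * never compiled. */
#if 0 /* documentation-only example */
static u16_t
pbuf_example_peek_header(const struct pbuf *p)
{
  u8_t hdr[20];
  /* returns the number of bytes actually copied (0 on failure) */
  return pbuf_copy_partial(p, hdr, (u16_t)sizeof(hdr), 0);
}
#endif /* documentation-only example */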
1058
1059/**
1060 * @ingroup pbuf
1061 * Get part of a pbuf's payload as contiguous memory. The returned memory is
1062 * either a pointer into the pbuf's payload or, if split over multiple pbufs,
1063 * a copy into the user-supplied buffer.
1064 *
1065 * @param p the pbuf from which to copy data
1066 * @param buffer the application supplied buffer
1067 * @param bufsize size of the application supplied buffer
1068 * @param len length of data to copy (buffer must be big enough). No more
1069 * than p->tot_len will be copied, irrespective of len
1070 * @param offset offset into the packet buffer from where to begin copying len bytes
1071 * @return the number of bytes copied, or 0 on failure
1072 */
1073void *
1074pbuf_get_contiguous(const struct pbuf *p, void *buffer, size_t bufsize, u16_t len, u16_t offset)
1075{
1076 const struct pbuf *q;
1077 u16_t out_offset;
1078
1079 LWIP_ERROR("pbuf_get_contiguous: invalid buf", (p != NULL), return NULL;);
1080 LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (buffer != NULL), return NULL;);
1081 LWIP_ERROR("pbuf_get_contiguous: invalid dataptr", (bufsize >= len), return NULL;);
1082
1083 q = pbuf_skip_const(p, offset, &out_offset);
1084 if (q != NULL) {
1085 if (q->len >= (out_offset + len)) {
1086 /* all data in this pbuf, return zero-copy */
1087 return (u8_t *)q->payload + out_offset;
1088 }
1089 /* need to copy */
1090 if (pbuf_copy_partial(q, buffer, len, out_offset) != len) {
1091 /* copying failed: pbuf is too short */
1092 return NULL;
1093 }
1094 return buffer;
1095 }
1096 /* pbuf is too short (offset does not fit in) */
1097 return NULL;
1098}
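/* Illustrative sketch (not part of the original lwIP source): getting a
 * contiguous view of a header that may or may not be split across pbufs.
 * The returned pointer is either into the pbuf or into the local buffer.
 * The 8-byte length is an arbitrary assumption; the block is never compiled. */
#if 0 /* documentation-only example */
static void
pbuf_example_contiguous(const struct pbuf *p)
{
  u8_t storage[8];
  const u8_t *hdr = (const u8_t *)pbuf_get_contiguous(p, storage, sizeof(storage), 8, 0);
  if (hdr != NULL) {
    /* ... parse the 8 header bytes at 'hdr' ... */
  }
}
#endif /* documentation-only example */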
1099
1100#if LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE
1101/**
1102 * This method modifies a 'pbuf chain', so that its total length is
1103 * smaller than 64K. The remainder of the original pbuf chain is stored
1104 * in *rest.
1105 * This function never creates new pbufs, but splits an existing chain
1106 * in two parts. The tot_len of the modified packet queue will likely be
1107 * smaller than 64K.
1108 * 'packet queues' are not supported by this function.
1109 *
1110 * @param p the pbuf queue to be split
1111 * @param rest pointer to store the remainder (after the first 64K)
1112 */
1113void pbuf_split_64k(struct pbuf *p, struct pbuf **rest)
1114{
1115 *rest = NULL;
1116 if ((p != NULL) && (p->next != NULL)) {
1117 u16_t tot_len_front = p->len;
1118 struct pbuf *i = p;
1119 struct pbuf *r = p->next;
1120
1121 /* continue until the total length (summed up as u16_t) overflows */
1122 while ((r != NULL) && ((u16_t)(tot_len_front + r->len) >= tot_len_front)) {
1123 tot_len_front = (u16_t)(tot_len_front + r->len);
1124 i = r;
1125 r = r->next;
1126 }
1127 /* i now points to last packet of the first segment. Set next
1128 pointer to NULL */
1129 i->next = NULL;
1130
1131 if (r != NULL) {
1132 /* Update the tot_len field in the first part */
1133 for (i = p; i != NULL; i = i->next) {
1134 i->tot_len = (u16_t)(i->tot_len - r->tot_len);
1135 LWIP_ASSERT("tot_len/len mismatch in last pbuf",
1136 (i->next != NULL) || (i->tot_len == i->len));
1137 }
1138 if (p->flags & PBUF_FLAG_TCP_FIN) {
1139 r->flags |= PBUF_FLAG_TCP_FIN;
1140 }
1141
1142 /* tot_len field in rest does not need modifications */
1143 /* reference counters do not need modifications */
1144 *rest = r;
1145 }
1146 }
1147}
1148#endif /* LWIP_TCP && TCP_QUEUE_OOSEQ && LWIP_WND_SCALE */
1149
1150/* Actual implementation of pbuf_skip() but returning const pointer... */
1151static const struct pbuf *
1152pbuf_skip_const(const struct pbuf *in, u16_t in_offset, u16_t *out_offset)
1153{
1154 u16_t offset_left = in_offset;
1155 const struct pbuf *q = in;
1156
1157 /* get the correct pbuf */
1158 while ((q != NULL) && (q->len <= offset_left)) {
1159 offset_left = (u16_t)(offset_left - q->len);
1160 q = q->next;
1161 }
1162 if (out_offset != NULL) {
1163 *out_offset = offset_left;
1164 }
1165 return q;
1166}
1167
1168/**
1169 * @ingroup pbuf
1170 * Skip a number of bytes at the start of a pbuf
1171 *
1172 * @param in input pbuf
1173 * @param in_offset offset to skip
1174 * @param out_offset resulting offset in the returned pbuf
1175 * @return the pbuf in the queue where the offset is
1176 */
1177struct pbuf *
1178pbuf_skip(struct pbuf *in, u16_t in_offset, u16_t *out_offset)
1179{
1180 const struct pbuf *out = pbuf_skip_const(in, in_offset, out_offset);
1181 return LWIP_CONST_CAST(struct pbuf *, out);
1182}
1183
1184/**
1185 * @ingroup pbuf
1186 * Copy application supplied data into a pbuf.
1187 * This function can only be used to copy the equivalent of buf->tot_len data.
1188 *
1189 * @param buf pbuf to fill with data
1190 * @param dataptr application supplied data buffer
1191 * @param len length of the application supplied data buffer
1192 *
1193 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1194 */
1195err_t
1196pbuf_take(struct pbuf *buf, const void *dataptr, u16_t len)
1197{
1198 struct pbuf *p;
1199 size_t buf_copy_len;
1200 size_t total_copy_len = len;
1201 size_t copied_total = 0;
1202
1203 LWIP_ERROR("pbuf_take: invalid buf", (buf != NULL), return ERR_ARG;);
1204 LWIP_ERROR("pbuf_take: invalid dataptr", (dataptr != NULL), return ERR_ARG;);
1205 LWIP_ERROR("pbuf_take: buf not large enough", (buf->tot_len >= len), return ERR_MEM;);
1206
1207 if ((buf == NULL) || (dataptr == NULL) || (buf->tot_len < len)) {
1208 return ERR_ARG;
1209 }
1210
1211 /* Note some systems use byte copy if dataptr or one of the pbuf payload pointers are unaligned. */
1212 for (p = buf; total_copy_len != 0; p = p->next) {
1213 LWIP_ASSERT("pbuf_take: invalid pbuf", p != NULL);
1214 buf_copy_len = total_copy_len;
1215 if (buf_copy_len > p->len) {
1216 /* this pbuf cannot hold all remaining data */
1217 buf_copy_len = p->len;
1218 }
1219 /* copy the necessary parts of the buffer */
1220 MEMCPY(p->payload, &((const char *)dataptr)[copied_total], buf_copy_len);
1221 total_copy_len -= buf_copy_len;
1222 copied_total += buf_copy_len;
1223 }
1224 LWIP_ASSERT("did not copy all data", total_copy_len == 0 && copied_total == len);
1225 return ERR_OK;
1226}
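/* Illustrative sketch (not part of the original lwIP source): copying
 * application data into a freshly allocated pbuf, the usual pattern before
 * passing data on to the stack. The block is never compiled. */
#if 0 /* documentation-only example */
static struct pbuf *
pbuf_example_from_data(const void *data, u16_t len)
{
  struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, len, PBUF_RAM);
  if (p != NULL) {
    if (pbuf_take(p, data, len) != ERR_OK) {
      pbuf_free(p);
      p = NULL;
    }
  }
  return p;
}
#endif /* documentation-only example */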
1227
1228/**
1229 * @ingroup pbuf
1230 * Same as pbuf_take() but puts data at an offset
1231 *
1232 * @param buf pbuf to fill with data
1233 * @param dataptr application supplied data buffer
1234 * @param len length of the application supplied data buffer
1235 * @param offset offset in pbuf where to copy dataptr to
1236 *
1237 * @return ERR_OK if successful, ERR_MEM if the pbuf is not big enough
1238 */
1239err_t
1240pbuf_take_at(struct pbuf *buf, const void *dataptr, u16_t len, u16_t offset)
1241{
1242 u16_t target_offset;
1243 struct pbuf *q = pbuf_skip(buf, offset, &target_offset);
1244
1245 /* return requested data if pbuf is OK */
1246 if ((q != NULL) && (q->tot_len >= target_offset + len)) {
1247 u16_t remaining_len = len;
1248 const u8_t *src_ptr = (const u8_t *)dataptr;
1249 /* copy the part that goes into the first pbuf */
1250 u16_t first_copy_len;
1251 LWIP_ASSERT("check pbuf_skip result", target_offset < q->len);
1252 first_copy_len = (u16_t)LWIP_MIN(q->len - target_offset, len);
1253 MEMCPY(((u8_t *)q->payload) + target_offset, dataptr, first_copy_len);
1254 remaining_len = (u16_t)(remaining_len - first_copy_len);
1255 src_ptr += first_copy_len;
1256 if (remaining_len > 0) {
1257 return pbuf_take(q->next, src_ptr, remaining_len);
1258 }
1259 return ERR_OK;
1260 }
1261 return ERR_MEM;
1262}
1263
1264/**
1265 * @ingroup pbuf
1266 * Creates a single pbuf out of a queue of pbufs.
1267 *
1268 * @remark: Either the source pbuf 'p' is freed by this function or the original
1269 * pbuf 'p' is returned, therefore the caller has to check the result!
1270 *
1271 * @param p the source pbuf
1272 * @param layer pbuf_layer of the new pbuf
1273 *
1274 * @return a new, single pbuf (p->next is NULL)
1275 * or the old pbuf if allocation fails
1276 */
1277struct pbuf *
1278pbuf_coalesce(struct pbuf *p, pbuf_layer layer)
1279{
1280 struct pbuf *q;
1281 if (p->next == NULL) {
1282 return p;
1283 }
1284 q = pbuf_clone(layer, PBUF_RAM, p);
1285 if (q == NULL) {
1286 /* @todo: what do we do now? */
1287 return p;
1288 }
1289 pbuf_free(p);
1290 return q;
1291}
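/* Illustrative sketch (not part of the original lwIP source): flattening a
 * chained packet into a single PBUF_RAM pbuf before handing it to code that
 * expects contiguous data. The return value replaces 'p' on success; the
 * block is never compiled. */
#if 0 /* documentation-only example */
static struct pbuf *
pbuf_example_flatten(struct pbuf *p)
{
  p = pbuf_coalesce(p, PBUF_RAW);   /* frees the old chain on success */
  if (p->next != NULL) {
    /* allocation failed: the original, still chained pbuf was returned */
  }
  return p;
}
#endif /* documentation-only example */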
1292
1293/**
1294 * @ingroup pbuf
1295 * Allocates a new pbuf of same length (via pbuf_alloc()) and copies the source
1296 * pbuf into this new pbuf (using pbuf_copy()).
1297 *
1298 * @param layer pbuf_layer of the new pbuf
1299 * @param type this parameter decides how and where the pbuf should be allocated
1300 * (@see pbuf_alloc())
1301 * @param p the source pbuf
1302 *
1303 * @return a new pbuf or NULL if allocation fails
1304 */
1305struct pbuf *
1306pbuf_clone(pbuf_layer layer, pbuf_type type, struct pbuf *p)
1307{
1308 struct pbuf *q;
1309 err_t err;
1310 q = pbuf_alloc(layer, p->tot_len, type);
1311 if (q == NULL) {
1312 return NULL;
1313 }
1314 err = pbuf_copy(q, p);
1315 LWIP_UNUSED_ARG(err); /* in case of LWIP_NOASSERT */
1316 LWIP_ASSERT("pbuf_copy failed", err == ERR_OK);
1317 return q;
1318}
1319
1320#if LWIP_CHECKSUM_ON_COPY
1321/**
1322 * Copies data into a single pbuf (*not* into a pbuf queue!) and updates
1323 * the checksum while copying
1324 *
1325 * @param p the pbuf to copy data into
1326 * @param start_offset offset of p->payload where to copy the data to
1327 * @param dataptr data to copy into the pbuf
1328 * @param len length of data to copy into the pbuf
1329 * @param chksum pointer to the checksum which is updated
1330 * @return ERR_OK if successful, another error if the data does not fit
1331 * within the (first) pbuf (no pbuf queues!)
1332 */
1333err_t
1334pbuf_fill_chksum(struct pbuf *p, u16_t start_offset, const void *dataptr,
1335 u16_t len, u16_t *chksum)
1336{
1337 u32_t acc;
1338 u16_t copy_chksum;
1339 char *dst_ptr;
1340 LWIP_ASSERT("p != NULL", p != NULL);
1341 LWIP_ASSERT("dataptr != NULL", dataptr != NULL);
1342 LWIP_ASSERT("chksum != NULL", chksum != NULL);
1343 LWIP_ASSERT("len != 0", len != 0);
1344
1345 if ((start_offset >= p->len) || (start_offset + len > p->len)) {
1346 return ERR_ARG;
1347 }
1348
1349 dst_ptr = ((char *)p->payload) + start_offset;
1350 copy_chksum = LWIP_CHKSUM_COPY(dst_ptr, dataptr, len);
1351 if ((start_offset & 1) != 0) {
1352 copy_chksum = SWAP_BYTES_IN_WORD(copy_chksum);
1353 }
1354 acc = *chksum;
1355 acc += copy_chksum;
1356 *chksum = FOLD_U32T(acc);
1357 return ERR_OK;
1358}
1359#endif /* LWIP_CHECKSUM_ON_COPY */
1360
1361/**
1362 * @ingroup pbuf
1363 * Get one byte from the specified position in a pbuf
1364 * WARNING: returns zero for offset >= p->tot_len
1365 *
1366 * @param p pbuf to parse
1367 * @param offset offset into p of the byte to return
1368 * @return byte at an offset into p OR ZERO IF 'offset' >= p->tot_len
1369 */
1370u8_t
1371pbuf_get_at(const struct pbuf *p, u16_t offset)
1372{
1373 int ret = pbuf_try_get_at(p, offset);
1374 if (ret >= 0) {
1375 return (u8_t)ret;
1376 }
1377 return 0;
1378}
1379
1380/**
1381 * @ingroup pbuf
1382 * Get one byte from the specified position in a pbuf
1383 *
1384 * @param p pbuf to parse
1385 * @param offset offset into p of the byte to return
1386 * @return byte at an offset into p [0..0xFF] OR negative if 'offset' >= p->tot_len
1387 */
1388int
1389pbuf_try_get_at(const struct pbuf *p, u16_t offset)
1390{
1391 u16_t q_idx;
1392 const struct pbuf *q = pbuf_skip_const(p, offset, &q_idx);
1393
1394 /* return requested data if pbuf is OK */
1395 if ((q != NULL) && (q->len > q_idx)) {
1396 return ((u8_t *)q->payload)[q_idx];
1397 }
1398 return -1;
1399}
1400
1401/**
1402 * @ingroup pbuf
1403 * Put one byte to the specified position in a pbuf
1404 * WARNING: silently ignores offset >= p->tot_len
1405 *
1406 * @param p pbuf to fill
1407 * @param offset offset into p of the byte to write
1408 * @param data byte to write at an offset into p
1409 */
1410void
1411pbuf_put_at(struct pbuf *p, u16_t offset, u8_t data)
1412{
1413 u16_t q_idx;
1414 struct pbuf *q = pbuf_skip(p, offset, &q_idx);
1415
1416 /* write requested data if pbuf is OK */
1417 if ((q != NULL) && (q->len > q_idx)) {
1418 ((u8_t *)q->payload)[q_idx] = data;
1419 }
1420}
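/* Illustrative sketch (not part of the original lwIP source): reading and
 * patching a single byte at an offset in a chained packet. The offset value 8
 * is an arbitrary assumption; the block is never compiled. */
#if 0 /* documentation-only example */
static void
pbuf_example_byte_access(struct pbuf *p)
{
  int b = pbuf_try_get_at(p, 8);   /* negative if the offset is out of range */
  if (b >= 0) {
    pbuf_put_at(p, 8, (u8_t)(b - 1));
  }
}
#endif /* documentation-only example */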
1421
1422/**
1423 * @ingroup pbuf
1424 * Compare pbuf contents at specified offset with memory s2, both of length n
1425 *
1426 * @param p pbuf to compare
1427 * @param offset offset into p at which to start comparing
1428 * @param s2 buffer to compare
1429 * @param n length of buffer to compare
1430 * @return zero if equal, nonzero otherwise
1431 * (0xffff if p is too short, diffoffset+1 otherwise)
1432 */
1433u16_t
1434pbuf_memcmp(const struct pbuf *p, u16_t offset, const void *s2, u16_t n)
1435{
1436 u16_t start = offset;
1437 const struct pbuf *q = p;
1438 u16_t i;
1439
1440 /* pbuf long enough to perform check? */
1441 if (p->tot_len < (offset + n)) {
1442 return 0xffff;
1443 }
1444
1445 /* get the correct pbuf from chain. We know it succeeds because of p->tot_len check above. */
1446 while ((q != NULL) && (q->len <= start)) {
1447 start = (u16_t)(start - q->len);
1448 q = q->next;
1449 }
1450
1451 /* return requested data if pbuf is OK */
1452 for (i = 0; i < n; i++) {
1453 /* We know pbuf_get_at() succeeds because of p->tot_len check above. */
1454 u8_t a = pbuf_get_at(q, (u16_t)(start + i));
1455 u8_t b = ((const u8_t *)s2)[i];
1456 if (a != b) {
1457 return (u16_t)LWIP_MIN(i + 1, 0xFFFF);
1458 }
1459 }
1460 return 0;
1461}
1462
1463/**
1464 * @ingroup pbuf
1465 * Find occurrence of mem (with length mem_len) in pbuf p, starting at offset
1466 * start_offset.
1467 *
1468 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1469 * return value 'not found'
1470 * @param mem search for the contents of this buffer
1471 * @param mem_len length of 'mem'
1472 * @param start_offset offset into p at which to start searching
1473 * @return 0xFFFF if substr was not found in p or the index where it was found
1474 */
1475u16_t
1476pbuf_memfind(const struct pbuf *p, const void *mem, u16_t mem_len, u16_t start_offset)
1477{
1478 u16_t i;
1479 u16_t max_cmp_start = (u16_t)(p->tot_len - mem_len);
1480 if (p->tot_len >= mem_len + start_offset) {
1481 for (i = start_offset; i <= max_cmp_start; i++) {
1482 u16_t plus = pbuf_memcmp(p, i, mem, mem_len);
1483 if (plus == 0) {
1484 return i;
1485 }
1486 }
1487 }
1488 return 0xFFFF;
1489}
1490
1491/**
1492 * Find occurrence of substr with length substr_len in pbuf p, start at offset
1493 * start_offset
1494 * WARNING: in contrast to strstr(), this one does not stop at the first \0 in
1495 * the pbuf/source string!
1496 *
1497 * @param p pbuf to search, maximum length is 0xFFFE since 0xFFFF is used as
1498 * return value 'not found'
1499 * @param substr string to search for in p, maximum length is 0xFFFE
1500 * @return 0xFFFF if substr was not found in p or the index where it was found
1501 */
1502u16_t
1503pbuf_strstr(const struct pbuf *p, const char *substr)
1504{
1505 size_t substr_len;
1506 if ((substr == NULL) || (substr[0] == 0) || (p->tot_len == 0xFFFF)) {
1507 return 0xFFFF;
1508 }
1509 substr_len = strlen(substr);
1510 if (substr_len >= 0xFFFF) {
1511 return 0xFFFF;
1512 }
1513 return pbuf_memfind(p, substr, (u16_t)substr_len, 0);
1514}
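/* Illustrative sketch (not part of the original lwIP source): locating the
 * end of an HTTP-style header block inside a received pbuf chain. The block
 * is never compiled. */
#if 0 /* documentation-only example */
static u16_t
pbuf_example_find_header_end(const struct pbuf *p)
{
  /* returns 0xFFFF if the delimiter is not found */
  return pbuf_strstr(p, "\r\n\r\n");
}
#endif /* documentation-only example */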