source: UsbWattMeter/trunk/lwip-1.4.1/src/core/ipv4/ip_frag.c@167

Last change on this file since 167 was 167, checked in by coas-nagasima, 8 years ago

Set MIME charset to SHIFT_JIS

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
  • Property svn:mime-type set to text/x-csrc; charset=SHIFT_JIS
File size: 28.2 KB
/**
 * @file
 * This is the IPv4 packet segmentation and reassembly implementation.
 *
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Jani Monoses <jani@iv.ro>
 *         Simon Goldschmidt
 * original reassembly code by Adam Dunkels <adam@sics.se>
 *
 */

#include "lwip/opt.h"
#include "lwip/ip_frag.h"
#include "lwip/def.h"
#include "lwip/inet_chksum.h"
#include "lwip/netif.h"
#include "lwip/snmp.h"
#include "lwip/stats.h"
#include "lwip/icmp.h"

#include <string.h>

#if IP_REASSEMBLY
/**
 * The IP reassembly code currently has the following limitations:
 * - IP header options are not supported
 * - fragments must not overlap (e.g. due to different routes),
 *   currently, overlapping or duplicate fragments are thrown away
 *   if IP_REASS_CHECK_OVERLAP=1 (the default)!
 *
 * @todo: work with IP header options
 */

/** Setting this to 0, you can turn off checking the fragments for overlapping
 * regions. The code gets a little smaller. Only use this if you know that
 * overlapping won't occur on your network! */
#ifndef IP_REASS_CHECK_OVERLAP
#define IP_REASS_CHECK_OVERLAP 1
#endif /* IP_REASS_CHECK_OVERLAP */

/** Set to 0 to prevent freeing the oldest datagram when the reassembly buffer is
 * full (IP_REASS_MAX_PBUFS pbufs are enqueued). The code gets a little smaller.
 * Datagrams will be freed by timeout only. Especially useful when MEMP_NUM_REASSDATA
 * is set to 1, so one datagram can be reassembled at a time, only. */
#ifndef IP_REASS_FREE_OLDEST
#define IP_REASS_FREE_OLDEST 1
#endif /* IP_REASS_FREE_OLDEST */

#define IP_REASS_FLAG_LASTFRAG 0x01

/** This is a helper struct which holds the starting
 * offset and the ending offset of this fragment to
 * easily chain the fragments.
 * It has the same packing requirements as the IP header, since it replaces
 * the IP header in memory in incoming fragments (after copying it) to keep
 * track of the various fragments. (-> If the IP header doesn't need packing,
 * this struct doesn't need packing, too.)
 */
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/bpstruct.h"
#endif
PACK_STRUCT_BEGIN
struct ip_reass_helper {
  PACK_STRUCT_FIELD(struct pbuf *next_pbuf);
  PACK_STRUCT_FIELD(u16_t start);
  PACK_STRUCT_FIELD(u16_t end);
} PACK_STRUCT_STRUCT;
PACK_STRUCT_END
#ifdef PACK_STRUCT_USE_INCLUDES
#  include "arch/epstruct.h"
#endif

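/* Example (sketch; not part of the original lwIP sources): how the helper
 * overlays the IP header. The original header is copied aside into
 * ipr->iphdr, so the first bytes of each queued fragment pbuf are reused for
 * bookkeeping. On a typical 32-bit port, sizeof(struct ip_reass_helper) is
 * 4 + 2 + 2 = 8 bytes, which fits in the 20-byte IP_HLEN it replaces; the
 * sizes here are illustrative assumptions, not guarantees. */
#if 0 /* illustration only */
static void reass_helper_layout_example(struct pbuf *frag)
{
  /* the IP header of 'frag' has already been saved elsewhere */
  struct ip_reass_helper *iprh = (struct ip_reass_helper *)frag->payload;
  iprh->next_pbuf = NULL; /* next fragment in ascending-offset order */
  iprh->start = 0;        /* byte offset of this fragment in the datagram */
  iprh->end = 1480;       /* start + number of data bytes in this fragment */
}
#endif
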
#define IP_ADDRESSES_AND_ID_MATCH(iphdrA, iphdrB)  \
  (ip_addr_cmp(&(iphdrA)->src, &(iphdrB)->src) && \
   ip_addr_cmp(&(iphdrA)->dest, &(iphdrB)->dest) && \
   IPH_ID(iphdrA) == IPH_ID(iphdrB)) ? 1 : 0

/* global variables */
static struct ip_reassdata *reassdatagrams;
static u16_t ip_reass_pbufcount;

/* function prototypes */
static void ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);
static int ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev);

/**
 * Reassembly timer base function
 * for both NO_SYS == 0 and 1 (!).
 *
 * Should be called every 1000 msec (defined by IP_TMR_INTERVAL).
 */
void
ip_reass_tmr(void)
{
  struct ip_reassdata *r, *prev = NULL;

  r = reassdatagrams;
  while (r != NULL) {
    /* Decrement the timer. Once it reaches 0,
     * clean up the incomplete fragment assembly */
    if (r->timer > 0) {
      r->timer--;
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer dec %"U16_F"\n", (u16_t)r->timer));
      prev = r;
      r = r->next;
    } else {
      /* reassembly timed out */
      struct ip_reassdata *tmp;
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_tmr: timer timed out\n"));
      tmp = r;
      /* get the next pointer before freeing */
      r = r->next;
      /* free the helper struct and all enqueued pbufs */
      ip_reass_free_complete_datagram(tmp, prev);
    }
  }
}

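/* Example (sketch; not part of the original lwIP sources): driving the
 * reassembly timer from a NO_SYS-style main loop. IP_TMR_INTERVAL is 1000 ms
 * by default and sys_now() is the port-supplied millisecond tick; with the
 * sys_timeout framework the stack schedules ip_reass_tmr() itself, so this is
 * only a sketch for raw polling ports. */
#if 0 /* illustration only */
static void poll_ip_reass_timer(void)
{
  static u32_t last_ip_tmr;
  if ((u32_t)(sys_now() - last_ip_tmr) >= IP_TMR_INTERVAL) {
    last_ip_tmr += IP_TMR_INTERVAL;
    ip_reass_tmr(); /* ages all pending datagrams; frees timed-out ones */
  }
}
#endif
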
/**
 * Free a datagram (struct ip_reassdata) and all its pbufs.
 * Updates the total count of enqueued pbufs (ip_reass_pbufcount),
 * SNMP counters and sends an ICMP time exceeded packet.
 *
 * @param ipr datagram to free
 * @param prev the previous datagram in the linked list
 * @return the number of pbufs freed
 */
static int
ip_reass_free_complete_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
  u16_t pbufs_freed = 0;
  u8_t clen;
  struct pbuf *p;
  struct ip_reass_helper *iprh;

  LWIP_ASSERT("prev != ipr", prev != ipr);
  if (prev != NULL) {
    LWIP_ASSERT("prev->next == ipr", prev->next == ipr);
  }

  snmp_inc_ipreasmfails();
#if LWIP_ICMP
  iprh = (struct ip_reass_helper *)ipr->p->payload;
  if (iprh->start == 0) {
    /* The first fragment was received, send ICMP time exceeded. */
    /* First, de-queue the first pbuf from r->p. */
    p = ipr->p;
    ipr->p = iprh->next_pbuf;
    /* Then, copy the original header into it. */
    SMEMCPY(p->payload, &ipr->iphdr, IP_HLEN);
    icmp_time_exceeded(p, ICMP_TE_FRAG);
    clen = pbuf_clen(p);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(p);
  }
#endif /* LWIP_ICMP */

  /* First, free all received pbufs. The individual pbufs need to be released
     separately as they have not yet been chained */
  p = ipr->p;
  while (p != NULL) {
    struct pbuf *pcur;
    iprh = (struct ip_reass_helper *)p->payload;
    pcur = p;
    /* get the next pointer before freeing */
    p = iprh->next_pbuf;
    clen = pbuf_clen(pcur);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(pcur);
  }
  /* Then, unchain the struct ip_reassdata from the list and free it. */
  ip_reass_dequeue_datagram(ipr, prev);
  LWIP_ASSERT("ip_reass_pbufcount >= pbufs_freed", ip_reass_pbufcount >= pbufs_freed);
  ip_reass_pbufcount -= pbufs_freed;

  return pbufs_freed;
}

#if IP_REASS_FREE_OLDEST
/**
 * Free the oldest datagram to make room for enqueueing new fragments.
 * The datagram 'fraghdr' belongs to is not freed!
 *
 * @param fraghdr IP header of the current fragment
 * @param pbufs_needed number of pbufs needed to enqueue
 *        (used for freeing other datagrams if not enough space)
 * @return the number of pbufs freed
 */
static int
ip_reass_remove_oldest_datagram(struct ip_hdr *fraghdr, int pbufs_needed)
{
  /* @todo Can't we simply remove the last datagram in the
   *       linked list behind reassdatagrams?
   */
  struct ip_reassdata *r, *oldest, *prev;
  int pbufs_freed = 0, pbufs_freed_current;
  int other_datagrams;

  /* Free datagrams until being allowed to enqueue 'pbufs_needed' pbufs,
   * but don't free the datagram that 'fraghdr' belongs to! */
  do {
    oldest = NULL;
    prev = NULL;
    other_datagrams = 0;
    r = reassdatagrams;
    while (r != NULL) {
      if (!IP_ADDRESSES_AND_ID_MATCH(&r->iphdr, fraghdr)) {
        /* Not the same datagram as fraghdr */
        other_datagrams++;
        if (oldest == NULL) {
          oldest = r;
        } else if (r->timer <= oldest->timer) {
          /* older than the previous oldest */
          oldest = r;
        }
      }
      if (r->next != NULL) {
        prev = r;
      }
      r = r->next;
    }
    if (oldest != NULL) {
      pbufs_freed_current = ip_reass_free_complete_datagram(oldest, prev);
      pbufs_freed += pbufs_freed_current;
    }
  } while ((pbufs_freed < pbufs_needed) && (other_datagrams > 1));
  return pbufs_freed;
}
#endif /* IP_REASS_FREE_OLDEST */

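/* Worked example (sketch; not part of the original lwIP sources): with
 * IP_REASS_MAX_PBUFS = 10, 9 pbufs already enqueued and a new 2-pbuf fragment
 * arriving, ip_reass() calls ip_reass_remove_oldest_datagram(fraghdr, 2).
 * The datagram with the smallest remaining timer value that does NOT match
 * fraghdr is freed first; the loop repeats until at least 2 pbufs were freed
 * or only the datagram fraghdr belongs to remains. */
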
/**
 * Enqueues a new fragment into the fragment queue
 * @param fraghdr points to the new fragment's IP header
 * @param clen number of pbufs needed to enqueue (used for freeing other datagrams if not enough space)
 * @return A pointer to the queue location into which the fragment was enqueued
 */
static struct ip_reassdata*
ip_reass_enqueue_new_datagram(struct ip_hdr *fraghdr, int clen)
{
  struct ip_reassdata* ipr;
  /* No matching previous fragment found, allocate a new reassdata struct */
  ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
  if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
    if (ip_reass_remove_oldest_datagram(fraghdr, clen) >= clen) {
      ipr = (struct ip_reassdata *)memp_malloc(MEMP_REASSDATA);
    }
    if (ipr == NULL)
#endif /* IP_REASS_FREE_OLDEST */
    {
      IPFRAG_STATS_INC(ip_frag.memerr);
      LWIP_DEBUGF(IP_REASS_DEBUG, ("Failed to alloc reassdata struct\n"));
      return NULL;
    }
  }
  memset(ipr, 0, sizeof(struct ip_reassdata));
  ipr->timer = IP_REASS_MAXAGE;

  /* enqueue the new structure to the front of the list */
  ipr->next = reassdatagrams;
  reassdatagrams = ipr;
  /* copy the ip header for later tests and input */
  /* @todo: no ip options supported? */
  SMEMCPY(&(ipr->iphdr), fraghdr, IP_HLEN);
  return ipr;
}

/**
 * Dequeues a datagram from the datagram queue. Doesn't deallocate the pbufs.
 * @param ipr points to the queue entry to dequeue
 */
static void
ip_reass_dequeue_datagram(struct ip_reassdata *ipr, struct ip_reassdata *prev)
{
  /* dequeue the reass struct */
  if (reassdatagrams == ipr) {
    /* it was the first in the list */
    reassdatagrams = ipr->next;
  } else {
    /* it wasn't the first, so it must have a valid 'prev' */
    LWIP_ASSERT("sanity check linked list", prev != NULL);
    prev->next = ipr->next;
  }

  /* now we can free the ip_reass struct */
  memp_free(MEMP_REASSDATA, ipr);
}

/**
 * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list
 * will grow over time as new pbufs are received.
 * Also checks that the datagram passes basic continuity checks (if the last
 * fragment was received at least once).
 * @param ipr points to the reassembly state of the datagram being assembled
 * @param new_p points to the pbuf for the current fragment
 * @return 0 if invalid, >0 otherwise
 */
static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p)
{
  struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev = NULL;
  struct pbuf *q;
  u16_t offset, len;
  struct ip_hdr *fraghdr;
  int valid = 1;

  /* Extract length and fragment offset from current fragment */
  fraghdr = (struct ip_hdr*)new_p->payload;
  len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
  offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;

  /* overwrite the fragment's ip header from the pbuf with our helper struct,
   * and set up the embedded helper structure. */
  /* make sure the struct ip_reass_helper fits into the IP header */
  LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
              sizeof(struct ip_reass_helper) <= IP_HLEN);
  iprh = (struct ip_reass_helper*)new_p->payload;
  iprh->next_pbuf = NULL;
  iprh->start = offset;
  iprh->end = offset + len;

  /* Iterate through until we either get to the end of the list (append),
   * or we find one with a larger offset (insert). */
  for (q = ipr->p; q != NULL;) {
    iprh_tmp = (struct ip_reass_helper*)q->payload;
    if (iprh->start < iprh_tmp->start) {
      /* the new pbuf should be inserted before this */
      iprh->next_pbuf = q;
      if (iprh_prev != NULL) {
        /* not the fragment with the lowest offset */
#if IP_REASS_CHECK_OVERLAP
        if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
          /* fragment overlaps with previous or following, throw away */
          goto freepbuf;
        }
#endif /* IP_REASS_CHECK_OVERLAP */
        iprh_prev->next_pbuf = new_p;
      } else {
        /* fragment with the lowest offset */
        ipr->p = new_p;
      }
      break;
    } else if (iprh->start == iprh_tmp->start) {
      /* received the same fragment twice: no need to keep it */
      goto freepbuf;
#if IP_REASS_CHECK_OVERLAP
    } else if (iprh->start < iprh_tmp->end) {
      /* overlap: no need to keep the new fragment */
      goto freepbuf;
#endif /* IP_REASS_CHECK_OVERLAP */
    } else {
      /* Check if the fragments received so far have no holes. */
      if (iprh_prev != NULL) {
        if (iprh_prev->end != iprh_tmp->start) {
          /* There is a fragment missing between the current
           * and the previous fragment */
          valid = 0;
        }
      }
    }
    q = iprh_tmp->next_pbuf;
    iprh_prev = iprh_tmp;
  }

  /* If q is NULL, then we made it to the end of the list. Determine what to do now */
  if (q == NULL) {
    if (iprh_prev != NULL) {
      /* this is (for now) the fragment with the highest offset:
       * chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
      iprh_prev->next_pbuf = new_p;
      if (iprh_prev->end != iprh->start) {
        valid = 0;
      }
    } else {
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("no previous fragment, this must be the first fragment!",
                  ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
      /* this is the first fragment we ever received for this ip datagram */
      ipr->p = new_p;
    }
  }

  /* At this point, the validation part begins: */
  /* If we already received the last fragment */
  if ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0) {
    /* and had no holes so far */
    if (valid) {
      /* then check if the rest of the fragments are here */
      /* Check if the queue starts with the first fragment */
      if (((struct ip_reass_helper*)ipr->p->payload)->start != 0) {
        valid = 0;
      } else {
        /* and check that there are no holes after this fragment */
        iprh_prev = iprh;
        q = iprh->next_pbuf;
        while (q != NULL) {
          iprh = (struct ip_reass_helper*)q->payload;
          if (iprh_prev->end != iprh->start) {
            valid = 0;
            break;
          }
          iprh_prev = iprh;
          q = iprh->next_pbuf;
        }
        /* if still valid, all fragments have been received
         * (because the fragment with MF == 0 has already arrived) */
        if (valid) {
          LWIP_ASSERT("sanity check", ipr->p != NULL);
          LWIP_ASSERT("sanity check",
                      ((struct ip_reass_helper*)ipr->p->payload) != iprh);
          LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
                      iprh->next_pbuf == NULL);
          LWIP_ASSERT("validate_datagram:datagram end!=datagram len",
                      iprh->end == ipr->datagram_len);
        }
      }
    }
    /* If valid is 0 here, there are some fragments missing in the middle
     * (since MF == 0 has already arrived). Such datagrams simply time out if
     * no more fragments are received... */
    return valid;
  }
  /* If we get here, not all fragments have been received yet! */
  return 0; /* not yet valid! */
#if IP_REASS_CHECK_OVERLAP
freepbuf:
  ip_reass_pbufcount -= pbuf_clen(new_p);
  pbuf_free(new_p);
  return 0;
#endif /* IP_REASS_CHECK_OVERLAP */
}

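/* Worked example (sketch; not part of the original lwIP sources): a 2960-byte
 * IP payload sent through a 1500-byte MTU arrives as two fragments.
 * Fragment 1: offset field 0, 1480 data bytes -> helper {start=0, end=1480}.
 * Fragment 2: offset field 185 (185 * 8 = 1480 bytes), 1480 data bytes,
 * MF = 0 -> helper {start=1480, end=2960} and datagram_len = 2960.
 * The list is hole-free and the last fragment was seen, so this function
 * returns nonzero and ip_reass() splices the chain into one datagram of
 * 2960 + IP_HLEN = 2980 bytes. */
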
/**
 * Reassembles incoming IP fragments into an IP datagram.
 *
 * @param p points to a pbuf chain of the fragment
 * @return NULL if reassembly is incomplete, pbuf chain of the reassembled datagram otherwise
 */
struct pbuf *
ip_reass(struct pbuf *p)
{
  struct pbuf *r;
  struct ip_hdr *fraghdr;
  struct ip_reassdata *ipr;
  struct ip_reass_helper *iprh;
  u16_t offset, len;
  u8_t clen;
  struct ip_reassdata *ipr_prev = NULL;

  IPFRAG_STATS_INC(ip_frag.recv);
  snmp_inc_ipreasmreqds();

  fraghdr = (struct ip_hdr*)p->payload;

  if ((IPH_HL(fraghdr) * 4) != IP_HLEN) {
    LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass: IP options currently not supported!\n"));
    IPFRAG_STATS_INC(ip_frag.err);
    goto nullreturn;
  }

  offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;
  len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;

  /* Check if we are allowed to enqueue more datagrams. */
  clen = pbuf_clen(p);
  if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
    if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
        ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
#endif /* IP_REASS_FREE_OLDEST */
    {
      /* No datagram could be freed and still too many pbufs enqueued */
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
                                   ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
      IPFRAG_STATS_INC(ip_frag.memerr);
      /* @todo: send ICMP time exceeded here? */
      /* drop this pbuf */
      goto nullreturn;
    }
  }

  /* Look for the datagram the fragment belongs to in the current datagram queue,
   * remembering the previous in the queue for later dequeueing. */
  for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
    /* Check if the incoming fragment matches the one currently present
       in the reassembly buffer. If so, we proceed with copying the
       fragment into the buffer. */
    if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass: matching previous fragment ID=%"X16_F"\n",
                                   ntohs(IPH_ID(fraghdr))));
      IPFRAG_STATS_INC(ip_frag.cachehit);
      break;
    }
    ipr_prev = ipr;
  }

  if (ipr == NULL) {
    /* Enqueue a new datagram into the datagram queue */
    ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
    /* Bail if unable to enqueue */
    if (ipr == NULL) {
      goto nullreturn;
    }
  } else {
    if (((ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) &&
        ((ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
      /* ipr->iphdr is not the header from the first fragment, but fraghdr is
       * -> copy fraghdr into ipr->iphdr since we want to have the header
       * of the first fragment (for ICMP time exceeded and later, for copying
       * all options, if supported) */
      SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
    }
  }
  /* Track the number of pbufs currently 'in-flight', in order to limit
     the number of fragments that may be enqueued at any one time */
  ip_reass_pbufcount += clen;

  /* At this point, we have either created a new entry or are pointing
   * to an existing one */

  /* check for 'no more fragments', and update queue entry */
  if ((IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0) {
    ipr->flags |= IP_REASS_FLAG_LASTFRAG;
    ipr->datagram_len = offset + len;
    LWIP_DEBUGF(IP_REASS_DEBUG,
                ("ip_reass: last fragment seen, total len %"S16_F"\n",
                 ipr->datagram_len));
  }
  /* find the right place to insert this pbuf */
  /* @todo: trim pbufs if fragments are overlapping */
  if (ip_reass_chain_frag_into_datagram_and_validate(ipr, p)) {
    /* the very last fragment (flag more fragments = 0) was received at least
     * once AND all fragments have been received */
    ipr->datagram_len += IP_HLEN;

    /* save the second pbuf before copying the header over the pointer */
    r = ((struct ip_reass_helper*)ipr->p->payload)->next_pbuf;

    /* copy the original ip header back to the first pbuf */
    fraghdr = (struct ip_hdr*)(ipr->p->payload);
    SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
    IPH_LEN_SET(fraghdr, htons(ipr->datagram_len));
    IPH_OFFSET_SET(fraghdr, 0);
    IPH_CHKSUM_SET(fraghdr, 0);
    /* @todo: do we need to calculate the correct checksum? */
    IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));

    p = ipr->p;

    /* chain together the pbufs contained within the reass_data list. */
    while (r != NULL) {
      iprh = (struct ip_reass_helper*)r->payload;

      /* hide the ip header for every succeeding fragment */
      pbuf_header(r, -IP_HLEN);
      pbuf_cat(p, r);
      r = iprh->next_pbuf;
    }
    /* release the resources allocated for the fragment queue entry */
    ip_reass_dequeue_datagram(ipr, ipr_prev);

    /* and adjust the number of pbufs currently queued for reassembly. */
    ip_reass_pbufcount -= pbuf_clen(p);

    /* Return the pbuf chain */
    return p;
  }
  /* the datagram is not (yet?) reassembled completely */
  LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass_pbufcount: %d out\n", ip_reass_pbufcount));
  return NULL;

nullreturn:
  LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_reass: nullreturn\n"));
  IPFRAG_STATS_INC(ip_frag.drop);
  pbuf_free(p);
  return NULL;
}
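
/* Example (sketch; not part of the original lwIP sources): the shape of the
 * call site in ip_input() (ip.c). The real surrounding code differs slightly;
 * this only illustrates how ip_reass() is consumed. */
#if 0 /* illustration only */
static err_t ip_input_reass_example(struct pbuf *p, struct ip_hdr *iphdr)
{
  if ((IPH_OFFSET(iphdr) & PP_HTONS(IP_OFFMASK | IP_MF)) != 0) {
    p = ip_reass(p);                      /* ip_reass takes ownership of p */
    if (p == NULL) {
      return ERR_OK;                      /* not complete yet; wait for more */
    }
    iphdr = (struct ip_hdr *)p->payload;  /* header of the reassembled datagram */
  }
  /* ... continue normal input processing on p ... */
  return ERR_OK;
}
#endif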
#endif /* IP_REASSEMBLY */

#if IP_FRAG
#if IP_FRAG_USES_STATIC_BUF
static u8_t buf[LWIP_MEM_ALIGN_SIZE(IP_FRAG_MAX_MTU + MEM_ALIGNMENT - 1)];
#else /* IP_FRAG_USES_STATIC_BUF */

#if !LWIP_NETIF_TX_SINGLE_PBUF
/** Allocate a new struct pbuf_custom_ref */
static struct pbuf_custom_ref*
ip_frag_alloc_pbuf_custom_ref(void)
{
  return (struct pbuf_custom_ref*)memp_malloc(MEMP_FRAG_PBUF);
}

/** Free a struct pbuf_custom_ref */
static void
ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
{
  LWIP_ASSERT("p != NULL", p != NULL);
  memp_free(MEMP_FRAG_PBUF, p);
}

/** Free-callback function to free a 'struct pbuf_custom_ref', called by
 * pbuf_free. */
static void
ipfrag_free_pbuf_custom(struct pbuf *p)
{
  struct pbuf_custom_ref *pcr = (struct pbuf_custom_ref*)p;
  LWIP_ASSERT("pcr != NULL", pcr != NULL);
  LWIP_ASSERT("pcr == p", (void*)pcr == (void*)p);
  if (pcr->original != NULL) {
    pbuf_free(pcr->original);
  }
  ip_frag_free_pbuf_custom_ref(pcr);
}
#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */

/**
 * Fragment an IP datagram if too large for the netif.
 *
 * Chop the datagram into MTU-sized chunks and send them in order
 * by using a fixed size static memory buffer (PBUF_REF) or
 * point PBUF_REFs into p (depending on IP_FRAG_USES_STATIC_BUF).
 *
 * @param p ip packet to send
 * @param netif the netif on which to send
 * @param dest destination ip address to which to send
 *
 * @return ERR_OK if sent successfully, err_t otherwise
 */
err_t
ip_frag(struct pbuf *p, struct netif *netif, ip_addr_t *dest)
{
  struct pbuf *rambuf;
#if IP_FRAG_USES_STATIC_BUF
  struct pbuf *header;
#else
#if !LWIP_NETIF_TX_SINGLE_PBUF
  struct pbuf *newpbuf;
#endif
  struct ip_hdr *original_iphdr;
#endif
  struct ip_hdr *iphdr;
  u16_t nfb;
  u16_t left, cop;
  u16_t mtu = netif->mtu;
  u16_t ofo, omf;
  u16_t last;
  u16_t poff = IP_HLEN;
  u16_t tmp;
#if !IP_FRAG_USES_STATIC_BUF && !LWIP_NETIF_TX_SINGLE_PBUF
  u16_t newpbuflen = 0;
  u16_t left_to_copy;
#endif

  /* Get a RAM based MTU sized pbuf */
#if IP_FRAG_USES_STATIC_BUF
  /* When using a static buffer, we use a PBUF_REF, which we will
   * use to reference the packet (without link header).
   * Layer and length is irrelevant.
   */
  rambuf = pbuf_alloc(PBUF_LINK, 0, PBUF_REF);
  if (rambuf == NULL) {
    LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc(PBUF_LINK, 0, PBUF_REF) failed\n"));
    return ERR_MEM;
  }
  rambuf->tot_len = rambuf->len = mtu;
  rambuf->payload = LWIP_MEM_ALIGN((void *)buf);

  /* Copy the IP header in it */
  iphdr = (struct ip_hdr *)rambuf->payload;
  SMEMCPY(iphdr, p->payload, IP_HLEN);
#else /* IP_FRAG_USES_STATIC_BUF */
  original_iphdr = (struct ip_hdr *)p->payload;
  iphdr = original_iphdr;
#endif /* IP_FRAG_USES_STATIC_BUF */

  /* Save original offset */
  tmp = ntohs(IPH_OFFSET(iphdr));
  ofo = tmp & IP_OFFMASK;
  omf = tmp & IP_MF;

  left = p->tot_len - IP_HLEN;

  nfb = (mtu - IP_HLEN) / 8;

  while (left) {
    last = (left <= mtu - IP_HLEN);

    /* Set new offset and MF flag */
    tmp = omf | (IP_OFFMASK & (ofo));
    if (!last) {
      tmp = tmp | IP_MF;
    }

    /* Fill this fragment */
    cop = last ? left : nfb * 8;

#if IP_FRAG_USES_STATIC_BUF
    poff += pbuf_copy_partial(p, (u8_t*)iphdr + IP_HLEN, cop, poff);
#else /* IP_FRAG_USES_STATIC_BUF */
#if LWIP_NETIF_TX_SINGLE_PBUF
    rambuf = pbuf_alloc(PBUF_IP, cop, PBUF_RAM);
    if (rambuf == NULL) {
      return ERR_MEM;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
    poff += pbuf_copy_partial(p, rambuf->payload, cop, poff);
    /* make room for the IP header */
    if (pbuf_header(rambuf, IP_HLEN)) {
      pbuf_free(rambuf);
      return ERR_MEM;
    }
    /* fill in the IP header */
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = rambuf->payload;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
    /* When not using a static buffer, create a chain of pbufs.
     * The first will be a PBUF_RAM holding the link and IP header.
     * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
     * but limited to the size of an mtu.
     */
    rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
    if (rambuf == NULL) {
      return ERR_MEM;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (p->len >= (IP_HLEN)));
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = (struct ip_hdr *)rambuf->payload;

    /* Can just adjust p directly for needed offset. */
    p->payload = (u8_t *)p->payload + poff;
    p->len -= poff;

    left_to_copy = cop;
    while (left_to_copy) {
      struct pbuf_custom_ref *pcr;
      newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
      /* Is this pbuf already empty? */
      if (!newpbuflen) {
        p = p->next;
        continue;
      }
      pcr = ip_frag_alloc_pbuf_custom_ref();
      if (pcr == NULL) {
        pbuf_free(rambuf);
        return ERR_MEM;
      }
      /* Mirror this pbuf, although we might not need all of it. */
      newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
      if (newpbuf == NULL) {
        ip_frag_free_pbuf_custom_ref(pcr);
        pbuf_free(rambuf);
        return ERR_MEM;
      }
      pbuf_ref(p);
      pcr->original = p;
      pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;

      /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
       * so that it is removed when pbuf_dechain is later called on rambuf.
       */
      pbuf_cat(rambuf, newpbuf);
      left_to_copy -= newpbuflen;
      if (left_to_copy) {
        p = p->next;
      }
    }
    poff = newpbuflen;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */

    /* Correct header */
    IPH_OFFSET_SET(iphdr, htons(tmp));
    IPH_LEN_SET(iphdr, htons(cop + IP_HLEN));
    IPH_CHKSUM_SET(iphdr, 0);
    IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));

#if IP_FRAG_USES_STATIC_BUF
    if (last) {
      pbuf_realloc(rambuf, left + IP_HLEN);
    }

    /* This part is ugly: we alloc a RAM based pbuf for
     * the link level header for each chunk and then
     * free it. A PBUF_ROM style pbuf for which pbuf_header
     * worked would make things simpler.
     */
    header = pbuf_alloc(PBUF_LINK, 0, PBUF_RAM);
    if (header != NULL) {
      pbuf_chain(header, rambuf);
      netif->output(netif, header, dest);
      IPFRAG_STATS_INC(ip_frag.xmit);
      snmp_inc_ipfragcreates();
      pbuf_free(header);
    } else {
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc() for header failed\n"));
      pbuf_free(rambuf);
      return ERR_MEM;
    }
#else /* IP_FRAG_USES_STATIC_BUF */
    /* No need for separate header pbuf - we allowed room for it in rambuf
     * when allocated.
     */
    netif->output(netif, rambuf, dest);
    IPFRAG_STATS_INC(ip_frag.xmit);

    /* Unfortunately we can't reuse rambuf - the hardware may still be
     * using the buffer. Instead we free it (and the ensuing chain) and
     * recreate it next time round the loop. If we're lucky the hardware
     * will have already sent the packet, the free will really free, and
     * there will be zero memory penalty.
     */

    pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
    left -= cop;
    ofo += nfb;
  }
#if IP_FRAG_USES_STATIC_BUF
  pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
  snmp_inc_ipfragoks();
  return ERR_OK;
}
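
/* Example (sketch; not part of the original lwIP sources): fragment-size
 * arithmetic and the typical call site. With mtu = 1500 and IP_HLEN = 20,
 * nfb = (1500 - 20) / 8 = 185, so each non-final fragment carries
 * 185 * 8 = 1480 payload bytes and the offset field advances by 185 per
 * fragment. ip_output_if() in ip.c invokes ip_frag() roughly as below. */
#if 0 /* illustration only */
static err_t ip_output_frag_example(struct pbuf *p, struct netif *netif, ip_addr_t *dest)
{
  /* don't fragment if the interface has mtu set to 0 [loopif] */
  if (netif->mtu && (p->tot_len > netif->mtu)) {
    return ip_frag(p, netif, dest); /* chop into fragments and send each one */
  }
  return netif->output(netif, p, dest);
}
#endif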
#endif /* IP_FRAG */