source: azure_iot_hub_f767zi/trunk/asp_baseplatform/lwip/lwip-2.1.2/src/core/tcp_out.c@ 457

Last change on this file since 457 was 457, checked in by coas-nagasima, 4 years ago

Added files

  • Property svn:eol-style set to native
  • Property svn:mime-type set to text/x-csrc;charset=UTF-8
File size: 74.9 KB
1/**
2 * @file
3 * Transmission Control Protocol, outgoing traffic
4 *
5 * The output functions of TCP.
6 *
7 * There are two distinct ways for TCP segments to get sent:
8 * - queued data: these are segments transferring data or segments containing
9 * SYN or FIN (which both count as one sequence number). They are created as
10 * struct @ref pbuf together with a struct tcp_seg and enqueued to the
11 * unsent list of the pcb. They are sent by tcp_output:
12 * - @ref tcp_write : creates data segments
13 * - @ref tcp_split_unsent_seg : splits a data segment
14 * - @ref tcp_enqueue_flags : creates SYN-only or FIN-only segments
15 * - @ref tcp_output / tcp_output_segment : finalize the tcp header
16 * (e.g. sequence numbers, options, checksum) and output to IP
17 * - the various tcp_rexmit functions shuffle around segments between the
18 * unsent and unacked lists to retransmit them
19 * - tcp_create_segment and tcp_pbuf_prealloc allocate pbuf and
20 * segment for these functions
21 * - direct send: these segments don't contain data but control the connection
22 * behaviour. They are created as pbuf only and sent directly without
23 * enqueueing them:
24 * - @ref tcp_send_empty_ack sends an ACK-only segment
25 * - @ref tcp_rst sends a RST segment
26 * - @ref tcp_keepalive sends a keepalive segment
27 * - @ref tcp_zero_window_probe sends a window probe segment
28 * - tcp_output_alloc_header allocates a header-only pbuf for these functions
29 */
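/*
 * Illustrative call flow (a sketch for orientation only, not an exhaustive
 * list of call sites):
 *
 *   tcp_write() / tcp_enqueue_flags()  -> append struct tcp_seg to pcb->unsent
 *   tcp_output()                       -> tcp_output_segment() -> ip_output_if()
 *   tcp_rexmit() / tcp_rexmit_rto()    -> move segments from unacked back to unsent
 *
 *   direct sends (header-only pbuf, no queueing):
 *   tcp_send_empty_ack(), tcp_rst(), tcp_keepalive(), tcp_zero_window_probe()
 */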
30
31/*
32 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without modification,
36 * are permitted provided that the following conditions are met:
37 *
38 * 1. Redistributions of source code must retain the above copyright notice,
39 * this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright notice,
41 * this list of conditions and the following disclaimer in the documentation
42 * and/or other materials provided with the distribution.
43 * 3. The name of the author may not be used to endorse or promote products
44 * derived from this software without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
47 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
48 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
49 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
50 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
51 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
52 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
53 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
54 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
55 * OF SUCH DAMAGE.
56 *
57 * This file is part of the lwIP TCP/IP stack.
58 *
59 * Author: Adam Dunkels <adam@sics.se>
60 *
61 */
62
63#include "lwip/opt.h"
64
65#if LWIP_TCP /* don't build if not configured for use in lwipopts.h */
66
67#include "lwip/priv/tcp_priv.h"
68#include "lwip/def.h"
69#include "lwip/mem.h"
70#include "lwip/memp.h"
71#include "lwip/ip_addr.h"
72#include "lwip/netif.h"
73#include "lwip/inet_chksum.h"
74#include "lwip/stats.h"
75#include "lwip/ip6.h"
76#include "lwip/ip6_addr.h"
77#if LWIP_TCP_TIMESTAMPS
78#include "lwip/sys.h"
79#endif
80
81#include <string.h>
82
83#ifdef LWIP_HOOK_FILENAME
84#include LWIP_HOOK_FILENAME
85#endif
86
87/* Allow adding custom TCP header options by defining this hook */
88#ifdef LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH
89#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_HOOK_TCP_OUT_TCPOPT_LENGTH(pcb, LWIP_TCP_OPT_LENGTH(flags))
90#else
91#define LWIP_TCP_OPT_LENGTH_SEGMENT(flags, pcb) LWIP_TCP_OPT_LENGTH(flags)
92#endif
93
94/* Define some copy-macros for checksum-on-copy so that the code looks
95 nicer by preventing too many ifdef's. */
96#if TCP_CHECKSUM_ON_COPY
97#define TCP_DATA_COPY(dst, src, len, seg) do { \
98 tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), \
99 len, &seg->chksum, &seg->chksum_swapped); \
100 seg->flags |= TF_SEG_DATA_CHECKSUMMED; } while(0)
101#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) \
102 tcp_seg_add_chksum(LWIP_CHKSUM_COPY(dst, src, len), len, chksum, chksum_swapped);
103#else /* TCP_CHECKSUM_ON_COPY*/
104#define TCP_DATA_COPY(dst, src, len, seg) MEMCPY(dst, src, len)
105#define TCP_DATA_COPY2(dst, src, len, chksum, chksum_swapped) MEMCPY(dst, src, len)
106#endif /* TCP_CHECKSUM_ON_COPY*/
107
108/** Define this to 1 for an extra check that the output checksum is valid
109 * (useful when the checksum is generated by the application, not the stack) */
110#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK
111#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK 0
112#endif
113/* Allow overriding the sanity-check failure handling, e.g. from a warning to a hard failure */
114#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
115#ifndef TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL
116#define TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(msg) LWIP_DEBUGF(TCP_DEBUG | LWIP_DBG_LEVEL_WARNING, msg)
117#endif
118#endif
119
120#if TCP_OVERSIZE
121/** The size of segment pbufs created when TCP_OVERSIZE is enabled */
122#ifndef TCP_OVERSIZE_CALC_LENGTH
123#define TCP_OVERSIZE_CALC_LENGTH(length) ((length) + TCP_OVERSIZE)
124#endif
125#endif
126
127/* Forward declarations.*/
128static err_t tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif);
129
130/* tcp_route: common code that returns a fixed bound netif or calls ip_route */
131static struct netif *
132tcp_route(const struct tcp_pcb *pcb, const ip_addr_t *src, const ip_addr_t *dst)
133{
134 LWIP_UNUSED_ARG(src); /* in case IPv4-only and source-based routing is disabled */
135
136 if ((pcb != NULL) && (pcb->netif_idx != NETIF_NO_INDEX)) {
137 return netif_get_by_index(pcb->netif_idx);
138 } else {
139 return ip_route(src, dst);
140 }
141}
142
143/**
144 * Create a TCP segment with prefilled header.
145 *
146 * Called by @ref tcp_write, @ref tcp_enqueue_flags and @ref tcp_split_unsent_seg
147 *
148 * @param pcb Protocol control block for the TCP connection.
149 * @param p pbuf that is used to hold the TCP header.
150 * @param hdrflags TCP flags for header.
151 * @param seqno TCP sequence number of this packet
152 * @param optflags options to include in TCP header
153 * @return a new tcp_seg pointing to p, or NULL.
154 * The TCP header is filled in except ackno and wnd.
155 * p is freed on failure.
156 */
157static struct tcp_seg *
158tcp_create_segment(const struct tcp_pcb *pcb, struct pbuf *p, u8_t hdrflags, u32_t seqno, u8_t optflags)
159{
160 struct tcp_seg *seg;
161 u8_t optlen;
162
163 LWIP_ASSERT("tcp_create_segment: invalid pcb", pcb != NULL);
164 LWIP_ASSERT("tcp_create_segment: invalid pbuf", p != NULL);
165
166 optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb);
167
168 if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) {
169 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no memory.\n"));
170 pbuf_free(p);
171 return NULL;
172 }
173 seg->flags = optflags;
174 seg->next = NULL;
175 seg->p = p;
176 LWIP_ASSERT("p->tot_len >= optlen", p->tot_len >= optlen);
177 seg->len = p->tot_len - optlen;
178#if TCP_OVERSIZE_DBGCHECK
179 seg->oversize_left = 0;
180#endif /* TCP_OVERSIZE_DBGCHECK */
181#if TCP_CHECKSUM_ON_COPY
182 seg->chksum = 0;
183 seg->chksum_swapped = 0;
184 /* check optflags */
185 LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED",
186 (optflags & TF_SEG_DATA_CHECKSUMMED) == 0);
187#endif /* TCP_CHECKSUM_ON_COPY */
188
189 /* build TCP header */
190 if (pbuf_add_header(p, TCP_HLEN)) {
191 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_create_segment: no room for TCP header in pbuf.\n"));
192 TCP_STATS_INC(tcp.err);
193 tcp_seg_free(seg);
194 return NULL;
195 }
196 seg->tcphdr = (struct tcp_hdr *)seg->p->payload;
197 seg->tcphdr->src = lwip_htons(pcb->local_port);
198 seg->tcphdr->dest = lwip_htons(pcb->remote_port);
199 seg->tcphdr->seqno = lwip_htonl(seqno);
200 /* ackno is set in tcp_output */
201 TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), hdrflags);
202 /* wnd and chksum are set in tcp_output */
203 seg->tcphdr->urgp = 0;
204 return seg;
205}
206
207/**
208 * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end.
209 *
210 * This function is like pbuf_alloc(layer, length, PBUF_RAM) except
211 * there may be extra bytes available at the end.
212 *
213 * Called by @ref tcp_write
214 *
215 * @param layer flag to define header size.
216 * @param length size of the pbuf's payload.
217 * @param max_length maximum usable size of payload+oversize.
218 * @param oversize pointer to a u16_t that will receive the number of usable tail bytes.
219 * @param pcb The TCP connection that will enqueue the pbuf.
220 * @param apiflags API flags given to tcp_write.
221 * @param first_seg true when this pbuf will be used in the first enqueued segment.
222 */
223#if TCP_OVERSIZE
224static struct pbuf *
225tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length,
226 u16_t *oversize, const struct tcp_pcb *pcb, u8_t apiflags,
227 u8_t first_seg)
228{
229 struct pbuf *p;
230 u16_t alloc = length;
231
232 LWIP_ASSERT("tcp_pbuf_prealloc: invalid oversize", oversize != NULL);
233 LWIP_ASSERT("tcp_pbuf_prealloc: invalid pcb", pcb != NULL);
234
235#if LWIP_NETIF_TX_SINGLE_PBUF
236 LWIP_UNUSED_ARG(max_length);
237 LWIP_UNUSED_ARG(pcb);
238 LWIP_UNUSED_ARG(apiflags);
239 LWIP_UNUSED_ARG(first_seg);
240 alloc = max_length;
241#else /* LWIP_NETIF_TX_SINGLE_PBUF */
242 if (length < max_length) {
243 /* Should we allocate an oversized pbuf, or just the minimum
244 * length required? If tcp_write is going to be called again
245 * before this segment is transmitted, we want the oversized
246 * buffer. If the segment will be transmitted immediately, we can
247 * save memory by allocating only length. We use a simple
248 * heuristic based on the following information:
249 *
250 * Did the user set TCP_WRITE_FLAG_MORE?
251 *
252 * Will the Nagle algorithm defer transmission of this segment?
253 */
254 if ((apiflags & TCP_WRITE_FLAG_MORE) ||
255 (!(pcb->flags & TF_NODELAY) &&
256 (!first_seg ||
257 pcb->unsent != NULL ||
258 pcb->unacked != NULL))) {
259 alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(TCP_OVERSIZE_CALC_LENGTH(length)));
260 }
261 }
262#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
263 p = pbuf_alloc(layer, alloc, PBUF_RAM);
264 if (p == NULL) {
265 return NULL;
266 }
267 LWIP_ASSERT("need unchained pbuf", p->next == NULL);
268 *oversize = p->len - length;
269 /* trim p->len to the currently used size */
270 p->len = p->tot_len = length;
271 return p;
272}
273#else /* TCP_OVERSIZE */
274#define tcp_pbuf_prealloc(layer, length, mx, os, pcb, api, fst) pbuf_alloc((layer), (length), PBUF_RAM)
275#endif /* TCP_OVERSIZE */
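/*
 * Worked example for the oversize heuristic above (illustrative numbers,
 * assuming the default TCP_OVERSIZE_CALC_LENGTH and MEM_ALIGNMENT == 4):
 * calling tcp_pbuf_prealloc(PBUF_TRANSPORT, 10, 536, &oversize, pcb, apiflags, 1)
 * with TCP_OVERSIZE == 536 and TCP_WRITE_FLAG_MORE set allocates
 * LWIP_MIN(536, LWIP_MEM_ALIGN_SIZE(10 + 536)) == 536 bytes, trims p->len and
 * p->tot_len back to 10 and reports oversize == 526, so a subsequent
 * tcp_write() can append into the same pbuf without a new allocation.
 * Without TCP_WRITE_FLAG_MORE and with Nagle disabled (TF_NODELAY set), only
 * the requested 10 bytes are allocated.
 */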
276
277#if TCP_CHECKSUM_ON_COPY
278/** Add a checksum of newly added data to the segment.
279 *
280 * Called by tcp_write and tcp_split_unsent_seg.
281 */
282static void
283tcp_seg_add_chksum(u16_t chksum, u16_t len, u16_t *seg_chksum,
284 u8_t *seg_chksum_swapped)
285{
286 u32_t helper;
287 /* add chksum to old chksum and fold to u16_t */
288 helper = chksum + *seg_chksum;
289 chksum = FOLD_U32T(helper);
290 if ((len & 1) != 0) {
291 *seg_chksum_swapped = 1 - *seg_chksum_swapped;
292 chksum = SWAP_BYTES_IN_WORD(chksum);
293 }
294 *seg_chksum = chksum;
295}
296#endif /* TCP_CHECKSUM_ON_COPY */
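/*
 * Note on the odd-length handling above (illustrative): the Internet checksum
 * sums 16-bit words, so once a chunk of odd length has been appended, the
 * next chunk starts at an odd byte offset within the segment. Rather than
 * re-checksumming everything, the running sum is byte-swapped and
 * seg_chksum_swapped is toggled, so the next chunk's independently computed
 * checksum can still be added as-is; tcp_output_segment() swaps the sum back
 * before folding it into the TCP checksum if chksum_swapped is still set.
 */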
297
298/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen).
299 *
300 * @param pcb the tcp pcb to check for
301 * @param len length of data to send (checked against snd_buf)
302 * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise
303 */
304static err_t
305tcp_write_checks(struct tcp_pcb *pcb, u16_t len)
306{
307 LWIP_ASSERT("tcp_write_checks: invalid pcb", pcb != NULL);
308
309 /* connection is in invalid state for data transmission? */
310 if ((pcb->state != ESTABLISHED) &&
311 (pcb->state != CLOSE_WAIT) &&
312 (pcb->state != SYN_SENT) &&
313 (pcb->state != SYN_RCVD)) {
314 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n"));
315 return ERR_CONN;
316 } else if (len == 0) {
317 return ERR_OK;
318 }
319
320 /* fail on too much data */
321 if (len > pcb->snd_buf) {
322 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too much data (len=%"U16_F" > snd_buf=%"TCPWNDSIZE_F")\n",
323 len, pcb->snd_buf));
324 tcp_set_flags(pcb, TF_NAGLEMEMERR);
325 return ERR_MEM;
326 }
327
328 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"TCPWNDSIZE_F"\n", (tcpwnd_size_t)pcb->snd_queuelen));
329
330 /* If total number of pbufs on the unsent/unacked queues exceeds the
331 * configured maximum, return an error */
332 /* check for configured max queuelen and possible overflow */
333 if (pcb->snd_queuelen >= LWIP_MIN(TCP_SND_QUEUELEN, (TCP_SNDQUEUELEN_OVERFLOW + 1))) {
334 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("tcp_write: too long queue %"U16_F" (max %"U16_F")\n",
335 pcb->snd_queuelen, (u16_t)TCP_SND_QUEUELEN));
336 TCP_STATS_INC(tcp.memerr);
337 tcp_set_flags(pcb, TF_NAGLEMEMERR);
338 return ERR_MEM;
339 }
340 if (pcb->snd_queuelen != 0) {
341 LWIP_ASSERT("tcp_write: pbufs on queue => at least one queue non-empty",
342 pcb->unacked != NULL || pcb->unsent != NULL);
343 } else {
344 LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty",
345 pcb->unacked == NULL && pcb->unsent == NULL);
346 }
347 return ERR_OK;
348}
349
350/**
351 * @ingroup tcp_raw
352 * Write data for sending (but does not send it immediately).
353 *
354 * It waits in the expectation of more data being sent soon (as
355 * data can be sent more efficiently when segments are combined).
356 * To prompt the system to send data now, call tcp_output() after
357 * calling tcp_write().
358 *
359 * This function enqueues the data pointed to by the argument dataptr. The length of
360 * the data is passed as the len parameter. The apiflags can be one or more of:
361 * - TCP_WRITE_FLAG_COPY: indicates that new memory should be allocated
362 * for the data to be copied into. If this flag is not given, no new memory
363 * is allocated and the data is only referenced by pointer. This
364 * also means that the memory behind dataptr must not change until the data is
365 * ACKed by the remote host.
366 * - TCP_WRITE_FLAG_MORE: indicates that more data follows. If this is omitted,
367 * the PSH flag is set in the last segment created by this call to tcp_write.
368 * If this flag is given, the PSH flag is not set.
369 *
370 * The tcp_write() function will fail and return ERR_MEM if the length
371 * of the data exceeds the current send buffer size or if the length of
372 * the queue of outgoing segments is larger than the upper limit defined
373 * in lwipopts.h. The number of bytes available in the output queue can
374 * be retrieved with the tcp_sndbuf() function.
375 *
376 * The proper way to use this function is to call the function with at
377 * most tcp_sndbuf() bytes of data. If the function returns ERR_MEM,
378 * the application should wait until some of the currently enqueued
379 * data has been successfully received by the other host and try again.
380 *
381 * @param pcb Protocol control block for the TCP connection to enqueue data for.
382 * @param arg Pointer to the data to be enqueued for sending.
383 * @param len Data length in bytes
384 * @param apiflags combination of following flags :
385 * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack
386 * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will not be set on last segment sent,
387 * @return ERR_OK if enqueued, another err_t on error
388 */
389err_t
390tcp_write(struct tcp_pcb *pcb, const void *arg, u16_t len, u8_t apiflags)
391{
392 struct pbuf *concat_p = NULL;
393 struct tcp_seg *last_unsent = NULL, *seg = NULL, *prev_seg = NULL, *queue = NULL;
394 u16_t pos = 0; /* position in 'arg' data */
395 u16_t queuelen;
396 u8_t optlen;
397 u8_t optflags = 0;
398#if TCP_OVERSIZE
399 u16_t oversize = 0;
400 u16_t oversize_used = 0;
401#if TCP_OVERSIZE_DBGCHECK
402 u16_t oversize_add = 0;
403#endif /* TCP_OVERSIZE_DBGCHECK*/
404#endif /* TCP_OVERSIZE */
405 u16_t extendlen = 0;
406#if TCP_CHECKSUM_ON_COPY
407 u16_t concat_chksum = 0;
408 u8_t concat_chksum_swapped = 0;
409 u16_t concat_chksummed = 0;
410#endif /* TCP_CHECKSUM_ON_COPY */
411 err_t err;
412 u16_t mss_local;
413
414 LWIP_ERROR("tcp_write: invalid pcb", pcb != NULL, return ERR_ARG);
415
416 /* don't allocate segments bigger than half the maximum window we ever received */
417 mss_local = LWIP_MIN(pcb->mss, TCPWND_MIN16(pcb->snd_wnd_max / 2));
418 mss_local = mss_local ? mss_local : pcb->mss;
419
420 LWIP_ASSERT_CORE_LOCKED();
421
422#if LWIP_NETIF_TX_SINGLE_PBUF
423 /* Always copy to try to create single pbufs for TX */
424 apiflags |= TCP_WRITE_FLAG_COPY;
425#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
426
427 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n",
428 (void *)pcb, arg, len, (u16_t)apiflags));
429 LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)",
430 arg != NULL, return ERR_ARG;);
431
432 err = tcp_write_checks(pcb, len);
433 if (err != ERR_OK) {
434 return err;
435 }
436 queuelen = pcb->snd_queuelen;
437
438#if LWIP_TCP_TIMESTAMPS
439 if ((pcb->flags & TF_TIMESTAMP)) {
440 /* Make sure the timestamp option is only included in data segments if we
441 agreed about it with the remote host. */
442 optflags = TF_SEG_OPTS_TS;
443 optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(TF_SEG_OPTS_TS, pcb);
444 /* ensure that segments can hold at least one data byte... */
445 mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1);
446 } else
447#endif /* LWIP_TCP_TIMESTAMPS */
448 {
449 optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
450 }
451
452
453 /*
454 * TCP segmentation is done in three phases with increasing complexity:
455 *
456 * 1. Copy data directly into an oversized pbuf.
457 * 2. Chain a new pbuf to the end of pcb->unsent.
458 * 3. Create new segments.
459 *
460 * We may run out of memory at any point. In that case we must
461 * return ERR_MEM and not change anything in pcb. Therefore, all
462 * changes are recorded in local variables and committed at the end
463 * of the function. Some pcb fields are maintained in local copies:
464 *
465 * queuelen = pcb->snd_queuelen
466 * oversize = pcb->unsent_oversize
467 *
468 * These variables are set consistently by the phases:
469 *
470 * seg points to the last segment tampered with.
471 *
472 * pos records progress as data is segmented.
473 */
474
475 /* Find the tail of the unsent queue. */
476 if (pcb->unsent != NULL) {
477 u16_t space;
478 u16_t unsent_optlen;
479
480 /* @todo: this could be sped up by keeping last_unsent in the pcb */
481 for (last_unsent = pcb->unsent; last_unsent->next != NULL;
482 last_unsent = last_unsent->next);
483
484 /* Usable space at the end of the last unsent segment */
485 unsent_optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(last_unsent->flags, pcb);
486 LWIP_ASSERT("mss_local is too small", mss_local >= last_unsent->len + unsent_optlen);
487 space = mss_local - (last_unsent->len + unsent_optlen);
488
489 /*
490 * Phase 1: Copy data directly into an oversized pbuf.
491 *
492 * The number of bytes copied is recorded in the oversize_used
493 * variable. The actual copying is done at the bottom of the
494 * function.
495 */
496#if TCP_OVERSIZE
497#if TCP_OVERSIZE_DBGCHECK
498 /* check that pcb->unsent_oversize matches last_unsent->oversize_left */
499 LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)",
500 pcb->unsent_oversize == last_unsent->oversize_left);
501#endif /* TCP_OVERSIZE_DBGCHECK */
502 oversize = pcb->unsent_oversize;
503 if (oversize > 0) {
504 LWIP_ASSERT("inconsistent oversize vs. space", oversize <= space);
505 seg = last_unsent;
506 oversize_used = LWIP_MIN(space, LWIP_MIN(oversize, len));
507 pos += oversize_used;
508 oversize -= oversize_used;
509 space -= oversize_used;
510 }
511 /* now we are either finished or oversize is zero */
512 LWIP_ASSERT("inconsistent oversize vs. len", (oversize == 0) || (pos == len));
513#endif /* TCP_OVERSIZE */
514
515#if !LWIP_NETIF_TX_SINGLE_PBUF
516 /*
517 * Phase 2: Chain a new pbuf to the end of pcb->unsent.
518 *
519 * As an exception when NOT copying the data, if the given data buffer
520 * directly follows the last unsent data buffer in memory, extend the last
521 * ROM pbuf reference to the buffer, thus saving a ROM pbuf allocation.
522 *
523 * We don't extend segments containing SYN/FIN flags or options
524 * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at
525 * the end.
526 *
527 * This phase is skipped for LWIP_NETIF_TX_SINGLE_PBUF as we could only execute
528 * it after rexmit puts a segment from unacked to unsent and at this point,
529 * oversize info is lost.
530 */
531 if ((pos < len) && (space > 0) && (last_unsent->len > 0)) {
532 u16_t seglen = LWIP_MIN(space, len - pos);
533 seg = last_unsent;
534
535 /* Create a pbuf with a copy or reference to seglen bytes. We
536 * can use PBUF_RAW here since the data appears in the middle of
537 * a segment. A header will never be prepended. */
538 if (apiflags & TCP_WRITE_FLAG_COPY) {
539 /* Data is copied */
540 if ((concat_p = tcp_pbuf_prealloc(PBUF_RAW, seglen, space, &oversize, pcb, apiflags, 1)) == NULL) {
541 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
542 ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n",
543 seglen));
544 goto memerr;
545 }
546#if TCP_OVERSIZE_DBGCHECK
547 oversize_add = oversize;
548#endif /* TCP_OVERSIZE_DBGCHECK */
549 TCP_DATA_COPY2(concat_p->payload, (const u8_t *)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped);
550#if TCP_CHECKSUM_ON_COPY
551 concat_chksummed += seglen;
552#endif /* TCP_CHECKSUM_ON_COPY */
553 queuelen += pbuf_clen(concat_p);
554 } else {
555 /* Data is not copied */
556 /* If the last unsent pbuf is of type PBUF_ROM, try to extend it. */
557 struct pbuf *p;
558 for (p = last_unsent->p; p->next != NULL; p = p->next);
559 if (((p->type_internal & (PBUF_TYPE_FLAG_STRUCT_DATA_CONTIGUOUS | PBUF_TYPE_FLAG_DATA_VOLATILE)) == 0) &&
560 (const u8_t *)p->payload + p->len == (const u8_t *)arg) {
561 LWIP_ASSERT("tcp_write: ROM pbufs cannot be oversized", pos == 0);
562 extendlen = seglen;
563 } else {
564 if ((concat_p = pbuf_alloc(PBUF_RAW, seglen, PBUF_ROM)) == NULL) {
565 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
566 ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
567 goto memerr;
568 }
569 /* reference the non-volatile payload data */
570 ((struct pbuf_rom *)concat_p)->payload = (const u8_t *)arg + pos;
571 queuelen += pbuf_clen(concat_p);
572 }
573#if TCP_CHECKSUM_ON_COPY
574 /* calculate the checksum of nocopy-data */
575 tcp_seg_add_chksum(~inet_chksum((const u8_t *)arg + pos, seglen), seglen,
576 &concat_chksum, &concat_chksum_swapped);
577 concat_chksummed += seglen;
578#endif /* TCP_CHECKSUM_ON_COPY */
579 }
580
581 pos += seglen;
582 }
583#endif /* !LWIP_NETIF_TX_SINGLE_PBUF */
584 } else {
585#if TCP_OVERSIZE
586 LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)",
587 pcb->unsent_oversize == 0);
588#endif /* TCP_OVERSIZE */
589 }
590
591 /*
592 * Phase 3: Create new segments.
593 *
594 * The new segments are chained together in the local 'queue'
595 * variable, ready to be appended to pcb->unsent.
596 */
597 while (pos < len) {
598 struct pbuf *p;
599 u16_t left = len - pos;
600 u16_t max_len = mss_local - optlen;
601 u16_t seglen = LWIP_MIN(left, max_len);
602#if TCP_CHECKSUM_ON_COPY
603 u16_t chksum = 0;
604 u8_t chksum_swapped = 0;
605#endif /* TCP_CHECKSUM_ON_COPY */
606
607 if (apiflags & TCP_WRITE_FLAG_COPY) {
608 /* If copy is set, memory should be allocated and data copied
609 * into pbuf */
610 if ((p = tcp_pbuf_prealloc(PBUF_TRANSPORT, seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) {
611 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen));
612 goto memerr;
613 }
614 LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen",
615 (p->len >= seglen));
616 TCP_DATA_COPY2((char *)p->payload + optlen, (const u8_t *)arg + pos, seglen, &chksum, &chksum_swapped);
617 } else {
618 /* Copy is not set: First allocate a pbuf for holding the data.
619 * Since the referenced data is available at least until it is
620 * sent out on the link (as it has to be ACKed by the remote
621 * party) we can safely use PBUF_ROM instead of PBUF_REF here.
622 */
623 struct pbuf *p2;
624#if TCP_OVERSIZE
625 LWIP_ASSERT("oversize == 0", oversize == 0);
626#endif /* TCP_OVERSIZE */
627 if ((p2 = pbuf_alloc(PBUF_TRANSPORT, seglen, PBUF_ROM)) == NULL) {
628 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for zero-copy pbuf\n"));
629 goto memerr;
630 }
631#if TCP_CHECKSUM_ON_COPY
632 /* calculate the checksum of nocopy-data */
633 chksum = ~inet_chksum((const u8_t *)arg + pos, seglen);
634 if (seglen & 1) {
635 chksum_swapped = 1;
636 chksum = SWAP_BYTES_IN_WORD(chksum);
637 }
638#endif /* TCP_CHECKSUM_ON_COPY */
639 /* reference the non-volatile payload data */
640 ((struct pbuf_rom *)p2)->payload = (const u8_t *)arg + pos;
641
642 /* Second, allocate a pbuf for the headers. */
643 if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
644 /* If allocation fails, we have to deallocate the data pbuf as
645 * well. */
646 pbuf_free(p2);
647 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: could not allocate memory for header pbuf\n"));
648 goto memerr;
649 }
650 /* Concatenate the headers and data pbufs together. */
651 pbuf_cat(p/*header*/, p2/*data*/);
652 }
653
654 queuelen += pbuf_clen(p);
655
656 /* Now that there are more segments queued, we check again if the
657 * length of the queue exceeds the configured maximum or
658 * overflows. */
659 if (queuelen > LWIP_MIN(TCP_SND_QUEUELEN, TCP_SNDQUEUELEN_OVERFLOW)) {
660 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_write: queue too long %"U16_F" (%d)\n",
661 queuelen, (int)TCP_SND_QUEUELEN));
662 pbuf_free(p);
663 goto memerr;
664 }
665
666 if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) {
667 goto memerr;
668 }
669#if TCP_OVERSIZE_DBGCHECK
670 seg->oversize_left = oversize;
671#endif /* TCP_OVERSIZE_DBGCHECK */
672#if TCP_CHECKSUM_ON_COPY
673 seg->chksum = chksum;
674 seg->chksum_swapped = chksum_swapped;
675 seg->flags |= TF_SEG_DATA_CHECKSUMMED;
676#endif /* TCP_CHECKSUM_ON_COPY */
677
678 /* first segment of to-be-queued data? */
679 if (queue == NULL) {
680 queue = seg;
681 } else {
682 /* Attach the segment to the end of the queued segments */
683 LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL);
684 prev_seg->next = seg;
685 }
686 /* remember last segment of to-be-queued data for next iteration */
687 prev_seg = seg;
688
689 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n",
690 lwip_ntohl(seg->tcphdr->seqno),
691 lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg)));
692
693 pos += seglen;
694 }
695
696 /*
697 * All three segmentation phases were successful. We can commit the
698 * transaction.
699 */
700#if TCP_OVERSIZE_DBGCHECK
701 if ((last_unsent != NULL) && (oversize_add != 0)) {
702 last_unsent->oversize_left += oversize_add;
703 }
704#endif /* TCP_OVERSIZE_DBGCHECK */
705
706 /*
707 * Phase 1: If data has been added to the preallocated tail of
708 * last_unsent, we update the length fields of the pbuf chain.
709 */
710#if TCP_OVERSIZE
711 if (oversize_used > 0) {
712 struct pbuf *p;
713 /* Bump tot_len of whole chain, len of tail */
714 for (p = last_unsent->p; p; p = p->next) {
715 p->tot_len += oversize_used;
716 if (p->next == NULL) {
717 TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, last_unsent);
718 p->len += oversize_used;
719 }
720 }
721 last_unsent->len += oversize_used;
722#if TCP_OVERSIZE_DBGCHECK
723 LWIP_ASSERT("last_unsent->oversize_left >= oversize_used",
724 last_unsent->oversize_left >= oversize_used);
725 last_unsent->oversize_left -= oversize_used;
726#endif /* TCP_OVERSIZE_DBGCHECK */
727 }
728 pcb->unsent_oversize = oversize;
729#endif /* TCP_OVERSIZE */
730
731 /*
732 * Phase 2: concat_p can be concatenated onto last_unsent->p, unless we
733 * determined that the last ROM pbuf can be extended to include the new data.
734 */
735 if (concat_p != NULL) {
736 LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty",
737 (last_unsent != NULL));
738 pbuf_cat(last_unsent->p, concat_p);
739 last_unsent->len += concat_p->tot_len;
740 } else if (extendlen > 0) {
741 struct pbuf *p;
742 LWIP_ASSERT("tcp_write: extension of reference requires reference",
743 last_unsent != NULL && last_unsent->p != NULL);
744 for (p = last_unsent->p; p->next != NULL; p = p->next) {
745 p->tot_len += extendlen;
746 }
747 p->tot_len += extendlen;
748 p->len += extendlen;
749 last_unsent->len += extendlen;
750 }
751
752#if TCP_CHECKSUM_ON_COPY
753 if (concat_chksummed) {
754 LWIP_ASSERT("tcp_write: concat checksum needs concatenated data",
755 concat_p != NULL || extendlen > 0);
756 /* if the concat checksum was swapped, swap it back */
757 if (concat_chksum_swapped) {
758 concat_chksum = SWAP_BYTES_IN_WORD(concat_chksum);
759 }
760 tcp_seg_add_chksum(concat_chksum, concat_chksummed, &last_unsent->chksum,
761 &last_unsent->chksum_swapped);
762 last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED;
763 }
764#endif /* TCP_CHECKSUM_ON_COPY */
765
766 /*
767 * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that
768 * is harmless
769 */
770 if (last_unsent == NULL) {
771 pcb->unsent = queue;
772 } else {
773 last_unsent->next = queue;
774 }
775
776 /*
777 * Finally update the pcb state.
778 */
779 pcb->snd_lbb += len;
780 pcb->snd_buf -= len;
781 pcb->snd_queuelen = queuelen;
782
783 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"S16_F" (after enqueued)\n",
784 pcb->snd_queuelen));
785 if (pcb->snd_queuelen != 0) {
786 LWIP_ASSERT("tcp_write: valid queue length",
787 pcb->unacked != NULL || pcb->unsent != NULL);
788 }
789
790 /* Set the PSH flag in the last segment that we enqueued. */
791 if (seg != NULL && seg->tcphdr != NULL && ((apiflags & TCP_WRITE_FLAG_MORE) == 0)) {
792 TCPH_SET_FLAG(seg->tcphdr, TCP_PSH);
793 }
794
795 return ERR_OK;
796memerr:
797 tcp_set_flags(pcb, TF_NAGLEMEMERR);
798 TCP_STATS_INC(tcp.memerr);
799
800 if (concat_p != NULL) {
801 pbuf_free(concat_p);
802 }
803 if (queue != NULL) {
804 tcp_segs_free(queue);
805 }
806 if (pcb->snd_queuelen != 0) {
807 LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL ||
808 pcb->unsent != NULL);
809 }
810 LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"S16_F" (with mem err)\n", pcb->snd_queuelen));
811 return ERR_MEM;
812}
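/*
 * Illustrative caller pattern for tcp_write()/tcp_output() from the raw API.
 * This is a minimal sketch, not part of the stack, and the helper name
 * 'example_send' is made up: write at most tcp_sndbuf() bytes, treat a
 * non-ERR_OK result (typically ERR_MEM) as "retry later, e.g. from the sent
 * or poll callback", and call tcp_output() to push the queued data out now.
 *
 *   static err_t example_send(struct tcp_pcb *pcb, const u8_t *data, u16_t len)
 *   {
 *     u16_t chunk = (u16_t)LWIP_MIN(len, tcp_sndbuf(pcb));
 *     err_t err = tcp_write(pcb, data, chunk, TCP_WRITE_FLAG_COPY);
 *     if (err != ERR_OK) {
 *       return err;
 *     }
 *     return tcp_output(pcb);
 *   }
 */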
813
814/**
815 * Split the segment at the head of the unsent queue. If the return value is not
816 * ERR_OK, the existing head remains intact.
817 *
818 * The split is accomplished by creating a new TCP segment and pbuf
819 * which holds the remainder payload after the split. The original
820 * pbuf is trimmed to the new length. This allows splitting of read-only
821 * pbufs.
822 *
823 * @param pcb the tcp_pcb for which to split the unsent head
824 * @param split the amount of payload to remain in the head
825 */
826err_t
827tcp_split_unsent_seg(struct tcp_pcb *pcb, u16_t split)
828{
829 struct tcp_seg *seg = NULL, *useg = NULL;
830 struct pbuf *p = NULL;
831 u8_t optlen;
832 u8_t optflags;
833 u8_t split_flags;
834 u8_t remainder_flags;
835 u16_t remainder;
836 u16_t offset;
837#if TCP_CHECKSUM_ON_COPY
838 u16_t chksum = 0;
839 u8_t chksum_swapped = 0;
840 struct pbuf *q;
841#endif /* TCP_CHECKSUM_ON_COPY */
842
843 LWIP_ASSERT("tcp_split_unsent_seg: invalid pcb", pcb != NULL);
844
845 useg = pcb->unsent;
846 if (useg == NULL) {
847 return ERR_MEM;
848 }
849
850 if (split == 0) {
851 LWIP_ASSERT("Can't split segment into length 0", 0);
852 return ERR_VAL;
853 }
854
855 if (useg->len <= split) {
856 return ERR_OK;
857 }
858
859 LWIP_ASSERT("split <= mss", split <= pcb->mss);
860 LWIP_ASSERT("useg->len > 0", useg->len > 0);
861
862 /* We should check that we don't exceed TCP_SND_QUEUELEN but we need
863 * to split this packet so we may actually exceed the max value by
864 * one!
865 */
866 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue: split_unsent_seg: %u\n", (unsigned int)pcb->snd_queuelen));
867
868 optflags = useg->flags;
869#if TCP_CHECKSUM_ON_COPY
870 /* Remove since checksum is not stored until after tcp_create_segment() */
871 optflags &= ~TF_SEG_DATA_CHECKSUMMED;
872#endif /* TCP_CHECKSUM_ON_COPY */
873 optlen = LWIP_TCP_OPT_LENGTH(optflags);
874 remainder = useg->len - split;
875
876 /* Create new pbuf for the remainder of the split */
877 p = pbuf_alloc(PBUF_TRANSPORT, remainder + optlen, PBUF_RAM);
878 if (p == NULL) {
879 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
880 ("tcp_split_unsent_seg: could not allocate memory for pbuf remainder %u\n", remainder));
881 goto memerr;
882 }
883
884 /* Offset into the original pbuf is past TCP/IP headers, options, and split amount */
885 offset = useg->p->tot_len - useg->len + split;
886 /* Copy remainder into new pbuf, headers and options will not be filled out */
887 if (pbuf_copy_partial(useg->p, (u8_t *)p->payload + optlen, remainder, offset ) != remainder) {
888 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
889 ("tcp_split_unsent_seg: could not copy pbuf remainder %u\n", remainder));
890 goto memerr;
891 }
892#if TCP_CHECKSUM_ON_COPY
893 /* calculate the checksum on remainder data */
894 tcp_seg_add_chksum(~inet_chksum((const u8_t *)p->payload + optlen, remainder), remainder,
895 &chksum, &chksum_swapped);
896#endif /* TCP_CHECKSUM_ON_COPY */
897
898 /* Options are created when calling tcp_output() */
899
900 /* Migrate flags from original segment */
901 split_flags = TCPH_FLAGS(useg->tcphdr);
902 remainder_flags = 0; /* ACK added in tcp_output() */
903
904 if (split_flags & TCP_PSH) {
905 split_flags &= ~TCP_PSH;
906 remainder_flags |= TCP_PSH;
907 }
908 if (split_flags & TCP_FIN) {
909 split_flags &= ~TCP_FIN;
910 remainder_flags |= TCP_FIN;
911 }
912 /* SYN should be left on split, RST should not be present with data */
913
914 seg = tcp_create_segment(pcb, p, remainder_flags, lwip_ntohl(useg->tcphdr->seqno) + split, optflags);
915 if (seg == NULL) {
916 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_LEVEL_SERIOUS,
917 ("tcp_split_unsent_seg: could not create new TCP segment\n"));
918 goto memerr;
919 }
920
921#if TCP_CHECKSUM_ON_COPY
922 seg->chksum = chksum;
923 seg->chksum_swapped = chksum_swapped;
924 seg->flags |= TF_SEG_DATA_CHECKSUMMED;
925#endif /* TCP_CHECKSUM_ON_COPY */
926
927 /* Remove this segment from the queue since trimming it may free pbufs */
928 pcb->snd_queuelen -= pbuf_clen(useg->p);
929
930 /* Trim the original pbuf into our split size. At this point our remainder segment must be setup
931 successfully because we are modifying the original segment */
932 pbuf_realloc(useg->p, useg->p->tot_len - remainder);
933 useg->len -= remainder;
934 TCPH_SET_FLAG(useg->tcphdr, split_flags);
935#if TCP_OVERSIZE_DBGCHECK
936 /* By trimming, realloc may have actually shrunk the pbuf, so clear oversize_left */
937 useg->oversize_left = 0;
938#endif /* TCP_OVERSIZE_DBGCHECK */
939
940 /* Add back to the queue with new trimmed pbuf */
941 pcb->snd_queuelen += pbuf_clen(useg->p);
942
943#if TCP_CHECKSUM_ON_COPY
944 /* The checksum on the split segment is now incorrect. We need to re-run it over the split */
945 useg->chksum = 0;
946 useg->chksum_swapped = 0;
947 q = useg->p;
948 offset = q->tot_len - useg->len; /* Offset due to exposed headers */
949
950 /* Advance to the pbuf where the offset ends */
951 while (q != NULL && offset > q->len) {
952 offset -= q->len;
953 q = q->next;
954 }
955 LWIP_ASSERT("Found start of payload pbuf", q != NULL);
956 /* Checksum the first payload pbuf accounting for offset, then other pbufs are all payload */
957 for (; q != NULL; offset = 0, q = q->next) {
958 tcp_seg_add_chksum(~inet_chksum((const u8_t *)q->payload + offset, q->len - offset), q->len - offset,
959 &useg->chksum, &useg->chksum_swapped);
960 }
961#endif /* TCP_CHECKSUM_ON_COPY */
962
963 /* Update number of segments on the queues. Note that length now may
964 * exceed TCP_SND_QUEUELEN! We don't have to touch pcb->snd_buf
965 * because the total amount of data is constant when packet is split */
966 pcb->snd_queuelen += pbuf_clen(seg->p);
967
968 /* Finally insert remainder into queue after split (which stays head) */
969 seg->next = useg->next;
970 useg->next = seg;
971
972#if TCP_OVERSIZE
973 /* If remainder is last segment on the unsent, ensure we clear the oversize amount
974 * because the remainder is always sized to the exact remaining amount */
975 if (seg->next == NULL) {
976 pcb->unsent_oversize = 0;
977 }
978#endif /* TCP_OVERSIZE */
979
980 return ERR_OK;
981memerr:
982 TCP_STATS_INC(tcp.memerr);
983
984 LWIP_ASSERT("seg == NULL", seg == NULL);
985 if (p != NULL) {
986 pbuf_free(p);
987 }
988
989 return ERR_MEM;
990}
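/*
 * Worked example for the split above (illustrative numbers): with pcb->unsent
 * pointing at a segment of useg->len == 1000 and split == 300, a pbuf for the
 * remaining 700 payload bytes (plus options) is allocated and filled via
 * pbuf_copy_partial(), a new tcp_seg is created at seqno + 300 carrying any
 * PSH/FIN flag of the original, the original pbuf is trimmed with
 * pbuf_realloc() so that useg->len becomes 300, and the remainder segment is
 * linked in directly after the (still queued) head.
 */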
991
992/**
993 * Called by tcp_close() to send a segment including FIN flag but not data.
994 * This FIN may be added to an existing segment or a new, otherwise empty
995 * segment is enqueued.
996 *
997 * @param pcb the tcp_pcb over which to send a segment
998 * @return ERR_OK if sent, another err_t otherwise
999 */
1000err_t
1001tcp_send_fin(struct tcp_pcb *pcb)
1002{
1003 LWIP_ASSERT("tcp_send_fin: invalid pcb", pcb != NULL);
1004
1005 /* first, try to add the fin to the last unsent segment */
1006 if (pcb->unsent != NULL) {
1007 struct tcp_seg *last_unsent;
1008 for (last_unsent = pcb->unsent; last_unsent->next != NULL;
1009 last_unsent = last_unsent->next);
1010
1011 if ((TCPH_FLAGS(last_unsent->tcphdr) & (TCP_SYN | TCP_FIN | TCP_RST)) == 0) {
1012 /* no SYN/FIN/RST flag in the header, we can add the FIN flag */
1013 TCPH_SET_FLAG(last_unsent->tcphdr, TCP_FIN);
1014 tcp_set_flags(pcb, TF_FIN);
1015 return ERR_OK;
1016 }
1017 }
1018 /* no data, no length, flags, copy=1, no optdata */
1019 return tcp_enqueue_flags(pcb, TCP_FIN);
1020}
1021
1022/**
1023 * Enqueue SYN or FIN for transmission.
1024 *
1025 * Called by @ref tcp_connect, tcp_listen_input, and @ref tcp_close
1026 * (via @ref tcp_send_fin)
1027 *
1028 * @param pcb Protocol control block for the TCP connection.
1029 * @param flags TCP header flags to set in the outgoing segment.
1030 */
1031err_t
1032tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags)
1033{
1034 struct pbuf *p;
1035 struct tcp_seg *seg;
1036 u8_t optflags = 0;
1037 u8_t optlen = 0;
1038
1039 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));
1040
1041 LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)",
1042 (flags & (TCP_SYN | TCP_FIN)) != 0);
1043 LWIP_ASSERT("tcp_enqueue_flags: invalid pcb", pcb != NULL);
1044
1045 /* No need to check pcb->snd_queuelen if only SYN or FIN are allowed! */
1046
1047 /* Get options for this segment. This is a special case since this is the
1048 only place where a SYN can be sent. */
1049 if (flags & TCP_SYN) {
1050 optflags = TF_SEG_OPTS_MSS;
1051#if LWIP_WND_SCALE
1052 if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_WND_SCALE)) {
1053 /* In a <SYN,ACK> (sent in state SYN_RCVD), the window scale option may only
1054 be sent if we received a window scale option from the remote host. */
1055 optflags |= TF_SEG_OPTS_WND_SCALE;
1056 }
1057#endif /* LWIP_WND_SCALE */
1058#if LWIP_TCP_SACK_OUT
1059 if ((pcb->state != SYN_RCVD) || (pcb->flags & TF_SACK)) {
1060 /* In a <SYN,ACK> (sent in state SYN_RCVD), the SACK_PERM option may only
1061 be sent if we received a SACK_PERM option from the remote host. */
1062 optflags |= TF_SEG_OPTS_SACK_PERM;
1063 }
1064#endif /* LWIP_TCP_SACK_OUT */
1065 }
1066#if LWIP_TCP_TIMESTAMPS
1067 if ((pcb->flags & TF_TIMESTAMP) || ((flags & TCP_SYN) && (pcb->state != SYN_RCVD))) {
1068 /* Make sure the timestamp option is only included in data segments if we
1069 agreed about it with the remote host (and in active open SYN segments). */
1070 optflags |= TF_SEG_OPTS_TS;
1071 }
1072#endif /* LWIP_TCP_TIMESTAMPS */
1073 optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb);
1074
1075 /* Allocate pbuf with room for TCP header + options */
1076 if ((p = pbuf_alloc(PBUF_TRANSPORT, optlen, PBUF_RAM)) == NULL) {
1077 tcp_set_flags(pcb, TF_NAGLEMEMERR);
1078 TCP_STATS_INC(tcp.memerr);
1079 return ERR_MEM;
1080 }
1081 LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen",
1082 (p->len >= optlen));
1083
1084 /* Allocate memory for tcp_seg, and fill in fields. */
1085 if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) {
1086 tcp_set_flags(pcb, TF_NAGLEMEMERR);
1087 TCP_STATS_INC(tcp.memerr);
1088 return ERR_MEM;
1089 }
1090 LWIP_ASSERT("seg->tcphdr not aligned", ((mem_ptr_t)seg->tcphdr % LWIP_MIN(MEM_ALIGNMENT, 4)) == 0);
1091 LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0);
1092
1093 LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE,
1094 ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n",
1095 lwip_ntohl(seg->tcphdr->seqno),
1096 lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg),
1097 (u16_t)flags));
1098
1099 /* Now append seg to pcb->unsent queue */
1100 if (pcb->unsent == NULL) {
1101 pcb->unsent = seg;
1102 } else {
1103 struct tcp_seg *useg;
1104 for (useg = pcb->unsent; useg->next != NULL; useg = useg->next);
1105 useg->next = seg;
1106 }
1107#if TCP_OVERSIZE
1108 /* The new unsent tail has no space */
1109 pcb->unsent_oversize = 0;
1110#endif /* TCP_OVERSIZE */
1111
1112 /* SYN and FIN bump the sequence number */
1113 if ((flags & TCP_SYN) || (flags & TCP_FIN)) {
1114 pcb->snd_lbb++;
1115 /* optlen does not influence snd_buf */
1116 }
1117 if (flags & TCP_FIN) {
1118 tcp_set_flags(pcb, TF_FIN);
1119 }
1120
1121 /* update number of segments on the queues */
1122 pcb->snd_queuelen += pbuf_clen(seg->p);
1123 LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"S16_F" (after enqueued)\n", pcb->snd_queuelen));
1124 if (pcb->snd_queuelen != 0) {
1125 LWIP_ASSERT("tcp_enqueue_flags: invalid queue length",
1126 pcb->unacked != NULL || pcb->unsent != NULL);
1127 }
1128
1129 return ERR_OK;
1130}
1131
1132#if LWIP_TCP_TIMESTAMPS
1133/* Build a timestamp option (12 bytes long) at the specified options pointer.
1134 *
1135 * @param pcb tcp_pcb
1136 * @param opts option pointer where to store the timestamp option
1137 */
1138static void
1139tcp_build_timestamp_option(const struct tcp_pcb *pcb, u32_t *opts)
1140{
1141 LWIP_ASSERT("tcp_build_timestamp_option: invalid pcb", pcb != NULL);
1142
1143 /* Pad with two NOP options to make everything nicely aligned */
1144 opts[0] = PP_HTONL(0x0101080A);
1145 opts[1] = lwip_htonl(sys_now());
1146 opts[2] = lwip_htonl(pcb->ts_recent);
1147}
1148#endif
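/*
 * Byte layout of the option built above (for reference): 0x0101080A encodes
 * NOP (0x01), NOP (0x01), kind 8 (timestamp), length 10, followed by the
 * 4-byte TSval (sys_now()) and 4-byte TSecr (pcb->ts_recent), 12 bytes total.
 */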
1149
1150#if LWIP_TCP_SACK_OUT
1151/**
1152 * Calculates the number of SACK entries that should be generated.
1153 * It takes into account whether TF_SACK flag is set,
1154 * the number of SACK entries in tcp_pcb that are valid,
1155 * as well as the available options size.
1156 *
1157 * @param pcb tcp_pcb
1158 * @param optlen the length of other TCP options (in bytes)
1159 * @return the number of SACK ranges that can be used
1160 */
1161static u8_t
1162tcp_get_num_sacks(const struct tcp_pcb *pcb, u8_t optlen)
1163{
1164 u8_t num_sacks = 0;
1165
1166 LWIP_ASSERT("tcp_get_num_sacks: invalid pcb", pcb != NULL);
1167
1168 if (pcb->flags & TF_SACK) {
1169 u8_t i;
1170
1171 /* The first SACK takes up 12 bytes (it includes SACK header and two NOP options),
1172 each additional one - 8 bytes. */
1173 optlen += 12;
1174
1175 /* Max options size = 40, number of SACK array entries = LWIP_TCP_MAX_SACK_NUM */
1176 for (i = 0; (i < LWIP_TCP_MAX_SACK_NUM) && (optlen <= TCP_MAX_OPTION_BYTES) &&
1177 LWIP_TCP_SACK_VALID(pcb, i); ++i) {
1178 ++num_sacks;
1179 optlen += 8;
1180 }
1181 }
1182
1183 return num_sacks;
1184}
1185
1186/** Build a SACK option (12 or more bytes long) at the specified options pointer.
1187 *
1188 * @param pcb tcp_pcb
1189 * @param opts option pointer where to store the SACK option
1190 * @param num_sacks the number of SACKs to store
1191 */
1192static void
1193tcp_build_sack_option(const struct tcp_pcb *pcb, u32_t *opts, u8_t num_sacks)
1194{
1195 u8_t i;
1196
1197 LWIP_ASSERT("tcp_build_sack_option: invalid pcb", pcb != NULL);
1198 LWIP_ASSERT("tcp_build_sack_option: invalid opts", opts != NULL);
1199
1200 /* Pad with two NOP options to make everything nicely aligned.
1201 We add the length (of just the SACK option, not the NOPs in front of it),
1202 which is 2B of header, plus 8B for each SACK. */
1203 *(opts++) = PP_HTONL(0x01010500 + 2 + num_sacks * 8);
1204
1205 for (i = 0; i < num_sacks; ++i) {
1206 *(opts++) = lwip_htonl(pcb->rcv_sacks[i].left);
1207 *(opts++) = lwip_htonl(pcb->rcv_sacks[i].right);
1208 }
1209}
1210
1211#endif
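/*
 * Byte layout of the SACK option built above (for reference): with
 * num_sacks == 2 the first word is 0x01010512, i.e. NOP, NOP, kind 5 (SACK),
 * length 18 (2 bytes of header plus 8 bytes per block), followed by two
 * left/right sequence number pairs, 20 bytes of options in total.
 */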
1212
1213#if LWIP_WND_SCALE
1214/** Build a window scale option (3 bytes long) at the specified options pointer.
1215 *
1216 * @param opts option pointer where to store the window scale option
1217 */
1218static void
1219tcp_build_wnd_scale_option(u32_t *opts)
1220{
1221 LWIP_ASSERT("tcp_build_wnd_scale_option: invalid opts", opts != NULL);
1222
1223 /* Pad with one NOP option to make everything nicely aligned */
1224 opts[0] = PP_HTONL(0x01030300 | TCP_RCV_SCALE);
1225}
1226#endif
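/*
 * Byte layout of the option built above (for reference):
 * 0x01030300 | TCP_RCV_SCALE encodes NOP (0x01), kind 3 (window scale),
 * length 3, and the shift count in the low byte, 4 bytes in total including
 * the padding NOP.
 */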
1227
1228/**
1229 * @ingroup tcp_raw
1230 * Find out what we can send and send it
1231 *
1232 * @param pcb Protocol control block for the TCP connection to send data
1233 * @return ERR_OK if data has been sent or nothing to send
1234 * another err_t on error
1235 */
1236err_t
1237tcp_output(struct tcp_pcb *pcb)
1238{
1239 struct tcp_seg *seg, *useg;
1240 u32_t wnd, snd_nxt;
1241 err_t err;
1242 struct netif *netif;
1243#if TCP_CWND_DEBUG
1244 s16_t i = 0;
1245#endif /* TCP_CWND_DEBUG */
1246
1247 LWIP_ASSERT_CORE_LOCKED();
1248
1249 LWIP_ASSERT("tcp_output: invalid pcb", pcb != NULL);
1250 /* pcb->state LISTEN not allowed here */
1251 LWIP_ASSERT("don't call tcp_output for listen-pcbs",
1252 pcb->state != LISTEN);
1253
1254 /* First, check if we are invoked by the TCP input processing
1255 code. If so, we do not output anything. Instead, we rely on the
1256 input processing code to call us when input processing is done
1257 with. */
1258 if (tcp_input_pcb == pcb) {
1259 return ERR_OK;
1260 }
1261
1262 wnd = LWIP_MIN(pcb->snd_wnd, pcb->cwnd);
1263
1264 seg = pcb->unsent;
1265
1266 if (seg == NULL) {
1267 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: nothing to send (%p)\n",
1268 (void *)pcb->unsent));
1269 LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F
1270 ", cwnd %"TCPWNDSIZE_F", wnd %"U32_F
1271 ", seg == NULL, ack %"U32_F"\n",
1272 pcb->snd_wnd, pcb->cwnd, wnd, pcb->lastack));
1273
1274 /* If the TF_ACK_NOW flag is set and the ->unsent queue is empty, construct
1275 * an empty ACK segment and send it. */
1276 if (pcb->flags & TF_ACK_NOW) {
1277 return tcp_send_empty_ack(pcb);
1278 }
1279 /* nothing to send: shortcut out of here */
1280 goto output_done;
1281 } else {
1282 LWIP_DEBUGF(TCP_CWND_DEBUG,
1283 ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F
1284 ", effwnd %"U32_F", seq %"U32_F", ack %"U32_F"\n",
1285 pcb->snd_wnd, pcb->cwnd, wnd,
1286 lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len,
1287 lwip_ntohl(seg->tcphdr->seqno), pcb->lastack));
1288 }
1289
1290 netif = tcp_route(pcb, &pcb->local_ip, &pcb->remote_ip);
1291 if (netif == NULL) {
1292 return ERR_RTE;
1293 }
1294
1295 /* If we don't have a local IP address, we get one from netif */
1296 if (ip_addr_isany(&pcb->local_ip)) {
1297 const ip_addr_t *local_ip = ip_netif_get_local_ip(netif, &pcb->remote_ip);
1298 if (local_ip == NULL) {
1299 return ERR_RTE;
1300 }
1301 ip_addr_copy(pcb->local_ip, *local_ip);
1302 }
1303
1304 /* Handle the current segment not fitting within the window */
1305 if (lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len > wnd) {
1306 /* We need to start the persistent timer when the next unsent segment does not fit
1307 * within the remaining (could be 0) send window and RTO timer is not running (we
1308 * have no in-flight data). If window is still too small after persist timer fires,
1309 * then we split the segment. We don't consider the congestion window since a cwnd
1310 * smaller than 1 SMSS implies in-flight data
1311 */
1312 if (wnd == pcb->snd_wnd && pcb->unacked == NULL && pcb->persist_backoff == 0) {
1313 pcb->persist_cnt = 0;
1314 pcb->persist_backoff = 1;
1315 pcb->persist_probe = 0;
1316 }
1317 /* We need an ACK, but can't send data now, so send an empty ACK */
1318 if (pcb->flags & TF_ACK_NOW) {
1319 return tcp_send_empty_ack(pcb);
1320 }
1321 goto output_done;
1322 }
1323 /* Stop persist timer, above conditions are not active */
1324 pcb->persist_backoff = 0;
1325
1326 /* useg should point to last segment on unacked queue */
1327 useg = pcb->unacked;
1328 if (useg != NULL) {
1329 for (; useg->next != NULL; useg = useg->next);
1330 }
1331 /* data available and window allows it to be sent? */
1332 while (seg != NULL &&
1333 lwip_ntohl(seg->tcphdr->seqno) - pcb->lastack + seg->len <= wnd) {
1334 LWIP_ASSERT("RST not expected here!",
1335 (TCPH_FLAGS(seg->tcphdr) & TCP_RST) == 0);
1336 /* Stop sending if the nagle algorithm would prevent it
1337 * Don't stop:
1338 * - if tcp_write had a memory error before (prevent delayed ACK timeout) or
1339 * - if FIN was already enqueued for this PCB (SYN is always alone in a segment -
1340 * either seg->next != NULL or pcb->unacked == NULL;
1341 * RST is not sent using tcp_write/tcp_output.
1342 */
1343 if ((tcp_do_output_nagle(pcb) == 0) &&
1344 ((pcb->flags & (TF_NAGLEMEMERR | TF_FIN)) == 0)) {
1345 break;
1346 }
1347#if TCP_CWND_DEBUG
1348 LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_output: snd_wnd %"TCPWNDSIZE_F", cwnd %"TCPWNDSIZE_F", wnd %"U32_F", effwnd %"U32_F", seq %"U32_F", ack %"U32_F", i %"S16_F"\n",
1349 pcb->snd_wnd, pcb->cwnd, wnd,
1350 lwip_ntohl(seg->tcphdr->seqno) + seg->len -
1351 pcb->lastack,
1352 lwip_ntohl(seg->tcphdr->seqno), pcb->lastack, i));
1353 ++i;
1354#endif /* TCP_CWND_DEBUG */
1355
1356 if (pcb->state != SYN_SENT) {
1357 TCPH_SET_FLAG(seg->tcphdr, TCP_ACK);
1358 }
1359
1360 err = tcp_output_segment(seg, pcb, netif);
1361 if (err != ERR_OK) {
1362 /* segment could not be sent, for whatever reason */
1363 tcp_set_flags(pcb, TF_NAGLEMEMERR);
1364 return err;
1365 }
1366#if TCP_OVERSIZE_DBGCHECK
1367 seg->oversize_left = 0;
1368#endif /* TCP_OVERSIZE_DBGCHECK */
1369 pcb->unsent = seg->next;
1370 if (pcb->state != SYN_SENT) {
1371 tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
1372 }
1373 snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg);
1374 if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
1375 pcb->snd_nxt = snd_nxt;
1376 }
1377 /* put segment on unacknowledged list if length > 0 */
1378 if (TCP_TCPLEN(seg) > 0) {
1379 seg->next = NULL;
1380 /* unacked list is empty? */
1381 if (pcb->unacked == NULL) {
1382 pcb->unacked = seg;
1383 useg = seg;
1384 /* unacked list is not empty? */
1385 } else {
1386 /* In the case of fast retransmit, the packet should not go to the tail
1387 * of the unacked queue, but rather somewhere before it. We need to check for
1388 * this case. -STJ Jul 27, 2004 */
1389 if (TCP_SEQ_LT(lwip_ntohl(seg->tcphdr->seqno), lwip_ntohl(useg->tcphdr->seqno))) {
1390 /* add segment to before tail of unacked list, keeping the list sorted */
1391 struct tcp_seg **cur_seg = &(pcb->unacked);
1392 while (*cur_seg &&
1393 TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) {
1394 cur_seg = &((*cur_seg)->next );
1395 }
1396 seg->next = (*cur_seg);
1397 (*cur_seg) = seg;
1398 } else {
1399 /* add segment to tail of unacked list */
1400 useg->next = seg;
1401 useg = useg->next;
1402 }
1403 }
1404 /* do not queue empty segments on the unacked list */
1405 } else {
1406 tcp_seg_free(seg);
1407 }
1408 seg = pcb->unsent;
1409 }
1410#if TCP_OVERSIZE
1411 if (pcb->unsent == NULL) {
1412 /* last unsent has been removed, reset unsent_oversize */
1413 pcb->unsent_oversize = 0;
1414 }
1415#endif /* TCP_OVERSIZE */
1416
1417output_done:
1418 tcp_clear_flags(pcb, TF_NAGLEMEMERR);
1419 return ERR_OK;
1420}
1421
1422/** Check if a segment's pbufs are used by someone other than TCP.
1423 * This can happen on retransmission if the pbuf of this segment is still
1424 * referenced by the netif driver due to deferred transmission.
1425 * This is the case (only!) if someone down the TX call path called
1426 * pbuf_ref() on one of the pbufs!
1427 *
1428 * @arg seg the tcp segment to check
1429 * @return 1 if ref != 1, 0 if ref == 1
1430 */
1431static int
1432tcp_output_segment_busy(const struct tcp_seg *seg)
1433{
1434 LWIP_ASSERT("tcp_output_segment_busy: invalid seg", seg != NULL);
1435
1436 /* We only need to check the first pbuf here:
1437 If a pbuf is queued for transmission, a driver calls pbuf_ref(),
1438 which only changes the ref count of the first pbuf */
1439 if (seg->p->ref != 1) {
1440 /* other reference found */
1441 return 1;
1442 }
1443 /* no other references found */
1444 return 0;
1445}
1446
1447/**
1448 * Called by tcp_output() to actually send a TCP segment over IP.
1449 *
1450 * @param seg the tcp_seg to send
1451 * @param pcb the tcp_pcb for the TCP connection used to send the segment
1452 * @param netif the netif used to send the segment
1453 */
1454static err_t
1455tcp_output_segment(struct tcp_seg *seg, struct tcp_pcb *pcb, struct netif *netif)
1456{
1457 err_t err;
1458 u16_t len;
1459 u32_t *opts;
1460#if TCP_CHECKSUM_ON_COPY
1461 int seg_chksum_was_swapped = 0;
1462#endif
1463
1464 LWIP_ASSERT("tcp_output_segment: invalid seg", seg != NULL);
1465 LWIP_ASSERT("tcp_output_segment: invalid pcb", pcb != NULL);
1466 LWIP_ASSERT("tcp_output_segment: invalid netif", netif != NULL);
1467
1468 if (tcp_output_segment_busy(seg)) {
1469 /* This should not happen: rexmit functions should have checked this.
1470 However, since this function modifies p->len, we must not continue in this case. */
1471 LWIP_DEBUGF(TCP_RTO_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("tcp_output_segment: segment busy\n"));
1472 return ERR_OK;
1473 }
1474
1475 /* The TCP header has already been constructed, but the ackno and
1476 wnd fields remain. */
1477 seg->tcphdr->ackno = lwip_htonl(pcb->rcv_nxt);
1478
1479 /* advertise our receive window size in this TCP segment */
1480#if LWIP_WND_SCALE
1481 if (seg->flags & TF_SEG_OPTS_WND_SCALE) {
1482 /* The Window field in a SYN segment itself (the only type where we send
1483 the window scale option) is never scaled. */
1484 seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(pcb->rcv_ann_wnd));
1485 } else
1486#endif /* LWIP_WND_SCALE */
1487 {
1488 seg->tcphdr->wnd = lwip_htons(TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd)));
1489 }
1490
1491 pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
1492
1493 /* Add any requested options. NB MSS option is only set on SYN
1494 packets, so ignore it here */
1495 /* cast through void* to get rid of alignment warnings */
1496 opts = (u32_t *)(void *)(seg->tcphdr + 1);
1497 if (seg->flags & TF_SEG_OPTS_MSS) {
1498 u16_t mss;
1499#if TCP_CALCULATE_EFF_SEND_MSS
1500 mss = tcp_eff_send_mss_netif(TCP_MSS, netif, &pcb->remote_ip);
1501#else /* TCP_CALCULATE_EFF_SEND_MSS */
1502 mss = TCP_MSS;
1503#endif /* TCP_CALCULATE_EFF_SEND_MSS */
1504 *opts = TCP_BUILD_MSS_OPTION(mss);
1505 opts += 1;
1506 }
1507#if LWIP_TCP_TIMESTAMPS
1508 pcb->ts_lastacksent = pcb->rcv_nxt;
1509
1510 if (seg->flags & TF_SEG_OPTS_TS) {
1511 tcp_build_timestamp_option(pcb, opts);
1512 opts += 3;
1513 }
1514#endif
1515#if LWIP_WND_SCALE
1516 if (seg->flags & TF_SEG_OPTS_WND_SCALE) {
1517 tcp_build_wnd_scale_option(opts);
1518 opts += 1;
1519 }
1520#endif
1521#if LWIP_TCP_SACK_OUT
1522 if (seg->flags & TF_SEG_OPTS_SACK_PERM) {
1523    /* Pad with two NOP options to make everything nicely aligned:
1524     * 0x01010402 below = NOP, NOP, SACK-permitted (kind 4), length 2.
1525     * NOTE: When we send both timestamp and SACK_PERM options, we could use the
1526     * first two NOPs before the timestamp to store the SACK_PERM option,
1527     * but that would complicate the code. */
1528 *(opts++) = PP_HTONL(0x01010402);
1529 }
1530#endif
1531
1532  /* Set the retransmission timer running if it is not currently enabled.
1533 This must be set before checking the route. */
1534 if (pcb->rtime < 0) {
1535 pcb->rtime = 0;
1536 }
1537
1538 if (pcb->rttest == 0) {
1539 pcb->rttest = tcp_ticks;
1540 pcb->rtseq = lwip_ntohl(seg->tcphdr->seqno);
1541
1542 LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_output_segment: rtseq %"U32_F"\n", pcb->rtseq));
1543 }
1544 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output_segment: %"U32_F":%"U32_F"\n",
1545 lwip_htonl(seg->tcphdr->seqno), lwip_htonl(seg->tcphdr->seqno) +
1546 seg->len));
1547
1548 len = (u16_t)((u8_t *)seg->tcphdr - (u8_t *)seg->p->payload);
1549 if (len == 0) {
1550 /** Exclude retransmitted segments from this count. */
1551 MIB2_STATS_INC(mib2.tcpoutsegs);
1552 }
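  /* Background for the len computation above: on the first transmission
     seg->p->payload still points at the TCP header, so len == 0 and the segment
     is counted; once lower layers have prepended their headers (IP and possibly
     link-layer) during an earlier send, payload points in front of the TCP
     header, len becomes nonzero and the retransmission is excluded. The lines
     below trim those headers again so the pbuf starts at the TCP header and it
     can be updated in place. */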
1553
1554 seg->p->len -= len;
1555 seg->p->tot_len -= len;
1556
1557 seg->p->payload = seg->tcphdr;
1558
1559 seg->tcphdr->chksum = 0;
1560
1561#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS
1562 opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(seg->p, seg->tcphdr, pcb, opts);
1563#endif
1564 LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(seg->tcphdr + 1)) + LWIP_TCP_OPT_LENGTH_SEGMENT(seg->flags, pcb));
1565
1566#if CHECKSUM_GEN_TCP
1567 IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
1568#if TCP_CHECKSUM_ON_COPY
1569 u32_t acc;
1570#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
1571 u16_t chksum_slow = ip_chksum_pseudo(seg->p, IP_PROTO_TCP,
1572 seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip);
1573#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
1574 if ((seg->flags & TF_SEG_DATA_CHECKSUMMED) == 0) {
1575 LWIP_ASSERT("data included but not checksummed",
1576 seg->p->tot_len == TCPH_HDRLEN_BYTES(seg->tcphdr));
1577 }
1578
1579 /* rebuild TCP header checksum (TCP header changes for retransmissions!) */
1580 acc = ip_chksum_pseudo_partial(seg->p, IP_PROTO_TCP,
1581 seg->p->tot_len, TCPH_HDRLEN_BYTES(seg->tcphdr), &pcb->local_ip, &pcb->remote_ip);
1582 /* add payload checksum */
1583 if (seg->chksum_swapped) {
1584 seg_chksum_was_swapped = 1;
1585 seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum);
1586 seg->chksum_swapped = 0;
1587 }
1588 acc = (u16_t)~acc + seg->chksum;
1589 seg->tcphdr->chksum = (u16_t)~FOLD_U32T(acc);
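    /* One's-complement combination above (illustrative): ~acc is the 16-bit sum
       over the pseudo-header and TCP header, seg->chksum is the payload sum that
       was computed while the data was copied in (TCP_CHECKSUM_ON_COPY). Adding
       them can carry into bit 16, so FOLD_U32T() folds the carry back in before
       the final complement, e.g. 0xFFFE + 0x0003 = 0x10001 -> 0x0002, giving a
       checksum of 0xFFFD. */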
1590#if TCP_CHECKSUM_ON_COPY_SANITY_CHECK
1591 if (chksum_slow != seg->tcphdr->chksum) {
1592 TCP_CHECKSUM_ON_COPY_SANITY_CHECK_FAIL(
1593 ("tcp_output_segment: calculated checksum is %"X16_F" instead of %"X16_F"\n",
1594 seg->tcphdr->chksum, chksum_slow));
1595 seg->tcphdr->chksum = chksum_slow;
1596 }
1597#endif /* TCP_CHECKSUM_ON_COPY_SANITY_CHECK */
1598#else /* TCP_CHECKSUM_ON_COPY */
1599 seg->tcphdr->chksum = ip_chksum_pseudo(seg->p, IP_PROTO_TCP,
1600 seg->p->tot_len, &pcb->local_ip, &pcb->remote_ip);
1601#endif /* TCP_CHECKSUM_ON_COPY */
1602 }
1603#endif /* CHECKSUM_GEN_TCP */
1604 TCP_STATS_INC(tcp.xmit);
1605
1606 NETIF_SET_HINTS(netif, &(pcb->netif_hints));
1607 err = ip_output_if(seg->p, &pcb->local_ip, &pcb->remote_ip, pcb->ttl,
1608 pcb->tos, IP_PROTO_TCP, netif);
1609 NETIF_RESET_HINTS(netif);
1610
1611#if TCP_CHECKSUM_ON_COPY
1612 if (seg_chksum_was_swapped) {
1613 /* if data is added to this segment later, chksum needs to be swapped,
1614 so restore this now */
1615 seg->chksum = SWAP_BYTES_IN_WORD(seg->chksum);
1616 seg->chksum_swapped = 1;
1617 }
1618#endif
1619
1620 return err;
1621}
1622
1623/**
1624 * Requeue all unacked segments for retransmission (the "prepare" half of an
1625 * RTO retransmission): segments are moved back to the unsent queue, but
1626 * nothing is sent yet. Called by tcp_slowtmr() and tcp_rexmit_rto().
1627 *
1628 * @param pcb the tcp_pcb for which to re-enqueue all unacked segments
1629 */
1630err_t
1631tcp_rexmit_rto_prepare(struct tcp_pcb *pcb)
1632{
1633 struct tcp_seg *seg;
1634
1635 LWIP_ASSERT("tcp_rexmit_rto_prepare: invalid pcb", pcb != NULL);
1636
1637 if (pcb->unacked == NULL) {
1638 return ERR_VAL;
1639 }
1640
1641 /* Move all unacked segments to the head of the unsent queue.
1642     However, give up if any of the unacked pbufs are still referenced by the
1643 netif driver due to deferred transmission. No point loading the link further
1644 if it is struggling to flush its buffered writes. */
1645 for (seg = pcb->unacked; seg->next != NULL; seg = seg->next) {
1646 if (tcp_output_segment_busy(seg)) {
1647      LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto_prepare: segment busy\n"));
1648 return ERR_VAL;
1649 }
1650 }
1651 if (tcp_output_segment_busy(seg)) {
1652    LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit_rto_prepare: segment busy\n"));
1653 return ERR_VAL;
1654 }
1655 /* concatenate unsent queue after unacked queue */
1656 seg->next = pcb->unsent;
1657#if TCP_OVERSIZE_DBGCHECK
1658 /* if last unsent changed, we need to update unsent_oversize */
1659 if (pcb->unsent == NULL) {
1660 pcb->unsent_oversize = seg->oversize_left;
1661 }
1662#endif /* TCP_OVERSIZE_DBGCHECK */
1663 /* unsent queue is the concatenated queue (of unacked, unsent) */
1664 pcb->unsent = pcb->unacked;
1665 /* unacked queue is now empty */
1666 pcb->unacked = NULL;
1667
1668 /* Mark RTO in-progress */
1669 tcp_set_flags(pcb, TF_RTO);
1670 /* Record the next byte following retransmit */
1671 pcb->rto_end = lwip_ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg);
1672 /* Don't take any RTT measurements after retransmitting. */
1673 pcb->rttest = 0;
1674
1675 return ERR_OK;
1676}
1677
1678/**
1679 * Send the segments requeued by tcp_rexmit_rto_prepare() (the "commit" half of
1680 * an RTO retransmission): increment the retransmission counter and call
1681 * tcp_output(). Called by tcp_slowtmr() and tcp_rexmit_rto().
1682 *
1683 * @param pcb the tcp_pcb whose requeued segments are to be (re)transmitted
1684 */
1685void
1686tcp_rexmit_rto_commit(struct tcp_pcb *pcb)
1687{
1688 LWIP_ASSERT("tcp_rexmit_rto_commit: invalid pcb", pcb != NULL);
1689
1690 /* increment number of retransmissions */
1691 if (pcb->nrtx < 0xFF) {
1692 ++pcb->nrtx;
1693 }
1694 /* Do the actual retransmission */
1695 tcp_output(pcb);
1696}
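/* Usage sketch (illustrative, simplified from what tcp_slowtmr() does between
 * the two halves; the congestion-control step shown here is an assumption for
 * the example, not a copy of tcp_slowtmr()):
 *
 *   if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) {
 *     // ...reduce ssthresh/cwnd and back off pcb->rto here...
 *     tcp_rexmit_rto_commit(pcb);
 *   }
 *
 * tcp_rexmit_rto() below is the trivial pairing with nothing in between. */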
1697
1698/**
1699 * Requeue all unacked segments for retransmission
1700 *
1701 * Called by tcp_process() only; tcp_slowtmr() calls "prepare" and "commit"
1702 * separately because it needs to do some things in between.
1703 *
1704 * @param pcb the tcp_pcb for which to re-enqueue all unacked segments
1705 */
1706void
1707tcp_rexmit_rto(struct tcp_pcb *pcb)
1708{
1709 LWIP_ASSERT("tcp_rexmit_rto: invalid pcb", pcb != NULL);
1710
1711 if (tcp_rexmit_rto_prepare(pcb) == ERR_OK) {
1712 tcp_rexmit_rto_commit(pcb);
1713 }
1714}
1715
1716/**
1717 * Requeue the first unacked segment for retransmission
1718 *
1719 * Called by tcp_receive() for fast retransmit.
1720 *
1721 * @param pcb the tcp_pcb for which to retransmit the first unacked segment
1722 */
1723err_t
1724tcp_rexmit(struct tcp_pcb *pcb)
1725{
1726 struct tcp_seg *seg;
1727 struct tcp_seg **cur_seg;
1728
1729 LWIP_ASSERT("tcp_rexmit: invalid pcb", pcb != NULL);
1730
1731 if (pcb->unacked == NULL) {
1732 return ERR_VAL;
1733 }
1734
1735 seg = pcb->unacked;
1736
1737 /* Give up if the segment is still referenced by the netif driver
1738 due to deferred transmission. */
1739 if (tcp_output_segment_busy(seg)) {
1740 LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_rexmit busy\n"));
1741 return ERR_VAL;
1742 }
1743
1744 /* Move the first unacked segment to the unsent queue */
1745 /* Keep the unsent queue sorted. */
1746 pcb->unacked = seg->next;
1747
1748 cur_seg = &(pcb->unsent);
1749 while (*cur_seg &&
1750 TCP_SEQ_LT(lwip_ntohl((*cur_seg)->tcphdr->seqno), lwip_ntohl(seg->tcphdr->seqno))) {
1751 cur_seg = &((*cur_seg)->next );
1752 }
1753 seg->next = *cur_seg;
1754 *cur_seg = seg;
1755#if TCP_OVERSIZE
1756 if (seg->next == NULL) {
1757 /* the retransmitted segment is last in unsent, so reset unsent_oversize */
1758 pcb->unsent_oversize = 0;
1759 }
1760#endif /* TCP_OVERSIZE */
1761
1762 if (pcb->nrtx < 0xFF) {
1763 ++pcb->nrtx;
1764 }
1765
1766 /* Don't take any rtt measurements after retransmitting. */
1767 pcb->rttest = 0;
1768
1769 /* Do the actual retransmission. */
1770 MIB2_STATS_INC(mib2.tcpretranssegs);
1771  /* No need to call tcp_output: we are always called from tcp_input(), where
1772     tcp_output() would return immediately; tcp_input() calls it once it is done. */
1773 return ERR_OK;
1774}
1775
1776
1777/**
1778 * Handle retransmission after three dupacks received
1779 *
1780 * @param pcb the tcp_pcb for which to retransmit the first unacked segment
1781 */
1782void
1783tcp_rexmit_fast(struct tcp_pcb *pcb)
1784{
1785 LWIP_ASSERT("tcp_rexmit_fast: invalid pcb", pcb != NULL);
1786
1787 if (pcb->unacked != NULL && !(pcb->flags & TF_INFR)) {
1788 /* This is fast retransmit. Retransmit the first unacked segment. */
1789 LWIP_DEBUGF(TCP_FR_DEBUG,
1790                ("tcp_rexmit_fast: dupacks %"U16_F" (%"U32_F
1791 "), fast retransmit %"U32_F"\n",
1792 (u16_t)pcb->dupacks, pcb->lastack,
1793 lwip_ntohl(pcb->unacked->tcphdr->seqno)));
1794 if (tcp_rexmit(pcb) == ERR_OK) {
1795 /* Set ssthresh to half of the minimum of the current
1796 * cwnd and the advertised window */
1797 pcb->ssthresh = LWIP_MIN(pcb->cwnd, pcb->snd_wnd) / 2;
1798
1799 /* The minimum value for ssthresh should be 2 MSS */
1800 if (pcb->ssthresh < (2U * pcb->mss)) {
1801 LWIP_DEBUGF(TCP_FR_DEBUG,
1802                    ("tcp_rexmit_fast: The minimum value for ssthresh %"TCPWNDSIZE_F
1803 " should be min 2 mss %"U16_F"...\n",
1804 pcb->ssthresh, (u16_t)(2 * pcb->mss)));
1805 pcb->ssthresh = 2 * pcb->mss;
1806 }
1807
1808 pcb->cwnd = pcb->ssthresh + 3 * pcb->mss;
1809 tcp_set_flags(pcb, TF_INFR);
1810
1811 /* Reset the retransmission timer to prevent immediate rto retransmissions */
1812 pcb->rtime = 0;
1813 }
1814 }
1815}
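/* Worked example for the window adjustment above (illustrative values):
 * with cwnd == 8000, snd_wnd == 6000 and mss == 1460, ssthresh becomes
 * min(8000, 6000) / 2 = 3000 (already >= 2 * 1460 = 2920, so no clamping) and
 * cwnd is inflated to ssthresh + 3 * mss = 3000 + 4380 = 7380 while the pcb is
 * in fast recovery (TF_INFR). */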
1816
1817static struct pbuf *
1818tcp_output_alloc_header_common(u32_t ackno, u16_t optlen, u16_t datalen,
1819 u32_t seqno_be /* already in network byte order */,
1820 u16_t src_port, u16_t dst_port, u8_t flags, u16_t wnd)
1821{
1822 struct tcp_hdr *tcphdr;
1823 struct pbuf *p;
1824
1825 p = pbuf_alloc(PBUF_IP, TCP_HLEN + optlen + datalen, PBUF_RAM);
1826 if (p != NULL) {
1827 LWIP_ASSERT("check that first pbuf can hold struct tcp_hdr",
1828 (p->len >= TCP_HLEN + optlen));
1829 tcphdr = (struct tcp_hdr *)p->payload;
1830 tcphdr->src = lwip_htons(src_port);
1831 tcphdr->dest = lwip_htons(dst_port);
1832 tcphdr->seqno = seqno_be;
1833 tcphdr->ackno = lwip_htonl(ackno);
1834 TCPH_HDRLEN_FLAGS_SET(tcphdr, (5 + optlen / 4), flags);
1835 tcphdr->wnd = lwip_htons(wnd);
1836 tcphdr->chksum = 0;
1837 tcphdr->urgp = 0;
1838 }
1839 return p;
1840}
1841
1842/** Allocate a pbuf and create a tcphdr at p->payload, used for output
1843 * functions other than the default tcp_output -> tcp_output_segment
1844 * (e.g. tcp_send_empty_ack, tcp_keepalive, tcp_zero_window_probe)
1845 *
1846 * @param pcb tcp pcb for which to send a packet (used to initialize tcp_hdr)
1847 * @param optlen length of header-options
1848 * @param datalen length of tcp data to reserve in pbuf
1849 * @param seqno_be seqno in network byte order (big-endian)
1850 * @return pbuf with p->payload being the tcp_hdr
1851 */
1852static struct pbuf *
1853tcp_output_alloc_header(struct tcp_pcb *pcb, u16_t optlen, u16_t datalen,
1854 u32_t seqno_be /* already in network byte order */)
1855{
1856 struct pbuf *p;
1857
1858 LWIP_ASSERT("tcp_output_alloc_header: invalid pcb", pcb != NULL);
1859
1860 p = tcp_output_alloc_header_common(pcb->rcv_nxt, optlen, datalen,
1861 seqno_be, pcb->local_port, pcb->remote_port, TCP_ACK,
1862 TCPWND_MIN16(RCV_WND_SCALE(pcb, pcb->rcv_ann_wnd)));
1863 if (p != NULL) {
1864 /* If we're sending a packet, update the announced right window edge */
1865 pcb->rcv_ann_right_edge = pcb->rcv_nxt + pcb->rcv_ann_wnd;
1866 }
1867 return p;
1868}
1869
1870/* Fill in options for control segments */
1871static void
1872tcp_output_fill_options(const struct tcp_pcb *pcb, struct pbuf *p, u8_t optflags, u8_t num_sacks)
1873{
1874 struct tcp_hdr *tcphdr;
1875 u32_t *opts;
1876 u16_t sacks_len = 0;
1877
1878 LWIP_ASSERT("tcp_output_fill_options: invalid pbuf", p != NULL);
1879
1880 tcphdr = (struct tcp_hdr *)p->payload;
1881 opts = (u32_t *)(void *)(tcphdr + 1);
1882
1883 /* NB. MSS and window scale options are only sent on SYNs, so ignore them here */
1884
1885#if LWIP_TCP_TIMESTAMPS
1886 if (optflags & TF_SEG_OPTS_TS) {
1887 tcp_build_timestamp_option(pcb, opts);
1888 opts += 3;
1889 }
1890#endif
1891
1892#if LWIP_TCP_SACK_OUT
1893 if (pcb && (num_sacks > 0)) {
1894 tcp_build_sack_option(pcb, opts, num_sacks);
1895 /* 1 word for SACKs header (including 2xNOP), and 2 words for each SACK */
1896 sacks_len = 1 + num_sacks * 2;
1897 opts += sacks_len;
1898 }
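  /* Layout example (illustrative): with num_sacks == 2, sacks_len is
     1 + 2 * 2 = 5 32-bit words, i.e. 20 bytes on the wire:
     NOP, NOP, kind 5, length 18, followed by two (left edge, right edge) pairs. */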
1899#else
1900 LWIP_UNUSED_ARG(num_sacks);
1901#endif
1902
1903#ifdef LWIP_HOOK_TCP_OUT_ADD_TCPOPTS
1904 opts = LWIP_HOOK_TCP_OUT_ADD_TCPOPTS(p, tcphdr, pcb, opts);
1905#endif
1906
1907 LWIP_UNUSED_ARG(pcb);
1908 LWIP_UNUSED_ARG(sacks_len);
1909 LWIP_ASSERT("options not filled", (u8_t *)opts == ((u8_t *)(tcphdr + 1)) + sacks_len * 4 + LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb));
1910 LWIP_UNUSED_ARG(optflags); /* for LWIP_NOASSERT */
1911 LWIP_UNUSED_ARG(opts); /* for LWIP_NOASSERT */
1912}
1913
1914/** Output a control segment pbuf to IP.
1915 *
1916 * Called from tcp_rst, tcp_send_empty_ack, tcp_keepalive and tcp_zero_window_probe,
1917 * this function combines selecting a netif for transmission, generating the tcp
1918 * header checksum and calling ip_output_if while handling netif hints and stats.
1919 */
1920static err_t
1921tcp_output_control_segment(const struct tcp_pcb *pcb, struct pbuf *p,
1922 const ip_addr_t *src, const ip_addr_t *dst)
1923{
1924 err_t err;
1925 struct netif *netif;
1926
1927 LWIP_ASSERT("tcp_output_control_segment: invalid pbuf", p != NULL);
1928
1929 netif = tcp_route(pcb, src, dst);
1930 if (netif == NULL) {
1931 err = ERR_RTE;
1932 } else {
1933 u8_t ttl, tos;
1934#if CHECKSUM_GEN_TCP
1935 IF__NETIF_CHECKSUM_ENABLED(netif, NETIF_CHECKSUM_GEN_TCP) {
1936 struct tcp_hdr *tcphdr = (struct tcp_hdr *)p->payload;
1937 tcphdr->chksum = ip_chksum_pseudo(p, IP_PROTO_TCP, p->tot_len,
1938 src, dst);
1939 }
1940#endif
1941 if (pcb != NULL) {
1942 NETIF_SET_HINTS(netif, LWIP_CONST_CAST(struct netif_hint*, &(pcb->netif_hints)));
1943 ttl = pcb->ttl;
1944 tos = pcb->tos;
1945 } else {
1946 /* Send output with hardcoded TTL/HL since we have no access to the pcb */
1947 ttl = TCP_TTL;
1948 tos = 0;
1949 }
1950 TCP_STATS_INC(tcp.xmit);
1951 err = ip_output_if(p, src, dst, ttl, tos, IP_PROTO_TCP, netif);
1952 NETIF_RESET_HINTS(netif);
1953 }
1954 pbuf_free(p);
1955 return err;
1956}
1957
1958/**
1959 * Send a TCP RESET packet (empty segment with RST flag set) either to
1960 * abort a connection or to show that there is no matching local connection
1961 * for a received segment.
1962 *
1963 * Called by tcp_abort() (to abort a local connection), tcp_input() (if no
1964 * matching local pcb was found), tcp_listen_input() (if incoming segment
1965 * has ACK flag set) and tcp_process() (received segment in the wrong state)
1966 *
1967 * Since a RST segment is in most cases not sent for an active connection,
1968 * tcp_rst() takes as explicit arguments the values that most other segment
1969 * output functions read from a tcp_pcb.
1970 *
1971 * @param pcb TCP pcb (may be NULL if no pcb is available)
1972 * @param seqno the sequence number to use for the outgoing segment
1973 * @param ackno the acknowledge number to use for the outgoing segment
1974 * @param local_ip the local IP address to send the segment from
1975 * @param remote_ip the remote IP address to send the segment to
1976 * @param local_port the local TCP port to send the segment from
1977 * @param remote_port the remote TCP port to send the segment to
1978 */
1979void
1980tcp_rst(const struct tcp_pcb *pcb, u32_t seqno, u32_t ackno,
1981 const ip_addr_t *local_ip, const ip_addr_t *remote_ip,
1982 u16_t local_port, u16_t remote_port)
1983{
1984 struct pbuf *p;
1985 u16_t wnd;
1986 u8_t optlen;
1987
1988 LWIP_ASSERT("tcp_rst: invalid local_ip", local_ip != NULL);
1989 LWIP_ASSERT("tcp_rst: invalid remote_ip", remote_ip != NULL);
1990
1991 optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
1992
1993#if LWIP_WND_SCALE
1994 wnd = PP_HTONS(((TCP_WND >> TCP_RCV_SCALE) & 0xFFFF));
1995#else
1996 wnd = PP_HTONS(TCP_WND);
1997#endif
1998
1999 p = tcp_output_alloc_header_common(ackno, optlen, 0, lwip_htonl(seqno), local_port,
2000 remote_port, TCP_RST | TCP_ACK, wnd);
2001 if (p == NULL) {
2002 LWIP_DEBUGF(TCP_DEBUG, ("tcp_rst: could not allocate memory for pbuf\n"));
2003 return;
2004 }
2005 tcp_output_fill_options(pcb, p, 0, optlen);
2006
2007 MIB2_STATS_INC(mib2.tcpoutrsts);
2008
2009 tcp_output_control_segment(pcb, p, local_ip, remote_ip);
2010 LWIP_DEBUGF(TCP_RST_DEBUG, ("tcp_rst: seqno %"U32_F" ackno %"U32_F".\n", seqno, ackno));
2011}
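/* Caller sketch (illustrative only; the real call sites live in tcp_in.c):
 * when an incoming, non-RST segment matches no pcb, tcp_input() sends a reset
 * along the lines of:
 *
 *   tcp_rst(NULL, ackno, seqno + tcplen, ip_current_dest_addr(),
 *           ip_current_src_addr(), tcphdr->dest, tcphdr->src);
 *
 * With pcb == NULL, tcp_output_control_segment() falls back to TCP_TTL and
 * tos 0 since no connection state is available. */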
2012
2013/**
2014 * Send an ACK without data.
2015 *
2016 * @param pcb Protocol control block for the TCP connection to send the ACK
2017 */
2018err_t
2019tcp_send_empty_ack(struct tcp_pcb *pcb)
2020{
2021 err_t err;
2022 struct pbuf *p;
2023 u8_t optlen, optflags = 0;
2024 u8_t num_sacks = 0;
2025
2026 LWIP_ASSERT("tcp_send_empty_ack: invalid pcb", pcb != NULL);
2027
2028#if LWIP_TCP_TIMESTAMPS
2029 if (pcb->flags & TF_TIMESTAMP) {
2030 optflags = TF_SEG_OPTS_TS;
2031 }
2032#endif
2033 optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(optflags, pcb);
2034
2035#if LWIP_TCP_SACK_OUT
2036 /* For now, SACKs are only sent with empty ACKs */
2037 if ((num_sacks = tcp_get_num_sacks(pcb, optlen)) > 0) {
2038 optlen += 4 + num_sacks * 8; /* 4 bytes for header (including 2*NOP), plus 8B for each SACK */
2039 }
2040#endif
2041
2042 p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt));
2043 if (p == NULL) {
2044 /* let tcp_fasttmr retry sending this ACK */
2045 tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
2046 LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_output: (ACK) could not allocate pbuf\n"));
2047 return ERR_BUF;
2048 }
2049 tcp_output_fill_options(pcb, p, optflags, num_sacks);
2050
2051#if LWIP_TCP_TIMESTAMPS
2052 pcb->ts_lastacksent = pcb->rcv_nxt;
2053#endif
2054
2055 LWIP_DEBUGF(TCP_OUTPUT_DEBUG,
2056 ("tcp_output: sending ACK for %"U32_F"\n", pcb->rcv_nxt));
2057 err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip);
2058 if (err != ERR_OK) {
2059 /* let tcp_fasttmr retry sending this ACK */
2060 tcp_set_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
2061 } else {
2062 /* remove ACK flags from the PCB, as we sent an empty ACK now */
2063 tcp_clear_flags(pcb, TF_ACK_DELAY | TF_ACK_NOW);
2064 }
2065
2066 return err;
2067}
2068
2069/**
2070 * Send keepalive packets to keep a connection active even though
2071 * no data is sent over it.
2072 *
2073 * Called by tcp_slowtmr()
2074 *
2075 * @param pcb the tcp_pcb for which to send a keepalive packet
2076 */
2077err_t
2078tcp_keepalive(struct tcp_pcb *pcb)
2079{
2080 err_t err;
2081 struct pbuf *p;
2082 u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
2083
2084 LWIP_ASSERT("tcp_keepalive: invalid pcb", pcb != NULL);
2085
2086 LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: sending KEEPALIVE probe to "));
2087 ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip);
2088 LWIP_DEBUGF(TCP_DEBUG, ("\n"));
2089
2090 LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: tcp_ticks %"U32_F" pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
2091 tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent));
2092
2093 p = tcp_output_alloc_header(pcb, optlen, 0, lwip_htonl(pcb->snd_nxt - 1));
2094 if (p == NULL) {
2095 LWIP_DEBUGF(TCP_DEBUG,
2096 ("tcp_keepalive: could not allocate memory for pbuf\n"));
2097 return ERR_MEM;
2098 }
2099 tcp_output_fill_options(pcb, p, 0, optlen);
2100 err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip);
2101
2102 LWIP_DEBUGF(TCP_DEBUG, ("tcp_keepalive: seqno %"U32_F" ackno %"U32_F" err %d.\n",
2103 pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err));
2104 return err;
2105}
2106
2107/**
2108 * Send persist timer zero-window probes to keep a connection active
2109 * when a window update is lost.
2110 *
2111 * Called by tcp_slowtmr()
2112 *
2113 * @param pcb the tcp_pcb for which to send a zero-window probe packet
2114 */
2115err_t
2116tcp_zero_window_probe(struct tcp_pcb *pcb)
2117{
2118 err_t err;
2119 struct pbuf *p;
2120 struct tcp_hdr *tcphdr;
2121 struct tcp_seg *seg;
2122 u16_t len;
2123 u8_t is_fin;
2124 u32_t snd_nxt;
2125 u8_t optlen = LWIP_TCP_OPT_LENGTH_SEGMENT(0, pcb);
2126
2127 LWIP_ASSERT("tcp_zero_window_probe: invalid pcb", pcb != NULL);
2128
2129 LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: sending ZERO WINDOW probe to "));
2130 ip_addr_debug_print_val(TCP_DEBUG, pcb->remote_ip);
2131 LWIP_DEBUGF(TCP_DEBUG, ("\n"));
2132
2133 LWIP_DEBUGF(TCP_DEBUG,
2134 ("tcp_zero_window_probe: tcp_ticks %"U32_F
2135 " pcb->tmr %"U32_F" pcb->keep_cnt_sent %"U16_F"\n",
2136 tcp_ticks, pcb->tmr, (u16_t)pcb->keep_cnt_sent));
2137
2138  /* Only consider the unsent queue; the persist timer should be off when there is data in-flight */
2139 seg = pcb->unsent;
2140 if (seg == NULL) {
2141 /* Not expected, persist timer should be off when the send buffer is empty */
2142 return ERR_OK;
2143 }
2144
2145  /* Increment the probe count. NOTE: we count the probe even if it fails to
2146     actually transmit due to an error. This ensures that memory exhaustion or
2147     a routing problem doesn't leave a zero-window pcb as an indefinite zombie.
2148     The RTO mechanism has similar behavior, see pcb->nrtx */
2149 if (pcb->persist_probe < 0xFF) {
2150 ++pcb->persist_probe;
2151 }
2152
2153 is_fin = ((TCPH_FLAGS(seg->tcphdr) & TCP_FIN) != 0) && (seg->len == 0);
2154 /* we want to send one seqno: either FIN or data (no options) */
2155 len = is_fin ? 0 : 1;
2156
2157 p = tcp_output_alloc_header(pcb, optlen, len, seg->tcphdr->seqno);
2158 if (p == NULL) {
2159 LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: no memory for pbuf\n"));
2160 return ERR_MEM;
2161 }
2162 tcphdr = (struct tcp_hdr *)p->payload;
2163
2164 if (is_fin) {
2165 /* FIN segment, no data */
2166 TCPH_FLAGS_SET(tcphdr, TCP_ACK | TCP_FIN);
2167 } else {
2168    /* Data segment, copy in one byte from the head of the unsent queue */
2169 char *d = ((char *)p->payload + TCP_HLEN);
2170 /* Depending on whether the segment has already been sent (unacked) or not
2171 (unsent), seg->p->payload points to the IP header or TCP header.
2172 Ensure we copy the first TCP data byte: */
2173 pbuf_copy_partial(seg->p, d, 1, seg->p->tot_len - seg->len);
2174 }
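  /* Offset example for the pbuf_copy_partial() call above (illustrative): for
     an unsent segment whose pbuf holds a 20-byte TCP header plus 10 data bytes,
     tot_len == 30 and seg->len == 10, so the copy starts at offset 20, i.e.
     exactly at the first data byte. The same arithmetic works if lower-layer
     headers are still present in front of the TCP header. */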
2175
2176 /* The byte may be acknowledged without the window being opened. */
2177 snd_nxt = lwip_ntohl(seg->tcphdr->seqno) + 1;
2178 if (TCP_SEQ_LT(pcb->snd_nxt, snd_nxt)) {
2179 pcb->snd_nxt = snd_nxt;
2180 }
2181 tcp_output_fill_options(pcb, p, 0, optlen);
2182
2183 err = tcp_output_control_segment(pcb, p, &pcb->local_ip, &pcb->remote_ip);
2184
2185 LWIP_DEBUGF(TCP_DEBUG, ("tcp_zero_window_probe: seqno %"U32_F
2186 " ackno %"U32_F" err %d.\n",
2187 pcb->snd_nxt - 1, pcb->rcv_nxt, (int)err));
2188 return err;
2189}
2190#endif /* LWIP_TCP */