source: azure_iot_hub_f767zi/trunk/asp_baseplatform/lwip/lwip-2.1.2/src/api/sockets.c@457

Last change on this file since 457 was 457, checked in by coas-nagasima, 4 years ago

Add files

  • Property svn:eol-style set to native
  • Property svn:mime-type set to text/x-csrc;charset=UTF-8
File size: 134.7 KB
1/**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6/*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <adam@sics.se>
35 *
36 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
37 *
38 */
39
40#include "lwip/opt.h"
41
42#if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44#include "lwip/sockets.h"
45#include "lwip/priv/sockets_priv.h"
46#include "lwip/api.h"
47#include "lwip/igmp.h"
48#include "lwip/inet.h"
49#include "lwip/tcp.h"
50#include "lwip/raw.h"
51#include "lwip/udp.h"
52#include "lwip/memp.h"
53#include "lwip/pbuf.h"
54#include "lwip/netif.h"
55#include "lwip/priv/tcpip_priv.h"
56#include "lwip/mld6.h"
57#if LWIP_CHECKSUM_ON_COPY
58#include "lwip/inet_chksum.h"
59#endif
60
61#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
62#include <stdarg.h>
63#endif
64
65#include <string.h>
66
67#ifdef LWIP_HOOK_FILENAME
68#include LWIP_HOOK_FILENAME
69#endif
70
71/* If the netconn API is not required publicly, then we include the necessary
72 files here to get the implementation */
73#if !LWIP_NETCONN
74#undef LWIP_NETCONN
75#define LWIP_NETCONN 1
76#include "api_msg.c"
77#include "api_lib.c"
78#include "netbuf.c"
79#undef LWIP_NETCONN
80#define LWIP_NETCONN 0
81#endif
82
83#define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
84#define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
85#define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
86#define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
87
88#if LWIP_IPV4
89#define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
90 (sin)->sin_len = sizeof(struct sockaddr_in); \
91 (sin)->sin_family = AF_INET; \
92 (sin)->sin_port = lwip_htons((port)); \
93 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
94 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
95#define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
96 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
97 (port) = lwip_ntohs((sin)->sin_port); }while(0)
98#endif /* LWIP_IPV4 */
99
100#if LWIP_IPV6
101#define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
102 (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
103 (sin6)->sin6_family = AF_INET6; \
104 (sin6)->sin6_port = lwip_htons((port)); \
105 (sin6)->sin6_flowinfo = 0; \
106 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
107 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
108#define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
109 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
110 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
111 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
112 } \
113 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
114#endif /* LWIP_IPV6 */
115
116#if LWIP_IPV4 && LWIP_IPV6
117static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
118
119#define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
120 ((namelen) == sizeof(struct sockaddr_in6)))
121#define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
122 ((name)->sa_family == AF_INET6))
123#define SOCK_ADDR_TYPE_MATCH(name, sock) \
124 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
125 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
126#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
127 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
128 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
129 } else { \
130 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
131 } } while(0)
132#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
133#define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
134 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
135#elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
136#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
137#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
138#define SOCK_ADDR_TYPE_MATCH(name, sock) 1
139#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
140 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
141#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
142 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
143#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
144#else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
145#define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
146#define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
147#define SOCK_ADDR_TYPE_MATCH(name, sock) 1
148#define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
149 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
150#define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
151 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
152#define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
153#endif /* LWIP_IPV6 */
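
/* Illustrative sketch (not part of lwIP itself) of how the conversion macros
 * above are used: 'peer', 'port' and 'sa' are hypothetical local variables,
 * and 'union sockaddr_aligned' is defined further below in this file.
 *
 *   ip_addr_t peer;                  // lwIP-internal address representation
 *   u16_t port = 0;
 *   union sockaddr_aligned sa;
 *
 *   // lwIP address/port -> BSD sockaddr (selects sockaddr_in or sockaddr_in6)
 *   IPADDR_PORT_TO_SOCKADDR(&sa, &peer, port);
 *
 *   // BSD sockaddr -> lwIP address/port ('port' is written directly)
 *   SOCKADDR_TO_IPADDR_PORT(&sa.sa, &peer, port);
 */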
154
155#define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
156 IS_SOCK_ADDR_TYPE_VALID(name))
157#define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
158 SOCK_ADDR_TYPE_MATCH(name, sock))
159#define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
160
161
162#define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
163#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
164 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
165 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
166#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
167 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
168 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
169#define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
170 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
171 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
172
173
174#define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
175#define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
176#define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
177#if LWIP_MPU_COMPATIBLE
178#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
179 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
180 if (name == NULL) { \
181 sock_set_errno(sock, ENOMEM); \
182 done_socket(sock); \
183 return -1; \
184 } }while(0)
185#else /* LWIP_MPU_COMPATIBLE */
186#define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
187#endif /* LWIP_MPU_COMPATIBLE */
188
189#if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
190#define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
191#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
192#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
193#else
194#define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
195#define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
196 u32_t loc = (val); \
197 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
198 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
199#define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
200#endif
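
/* Illustrative sketch, assuming LWIP_SO_RCVTIMEO is enabled and 's' is an
 * application socket: with the standard option type the timeout is passed as
 * a struct timeval, which the macros above convert to/from milliseconds; with
 * LWIP_SO_SNDRCVTIMEO_NONSTANDARD the option value is a plain int holding
 * milliseconds instead.
 *
 *   struct timeval tv;
 *   tv.tv_sec  = 2;        // 2.5 second receive timeout
 *   tv.tv_usec = 500000;
 *   lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */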
201
202
203/** A struct sockaddr replacement that has the same alignment as sockaddr_in/
204 * sockaddr_in6 if instantiated.
205 */
206union sockaddr_aligned {
207 struct sockaddr sa;
208#if LWIP_IPV6
209 struct sockaddr_in6 sin6;
210#endif /* LWIP_IPV6 */
211#if LWIP_IPV4
212 struct sockaddr_in sin;
213#endif /* LWIP_IPV4 */
214};
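
/* Why this union exists: building the address in a local 'union
 * sockaddr_aligned' guarantees the alignment that a bare 'struct sockaddr' on
 * the stack would not, so IPADDR_PORT_TO_SOCKADDR() can safely write
 * sockaddr_in/sockaddr_in6 fields into it. A minimal sketch of the pattern
 * used by lwip_accept() below:
 *
 *   union sockaddr_aligned tempaddr;
 *   IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
 *   if (*addrlen > tempaddr.sa.sa_len) {
 *     *addrlen = tempaddr.sa.sa_len;
 *   }
 *   MEMCPY(addr, &tempaddr, *addrlen);
 */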
215
216/* Define the number of IPv4 multicast memberships, default is one per socket */
217#ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
218#define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
219#endif
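
/* Ports that need more IPv4 group memberships system-wide can raise the limit
 * from lwipopts.h; a minimal sketch (the value 2*NUM_SOCKETS is arbitrary):
 *
 *   #define LWIP_SOCKET_MAX_MEMBERSHIPS   (2 * NUM_SOCKETS)
 */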
220
221#if LWIP_IGMP
222/* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
223 a socket is closed */
224struct lwip_socket_multicast_pair {
225 /** the socket */
226 struct lwip_sock *sock;
227 /** the interface address */
228 ip4_addr_t if_addr;
229 /** the group address */
230 ip4_addr_t multi_addr;
231};
232
233static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
234
235static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
236static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
237static void lwip_socket_drop_registered_memberships(int s);
238#endif /* LWIP_IGMP */
239
240#if LWIP_IPV6_MLD
241/* This is to keep track of IP_JOIN_GROUP calls to drop the membership when
242 a socket is closed */
243struct lwip_socket_multicast_mld6_pair {
244 /** the socket */
245 struct lwip_sock *sock;
246 /** the interface index */
247 u8_t if_idx;
248 /** the group address */
249 ip6_addr_t multi_addr;
250};
251
252static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
253
254static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
255static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
256static void lwip_socket_drop_registered_mld6_memberships(int s);
257#endif /* LWIP_IPV6_MLD */
258
259/** The global array of available sockets */
260static struct lwip_sock sockets[NUM_SOCKETS];
261
262#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
263#if LWIP_TCPIP_CORE_LOCKING
264/* protect the select_cb_list using core lock */
265#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
266#define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
267#define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
268#else /* LWIP_TCPIP_CORE_LOCKING */
269/* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
270#define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
271#define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
272#define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
273/** This counter is increased from lwip_select when the list is changed
274 and checked in select_check_waiters to see if it has changed. */
275static volatile int select_cb_ctr;
276#endif /* LWIP_TCPIP_CORE_LOCKING */
277/** The global list of tasks waiting for select */
278static struct lwip_select_cb *select_cb_list;
279#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
280
281#define sock_set_errno(sk, e) do { \
282 const int sockerr = (e); \
283 set_errno(sockerr); \
284} while (0)
285
286/* Forward declaration of some functions */
287#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
288static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
289#define DEFAULT_SOCKET_EVENTCB event_callback
290static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
291#else
292#define DEFAULT_SOCKET_EVENTCB NULL
293#endif
294#if !LWIP_TCPIP_CORE_LOCKING
295static void lwip_getsockopt_callback(void *arg);
296static void lwip_setsockopt_callback(void *arg);
297#endif
298static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
299static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
300static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
301 union lwip_sock_lastdata *lastdata);
302static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
303
304#if LWIP_IPV4 && LWIP_IPV6
305static void
306sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
307{
308 if ((sockaddr->sa_family) == AF_INET6) {
309 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
310 ipaddr->type = IPADDR_TYPE_V6;
311 } else {
312 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
313 ipaddr->type = IPADDR_TYPE_V4;
314 }
315}
316#endif /* LWIP_IPV4 && LWIP_IPV6 */
317
318/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
319void
320lwip_socket_thread_init(void)
321{
322 netconn_thread_init();
323}
324
325/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
326void
327lwip_socket_thread_cleanup(void)
328{
329 netconn_thread_cleanup();
330}
331
332#if LWIP_NETCONN_FULLDUPLEX
333/* Thread-safe increment of sock->fd_used, with overflow check */
334static int
335sock_inc_used(struct lwip_sock *sock)
336{
337 int ret;
338 SYS_ARCH_DECL_PROTECT(lev);
339
340 LWIP_ASSERT("sock != NULL", sock != NULL);
341
342 SYS_ARCH_PROTECT(lev);
343 if (sock->fd_free_pending) {
344 /* prevent new usage of this socket if free is pending */
345 ret = 0;
346 } else {
347 ++sock->fd_used;
348 ret = 1;
349 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
350 }
351 SYS_ARCH_UNPROTECT(lev);
352 return ret;
353}
354
355/* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */
356static int
357sock_inc_used_locked(struct lwip_sock *sock)
358{
359 LWIP_ASSERT("sock != NULL", sock != NULL);
360
361 if (sock->fd_free_pending) {
362 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
363 return 0;
364 }
365
366 ++sock->fd_used;
367 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
368 return 1;
369}
370
371/* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
372 * released (and possibly reused) when used from more than one thread
373 * (e.g. read-while-write or close-while-write, etc.).
374 * This function is called at the end of functions using (try)get_socket*().
375 */
376static void
377done_socket(struct lwip_sock *sock)
378{
379 int freed = 0;
380 int is_tcp = 0;
381 struct netconn *conn = NULL;
382 union lwip_sock_lastdata lastdata;
383 SYS_ARCH_DECL_PROTECT(lev);
384 LWIP_ASSERT("sock != NULL", sock != NULL);
385
386 SYS_ARCH_PROTECT(lev);
387 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
388 if (--sock->fd_used == 0) {
389 if (sock->fd_free_pending) {
390 /* free the socket */
391 sock->fd_used = 1;
392 is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
393 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
394 }
395 }
396 SYS_ARCH_UNPROTECT(lev);
397
398 if (freed) {
399 free_socket_free_elements(is_tcp, conn, &lastdata);
400 }
401}
402
403#else /* LWIP_NETCONN_FULLDUPLEX */
404#define sock_inc_used(sock) 1
405#define sock_inc_used_locked(sock) 1
406#define done_socket(sock)
407#endif /* LWIP_NETCONN_FULLDUPLEX */
408
409/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
410static struct lwip_sock *
411tryget_socket_unconn_nouse(int fd)
412{
413 int s = fd - LWIP_SOCKET_OFFSET;
414 if ((s < 0) || (s >= NUM_SOCKETS)) {
415 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
416 return NULL;
417 }
418 return &sockets[s];
419}
420
421struct lwip_sock *
422lwip_socket_dbg_get_socket(int fd)
423{
424 return tryget_socket_unconn_nouse(fd);
425}
426
427/* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
428static struct lwip_sock *
429tryget_socket_unconn(int fd)
430{
431 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
432 if (ret != NULL) {
433 if (!sock_inc_used(ret)) {
434 return NULL;
435 }
436 }
437 return ret;
438}
439
440/* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */
441static struct lwip_sock *
442tryget_socket_unconn_locked(int fd)
443{
444 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
445 if (ret != NULL) {
446 if (!sock_inc_used_locked(ret)) {
447 return NULL;
448 }
449 }
450 return ret;
451}
452
453/**
454 * Same as get_socket but doesn't set errno
455 *
456 * @param fd externally used socket index
457 * @return struct lwip_sock for the socket or NULL if not found
458 */
459static struct lwip_sock *
460tryget_socket(int fd)
461{
462 struct lwip_sock *sock = tryget_socket_unconn(fd);
463 if (sock != NULL) {
464 if (sock->conn) {
465 return sock;
466 }
467 done_socket(sock);
468 }
469 return NULL;
470}
471
472/**
473 * Map an externally used socket index to the internal socket representation.
474 *
475 * @param fd externally used socket index
476 * @return struct lwip_sock for the socket or NULL if not found
477 */
478static struct lwip_sock *
479get_socket(int fd)
480{
481 struct lwip_sock *sock = tryget_socket(fd);
482 if (!sock) {
483 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
484 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
485 }
486 set_errno(EBADF);
487 return NULL;
488 }
489 return sock;
490}
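
/* Illustrative sketch (not a real function of this file) of the reference
 * counting contract used by every socket function below: the descriptor is
 * looked up with get_socket()/tryget_socket(), which increments fd_used when
 * LWIP_NETCONN_FULLDUPLEX is enabled, and every return path must balance that
 * with done_socket(). 'do_something' stands in for any netconn operation.
 *
 *   struct lwip_sock *sock = get_socket(s);
 *   if (!sock) {
 *     return -1;                   // errno was already set to EBADF
 *   }
 *   err = do_something(sock->conn);
 *   sock_set_errno(sock, err_to_errno(err));
 *   done_socket(sock);             // may actually free the socket if a close is pending
 *   return (err == ERR_OK) ? 0 : -1;
 */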
491
492/**
493 * Allocate a new socket for a given netconn.
494 *
495 * @param newconn the netconn for which to allocate a socket
496 * @param accepted 1 if socket has been created by accept(),
497 * 0 if socket has been created by socket()
498 * @return the index of the new socket; -1 on error
499 */
500static int
501alloc_socket(struct netconn *newconn, int accepted)
502{
503 int i;
504 SYS_ARCH_DECL_PROTECT(lev);
505 LWIP_UNUSED_ARG(accepted);
506
507 /* allocate a new socket identifier */
508 for (i = 0; i < NUM_SOCKETS; ++i) {
509 /* Protect socket array */
510 SYS_ARCH_PROTECT(lev);
511 if (!sockets[i].conn) {
512#if LWIP_NETCONN_FULLDUPLEX
513 if (sockets[i].fd_used) {
514 SYS_ARCH_UNPROTECT(lev);
515 continue;
516 }
517 sockets[i].fd_used = 1;
518 sockets[i].fd_free_pending = 0;
519#endif
520 sockets[i].conn = newconn;
521 /* The socket is not yet known to anyone, so no need to protect
522 after having marked it as used. */
523 SYS_ARCH_UNPROTECT(lev);
524 sockets[i].lastdata.pbuf = NULL;
525#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
526 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
527 sockets[i].rcvevent = 0;
528 /* TCP sendbuf is empty, but the socket is not yet writable until connected
529 * (unless it has been created by accept()). */
530 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
531 sockets[i].errevent = 0;
532#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
533 return i + LWIP_SOCKET_OFFSET;
534 }
535 SYS_ARCH_UNPROTECT(lev);
536 }
537 return -1;
538}
539
540/** Free a socket (under lock)
541 *
542 * @param sock the socket to free
543 * @param is_tcp != 0 for TCP sockets, used to free lastdata
544 * @param conn the socket's netconn is stored here, must be freed externally
545 * @param lastdata lastdata is stored here, must be freed externally
546 */
547static int
548free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
549 union lwip_sock_lastdata *lastdata)
550{
551#if LWIP_NETCONN_FULLDUPLEX
552 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
553 sock->fd_used--;
554 if (sock->fd_used > 0) {
555 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
556 return 0;
557 }
558#else /* LWIP_NETCONN_FULLDUPLEX */
559 LWIP_UNUSED_ARG(is_tcp);
560#endif /* LWIP_NETCONN_FULLDUPLEX */
561
562 *lastdata = sock->lastdata;
563 sock->lastdata.pbuf = NULL;
564 *conn = sock->conn;
565 sock->conn = NULL;
566 return 1;
567}
568
569/** Free a socket's leftover members.
570 */
571static void
572free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
573{
574 if (lastdata->pbuf != NULL) {
575 if (is_tcp) {
576 pbuf_free(lastdata->pbuf);
577 } else {
578 netbuf_delete(lastdata->netbuf);
579 }
580 }
581 if (conn != NULL) {
582 /* netconn_prepare_delete() has already been called, here we only free the conn */
583 netconn_delete(conn);
584 }
585}
586
587/** Free a socket. The socket's netconn must have been
588 * deleted before!
589 *
590 * @param sock the socket to free
591 * @param is_tcp != 0 for TCP sockets, used to free lastdata
592 */
593static void
594free_socket(struct lwip_sock *sock, int is_tcp)
595{
596 int freed;
597 struct netconn *conn;
598 union lwip_sock_lastdata lastdata;
599 SYS_ARCH_DECL_PROTECT(lev);
600
601 /* Protect socket array */
602 SYS_ARCH_PROTECT(lev);
603
604 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
605 SYS_ARCH_UNPROTECT(lev);
606 /* don't use 'sock' after this line, as another task might have allocated it */
607
608 if (freed) {
609 free_socket_free_elements(is_tcp, conn, &lastdata);
610 }
611}
612
613/* Below this, the well-known socket functions are implemented.
614 * Use google.com or opengroup.org to get a good description :-)
615 *
616 * Exceptions are documented!
617 */
618
619int
620lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
621{
622 struct lwip_sock *sock, *nsock;
623 struct netconn *newconn;
624 ip_addr_t naddr;
625 u16_t port = 0;
626 int newsock;
627 err_t err;
628 int recvevent;
629 SYS_ARCH_DECL_PROTECT(lev);
630
631 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
632 sock = get_socket(s);
633 if (!sock) {
634 return -1;
635 }
636
637 /* wait for a new connection */
638 err = netconn_accept(sock->conn, &newconn);
639 if (err != ERR_OK) {
640 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
641 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
642 sock_set_errno(sock, EOPNOTSUPP);
643 } else if (err == ERR_CLSD) {
644 sock_set_errno(sock, EINVAL);
645 } else {
646 sock_set_errno(sock, err_to_errno(err));
647 }
648 done_socket(sock);
649 return -1;
650 }
651 LWIP_ASSERT("newconn != NULL", newconn != NULL);
652
653 newsock = alloc_socket(newconn, 1);
654 if (newsock == -1) {
655 netconn_delete(newconn);
656 sock_set_errno(sock, ENFILE);
657 done_socket(sock);
658 return -1;
659 }
660 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
661 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
662
663 /* See event_callback: data may come in right away after an accept, even
664 * before the server task has created the new socket.
665 * In that case, newconn->socket is counted down (newconn->socket--),
666 * so nsock->rcvevent is >= 1 here!
667 */
668 SYS_ARCH_PROTECT(lev);
669 recvevent = (s16_t)(-1 - newconn->socket);
670 newconn->socket = newsock;
671 SYS_ARCH_UNPROTECT(lev);
672
673 if (newconn->callback) {
674 LOCK_TCPIP_CORE();
675 while (recvevent > 0) {
676 recvevent--;
677 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
678 }
679 UNLOCK_TCPIP_CORE();
680 }
681
682 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
683 * not be NULL if addr is valid.
684 */
685 if ((addr != NULL) && (addrlen != NULL)) {
686 union sockaddr_aligned tempaddr;
687 /* get the IP address and port of the remote host */
688 err = netconn_peer(newconn, &naddr, &port);
689 if (err != ERR_OK) {
690 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
691 netconn_delete(newconn);
692 free_socket(nsock, 1);
693 sock_set_errno(sock, err_to_errno(err));
694 done_socket(sock);
695 return -1;
696 }
697
698 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
699 if (*addrlen > tempaddr.sa.sa_len) {
700 *addrlen = tempaddr.sa.sa_len;
701 }
702 MEMCPY(addr, &tempaddr, *addrlen);
703
704 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
705 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
706 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
707 } else {
708 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
709 }
710
711 sock_set_errno(sock, 0);
712 done_socket(sock);
713 done_socket(nsock);
714 return newsock;
715}
716
717int
718lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
719{
720 struct lwip_sock *sock;
721 ip_addr_t local_addr;
722 u16_t local_port;
723 err_t err;
724
725 sock = get_socket(s);
726 if (!sock) {
727 return -1;
728 }
729
730 if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
731 /* sockaddr does not match socket type (IPv4/IPv6) */
732 sock_set_errno(sock, err_to_errno(ERR_VAL));
733 done_socket(sock);
734 return -1;
735 }
736
737 /* check size, family and alignment of 'name' */
738 LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
739 IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
740 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
741 LWIP_UNUSED_ARG(namelen);
742
743 SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
744 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
745 ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
746 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
747
748#if LWIP_IPV4 && LWIP_IPV6
749 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
750 if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
751 unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
752 IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
753 }
754#endif /* LWIP_IPV4 && LWIP_IPV6 */
755
756 err = netconn_bind(sock->conn, &local_addr, local_port);
757
758 if (err != ERR_OK) {
759 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
760 sock_set_errno(sock, err_to_errno(err));
761 done_socket(sock);
762 return -1;
763 }
764
765 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
766 sock_set_errno(sock, 0);
767 done_socket(sock);
768 return 0;
769}
770
771int
772lwip_close(int s)
773{
774 struct lwip_sock *sock;
775 int is_tcp = 0;
776 err_t err;
777
778 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
779
780 sock = get_socket(s);
781 if (!sock) {
782 return -1;
783 }
784
785 if (sock->conn != NULL) {
786 is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
787 } else {
788 LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
789 }
790
791#if LWIP_IGMP
792 /* drop all possibly joined IGMP memberships */
793 lwip_socket_drop_registered_memberships(s);
794#endif /* LWIP_IGMP */
795#if LWIP_IPV6_MLD
796 /* drop all possibly joined MLD6 memberships */
797 lwip_socket_drop_registered_mld6_memberships(s);
798#endif /* LWIP_IPV6_MLD */
799
800 err = netconn_prepare_delete(sock->conn);
801 if (err != ERR_OK) {
802 sock_set_errno(sock, err_to_errno(err));
803 done_socket(sock);
804 return -1;
805 }
806
807 free_socket(sock, is_tcp);
808 set_errno(0);
809 return 0;
810}
811
812int
813lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
814{
815 struct lwip_sock *sock;
816 err_t err;
817
818 sock = get_socket(s);
819 if (!sock) {
820 return -1;
821 }
822
823 if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
824 /* sockaddr does not match socket type (IPv4/IPv6) */
825 sock_set_errno(sock, err_to_errno(ERR_VAL));
826 done_socket(sock);
827 return -1;
828 }
829
830 LWIP_UNUSED_ARG(namelen);
831 if (name->sa_family == AF_UNSPEC) {
832 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
833 err = netconn_disconnect(sock->conn);
834 } else {
835 ip_addr_t remote_addr;
836 u16_t remote_port;
837
838 /* check size, family and alignment of 'name' */
839 LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
840 IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
841 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
842
843 SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
844 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
845 ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
846 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
847
848#if LWIP_IPV4 && LWIP_IPV6
849 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
850 if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
851 unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
852 IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
853 }
854#endif /* LWIP_IPV4 && LWIP_IPV6 */
855
856 err = netconn_connect(sock->conn, &remote_addr, remote_port);
857 }
858
859 if (err != ERR_OK) {
860 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
861 sock_set_errno(sock, err_to_errno(err));
862 done_socket(sock);
863 return -1;
864 }
865
866 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
867 sock_set_errno(sock, 0);
868 done_socket(sock);
869 return 0;
870}
871
872/**
873 * Set a socket into listen mode.
874 * The socket may not have been used for another connection previously.
875 *
876 * @param s the socket to set to listening mode
877 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
878 * @return 0 on success, non-zero on failure
879 */
880int
881lwip_listen(int s, int backlog)
882{
883 struct lwip_sock *sock;
884 err_t err;
885
886 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
887
888 sock = get_socket(s);
889 if (!sock) {
890 return -1;
891 }
892
893 /* limit the "backlog" parameter to fit in a u8_t */
894 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
895
896 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
897
898 if (err != ERR_OK) {
899 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
900 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
901 sock_set_errno(sock, EOPNOTSUPP);
902 } else {
903 sock_set_errno(sock, err_to_errno(err));
904 }
905 done_socket(sock);
906 return -1;
907 }
908
909 sock_set_errno(sock, 0);
910 done_socket(sock);
911 return 0;
912}
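
/* Minimal application-level sketch of setting up a listening TCP socket with
 * the functions in this file (port 7 is arbitrary; as noted above, the backlog
 * value is only honored when TCP_LISTEN_BACKLOG==1):
 *
 *   int srv = lwip_socket(AF_INET, SOCK_STREAM, 0);
 *   struct sockaddr_in a;
 *   memset(&a, 0, sizeof(a));
 *   a.sin_len         = sizeof(a);
 *   a.sin_family      = AF_INET;
 *   a.sin_port        = lwip_htons(7);
 *   a.sin_addr.s_addr = PP_HTONL(INADDR_ANY);
 *   lwip_bind(srv, (struct sockaddr *)&a, sizeof(a));
 *   lwip_listen(srv, 1);
 *   int cli = lwip_accept(srv, NULL, NULL);   // addr/addrlen may be NULL
 */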
913
914#if LWIP_TCP
915/* Helper function to loop over receiving pbufs from netconn
916 * until "len" bytes are received or we're otherwise done.
917 * Keeps sock->lastdata for peeking or partly copying.
918 */
919static ssize_t
920lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
921{
922 u8_t apiflags = NETCONN_NOAUTORCVD;
923 ssize_t recvd = 0;
924 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
925
926 LWIP_ASSERT("no socket given", sock != NULL);
927 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
928
929 if (flags & MSG_DONTWAIT) {
930 apiflags |= NETCONN_DONTBLOCK;
931 }
932
933 do {
934 struct pbuf *p;
935 err_t err;
936 u16_t copylen;
937
938 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
939 /* Check if there is data left from the last recv operation. */
940 if (sock->lastdata.pbuf) {
941 p = sock->lastdata.pbuf;
942 } else {
943 /* No data was left from the previous operation, so we try to get
944 some from the network. */
945 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
946 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
947 err, (void *)p));
948
949 if (err != ERR_OK) {
950 if (recvd > 0) {
951 /* already received data, return that (this relies on getting the same error from
952 the netconn layer again the next time netconn_recv is called) */
953 goto lwip_recv_tcp_done;
954 }
955 /* We should really do some error checking here. */
956 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
957 lwip_strerr(err)));
958 sock_set_errno(sock, err_to_errno(err));
959 if (err == ERR_CLSD) {
960 return 0;
961 } else {
962 return -1;
963 }
964 }
965 LWIP_ASSERT("p != NULL", p != NULL);
966 sock->lastdata.pbuf = p;
967 }
968
969 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
970 p->tot_len, (int)recv_left, (int)recvd));
971
972 if (recv_left > p->tot_len) {
973 copylen = p->tot_len;
974 } else {
975 copylen = (u16_t)recv_left;
976 }
977 if (recvd + copylen < recvd) {
978 /* overflow */
979 copylen = (u16_t)(SSIZE_MAX - recvd);
980 }
981
982 /* copy the contents of the received buffer into
983 the supplied memory pointer mem */
984 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
985
986 recvd += copylen;
987
988 /* TCP combines multiple pbufs for one recv */
989 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
990 recv_left -= copylen;
991
992 /* Unless we peek the incoming message... */
993 if ((flags & MSG_PEEK) == 0) {
994 /* ... check if there is data left in the pbuf */
995 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
996 if (p->tot_len - copylen > 0) {
997 /* If so, it should be saved in the sock structure for the next recv call.
998 We store the pbuf but hide/free the consumed data: */
999 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
1000 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
1001 } else {
1002 sock->lastdata.pbuf = NULL;
1003 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
1004 pbuf_free(p);
1005 }
1006 }
1007 /* once we have some data to return, only add more if we don't need to wait */
1008 apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
1009 /* @todo: do we need to support peeking more than one pbuf? */
1010 } while ((recv_left > 0) && !(flags & MSG_PEEK));
1011lwip_recv_tcp_done:
1012 if ((recvd > 0) && !(flags & MSG_PEEK)) {
1013 /* ensure window update after copying all data */
1014 netconn_tcp_recvd(sock->conn, (size_t)recvd);
1015 }
1016 sock_set_errno(sock, 0);
1017 return recvd;
1018}
1019#endif
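
/* Illustrative sketch of the lastdata behavior implemented above: a MSG_PEEK
 * receive leaves the pbuf in sock->lastdata, so a following plain receive
 * returns the same bytes, and only then are they consumed and the TCP window
 * updated. 's' and 'buf' are hypothetical application variables.
 *
 *   char buf[64];
 *   ssize_t n = lwip_recv(s, buf, sizeof(buf), MSG_PEEK);   // data stays queued
 *   if (n > 0) {
 *     n = lwip_recv(s, buf, sizeof(buf), 0);                // now it is consumed
 *   }
 */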
1020
1021/* Convert a netbuf's address data to struct sockaddr */
1022static int
1023lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
1024 struct sockaddr *from, socklen_t *fromlen)
1025{
1026 int truncated = 0;
1027 union sockaddr_aligned saddr;
1028
1029 LWIP_UNUSED_ARG(conn);
1030
1031 LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
1032 LWIP_ASSERT("from != NULL", from != NULL);
1033 LWIP_ASSERT("fromlen != NULL", fromlen != NULL);
1034
1035#if LWIP_IPV4 && LWIP_IPV6
1036 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
1037 if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
1038 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
1039 IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
1040 }
1041#endif /* LWIP_IPV4 && LWIP_IPV6 */
1042
1043 IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
1044 if (*fromlen < saddr.sa.sa_len) {
1045 truncated = 1;
1046 } else if (*fromlen > saddr.sa.sa_len) {
1047 *fromlen = saddr.sa.sa_len;
1048 }
1049 MEMCPY(from, &saddr, *fromlen);
1050 return truncated;
1051}
1052
1053#if LWIP_TCP
1054/* Helper function to get a tcp socket's remote address info */
1055static int
1056lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
1057{
1058 if (sock == NULL) {
1059 return 0;
1060 }
1061 LWIP_UNUSED_ARG(dbg_fn);
1062 LWIP_UNUSED_ARG(dbg_s);
1063 LWIP_UNUSED_ARG(dbg_ret);
1064
1065#if !SOCKETS_DEBUG
1066 if (from && fromlen)
1067#endif /* !SOCKETS_DEBUG */
1068 {
1069 /* get remote addr/port from tcp_pcb */
1070 u16_t port;
1071 ip_addr_t tmpaddr;
1072 netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
1073 LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
1074 ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
1075 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
1076 if (from && fromlen) {
1077 return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
1078 }
1079 }
1080 return 0;
1081}
1082#endif
1083
1084/* Helper function to receive a netbuf from a udp or raw netconn.
1085 * Keeps sock->lastdata for peeking.
1086 */
1087static err_t
1088lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1089{
1090 struct netbuf *buf;
1091 u8_t apiflags;
1092 err_t err;
1093 u16_t buflen, copylen, copied;
1094 int i;
1095
1096 LWIP_UNUSED_ARG(dbg_s);
1097 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);
1098
1099 if (flags & MSG_DONTWAIT) {
1100 apiflags = NETCONN_DONTBLOCK;
1101 } else {
1102 apiflags = 0;
1103 }
1104
1105 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1106 /* Check if there is data left from the last recv operation. */
1107 buf = sock->lastdata.netbuf;
1108 if (buf == NULL) {
1109 /* No data was left from the previous operation, so we try to get
1110 some from the network. */
1111 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1112 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1113 err, (void *)buf));
1114
1115 if (err != ERR_OK) {
1116 return err;
1117 }
1118 LWIP_ASSERT("buf != NULL", buf != NULL);
1119 sock->lastdata.netbuf = buf;
1120 }
1121 buflen = buf->p->tot_len;
1122 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1123
1124 copied = 0;
1125 /* copy the pbuf payload into the iovs */
1126 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1127 u16_t len_left = (u16_t)(buflen - copied);
1128 if (msg->msg_iov[i].iov_len > len_left) {
1129 copylen = len_left;
1130 } else {
1131 copylen = (u16_t)msg->msg_iov[i].iov_len;
1132 }
1133
1134 /* copy the contents of the received buffer into
1135 the supplied memory buffer */
1136 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1137 copied = (u16_t)(copied + copylen);
1138 }
1139
1140 /* Check to see where the data came from. */
1141#if !SOCKETS_DEBUG
1142 if (msg->msg_name && msg->msg_namelen)
1143#endif /* !SOCKETS_DEBUG */
1144 {
1145 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1146 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1147 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1148 if (msg->msg_name && msg->msg_namelen) {
1149 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1150 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1151 }
1152 }
1153
1154 /* Initialize flag output */
1155 msg->msg_flags = 0;
1156
1157 if (msg->msg_control) {
1158 u8_t wrote_msg = 0;
1159#if LWIP_NETBUF_RECVINFO
1160 /* Check if packet info was recorded */
1161 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1162 if (IP_IS_V4(&buf->toaddr)) {
1163#if LWIP_IPV4
1164 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1165 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1166 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1167 chdr->cmsg_level = IPPROTO_IP;
1168 chdr->cmsg_type = IP_PKTINFO;
1169 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1170 pkti->ipi_ifindex = buf->p->if_idx;
1171 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1172 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1173 wrote_msg = 1;
1174 } else {
1175 msg->msg_flags |= MSG_CTRUNC;
1176 }
1177#endif /* LWIP_IPV4 */
1178 }
1179 }
1180#endif /* LWIP_NETBUF_RECVINFO */
1181
1182 if (!wrote_msg) {
1183 msg->msg_controllen = 0;
1184 }
1185 }
1186
1187 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1188 if ((flags & MSG_PEEK) == 0) {
1189 sock->lastdata.netbuf = NULL;
1190 netbuf_delete(buf);
1191 }
1192 if (datagram_len) {
1193 *datagram_len = buflen;
1194 }
1195 return ERR_OK;
1196}
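
/* Illustrative application-level sketch of the control-message path above,
 * assuming LWIP_NETBUF_RECVINFO is enabled and IP_PKTINFO has been switched on
 * for the UDP socket 's':
 *
 *   int on = 1;
 *   lwip_setsockopt(s, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
 *
 *   char data[512];
 *   u8_t ctrl[CMSG_SPACE(sizeof(struct in_pktinfo))];
 *   struct iovec vec = { data, sizeof(data) };
 *   struct msghdr msg;
 *   memset(&msg, 0, sizeof(msg));
 *   msg.msg_iov = &vec;
 *   msg.msg_iovlen = 1;
 *   msg.msg_control = ctrl;
 *   msg.msg_controllen = sizeof(ctrl);
 *   if (lwip_recvmsg(s, &msg, 0) >= 0) {
 *     struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *     if ((c != NULL) && (c->cmsg_level == IPPROTO_IP) && (c->cmsg_type == IP_PKTINFO)) {
 *       struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(c);
 *       // pi->ipi_ifindex and pi->ipi_addr identify the receiving interface/address
 *     }
 *   }
 */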
1197
1198ssize_t
1199lwip_recvfrom(int s, void *mem, size_t len, int flags,
1200 struct sockaddr *from, socklen_t *fromlen)
1201{
1202 struct lwip_sock *sock;
1203 ssize_t ret;
1204
1205 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
1206 sock = get_socket(s);
1207 if (!sock) {
1208 return -1;
1209 }
1210#if LWIP_TCP
1211 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1212 ret = lwip_recv_tcp(sock, mem, len, flags);
1213 lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
1214 done_socket(sock);
1215 return ret;
1216 } else
1217#endif
1218 {
1219 u16_t datagram_len = 0;
1220 struct iovec vec;
1221 struct msghdr msg;
1222 err_t err;
1223 vec.iov_base = mem;
1224 vec.iov_len = len;
1225 msg.msg_control = NULL;
1226 msg.msg_controllen = 0;
1227 msg.msg_flags = 0;
1228 msg.msg_iov = &vec;
1229 msg.msg_iovlen = 1;
1230 msg.msg_name = from;
1231 msg.msg_namelen = (fromlen ? *fromlen : 0);
1232 err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
1233 if (err != ERR_OK) {
1234 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1235 s, lwip_strerr(err)));
1236 sock_set_errno(sock, err_to_errno(err));
1237 done_socket(sock);
1238 return -1;
1239 }
1240 ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
1241 if (fromlen) {
1242 *fromlen = msg.msg_namelen;
1243 }
1244 }
1245
1246 sock_set_errno(sock, 0);
1247 done_socket(sock);
1248 return ret;
1249}
1250
1251ssize_t
1252lwip_read(int s, void *mem, size_t len)
1253{
1254 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1255}
1256
1257ssize_t
1258lwip_readv(int s, const struct iovec *iov, int iovcnt)
1259{
1260 struct msghdr msg;
1261
1262 msg.msg_name = NULL;
1263 msg.msg_namelen = 0;
1264 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1265 Blame the opengroup standard for this inconsistency. */
1266 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1267 msg.msg_iovlen = iovcnt;
1268 msg.msg_control = NULL;
1269 msg.msg_controllen = 0;
1270 msg.msg_flags = 0;
1271 return lwip_recvmsg(s, &msg, 0);
1272}
1273
1274ssize_t
1275lwip_recv(int s, void *mem, size_t len, int flags)
1276{
1277 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1278}
1279
1280ssize_t
1281lwip_recvmsg(int s, struct msghdr *message, int flags)
1282{
1283 struct lwip_sock *sock;
1284 int i;
1285 ssize_t buflen;
1286
1287 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
1288 LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
1289 LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
1290 set_errno(EOPNOTSUPP); return -1;);
1291
1292 if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
1293 set_errno(EMSGSIZE);
1294 return -1;
1295 }
1296
1297 sock = get_socket(s);
1298 if (!sock) {
1299 return -1;
1300 }
1301
1302 /* check for valid vectors */
1303 buflen = 0;
1304 for (i = 0; i < message->msg_iovlen; i++) {
1305 if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
1306 ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
1307 ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
1308 sock_set_errno(sock, err_to_errno(ERR_VAL));
1309 done_socket(sock);
1310 return -1;
1311 }
1312 buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
1313 }
1314
1315 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1316#if LWIP_TCP
1317 int recv_flags = flags;
1318 message->msg_flags = 0;
1319 /* recv the data */
1320 buflen = 0;
1321 for (i = 0; i < message->msg_iovlen; i++) {
1322 /* try to receive into this vector's buffer */
1323 ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
1324 if (recvd_local > 0) {
1325 /* sum up received bytes */
1326 buflen += recvd_local;
1327 }
1328 if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
1329 (flags & MSG_PEEK)) {
1330 /* returned prematurely (or peeking, which might actually be limited to the first iov) */
1331 if (buflen <= 0) {
1332 /* nothing received at all, propagate the error */
1333 buflen = recvd_local;
1334 }
1335 break;
1336 }
1337 /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
1338 recv_flags |= MSG_DONTWAIT;
1339 }
1340 if (buflen > 0) {
1341 /* reset socket error since we have received something */
1342 sock_set_errno(sock, 0);
1343 }
1344 /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
1345 done_socket(sock);
1346 return buflen;
1347#else /* LWIP_TCP */
1348 sock_set_errno(sock, err_to_errno(ERR_ARG));
1349 done_socket(sock);
1350 return -1;
1351#endif /* LWIP_TCP */
1352 }
1353 /* else, UDP and RAW NETCONNs */
1354#if LWIP_UDP || LWIP_RAW
1355 {
1356 u16_t datagram_len = 0;
1357 err_t err;
1358 err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
1359 if (err != ERR_OK) {
1360 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1361 s, lwip_strerr(err)));
1362 sock_set_errno(sock, err_to_errno(err));
1363 done_socket(sock);
1364 return -1;
1365 }
1366 if (datagram_len > buflen) {
1367 message->msg_flags |= MSG_TRUNC;
1368 }
1369
1370 sock_set_errno(sock, 0);
1371 done_socket(sock);
1372 return (int)datagram_len;
1373 }
1374#else /* LWIP_UDP || LWIP_RAW */
1375 sock_set_errno(sock, err_to_errno(ERR_ARG));
1376 done_socket(sock);
1377 return -1;
1378#endif /* LWIP_UDP || LWIP_RAW */
1379}
1380
1381ssize_t
1382lwip_send(int s, const void *data, size_t size, int flags)
1383{
1384 struct lwip_sock *sock;
1385 err_t err;
1386 u8_t write_flags;
1387 size_t written;
1388
1389 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
1390 s, data, size, flags));
1391
1392 sock = get_socket(s);
1393 if (!sock) {
1394 return -1;
1395 }
1396
1397 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
1398#if (LWIP_UDP || LWIP_RAW)
1399 done_socket(sock);
1400 return lwip_sendto(s, data, size, flags, NULL, 0);
1401#else /* (LWIP_UDP || LWIP_RAW) */
1402 sock_set_errno(sock, err_to_errno(ERR_ARG));
1403 done_socket(sock);
1404 return -1;
1405#endif /* (LWIP_UDP || LWIP_RAW) */
1406 }
1407
1408 write_flags = (u8_t)(NETCONN_COPY |
1409 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1410 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1411 written = 0;
1412 err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
1413
1414 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
1415 sock_set_errno(sock, err_to_errno(err));
1416 done_socket(sock);
1417 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1418 return (err == ERR_OK ? (ssize_t)written : -1);
1419}
1420
1421ssize_t
1422lwip_sendmsg(int s, const struct msghdr *msg, int flags)
1423{
1424 struct lwip_sock *sock;
1425#if LWIP_TCP
1426 u8_t write_flags;
1427 size_t written;
1428#endif
1429 err_t err = ERR_OK;
1430
1431 sock = get_socket(s);
1432 if (!sock) {
1433 return -1;
1434 }
1435
1436 LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
1437 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1438 LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
1439 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1440 LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
1441 sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;);
1442 LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
1443 sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;);
1444
1445 LWIP_UNUSED_ARG(msg->msg_control);
1446 LWIP_UNUSED_ARG(msg->msg_controllen);
1447 LWIP_UNUSED_ARG(msg->msg_flags);
1448
1449 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1450#if LWIP_TCP
1451 write_flags = (u8_t)(NETCONN_COPY |
1452 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1453 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1454
1455 written = 0;
1456 err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
1457 sock_set_errno(sock, err_to_errno(err));
1458 done_socket(sock);
1459 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1460 return (err == ERR_OK ? (ssize_t)written : -1);
1461#else /* LWIP_TCP */
1462 sock_set_errno(sock, err_to_errno(ERR_ARG));
1463 done_socket(sock);
1464 return -1;
1465#endif /* LWIP_TCP */
1466 }
1467 /* else, UDP and RAW NETCONNs */
1468#if LWIP_UDP || LWIP_RAW
1469 {
1470 struct netbuf chain_buf;
1471 int i;
1472 ssize_t size = 0;
1473
1474 LWIP_UNUSED_ARG(flags);
1475 LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
1476 IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
1477 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1478
1479 /* initialize chain buffer with destination */
1480 memset(&chain_buf, 0, sizeof(struct netbuf));
1481 if (msg->msg_name) {
1482 u16_t remote_port;
1483 SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
1484 netbuf_fromport(&chain_buf) = remote_port;
1485 }
1486#if LWIP_NETIF_TX_SINGLE_PBUF
1487 for (i = 0; i < msg->msg_iovlen; i++) {
1488 size += msg->msg_iov[i].iov_len;
1489 if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
1490 /* overflow */
1491 goto sendmsg_emsgsize;
1492 }
1493 }
1494 if (size > 0xFFFF) {
1495 /* overflow */
1496 goto sendmsg_emsgsize;
1497 }
1498 /* Allocate a new netbuf and copy the data into it. */
1499 if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
1500 err = ERR_MEM;
1501 } else {
1502 /* flatten the IO vectors */
1503 size_t offset = 0;
1504 for (i = 0; i < msg->msg_iovlen; i++) {
1505 MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
1506 offset += msg->msg_iov[i].iov_len;
1507 }
1508#if LWIP_CHECKSUM_ON_COPY
1509 {
1510 /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
1511 u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
1512 netbuf_set_chksum(&chain_buf, chksum);
1513 }
1514#endif /* LWIP_CHECKSUM_ON_COPY */
1515 err = ERR_OK;
1516 }
1517#else /* LWIP_NETIF_TX_SINGLE_PBUF */
1518 /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
1519 manually to avoid having to allocate, chain, and delete a netbuf for each iov */
1520 for (i = 0; i < msg->msg_iovlen; i++) {
1521 struct pbuf *p;
1522 if (msg->msg_iov[i].iov_len > 0xFFFF) {
1523 /* overflow */
1524 goto sendmsg_emsgsize;
1525 }
1526 p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
1527 if (p == NULL) {
1528 err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
1529 break;
1530 }
1531 p->payload = msg->msg_iov[i].iov_base;
1532 p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
1533 /* netbuf empty, add new pbuf */
1534 if (chain_buf.p == NULL) {
1535 chain_buf.p = chain_buf.ptr = p;
1536 /* add pbuf to existing pbuf chain */
1537 } else {
1538 if (chain_buf.p->tot_len + p->len > 0xffff) {
1539 /* overflow */
1540 pbuf_free(p);
1541 goto sendmsg_emsgsize;
1542 }
1543 pbuf_cat(chain_buf.p, p);
1544 }
1545 }
1546 /* save size of total chain */
1547 if (err == ERR_OK) {
1548 size = netbuf_len(&chain_buf);
1549 }
1550#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1551
1552 if (err == ERR_OK) {
1553#if LWIP_IPV4 && LWIP_IPV6
1554 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1555 if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
1556 unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
1557 IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
1558 }
1559#endif /* LWIP_IPV4 && LWIP_IPV6 */
1560
1561 /* send the data */
1562 err = netconn_send(sock->conn, &chain_buf);
1563 }
1564
1565 /* deallocate the buffer */
1566 netbuf_free(&chain_buf);
1567
1568 sock_set_errno(sock, err_to_errno(err));
1569 done_socket(sock);
1570 return (err == ERR_OK ? size : -1);
1571sendmsg_emsgsize:
1572 sock_set_errno(sock, EMSGSIZE);
1573 netbuf_free(&chain_buf);
1574 done_socket(sock);
1575 return -1;
1576 }
1577#else /* LWIP_UDP || LWIP_RAW */
1578 sock_set_errno(sock, err_to_errno(ERR_ARG));
1579 done_socket(sock);
1580 return -1;
1581#endif /* LWIP_UDP || LWIP_RAW */
1582}
1583
1584ssize_t
1585lwip_sendto(int s, const void *data, size_t size, int flags,
1586 const struct sockaddr *to, socklen_t tolen)
1587{
1588 struct lwip_sock *sock;
1589 err_t err;
1590 u16_t short_size;
1591 u16_t remote_port;
1592 struct netbuf buf;
1593
1594 sock = get_socket(s);
1595 if (!sock) {
1596 return -1;
1597 }
1598
1599 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1600#if LWIP_TCP
1601 done_socket(sock);
1602 return lwip_send(s, data, size, flags);
1603#else /* LWIP_TCP */
1604 LWIP_UNUSED_ARG(flags);
1605 sock_set_errno(sock, err_to_errno(ERR_ARG));
1606 done_socket(sock);
1607 return -1;
1608#endif /* LWIP_TCP */
1609 }
1610
1611 if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
1612 /* cannot fit into one datagram (at least for us) */
1613 sock_set_errno(sock, EMSGSIZE);
1614 done_socket(sock);
1615 return -1;
1616 }
1617 short_size = (u16_t)size;
1618 LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
1619 (IS_SOCK_ADDR_LEN_VALID(tolen) &&
1620 ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
1621 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1622 LWIP_UNUSED_ARG(tolen);
1623
1624 /* initialize a buffer */
1625 buf.p = buf.ptr = NULL;
1626#if LWIP_CHECKSUM_ON_COPY
1627 buf.flags = 0;
1628#endif /* LWIP_CHECKSUM_ON_COPY */
1629 if (to) {
1630 SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
1631 } else {
1632 remote_port = 0;
1633 ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
1634 }
1635 netbuf_fromport(&buf) = remote_port;
1636
1637
1638 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
1639 s, data, short_size, flags));
1640 ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
1641 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
1642
1643 /* make the buffer point to the data that should be sent */
1644#if LWIP_NETIF_TX_SINGLE_PBUF
1645 /* Allocate a new netbuf and copy the data into it. */
1646 if (netbuf_alloc(&buf, short_size) == NULL) {
1647 err = ERR_MEM;
1648 } else {
1649#if LWIP_CHECKSUM_ON_COPY
1650 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
1651 u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
1652 netbuf_set_chksum(&buf, chksum);
1653 } else
1654#endif /* LWIP_CHECKSUM_ON_COPY */
1655 {
1656 MEMCPY(buf.p->payload, data, short_size);
1657 }
1658 err = ERR_OK;
1659 }
1660#else /* LWIP_NETIF_TX_SINGLE_PBUF */
1661 err = netbuf_ref(&buf, data, short_size);
1662#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1663 if (err == ERR_OK) {
1664#if LWIP_IPV4 && LWIP_IPV6
1665 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1666 if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
1667 unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
1668 IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
1669 }
1670#endif /* LWIP_IPV4 && LWIP_IPV6 */
1671
1672 /* send the data */
1673 err = netconn_send(sock->conn, &buf);
1674 }
1675
1676  /* deallocate the buffer */
1677 netbuf_free(&buf);
1678
1679 sock_set_errno(sock, err_to_errno(err));
1680 done_socket(sock);
1681 return (err == ERR_OK ? short_size : -1);
1682}
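
/* Usage sketch: sending one UDP datagram with lwip_sendto(). The payload must
 * fit into a single datagram (at most LWIP_MIN(0xFFFF, SSIZE_MAX) bytes, see
 * the check above), otherwise EMSGSIZE is reported. The destination address
 * and port below are placeholders; a configured netif is assumed.
 *
 *   int s = lwip_socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
 *   struct sockaddr_in to;
 *   static const char payload[] = "hello";
 *   memset(&to, 0, sizeof(to));
 *   to.sin_len = sizeof(to);
 *   to.sin_family = AF_INET;
 *   to.sin_port = PP_HTONS(7);
 *   to.sin_addr.s_addr = inet_addr("192.168.0.10");
 *   if (lwip_sendto(s, payload, sizeof(payload), 0,
 *                   (struct sockaddr *)&to, sizeof(to)) < 0) {
 *     // errno holds the translated lwIP error (e.g. EMSGSIZE, ENOMEM)
 *   }
 *   lwip_close(s);
 */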
1683
1684int
1685lwip_socket(int domain, int type, int protocol)
1686{
1687 struct netconn *conn;
1688 int i;
1689
1690 LWIP_UNUSED_ARG(domain); /* @todo: check this */
1691
1692 /* create a netconn */
1693 switch (type) {
1694 case SOCK_RAW:
1695 conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
1696 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
1697 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
1698 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1699 break;
1700 case SOCK_DGRAM:
1701 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
1702 ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
1703 DEFAULT_SOCKET_EVENTCB);
1704 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
1705 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1706#if LWIP_NETBUF_RECVINFO
1707 if (conn) {
1708 /* netconn layer enables pktinfo by default, sockets default to off */
1709 conn->flags &= ~NETCONN_FLAG_PKTINFO;
1710 }
1711#endif /* LWIP_NETBUF_RECVINFO */
1712 break;
1713 case SOCK_STREAM:
1714 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
1715 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
1716 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1717 break;
1718 default:
1719 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
1720 domain, type, protocol));
1721 set_errno(EINVAL);
1722 return -1;
1723 }
1724
1725 if (!conn) {
1726 LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
1727 set_errno(ENOBUFS);
1728 return -1;
1729 }
1730
1731 i = alloc_socket(conn, 0);
1732
1733 if (i == -1) {
1734 netconn_delete(conn);
1735 set_errno(ENFILE);
1736 return -1;
1737 }
1738 conn->socket = i;
1739 done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
1740 LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
1741 set_errno(0);
1742 return i;
1743}
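
/* Usage sketch: the type argument selects which netconn is created above.
 * SOCK_STREAM yields a TCP netconn, SOCK_DGRAM a UDP one (UDP-Lite when
 * protocol == IPPROTO_UDPLITE) and SOCK_RAW a raw-IP netconn.
 *
 *   int tcp_s = lwip_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
 *   int udp_s = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *   if ((tcp_s < 0) || (udp_s < 0)) {
 *     // errno is EINVAL for an unknown type, ENOBUFS when no netconn is
 *     // available and ENFILE when all socket slots are in use
 *   }
 */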
1744
1745ssize_t
1746lwip_write(int s, const void *data, size_t size)
1747{
1748 return lwip_send(s, data, size, 0);
1749}
1750
1751ssize_t
1752lwip_writev(int s, const struct iovec *iov, int iovcnt)
1753{
1754 struct msghdr msg;
1755
1756 msg.msg_name = NULL;
1757 msg.msg_namelen = 0;
1758 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1759 Blame the opengroup standard for this inconsistency. */
1760 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1761 msg.msg_iovlen = iovcnt;
1762 msg.msg_control = NULL;
1763 msg.msg_controllen = 0;
1764 msg.msg_flags = 0;
1765 return lwip_sendmsg(s, &msg, 0);
1766}
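
/* Usage sketch: lwip_writev() gathers several buffers into one lwip_sendmsg()
 * call; on a UDP or RAW socket the pieces form a single datagram. 's' is an
 * already connected socket (placeholder).
 *
 *   static char hdr[]  = "header";
 *   static char body[] = "payload";
 *   struct iovec iov[2];
 *   ssize_t written;
 *   iov[0].iov_base = hdr;  iov[0].iov_len = sizeof(hdr) - 1;
 *   iov[1].iov_base = body; iov[1].iov_len = sizeof(body) - 1;
 *   written = lwip_writev(s, iov, 2);
 */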
1767
1768#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1769/* Add select_cb to select_cb_list. */
1770static void
1771lwip_link_select_cb(struct lwip_select_cb *select_cb)
1772{
1773 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1774
1775 /* Protect the select_cb_list */
1776 LWIP_SOCKET_SELECT_PROTECT(lev);
1777
1778 /* Put this select_cb on top of list */
1779 select_cb->next = select_cb_list;
1780 if (select_cb_list != NULL) {
1781 select_cb_list->prev = select_cb;
1782 }
1783 select_cb_list = select_cb;
1784#if !LWIP_TCPIP_CORE_LOCKING
1785 /* Increasing this counter tells select_check_waiters that the list has changed. */
1786 select_cb_ctr++;
1787#endif
1788
1789 /* Now we can safely unprotect */
1790 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1791}
1792
1793/* Remove select_cb from select_cb_list. */
1794static void
1795lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1796{
1797 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1798
1799 /* Take us off the list */
1800 LWIP_SOCKET_SELECT_PROTECT(lev);
1801 if (select_cb->next != NULL) {
1802 select_cb->next->prev = select_cb->prev;
1803 }
1804 if (select_cb_list == select_cb) {
1805 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1806 select_cb_list = select_cb->next;
1807 } else {
1808 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1809 select_cb->prev->next = select_cb->next;
1810 }
1811#if !LWIP_TCPIP_CORE_LOCKING
1812 /* Increasing this counter tells select_check_waiters that the list has changed. */
1813 select_cb_ctr++;
1814#endif
1815 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1816}
1817#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1818
1819#if LWIP_SOCKET_SELECT
1820/**
1821 * Go through the readset, writeset and exceptset and check which of the sockets
1822 * set in them have pending events. On return, readset_out, writeset_out and
1823 * exceptset_out contain only the sockets that had events.
1824 *
1825 * @param maxfdp1 the highest socket index in the sets
1826 * @param readset_in set of sockets to check for read events
1827 * @param writeset_in set of sockets to check for write events
1828 * @param exceptset_in set of sockets to check for error events
1829 * @param readset_out set of sockets that had read events
1830 * @param writeset_out set of sockets that had write events
1831 * @param exceptset_out set of sockets that had error events
1832 * @return number of sockets that had events (read/write/exception) (>= 0)
1833 */
1834static int
1835lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1836 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1837{
1838 int i, nready = 0;
1839 fd_set lreadset, lwriteset, lexceptset;
1840 struct lwip_sock *sock;
1841 SYS_ARCH_DECL_PROTECT(lev);
1842
1843 FD_ZERO(&lreadset);
1844 FD_ZERO(&lwriteset);
1845 FD_ZERO(&lexceptset);
1846
1847  /* Go through each socket in each list to count the number of sockets which
1848 currently match */
1849 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1850 /* if this FD is not in the set, continue */
1851 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1852 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1853 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1854 continue;
1855 }
1856 /* First get the socket's status (protected)... */
1857 SYS_ARCH_PROTECT(lev);
1858 sock = tryget_socket_unconn_locked(i);
1859 if (sock != NULL) {
1860 void *lastdata = sock->lastdata.pbuf;
1861 s16_t rcvevent = sock->rcvevent;
1862 u16_t sendevent = sock->sendevent;
1863 u16_t errevent = sock->errevent;
1864 SYS_ARCH_UNPROTECT(lev);
1865
1866 /* ... then examine it: */
1867 /* See if netconn of this socket is ready for read */
1868 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1869 FD_SET(i, &lreadset);
1870 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1871 nready++;
1872 }
1873 /* See if netconn of this socket is ready for write */
1874 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1875 FD_SET(i, &lwriteset);
1876 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1877 nready++;
1878 }
1879 /* See if netconn of this socket had an error */
1880 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1881 FD_SET(i, &lexceptset);
1882 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1883 nready++;
1884 }
1885 done_socket(sock);
1886 } else {
1887 SYS_ARCH_UNPROTECT(lev);
1888      /* not a valid open socket */
1889 return -1;
1890 }
1891 }
1892 /* copy local sets to the ones provided as arguments */
1893 *readset_out = lreadset;
1894 *writeset_out = lwriteset;
1895 *exceptset_out = lexceptset;
1896
1897 LWIP_ASSERT("nready >= 0", nready >= 0);
1898 return nready;
1899}
1900
1901#if LWIP_NETCONN_FULLDUPLEX
1902/* Mark all of the set sockets in one of the three fdsets passed to select as used.
1903 * All sockets are marked (and later unmarked), whether they are open or not.
1904 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1905 */
1906static void
1907lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1908{
1909 SYS_ARCH_DECL_PROTECT(lev);
1910 if (fdset) {
1911 int i;
1912 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1913 /* if this FD is in the set, lock it (unless already done) */
1914 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1915 struct lwip_sock *sock;
1916 SYS_ARCH_PROTECT(lev);
1917 sock = tryget_socket_unconn_locked(i);
1918 if (sock != NULL) {
1919 /* leave the socket used until released by lwip_select_dec_sockets_used */
1920 FD_SET(i, used_sockets);
1921 }
1922 SYS_ARCH_UNPROTECT(lev);
1923 }
1924 }
1925 }
1926}
1927
1928/* Mark all sockets passed to select as used to prevent them from being freed
1929 * from other threads while select is running.
1930 * Marked sockets are added to 'used_sockets' to mark them only once and be able
1931 * to unmark them correctly.
1932 */
1933static void
1934lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
1935{
1936 FD_ZERO(used_sockets);
1937 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
1938 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
1939 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
1940}
1941
1942/* Let go all sockets that were marked as used when starting select */
1943static void
1944lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
1945{
1946 int i;
1947 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1948    /* if this FD was marked as used, release it */
1949 if (FD_ISSET(i, used_sockets)) {
1950 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
1951 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
1952 if (sock != NULL) {
1953 done_socket(sock);
1954 }
1955 }
1956 }
1957}
1958#else /* LWIP_NETCONN_FULLDUPLEX */
1959#define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
1960#define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
1961#endif /* LWIP_NETCONN_FULLDUPLEX */
1962
1963int
1964lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
1965 struct timeval *timeout)
1966{
1967 u32_t waitres = 0;
1968 int nready;
1969 fd_set lreadset, lwriteset, lexceptset;
1970 u32_t msectimeout;
1971 int i;
1972 int maxfdp2;
1973#if LWIP_NETCONN_SEM_PER_THREAD
1974 int waited = 0;
1975#endif
1976#if LWIP_NETCONN_FULLDUPLEX
1977 fd_set used_sockets;
1978#endif
1979 SYS_ARCH_DECL_PROTECT(lev);
1980
1981 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
1982 maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
1983 timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
1984 timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));
1985
1986 if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
1987 set_errno(EINVAL);
1988 return -1;
1989 }
1990
1991 lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);
1992
1993  /* Go through each socket in each list to count the number of sockets which
1994 currently match */
1995 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
1996
1997 if (nready < 0) {
1998 /* one of the sockets in one of the fd_sets was invalid */
1999 set_errno(EBADF);
2000 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2001 return -1;
2002 } else if (nready > 0) {
2003 /* one or more sockets are set, no need to wait */
2004 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2005 } else {
2006 /* If we don't have any current events, then suspend if we are supposed to */
2007 if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
2008 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
2009 /* This is OK as the local fdsets are empty and nready is zero,
2010 or we would have returned earlier. */
2011 } else {
2012 /* None ready: add our semaphore to list:
2013 We don't actually need any dynamic memory. Our entry on the
2014 list is only valid while we are in this function, so it's ok
2015 to use local variables (unless we're running in MPU compatible
2016 mode). */
2017 API_SELECT_CB_VAR_DECLARE(select_cb);
2018 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
2019 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2020
2021 API_SELECT_CB_VAR_REF(select_cb).readset = readset;
2022 API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
2023 API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
2024#if LWIP_NETCONN_SEM_PER_THREAD
2025 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2026#else /* LWIP_NETCONN_SEM_PER_THREAD */
2027 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2028 /* failed to create semaphore */
2029 set_errno(ENOMEM);
2030 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2031 API_SELECT_CB_VAR_FREE(select_cb);
2032 return -1;
2033 }
2034#endif /* LWIP_NETCONN_SEM_PER_THREAD */
2035
2036 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2037
2038 /* Increase select_waiting for each socket we are interested in */
2039 maxfdp2 = maxfdp1;
2040 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
2041 if ((readset && FD_ISSET(i, readset)) ||
2042 (writeset && FD_ISSET(i, writeset)) ||
2043 (exceptset && FD_ISSET(i, exceptset))) {
2044 struct lwip_sock *sock;
2045 SYS_ARCH_PROTECT(lev);
2046 sock = tryget_socket_unconn_locked(i);
2047 if (sock != NULL) {
2048 sock->select_waiting++;
2049 if (sock->select_waiting == 0) {
2050 /* overflow - too many threads waiting */
2051 sock->select_waiting--;
2052 nready = -1;
2053 maxfdp2 = i;
2054 SYS_ARCH_UNPROTECT(lev);
2055 done_socket(sock);
2056 set_errno(EBUSY);
2057 break;
2058 }
2059 SYS_ARCH_UNPROTECT(lev);
2060 done_socket(sock);
2061 } else {
2062 /* Not a valid socket */
2063 nready = -1;
2064 maxfdp2 = i;
2065 SYS_ARCH_UNPROTECT(lev);
2066 set_errno(EBADF);
2067 break;
2068 }
2069 }
2070 }
2071
2072 if (nready >= 0) {
2073 /* Call lwip_selscan again: there could have been events between
2074 the last scan (without us on the list) and putting us on the list! */
2075 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2076 if (!nready) {
2077 /* Still none ready, just wait to be woken */
2078 if (timeout == 0) {
2079 /* Wait forever */
2080 msectimeout = 0;
2081 } else {
2082 long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
2083 if (msecs_long <= 0) {
2084 /* Wait 1ms at least (0 means wait forever) */
2085 msectimeout = 1;
2086 } else {
2087 msectimeout = (u32_t)msecs_long;
2088 }
2089 }
2090
2091 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2092#if LWIP_NETCONN_SEM_PER_THREAD
2093 waited = 1;
2094#endif
2095 }
2096 }
2097
2098 /* Decrease select_waiting for each socket we are interested in */
2099 for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
2100 if ((readset && FD_ISSET(i, readset)) ||
2101 (writeset && FD_ISSET(i, writeset)) ||
2102 (exceptset && FD_ISSET(i, exceptset))) {
2103 struct lwip_sock *sock;
2104 SYS_ARCH_PROTECT(lev);
2105 sock = tryget_socket_unconn_locked(i);
2106 if (sock != NULL) {
2107 /* for now, handle select_waiting==0... */
2108 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2109 if (sock->select_waiting > 0) {
2110 sock->select_waiting--;
2111 }
2112 SYS_ARCH_UNPROTECT(lev);
2113 done_socket(sock);
2114 } else {
2115 SYS_ARCH_UNPROTECT(lev);
2116 /* Not a valid socket */
2117 nready = -1;
2118 set_errno(EBADF);
2119 }
2120 }
2121 }
2122
2123 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2124
2125#if LWIP_NETCONN_SEM_PER_THREAD
2126 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2127 /* don't leave the thread-local semaphore signalled */
2128 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2129 }
2130#else /* LWIP_NETCONN_SEM_PER_THREAD */
2131 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2132#endif /* LWIP_NETCONN_SEM_PER_THREAD */
2133 API_SELECT_CB_VAR_FREE(select_cb);
2134
2135 if (nready < 0) {
2136 /* This happens when a socket got closed while waiting */
2137 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2138 return -1;
2139 }
2140
2141 if (waitres == SYS_ARCH_TIMEOUT) {
2142 /* Timeout */
2143 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
2144 /* This is OK as the local fdsets are empty and nready is zero,
2145 or we would have returned earlier. */
2146 } else {
2147 /* See what's set now after waiting */
2148 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2149 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2150 }
2151 }
2152 }
2153
2154 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2155 set_errno(0);
2156 if (readset) {
2157 *readset = lreadset;
2158 }
2159 if (writeset) {
2160 *writeset = lwriteset;
2161 }
2162 if (exceptset) {
2163 *exceptset = lexceptset;
2164 }
2165 return nready;
2166}
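
/* Usage sketch: waiting up to two seconds for socket 's' (placeholder) to
 * become readable. A NULL timeout would block until an event arrives; a zero
 * timeval only performs the initial scan, as handled above.
 *
 *   fd_set rset;
 *   struct timeval tv;
 *   int n;
 *   FD_ZERO(&rset);
 *   FD_SET(s, &rset);
 *   tv.tv_sec = 2;
 *   tv.tv_usec = 0;
 *   n = lwip_select(s + 1, &rset, NULL, NULL, &tv);
 *   if ((n > 0) && FD_ISSET(s, &rset)) {
 *     // a recv()/accept() on 's' will not block now
 *   } else if (n == 0) {
 *     // timeout expired without events
 *   }
 */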
2167#endif /* LWIP_SOCKET_SELECT */
2168
2169#if LWIP_SOCKET_POLL
2170/** Options for the lwip_pollscan function. */
2171enum lwip_pollscan_opts
2172{
2173 /** Clear revents in each struct pollfd. */
2174 LWIP_POLLSCAN_CLEAR = 1,
2175
2176 /** Increment select_waiting in each struct lwip_sock. */
2177 LWIP_POLLSCAN_INC_WAIT = 2,
2178
2179 /** Decrement select_waiting in each struct lwip_sock. */
2180 LWIP_POLLSCAN_DEC_WAIT = 4
2181};
2182
2183/**
2184 * Update revents in each struct pollfd.
2185 * Optionally update select_waiting in struct lwip_sock.
2186 *
2187 * @param fds array of structures to update
2188 * @param nfds number of structures in fds
2189 * @param opts what to update and how
2190 * @return number of structures that have revents != 0
2191 */
2192static int
2193lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2194{
2195 int nready = 0;
2196 nfds_t fdi;
2197 struct lwip_sock *sock;
2198 SYS_ARCH_DECL_PROTECT(lev);
2199
2200 /* Go through each struct pollfd in the array. */
2201 for (fdi = 0; fdi < nfds; fdi++) {
2202 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2203 fds[fdi].revents = 0;
2204 }
2205
2206 /* Negative fd means the caller wants us to ignore this struct.
2207 POLLNVAL means we already detected that the fd is invalid;
2208 if another thread has since opened a new socket with that fd,
2209 we must not use that socket. */
2210 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2211 /* First get the socket's status (protected)... */
2212 SYS_ARCH_PROTECT(lev);
2213 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2214 if (sock != NULL) {
2215 void* lastdata = sock->lastdata.pbuf;
2216 s16_t rcvevent = sock->rcvevent;
2217 u16_t sendevent = sock->sendevent;
2218 u16_t errevent = sock->errevent;
2219
2220 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2221 sock->select_waiting++;
2222 if (sock->select_waiting == 0) {
2223 /* overflow - too many threads waiting */
2224 sock->select_waiting--;
2225 nready = -1;
2226 SYS_ARCH_UNPROTECT(lev);
2227 done_socket(sock);
2228 break;
2229 }
2230 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2231 /* for now, handle select_waiting==0... */
2232 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2233 if (sock->select_waiting > 0) {
2234 sock->select_waiting--;
2235 }
2236 }
2237 SYS_ARCH_UNPROTECT(lev);
2238 done_socket(sock);
2239
2240 /* ... then examine it: */
2241 /* See if netconn of this socket is ready for read */
2242 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2243 fds[fdi].revents |= POLLIN;
2244 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2245 }
2246 /* See if netconn of this socket is ready for write */
2247 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2248 fds[fdi].revents |= POLLOUT;
2249 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2250 }
2251 /* See if netconn of this socket had an error */
2252 if (errevent != 0) {
2253 /* POLLERR is output only. */
2254 fds[fdi].revents |= POLLERR;
2255 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2256 }
2257 } else {
2258 /* Not a valid socket */
2259 SYS_ARCH_UNPROTECT(lev);
2260 /* POLLNVAL is output only. */
2261 fds[fdi].revents |= POLLNVAL;
2262 return -1;
2263 }
2264 }
2265
2266 /* Will return the number of structures that have events,
2267 not the number of events. */
2268 if (fds[fdi].revents != 0) {
2269 nready++;
2270 }
2271 }
2272
2273 LWIP_ASSERT("nready >= 0", nready >= 0);
2274 return nready;
2275}
2276
2277#if LWIP_NETCONN_FULLDUPLEX
2278/* Mark all sockets as used.
2279 *
2280 * All sockets are marked (and later unmarked), whether they are open or not.
2281 * This is OK as lwip_pollscan aborts the poll when non-open sockets are found.
2282 */
2283static void
2284lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2285{
2286 nfds_t fdi;
2287
2288  if (fds) {
2289 /* Go through each struct pollfd in the array. */
2290 for (fdi = 0; fdi < nfds; fdi++) {
2291 /* Increase the reference counter */
2292 tryget_socket_unconn(fds[fdi].fd);
2293 }
2294 }
2295}
2296
2297/* Let go all sockets that were marked as used when starting poll */
2298static void
2299lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2300{
2301 nfds_t fdi;
2302
2303  if (fds) {
2304 /* Go through each struct pollfd in the array. */
2305 for (fdi = 0; fdi < nfds; fdi++) {
2306 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2307 if (sock != NULL) {
2308 done_socket(sock);
2309 }
2310 }
2311 }
2312}
2313#else /* LWIP_NETCONN_FULLDUPLEX */
2314#define lwip_poll_inc_sockets_used(fds, nfds)
2315#define lwip_poll_dec_sockets_used(fds, nfds)
2316#endif /* LWIP_NETCONN_FULLDUPLEX */
2317
2318int
2319lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2320{
2321 u32_t waitres = 0;
2322 int nready;
2323 u32_t msectimeout;
2324#if LWIP_NETCONN_SEM_PER_THREAD
2325 int waited = 0;
2326#endif
2327
2328 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2329 (void*)fds, (int)nfds, timeout));
2330 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2331 set_errno(EINVAL); return -1;);
2332
2333 lwip_poll_inc_sockets_used(fds, nfds);
2334
2335  /* Go through each struct pollfd to count the number of structures
2336 which currently match */
2337 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2338
2339 if (nready < 0) {
2340 lwip_poll_dec_sockets_used(fds, nfds);
2341 return -1;
2342 }
2343
2344 /* If we don't have any current events, then suspend if we are supposed to */
2345 if (!nready) {
2346 API_SELECT_CB_VAR_DECLARE(select_cb);
2347
2348 if (timeout == 0) {
2349 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2350 goto return_success;
2351 }
2352 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2353 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2354
2355 /* None ready: add our semaphore to list:
2356 We don't actually need any dynamic memory. Our entry on the
2357 list is only valid while we are in this function, so it's ok
2358 to use local variables. */
2359
2360 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2361 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2362#if LWIP_NETCONN_SEM_PER_THREAD
2363 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2364#else /* LWIP_NETCONN_SEM_PER_THREAD */
2365 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2366 /* failed to create semaphore */
2367 set_errno(EAGAIN);
2368 lwip_poll_dec_sockets_used(fds, nfds);
2369 API_SELECT_CB_VAR_FREE(select_cb);
2370 return -1;
2371 }
2372#endif /* LWIP_NETCONN_SEM_PER_THREAD */
2373
2374 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2375
2376 /* Increase select_waiting for each socket we are interested in.
2377 Also, check for events again: there could have been events between
2378 the last scan (without us on the list) and putting us on the list! */
2379 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2380
2381 if (!nready) {
2382 /* Still none ready, just wait to be woken */
2383 if (timeout < 0) {
2384 /* Wait forever */
2385 msectimeout = 0;
2386 } else {
2387 /* timeout == 0 would have been handled earlier. */
2388 LWIP_ASSERT("timeout > 0", timeout > 0);
2389 msectimeout = timeout;
2390 }
2391 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2392#if LWIP_NETCONN_SEM_PER_THREAD
2393 waited = 1;
2394#endif
2395 }
2396
2397 /* Decrease select_waiting for each socket we are interested in,
2398 and check which events occurred while we waited. */
2399 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2400
2401 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2402
2403#if LWIP_NETCONN_SEM_PER_THREAD
2404    if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2405 /* don't leave the thread-local semaphore signalled */
2406 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2407 }
2408#else /* LWIP_NETCONN_SEM_PER_THREAD */
2409 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2410#endif /* LWIP_NETCONN_SEM_PER_THREAD */
2411 API_SELECT_CB_VAR_FREE(select_cb);
2412
2413 if (nready < 0) {
2414 /* This happens when a socket got closed while waiting */
2415 lwip_poll_dec_sockets_used(fds, nfds);
2416 return -1;
2417 }
2418
2419 if (waitres == SYS_ARCH_TIMEOUT) {
2420 /* Timeout */
2421 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2422 goto return_success;
2423 }
2424 }
2425
2426 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2427return_success:
2428 lwip_poll_dec_sockets_used(fds, nfds);
2429 set_errno(0);
2430 return nready;
2431}
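
/* Usage sketch: polling a single descriptor for 500 ms. A negative timeout
 * waits forever, 0 only performs the initial scan (both handled above).
 * 's' is an open socket (placeholder).
 *
 *   struct pollfd pfd;
 *   int n;
 *   pfd.fd = s;
 *   pfd.events = POLLIN | POLLOUT;
 *   pfd.revents = 0;
 *   n = lwip_poll(&pfd, 1, 500);
 *   if ((n > 0) && (pfd.revents & POLLIN)) {
 *     // readable without blocking
 *   }
 */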
2432
2433/**
2434 * Check whether event_callback should wake up a thread waiting in
2435 * lwip_poll.
2436 */
2437static int
2438lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2439{
2440 nfds_t fdi;
2441 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2442 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2443 if (pollfd->fd == fd) {
2444 /* Do not update pollfd->revents right here;
2445 that would be a data race because lwip_pollscan
2446 accesses revents without protecting. */
2447 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2448 return 1;
2449 }
2450 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2451 return 1;
2452 }
2453 if (has_errevent) {
2454 /* POLLERR is output only. */
2455 return 1;
2456 }
2457 }
2458 }
2459 return 0;
2460}
2461#endif /* LWIP_SOCKET_POLL */
2462
2463#if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2464/**
2465 * Callback registered in the netconn layer for each socket-netconn.
2466 * Processes recvevent (data available) and wakes up tasks waiting for select.
2467 *
2468 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2469 * must have the core lock held when signaling the following events
2470 * as they might cause select_list_cb to be checked:
2471 * NETCONN_EVT_RCVPLUS
2472 * NETCONN_EVT_SENDPLUS
2473 * NETCONN_EVT_ERROR
2474 * This requirement will be asserted in select_check_waiters()
2475 */
2476static void
2477event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2478{
2479 int s, check_waiters;
2480 struct lwip_sock *sock;
2481 SYS_ARCH_DECL_PROTECT(lev);
2482
2483 LWIP_UNUSED_ARG(len);
2484
2485 /* Get socket */
2486 if (conn) {
2487 s = conn->socket;
2488 if (s < 0) {
2489 /* Data comes in right away after an accept, even though
2490 * the server task might not have created a new socket yet.
2491 * Just count down (or up) if that's the case and we
2492 * will use the data later. Note that only receive events
2493 * can happen before the new socket is set up. */
2494 SYS_ARCH_PROTECT(lev);
2495 if (conn->socket < 0) {
2496 if (evt == NETCONN_EVT_RCVPLUS) {
2497          /* conn->socket is -1 on initialization;
2498 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2499 conn->socket--;
2500 }
2501 SYS_ARCH_UNPROTECT(lev);
2502 return;
2503 }
2504 s = conn->socket;
2505 SYS_ARCH_UNPROTECT(lev);
2506 }
2507
2508 sock = get_socket(s);
2509 if (!sock) {
2510 return;
2511 }
2512 } else {
2513 return;
2514 }
2515
2516 check_waiters = 1;
2517 SYS_ARCH_PROTECT(lev);
2518 /* Set event as required */
2519 switch (evt) {
2520 case NETCONN_EVT_RCVPLUS:
2521 sock->rcvevent++;
2522 if (sock->rcvevent > 1) {
2523 check_waiters = 0;
2524 }
2525 break;
2526 case NETCONN_EVT_RCVMINUS:
2527 sock->rcvevent--;
2528 check_waiters = 0;
2529 break;
2530 case NETCONN_EVT_SENDPLUS:
2531 if (sock->sendevent) {
2532 check_waiters = 0;
2533 }
2534 sock->sendevent = 1;
2535 break;
2536 case NETCONN_EVT_SENDMINUS:
2537 sock->sendevent = 0;
2538 check_waiters = 0;
2539 break;
2540 case NETCONN_EVT_ERROR:
2541 sock->errevent = 1;
2542 break;
2543 default:
2544 LWIP_ASSERT("unknown event", 0);
2545 break;
2546 }
2547
2548 if (sock->select_waiting && check_waiters) {
2549 /* Save which events are active */
2550 int has_recvevent, has_sendevent, has_errevent;
2551 has_recvevent = sock->rcvevent > 0;
2552 has_sendevent = sock->sendevent != 0;
2553 has_errevent = sock->errevent != 0;
2554 SYS_ARCH_UNPROTECT(lev);
2555 /* Check any select calls waiting on this socket */
2556 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2557 } else {
2558 SYS_ARCH_UNPROTECT(lev);
2559 }
2560 done_socket(sock);
2561}
2562
2563/**
2564 * Check if any select waiters are waiting on this socket and its events
2565 *
2566 * @note on synchronization of select_cb_list:
2567 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2568 * the core lock. We do a single pass through the list and signal any waiters.
2569 * Core lock should already be held when calling here!!!!
2570
2571 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2572 * of the loop, thus creating a possibility where a thread could modify the
2573 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2574 * detect this change and restart the list walk. The list is expected to be small
2575 */
2576static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2577{
2578 struct lwip_select_cb *scb;
2579#if !LWIP_TCPIP_CORE_LOCKING
2580 int last_select_cb_ctr;
2581 SYS_ARCH_DECL_PROTECT(lev);
2582#endif /* !LWIP_TCPIP_CORE_LOCKING */
2583
2584 LWIP_ASSERT_CORE_LOCKED();
2585
2586#if !LWIP_TCPIP_CORE_LOCKING
2587 SYS_ARCH_PROTECT(lev);
2588again:
2589 /* remember the state of select_cb_list to detect changes */
2590 last_select_cb_ctr = select_cb_ctr;
2591#endif /* !LWIP_TCPIP_CORE_LOCKING */
2592 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2593 if (scb->sem_signalled == 0) {
2594 /* semaphore not signalled yet */
2595 int do_signal = 0;
2596#if LWIP_SOCKET_POLL
2597 if (scb->poll_fds != NULL) {
2598 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2599 }
2600#endif /* LWIP_SOCKET_POLL */
2601#if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2602 else
2603#endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2604#if LWIP_SOCKET_SELECT
2605 {
2606 /* Test this select call for our socket */
2607 if (has_recvevent) {
2608 if (scb->readset && FD_ISSET(s, scb->readset)) {
2609 do_signal = 1;
2610 }
2611 }
2612 if (has_sendevent) {
2613 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2614 do_signal = 1;
2615 }
2616 }
2617 if (has_errevent) {
2618 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2619 do_signal = 1;
2620 }
2621 }
2622 }
2623#endif /* LWIP_SOCKET_SELECT */
2624 if (do_signal) {
2625 scb->sem_signalled = 1;
2626 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2627 the semaphore, as this might lead to the select thread taking itself off the list,
2628 invalidating the semaphore. */
2629 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2630 }
2631 }
2632#if LWIP_TCPIP_CORE_LOCKING
2633 }
2634#else
2635 /* unlock interrupts with each step */
2636 SYS_ARCH_UNPROTECT(lev);
2637 /* this makes sure interrupt protection time is short */
2638 SYS_ARCH_PROTECT(lev);
2639 if (last_select_cb_ctr != select_cb_ctr) {
2640 /* someone has changed select_cb_list, restart at the beginning */
2641 goto again;
2642 }
2643 /* remember the state of select_cb_list to detect changes */
2644 last_select_cb_ctr = select_cb_ctr;
2645 }
2646 SYS_ARCH_UNPROTECT(lev);
2647#endif
2648}
2649#endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2650
2651/**
2652 * Close one end of a full-duplex connection.
2653 */
2654int
2655lwip_shutdown(int s, int how)
2656{
2657 struct lwip_sock *sock;
2658 err_t err;
2659 u8_t shut_rx = 0, shut_tx = 0;
2660
2661 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2662
2663 sock = get_socket(s);
2664 if (!sock) {
2665 return -1;
2666 }
2667
2668 if (sock->conn != NULL) {
2669 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2670 sock_set_errno(sock, EOPNOTSUPP);
2671 done_socket(sock);
2672 return -1;
2673 }
2674 } else {
2675 sock_set_errno(sock, ENOTCONN);
2676 done_socket(sock);
2677 return -1;
2678 }
2679
2680 if (how == SHUT_RD) {
2681 shut_rx = 1;
2682 } else if (how == SHUT_WR) {
2683 shut_tx = 1;
2684 } else if (how == SHUT_RDWR) {
2685 shut_rx = 1;
2686 shut_tx = 1;
2687 } else {
2688 sock_set_errno(sock, EINVAL);
2689 done_socket(sock);
2690 return -1;
2691 }
2692 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2693
2694 sock_set_errno(sock, err_to_errno(err));
2695 done_socket(sock);
2696 return (err == ERR_OK ? 0 : -1);
2697}
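
/* Usage sketch: half-closing a TCP connection once the request has been sent,
 * then draining the peer's reply until it closes as well. 's', 'buf' and
 * 'len' are placeholders.
 *
 *   lwip_shutdown(s, SHUT_WR);   // we will not send anything more
 *   while ((len = lwip_recv(s, buf, sizeof(buf), 0)) > 0) {
 *     // process the remaining reply bytes
 *   }
 *   lwip_close(s);
 */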
2698
2699static int
2700lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2701{
2702 struct lwip_sock *sock;
2703 union sockaddr_aligned saddr;
2704 ip_addr_t naddr;
2705 u16_t port;
2706 err_t err;
2707
2708 sock = get_socket(s);
2709 if (!sock) {
2710 return -1;
2711 }
2712
2713 /* get the IP address and port */
2714 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2715 if (err != ERR_OK) {
2716 sock_set_errno(sock, err_to_errno(err));
2717 done_socket(sock);
2718 return -1;
2719 }
2720
2721#if LWIP_IPV4 && LWIP_IPV6
2722 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2723 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2724 IP_IS_V4_VAL(naddr)) {
2725 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2726 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2727 }
2728#endif /* LWIP_IPV4 && LWIP_IPV6 */
2729
2730 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2731
2732 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2733 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2734 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2735
2736 if (*namelen > saddr.sa.sa_len) {
2737 *namelen = saddr.sa.sa_len;
2738 }
2739 MEMCPY(name, &saddr, *namelen);
2740
2741 sock_set_errno(sock, 0);
2742 done_socket(sock);
2743 return 0;
2744}
2745
2746int
2747lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
2748{
2749 return lwip_getaddrname(s, name, namelen, 0);
2750}
2751
2752int
2753lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
2754{
2755 return lwip_getaddrname(s, name, namelen, 1);
2756}
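
/* Usage sketch: reading back the local address/port chosen by bind() or
 * connect(). For an IPv4 socket the result is a struct sockaddr_in.
 * 's' is a bound socket (placeholder).
 *
 *   struct sockaddr_in local;
 *   socklen_t len = sizeof(local);
 *   if (lwip_getsockname(s, (struct sockaddr *)&local, &len) == 0) {
 *     u16_t local_port = lwip_ntohs(local.sin_port);
 *   }
 */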
2757
2758int
2759lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
2760{
2761 int err;
2762 struct lwip_sock *sock = get_socket(s);
2763#if !LWIP_TCPIP_CORE_LOCKING
2764 err_t cberr;
2765 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
2766#endif /* !LWIP_TCPIP_CORE_LOCKING */
2767
2768 if (!sock) {
2769 return -1;
2770 }
2771
2772 if ((NULL == optval) || (NULL == optlen)) {
2773 sock_set_errno(sock, EFAULT);
2774 done_socket(sock);
2775 return -1;
2776 }
2777
2778#if LWIP_TCPIP_CORE_LOCKING
2779 /* core-locking can just call the -impl function */
2780 LOCK_TCPIP_CORE();
2781 err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
2782 UNLOCK_TCPIP_CORE();
2783
2784#else /* LWIP_TCPIP_CORE_LOCKING */
2785
2786#if LWIP_MPU_COMPATIBLE
2787 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
2788 if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
2789 sock_set_errno(sock, ENOBUFS);
2790 done_socket(sock);
2791 return -1;
2792 }
2793#endif /* LWIP_MPU_COMPATIBLE */
2794
2795 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
2796 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
2797 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
2798 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
2799 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
2800#if !LWIP_MPU_COMPATIBLE
2801 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
2802#endif /* !LWIP_MPU_COMPATIBLE */
2803 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
2804#if LWIP_NETCONN_SEM_PER_THREAD
2805 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
2806#else
2807 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
2808#endif
2809 cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
2810 if (cberr != ERR_OK) {
2811 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2812 sock_set_errno(sock, err_to_errno(cberr));
2813 done_socket(sock);
2814 return -1;
2815 }
2816 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
2817
2818 /* write back optlen and optval */
2819 *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
2820#if LWIP_MPU_COMPATIBLE
2821 MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
2822 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
2823#endif /* LWIP_MPU_COMPATIBLE */
2824
2825  /* maybe lwip_getsockopt_impl has changed err */
2826 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
2827 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2828#endif /* LWIP_TCPIP_CORE_LOCKING */
2829
2830 sock_set_errno(sock, err);
2831 done_socket(sock);
2832 return err ? -1 : 0;
2833}
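
/* Usage sketch: fetching SO_ERROR to pick up the result of a non-blocking
 * connect() once select()/poll() reports the socket writable. 's' is a
 * placeholder descriptor.
 *
 *   int so_err = 0;
 *   socklen_t optlen = sizeof(so_err);
 *   if (lwip_getsockopt(s, SOL_SOCKET, SO_ERROR, &so_err, &optlen) == 0) {
 *     // so_err == 0: connect succeeded; otherwise it holds an errno value
 *   }
 */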
2834
2835#if !LWIP_TCPIP_CORE_LOCKING
2836/** lwip_getsockopt_callback: only used without CORE_LOCKING
2837 * to get into the tcpip_thread
2838 */
2839static void
2840lwip_getsockopt_callback(void *arg)
2841{
2842 struct lwip_setgetsockopt_data *data;
2843 LWIP_ASSERT("arg != NULL", arg != NULL);
2844 data = (struct lwip_setgetsockopt_data *)arg;
2845
2846 data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
2847#if LWIP_MPU_COMPATIBLE
2848 data->optval,
2849#else /* LWIP_MPU_COMPATIBLE */
2850 data->optval.p,
2851#endif /* LWIP_MPU_COMPATIBLE */
2852 &data->optlen);
2853
2854 sys_sem_signal((sys_sem_t *)(data->completed_sem));
2855}
2856#endif /* LWIP_TCPIP_CORE_LOCKING */
2857
2858static int
2859lwip_sockopt_to_ipopt(int optname)
2860{
2861 /* Map SO_* values to our internal SOF_* values
2862 * We should not rely on #defines in socket.h
2863 * being in sync with ip.h.
2864 */
2865 switch (optname) {
2866 case SO_BROADCAST:
2867 return SOF_BROADCAST;
2868 case SO_KEEPALIVE:
2869 return SOF_KEEPALIVE;
2870 case SO_REUSEADDR:
2871 return SOF_REUSEADDR;
2872 default:
2873 LWIP_ASSERT("Unknown socket option", 0);
2874 return 0;
2875 }
2876}
2877
2878/** lwip_getsockopt_impl: the actual implementation of getsockopt:
2879 * same argument as lwip_getsockopt, either called directly or through callback
2880 */
2881static int
2882lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2883{
2884 int err = 0;
2885 struct lwip_sock *sock = tryget_socket(s);
2886 if (!sock) {
2887 return EBADF;
2888 }
2889
2890#ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
2891 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
2892 return err;
2893 }
2894#endif
2895
2896 switch (level) {
2897
2898 /* Level: SOL_SOCKET */
2899 case SOL_SOCKET:
2900 switch (optname) {
2901
2902#if LWIP_TCP
2903 case SO_ACCEPTCONN:
2904 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2905 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
2906 done_socket(sock);
2907 return ENOPROTOOPT;
2908 }
2909 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
2910 *(int *)optval = 1;
2911 } else {
2912 *(int *)optval = 0;
2913 }
2914 break;
2915#endif /* LWIP_TCP */
2916
2917 /* The option flags */
2918 case SO_BROADCAST:
2919 case SO_KEEPALIVE:
2920#if SO_REUSE
2921 case SO_REUSEADDR:
2922#endif /* SO_REUSE */
2923 if ((optname == SO_BROADCAST) &&
2924 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
2925 done_socket(sock);
2926 return ENOPROTOOPT;
2927 }
2928
2929 optname = lwip_sockopt_to_ipopt(optname);
2930
2931 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2932 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
2933 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
2934 s, optname, (*(int *)optval ? "on" : "off")));
2935 break;
2936
2937 case SO_TYPE:
2938 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2939 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
2940 case NETCONN_RAW:
2941 *(int *)optval = SOCK_RAW;
2942 break;
2943 case NETCONN_TCP:
2944 *(int *)optval = SOCK_STREAM;
2945 break;
2946 case NETCONN_UDP:
2947 *(int *)optval = SOCK_DGRAM;
2948 break;
2949 default: /* unrecognized socket type */
2950 *(int *)optval = netconn_type(sock->conn);
2951 LWIP_DEBUGF(SOCKETS_DEBUG,
2952 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
2953 s, *(int *)optval));
2954 } /* switch (netconn_type(sock->conn)) */
2955 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
2956 s, *(int *)optval));
2957 break;
2958
2959 case SO_ERROR:
2960 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
2961 *(int *)optval = err_to_errno(netconn_err(sock->conn));
2962 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
2963 s, *(int *)optval));
2964 break;
2965
2966#if LWIP_SO_SNDTIMEO
2967 case SO_SNDTIMEO:
2968 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2969 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
2970 break;
2971#endif /* LWIP_SO_SNDTIMEO */
2972#if LWIP_SO_RCVTIMEO
2973 case SO_RCVTIMEO:
2974 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2975 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
2976 break;
2977#endif /* LWIP_SO_RCVTIMEO */
2978#if LWIP_SO_RCVBUF
2979 case SO_RCVBUF:
2980 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2981 *(int *)optval = netconn_get_recvbufsize(sock->conn);
2982 break;
2983#endif /* LWIP_SO_RCVBUF */
2984#if LWIP_SO_LINGER
2985 case SO_LINGER: {
2986 s16_t conn_linger;
2987 struct linger *linger = (struct linger *)optval;
2988 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
2989 conn_linger = sock->conn->linger;
2990 if (conn_linger >= 0) {
2991 linger->l_onoff = 1;
2992 linger->l_linger = (int)conn_linger;
2993 } else {
2994 linger->l_onoff = 0;
2995 linger->l_linger = 0;
2996 }
2997 }
2998 break;
2999#endif /* LWIP_SO_LINGER */
3000#if LWIP_UDP
3001 case SO_NO_CHECK:
3002 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
3003#if LWIP_UDPLITE
3004 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3005 /* this flag is only available for UDP, not for UDP lite */
3006 done_socket(sock);
3007 return EAFNOSUPPORT;
3008 }
3009#endif /* LWIP_UDPLITE */
3010 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3011 break;
3012#endif /* LWIP_UDP */
3013 default:
3014 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3015 s, optname));
3016 err = ENOPROTOOPT;
3017 break;
3018 } /* switch (optname) */
3019 break;
3020
3021 /* Level: IPPROTO_IP */
3022 case IPPROTO_IP:
3023 switch (optname) {
3024 case IP_TTL:
3025 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3026 *(int *)optval = sock->conn->pcb.ip->ttl;
3027 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3028 s, *(int *)optval));
3029 break;
3030 case IP_TOS:
3031 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3032 *(int *)optval = sock->conn->pcb.ip->tos;
3033 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3034 s, *(int *)optval));
3035 break;
3036#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3037 case IP_MULTICAST_TTL:
3038 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3039 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3040 done_socket(sock);
3041 return ENOPROTOOPT;
3042 }
3043 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3044 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3045 s, *(int *)optval));
3046 break;
3047 case IP_MULTICAST_IF:
3048 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3049 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3050 done_socket(sock);
3051 return ENOPROTOOPT;
3052 }
3053 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3054 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3055 s, *(u32_t *)optval));
3056 break;
3057 case IP_MULTICAST_LOOP:
3058 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3059 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3060 *(u8_t *)optval = 1;
3061 } else {
3062 *(u8_t *)optval = 0;
3063 }
3064 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3065 s, *(int *)optval));
3066 break;
3067#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3068 default:
3069 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3070 s, optname));
3071 err = ENOPROTOOPT;
3072 break;
3073 } /* switch (optname) */
3074 break;
3075
3076#if LWIP_TCP
3077 /* Level: IPPROTO_TCP */
3078 case IPPROTO_TCP:
3079    /* Special case: all IPPROTO_TCP options take an int */
3080 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3081 if (sock->conn->pcb.tcp->state == LISTEN) {
3082 done_socket(sock);
3083 return EINVAL;
3084 }
3085 switch (optname) {
3086 case TCP_NODELAY:
3087 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3088 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3089 s, (*(int *)optval) ? "on" : "off") );
3090 break;
3091 case TCP_KEEPALIVE:
3092 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3093 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3094 s, *(int *)optval));
3095 break;
3096
3097#if LWIP_TCP_KEEPALIVE
3098 case TCP_KEEPIDLE:
3099 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3100 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3101 s, *(int *)optval));
3102 break;
3103 case TCP_KEEPINTVL:
3104 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3105 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3106 s, *(int *)optval));
3107 break;
3108 case TCP_KEEPCNT:
3109 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3110 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3111 s, *(int *)optval));
3112 break;
3113#endif /* LWIP_TCP_KEEPALIVE */
3114 default:
3115 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3116 s, optname));
3117 err = ENOPROTOOPT;
3118 break;
3119 } /* switch (optname) */
3120 break;
3121#endif /* LWIP_TCP */
3122
3123#if LWIP_IPV6
3124 /* Level: IPPROTO_IPV6 */
3125 case IPPROTO_IPV6:
3126 switch (optname) {
3127 case IPV6_V6ONLY:
3128 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3129 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3130 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3131 s, *(int *)optval));
3132 break;
3133 default:
3134 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3135 s, optname));
3136 err = ENOPROTOOPT;
3137 break;
3138 } /* switch (optname) */
3139 break;
3140#endif /* LWIP_IPV6 */
3141
3142#if LWIP_UDP && LWIP_UDPLITE
3143 /* Level: IPPROTO_UDPLITE */
3144 case IPPROTO_UDPLITE:
3145    /* Special case: all IPPROTO_UDPLITE options take an int */
3146 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3147    /* If this is not a UDP-Lite socket, ignore any options. */
3148 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3149 done_socket(sock);
3150 return ENOPROTOOPT;
3151 }
3152 switch (optname) {
3153 case UDPLITE_SEND_CSCOV:
3154 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3155 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3156 s, (*(int *)optval)) );
3157 break;
3158 case UDPLITE_RECV_CSCOV:
3159 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3160 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3161 s, (*(int *)optval)) );
3162 break;
3163 default:
3164 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3165 s, optname));
3166 err = ENOPROTOOPT;
3167 break;
3168 } /* switch (optname) */
3169 break;
3170#endif /* LWIP_UDP && LWIP_UDPLITE */
3171 /* Level: IPPROTO_RAW */
3172 case IPPROTO_RAW:
3173 switch (optname) {
3174#if LWIP_IPV6 && LWIP_RAW
3175 case IPV6_CHECKSUM:
3176 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3177 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3178 *(int *)optval = -1;
3179 } else {
3180 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3181 }
3182 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3183 s, (*(int *)optval)) );
3184 break;
3185#endif /* LWIP_IPV6 && LWIP_RAW */
3186 default:
3187 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3188 s, optname));
3189 err = ENOPROTOOPT;
3190 break;
3191 } /* switch (optname) */
3192 break;
3193 default:
3194 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3195 s, level, optname));
3196 err = ENOPROTOOPT;
3197 break;
3198 } /* switch (level) */
3199
3200 done_socket(sock);
3201 return err;
3202}
3203
3204int
3205lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
3206{
3207 int err = 0;
3208 struct lwip_sock *sock = get_socket(s);
3209#if !LWIP_TCPIP_CORE_LOCKING
3210 err_t cberr;
3211 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
3212#endif /* !LWIP_TCPIP_CORE_LOCKING */
3213
3214 if (!sock) {
3215 return -1;
3216 }
3217
3218 if (NULL == optval) {
3219 sock_set_errno(sock, EFAULT);
3220 done_socket(sock);
3221 return -1;
3222 }
3223
3224#if LWIP_TCPIP_CORE_LOCKING
3225 /* core-locking can just call the -impl function */
3226 LOCK_TCPIP_CORE();
3227 err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
3228 UNLOCK_TCPIP_CORE();
3229
3230#else /* LWIP_TCPIP_CORE_LOCKING */
3231
3232#if LWIP_MPU_COMPATIBLE
3233 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
3234 if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
3235 sock_set_errno(sock, ENOBUFS);
3236 done_socket(sock);
3237 return -1;
3238 }
3239#endif /* LWIP_MPU_COMPATIBLE */
3240
3241 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
3242 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
3243 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
3244 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
3245 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
3246#if LWIP_MPU_COMPATIBLE
3247 MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
3248#else /* LWIP_MPU_COMPATIBLE */
3249 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
3250#endif /* LWIP_MPU_COMPATIBLE */
3251 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
3252#if LWIP_NETCONN_SEM_PER_THREAD
3253 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
3254#else
3255 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
3256#endif
3257 cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
3258 if (cberr != ERR_OK) {
3259 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3260 sock_set_errno(sock, err_to_errno(cberr));
3261 done_socket(sock);
3262 return -1;
3263 }
3264 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
3265
3266  /* maybe lwip_setsockopt_impl has changed err */
3267 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
3268 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3269#endif /* LWIP_TCPIP_CORE_LOCKING */
3270
3271 sock_set_errno(sock, err);
3272 done_socket(sock);
3273 return err ? -1 : 0;
3274}
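
/* Usage sketch: giving a socket a one second receive timeout. This assumes
 * LWIP_SO_RCVTIMEO is enabled and the default (struct timeval) option form,
 * i.e. LWIP_SO_SNDRCVTIMEO_NONSTANDARD == 0. 's' is a placeholder descriptor.
 *
 *   struct timeval tv;
 *   tv.tv_sec = 1;
 *   tv.tv_usec = 0;
 *   lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */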
3275
3276#if !LWIP_TCPIP_CORE_LOCKING
3277/** lwip_setsockopt_callback: only used without CORE_LOCKING
3278 * to get into the tcpip_thread
3279 */
3280static void
3281lwip_setsockopt_callback(void *arg)
3282{
3283 struct lwip_setgetsockopt_data *data;
3284 LWIP_ASSERT("arg != NULL", arg != NULL);
3285 data = (struct lwip_setgetsockopt_data *)arg;
3286
3287 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3288#if LWIP_MPU_COMPATIBLE
3289 data->optval,
3290#else /* LWIP_MPU_COMPATIBLE */
3291 data->optval.pc,
3292#endif /* LWIP_MPU_COMPATIBLE */
3293 data->optlen);
3294
3295 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3296}
3297#endif /* LWIP_TCPIP_CORE_LOCKING */
3298
3299/** lwip_setsockopt_impl: the actual implementation of setsockopt:
3300 * same argument as lwip_setsockopt, either called directly or through callback
3301 */
3302static int
3303lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3304{
3305 int err = 0;
3306 struct lwip_sock *sock = tryget_socket(s);
3307 if (!sock) {
3308 return EBADF;
3309 }
3310
3311#ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3312 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3313 return err;
3314 }
3315#endif
3316
3317 switch (level) {
3318
3319 /* Level: SOL_SOCKET */
3320 case SOL_SOCKET:
3321 switch (optname) {
3322
3323 /* SO_ACCEPTCONN is get-only */
3324
3325 /* The option flags */
3326 case SO_BROADCAST:
3327 case SO_KEEPALIVE:
3328#if SO_REUSE
3329 case SO_REUSEADDR:
3330#endif /* SO_REUSE */
3331 if ((optname == SO_BROADCAST) &&
3332 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3333 done_socket(sock);
3334 return ENOPROTOOPT;
3335 }
3336
3337 optname = lwip_sockopt_to_ipopt(optname);
3338
3339 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3340 if (*(const int *)optval) {
3341 ip_set_option(sock->conn->pcb.ip, optname);
3342 } else {
3343 ip_reset_option(sock->conn->pcb.ip, optname);
3344 }
3345 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3346 s, optname, (*(const int *)optval ? "on" : "off")));
3347 break;
3348
3349 /* SO_TYPE is get-only */
3350 /* SO_ERROR is get-only */
3351
3352#if LWIP_SO_SNDTIMEO
3353 case SO_SNDTIMEO: {
3354 long ms_long;
3355 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3356 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3357 if (ms_long < 0) {
3358 done_socket(sock);
3359 return EINVAL;
3360 }
3361 netconn_set_sendtimeout(sock->conn, ms_long);
3362 break;
3363 }
3364#endif /* LWIP_SO_SNDTIMEO */
3365#if LWIP_SO_RCVTIMEO
3366 case SO_RCVTIMEO: {
3367 long ms_long;
3368 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3369 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3370 if (ms_long < 0) {
3371 done_socket(sock);
3372 return EINVAL;
3373 }
3374 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3375 break;
3376 }
3377#endif /* LWIP_SO_RCVTIMEO */
3378#if LWIP_SO_RCVBUF
3379 case SO_RCVBUF:
3380 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3381 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3382 break;
3383#endif /* LWIP_SO_RCVBUF */
3384#if LWIP_SO_LINGER
3385 case SO_LINGER: {
3386 const struct linger *linger = (const struct linger *)optval;
3387 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3388 if (linger->l_onoff) {
3389 int lingersec = linger->l_linger;
3390 if (lingersec < 0) {
3391 done_socket(sock);
3392 return EINVAL;
3393 }
3394 if (lingersec > 0xFFFF) {
3395 lingersec = 0xFFFF;
3396 }
3397 sock->conn->linger = (s16_t)lingersec;
3398 } else {
3399 sock->conn->linger = -1;
3400 }
3401 }
3402 break;
3403#endif /* LWIP_SO_LINGER */
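      /* Illustrative, non-normative sketch: SO_LINGER takes a struct linger; l_onoff == 0
       * restores the default (conn->linger = -1), otherwise l_linger is the linger time
       * in seconds, clamped to 0xFFFF as shown above:
       *
       *   struct linger li;
       *   li.l_onoff  = 1;
       *   li.l_linger = 5;        // linger for at most 5 seconds on close
       *   lwip_setsockopt(s, SOL_SOCKET, SO_LINGER, &li, sizeof(li));
       */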
3404#if LWIP_UDP
3405 case SO_NO_CHECK:
3406 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3407#if LWIP_UDPLITE
3408 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3409 /* this flag is only available for UDP, not for UDP lite */
3410 done_socket(sock);
3411 return EAFNOSUPPORT;
3412 }
3413#endif /* LWIP_UDPLITE */
3414 if (*(const int *)optval) {
3415 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3416 } else {
3417 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3418 }
3419 break;
3420#endif /* LWIP_UDP */
3421 case SO_BINDTODEVICE: {
3422 const struct ifreq *iface;
3423 struct netif *n = NULL;
3424
3425 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3426
3427 iface = (const struct ifreq *)optval;
3428 if (iface->ifr_name[0] != 0) {
3429 n = netif_find(iface->ifr_name);
3430 if (n == NULL) {
3431 done_socket(sock);
3432 return ENODEV;
3433 }
3434 }
3435
3436 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3437#if LWIP_TCP
3438 case NETCONN_TCP:
3439 tcp_bind_netif(sock->conn->pcb.tcp, n);
3440 break;
3441#endif
3442#if LWIP_UDP
3443 case NETCONN_UDP:
3444 udp_bind_netif(sock->conn->pcb.udp, n);
3445 break;
3446#endif
3447#if LWIP_RAW
3448 case NETCONN_RAW:
3449 raw_bind_netif(sock->conn->pcb.raw, n);
3450 break;
3451#endif
3452 default:
3453 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3454 break;
3455 }
3456 }
3457 break;
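      /* Illustrative, non-normative sketch: SO_BINDTODEVICE takes a struct ifreq whose
       * ifr_name holds an lwIP netif name as understood by netif_find() ("st0" below is
       * an assumed example name); an empty name removes the binding:
       *
       *   struct ifreq ifr;
       *   memset(&ifr, 0, sizeof(ifr));
       *   strncpy(ifr.ifr_name, "st0", sizeof(ifr.ifr_name) - 1);
       *   lwip_setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE, &ifr, sizeof(ifr));
       */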
3458 default:
3459 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3460 s, optname));
3461 err = ENOPROTOOPT;
3462 break;
3463 } /* switch (optname) */
3464 break;
3465
3466 /* Level: IPPROTO_IP */
3467 case IPPROTO_IP:
3468 switch (optname) {
3469 case IP_TTL:
3470 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3471 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3472 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3473 s, sock->conn->pcb.ip->ttl));
3474 break;
3475 case IP_TOS:
3476 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3477 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3478 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
3479 s, sock->conn->pcb.ip->tos));
3480 break;
3481#if LWIP_NETBUF_RECVINFO
3482 case IP_PKTINFO:
3483 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3484 if (*(const int *)optval) {
3485 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3486 } else {
3487 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3488 }
3489 break;
3490#endif /* LWIP_NETBUF_RECVINFO */
3491#if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3492 case IP_MULTICAST_TTL:
3493 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3494 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3495 break;
3496 case IP_MULTICAST_IF: {
3497 ip4_addr_t if_addr;
3498 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3499 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3500 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3501 }
3502 break;
3503 case IP_MULTICAST_LOOP:
3504 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3505 if (*(const u8_t *)optval) {
3506 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3507 } else {
3508 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3509 }
3510 break;
3511#endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
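      /* Illustrative, non-normative sketch: selecting the outgoing interface (by its
       * local IPv4 address, assumed to be 192.168.1.2 here) and the TTL for multicast
       * transmissions on a UDP socket; note that IP_MULTICAST_TTL expects a u8_t value:
       *
       *   struct in_addr if_ip;
       *   u8_t mttl = 4;
       *   if_ip.s_addr = inet_addr("192.168.1.2");
       *   lwip_setsockopt(s, IPPROTO_IP, IP_MULTICAST_IF,  &if_ip, sizeof(if_ip));
       *   lwip_setsockopt(s, IPPROTO_IP, IP_MULTICAST_TTL, &mttl,  sizeof(mttl));
       */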
3512#if LWIP_IGMP
3513 case IP_ADD_MEMBERSHIP:
3514 case IP_DROP_MEMBERSHIP: {
3515 /* If this is a TCP or a RAW socket, ignore these options. */
3516 err_t igmp_err;
3517 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3518 ip4_addr_t if_addr;
3519 ip4_addr_t multi_addr;
3520 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3521 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3522 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3523 if (optname == IP_ADD_MEMBERSHIP) {
3524 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3525 /* cannot track membership (out of memory) */
3526 err = ENOMEM;
3527 igmp_err = ERR_OK;
3528 } else {
3529 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3530 }
3531 } else {
3532 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3533 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3534 }
3535 if (igmp_err != ERR_OK) {
3536 err = EADDRNOTAVAIL;
3537 }
3538 }
3539 break;
3540#endif /* LWIP_IGMP */
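      /* Illustrative, non-normative sketch: joining an IPv4 multicast group on a UDP
       * socket (239.0.0.1 is an assumed example group). The membership is tracked via
       * lwip_socket_register_membership() and dropped automatically on socket close:
       *
       *   struct ip_mreq mreq;
       *   mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
       *   mreq.imr_interface.s_addr = htonl(INADDR_ANY);   // interface selection left to the stack
       *   lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
       */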
3541 default:
3542 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3543 s, optname));
3544 err = ENOPROTOOPT;
3545 break;
3546 } /* switch (optname) */
3547 break;
3548
3549#if LWIP_TCP
3550 /* Level: IPPROTO_TCP */
3551 case IPPROTO_TCP:
3552      /* Special case: all IPPROTO_TCP options take an int */
3553 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3554 if (sock->conn->pcb.tcp->state == LISTEN) {
3555 done_socket(sock);
3556 return EINVAL;
3557 }
3558 switch (optname) {
3559 case TCP_NODELAY:
3560 if (*(const int *)optval) {
3561 tcp_nagle_disable(sock->conn->pcb.tcp);
3562 } else {
3563 tcp_nagle_enable(sock->conn->pcb.tcp);
3564 }
3565 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3566 s, (*(const int *)optval) ? "on" : "off") );
3567 break;
3568 case TCP_KEEPALIVE:
3569 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3570 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3571 s, sock->conn->pcb.tcp->keep_idle));
3572 break;
3573
3574#if LWIP_TCP_KEEPALIVE
3575 case TCP_KEEPIDLE:
3576 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3577 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3578 s, sock->conn->pcb.tcp->keep_idle));
3579 break;
3580 case TCP_KEEPINTVL:
3581 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3582 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3583 s, sock->conn->pcb.tcp->keep_intvl));
3584 break;
3585 case TCP_KEEPCNT:
3586 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3587 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3588 s, sock->conn->pcb.tcp->keep_cnt));
3589 break;
3590#endif /* LWIP_TCP_KEEPALIVE */
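      /* Illustrative, non-normative sketch: note the unit difference visible above:
       * TCP_KEEPALIVE is taken in milliseconds, while TCP_KEEPIDLE and TCP_KEEPINTVL
       * (available with LWIP_TCP_KEEPALIVE) are taken in seconds:
       *
       *   int on = 1, idle_s = 60, intvl_s = 10, cnt = 4;
       *   lwip_setsockopt(s, SOL_SOCKET,  SO_KEEPALIVE,  &on,      sizeof(on));
       *   lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE,  &idle_s,  sizeof(idle_s));
       *   lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &intvl_s, sizeof(intvl_s));
       *   lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT,   &cnt,     sizeof(cnt));
       */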
3591 default:
3592 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3593 s, optname));
3594 err = ENOPROTOOPT;
3595 break;
3596 } /* switch (optname) */
3597 break;
3598#endif /* LWIP_TCP */
3599
3600#if LWIP_IPV6
3601 /* Level: IPPROTO_IPV6 */
3602 case IPPROTO_IPV6:
3603 switch (optname) {
3604 case IPV6_V6ONLY:
3605 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3606 if (*(const int *)optval) {
3607 netconn_set_ipv6only(sock->conn, 1);
3608 } else {
3609 netconn_set_ipv6only(sock->conn, 0);
3610 }
3611 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3612 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3613 break;
3614#if LWIP_IPV6_MLD
3615 case IPV6_JOIN_GROUP:
3616 case IPV6_LEAVE_GROUP: {
3617 /* If this is a TCP or a RAW socket, ignore these options. */
3618 err_t mld6_err;
3619 struct netif *netif;
3620 ip6_addr_t multi_addr;
3621 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3622 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3623 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3624 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3625 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3626 if (netif == NULL) {
3627 err = EADDRNOTAVAIL;
3628 break;
3629 }
3630
3631 if (optname == IPV6_JOIN_GROUP) {
3632 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3633 /* cannot track membership (out of memory) */
3634 err = ENOMEM;
3635 mld6_err = ERR_OK;
3636 } else {
3637 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3638 }
3639 } else {
3640 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3641 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3642 }
3643 if (mld6_err != ERR_OK) {
3644 err = EADDRNOTAVAIL;
3645 }
3646 }
3647 break;
3648#endif /* LWIP_IPV6_MLD */
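      /* Illustrative, non-normative sketch: joining an IPv6 multicast group (ff02::1 and
       * netif index 1 are assumed examples); ipv6mr_interface is a 1-based netif index,
       * and an index that netif_get_by_index() cannot resolve yields EADDRNOTAVAIL:
       *
       *   struct ipv6_mreq mreq6;
       *   lwip_inet_pton(AF_INET6, "ff02::1", &mreq6.ipv6mr_multiaddr);
       *   mreq6.ipv6mr_interface = 1;
       *   lwip_setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq6, sizeof(mreq6));
       */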
3649 default:
3650 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3651 s, optname));
3652 err = ENOPROTOOPT;
3653 break;
3654 } /* switch (optname) */
3655 break;
3656#endif /* LWIP_IPV6 */
3657
3658#if LWIP_UDP && LWIP_UDPLITE
3659 /* Level: IPPROTO_UDPLITE */
3660 case IPPROTO_UDPLITE:
3661      /* Special case: all IPPROTO_UDPLITE options take an int */
3662 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3663      /* If this is not a UDP lite socket, ignore any options. */
3664 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3665 done_socket(sock);
3666 return ENOPROTOOPT;
3667 }
3668 switch (optname) {
3669 case UDPLITE_SEND_CSCOV:
3670 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3671 /* don't allow illegal values! */
3672 sock->conn->pcb.udp->chksum_len_tx = 8;
3673 } else {
3674            sock->conn->pcb.udp->chksum_len_tx = (u16_t)*(const int *)optval;
3675 }
3676 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3677 s, (*(const int *)optval)) );
3678 break;
3679 case UDPLITE_RECV_CSCOV:
3680 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3681 /* don't allow illegal values! */
3682 sock->conn->pcb.udp->chksum_len_rx = 8;
3683 } else {
3684            sock->conn->pcb.udp->chksum_len_rx = (u16_t)*(const int *)optval;
3685 }
3686 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3687 s, (*(const int *)optval)) );
3688 break;
3689 default:
3690 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3691 s, optname));
3692 err = ENOPROTOOPT;
3693 break;
3694 } /* switch (optname) */
3695 break;
3696#endif /* LWIP_UDP && LWIP_UDPLITE */
3697 /* Level: IPPROTO_RAW */
3698 case IPPROTO_RAW:
3699 switch (optname) {
3700#if LWIP_IPV6 && LWIP_RAW
3701 case IPV6_CHECKSUM:
3702 /* It should not be possible to disable the checksum generation with ICMPv6
3703 * as per RFC 3542 chapter 3.1 */
3704 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3705 done_socket(sock);
3706 return EINVAL;
3707 }
3708
3709 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3710 if (*(const int *)optval < 0) {
3711 sock->conn->pcb.raw->chksum_reqd = 0;
3712 } else if (*(const int *)optval & 1) {
3713 /* Per RFC3542, odd offsets are not allowed */
3714 done_socket(sock);
3715 return EINVAL;
3716 } else {
3717 sock->conn->pcb.raw->chksum_reqd = 1;
3718            sock->conn->pcb.raw->chksum_offset = (u16_t)*(const int *)optval;
3719 }
3720 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3721 s, sock->conn->pcb.raw->chksum_reqd));
3722 break;
3723#endif /* LWIP_IPV6 && LWIP_RAW */
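      /* Illustrative, non-normative sketch: on a non-ICMPv6 raw IPv6 socket, IPV6_CHECKSUM
       * takes the (even) byte offset of the checksum field within the outgoing payload;
       * a negative value disables checksum handling:
       *
       *   int offset = 2;    // assumed offset of the checksum field in the protocol header
       *   lwip_setsockopt(s, IPPROTO_RAW, IPV6_CHECKSUM, &offset, sizeof(offset));
       */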
3724 default:
3725 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3726 s, optname));
3727 err = ENOPROTOOPT;
3728 break;
3729 } /* switch (optname) */
3730 break;
3731 default:
3732 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3733 s, level, optname));
3734 err = ENOPROTOOPT;
3735 break;
3736 } /* switch (level) */
3737
3738 done_socket(sock);
3739 return err;
3740}
3741
3742int
3743lwip_ioctl(int s, long cmd, void *argp)
3744{
3745 struct lwip_sock *sock = get_socket(s);
3746 u8_t val;
3747#if LWIP_SO_RCVBUF
3748 int recv_avail;
3749#endif /* LWIP_SO_RCVBUF */
3750
3751 if (!sock) {
3752 return -1;
3753 }
3754
3755 switch (cmd) {
3756#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3757 case FIONREAD:
3758 if (!argp) {
3759 sock_set_errno(sock, EINVAL);
3760 done_socket(sock);
3761 return -1;
3762 }
3763#if LWIP_FIONREAD_LINUXMODE
3764 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3765 struct netbuf *nb;
3766 if (sock->lastdata.netbuf) {
3767 nb = sock->lastdata.netbuf;
3768 *((int *)argp) = nb->p->tot_len;
3769 } else {
3770 struct netbuf *rxbuf;
3771 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3772 if (err != ERR_OK) {
3773 *((int *)argp) = 0;
3774 } else {
3775 sock->lastdata.netbuf = rxbuf;
3776 *((int *)argp) = rxbuf->p->tot_len;
3777 }
3778 }
3779 done_socket(sock);
3780 return 0;
3781 }
3782#endif /* LWIP_FIONREAD_LINUXMODE */
3783
3784#if LWIP_SO_RCVBUF
3785 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3786 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3787 if (recv_avail < 0) {
3788 recv_avail = 0;
3789 }
3790
3791 /* Check if there is data left from the last recv operation. /maq 041215 */
3792 if (sock->lastdata.netbuf) {
3793 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3794 recv_avail += sock->lastdata.pbuf->tot_len;
3795 } else {
3796 recv_avail += sock->lastdata.netbuf->p->tot_len;
3797 }
3798 }
3799 *((int *)argp) = recv_avail;
3800
3801      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %d\n", s, argp, *((int *)argp)));
3802 sock_set_errno(sock, 0);
3803 done_socket(sock);
3804 return 0;
3805#else /* LWIP_SO_RCVBUF */
3806 break;
3807#endif /* LWIP_SO_RCVBUF */
3808#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3809
3810 case (long)FIONBIO:
3811 val = 0;
3812 if (argp && *(int *)argp) {
3813 val = 1;
3814 }
3815 netconn_set_nonblocking(sock->conn, val);
3816 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3817 sock_set_errno(sock, 0);
3818 done_socket(sock);
3819 return 0;
3820
3821 default:
3822 break;
3823 } /* switch (cmd) */
3824 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3825 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3826 done_socket(sock);
3827 return -1;
3828}
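/* Illustrative, non-normative sketch of the two ioctls implemented above: FIONBIO
 * toggles non-blocking mode and FIONREAD reports the number of readable bytes;
 * any other request fails with errno = ENOSYS:
 *
 *   int nb = 1, avail = 0;
 *   lwip_ioctl(s, FIONBIO, &nb);        // non-zero enables non-blocking mode
 *   lwip_ioctl(s, FIONREAD, &avail);    // bytes currently available for reading
 */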
3829
3830/** A minimal implementation of fcntl.
3831 * Currently only the commands F_GETFL and F_SETFL are implemented.
3832 * The flag O_NONBLOCK and the access modes are reported by F_GETFL; only
3833 * the flag O_NONBLOCK is implemented for F_SETFL.
3834 */
3835int
3836lwip_fcntl(int s, int cmd, int val)
3837{
3838 struct lwip_sock *sock = get_socket(s);
3839 int ret = -1;
3840 int op_mode = 0;
3841
3842 if (!sock) {
3843 return -1;
3844 }
3845
3846 switch (cmd) {
3847 case F_GETFL:
3848 ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
3849 sock_set_errno(sock, 0);
3850
3851 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3852#if LWIP_TCPIP_CORE_LOCKING
3853 LOCK_TCPIP_CORE();
3854#else
3855 SYS_ARCH_DECL_PROTECT(lev);
3856 /* the proper thing to do here would be to get into the tcpip_thread,
3857 but locking should be OK as well since we only *read* some flags */
3858 SYS_ARCH_PROTECT(lev);
3859#endif
3860#if LWIP_TCP
3861 if (sock->conn->pcb.tcp) {
3862 if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
3863 op_mode |= O_RDONLY;
3864 }
3865 if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
3866 op_mode |= O_WRONLY;
3867 }
3868 }
3869#endif
3870#if LWIP_TCPIP_CORE_LOCKING
3871 UNLOCK_TCPIP_CORE();
3872#else
3873 SYS_ARCH_UNPROTECT(lev);
3874#endif
3875 } else {
3876 op_mode |= O_RDWR;
3877 }
3878
3879 /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
3880 ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;
3881
3882 break;
3883 case F_SETFL:
3884 /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
3885 val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
3886 if ((val & ~O_NONBLOCK) == 0) {
3887 /* only O_NONBLOCK, all other bits are zero */
3888 netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
3889 ret = 0;
3890 sock_set_errno(sock, 0);
3891 } else {
3892 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3893 }
3894 break;
3895 default:
3896 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
3897 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3898 break;
3899 }
3900 done_socket(sock);
3901 return ret;
3902}
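/* Illustrative, non-normative sketch: toggling O_NONBLOCK through the minimal fcntl
 * implementation above; F_SETFL accepts only O_NONBLOCK (access-mode bits are ignored,
 * any other flag yields ENOSYS):
 *
 *   int flags = lwip_fcntl(s, F_GETFL, 0);
 *   if (flags >= 0) {
 *     lwip_fcntl(s, F_SETFL, flags | O_NONBLOCK);
 *   }
 */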
3903
3904#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
3905int
3906fcntl(int s, int cmd, ...)
3907{
3908 va_list ap;
3909 int val;
3910
3911 va_start(ap, cmd);
3912 val = va_arg(ap, int);
3913 va_end(ap);
3914 return lwip_fcntl(s, cmd, val);
3915}
3916#endif
3917
3918const char *
3919lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
3920{
3921 const char *ret = NULL;
3922 int size_int = (int)size;
3923 if (size_int < 0) {
3924 set_errno(ENOSPC);
3925 return NULL;
3926 }
3927 switch (af) {
3928#if LWIP_IPV4
3929 case AF_INET:
3930 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
3931 if (ret == NULL) {
3932 set_errno(ENOSPC);
3933 }
3934 break;
3935#endif
3936#if LWIP_IPV6
3937 case AF_INET6:
3938 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
3939 if (ret == NULL) {
3940 set_errno(ENOSPC);
3941 }
3942 break;
3943#endif
3944 default:
3945 set_errno(EAFNOSUPPORT);
3946 break;
3947 }
3948 return ret;
3949}
3950
3951int
3952lwip_inet_pton(int af, const char *src, void *dst)
3953{
3954 int err;
3955 switch (af) {
3956#if LWIP_IPV4
3957 case AF_INET:
3958 err = ip4addr_aton(src, (ip4_addr_t *)dst);
3959 break;
3960#endif
3961#if LWIP_IPV6
3962 case AF_INET6: {
3963 /* convert into temporary variable since ip6_addr_t might be larger
3964 than in6_addr when scopes are enabled */
3965 ip6_addr_t addr;
3966 err = ip6addr_aton(src, &addr);
3967 if (err) {
3968 memcpy(dst, &addr.addr, sizeof(addr.addr));
3969 }
3970 break;
3971 }
3972#endif
3973 default:
3974 err = -1;
3975 set_errno(EAFNOSUPPORT);
3976 break;
3977 }
3978 return err;
3979}
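/* Illustrative, non-normative sketch: round-tripping an IPv4 address through the
 * conversion helpers above; lwip_inet_pton() returns 1 on success, 0 for an
 * unparsable string and -1 (errno = EAFNOSUPPORT) for an unsupported family:
 *
 *   struct in_addr ia;
 *   char buf[16];
 *   if (lwip_inet_pton(AF_INET, "192.0.2.1", &ia) == 1) {
 *     lwip_inet_ntop(AF_INET, &ia, buf, sizeof(buf));   // yields "192.0.2.1" again
 *   }
 */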
3980
3981#if LWIP_IGMP
3982/** Register a new IGMP membership. On socket close, the membership is dropped automatically.
3983 *
3984 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
3985 *
3986 * @return 1 on success, 0 on failure
3987 */
3988static int
3989lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
3990{
3991 struct lwip_sock *sock = get_socket(s);
3992 int i;
3993
3994 if (!sock) {
3995 return 0;
3996 }
3997
3998 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
3999 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
4000 socket_ipv4_multicast_memberships[i].sock = sock;
4001 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
4002 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
4003 done_socket(sock);
4004 return 1;
4005 }
4006 }
4007 done_socket(sock);
4008 return 0;
4009}
4010
4011/** Unregister a previously registered membership. This prevents dropping the membership
4012 * on socket close.
4013 *
4014 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4015 */
4016static void
4017lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4018{
4019 struct lwip_sock *sock = get_socket(s);
4020 int i;
4021
4022 if (!sock) {
4023 return;
4024 }
4025
4026 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4027 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4028 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4029 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4030 socket_ipv4_multicast_memberships[i].sock = NULL;
4031 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4032 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4033 break;
4034 }
4035 }
4036 done_socket(sock);
4037}
4038
4039/** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4040 *
4041 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4042 */
4043static void
4044lwip_socket_drop_registered_memberships(int s)
4045{
4046 struct lwip_sock *sock = get_socket(s);
4047 int i;
4048
4049 if (!sock) {
4050 return;
4051 }
4052
4053 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4054 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4055 ip_addr_t multi_addr, if_addr;
4056 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4057 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4058 socket_ipv4_multicast_memberships[i].sock = NULL;
4059 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4060 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4061
4062 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4063 }
4064 }
4065 done_socket(sock);
4066}
4067#endif /* LWIP_IGMP */
4068
4069#if LWIP_IPV6_MLD
4070/** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4071 *
4072 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4073 *
4074 * @return 1 on success, 0 on failure
4075 */
4076static int
4077lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4078{
4079 struct lwip_sock *sock = get_socket(s);
4080 int i;
4081
4082 if (!sock) {
4083 return 0;
4084 }
4085
4086 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4087 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4088 socket_ipv6_multicast_memberships[i].sock = sock;
4089 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4090 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4091 done_socket(sock);
4092 return 1;
4093 }
4094 }
4095 done_socket(sock);
4096 return 0;
4097}
4098
4099/** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4100 * on socket close.
4101 *
4102 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4103 */
4104static void
4105lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4106{
4107 struct lwip_sock *sock = get_socket(s);
4108 int i;
4109
4110 if (!sock) {
4111 return;
4112 }
4113
4114 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4115 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4116 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4117 ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4118 socket_ipv6_multicast_memberships[i].sock = NULL;
4119 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4120 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4121 break;
4122 }
4123 }
4124 done_socket(sock);
4125}
4126
4127/** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4128 *
4129 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4130 */
4131static void
4132lwip_socket_drop_registered_mld6_memberships(int s)
4133{
4134 struct lwip_sock *sock = get_socket(s);
4135 int i;
4136
4137 if (!sock) {
4138 return;
4139 }
4140
4141 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4142 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4143 ip_addr_t multi_addr;
4144 u8_t if_idx;
4145
4146 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4147 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4148
4149 socket_ipv6_multicast_memberships[i].sock = NULL;
4150 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4151 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4152
4153 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4154 }
4155 }
4156 done_socket(sock);
4157}
4158#endif /* LWIP_IPV6_MLD */
4159
4160#endif /* LWIP_SOCKET */