1 | /*
|
---|
2 | * Copyright (c) 2001-2003 Swedish Institute of Computer Science.
|
---|
3 | * All rights reserved.
|
---|
4 | *
|
---|
5 | * Redistribution and use in source and binary forms, with or without modification,
|
---|
6 | * are permitted provided that the following conditions are met:
|
---|
7 | *
|
---|
8 | * 1. Redistributions of source code must retain the above copyright notice,
|
---|
9 | * this list of conditions and the following disclaimer.
|
---|
10 | * 2. Redistributions in binary form must reproduce the above copyright notice,
|
---|
11 | * this list of conditions and the following disclaimer in the documentation
|
---|
12 | * and/or other materials provided with the distribution.
|
---|
13 | * 3. The name of the author may not be used to endorse or promote products
|
---|
14 | * derived from this software without specific prior written permission.
|
---|
15 | *
|
---|
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
---|
17 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
---|
18 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
---|
19 | * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
---|
20 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
|
---|
21 | * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
---|
22 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
---|
23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
---|
24 | * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
|
---|
25 | * OF SUCH DAMAGE.
|
---|
26 | *
|
---|
27 | * This file is part of the lwIP TCP/IP stack.
|
---|
28 | *
|
---|
29 | * Author: Adam Dunkels <adam@sics.se>
|
---|
30 | *
|
---|
31 | */
|
---|
32 |
|
---|
33 | /*
|
---|
34 | * Wed Apr 17 16:05:29 EDT 2002 (James Roth)
|
---|
35 | *
|
---|
36 | * - Fixed an unlikely sys_thread_new() race condition.
|
---|
37 | *
|
---|
38 | * - Made current_thread() work with threads which where
|
---|
39 | * not created with sys_thread_new(). This includes
|
---|
40 | * the main thread and threads made with pthread_create().
|
---|
41 | *
|
---|
42 | * - Catch overflows where more than SYS_MBOX_SIZE messages
|
---|
43 | * are waiting to be read. The sys_mbox_post() routine
|
---|
44 | * will block until there is more room instead of just
|
---|
45 | * leaking messages.
|
---|
46 | */
|
---|
47 | #include "lwip/debug.h"
|
---|
48 |
|
---|
49 | #include <string.h>
|
---|
50 | #include <sys/time.h>
|
---|
51 | #include <sys/types.h>
|
---|
52 | #include <stdlib.h>
|
---|
53 | #include <unistd.h>
|
---|
54 | #include <pthread.h>
|
---|
55 | #include <errno.h>
|
---|
56 |
|
---|
57 | #include "lwip/def.h"
|
---|
58 |
|
---|
59 | #ifdef LWIP_UNIX_MACH
|
---|
60 | #include <mach/mach.h>
|
---|
61 | #include <mach/mach_time.h>
|
---|
62 | #endif
|
---|
63 |
|
---|
64 | #include "lwip/sys.h"
|
---|
65 | #include "lwip/opt.h"
|
---|
66 | #include "lwip/stats.h"
|
---|
67 | #include "lwip/tcpip.h"
|
---|
68 |
|
---|
69 | static void
|
---|
70 | get_monotonic_time(struct timespec *ts)
|
---|
71 | {
|
---|
72 | #ifdef LWIP_UNIX_MACH
|
---|
73 | /* darwin impl (no CLOCK_MONOTONIC) */
|
---|
74 | uint64_t t = mach_absolute_time();
|
---|
75 | mach_timebase_info_data_t timebase_info = {0, 0};
|
---|
76 | mach_timebase_info(&timebase_info);
|
---|
77 | uint64_t nano = (t * timebase_info.numer) / (timebase_info.denom);
|
---|
78 | uint64_t sec = nano/1000000000L;
|
---|
79 | nano -= sec * 1000000000L;
|
---|
80 | ts->tv_sec = sec;
|
---|
81 | ts->tv_nsec = nano;
|
---|
82 | #else
|
---|
83 | clock_gettime(CLOCK_MONOTONIC, ts);
|
---|
84 | #endif
|
---|
85 | }
|
---|
86 |
|
---|
#if SYS_LIGHTWEIGHT_PROT
/* Global recursive "critical section" state used by sys_arch_protect()
 * and sys_arch_unprotect().  lwprot_thread is the thread currently inside
 * the protected region; (pthread_t)0xDEAD is a "nobody" sentinel
 * (assumes a real pthread_t never equals it -- TODO confirm on platforms
 * with opaque/struct pthread_t). */
static pthread_mutex_t lwprot_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lwprot_thread = (pthread_t)0xDEAD;
static int lwprot_count = 0;  /* recursion depth of the current holder */
#endif /* SYS_LIGHTWEIGHT_PROT */
|
---|
92 |
|
---|
#if !NO_SYS

/* Singly-linked list of all threads created through sys_thread_new(),
 * protected by threads_mutex.  Threads are never removed from the list. */
static struct sys_thread *threads = NULL;
static pthread_mutex_t threads_mutex = PTHREAD_MUTEX_INITIALIZER;

struct sys_mbox_msg {
  struct sys_mbox_msg *next;
  void *msg;
};

#define SYS_MBOX_SIZE 128

/* Fixed-capacity circular message queue.  first/last are free-running
 * counters; the actual slot used is (counter % SYS_MBOX_SIZE), so the
 * mailbox is empty when first == last and full when
 * last + 1 >= first + SYS_MBOX_SIZE. */
struct sys_mbox {
  int first, last;
  void *msgs[SYS_MBOX_SIZE];
  struct sys_sem *not_empty;  /* signalled on empty -> non-empty transition */
  struct sys_sem *not_full;   /* signalled when a slot frees up and senders wait */
  struct sys_sem *mutex;      /* binary semaphore protecting first/last/msgs */
  int wait_send;              /* number of senders blocked in sys_mbox_post() */
};

/* Counting semaphore built on a pthread condition variable.  Note that
 * sys_sem_signal() clamps the count at 1, so it behaves as binary. */
struct sys_sem {
  unsigned int c;
  pthread_condattr_t condattr;  /* kept so it can be destroyed with the sem */
  pthread_cond_t cond;
  pthread_mutex_t mutex;
};

/* Thin wrapper so sys_mutex_t can stay an opaque pointer type. */
struct sys_mutex {
  pthread_mutex_t mutex;
};

/* Node of the global `threads` list. */
struct sys_thread {
  struct sys_thread *next;
  pthread_t pthread;
};

static struct sys_sem *sys_sem_new_internal(u8_t count);
static void sys_sem_free_internal(struct sys_sem *sem);

static u32_t cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex,
                       u32_t timeout);
|
---|
135 |
|
---|
136 | /*-----------------------------------------------------------------------------------*/
|
---|
137 | /* Threads */
|
---|
138 | static struct sys_thread *
|
---|
139 | introduce_thread(pthread_t id)
|
---|
140 | {
|
---|
141 | struct sys_thread *thread;
|
---|
142 |
|
---|
143 | thread = (struct sys_thread *)malloc(sizeof(struct sys_thread));
|
---|
144 |
|
---|
145 | if (thread != NULL) {
|
---|
146 | pthread_mutex_lock(&threads_mutex);
|
---|
147 | thread->next = threads;
|
---|
148 | thread->pthread = id;
|
---|
149 | threads = thread;
|
---|
150 | pthread_mutex_unlock(&threads_mutex);
|
---|
151 | }
|
---|
152 |
|
---|
153 | return thread;
|
---|
154 | }
|
---|
155 |
|
---|
/* Argument bundle passed from sys_thread_new() to thread_wrapper().
 * Heap-allocated by sys_thread_new(); freed by the wrapper in the
 * (unexpected) case that the thread function ever returns. */
struct thread_wrapper_data
{
  lwip_thread_fn function;  /* thread entry point supplied by the caller */
  void *arg;                /* opaque argument forwarded to `function` */
};
|
---|
161 |
|
---|
162 | static void *
|
---|
163 | thread_wrapper(void *arg)
|
---|
164 | {
|
---|
165 | struct thread_wrapper_data *thread_data = (struct thread_wrapper_data *)arg;
|
---|
166 |
|
---|
167 | thread_data->function(thread_data->arg);
|
---|
168 |
|
---|
169 | /* we should never get here */
|
---|
170 | free(arg);
|
---|
171 | return NULL;
|
---|
172 | }
|
---|
173 |
|
---|
174 | sys_thread_t
|
---|
175 | sys_thread_new(const char *name, lwip_thread_fn function, void *arg, int stacksize, int prio)
|
---|
176 | {
|
---|
177 | int code;
|
---|
178 | pthread_t tmp;
|
---|
179 | struct sys_thread *st = NULL;
|
---|
180 | struct thread_wrapper_data *thread_data;
|
---|
181 | LWIP_UNUSED_ARG(name);
|
---|
182 | LWIP_UNUSED_ARG(stacksize);
|
---|
183 | LWIP_UNUSED_ARG(prio);
|
---|
184 |
|
---|
185 | thread_data = (struct thread_wrapper_data *)malloc(sizeof(struct thread_wrapper_data));
|
---|
186 | thread_data->arg = arg;
|
---|
187 | thread_data->function = function;
|
---|
188 | code = pthread_create(&tmp,
|
---|
189 | NULL,
|
---|
190 | thread_wrapper,
|
---|
191 | thread_data);
|
---|
192 |
|
---|
193 | if (0 == code) {
|
---|
194 | st = introduce_thread(tmp);
|
---|
195 | }
|
---|
196 |
|
---|
197 | if (NULL == st) {
|
---|
198 | LWIP_DEBUGF(SYS_DEBUG, ("sys_thread_new: pthread_create %d, st = 0x%lx",
|
---|
199 | code, (unsigned long)st));
|
---|
200 | abort();
|
---|
201 | }
|
---|
202 | return st;
|
---|
203 | }
|
---|
204 |
|
---|
#if LWIP_TCPIP_CORE_LOCKING
/* Thread currently holding lock_tcpip_core (0 == nobody).  Written only
 * while the core mutex is held; read by sys_check_core_locking(). */
static pthread_t lwip_core_lock_holder_thread_id;

/* Acquire the global tcpip core mutex and record the caller as holder. */
void sys_lock_tcpip_core(void)
{
  sys_mutex_lock(&lock_tcpip_core);
  lwip_core_lock_holder_thread_id = pthread_self();
}

/* Clear the holder record, then release the global tcpip core mutex.
 * (Cleared before unlocking so a stale holder id is never visible while
 * the mutex is free.) */
void sys_unlock_tcpip_core(void)
{
  lwip_core_lock_holder_thread_id = 0;
  sys_mutex_unlock(&lock_tcpip_core);
}
#endif /* LWIP_TCPIP_CORE_LOCKING */
|
---|
219 |
|
---|
/* Identity of the tcpip (core) thread; 0 until sys_mark_tcpip_thread()
 * runs.  While 0, sys_check_core_locking() performs no checks. */
static pthread_t lwip_tcpip_thread_id;

/* Called once, from the tcpip thread itself, to record its identity for
 * the sanity checks in sys_check_core_locking(). */
void sys_mark_tcpip_thread(void)
{
  lwip_tcpip_thread_id = pthread_self();
}
|
---|
225 |
|
---|
/* Debug sanity check called by lwIP internals: assert that the calling
 * thread is allowed to touch core tcpip state.  A no-op until the tcpip
 * thread has registered itself via sys_mark_tcpip_thread(). */
void sys_check_core_locking(void)
{
  /* Embedded systems should check we are NOT in an interrupt context here */

  if (lwip_tcpip_thread_id != 0) {
    pthread_t current_thread_id = pthread_self();

#if LWIP_TCPIP_CORE_LOCKING
    /* With core locking, any thread may run core code provided it holds
     * the core lock. */
    LWIP_ASSERT("Function called without core lock", current_thread_id == lwip_core_lock_holder_thread_id);
#else /* LWIP_TCPIP_CORE_LOCKING */
    /* Without core locking, only the tcpip thread itself may do so. */
    LWIP_ASSERT("Function called from wrong thread", current_thread_id == lwip_tcpip_thread_id);
#endif /* LWIP_TCPIP_CORE_LOCKING */
  }
}
|
---|
240 |
|
---|
241 | /*-----------------------------------------------------------------------------------*/
|
---|
242 | /* Mailbox */
|
---|
243 | err_t
|
---|
244 | sys_mbox_new(struct sys_mbox **mb, int size)
|
---|
245 | {
|
---|
246 | struct sys_mbox *mbox;
|
---|
247 | LWIP_UNUSED_ARG(size);
|
---|
248 |
|
---|
249 | mbox = (struct sys_mbox *)malloc(sizeof(struct sys_mbox));
|
---|
250 | if (mbox == NULL) {
|
---|
251 | return ERR_MEM;
|
---|
252 | }
|
---|
253 | mbox->first = mbox->last = 0;
|
---|
254 | mbox->not_empty = sys_sem_new_internal(0);
|
---|
255 | mbox->not_full = sys_sem_new_internal(0);
|
---|
256 | mbox->mutex = sys_sem_new_internal(1);
|
---|
257 | mbox->wait_send = 0;
|
---|
258 |
|
---|
259 | SYS_STATS_INC_USED(mbox);
|
---|
260 | *mb = mbox;
|
---|
261 | return ERR_OK;
|
---|
262 | }
|
---|
263 |
|
---|
/* Destroy a mailbox and its internal semaphores.  Messages still queued
 * are NOT freed -- callers must drain the mailbox first.  A NULL or
 * empty handle is a no-op. */
void
sys_mbox_free(struct sys_mbox **mb)
{
  if ((mb != NULL) && (*mb != SYS_MBOX_NULL)) {
    struct sys_mbox *mbox = *mb;
    SYS_STATS_DEC(mbox.used);
    /* Take the mailbox mutex so no other thread is mid-operation when we
     * tear it down; it is then destroyed while "held", so nobody can
     * acquire it again. */
    sys_arch_sem_wait(&mbox->mutex, 0);

    sys_sem_free_internal(mbox->not_empty);
    sys_sem_free_internal(mbox->not_full);
    sys_sem_free_internal(mbox->mutex);
    mbox->not_empty = mbox->not_full = mbox->mutex = NULL;
    /* LWIP_DEBUGF("sys_mbox_free: mbox 0x%lx\n", mbox); */
    free(mbox);
  }
}
|
---|
280 |
|
---|
281 | err_t
|
---|
282 | sys_mbox_trypost(struct sys_mbox **mb, void *msg)
|
---|
283 | {
|
---|
284 | u8_t first;
|
---|
285 | struct sys_mbox *mbox;
|
---|
286 | LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
|
---|
287 | mbox = *mb;
|
---|
288 |
|
---|
289 | sys_arch_sem_wait(&mbox->mutex, 0);
|
---|
290 |
|
---|
291 | LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_trypost: mbox %p msg %p\n",
|
---|
292 | (void *)mbox, (void *)msg));
|
---|
293 |
|
---|
294 | if ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
|
---|
295 | sys_sem_signal(&mbox->mutex);
|
---|
296 | return ERR_MEM;
|
---|
297 | }
|
---|
298 |
|
---|
299 | mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;
|
---|
300 |
|
---|
301 | if (mbox->last == mbox->first) {
|
---|
302 | first = 1;
|
---|
303 | } else {
|
---|
304 | first = 0;
|
---|
305 | }
|
---|
306 |
|
---|
307 | mbox->last++;
|
---|
308 |
|
---|
309 | if (first) {
|
---|
310 | sys_sem_signal(&mbox->not_empty);
|
---|
311 | }
|
---|
312 |
|
---|
313 | sys_sem_signal(&mbox->mutex);
|
---|
314 |
|
---|
315 | return ERR_OK;
|
---|
316 | }
|
---|
317 |
|
---|
/* ISR-safe variant of sys_mbox_trypost().  This UNIX port has no real
 * interrupt context, so it simply forwards to the regular trypost. */
err_t
sys_mbox_trypost_fromisr(sys_mbox_t *q, void *msg)
{
  return sys_mbox_trypost(q, msg);
}
|
---|
323 |
|
---|
/* Post a message to the mailbox, blocking while it is full (in contrast
 * to sys_mbox_trypost(), which fails immediately).  The wait_send
 * counter tells fetchers that at least one sender is blocked on
 * not_full and needs a wakeup. */
void
sys_mbox_post(struct sys_mbox **mb, void *msg)
{
  u8_t first;
  struct sys_mbox *mbox;
  LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
  mbox = *mb;

  sys_arch_sem_wait(&mbox->mutex, 0);

  LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_post: mbox %p msg %p\n", (void *)mbox, (void *)msg));

  /* Mailbox full: drop the mutex, sleep on not_full, retake the mutex
   * and re-check -- another sender may have taken the freed slot. */
  while ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
    mbox->wait_send++;
    sys_sem_signal(&mbox->mutex);
    sys_arch_sem_wait(&mbox->not_full, 0);
    sys_arch_sem_wait(&mbox->mutex, 0);
    mbox->wait_send--;
  }

  mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;

  /* Remember whether the mailbox was empty: only the empty -> non-empty
   * transition needs to signal not_empty. */
  if (mbox->last == mbox->first) {
    first = 1;
  } else {
    first = 0;
  }

  mbox->last++;

  if (first) {
    sys_sem_signal(&mbox->not_empty);
  }

  sys_sem_signal(&mbox->mutex);
}
|
---|
360 |
|
---|
361 | u32_t
|
---|
362 | sys_arch_mbox_tryfetch(struct sys_mbox **mb, void **msg)
|
---|
363 | {
|
---|
364 | struct sys_mbox *mbox;
|
---|
365 | LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
|
---|
366 | mbox = *mb;
|
---|
367 |
|
---|
368 | sys_arch_sem_wait(&mbox->mutex, 0);
|
---|
369 |
|
---|
370 | if (mbox->first == mbox->last) {
|
---|
371 | sys_sem_signal(&mbox->mutex);
|
---|
372 | return SYS_MBOX_EMPTY;
|
---|
373 | }
|
---|
374 |
|
---|
375 | if (msg != NULL) {
|
---|
376 | LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p msg %p\n", (void *)mbox, *msg));
|
---|
377 | *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
|
---|
378 | }
|
---|
379 | else{
|
---|
380 | LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_tryfetch: mbox %p, null msg\n", (void *)mbox));
|
---|
381 | }
|
---|
382 |
|
---|
383 | mbox->first++;
|
---|
384 |
|
---|
385 | if (mbox->wait_send) {
|
---|
386 | sys_sem_signal(&mbox->not_full);
|
---|
387 | }
|
---|
388 |
|
---|
389 | sys_sem_signal(&mbox->mutex);
|
---|
390 |
|
---|
391 | return 0;
|
---|
392 | }
|
---|
393 |
|
---|
/* Fetch a message from the mailbox, blocking up to `timeout` ms
 * (0 = wait forever).  On success stores the message in *msg (discarded
 * if msg is NULL) and returns the time spent waiting in ms; returns
 * SYS_ARCH_TIMEOUT if the timeout expired with no message. */
u32_t
sys_arch_mbox_fetch(struct sys_mbox **mb, void **msg, u32_t timeout)
{
  u32_t time_needed = 0;
  struct sys_mbox *mbox;
  LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
  mbox = *mb;

  /* The mutex lock is quick so we don't bother with the timeout
     stuff here. */
  sys_arch_sem_wait(&mbox->mutex, 0);

  /* Loop: another fetcher may consume the message between our wakeup on
   * not_empty and our re-acquisition of the mailbox mutex. */
  while (mbox->first == mbox->last) {
    sys_sem_signal(&mbox->mutex);

    /* We block while waiting for a mail to arrive in the mailbox. We
       must be prepared to timeout. */
    if (timeout != 0) {
      time_needed = sys_arch_sem_wait(&mbox->not_empty, timeout);

      if (time_needed == SYS_ARCH_TIMEOUT) {
        return SYS_ARCH_TIMEOUT;
      }
    } else {
      sys_arch_sem_wait(&mbox->not_empty, 0);
    }

    /* NOTE(review): when the loop repeats, the full `timeout` is applied
     * afresh instead of the remainder, so the total wait can exceed
     * `timeout` -- long-standing behaviour; confirm before relying on
     * tight timeout accuracy. */
    sys_arch_sem_wait(&mbox->mutex, 0);
  }

  if (msg != NULL) {
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p msg %p\n", (void *)mbox, *msg));
    *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
  }
  else{
    LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_fetch: mbox %p, null msg\n", (void *)mbox));
  }

  mbox->first++;

  /* A slot was freed: wake a sender blocked in sys_mbox_post(). */
  if (mbox->wait_send) {
    sys_sem_signal(&mbox->not_full);
  }

  sys_sem_signal(&mbox->mutex);

  return time_needed;
}
|
---|
442 |
|
---|
443 | /*-----------------------------------------------------------------------------------*/
|
---|
444 | /* Semaphore */
|
---|
445 | static struct sys_sem *
|
---|
446 | sys_sem_new_internal(u8_t count)
|
---|
447 | {
|
---|
448 | struct sys_sem *sem;
|
---|
449 |
|
---|
450 | sem = (struct sys_sem *)malloc(sizeof(struct sys_sem));
|
---|
451 | if (sem != NULL) {
|
---|
452 | sem->c = count;
|
---|
453 | pthread_condattr_init(&(sem->condattr));
|
---|
454 | #if !(defined(LWIP_UNIX_MACH) || (defined(LWIP_UNIX_ANDROID) && __ANDROID_API__ < 21))
|
---|
455 | pthread_condattr_setclock(&(sem->condattr), CLOCK_MONOTONIC);
|
---|
456 | #endif
|
---|
457 | pthread_cond_init(&(sem->cond), &(sem->condattr));
|
---|
458 | pthread_mutex_init(&(sem->mutex), NULL);
|
---|
459 | }
|
---|
460 | return sem;
|
---|
461 | }
|
---|
462 |
|
---|
463 | err_t
|
---|
464 | sys_sem_new(struct sys_sem **sem, u8_t count)
|
---|
465 | {
|
---|
466 | SYS_STATS_INC_USED(sem);
|
---|
467 | *sem = sys_sem_new_internal(count);
|
---|
468 | if (*sem == NULL) {
|
---|
469 | return ERR_MEM;
|
---|
470 | }
|
---|
471 | return ERR_OK;
|
---|
472 | }
|
---|
473 |
|
---|
/* Wait on `cond` (with `mutex` held) for at most `timeout` ms; a timeout
 * of 0 means wait forever.  Returns the elapsed wait time in ms, or
 * SYS_ARCH_TIMEOUT if the wait timed out. */
static u32_t
cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, u32_t timeout)
{
  struct timespec rtime1, rtime2, ts;
  int ret;

#ifdef __GNU__
  /* GNU/Hurd: redirect to the native *_np variants. */
#define pthread_cond_wait pthread_hurd_cond_wait_np
#define pthread_cond_timedwait pthread_hurd_cond_timedwait_np
#endif

  if (timeout == 0) {
    pthread_cond_wait(cond, mutex);
    return 0;
  }

  /* Get a timestamp and add the timeout value. */
  get_monotonic_time(&rtime1);
#if defined(LWIP_UNIX_MACH) || (defined(LWIP_UNIX_ANDROID) && __ANDROID_API__ < 21)
  /* No CLOCK_MONOTONIC condition variables here: use the non-portable
   * relative-timeout wait instead of an absolute deadline. */
  ts.tv_sec = timeout / 1000L;
  ts.tv_nsec = (timeout % 1000L) * 1000000L;
  ret = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  /* Absolute deadline = now + timeout, normalised so tv_nsec < 1e9. */
  ts.tv_sec = rtime1.tv_sec + timeout / 1000L;
  ts.tv_nsec = rtime1.tv_nsec + (timeout % 1000L) * 1000000L;
  if (ts.tv_nsec >= 1000000000L) {
    ts.tv_sec++;
    ts.tv_nsec -= 1000000000L;
  }

  ret = pthread_cond_timedwait(cond, mutex, &ts);
#endif
  if (ret == ETIMEDOUT) {
    return SYS_ARCH_TIMEOUT;
  }

  /* Calculate for how long we waited for the cond. */
  get_monotonic_time(&rtime2);
  ts.tv_sec = rtime2.tv_sec - rtime1.tv_sec;
  ts.tv_nsec = rtime2.tv_nsec - rtime1.tv_nsec;
  if (ts.tv_nsec < 0) {
    /* Borrow a second so tv_nsec is non-negative. */
    ts.tv_sec--;
    ts.tv_nsec += 1000000000L;
  }
  return (u32_t)(ts.tv_sec * 1000L + ts.tv_nsec / 1000000L);
}
|
---|
520 |
|
---|
/* Wait on a semaphore for at most `timeout` ms (0 = wait forever).
 * Returns the time spent waiting in ms, or SYS_ARCH_TIMEOUT if the wait
 * timed out without the semaphore becoming available. */
u32_t
sys_arch_sem_wait(struct sys_sem **s, u32_t timeout)
{
  u32_t time_needed = 0;
  struct sys_sem *sem;
  LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
  sem = *s;

  pthread_mutex_lock(&(sem->mutex));
  /* Loop guards against spurious condvar wakeups and lost races with
   * other waiters (sys_sem_signal() broadcasts). */
  while (sem->c <= 0) {
    if (timeout > 0) {
      time_needed = cond_wait(&(sem->cond), &(sem->mutex), timeout);

      if (time_needed == SYS_ARCH_TIMEOUT) {
        pthread_mutex_unlock(&(sem->mutex));
        return SYS_ARCH_TIMEOUT;
      }
      /* NOTE(review): if a wakeup loses the race (c still 0), the full
       * `timeout` is applied again on the next iteration, so the total
       * wait can exceed `timeout` -- confirm before relying on tight
       * timeout accuracy. */
      /* pthread_mutex_unlock(&(sem->mutex));
         return time_needed; */
    } else {
      cond_wait(&(sem->cond), &(sem->mutex), 0);
    }
  }
  sem->c--;
  pthread_mutex_unlock(&(sem->mutex));
  return (u32_t)time_needed;
}
|
---|
548 |
|
---|
549 | void
|
---|
550 | sys_sem_signal(struct sys_sem **s)
|
---|
551 | {
|
---|
552 | struct sys_sem *sem;
|
---|
553 | LWIP_ASSERT("invalid sem", (s != NULL) && (*s != NULL));
|
---|
554 | sem = *s;
|
---|
555 |
|
---|
556 | pthread_mutex_lock(&(sem->mutex));
|
---|
557 | sem->c++;
|
---|
558 |
|
---|
559 | if (sem->c > 1) {
|
---|
560 | sem->c = 1;
|
---|
561 | }
|
---|
562 |
|
---|
563 | pthread_cond_broadcast(&(sem->cond));
|
---|
564 | pthread_mutex_unlock(&(sem->mutex));
|
---|
565 | }
|
---|
566 |
|
---|
/* Destroy a semaphore's pthread primitives and release its memory.
 * Must not be called while any thread is still waiting on it. */
static void
sys_sem_free_internal(struct sys_sem *sem)
{
  pthread_cond_destroy(&(sem->cond));
  pthread_condattr_destroy(&(sem->condattr));
  pthread_mutex_destroy(&(sem->mutex));
  free(sem);
}
|
---|
575 |
|
---|
/* Public semaphore destructor: updates the stats counter and frees the
 * semaphore.  NULL/empty handles are ignored; *sem is not cleared. */
void
sys_sem_free(struct sys_sem **sem)
{
  if ((sem != NULL) && (*sem != SYS_SEM_NULL)) {
    SYS_STATS_DEC(sem.used);
    sys_sem_free_internal(*sem);
  }
}
|
---|
584 |
|
---|
585 | /*-----------------------------------------------------------------------------------*/
|
---|
586 | /* Mutex */
|
---|
587 | /** Create a new mutex
|
---|
588 | * @param mutex pointer to the mutex to create
|
---|
589 | * @return a new mutex */
|
---|
590 | err_t
|
---|
591 | sys_mutex_new(struct sys_mutex **mutex)
|
---|
592 | {
|
---|
593 | struct sys_mutex *mtx;
|
---|
594 |
|
---|
595 | mtx = (struct sys_mutex *)malloc(sizeof(struct sys_mutex));
|
---|
596 | if (mtx != NULL) {
|
---|
597 | pthread_mutex_init(&(mtx->mutex), NULL);
|
---|
598 | *mutex = mtx;
|
---|
599 | return ERR_OK;
|
---|
600 | }
|
---|
601 | else {
|
---|
602 | return ERR_MEM;
|
---|
603 | }
|
---|
604 | }
|
---|
605 |
|
---|
/** Lock a mutex
 * @param mutex the mutex to lock (blocks until the lock is acquired) */
void
sys_mutex_lock(struct sys_mutex **mutex)
{
  pthread_mutex_lock(&((*mutex)->mutex));
}
|
---|
613 |
|
---|
/** Unlock a mutex
 * @param mutex the mutex to unlock (must be held by the calling thread) */
void
sys_mutex_unlock(struct sys_mutex **mutex)
{
  pthread_mutex_unlock(&((*mutex)->mutex));
}
|
---|
621 |
|
---|
/** Delete a mutex
 * @param mutex the mutex to delete (must be unlocked; the handle in
 *        *mutex is left dangling -- callers must not reuse it) */
void
sys_mutex_free(struct sys_mutex **mutex)
{
  pthread_mutex_destroy(&((*mutex)->mutex));
  free(*mutex);
}
|
---|
630 |
|
---|
631 | #endif /* !NO_SYS */
|
---|
632 |
|
---|
633 | /*-----------------------------------------------------------------------------------*/
|
---|
634 | /* Time */
|
---|
635 | u32_t
|
---|
636 | sys_now(void)
|
---|
637 | {
|
---|
638 | struct timespec ts;
|
---|
639 |
|
---|
640 | get_monotonic_time(&ts);
|
---|
641 | return (u32_t)(ts.tv_sec * 1000L + ts.tv_nsec / 1000000L);
|
---|
642 | }
|
---|
643 |
|
---|
644 | u32_t
|
---|
645 | sys_jiffies(void)
|
---|
646 | {
|
---|
647 | struct timespec ts;
|
---|
648 |
|
---|
649 | get_monotonic_time(&ts);
|
---|
650 | return (u32_t)(ts.tv_sec * 1000000000L + ts.tv_nsec);
|
---|
651 | }
|
---|
652 |
|
---|
/*-----------------------------------------------------------------------------------*/
/* Init */

/* lwIP system-layer initialisation hook.  Nothing to do on this port:
 * all static state here is initialised at load time. */
void
sys_init(void)
{
}
|
---|
660 |
|
---|
/*-----------------------------------------------------------------------------------*/
/* Critical section */
#if SYS_LIGHTWEIGHT_PROT
/** sys_prot_t sys_arch_protect(void)

This optional function does a "fast" critical region protection and returns
the previous protection level. This function is only called during very short
critical regions. An embedded system which supports ISR-based drivers might
want to implement this function by disabling interrupts. Task-based systems
might want to implement this by using a mutex or disabling tasking. This
function should support recursive calls from the same task or interrupt. In
other words, sys_arch_protect() could be called while already protected. In
that case the return value indicates that it is already protected.

sys_arch_protect() is only required if your port is supporting an operating
system.
*/
sys_prot_t
sys_arch_protect(void)
{
  /* Note that for the UNIX port, we are using a lightweight mutex, and our
   * own counter (which is locked by the mutex). The return code is not actually
   * used. */
  /* NOTE(review): lwprot_thread is read here without holding lwprot_mutex,
   * and pthread_t values are compared with != which is not strictly
   * portable (pthread_equal() is the portable test) -- works on this
   * port's platforms, but confirm before reusing elsewhere. */
  if (lwprot_thread != pthread_self())
  {
    /* We are locking the mutex where it has not been locked before *
     * or is being locked by another thread */
    pthread_mutex_lock(&lwprot_mutex);
    lwprot_thread = pthread_self();
    lwprot_count = 1;  /* first (outermost) level of protection */
  }
  else
    /* It is already locked by THIS thread */
    lwprot_count++;
  return 0;
}
|
---|
697 |
|
---|
/** void sys_arch_unprotect(sys_prot_t pval)

This optional function does a "fast" set of critical region protection to the
value specified by pval. See the documentation for sys_arch_protect() for
more information. This function is only required if your port is supporting
an operating system.
*/
void
sys_arch_unprotect(sys_prot_t pval)
{
  LWIP_UNUSED_ARG(pval);
  /* Only the thread that entered the critical region may leave it; the
   * outermost exit (count reaching 0) resets the "nobody" sentinel and
   * releases the mutex. */
  if (lwprot_thread == pthread_self())
  {
    lwprot_count--;
    if (lwprot_count == 0)
    {
      lwprot_thread = (pthread_t) 0xDEAD;
      pthread_mutex_unlock(&lwprot_mutex);
    }
  }
}
#endif /* SYS_LIGHTWEIGHT_PROT */
|
---|