uloop: move kqueue code into a separate file
[project/libubox.git] / uloop.c
/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2016 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

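/*
 * Stack of fds whose callbacks are currently being dispatched. The entries
 * live on the stack of uloop_run_events() and are used to coalesce further
 * edge-triggered events that arrive while a callback for the same fd is
 * still running (see uloop_fd_stack_event()).
 */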
struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
static bool do_sigchld = false;

static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;

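/*
 * The poll backends are compiled as part of this translation unit so that
 * they can access the static state above. The kqueue variant lives in
 * uloop-kqueue.c; the epoll variant follows below.
 */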
#ifdef USE_KQUEUE
#include "uloop-kqueue.c"
#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

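/* Create the epoll instance once and mark it close-on-exec. */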
int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}

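/*
 * Translate uloop flags into an epoll_event and add or update the fd.
 * epoll_data is a union, so the final ev.data.ptr assignment is what
 * epoll_wait() hands back to uloop_fetch_events().
 */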
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.fd = fd->fd;
	ev.data.ptr = fd;
	fd->flags = flags;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];

static int __uloop_fd_delete(struct uloop_fd *sock)
{
	sock->flags = 0;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

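/*
 * Wait for events (timeout in ms, -1 blocks indefinitely) and convert the
 * epoll results into uloop_fd_event entries: EPOLLERR/EPOLLHUP set the
 * error flag, EPOLLRDHUP sets eof, EPOLLIN/EPOLLOUT map to
 * ULOOP_READ/ULOOP_WRITE.
 */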
static int uloop_fetch_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		cur->fd = u;
		if (!u)
			continue;

		if (events[n].events & (EPOLLERR|EPOLLHUP)) {
			u->error = true;
			if (!(u->flags & ULOOP_ERROR_CB))
				uloop_fd_delete(u);
		}

		if (!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP))) {
			cur->fd = NULL;
			continue;
		}

		if (events[n].events & EPOLLRDHUP)
			u->eof = true;

		if (events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if (events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		cur->events = ev;
	}

	return nfds;
}

#endif

static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}

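/*
 * Fetch a batch of events if none are pending, then dispatch at most one
 * fd callback per call. The on-stack uloop_fd_stack entry lets buffered
 * edge-triggered events for the same fd be replayed until the callback
 * stops producing them.
 */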
static void uloop_run_events(int timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
		if (cur_nfds < 0)
			cur_nfds = 0;
	}

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;

		return;
	}
}

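/*
 * Register an fd or update its event mask. On first registration the fd is
 * switched to non-blocking mode unless ULOOP_BLOCKING is set; a mask
 * without ULOOP_READ/ULOOP_WRITE removes the fd instead.
 */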
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;
	sock->error = false;

out:
	return ret;
}

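/*
 * Unregister an fd, dropping any references to it from the batch of events
 * that is still being dispatched.
 */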
int uloop_fd_delete(struct uloop_fd *fd)
{
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	fd->registered = false;
	uloop_fd_stack_event(fd, -1);
	return __uloop_fd_delete(fd);
}

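/* Difference t1 - t2 in milliseconds. */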
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}

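/* Insert a timeout into the list, which is kept sorted by expiry time. */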
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}

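/* Timeouts use CLOCK_MONOTONIC, so they are unaffected by wall-clock changes. */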
static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec > 1000000) {
		time->tv_sec++;
		time->tv_usec -= 1000000;
	}

	return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}

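/*
 * Track a child process. The list is kept sorted by pid, which the lookup
 * in uloop_handle_processes() relies on.
 */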
int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}

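/*
 * Reap exited children with waitpid(WNOHANG) and invoke the callback of
 * every registered uloop_process that matches the reaped pid.
 */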
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}

static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

static void uloop_install_handler(int signum, void (*handler)(int), struct sigaction* old, bool add)
{
	struct sigaction s;
	struct sigaction *act;

	act = NULL;
	sigaction(signum, NULL, &s);

	if (add) {
		if (s.sa_handler == SIG_DFL) { /* Do not override existing custom signal handlers */
			memcpy(old, &s, sizeof(struct sigaction));
			s.sa_handler = handler;
			s.sa_flags = 0;
			act = &s;
		}
	}
	else if (s.sa_handler == handler) { /* Do not restore if someone modified our handler */
		act = old;
	}

	if (act != NULL)
		sigaction(signum, act, NULL);
}

static void uloop_ignore_signal(int signum, bool ignore)
{
	struct sigaction s;
	void *new_handler = NULL;

	sigaction(signum, NULL, &s);

	if (ignore) {
		if (s.sa_handler == SIG_DFL) /* Ignore only if there isn't any custom handler */
			new_handler = SIG_IGN;
	} else {
		if (s.sa_handler == SIG_IGN) /* Restore only if no one modified our SIG_IGN */
			new_handler = SIG_DFL;
	}

	if (new_handler) {
		s.sa_handler = new_handler;
		s.sa_flags = 0;
		sigaction(signum, &s, NULL);
	}
}

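/*
 * Install (or restore) the SIGINT/SIGTERM/SIGCHLD handlers and ignore
 * SIGPIPE for the duration of the event loop.
 */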
static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld, old_sigterm;

	uloop_install_handler(SIGINT, uloop_handle_sigint, &old_sigint, add);
	uloop_install_handler(SIGTERM, uloop_handle_sigint, &old_sigterm, add);
	uloop_install_handler(SIGCHLD, uloop_sigchld, &old_sigchld, add);

	uloop_ignore_signal(SIGPIPE, add);
}

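/*
 * Milliseconds until the earliest pending timeout: 0 if it is already due,
 * -1 if there is none, which makes the poll call block indefinitely.
 */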
static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}

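/*
 * Main loop: run expired timeouts, reap child processes, then poll for fd
 * events until uloop_cancelled is set (e.g. by SIGINT/SIGTERM).
 */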
void uloop_run(void)
{
	static int recursive_calls = 0;
	struct timeval tv;

	/*
	 * Handlers are only updated for the first call to uloop_run() (and restored
	 * when this call is done).
	 */
	if (!recursive_calls++)
		uloop_setup_signals(true);

	uloop_cancelled = false;
	while (!uloop_cancelled) {
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);

		if (do_sigchld)
			uloop_handle_processes();

		if (uloop_cancelled)
			break;

		uloop_gettime(&tv);
		uloop_run_events(uloop_get_next_timeout(&tv));
	}

	if (!--recursive_calls)
		uloop_setup_signals(false);
}

void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}