[project/libubox.git] / uloop.c
/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <signal.h>	/* sigaction() */
#include <time.h>	/* clock_gettime() */

#include "uloop.h"
#include "utils.h"
#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;
static int cur_fd, cur_nfds;
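/*
 * Two poll backends are provided below: a kqueue implementation
 * (BSD/macOS, USE_KQUEUE) and an epoll implementation (Linux, USE_EPOLL).
 * Each backend defines uloop_init(), register_poll(), uloop_fd_delete()
 * and uloop_run_events(); the common code after the backends uses only
 * this interface.
 */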
#ifdef USE_KQUEUE
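/*
 * Create the kqueue instance (once) and register a kevent for SIGCHLD so
 * that child exits also wake up the event loop.
 */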
int uloop_init(void)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev = {};

	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	EV_SET(&ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(poll_fd, &ev, 1, NULL, 0, &timeout);

	return 0;
}
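/*
 * Map uloop flag bits to kevent flags for a single filter: EV_DELETE when
 * the bit in 'mask' is not set (the filter is being removed), otherwise
 * EV_ADD, plus EV_CLEAR for edge-triggered operation.
 */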
static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];
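/*
 * Apply the difference between the currently registered flags and the
 * requested flags as kevent changes on the read and write filters.
 */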
static int register_kevent(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	int nev = 0;
	unsigned int changed;
	uint16_t kflags;

	if (flags & ULOOP_EDGE_DEFER)
		flags &= ~ULOOP_EDGE_TRIGGER;

	changed = flags ^ fd->flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	/*
	 * Deregistration is expressed per filter: get_flags() returns
	 * EV_DELETE for any filter whose bit is cleared in 'flags'.
	 * The fifth kevent() argument is the capacity of the (unused)
	 * event list and must therefore be 0 here.
	 */
	fd->flags = flags;
	if (kevent(poll_fd, ev, nev, NULL, 0, &timeout) == -1)
		return -1;

	return 0;
}
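/*
 * Edge-triggered registration is deferred (ULOOP_EDGE_DEFER): the fd is
 * first registered without EV_CLEAR and switched to edge-triggered mode
 * in uloop_run_events() after its first event has been delivered.
 */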
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	if (flags & ULOOP_EDGE_TRIGGER)
		flags |= ULOOP_EDGE_DEFER;
	else
		flags &= ~ULOOP_EDGE_DEFER;

	return register_kevent(fd, flags);
}
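/*
 * Unregister an fd. Pending references in the event batch currently being
 * dispatched are cleared so stale events are not delivered.
 */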
int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].udata != sock)
			continue;

		events[i].udata = NULL;
	}

	sock->registered = false;
	return register_poll(sock, 0);
}
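/*
 * Wait for events (up to 'timeout' milliseconds, or indefinitely when
 * timeout is negative) and dispatch the callbacks of the affected fds.
 */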
static void uloop_run_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	if (timeout >= 0) {
		ts.tv_sec = timeout / 1000;
		ts.tv_nsec = (timeout % 1000) * 1000000;
	}

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; n++) {
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if (!u)
			continue;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;
		else if (!ev)
			continue;

		if (u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
			if (u->flags & ULOOP_EDGE_DEFER) {
				/* switch to edge-triggered mode after the first event */
				u->flags &= ~ULOOP_EDGE_DEFER;
				u->flags |= ULOOP_EDGE_TRIGGER;
				register_kevent(u, u->flags);
			}
		}
	}
	cur_nfds = 0;
}

#endif
#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif
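/*
 * Create the epoll instance (once) and mark it close-on-exec.
 */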
int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
	return 0;
}
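/*
 * Translate uloop flags into an epoll_event and add or modify the fd's
 * registration, depending on whether it is already registered.
 */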
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	/* epoll_data is a union; only the uloop_fd pointer is stored */
	ev.data.ptr = fd;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];
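/*
 * Unregister an fd from the epoll set and clear any reference to it in the
 * event batch currently being dispatched.
 */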
int uloop_fd_delete(struct uloop_fd *sock)
{
	int i;

	for (i = cur_fd + 1; i < cur_nfds; i++) {
		if (events[i].data.ptr != sock)
			continue;

		events[i].data.ptr = NULL;
	}
	sock->registered = false;
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, NULL);
}
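/*
 * epoll_wait() counterpart of the kqueue handler above: collect ready fds
 * and invoke their callbacks with ULOOP_READ/ULOOP_WRITE set as appropriate.
 */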
static void uloop_run_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; n++) {
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		if (!u)
			continue;

		if (events[n].events & (EPOLLERR | EPOLLHUP)) {
			u->error = true;
			uloop_fd_delete(u);
		}

		if (!(events[n].events & (EPOLLRDHUP | EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP)))
			continue;

		if (events[n].events & EPOLLRDHUP)
			u->eof = true;

		if (events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if (events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		if (u->cb) {
			cur_fd = n;
			cur_nfds = nfds;
			u->cb(u, ev);
		}
	}
	cur_nfds = 0;
}

#endif
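/*
 * Register (or update) an fd with the event loop. Unless ULOOP_BLOCKING is
 * given, the fd is switched to non-blocking mode on first registration.
 */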
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		goto out;

	sock->registered = true;
	sock->eof = false;

out:
	return ret;
}
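/* Difference t1 - t2 in milliseconds. */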
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}
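/*
 * Insert a timeout into the global list, which is kept sorted by expiry
 * time. The timeout must not already be pending.
 */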
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}
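/* Read the monotonic clock into a struct timeval. */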
static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}
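/*
 * (Re)arm a timeout to fire 'msecs' milliseconds from now, based on the
 * monotonic clock.
 */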
int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(&timeout->time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	/* normalize tv_usec into [0, 1000000) */
	if (time->tv_usec >= 1000000) {
		time->tv_sec++;
		time->tv_usec %= 1000000;
	}

	return uloop_timeout_add(timeout);
}
int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}

int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}
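/*
 * Watch a child process. The list is kept sorted by pid; the callback runs
 * with the wait status once the child exits.
 */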
int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}
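/*
 * Reap exited children with waitpid(WNOHANG) and dispatch the callback of
 * every registered uloop_process that matches a reaped pid.
 */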
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}
static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}

static void uloop_setup_signals(void)
{
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));
	s.sa_handler = uloop_handle_sigint;
	s.sa_flags = 0;
	sigaction(SIGINT, &s, NULL);

	if (uloop_handle_sigchld) {
		s.sa_handler = uloop_sigchld;
		sigaction(SIGCHLD, &s, NULL);
	}
}
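/*
 * Milliseconds until the next timeout expires: -1 when no timeouts are
 * pending (block indefinitely), 0 when one is already overdue.
 */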
static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}
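/* Fire the callbacks of all timeouts that have expired as of 'tv'. */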
static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}

static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}
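/*
 * Main loop: run expired timeouts, reap children when SIGCHLD was received,
 * then poll for fd events until uloop_cancelled is set (uloop_handle_sigint()
 * sets it on SIGINT).
 *
 * Typical usage (sketch; 'my_fd_cb' and 'sock' are caller-defined):
 *
 *	static void my_fd_cb(struct uloop_fd *u, unsigned int events)
 *	{
 *		...
 *	}
 *
 *	static struct uloop_fd sock = { .fd = -1, .cb = my_fd_cb };
 *
 *	uloop_init();
 *	sock.fd = ...;
 *	uloop_fd_add(&sock, ULOOP_READ);
 *	uloop_run();
 *	uloop_done();
 */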
void uloop_run(void)
{
	struct timeval tv;

	uloop_setup_signals();
	while (!uloop_cancelled) {
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();
		uloop_run_events(uloop_get_next_timeout(&tv));
	}
}
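/*
 * Tear down the event loop: close the poll fd and cancel all pending
 * timeouts and process watchers.
 */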
void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}