uloop: do less state/change tracking for kevent() on mac os x, it is unreliable
[project/libubox.git] / uloop.c
/*
 *   Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
 *   Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 *   Copyright (C) 2010 Steven Barth <steven@midlink.org>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <sys/time.h>
#include <sys/types.h>

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <stdbool.h>
#include <signal.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
#include <sys/wait.h>

#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;
static int cur_fd, cur_nfds;

#ifdef USE_KQUEUE

int uloop_init(void)
{
        if (poll_fd >= 0)
                return 0;

        poll_fd = kqueue();
        if (poll_fd < 0)
                return -1;

        return 0;
}

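/*
 * Map uloop flags to kevent flags: with neither ULOOP_READ nor ULOOP_WRITE
 * requested for a filter the change becomes EV_DELETE, otherwise EV_ADD,
 * plus EV_CLEAR for edge-triggered operation. register_poll() below submits
 * changes for both the read and the write filter unconditionally rather
 * than tracking which of them changed; per the commit message, that kind of
 * state/change tracking is unreliable with kevent() on Mac OS X.
 */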
static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
        uint16_t kflags = 0;

        if (!(flags & mask))
                return EV_DELETE;

        kflags = EV_ADD;
        if (flags & ULOOP_EDGE_TRIGGER)
                kflags |= EV_CLEAR;

        return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];

static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
        struct timespec timeout = { 0, 0 };
        struct kevent ev[2];
        int nev = 0;
        unsigned int fl = 0;
        uint16_t kflags;

        kflags = get_flags(flags, ULOOP_READ);
        EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);

        kflags = get_flags(flags, ULOOP_WRITE);
        EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);

        if (!flags)
                fl |= EV_DELETE;

        if (nev && (kevent(poll_fd, ev, nev, NULL, fl, &timeout) == -1))
                return -1;

        return 0;
}

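/*
 * Removing a descriptor also clears any reference to it in the batch of
 * events currently being dispatched, so a callback that deletes another
 * registered fd cannot leave a stale uloop_fd pointer to be used later in
 * the same uloop_run_events() iteration.
 */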
int uloop_fd_delete(struct uloop_fd *sock)
{
        int i;

        for (i = cur_fd + 1; i < cur_nfds; i++) {
                if (events[i].udata != sock)
                        continue;

                events[i].udata = NULL;
        }

        sock->registered = false;
        return register_poll(sock, 0);
}

static void uloop_run_events(int timeout)
{
        struct timespec ts;
        int nfds, n;

        if (timeout >= 0) {
                ts.tv_sec = timeout / 1000;
                ts.tv_nsec = (timeout % 1000) * 1000000;
        }

        nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
        for (n = 0; n < nfds; ++n)
        {
                struct uloop_fd *u = events[n].udata;
                unsigned int ev = 0;

                if (!u)
                        continue;

                if (events[n].flags & EV_ERROR) {
                        u->error = true;
                        uloop_fd_delete(u);
                }

                if (events[n].filter == EVFILT_READ)
                        ev |= ULOOP_READ;
                else if (events[n].filter == EVFILT_WRITE)
                        ev |= ULOOP_WRITE;

                if (events[n].flags & EV_EOF)
                        u->eof = true;
                else if (!ev)
                        continue;

                if (u->cb) {
                        cur_fd = n;
                        cur_nfds = nfds;
                        u->cb(u, ev);
                }
        }
        cur_nfds = 0;
}

#endif

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
        if (poll_fd >= 0)
                return 0;

        poll_fd = epoll_create(32);
        if (poll_fd < 0)
                return -1;

        fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);
        return 0;
}

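/*
 * epoll backend: ULOOP_READ maps to EPOLLIN | EPOLLRDHUP so that the peer
 * shutting down its side of the connection can be reported via ->eof,
 * ULOOP_WRITE maps to EPOLLOUT and ULOOP_EDGE_TRIGGER to EPOLLET. A
 * descriptor that is already registered is updated with EPOLL_CTL_MOD
 * instead of being added again.
 */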
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
        struct epoll_event ev;
        int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

        memset(&ev, 0, sizeof(struct epoll_event));

        if (flags & ULOOP_READ)
                ev.events |= EPOLLIN | EPOLLRDHUP;

        if (flags & ULOOP_WRITE)
                ev.events |= EPOLLOUT;

        if (flags & ULOOP_EDGE_TRIGGER)
                ev.events |= EPOLLET;

        ev.data.fd = fd->fd;
        ev.data.ptr = fd;

        return epoll_ctl(poll_fd, op, fd->fd, &ev);
}

static struct epoll_event events[ULOOP_MAX_EVENTS];

int uloop_fd_delete(struct uloop_fd *sock)
{
        int i;

        for (i = cur_fd + 1; i < cur_nfds; i++) {
                if (events[i].data.ptr != sock)
                        continue;

                events[i].data.ptr = NULL;
        }
        sock->registered = false;
        return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}

static void uloop_run_events(int timeout)
{
        int n, nfds;

        nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
        for (n = 0; n < nfds; ++n)
        {
                struct uloop_fd *u = events[n].data.ptr;
                unsigned int ev = 0;

                if (!u)
                        continue;

                if (events[n].events & (EPOLLERR|EPOLLHUP)) {
                        u->error = true;
                        uloop_fd_delete(u);
                }

                if (!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)))
                        continue;

                if (events[n].events & EPOLLRDHUP)
                        u->eof = true;

                if (events[n].events & EPOLLIN)
                        ev |= ULOOP_READ;

                if (events[n].events & EPOLLOUT)
                        ev |= ULOOP_WRITE;

                if (u->cb) {
                        cur_fd = n;
                        cur_nfds = nfds;
                        u->cb(u, ev);
                }
        }
        cur_nfds = 0;
}

#endif

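/*
 * Public API: register a file descriptor with the event loop. Unless
 * ULOOP_BLOCKING is set, the descriptor is switched to non-blocking mode
 * before it is handed to the poll backend (kqueue or epoll).
 */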
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
        unsigned int fl;
        int ret;

        if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
                fl = fcntl(sock->fd, F_GETFL, 0);
                fl |= O_NONBLOCK;
                fcntl(sock->fd, F_SETFL, fl);
        }

        ret = register_poll(sock, flags);
        if (ret < 0)
                goto out;

        sock->registered = true;
        sock->eof = false;

out:
        return ret;
}

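/*
 * Timeouts are kept in the global "timeouts" list, sorted by absolute
 * expiry time. tv_diff() returns the difference between two timevals in
 * milliseconds; uloop_get_next_timeout() uses it to derive the poll timeout
 * passed to uloop_run_events().
 */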
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
        return
                (t1->tv_sec - t2->tv_sec) * 1000 +
                (t1->tv_usec - t2->tv_usec) / 1000;
}

int uloop_timeout_add(struct uloop_timeout *timeout)
{
        struct uloop_timeout *tmp;
        struct list_head *h = &timeouts;

        if (timeout->pending)
                return -1;

        list_for_each_entry(tmp, &timeouts, list) {
                if (tv_diff(&tmp->time, &timeout->time) > 0) {
                        h = &tmp->list;
                        break;
                }
        }

        list_add_tail(&timeout->list, h);
        timeout->pending = true;

        return 0;
}

int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
        struct timeval *time = &timeout->time;

        if (timeout->pending)
                uloop_timeout_cancel(timeout);

        gettimeofday(&timeout->time, NULL);

        time->tv_sec += msecs / 1000;
        time->tv_usec += (msecs % 1000) * 1000;

        if (time->tv_usec > 1000000) {
                time->tv_sec++;
                time->tv_usec %= 1000000;
        }

        return uloop_timeout_add(timeout);
}

int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
        if (!timeout->pending)
                return -1;

        list_del(&timeout->list);
        timeout->pending = false;

        return 0;
}

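/*
 * Child process tracking: uloop_process entries are kept in the "processes"
 * list, sorted by pid. Their callbacks are invoked from
 * uloop_handle_processes() once a SIGCHLD has been observed.
 */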
int uloop_process_add(struct uloop_process *p)
{
        struct uloop_process *tmp;
        struct list_head *h = &processes;

        if (p->pending)
                return -1;

        list_for_each_entry(tmp, &processes, list) {
                if (tmp->pid > p->pid) {
                        h = &tmp->list;
                        break;
                }
        }

        list_add_tail(&p->list, h);
        p->pending = true;

        return 0;
}

int uloop_process_delete(struct uloop_process *p)
{
        if (!p->pending)
                return -1;

        list_del(&p->list);
        p->pending = false;

        return 0;
}

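/*
 * Reap all exited children with waitpid(WNOHANG) and dispatch the matching
 * uloop_process callbacks. Since the list is sorted by pid, the scan can
 * stop at the first entry with a larger pid.
 */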
static void uloop_handle_processes(void)
{
        struct uloop_process *p, *tmp;
        pid_t pid;
        int ret;

        do_sigchld = false;

        while (1) {
                pid = waitpid(-1, &ret, WNOHANG);
                if (pid <= 0)
                        return;

                list_for_each_entry_safe(p, tmp, &processes, list) {
                        if (p->pid < pid)
                                continue;

                        if (p->pid > pid)
                                break;

                        uloop_process_delete(p);
                        p->cb(p, ret);
                }
        }
}

static void uloop_handle_sigint(int signo)
{
        uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
        do_sigchld = true;
}

static void uloop_setup_signals(void)
{
        struct sigaction s;

        memset(&s, 0, sizeof(struct sigaction));
        s.sa_handler = uloop_handle_sigint;
        s.sa_flags = 0;
        sigaction(SIGINT, &s, NULL);

        if (uloop_handle_sigchld) {
                s.sa_handler = uloop_sigchld;
                sigaction(SIGCHLD, &s, NULL);
        }
}

static int uloop_get_next_timeout(struct timeval *tv)
{
        struct uloop_timeout *timeout;
        int diff;

        if (list_empty(&timeouts))
                return -1;

        timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
        diff = tv_diff(&timeout->time, tv);
        if (diff < 0)
                return 0;

        return diff;
}

static void uloop_process_timeouts(struct timeval *tv)
{
        struct uloop_timeout *t;

        while (!list_empty(&timeouts)) {
                t = list_first_entry(&timeouts, struct uloop_timeout, list);

                if (tv_diff(&t->time, tv) > 0)
                        break;

                uloop_timeout_cancel(t);
                if (t->cb)
                        t->cb(t);
        }
}

static void uloop_clear_timeouts(void)
{
        struct uloop_timeout *t, *tmp;

        list_for_each_entry_safe(t, tmp, &timeouts, list)
                uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
        struct uloop_process *p, *tmp;

        list_for_each_entry_safe(p, tmp, &processes, list)
                uloop_process_delete(p);
}

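/*
 * Main loop: expire due timeouts, reap children if a SIGCHLD was seen, then
 * poll for fd events, sleeping at most until the next pending timeout.
 */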
void uloop_run(void)
{
        struct timeval tv;

        uloop_setup_signals();
        while (!uloop_cancelled)
        {
                gettimeofday(&tv, NULL);
                uloop_process_timeouts(&tv);
                if (uloop_cancelled)
                        break;

                if (do_sigchld)
                        uloop_handle_processes();
                uloop_run_events(uloop_get_next_timeout(&tv));
        }
}

void uloop_done(void)
{
        if (poll_fd < 0)
                return;

        close(poll_fd);
        poll_fd = -1;

        uloop_clear_timeouts();
        uloop_clear_processes();
}
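
/*
 * Illustrative usage sketch (not part of this file): a minimal consumer of
 * the API above, assuming the struct uloop_fd / struct uloop_timeout
 * definitions from uloop.h. The fd callback receives ULOOP_READ/ULOOP_WRITE
 * bits and can check ->eof and ->error; the timeout callback fires once per
 * uloop_timeout_set().
 *
 *     static void sock_cb(struct uloop_fd *u, unsigned int events)
 *     {
 *             char buf[256];
 *
 *             if (events & ULOOP_READ)
 *                     read(u->fd, buf, sizeof(buf));
 *             if (u->eof || u->error)
 *                     uloop_fd_delete(u);
 *     }
 *
 *     static void tick_cb(struct uloop_timeout *t)
 *     {
 *             uloop_timeout_set(t, 1000);   // re-arm: fire again in one second
 *     }
 *
 *     int main(void)
 *     {
 *             static struct uloop_fd ufd = { .cb = sock_cb, .fd = STDIN_FILENO };
 *             static struct uloop_timeout tick = { .cb = tick_cb };
 *
 *             uloop_init();
 *             uloop_fd_add(&ufd, ULOOP_READ);
 *             uloop_timeout_set(&tick, 1000);
 *             uloop_run();
 *             uloop_done();
 *             return 0;
 *     }
 */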