/*
 * uloop - event loop implementation
 *
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/wait.h>

#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>

#include "uloop.h"
#include "utils.h"

#ifdef USE_KQUEUE
#include <sys/event.h>
#endif
#ifdef USE_EPOLL
#include <sys/epoll.h>
#endif
struct uloop_fd_event {
	struct uloop_fd *fd;
	unsigned int events;
};

struct uloop_fd_stack {
	struct uloop_fd_stack *next;
	struct uloop_fd *fd;
	unsigned int events;
};

static struct uloop_fd_stack *fd_stack = NULL;
#define ULOOP_MAX_EVENTS 10

static struct list_head timeouts = LIST_HEAD_INIT(timeouts);
static struct list_head processes = LIST_HEAD_INIT(processes);

static int poll_fd = -1;
bool uloop_cancelled = false;
bool uloop_handle_sigchld = true;
static bool do_sigchld = false;

static struct uloop_fd_event cur_fds[ULOOP_MAX_EVENTS];
static int cur_fd, cur_nfds;
#ifdef USE_KQUEUE

int uloop_init(void)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev = {};

	if (poll_fd >= 0)
		return 0;

	poll_fd = kqueue();
	if (poll_fd < 0)
		return -1;

	EV_SET(&ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, 0);
	kevent(poll_fd, &ev, 1, NULL, 0, &timeout);

	return 0;
}
static uint16_t get_flags(unsigned int flags, unsigned int mask)
{
	uint16_t kflags = 0;

	if (!(flags & mask))
		return EV_DELETE;

	kflags = EV_ADD;
	if (flags & ULOOP_EDGE_TRIGGER)
		kflags |= EV_CLEAR;

	return kflags;
}

static struct kevent events[ULOOP_MAX_EVENTS];
static int register_kevent(struct uloop_fd *fd, unsigned int flags)
{
	struct timespec timeout = { 0, 0 };
	struct kevent ev[2];
	int nev = 0;
	unsigned int fl = 0;
	unsigned int changed;
	uint16_t kflags;

	if (flags & ULOOP_EDGE_DEFER)
		flags &= ~ULOOP_EDGE_TRIGGER;

	changed = flags ^ fd->flags;
	if (changed & ULOOP_EDGE_TRIGGER)
		changed |= flags;

	if (changed & ULOOP_READ) {
		kflags = get_flags(flags, ULOOP_READ);
		EV_SET(&ev[nev++], fd->fd, EVFILT_READ, kflags, 0, 0, fd);
	}

	if (changed & ULOOP_WRITE) {
		kflags = get_flags(flags, ULOOP_WRITE);
		EV_SET(&ev[nev++], fd->fd, EVFILT_WRITE, kflags, 0, 0, fd);
	}

	if (!flags)
		fl |= EV_DELETE;

	fd->flags = flags;
	if (kevent(poll_fd, ev, nev, NULL, fl, &timeout) == -1)
		return -1;

	return 0;
}
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	if (flags & ULOOP_EDGE_TRIGGER)
		flags |= ULOOP_EDGE_DEFER;
	else
		flags &= ~ULOOP_EDGE_DEFER;

	return register_kevent(fd, flags);
}
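/*
 * Edge-triggered registration happens in two steps via ULOOP_EDGE_DEFER:
 * register_poll() marks the fd as deferred, register_kevent() then strips
 * ULOOP_EDGE_TRIGGER for the initial registration, and uloop_fetch_events()
 * re-registers the fd with EV_CLEAR once its first event has been seen, so
 * that readiness present before registration is not silently missed.
 */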
static int __uloop_fd_delete(struct uloop_fd *fd)
{
	return register_poll(fd, 0);
}
static int uloop_fetch_events(int timeout)
{
	struct timespec ts;
	int nfds, n;

	ts.tv_sec = timeout / 1000;
	ts.tv_nsec = (timeout % 1000) * 1000000;

	nfds = kevent(poll_fd, NULL, 0, events, ARRAY_SIZE(events), timeout >= 0 ? &ts : NULL);
	for (n = 0; n < nfds; n++) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].udata;
		unsigned int ev = 0;

		if (events[n].flags & EV_ERROR) {
			u->error = true;
			if (!(u->flags & ULOOP_ERROR_CB))
				uloop_fd_delete(u);
		}

		if (events[n].filter == EVFILT_READ)
			ev |= ULOOP_READ;
		else if (events[n].filter == EVFILT_WRITE)
			ev |= ULOOP_WRITE;

		if (events[n].flags & EV_EOF)
			u->eof = true;

		cur->fd = u;
		cur->events = ev;

		if (u->flags & ULOOP_EDGE_DEFER) {
			u->flags &= ~ULOOP_EDGE_DEFER;
			u->flags |= ULOOP_EDGE_TRIGGER;
			register_kevent(u, u->flags);
		}
	}

	return nfds;
}
#endif /* USE_KQUEUE */

#ifdef USE_EPOLL

/**
 * FIXME: uClibc < 0.9.30.3 does not define EPOLLRDHUP for Linux >= 2.6.17
 */
#ifndef EPOLLRDHUP
#define EPOLLRDHUP 0x2000
#endif

int uloop_init(void)
{
	if (poll_fd >= 0)
		return 0;

	poll_fd = epoll_create(32);
	if (poll_fd < 0)
		return -1;

	fcntl(poll_fd, F_SETFD, fcntl(poll_fd, F_GETFD) | FD_CLOEXEC);

	return 0;
}
static int register_poll(struct uloop_fd *fd, unsigned int flags)
{
	struct epoll_event ev;
	int op = fd->registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;

	memset(&ev, 0, sizeof(struct epoll_event));

	if (flags & ULOOP_READ)
		ev.events |= EPOLLIN | EPOLLRDHUP;

	if (flags & ULOOP_WRITE)
		ev.events |= EPOLLOUT;

	if (flags & ULOOP_EDGE_TRIGGER)
		ev.events |= EPOLLET;

	ev.data.ptr = fd;

	return epoll_ctl(poll_fd, op, fd->fd, &ev);
}
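/*
 * EPOLLRDHUP is requested along with EPOLLIN so that a peer shutting down
 * its end of the connection still wakes the descriptor up; the fetch loop
 * below translates it into u->eof for the callback to act on.
 */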
static struct epoll_event events[ULOOP_MAX_EVENTS];

static int __uloop_fd_delete(struct uloop_fd *sock)
{
	return epoll_ctl(poll_fd, EPOLL_CTL_DEL, sock->fd, 0);
}
static int uloop_fetch_events(int timeout)
{
	int n, nfds;

	nfds = epoll_wait(poll_fd, events, ARRAY_SIZE(events), timeout);
	for (n = 0; n < nfds; ++n) {
		struct uloop_fd_event *cur = &cur_fds[n];
		struct uloop_fd *u = events[n].data.ptr;
		unsigned int ev = 0;

		if (events[n].events & (EPOLLERR|EPOLLHUP)) {
			u->error = true;
			if (!(u->flags & ULOOP_ERROR_CB))
				uloop_fd_delete(u);
		}

		if (!(events[n].events & (EPOLLRDHUP|EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP))) {
			cur->fd = NULL;
			continue;
		}

		if (events[n].events & EPOLLRDHUP)
			u->eof = true;

		if (events[n].events & EPOLLIN)
			ev |= ULOOP_READ;

		if (events[n].events & EPOLLOUT)
			ev |= ULOOP_WRITE;

		cur->fd = u;
		cur->events = ev;
	}

	return nfds;
}

#endif /* USE_EPOLL */
static bool uloop_fd_stack_event(struct uloop_fd *fd, int events)
{
	struct uloop_fd_stack *cur;

	/*
	 * Do not buffer events for level-triggered fds, they will keep firing.
	 * Caller needs to take care of recursion issues.
	 */
	if (!(fd->flags & ULOOP_EDGE_TRIGGER))
		return false;

	for (cur = fd_stack; cur; cur = cur->next) {
		if (cur->fd != fd)
			continue;

		if (events < 0)
			cur->fd = NULL;
		else
			cur->events |= events | ULOOP_EVENT_BUFFERED;

		return true;
	}

	return false;
}
static void uloop_run_events(int timeout)
{
	struct uloop_fd_event *cur;
	struct uloop_fd *fd;

	if (!cur_nfds) {
		cur_fd = 0;
		cur_nfds = uloop_fetch_events(timeout);
	}
	if (cur_nfds < 0)
		cur_nfds = 0;

	while (cur_nfds > 0) {
		struct uloop_fd_stack stack_cur;
		unsigned int events;

		cur = &cur_fds[cur_fd++];
		cur_nfds--;

		fd = cur->fd;
		events = cur->events;
		if (!fd)
			continue;

		if (!fd->cb)
			continue;

		if (uloop_fd_stack_event(fd, cur->events))
			continue;

		stack_cur.next = fd_stack;
		stack_cur.fd = fd;
		fd_stack = &stack_cur;
		do {
			stack_cur.events = 0;
			fd->cb(fd, events);
			events = stack_cur.events & ULOOP_EVENT_MASK;
		} while (stack_cur.fd && events);
		fd_stack = stack_cur.next;
	}
}
int uloop_fd_add(struct uloop_fd *sock, unsigned int flags)
{
	unsigned int fl;
	int ret;

	if (!(flags & (ULOOP_READ | ULOOP_WRITE)))
		return uloop_fd_delete(sock);

	if (!sock->registered && !(flags & ULOOP_BLOCKING)) {
		fl = fcntl(sock->fd, F_GETFL, 0);
		fl |= O_NONBLOCK;
		fcntl(sock->fd, F_SETFL, fl);
	}

	ret = register_poll(sock, flags);
	if (ret < 0)
		return ret;

	sock->registered = true;

	return 0;
}
int uloop_fd_delete(struct uloop_fd *fd)
{
	int i;

	for (i = 0; i < cur_nfds; i++) {
		if (cur_fds[cur_fd + i].fd != fd)
			continue;

		cur_fds[cur_fd + i].fd = NULL;
	}

	if (!fd->registered)
		return 0;

	fd->registered = false;
	uloop_fd_stack_event(fd, -1);

	return __uloop_fd_delete(fd);
}
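/*
 * Usage sketch (illustration, not part of this file): a minimal reader
 * registered with uloop_fd_add(). The names "echo_cb" and "echo_fd" and the
 * way the socket is created are hypothetical; the uloop calls and the
 * ULOOP_READ flag are the ones defined by this API.
 *
 *	static void echo_cb(struct uloop_fd *u, unsigned int events)
 *	{
 *		char buf[128];
 *		ssize_t len = read(u->fd, buf, sizeof(buf));
 *
 *		if (len <= 0) {
 *			uloop_fd_delete(u);   // EOF or error: stop watching
 *			return;
 *		}
 *		write(u->fd, buf, len);       // echo the data back
 *	}
 *
 *	static struct uloop_fd echo_fd = { .cb = echo_cb };
 *
 *	// after creating a connected socket "sock":
 *	// echo_fd.fd = sock;
 *	// uloop_fd_add(&echo_fd, ULOOP_READ);
 */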
static int tv_diff(struct timeval *t1, struct timeval *t2)
{
	return
		(t1->tv_sec - t2->tv_sec) * 1000 +
		(t1->tv_usec - t2->tv_usec) / 1000;
}
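/*
 * tv_diff() returns the difference in whole milliseconds as an int, which is
 * plenty for the timeout bookkeeping below but would overflow for intervals
 * on the order of 24 days or more.
 */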
int uloop_timeout_add(struct uloop_timeout *timeout)
{
	struct uloop_timeout *tmp;
	struct list_head *h = &timeouts;

	if (timeout->pending)
		return -1;

	list_for_each_entry(tmp, &timeouts, list) {
		if (tv_diff(&tmp->time, &timeout->time) > 0) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&timeout->list, h);
	timeout->pending = true;

	return 0;
}
static void uloop_gettime(struct timeval *tv)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / 1000;
}
int uloop_timeout_set(struct uloop_timeout *timeout, int msecs)
{
	struct timeval *time = &timeout->time;

	if (timeout->pending)
		uloop_timeout_cancel(timeout);

	uloop_gettime(&timeout->time);

	time->tv_sec += msecs / 1000;
	time->tv_usec += (msecs % 1000) * 1000;

	if (time->tv_usec >= 1000000) {
		time->tv_sec++;
		time->tv_usec %= 1000000;
	}

	return uloop_timeout_add(timeout);
}
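/*
 * Usage sketch (illustration, not part of this file): a periodic tick built
 * from a one-shot timeout that re-arms itself in its callback. "tick",
 * "tick_cb" and the 1000 ms interval are hypothetical; uloop_timeout_set()
 * and the callback signature are the real API.
 *
 *	static void tick_cb(struct uloop_timeout *t)
 *	{
 *		// ... do periodic work ...
 *		uloop_timeout_set(t, 1000);   // re-arm for the next second
 *	}
 *
 *	static struct uloop_timeout tick = { .cb = tick_cb };
 *
 *	// somewhere during setup:
 *	// uloop_timeout_set(&tick, 1000);
 */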
int uloop_timeout_cancel(struct uloop_timeout *timeout)
{
	if (!timeout->pending)
		return -1;

	list_del(&timeout->list);
	timeout->pending = false;

	return 0;
}
int uloop_timeout_remaining(struct uloop_timeout *timeout)
{
	struct timeval now;

	if (!timeout->pending)
		return -1;

	uloop_gettime(&now);

	return tv_diff(&timeout->time, &now);
}
int uloop_process_add(struct uloop_process *p)
{
	struct uloop_process *tmp;
	struct list_head *h = &processes;

	if (p->pending)
		return -1;

	list_for_each_entry(tmp, &processes, list) {
		if (tmp->pid > p->pid) {
			h = &tmp->list;
			break;
		}
	}

	list_add_tail(&p->list, h);
	p->pending = true;

	return 0;
}
int uloop_process_delete(struct uloop_process *p)
{
	if (!p->pending)
		return -1;

	list_del(&p->list);
	p->pending = false;

	return 0;
}
static void uloop_handle_processes(void)
{
	struct uloop_process *p, *tmp;
	pid_t pid;
	int ret;

	do_sigchld = false;

	while (1) {
		pid = waitpid(-1, &ret, WNOHANG);
		if (pid <= 0)
			return;

		list_for_each_entry_safe(p, tmp, &processes, list) {
			if (p->pid < pid)
				continue;

			if (p->pid > pid)
				break;

			uloop_process_delete(p);
			p->cb(p, ret);
		}
	}
}
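/*
 * Usage sketch (illustration, not part of this file): watching a forked
 * child. "child" and "child_exited" are hypothetical names; the callback's
 * second argument is the raw wait status collected by waitpid() above.
 *
 *	static void child_exited(struct uloop_process *c, int ret)
 *	{
 *		printf("pid %d exited with status %d\n",
 *		       (int)c->pid, WEXITSTATUS(ret));
 *	}
 *
 *	static struct uloop_process child = { .cb = child_exited };
 *
 *	// after fork():
 *	// child.pid = pid;
 *	// uloop_process_add(&child);
 */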
static void uloop_handle_sigint(int signo)
{
	uloop_cancelled = true;
}

static void uloop_sigchld(int signo)
{
	do_sigchld = true;
}
static void uloop_setup_signals(bool add)
{
	static struct sigaction old_sigint, old_sigchld;
	struct sigaction s;

	memset(&s, 0, sizeof(struct sigaction));

	if (add)
		s.sa_handler = uloop_handle_sigint;
	else
		s = old_sigint;

	sigaction(SIGINT, &s, &old_sigint);

	if (!uloop_handle_sigchld)
		return;

	if (add)
		s.sa_handler = uloop_sigchld;
	else
		s = old_sigchld;

	sigaction(SIGCHLD, &s, &old_sigchld);
}
static int uloop_get_next_timeout(struct timeval *tv)
{
	struct uloop_timeout *timeout;
	int diff;

	if (list_empty(&timeouts))
		return -1;

	timeout = list_first_entry(&timeouts, struct uloop_timeout, list);
	diff = tv_diff(&timeout->time, tv);
	if (diff < 0)
		return 0;

	return diff;
}
static void uloop_process_timeouts(struct timeval *tv)
{
	struct uloop_timeout *t;

	while (!list_empty(&timeouts)) {
		t = list_first_entry(&timeouts, struct uloop_timeout, list);

		if (tv_diff(&t->time, tv) > 0)
			break;

		uloop_timeout_cancel(t);
		if (t->cb)
			t->cb(t);
	}
}
static void uloop_clear_timeouts(void)
{
	struct uloop_timeout *t, *tmp;

	list_for_each_entry_safe(t, tmp, &timeouts, list)
		uloop_timeout_cancel(t);
}

static void uloop_clear_processes(void)
{
	struct uloop_process *p, *tmp;

	list_for_each_entry_safe(p, tmp, &processes, list)
		uloop_process_delete(p);
}
void uloop_run(void)
{
	static int recursive_calls = 0;
	struct timeval tv;

	/*
	 * Handlers are only updated for the first call to uloop_run()
	 * (and restored when this call is done).
	 */
	if (!recursive_calls++)
		uloop_setup_signals(true);

	while (!uloop_cancelled) {
		uloop_gettime(&tv);
		uloop_process_timeouts(&tv);
		if (uloop_cancelled)
			break;

		if (do_sigchld)
			uloop_handle_processes();

		uloop_gettime(&tv);
		uloop_run_events(uloop_get_next_timeout(&tv));
	}

	if (!--recursive_calls)
		uloop_setup_signals(false);
}
void uloop_done(void)
{
	if (poll_fd < 0)
		return;

	close(poll_fd);
	poll_fd = -1;

	uloop_clear_timeouts();
	uloop_clear_processes();
}
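/*
 * Usage sketch (illustration, not part of this file): the typical lifecycle
 * of a program built on uloop. "setup_watchers" stands in for whatever
 * uloop_fd_add()/uloop_timeout_set()/uloop_process_add() calls the
 * application needs.
 *
 *	int main(int argc, char **argv)
 *	{
 *		if (uloop_init() < 0)
 *			return 1;
 *
 *		setup_watchers();   // register fds, timeouts, child processes
 *
 *		uloop_run();        // blocks until the loop is cancelled (e.g. SIGINT)
 *		uloop_done();       // release the poll fd, clear timers and processes
 *
 *		return 0;
 *	}
 */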