static int fpm_event_kqueue_init(int max);
static int fpm_event_kqueue_clean(void);
static int fpm_event_kqueue_wait(struct fpm_event_queue_s *queue, unsigned long int timeout);
static int fpm_event_kqueue_add(struct fpm_event_s *ev);
static int fpm_event_kqueue_remove(struct fpm_event_s *ev);
static struct fpm_event_module_s kqueue_module = {
	.name = "kqueue",
	.support_edge_trigger = 1,
	.init = fpm_event_kqueue_init,
	.clean = fpm_event_kqueue_clean,
	.wait = fpm_event_kqueue_wait,
	.add = fpm_event_kqueue_add,
	.remove = fpm_event_kqueue_remove,
};
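/*
 * FPM selects one event module at startup (select, poll, epoll, kqueue, ...)
 * and drives it only through the callbacks above. support_edge_trigger = 1
 * advertises that kqueue can deliver edge-triggered notification, which the
 * add/remove callbacks implement below with EV_CLEAR.
 */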
/* kqueue descriptor plus the array used to collect fired events */
static int kfd = 0;
static struct kevent *kevents = NULL;
static int nkevents = 0;
struct fpm_event_module_s *fpm_event_kqueue_module(void)
{
	/* guarded by HAVE_KQUEUE in the real source; returns NULL without kqueue */
	return &kqueue_module;
}
static int fpm_event_kqueue_init(int max)
{
	/* open the kqueue descriptor and allocate the array
	   that will receive fired events from kevent() */
	kfd = kqueue();
	kevents = calloc(max, sizeof(struct kevent));
	if (kfd < 0 || !kevents) {
		return -1;
	}
	nkevents = max;
	return 0;
}
static int fpm_event_kqueue_clean(void)
{
	/* ... closes kfd and frees the kevents array ... */
}
static int fpm_event_kqueue_wait(struct fpm_event_queue_s *queue, unsigned long int timeout)
{
	struct timespec t;
	int ret, i;

	/* reset the fired-events array */
	memset(kevents, 0, sizeof(struct kevent) * nkevents);

	/* convert the timeout from milliseconds to a timespec */
	t.tv_sec = timeout / 1000;
	t.tv_nsec = (timeout % 1000) * 1000 * 1000;

	/* wait for an incoming event or the timeout */
	ret = kevent(kfd, NULL, 0, kevents, nkevents, &t);
	if (ret == -1) {
		/* report an error unless the call was interrupted by a signal */
		if (errno != EINTR) {
			return -1;
		}
	}

	/* fire the events that were triggered */
	for (i = 0; i < ret; i++) {
		if (kevents[i].udata) {
			fpm_event_fire((struct fpm_event_s *)kevents[i].udata);
		}
	}

	return ret;
}
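/*
 * Note: the udata pointer checked above is the same (void *)ev that
 * fpm_event_kqueue_add() hands to EV_SET() below, so every fired kevent
 * maps straight back to its fpm_event_s. Worked example of the
 * millisecond-to-timespec conversion: timeout = 2500 ms gives
 * t.tv_sec = 2 and t.tv_nsec = 500 * 1000 * 1000.
 */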
static int fpm_event_kqueue_add(struct fpm_event_s *ev)
{
	struct kevent k;
	int flags = EV_ADD;

	/* EV_CLEAR turns the registration into an edge-triggered one */
	if (ev->flags & FPM_EV_EDGE) {
		flags |= EV_CLEAR;
	}

	/* register the fd for readability; udata carries the event itself */
	EV_SET(&k, ev->fd, EVFILT_READ, flags, 0, 0, (void *)ev);

	if (kevent(kfd, &k, 1, NULL, 0, NULL) < 0) {
		return -1;
	}
	return 0;
}
static int fpm_event_kqueue_remove(struct fpm_event_s *ev)
{
	struct kevent k;
	int flags = EV_DELETE;

	if (ev->flags & FPM_EV_EDGE) {
		flags |= EV_CLEAR;
	}

	/* same EV_SET() call as in add(), but EV_DELETE unregisters the fd */
	EV_SET(&k, ev->fd, EVFILT_READ, flags, 0, 0, (void *)ev);

	if (kevent(kfd, &k, 1, NULL, 0, NULL) < 0) {
		return -1;
	}
	return 0;
}
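/*
 * Minimal standalone sketch (not FPM code) of the same kqueue pattern the
 * module relies on: register a read filter with EV_SET()/kevent(), then
 * block in kevent() with a timespec timeout and act on what fired.
 * Watching STDIN_FILENO and the 1-second timeout are illustrative choices.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct kevent change, fired;
	struct timespec t = { .tv_sec = 1, .tv_nsec = 0 };
	int kq = kqueue();

	if (kq < 0) {
		return 1;
	}

	/* register stdin for readability; udata could carry a caller-defined pointer */
	EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &change, 1, NULL, 0, NULL) < 0) {
		return 1;
	}

	/* wait for one event or the timeout, mirroring fpm_event_kqueue_wait() */
	int ret = kevent(kq, NULL, 0, &fired, 1, &t);
	if (ret > 0) {
		printf("fd %d readable, %ld bytes pending\n",
		       (int)fired.ident, (long)fired.data);
	} else if (ret == 0) {
		printf("timeout\n");
	}

	close(kq);
	return 0;
}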