/* entry points of FPM's poll() event backend */
static int fpm_event_poll_init(int max);
static int fpm_event_poll_clean(void);
static int fpm_event_poll_wait(struct fpm_event_queue_s *queue, unsigned long int timeout);
static int fpm_event_poll_add(struct fpm_event_s *ev);
static int fpm_event_poll_remove(struct fpm_event_s *ev);
/* designated-initializer fields of the backend's struct fpm_event_module_s descriptor
 * (surrounding declaration elided); poll() only offers level-triggered readiness */
	.support_edge_trigger = 0,
	.init   = fpm_event_poll_init,
	.clean  = fpm_event_poll_clean,
	.wait   = fpm_event_poll_wait,
	.add    = fpm_event_poll_add,
	.remove = fpm_event_poll_remove,
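
These designated initializers plug the poll backend's callbacks into a module descriptor, so the generic event code can drive whichever backend was selected through the same set of function pointers. A minimal standalone sketch of that pattern follows; struct demo_event_module_s, the demo_* functions and the reduced field set are illustrative stand-ins, not FPM's actual definitions:

#include <stdio.h>

/* hypothetical, cut-down module descriptor in the same spirit */
struct demo_event_module_s {
	int support_edge_trigger;
	int (*init)(int max);
	int (*clean)(void);
};

static int demo_init(int max) { printf("init(%d)\n", max); return 0; }
static int demo_clean(void)   { printf("clean()\n"); return 0; }

static struct demo_event_module_s demo_module = {
	.support_edge_trigger = 0,   /* poll() is level-triggered only */
	.init  = demo_init,
	.clean = demo_clean,
};

int main(void)
{
	/* the caller drives the backend purely through the descriptor */
	demo_module.init(64);
	demo_module.clean();
	return 0;
}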
static struct pollfd *pollfds = NULL;        /* master array of watched descriptors */
static struct pollfd *active_pollfds = NULL; /* scratch copy handed to poll() on each wait */
static int npollfds = 0;                     /* capacity of both arrays */
static int next_free_slot = 0;               /* hint: where to start looking for a free slot */
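
Both arrays store POSIX struct pollfd records, whose members are fd (the descriptor to watch), events (what to wait for) and revents (what actually happened, written by poll()). POSIX also guarantees that entries with a negative fd are skipped and get revents cleared, which is what makes fd == -1 a safe "free slot" marker in the code below. A tiny demo of that rule (example code, not part of FPM):

#include <poll.h>
#include <stdio.h>

int main(void)
{
	/* one ignored slot (negative fd) and a zero timeout: poll() returns 0
	 * immediately and leaves the ignored entry's revents cleared, per POSIX */
	struct pollfd p = { .fd = -1, .events = POLLIN, .revents = 0 };
	int ret = poll(&p, 1, 0);
	printf("ret=%d revents=%d\n", ret, p.revents);  /* expected: ret=0 revents=0 */
	return 0;
}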
static int fpm_event_poll_init(int max)
{
	int i;

	/* master array: one slot per descriptor the backend may watch */
	pollfds = malloc(sizeof(struct pollfd) * max);
	/* ... allocation failure handling ... */
	memset(pollfds, 0, sizeof(struct pollfd) * max);

	/* mark every slot as free; poll() ignores entries with a negative fd */
	for (i = 0; i < max; i++) {
		pollfds[i].fd = -1;
	}

	/* scratch array that receives a copy of pollfds before every poll() call */
	active_pollfds = malloc(sizeof(struct pollfd) * max);
	if (!active_pollfds) {
		/* ... allocation failure handling ... */
	}
	memset(active_pollfds, 0, sizeof(struct pollfd) * max);

	npollfds = max;
	return 0;
}
static int fpm_event_poll_clean(void)
{
	/* ... the master pollfds array is released the same way (elided) ... */
	if (active_pollfds) {
		free(active_pollfds);
		active_pollfds = NULL;
	}
	/* ... */
}
static int fpm_event_poll_wait(struct fpm_event_queue_s *queue, unsigned long int timeout)
{
	int ret;

	/* poll() gets a scratch copy: readiness is reported in active_pollfds,
	 * while the master array stays a clean list of registrations */
	memcpy(active_pollfds, pollfds, sizeof(struct pollfd) * npollfds);

	ret = poll(active_pollfds, npollfds, timeout);
	if (ret == -1) {
		/* being interrupted by a signal is not an error */
		if (errno != EINTR) {
			/* ... report the failure and return ... */
		}
	}

	/* ... walk the queue, check revents for POLLIN and fire matching events ... */
}
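
The copy-then-poll pattern is easier to see outside of FPM. Below is a minimal, self-contained sketch (it watches standard input rather than FastCGI descriptors, and all names are local to the example) of a master array whose free slots carry fd == -1, a scratch copy handed to poll(), and a scan of revents afterwards:

#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define NSLOTS 4

int main(void)
{
	struct pollfd master[NSLOTS], active[NSLOTS];
	int i, ret;

	/* mark every slot free, then register stdin in slot 0 */
	for (i = 0; i < NSLOTS; i++) {
		master[i].fd = -1;
		master[i].events = 0;
		master[i].revents = 0;
	}
	master[0].fd = STDIN_FILENO;
	master[0].events = POLLIN;

	/* one wait round: poll() fills revents in the copy only */
	memcpy(active, master, sizeof(master));
	ret = poll(active, NSLOTS, 5000 /* ms */);
	if (ret == -1) {
		perror("poll");
		return 1;
	}

	for (i = 0; i < NSLOTS; i++) {
		if (active[i].fd != -1 && (active[i].revents & POLLIN)) {
			printf("fd %d is readable\n", active[i].fd);
		}
	}
	return 0;
}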
/* inside fpm_event_poll_add(): register ev->fd for readability notifications */

	/* fast path: the cached free slot is still unused */
	if (pollfds[next_free_slot].fd == -1) {
		pollfds[next_free_slot].fd = ev->fd;
		pollfds[next_free_slot].events = POLLIN;
		/* ... remember the slot on the event, advance the hint ... */
		if (next_free_slot >= npollfds) {
			next_free_slot = 0;  /* wrap the hint around */
		}
		/* ... */
	}

	/* slow path: scan the whole array for a free slot */
	for (i = 0; i < npollfds; i++) {
		if (pollfds[i].fd != -1) {
			continue;  /* slot already in use */
		}
		pollfds[i].fd = ev->fd;
		pollfds[i].events = POLLIN;
		/* ... remember the slot on the event, advance the hint ... */
		if (next_free_slot >= npollfds) {
			next_free_slot = 0;  /* wrap the hint around */
		}
		/* ... */
	}
/* inside fpm_event_poll_remove(): find the slot holding ev->fd and release it */
	for (i = 0; i < npollfds; i++) {
		if (pollfds[i].fd != ev->fd) {
			continue;  /* not the descriptor we are looking for */
		}
		/* ... */
		/* clear the slot so fpm_event_poll_add() can reuse it */
		pollfds[i].fd = -1;
		pollfds[i].events = 0;
		/* ... */
	}
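
Taken together, add and remove amount to slot bookkeeping over a fixed-size array: fd == -1 marks a free slot, and next_free_slot is only a hint that avoids rescanning from the start. A compact standalone sketch of that scheme (illustrative names, not FPM's):

#include <poll.h>
#include <stdio.h>

#define NSLOTS 4

static struct pollfd slots[NSLOTS];
static int next_free = 0;

static void slots_init(void)
{
	int i;
	for (i = 0; i < NSLOTS; i++) {
		slots[i].fd = -1;   /* free */
		slots[i].events = 0;
	}
}

static int slot_add(int fd)
{
	int i;
	/* fast path: the cached hint is still free */
	if (slots[next_free].fd == -1) {
		i = next_free;
	} else {
		/* slow path: scan for any free slot */
		for (i = 0; i < NSLOTS; i++) {
			if (slots[i].fd == -1) break;
		}
		if (i == NSLOTS) return -1;  /* array is full */
	}
	slots[i].fd = fd;
	slots[i].events = POLLIN;
	next_free = (i + 1) % NSLOTS;    /* rotate the hint */
	return i;
}

static void slot_remove(int fd)
{
	int i;
	for (i = 0; i < NSLOTS; i++) {
		if (slots[i].fd == fd) {
			slots[i].fd = -1;    /* back to free */
			slots[i].events = 0;
		}
	}
}

int main(void)
{
	slots_init();
	printf("added fd 3 at slot %d\n", slot_add(3));
	printf("added fd 5 at slot %d\n", slot_add(5));
	slot_remove(3);
	/* lands at slot 2 (the hint), not the freed slot 0:
	 * the hint is only a shortcut, not an exact free list */
	printf("added fd 7 at slot %d\n", slot_add(7));
	return 0;
}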