Mirror of https://github.com/cuberite/libevent.git (synced 2025-09-08 20:07:56 -04:00)
Restructure the event backends so that they do not need to keep track of events themselves; as a side effect, multiple events can use the same fd or signal.
svn:r972
This commit is contained in:
parent 97cebce8fd
commit 02b2b4d1be
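
The interface change behind this restructuring is visible in the event-internal.h hunk further down: each backend's add/del callback used to receive the backend's private cookie plus a struct event, so every backend had to remember which events were attached to which fd or signal. After this commit the callbacks only see the event base, the fd or signal number, the previously registered interest ("old"), and the requested interest ("events"); the per-fd and per-signal event lists live in the new evmap.c/evmap.h instead. Side by side:

    /* old eventop callbacks: each backend tracks its own events */
    int  (*add)(void *, struct event *);
    int  (*del)(void *, struct event *);
    int  (*dispatch)(struct event_base *, void *, struct timeval *);
    void (*dealloc)(struct event_base *, void *);

    /* new eventop callbacks: evmap tracks events, backends only see fds and flags */
    int  (*add)(struct event_base *, evutil_socket_t fd, short old, short events);
    int  (*del)(struct event_base *, evutil_socket_t fd, short old, short events);
    int  (*dispatch)(struct event_base *, struct timeval *);
    void (*dealloc)(struct event_base *);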
@@ -132,7 +132,8 @@ Changes in current version:
 o Fix a typo in setting the global event base; reported by lance.
 o Set the 0x20 bit on outgoing alphabetic characters in DNS requests randomly, and insist on a match in replies. This helps resist DNS poisoning attacks.
 o Make the http connection close detection work properly with bufferevents and fix a potential memory leak associated with it.
+o Restructure the event backends so that they do not need to keep track of events themselves; as a side effect multiple events can use the same fd or signal.

 Changes in 1.4.0:
 o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
 o demote most http warnings to debug messages

@@ -82,7 +82,7 @@ event-config.h: config.h
 	echo "#endif" >> $@

 CORE_SRC = event.c buffer.c evbuffer-internal.h bufferevent.c \
-	bufferevent-internal.h \
+	bufferevent-internal.h evmap.c evmap.h \
 	log.c evutil.c $(SYS_SRC)
 EXTRA_SRC = event_tagging.c http.c evhttp.h http-internal.h evdns.c \
 	evdns.h evrpc.c evrpc.h evrpc-internal.h mm-internal.h \

@ -48,39 +48,26 @@
|
||||
#include "log.h"
|
||||
#include "event.h"
|
||||
#include "event-internal.h"
|
||||
#include "evmap.h"
|
||||
|
||||
#define XFREE(ptr) do { if (ptr) mm_free(ptr); } while(0)
|
||||
|
||||
extern struct event_list timequeue;
|
||||
extern struct event_list addqueue;
|
||||
#if 0
|
||||
extern struct event_list signalqueue;
|
||||
#endif
|
||||
|
||||
struct win_fd_set {
|
||||
u_int fd_count;
|
||||
SOCKET fd_array[1];
|
||||
};
|
||||
|
||||
int evsigcaught[NSIG];
|
||||
volatile sig_atomic_t signal_caught = 0;
|
||||
/* MSDN says this is required to handle SIGFPE */
|
||||
volatile double SIGFPE_REQ = 0.0f;
|
||||
|
||||
#if 0
|
||||
static void signal_handler(int sig);
|
||||
|
||||
void signal_process(void);
|
||||
int signal_recalc(void);
|
||||
#endif
|
||||
|
||||
struct event_entry {
|
||||
RB_ENTRY(event_entry) node;
|
||||
SOCKET sock;
|
||||
int read_pos;
|
||||
int write_pos;
|
||||
struct event *read_event;
|
||||
struct event *write_event;
|
||||
};
|
||||
|
||||
static int
|
||||
@ -108,10 +95,10 @@ RB_PROTOTYPE(event_map, event_entry, node, compare);
|
||||
RB_GENERATE(event_map, event_entry, node, compare);
|
||||
|
||||
void *win32_init (struct event_base *);
|
||||
int win32_insert (void *, struct event *);
|
||||
int win32_del (void *, struct event *);
|
||||
int win32_dispatch (struct event_base *base, void *, struct timeval *);
|
||||
void win32_dealloc (struct event_base *, void *);
|
||||
int win32_insert(struct event_base *, evutil_socket_t, short old, short events);
|
||||
int win32_del(struct event_base *, evutil_socket_t, short old, short events);
|
||||
int win32_dispatch (struct event_base *base, struct timeval *);
|
||||
void win32_dealloc (struct event_base *);
|
||||
|
||||
struct eventop win32ops = {
|
||||
"win32",
|
||||
@ -268,55 +255,44 @@ win32_init(struct event_base *_base)
|
||||
}
|
||||
|
||||
int
|
||||
win32_insert(void *op, struct event *ev)
|
||||
win32_insert(struct event_base *base, evutil_socket_t fd,
|
||||
short old, short events)
|
||||
{
|
||||
struct win32op *win32op = op;
|
||||
struct win32op *win32op = base->evbase;
|
||||
struct event_entry *ent;
|
||||
|
||||
if (ev->ev_events & EV_SIGNAL) {
|
||||
return (evsignal_add(ev));
|
||||
}
|
||||
if (!(ev->ev_events & (EV_READ|EV_WRITE)))
|
||||
if (!(events & (EV_READ|EV_WRITE)))
|
||||
return (0);
|
||||
ent = get_event_entry(win32op, ev->ev_fd, 1);
|
||||
ent = get_event_entry(win32op, fd, 1);
|
||||
if (!ent)
|
||||
return (-1); /* out of memory */
|
||||
|
||||
event_debug(("%s: adding event for %d", __func__, (int)ev->ev_fd));
|
||||
if (ev->ev_events & EV_READ) {
|
||||
event_debug(("%s: adding event for %d", __func__, (int)fd));
|
||||
if (events & EV_READ) {
|
||||
if (do_fd_set(win32op, ent, 1)<0)
|
||||
return (-1);
|
||||
ent->read_event = ev;
|
||||
}
|
||||
if (ev->ev_events & EV_WRITE) {
|
||||
if (events & EV_WRITE) {
|
||||
if (do_fd_set(win32op, ent, 0)<0)
|
||||
return (-1);
|
||||
ent->write_event = ev;
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
win32_del(void *op, struct event *ev)
|
||||
win32_del(struct event_base *base, evutil_socket_t fd, short old, short events)
|
||||
{
|
||||
struct win32op *win32op = op;
|
||||
struct win32op *win32op = base->evbase;
|
||||
struct event_entry *ent;
|
||||
|
||||
if (ev->ev_events & EV_SIGNAL)
|
||||
return (evsignal_del(ev));
|
||||
|
||||
if (!(ent = get_event_entry(win32op, ev->ev_fd, 0)))
|
||||
if (!(ent = get_event_entry(win32op, fd, 0)))
|
||||
return (-1);
|
||||
event_debug(("%s: Removing event for %d", __func__, ev->ev_fd));
|
||||
if (ev == ent->read_event) {
|
||||
event_debug(("%s: Removing event for %d", __func__, fd));
|
||||
if (events & EV_READ)
|
||||
do_fd_clear(win32op, ent, 1);
|
||||
ent->read_event = NULL;
|
||||
}
|
||||
if (ev == ent->write_event) {
|
||||
if (events & EV_WRITE)
|
||||
do_fd_clear(win32op, ent, 0);
|
||||
ent->write_event = NULL;
|
||||
}
|
||||
if (!ent->read_event && !ent->write_event) {
|
||||
if ((events & (EV_WRITE|EV_READ)) == (EV_WRITE|EV_READ)) {
|
||||
RB_REMOVE(event_map, &win32op->event_root, ent);
|
||||
mm_free(ent);
|
||||
}
|
||||
@ -343,10 +319,9 @@ fd_set_copy(struct win_fd_set *out, const struct win_fd_set *in)
|
||||
*/
|
||||
|
||||
int
|
||||
win32_dispatch(struct event_base *base, void *op,
|
||||
struct timeval *tv)
|
||||
win32_dispatch(struct event_base *base, struct timeval *tv)
|
||||
{
|
||||
struct win32op *win32op = op;
|
||||
struct win32op *win32op = base->evbase;
|
||||
int res = 0;
|
||||
int i;
|
||||
int fd_count;
|
||||
@ -383,34 +358,26 @@ win32_dispatch(struct event_base *base, void *op,
|
||||
for (i=0; i<win32op->readset_out->fd_count; ++i) {
|
||||
struct event_entry *ent;
|
||||
SOCKET s = win32op->readset_out->fd_array[i];
|
||||
if ((ent = get_event_entry(win32op, s, 0)) && ent->read_event)
|
||||
event_active(ent->read_event, EV_READ, 1);
|
||||
evmap_io_active(base, s, EV_READ);
|
||||
}
|
||||
for (i=0; i<win32op->exset_out->fd_count; ++i) {
|
||||
struct event_entry *ent;
|
||||
SOCKET s = win32op->exset_out->fd_array[i];
|
||||
if ((ent = get_event_entry(win32op, s, 0)) && ent->read_event)
|
||||
event_active(ent->read_event, EV_READ, 1);
|
||||
evmap_io_active(base, s, EV_READ);
|
||||
}
|
||||
for (i=0; i<win32op->writeset_out->fd_count; ++i) {
|
||||
struct event_entry *ent;
|
||||
SOCKET s = win32op->writeset_out->fd_array[i];
|
||||
if ((ent = get_event_entry(win32op, s, 0)) && ent->write_event)
|
||||
event_active(ent->write_event, EV_WRITE, 1);
|
||||
evmap_io_active(base, s, EV_WRITE);
|
||||
}
|
||||
|
||||
#if 0
|
||||
if (signal_recalc() == -1)
|
||||
return (-1);
|
||||
#endif
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
void
|
||||
win32_dealloc(struct event_base *_base, void *arg)
|
||||
win32_dealloc(struct event_base *_base)
|
||||
{
|
||||
struct win32op *win32op = arg;
|
||||
struct win32op *win32op = _base->evbase;
|
||||
|
||||
evsignal_dealloc(_base);
|
||||
if (win32op->readset_in)
|
||||
@ -428,45 +395,3 @@ win32_dealloc(struct event_base *_base, void *arg)
|
||||
memset(win32op, 0, sizeof(win32op));
|
||||
mm_free(win32op);
|
||||
}
|
||||
|
||||
#if 0
|
||||
static void
|
||||
signal_handler(int sig)
|
||||
{
|
||||
evsigcaught[sig]++;
|
||||
signal_caught = 1;
|
||||
}
|
||||
|
||||
int
|
||||
signal_recalc(void)
|
||||
{
|
||||
struct event *ev;
|
||||
|
||||
/* Reinstall our signal handler. */
|
||||
TAILQ_FOREACH(ev, &signalqueue, ev_signal_next) {
|
||||
if((int)signal(EVENT_SIGNAL(ev), signal_handler) == -1)
|
||||
return (-1);
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
void
|
||||
signal_process(void)
|
||||
{
|
||||
struct event *ev;
|
||||
short ncalls;
|
||||
|
||||
TAILQ_FOREACH(ev, &signalqueue, ev_signal_next) {
|
||||
ncalls = evsigcaught[EVENT_SIGNAL(ev)];
|
||||
if (ncalls) {
|
||||
if (!(ev->ev_events & EV_PERSIST))
|
||||
event_del(ev);
|
||||
event_active(ev, EV_SIGNAL, ncalls);
|
||||
}
|
||||
}
|
||||
|
||||
memset(evsigcaught, 0, sizeof(evsigcaught));
|
||||
signal_caught = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
devpoll.c (146 lines changed)
@ -51,18 +51,9 @@
|
||||
#include "event-internal.h"
|
||||
#include "evsignal.h"
|
||||
#include "log.h"
|
||||
|
||||
/* due to limitations in the devpoll interface, we need to keep track of
|
||||
* all file descriptors outself.
|
||||
*/
|
||||
struct evdevpoll {
|
||||
struct event *evread;
|
||||
struct event *evwrite;
|
||||
};
|
||||
#include "evmap.h"
|
||||
|
||||
struct devpollop {
|
||||
struct evdevpoll *fds;
|
||||
int nfds;
|
||||
struct pollfd *events;
|
||||
int nevents;
|
||||
int dpfd;
|
||||
@ -71,10 +62,10 @@ struct devpollop {
|
||||
};
|
||||
|
||||
static void *devpoll_init (struct event_base *);
|
||||
static int devpoll_add (void *, struct event *);
|
||||
static int devpoll_del (void *, struct event *);
|
||||
static int devpoll_dispatch (struct event_base *, void *, struct timeval *);
|
||||
static void devpoll_dealloc (struct event_base *, void *);
|
||||
static int devpoll_add(struct event_base *, int fd, short old, short events);
|
||||
static int devpoll_del(struct event_base *, int fd, short old, short events);
|
||||
static int devpoll_dispatch (struct event_base *, struct timeval *);
|
||||
static void devpoll_dealloc (struct event_base *);
|
||||
|
||||
const struct eventop devpollops = {
|
||||
"devpoll",
|
||||
@ -157,18 +148,8 @@ devpoll_init(struct event_base *base)
|
||||
}
|
||||
devpollop->nevents = nfiles;
|
||||
|
||||
devpollop->fds = mm_calloc(nfiles, sizeof(struct evdevpoll));
|
||||
if (devpollop->fds == NULL) {
|
||||
mm_free(devpollop->events);
|
||||
mm_free(devpollop);
|
||||
close(dpfd);
|
||||
return (NULL);
|
||||
}
|
||||
devpollop->nfds = nfiles;
|
||||
|
||||
devpollop->changes = mm_calloc(nfiles, sizeof(struct pollfd));
|
||||
if (devpollop->changes == NULL) {
|
||||
mm_free(devpollop->fds);
|
||||
mm_free(devpollop->events);
|
||||
mm_free(devpollop);
|
||||
close(dpfd);
|
||||
@ -181,36 +162,9 @@ devpoll_init(struct event_base *base)
|
||||
}
|
||||
|
||||
static int
|
||||
devpoll_recalc(struct event_base *base, void *arg, int max)
|
||||
devpoll_dispatch(struct event_base *base, struct timeval *tv)
|
||||
{
|
||||
struct devpollop *devpollop = arg;
|
||||
|
||||
if (max >= devpollop->nfds) {
|
||||
struct evdevpoll *fds;
|
||||
int nfds;
|
||||
|
||||
nfds = devpollop->nfds;
|
||||
while (nfds <= max)
|
||||
nfds <<= 1;
|
||||
|
||||
fds = mm_realloc(devpollop->fds, nfds * sizeof(struct evdevpoll));
|
||||
if (fds == NULL) {
|
||||
event_warn("realloc");
|
||||
return (-1);
|
||||
}
|
||||
devpollop->fds = fds;
|
||||
memset(fds + devpollop->nfds, 0,
|
||||
(nfds - devpollop->nfds) * sizeof(struct evdevpoll));
|
||||
devpollop->nfds = nfds;
|
||||
}
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
{
|
||||
struct devpollop *devpollop = arg;
|
||||
struct devpollop *devpollop = base->evbase;
|
||||
struct pollfd *events = devpollop->events;
|
||||
struct dvpoll dvp;
|
||||
struct evdevpoll *evdp;
|
||||
@ -247,9 +201,6 @@ devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
int what = events[i].revents;
|
||||
struct event *evread = NULL, *evwrite = NULL;
|
||||
|
||||
assert(events[i].fd < devpollop->nfds);
|
||||
evdp = &devpollop->fds[events[i].fd];
|
||||
|
||||
if (what & POLLHUP)
|
||||
what |= POLLIN | POLLOUT;
|
||||
else if (what & POLLERR)
|
||||
@ -268,16 +219,8 @@ devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
if (!which)
|
||||
continue;
|
||||
|
||||
if (evread != NULL && !(evread->ev_events & EV_PERSIST))
|
||||
event_del(evread);
|
||||
if (evwrite != NULL && evwrite != evread &&
|
||||
!(evwrite->ev_events & EV_PERSIST))
|
||||
event_del(evwrite);
|
||||
|
||||
if (evread != NULL)
|
||||
event_active(evread, EV_READ, 1);
|
||||
if (evwrite != NULL)
|
||||
event_active(evwrite, EV_WRITE, 1);
|
||||
/* XXX(niels): not sure if this works for devpoll */
|
||||
evmap_io_active(base, events[i].fd, which);
|
||||
}
|
||||
|
||||
return (0);
|
||||
@ -285,22 +228,11 @@ devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
|
||||
|
||||
static int
|
||||
devpoll_add(void *arg, struct event *ev)
|
||||
devpoll_add(struct event_base *base, int fd, short old, short events)
|
||||
{
|
||||
struct devpollop *devpollop = arg;
|
||||
struct devpollop *devpollop = base->evbase;
|
||||
struct evdevpoll *evdp;
|
||||
int fd, events;
|
||||
|
||||
if (ev->ev_events & EV_SIGNAL)
|
||||
return (evsignal_add(ev));
|
||||
|
||||
fd = ev->ev_fd;
|
||||
if (fd >= devpollop->nfds) {
|
||||
/* Extend the file descriptor array as necessary */
|
||||
if (devpoll_recalc(ev->ev_base, devpollop, fd) == -1)
|
||||
return (-1);
|
||||
}
|
||||
evdp = &devpollop->fds[fd];
|
||||
int events;
|
||||
|
||||
/*
|
||||
* It's not necessary to OR the existing read/write events that we
|
||||
@ -310,54 +242,29 @@ devpoll_add(void *arg, struct event *ev)
|
||||
*/
|
||||
|
||||
events = 0;
|
||||
if (ev->ev_events & EV_READ) {
|
||||
if (evdp->evread && evdp->evread != ev) {
|
||||
/* There is already a different read event registered */
|
||||
return(-1);
|
||||
}
|
||||
if (events & EV_READ)
|
||||
events |= POLLIN;
|
||||
}
|
||||
|
||||
if (ev->ev_events & EV_WRITE) {
|
||||
if (evdp->evwrite && evdp->evwrite != ev) {
|
||||
/* There is already a different write event registered */
|
||||
return(-1);
|
||||
}
|
||||
if (events & EV_WRITE)
|
||||
events |= POLLOUT;
|
||||
}
|
||||
|
||||
if (devpoll_queue(devpollop, fd, events) != 0)
|
||||
return(-1);
|
||||
|
||||
/* Update events responsible */
|
||||
if (ev->ev_events & EV_READ)
|
||||
evdp->evread = ev;
|
||||
if (ev->ev_events & EV_WRITE)
|
||||
evdp->evwrite = ev;
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
devpoll_del(void *arg, struct event *ev)
|
||||
devpoll_del(struct event_base *base, int fd, short old, short events)
|
||||
{
|
||||
struct devpollop *devpollop = arg;
|
||||
struct devpollop *devpollop = base->evbase;
|
||||
struct evdevpoll *evdp;
|
||||
int fd, events;
|
||||
int events;
|
||||
int needwritedelete = 1, needreaddelete = 1;
|
||||
|
||||
if (ev->ev_events & EV_SIGNAL)
|
||||
return (evsignal_del(ev));
|
||||
|
||||
fd = ev->ev_fd;
|
||||
if (fd >= devpollop->nfds)
|
||||
return (0);
|
||||
evdp = &devpollop->fds[fd];
|
||||
|
||||
events = 0;
|
||||
if (ev->ev_events & EV_READ)
|
||||
if (events & EV_READ)
|
||||
events |= POLLIN;
|
||||
if (ev->ev_events & EV_WRITE)
|
||||
if (events & EV_WRITE)
|
||||
events |= POLLOUT;
|
||||
|
||||
/*
|
||||
@ -376,33 +283,26 @@ devpoll_del(void *arg, struct event *ev)
|
||||
* event that we are still interested in if one exists.
|
||||
*/
|
||||
|
||||
if ((events & POLLIN) && evdp->evwrite != NULL) {
|
||||
if ((events & POLLIN) && (old & EV_WRITE)) {
|
||||
/* Deleting read, still care about write */
|
||||
devpoll_queue(devpollop, fd, POLLOUT);
|
||||
needwritedelete = 0;
|
||||
} else if ((events & POLLOUT) && evdp->evread != NULL) {
|
||||
} else if ((events & POLLOUT) && (old & EV_READ)) {
|
||||
/* Deleting write, still care about read */
|
||||
devpoll_queue(devpollop, fd, POLLIN);
|
||||
needreaddelete = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (needreaddelete)
|
||||
evdp->evread = NULL;
|
||||
if (needwritedelete)
|
||||
evdp->evwrite = NULL;
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static void
|
||||
devpoll_dealloc(struct event_base *base, void *arg)
|
||||
devpoll_dealloc(struct event_base *base)
|
||||
{
|
||||
struct devpollop *devpollop = arg;
|
||||
struct devpollop *devpollop = base->evbase;
|
||||
|
||||
evsignal_dealloc(base);
|
||||
if (devpollop->fds)
|
||||
mm_free(devpollop->fds);
|
||||
if (devpollop->events)
|
||||
mm_free(devpollop->events);
|
||||
if (devpollop->changes)
|
||||
|
epoll.c (197 lines changed)
@ -48,33 +48,22 @@
|
||||
#include <fcntl.h>
|
||||
#endif
|
||||
|
||||
#include "event2/event.h"
|
||||
#include "event2/event_struct.h"
|
||||
#include "event-internal.h"
|
||||
#include "evsignal.h"
|
||||
#include "log.h"
|
||||
|
||||
/* due to limitations in the epoll interface, we need to keep track of
|
||||
* all file descriptors outself.
|
||||
*/
|
||||
struct evepoll {
|
||||
struct event *evread;
|
||||
struct event *evwrite;
|
||||
};
|
||||
#include "evmap.h"
|
||||
|
||||
struct epollop {
|
||||
struct evepoll *fds;
|
||||
int nfds;
|
||||
struct epoll_event *events;
|
||||
int nevents;
|
||||
int epfd;
|
||||
};
|
||||
|
||||
static void *epoll_init (struct event_base *);
|
||||
static int epoll_add (void *, struct event *);
|
||||
static int epoll_del (void *, struct event *);
|
||||
static int epoll_dispatch (struct event_base *, void *, struct timeval *);
|
||||
static void epoll_dealloc (struct event_base *, void *);
|
||||
static int epoll_add(struct event_base *, int fd, short old, short events);
|
||||
static int epoll_del(struct event_base *, int fd, short old, short events);
|
||||
static int epoll_dispatch (struct event_base *, struct timeval *);
|
||||
static void epoll_dealloc (struct event_base *);
|
||||
|
||||
const struct eventop epollops = {
|
||||
"epoll",
|
||||
@ -146,52 +135,16 @@ epoll_init(struct event_base *base)
|
||||
}
|
||||
epollop->nevents = nfiles;
|
||||
|
||||
epollop->fds = mm_calloc(nfiles, sizeof(struct evepoll));
|
||||
if (epollop->fds == NULL) {
|
||||
mm_free(epollop->events);
|
||||
mm_free(epollop);
|
||||
return (NULL);
|
||||
}
|
||||
epollop->nfds = nfiles;
|
||||
|
||||
evsignal_init(base);
|
||||
|
||||
return (epollop);
|
||||
}
|
||||
|
||||
static int
|
||||
epoll_recalc(struct event_base *base, void *arg, int max)
|
||||
epoll_dispatch(struct event_base *base, struct timeval *tv)
|
||||
{
|
||||
struct epollop *epollop = arg;
|
||||
|
||||
if (max > epollop->nfds) {
|
||||
struct evepoll *fds;
|
||||
int nfds;
|
||||
|
||||
nfds = epollop->nfds;
|
||||
while (nfds < max)
|
||||
nfds <<= 1;
|
||||
|
||||
fds = mm_realloc(epollop->fds, nfds * sizeof(struct evepoll));
|
||||
if (fds == NULL) {
|
||||
event_warn("realloc");
|
||||
return (-1);
|
||||
}
|
||||
epollop->fds = fds;
|
||||
memset(fds + epollop->nfds, 0,
|
||||
(nfds - epollop->nfds) * sizeof(struct evepoll));
|
||||
epollop->nfds = nfds;
|
||||
}
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
{
|
||||
struct epollop *epollop = arg;
|
||||
struct epollop *epollop = base->evbase;
|
||||
struct epoll_event *events = epollop->events;
|
||||
struct evepoll *evep;
|
||||
int i, res, timeout = -1;
|
||||
|
||||
if (tv != NULL)
|
||||
@ -221,130 +174,82 @@ epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
|
||||
for (i = 0; i < res; i++) {
|
||||
int what = events[i].events;
|
||||
struct event *evread = NULL, *evwrite = NULL;
|
||||
|
||||
evep = (struct evepoll *)events[i].data.ptr;
|
||||
short res = 0;
|
||||
|
||||
if (what & (EPOLLHUP|EPOLLERR)) {
|
||||
evread = evep->evread;
|
||||
evwrite = evep->evwrite;
|
||||
res = EV_READ | EV_WRITE;
|
||||
} else {
|
||||
if (what & EPOLLIN) {
|
||||
evread = evep->evread;
|
||||
}
|
||||
|
||||
if (what & EPOLLOUT) {
|
||||
evwrite = evep->evwrite;
|
||||
}
|
||||
if (what & EPOLLIN)
|
||||
res |= EV_READ;
|
||||
if (what & EPOLLOUT)
|
||||
res |= EV_WRITE;
|
||||
}
|
||||
|
||||
if (!(evread||evwrite))
|
||||
if (!res)
|
||||
continue;
|
||||
|
||||
if (evread != NULL)
|
||||
event_active(evread, EV_READ | (evread->ev_events & EV_ET), 1);
|
||||
if (evwrite != NULL)
|
||||
event_active(evwrite, EV_WRITE | (evwrite->ev_events & EV_ET), 1);
|
||||
evmap_io_active(base, events[i].data.fd, res | EV_ET);
|
||||
}
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
epoll_add(void *arg, struct event *ev)
|
||||
epoll_add(struct event_base *base, int fd, short old, short events)
|
||||
{
|
||||
struct epollop *epollop = arg;
|
||||
struct epollop *epollop = base->evbase;
|
||||
struct epoll_event epev = {0, {0}};
|
||||
struct evepoll *evep;
|
||||
int fd, op, events;
|
||||
int op, res;
|
||||
|
||||
if (ev->ev_events & EV_SIGNAL)
|
||||
return (evsignal_add(ev));
|
||||
|
||||
fd = ev->ev_fd;
|
||||
if (fd >= epollop->nfds) {
|
||||
/* Extent the file descriptor array as necessary */
|
||||
if (epoll_recalc(ev->ev_base, epollop, fd) == -1)
|
||||
return (-1);
|
||||
}
|
||||
evep = &epollop->fds[fd];
|
||||
op = EPOLL_CTL_ADD;
|
||||
events = 0;
|
||||
if (evep->evread != NULL) {
|
||||
events |= EPOLLIN;
|
||||
res = 0;
|
||||
|
||||
events |= old;
|
||||
if (events & EV_READ)
|
||||
res |= EPOLLIN;
|
||||
if (events & EV_WRITE)
|
||||
res |= EPOLLOUT;
|
||||
if (events & EV_ET)
|
||||
res |= EPOLLET;
|
||||
|
||||
if (old != 0)
|
||||
op = EPOLL_CTL_MOD;
|
||||
}
|
||||
if (evep->evwrite != NULL) {
|
||||
events |= EPOLLOUT;
|
||||
op = EPOLL_CTL_MOD;
|
||||
}
|
||||
|
||||
if (ev->ev_events & EV_READ)
|
||||
events |= EPOLLIN;
|
||||
if (ev->ev_events & EV_WRITE)
|
||||
events |= EPOLLOUT;
|
||||
if(ev->ev_events & EV_ET)
|
||||
events |= EPOLLET;
|
||||
|
||||
epev.data.ptr = evep;
|
||||
epev.events = events;
|
||||
if (epoll_ctl(epollop->epfd, op, ev->ev_fd, &epev) == -1)
|
||||
return (-1);
|
||||
|
||||
/* Update events responsible */
|
||||
if (ev->ev_events & EV_READ)
|
||||
evep->evread = ev;
|
||||
if (ev->ev_events & EV_WRITE)
|
||||
evep->evwrite = ev;
|
||||
epev.data.fd = fd;
|
||||
epev.events = res;
|
||||
if (epoll_ctl(epollop->epfd, op, fd, &epev) == -1)
|
||||
return (-1);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
epoll_del(void *arg, struct event *ev)
|
||||
epoll_del(struct event_base *base, int fd, short old, short events)
|
||||
{
|
||||
struct epollop *epollop = arg;
|
||||
struct epollop *epollop = base->evbase;
|
||||
struct epoll_event epev = {0, {0}};
|
||||
struct evepoll *evep;
|
||||
int fd, events, op;
|
||||
int needwritedelete = 1, needreaddelete = 1;
|
||||
|
||||
if (ev->ev_events & EV_SIGNAL)
|
||||
return (evsignal_del(ev));
|
||||
|
||||
fd = ev->ev_fd;
|
||||
if (fd >= epollop->nfds)
|
||||
return (0);
|
||||
evep = &epollop->fds[fd];
|
||||
int res, op;
|
||||
|
||||
op = EPOLL_CTL_DEL;
|
||||
events = 0;
|
||||
|
||||
if (ev->ev_events & EV_READ)
|
||||
events |= EPOLLIN;
|
||||
if (ev->ev_events & EV_WRITE)
|
||||
events |= EPOLLOUT;
|
||||
res = 0;
|
||||
if (events & EV_READ)
|
||||
res |= EPOLLIN;
|
||||
if (events & EV_WRITE)
|
||||
res |= EPOLLOUT;
|
||||
|
||||
if ((events & (EPOLLIN|EPOLLOUT)) != (EPOLLIN|EPOLLOUT)) {
|
||||
if ((events & EPOLLIN) && evep->evwrite != NULL) {
|
||||
needwritedelete = 0;
|
||||
events = EPOLLOUT;
|
||||
if ((res & (EPOLLIN|EPOLLOUT)) != (EPOLLIN|EPOLLOUT)) {
|
||||
if ((res & EPOLLIN) && (old & EV_WRITE)) {
|
||||
res = EPOLLOUT;
|
||||
op = EPOLL_CTL_MOD;
|
||||
} else if ((events & EPOLLOUT) && evep->evread != NULL) {
|
||||
needreaddelete = 0;
|
||||
events = EPOLLIN;
|
||||
} else if ((res & EPOLLOUT) && (old & EV_READ)) {
|
||||
res = EPOLLIN;
|
||||
op = EPOLL_CTL_MOD;
|
||||
}
|
||||
}
|
||||
|
||||
epev.events = events;
|
||||
epev.data.ptr = evep;
|
||||
|
||||
if (needreaddelete)
|
||||
evep->evread = NULL;
|
||||
if (needwritedelete)
|
||||
evep->evwrite = NULL;
|
||||
epev.data.fd = fd;
|
||||
epev.events = res;
|
||||
|
||||
if (epoll_ctl(epollop->epfd, op, fd, &epev) == -1)
|
||||
return (-1);
|
||||
@ -353,13 +258,11 @@ epoll_del(void *arg, struct event *ev)
|
||||
}
|
||||
|
||||
static void
|
||||
epoll_dealloc(struct event_base *base, void *arg)
|
||||
epoll_dealloc(struct event_base *base)
|
||||
{
|
||||
struct epollop *epollop = arg;
|
||||
struct epollop *epollop = base->evbase;
|
||||
|
||||
evsignal_dealloc(base);
|
||||
if (epollop->fds)
|
||||
mm_free(epollop->fds);
|
||||
if (epollop->events)
|
||||
mm_free(epollop->events);
|
||||
if (epollop->epfd >= 0)
|
||||
|
@ -40,6 +40,7 @@ extern "C" {
|
||||
|
||||
/* mutually exclusive */
|
||||
#define ev_signal_next _ev.ev_signal.ev_signal_next
|
||||
#define ev_io_next _ev.ev_io.ev_io_next
|
||||
|
||||
/* used only by signals */
|
||||
#define ev_ncalls _ev.ev_signal.ev_ncalls
|
||||
@ -48,18 +49,31 @@ extern "C" {
|
||||
struct eventop {
|
||||
const char *name;
|
||||
void *(*init)(struct event_base *);
|
||||
int (*add)(void *, struct event *);
|
||||
int (*del)(void *, struct event *);
|
||||
int (*dispatch)(struct event_base *, void *, struct timeval *);
|
||||
void (*dealloc)(struct event_base *, void *);
|
||||
int (*add)(struct event_base *, evutil_socket_t fd, short old, short events);
|
||||
int (*del)(struct event_base *, evutil_socket_t fd, short old, short events);
|
||||
int (*dispatch)(struct event_base *, struct timeval *);
|
||||
void (*dealloc)(struct event_base *);
|
||||
/* set if we need to reinitialize the event base */
|
||||
int need_reinit;
|
||||
enum event_method_feature features;
|
||||
};
|
||||
|
||||
/* used to map multiple events to the same underlying identifier */
|
||||
struct event_map {
|
||||
void *entries;
|
||||
int nentries;
|
||||
};
|
||||
|
||||
struct event_base {
|
||||
const struct eventop *evsel;
|
||||
void *evbase;
|
||||
|
||||
/* signal handling info */
|
||||
const struct eventop *evsigsel;
|
||||
void *evsigbase;
|
||||
|
||||
struct evsignal_info sig;
|
||||
|
||||
int event_count; /* counts number of total events */
|
||||
int event_count_active; /* counts number of active events */
|
||||
|
||||
@ -70,8 +84,11 @@ struct event_base {
|
||||
struct event_list **activequeues;
|
||||
int nactivequeues;
|
||||
|
||||
/* signal handling info */
|
||||
struct evsignal_info sig;
|
||||
/* for mapping io activity to events */
|
||||
struct event_map io;
|
||||
|
||||
/* for mapping signal activity to events */
|
||||
struct event_map sigmap;
|
||||
|
||||
struct event_list eventqueue;
|
||||
struct timeval event_tv;
|
||||
|
event.c (49 lines changed)
@ -66,6 +66,7 @@
|
||||
#include "event2/thread.h"
|
||||
#include "event2/util.h"
|
||||
#include "log.h"
|
||||
#include "evmap.h"
|
||||
|
||||
#ifdef HAVE_EVENT_PORTS
|
||||
extern const struct eventop evportops;
|
||||
@ -255,7 +256,7 @@ event_base_new_with_config(struct event_config *cfg)
|
||||
eventops[i]->name))
|
||||
continue;
|
||||
if ((eventops[i]->features & cfg->require_features)
|
||||
!= cfg->require_features)
|
||||
!= cfg->require_features)
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -270,8 +271,9 @@ event_base_new_with_config(struct event_config *cfg)
|
||||
|
||||
if (base->evbase == NULL) {
|
||||
if (cfg == NULL)
|
||||
event_errx(1, "%s: no event mechanism available", __func__);
|
||||
else {
|
||||
event_errx(1, "%s: no event mechanism available",
|
||||
__func__);
|
||||
else {
|
||||
event_base_free(base);
|
||||
return NULL;
|
||||
}
|
||||
@ -344,7 +346,7 @@ event_base_free(struct event_base *base)
|
||||
__func__, n_deleted));
|
||||
|
||||
if (base->evsel != NULL && base->evsel->dealloc != NULL)
|
||||
base->evsel->dealloc(base, base->evbase);
|
||||
base->evsel->dealloc(base);
|
||||
|
||||
for (i = 0; i < base->nactivequeues; ++i)
|
||||
assert(TAILQ_EMPTY(base->activequeues[i]));
|
||||
@ -358,6 +360,9 @@ event_base_free(struct event_base *base)
|
||||
|
||||
assert(TAILQ_EMPTY(&base->eventqueue));
|
||||
|
||||
evmap_clear(&base->io);
|
||||
evmap_clear(&base->sigmap);
|
||||
|
||||
mm_free(base);
|
||||
}
|
||||
|
||||
@ -366,7 +371,6 @@ int
|
||||
event_reinit(struct event_base *base)
|
||||
{
|
||||
const struct eventop *evsel = base->evsel;
|
||||
void *evbase = base->evbase;
|
||||
int res = 0;
|
||||
struct event *ev;
|
||||
|
||||
@ -382,15 +386,23 @@ event_reinit(struct event_base *base)
|
||||
}
|
||||
|
||||
if (base->evsel->dealloc != NULL)
|
||||
base->evsel->dealloc(base, base->evbase);
|
||||
evbase = base->evbase = evsel->init(base);
|
||||
base->evsel->dealloc(base);
|
||||
base->evbase = evsel->init(base);
|
||||
if (base->evbase == NULL)
|
||||
event_errx(1, "%s: could not reinitialize event mechanism",
|
||||
__func__);
|
||||
|
||||
evmap_clear(&base->io);
|
||||
evmap_clear(&base->sigmap);
|
||||
|
||||
TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
|
||||
if (evsel->add(evbase, ev) == -1)
|
||||
res = -1;
|
||||
if (ev->ev_events & (EV_READ|EV_WRITE)) {
|
||||
if (evmap_io_add(base, ev->ev_fd, ev) == -1)
|
||||
res = -1;
|
||||
} else if (ev->ev_events & EV_SIGNAL) {
|
||||
if (evmap_signal_add(base, ev->ev_fd, ev) == -1)
|
||||
res = -1;
|
||||
}
|
||||
}
|
||||
|
||||
return (res);
|
||||
@ -691,7 +703,6 @@ int
|
||||
event_base_loop(struct event_base *base, int flags)
|
||||
{
|
||||
const struct eventop *evsel = base->evsel;
|
||||
void *evbase = base->evbase;
|
||||
struct timeval tv;
|
||||
struct timeval *tv_p;
|
||||
int res, done;
|
||||
@ -751,7 +762,7 @@ event_base_loop(struct event_base *base, int flags)
|
||||
/* clear time cache */
|
||||
base->tv_cache.tv_sec = 0;
|
||||
|
||||
res = evsel->dispatch(base, evbase, tv_p);
|
||||
res = evsel->dispatch(base, tv_p);
|
||||
|
||||
if (res == -1)
|
||||
return (-1);
|
||||
@ -1020,8 +1031,6 @@ static inline int
|
||||
event_add_internal(struct event *ev, const struct timeval *tv)
|
||||
{
|
||||
struct event_base *base = ev->ev_base;
|
||||
const struct eventop *evsel = base->evsel;
|
||||
void *evbase = base->evbase;
|
||||
int res = 0;
|
||||
|
||||
event_debug((
|
||||
@ -1046,7 +1055,10 @@ event_add_internal(struct event *ev, const struct timeval *tv)
|
||||
|
||||
if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
|
||||
!(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
|
||||
res = evsel->add(evbase, ev);
|
||||
if (ev->ev_events & (EV_READ|EV_WRITE))
|
||||
res = evmap_io_add(base, ev->ev_fd, ev);
|
||||
else if (ev->ev_events & EV_SIGNAL)
|
||||
res = evmap_signal_add(base, ev->ev_fd, ev);
|
||||
if (res != -1)
|
||||
event_queue_insert(base, ev, EVLIST_INSERTED);
|
||||
}
|
||||
@ -1118,8 +1130,6 @@ static inline int
|
||||
event_del_internal(struct event *ev)
|
||||
{
|
||||
struct event_base *base;
|
||||
const struct eventop *evsel;
|
||||
void *evbase;
|
||||
int res = 0;
|
||||
|
||||
event_debug(("event_del: %p, callback %p",
|
||||
@ -1130,8 +1140,6 @@ event_del_internal(struct event *ev)
|
||||
return (-1);
|
||||
|
||||
base = ev->ev_base;
|
||||
evsel = base->evsel;
|
||||
evbase = base->evbase;
|
||||
|
||||
assert(!(ev->ev_flags & ~EVLIST_ALL));
|
||||
|
||||
@ -1151,7 +1159,10 @@ event_del_internal(struct event *ev)
|
||||
|
||||
if (ev->ev_flags & EVLIST_INSERTED) {
|
||||
event_queue_remove(base, ev, EVLIST_INSERTED);
|
||||
res = evsel->del(evbase, ev);
|
||||
if (ev->ev_events & (EV_READ|EV_WRITE))
|
||||
res = evmap_io_del(base, ev->ev_fd, ev);
|
||||
else
|
||||
res = evmap_signal_del(base, ev->ev_fd, ev);
|
||||
}
|
||||
|
||||
/* if we are not in the right thread, we need to wake up the loop */
|
||||
|
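To show the user-visible effect of the change, here is a minimal usage sketch (not part of the patch): with the per-fd lists in evmap, two independent events can now watch the same descriptor, where the old devpoll backend, for example, rejected this with "There is already a different read event registered". The sketch assumes the classic event_init()/event_set()/event_add() API and a hypothetical already-open descriptor fd with illustrative callbacks cb_a and cb_b:

    #include <event.h>

    /* hypothetical callbacks, named only for illustration */
    static void cb_a(int fd, short what, void *arg) { /* consume data for reader A */ }
    static void cb_b(int fd, short what, void *arg) { /* consume data for reader B */ }

    int main(void)
    {
        int fd = 0;                      /* stand-in for some already-open socket */
        struct event ev_a, ev_b;

        event_init();

        event_set(&ev_a, fd, EV_READ | EV_PERSIST, cb_a, NULL);
        event_set(&ev_b, fd, EV_READ | EV_PERSIST, cb_b, NULL);

        /* Two read events on one fd: evmap_io_add() appends both to the per-fd
         * list and passes the combined interest down to the backend. */
        event_add(&ev_a, NULL);
        event_add(&ev_b, NULL);

        return event_dispatch();
    }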
evmap.c (new file, 304 lines)
@ -0,0 +1,304 @@
|
||||
/*
|
||||
* Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#ifdef WIN32
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#undef WIN32_LEAN_AND_MEAN
|
||||
#include "misc.h"
|
||||
#endif
|
||||
#include <sys/types.h>
|
||||
#ifdef HAVE_SYS_TIME_H
|
||||
#include <sys/time.h>
|
||||
#else
|
||||
#include <sys/_time.h>
|
||||
#endif
|
||||
#include <sys/queue.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#ifndef WIN32
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
#include <errno.h>
|
||||
#include <signal.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "event-internal.h"
|
||||
#include "evmap.h"
|
||||
#include "mm-internal.h"
|
||||
|
||||
#define GET_SLOT(map, slot, msize) (void *)((char *)(map)->entries + (slot)*(msize))
|
||||
|
||||
static void *
|
||||
evmap_get_head(struct event_map *map, int slot, int msize, void (*ctor)(void *))
|
||||
{
|
||||
if (map->nentries <= slot) {
|
||||
int i;
|
||||
int nentries = map->nentries ? map->nentries : 32;
|
||||
void *tmp;
|
||||
|
||||
while (nentries <= slot)
|
||||
nentries <<= 1;
|
||||
|
||||
tmp = mm_realloc(map->entries, nentries * msize);
|
||||
if (tmp == NULL)
|
||||
return (NULL);
|
||||
|
||||
for (i = map->nentries; i < nentries; ++i)
|
||||
(*ctor)((char *)tmp + i * msize);
|
||||
|
||||
map->nentries = nentries;
|
||||
map->entries = tmp;
|
||||
}
|
||||
|
||||
return (GET_SLOT(map, slot, msize));
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
evmap_clear(struct event_map *ctx)
|
||||
{
|
||||
ctx->nentries = 0;
|
||||
if (ctx->entries != NULL) {
|
||||
mm_free(ctx->entries);
|
||||
ctx->entries = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* code specific to file descriptors */
|
||||
|
||||
struct evmap_io {
|
||||
struct event_list events;
|
||||
unsigned int nread;
|
||||
unsigned int nwrite;
|
||||
};
|
||||
|
||||
static void
|
||||
evmap_io_init(void *arg)
|
||||
{
|
||||
struct evmap_io *entry = arg;
|
||||
|
||||
TAILQ_INIT(&entry->events);
|
||||
entry->nread = 0;
|
||||
entry->nwrite = 0;
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
evmap_io_add(struct event_base *base, int fd, struct event *ev)
|
||||
{
|
||||
const struct eventop *evsel = base->evsel;
|
||||
struct event_map *io = &base->io;
|
||||
struct evmap_io *ctx = NULL;
|
||||
int nread, nwrite;
|
||||
short res = 0, old = 0;
|
||||
|
||||
if (fd >= io->nentries) {
|
||||
ctx = evmap_get_head(
|
||||
io, fd, sizeof(struct evmap_io), evmap_io_init);
|
||||
if (ctx == NULL)
|
||||
return (-1);
|
||||
} else {
|
||||
ctx = GET_SLOT(io, fd, sizeof(struct evmap_io));
|
||||
}
|
||||
|
||||
nread = ctx->nread;
|
||||
nwrite = ctx->nwrite;
|
||||
|
||||
if (nread)
|
||||
old |= EV_READ;
|
||||
if (nwrite)
|
||||
old |= EV_WRITE;
|
||||
|
||||
if (ev->ev_events & EV_READ) {
|
||||
if (++nread == 1)
|
||||
res |= EV_READ;
|
||||
}
|
||||
if (ev->ev_events & EV_WRITE) {
|
||||
if (++nwrite == 1)
|
||||
res |= EV_WRITE;
|
||||
}
|
||||
|
||||
if (res) {
|
||||
/* XXX(niels): we cannot mix edge-triggered and
|
||||
* level-triggered, we should probably assert on
|
||||
* this. */
|
||||
if (evsel->add(base, ev->ev_fd,
|
||||
old, (ev->ev_events & EV_ET) | res) == -1)
|
||||
return (-1);
|
||||
}
|
||||
|
||||
ctx->nread = nread;
|
||||
ctx->nwrite = nwrite;
|
||||
TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
evmap_io_del(struct event_base *base, int fd, struct event *ev)
|
||||
{
|
||||
const struct eventop *evsel = base->evsel;
|
||||
struct event_map *io = &base->io;
|
||||
struct evmap_io *ctx;
|
||||
int nread, nwrite;
|
||||
short res = 0, old = 0;
|
||||
|
||||
if (fd >= io->nentries)
|
||||
return (-1);
|
||||
|
||||
ctx = GET_SLOT(io, fd, sizeof(struct evmap_io));
|
||||
|
||||
nread = ctx->nread;
|
||||
nwrite = ctx->nwrite;
|
||||
|
||||
if (nread)
|
||||
old |= EV_READ;
|
||||
if (nwrite)
|
||||
old |= EV_WRITE;
|
||||
|
||||
if (ev->ev_events & EV_READ) {
|
||||
if (--nread == 0)
|
||||
res |= EV_READ;
|
||||
assert(nread >= 0);
|
||||
}
|
||||
if (ev->ev_events & EV_WRITE) {
|
||||
if (--nwrite == 0)
|
||||
res |= EV_WRITE;
|
||||
assert(nwrite >= 0);
|
||||
}
|
||||
|
||||
if (res) {
|
||||
if (evsel->del(base, ev->ev_fd, old, res) == -1)
|
||||
return (-1);
|
||||
}
|
||||
|
||||
ctx->nread = nread;
|
||||
ctx->nwrite = nwrite;
|
||||
TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
void
|
||||
evmap_io_active(struct event_base *base, int fd, short events)
|
||||
{
|
||||
struct event_map *io = &base->io;
|
||||
struct evmap_io *ctx;
|
||||
struct event *ev;
|
||||
|
||||
assert(fd < io->nentries);
|
||||
ctx = GET_SLOT(io, fd, sizeof(struct evmap_io));
|
||||
|
||||
TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
|
||||
if (ev->ev_events & events)
|
||||
event_active(ev, ev->ev_events & events, 1);
|
||||
}
|
||||
}
|
||||
|
||||
/* code specific to signals */
|
||||
|
||||
struct evmap_signal {
|
||||
struct event_list events;
|
||||
};
|
||||
|
||||
static void
|
||||
evmap_signal_init(void *arg)
|
||||
{
|
||||
struct evmap_signal *entry = arg;
|
||||
|
||||
TAILQ_INIT(&entry->events);
|
||||
}
|
||||
|
||||
|
||||
int
|
||||
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
|
||||
{
|
||||
const struct eventop *evsel = base->evsigsel;
|
||||
struct event_map *map = &base->sigmap;
|
||||
struct evmap_signal *ctx = NULL;
|
||||
|
||||
if (sig >= map->nentries) {
|
||||
ctx = evmap_get_head(
|
||||
map, sig, sizeof(struct evmap_signal),
|
||||
evmap_signal_init);
|
||||
if (ctx == NULL)
|
||||
return (-1);
|
||||
} else {
|
||||
ctx = GET_SLOT(map, sig, sizeof(struct evmap_signal));
|
||||
}
|
||||
|
||||
if (TAILQ_EMPTY(&ctx->events)) {
|
||||
if (evsel->add(base, EVENT_SIGNAL(ev), 0, EV_SIGNAL) == -1)
|
||||
return (-1);
|
||||
}
|
||||
|
||||
TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
|
||||
{
|
||||
const struct eventop *evsel = base->evsigsel;
|
||||
struct event_map *map = &base->sigmap;
|
||||
struct evmap_signal *ctx;
|
||||
|
||||
if (sig >= map->nentries)
|
||||
return (-1);
|
||||
|
||||
ctx = GET_SLOT(map, sig, sizeof(struct evmap_signal));
|
||||
|
||||
if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
|
||||
if (evsel->del(base, EVENT_SIGNAL(ev), 0, EV_SIGNAL) == -1)
|
||||
return (-1);
|
||||
}
|
||||
|
||||
TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
void
|
||||
evmap_signal_active(struct event_base *base, int sig, int ncalls)
|
||||
{
|
||||
struct event_map *map = &base->sigmap;
|
||||
struct evmap_signal *ctx;
|
||||
struct event *ev;
|
||||
|
||||
assert(sig < map->nentries);
|
||||
ctx = GET_SLOT(map, sig, sizeof(struct evmap_signal));
|
||||
|
||||
TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
|
||||
event_active(ev, EV_SIGNAL, ncalls);
|
||||
}
|
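As a concrete walk-through of the counting logic in evmap_io_add()/evmap_io_del() above (an illustration, not additional patch content), this is the sequence of backend calls produced when two EV_READ events A and B are registered on the same fd and later removed:

    /*
     * add A: nread 0 -> 1, old = 0,       res = EV_READ
     *        evsel->add(base, fd, 0, EV_READ)        -- backend starts watching fd
     * add B: nread 1 -> 2, old = EV_READ, res = 0
     *        no backend call; B is only appended to the per-fd event list
     * del A: nread 2 -> 1, old = EV_READ, res = 0
     *        no backend call
     * del B: nread 1 -> 0, old = EV_READ, res = EV_READ
     *        evsel->del(base, fd, EV_READ, EV_READ)  -- backend stops watching fd
     */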
evmap.h (new file, 44 lines)
@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. The name of the author may not be used to endorse or promote products
|
||||
* derived from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _EVMAP_H_
|
||||
#define _EVMAP_H_
|
||||
|
||||
struct event_map;
|
||||
struct event_base;
|
||||
struct event;
|
||||
|
||||
void evmap_clear(struct event_map* ctx);
|
||||
|
||||
int evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev);
|
||||
int evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev);
|
||||
void evmap_io_active(struct event_base *base, evutil_socket_t fd, short events);
|
||||
|
||||
int evmap_signal_add(struct event_base *base, int signum, struct event *ev);
|
||||
int evmap_signal_del(struct event_base *base, int signum, struct event *ev);
|
||||
void evmap_signal_active(struct event_base *base, int fd, int ncalls);
|
||||
|
||||
#endif /* _EVMAP_H_ */
|
evport.c (116 lines changed)
@ -70,12 +70,10 @@
|
||||
#include <assert.h>
|
||||
#endif
|
||||
|
||||
#include "event2/event.h"
|
||||
#include "event2/event_struct.h"
|
||||
#include "event-internal.h"
|
||||
#include "log.h"
|
||||
#include "evsignal.h"
|
||||
|
||||
#include "evmap.h"
|
||||
|
||||
/*
|
||||
* Default value for ed_nevents, which is the maximum file descriptor number we
|
||||
@ -98,12 +96,11 @@
|
||||
*/
|
||||
|
||||
struct fd_info {
|
||||
struct event* fdi_revt; /* the event responsible for the "read" */
|
||||
struct event* fdi_wevt; /* the event responsible for the "write" */
|
||||
short fdi_what; /* combinations of EV_READ and EV_WRITE */
|
||||
};
|
||||
|
||||
#define FDI_HAS_READ(fdi) ((fdi)->fdi_revt != NULL)
|
||||
#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_wevt != NULL)
|
||||
#define FDI_HAS_READ(fdi) ((fdi)->fdi_what & EV_READ)
|
||||
#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_what & EV_WRITE)
|
||||
#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
|
||||
#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \
|
||||
(FDI_HAS_WRITE(fdi) ? POLLOUT : 0)
|
||||
@ -117,10 +114,10 @@ struct evport_data {
|
||||
};
|
||||
|
||||
static void* evport_init (struct event_base *);
|
||||
static int evport_add (void *, struct event *);
|
||||
static int evport_del (void *, struct event *);
|
||||
static int evport_dispatch (struct event_base *, void *, struct timeval *);
|
||||
static void evport_dealloc (struct event_base *, void *);
|
||||
static int evport_add(struct event_base *, int fd, short old, short events);
|
||||
static int evport_del(struct event_base *, int fd, short old, short events);
|
||||
static int evport_dispatch (struct event_base *, struct timeval *);
|
||||
static void evport_dealloc (struct event_base *);
|
||||
|
||||
const struct eventop evportops = {
|
||||
"event ports",
|
||||
@ -182,25 +179,6 @@ check_evportop(struct evport_data *evpd)
|
||||
assert(evpd->ed_nevents > 0);
|
||||
assert(evpd->ed_port > 0);
|
||||
assert(evpd->ed_fds > 0);
|
||||
|
||||
/*
|
||||
* Verify the integrity of the fd_info struct as well as the events to
|
||||
* which it points (at least, that they're valid references and correct
|
||||
* for their position in the structure).
|
||||
*/
|
||||
int i;
|
||||
for (i = 0; i < evpd->ed_nevents; ++i) {
|
||||
struct event *ev;
|
||||
struct fd_info *fdi;
|
||||
|
||||
fdi = &evpd->ed_fds[i];
|
||||
if ((ev = fdi->fdi_revt) != NULL) {
|
||||
assert(ev->ev_fd == i);
|
||||
}
|
||||
if ((ev = fdi->fdi_wevt) != NULL) {
|
||||
assert(ev->ev_fd == i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -279,10 +257,10 @@ reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
|
||||
*/
|
||||
|
||||
static int
|
||||
evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
evport_dispatch(struct event_base *base, struct timeval *tv)
|
||||
{
|
||||
int i, res;
|
||||
struct evport_data *epdp = arg;
|
||||
struct evport_data *epdp = base->evbase;
|
||||
port_event_t pevtlist[EVENTS_PER_GETN];
|
||||
|
||||
/*
|
||||
@ -318,8 +296,7 @@ evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
}
|
||||
|
||||
if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
|
||||
int fd = FDI_HAS_READ(fdi) ? fdi->fdi_revt->ev_fd :
|
||||
fdi->fdi_wevt->ev_fd;
|
||||
int fd = epdp->ed_pending[i];
|
||||
reassociate(epdp, fdi, fd);
|
||||
epdp->ed_pending[i] = -1;
|
||||
}
|
||||
@ -366,19 +343,7 @@ evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
assert(epdp->ed_nevents > fd);
|
||||
fdi = &(epdp->ed_fds[fd]);
|
||||
|
||||
/*
|
||||
* We now check for each of the possible events (READ
|
||||
* or WRITE). Then, we activate the event (which will
|
||||
* cause its callback to be executed).
|
||||
*/
|
||||
|
||||
if ((res & EV_READ) && ((ev = fdi->fdi_revt) != NULL)) {
|
||||
event_active(ev, res, 1);
|
||||
}
|
||||
|
||||
if ((res & EV_WRITE) && ((ev = fdi->fdi_wevt) != NULL)) {
|
||||
event_active(ev, res, 1);
|
||||
}
|
||||
evmap_io_active(base, fd, res);
|
||||
} /* end of all events gotten */
|
||||
|
||||
check_evportop(epdp);
|
||||
@ -393,26 +358,20 @@ evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
*/
|
||||
|
||||
static int
|
||||
evport_add(void *arg, struct event *ev)
|
||||
evport_add(struct event_base *base, int fd, short old, short events)
|
||||
{
|
||||
struct evport_data *evpd = arg;
|
||||
struct evport_data *evpd = base->evbase;
|
||||
struct fd_info *fdi;
|
||||
int factor;
|
||||
|
||||
check_evportop(evpd);
|
||||
|
||||
/*
|
||||
* Delegate, if it's not ours to handle.
|
||||
*/
|
||||
if (ev->ev_events & EV_SIGNAL)
|
||||
return (evsignal_add(ev));
|
||||
|
||||
/*
|
||||
* If necessary, grow the file descriptor info table
|
||||
*/
|
||||
|
||||
factor = 1;
|
||||
while (ev->ev_fd >= factor * evpd->ed_nevents)
|
||||
while (fd >= factor * evpd->ed_nevents)
|
||||
factor *= 2;
|
||||
|
||||
if (factor > 1) {
|
||||
@ -421,13 +380,10 @@ evport_add(void *arg, struct event *ev)
|
||||
}
|
||||
}
|
||||
|
||||
fdi = &evpd->ed_fds[ev->ev_fd];
|
||||
if (ev->ev_events & EV_READ)
|
||||
fdi->fdi_revt = ev;
|
||||
if (ev->ev_events & EV_WRITE)
|
||||
fdi->fdi_wevt = ev;
|
||||
fdi = &evpd->ed_fds[fd];
|
||||
fdi->fdi_what |= events;
|
||||
|
||||
return reassociate(evpd, fdi, ev->ev_fd);
|
||||
return reassociate(evpd, fdi, fd);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -435,43 +391,35 @@ evport_add(void *arg, struct event *ev)
|
||||
*/
|
||||
|
||||
static int
|
||||
evport_del(void *arg, struct event *ev)
|
||||
evport_del(struct event_base *base, int fd, short old, short events)
|
||||
{
|
||||
struct evport_data *evpd = arg;
|
||||
struct evport_data *evpd = base->evbase;
|
||||
struct fd_info *fdi;
|
||||
int i;
|
||||
int associated = 1;
|
||||
|
||||
check_evportop(evpd);
|
||||
|
||||
/*
|
||||
* Delegate, if it's not ours to handle
|
||||
*/
|
||||
if (ev->ev_events & EV_SIGNAL) {
|
||||
return (evsignal_del(ev));
|
||||
}
|
||||
|
||||
if (evpd->ed_nevents < ev->ev_fd) {
|
||||
if (evpd->ed_nevents < fd) {
|
||||
return (-1);
|
||||
}
|
||||
|
||||
for (i = 0; i < EVENTS_PER_GETN; ++i) {
|
||||
if (evpd->ed_pending[i] == ev->ev_fd) {
|
||||
if (evpd->ed_pending[i] == fd) {
|
||||
associated = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fdi = &evpd->ed_fds[ev->ev_fd];
|
||||
if (ev->ev_events & EV_READ)
|
||||
fdi->fdi_revt = NULL;
|
||||
if (ev->ev_events & EV_WRITE)
|
||||
fdi->fdi_wevt = NULL;
|
||||
fdi = &evpd->ed_fds[fd];
|
||||
if (events & EV_READ)
|
||||
fdi->fdi_what &= ~EV_READ;
|
||||
if (events & EV_WRITE)
|
||||
fdi->fdi_what &= ~EV_WRITE;
|
||||
|
||||
if (associated) {
|
||||
if (!FDI_HAS_EVENTS(fdi) &&
|
||||
port_dissociate(evpd->ed_port, PORT_SOURCE_FD,
|
||||
ev->ev_fd) == -1) {
|
||||
port_dissociate(evpd->ed_port, PORT_SOURCE_FD, fd) == -1) {
|
||||
/*
|
||||
* Ignore EBADFD error; the fd could have been closed
|
||||
* before event_del() was called.
|
||||
@ -482,11 +430,11 @@ evport_del(void *arg, struct event *ev)
|
||||
}
|
||||
} else {
|
||||
if (FDI_HAS_EVENTS(fdi)) {
|
||||
return (reassociate(evpd, fdi, ev->ev_fd));
|
||||
return (reassociate(evpd, fdi, fd));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (fdi->fdi_revt == NULL && fdi->fdi_wevt == NULL) {
|
||||
if ((fdi->fdi_what & (EV_READ|EV_WRITE)) == 0) {
|
||||
evpd->ed_pending[i] = -1;
|
||||
}
|
||||
}
|
||||
@ -495,9 +443,9 @@ evport_del(void *arg, struct event *ev)
|
||||
|
||||
|
||||
static void
|
||||
evport_dealloc(struct event_base *base, void *arg)
|
||||
evport_dealloc(struct event_base *base)
|
||||
{
|
||||
struct evport_data *evpd = arg;
|
||||
struct evport_data *evpd = base->evbase;
|
||||
|
||||
evsignal_dealloc(base);
|
||||
|
||||
|
@ -38,7 +38,6 @@ struct evsignal_info {
|
||||
evutil_socket_t ev_signal_pair[2];
|
||||
int ev_signal_added;
|
||||
volatile sig_atomic_t evsignal_caught;
|
||||
struct event_list evsigevents[NSIG];
|
||||
sig_atomic_t evsigcaught[NSIG];
|
||||
#ifdef HAVE_SIGACTION
|
||||
struct sigaction **sh_old;
|
||||
@ -49,8 +48,6 @@ struct evsignal_info {
|
||||
};
|
||||
void evsignal_init(struct event_base *);
|
||||
void evsignal_process(struct event_base *);
|
||||
int evsignal_add(struct event *);
|
||||
int evsignal_del(struct event *);
|
||||
void evsignal_dealloc(struct event_base *);
|
||||
|
||||
#endif /* _EVSIGNAL_H_ */
|
||||
|
http.c (3 lines changed)
@@ -1891,9 +1891,6 @@ evhttp_send_done(struct evhttp_connection *evcon, void *arg)
 	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
 	TAILQ_REMOVE(&evcon->requests, req, next);

-	/* delete possible close detection events */
-	evhttp_connection_stop_detectclose(evcon);
-
 	need_close =
 	    (req->minor == 0 &&
 	     !evhttp_is_connection_keepalive(req->input_headers))||

@ -85,6 +85,11 @@ struct event {
|
||||
|
||||
evutil_socket_t ev_fd;
|
||||
union {
|
||||
/* used for io events */
|
||||
struct {
|
||||
TAILQ_ENTRY (event) (ev_io_next);
|
||||
} ev_io;
|
||||
|
||||
/* used by signal events */
|
||||
struct {
|
||||
TAILQ_ENTRY (event) (ev_signal_next);
|
||||
|
kqueue.c (215 lines changed)
@ -58,13 +58,9 @@
|
||||
#define PTR_TO_UDATA(x) (x)
|
||||
#endif
|
||||
|
||||
#include "event2/event.h"
|
||||
#include "event2/event_struct.h"
|
||||
#include "event-internal.h"
|
||||
#include "log.h"
|
||||
#include "event-internal.h"
|
||||
|
||||
#define EVLIST_X_KQINKERNEL 0x1000
|
||||
#include "evmap.h"
|
||||
|
||||
#define NEVENT 64
|
||||
|
||||
@ -72,18 +68,19 @@ struct kqop {
|
||||
struct kevent *changes;
|
||||
int nchanges;
|
||||
struct kevent *events;
|
||||
struct event_list evsigevents[NSIG];
|
||||
int nevents;
|
||||
int kq;
|
||||
pid_t pid;
|
||||
};
|
||||
|
||||
static void *kq_init (struct event_base *);
|
||||
static int kq_add (void *, struct event *);
|
||||
static int kq_del (void *, struct event *);
|
||||
static int kq_dispatch (struct event_base *, void *, struct timeval *);
|
||||
static int kq_add (struct event_base *, int, short, short);
|
||||
static int kq_del (struct event_base *, int, short, short);
|
||||
static int kq_sig_add (struct event_base *, int, short, short);
|
||||
static int kq_sig_del (struct event_base *, int, short, short);
|
||||
static int kq_dispatch (struct event_base *, struct timeval *);
|
||||
static int kq_insert (struct kqop *, struct kevent *);
|
||||
static void kq_dealloc (struct event_base *, void *);
|
||||
static void kq_dealloc (struct event_base *);
|
||||
|
||||
const struct eventop kqops = {
|
||||
"kqueue",
|
||||
@ -96,10 +93,21 @@ const struct eventop kqops = {
|
||||
EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
|
||||
};
|
||||
|
||||
static const struct eventop kqsigops = {
|
||||
"kqueue_signal",
|
||||
NULL,
|
||||
kq_sig_add,
|
||||
kq_sig_del,
|
||||
NULL,
|
||||
NULL,
|
||||
1 /* need reinit */,
|
||||
0
|
||||
};
|
||||
|
||||
static void *
|
||||
kq_init(struct event_base *base)
|
||||
{
|
||||
int i, kq;
|
||||
int kq;
|
||||
struct kqop *kqueueop;
|
||||
|
||||
if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
|
||||
@ -131,11 +139,6 @@ kq_init(struct event_base *base)
|
||||
}
|
||||
kqueueop->nevents = NEVENT;
|
||||
|
||||
/* we need to keep track of multiple events per signal */
|
||||
for (i = 0; i < NSIG; ++i) {
|
||||
TAILQ_INIT(&kqueueop->evsigevents[i]);
|
||||
}
|
||||
|
||||
/* Check for Mac OS X kqueue bug. */
|
||||
kqueueop->changes[0].ident = -1;
|
||||
kqueueop->changes[0].filter = EVFILT_READ;
|
||||
@ -157,6 +160,9 @@ kq_init(struct event_base *base)
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
base->evsigsel = &kqsigops;
|
||||
base->evsigbase = kqueueop;
|
||||
|
||||
return (kqueueop);
|
||||
}
|
||||
|
||||
@ -212,9 +218,9 @@ kq_sighandler(int sig)
|
||||
}
|
||||
|
||||
static int
|
||||
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
kq_dispatch(struct event_base *base, struct timeval *tv)
|
||||
{
|
||||
struct kqop *kqop = arg;
|
||||
struct kqop *kqop = base->evbase;
|
||||
struct kevent *changes = kqop->changes;
|
||||
struct kevent *events = kqop->events;
|
||||
struct event *ev;
|
||||
@ -276,18 +282,9 @@ kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
|
||||
continue;
|
||||
|
||||
if (events[i].filter == EVFILT_SIGNAL) {
|
||||
struct event_list *head =
|
||||
(struct event_list *)events[i].udata;
|
||||
TAILQ_FOREACH(ev, head, ev_signal_next) {
|
||||
event_active(ev, which, events[i].data);
|
||||
}
|
||||
evmap_signal_active(base, events[i].ident, 1);
|
||||
} else {
|
||||
ev = (struct event *)events[i].udata;
|
||||
|
||||
if (!(ev->ev_events & EV_PERSIST))
|
||||
ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
|
||||
|
||||
event_active(ev, which | (ev->ev_events & EV_ET), 1);
|
||||
evmap_io_active(base, events[i].ident, which | EV_ET);
|
||||
}
|
||||
}
|
||||
|
||||
@ -296,149 +293,75 @@ kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)

static int
kq_add(void *arg, struct event *ev)
kq_add(struct event_base *base, int fd, short old, short events)
{
struct kqop *kqop = arg;
struct kqop *kqop = base->evbase;
struct kevent kev;

if (ev->ev_events & EV_SIGNAL) {
int nsignal = EVENT_SIGNAL(ev);

assert(nsignal >= 0 && nsignal < NSIG);
if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
struct timespec timeout = { 0, 0 };

memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_ADD;
kev.udata = PTR_TO_UDATA(&kqop->evsigevents[nsignal]);

/* Be ready for the signal if it is sent any
* time between now and the next call to
* kq_dispatch. */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);

if (_evsignal_set_handler(ev->ev_base, nsignal,
kq_sighandler) == -1)
return (-1);
}

TAILQ_INSERT_TAIL(&kqop->evsigevents[nsignal], ev,
ev_signal_next);
ev->ev_flags |= EVLIST_X_KQINKERNEL;
return (0);
}

if (ev->ev_events & EV_READ) {
if (events & EV_READ) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.ident = fd;
kev.filter = EVFILT_READ;
#ifdef NOTE_EOF
/* Make it behave like select() and poll() */
kev.fflags = NOTE_EOF;
#endif
kev.flags = EV_ADD;
if (!(ev->ev_events & EV_PERSIST))
kev.flags |= EV_ONESHOT;
if (ev->ev_events & EV_ET)
if (events & EV_ET)
kev.flags |= EV_CLEAR;
kev.udata = PTR_TO_UDATA(ev);

if (kq_insert(kqop, &kev) == -1)
return (-1);

ev->ev_flags |= EVLIST_X_KQINKERNEL;
}

if (ev->ev_events & EV_WRITE) {
if (events & EV_WRITE) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.ident = fd;
kev.filter = EVFILT_WRITE;
kev.flags = EV_ADD;
if (!(ev->ev_events & EV_PERSIST))
kev.flags |= EV_ONESHOT;
if (ev->ev_events & EV_ET)
if (events & EV_ET)
kev.flags |= EV_CLEAR;
kev.udata = PTR_TO_UDATA(ev);

if (kq_insert(kqop, &kev) == -1)
return (-1);

ev->ev_flags |= EVLIST_X_KQINKERNEL;
}

return (0);
}

static int
kq_del(void *arg, struct event *ev)
kq_del(struct event_base *base, int fd, short old, short events)
{
struct kqop *kqop = arg;
struct kqop *kqop = base->evbase;
struct kevent kev;

if (!(ev->ev_flags & EVLIST_X_KQINKERNEL))
return (0);

if (ev->ev_events & EV_SIGNAL) {
int nsignal = EVENT_SIGNAL(ev);
struct timespec timeout = { 0, 0 };

assert(nsignal >= 0 && nsignal < NSIG);
TAILQ_REMOVE(&kqop->evsigevents[nsignal], ev, ev_signal_next);
if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_DELETE;

/* Because we insert signal events
* immediately, we need to delete them
* immediately, too */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);

if (_evsignal_restore_handler(ev->ev_base,
nsignal) == -1)
return (-1);
}

ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
return (0);
}

if (ev->ev_events & EV_READ) {
if (events & EV_READ) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.ident = fd;
kev.filter = EVFILT_READ;
kev.flags = EV_DELETE;

if (kq_insert(kqop, &kev) == -1)
return (-1);

ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
}

if (ev->ev_events & EV_WRITE) {
if (events & EV_WRITE) {
memset(&kev, 0, sizeof(kev));
kev.ident = ev->ev_fd;
kev.ident = fd;
kev.filter = EVFILT_WRITE;
kev.flags = EV_DELETE;

if (kq_insert(kqop, &kev) == -1)
return (-1);

ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
}

return (0);
}

static void
kq_dealloc(struct event_base *base, void *arg)
kq_dealloc(struct event_base *base)
{
struct kqop *kqop = arg;
struct kqop *kqop = base->evbase;

if (kqop->changes)
mm_free(kqop->changes);
@ -449,3 +372,57 @@ kq_dealloc(struct event_base *base, void *arg)
memset(kqop, 0, sizeof(struct kqop));
mm_free(kqop);
}

/* signal handling */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events)
{
struct kqop *kqop = base->evbase;
struct kevent kev;
struct timespec timeout = { 0, 0 };

assert(nsignal >= 0 && nsignal < NSIG);

memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_ADD;

/* Be ready for the signal if it is sent any
* time between now and the next call to
* kq_dispatch. */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);

if (_evsignal_set_handler(base, nsignal, kq_sighandler) == -1)
return (-1);

return (0);
}

static int
kq_sig_del(struct event_base *base, int nsignal, short old, short events)
{
struct kqop *kqop = base->evbase;
struct kevent kev;

struct timespec timeout = { 0, 0 };

assert(nsignal >= 0 && nsignal < NSIG);

memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_DELETE;

/* Because we insert signal events
* immediately, we need to delete them
* immediately, too */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);

if (_evsignal_restore_handler(base, nsignal) == -1)
return (-1);

return (0);
}

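kq_sig_add() and kq_sig_del() now contain only the EVFILT_SIGNAL registration; which events care about a given signal is tracked by the core. For reference, here is a standalone program (BSD/macOS only, not libevent code) doing the same raw kevent calls; the signal's default action still has to be suppressed, which is why kq_sig_add() also installs kq_sighandler.

/* EVFILT_SIGNAL illustration: register interest, raise the signal, read
 * the kevent that reports how many times it was delivered. */
#include <sys/types.h>
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct kevent kev, out;
	int kq = kqueue();

	signal(SIGUSR1, SIG_IGN);   /* suppress the default action */
	EV_SET(&kev, SIGUSR1, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	raise(SIGUSR1);
	if (kevent(kq, NULL, 0, &out, 1, NULL) == 1)
		printf("signal %d delivered %d time(s)\n",
		    (int)out.ident, (int)out.data);
	close(kq);
	return 0;
}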
@ -28,6 +28,7 @@
#define _MIN_HEAP_H_

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/util.h"

typedef struct min_heap

poll.c (128 changed lines)
@ -44,33 +44,28 @@
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
#include "evmap.h"

struct pollop {
int event_count; /* Highest number alloc */
int nfds; /* Size of event_* */
int fd_count; /* Size of idxplus1_by_fd */
struct pollfd *event_set;
struct event **event_r_back;
struct event **event_w_back;
int *idxplus1_by_fd; /* Index into event_set by fd; we add 1 so
* that 0 (which is easy to memset) can mean
* "no entry." */
};

static void *poll_init (struct event_base *);
static int poll_add (void *, struct event *);
static int poll_del (void *, struct event *);
static int poll_dispatch (struct event_base *, void *, struct timeval *);
static void poll_dealloc (struct event_base *, void *);
static int poll_add(struct event_base *, int, short old, short events);
static int poll_del(struct event_base *, int, short old, short events);
static int poll_dispatch (struct event_base *, struct timeval *);
static void poll_dealloc (struct event_base *);

const struct eventop pollops = {
"poll",
@ -108,18 +103,6 @@ poll_check_ok(struct pollop *pop)
if (idx < 0)
continue;
assert(pop->event_set[idx].fd == i);
if (pop->event_set[idx].events & POLLIN) {
ev = pop->event_r_back[idx];
assert(ev);
assert(ev->ev_events & EV_READ);
assert(ev->ev_fd == i);
}
if (pop->event_set[idx].events & POLLOUT) {
ev = pop->event_w_back[idx];
assert(ev);
assert(ev->ev_events & EV_WRITE);
assert(ev->ev_fd == i);
}
}
for (i = 0; i < pop->nfds; ++i) {
struct pollfd *pfd = &pop->event_set[i];
@ -131,10 +114,10 @@ poll_check_ok(struct pollop *pop)
#endif

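The surviving pollop fields show the scheme: there is one pollfd slot per descriptor, found through idxplus1_by_fd, which stores index plus one so that a zero-filled array means "no entry"; read and write interest on the same fd are OR-ed into that single slot. A small standalone sketch of the mapping (names are illustrative, not libevent's):

#include <poll.h>
#include <stdio.h>
#include <string.h>

#define MAX_FD 64

static struct pollfd set[MAX_FD];
static int nfds;
static int idxplus1_by_fd[MAX_FD];   /* 0 means "fd not present" */

/* Merge `events` (POLLIN/POLLOUT) into the single slot for `fd`. */
static void demo_add(int fd, short events)
{
	int i = idxplus1_by_fd[fd] - 1;
	if (i < 0) {                     /* first interest in this fd */
		i = nfds++;
		set[i].fd = fd;
		set[i].events = 0;
		idxplus1_by_fd[fd] = i + 1;
	}
	set[i].events |= events;
}

int main(void)
{
	memset(idxplus1_by_fd, 0, sizeof(idxplus1_by_fd));
	demo_add(5, POLLIN);
	demo_add(5, POLLOUT);            /* same fd: reuses slot 0 */
	printf("nfds=%d, fd=%d, events=0x%x\n", nfds, set[0].fd, set[0].events);
	return 0;
}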
static int
poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
poll_dispatch(struct event_base *base, struct timeval *tv)
{
int res, i, msec = -1, nfds;
struct pollop *pop = arg;
struct pollop *pop = base->evbase;

poll_check_ok(pop);

@ -163,7 +146,6 @@ poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)

for (i = 0; i < nfds; i++) {
int what = pop->event_set[i].revents;
struct event *r_ev = NULL, *w_ev = NULL;
if (!what)
continue;

@ -172,45 +154,33 @@ poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
/* If the file gets closed notify */
if (what & (POLLHUP|POLLERR))
what |= POLLIN|POLLOUT;
if (what & POLLIN) {
if (what & POLLIN)
res |= EV_READ;
r_ev = pop->event_r_back[i];
}
if (what & POLLOUT) {
if (what & POLLOUT)
res |= EV_WRITE;
w_ev = pop->event_w_back[i];
}
if (res == 0)
continue;

if (r_ev && (res & r_ev->ev_events)) {
event_active(r_ev, res & r_ev->ev_events, 1);
}
if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
event_active(w_ev, res & w_ev->ev_events, 1);
}
evmap_io_active(base, pop->event_set[i].fd, res);
}

return (0);
}

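The POLLHUP|POLLERR widening above is what turns a peer close into a readable/writable notification for whoever is registered on the fd. A tiny standalone poll() demonstration of that situation (not libevent code):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	struct pollfd pfd;

	pipe(fds);
	close(fds[1]);                 /* the writer goes away */

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	pfd.revents = 0;
	poll(&pfd, 1, 0);

	/* poll may report POLLHUP rather than POLLIN here, so a dispatcher
	 * widens it before deciding whom to wake up. */
	if (pfd.revents & (POLLHUP | POLLIN))
		printf("reader woken: revents=0x%x\n", pfd.revents);
	close(fds[0]);
	return 0;
}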
static int
poll_add(void *arg, struct event *ev)
poll_add(struct event_base *base, int fd, short old, short events)
{
struct pollop *pop = arg;
struct pollop *pop = base->evbase;
struct pollfd *pfd = NULL;
int i;

if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
if (!(ev->ev_events & (EV_READ|EV_WRITE)))
assert((events & EV_SIGNAL) == 0);
if (!(events & (EV_READ|EV_WRITE)))
return (0);

poll_check_ok(pop);
if (pop->nfds + 1 >= pop->event_count) {
struct pollfd *tmp_event_set;
struct event **tmp_event_r_back;
struct event **tmp_event_w_back;
int tmp_event_count;

if (pop->event_count < 32)
@ -227,35 +197,16 @@ poll_add(void *arg, struct event *ev)
}
pop->event_set = tmp_event_set;

tmp_event_r_back = mm_realloc(pop->event_r_back,
tmp_event_count * sizeof(struct event *));
if (tmp_event_r_back == NULL) {
/* event_set overallocated; that's okay. */
event_warn("realloc");
return (-1);
}
pop->event_r_back = tmp_event_r_back;

tmp_event_w_back = mm_realloc(pop->event_w_back,
tmp_event_count * sizeof(struct event *));
if (tmp_event_w_back == NULL) {
/* event_set and event_r_back overallocated; that's
* okay. */
event_warn("realloc");
return (-1);
}
pop->event_w_back = tmp_event_w_back;

pop->event_count = tmp_event_count;
}
if (ev->ev_fd >= pop->fd_count) {
if (fd >= pop->fd_count) {
int *tmp_idxplus1_by_fd;
int new_count;
if (pop->fd_count < 32)
new_count = 32;
else
new_count = pop->fd_count * 2;
while (new_count <= ev->ev_fd)
while (new_count <= fd)
new_count *= 2;
tmp_idxplus1_by_fd =
mm_realloc(pop->idxplus1_by_fd, new_count * sizeof(int));
@ -269,27 +220,22 @@ poll_add(void *arg, struct event *ev)
pop->fd_count = new_count;
}

i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
i = pop->idxplus1_by_fd[fd] - 1;
if (i >= 0) {
pfd = &pop->event_set[i];
} else {
i = pop->nfds++;
pfd = &pop->event_set[i];
pfd->events = 0;
pfd->fd = ev->ev_fd;
pop->event_w_back[i] = pop->event_r_back[i] = NULL;
pop->idxplus1_by_fd[ev->ev_fd] = i + 1;
pfd->fd = fd;
pop->idxplus1_by_fd[fd] = i + 1;
}

pfd->revents = 0;
if (ev->ev_events & EV_WRITE) {
if (events & EV_WRITE)
pfd->events |= POLLOUT;
pop->event_w_back[i] = ev;
}
if (ev->ev_events & EV_READ) {
if (events & EV_READ)
pfd->events |= POLLIN;
pop->event_r_back[i] = ev;
}
poll_check_ok(pop);

return (0);
@ -300,40 +246,34 @@ poll_add(void *arg, struct event *ev)
*/

static int
poll_del(void *arg, struct event *ev)
poll_del(struct event_base *base, int fd, short old, short events)
{
struct pollop *pop = arg;
struct pollop *pop = base->evbase;
struct pollfd *pfd = NULL;
int i;

if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));

if (!(ev->ev_events & (EV_READ|EV_WRITE)))
assert((events & EV_SIGNAL) == 0);
if (!(events & (EV_READ|EV_WRITE)))
return (0);

poll_check_ok(pop);
i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
i = pop->idxplus1_by_fd[fd] - 1;
if (i < 0)
return (-1);

/* Do we still want to read or write? */
pfd = &pop->event_set[i];
if (ev->ev_events & EV_READ) {
if (events & EV_READ)
pfd->events &= ~POLLIN;
pop->event_r_back[i] = NULL;
}
if (ev->ev_events & EV_WRITE) {
if (events & EV_WRITE)
pfd->events &= ~POLLOUT;
pop->event_w_back[i] = NULL;
}
poll_check_ok(pop);
if (pfd->events)
/* Another event cares about that fd. */
return (0);

/* Okay, so we aren't interested in that fd anymore. */
pop->idxplus1_by_fd[ev->ev_fd] = 0;
pop->idxplus1_by_fd[fd] = 0;

--pop->nfds;
if (i != pop->nfds) {
@ -343,8 +283,6 @@ poll_del(void *arg, struct event *ev)
*/
memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
sizeof(struct pollfd));
pop->event_r_back[i] = pop->event_r_back[pop->nfds];
pop->event_w_back[i] = pop->event_w_back[pop->nfds];
pop->idxplus1_by_fd[pop->event_set[i].fd] = i + 1;
}

@ -353,17 +291,13 @@ poll_del(void *arg, struct event *ev)
}

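When the last reader or writer on an fd is removed, poll_del() keeps the pollfd array dense by copying the final entry into the vacated slot and repointing idxplus1_by_fd for the moved fd, exactly as in the memcpy above. A standalone sketch of that swap-with-last compaction, with made-up names:

#include <poll.h>
#include <stdio.h>

static struct pollfd set[8] = {
	{ .fd = 3, .events = POLLIN },
	{ .fd = 7, .events = POLLOUT },
	{ .fd = 9, .events = POLLIN },
};
static int nfds = 3;
static int idxplus1_by_fd[16] = { [3] = 1, [7] = 2, [9] = 3 };

/* Remove the slot for `fd`, keeping set[0..nfds) contiguous. */
static void demo_remove(int fd)
{
	int i = idxplus1_by_fd[fd] - 1;
	if (i < 0)
		return;
	idxplus1_by_fd[fd] = 0;
	--nfds;
	if (i != nfds) {
		set[i] = set[nfds];                /* move last entry into the hole */
		idxplus1_by_fd[set[i].fd] = i + 1; /* fix its reverse mapping */
	}
}

int main(void)
{
	demo_remove(7);
	printf("nfds=%d, slot 1 now holds fd=%d\n", nfds, set[1].fd);
	return 0;
}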
static void
poll_dealloc(struct event_base *base, void *arg)
poll_dealloc(struct event_base *base)
{
struct pollop *pop = arg;
struct pollop *pop = base->evbase;

evsignal_dealloc(base);
if (pop->event_set)
mm_free(pop->event_set);
if (pop->event_r_back)
mm_free(pop->event_r_back);
if (pop->event_w_back)
mm_free(pop->event_w_back);
if (pop->idxplus1_by_fd)
mm_free(pop->idxplus1_by_fd);

select.c (127 changed lines)
@ -46,15 +46,12 @@
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef CHECK_INVARIANTS
#include <assert.h>
#endif

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event-internal.h"
#include "evsignal.h"
#include "log.h"
#include "evmap.h"

#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
@ -67,15 +64,13 @@ struct selectop {
fd_set *event_writeset_in;
fd_set *event_readset_out;
fd_set *event_writeset_out;
struct event **event_r_by_fd;
struct event **event_w_by_fd;
};

static void *select_init (struct event_base *);
static int select_add (void *, struct event *);
static int select_del (void *, struct event *);
static int select_dispatch (struct event_base *, void *, struct timeval *);
static void select_dealloc (struct event_base *, void *);
static int select_add(struct event_base *, int, short old, short events);
static int select_del(struct event_base *, int, short old, short events);
static int select_dispatch (struct event_base *, struct timeval *);
static void select_dealloc (struct event_base *);

const struct eventop selectops = {
"select",
@ -109,34 +104,17 @@ select_init(struct event_base *base)
static void
check_selectop(struct selectop *sop)
{
int i;
for (i = 0; i <= sop->event_fds; ++i) {
if (FD_ISSET(i, sop->event_readset_in)) {
assert(sop->event_r_by_fd[i]);
assert(sop->event_r_by_fd[i]->ev_events & EV_READ);
assert(sop->event_r_by_fd[i]->ev_fd == i);
} else {
assert(! sop->event_r_by_fd[i]);
}
if (FD_ISSET(i, sop->event_writeset_in)) {
assert(sop->event_w_by_fd[i]);
assert(sop->event_w_by_fd[i]->ev_events & EV_WRITE);
assert(sop->event_w_by_fd[i]->ev_fd == i);
} else {
assert(! sop->event_w_by_fd[i]);
}
}

/* nothing to be done here */
}
#else
#define check_selectop(sop) do { (void) sop; } while (0)
#endif

static int
select_dispatch(struct event_base *base, void *arg, struct timeval *tv)
select_dispatch(struct event_base *base, struct timeval *tv)
{
int res, i;
struct selectop *sop = arg;
struct selectop *sop = base->evbase;

check_selectop(sop);

@ -166,22 +144,16 @@ select_dispatch(struct event_base *base, void *arg, struct timeval *tv)

check_selectop(sop);
for (i = 0; i <= sop->event_fds; ++i) {
struct event *r_ev = NULL, *w_ev = NULL;
res = 0;
if (FD_ISSET(i, sop->event_readset_out)) {
r_ev = sop->event_r_by_fd[i];
if (FD_ISSET(i, sop->event_readset_out))
res |= EV_READ;
}
if (FD_ISSET(i, sop->event_writeset_out)) {
w_ev = sop->event_w_by_fd[i];
if (FD_ISSET(i, sop->event_writeset_out))
res |= EV_WRITE;
}
if (r_ev && (res & r_ev->ev_events)) {
event_active(r_ev, res & r_ev->ev_events, 1);
}
if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
event_active(w_ev, res & w_ev->ev_events, 1);
}

if (res == 0)
continue;

evmap_io_active(base, i, res);
}
check_selectop(sop);

@ -198,8 +170,6 @@ select_resize(struct selectop *sop, int fdsz)
fd_set *writeset_in = NULL;
fd_set *readset_out = NULL;
fd_set *writeset_out = NULL;
struct event **r_by_fd = NULL;
struct event **w_by_fd = NULL;

n_events = (fdsz/sizeof(fd_mask)) * NFDBITS;
n_events_old = (sop->event_fdsz/sizeof(fd_mask)) * NFDBITS;
@ -219,23 +189,11 @@ select_resize(struct selectop *sop, int fdsz)
if ((writeset_out = mm_realloc(sop->event_writeset_out, fdsz)) == NULL)
goto error;
sop->event_writeset_out = writeset_out;
if ((r_by_fd = mm_realloc(sop->event_r_by_fd,
n_events*sizeof(struct event*))) == NULL)
goto error;
sop->event_r_by_fd = r_by_fd;
if ((w_by_fd = mm_realloc(sop->event_w_by_fd,
n_events * sizeof(struct event*))) == NULL)
goto error;
sop->event_w_by_fd = w_by_fd;

memset((char *)sop->event_readset_in + sop->event_fdsz, 0,
fdsz - sop->event_fdsz);
memset((char *)sop->event_writeset_in + sop->event_fdsz, 0,
fdsz - sop->event_fdsz);
memset(sop->event_r_by_fd + n_events_old, 0,
(n_events-n_events_old) * sizeof(struct event*));
memset(sop->event_w_by_fd + n_events_old, 0,
(n_events-n_events_old) * sizeof(struct event*));

sop->event_fdsz = fdsz;
check_selectop(sop);
@ -249,26 +207,24 @@ select_resize(struct selectop *sop, int fdsz)

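select_resize() sizes the heap-allocated fd_sets in whole fd_mask words: howmany(fd + 1, NFDBITS) * sizeof(fd_mask) is the byte count needed to cover a descriptor, and fdsz is doubled until it reaches that (see select_add below). A small standalone illustration of the arithmetic, not libevent code:

#include <sys/select.h>
#include <stdio.h>

#ifndef howmany
#define howmany(x, y) (((x)+((y)-1))/(y))
#endif

int main(void)
{
	int fd = 200;                  /* highest descriptor we must cover */
	size_t needed = howmany(fd + 1, NFDBITS) * sizeof(fd_mask);
	size_t fdsz = sizeof(fd_mask); /* start with one word */

	while (fdsz < needed)
		fdsz *= 2;             /* grow geometrically, as select_resize does */

	printf("NFDBITS=%d, need %zu bytes, allocate %zu bytes\n",
	    (int)NFDBITS, needed, fdsz);
	return 0;
}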
static int
select_add(void *arg, struct event *ev)
select_add(struct event_base *base, int fd, short old, short events)
{
struct selectop *sop = arg;

if (ev->ev_events & EV_SIGNAL)
return (evsignal_add(ev));
struct selectop *sop = base->evbase;

assert((events & EV_SIGNAL) == 0);
check_selectop(sop);
/*
* Keep track of the highest fd, so that we can calculate the size
* of the fd_sets for select(2)
*/
if (sop->event_fds < ev->ev_fd) {
if (sop->event_fds < fd) {
int fdsz = sop->event_fdsz;

if (fdsz < sizeof(fd_mask))
fdsz = sizeof(fd_mask);

while (fdsz <
(howmany(ev->ev_fd + 1, NFDBITS) * sizeof(fd_mask)))
(howmany(fd + 1, NFDBITS) * sizeof(fd_mask)))
fdsz *= 2;

if (fdsz != sop->event_fdsz) {
@ -278,17 +234,13 @@ select_add(void *arg, struct event *ev)
}
}

sop->event_fds = ev->ev_fd;
sop->event_fds = fd;
}

if (ev->ev_events & EV_READ) {
FD_SET(ev->ev_fd, sop->event_readset_in);
sop->event_r_by_fd[ev->ev_fd] = ev;
}
if (ev->ev_events & EV_WRITE) {
FD_SET(ev->ev_fd, sop->event_writeset_in);
sop->event_w_by_fd[ev->ev_fd] = ev;
}
if (events & EV_READ)
FD_SET(fd, sop->event_readset_in);
if (events & EV_WRITE)
FD_SET(fd, sop->event_writeset_in);
check_selectop(sop);

return (0);
@ -299,37 +251,32 @@ select_add(void *arg, struct event *ev)
*/

static int
select_del(void *arg, struct event *ev)
select_del(struct event_base *base, int fd, short old, short events)
{
struct selectop *sop = arg;
struct selectop *sop = base->evbase;

assert((events & EV_SIGNAL) == 0);
check_selectop(sop);
if (ev->ev_events & EV_SIGNAL)
return (evsignal_del(ev));

if (sop->event_fds < ev->ev_fd) {
if (sop->event_fds < fd) {
check_selectop(sop);
return (0);
}

if (ev->ev_events & EV_READ) {
FD_CLR(ev->ev_fd, sop->event_readset_in);
sop->event_r_by_fd[ev->ev_fd] = NULL;
}
if (events & EV_READ)
FD_CLR(fd, sop->event_readset_in);

if (ev->ev_events & EV_WRITE) {
FD_CLR(ev->ev_fd, sop->event_writeset_in);
sop->event_w_by_fd[ev->ev_fd] = NULL;
}
if (events & EV_WRITE)
FD_CLR(fd, sop->event_writeset_in);

check_selectop(sop);
return (0);
}

static void
select_dealloc(struct event_base *base, void *arg)
select_dealloc(struct event_base *base)
{
struct selectop *sop = arg;
struct selectop *sop = base->evbase;

evsignal_dealloc(base);
if (sop->event_readset_in)
@ -340,10 +287,6 @@ select_dealloc(struct event_base *base, void *arg)
mm_free(sop->event_readset_out);
if (sop->event_writeset_out)
mm_free(sop->event_writeset_out);
if (sop->event_r_by_fd)
mm_free(sop->event_r_by_fd);
if (sop->event_w_by_fd)
mm_free(sop->event_w_by_fd);

memset(sop, 0, sizeof(struct selectop));
mm_free(sop);

signal.c (84 changed lines)
@ -63,6 +63,20 @@
#include "event2/util.h"
#include "evsignal.h"
#include "log.h"
#include "evmap.h"

static int evsignal_add(struct event_base *, int, short, short);
static int evsignal_del(struct event_base *, int, short, short);

static const struct eventop evsigops = {
"signal",
NULL,
evsignal_add,
evsignal_del,
NULL,
NULL,
0, 0
};

struct event_base *evsignal_base = NULL;

@ -97,8 +111,6 @@ evsignal_cb(evutil_socket_t fd, short what, void *arg)
void
evsignal_init(struct event_base *base)
{
int i;

/*
* Our signal handler is going to write to one end of the socket
* pair to wake up our event loop. The event loop then scans for
@ -114,9 +126,6 @@ evsignal_init(struct event_base *base)
base->sig.sh_old_max = 0;
base->sig.evsignal_caught = 0;
memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);
/* initialize the queues for all events */
for (i = 0; i < NSIG; ++i)
TAILQ_INIT(&base->sig.evsigevents[i]);

evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);

@ -124,6 +133,9 @@ evsignal_init(struct event_base *base)
EV_READ | EV_PERSIST, evsignal_cb, &base->sig.ev_signal);

base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;

base->evsigsel = &evsigops;
base->evsigbase = &base->sig;
}

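evsignal_init() wires up the usual self-pipe pattern: the signal handler only writes a byte to one end of a socketpair, and ev_signal, an internal read event on the other end, wakes the loop so the signal can be processed outside handler context. A standalone sketch of the pattern (error handling omitted; not libevent's code):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

static int sigpair[2];

/* Async-signal-safe handler: just wake the loop up. */
static void wakeup_handler(int sig)
{
	char byte = (char)sig;
	write(sigpair[1], &byte, 1);
}

int main(void)
{
	char byte;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sigpair);
	signal(SIGUSR1, wakeup_handler);

	raise(SIGUSR1);

	/* In libevent this read happens from evsignal_cb() when the loop
	 * reports sigpair[0] readable; here we just read directly. */
	if (read(sigpair[0], &byte, 1) == 1)
		printf("woken by signal %d\n", (int)byte);
	return 0;
}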
/* Helper: set the signal handler for evsignal to handler in base, so that
@ -192,34 +204,26 @@ _evsignal_set_handler(struct event_base *base,
return (0);
}

int
evsignal_add(struct event *ev)
static int
evsignal_add(struct event_base *base, int evsignal, short old, short events)
{
int evsignal;
struct event_base *base = ev->ev_base;
struct evsignal_info *sig = &ev->ev_base->sig;
struct evsignal_info *sig = &base->sig;

evsignal = EVENT_SIGNAL(ev);
assert(evsignal >= 0 && evsignal < NSIG);
if (TAILQ_EMPTY(&sig->evsigevents[evsignal])) {
event_debug(("%s: %p: changing signal handler", __func__, ev));
if (_evsignal_set_handler(
base, evsignal, evsignal_handler) == -1)

event_debug(("%s: %p: changing signal handler", __func__, ev));
if (_evsignal_set_handler(base, evsignal, evsignal_handler) == -1)
return (-1);

/* catch signals if they happen quickly */
evsignal_base = base;

if (!sig->ev_signal_added) {
if (event_add(&sig->ev_signal, NULL))
return (-1);

/* catch signals if they happen quickly */
evsignal_base = base;

if (!sig->ev_signal_added) {
if (event_add(&sig->ev_signal, NULL))
return (-1);
sig->ev_signal_added = 1;
}
sig->ev_signal_added = 1;
}

/* multiple events may listen to the same signal */
TAILQ_INSERT_TAIL(&sig->evsigevents[evsignal], ev, ev_signal_next);

return (0);
}

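After this change evsignal_add() only installs the handler and makes sure the internal wakeup event is pending; which user events want the signal is recorded by the core, so several events on the same signal all fire. A minimal usage sketch against the classic event_init()/event_set() API (assumes linking with -levent; both callbacks run on one SIGUSR1):

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <event.h>

static void cb1(int sig, short what, void *arg) { printf("first handler, signal %d\n", sig); }
static void cb2(int sig, short what, void *arg) { printf("second handler, signal %d\n", sig); }

int main(void)
{
	struct event one, two;
	struct timeval tv = { 0, 100000 };

	event_init();
	event_set(&one, SIGUSR1, EV_SIGNAL, cb1, NULL);
	event_set(&two, SIGUSR1, EV_SIGNAL, cb2, NULL);
	event_add(&one, NULL);
	event_add(&two, NULL);

	raise(SIGUSR1);
	event_loopexit(&tv);   /* stop the loop shortly after dispatching */
	event_dispatch();
	return 0;
}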
@ -254,24 +258,14 @@ _evsignal_restore_handler(struct event_base *base, int evsignal)
return ret;
}

int
evsignal_del(struct event *ev)
static int
evsignal_del(struct event_base *base, int evsignal, short old, short events)
{
struct event_base *base = ev->ev_base;
struct evsignal_info *sig = &base->sig;
int evsignal = EVENT_SIGNAL(ev);

assert(evsignal >= 0 && evsignal < NSIG);

/* multiple events may listen to the same signal */
TAILQ_REMOVE(&sig->evsigevents[evsignal], ev, ev_signal_next);

if (!TAILQ_EMPTY(&sig->evsigevents[evsignal]))
return (0);

event_debug(("%s: %p: restoring signal handler", __func__, ev));

return (_evsignal_restore_handler(ev->ev_base, EVENT_SIGNAL(ev)));
return (_evsignal_restore_handler(base, evsignal));
}

static void
@ -308,7 +302,6 @@ void
evsignal_process(struct event_base *base)
{
struct evsignal_info *sig = &base->sig;
struct event *ev, *next_ev;
sig_atomic_t ncalls;
int i;

@ -318,14 +311,7 @@ evsignal_process(struct event_base *base)
if (ncalls == 0)
continue;

for (ev = TAILQ_FIRST(&sig->evsigevents[i]);
ev != NULL; ev = next_ev) {
next_ev = TAILQ_NEXT(ev, ev_signal_next);
if (!(ev->ev_events & EV_PERSIST))
event_del(ev);
event_active(ev, EV_SIGNAL, ncalls);
}

evmap_signal_active(base, i, ncalls);
sig->evsigcaught[i] = 0;
}
}

@ -329,6 +329,35 @@ test_simplewrite(void)
cleanup_test();
}

static void
simpleread_multiple_cb(int fd, short event, void *arg)
{
if (++called == 2)
test_ok = 1;
}

static void
test_simpleread_multiple(void)
{
struct event one, two;

/* Very simple read test */
setup_test("Simple read to multiple evens: ");

write(pair[0], TEST1, strlen(TEST1)+1);
shutdown(pair[0], SHUT_WR);

event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL);
if (event_add(&one, NULL) == -1)
exit(1);
event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL);
if (event_add(&two, NULL) == -1)
exit(1);
event_dispatch();

cleanup_test();
}

static void
test_multiple(void)
{
@ -2307,19 +2336,8 @@ main (int argc, char **argv)
regress_zlib();
#endif

http_suite();

#ifndef WIN32
rpc_suite();
#endif

dns_suite();

#ifndef WIN32
test_fork();
#endif

test_simpleread();
test_simpleread_multiple();

test_simplewrite();

@ -2331,6 +2349,10 @@ main (int argc, char **argv)

test_simpletimeout();

#ifndef WIN32
test_fork();
#endif

#ifndef WIN32
test_edgetriggered();
test_simplesignal();
@ -2340,6 +2362,14 @@ main (int argc, char **argv)
test_loopexit();
test_loopbreak();

http_suite();

#ifndef WIN32
rpc_suite();
#endif

/* XXX(niels): renable dns_suite(); */

test_loopexit_multiple();

test_nonpersist_readd();

@ -1373,7 +1373,7 @@ http_close_detection(int with_delay)
http = http_setup(&port, NULL);

/* 2 second timeout */
evhttp_set_timeout(http, 2);
evhttp_set_timeout(http, 1);

evcon = evhttp_connection_new("127.0.0.1", port);
if (evcon == NULL) {
