mirror of https://github.com/cuberite/libevent.git

Try to comment some of the event code more

parent 78772c3503
commit cdd4c4905b
@@ -50,12 +50,15 @@ struct deferred_cb {
	void *arg;
};

/** A deferred_cb_queue is a list of deferred_cb that we can add to and run. */
struct deferred_cb_queue {
	/** Lock used to protect the queue. */
	void *lock;

	/** How many entries are in the queue? */
	int active_count;

	/** Function called when adding to the queue from another thread. */
	void (*notify_fn)(struct deferred_cb_queue *, void *);
	void *notify_arg;

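A minimal usage sketch for this queue, assuming the event_deferred_cb_init()/event_deferred_cb_schedule() helpers that this commit documents further down; the my_* names are hypothetical:

	/* Runs later, in the event loop's thread. */
	static void my_deferred_fn(struct deferred_cb *cb, void *arg)
	{
		/* ... deferred work ... */
	}

	/* Producer side, possibly on another thread: schedule a work item.
	 * event_deferred_cb_schedule() takes the lock, appends the entry,
	 * increments active_count, and then calls notify_fn (if set) so the
	 * loop thread wakes up and runs it. */
	struct deferred_cb my_cb;
	event_deferred_cb_init(&my_cb, my_deferred_fn, my_arg);
	event_deferred_cb_schedule(queue, &my_cb);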
@@ -43,7 +43,6 @@ extern "C" {

/* mutually exclusive */
#define ev_signal_next	_ev.ev_signal.ev_signal_next

#define ev_io_next	_ev.ev_io.ev_io_next
#define ev_io_timeout	_ev.ev_io.ev_timeout

@@ -51,7 +50,7 @@ extern "C" {
#define ev_ncalls	_ev.ev_signal.ev_ncalls
#define ev_pncalls	_ev.ev_signal.ev_pncalls

/* Possible event closures. */
/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2
@@ -60,25 +59,41 @@ extern "C" {
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Set up an event_base to use this backend.*/
	/** Function to set up an event_base to use this backend. It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it. The returned pointer will get
	 * stored by event_init into the event_base.evbase field. On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd. */
	/** Enable reading/writing on a given fd or signal. 'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
	 * were enabled on this fd previously. 'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo field below. It will be set to 0 the first time the fd is
	 * added. The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Disable reading/writing on a given fd. */
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop. It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).
	    active event (usually via event_io_active or such). It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Set if we need to reinitialize the event base after we fork. */
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features */
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of extra information we should record for each fd that
	    has one or more active events.
	/** Length of the extra information we should record for each fd that
	    has one or more active events. This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
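A sketch of what a backend implementing this interface can look like. This hypothetical do-nothing "null" backend is not part of the diff; only the field order and function signatures come from the struct above:

	static void *null_init(struct event_base *base)
	{
		static int dummy;
		return &dummy;	/* any non-NULL token; stored in base->evbase */
	}
	static int null_add(struct event_base *base, evutil_socket_t fd,
	    short old, short events, void *fdinfo)
	{
		return 0;	/* pretend fd is now watched for 'events' */
	}
	static int null_del(struct event_base *base, evutil_socket_t fd,
	    short old, short events, void *fdinfo)
	{
		return 0;
	}
	static int null_dispatch(struct event_base *base, struct timeval *tv)
	{
		return 0;	/* a real backend polls here and calls event_active() */
	}
	static void null_dealloc(struct event_base *base)
	{
	}
	const struct eventop nullops = {
		"null",
		null_init,
		null_add,
		null_del,
		null_dispatch,
		null_dealloc,
		0,	/* need_reinit */
		0,	/* features */
		0	/* fdinfo_len */
	};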
@@ -102,22 +117,38 @@ HT_HEAD(event_io_map, event_map_entry);
#endif

/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
   defined, this is also used as event_io_map, to map fds to a list of events.
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};

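A lookup sketch for this structure (a hypothetical helper; the cast reflects the comment above, which says the array holds evmap_signal pointers when this struct is used as a signal map):

	/* Return the evmap_signal for signal 'sig', or NULL if none was added. */
	static struct evmap_signal *
	lookup_signal(struct event_signal_map *map, int sig)
	{
		if (sig < 0 || sig >= map->nentries)
			return NULL;
		return (struct evmap_signal *)map->entries[sig];
	}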
/* A list of events waiting on a given 'common' timeout value. Ordinarily,
|
||||
* events waiting for a timeout wait on a minheap. Sometimes, however, a
|
||||
* queue can be faster.
|
||||
**/
|
||||
struct common_timeout_list {
|
||||
/* List of events currently waiting in the queue. */
|
||||
struct event_list events;
|
||||
/* 'magic' timeval used to indicate the duration of events in this
|
||||
* queue. */
|
||||
struct timeval duration;
|
||||
/* Event that triggers whenever one of the events in the queue is
|
||||
* ready to activate */
|
||||
struct event timeout_event;
|
||||
/* The event_base that this timeout list is part of */
|
||||
struct event_base *base;
|
||||
};
|
||||
|
||||
struct event_change;
|
||||
|
||||
/* List of 'changes' since the last call to eventop.dispatch. Only maintained
|
||||
* if the backend is using changesets. */
|
||||
struct event_changelist {
|
||||
struct event_change *changes;
|
||||
int n_changes;
|
||||
@ -125,6 +156,7 @@ struct event_changelist {
|
||||
};
|
||||
|
||||
#ifndef _EVENT_DISABLE_DEBUG_MODE
|
||||
/* Global internal flag: set to one if debug mode is on. */
|
||||
extern int _event_debug_mode_on;
|
||||
#endif
|
||||
|
||||
@@ -139,17 +171,24 @@ struct event_base {
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/* signal handling info */
	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Pointer to signal backend-specific data */
	void *evsigbase;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	int event_count;	/**< counts number of total events */
	int event_count_active;	/**< counts number of active events */
	/** Number of total events added to this event_base */
	int event_count;
	/** Number of total events active in this event_base */
	int event_count_active;

	int event_gotterm;	/**< Set to terminate loop once done
				 * processing events. */
	int event_break;	/**< Set to exit loop immediately */
	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;

	/* Active event management. */
	/** An array of nactivequeues queues for active events (ones that
@@ -157,31 +196,42 @@ struct event_base {
	 * priority numbers are more important, and stall higher ones.
	 */
	struct event_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** The event whose callback is executing right now */
	struct event *current_event;

	/** List of deferred_cb that are active. We run these after the active
	 * events. */
	struct deferred_cb_queue defer_queue;

	/** Mapping from file descriptors to enabled events */
	struct event_io_map io;
	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled events. */
	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** All events that have been enabled (added) in this event_base */
	struct event_list eventqueue;

	/** Stored timeval; used to detect when time is running backwards. */
	struct timeval event_tv;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday too often. */
	struct timeval tv_cache;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
@@ -196,14 +246,21 @@ struct event_base {
#endif

#ifdef WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	/* Notify main thread to wake up break, etc. */
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	int th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);
};

event.c (106 changed lines)
@@ -90,7 +90,7 @@ extern const struct eventop devpollops;
extern const struct eventop win32ops;
#endif

/* In order of preference */
/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef _EVENT_HAVE_EVENT_PORTS
	&evportops,
@@ -116,8 +116,10 @@ static const struct eventop *eventops[] = {
	NULL
};

/* Global state */
/* Global state; deprecated */
struct event_base *current_base = NULL;

/* Global state */
extern struct event_base *evsig_base;
static int use_monotonic;

@@ -185,6 +187,7 @@ HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free);

/* Macro: record that ev is now setup (that is, ready for an add) */
#define _event_debug_note_setup(ev) do { \
	if (_event_debug_mode_on) { \
		struct event_debug_entry *dent,find; \
@@ -205,6 +208,7 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
		EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
	} \
	} while (0)
/* Macro: record that ev is no longer setup */
#define _event_debug_note_teardown(ev) do { \
	if (_event_debug_mode_on) { \
		struct event_debug_entry *dent,find; \
@@ -216,6 +220,7 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
		EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
	} \
	} while (0)
/* Macro: record that ev is now added */
#define _event_debug_note_add(ev) do { \
	if (_event_debug_mode_on) { \
		struct event_debug_entry *dent,find; \
@@ -232,6 +237,7 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
		EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
	} \
	} while (0)
/* Macro: record that ev is no longer added */
#define _event_debug_note_del(ev) do { \
	if (_event_debug_mode_on) { \
		struct event_debug_entry *dent,find; \
@@ -248,6 +254,7 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
		EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
	} \
	} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define _event_debug_assert_is_setup(ev) do { \
	if (_event_debug_mode_on) { \
		struct event_debug_entry *dent,find; \
@@ -262,7 +269,8 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
		EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
	} \
	} while (0)

/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again) */
#define _event_debug_assert_not_added(ev) do { \
	if (_event_debug_mode_on) { \
		struct event_debug_entry *dent,find; \
@@ -296,6 +304,8 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
#define EVENT_BASE_ASSERT_LOCKED(base) \
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* The first time this function is called, it sets use_monotonic to 1
 * if we have a clock function that supports monotonic time */
static void
detect_monotonic(void)
{
@@ -313,6 +323,11 @@ detect_monotonic(void)
#endif
}
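The body of detect_monotonic() is elided by the hunk above; a sketch consistent with its comment, assuming the usual _EVENT_HAVE_CLOCK_GETTIME configure macro:

	static void
	detect_monotonic(void)
	{
	#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
		struct timespec ts;
		/* Probe the monotonic clock once; prefer it if it works. */
		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
			use_monotonic = 1;
	#endif
	}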

/** Set 'tp' to the current time according to 'base'. We must hold the lock
 * on 'base'. If there is a cached time, return it. Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
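	/* (The body falls outside this hunk; a sketch consistent with the
	 * comment above — cached value first, then the monotonic clock,
	 * then gettimeofday.) */
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec ts;
		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);
		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif
	return (evutil_gettimeofday(tp, NULL));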
@@ -355,12 +370,14 @@ event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
	return r;
}

/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
@@ -396,6 +413,8 @@ event_base_new(void)
	return base;
}

/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
@@ -411,6 +430,7 @@ event_config_is_avoided_method(const struct event_config *cfg,
	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
@@ -420,6 +440,8 @@ event_is_method_disabled(const char *name)
	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = toupper(environment[i]);
	/* Note that evutil_getenv() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv(environment) != NULL);
}

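A usage sketch of the environment check above (not part of the diff): the backend name is upper-cased and prefixed with EVENT_NO, so each backend gets its own kill switch.

	/* Make any event_base configured after this point skip epoll,
	 * exactly as if EVENT_NOEPOLL=1 were set when the process started. */
	setenv("EVENT_NOEPOLL", "1", 1);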
@@ -436,12 +458,13 @@ event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
	TAILQ_INIT(&cb->deferred_cb_list);
}

/** Helper for the deferred_cb queue: wake up the event base. */
static void
notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
{
	struct event_base *base = baseptr;
	if (!EVBASE_IN_THREAD(base))
		evthread_notify_base(base);
	evthread_notify_base(base);
}

struct deferred_cb_queue *
@@ -899,6 +922,7 @@ event_base_priority_init(struct event_base *base, int npriorities)
	return (0);
}

/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
@@ -906,6 +930,7 @@ event_haveevents(struct event_base *base)
	return (base->event_count > 0);
}

/* "closure" function called when processing active signal events */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
@@ -927,6 +952,20 @@ event_signal_closure(struct event_base *base, struct event *ev)
	}
}

/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap. This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" to encode these common
 * timeouts. Searching through a list to see if every timeout is common could
 * also get inefficient. Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK	0x000fffff
#define COMMON_TIMEOUT_IDX_MASK	0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
@@ -936,6 +975,7 @@ event_signal_closure(struct event_base *base, struct event *ev)
#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
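A worked example of the encoding (the 0x50000000 magic value is an assumption; this hunk only shows the index and microsecond masks):

	struct timeval tv;
	tv.tv_sec = 10;
	/* magic | (queue index 3) | 250000 real microseconds */
	tv.tv_usec = 0x50000000 | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 250000;
	/* COMMON_TIMEOUT_IDX(&tv) == 3, the slot in common_timeout_queues */
	/* tv.tv_usec & MICROSECONDS_MASK == 250000, the actual microseconds */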

/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
@@ -956,12 +996,15 @@ is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
	    (tv2->tv_usec & ~MICROSECONDS_MASK);
}

/** Requires that 'tv' is a common timeout. Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}

#if 0
static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
@@ -971,7 +1014,10 @@ common_timeout_ok(const struct timeval *tv,
	return tv->tv_sec == expect->tv_sec &&
	    tv->tv_usec == expect->tv_usec;
}
#endif

/* Add the timeout for the first event in given common timeout list to the
 * event_base's minheap. */
static void
common_timeout_schedule(struct common_timeout_list *ctl,
    const struct timeval *now, struct event *head)
@@ -981,6 +1027,9 @@ common_timeout_schedule(struct common_timeout_list *ctl,
	event_add_internal(&ctl->timeout_event, &timeout, 1);
}

/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
@@ -1080,6 +1129,7 @@ done:
	return result;
}

/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
@@ -1182,6 +1232,11 @@ event_process_active_single_queue(struct event_base *base,
	return count;
}

/*
   Process all the deferred_cb entries in 'queue'. If *breakptr becomes set to
   1, stop. Requires that we start out holding the lock on 'queue'; releases
   the lock around 'queue' for each deferred_cb we process.
 */
static int
event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
{
@@ -1257,6 +1312,8 @@ event_base_get_method(const struct event_base *base)
	return (base->evsel->name);
}

/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
@@ -1264,7 +1321,6 @@ event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
@@ -1420,7 +1476,6 @@ done:
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

@@ -1428,8 +1483,8 @@ struct event_once {
	void *arg;
};

/* One-time callback, it deletes itself */

/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
@@ -1744,6 +1799,10 @@ event_add(struct event *ev, const struct timeval *tv)
	return (res);
}

/* Helper callback: wake an event_base from another thread. This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
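	/* (Body elided from this hunk; a sketch of the write-a-byte wakeup
	 * the comment describes. The exact error handling is an assumption.) */
	char buf[1];
	int r;
	buf[0] = (char) 0;
#ifdef WIN32
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	return (r < 0) ? -1 : 0;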
@@ -1759,6 +1818,8 @@ evthread_notify_base_default(struct event_base *base)
}

#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
/* Helper callback: wake an event_base from another thread. This version
 * assumes that you have a working eventfd() implementation. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
@@ -1772,6 +1833,9 @@ evthread_notify_base_eventfd(struct event_base *base)
}
#endif

/** Tell the thread currently running the event_loop for base (if any) that it
 * needs to stop waiting in its dispatch function (if it is) and process all
 * active events and deferred callbacks (if there are any). */
static int
evthread_notify_base(struct event_base *base)
{
@@ -1780,6 +1844,10 @@ evthread_notify_base(struct event_base *base)
	return base->th_notify_fn(base);
}

/* Implementation function to add an event. Works just like event_add,
 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time */
static inline int
event_add_internal(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
@@ -2089,6 +2157,8 @@ event_deferred_cb_schedule(struct deferred_cb_queue *queue,
		cb->queued = 1;
		TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
		++queue->active_count;
		/* XXXX Can we get away with doing this only when adding
		 * the first active deferred_cb to the queue? */
		if (queue->notify_fn)
			queue->notify_fn(queue, queue->notify_arg);
	}
@@ -2133,11 +2203,11 @@ out:
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked. Not needed when using clock
 * monotonic.
 * Determines if the time is running backwards by comparing the current time
 * against the last time we checked. Not needed when using clock monotonic.
 * If time is running backwards, we adjust the firing time of every event by
 * the amount that time seems to have jumped.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
@@ -2164,7 +2234,7 @@ timeout_correct(struct event_base *base, struct timeval *tv)

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 * the minheap property, because we change every element.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
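	/* (Continuation elided from this hunk; a sketch of the adjustment the
	 * comment describes — 'off' stands for the backward jump computed
	 * earlier in timeout_correct(), outside the shown lines.) */
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}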
@@ -2190,6 +2260,7 @@ timeout_correct(struct event_base *base, struct timeval *tv)
	base->event_tv = *tv;
}

/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
@@ -2216,6 +2287,7 @@ timeout_process(struct event_base *base)
	}
}

/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
static void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
@@ -2255,11 +2327,19 @@ event_queue_remove(struct event_base *base, struct event *ev, int queue)
	}
}

/* Add 'ev' to the common timeout list in 'ctl'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts. But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of 'ctl->events' to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    ev_timeout_pos.ev_next_with_common_timeout, event_list) {
		/* This timercmp is a little sneaky, since both ev and e have
evutil.c (9 changed lines)
@@ -323,6 +323,7 @@ evutil_strtoll(const char *s, char **endptr, int base)
}

#ifndef _EVENT_HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be Windows. */
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
@@ -331,6 +332,14 @@ evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
	if (tv == NULL)
		return -1;

	/* XXXX
	 * _ftime is not the greatest interface here; GetSystemTimeAsFileTime
	 * would give us better resolution, whereas something cobbled together
	 * with GetTickCount could maybe give us monotonic behavior.
	 *
	 * Either way, I think this value might be skewed to ignore the
	 * timezone, and just return local time. That's not so good.
	 */
	_ftime(&tb);
	tv->tv_sec = (long) tb.time;
	tv->tv_usec = ((int) tb.millitm) * 1000;
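A sketch of the GetSystemTimeAsFileTime alternative that the XXXX comment mentions (not part of the diff; the helper name is hypothetical, and the epoch constant is the standard 1601-to-1970 offset in 100-ns units):

	#include <windows.h>

	#define EPOCH_BIAS 116444736000000000ULL

	static int
	gettimeofday_filetime(struct timeval *tv)
	{
		ULARGE_INTEGER u;
		FILETIME ft;
		GetSystemTimeAsFileTime(&ft);	/* UTC, so no local-time skew */
		u.LowPart = ft.dwLowDateTime;
		u.HighPart = ft.dwHighDateTime;
		tv->tv_sec = (long)((u.QuadPart - EPOCH_BIAS) / 10000000ULL);
		tv->tv_usec = (long)(((u.QuadPart - EPOCH_BIAS) % 10000000ULL) / 10);
		return 0;
	}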