mirror of
https://github.com/cuberite/libevent.git
synced 2025-09-19 01:04:58 -04:00
Refactor monotonic timer handling into a new type and set of functions; add a gettimeofday-based ratcheting implementation
Now, event.c can always assume that we have a monotonic timer; this makes event.c easier to write.
This commit is contained in:
parent
71bca50f12
commit
f5e4eb05e5
@ -36,10 +36,6 @@ extern "C" {
|
||||
|
||||
#include <time.h>
|
||||
#include <sys/queue.h>
|
||||
#ifdef EVENT__HAVE_MACH_MACH_TIME_H
|
||||
/* For mach_timebase_info */
|
||||
#include <mach/mach_time.h>
|
||||
#endif
|
||||
#include "event2/event_struct.h"
|
||||
#include "minheap-internal.h"
|
||||
#include "evsignal-internal.h"
|
||||
@ -62,16 +58,6 @@ extern "C" {
|
||||
#define EV_CLOSURE_SIGNAL 1
|
||||
#define EV_CLOSURE_PERSIST 2
|
||||
|
||||
/* Define HAVE_ANY_MONOTONIC iff we *might* have a working monotonic
|
||||
* clock implementation */
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
|
||||
#define HAVE_ANY_MONOTONIC 1
|
||||
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
#define HAVE_ANY_MONOTONIC 1
|
||||
#elif defined(_WIN32)
|
||||
#define HAVE_ANY_MONOTONIC 1
|
||||
#endif
|
||||
|
||||
/** Structure to define the backend of a given event_base. */
|
||||
struct eventop {
|
||||
/** The name of this backend. */
|
||||
@ -247,9 +233,6 @@ struct event_base {
|
||||
/** Mapping from signal numbers to enabled (added) events. */
|
||||
struct event_signal_map sigmap;
|
||||
|
||||
/** Stored timeval; used to detect when time is running backwards. */
|
||||
struct timeval event_tv;
|
||||
|
||||
/** Priority queue of events with timeouts. */
|
||||
struct min_heap timeheap;
|
||||
|
||||
@ -257,27 +240,13 @@ struct event_base {
|
||||
* too often. */
|
||||
struct timeval tv_cache;
|
||||
|
||||
#if defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
struct mach_timebase_info mach_timebase_units;
|
||||
#endif
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) && defined(CLOCK_MONOTONIC_COARSE)
|
||||
#define CLOCK_IS_SELECTED
|
||||
int monotonic_clock;
|
||||
#endif
|
||||
#ifdef _WIN32
|
||||
DWORD last_tick_count;
|
||||
struct timeval adjust_tick_count;
|
||||
#endif
|
||||
#if defined(HAVE_ANY_MONOTONIC)
|
||||
/** True iff we should use our system's monotonic time implementation */
|
||||
/* TODO: Support systems where we don't need to detect monotonic time */
|
||||
int use_monotonic;
|
||||
struct evutil_monotonic_timer monotonic_timer;
|
||||
|
||||
/** Difference between internal time (maybe from clock_gettime) and
|
||||
* gettimeofday. */
|
||||
struct timeval tv_clock_diff;
|
||||
/** Second in which we last updated tv_clock_diff, in monotonic time. */
|
||||
time_t last_updated_clock_diff;
|
||||
#endif
|
||||
|
||||
#ifndef EVENT__DISABLE_THREAD_SUPPORT
|
||||
/* threading support */
|
||||
@ -413,4 +382,3 @@ int event_base_foreach_event_(struct event_base *base,
|
||||
#endif
|
||||
|
||||
#endif /* EVENT_INTERNAL_H_INCLUDED_ */
|
||||
|
||||
|
189
event.c
189
event.c
@ -152,7 +152,6 @@ static int event_process_active(struct event_base *);
|
||||
|
||||
static int timeout_next(struct event_base *, struct timeval **);
|
||||
static void timeout_process(struct event_base *);
|
||||
static void timeout_correct(struct event_base *, struct timeval *);
|
||||
|
||||
static inline void event_signal_closure(struct event_base *, struct event *ev);
|
||||
static inline void event_persist_closure(struct event_base *, struct event *ev);
|
||||
@ -338,48 +337,6 @@ HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
|
||||
#define EVENT_BASE_ASSERT_LOCKED(base) \
|
||||
EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
|
||||
|
||||
/* Set base->use_monotonic to 1 if we have a clock function that supports
|
||||
* monotonic time */
|
||||
static void
detect_monotonic(struct event_base *base, const struct event_config *cfg)
{
	/* Probe, at runtime, for a usable monotonic clock and record the
	 * result in base->use_monotonic (and base->monotonic_clock where the
	 * platform offers a choice of clocks).  Called once per event_base. */
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	{
		/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.
		 * You need to check for it at runtime, because some older
		 * versions won't have it working. */
		struct timespec ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
			base->use_monotonic = 1;
#ifdef CLOCK_IS_SELECTED
			base->monotonic_clock = CLOCK_MONOTONIC;
			/* Prefer the cheaper COARSE clock unless the user
			 * asked for a precise timer via the config flags. */
			if (cfg == NULL ||
			    !(cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER)) {
				if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0)
					base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			}
#endif
		}
	}
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
	{
		struct mach_timebase_info mi;
		/* OSX has mach_absolute_time() */
		if (mach_timebase_info(&mi) == 0 && mach_absolute_time() != 0) {
			base->use_monotonic = 1;
			/* mach_timebase_info tells us how to convert
			 * mach_absolute_time() into nanoseconds, but we
			 * want to use microseconds instead. */
			mi.denom *= 1000;
			memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
		}
	}
#elif defined(_WIN32)
	/* GetTickCount() is always available on Windows; no probing needed. */
	base->use_monotonic = 1;
#endif
}
|
||||
|
||||
/* How often (in seconds) do we check for changes in wall clock time relative
|
||||
* to monotonic time? Set this to -1 for 'never.' */
|
||||
#define CLOCK_SYNC_INTERVAL 5
|
||||
@ -399,60 +356,19 @@ gettime(struct event_base *base, struct timeval *tp)
|
||||
return (0);
|
||||
}
|
||||
|
||||
#ifdef HAVE_ANY_MONOTONIC
|
||||
if (base->use_monotonic) {
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
|
||||
struct timespec ts;
|
||||
#ifdef CLOCK_IS_SELECTED
|
||||
if (clock_gettime(base->monotonic_clock, &ts) == -1)
|
||||
return (-1);
|
||||
#else
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
|
||||
return (-1);
|
||||
#endif
|
||||
|
||||
tp->tv_sec = ts.tv_sec;
|
||||
tp->tv_usec = ts.tv_nsec / 1000;
|
||||
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
uint64_t abstime = mach_absolute_time();
|
||||
uint64_t usec;
|
||||
usec = (abstime * base->mach_timebase_units.numer)
|
||||
/ (base->mach_timebase_units.denom);
|
||||
tp->tv_sec = usec / 1000000;
|
||||
tp->tv_usec = usec % 1000000;
|
||||
#elif defined(_WIN32)
|
||||
/* TODO: Support GetTickCount64. */
|
||||
/* TODO: Support alternate timer backends if the user asked
|
||||
* for a high-precision timer. QueryPerformanceCounter is
|
||||
* possibly a good idea, but it is also supposed to have
|
||||
* reliability issues under various circumstances. */
|
||||
DWORD ticks = GetTickCount();
|
||||
if (ticks < base->last_tick_count) {
|
||||
/* The 32-bit timer rolled over. Let's assume it only
|
||||
* happened once. Add 2**32 msec to adjust_tick_count. */
|
||||
const struct timeval tv_rollover = { 4294967, 296000 };
|
||||
evutil_timeradd(&tv_rollover, &base->adjust_tick_count, &base->adjust_tick_count);
|
||||
}
|
||||
base->last_tick_count = ticks;
|
||||
tp->tv_sec = ticks / 1000;
|
||||
tp->tv_usec = (ticks % 1000) * 1000;
|
||||
evutil_timeradd(tp, &base->adjust_tick_count, tp);
|
||||
#else
|
||||
#error "Missing monotonic time implementation."
|
||||
#endif
|
||||
if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
|
||||
< tp->tv_sec) {
|
||||
struct timeval tv;
|
||||
evutil_gettimeofday(&tv,NULL);
|
||||
evutil_timersub(&tv, tp, &base->tv_clock_diff);
|
||||
base->last_updated_clock_diff = tp->tv_sec;
|
||||
}
|
||||
|
||||
return (0);
|
||||
if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
return (evutil_gettimeofday(tp, NULL));
|
||||
if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
|
||||
< tp->tv_sec) {
|
||||
struct timeval tv;
|
||||
evutil_gettimeofday(&tv,NULL);
|
||||
evutil_timersub(&tv, tp, &base->tv_clock_diff);
|
||||
base->last_updated_clock_diff = tp->tv_sec;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
@ -469,11 +385,7 @@ event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
|
||||
if (base->tv_cache.tv_sec == 0) {
|
||||
r = evutil_gettimeofday(tv, NULL);
|
||||
} else {
|
||||
#ifdef HAVE_ANY_MONOTONIC
|
||||
evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
|
||||
#else
|
||||
*tv = base->tv_cache;
|
||||
#endif
|
||||
r = 0;
|
||||
}
|
||||
EVBASE_RELEASE_LOCK(base, th_base_lock);
|
||||
@ -647,8 +559,12 @@ event_base_new_with_config(const struct event_config *cfg)
|
||||
event_warn("%s: calloc", __func__);
|
||||
return NULL;
|
||||
}
|
||||
detect_monotonic(base, cfg);
|
||||
gettime(base, &base->event_tv);
|
||||
evutil_configure_monotonic_time_(&base->monotonic_timer,
|
||||
cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER));
|
||||
{
|
||||
struct timeval tmp;
|
||||
gettime(base, &tmp);
|
||||
}
|
||||
|
||||
min_heap_ctor_(&base->timeheap);
|
||||
|
||||
@ -1771,8 +1687,6 @@ event_base_loop(struct event_base *base, int flags)
|
||||
break;
|
||||
}
|
||||
|
||||
timeout_correct(base, &tv);
|
||||
|
||||
tv_p = &tv;
|
||||
if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
|
||||
timeout_next(base, &tv_p);
|
||||
@ -1792,9 +1706,6 @@ event_base_loop(struct event_base *base, int flags)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* update last old time */
|
||||
gettime(base, &base->event_tv);
|
||||
|
||||
clear_time_cache(base);
|
||||
|
||||
res = evsel->dispatch(base, tv_p);
|
||||
@ -2080,12 +1991,8 @@ event_pending(const struct event *ev, short event, struct timeval *tv)
|
||||
if (tv != NULL && (flags & event & EV_TIMEOUT)) {
|
||||
struct timeval tmp = ev->ev_timeout;
|
||||
tmp.tv_usec &= MICROSECONDS_MASK;
|
||||
#ifdef HAVE_ANY_MONOTONIC
|
||||
/* correctly remap to real time */
|
||||
evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
|
||||
#else
|
||||
*tv = tmp;
|
||||
#endif
|
||||
}
|
||||
|
||||
return (flags & event);
|
||||
@ -2627,66 +2534,6 @@ out:
|
||||
return (res);
|
||||
}
|
||||
|
||||
/*
|
||||
* Determines if the time is running backwards by comparing the current time
|
||||
* against the last time we checked. Not needed when using clock monotonic.
|
||||
* If time is running backwards, we adjust the firing time of every event by
|
||||
* the amount that time seems to have jumped.
|
||||
*/
|
||||
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	/* Caller must hold th_base_lock. */
	struct event **pev;
	unsigned int size;
	struct timeval off;
	int i;

#ifdef HAVE_ANY_MONOTONIC
	/* A monotonic clock cannot run backwards; nothing to correct. */
	if (base->use_monotonic)
		return;
#endif

	/* Check if time is running backwards */
	gettime(base, tv);

	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		/* Normal case: time moved forward (or stood still). */
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	/* 'off' is how far the clock jumped back; shift every pending
	 * timeout earlier by the same amount so relative waits are kept. */
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the minheap property, because we change every element.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Common-timeout events encode their queue index in the high bits of
	 * tv_usec: strip the tag, adjust, then re-apply the same tag. */
	for (i=0; i<base->n_common_timeouts; ++i) {
		struct event *ev;
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			struct timeval *ev_tv = &ev->ev_timeout;
			ev_tv->tv_usec &= MICROSECONDS_MASK;
			evutil_timersub(ev_tv, &off, ev_tv);
			ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
			    (i<<COMMON_TIMEOUT_IDX_SHIFT);
		}
	}

	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}
|
||||
|
||||
/* Activate every event whose timeout has elapsed. */
|
||||
static void
|
||||
timeout_process(struct event_base *base)
|
||||
@ -3203,9 +3050,7 @@ dump_inserted_event_fn(struct event_base *base, struct event *e, void *arg)
|
||||
struct timeval tv;
|
||||
tv.tv_sec = e->ev_timeout.tv_sec;
|
||||
tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
|
||||
#if defined(HAVE_ANY_MONOTONIC)
|
||||
evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
|
||||
#endif
|
||||
fprintf(output, " Timeout=%ld.%06d",
|
||||
(long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
|
||||
}
|
||||
|
235
evutil_time.c
235
evutil_time.c
@ -49,6 +49,7 @@
|
||||
#endif
|
||||
#include <time.h>
|
||||
#include <sys/stat.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "event2/util.h"
|
||||
#include "util-internal.h"
|
||||
@ -131,3 +132,237 @@ evutil_usleep_(const struct timeval *tv)
|
||||
select(0, NULL, NULL, NULL, tv);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
This function assumes it's called repeatedly with a
|
||||
not-actually-so-monotonic time source whose outputs are in 'tv'. It
|
||||
implements a trivial ratcheting mechanism so that the values never go
|
||||
backwards.
|
||||
*/
|
||||
static void
|
||||
adjust_monotonic_time(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tv)
|
||||
{
|
||||
evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);
|
||||
|
||||
if (evutil_timercmp(tv, &base->last_time, <)) {
|
||||
/* Guess it wasn't monotonic after all. */
|
||||
struct timeval adjust;
|
||||
evutil_timersub(&base->last_time, tv, &adjust);
|
||||
evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
|
||||
&base->adjust_monotonic_clock);
|
||||
*tv = base->last_time;
|
||||
}
|
||||
base->last_time = *tv;
|
||||
}
|
||||
|
||||
#if defined(HAVE_POSIX_MONOTONIC)
|
||||
/* =====
|
||||
The POSIX clock_gettime() interface provides a few ways to get at a
|
||||
monotonic clock. CLOCK_MONOTONIC is most widely supported. Linux also
|
||||
provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.
|
||||
|
||||
On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
|
||||
Platforms don't agree about whether it should jump on a sleep/resume.
|
||||
*/
|
||||
|
||||
int
|
||||
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
|
||||
int precise)
|
||||
{
|
||||
/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris. You need to
|
||||
* check for it at runtime, because some older kernel versions won't
|
||||
* have it working. */
|
||||
struct timespec ts;
|
||||
#ifdef CLOCK_MONOTONIC_COARSE
|
||||
#if CLOCK_MONOTONIC_COARSE < 0
|
||||
/* Technically speaking, nothing keeps CLOCK_* from being negative (as
|
||||
* far as I know). This check and the one below make sure that it's
|
||||
* safe for us to use -1 as an "unset" value. */
|
||||
#error "I didn't expect CLOCK_MONOTONIC_COARSE to be < 0"
|
||||
#endif
|
||||
if (! precise) {
|
||||
if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
|
||||
base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
|
||||
base->monotonic_clock = CLOCK_MONOTONIC;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if CLOCK_MONOTONIC < 0
|
||||
#error "I didn't expect CLOCK_MONOTONIC to be < 0"
|
||||
#endif
|
||||
|
||||
base->monotonic_clock = -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tp)
|
||||
{
|
||||
struct timespec ts;
|
||||
|
||||
if (base->monotonic_clock < 0) {
|
||||
if (evutil_gettimeofday(tp, NULL) < 0)
|
||||
return -1;
|
||||
adjust_monotonic_time(base, tp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (clock_gettime(base->monotonic_clock, &ts) == -1)
|
||||
return -1;
|
||||
tp->tv_sec = ts.tv_sec;
|
||||
tp->tv_usec = ts.tv_nsec / 1000;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_MACH_MONOTONIC)
|
||||
/* ======
|
||||
Apple is a little late to the POSIX party. And why not? Instead of
|
||||
clock_gettime(), they provide mach_absolute_time(). Its units are not
|
||||
fixed; we need to use mach_timebase_info() to get the right functions to
|
||||
convert its units into nanoseconds.
|
||||
|
||||
To all appearances, mach_absolute_time() seems to be honest-to-goodness
|
||||
monotonic. Whether it stops during sleep or not is unspecified in
|
||||
principle, and dependent on CPU architecture in practice.
|
||||
*/
|
||||
|
||||
int
|
||||
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
|
||||
int precise)
|
||||
{
|
||||
struct mach_timebase_info mi;
|
||||
memset(base, 0, sizeof(*base));
|
||||
/* OSX has mach_absolute_time() */
|
||||
if (mach_timebase_info(&mi) == 0 && mach_absolute_time() != 0) {
|
||||
/* mach_timebase_info tells us how to convert
|
||||
* mach_absolute_time() into nanoseconds, but we
|
||||
* want to use microseconds instead. */
|
||||
mi.denom *= 1000;
|
||||
memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
|
||||
} else {
|
||||
base->mach_timebase_units.numer = 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tp)
|
||||
{
|
||||
ev_uint64_t abstime, usec;
|
||||
if (base->mach_timebase_units.numer == 0) {
|
||||
if (evutil_gettimeofday(tp, NULL) < 0)
|
||||
return -1;
|
||||
adjust_monotonic_time(base, tp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
abstime = mach_absolute_time();
|
||||
usec = (abstime * base->mach_timebase_units.numer)
|
||||
/ (base->mach_timebase_units.denom);
|
||||
tp->tv_sec = usec / 1000000;
|
||||
tp->tv_usec = usec % 1000000;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_WIN32_MONOTONIC)
|
||||
/* =====
|
||||
Turn we now to Windows.  Want monotonic time on Windows?
|
||||
|
||||
Windows has QueryPerformanceCounter(), which gives the most high-
|
||||
resolution time. It's a pity it's not so monotonic in practice; it's
|
||||
also got some fun bugs, especially with older Windowses, under
|
||||
virtualizations, with funny hardware, on multiprocessor systems, and so
|
||||
on. PEP418 [1] has a nice roundup here.
|
||||
|
||||
There's GetTickCount64(), which gives a number of 1-msec ticks since
|
||||
startup. The accuracy here might be as bad as 10-20 msec, I hear.
|
||||
There's an undocumented function (NtSetTimerResolution) that allegedly
|
||||
increases the accuracy. Good luck!
|
||||
|
||||
There's also GetTickCount(), which is only 32 bits, but seems to be
|
||||
supported on pre-Vista versions of Windows.
|
||||
|
||||
The less said about timeGetTime() the better.
|
||||
|
||||
"We don't care. We don't have to. We're the Phone Company."
|
||||
-- Lily Tomlin, SNL
|
||||
|
||||
|
||||
[1] http://www.python.org/dev/peps/pep-0418
|
||||
*/
|
||||
int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int precise)
{
	/* GetTickCount() needs no runtime probing: zero all state and record
	 * the current tick count as the rollover-detection baseline.
	 * 'precise' is ignored; no higher-precision backend is wired up yet
	 * (see the TODOs in evutil_gettime_monotonic_).  Always returns 0. */
	memset(base, 0, sizeof(*base));
	base->last_tick_count = GetTickCount();

	return 0;
}
|
||||
|
||||
int
|
||||
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tp)
|
||||
{
|
||||
/* TODO: Support GetTickCount64. */
|
||||
/* TODO: Support alternate timer backends if the user asked
|
||||
* for a high-precision timer. QueryPerformanceCounter is
|
||||
* possibly a good idea, but it is also supposed to have
|
||||
* reliability issues under various circumstances. */
|
||||
DWORD ticks = GetTickCount();
|
||||
if (ticks < base->last_tick_count) {
|
||||
/* The 32-bit timer rolled over. Let's assume it only
|
||||
* happened once. Add 2**32 msec to adjust_tick_count. */
|
||||
const struct timeval tv_rollover = { 4294967, 296000 };
|
||||
evutil_timeradd(&tv_rollover, &base->adjust_tick_count, &base->adjust_tick_count);
|
||||
}
|
||||
base->last_tick_count = ticks;
|
||||
tp->tv_sec = ticks / 1000;
|
||||
tp->tv_usec = (ticks % 1000) * 1000;
|
||||
evutil_timeradd(tp, &base->adjust_tick_count, tp);
|
||||
|
||||
adjust_monotonic_time(base, tp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_FALLBACK_MONOTONIC)
|
||||
/* =====
|
||||
And if none of the other options work, let's just use gettimeofday(), and
|
||||
ratchet it forward so that it acts like a monotonic timer, whether it
|
||||
wants to or not.
|
||||
*/
|
||||
|
||||
int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int precise)
{
	/* Nothing to probe: gettimeofday() is always available.  Zero the
	 * ratchet state (adjust_monotonic_clock, last_time); 'precise' is
	 * ignored.  Always returns 0. */
	memset(base, 0, sizeof(*base));
	return 0;
}
|
||||
|
||||
int
|
||||
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
|
||||
struct timeval *tp)
|
||||
{
|
||||
if (evutil_gettimeofday(tp, NULL) < 0)
|
||||
return -1;
|
||||
adjust_monotonic_time(base, tp);
|
||||
return 0;
|
||||
|
||||
}
|
||||
#endif
|
||||
|
@ -29,15 +29,57 @@
|
||||
|
||||
#include "event2/event-config.h"
|
||||
#include "evconfig-private.h"
|
||||
|
||||
#ifdef EVENT__HAVE_MACH_MACH_TIME_H
|
||||
/* For mach_timebase_info */
|
||||
#include <mach/mach_time.h>
|
||||
#endif
|
||||
|
||||
#include <time.h>
|
||||
|
||||
#include "event2/util.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if defined(EVENT__HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
|
||||
#define HAVE_POSIX_MONOTONIC
|
||||
#elif defined(EVENT__HAVE_MACH_ABSOLUTE_TIME)
|
||||
#define HAVE_MACH_MONOTONIC
|
||||
#elif defined(_WIN32)
|
||||
#define HAVE_WIN32_MONOTONIC
|
||||
#else
|
||||
#define HAVE_FALLBACK_MONOTONIC
|
||||
#endif
|
||||
|
||||
long evutil_tv_to_msec_(const struct timeval *tv);
|
||||
void evutil_usleep_(const struct timeval *tv);
|
||||
|
||||
/* Opaque state for one monotonic-timer instance.  Initialize with
 * evutil_configure_monotonic_time_(); read with evutil_gettime_monotonic_(). */
struct evutil_monotonic_timer {

#ifdef HAVE_MACH_MONOTONIC
	/* Conversion from mach_absolute_time() units to microseconds;
	 * numer == 0 means mach timing is unavailable (use the fallback). */
	struct mach_timebase_info mach_timebase_units;
#endif

#ifdef HAVE_POSIX_MONOTONIC
	/* clockid passed to clock_gettime(), or -1 for the gettimeofday()
	 * fallback. */
	int monotonic_clock;
#endif

#ifdef HAVE_WIN32_MONOTONIC
	/* Last GetTickCount() value seen, for 32-bit rollover detection. */
	DWORD last_tick_count;
	/* Accumulated 2**32-msec offsets from past rollovers. */
	struct timeval adjust_tick_count;
#endif

	/* Correction added to a non-monotonic source so that successive
	 * readings never move backwards (see adjust_monotonic_time()). */
	struct timeval adjust_monotonic_clock;
	/* Most recent value returned; the ratchet's floor. */
	struct timeval last_time;
};
|
||||
|
||||
int evutil_configure_monotonic_time_(struct evutil_monotonic_timer *mt,
|
||||
int precise);
|
||||
int evutil_gettime_monotonic_(struct evutil_monotonic_timer *mt, struct timeval *tv);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
Loading…
x
Reference in New Issue
Block a user