Add support for a "debug mode" to try to catch common errors.

Right now it only catches cases where we aren't initializing events,
or where we are re-initializing events without deleting them first.
These errors are, however, shockingly common.
This commit is contained in:
Nick Mathewson 2010-01-22 00:34:37 -05:00
parent 70a4a3ef14
commit cd17c3acd5
4 changed files with 266 additions and 6 deletions

View File

@ -31,7 +31,7 @@ fi
AC_ARG_ENABLE(gcc-warnings, AC_ARG_ENABLE(gcc-warnings,
AS_HELP_STRING(--enable-gcc-warnings, enable verbose warnings with GCC)) AS_HELP_STRING(--enable-gcc-warnings, enable verbose warnings with GCC))
AC_ARG_ENABLE(thread-support, AC_ARG_ENABLE(thread-support,
AS_HELP_STRING(--enable-thread-support, enable support for threading), AS_HELP_STRING(--disable-thread-support, disable support for threading),
[], [enable_thread_support=yes]) [], [enable_thread_support=yes])
AC_ARG_ENABLE(malloc-replacement, AC_ARG_ENABLE(malloc-replacement,
AS_HELP_STRING(--disable-malloc-replacement, disable support for replacing the memory mgt functions), AS_HELP_STRING(--disable-malloc-replacement, disable support for replacing the memory mgt functions),
@ -39,6 +39,9 @@ AC_ARG_ENABLE(malloc-replacement,
AC_ARG_ENABLE(openssl, AC_ARG_ENABLE(openssl,
AS_HELP_STRING(--disable-openssl, disable support for openssl encryption), AS_HELP_STRING(--disable-openssl, disable support for openssl encryption),
[], [enable_openssl=yes]) [], [enable_openssl=yes])
# Add an --enable/--disable-debug-mode configure option (enabled by default).
AC_ARG_ENABLE(debug-mode,
AS_HELP_STRING(--disable-debug-mode, disable support for running in debug mode),
[], [enable_debug_mode=yes])
AC_PROG_LIBTOOL AC_PROG_LIBTOOL
@ -529,6 +532,12 @@ if test x$enable_malloc_replacement = xno; then
[Define if libevent should not allow replacing the mm functions]) [Define if libevent should not allow replacing the mm functions])
fi fi
# Check whether debug-mode support should be compiled out of the build
# entirely (see --disable-debug-mode above).
if test x$enable_debug_mode = xno; then
AC_DEFINE(DISABLE_DEBUG_MODE, 1,
[Define if libevent should build without support for a debug mode])
fi
# check if we have and should use openssl # check if we have and should use openssl
AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"]) AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"])

View File

@ -122,6 +122,10 @@ struct event_changelist {
int changes_size; int changes_size;
}; };
#ifndef _EVENT_DISABLE_DEBUG_MODE
/* True iff event_enable_debug_mode() has been called; defined in event.c. */
extern int _event_debug_mode_on;
#endif
struct event_base { struct event_base {
/** Function pointers and other data to describe this event_base's /** Function pointers and other data to describe this event_base's
* backend. */ * backend. */

223
event.c
View File

@ -66,6 +66,7 @@
#include "evmap-internal.h" #include "evmap-internal.h"
#include "iocp-internal.h" #include "iocp-internal.h"
#include "changelist-internal.h" #include "changelist-internal.h"
#include "ht-internal.h"
#ifdef _EVENT_HAVE_EVENT_PORTS #ifdef _EVENT_HAVE_EVENT_PORTS
extern const struct eventop evportops; extern const struct eventop evportops;
@ -140,6 +141,150 @@ static inline void event_persist_closure(struct event_base *, struct event *ev);
static int evthread_notify_base(struct event_base *base); static int evthread_notify_base(struct event_base *base);
#ifndef _EVENT_DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
* have been setup or added. We don't want to trust the content of the struct
* event itself, since we're trying to work through cases where an event gets
* clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
*/
struct event_debug_entry {
HT_ENTRY(event_debug_entry) node;
/* The tracked event; also serves as the hash key. */
const struct event *ptr;
/* True iff the event is currently added (pending). */
unsigned added : 1;
};
/* Hash function for the debug-entry table: hash the tracked pointer value
 * itself.  We shift right by 3 to discard the low bits, which are almost
 * always zero because of structure alignment and would otherwise make the
 * hash distribution poor.
 *
 * Cast the pointer through size_t before narrowing to unsigned: casting a
 * pointer directly to 'unsigned' is a constraint violation (and truncates
 * with a warning) on platforms where pointers are wider than int, e.g.
 * 64-bit LP64 systems. */
static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	return (unsigned)((size_t)e->ptr >> 3);
}
/* Equality predicate for the debug-entry table: two entries are equal iff
 * they track the same 'struct event' pointer. */
static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	const struct event *pa = a->ptr;
	const struct event *pb = b->ptr;
	return pa == pb;
}
/* Set to 1 by event_enable_debug_mode(); every _event_debug_* macro below
 * checks it before doing any work, so debug mode costs one branch when off. */
int _event_debug_mode_on = 0;
/* Lock protecting global_debug_map.  Allocated by event_enable_debug_mode()
 * (or lazily by event_base_new_with_config()); only meaningful when thread
 * support is enabled. */
static void *_event_debug_map_lock = NULL;
/* The table of every event that has been set up and not yet torn down. */
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
HT_INITIALIZER();
HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
eq_debug_entry);
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free);
/* Record that 'ev' has been initialized: insert it into the debug map (or
 * mark an existing entry as not-added).  Dies if tracking memory cannot be
 * allocated.  No-op unless debug mode is on. */
#define _event_debug_note_setup(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 0; \
} else { \
dent = mm_malloc(sizeof(*dent)); \
if (!dent) \
event_err(1, \
"Out of memory in debugging code"); \
dent->ptr = (ev); \
dent->added = 0; \
HT_INSERT(event_debug_map, &global_debug_map, dent); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
/* Record that 'ev' has been torn down: remove and free its entry in the
 * debug map, if any.  No-op unless debug mode is on. */
#define _event_debug_note_teardown(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
if (dent) \
mm_free(dent); \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while(0)
/* Record that 'ev' has been added; aborts the process if the event was
 * never set up.  No-op unless debug mode is on. */
#define _event_debug_note_add(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 1; \
} else { \
event_errx(_EVENT_ERR_ABORT, \
"%s: noting an add on a non-setup event %p", \
__func__, (ev)); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while(0)
/* Record that 'ev' has been deleted; aborts the process if the event was
 * never set up.  No-op unless debug mode is on. */
#define _event_debug_note_del(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent) { \
dent->added = 0; \
} else { \
event_errx(_EVENT_ERR_ABORT, \
"%s: noting a del on a non-setup event %p", \
__func__, (ev)); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while(0)
/* Abort unless 'ev' is currently in the debug map, i.e. it has been set up
 * and not yet torn down.  No-op unless debug mode is on. */
#define _event_debug_assert_is_setup(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (!dent) { \
event_errx(_EVENT_ERR_ABORT, \
"%s called on a non-initialized event %p", \
__func__, (ev)); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
/* Abort if 'ev' is tracked as currently added: catches re-initialization of
 * a pending event.  No-op unless debug mode is on. */
#define _event_debug_assert_not_added(ev) do { \
if (_event_debug_mode_on) { \
struct event_debug_entry *dent,find; \
find.ptr = (ev); \
EVLOCK_LOCK(_event_debug_map_lock, 0); \
dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
if (dent && dent->added) { \
event_errx(_EVENT_ERR_ABORT, \
"%s called on an already added event %p", \
__func__, (ev)); \
} \
EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
} \
} while (0)
#else
/* Debug-mode support compiled out: all of the checks become no-ops. */
#define _event_debug_note_setup(ev) \
((void)0)
#define _event_debug_note_teardown(ev) \
((void)0)
#define _event_debug_note_add(ev) \
((void)0)
#define _event_debug_note_del(ev) \
((void)0)
#define _event_debug_assert_is_setup(ev) \
((void)0)
#define _event_debug_assert_not_added(ev) \
((void)0)
#endif
static void static void
detect_monotonic(void) detect_monotonic(void)
{ {
@ -291,6 +436,38 @@ event_base_get_deferred_cb_queue(struct event_base *base)
return base ? &base->defer_queue : NULL; return base ? &base->defer_queue : NULL;
} }
/* Turn on debug mode: initialize the debug map and its lock.  Must be called
 * before any event or event_base is created (see event.h).  No-op when debug
 * mode was compiled out. */
void
event_enable_debug_mode(void)
{
#ifndef _EVENT_DISABLE_DEBUG_MODE
/* Calling this twice is almost surely a caller bug; fail loudly rather
 * than silently re-initializing the table. */
if (_event_debug_mode_on)
event_errx(1, "%s was called twice!", __func__);
_event_debug_mode_on = 1;
HT_INIT(event_debug_map, &global_debug_map);
/* NOTE(review): if threading is set up later, event_base_new_with_config()
 * allocates this lock again when it finds it still NULL. */
EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
#endif
}
#if 0
/* Not part of the public API (compiled out): free every entry in the debug
 * map and empty it.  Note that _event_debug_map_lock itself is not freed
 * here, and _event_debug_mode_on is left unchanged. */
void
event_disable_debug_mode(void)
{
struct event_debug_entry **ent, *victim;
EVLOCK_LOCK(_event_debug_map_lock, 0);
/* Walk the table, unlinking and freeing each entry as we go. */
for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
victim = *ent;
ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
mm_free(victim);
}
HT_CLEAR(event_debug_map, &global_debug_map);
EVLOCK_UNLOCK(_event_debug_map_lock , 0);
}
#endif
struct event_base * struct event_base *
event_base_new_with_config(struct event_config *cfg) event_base_new_with_config(struct event_config *cfg)
{ {
@ -298,6 +475,10 @@ event_base_new_with_config(struct event_config *cfg)
struct event_base *base; struct event_base *base;
int should_check_environment; int should_check_environment;
if (_event_debug_mode_on && !_event_debug_map_lock) {
EVTHREAD_ALLOC_LOCK(_event_debug_map_lock, 0);
}
if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) { if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
event_warn("%s: calloc", __func__); event_warn("%s: calloc", __func__);
return NULL; return NULL;
@ -619,7 +800,6 @@ event_config_free(struct event_config *cfg)
mm_free(cfg); mm_free(cfg);
} }
int int
event_config_set_flag(struct event_config *cfg, int flag) event_config_set_flag(struct event_config *cfg, int flag)
{ {
@ -1282,6 +1462,9 @@ event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, shor
{ {
if (!base) if (!base)
base = current_base; base = current_base;
_event_debug_assert_not_added(ev);
ev->ev_base = base; ev->ev_base = base;
ev->ev_callback = callback; ev->ev_callback = callback;
@ -1315,6 +1498,9 @@ event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, shor
/* by default, we put new events into the middle priority */ /* by default, we put new events into the middle priority */
ev->ev_pri = base->nactivequeues / 2; ev->ev_pri = base->nactivequeues / 2;
} }
_event_debug_note_setup(ev);
return 0; return 0;
} }
@ -1325,6 +1511,8 @@ event_base_set(struct event_base *base, struct event *ev)
if (ev->ev_flags != EVLIST_INIT) if (ev->ev_flags != EVLIST_INIT)
return (-1); return (-1);
_event_debug_assert_is_setup(ev);
ev->ev_base = base; ev->ev_base = base;
ev->ev_pri = base->nactivequeues/2; ev->ev_pri = base->nactivequeues/2;
@ -1358,9 +1546,22 @@ event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(
void void
event_free(struct event *ev) event_free(struct event *ev)
{ {
_event_debug_assert_is_setup(ev);
/* make sure that this event won't be coming back to haunt us. */ /* make sure that this event won't be coming back to haunt us. */
event_del(ev); event_del(ev);
_event_debug_note_teardown(ev);
mm_free(ev); mm_free(ev);
}
/* Tell debug mode to stop tracking 'ev', so that debug mode does not keep
 * using memory for events that are set up but never passed to event_free()
 * (see the notes in event.h).  Must only be called on a non-added event. */
void
event_debug_unassign(struct event *ev)
{
_event_debug_assert_not_added(ev);
_event_debug_note_teardown(ev);
/* Clear EVLIST_INIT, presumably so the event no longer counts as
 * initialized — TODO confirm against _event_initialized(). */
ev->ev_flags &= ~EVLIST_INIT;
} }
/* /*
@ -1371,6 +1572,8 @@ event_free(struct event *ev)
int int
event_priority_set(struct event *ev, int pri) event_priority_set(struct event *ev, int pri)
{ {
_event_debug_assert_is_setup(ev);
if (ev->ev_flags & EVLIST_ACTIVE) if (ev->ev_flags & EVLIST_ACTIVE)
return (-1); return (-1);
if (pri < 0 || pri >= ev->ev_base->nactivequeues) if (pri < 0 || pri >= ev->ev_base->nactivequeues)
@ -1391,6 +1594,8 @@ event_pending(struct event *ev, short event, struct timeval *tv)
struct timeval now, res; struct timeval now, res;
int flags = 0; int flags = 0;
_event_debug_assert_is_setup(ev);
if (ev->ev_flags & EVLIST_INSERTED) if (ev->ev_flags & EVLIST_INSERTED)
flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)); flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
if (ev->ev_flags & EVLIST_ACTIVE) if (ev->ev_flags & EVLIST_ACTIVE)
@ -1430,6 +1635,8 @@ _event_initialized(struct event *ev, int need_fd)
void void
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out) event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
{ {
_event_debug_assert_is_setup(event);
if (base_out) if (base_out)
*base_out = event->ev_base; *base_out = event->ev_base;
if (fd_out) if (fd_out)
@ -1451,30 +1658,35 @@ event_get_struct_event_size(void)
evutil_socket_t evutil_socket_t
event_get_fd(const struct event *ev) event_get_fd(const struct event *ev)
{ {
_event_debug_assert_is_setup(ev);
return ev->ev_fd; return ev->ev_fd;
} }
struct event_base * struct event_base *
event_get_base(const struct event *ev) event_get_base(const struct event *ev)
{ {
_event_debug_assert_is_setup(ev);
return ev->ev_base; return ev->ev_base;
} }
short short
event_get_events(const struct event *ev) event_get_events(const struct event *ev)
{ {
_event_debug_assert_is_setup(ev);
return ev->ev_events; return ev->ev_events;
} }
event_callback_fn event_callback_fn
event_get_callback(const struct event *ev) event_get_callback(const struct event *ev)
{ {
_event_debug_assert_is_setup(ev);
return ev->ev_callback; return ev->ev_callback;
} }
void * void *
event_get_callback_arg(const struct event *ev) event_get_callback_arg(const struct event *ev)
{ {
_event_debug_assert_is_setup(ev);
return ev->ev_arg; return ev->ev_arg;
} }
@ -1536,6 +1748,8 @@ event_add_internal(struct event *ev, const struct timeval *tv,
int res = 0; int res = 0;
int notify = 0; int notify = 0;
_event_debug_assert_is_setup(ev);
event_debug(( event_debug((
"event_add: event: %p, %s%s%scall %p", "event_add: event: %p, %s%s%scall %p",
ev, ev,
@ -1619,7 +1833,6 @@ event_add_internal(struct event *ev, const struct timeval *tv,
gettime(base, &now); gettime(base, &now);
common_timeout = is_common_timeout(tv, base); common_timeout = is_common_timeout(tv, base);
if (tv_is_absolute) { if (tv_is_absolute) {
ev->ev_timeout = *tv; ev->ev_timeout = *tv;
@ -1658,6 +1871,8 @@ event_add_internal(struct event *ev, const struct timeval *tv,
if (res != -1 && notify && !EVBASE_IN_THREAD(base)) if (res != -1 && notify && !EVBASE_IN_THREAD(base))
evthread_notify_base(base); evthread_notify_base(base);
_event_debug_note_add(ev);
return (res); return (res);
} }
@ -1744,6 +1959,8 @@ event_del_internal(struct event *ev)
if (need_cur_lock) if (need_cur_lock)
EVBASE_RELEASE_LOCK(base, current_event_lock); EVBASE_RELEASE_LOCK(base, current_event_lock);
_event_debug_note_del(ev);
return (res); return (res);
} }
@ -1752,6 +1969,8 @@ event_active(struct event *ev, int res, short ncalls)
{ {
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
_event_debug_assert_is_setup(ev);
event_active_nolock(ev, res, ncalls); event_active_nolock(ev, res, ncalls);
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

View File

@ -60,6 +60,34 @@ struct event_base;
struct event; struct event;
struct event_config; struct event_config;
/** Enable some relatively expensive debugging checks in Libevent that would
* normally be turned off. Generally, these cause code that would otherwise
* crash mysteriously to fail earlier with an assertion failure. Note that
* this function MUST be called before any events or event_bases have been
* created.
*
* Debug mode can currently catch the following errors:
* An event is re-assigned while it is added
* Any function is called on a non-assigned event
*
* Note that debugging mode uses memory to track every event that has been
* initialized (via event_assign, event_set, or event_new) but not yet
* released (via event_free or event_debug_unassign). If you want to use
* debug mode, and you find yourself running out of memory, you will need
* to use event_debug_unassign to explicitly stop tracking events that
* are no longer considered set-up.
*/
void event_enable_debug_mode(void);
/**
* When debugging mode is enabled, informs Libevent that an event should no
* longer be considered as assigned. When debugging mode is not enabled, does
* nothing.
*
* This function must only be called on a non-added event.
*/
void event_debug_unassign(struct event *);
/** /**
Initialize the event API. Initialize the event API.
@ -171,7 +199,7 @@ enum event_base_config_flag {
/** Instead of checking the current time every time the event loop is /** Instead of checking the current time every time the event loop is
ready to run timeout callbacks, check after each timeout callback. ready to run timeout callbacks, check after each timeout callback.
*/ */
EVENT_BASE_FLAG_NO_CACHE_TIME = 0x08 EVENT_BASE_FLAG_NO_CACHE_TIME = 0x08,
}; };
/** /**
@ -202,8 +230,8 @@ int event_base_get_features(struct event_base *base);
*/ */
int event_config_require_features(struct event_config *cfg, int feature); int event_config_require_features(struct event_config *cfg, int feature);
/** Sets a flag to configure what parts of the eventual event_base will /** Sets one or more flags to configure what parts of the eventual event_base
* be initialized, and how they'll work. */ * will be initialized, and how they'll work. */
int event_config_set_flag(struct event_config *cfg, int flag); int event_config_set_flag(struct event_config *cfg, int flag);
/** /**