Mirror of https://github.com/cuberite/libevent.git
Use conditions instead of current_event_lock to fix a deadlock.
Avi Bab correctly noted as bug 3044479 the fact that any thread blocking on current_event_lock will do so while holding th_base_lock, making it impossible for the currently running event's callback to call any other functions that require th_base_lock.

This patch switches the current_event_lock code to instead use a condition variable that we wait on if we're trying to mess with a currently-executing event, and that we signal when we're done executing a callback if anybody is waiting on it.
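To see the reported deadlock concretely, here is a minimal, runnable pthreads sketch (not libevent code; base_lock and cb_lock are illustrative stand-ins for th_base_lock and current_event_lock). The deleter thread takes the base lock and then blocks on the callback lock while still holding the base lock; the thread running the callback holds the callback lock and then needs the base lock, e.g. to call event_add. That is a classic lock-order inversion, so depending on scheduling a given run may or may not hang.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER; /* th_base_lock */
static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;   /* current_event_lock */

/* A thread calling event_del(): takes the base lock, then blocks on the
 * callback lock without ever letting go of the base lock. */
static void *deleter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&base_lock);
	pthread_mutex_lock(&cb_lock);
	pthread_mutex_unlock(&cb_lock);
	pthread_mutex_unlock(&base_lock);
	return NULL;
}

/* The loop thread: holds the callback lock for the whole callback, and the
 * callback then needs the base lock (say, to call event_add). */
static void *callback_runner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cb_lock);
	pthread_mutex_lock(&base_lock);
	pthread_mutex_unlock(&base_lock);
	pthread_mutex_unlock(&cb_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, callback_runner, NULL);
	pthread_create(&b, NULL, deleter, NULL);
	pthread_join(a, NULL); /* with unlucky timing, this never returns */
	pthread_join(b, NULL);
	puts("got lucky: no deadlock on this run");
	return 0;
}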
commit e0972c2138
parent d4977b52f0
event-internal.h

@@ -216,9 +216,6 @@ struct event_base {
 	/** The total size of common_timeout_queues. */
 	int n_common_timeouts_allocated;
 
-	/** The event whose callback is executing right now */
-	struct event *current_event;
-
 	/** List of defered_cb that are active.  We run these after the active
 	 * events. */
 	struct deferred_cb_queue defer_queue;
@@ -247,9 +244,13 @@ struct event_base {
 	unsigned long th_owner_id;
 	/** A lock to prevent conflicting accesses to this event_base */
 	void *th_base_lock;
-	/** A lock to prevent event_del from deleting an event while its
-	 * callback is executing. */
-	void *current_event_lock;
+	/** The event whose callback is executing right now */
+	struct event *current_event;
+	/** A condition that gets signalled when we're done processing an
+	 * event with waiters on it. */
+	void *current_event_cond;
+	/** Number of threads blocking on current_event_cond. */
+	int current_event_waiters;
 #endif
 
 #ifdef WIN32
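As a compile-clean pthreads analogue (a sketch with illustrative names, not libevent's internals), the protocol these three fields implement looks like the following. One deliberate difference: the sketch re-checks its predicate in a loop, the textbook-safe form, whereas the patch below waits once and relies on the broadcast that ends each callback.

#include <pthread.h>
#include <stddef.h>

struct base_sketch {
	pthread_mutex_t lock;      /* stands in for th_base_lock */
	const void *current_event; /* event whose callback is executing */
	pthread_cond_t cond;       /* stands in for current_event_cond */
	int waiters;               /* stands in for current_event_waiters */
};

/* Waiter side (what event_add/event_del/event_active do below).
 * Called with base->lock held. */
static void wait_for_event(struct base_sketch *base, const void *ev)
{
	while (base->current_event == ev) {
		++base->waiters;
		/* Atomically drops base->lock while asleep -- this is the
		 * deadlock fix: the waiter no longer sits on the base lock. */
		pthread_cond_wait(&base->cond, &base->lock);
	}
}

/* Loop side (what event_process_active_single_queue does below). */
static void run_callback(struct base_sketch *base, const void *ev,
    void (*cb)(void))
{
	pthread_mutex_lock(&base->lock);
	base->current_event = ev;   /* publish before dropping the lock */
	base->waiters = 0;
	pthread_mutex_unlock(&base->lock);
	cb();                       /* user code runs without the base lock */
	pthread_mutex_lock(&base->lock);
	base->current_event = NULL;
	if (base->waiters) {        /* wake everyone parked on this event */
		base->waiters = 0;
		pthread_cond_broadcast(&base->cond);
	}
	pthread_mutex_unlock(&base->lock);
}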
event.c: 58 lines changed
@@ -604,8 +604,7 @@ event_base_new_with_config(const struct event_config *cfg)
 		EVTHREAD_ALLOC_LOCK(base->th_base_lock,
 		    EVTHREAD_LOCKTYPE_RECURSIVE);
 		base->defer_queue.lock = base->th_base_lock;
-		EVTHREAD_ALLOC_LOCK(base->current_event_lock,
-		    EVTHREAD_LOCKTYPE_RECURSIVE);
+		EVTHREAD_ALLOC_COND(base->current_event_cond);
 		r = evthread_make_base_notifiable(base);
 		if (r<0) {
 			event_base_free(base);
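EVTHREAD_ALLOC_COND here, and its EVTHREAD_FREE_COND counterpart in event_base_free below, dispatch through libevent's pluggable threading callbacks rather than calling pthreads directly. Assuming the pthreads backend is installed, the pair plausibly reduces to something like this sketch (illustrative, not the backend's actual code):

#include <pthread.h>
#include <stdlib.h>

/* What allocating base->current_event_cond might boil down to. */
static void *cond_alloc(void)
{
	pthread_cond_t *c = malloc(sizeof(*c));
	if (c && pthread_cond_init(c, NULL)) {
		free(c);      /* init failed; report allocation failure */
		return NULL;
	}
	return c;
}

/* ...and the matching teardown used at event_base_free() time. */
static void cond_free(void *cond)
{
	pthread_cond_t *c = cond;
	if (c) {
		pthread_cond_destroy(c);
		free(c);
	}
}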
@@ -731,8 +730,7 @@ event_base_free(struct event_base *base)
 	event_changelist_freemem(&base->changelist);
 
 	EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
-	EVTHREAD_FREE_LOCK(base->current_event_lock,
-	    EVTHREAD_LOCKTYPE_RECURSIVE);
+	EVTHREAD_FREE_COND(base->current_event_cond);
 
 	mm_free(base);
 }
@@ -1217,9 +1215,10 @@ event_process_active_single_queue(struct event_base *base,
 			ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
 			ev->ev_callback));
 
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
 		base->current_event = ev;
-
-		EVBASE_ACQUIRE_LOCK(base, current_event_lock);
+		base->current_event_waiters = 0;
+#endif
 
 		switch (ev->ev_closure) {
 		case EV_CLOSURE_SIGNAL:
@@ -1236,9 +1235,14 @@ event_process_active_single_queue(struct event_base *base,
 			break;
 		}
 
-		EVBASE_RELEASE_LOCK(base, current_event_lock);
 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
 		base->current_event = NULL;
+		if (base->current_event_waiters) {
+			base->current_event_waiters = 0;
+			EVTHREAD_COND_BROADCAST(base->current_event_cond);
+		}
+#endif
 
 		if (base->event_break)
 			return -1;
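The wake-up is a broadcast rather than a single signal because several threads can be parked on current_event_cond for the same event (an event_del caller plus an event_add caller, say), and every one of them must resume. A small runnable pthreads demonstration of that choice, with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int callback_running = 1;

static void *waiter(void *name)
{
	pthread_mutex_lock(&lock);
	while (callback_running)
		pthread_cond_wait(&cond, &lock);
	printf("%s woke up\n", (const char *)name);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	pthread_create(&t1, NULL, waiter, "event_del caller");
	pthread_create(&t2, NULL, waiter, "event_add caller");
	pthread_mutex_lock(&lock);
	callback_running = 0;           /* the callback has returned */
	pthread_cond_broadcast(&cond);  /* signal() would strand one waiter */
	pthread_mutex_unlock(&lock);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}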
@@ -1879,7 +1883,6 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 	struct event_base *base = ev->ev_base;
 	int res = 0;
 	int notify = 0;
-	int need_cur_lock;
 
 	EVENT_BASE_ASSERT_LOCKED(base);
 	_event_debug_assert_is_setup(ev);
@@ -1908,10 +1911,13 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 	 * callback, and we are not the main thread, then we want to wait
 	 * until the callback is done before we mess with the event, or else
 	 * we can race on ev_ncalls and ev_pncalls below. */
-	need_cur_lock = (base->current_event == ev) &&
-	    (ev->ev_events & EV_SIGNAL);
-	if (need_cur_lock)
-		EVBASE_ACQUIRE_LOCK(base, current_event_lock);
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+	if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
+	    && !EVBASE_IN_THREAD(base)) {
+		++base->current_event_waiters;
+		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+	}
+#endif
 
 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
@@ -2016,9 +2022,6 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 
 	_event_debug_note_add(ev);
 
-	if (need_cur_lock)
-		EVBASE_RELEASE_LOCK(base, current_event_lock);
-
 	return (res);
 }
 
@@ -2042,7 +2045,6 @@ event_del_internal(struct event *ev)
 {
 	struct event_base *base;
 	int res = 0, notify = 0;
-	int need_cur_lock;
 
 	event_debug(("event_del: %p, callback %p",
 		 ev, ev->ev_callback));
@@ -2059,9 +2061,12 @@ event_del_internal(struct event *ev)
 	 * when this function returns, it will be safe to free the
 	 * user-supplied argument. */
 	base = ev->ev_base;
-	need_cur_lock = (base->current_event == ev);
-	if (need_cur_lock)
-		EVBASE_ACQUIRE_LOCK(base, current_event_lock);
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+	if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
+		++base->current_event_waiters;
+		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+	}
+#endif
 
 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
 
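What this wait buys callers is the guarantee quoted in the context above: once event_del() returns, the callback is no longer mid-flight in another thread, so the user-supplied argument can be freed. A hedged usage sketch against the public libevent 2.0 API (error handling omitted; assumes linking against libevent_pthreads):

#include <event2/event.h>
#include <event2/thread.h>
#include <stdlib.h>

struct ctx { int n; };   /* state handed to the callback */

static void cb(evutil_socket_t fd, short what, void *arg)
{
	struct ctx *c = arg;
	(void)fd; (void)what;
	c->n++;              /* safe: event_del() waits this out */
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));
	evthread_use_pthreads();   /* install locks and conditions first */
	struct event_base *base = event_base_new();
	struct event *ev = event_new(base, -1, EV_PERSIST, cb, c);
	struct timeval tv = {0, 1000};
	event_add(ev, &tv);
	/* ... another thread may be running event_base_dispatch(base) ... */
	event_del(ev);             /* blocks until cb() is not mid-run */
	free(c);                   /* now safe to free the callback's state */
	event_free(ev);
	event_base_free(base);
	return 0;
}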
@@ -2104,9 +2109,6 @@ event_del_internal(struct event *ev)
 	if (res != -1 && notify && !EVBASE_IN_THREAD(base))
 		evthread_notify_base(base);
 
-	if (need_cur_lock)
-		EVBASE_RELEASE_LOCK(base, current_event_lock);
-
 	_event_debug_note_del(ev);
 
 	return (res);
@@ -2129,7 +2131,6 @@ void
 event_active_nolock(struct event *ev, int res, short ncalls)
 {
 	struct event_base *base;
-	int need_cur_lock = 0;
 
 	/* We get different kinds of events, add them together */
 	if (ev->ev_flags & EVLIST_ACTIVE) {
@@ -2144,16 +2145,17 @@ event_active_nolock(struct event *ev, int res, short ncalls)
 	ev->ev_res = res;
 
 	if (ev->ev_events & EV_SIGNAL) {
-		need_cur_lock = (base->current_event == ev);
-		if (need_cur_lock)
-			EVBASE_ACQUIRE_LOCK(base, current_event_lock);
+#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+		if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
+			++base->current_event_waiters;
+			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
+		}
+#endif
 		ev->ev_ncalls = ncalls;
 		ev->ev_pncalls = NULL;
 	}
 
 	event_queue_insert(base, ev, EVLIST_ACTIVE);
-	if (need_cur_lock)
-		EVBASE_RELEASE_LOCK(base, current_event_lock);
 }
 
 void
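One subtlety repeated at all three wait sites: each wait is guarded by !EVBASE_IN_THREAD(base). The loop thread itself may legally call event_del() or event_active() on the event whose callback it is currently running, and making it wait for its own callback to finish would deadlock. A sketch of what that owner-thread check amounts to, assuming pthreads (th_owner_id is the real field; the struct and helper names here are invented for illustration):

#include <pthread.h>

struct owner_sketch {
	pthread_t owner;   /* thread running the loop; cf. th_owner_id */
};

/* Illustrative stand-in for EVBASE_IN_THREAD(): true iff the caller is
 * the thread that is running the event loop, in which case the waits
 * above are skipped. */
static int in_loop_thread(const struct owner_sketch *b)
{
	return pthread_equal(b->owner, pthread_self());
}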