Don't race when calling event_active/event_add on a running signal event

There was previously no lock protecting the signal event's
ev_ncalls/ev_pncalls fields, which were accessed by all of
event_signal_closure, event_add_internal, event_del_internal, and
event_active_nolock.  This patch fixes this race by using the
current_event_lock in the same way it's used to prevent
event_del_internal from touching an event that's currently running.
This commit is contained in:
Nick Mathewson 2010-07-05 14:39:39 -04:00
parent 61e1eeec6f
commit fc5e0a2344

21
event.c
View File

@@ -954,6 +954,8 @@ event_signal_closure(struct event_base *base, struct event *ev)
 	while (ncalls) {
 		ncalls--;
 		ev->ev_ncalls = ncalls;
+		if (ncalls == 0)
+			ev->ev_pncalls = NULL;
 		(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
 #if 0
 		/* XXXX we can't do this without a lock on the base. */
@@ -1876,6 +1878,7 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 	struct event_base *base = ev->ev_base;
 	int res = 0;
 	int notify = 0;
+	int need_cur_lock;
 	EVENT_BASE_ASSERT_LOCKED(base);
 	_event_debug_assert_is_setup(ev);
@@ -1900,6 +1903,15 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 			return (-1); /* ENOMEM == errno */
 		}
 	}
+	/* If the main thread is currently executing a signal event's
+	 * callback, and we are not the main thread, then we want to wait
+	 * until the callback is done before we mess with the event, or else
+	 * we can race on ev_ncalls and ev_pncalls below. */
+	need_cur_lock = (base->current_event == ev) &&
+	    (ev->ev_events & EV_SIGNAL);
+	if (need_cur_lock)
+		EVBASE_ACQUIRE_LOCK(base, current_event_lock);
 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
 		if (ev->ev_events & (EV_READ|EV_WRITE))
@@ -2003,6 +2015,9 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 	_event_debug_note_add(ev);
+	if (need_cur_lock)
+		EVBASE_RELEASE_LOCK(base, current_event_lock);
 	return (res);
 }
@@ -2113,6 +2128,7 @@ void
 event_active_nolock(struct event *ev, int res, short ncalls)
 {
 	struct event_base *base;
+	int need_cur_lock = 0;
 	/* We get different kinds of events, add them together */
 	if (ev->ev_flags & EVLIST_ACTIVE) {
@@ -2127,11 +2143,16 @@ event_active_nolock(struct event *ev, int res, short ncalls)
 	ev->ev_res = res;
 	if (ev->ev_events & EV_SIGNAL) {
+		need_cur_lock = (base->current_event == ev);
+		if (need_cur_lock)
+			EVBASE_ACQUIRE_LOCK(base, current_event_lock);
 		ev->ev_ncalls = ncalls;
 		ev->ev_pncalls = NULL;
 	}
 	event_queue_insert(base, ev, EVLIST_ACTIVE);
+	if (need_cur_lock)
+		EVBASE_RELEASE_LOCK(base, current_event_lock);
 }

 void