Make event_del(E) block while E is running in another thread.
This gives you the property that once you have called event_del(E), you know that E is no longer running or pending or active at all, and so it is safe to delete the resource used by E's callback.

svn:r1341
commit 6b4b77a265
parent d866f05585
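For callers, the guarantee reads roughly like the sketch below. This is an illustrative example, not code from this commit: the `conn` structure and the `read_cb`/`teardown_thread` names are invented for illustration, and it assumes libevent was built with thread support and locking enabled through the evthread API (for example evthread_use_pthreads() in later releases, or the evthread lock-callback setup available around 2.0.2-alpha).

/* Minimal sketch (not part of this commit): a teardown thread deletes an
 * event and then frees the state its callback uses.  The names `conn`,
 * `read_cb`, and `teardown_thread` are made up for illustration; thread
 * support and evthread locking are assumed to be enabled. */
#include <event2/event.h>
#include <stdlib.h>

struct conn {
	struct event *read_ev;  /* created elsewhere with event_new(base, fd,
	                         * EV_READ|EV_PERSIST, read_cb, conn) */
	void *buffer;           /* resource the callback works on */
};

static void
read_cb(evutil_socket_t fd, short what, void *arg)
{
	struct conn *c = arg;
	(void)fd; (void)what;
	/* ... reads into c->buffer while the callback runs ... */
	(void)c;
}

/* Runs in a thread other than the one inside event_base_dispatch(). */
static void *
teardown_thread(void *arg)
{
	struct conn *c = arg;

	/* With this change, event_del() blocks while read_cb() is still
	 * executing in the dispatch thread, so after it returns the event
	 * is neither pending, active, nor running... */
	event_del(c->read_ev);

	/* ...which makes it safe to free everything the callback touches. */
	event_free(c->read_ev);
	free(c->buffer);
	free(c);
	return NULL;
}

Before this commit, event_del() could return while read_cb() was still running in the dispatch thread, so the free() calls above could race with the callback; that is the race condition this change removes.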
ChangeLog (1 line changed)
@@ -42,6 +42,7 @@ Changes in 2.0.2-alpha:
  o Allow C identifiers as struct names; allow multiple comments in .rpc files; from Zack Weinberg
  o Mitigate a race condition when using socket bufferevents in multiple threads.
  o Use AC_SEARCH_LIBS, not AC_CHECK_LIB to avoid needless library use.
+ o Do not allow event_del(ev) to return while that event's callback is executing in another thread. This fixes a nasty race condition.
 
 
 Changes in 2.0.1-alpha:
event-internal.h (6 lines changed)
@@ -133,6 +133,9 @@ struct event_base {
 	struct event_list **activequeues;
 	int nactivequeues;
 
+	/** The event whose callback is executing right now */
+	struct event *current_event;
+
 	/** Deferred callback management: a list of deferred callbacks to
 	 * run active the active events. */
 	TAILQ_HEAD (deferred_cb_list, deferred_cb) deferred_cb_list;
@@ -159,6 +162,9 @@ struct event_base {
 	unsigned long th_owner_id;
 	/** A lock to prevent conflicting accesses to this event_base */
 	void *th_base_lock;
+	/** A lock to prevent event_del from deleting an event while its
+	 * callback is executing. */
+	void *current_event_lock;
 #endif
 
 #ifdef WIN32
event.c (27 lines changed)
@@ -301,6 +301,7 @@ event_base_new_with_config(struct event_config *cfg)
 	if (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK)) {
 		int r;
 		EVTHREAD_ALLOC_LOCK(base->th_base_lock);
+		EVTHREAD_ALLOC_LOCK(base->current_event_lock);
 		r = evthread_make_base_notifiable(base);
 		if (r<0) {
 			event_base_free(base);
@@ -382,6 +383,7 @@ event_base_free(struct event_base *base)
 	evmap_signal_clear(&base->sigmap);
 
 	EVTHREAD_FREE_LOCK(base->th_base_lock);
+	EVTHREAD_FREE_LOCK(base->current_event_lock);
 
 	mm_free(base);
 }
@@ -646,6 +648,10 @@ event_process_active_single_queue(struct event_base *base,
 		    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
 		    ev->ev_callback));
 
+		base->current_event = ev;
+
+		EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, current_event_lock);
+
 		EVBASE_RELEASE_LOCK(base,
 		    EVTHREAD_WRITE, th_base_lock);
 
@@ -663,9 +669,12 @@ event_process_active_single_queue(struct event_base *base,
 			break;
 		}
 
+		EVBASE_RELEASE_LOCK(base, EVTHREAD_WRITE, current_event_lock);
+		EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, th_base_lock);
+		base->current_event = NULL;
+
 		if (base->event_break)
 			return -1;
-		EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, th_base_lock);
 	}
 	return count;
 }
@@ -711,7 +720,7 @@ event_process_active(struct event_base *base)
 		activeq = base->activequeues[i];
 		c = event_process_active_single_queue(base, activeq);
 		if (c < 0)
-			return;	/* already unlocked */
+			goto unlock;
 		else if (c > 0)
 			break;	/* Processed a real event; do not
 				 * consider lower-priority events */
@@ -722,6 +731,7 @@ event_process_active(struct event_base *base)
 
 	event_process_deferred_callbacks(base);
 
+unlock:
 	EVBASE_RELEASE_LOCK(base, EVTHREAD_WRITE, th_base_lock);
 }
 
@@ -1267,11 +1277,13 @@ event_del(struct event *ev)
 	return (res);
 }
 
+/* Helper for event_del: always called with th_base_lock held. */
 static inline int
 event_del_internal(struct event *ev)
 {
 	struct event_base *base;
 	int res = 0;
+	int need_cur_lock;
 
 	event_debug(("event_del: %p, callback %p",
 		 ev, ev->ev_callback));
@@ -1280,7 +1292,15 @@ event_del_internal(struct event *ev)
 	if (ev->ev_base == NULL)
 		return (-1);
 
+	/* If the main thread is currently executing this event's callback,
+	 * and we are not the main thread, then we want to wait until the
+	 * callback is done before we start removing the event. That way,
+	 * when this function returns, it will be safe to free the
+	 * user-supplied argument. */
 	base = ev->ev_base;
+	need_cur_lock = (base->current_event == ev);
+	if (need_cur_lock)
+		EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, current_event_lock);
 
 	assert(!(ev->ev_flags & ~EVLIST_ALL));
 
@@ -1310,6 +1330,9 @@ event_del_internal(struct event *ev)
 	if (res != -1 && !EVBASE_IN_THREAD(base))
 		evthread_notify_base(base);
 
+	if (need_cur_lock)
+		EVBASE_RELEASE_LOCK(base, EVTHREAD_WRITE, current_event_lock);
+
 	return (res);
 }
 