Don't allow internal events to starve lower-priority events.

This is exceptionally important with multithreaded stuff, where we use an event to notify the base that other events have been made active. If the activated events have a priority number greater than that of the notification event, it will starve them, and that's no good.

svn:r1149
Commit e3d82497c9 (parent ce146eb1cb)
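
For context, here is a minimal sketch of the scenario the message describes, written against the public libevent 2.0 API. The calls used (evthread_use_pthreads, event_base_priority_init, event_priority_set, event_active) are real libevent functions, but the two-priority setup and the threading scaffolding are illustrative assumptions, not part of this commit:

/* Sketch: a user event at the lowest priority, activated from another
 * thread.  A cross-thread event_active() wakes the loop through the base's
 * internal notification event; this commit keeps that internal event from
 * starving the priority-1 user event. */
#include <event2/event.h>
#include <event2/thread.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/time.h>

static void
user_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	(void)fd; (void)what;
	printf("low-priority user event ran\n");
	event_base_loopbreak(base);
}

static void *
activate_from_other_thread(void *arg)
{
	/* Goes through the base's internal th_notify event. */
	event_active((struct event *)arg, EV_READ, 1);
	return NULL;
}

int
main(void)
{
	struct event_base *base;
	struct event *ev;
	struct timeval tv = { 5, 0 };		/* fallback timeout */
	pthread_t tid;

	evthread_use_pthreads();		/* link with -levent_pthreads */
	base = event_base_new();
	event_base_priority_init(base, 2);	/* 0 = highest, 1 = lowest */

	ev = event_new(base, -1, 0, user_cb, base);
	event_priority_set(ev, 1);		/* lowest priority */
	event_add(ev, &tv);

	pthread_create(&tid, NULL, activate_from_other_thread, ev);
	event_base_dispatch(base);		/* must still reach user_cb */
	pthread_join(tid, NULL);

	event_free(ev);
	event_base_free(base);
	return 0;
}

With the event.c change below, the internal notification event no longer counts as real work, so the dispatch loop keeps scanning down to the priority-1 queue and user_cb still runs.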
ChangeLog
@@ -146,6 +146,7 @@ Changes in current version:
  o evbuffers can now be "frozen" to prevent operations at one or both ends.
  o Bufferevents now notice external attempts to add data to an inbuf or remove it from an outbuf, and stop them.
  o Fix parsing of queries where the encoded queries contained \r, \n or +
+ o Do not allow internal events to starve lower-priority events.
 
 Changes in 1.4.0:
  o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
event.c (62 lines changed)
@@ -604,26 +604,18 @@ event_signal_closure(struct event_base *base, struct event *ev)
 }
 
 /*
- * Active events are stored in priority queues.  Lower priorities are always
- * process before higher priorities.  Low priority events can starve high
- * priority ones.
+  Helper for event_process_active to process all the events in a single queue,
+  releasing the lock as we go.  This function requires that the lock be held
+  when it's invoked.  Returns -1 if we get a signal or an event_break that
+  means we should stop processing any active events now.  Otherwise returns
+  the number of non-internal events that we processed.
  */
-
-static void
-event_process_active(struct event_base *base)
+static int
+event_process_active_single_queue(struct event_base *base,
+    struct event_list *activeq)
 {
 	struct event *ev;
-	struct event_list *activeq = NULL;
-	int i;
+	int count = 0;
 
-	EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, th_base_lock);
-
-	for (i = 0; i < base->nactivequeues; ++i) {
-		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
-			activeq = base->activequeues[i];
-			break;
-		}
-	}
-
 	assert(activeq != NULL);
 
@@ -632,6 +624,8 @@ event_process_active(struct event_base *base)
 			event_queue_remove(base, ev, EVLIST_ACTIVE);
 		else
 			event_del_internal(ev);
+		if (!(ev->ev_flags & EVLIST_INTERNAL))
+			++count;
 
 		event_debug((
 			 "event_process_active: event: %p, %s%scall %p",
@@ -649,9 +643,39 @@ event_process_active(struct event_base *base)
 		(*ev->ev_callback)(
 			(int)ev->ev_fd, ev->ev_res, ev->ev_arg);
 		if (event_gotsig || base->event_break)
-			return;
+			return -1;
 		EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, th_base_lock);
 	}
+	return count;
+}
+
+/*
+ * Active events are stored in priority queues.  Lower priorities are always
+ * process before higher priorities.  Low priority events can starve high
+ * priority ones.
+ */
+
+static void
+event_process_active(struct event_base *base)
+{
+	struct event_list *activeq = NULL;
+	int i, c;
+
+	EVBASE_ACQUIRE_LOCK(base, EVTHREAD_WRITE, th_base_lock);
+
+	for (i = 0; i < base->nactivequeues; ++i) {
+		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
+			activeq = base->activequeues[i];
+			c = event_process_active_single_queue(base, activeq);
+			if (c < 0)
+				return;	/* already unlocked */
+			else if (c > 0)
+				break;	/* Processed a real event; do not
+					 * consider lower-priority events */
+			/* If we get here, all of the events we processed
+			 * were internal.  Continue. */
+		}
+	}
 
 	EVBASE_RELEASE_LOCK(base, EVTHREAD_WRITE, th_base_lock);
 }
@@ -1651,8 +1675,6 @@ evthread_make_base_notifiable(struct event_base *base)
 	/* we need to mark this as internal event */
 	base->th_notify.ev_flags |= EVLIST_INTERNAL;
 
-	/* XXX th_notify should have a very high priority. */
-
 	event_add(&base->th_notify, NULL);
 
 	return 0;
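
Taken together, the event.c hunks change event_process_active() so that the scan over the priority queues only stops once a queue has yielded at least one non-internal event; a queue that produced nothing but internal work (such as th_notify) no longer blocks lower-priority queues. Below is a standalone, simplified sketch of that rule; the toy_* names are invented for illustration, and the real code walks base->activequeues as TAILQ lists while holding the base lock, and can also abort with -1 on a break or signal:

#include <stdio.h>

#define NPRI 2	/* number of priority levels; 0 is the highest */
#define QLEN 4	/* slots per toy queue */

struct toy_event {
	int valid;		/* slot in use */
	int internal;		/* stands in for EVLIST_INTERNAL */
	const char *name;
};

struct toy_base {
	struct toy_event queues[NPRI][QLEN];
};

/* Process one queue; return how many non-internal events were handled
 * (mirrors the count returned by event_process_active_single_queue). */
static int
process_single_queue(struct toy_event *q)
{
	int i, count = 0;
	for (i = 0; i < QLEN; i++) {
		if (!q[i].valid)
			continue;
		printf("running %s (%s)\n", q[i].name,
		    q[i].internal ? "internal" : "user");
		q[i].valid = 0;
		if (!q[i].internal)
			++count;
	}
	return count;
}

static void
process_active(struct toy_base *base)
{
	int pri;
	for (pri = 0; pri < NPRI; pri++) {
		/* Before this commit the scan stopped at the first non-empty
		 * queue even if it held only internal events; now it keeps
		 * going until some real work has been done. */
		if (process_single_queue(base->queues[pri]) > 0)
			break;
	}
}

int
main(void)
{
	struct toy_base base = {{
		{ {1, 1, "th_notify"} },			/* priority 0: internal only */
		{ {1, 0, "user timer"}, {1, 0, "user io"} },	/* priority 1: user events */
	}};
	process_active(&base);	/* with the fix, the user events still run */
	return 0;
}

This is presumably also why the "XXX th_notify should have a very high priority" note is dropped in the last hunk: once internal events no longer count as progress, the notification event's priority can no longer decide whether user events get to run.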