Only process up to MAX_DEFERRED deferred_cbs at a time.

If threads queue callbacks while event_process_deferred_callbacks is
running, the loop may spin long enough to significantly skew timers.
A unit test stressing this behavior is also in this commit.
This commit is contained in:
Christopher Davis 2010-09-01 11:04:57 -07:00
parent 2447fe8886
commit 17a14f1af2
3 changed files with 109 additions and 16 deletions

View File

@ -206,10 +206,11 @@ bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
#define SCHEDULE_DEFERRED(bevp) \ #define SCHEDULE_DEFERRED(bevp) \
do { \ do { \
bufferevent_incref(&(bevp)->bev); \
event_deferred_cb_schedule( \ event_deferred_cb_schedule( \
event_base_get_deferred_cb_queue((bevp)->bev.ev_base), \ event_base_get_deferred_cb_queue((bevp)->bev.ev_base), \
&(bevp)->deferred); \ &(bevp)->deferred); \
} while (0); } while (0)
void void
@ -222,10 +223,8 @@ _bufferevent_run_readcb(struct bufferevent *bufev)
return; return;
if (p->options & BEV_OPT_DEFER_CALLBACKS) { if (p->options & BEV_OPT_DEFER_CALLBACKS) {
p->readcb_pending = 1; p->readcb_pending = 1;
if (!p->deferred.queued) { if (!p->deferred.queued)
bufferevent_incref(bufev);
SCHEDULE_DEFERRED(p); SCHEDULE_DEFERRED(p);
}
} else { } else {
bufev->readcb(bufev, bufev->cbarg); bufev->readcb(bufev, bufev->cbarg);
} }
@ -241,10 +240,8 @@ _bufferevent_run_writecb(struct bufferevent *bufev)
return; return;
if (p->options & BEV_OPT_DEFER_CALLBACKS) { if (p->options & BEV_OPT_DEFER_CALLBACKS) {
p->writecb_pending = 1; p->writecb_pending = 1;
if (!p->deferred.queued) { if (!p->deferred.queued)
bufferevent_incref(bufev);
SCHEDULE_DEFERRED(p); SCHEDULE_DEFERRED(p);
}
} else { } else {
bufev->writecb(bufev, bufev->cbarg); bufev->writecb(bufev, bufev->cbarg);
} }
@ -261,10 +258,8 @@ _bufferevent_run_eventcb(struct bufferevent *bufev, short what)
if (p->options & BEV_OPT_DEFER_CALLBACKS) { if (p->options & BEV_OPT_DEFER_CALLBACKS) {
p->eventcb_pending |= what; p->eventcb_pending |= what;
p->errno_pending = EVUTIL_SOCKET_ERROR(); p->errno_pending = EVUTIL_SOCKET_ERROR();
if (!p->deferred.queued) { if (!p->deferred.queued)
bufferevent_incref(bufev);
SCHEDULE_DEFERRED(p); SCHEDULE_DEFERRED(p);
}
} else { } else {
bufev->errorcb(bufev, what, bufev->cbarg); bufev->errorcb(bufev, what, bufev->cbarg);
} }

16
event.c
View File

@ -1285,9 +1285,10 @@ event_process_active_single_queue(struct event_base *base,
} }
/* /*
Process all the defered_cb entries in 'queue'. If *breakptr becomes set to Process up to MAX_DEFERRED of the defered_cb entries in 'queue'. If
1, stop. Requires that we start out holding the lock on 'queue'; releases *breakptr becomes set to 1, stop. Requires that we start out holding
the lock around 'queue' for each deferred_cb we process. the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
we process.
*/ */
static int static int
event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr) event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
@ -1295,6 +1296,7 @@ event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
int count = 0; int count = 0;
struct deferred_cb *cb; struct deferred_cb *cb;
#define MAX_DEFERRED 16
while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) { while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
cb->queued = 0; cb->queued = 0;
TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next); TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
@ -1302,12 +1304,14 @@ event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
UNLOCK_DEFERRED_QUEUE(queue); UNLOCK_DEFERRED_QUEUE(queue);
cb->cb(cb, cb->arg); cb->cb(cb, cb->arg);
++count;
if (*breakptr)
return -1;
LOCK_DEFERRED_QUEUE(queue); LOCK_DEFERRED_QUEUE(queue);
if (*breakptr)
return -1;
if (++count == MAX_DEFERRED)
break;
} }
#undef MAX_DEFERRED
return count; return count;
} }

View File

@ -33,6 +33,9 @@
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
#ifndef WIN32
#include <unistd.h>
#endif
#ifdef _EVENT_HAVE_PTHREADS #ifdef _EVENT_HAVE_PTHREADS
#include <pthread.h> #include <pthread.h>
@ -46,6 +49,7 @@
#include "event2/event_struct.h" #include "event2/event_struct.h"
#include "event2/thread.h" #include "event2/thread.h"
#include "evthread-internal.h" #include "evthread-internal.h"
#include "defer-internal.h"
#include "regress.h" #include "regress.h"
#include "tinytest_macros.h" #include "tinytest_macros.h"
@ -312,12 +316,102 @@ end:
; ;
} }
/* Number of deferred callbacks each loader thread queues. */
#define CB_COUNT 128
/* Number of threads concurrently loading the deferred-cb queue. */
#define QUEUE_THREAD_COUNT 8
/* Portable millisecond sleep: Sleep() takes ms on Windows; usleep()
 * takes microseconds on POSIX. */
#ifdef WIN32
#define SLEEP_MS(ms) Sleep(ms)
#else
#define SLEEP_MS(ms) usleep((ms) * 1000)
#endif
/* Per-thread state: the callbacks a thread will schedule and the
 * shared queue it schedules them onto. */
struct deferred_test_data {
struct deferred_cb cbs[CB_COUNT];
struct deferred_cb_queue *queue;
};
/* Wall-clock timestamps bracketing the 4-second test timer; the skew
 * test compares their difference against the requested timeout. */
static time_t timer_start = 0;
static time_t timer_end = 0;
/* Total deferred callbacks actually run (written from the dispatch
 * thread only; NOTE(review): loader threads never touch it). */
static unsigned callback_count = 0;
static THREAD_T load_threads[QUEUE_THREAD_COUNT];
static struct deferred_test_data deferred_data[QUEUE_THREAD_COUNT];
/* Body of every queued deferred_cb: burn roughly one millisecond so the
 * queue drains slowly, then bump the global completion tally. */
static void
deferred_callback(struct deferred_cb *cb, void *arg)
{
	SLEEP_MS(1);
	++callback_count;
}
/* Loader-thread entry point: steadily feed CB_COUNT deferred callbacks
 * onto the shared queue, pausing ~1ms between schedules so the load is
 * spread across the whole test window rather than front-loaded. */
static THREAD_FN
load_deferred_queue(void *arg)
{
	struct deferred_test_data *td = arg;
	size_t idx;

	for (idx = 0; idx < CB_COUNT; idx++) {
		event_deferred_cb_init(&td->cbs[idx], deferred_callback, NULL);
		event_deferred_cb_schedule(td->queue, &td->cbs[idx]);
		SLEEP_MS(1);
	}

	THREAD_RETURN();
}
/* Fires when the 4-second test timer expires; records the completion
 * time so the test can measure how much the timer was delayed. */
static void
timer_callback(evutil_socket_t fd, short what, void *arg)
{
	(void)fd;
	(void)what;
	(void)arg;
	timer_end = time(NULL);
}
/* Runs as an immediate one-shot event so the loader threads are spawned
 * from inside the dispatch loop, racing it from the very first tick. */
static void
start_threads_callback(evutil_socket_t fd, short what, void *arg)
{
	int t;

	for (t = 0; t < QUEUE_THREAD_COUNT; t++)
		THREAD_START(load_threads[t], load_deferred_queue,
		    &deferred_data[t]);
}
/* Regression test: heavy deferred-callback load from other threads must
 * not starve the event loop's timers.  QUEUE_THREAD_COUNT threads each
 * queue CB_COUNT slow (~1ms) deferred callbacks while a 4-second timer
 * is pending.  If event_process_deferred_callbacks() drained the queue
 * to exhaustion instead of capping at MAX_DEFERRED per pass, the timer
 * would fire late and the elapsed-time check below would fail.
 *
 * Fix over the committed version: removed the unused local
 * `struct event event_threads;` (declared but never referenced). */
static void
thread_deferred_cb_skew(void *arg)
{
	struct basic_test_data *data = arg;
	struct timeval tv_timer = {4, 0};
	struct deferred_cb_queue *queue;
	int i;

	queue = event_base_get_deferred_cb_queue(data->base);
	tt_assert(queue);

	/* All loader threads share the base's one deferred-cb queue. */
	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		deferred_data[i].queue = queue;

	timer_start = time(NULL);
	event_base_once(data->base, -1, EV_TIMEOUT, timer_callback, NULL,
	    &tv_timer);
	/* Zero timeout: start the loader threads from inside the loop so
	 * they race the dispatch loop, not the setup code. */
	event_base_once(data->base, -1, EV_TIMEOUT, start_threads_callback,
	    NULL, NULL);
	event_base_dispatch(data->base);

	TT_BLATHER(("callback count, %u", callback_count));
	/* The timer must fire on schedule despite the deferred-cb load. */
	tt_int_op(timer_end - timer_start, ==, 4);

end:
	/* Always reap the loader threads, even on tt_assert failure. */
	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		THREAD_JOIN(load_threads[i]);
}
#define TEST(name) \ #define TEST(name) \
{ #name, thread_##name, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE, \ { #name, thread_##name, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE, \
&basic_setup, NULL } &basic_setup, NULL }
struct testcase_t thread_testcases[] = { struct testcase_t thread_testcases[] = {
TEST(basic), TEST(basic),
TEST(conditions_simple), TEST(conditions_simple),
TEST(deferred_cb_skew),
END_OF_TESTCASES END_OF_TESTCASES
}; };