switch timeouts to a min heap; from Maxim Yegorushkin

svn:r467
Niels Provos 2007-11-03 18:04:53 +00:00
parent 65236aa857
commit 30ae40cc52
6 changed files with 166 additions and 47 deletions

ChangeLog

@@ -30,3 +30,4 @@ Changes in current version:
o Fix implementation of getaddrinfo on platforms that lack it; mainly, this will make Windows http.c work better. Original patch by Lubomir Marinov.
o Fix evport implementation: port_disassociate called on unassociated events resulting in bogus errors; more efficient memory management; from Trond Norbye and Prakash Sangappa
o support for hooks on rpc input and output; can be used to implement rpc independent processing such as compression or authentication.
+o use a min heap instead of a red-black tree for timeouts; as a result finding the min is an O(1) operation now; from Maxim Yegorushkin
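The claim in that entry is easy to see in miniature (this sketch is not part of the commit): with a binary heap kept in an array, the earliest deadline always sits at index 0, and inserting an element only has to sift it up past at most log2(n) parents, whereas the old red-black tree had to walk to its leftmost node to find the minimum. A self-contained toy sketch in C, using a stand-in struct item rather than libevent's struct event:

/* Standalone sketch, not libevent code. Build with: cc -o heapdemo heapdemo.c */
#include <stdio.h>
#include <sys/time.h>

struct item { struct timeval deadline; };

static int later(const struct item *a, const struct item *b)
{
    return timercmp(&a->deadline, &b->deadline, >);
}

int main(void)
{
    struct item *heap[16];
    struct item items[3] = { { { 5, 0 } }, { { 1, 0 } }, { { 3, 0 } } };
    unsigned n = 0, i;

    /* push: place each new element at the end and sift it up */
    for (i = 0; i < 3; i++) {
        unsigned hole = n++;
        while (hole && later(heap[(hole - 1) / 2], &items[i])) {
            heap[hole] = heap[(hole - 1) / 2];   /* pull the larger parent down */
            hole = (hole - 1) / 2;
        }
        heap[hole] = &items[i];
    }

    /* top: the earliest deadline is always heap[0], an O(1) lookup */
    printf("next timeout in %ld s\n", (long)heap[0]->deadline.tv_sec);
    return 0;
}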

Makefile.am

@@ -4,7 +4,7 @@ AUTOMAKE_OPTIONS = foreign no-dependencies
bin_SCRIPTS = event_rpcgen.py
EXTRA_DIST = acconfig.h event.h event-internal.h log.h evsignal.h evdns.3 \
-evrpc.h evrpc-internal.h \
+evrpc.h evrpc-internal.h min_heap.h \
event.3 \
kqueue.c epoll_sub.c epoll.c select.c rtsig.c poll.c signal.c \
evport.c devpoll.c event_rpcgen.py \

event-internal.h

@@ -31,6 +31,7 @@
extern "C" {
#endif
#include "min_heap.h"
#include "evsignal.h"
struct event_base {
@@ -51,7 +52,7 @@ struct event_base {
struct event_list eventqueue;
struct timeval event_tv;
-RB_HEAD(event_tree, event) timetree;
+struct min_heap timeheap;
};
#ifdef __cplusplus

event.c

@@ -131,20 +131,6 @@ static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);
-static int
-compare(struct event *a, struct event *b)
-{
-	if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
-		return (-1);
-	else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
-		return (1);
-	if (a < b)
-		return (-1);
-	else if (a > b)
-		return (1);
-	return (0);
-}
static void
detect_monotonic(void)
{
@@ -175,11 +161,6 @@ gettime(struct timeval *tp)
return (gettimeofday(tp, NULL));
}
-RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);
-RB_GENERATE(event_tree, event, ev_timeout_node, compare);
void *
event_init(void)
{
@@ -195,7 +176,7 @@ event_init(void)
detect_monotonic();
gettime(&base->event_tv);
-RB_INIT(&base->timetree);
+min_heap_ctor(&base->timeheap);
TAILQ_INIT(&base->eventqueue);
TAILQ_INIT(&base->sig.signalqueue);
base->sig.ev_signal_pair[0] = -1;
@@ -232,13 +213,13 @@ event_base_free(struct event_base *base)
if (base == current_base)
current_base = NULL;
/* XXX(niels) - check for internal events first */
assert(base);
if (base->evsel->dealloc != NULL)
base->evsel->dealloc(base, base->evbase);
for (i=0; i < base->nactivequeues; ++i)
assert(TAILQ_EMPTY(base->activequeues[i]));
-assert(RB_EMPTY(&base->timetree));
+assert(min_heap_empty(&base->timeheap));
for (i = 0; i < base->nactivequeues; ++i)
free(base->activequeues[i]);
@@ -546,6 +527,8 @@ event_set(struct event *ev, int fd, short events,
ev->ev_ncalls = 0;
ev->ev_pncalls = NULL;
+min_heap_elem_init(ev);
/* by default, we put new events into the middle priority */
if(current_base)
ev->ev_pri = current_base->nactivequeues/2;
@@ -637,6 +620,9 @@ event_add(struct event *ev, struct timeval *tv)
if (ev->ev_flags & EVLIST_TIMEOUT)
event_queue_remove(base, ev, EVLIST_TIMEOUT);
+else if (min_heap_reserve(&base->timeheap,
+    1 + min_heap_size(&base->timeheap)) == -1)
+	return (-1); /* ENOMEM == errno */
/* Check if it is active due to a timeout. Rescheduling
* this timeout before the callback can be executed
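The three lines added above reserve a heap slot before any event state changes, so the min_heap_push performed later in event_queue_insert can no longer fail with ENOMEM halfway through the operation. A hedged sketch of that reserve-then-push contract; schedule_timeout is a hypothetical helper, not code from the tree, and it assumes the libevent headers are on the include path:

#include <stdlib.h>
#include "min_heap.h"

/* Returns -1 (errno left as ENOMEM by realloc) before anything is modified;
 * after a successful reserve, the push below cannot fail. */
static int schedule_timeout(min_heap_t *heap, struct event *ev)
{
    if (min_heap_reserve(heap, 1 + min_heap_size(heap)) == -1)
        return -1;
    min_heap_push(heap, ev);   /* capacity is already there: always returns 0 */
    return 0;
}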
@@ -744,7 +730,7 @@ timeout_next(struct event_base *base, struct timeval **tv_p)
struct event *ev;
struct timeval *tv = *tv_p;
-if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
+if ((ev = min_heap_top(&base->timeheap)) == NULL) {
/* if no time-based events are active wait for I/O */
*tv_p = NULL;
return (0);
@@ -776,7 +762,8 @@ timeout_next(struct event_base *base, struct timeval **tv_p)
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
-struct event *ev;
+struct event **pev;
+unsigned int size;
struct timeval off;
if (use_monotonic)
@@ -797,26 +784,28 @@ timeout_correct(struct event_base *base, struct timeval *tv)
* We can modify the key element of the node without destroying
* the key, because we apply it to all in the right order.
*/
-RB_FOREACH(ev, event_tree, &base->timetree)
-timersub(&ev->ev_timeout, &off, &ev->ev_timeout);
+pev = base->timeheap.p;
+size = base->timeheap.n;
+for (; size-- > 0; ++pev) {
+	struct timeval *tv = &(**pev).ev_timeout;
+	timersub(tv, &off, tv);
+}
}
void
timeout_process(struct event_base *base)
{
struct timeval now;
-struct event *ev, *next;
+struct event *ev;
-if (RB_EMPTY(&base->timetree))
+if (min_heap_empty(&base->timeheap))
return;
gettime(&now);
-for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
+while ((ev = min_heap_top(&base->timeheap))) {
if (timercmp(&ev->ev_timeout, &now, >))
break;
-next = RB_NEXT(event_tree, &base->timetree, ev);
event_queue_remove(base, ev, EVLIST_TIMEOUT);
/* delete this event from the I/O queues */
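The comment in timeout_correct above leans on the fact that subtracting the same offset from every key leaves every timercmp comparison, and therefore the heap shape, unchanged, which is why the array can be rewritten in place without any sifting. A standalone toy check of that invariant (illustrative only, not libevent code):

#include <assert.h>
#include <sys/time.h>

int main(void)
{
    struct timeval a = { 7, 0 }, b = { 9, 500000 }, off = { 3, 250000 };
    int before = timercmp(&a, &b, >);

    /* subtract the same offset from both keys, as timeout_correct does */
    timersub(&a, &off, &a);
    timersub(&b, &off, &b);

    /* the relative order is unchanged, so the heap property still holds */
    assert(before == timercmp(&a, &b, >));
    return 0;
}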
@@ -855,7 +844,7 @@ event_queue_remove(struct event_base *base, struct event *ev, int queue)
TAILQ_REMOVE(&base->sig.signalqueue, ev, ev_signal_next);
break;
case EVLIST_TIMEOUT:
-RB_REMOVE(event_tree, &base->timetree, ev);
+min_heap_erase(&base->timeheap, ev);
break;
case EVLIST_INSERTED:
TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
@@ -897,8 +886,7 @@ event_queue_insert(struct event_base *base, struct event *ev, int queue)
TAILQ_INSERT_TAIL(&base->sig.signalqueue, ev, ev_signal_next);
break;
case EVLIST_TIMEOUT: {
-struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
-assert(tmp == NULL);
+min_heap_push(&base->timeheap, ev);
break;
}
case EVLIST_INSERTED:

event.h

@@ -188,25 +188,16 @@ struct { \
struct type **tqe_prev; /* address of previous next element */ \
}
#endif /* !TAILQ_ENTRY */
-#ifndef RB_ENTRY
-#define _EVENT_DEFINED_RBENTRY
-#define RB_ENTRY(type) \
-struct { \
-	struct type *rbe_left; /* left element */ \
-	struct type *rbe_right; /* right element */ \
-	struct type *rbe_parent; /* parent element */ \
-	int rbe_color; /* node color */ \
-}
-#endif /* !RB_ENTRY */
struct event_base;
struct event {
TAILQ_ENTRY (event) ev_next;
TAILQ_ENTRY (event) ev_active_next;
TAILQ_ENTRY (event) ev_signal_next;
-RB_ENTRY (event) ev_timeout_node;
+unsigned int min_heap_idx; /* for managing timeouts */
struct event_base *ev_base;
int ev_fd;
short ev_events;
short ev_ncalls;

min_heap.h (new file, 138 lines)

@@ -0,0 +1,138 @@
/*
* Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MIN_HEAP_H_
#define _MIN_HEAP_H_

#include "event.h"

typedef struct min_heap
{
    struct event** p;
    unsigned n, a;
} min_heap_t;

static inline void min_heap_ctor(min_heap_t* s);
static inline void min_heap_dtor(min_heap_t* s);
static inline void min_heap_elem_init(struct event* e);
static inline int min_heap_elem_greater(struct event *a, struct event *b);
static inline int min_heap_empty(min_heap_t* s);
static inline unsigned min_heap_size(min_heap_t* s);
static inline struct event* min_heap_top(min_heap_t* s);
static inline int min_heap_reserve(min_heap_t* s, unsigned n);
static inline int min_heap_push(min_heap_t* s, struct event* e);
static inline struct event* min_heap_pop(min_heap_t* s);
static inline int min_heap_erase(min_heap_t* s, struct event* e);
static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);

int min_heap_elem_greater(struct event *a, struct event *b)
{
    return timercmp(&a->ev_timeout, &b->ev_timeout, >);
}

void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
void min_heap_dtor(min_heap_t* s) { free(s->p); }
void min_heap_elem_init(struct event* e) { e->min_heap_idx = -1; }
int min_heap_empty(min_heap_t* s) { return 0u == s->n; }
unsigned min_heap_size(min_heap_t* s) { return s->n; }
struct event* min_heap_top(min_heap_t* s) { return s->n ? *s->p : 0; }

int min_heap_push(min_heap_t* s, struct event* e)
{
    if(min_heap_reserve(s, s->n + 1))
        return -1;
    min_heap_shift_up_(s, s->n++, e);
    return 0;
}

struct event* min_heap_pop(min_heap_t* s)
{
    if(s->n)
    {
        struct event* e = *s->p;
        e->min_heap_idx = -1;
        min_heap_shift_down_(s, 0u, s->p[--s->n]);
        return e;
    }
    return 0;
}

int min_heap_erase(min_heap_t* s, struct event* e)
{
    if(-1u != e->min_heap_idx)
    {
        min_heap_shift_down_(s, e->min_heap_idx, s->p[--s->n]);
        e->min_heap_idx = -1;
        return 0;
    }
    return -1;
}

int min_heap_reserve(min_heap_t* s, unsigned n)
{
    if(s->a < n)
    {
        struct event** p;
        unsigned a = s->a ? s->a * 2 : 8;
        if(a < n)
            a = n;
        if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
            return -1;
        s->p = p;
        s->a = a;
    }
    return 0;
}

void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
{
    unsigned parent = (hole_index - 1) / 2;
    while(hole_index && min_heap_elem_greater(s->p[parent], e))
    {
        (s->p[hole_index] = s->p[parent])->min_heap_idx = hole_index;
        hole_index = parent;
        parent = (hole_index - 1) / 2;
    }
    (s->p[hole_index] = e)->min_heap_idx = hole_index;
}

void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
{
    unsigned min_child = 2 * (hole_index + 1);
    while(min_child <= s->n)
    {
        min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
        if(!(min_heap_elem_greater(e, s->p[min_child])))
            break;
        (s->p[hole_index] = s->p[min_child])->min_heap_idx = hole_index;
        hole_index = min_child;
        min_child = 2 * (hole_index + 1);
    }
    min_heap_shift_up_(s, hole_index, e);
}

#endif /* _MIN_HEAP_H_ */
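A usage sketch of the API above; it is not part of the commit and assumes it is compiled inside the libevent source tree so that event.h (which declares ev_timeout and min_heap_idx) is on the include path. It walks the ctor/push/top/pop/dtor cycle that event.c now drives through event_queue_insert, timeout_next and timeout_process:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "min_heap.h"        /* pulls in event.h for struct event */

int main(void)
{
    min_heap_t heap;
    struct event ev[3];
    struct event *top;
    int i;

    min_heap_ctor(&heap);

    /* give each event a deadline; only the timeout fields matter here */
    memset(ev, 0, sizeof(ev));
    for (i = 0; i < 3; i++) {
        min_heap_elem_init(&ev[i]);
        ev[i].ev_timeout.tv_sec = 3 - i;          /* 3s, 2s, 1s */
        if (min_heap_push(&heap, &ev[i]) == -1)
            return 1;                             /* allocation failed */
    }

    /* the root is always the earliest deadline: O(1) to inspect */
    top = min_heap_top(&heap);
    printf("soonest: %ld s\n", (long)top->ev_timeout.tv_sec);

    /* draining in order costs O(log n) per pop */
    while ((top = min_heap_pop(&heap)) != NULL)
        printf("expired: %ld s\n", (long)top->ev_timeout.tv_sec);

    min_heap_dtor(&heap);
    return 0;
}

min_heap_pop is shown here for completeness; event.c itself removes expired events with min_heap_erase via event_queue_remove, as the timeout_process hunk above shows.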