mirror of https://github.com/cuberite/libevent.git
synced 2025-09-25 12:23:13 -04:00

Merge remote-tracking branch 'github/21_faster_timeout_adj'

commit 24e2480520

event.c (45 lines changed)
@@ -133,6 +133,8 @@ static inline int event_del_internal(struct event *ev);
 static void event_queue_insert(struct event_base *, struct event *, int);
 static void event_queue_remove(struct event_base *, struct event *, int);
+static void event_queue_reinsert(struct event_base *, struct event *ev, int);
+
 static int event_haveevents(struct event_base *);

 static int event_process_active(struct event_base *);
@@ -146,6 +148,9 @@ static inline void event_persist_closure(struct event_base *, struct event *ev);
 static int evthread_notify_base(struct event_base *base);

+static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
+    struct event *ev);
+
 #ifndef _EVENT_DISABLE_DEBUG_MODE
 /* These functions implement a hashtable of which 'struct event *' structures
  * have been setup or added.  We don't want to trust the content of the struct
@@ -2048,17 +2053,6 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 		if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
 			ev->ev_io_timeout = *tv;

-		/*
-		 * we already reserved memory above for the case where we
-		 * are not replacing an existing timeout.
-		 */
-		if (ev->ev_flags & EVLIST_TIMEOUT) {
-			/* XXX I believe this is needless. */
-			if (min_heap_elt_is_top(ev))
-				notify = 1;
-			event_queue_remove(base, ev, EVLIST_TIMEOUT);
-		}
-
 		/* Check if it is active due to a timeout.  Rescheduling
 		 * this timeout before the callback can be executed
 		 * removes it from the active list. */
@@ -2096,7 +2090,8 @@ event_add_internal(struct event *ev, const struct timeval *tv,
 		    "event_add: timeout in %d seconds, call %p",
 		    (int)tv->tv_sec, ev->ev_callback));

-		event_queue_insert(base, ev, EVLIST_TIMEOUT);
+		event_queue_reinsert(base, ev, EVLIST_TIMEOUT);

 		if (common_timeout) {
 			struct common_timeout_list *ctl =
 			    get_common_timeout_list(base, &ev->ev_timeout);
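The one-line swap above is the heart of the patch: together with the block deleted in the -2048 hunk, it means rescheduling a timeout that is already pending no longer goes through a full remove-and-insert. For context (not part of the diff), this is the everyday pattern being optimized; a minimal usage sketch against the public API:

	#include <sys/time.h>
	#include <event2/event.h>

	/* Sketch: re-adding a pending event replaces its timeout.
	 * With event_queue_reinsert(), the timeout-heap entry is now
	 * adjusted in place instead of being erased and pushed again. */
	static void
	reschedule(struct event *timer)
	{
		struct timeval tv = { 5, 0 };	/* fire 5 seconds from now */
		event_add(timer, &tv);		/* replaces the old timeout */
	}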
@@ -2484,6 +2479,32 @@ event_queue_remove(struct event_base *base, struct event *ev, int queue)
 	}
 }

+/* Remove and reinsert 'ev' into the appropriate queue.  Only EVLIST_TIMEOUT
+ * is supported. */
+static void
+event_queue_reinsert(struct event_base *base, struct event *ev, int queue)
+{
+	if (!(ev->ev_flags & queue)) {
+		event_queue_insert(base, ev, queue);
+		return;
+	}
+
+	if (queue != EVLIST_TIMEOUT) {
+		event_errx(1, "%s: Unsupported queue %x", __func__, queue);
+		return; /* unreached */
+	}
+
+	if (is_common_timeout(&ev->ev_timeout, base)) {
+		struct common_timeout_list *ctl =
+		    get_common_timeout_list(base, &ev->ev_timeout);
+		TAILQ_REMOVE(&ctl->events, ev,
+		    ev_timeout_pos.ev_next_with_common_timeout);
+		insert_common_timeout_inorder(ctl, ev);
+	} else {
+		min_heap_adjust(&base->timeheap, ev);
+	}
+}
+
 /* Add 'ev' to the common timeout list in 'ev'. */
 static void
 insert_common_timeout_inorder(struct common_timeout_list *ctl,
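Note the asymmetry inside event_queue_reinsert(): a common-timeout entry is still physically removed from and re-inserted into its per-duration list (cheap in practice, since events sharing a duration expire in FIFO order, so the ordered insert normally stops at the tail of the list). The structural win is in the else branch, where min_heap_adjust() repairs the heap in place instead of erasing the element and pushing it afresh.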
minheap-internal.h

@@ -46,21 +46,20 @@ static inline void min_heap_ctor(min_heap_t* s);
 static inline void min_heap_dtor(min_heap_t* s);
 static inline void min_heap_elem_init(struct event* e);
 static inline int min_heap_elt_is_top(const struct event *e);
-static inline int min_heap_elem_greater(struct event *a, struct event *b);
 static inline int min_heap_empty(min_heap_t* s);
 static inline unsigned min_heap_size(min_heap_t* s);
 static inline struct event* min_heap_top(min_heap_t* s);
 static inline int min_heap_reserve(min_heap_t* s, unsigned n);
 static inline int min_heap_push(min_heap_t* s, struct event* e);
 static inline struct event* min_heap_pop(min_heap_t* s);
+static inline int min_heap_adjust(min_heap_t *s, struct event* e);
 static inline int min_heap_erase(min_heap_t* s, struct event* e);
 static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e);
 static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);

-int min_heap_elem_greater(struct event *a, struct event *b)
-{
-    return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
-}
+#define min_heap_elem_greater(a, b) \
+	(evutil_timercmp(&(a)->ev_timeout, &(b)->ev_timeout, >))

 void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
 void min_heap_dtor(min_heap_t* s) { if (s->p) mm_free(s->p); }
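Turning min_heap_elem_greater into a macro removes a function call from every heap comparison. For reference, evutil_timercmp is itself a macro from event2/util.h, so the comparison now expands fully inline; its definition is along these lines (quoted from memory, not part of this diff):

	#define evutil_timercmp(tvp, uvp, cmp)				\
		(((tvp)->tv_sec == (uvp)->tv_sec) ?			\
		 ((tvp)->tv_usec cmp (uvp)->tv_usec) :			\
		 ((tvp)->tv_sec cmp (uvp)->tv_sec))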
@@ -106,7 +105,7 @@ int min_heap_erase(min_heap_t* s, struct event* e)
 	   to be less than the parent, it can't need to shift both up and
 	   down. */
 	if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
-	    min_heap_shift_up_(s, e->ev_timeout_pos.min_heap_idx, last);
+	    min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, last);
 	else
 	    min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
 	e->ev_timeout_pos.min_heap_idx = -1;
@@ -115,6 +114,23 @@ int min_heap_erase(min_heap_t* s, struct event* e)
 	return -1;
 }

+int min_heap_adjust(min_heap_t *s, struct event *e)
+{
+	if (-1 == e->ev_timeout_pos.min_heap_idx) {
+		return min_heap_push(s, e);
+	} else {
+		unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
+		/* The position of e has changed; we shift it up or down
+		 * as needed.  We can't need to do both. */
+		if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], e))
+			min_heap_shift_up_unconditional_(s, e->ev_timeout_pos.min_heap_idx, e);
+		else
+			min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, e);
+		return 0;
+	}
+	return -1;
+}
+
 int min_heap_reserve(min_heap_t* s, unsigned n)
 {
 	if (s->a < n)
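To see why a single adjustment beats erase-plus-push, here is a minimal self-contained toy (hypothetical names, plain int keys rather than libevent's timeval-keyed heap): when one element's key changes, a single sift from its current index, up or down but never both, restores the heap property in O(log n), where an erase followed by a push would walk the heap twice.

	#include <stdio.h>

	#define N 8
	static int h[N] = { 1, 3, 2, 7, 4, 9, 5, 8 };	/* valid min-heap */
	static int n = N;

	static void sift_down(int i)
	{
		for (;;) {
			int c = 2 * i + 1;			/* left child */
			if (c >= n)
				break;
			if (c + 1 < n && h[c + 1] < h[c])
				c++;				/* smaller child */
			if (h[i] <= h[c])
				break;
			int t = h[i]; h[i] = h[c]; h[c] = t;
			i = c;
		}
	}

	static void sift_up(int i)
	{
		while (i > 0 && h[(i - 1) / 2] > h[i]) {
			int p = (i - 1) / 2;
			int t = h[i]; h[i] = h[p]; h[p] = t;
			i = p;
		}
	}

	/* Mirrors min_heap_adjust(): compare against the parent, then
	 * sift in exactly one direction from the current index. */
	static void heap_adjust(int i, int newkey)
	{
		h[i] = newkey;
		if (i > 0 && h[(i - 1) / 2] > h[i])
			sift_up(i);
		else
			sift_down(i);
	}

	int main(void)
	{
		heap_adjust(3, 0);	/* "reschedule" index 3 earlier */
		for (int i = 0; i < n; i++)
			printf("%d ", h[i]);	/* prints: 0 1 2 3 4 9 5 8 */
		printf("\n");
		return 0;
	}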
@@ -131,6 +147,18 @@ int min_heap_reserve(min_heap_t* s, unsigned n)
 	return 0;
 }

+void min_heap_shift_up_unconditional_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+    unsigned parent = (hole_index - 1) / 2;
+    do
+    {
+	(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
+	hole_index = parent;
+	parent = (hole_index - 1) / 2;
+    } while (hole_index && min_heap_elem_greater(s->p[parent], e));
+    (s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
+}
+
 void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
 {
     unsigned parent = (hole_index - 1) / 2;
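The new helper differs from the existing min_heap_shift_up_ (whose body is truncated above) only in loop shape: a do/while instead of a while. Both call sites, min_heap_erase() and min_heap_adjust(), reach it only after checking min_heap_idx > 0 and min_heap_elem_greater(parent, e), so the first upward move is already known to be needed, and the do/while form skips the comparison a plain while loop would redundantly repeat.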