On win32, use a hashtable to map sockets to events rather than using an array.

svn:r988
Nick Mathewson 2009-01-09 13:42:21 +00:00
parent 8f5777e692
commit 55bcd7d2f0
5 changed files with 614 additions and 41 deletions


@@ -58,8 +58,22 @@ struct eventop {
enum event_method_feature features;
};
- /* used to map multiple events to the same underlying identifier */
- struct event_map {
+ #ifdef WIN32
+ #define EVMAP_USE_HT
+ #endif
+ #ifdef EVMAP_USE_HT
+ #include "ht-internal.h"
+ struct event_map_entry;
+ HT_HEAD(event_io_map, event_map_entry);
+ #else
+ #define event_io_map event_signal_map
+ #endif
+ /* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
+ defined, this is also used as event_io_map, to map fds to a list of events.
+ */
+ struct event_signal_map {
void **entries;
int nentries;
};
@@ -85,10 +99,10 @@ struct event_base {
int nactivequeues;
/* for mapping io activity to events */
- struct event_map io;
+ struct event_io_map io;
/* for mapping signal activity to events */
- struct event_map sigmap;
+ struct event_signal_map sigmap;
struct event_list eventqueue;
struct timeval event_tv;


@@ -360,8 +360,8 @@ event_base_free(struct event_base *base)
assert(TAILQ_EMPTY(&base->eventqueue));
- evmap_clear(&base->io);
- evmap_clear(&base->sigmap);
+ evmap_io_clear(&base->io);
+ evmap_signal_clear(&base->sigmap);
mm_free(base);
}
@@ -392,8 +392,8 @@ event_reinit(struct event_base *base)
event_errx(1, "%s: could not reinitialize event mechanism",
__func__);
- evmap_clear(&base->io);
- evmap_clear(&base->sigmap);
+ evmap_io_clear(&base->io);
+ evmap_signal_clear(&base->sigmap);
TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
if (ev->ev_events & (EV_READ|EV_WRITE)) {

evmap.c (155 changed lines)

@@ -56,16 +56,102 @@
#include "evmap.h"
#include "mm-internal.h"
/** An entry for an evmap_io list: notes all the events that want to read or
write on a given fd, and the number of each.
*/
struct evmap_io {
struct event_list events;
unsigned int nread;
unsigned int nwrite;
};
/* An entry for an evmap_signal list: notes all the events that want to know
when a signal triggers. */
struct evmap_signal {
struct event_list events;
};
/* On some platforms, fds start at 0 and increment by 1 as they are
allocated, and old numbers get reused. For these platforms, we
implement io maps just like signal maps: as an array of pointers to
struct evmap_io. But on other platforms (windows), sockets are not
0-indexed, not necessarily consecutive, and not necessarily reused.
There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
HT_ENTRY(event_map_entry) map_node;
evutil_socket_t fd;
union { /* This is a union in case we need to make more things that can
be in the hashtable. */
struct evmap_io evmap_io;
} ent;
};
static inline unsigned
hashsocket(struct event_map_entry *e)
{
return (unsigned) e->fd;
}
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
return e1->fd == e2->fd;
}
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket);
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
0.5, mm_malloc, mm_realloc, mm_free);
#define GET_IO_SLOT(x, map, slot, type) \
do { \
struct event_map_entry _key, *_ent; \
_key.fd = slot; \
_ent = HT_FIND(event_io_map, map, &_key); \
(x) = _ent ? &_ent->ent.type : NULL; \
} while (0)
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor) \
do { \
struct event_map_entry _key, *_ent; \
_key.fd = slot; \
_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
event_map_entry, &_key, ptr, \
{ \
_ent = *ptr; \
}, \
{ \
_ent = mm_malloc(sizeof(struct event_map_entry)); \
assert(_ent); \
_ent->fd = slot; \
(ctor)(&_ent->ent.type); \
_HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
}); \
(x) = &_ent->ent.type; \
} while (0)
void evmap_io_clear(struct event_io_map *ctx)
{
struct event_map_entry **ent, **next, *this;
for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
this = *ent;
next = HT_NEXT_RMV(event_io_map, ctx, ent);
mm_free(this);
}
}
#endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
'struct type *' corresponding to the fd or signal 'slot'. Set 'x' to NULL
if there are no entries for 'slot'. Does no bounds-checking. */
- #define GET_SLOT(x, map, slot, type) \
+ #define GET_SIGNAL_SLOT(x, map, slot, type) \
(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
by allocating enough memory for a 'struct type', and initializing the new
value by calling the function 'ctor' on it.
*/
- #define GET_SLOT_AND_CTOR(x, map, slot, type, ctor) \
+ #define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor) \
do { \
if ((map)->entries[slot] == NULL) { \
assert(ctor != NULL); \
@@ -74,13 +160,27 @@
(ctor)((struct type *)(map)->entries[slot]); \
} \
(x) = (struct type *)((map)->entries[slot]); \
- } while (0) \
+ } while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor) \
GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor)
void
evmap_io_clear(struct event_io_map* ctx)
{
evmap_signal_clear(ctx);
}
#endif
/** Expand 'map' with new entries of width 'msize' until it is big enough
to store a value in 'slot'.
*/
static int
- evmap_make_space(struct event_map *map, int slot, int msize)
+ evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
if (map->nentries <= slot) {
int nentries = map->nentries ? map->nentries : 32;
@@ -103,9 +203,8 @@ evmap_make_space(struct event_map *map, int slot, int msize)
return (0);
}
void
- evmap_clear(struct event_map *ctx)
+ evmap_signal_clear(struct event_signal_map *ctx)
{
ctx->nentries = 0;
if (ctx->entries != NULL) {
@@ -119,16 +218,8 @@ evmap_clear(struct event_map *ctx)
}
}
- /* code specific to file descriptors */
- /** An entry for an evmap_io list: notes all the events that want to read or
- write on a given fd, and the number of each.
- */
- struct evmap_io {
- struct event_list events;
- unsigned int nread;
- unsigned int nwrite;
- };
+ /* code specific to file descriptors */
/** Constructor for struct evmap_io */
static void
@@ -144,7 +235,7 @@ int
evmap_io_add(struct event_base *base, int fd, struct event *ev)
{
const struct eventop *evsel = base->evsel;
- struct event_map *io = &base->io;
+ struct event_io_map *io = &base->io;
struct evmap_io *ctx = NULL;
int nread, nwrite;
short res = 0, old = 0;
@@ -153,11 +244,13 @@ evmap_io_add(struct event_base *base, int fd, struct event *ev)
/*XXX(nickm) Should we assert that ev is not already inserted, or should
* we make this function idempotent? */
+ #ifndef EVMAP_USE_HT
if (fd >= io->nentries) {
if (evmap_make_space(io, fd, sizeof(struct evmap_io)) == -1)
return (-1);
}
- GET_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init);
+ #endif
+ GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init);
nread = ctx->nread;
nwrite = ctx->nwrite;
@@ -196,7 +289,7 @@ int
evmap_io_del(struct event_base *base, int fd, struct event *ev)
{
const struct eventop *evsel = base->evsel;
- struct event_map *io = &base->io;
+ struct event_io_map *io = &base->io;
struct evmap_io *ctx;
int nread, nwrite;
short res = 0, old = 0;
@@ -205,10 +298,12 @@ evmap_io_del(struct event_base *base, int fd, struct event *ev)
/*XXX(nickm) Should we assert that ev is not already inserted, or should
* we make this function idempotent? */
+ #ifndef EVMAP_USE_HT
if (fd >= io->nentries)
return (-1);
+ #endif
- GET_SLOT(ctx, io, fd, evmap_io);
+ GET_IO_SLOT(ctx, io, fd, evmap_io);
nread = ctx->nread;
nwrite = ctx->nwrite;
@@ -244,12 +339,14 @@ evmap_io_del(struct event_base *base, int fd, struct event *ev)
void
evmap_io_active(struct event_base *base, int fd, short events)
{
- struct event_map *io = &base->io;
+ struct event_io_map *io = &base->io;
struct evmap_io *ctx;
struct event *ev;
+ #ifndef EVMAP_USE_HT
assert(fd < io->nentries);
- GET_SLOT(ctx, io, fd, evmap_io);
+ #endif
+ GET_IO_SLOT(ctx, io, fd, evmap_io);
assert(ctx);
TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
@@ -260,10 +357,6 @@ evmap_io_active(struct event_base *base, int fd, short events)
/* code specific to signals */
- struct evmap_signal {
- struct event_list events;
- };
static void
evmap_signal_init(struct evmap_signal *entry)
{
@@ -275,7 +368,7 @@ int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
const struct eventop *evsel = base->evsigsel;
- struct event_map *map = &base->sigmap;
+ struct event_signal_map *map = &base->sigmap;
struct evmap_signal *ctx = NULL;
if (sig >= map->nentries) {
@@ -283,7 +376,7 @@ evmap_signal_add(struct event_base *base, int sig, struct event *ev)
map, sig, sizeof(struct evmap_signal)) == -1)
return (-1);
}
- GET_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init);
+ GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init);
if (TAILQ_EMPTY(&ctx->events)) {
if (evsel->add(base, EVENT_SIGNAL(ev), 0, EV_SIGNAL) == -1)
@@ -299,13 +392,13 @@ int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
const struct eventop *evsel = base->evsigsel;
- struct event_map *map = &base->sigmap;
+ struct event_signal_map *map = &base->sigmap;
struct evmap_signal *ctx;
if (sig >= map->nentries)
return (-1);
- GET_SLOT(ctx, map, sig, evmap_signal);
+ GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
if (evsel->del(base, EVENT_SIGNAL(ev), 0, EV_SIGNAL) == -1)
@@ -320,12 +413,12 @@ evmap_signal_del(struct event_base *base, int sig, struct event *ev)
void
evmap_signal_active(struct event_base *base, int sig, int ncalls)
{
- struct event_map *map = &base->sigmap;
+ struct event_signal_map *map = &base->sigmap;
struct evmap_signal *ctx;
struct event *ev;
assert(sig < map->nentries);
- GET_SLOT(ctx, map, sig, evmap_signal);
+ GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
event_active(ev, EV_SIGNAL, ncalls);


@@ -34,7 +34,6 @@
* inside libevent.
**/
- struct event_map;
struct event_base;
struct event;
@@ -42,7 +41,8 @@ struct event;
@param ctx the map to clear.
*/
- void evmap_clear(struct event_map* ctx);
+ void evmap_io_clear(struct event_io_map* ctx);
+ void evmap_signal_clear(struct event_signal_map* ctx);
/** Add an IO event (some combination of EV_READ or EV_WRITE) to an
event_base's list of events on a given file descriptor, and tell the

ht-internal.h (new file, 466 lines)

@@ -0,0 +1,466 @@
/* Copyright 2002 Christopher Clark */
/* Copyright 2005-9 Nick Mathewson */
/* See license at end. */
/* Based on ideas by Christopher Clark and interfaces from Niels Provos. */
#ifndef _EVENT_HT_H
#define _EVENT_HT_H
#define HT_HEAD(name, type) \
struct name { \
/* The hash table itself. */ \
struct type **hth_table; \
/* How long is the hash table? */ \
unsigned hth_table_length; \
/* How many elements does the table contain? */ \
unsigned hth_n_entries; \
/* How many elements will we allow in the table before resizing it? */ \
unsigned hth_load_limit; \
/* Position of hth_table_length in the primes table. */ \
int hth_prime_idx; \
}
#define HT_INITIALIZER() \
{ NULL, 0, 0, 0, -1 }
#define HT_ENTRY(type) \
struct { \
struct type *hte_next; \
unsigned hte_hash; \
}
#define HT_EMPTY(head) \
((head)->hth_n_entries == 0)
/* Helper: alias for the bucket containing 'elm'. */
#define _HT_BUCKET(head, field, elm) \
((head)->hth_table[elm->field.hte_hash % head->hth_table_length])
/* How many elements in 'head'? */
#define HT_SIZE(head) \
((head)->hth_n_entries)
#define HT_FIND(name, head, elm) name##_HT_FIND((head), (elm))
#define HT_INSERT(name, head, elm) name##_HT_INSERT((head), (elm))
#define HT_REPLACE(name, head, elm) name##_HT_REPLACE((head), (elm))
#define HT_REMOVE(name, head, elm) name##_HT_REMOVE((head), (elm))
#define HT_START(name, head) name##_HT_START(head)
#define HT_NEXT(name, head, elm) name##_HT_NEXT((head), (elm))
#define HT_NEXT_RMV(name, head, elm) name##_HT_NEXT_RMV((head), (elm))
#define HT_CLEAR(name, head) name##_HT_CLEAR(head)
#define HT_INIT(name, head) name##_HT_INIT(head)
/* Helper: */
static inline unsigned
ht_improve_hash(unsigned h)
{
/* Aim to protect against poor hash functions by adding logic here
* - logic taken from java 1.4 hashtable source */
h += ~(h << 9);
h ^= ((h >> 14) | (h << 18)); /* >>> */
h += (h << 4);
h ^= ((h >> 10) | (h << 22)); /* >>> */
return h;
}
#if 0
/** Basic string hash function, from Java standard String.hashCode(). */
static inline unsigned
ht_string_hash(const char *s)
{
unsigned h = 0;
int m = 1;
while (*s) {
h += ((signed char)*s++)*m;
m = (m<<5)-1; /* m *= 31 */
}
return h;
}
#endif
/** Basic string hash function, from Python's str.__hash__() */
static inline unsigned
ht_string_hash(const char *s)
{
unsigned h;
const unsigned char *cp = (const unsigned char *)s;
h = *cp << 7;
while (*cp) {
h = (1000003*h) ^ *cp++;
}
/* This conversion truncates the length of the string, but that's ok. */
h ^= (unsigned)(cp-(const unsigned char*)s);
return h;
}
#define _HT_SET_HASH(elm, field, hashfn) \
(elm)->field.hte_hash = hashfn(elm)
#define HT_FOREACH(x, name, head) \
for ((x) = HT_START(name, head); \
(x) != NULL; \
(x) = HT_NEXT(name, head, x))
#define HT_PROTOTYPE(name, type, field, hashfn, eqfn) \
int name##_HT_GROW(struct name *ht, unsigned min_capacity); \
void name##_HT_CLEAR(struct name *ht); \
int _##name##_HT_REP_IS_BAD(const struct name *ht); \
static inline void \
name##_HT_INIT(struct name *head) { \
head->hth_table_length = 0; \
head->hth_table = NULL; \
head->hth_n_entries = 0; \
head->hth_load_limit = 0; \
head->hth_prime_idx = -1; \
} \
/* Helper: returns a pointer to the right location in the table \
* 'head' to find or insert the element 'elm'. */ \
static inline struct type ** \
_##name##_HT_FIND_P(struct name *head, struct type *elm) \
{ \
struct type **p; \
if (!head->hth_table) \
return NULL; \
p = &_HT_BUCKET(head, field, elm); \
while (*p) { \
if (eqfn(*p, elm)) \
return p; \
p = &(*p)->field.hte_next; \
} \
return p; \
} \
/* Return a pointer to the element in the table 'head' matching 'elm', \
* or NULL if no such element exists */ \
static inline struct type * \
name##_HT_FIND(const struct name *head, struct type *elm) \
{ \
struct type **p; \
struct name *h = (struct name *) head; \
_HT_SET_HASH(elm, field, hashfn); \
p = _##name##_HT_FIND_P(h, elm); \
return p ? *p : NULL; \
} \
/* Insert the element 'elm' into the table 'head'. Do not call this \
* function if the table might already contain a matching element. */ \
static inline void \
name##_HT_INSERT(struct name *head, struct type *elm) \
{ \
struct type **p; \
if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
name##_HT_GROW(head, head->hth_n_entries+1); \
++head->hth_n_entries; \
_HT_SET_HASH(elm, field, hashfn); \
p = &_HT_BUCKET(head, field, elm); \
elm->field.hte_next = *p; \
*p = elm; \
} \
/* Insert the element 'elm' into the table 'head'. If there is already \
* a matching element in the table, replace that element and return \
* it. */ \
static inline struct type * \
name##_HT_REPLACE(struct name *head, struct type *elm) \
{ \
struct type **p, *r; \
if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
name##_HT_GROW(head, head->hth_n_entries+1); \
_HT_SET_HASH(elm, field, hashfn); \
p = _##name##_HT_FIND_P(head, elm); \
r = *p; \
*p = elm; \
if (r && (r!=elm)) { \
elm->field.hte_next = r->field.hte_next; \
r->field.hte_next = NULL; \
return r; \
} else { \
++head->hth_n_entries; \
return NULL; \
} \
} \
/* Remove any element matching 'elm' from the table 'head'. If such \
* an element is found, return it; otherwise return NULL. */ \
static inline struct type * \
name##_HT_REMOVE(struct name *head, struct type *elm) \
{ \
struct type **p, *r; \
_HT_SET_HASH(elm, field, hashfn); \
p = _##name##_HT_FIND_P(head,elm); \
if (!p || !*p) \
return NULL; \
r = *p; \
*p = r->field.hte_next; \
r->field.hte_next = NULL; \
--head->hth_n_entries; \
return r; \
} \
/* Invoke the function 'fn' on every element of the table 'head', \
* using 'data' as its second argument. If the function returns \
* nonzero, the element just examined is removed from the table \
* before the iteration continues. */ \
static inline void \
name##_HT_FOREACH_FN(struct name *head, \
int (*fn)(struct type *, void *), \
void *data) \
{ \
unsigned idx; \
int remove; \
struct type **p, **nextp, *next; \
if (!head->hth_table) \
return; \
for (idx=0; idx < head->hth_table_length; ++idx) { \
p = &head->hth_table[idx]; \
while (*p) { \
nextp = &(*p)->field.hte_next; \
next = *nextp; \
remove = fn(*p, data); \
if (remove) { \
--head->hth_n_entries; \
*p = next; \
} else { \
p = nextp; \
} \
} \
} \
} \
/* Return a pointer to the first element in the table 'head', under \
* an arbitrary order. This order is stable under remove operations, \
* but not under others. If the table is empty, return NULL. */ \
static inline struct type ** \
name##_HT_START(struct name *head) \
{ \
unsigned b = 0; \
while (b < head->hth_table_length) { \
if (head->hth_table[b]) \
return &head->hth_table[b]; \
++b; \
} \
return NULL; \
} \
/* Return the next element in 'head' after 'elm', under the arbitrary \
* order used by HT_START. If there are no more elements, return \
* NULL. If 'elm' is to be removed from the table, you must call \
* this function for the next value before you remove it. \
*/ \
static inline struct type ** \
name##_HT_NEXT(struct name *head, struct type **elm) \
{ \
if ((*elm)->field.hte_next) { \
return &(*elm)->field.hte_next; \
} else { \
unsigned b = ((*elm)->field.hte_hash % head->hth_table_length)+1; \
while (b < head->hth_table_length) { \
if (head->hth_table[b]) \
return &head->hth_table[b]; \
++b; \
} \
return NULL; \
} \
} \
static inline struct type ** \
name##_HT_NEXT_RMV(struct name *head, struct type **elm) \
{ \
unsigned h = (*elm)->field.hte_hash; \
*elm = (*elm)->field.hte_next; \
--head->hth_n_entries; \
if (*elm) { \
return elm; \
} else { \
unsigned b = (h % head->hth_table_length)+1; \
while (b < head->hth_table_length) { \
if (head->hth_table[b]) \
return &head->hth_table[b]; \
++b; \
} \
return NULL; \
} \
}
#define HT_GENERATE(name, type, field, hashfn, eqfn, load, mallocfn, \
reallocfn, freefn) \
static unsigned name##_PRIMES[] = { \
53, 97, 193, 389, \
769, 1543, 3079, 6151, \
12289, 24593, 49157, 98317, \
196613, 393241, 786433, 1572869, \
3145739, 6291469, 12582917, 25165843, \
50331653, 100663319, 201326611, 402653189, \
805306457, 1610612741 \
}; \
static unsigned name##_N_PRIMES = \
(unsigned)(sizeof(name##_PRIMES)/sizeof(name##_PRIMES[0])); \
/* Expand the internal table of 'head' until it is large enough to \
* hold 'size' elements. Return 0 on success, -1 on allocation \
* failure. */ \
int \
name##_HT_GROW(struct name *head, unsigned size) \
{ \
unsigned new_len, new_load_limit; \
int prime_idx; \
struct type **new_table; \
if (head->hth_prime_idx == (int)name##_N_PRIMES - 1) \
return 0; \
if (head->hth_load_limit > size) \
return 0; \
prime_idx = head->hth_prime_idx; \
do { \
new_len = name##_PRIMES[++prime_idx]; \
new_load_limit = (unsigned)(load*new_len); \
} while (new_load_limit <= size && \
prime_idx < (int)name##_N_PRIMES - 1); \
if ((new_table = mallocfn(new_len*sizeof(struct type*)))) { \
unsigned b; \
memset(new_table, 0, new_len*sizeof(struct type*)); \
for (b = 0; b < head->hth_table_length; ++b) { \
struct type *elm, *next; \
unsigned b2; \
elm = head->hth_table[b]; \
while (elm) { \
next = elm->field.hte_next; \
b2 = elm->field.hte_hash % new_len; \
elm->field.hte_next = new_table[b2]; \
new_table[b2] = elm; \
elm = next; \
} \
} \
if (head->hth_table) \
freefn(head->hth_table); \
head->hth_table = new_table; \
} else { \
unsigned b, b2; \
new_table = reallocfn(head->hth_table, new_len*sizeof(struct type*)); \
if (!new_table) return -1; \
memset(new_table + head->hth_table_length, 0, \
(new_len - head->hth_table_length)*sizeof(struct type*)); \
for (b=0; b < head->hth_table_length; ++b) { \
struct type *e, **pE; \
for (pE = &new_table[b], e = *pE; e != NULL; e = *pE) { \
b2 = e->field.hte_hash % new_len; \
if (b2 == b) { \
pE = &e->field.hte_next; \
} else { \
*pE = e->field.hte_next; \
e->field.hte_next = new_table[b2]; \
new_table[b2] = e; \
} \
} \
} \
head->hth_table = new_table; \
} \
head->hth_table_length = new_len; \
head->hth_prime_idx = prime_idx; \
head->hth_load_limit = new_load_limit; \
return 0; \
} \
/* Free all storage held by 'head'. Does not free 'head' itself, or \
* individual elements. */ \
void \
name##_HT_CLEAR(struct name *head) \
{ \
if (head->hth_table) \
freefn(head->hth_table); \
head->hth_table_length = 0; \
name##_HT_INIT(head); \
} \
/* Debugging helper: return false iff the representation of 'head' is \
* internally consistent. */ \
int \
_##name##_HT_REP_IS_BAD(const struct name *head) \
{ \
unsigned n, i; \
struct type *elm; \
if (!head->hth_table_length) { \
if (!head->hth_table && !head->hth_n_entries && \
!head->hth_load_limit && head->hth_prime_idx == -1) \
return 0; \
else \
return 1; \
} \
if (!head->hth_table || head->hth_prime_idx < 0 || \
!head->hth_load_limit) \
return 2; \
if (head->hth_n_entries > head->hth_load_limit) \
return 3; \
if (head->hth_table_length != name##_PRIMES[head->hth_prime_idx]) \
return 4; \
if (head->hth_load_limit != (unsigned)(load*head->hth_table_length)) \
return 5; \
for (n = i = 0; i < head->hth_table_length; ++i) { \
for (elm = head->hth_table[i]; elm; elm = elm->field.hte_next) { \
if (elm->field.hte_hash != hashfn(elm)) \
return 1000 + i; \
if ((elm->field.hte_hash % head->hth_table_length) != i) \
return 10000 + i; \
++n; \
} \
} \
if (n != head->hth_n_entries) \
return 6; \
return 0; \
}
/** Implements an over-optimized "find and insert if absent" block;
* not meant for direct usage by typical code, or usage outside the critical
* path.*/
#define _HT_FIND_OR_INSERT(name, field, hashfn, head, eltype, elm, var, y, n) \
{ \
struct name *_##var##_head = head; \
struct eltype **var; \
if (!_##var##_head->hth_table || \
_##var##_head->hth_n_entries >= _##var##_head->hth_load_limit) \
name##_HT_GROW(_##var##_head, _##var##_head->hth_n_entries+1); \
_HT_SET_HASH((elm), field, hashfn); \
var = _##name##_HT_FIND_P(_##var##_head, (elm)); \
if (*var) { \
y; \
} else { \
n; \
} \
}
#define _HT_FOI_INSERT(field, head, elm, newent, var) \
{ \
newent->field.hte_hash = (elm)->field.hte_hash; \
newent->field.hte_next = NULL; \
*var = newent; \
++((head)->hth_n_entries); \
}
/*
* Copyright 2005, Nick Mathewson. Implementation logic is adapted from code
* by Christopher Clark, retrofitted to allow drop-in memory management, and to
* use the same interface as Niels Provos's HT_H. I'm not sure whether this
* is a derived work any more, but whether it is or not, the license below
* applies.
*
* Copyright (c) 2002, Christopher Clark
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the original author; nor the names of any contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#endif