mirror of
https://github.com/cuberite/libevent.git
synced 2025-08-04 01:36:23 -04:00
On windows, make lock/thread function tables static
This requires us to have a separate implementation of the lock macros that indirects to a set of functions. Fortunately, this isn't too hard to do. This may be a fix for bug 3042969, where our openssl dll and our libevent dll each got their own version of the thread stuff.
This commit is contained in:
parent
5218d2a8b1
commit
5de2bcb70f
@ -36,7 +36,15 @@ extern "C" {
|
||||
|
||||
struct event_base;
|
||||
|
||||
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
|
||||
#ifndef WIN32
|
||||
/* On Windows, the way we currently make DLLs, it's not allowed for us to
|
||||
* have shared global structures. Thus, we only do the direct-call-to-function
|
||||
* code path if we know that the local shared library system supports it.
|
||||
*/
|
||||
#define EVTHREAD_EXPOSE_STRUCTS
|
||||
#endif
|
||||
|
||||
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
|
||||
/* Global function pointers to lock-related functions. NULL if locking isn't
|
||||
enabled. */
|
||||
extern struct evthread_lock_callbacks _evthread_lock_fns;
|
||||
@ -92,30 +100,6 @@ extern int _evthread_lock_debugging_enabled;
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/** Acquire both lock1 and lock2. Always allocates locks in the same order,
|
||||
* so that two threads locking two locks with LOCK2 will not deadlock. */
|
||||
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
|
||||
do { \
|
||||
void *_lock1_tmplock = (lock1); \
|
||||
void *_lock2_tmplock = (lock2); \
|
||||
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
|
||||
EVLOCK_LOCK(_lock1_tmplock,mode1); \
|
||||
if (_lock2_tmplock != _lock1_tmplock) \
|
||||
EVLOCK_LOCK(_lock2_tmplock,mode2); \
|
||||
} while (0)
|
||||
|
||||
/** Release both lock1 and lock2. */
|
||||
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
|
||||
do { \
|
||||
void *_lock1_tmplock = (lock1); \
|
||||
void *_lock2_tmplock = (lock2); \
|
||||
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
|
||||
if (_lock2_tmplock != _lock1_tmplock) \
|
||||
EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
|
||||
EVLOCK_UNLOCK(_lock1_tmplock,mode1); \
|
||||
} while (0)
|
||||
|
||||
|
||||
/** Lock an event_base, if it is set up for locking. Acquires the lock
|
||||
in the base structure whose field is named 'lockvar'. */
|
||||
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
|
||||
@ -127,9 +111,6 @@ extern int _evthread_lock_debugging_enabled;
|
||||
EVLOCK_UNLOCK((base)->lockvar, 0); \
|
||||
} while (0)
|
||||
|
||||
int _evthread_is_debug_lock_held(void *lock);
|
||||
void *_evthread_debug_get_real_lock(void *lock);
|
||||
|
||||
/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
|
||||
* locked and held by us. */
|
||||
#define EVLOCK_ASSERT_LOCKED(lock) \
|
||||
@ -184,6 +165,110 @@ EVLOCK_TRY_LOCK(void *lock)
|
||||
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
|
||||
( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), (tv)) : 0 )
|
||||
|
||||
#elif ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
|
||||
|
||||
/* Implementation wrappers used when the lock/condition function tables are
 * not exported from the library (EVTHREAD_EXPOSE_STRUCTS not defined).
 * Each forwards to the callback tables configured inside evthread.c. */
unsigned long _evthreadimpl_get_id(void);
int _evthreadimpl_is_lock_debugging_enabled(void);
void *_evthreadimpl_lock_alloc(unsigned locktype);
void _evthreadimpl_lock_free(void *lock, unsigned locktype);
int _evthreadimpl_lock_lock(unsigned mode, void *lock);
int _evthreadimpl_lock_unlock(unsigned mode, void *lock);
void *_evthreadimpl_cond_alloc(unsigned condtype);
void _evthreadimpl_cond_free(void *cond);
int _evthreadimpl_cond_signal(void *cond, int broadcast);
int _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv);

/** Return the id of the currently executing thread. */
#define EVTHREAD_GET_ID() _evthreadimpl_get_id()
/** Evaluate to true iff the calling thread's id matches base's
 * recorded owner id. */
#define EVBASE_IN_THREAD(base)					\
	((base)->th_owner_id == _evthreadimpl_get_id())
/** Allocate a new lock of type 'locktype' and store it in 'lockvar'. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype)			\
	((lockvar) = _evthreadimpl_lock_alloc(locktype))
|
||||
|
||||
/** Free a lock allocated with EVTHREAD_ALLOC_LOCK; a NULL 'lockvar'
 * is a no-op.  'lockvar' is evaluated exactly once. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype)				\
	do {								\
		void *free_lock_tmp_ = (lockvar);			\
		if (free_lock_tmp_ != NULL)				\
			_evthreadimpl_lock_free(free_lock_tmp_, (locktype)); \
	} while (0)
|
||||
|
||||
/** Acquire the lock 'lockvar'.  A NULL lock means locking is disabled
 * for this object, so this is a no-op.  'mode' is forwarded to the
 * configured lock callback.
 * Fix: parenthesize the macro arguments so complex caller expressions
 * (e.g. a ternary) cannot change the expansion's meaning. */
#define EVLOCK_LOCK(lockvar,mode)					\
	do {								\
		if ((lockvar))						\
			_evthreadimpl_lock_lock((mode), (lockvar));	\
	} while (0)

/** Release the lock 'lockvar' acquired with EVLOCK_LOCK; a NULL lock
 * is a no-op. */
#define EVLOCK_UNLOCK(lockvar,mode)					\
	do {								\
		if ((lockvar))						\
			_evthreadimpl_lock_unlock((mode), (lockvar));	\
	} while (0)
|
||||
|
||||
/** Lock an event_base, if it is set up for locking.  Acquires the lock
 * in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar)				\
	do {								\
		EVLOCK_LOCK((base)->lockvar, 0);			\
	} while (0)

/** Unlock an event_base, if it is set up for locking.  Releases the
 * lock in the field named 'lockvar'. */
#define EVBASE_RELEASE_LOCK(base, lockvar)				\
	do {								\
		EVLOCK_UNLOCK((base)->lockvar, 0);			\
	} while (0)
|
||||
|
||||
/** If lock debugging is enabled, and 'lock' is non-NULL, assert that
 * we currently hold 'lock'.  (The order of the tests matters: 'lock'
 * is checked first, so a NULL lock never reaches the debug machinery.) */
#define EVLOCK_ASSERT_LOCKED(lock)					\
	do {								\
		if ((lock) && _evthreadimpl_is_lock_debugging_enabled()) { \
			EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
		}							\
	} while (0)
|
||||
|
||||
/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
|
||||
* manage to get it. */
|
||||
static inline int EVLOCK_TRY_LOCK(void *lock);
|
||||
static inline int
|
||||
EVLOCK_TRY_LOCK(void *lock)
|
||||
{
|
||||
if (lock) {
|
||||
int r = _evthreadimpl_lock_lock(EVTHREAD_TRY, lock);
|
||||
return !r;
|
||||
} else {
|
||||
/* Locking is disabled either globally or for this thing;
|
||||
* of course we count as having the lock. */
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar)					\
	do {								\
		(condvar) = _evthreadimpl_cond_alloc(0);		\
	} while (0)

/** Deallocate and free a condition variable in condvar; a NULL cond
 * is a no-op. */
#define EVTHREAD_FREE_COND(cond)					\
	do {								\
		if (cond)						\
			_evthreadimpl_cond_free((cond));		\
	} while (0)

/** Signal one thread waiting on cond. */
#define EVTHREAD_COND_SIGNAL(cond)					\
	( (cond) ? _evthreadimpl_cond_signal((cond), 0) : 0 )

/** Signal all threads waiting on cond. */
#define EVTHREAD_COND_BROADCAST(cond)					\
	( (cond) ? _evthreadimpl_cond_signal((cond), 1) : 0 )

/** Wait until the condition 'cond' is signalled.  Must be called while
 * holding 'lock'.  The lock will be released until the condition is
 * signalled, at which point it will be acquired again.  Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock)					\
	( (cond) ? _evthreadimpl_cond_wait((cond), (lock), NULL) : 0 )

/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed.  Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv)			\
	( (cond) ? _evthreadimpl_cond_wait((cond), (lock), (tv)) : 0 )
|
||||
|
||||
#else /* _EVENT_DISABLE_THREAD_SUPPORT */
|
||||
|
||||
#define EVTHREAD_GET_ID() 1
|
||||
@ -211,6 +296,44 @@ EVLOCK_TRY_LOCK(void *lock)
|
||||
|
||||
#endif
|
||||
|
||||
/* This code is shared between both lock impls */
|
||||
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
|
||||
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order,
 * so that code taking several locks always takes them in the same order.
 * NULL locks are left in place. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2)				\
	do {								\
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) {	\
			void *sort_tmp_ = lockvar1;			\
			lockvar1 = lockvar2;				\
			lockvar2 = sort_tmp_;				\
		}							\
	} while (0)
|
||||
|
||||
/** Acquire both lock1 and lock2.  Always acquires locks in the same
 * (pointerwise ascending) order, so that two threads locking the same
 * two locks with LOCK2 will not deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *_el_lock_a = (lock1);				\
		void *_el_lock_b = (lock2);				\
		_EVLOCK_SORTLOCKS(_el_lock_a,_el_lock_b);		\
		EVLOCK_LOCK(_el_lock_a,mode1);				\
		if (_el_lock_b != _el_lock_a)				\
			EVLOCK_LOCK(_el_lock_b,mode2);			\
	} while (0)

/** Release both lock1 and lock2, in the reverse of the order that
 * EVLOCK_LOCK2 acquires them. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2)				\
	do {								\
		void *_el_lock_a = (lock1);				\
		void *_el_lock_b = (lock2);				\
		_EVLOCK_SORTLOCKS(_el_lock_a,_el_lock_b);		\
		if (_el_lock_b != _el_lock_a)				\
			EVLOCK_UNLOCK(_el_lock_b,mode2);		\
		EVLOCK_UNLOCK(_el_lock_a,mode1);			\
	} while (0)

/* Lock-debugging helpers, implemented in evthread.c. */
int _evthread_is_debug_lock_held(void *lock);
void *_evthread_debug_get_real_lock(void *lock);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
83
evthread.c
83
evthread.c
@ -38,13 +38,19 @@
|
||||
#include "util-internal.h"
|
||||
#include "evthread-internal.h"
|
||||
|
||||
#ifdef EVTHREAD_EXPOSE_STRUCTS
|
||||
#define GLOBAL
|
||||
#else
|
||||
#define GLOBAL static
|
||||
#endif
|
||||
|
||||
/* globals */
|
||||
int _evthread_lock_debugging_enabled = 0;
|
||||
struct evthread_lock_callbacks _evthread_lock_fns = {
|
||||
GLOBAL int _evthread_lock_debugging_enabled = 0;
|
||||
GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
|
||||
0, 0, NULL, NULL, NULL, NULL
|
||||
};
|
||||
unsigned long (*_evthread_id_fn)(void) = NULL;
|
||||
struct evthread_condition_callbacks _evthread_cond_fns = {
|
||||
GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;
|
||||
GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
|
||||
0, NULL, NULL, NULL, NULL
|
||||
};
|
||||
|
||||
@ -263,4 +269,73 @@ _evthread_debug_get_real_lock(void *lock_)
|
||||
return lock->lock;
|
||||
}
|
||||
|
||||
#ifndef EVTHREAD_EXPOSE_STRUCTS
|
||||
unsigned long
|
||||
_evthreadimpl_get_id()
|
||||
{
|
||||
return _evthread_id_fn ? _evthread_id_fn() : 1;
|
||||
}
|
||||
void *
|
||||
_evthreadimpl_lock_alloc(unsigned locktype)
|
||||
{
|
||||
return _evthread_lock_fns.alloc ?
|
||||
_evthread_lock_fns.alloc(locktype) : NULL;
|
||||
}
|
||||
void
|
||||
_evthreadimpl_lock_free(void *lock, unsigned locktype)
|
||||
{
|
||||
if (_evthread_lock_fns.free)
|
||||
_evthread_lock_fns.free(lock, locktype);
|
||||
}
|
||||
int
|
||||
_evthreadimpl_lock_lock(unsigned mode, void *lock)
|
||||
{
|
||||
if (_evthread_lock_fns.lock)
|
||||
return _evthread_lock_fns.lock(mode, lock);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
int
|
||||
_evthreadimpl_lock_unlock(unsigned mode, void *lock)
|
||||
{
|
||||
if (_evthread_lock_fns.unlock)
|
||||
return _evthread_lock_fns.unlock(mode, lock);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
void *
|
||||
_evthreadimpl_cond_alloc(unsigned condtype)
|
||||
{
|
||||
return _evthread_cond_fns.alloc_condition ?
|
||||
_evthread_cond_fns.alloc_condition(condtype) : NULL;
|
||||
}
|
||||
void
|
||||
_evthreadimpl_cond_free(void *cond)
|
||||
{
|
||||
if (_evthread_cond_fns.free_condition)
|
||||
_evthread_cond_fns.free_condition(cond);
|
||||
}
|
||||
int
|
||||
_evthreadimpl_cond_signal(void *cond, int broadcast)
|
||||
{
|
||||
if (_evthread_cond_fns.signal_condition)
|
||||
return _evthread_cond_fns.signal_condition(cond, broadcast);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
int
|
||||
_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
|
||||
{
|
||||
if (_evthread_cond_fns.wait_condition)
|
||||
return _evthread_cond_fns.wait_condition(cond, lock, tv);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
int
|
||||
_evthreadimpl_is_lock_debugging_enabled(void)
|
||||
{
|
||||
return _evthread_lock_debugging_enabled;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
Loading…
x
Reference in New Issue
Block a user