Mirror of https://github.com/cuberite/libevent.git (synced 2025-09-13 14:26:30 -04:00)

commit a77a82a03f
Merge remote-tracking branch 'azat/be-pair-fix-freeing-shared-lock-v5'

The two bufferevents of a pair share one lock, allocated by pair[0]. Freeing the owner first used to leave the partner holding a pointer to that lock, which event_base_free()'s finalizers then freed and subsequently tried to take: a use-after-free. This merge transfers lock ownership to the surviving partner in be_pair_destruct(), adds internal accessors for the current evthread lock/condition callbacks, and adds a regression test that traces every lock alloc/free/lock/unlock to catch the bug.
bufferevent_pair.c
@@ -45,6 +45,8 @@
 struct bufferevent_pair {
 	struct bufferevent_private bev;
 	struct bufferevent_pair *partner;
+	/* For ->destruct() lock checking */
+	struct bufferevent_pair *unlinked_partner;
 };
@@ -265,11 +267,40 @@ be_pair_unlink(struct bufferevent *bev)
 	struct bufferevent_pair *bev_p = upcast(bev);
 
 	if (bev_p->partner) {
+		bev_p->unlinked_partner = bev_p->partner;
 		bev_p->partner->partner = NULL;
 		bev_p->partner = NULL;
 	}
 }
 
+/* Free the *shared* lock in the last surviving bev (the pair shares one lock). */
+static void
+be_pair_destruct(struct bufferevent *bev)
+{
+	struct bufferevent_pair *bev_p = upcast(bev);
+
+	/* Transfer ownership of the lock to the partner, otherwise we would
+	 * use an already-freed lock while freeing the second bev:
+	 *
+	 * bev1->own_lock = 1
+	 * bev2->own_lock = 0
+	 * bev2->lock = bev1->lock
+	 *
+	 * bufferevent_free(bev1) # refcnt == 0 -> unlink
+	 * bufferevent_free(bev2) # refcnt == 0 -> unlink
+	 *
+	 * event_base_free() -> finalizers -> EVTHREAD_FREE_LOCK(bev1->lock)
+	 *                   -> BEV_LOCK(bev2->lock) <-- already freed
+	 *
+	 * Here bev1 == pair[0] and bev2 == pair[1].
+	 */
+	if (bev_p->unlinked_partner && bev_p->bev.own_lock) {
+		bev_p->unlinked_partner->bev.own_lock = 1;
+		bev_p->bev.own_lock = 0;
+	}
+	bev_p->unlinked_partner = NULL;
+}
+
 static int
 be_pair_flush(struct bufferevent *bev, short iotype,
     enum bufferevent_flush_mode mode)

@@ -320,7 +351,7 @@ const struct bufferevent_ops bufferevent_ops_pair = {
 	be_pair_enable,
 	be_pair_disable,
 	be_pair_unlink,
-	NULL, /* be_pair_destruct, */
+	be_pair_destruct,
 	bufferevent_generic_adj_timeouts_,
 	be_pair_flush,
 	NULL, /* ctrl */
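For context, a minimal sketch of the failure mode the new be_pair_destruct() guards against. It assumes a build with pthreads support; the program and its names are illustrative, not part of the commit:

#include <event2/event.h>
#include <event2/thread.h>
#include <event2/bufferevent.h>

int main(void)
{
	struct event_base *base;
	struct bufferevent *pair[2];

	evthread_use_pthreads();
	base = event_base_new();

	/* pair[0] allocates the shared lock (own_lock == 1);
	 * pair[1] only borrows a pointer to it. */
	if (bufferevent_pair_new(base, BEV_OPT_THREADSAFE, pair) < 0)
		return 1;

	/* Freeing the lock owner first is the problematic order. */
	bufferevent_free(pair[0]);
	bufferevent_free(pair[1]);

	/* Before this fix, the finalizers here freed pair[0]'s lock and
	 * then took BEV_LOCK on pair[1], which still pointed at it. */
	event_base_free(base);
	return 0;
}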
event.c (1 line changed)
@@ -3765,6 +3765,7 @@ event_free_debug_globals_locks(void)
 	if (event_debug_map_lock_ != NULL) {
 		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
 		event_debug_map_lock_ = NULL;
+		evthreadimpl_disable_lock_debugging_();
 	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
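Note on the added call: once event_debug_map_lock_ is freed, the debug-lock layer should not stay active. Clearing evthread_lock_debugging_enabled_ here (via the helper added in evthread.c below) presumably ensures that lock operations and callback resets later in global shutdown target the real callback tables rather than the debug wrappers.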
evthread-internal.h
@@ -376,6 +376,13 @@ int evsig_global_setup_locks_(const int enable_locks);
 int evutil_global_setup_locks_(const int enable_locks);
 int evutil_secure_rng_global_setup_locks_(const int enable_locks);
 
+/** Return the current evthread_lock_callbacks */
+struct evthread_lock_callbacks *evthread_get_lock_callbacks();
+/** Return the current evthread_condition_callbacks */
+struct evthread_condition_callbacks *evthread_get_condition_callbacks();
+/** Disable lock debugging for internal usage (like global shutdown) */
+void evthreadimpl_disable_lock_debugging_(void);
+
 #endif
 
 #ifdef __cplusplus
evthread.c (23 lines changed)
@@ -69,12 +69,25 @@ evthread_set_id_callback(unsigned long (*id_fn)(void))
 	evthread_id_fn_ = id_fn;
 }
 
+struct evthread_lock_callbacks *evthread_get_lock_callbacks()
+{
+	return evthread_lock_debugging_enabled_
+	    ? &original_lock_fns_ : &evthread_lock_fns_;
+}
+struct evthread_condition_callbacks *evthread_get_condition_callbacks()
+{
+	return evthread_lock_debugging_enabled_
+	    ? &original_cond_fns_ : &evthread_cond_fns_;
+}
+void evthreadimpl_disable_lock_debugging_(void)
+{
+	evthread_lock_debugging_enabled_ = 0;
+}
+
 int
 evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
 {
-	struct evthread_lock_callbacks *target =
-	    evthread_lock_debugging_enabled_
-	    ? &original_lock_fns_ : &evthread_lock_fns_;
+	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
 
 	if (!cbs) {
 		if (target->alloc)

@@ -109,9 +122,7 @@ evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
 int
 evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
 {
-	struct evthread_condition_callbacks *target =
-	    evthread_lock_debugging_enabled_
-	    ? &original_cond_fns_ : &evthread_cond_fns_;
+	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();
 
 	if (!cbs) {
 		if (target->alloc_condition)
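The accessors above return whichever table evthread_set_lock_callbacks() / evthread_set_condition_callbacks() would actually write to (the saved originals while lock debugging is active), so callers can wrap the live callbacks without knowing about the debug layer. A sketch of that pattern, modeled on the profiler in the regression test below; this is internal API, so it only builds inside the libevent tree, and install_wrapped_lock_callbacks / saved_cbs are illustrative names:

#include <string.h>
#include "evthread-internal.h"

static struct evthread_lock_callbacks saved_cbs;

/* Remember the callbacks currently in effect (so a wrapper can chain to
 * them) and install the wrapper in their place. */
static void install_wrapped_lock_callbacks(const struct evthread_lock_callbacks *wrapper)
{
	memcpy(&saved_cbs, evthread_get_lock_callbacks(), sizeof(saved_cbs));
	evthread_set_lock_callbacks(NULL);    /* drop the immutable flag */
	evthread_set_lock_callbacks(wrapper); /* wrapper chains via saved_cbs */
}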
test/regress_bufferevent.c
@@ -75,6 +75,7 @@
 #include "event2/util.h"
 
 #include "bufferevent-internal.h"
+#include "evthread-internal.h"
 #include "util-internal.h"
 #ifdef _WIN32
 #include "iocp-internal.h"
@@ -173,9 +174,9 @@ test_bufferevent_impl(int use_pair)
 
 	event_dispatch();
 
-	bufferevent_free(bev1);
-	tt_ptr_op(bufferevent_pair_get_partner(bev2), ==, NULL);
 	bufferevent_free(bev2);
+	tt_ptr_op(bufferevent_pair_get_partner(bev1), ==, NULL);
+	bufferevent_free(bev1);
 
 	if (test_ok != 2)
 		test_ok = 0;
@@ -195,6 +196,143 @@ test_bufferevent_pair(void)
 	test_bufferevent_impl(1);
 }
 
+#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED)
+/**
+ * Trace lock/unlock/alloc/free for locks.
+ * (Heavier than the evthread_debug* machinery.)
+ */
+typedef struct
+{
+	void *lock;
+	enum {
+		ALLOC, FREE,
+	} status;
+	size_t locked; /** allows recursive locking */
+} lock_wrapper;
+struct lock_unlock_base
+{
+	/* Original callbacks */
+	struct evthread_lock_callbacks cbs;
+	/* Map of locks */
+	lock_wrapper *locks;
+	size_t nr_locks;
+} lu_base = {
+	.locks = NULL,
+};
+
+static lock_wrapper *lu_find(void *lock_)
+{
+	size_t i;
+	for (i = 0; i < lu_base.nr_locks; ++i) {
+		lock_wrapper *lock = &lu_base.locks[i];
+		if (lock->lock == lock_)
+			return lock;
+	}
+	return NULL;
+}
+
+static void *trace_lock_alloc(unsigned locktype)
+{
+	++lu_base.nr_locks;
+	lu_base.locks = realloc(lu_base.locks,
+	    sizeof(lock_wrapper) * lu_base.nr_locks);
+	void *lock = lu_base.cbs.alloc(locktype);
+	lu_base.locks[lu_base.nr_locks - 1] = (lock_wrapper){ lock, ALLOC, 0 };
+	return lock;
+}
+static void trace_lock_free(void *lock_, unsigned locktype)
+{
+	lock_wrapper *lock = lu_find(lock_);
+	if (!lock || lock->status == FREE || lock->locked) {
+		TT_FAIL(("lock: free error"));
+	} else {
+		lock->status = FREE;
+		lu_base.cbs.free(lock_, locktype);
+	}
+}
+static int trace_lock_lock(unsigned mode, void *lock_)
+{
+	lock_wrapper *lock = lu_find(lock_);
+	if (!lock || lock->status == FREE) {
+		TT_FAIL(("lock: lock error"));
+		return -1;
+	} else {
+		++lock->locked;
+		return lu_base.cbs.lock(mode, lock_);
+	}
+}
+static int trace_lock_unlock(unsigned mode, void *lock_)
+{
+	lock_wrapper *lock = lu_find(lock_);
+	if (!lock || lock->status == FREE || !lock->locked) {
+		TT_FAIL(("lock: unlock error"));
+		return -1;
+	} else {
+		--lock->locked;
+		return lu_base.cbs.unlock(mode, lock_);
+	}
+}
+static void lock_unlock_free_thread_cbs(void)
+{
+	event_base_free(NULL);
+
+	/** drop the immutable flag */
+	evthread_set_lock_callbacks(NULL);
+	/** avoid calling event_global_setup_locks_() for the new cbs */
+	libevent_global_shutdown();
+	/** drop the immutable flag for non-debug ops (called after shutdown) */
+	evthread_set_lock_callbacks(NULL);
+}
+
+static int use_lock_unlock_profiler(void)
+{
+	struct evthread_lock_callbacks cbs = {
+		EVTHREAD_LOCK_API_VERSION,
+		EVTHREAD_LOCKTYPE_RECURSIVE,
+		trace_lock_alloc,
+		trace_lock_free,
+		trace_lock_lock,
+		trace_lock_unlock,
+	};
+	memcpy(&lu_base.cbs, evthread_get_lock_callbacks(),
+	    sizeof(lu_base.cbs));
+	{
+		lock_unlock_free_thread_cbs();
+
+		evthread_set_lock_callbacks(&cbs);
+		/** re-create the debug locks correctly */
+		evthread_enable_lock_debugging();
+
+		event_init();
+	}
+	return 0;
+}
+static void free_lock_unlock_profiler(struct basic_test_data *data)
+{
+	lock_unlock_free_thread_cbs();
+	free(lu_base.locks);
+	data->base = NULL;
+}
+
+static void test_bufferevent_pair_release_lock(void *arg)
+{
+	struct basic_test_data *data = arg;
+	use_lock_unlock_profiler();
+	{
+		struct bufferevent *pair[2];
+		if (!bufferevent_pair_new(NULL, BEV_OPT_THREADSAFE, pair)) {
+			bufferevent_free(pair[0]);
+			bufferevent_free(pair[1]);
+		} else
+			tt_abort_perror("bufferevent_pair_new");
+	}
+	free_lock_unlock_profiler(data);
+end:
+	;
+}
+#endif
+
 /*
  * test watermarks and bufferevent
  */
@@ -949,6 +1087,11 @@ struct testcase_t bufferevent_testcases[] = {
 
 	LEGACY(bufferevent, TT_ISOLATED),
 	LEGACY(bufferevent_pair, TT_ISOLATED),
+#if defined(EVTHREAD_USE_PTHREADS_IMPLEMENTED)
+	{ "bufferevent_pair_release_lock", test_bufferevent_pair_release_lock,
+	  TT_FORK|TT_ISOLATED|TT_NEED_THREADS|TT_NEED_BASE|TT_LEGACY,
+	  &basic_setup, NULL },
+#endif
 	LEGACY(bufferevent_watermarks, TT_ISOLATED),
 	LEGACY(bufferevent_pair_watermarks, TT_ISOLATED),
 	LEGACY(bufferevent_filters, TT_ISOLATED),