Mirror of https://github.com/cuberite/libevent.git

These are reserved in C. We'd been erroneously using them to indicate
internal use. Instead, we now use a trailing underscore whenever we'd been
using a leading underscore.

This is an automatic conversion. The script that produced it was made by
running the following script over the output of

  git ls-tree -r --name-only HEAD | grep '\.[ch]$' | \
    xargs ctags --c-kinds=defglmpstuvx -o - | grep '^_' | \
    cut -f 1 | sort | uniq

(GNU ctags was required.)

=====

#!/usr/bin/perl -w -n

use strict;

BEGIN { print "#!/usr/bin/perl -w -i -p\n\n"; }

chomp;

next if (/^__func__/ or
	 /^_FILE_OFFSET_BITS/ or
	 /^_FORTIFY_SOURCE/ or
	 /^_GNU_SOURCE/ or
	 /^_WIN32/ or
	 /^_DARWIN_UNLIMITED/ or
	 /^_FILE_OFFSET_BITS/ or
	 /^_LARGEFILE64_SOURCE/ or
	 /^_LFS64_LARGEFILE/ or
	 /^__cdecl/ or
	 /^__attribute__/ or
	 /^__func__/ or
	 /^_SYS_TREE_H_/);

my $ident = $_;

my $better = $ident;
$better =~ s/^_//;
if ($ident !~ /EVENT_LOG_/) {
	$better = "${better}_";
}

print "s/(?<![A-Za-z0-9_])$ident(?![A-Za-z0-9_])/$better/g;\n";

=== And then running the script below that it generated over all
=== the .c and .h files again

#!/usr/bin/perl -w -i -p

s/(?<![A-Za-z0-9_])_ARC4_LOCK(?![A-Za-z0-9_])/ARC4_LOCK_/g;
s/(?<![A-Za-z0-9_])_ARC4_UNLOCK(?![A-Za-z0-9_])/ARC4_UNLOCK_/g;
s/(?<![A-Za-z0-9_])_bev_group_random_element(?![A-Za-z0-9_])/bev_group_random_element_/g;
s/(?<![A-Za-z0-9_])_bev_group_refill_callback(?![A-Za-z0-9_])/bev_group_refill_callback_/g;
s/(?<![A-Za-z0-9_])_bev_group_suspend_reading(?![A-Za-z0-9_])/bev_group_suspend_reading_/g;
s/(?<![A-Za-z0-9_])_bev_group_suspend_writing(?![A-Za-z0-9_])/bev_group_suspend_writing_/g;
s/(?<![A-Za-z0-9_])_bev_group_unsuspend_reading(?![A-Za-z0-9_])/bev_group_unsuspend_reading_/g;
s/(?<![A-Za-z0-9_])_bev_group_unsuspend_writing(?![A-Za-z0-9_])/bev_group_unsuspend_writing_/g;
s/(?<![A-Za-z0-9_])_bev_refill_callback(?![A-Za-z0-9_])/bev_refill_callback_/g;
s/(?<![A-Za-z0-9_])_bufferevent_add_event(?![A-Za-z0-9_])/bufferevent_add_event_/g;
s/(?<![A-Za-z0-9_])_bufferevent_cancel_all(?![A-Za-z0-9_])/bufferevent_cancel_all_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decref_and_unlock(?![A-Za-z0-9_])/bufferevent_decref_and_unlock_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decrement_read_buckets(?![A-Za-z0-9_])/bufferevent_decrement_read_buckets_/g;
s/(?<![A-Za-z0-9_])_bufferevent_decrement_write_buckets(?![A-Za-z0-9_])/bufferevent_decrement_write_buckets_/g;
s/(?<![A-Za-z0-9_])_bufferevent_del_generic_timeout_cbs(?![A-Za-z0-9_])/bufferevent_del_generic_timeout_cbs_/g;
s/(?<![A-Za-z0-9_])_bufferevent_generic_adj_timeouts(?![A-Za-z0-9_])/bufferevent_generic_adj_timeouts_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_read_max(?![A-Za-z0-9_])/bufferevent_get_read_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_rlim_max(?![A-Za-z0-9_])/bufferevent_get_rlim_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_get_write_max(?![A-Za-z0-9_])/bufferevent_get_write_max_/g;
s/(?<![A-Za-z0-9_])_bufferevent_incref_and_lock(?![A-Za-z0-9_])/bufferevent_incref_and_lock_/g;
s/(?<![A-Za-z0-9_])_bufferevent_init_generic_timeout_cbs(?![A-Za-z0-9_])/bufferevent_init_generic_timeout_cbs_/g;
s/(?<![A-Za-z0-9_])_bufferevent_ratelim_init(?![A-Za-z0-9_])/bufferevent_ratelim_init_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_eventcb(?![A-Za-z0-9_])/bufferevent_run_eventcb_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_readcb(?![A-Za-z0-9_])/bufferevent_run_readcb_/g;
s/(?<![A-Za-z0-9_])_bufferevent_run_writecb(?![A-Za-z0-9_])/bufferevent_run_writecb_/g;
s/(?<![A-Za-z0-9_])_ev(?![A-Za-z0-9_])/ev_/g;
s/(?<![A-Za-z0-9_])_evbuffer_chain_pin(?![A-Za-z0-9_])/evbuffer_chain_pin_/g;
s/(?<![A-Za-z0-9_])_evbuffer_chain_unpin(?![A-Za-z0-9_])/evbuffer_chain_unpin_/g;
s/(?<![A-Za-z0-9_])_evbuffer_decref_and_unlock(?![A-Za-z0-9_])/evbuffer_decref_and_unlock_/g;
s/(?<![A-Za-z0-9_])_evbuffer_expand_fast(?![A-Za-z0-9_])/evbuffer_expand_fast_/g;
s/(?<![A-Za-z0-9_])_evbuffer_incref(?![A-Za-z0-9_])/evbuffer_incref_/g;
s/(?<![A-Za-z0-9_])_evbuffer_incref_and_lock(?![A-Za-z0-9_])/evbuffer_incref_and_lock_/g;
s/(?<![A-Za-z0-9_])_EVBUFFER_IOVEC_IS_NATIVE(?![A-Za-z0-9_])/EVBUFFER_IOVEC_IS_NATIVE_/g;
s/(?<![A-Za-z0-9_])_evbuffer_overlapped_get_fd(?![A-Za-z0-9_])/evbuffer_overlapped_get_fd_/g;
s/(?<![A-Za-z0-9_])_evbuffer_overlapped_set_fd(?![A-Za-z0-9_])/evbuffer_overlapped_set_fd_/g;
s/(?<![A-Za-z0-9_])_evbuffer_read_setup_vecs(?![A-Za-z0-9_])/evbuffer_read_setup_vecs_/g;
s/(?<![A-Za-z0-9_])_evbuffer_validate(?![A-Za-z0-9_])/evbuffer_validate_/g;
s/(?<![A-Za-z0-9_])_evdns_log(?![A-Za-z0-9_])/evdns_log_/g;
s/(?<![A-Za-z0-9_])_evdns_nameserver_add_impl(?![A-Za-z0-9_])/evdns_nameserver_add_impl_/g;
s/(?<![A-Za-z0-9_])_EVENT_CONFIG_H_(?![A-Za-z0-9_])/EVENT_CONFIG_H__/g;
s/(?<![A-Za-z0-9_])_event_debug_assert_is_setup(?![A-Za-z0-9_])/event_debug_assert_is_setup_/g;
s/(?<![A-Za-z0-9_])_event_debug_assert_not_added(?![A-Za-z0-9_])/event_debug_assert_not_added_/g;
s/(?<![A-Za-z0-9_])_event_debug_get_logging_mask(?![A-Za-z0-9_])/event_debug_get_logging_mask_/g;
s/(?<![A-Za-z0-9_])_event_debug_logging_mask(?![A-Za-z0-9_])/event_debug_logging_mask_/g;
s/(?<![A-Za-z0-9_])_event_debug_map_lock(?![A-Za-z0-9_])/event_debug_map_lock_/g;
s/(?<![A-Za-z0-9_])_event_debug_mode_on(?![A-Za-z0-9_])/event_debug_mode_on_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_add(?![A-Za-z0-9_])/event_debug_note_add_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_del(?![A-Za-z0-9_])/event_debug_note_del_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_setup(?![A-Za-z0-9_])/event_debug_note_setup_/g;
s/(?<![A-Za-z0-9_])_event_debug_note_teardown(?![A-Za-z0-9_])/event_debug_note_teardown_/g;
s/(?<![A-Za-z0-9_])_event_debugx(?![A-Za-z0-9_])/event_debugx_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_LISTENTRY(?![A-Za-z0-9_])/EVENT_DEFINED_LISTENTRY_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_TQENTRY(?![A-Za-z0-9_])/EVENT_DEFINED_TQENTRY_/g;
s/(?<![A-Za-z0-9_])_EVENT_DEFINED_TQHEAD(?![A-Za-z0-9_])/EVENT_DEFINED_TQHEAD_/g;
s/(?<![A-Za-z0-9_])_EVENT_DNS_USE_FTIME_FOR_ID(?![A-Za-z0-9_])/EVENT_DNS_USE_FTIME_FOR_ID_/g;
s/(?<![A-Za-z0-9_])_EVENT_ERR_ABORT(?![A-Za-z0-9_])/EVENT_ERR_ABORT_/g;
s/(?<![A-Za-z0-9_])_EVENT_EVCONFIG__PRIVATE_H(?![A-Za-z0-9_])/EVENT_EVCONFIG__PRIVATE_H_/g;
s/(?<![A-Za-z0-9_])_event_iocp_port_unlock_and_free(?![A-Za-z0-9_])/event_iocp_port_unlock_and_free_/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_DEBUG(?![A-Za-z0-9_])/EVENT_LOG_DEBUG/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_ERR(?![A-Za-z0-9_])/EVENT_LOG_ERR/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_MSG(?![A-Za-z0-9_])/EVENT_LOG_MSG/g;
s/(?<![A-Za-z0-9_])_EVENT_LOG_WARN(?![A-Za-z0-9_])/EVENT_LOG_WARN/g;
s/(?<![A-Za-z0-9_])_event_strlcpy(?![A-Za-z0-9_])/event_strlcpy_/g;
s/(?<![A-Za-z0-9_])_EVHTTP_REQ_UNKNOWN(?![A-Za-z0-9_])/EVHTTP_REQ_UNKNOWN_/g;
s/(?<![A-Za-z0-9_])_EVLOCK_SORTLOCKS(?![A-Za-z0-9_])/EVLOCK_SORTLOCKS_/g;
s/(?<![A-Za-z0-9_])_evrpc_hooks(?![A-Za-z0-9_])/evrpc_hooks_/g;
s/(?<![A-Za-z0-9_])_evsig_restore_handler(?![A-Za-z0-9_])/evsig_restore_handler_/g;
s/(?<![A-Za-z0-9_])_evsig_set_handler(?![A-Za-z0-9_])/evsig_set_handler_/g;
s/(?<![A-Za-z0-9_])_evthread_cond_fns(?![A-Za-z0-9_])/evthread_cond_fns_/g;
s/(?<![A-Za-z0-9_])_evthread_debug_get_real_lock(?![A-Za-z0-9_])/evthread_debug_get_real_lock_/g;
s/(?<![A-Za-z0-9_])_evthread_id_fn(?![A-Za-z0-9_])/evthread_id_fn_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_alloc(?![A-Za-z0-9_])/evthreadimpl_cond_alloc_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_free(?![A-Za-z0-9_])/evthreadimpl_cond_free_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_signal(?![A-Za-z0-9_])/evthreadimpl_cond_signal_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_cond_wait(?![A-Za-z0-9_])/evthreadimpl_cond_wait_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_get_id(?![A-Za-z0-9_])/evthreadimpl_get_id_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_is_lock_debugging_enabled(?![A-Za-z0-9_])/evthreadimpl_is_lock_debugging_enabled_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_alloc(?![A-Za-z0-9_])/evthreadimpl_lock_alloc_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_free(?![A-Za-z0-9_])/evthreadimpl_lock_free_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_locking_enabled(?![A-Za-z0-9_])/evthreadimpl_locking_enabled_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_lock(?![A-Za-z0-9_])/evthreadimpl_lock_lock_/g;
s/(?<![A-Za-z0-9_])_evthreadimpl_lock_unlock(?![A-Za-z0-9_])/evthreadimpl_lock_unlock_/g;
s/(?<![A-Za-z0-9_])_evthread_is_debug_lock_held(?![A-Za-z0-9_])/evthread_is_debug_lock_held_/g;
s/(?<![A-Za-z0-9_])_evthread_lock_debugging_enabled(?![A-Za-z0-9_])/evthread_lock_debugging_enabled_/g;
s/(?<![A-Za-z0-9_])_evthread_lock_fns(?![A-Za-z0-9_])/evthread_lock_fns_/g;
s/(?<![A-Za-z0-9_])_EVUTIL_NIL_CONDITION(?![A-Za-z0-9_])/EVUTIL_NIL_CONDITION_/g;
s/(?<![A-Za-z0-9_])_EVUTIL_NIL_STMT(?![A-Za-z0-9_])/EVUTIL_NIL_STMT_/g;
s/(?<![A-Za-z0-9_])_evutil_weakrand(?![A-Za-z0-9_])/evutil_weakrand_/g;
s/(?<![A-Za-z0-9_])_http_close_detection(?![A-Za-z0-9_])/http_close_detection_/g;
s/(?<![A-Za-z0-9_])_http_connection_test(?![A-Za-z0-9_])/http_connection_test_/g;
s/(?<![A-Za-z0-9_])_http_incomplete_test(?![A-Za-z0-9_])/http_incomplete_test_/g;
s/(?<![A-Za-z0-9_])_http_stream_in_test(?![A-Za-z0-9_])/http_stream_in_test_/g;
s/(?<![A-Za-z0-9_])_internal(?![A-Za-z0-9_])/internal_/g;
s/(?<![A-Za-z0-9_])_mm_free_fn(?![A-Za-z0-9_])/mm_free_fn_/g;
s/(?<![A-Za-z0-9_])_mm_malloc_fn(?![A-Za-z0-9_])/mm_malloc_fn_/g;
s/(?<![A-Za-z0-9_])_mm_realloc_fn(?![A-Za-z0-9_])/mm_realloc_fn_/g;
s/(?<![A-Za-z0-9_])_original_cond_fns(?![A-Za-z0-9_])/original_cond_fns_/g;
s/(?<![A-Za-z0-9_])_original_lock_fns(?![A-Za-z0-9_])/original_lock_fns_/g;
s/(?<![A-Za-z0-9_])_rpc_hook_ctx(?![A-Za-z0-9_])/rpc_hook_ctx_/g;
s/(?<![A-Za-z0-9_])_SYS_QUEUE_H_(?![A-Za-z0-9_])/SYS_QUEUE_H__/g;
s/(?<![A-Za-z0-9_])_t(?![A-Za-z0-9_])/t_/g;
s/(?<![A-Za-z0-9_])_t32(?![A-Za-z0-9_])/t32_/g;
s/(?<![A-Za-z0-9_])_test_ai_eq(?![A-Za-z0-9_])/test_ai_eq_/g;
s/(?<![A-Za-z0-9_])_URI_ADD(?![A-Za-z0-9_])/URI_ADD_/g;
s/(?<![A-Za-z0-9_])_URI_FREE_STR(?![A-Za-z0-9_])/URI_FREE_STR_/g;
s/(?<![A-Za-z0-9_])_URI_SET_STR(?![A-Za-z0-9_])/URI_SET_STR_/g;
s/(?<![A-Za-z0-9_])_warn_helper(?![A-Za-z0-9_])/warn_helper_/g;
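As a concrete before/after illustration (using a declaration that appears in
the header below; only the identifier changes, since the leading underscore
made it a name reserved for the implementation):

	/* before the conversion */
	extern struct evthread_lock_callbacks _evthread_lock_fns;

	/* after the conversion */
	extern struct evthread_lock_callbacks evthread_lock_fns_;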
385 lines · 14 KiB · C
/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVTHREAD_INTERNAL_H_INCLUDED_
#define EVTHREAD_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include "event2/thread.h"
#include "util-internal.h"

struct event_base;

#ifndef _WIN32
/* On Windows, the way we currently make DLLs, it's not allowed for us to
 * have shared global structures. Thus, we only do the direct-call-to-function
 * code path if we know that the local shared library system supports it.
 */
#define EVTHREAD_EXPOSE_STRUCTS
#endif

#if ! defined(EVENT__DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
   enabled. */
extern struct evthread_lock_callbacks evthread_lock_fns_;
extern struct evthread_condition_callbacks evthread_cond_fns_;
extern unsigned long (*evthread_id_fn_)(void);
extern int evthread_lock_debugging_enabled_;

/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
	(evthread_id_fn_ ? evthread_id_fn_() : 1)

/** Return true iff we're in the thread that is currently (or most recently)
 * running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base) \
	(evthread_id_fn_ == NULL || \
	(base)->th_owner_id == evthread_id_fn_())

/** Return true iff we need to notify the base's main thread about changes to
 * its state, because it's currently running the main loop in another
 * thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base) \
	(evthread_id_fn_ != NULL && \
	(base)->running_loop && \
	(base)->th_owner_id != evthread_id_fn_())

/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
    NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
	((lockvar) = evthread_lock_fns_.alloc ? \
	    evthread_lock_fns_.alloc(locktype) : NULL)

/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
	do { \
		void *_lock_tmp_ = (lockvar); \
		if (_lock_tmp_ && evthread_lock_fns_.free) \
			evthread_lock_fns_.free(_lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthread_lock_fns_.lock(mode, lockvar); \
	} while (0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthread_lock_fns_.unlock(mode, lockvar); \
	} while (0)
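
/*
 * A minimal usage sketch for the lock macros above (illustrative only; the
 * variable name is hypothetical and the locktype constant comes from
 * event2/thread.h).  When no locking callbacks are configured, the
 * allocation yields NULL and the remaining steps become no-ops:
 *
 *	void *lock = NULL;
 *	EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 *	EVLOCK_LOCK(lock, 0);
 *	... touch the state that 'lock' protects ...
 *	EVLOCK_UNLOCK(lock, 0);
 *	EVTHREAD_FREE_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 */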

/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
	do { \
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
			void *tmp = lockvar1; \
			lockvar1 = lockvar2; \
			lockvar2 = tmp; \
		} \
	} while (0)

/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
		EVLOCK_LOCK((base)->lockvar, 0); \
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
		EVLOCK_UNLOCK((base)->lockvar, 0); \
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
	do { \
		if ((lock) && evthread_lock_debugging_enabled_) { \
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		} \
	} while (0)

/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
 * manage to get it. */
static inline int EVLOCK_TRY_LOCK(void *lock);
static inline int
EVLOCK_TRY_LOCK(void *lock)
{
	if (lock && evthread_lock_fns_.lock) {
		int r = evthread_lock_fns_.lock(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this thing;
		 * of course we count as having the lock. */
		return 1;
	}
}
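
/*
 * Sketch of the intended try-lock pattern (the 'lock' variable is
 * hypothetical): a return value of 1 means we now hold the lock, or that
 * locking is disabled entirely, so the matching unlock stays balanced
 * either way:
 *
 *	if (EVLOCK_TRY_LOCK(lock)) {
 *		... brief work under the lock ...
 *		EVLOCK_UNLOCK(lock, 0);
 *	}
 */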

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
	do { \
		(condvar) = evthread_cond_fns_.alloc_condition ? \
		    evthread_cond_fns_.alloc_condition(0) : NULL; \
	} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
	do { \
		if (cond) \
			evthread_cond_fns_.free_condition((cond)); \
	} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
	( (cond) ? evthread_cond_fns_.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
 * holding 'lock'. The lock will be released until the condition is
 * signalled, at which point it will be acquired again. Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
	( (cond) ? evthread_cond_fns_.wait_condition((cond), (lock), (tv)) : 0 )
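
/*
 * Typical wait loop for the condition macros above (a sketch; 'lock',
 * 'cond' and 'ready' are hypothetical).  The lock must be held around the
 * wait, and the predicate is re-checked in a loop because the wait can
 * return without the predicate having become true:
 *
 *	EVLOCK_LOCK(lock, 0);
 *	while (!ready)
 *		EVTHREAD_COND_WAIT(cond, lock);
 *	EVLOCK_UNLOCK(lock, 0);
 */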

/** True iff locking functions have been configured. */
#define EVTHREAD_LOCKING_ENABLED() \
	(evthread_lock_fns_.lock != NULL)

#elif ! defined(EVENT__DISABLE_THREAD_SUPPORT)

unsigned long evthreadimpl_get_id_(void);
int evthreadimpl_is_lock_debugging_enabled_(void);
void *evthreadimpl_lock_alloc_(unsigned locktype);
void evthreadimpl_lock_free_(void *lock, unsigned locktype);
int evthreadimpl_lock_lock_(unsigned mode, void *lock);
int evthreadimpl_lock_unlock_(unsigned mode, void *lock);
void *evthreadimpl_cond_alloc_(unsigned condtype);
void evthreadimpl_cond_free_(void *cond);
int evthreadimpl_cond_signal_(void *cond, int broadcast);
int evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv);
int evthreadimpl_locking_enabled_(void);

#define EVTHREAD_GET_ID() evthreadimpl_get_id_()
#define EVBASE_IN_THREAD(base) \
	((base)->th_owner_id == evthreadimpl_get_id_())
#define EVBASE_NEED_NOTIFY(base) \
	((base)->running_loop && \
	    ((base)->th_owner_id != evthreadimpl_get_id_()))

#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
	((lockvar) = evthreadimpl_lock_alloc_(locktype))

#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
	do { \
		void *_lock_tmp_ = (lockvar); \
		if (_lock_tmp_) \
			evthreadimpl_lock_free_(_lock_tmp_, (locktype)); \
	} while (0)

/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthreadimpl_lock_lock_(mode, lockvar); \
	} while (0)

/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
	do { \
		if (lockvar) \
			evthreadimpl_lock_unlock_(mode, lockvar); \
	} while (0)

/** Lock an event_base, if it is set up for locking. Acquires the lock
    in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
		EVLOCK_LOCK((base)->lockvar, 0); \
	} while (0)

/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
		EVLOCK_UNLOCK((base)->lockvar, 0); \
	} while (0)

/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
 * locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
	do { \
		if ((lock) && evthreadimpl_is_lock_debugging_enabled_()) { \
			EVUTIL_ASSERT(evthread_is_debug_lock_held_(lock)); \
		} \
	} while (0)

/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
 * manage to get it. */
static inline int EVLOCK_TRY_LOCK(void *lock);
static inline int
EVLOCK_TRY_LOCK(void *lock)
{
	if (lock) {
		int r = evthreadimpl_lock_lock_(EVTHREAD_TRY, lock);
		return !r;
	} else {
		/* Locking is disabled either globally or for this thing;
		 * of course we count as having the lock. */
		return 1;
	}
}

/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
	do { \
		(condvar) = evthreadimpl_cond_alloc_(0); \
	} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
	do { \
		if (cond) \
			evthreadimpl_cond_free_((cond)); \
	} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
	( (cond) ? evthreadimpl_cond_signal_((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
	( (cond) ? evthreadimpl_cond_signal_((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
 * holding 'lock'. The lock will be released until the condition is
 * signalled, at which point it will be acquired again. Returns 0 for
 * success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
 * on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
	( (cond) ? evthreadimpl_cond_wait_((cond), (lock), (tv)) : 0 )

#define EVTHREAD_LOCKING_ENABLED() \
	(evthreadimpl_locking_enabled_())

#else /* EVENT__DISABLE_THREAD_SUPPORT */

#define EVTHREAD_GET_ID() 1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_LOCK(lockvar, locktype) EVUTIL_NIL_STMT_

#define EVLOCK_LOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK(lockvar, mode) EVUTIL_NIL_STMT_
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) EVUTIL_NIL_STMT_

#define EVBASE_IN_THREAD(base) 1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVBASE_RELEASE_LOCK(base, lock) EVUTIL_NIL_STMT_
#define EVLOCK_ASSERT_LOCKED(lock) EVUTIL_NIL_STMT_

#define EVLOCK_TRY_LOCK(lock) 1

#define EVTHREAD_ALLOC_COND(condvar) EVUTIL_NIL_STMT_
#define EVTHREAD_FREE_COND(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_SIGNAL(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_BROADCAST(cond) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT(cond, lock) EVUTIL_NIL_STMT_
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) EVUTIL_NIL_STMT_

#define EVTHREAD_LOCKING_ENABLED() 0

#endif

/* This code is shared between both lock impls */
#if ! defined(EVENT__DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define EVLOCK_SORTLOCKS_(lockvar1, lockvar2) \
	do { \
		if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
			void *tmp = lockvar1; \
			lockvar1 = lockvar2; \
			lockvar2 = tmp; \
		} \
	} while (0)

/** Acquire both lock1 and lock2. Always allocates locks in the same order,
 * so that two threads locking two locks with LOCK2 will not deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *_lock1_tmplock = (lock1); \
		void *_lock2_tmplock = (lock2); \
		EVLOCK_SORTLOCKS_(_lock1_tmplock,_lock2_tmplock); \
		EVLOCK_LOCK(_lock1_tmplock,mode1); \
		if (_lock2_tmplock != _lock1_tmplock) \
			EVLOCK_LOCK(_lock2_tmplock,mode2); \
	} while (0)
/** Release both lock1 and lock2. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
	do { \
		void *_lock1_tmplock = (lock1); \
		void *_lock2_tmplock = (lock2); \
		EVLOCK_SORTLOCKS_(_lock1_tmplock,_lock2_tmplock); \
		if (_lock2_tmplock != _lock1_tmplock) \
			EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
		EVLOCK_UNLOCK(_lock1_tmplock,mode1); \
	} while (0)
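
/*
 * Usage sketch for the two-lock macros (the object and field names here
 * are hypothetical): because both macros sort the pointers first, every
 * thread acquires the two locks in the same order, and the case where both
 * arguments name the same lock is handled without double-locking:
 *
 *	EVLOCK_LOCK2(a->lock, b->lock, 0, 0);
 *	... operate on both objects ...
 *	EVLOCK_UNLOCK2(a->lock, b->lock, 0, 0);
 */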

int evthread_is_debug_lock_held_(void *lock);
void *evthread_debug_get_real_lock_(void *lock);

void *evthread_setup_global_lock_(void *lock_, unsigned locktype,
    int enable_locks);

#define EVTHREAD_SETUP_GLOBAL_LOCK(lockvar, locktype) \
	do { \
		lockvar = evthread_setup_global_lock_(lockvar, \
		    (locktype), enable_locks); \
		if (!lockvar) { \
			event_warn("Couldn't allocate %s", #lockvar); \
			return -1; \
		} \
	} while (0);
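
/*
 * EVTHREAD_SETUP_GLOBAL_LOCK expects to be expanded inside a function that
 * has an 'enable_locks' parameter in scope and that can 'return -1' on
 * failure, like the *_global_setup_locks_() helpers declared below.  A
 * rough sketch (the function and lock names are hypothetical):
 *
 *	static void *example_lock_ = NULL;
 *
 *	int
 *	example_global_setup_locks_(const int enable_locks)
 *	{
 *		EVTHREAD_SETUP_GLOBAL_LOCK(example_lock_, 0);
 *		return 0;
 *	}
 */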

int event_global_setup_locks_(const int enable_locks);
int evsig_global_setup_locks_(const int enable_locks);
int evutil_secure_rng_global_setup_locks_(const int enable_locks);

#endif

#ifdef __cplusplus
}
#endif

#endif /* EVTHREAD_INTERNAL_H_INCLUDED_ */