dtoolbase: Use patomic<size_t> for memory usage counters

This commit is contained in:
rdb 2022-12-03 22:42:03 +01:00
parent 7c85b54ba4
commit 05b3fe2170
4 changed files with 33 additions and 33 deletions

View File

@@ -18,7 +18,7 @@
INLINE void MemoryHook::
inc_heap(size_t size) {
#ifdef DO_MEMORY_USAGE
AtomicAdjust::add(_requested_heap_size, (AtomicAdjust::Integer)size);
_requested_heap_size.fetch_add(size, std::memory_order_relaxed);
#endif // DO_MEMORY_USAGE
}
@@ -30,7 +30,7 @@ INLINE void MemoryHook::
dec_heap(size_t size) {
#ifdef DO_MEMORY_USAGE
// assert((int)size <= _requested_heap_size);
AtomicAdjust::add(_requested_heap_size, -(AtomicAdjust::Integer)size);
_requested_heap_size.fetch_sub(size, std::memory_order_relaxed);
#endif // DO_MEMORY_USAGE
}

View File

@@ -203,10 +203,10 @@ ptr_to_alloc(void *ptr, size_t &size) {
*/
MemoryHook::
MemoryHook(const MemoryHook &copy) :
_total_heap_single_size(copy._total_heap_single_size),
_total_heap_array_size(copy._total_heap_array_size),
_requested_heap_size(copy._requested_heap_size),
_total_mmap_size(copy._total_mmap_size),
_total_heap_single_size(copy._total_heap_single_size.load(std::memory_order_relaxed)),
_total_heap_array_size(copy._total_heap_array_size.load(std::memory_order_relaxed)),
_requested_heap_size(copy._requested_heap_size.load(std::memory_order_relaxed)),
_total_mmap_size(copy._total_mmap_size.load(std::memory_order_relaxed)),
_max_heap_size(copy._max_heap_size),
_page_size(copy._page_size) {
}
@@ -250,9 +250,9 @@ heap_alloc_single(size_t size) {
size = get_ptr_size(alloc);
inflated_size = size;
#endif
AtomicAdjust::add(_total_heap_single_size, (AtomicAdjust::Integer)size);
if ((size_t)AtomicAdjust::get(_total_heap_single_size) +
(size_t)AtomicAdjust::get(_total_heap_array_size) >
_total_heap_single_size.fetch_add(size, std::memory_order_relaxed);
if (_total_heap_single_size.load(std::memory_order_relaxed) +
_total_heap_array_size.load(std::memory_order_relaxed) >
_max_heap_size) {
overflow_heap_size();
}
@@ -275,8 +275,8 @@ heap_free_single(void *ptr) {
void *alloc = ptr_to_alloc(ptr, size);
#ifdef DO_MEMORY_USAGE
assert((int)size <= _total_heap_single_size);
AtomicAdjust::add(_total_heap_single_size, -(AtomicAdjust::Integer)size);
assert((int)size <= _total_heap_single_size.load(std::memory_order_relaxed));
_total_heap_single_size.fetch_sub(size, std::memory_order_relaxed);
#endif // DO_MEMORY_USAGE
#ifdef MEMORY_HOOK_MALLOC_LOCK
@@ -327,9 +327,9 @@ heap_alloc_array(size_t size) {
size = get_ptr_size(alloc);
inflated_size = size;
#endif
AtomicAdjust::add(_total_heap_array_size, (AtomicAdjust::Integer)size);
if ((size_t)AtomicAdjust::get(_total_heap_single_size) +
(size_t)AtomicAdjust::get(_total_heap_array_size) >
_total_heap_array_size.fetch_add(size, std::memory_order_relaxed);
if (_total_heap_single_size.load(std::memory_order_relaxed) +
_total_heap_array_size.load(std::memory_order_relaxed) >
_max_heap_size) {
overflow_heap_size();
}
@@ -383,8 +383,8 @@ heap_realloc_array(void *ptr, size_t size) {
size = get_ptr_size(alloc1);
inflated_size = size;
#endif
assert((AtomicAdjust::Integer)orig_size <= _total_heap_array_size);
AtomicAdjust::add(_total_heap_array_size, (AtomicAdjust::Integer)size-(AtomicAdjust::Integer)orig_size);
assert(orig_size <= _total_heap_array_size.load(std::memory_order_relaxed));
_total_heap_array_size.fetch_add(size - orig_size, std::memory_order_relaxed);
#endif // DO_MEMORY_USAGE
// Align this to the requested boundary.
@@ -424,7 +424,7 @@ heap_free_array(void *ptr) {
#ifdef DO_MEMORY_USAGE
assert((int)size <= _total_heap_array_size);
AtomicAdjust::add(_total_heap_array_size, -(AtomicAdjust::Integer)size);
_total_heap_array_size.fetch_sub(size, std::memory_order_relaxed);
#endif // DO_MEMORY_USAGE
#ifdef MEMORY_HOOK_MALLOC_LOCK
@@ -489,7 +489,7 @@ mmap_alloc(size_t size, bool allow_exec) {
assert((size % _page_size) == 0);
#ifdef DO_MEMORY_USAGE
_total_mmap_size += size;
_total_mmap_size.fetch_add(size, std::memory_order_relaxed);
#endif
#ifdef _WIN32
@@ -544,8 +544,8 @@ mmap_free(void *ptr, size_t size) {
assert((size % _page_size) == 0);
#ifdef DO_MEMORY_USAGE
assert((int)size <= _total_mmap_size);
_total_mmap_size -= size;
assert((int)size <= _total_mmap_size.load(std::memory_order_relaxed));
_total_mmap_size.fetch_sub(size, std::memory_order_relaxed);
#endif
#ifdef _WIN32

View File

@@ -16,7 +16,7 @@
#include "dtoolbase.h"
#include "numeric_types.h"
#include "atomicAdjust.h"
#include "patomic.h"
#include "mutexImpl.h"
#include <map>
@@ -66,10 +66,10 @@ public:
INLINE static size_t get_ptr_size(void *ptr);
protected:
TVOLATILE AtomicAdjust::Integer _total_heap_single_size = 0;
TVOLATILE AtomicAdjust::Integer _total_heap_array_size = 0;
TVOLATILE AtomicAdjust::Integer _requested_heap_size = 0;
TVOLATILE AtomicAdjust::Integer _total_mmap_size = 0;
patomic<size_t> _total_heap_single_size { 0u };
patomic<size_t> _total_heap_array_size { 0u };
patomic<size_t> _requested_heap_size { 0u };
patomic<size_t> _total_mmap_size { 0u };
// If the allocated heap size crosses this threshold, we call
// overflow_heap_size().

View File

@@ -153,7 +153,7 @@ get_total_cpp_size() {
INLINE size_t MemoryUsage::
get_panda_heap_single_size() {
#ifdef DO_MEMORY_USAGE
return (size_t)AtomicAdjust::get(get_global_ptr()->_total_heap_single_size);
return get_global_ptr()->_total_heap_single_size.load(std::memory_order_relaxed);
#else
return 0;
#endif
@@ -166,7 +166,7 @@ get_panda_heap_single_size() {
INLINE size_t MemoryUsage::
get_panda_heap_array_size() {
#ifdef DO_MEMORY_USAGE
return (size_t)AtomicAdjust::get(get_global_ptr()->_total_heap_array_size);
return get_global_ptr()->_total_heap_array_size.load(std::memory_order_relaxed);
#else
return 0;
#endif
@@ -181,7 +181,7 @@ INLINE size_t MemoryUsage::
get_panda_heap_overhead() {
#if defined(DO_MEMORY_USAGE) && (defined(USE_MEMORY_DLMALLOC) || defined(USE_MEMORY_PTMALLOC2))
MemoryUsage *mu = get_global_ptr();
return (size_t)(AtomicAdjust::get(mu->_requested_heap_size) - AtomicAdjust::get(mu->_total_heap_single_size) - AtomicAdjust::get(mu->_total_heap_array_size));
return mu->_requested_heap_size.load(std::memory_order_relaxed) - mu->_total_heap_single_size.load(std::memory_order_relaxed) - mu->_total_heap_array_size.load(std::memory_order_relaxed);
#else
return 0;
#endif
@@ -194,7 +194,7 @@ get_panda_heap_overhead() {
INLINE size_t MemoryUsage::
get_panda_mmap_size() {
#ifdef DO_MEMORY_USAGE
return (size_t)AtomicAdjust::get(get_global_ptr()->_total_mmap_size);
return get_global_ptr()->_total_mmap_size.load(std::memory_order_relaxed);
#else
return 0;
#endif
@@ -227,7 +227,7 @@ get_external_size() {
#else
// Without alternative malloc, the Panda allocated memory is also included
// in total_size, so we have to subtract it out.
return mu->_total_size - (size_t)mu->_total_heap_single_size - (size_t)mu->_total_heap_array_size;
return mu->_total_size - mu->_total_heap_single_size.load(std::memory_order_relaxed) - mu->_total_heap_array_size.load(std::memory_order_relaxed);
#endif
} else {
return 0;
@@ -246,12 +246,12 @@ get_total_size() {
#ifdef DO_MEMORY_USAGE
MemoryUsage *mu = get_global_ptr();
if (mu->_count_memory_usage) {
return mu->_total_size + (size_t)mu->_requested_heap_size;
return mu->_total_size + mu->_requested_heap_size.load(std::memory_order_relaxed);
} else {
#if defined(USE_MEMORY_DLMALLOC) || defined(USE_MEMORY_PTMALLOC2)
return (size_t)mu->_requested_heap_size;
return mu->_requested_heap_size.load(std::memory_order_relaxed);
#else
return (size_t)(AtomicAdjust::get(mu->_total_heap_single_size) + AtomicAdjust::get(mu->_total_heap_array_size));
return mu->_total_heap_single_size.load(std::memory_order_relaxed) + mu->_total_heap_array_size.load(std::memory_order_relaxed);
#endif
}
#else