vertex-data-threaded-paging, pass 1

This commit is contained in:
David Rose 2007-06-06 02:50:38 +00:00
parent 4f13ddc9ac
commit 3e5249d89a
19 changed files with 523 additions and 116 deletions

View File

@ -45,6 +45,7 @@
#include "geomVertexArrayData.h"
#include "vertexDataSaveFile.h"
#include "vertexDataBook.h"
#include "vertexDataPage.h"
#include "config_pgraph.h"
#if defined(WIN32)
@ -82,6 +83,7 @@ PStatCollector GraphicsEngine::_delete_pcollector("App:Delete");
PStatCollector GraphicsEngine::_sw_sprites_pcollector("SW Sprites");
PStatCollector GraphicsEngine::_vertex_data_small_pcollector("Vertex Data:Small");
PStatCollector GraphicsEngine::_vertex_data_independent_pcollector("Vertex Data:Independent");
PStatCollector GraphicsEngine::_vertex_data_pending_pcollector("Vertex Data:Pending");
PStatCollector GraphicsEngine::_vertex_data_resident_pcollector("Vertex Data:Resident");
PStatCollector GraphicsEngine::_vertex_data_compressed_pcollector("Vertex Data:Compressed");
PStatCollector GraphicsEngine::_vertex_data_unused_disk_pcollector("Vertex Data:Disk:Unused");
@ -501,6 +503,9 @@ remove_all_windows() {
// any) has been flushed to disk.
BamCache *cache = BamCache::get_global_ptr();
cache->flush_index();
// And, hey, let's stop the vertex paging thread, if any.
VertexDataPage::stop_thread();
}
////////////////////////////////////////////////////////////////////
@ -758,6 +763,7 @@ render_frame() {
size_t independent = GeomVertexArrayData::get_independent_lru()->get_total_size();
size_t resident = VertexDataPage::get_global_lru(VertexDataPage::RC_resident)->get_total_size();
size_t compressed = VertexDataPage::get_global_lru(VertexDataPage::RC_compressed)->get_total_size();
size_t pending = VertexDataPage::get_pending_lru()->get_total_size();
VertexDataSaveFile *save_file = VertexDataPage::get_save_file();
size_t total_disk = save_file->get_total_file_size();
@ -765,6 +771,7 @@ render_frame() {
_vertex_data_small_pcollector.set_level(small_buf);
_vertex_data_independent_pcollector.set_level(independent);
_vertex_data_pending_pcollector.set_level(pending);
_vertex_data_resident_pcollector.set_level(resident);
_vertex_data_compressed_pcollector.set_level(compressed);
_vertex_data_unused_disk_pcollector.set_level(total_disk - used_disk);

View File

@ -362,6 +362,7 @@ private:
static PStatCollector _sw_sprites_pcollector;
static PStatCollector _vertex_data_small_pcollector;
static PStatCollector _vertex_data_independent_pcollector;
static PStatCollector _vertex_data_pending_pcollector;
static PStatCollector _vertex_data_resident_pcollector;
static PStatCollector _vertex_data_compressed_pcollector;
static PStatCollector _vertex_data_used_disk_pcollector;

View File

@ -307,9 +307,16 @@ ConfigVariableBool vertex_data_allow_reread
"data to the page file, but it requires that bam files are not "
"modified during program execution. Set it false to prevent "
"this, so that the vertex data will need to be written to a "
"page file when it is evicted."));
"page file when it is evicted. Note that this is not fully "
"compatible with vertex-data-threaded-paging."));
ConfigVariableBool vertex_data_threaded_paging
("vertex-data-threaded-paging", true,
PRC_DESC("When this is true (and Panda has been compiled with thread "
"support) then a sub-thread will be spawned to evict vertex pages "
"to disk and read them back again. When this is false, this "
"work will be done in the main thread, which may introduce "
"occasional random chugs in rendering."));
ConfigureFn(config_gobj) {
BufferContext::init_type();

View File

@ -75,6 +75,7 @@ extern EXPCL_PANDA ConfigVariableFilename vertex_save_file_directory;
extern EXPCL_PANDA ConfigVariableString vertex_save_file_prefix;
extern EXPCL_PANDA ConfigVariableInt vertex_data_small_size;
extern EXPCL_PANDA ConfigVariableBool vertex_data_allow_reread;
extern EXPCL_PANDA ConfigVariableBool vertex_data_threaded_paging;
#endif

View File

@ -369,12 +369,18 @@ get_object() {
// Function: GeomVertexArrayDataHandle::get_read_pointer
// Access: Public
// Description: Returns a readable pointer to the beginning of the
// actual data stream.
// actual data stream, or NULL if the data is not
// currently resident. If the data is not currently
// resident, this will implicitly request it to become
// resident soon.
//
// If force is true, this method will never return NULL,
// but may block until the data is available.
////////////////////////////////////////////////////////////////////
INLINE const unsigned char *GeomVertexArrayDataHandle::
get_read_pointer() const {
get_read_pointer(bool force) const {
check_resident();
return _cdata->_buffer.get_read_pointer();
return _cdata->_buffer.get_read_pointer(force);
}
////////////////////////////////////////////////////////////////////
@ -448,7 +454,7 @@ get_modified() const {
INLINE string GeomVertexArrayDataHandle::
get_data() const {
check_resident();
return string((const char *)_cdata->_buffer.get_read_pointer(), _cdata->_buffer.get_size());
return string((const char *)_cdata->_buffer.get_read_pointer(true), _cdata->_buffer.get_size());
}
////////////////////////////////////////////////////////////////////
@ -464,7 +470,7 @@ get_subdata(size_t start, size_t size) const {
check_resident();
start = min(start, _cdata->_buffer.get_size());
size = min(size, _cdata->_buffer.get_size() - start);
return string((const char *)_cdata->_buffer.get_read_pointer() + start, size);
return string((const char *)_cdata->_buffer.get_read_pointer(true) + start, size);
}
////////////////////////////////////////////////////////////////////

View File

@ -43,8 +43,8 @@ ConfigVariableInt vertex_data_page_size
"This also controls the page size that is compressed and written "
"to disk when vertex data pages are evicted from memory."));
SimpleLru GeomVertexArrayData::_independent_lru(max_independent_vertex_data);
SimpleLru GeomVertexArrayData::_small_lru(max_independent_vertex_data);
SimpleLru GeomVertexArrayData::_independent_lru("independent", max_independent_vertex_data);
SimpleLru GeomVertexArrayData::_small_lru("small", max_independent_vertex_data);
VertexDataBook GeomVertexArrayData::_book(vertex_data_page_size);
@ -493,7 +493,7 @@ finalize(BamReader *manager) {
if (_endian_reversed) {
// Now is the time to endian-reverse the data.
VertexDataBuffer new_buffer(cdata->_buffer.get_size());
reverse_data_endianness(new_buffer.get_write_pointer(), cdata->_buffer.get_read_pointer(), cdata->_buffer.get_size());
reverse_data_endianness(new_buffer.get_write_pointer(), cdata->_buffer.get_read_pointer(true), cdata->_buffer.get_size());
cdata->_buffer.swap(new_buffer);
}
@ -570,12 +570,12 @@ write_datagram(BamWriter *manager, Datagram &dg, void *extra_data) const {
if (manager->get_file_endian() == BE_native) {
// For native endianness, we only have to write the data directly.
dg.append_data(_buffer.get_read_pointer(), _buffer.get_size());
dg.append_data(_buffer.get_read_pointer(true), _buffer.get_size());
} else {
// Otherwise, we have to convert it.
unsigned char *new_data = (unsigned char *)alloca(_buffer.get_size());
array_data->reverse_data_endianness(new_data, _buffer.get_read_pointer(), _buffer.get_size());
array_data->reverse_data_endianness(new_data, _buffer.get_read_pointer(true), _buffer.get_size());
dg.append_data(new_data, _buffer.get_size());
}
}
@ -628,7 +628,7 @@ fillin(DatagramIterator &scan, BamReader *manager, void *extra_data) {
// it immediately (and we should, to support threaded CData
// updates).
VertexDataBuffer new_buffer(_buffer.get_size());
array_data->reverse_data_endianness(new_buffer.get_write_pointer(), _buffer.get_read_pointer(), _buffer.get_size());
array_data->reverse_data_endianness(new_buffer.get_write_pointer(), _buffer.get_read_pointer(true), _buffer.get_size());
_buffer.swap(new_buffer);
}
}
@ -729,7 +729,7 @@ copy_data_from(const GeomVertexArrayDataHandle *other) {
_cdata->_buffer.unclean_realloc(other->_cdata->_buffer.get_size());
memcpy(_cdata->_buffer.get_write_pointer(),
other->_cdata->_buffer.get_read_pointer(),
other->_cdata->_buffer.get_read_pointer(true),
other->_cdata->_buffer.get_size());
_cdata->_modified = Geom::get_next_modified();

View File

@ -258,7 +258,7 @@ public:
INLINE const GeomVertexArrayData *get_object() const;
INLINE GeomVertexArrayData *get_object();
INLINE const unsigned char *get_read_pointer() const;
INLINE const unsigned char *get_read_pointer(bool force = true) const;
unsigned char *get_write_pointer();
PUBLISHED:

View File

@ -25,6 +25,7 @@
////////////////////////////////////////////////////////////////////
INLINE size_t SimpleLru::
get_total_size() const {
MutexHolder holder(_global_lock);
return _total_size;
}
@ -36,6 +37,7 @@ get_total_size() const {
////////////////////////////////////////////////////////////////////
INLINE size_t SimpleLru::
get_max_size() const {
MutexHolder holder(_global_lock);
return _max_size;
}
@ -49,8 +51,11 @@ get_max_size() const {
////////////////////////////////////////////////////////////////////
INLINE void SimpleLru::
set_max_size(size_t max_size) {
MutexHolder holder(_global_lock);
_max_size = max_size;
consider_evict();
if (_total_size > _max_size) {
do_evict();
}
}
////////////////////////////////////////////////////////////////////
@ -60,6 +65,7 @@ set_max_size(size_t max_size) {
////////////////////////////////////////////////////////////////////
INLINE void SimpleLru::
consider_evict() {
MutexHolder holder(_global_lock);
if (_total_size > _max_size) {
do_evict();
}
@ -121,35 +127,8 @@ operator = (const SimpleLruPage &copy) {
////////////////////////////////////////////////////////////////////
INLINE SimpleLru *SimpleLruPage::
get_lru() const {
return _lru;
}
////////////////////////////////////////////////////////////////////
// Function: SimpleLruPage::enqueue_lru
// Access: Published
// Description: Adds the page to the tail of the SimpleLru. When it
// reaches the head, it will be the next to be evicted.
////////////////////////////////////////////////////////////////////
INLINE void SimpleLruPage::
enqueue_lru(SimpleLru *lru) {
MutexHolder holder(SimpleLru::_global_lock);
if (_lru != (SimpleLru *)NULL) {
remove_from_list();
_lru->_total_size -= _lru_size;
_lru = NULL;
}
_lru = lru;
if (_lru != (SimpleLru *)NULL) {
_lru->_total_size += _lru_size;
insert_before(_lru);
}
// Let's not automatically evict pages; instead, we'll evict only on
// an explicit epoch test.
// _lru->consider_evict();
return _lru;
}
////////////////////////////////////////////////////////////////////
@ -218,6 +197,8 @@ set_lru_size(size_t lru_size) {
if (_lru != (SimpleLru *)NULL) {
_lru->_total_size -= _lru_size;
_lru->_total_size += lru_size;
_lru_size = lru_size;
} else {
_lru_size = lru_size;
}
_lru_size = lru_size;
}

View File

@ -18,7 +18,11 @@
#include "simpleLru.h"
Mutex SimpleLru::_global_lock;
// We define this as a reference to an allocated object, instead of as
// a concrete object, so that it won't get destructed when the program
// exits. (If it did, there would be an ordering issue between it and
// the various concrete SimpleLru objects which reference it.)
Mutex &SimpleLru::_global_lock = *new Mutex;
////////////////////////////////////////////////////////////////////
// Function: SimpleLru::Constructor
@ -26,7 +30,10 @@ Mutex SimpleLru::_global_lock;
// Description:
////////////////////////////////////////////////////////////////////
SimpleLru::
SimpleLru(size_t max_size) : LinkedListNode(true) {
SimpleLru(const string &name, size_t max_size) :
LinkedListNode(true),
Namable(name)
{
_total_size = 0;
_max_size = max_size;
_active_marker = new SimpleLruPage(0);
@ -53,6 +60,38 @@ SimpleLru::
#endif
}
////////////////////////////////////////////////////////////////////
// Function: SimpleLruPage::enqueue_lru
// Access: Published
// Description: Adds the page to the tail of the SimpleLru. When it
// reaches the head, it will be the next to be evicted.
////////////////////////////////////////////////////////////////////
void SimpleLruPage::
enqueue_lru(SimpleLru *lru) {
MutexHolder holder(SimpleLru::_global_lock);
if (_lru == lru) {
return;
}
if (_lru != (SimpleLru *)NULL) {
remove_from_list();
_lru->_total_size -= _lru_size;
_lru = NULL;
}
_lru = lru;
if (_lru != (SimpleLru *)NULL) {
_lru->_total_size += _lru_size;
insert_before(_lru);
}
// Let's not automatically evict pages; instead, we'll evict only on
// an explicit epoch test.
// _lru->consider_evict();
}
////////////////////////////////////////////////////////////////////
// Function: SimpleLru::count_active_size
// Access: Published
@ -77,10 +116,15 @@ count_active_size() const {
// Function: SimpleLru::do_evict
// Access: Private
// Description: Evicts pages until the LRU is within tolerance.
// Assumes the lock is already held.
////////////////////////////////////////////////////////////////////
void SimpleLru::
do_evict() {
MutexHolder holder(_global_lock);
if (_next == this) {
// Nothing in the queue.
return;
}
// Store the current end of the list. If pages re-enqueue
// themselves during this traversal, we don't want to visit them
// twice.
@ -104,6 +148,25 @@ do_evict() {
}
}
////////////////////////////////////////////////////////////////////
// Function: SimpleLru::do_validate_size
// Access: Private
// Description: Checks that _total_size is consistent. Assume the
// lock is already held.
////////////////////////////////////////////////////////////////////
bool SimpleLru::
do_validate_size() {
size_t total = 0;
LinkedListNode *node = _next;
while (node != this) {
total += ((SimpleLruPage *)node)->get_lru_size();
node = ((SimpleLruPage *)node)->_next;
}
return (total == _total_size);
}
////////////////////////////////////////////////////////////////////
// Function: SimpleLruPage::Destructor
// Access: Published, Virtual

View File

@ -21,6 +21,7 @@
#include "pandabase.h"
#include "linkedListNode.h"
#include "namable.h"
#include "pmutex.h"
#include "mutexHolder.h"
@ -30,9 +31,9 @@ class SimpleLruPage;
// Class : SimpleLru
// Description : An implementation of a very simple LRU algorithm.
////////////////////////////////////////////////////////////////////
class EXPCL_PANDA SimpleLru : public LinkedListNode {
class EXPCL_PANDA SimpleLru : public LinkedListNode, public Namable {
PUBLISHED:
SimpleLru(size_t max_size);
SimpleLru(const string &name, size_t max_size);
~SimpleLru();
INLINE size_t get_total_size() const;
@ -44,10 +45,11 @@ PUBLISHED:
INLINE void begin_epoch();
public:
static Mutex _global_lock;
static Mutex &_global_lock;
private:
void do_evict();
bool do_validate_size();
size_t _total_size;
size_t _max_size;
@ -72,7 +74,7 @@ PUBLISHED:
INLINE SimpleLru *get_lru() const;
INLINE void enqueue_lru(SimpleLru *lru);
void enqueue_lru(SimpleLru *lru);
INLINE void dequeue_lru();
INLINE void mark_used_lru() const;

View File

@ -43,12 +43,23 @@ get_page() const {
// Function: VertexDataBlock::get_pointer
// Access: Public
// Description: Returns a pointer to the start of the allocated
// memory for this buffer.
// memory for this buffer, or NULL if the data is not
// currently resident. If the data is not currently
// resident, this will implicitly request it to become
// resident soon.
//
// If force is true, this method will never return NULL,
// but may block until the data is available.
////////////////////////////////////////////////////////////////////
INLINE unsigned char *VertexDataBlock::
get_pointer() const {
get_pointer(bool force) const {
nassertr(get_page() != (VertexDataPage *)NULL, NULL);
return get_page()->get_page_data() + get_start();
unsigned char *page_data = get_page()->get_page_data(force);
if (page_data == (unsigned char *)NULL) {
return NULL;
} else {
return page_data + get_start();
}
}
////////////////////////////////////////////////////////////////////

View File

@ -41,7 +41,7 @@ PUBLISHED:
INLINE VertexDataBlock *get_next_block() const;
public:
INLINE unsigned char *get_pointer() const;
INLINE unsigned char *get_pointer(bool force) const;
friend class VertexDataPage;
};

View File

@ -65,7 +65,7 @@ operator = (const VertexDataBuffer &copy) {
MutexHolder holder(_lock);
do_unclean_realloc(copy.get_size());
memcpy(_resident_data, copy.get_read_pointer(), _size);
memcpy(_resident_data, copy.get_read_pointer(true), _size);
_source_file = copy._source_file;
_source_pos = copy._source_pos;
}
@ -83,17 +83,23 @@ INLINE VertexDataBuffer::
////////////////////////////////////////////////////////////////////
// Function: VertexDataBuffer::get_read_pointer
// Access: Public
// Description: Returns a read-only pointer to the raw data.
// Description: Returns a read-only pointer to the raw data, or NULL
// if the data is not currently resident. If the data
// is not currently resident, this will implicitly
// request it to become resident soon.
//
// If force is true, this method will never return NULL,
// but may block until the data is available.
////////////////////////////////////////////////////////////////////
INLINE const unsigned char *VertexDataBuffer::
get_read_pointer() const {
get_read_pointer(bool force) const {
MutexHolder holder(_lock);
if (_block != (VertexDataBlock *)NULL) {
// We don't necessarily need to page the buffer all the way into
// independent status; it's sufficient just to return the block's
// pointer, which will force its page to resident status.
return _block->get_pointer();
return _block->get_pointer(force);
}
if (_resident_data == (unsigned char *)NULL && !_source_file.is_null()) {
// If we need to re-read the original source, do so.

View File

@ -95,7 +95,7 @@ do_page_out(VertexDataBook &book) {
// file.
_block = book.alloc(_size);
nassertv(_block != (VertexDataBlock *)NULL);
unsigned char *pointer = _block->get_pointer();
unsigned char *pointer = _block->get_pointer(true);
nassertv(pointer != (unsigned char *)NULL);
memcpy(pointer, _resident_data, _size);
}
@ -164,6 +164,6 @@ do_page_in() {
nassertv(_resident_data != (unsigned char *)NULL);
get_class_type().inc_memory_usage(TypeHandle::MC_array, _size);
memcpy(_resident_data, _block->get_pointer(), _size);
memcpy(_resident_data, _block->get_pointer(true), _size);
_block = NULL;
}

View File

@ -68,7 +68,7 @@ public:
INLINE void operator = (const VertexDataBuffer &copy);
INLINE ~VertexDataBuffer();
INLINE const unsigned char *get_read_pointer() const;
INLINE const unsigned char *get_read_pointer(bool force) const;
INLINE unsigned char *get_write_pointer();
INLINE size_t get_size() const;

View File

@ -26,9 +26,39 @@
////////////////////////////////////////////////////////////////////
INLINE VertexDataPage::RamClass VertexDataPage::
get_ram_class() const {
MutexHolder holder(_lock);
return _ram_class;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::get_pending_ram_class
// Access: Published
// Description: Returns the pending ram class of the array. If this
// is different from get_ram_class(), this page has been
// queued to be processed by the thread. Eventually the
// page will be set to this ram class.
////////////////////////////////////////////////////////////////////
INLINE VertexDataPage::RamClass VertexDataPage::
get_pending_ram_class() const {
MutexHolder holder(_lock);
return _pending_ram_class;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::request_resident
// Access: Published
// Description: Ensures that the page will become resident soon.
// Future calls to get_page_data() will eventually
// return non-NULL.
////////////////////////////////////////////////////////////////////
INLINE void VertexDataPage::
request_resident() {
MutexHolder holder(_lock);
if (_ram_class != RC_resident) {
request_ram_class(RC_resident);
}
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::get_first_block
// Access: Published
@ -38,7 +68,6 @@ get_ram_class() const {
INLINE VertexDataBlock *VertexDataPage::
get_first_block() const {
MutexHolder holder(_lock);
check_resident();
return (VertexDataBlock *)SimpleAllocator::get_first_block();
}
@ -66,6 +95,18 @@ get_global_lru(RamClass rclass) {
return _global_lru[rclass];
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::get_pending_lru
// Access: Published, Static
// Description: Returns a pointer to the global LRU object that
// manages the VertexDataPage's that are pending
// processing by the thread.
////////////////////////////////////////////////////////////////////
INLINE SimpleLru *VertexDataPage::
get_pending_lru() {
return &_pending_lru;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::get_save_file
// Access: Published, Static
@ -95,56 +136,56 @@ save_to_disk() {
return do_save_to_disk();
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::restore_from_disk
// Access: Published
// Description: Restores the page from disk and makes it
// either compressed or resident (according to whether
// it was stored compressed on disk).
////////////////////////////////////////////////////////////////////
INLINE void VertexDataPage::
restore_from_disk() {
MutexHolder holder(_lock);
do_restore_from_disk();
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::get_page_data
// Access: Public
// Description: Returns a pointer to the page's data area.
// Description: Returns a pointer to the page's data area, or NULL if
// the page is not currently resident. If the page is
// not currently resident, this will implicitly request
// it to become resident soon.
//
// If force is true, this method will never return NULL,
// but may block until the page is available.
////////////////////////////////////////////////////////////////////
INLINE unsigned char *VertexDataPage::
get_page_data() const {
get_page_data(bool force) {
MutexHolder holder(_lock);
check_resident();
return _page_data;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::check_resident
// Access: Private
// Description: Forces the vertex data into system RAM, if it is not
// already there; also, marks it recently-used.
//
// Assumes the lock is already held.
////////////////////////////////////////////////////////////////////
INLINE void VertexDataPage::
check_resident() const {
if (_ram_class != RC_resident) {
((VertexDataPage *)this)->make_resident();
} else {
((VertexDataPage *)this)->mark_used_lru();
if (force) {
make_resident_now();
} else {
request_ram_class(RC_resident);
return NULL;
}
}
nassertv(_size == _uncompressed_size);
mark_used_lru();
nassertr(_size == _uncompressed_size, _page_data);
return _page_data;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::set_ram_class
// Access: Private
// Description: Puts the data in a new ram class.
// Description: Puts the data in a new ram class. Assumes the page
// lock is already held.
////////////////////////////////////////////////////////////////////
INLINE void VertexDataPage::
set_ram_class(RamClass rclass) {
_ram_class = rclass;
mark_used_lru(_global_lru[rclass]);
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::PageThread::Constructor
// Access: Public
// Description:
////////////////////////////////////////////////////////////////////
INLINE VertexDataPage::PageThread::
PageThread() :
Thread("VertexDataPage", "VertexDataPage"),
_shutdown(false),
_working_cvar(_tlock),
_pending_cvar(_tlock)
{
}

View File

@ -54,9 +54,13 @@ ConfigVariableInt max_disk_vertex_data
"that is allowed to be written to disk. Set it to -1 for no "
"limit."));
SimpleLru VertexDataPage::_resident_lru(max_resident_vertex_data);
SimpleLru VertexDataPage::_compressed_lru(max_compressed_vertex_data);
SimpleLru VertexDataPage::_disk_lru(0);
PT(VertexDataPage::PageThread) VertexDataPage::_thread;
Mutex VertexDataPage::_tlock;
SimpleLru VertexDataPage::_resident_lru("resident", max_resident_vertex_data);
SimpleLru VertexDataPage::_compressed_lru("compressed", max_compressed_vertex_data);
SimpleLru VertexDataPage::_disk_lru("disk", 0);
SimpleLru VertexDataPage::_pending_lru("pending", 0);
SimpleLru *VertexDataPage::_global_lru[RC_end_of_list] = {
&VertexDataPage::_resident_lru,
@ -86,6 +90,7 @@ VertexDataPage(size_t page_size) : SimpleAllocator(page_size), SimpleLruPage(pag
_uncompressed_size = _size;
_total_page_size += _size;
get_class_type().inc_memory_usage(TypeHandle::MC_array, (int)_size);
_pending_ram_class = RC_resident;
set_ram_class(RC_resident);
}
@ -116,7 +121,9 @@ VertexDataPage::
VertexDataBlock *VertexDataPage::
alloc(size_t size) {
MutexHolder holder(_lock);
check_resident();
if (_ram_class != RC_resident) {
make_resident_now();
}
VertexDataBlock *block = (VertexDataBlock *)SimpleAllocator::alloc(size);
@ -129,6 +136,27 @@ alloc(size_t size) {
return block;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::stop_thread
// Access: Published, Static
// Description: Call this to stop the paging thread, if it was
// started. This may block until all of the thread's
// pending tasks have been completed.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
stop_thread() {
PT(PageThread) thread;
{
MutexHolder holder(_tlock);
thread = _thread;
_thread.clear();
}
if (thread != (PageThread *)NULL) {
thread->stop_thread();
}
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::make_block
// Access: Protected, Virtual
@ -162,38 +190,58 @@ evict_lru() {
switch (_ram_class) {
case RC_resident:
if (_compressed_lru.get_max_size() == 0) {
make_disk();
request_ram_class(RC_disk);
} else {
make_compressed();
request_ram_class(RC_compressed);
}
break;
case RC_compressed:
make_disk();
request_ram_class(RC_disk);
break;
case RC_disk:
gobj_cat.warning()
<< "Cannot evict array data from disk.\n";
break;
case RC_end_of_list:
gobj_cat.warning()
<< "Internal error: attempt to evict array data " << this
<< " in inappropriate state " << _ram_class << ".\n";
break;
}
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::make_resident_now
// Access: Private
// Description: Short-circuits the thread and forces the page into
// resident status immediately.
//
// Intended to be called from the main thread. Assumes
// the lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_resident_now() {
MutexHolder holder(_tlock);
if (_pending_ram_class != _ram_class) {
nassertv(_thread != (PageThread *)NULL);
_thread->remove_page(this);
}
make_resident();
_pending_ram_class = RC_resident;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::make_resident
// Access: Private
// Description: Moves the page to fully resident status by
// expanding it or reading it from disk as necessary.
//
// Assumes the lock is already held.
// Intended to be called from the sub-thread. Assumes
// the lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
make_resident() {
if (_ram_class == RC_resident) {
// If we're already resident, just mark the page recently used.
mark_used_lru();
return;
}
@ -321,6 +369,8 @@ make_disk() {
if (_ram_class == RC_resident || _ram_class == RC_compressed) {
if (!do_save_to_disk()) {
// Can't save it to disk for some reason.
gobj_cat.warning()
<< "Couldn't save page " << this << " to disk.\n";
mark_used_lru();
return;
}
@ -331,7 +381,7 @@ make_disk() {
delete[] _page_data;
_page_data = NULL;
_size = 0;
set_ram_class(RC_disk);
}
}
@ -419,6 +469,52 @@ do_restore_from_disk() {
}
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::request_ram_class
// Access: Private
// Description: Requests the thread set the page to the indicated ram
// class (if we are using threading). The page will be
// enqueued in the thread, which will eventually be
// responsible for setting the requested ram class.
//
// Assumes the page's lock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::
request_ram_class(RamClass ram_class) {
if (ram_class == _ram_class) {
gobj_cat.warning()
<< "Page " << this << " already has ram class " << ram_class << "\n";
return;
}
if (!vertex_data_threaded_paging || !Thread::is_threading_supported()) {
// No threads. Do it immediately.
switch (ram_class) {
case RC_resident:
make_resident();
break;
case RC_compressed:
make_compressed();
break;
case RC_disk:
make_disk();
break;
}
return;
}
MutexHolder holder(_tlock);
if (_thread == (PageThread *)NULL) {
// Allocate and start a new global thread.
_thread = new PageThread;
_thread->start(TP_low, true);
}
_thread->add_page(this, ram_class);
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::make_save_file
// Access: Private, Static
@ -433,3 +529,149 @@ make_save_file() {
_save_file = new VertexDataSaveFile(vertex_save_file_directory,
vertex_save_file_prefix, max_size);
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::PageThread::add_page
// Access: Public
// Description: Enqueues the indicated page on the thread to convert
// it to the specified ram class.
//
// It is assumed the page's lock is already held, and
// the thread's tlock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThread::
add_page(VertexDataPage *page, RamClass ram_class) {
if (page->_pending_ram_class == ram_class) {
// It's already queued.
nassertv(page->get_lru() == &_pending_lru);
return;
}
if (page->_pending_ram_class != page->_ram_class) {
// It's already queued, but for a different ram class. Dequeue it
// so we can requeue it.
remove_page(page);
}
if (page->_pending_ram_class != ram_class) {
// First, move the page to the "pending" LRU. When it eventually
// gets its requested ram class set, it will be requeued on the
// appropriate live LRU.
page->mark_used_lru(&_pending_lru);
page->_pending_ram_class = ram_class;
if (ram_class == RC_resident) {
_pending_reads.push_back(page);
} else {
_pending_writes.push_back(page);
}
_pending_cvar.signal();
}
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::PageThread::remove_page
// Access: Public
// Description: Dequeues the indicated page and removes it from the
// pending task list.
//
// It is assumed the page's lock is already held, and
// the thread's tlock is already held.
////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThread::
remove_page(VertexDataPage *page) {
if (page == _working_page) {
// Oops, the thread is currently working on this one. We'll have
// to wait for the thread to finish.
while (page == _working_page) {
_working_cvar.wait();
}
return;
}
if (page->_pending_ram_class == RC_resident) {
PendingPages::iterator pi =
find(_pending_reads.begin(), _pending_reads.end(), page);
nassertv(pi != _pending_reads.end());
_pending_reads.erase(pi);
} else {
PendingPages::iterator pi =
find(_pending_writes.begin(), _pending_writes.end(), page);
nassertv(pi != _pending_writes.end());
_pending_writes.erase(pi);
}
page->_pending_ram_class = page->_ram_class;
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::PageThread::thread_main
// Access: Protected, Virtual
// Description: The main processing loop for the sub-thread.
////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThread::
thread_main() {
_tlock.lock();
while (true) {
PStatClient::thread_tick(get_sync_name());
while (_pending_reads.empty() && _pending_writes.empty()) {
if (_shutdown) {
_tlock.release();
return;
}
_pending_cvar.wait();
}
// Reads always have priority.
if (!_pending_reads.empty()) {
_working_page = _pending_reads.front();
_pending_reads.pop_front();
} else {
_working_page = _pending_writes.front();
_pending_writes.pop_front();
}
RamClass ram_class = _working_page->_pending_ram_class;
_tlock.release();
{
MutexHolder holder(_working_page->_lock);
switch (ram_class) {
case RC_resident:
_working_page->make_resident();
break;
case RC_compressed:
_working_page->make_compressed();
break;
case RC_disk:
_working_page->make_disk();
break;
}
}
_tlock.lock();
_working_page = NULL;
_working_cvar.signal();
}
}
////////////////////////////////////////////////////////////////////
// Function: VertexDataPage::PageThread::stop_thread
// Access: Public
// Description: Signals the thread to stop and waits for it. Does
// not return until the thread has finished.
////////////////////////////////////////////////////////////////////
void VertexDataPage::PageThread::
stop_thread() {
{
MutexHolder holder(_tlock);
_shutdown = true;
_pending_cvar.signal();
}
join();
}

View File

@ -25,7 +25,10 @@
#include "pStatCollector.h"
#include "vertexDataSaveFile.h"
#include "pmutex.h"
#include "conditionVar.h"
#include "thread.h"
#include "mutexHolder.h"
#include "pdeque.h"
class VertexDataBlock;
@ -53,27 +56,31 @@ PUBLISHED:
};
INLINE RamClass get_ram_class() const;
INLINE RamClass get_pending_ram_class() const;
INLINE void request_resident();
VertexDataBlock *alloc(size_t size);
INLINE VertexDataBlock *get_first_block() const;
INLINE static size_t get_total_page_size();
INLINE static SimpleLru *get_global_lru(RamClass rclass);
INLINE static SimpleLru *get_pending_lru();
INLINE static VertexDataSaveFile *get_save_file();
INLINE bool save_to_disk();
INLINE void restore_from_disk();
static void stop_thread();
public:
INLINE unsigned char *get_page_data() const;
INLINE unsigned char *get_page_data(bool force);
protected:
virtual SimpleAllocatorBlock *make_block(size_t start, size_t size);
virtual void evict_lru();
private:
INLINE void check_resident() const;
class PageThread;
void make_resident_now();
void make_resident();
void make_compressed();
void make_disk();
@ -81,19 +88,48 @@ private:
bool do_save_to_disk();
void do_restore_from_disk();
PageThread *get_thread();
void request_ram_class(RamClass ram_class);
INLINE void set_ram_class(RamClass ram_class);
static void make_save_file();
typedef pdeque<VertexDataPage *> PendingPages;
class PageThread : public Thread {
public:
INLINE PageThread();
void add_page(VertexDataPage *page, RamClass ram_class);
void remove_page(VertexDataPage *page);
void stop_thread();
protected:
virtual void thread_main();
private:
VertexDataPage *_working_page;
PendingPages _pending_writes;
PendingPages _pending_reads;
bool _shutdown;
ConditionVar _working_cvar;
ConditionVar _pending_cvar;
};
static PT(PageThread) _thread;
static Mutex _tlock; // Protects the thread members.
unsigned char *_page_data;
size_t _size, _uncompressed_size;
RamClass _ram_class;
PT(VertexDataSaveBlock) _saved_block;
Mutex _lock;
Mutex _lock; // Protects above members
RamClass _pending_ram_class; // Protected by _tlock.
static SimpleLru _resident_lru;
static SimpleLru _compressed_lru;
static SimpleLru _disk_lru;
static SimpleLru _pending_lru;
static SimpleLru *_global_lru[RC_end_of_list];
static size_t _total_page_size;
@ -114,6 +150,8 @@ public:
private:
static TypeHandle _type_handle;
friend class PageThread;
};
#include "vertexDataPage.I"

View File

@ -202,6 +202,7 @@ static LevelCollectorProperties level_properties[] = {
{ 1, "Vertex Data", { 1.0, 0.4, 0.0 }, "MB", 64, 1048576 },
{ 1, "Vertex Data:Small", { 0.2, 0.3, 0.4 } },
{ 1, "Vertex Data:Independent", { 0.9, 0.1, 0.9 } },
{ 1, "Vertex Data:Pending", { 0.6, 0.8, 1.0 } },
{ 1, "Vertex Data:Disk", { 0.6, 0.9, 0.1 } },
{ 1, "Vertex Data:Disk:Unused", { 0.8, 0.4, 0.5 } },
{ 1, "Vertex Data:Disk:Used", { 0.2, 0.1, 0.6 } },