diff --git a/panda/src/pgraph/loader.cxx b/panda/src/pgraph/loader.cxx
index f791b3fc32..c89b2c1955 100644
--- a/panda/src/pgraph/loader.cxx
+++ b/panda/src/pgraph/loader.cxx
@@ -117,7 +117,7 @@ load_bam_stream(istream &in) {
 ////////////////////////////////////////////////////////////////////
 //     Function: Loader::output
 //       Access: Published, Virtual
-//  Description: 
+//  Description:
 ////////////////////////////////////////////////////////////////////
 void Loader::
 output(ostream &out) const {
@@ -224,7 +224,7 @@ load_file(const Filename &filename, const LoaderOptions &options) const {
     int num_dirs = model_path.get_num_directories();
     for (int i = 0; i < num_dirs; ++i) {
       Filename pathname(model_path.get_directory(i), this_filename);
-      PT(PandaNode) result = try_load_file(pathname, this_options, 
+      PT(PandaNode) result = try_load_file(pathname, this_options,
                                            requested_type);
       if (result != (PandaNode *)NULL) {
         return result;
@@ -286,15 +286,14 @@ try_load_file(const Filename &pathname, const LoaderOptions &options,
               LoaderFileType *requested_type) const {
   BamCache *cache = BamCache::get_global_ptr();
 
-  bool cache_only = (options.get_flags() & LoaderOptions::LF_cache_only) != 0;
+  bool allow_ram_cache = requested_type->get_allow_ram_cache(options);
 
-  if (requested_type->get_allow_ram_cache(options)) {
+  if (allow_ram_cache) {
     // If we're allowing a RAM cache, use the ModelPool to load the
     // file.
-    if (!cache_only || ModelPool::has_model(pathname)) {
-      PT(PandaNode) node = ModelPool::load_model(pathname, options);
-      if (node != (PandaNode *)NULL &&
-          (options.get_flags() & LoaderOptions::LF_allow_instance) == 0) {
+    PT(PandaNode) node = ModelPool::get_model(pathname, true);
+    if (node != (PandaNode *)NULL) {
+      if ((options.get_flags() & LoaderOptions::LF_allow_instance) == 0) {
         if (loader_cat.is_debug()) {
           loader_cat.debug()
             << "Model " << pathname << " found in ModelPool.\n";
@@ -320,16 +319,22 @@ try_load_file(const Filename &pathname, const LoaderOptions &options,
             << "Model " << pathname << " found in disk cache.\n";
         }
         PT(PandaNode) result = DCAST(PandaNode, record->get_data());
-        if (result->is_of_type(ModelRoot::get_class_type())) {
-          ModelRoot *model_root = DCAST(ModelRoot, result.p());
-          model_root->set_fullpath(pathname);
-          model_root->set_timestamp(record->get_source_timestamp());
-        }
 
         if (premunge_data) {
           SceneGraphReducer sgr;
           sgr.premunge(result, RenderState::make_empty());
         }
+
+        if (result->is_of_type(ModelRoot::get_class_type())) {
+          ModelRoot *model_root = DCAST(ModelRoot, result.p());
+          model_root->set_fullpath(pathname);
+          model_root->set_timestamp(record->get_source_timestamp());
+
+          if (allow_ram_cache) {
+            // Store the loaded model in the RAM cache.
+            ModelPool::add_model(pathname, model_root);
+          }
+        }
         return result;
       }
     }
@@ -339,19 +344,25 @@ try_load_file(const Filename &pathname, const LoaderOptions &options,
     loader_cat.debug()
       << "Model " << pathname << " not found in cache.\n";
   }
-  
+
+  bool cache_only = (options.get_flags() & LoaderOptions::LF_cache_only) != 0;
   if (!cache_only) {
     PT(PandaNode) result = requested_type->load_file(pathname, options, record);
-    if (result != (PandaNode *)NULL){
+    if (result != (PandaNode *)NULL) {
       if (record != (BamCacheRecord *)NULL) {
         record->set_data(result, result);
         cache->store(record);
       }
-      
+
       if (premunge_data) {
         SceneGraphReducer sgr;
         sgr.premunge(result, RenderState::make_empty());
       }
+
+      if (allow_ram_cache && result->is_of_type(ModelRoot::get_class_type())) {
+        // Store the loaded model in the RAM cache.
+        ModelPool::add_model(pathname, DCAST(ModelRoot, result.p()));
+      }
       return result;
     }
   }
@@ -454,7 +465,7 @@ bool Loader::
 try_save_file(const Filename &pathname, const LoaderOptions &options,
               PandaNode *node, LoaderFileType *requested_type) const {
   bool report_errors = ((options.get_flags() & LoaderOptions::LF_report_errors) != 0 || loader_cat.is_debug());
-  
+
   bool result = requested_type->save_file(pathname, options, node);
   return result;
 }
@@ -493,7 +504,7 @@ load_file_types() {
           loader_cat.debug()
             << "done loading file type module: " << name << endl;
         }
-        
+
       } else if (words.size() > 1) {
         // Multiple words: the first n words are filename extensions,
         // and the last word is the name of the library to load should
@@ -501,13 +512,13 @@ load_file_types() {
         LoaderFileTypeRegistry *registry = LoaderFileTypeRegistry::get_global_ptr();
         size_t num_extensions = words.size() - 1;
         string library_name = words[num_extensions];
-        
+
         for (size_t i = 0; i < num_extensions; i++) {
           string extension = words[i];
           if (extension[0] == '.') {
             extension = extension.substr(1);
           }
-          
+
           registry->register_deferred_type(extension, library_name);
         }
       }
diff --git a/panda/src/pgraph/loader.h b/panda/src/pgraph/loader.h
index 6587a59636..adfd77fd86 100644
--- a/panda/src/pgraph/loader.h
+++ b/panda/src/pgraph/loader.h
@@ -85,16 +85,16 @@ PUBLISHED:
   BLOCKING INLINE void stop_threads();
   INLINE bool remove(AsyncTask *task);
 
-  BLOCKING INLINE PT(PandaNode) load_sync(const Filename &filename, 
+  BLOCKING INLINE PT(PandaNode) load_sync(const Filename &filename,
                                           const LoaderOptions &options = LoaderOptions()) const;
-  PT(AsyncTask) make_async_request(const Filename &filename, 
+  PT(AsyncTask) make_async_request(const Filename &filename,
                                    const LoaderOptions &options = LoaderOptions());
   INLINE void load_async(AsyncTask *request);
 
   INLINE bool save_sync(const Filename &filename, const LoaderOptions &options,
                         PandaNode *node) const;
-  PT(AsyncTask) make_async_save_request(const Filename &filename, 
+  PT(AsyncTask) make_async_save_request(const Filename &filename,
                                         const LoaderOptions &options,
                                         PandaNode *node);
   INLINE void save_async(AsyncTask *request);
 
@@ -140,7 +140,7 @@ public:
     return get_class_type();
   }
   virtual TypeHandle force_init_type() {init_type(); return get_class_type();}
-  
+
 private:
   static TypeHandle _type_handle;
 
diff --git a/panda/src/pgraph/loaderFileTypeBam.cxx b/panda/src/pgraph/loaderFileTypeBam.cxx
index a585dac81f..bce1fb38b9 100644
--- a/panda/src/pgraph/loaderFileTypeBam.cxx
+++ b/panda/src/pgraph/loaderFileTypeBam.cxx
@@ -110,7 +110,7 @@ load_file(const Filename &path, const LoaderOptions &options,
   time_t timestamp = bam_file.get_reader()->get_source()->get_timestamp();
 
   PT(PandaNode) node = bam_file.read_node(report_errors);
-  if (node->is_of_type(ModelRoot::get_class_type())) {
+  if (node != (PandaNode *)NULL && node->is_of_type(ModelRoot::get_class_type())) {
     ModelRoot *model_root = DCAST(ModelRoot, node.p());
     model_root->set_fullpath(path);
     model_root->set_timestamp(timestamp);
diff --git a/panda/src/pgraph/modelPool.I b/panda/src/pgraph/modelPool.I
index 18e4fadf48..252e3e196e 100644
--- a/panda/src/pgraph/modelPool.I
+++ b/panda/src/pgraph/modelPool.I
@@ -17,7 +17,8 @@
 //     Function: ModelPool::has_model
 //       Access: Public, Static
 //  Description: Returns true if the model has ever been loaded,
-//               false otherwise.
+//               false otherwise.  Note that this does not guarantee
+//               that the model is still up-to-date.
 ////////////////////////////////////////////////////////////////////
 INLINE bool ModelPool::
 has_model(const Filename &filename) {
@@ -45,6 +46,20 @@ verify_model(const Filename &filename) {
   return load_model(filename) != (ModelRoot *)NULL;
 }
 
+////////////////////////////////////////////////////////////////////
+//     Function: ModelPool::get_model
+//       Access: Public, Static
+//  Description: Returns the model that has already been previously
+//               loaded, or NULL otherwise.  If verify is true, it
+//               will check whether the file is still up-to-date (and
+//               hasn't been modified in the meantime), and if it is
+//               not, will return NULL as well.
+////////////////////////////////////////////////////////////////////
+INLINE ModelRoot *ModelPool::
+get_model(const Filename &filename, bool verify) {
+  return get_ptr()->ns_get_model(filename, verify);
+}
+
 ////////////////////////////////////////////////////////////////////
 //     Function: ModelPool::load_model
 //       Access: Public, Static
diff --git a/panda/src/pgraph/modelPool.cxx b/panda/src/pgraph/modelPool.cxx
index f738f7090f..fedd3c1134 100644
--- a/panda/src/pgraph/modelPool.cxx
+++ b/panda/src/pgraph/modelPool.cxx
@@ -52,13 +52,12 @@ ns_has_model(const Filename &filename) {
 }
 
 ////////////////////////////////////////////////////////////////////
-//     Function: ModelPool::ns_load_model
+//     Function: ModelPool::ns_get_model
 //       Access: Private
-//  Description: The nonstatic implementation of load_model().
+//  Description: The nonstatic implementation of get_model().
 ////////////////////////////////////////////////////////////////////
 ModelRoot *ModelPool::
-ns_load_model(const Filename &filename, const LoaderOptions &options) {
-  VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
+ns_get_model(const Filename &filename, bool verify) {
   PT(ModelRoot) cached_model;
   bool got_cached_model = false;
 
@@ -74,7 +73,7 @@ ns_load_model(const Filename &filename, const LoaderOptions &options) {
     }
   }
 
-  if (got_cached_model) {
+  if (got_cached_model && verify) {
     if (pgraph_cat.is_debug()) {
       pgraph_cat.debug()
         << "ModelPool found " << cached_model << " for " << filename << "\n";
@@ -85,6 +84,7 @@ ns_load_model(const Filename &filename, const LoaderOptions &options) {
       // exist (or the model could not be loaded for some reason).
       if (cache_check_timestamps) {
         // Check to see if there is a file there now.
+        VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
         if (vfs->exists(filename)) {
           // There is, so try to load it.
           got_cached_model = false;
@@ -93,9 +93,10 @@ ns_load_model(const Filename &filename, const LoaderOptions &options) {
     } else {
       // This filename was previously attempted, and successfully
       // loaded.
-      if (cache_check_timestamps && cached_model->get_timestamp() != 0 && 
+      if (cache_check_timestamps && cached_model->get_timestamp() != 0 &&
           !cached_model->get_fullpath().empty()) {
         // Compare the timestamp to the file on-disk.
+        VirtualFileSystem *vfs = VirtualFileSystem::get_global_ptr();
         PT(VirtualFile) vfile = vfs->get_file(cached_model->get_fullpath());
         if (vfile == NULL) {
           // The file has disappeared!  Look further along the model-path.
@@ -116,12 +117,29 @@ ns_load_model(const Filename &filename, const LoaderOptions &options) {
         << "ModelPool returning " << cached_model << " for " << filename << "\n";
     }
     return cached_model;
+  } else {
+    return NULL;
+  }
+}
+
+////////////////////////////////////////////////////////////////////
+//     Function: ModelPool::ns_load_model
+//       Access: Private
+//  Description: The nonstatic implementation of load_model().
+//////////////////////////////////////////////////////////////////// +ModelRoot *ModelPool:: +ns_load_model(const Filename &filename, const LoaderOptions &options) { + + // First check if it has already been loaded and is still current. + PT(ModelRoot) cached_model = ns_get_model(filename, true); + if (cached_model != (ModelRoot *)NULL) { + return cached_model; } // Look on disk for the current file. LoaderOptions new_options(options); new_options.set_flags((new_options.get_flags() | LoaderOptions::LF_no_ram_cache) & - ~(LoaderOptions::LF_search | LoaderOptions::LF_report_errors)); + ~LoaderOptions::LF_search); Loader *model_loader = Loader::get_global_ptr(); PT(PandaNode) panda_node = model_loader->load_sync(filename, new_options); @@ -133,7 +151,7 @@ ns_load_model(const Filename &filename, const LoaderOptions &options) { } else { if (panda_node->is_of_type(ModelRoot::get_class_type())) { node = DCAST(ModelRoot, panda_node); - + } else { // We have to construct a ModelRoot node to put it under. node = new ModelRoot(filename); @@ -154,10 +172,6 @@ ns_load_model(const Filename &filename, const LoaderOptions &options) { return (*ti).second; } - if (pgraph_cat.is_debug()) { - pgraph_cat.debug() - << "ModelPool storing " << node << " for " << filename << "\n"; - } _models[filename] = node; } @@ -172,6 +186,10 @@ ns_load_model(const Filename &filename, const LoaderOptions &options) { void ModelPool:: ns_add_model(const Filename &filename, ModelRoot *model) { LightMutexHolder holder(_lock); + if (pgraph_cat.is_debug()) { + pgraph_cat.debug() + << "ModelPool storing " << model << " for " << filename << "\n"; + } // We blow away whatever model was there previously, if any. _models[filename] = model; } @@ -270,19 +288,19 @@ ns_list_contents(ostream &out) const { LightMutexHolder holder(_lock); out << "model pool contents:\n"; - + Models::const_iterator ti; int num_models = 0; for (ti = _models.begin(); ti != _models.end(); ++ti) { if ((*ti).second != NULL) { ++num_models; out << (*ti).first << "\n" - << " (count = " << (*ti).second->get_model_ref_count() + << " (count = " << (*ti).second->get_model_ref_count() << ")\n"; } } - - out << "total number of models: " << num_models << " (plus " + + out << "total number of models: " << num_models << " (plus " << _models.size() - num_models << " entries for nonexistent files)\n"; } diff --git a/panda/src/pgraph/modelPool.h b/panda/src/pgraph/modelPool.h index 264da21243..55b2379fde 100644 --- a/panda/src/pgraph/modelPool.h +++ b/panda/src/pgraph/modelPool.h @@ -49,6 +49,7 @@ class EXPCL_PANDA_PGRAPH ModelPool { PUBLISHED: INLINE static bool has_model(const Filename &filename); INLINE static bool verify_model(const Filename &filename); + INLINE static ModelRoot *get_model(const Filename &filename, bool verify); BLOCKING INLINE static ModelRoot *load_model(const Filename &filename, const LoaderOptions &options = LoaderOptions()); @@ -70,6 +71,7 @@ private: INLINE ModelPool(); bool ns_has_model(const Filename &filename); + ModelRoot *ns_get_model(const Filename &filename, bool verify); ModelRoot *ns_load_model(const Filename &filename, const LoaderOptions &options); void ns_add_model(const Filename &filename, ModelRoot *model);