Cull improvements: don't compute bounds when not necessary
parent 75c1364373
commit c04cd29246
@@ -21,6 +21,7 @@
#include "depthTestAttrib.h"
#include "depthWriteAttrib.h"
#include "pStatTimer.h"
#include "omniBoundingVolume.h"
#include <stdio.h> // For sprintf/snprintf

PStatCollector FrameRateMeter::_show_fps_pcollector("*:Show fps");
@@ -39,6 +40,10 @@ FrameRateMeter(const string &name) :

set_cull_callback();

// Don't do frustum culling, as the text will always be in view.
set_bounds(new OmniBoundingVolume());
set_final(true);

Thread *current_thread = Thread::get_current_thread();

_show_milliseconds = frame_rate_meter_milliseconds;
@@ -103,6 +108,11 @@ setup_window(GraphicsOutput *window) {
_root.set_material_off(1);
_root.set_two_sided(1, 1);

// If we don't set this explicitly, Panda will cause it to be rendered
// in a back-to-front cull bin, which will cause the bounding volume
// to be computed unnecessarily. Saves a little bit of overhead.
_root.set_bin("unsorted", 0);

// Create a display region that covers the entire window.
_display_region = _window->make_mono_display_region();
_display_region->set_sort(frame_rate_meter_layer_sort);
@@ -188,7 +198,6 @@ cull_callback(CullTraverser *trav, CullTraverserData &data) {
if (aspect_ratio != _last_aspect_ratio) {
_aspect_ratio_transform = TransformState::make_scale(LVecBase3(aspect_ratio, 1, 1));
_last_aspect_ratio = aspect_ratio;
cerr << aspect_ratio << "\n";
}
data._net_transform = data._net_transform->compose(_aspect_ratio_transform);

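Note: the FrameRateMeter hunks above amount to a general recipe for screen-space overlays. The sketch below is illustrative only (make_overlay_node is not a Panda3D API) and shows the same three steps applied to an ordinary node: an infinite bounding volume so the frustum test is trivially true, set_final() so culling does not descend further, and an explicit fixed-order bin so the back-to-front bin never needs the volume.

#include "pandaNode.h"
#include "nodePath.h"
#include "omniBoundingVolume.h"

// Illustrative helper, not part of Panda3D: configure a node the same way
// FrameRateMeter now configures itself, so cull never recomputes its bounds.
NodePath make_overlay_node(const std::string &name) {
  PT(PandaNode) node = new PandaNode(name);

  // Infinite volume: the node always passes the view-frustum test.
  node->set_bounds(new OmniBoundingVolume());
  // Don't descend further to refine the cull result.
  node->set_final(true);

  NodePath np(node);
  // A fixed-order bin avoids the back-to-front bin, which would otherwise
  // need the bounding volume to sort by depth.
  np.set_bin("unsorted", 0);
  return np;
}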
@@ -29,10 +29,12 @@ CullTraverserData(const NodePath &start,
_state(state),
_view_frustum(view_frustum),
_cull_planes(CullPlanes::make_empty()),
_draw_mask(DrawMask::all_on())
_draw_mask(DrawMask::all_on()),
_portal_depth(0)
{
_node_reader.check_bounds();
_portal_depth = 0;
// Only update the bounding volume if we're going to end up needing it.
bool check_bounds = (view_frustum != (GeometricBoundingVolume *)NULL);
_node_reader.check_cached(check_bounds);
}

////////////////////////////////////////////////////////////////////
@@ -84,10 +86,13 @@ CullTraverserData(const CullTraverserData &parent, PandaNode *child) :
_state(parent._state),
_view_frustum(parent._view_frustum),
_cull_planes(parent._cull_planes),
_draw_mask(parent._draw_mask)
_draw_mask(parent._draw_mask),
_portal_depth(parent._portal_depth)
{
_node_reader.check_bounds();
_portal_depth = parent._portal_depth;
// Only update the bounding volume if we're going to end up needing it.
bool check_bounds = !_cull_planes->is_empty() ||
(_view_frustum != (GeometricBoundingVolume *)NULL);
_node_reader.check_cached(check_bounds);
}

////////////////////////////////////////////////////////////////////

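For context, a condensed sketch (not the library source; needs_bounds is an illustrative name) of the decision the two constructors above now make: the node's cached bounding volume is refreshed only if something downstream will actually read it, while check_cached(false) still freshens the draw masks and clip-plane state.

#include "geometricBoundingVolume.h"
#include "cullPlanes.h"

// Illustrative predicate matching the logic in the constructors above:
// refresh the cached bounding volume only when a later test will read it.
static bool needs_bounds(const GeometricBoundingVolume *view_frustum,
                         const CullPlanes *cull_planes) {
  // A view frustum means a containment test is coming; non-empty cull
  // planes mean a plane-cull test is coming.  Otherwise the volume would
  // be computed and then never read.
  return view_frustum != nullptr ||
         (cull_planes != nullptr && !cull_planes->is_empty());
}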
@@ -130,12 +130,11 @@ apply_transform_and_state(CullTraverser *trav,
////////////////////////////////////////////////////////////////////
bool CullTraverserData::
is_in_view_impl() {
CPT(BoundingVolume) node_volume = _node_reader.get_bounds();
nassertr(node_volume->is_of_type(GeometricBoundingVolume::get_class_type()), false);
const GeometricBoundingVolume *node_gbv =
DCAST(GeometricBoundingVolume, node_volume);
const GeometricBoundingVolume *node_gbv = NULL;

if (_view_frustum != (GeometricBoundingVolume *)NULL) {
DCAST_INTO_R(node_gbv, _node_reader.get_bounds(), false)

int result = _view_frustum->contains(node_gbv);

if (pgraph_cat.is_spam()) {
@@ -179,6 +178,10 @@ is_in_view_impl() {
}

if (!_cull_planes->is_empty()) {
if (node_gbv == (const GeometricBoundingVolume *)NULL) {
DCAST_INTO_R(node_gbv, _node_reader.get_bounds(), false)
}

// Also cull against the current clip planes.
int result;
_cull_planes = _cull_planes->do_cull(result, _state, node_gbv);

@@ -632,14 +632,14 @@ get_internal_vertices(Thread *current_thread) const {
// Function: PandaNode::is_bounds_stale
// Access: Published
// Description: Returns true if the bounding volume of this node is
// stale and will be implicitly recomputed at the next
// call to get_bounds(), or false if it is fresh and
// need not be recomputed.
// stale and will be implicitly recomputed at the next
// call to get_bounds(), or false if it is fresh and
// need not be recomputed.
////////////////////////////////////////////////////////////////////
bool PandaNode::
is_bounds_stale() const {
CDReader cdata(_cycler);
return (cdata->_last_update != cdata->_next_update);
return (cdata->_last_bounds_update != cdata->_next_update);
}

////////////////////////////////////////////////////////////////////
@@ -726,14 +726,13 @@ mark_bounds_stale(int pipeline_stage, Thread *current_thread) const {
bool is_stale_bounds;
{
CDStageReader cdata(_cycler, pipeline_stage, current_thread);
is_stale_bounds = (cdata->_last_update != cdata->_next_update);
is_stale_bounds = (cdata->_last_bounds_update != cdata->_next_update);
}
if (!is_stale_bounds) {
((PandaNode *)this)->force_bounds_stale(pipeline_stage, current_thread);
}
}

////////////////////////////////////////////////////////////////////
// Function: PandaNode::mark_internal_bounds_stale
// Access: Protected

@@ -1991,7 +1991,7 @@ get_net_draw_control_mask() const {
// The cache is stale; it needs to be rebuilt.
PStatTimer timer(_update_bounds_pcollector);
CDStageWriter cdataw =
((PandaNode *)this)->update_bounds(pipeline_stage, cdata);
((PandaNode *)this)->update_cached(false, pipeline_stage, cdata);
return cdataw->_net_draw_control_mask;
}
return cdata->_net_draw_control_mask;
@@ -2022,7 +2022,7 @@ get_net_draw_show_mask() const {
// The cache is stale; it needs to be rebuilt.
PStatTimer timer(_update_bounds_pcollector);
CDStageWriter cdataw =
((PandaNode *)this)->update_bounds(pipeline_stage, cdata);
((PandaNode *)this)->update_cached(false, pipeline_stage, cdata);
return cdataw->_net_draw_show_mask;
}
return cdata->_net_draw_show_mask;
@@ -2096,7 +2096,7 @@ get_net_collide_mask(Thread *current_thread) const {
// The cache is stale; it needs to be rebuilt.
PStatTimer timer(_update_bounds_pcollector);
CDStageWriter cdataw =
((PandaNode *)this)->update_bounds(pipeline_stage, cdata);
((PandaNode *)this)->update_cached(false, pipeline_stage, cdata);
return cdataw->_net_collide_mask;
}
return cdata->_net_collide_mask;
@@ -2117,7 +2117,7 @@ get_off_clip_planes(Thread *current_thread) const {
// The cache is stale; it needs to be rebuilt.
PStatTimer timer(_update_bounds_pcollector);
CDStageWriter cdataw =
((PandaNode *)this)->update_bounds(pipeline_stage, cdata);
((PandaNode *)this)->update_cached(false, pipeline_stage, cdata);
return cdataw->_off_clip_planes;
}
return cdata->_off_clip_planes;
@@ -2352,13 +2352,13 @@ CPT(BoundingVolume) PandaNode::
get_bounds(Thread *current_thread) const {
int pipeline_stage = current_thread->get_pipeline_stage();
CDLockedStageReader cdata(_cycler, pipeline_stage, current_thread);
if (cdata->_last_update != cdata->_next_update) {
if (cdata->_last_bounds_update != cdata->_next_update) {
// The cache is stale; it needs to be rebuilt.
CPT(BoundingVolume) result;
{
PStatTimer timer(_update_bounds_pcollector);
CDStageWriter cdataw =
((PandaNode *)this)->update_bounds(pipeline_stage, cdata);
((PandaNode *)this)->update_cached(true, pipeline_stage, cdata);
result = cdataw->_external_bounds;
}
return result;
@@ -2386,19 +2386,19 @@ CPT(BoundingVolume) PandaNode::
get_bounds(UpdateSeq &seq, Thread *current_thread) const {
int pipeline_stage = current_thread->get_pipeline_stage();
CDLockedStageReader cdata(_cycler, pipeline_stage, current_thread);
if (cdata->_last_update != cdata->_next_update) {
if (cdata->_last_bounds_update != cdata->_next_update) {
// The cache is stale; it needs to be rebuilt.
CPT(BoundingVolume) result;
{
PStatTimer timer(_update_bounds_pcollector);
CDStageWriter cdataw =
((PandaNode *)this)->update_bounds(pipeline_stage, cdata);
((PandaNode *)this)->update_cached(true, pipeline_stage, cdata);
result = cdataw->_external_bounds;
seq = cdataw->_last_update;
seq = cdataw->_last_bounds_update;
}
return result;
}
seq = cdata->_last_update;
seq = cdata->_last_bounds_update;
return cdata->_external_bounds;
}

@@ -2419,13 +2419,13 @@ int PandaNode::
get_nested_vertices(Thread *current_thread) const {
int pipeline_stage = current_thread->get_pipeline_stage();
CDLockedStageReader cdata(_cycler, pipeline_stage, current_thread);
if (cdata->_last_update != cdata->_next_update) {
if (cdata->_last_bounds_update != cdata->_next_update) {
// The cache is stale; it needs to be rebuilt.
int result;
{
PStatTimer timer(_update_bounds_pcollector);
CDStageWriter cdataw =
((PandaNode *)this)->update_bounds(pipeline_stage, cdata);
((PandaNode *)this)->update_cached(true, pipeline_stage, cdata);
result = cdataw->_nested_vertices;
}
return result;
@@ -3760,23 +3760,26 @@ do_find_child(PandaNode *node, const PandaNode::Down *down) const {
}

////////////////////////////////////////////////////////////////////
// Function: PandaNode::update_bounds
// Function: PandaNode::update_cached
// Access: Private
// Description: Updates the cached values of the node that are
// dependent on its children, such as the
// external bounding volume, the _net_collide_mask, and
// the _off_clip_planes.
// external bounding volume, the _net_collide_mask,
// and the _off_clip_planes.
//
// If update_bounds is false, it will not update the
// bounding volume or vertex count.
//
// The old value should be passed in; it will be
// released. The new value is returned.
////////////////////////////////////////////////////////////////////
PandaNode::CDStageWriter PandaNode::
update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
update_cached(bool update_bounds, int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
// We might need to try this a couple of times, in case someone else
// steps on our result.
if (drawmask_cat.is_debug()) {
drawmask_cat.debug(false)
<< *this << "::update_bounds() {\n";
<< *this << "::update_cached() {\n";
}
Thread *current_thread = cdata.get_current_thread();

@@ -3784,7 +3787,10 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
// Grab the last_update counter.
UpdateSeq last_update = cdata->_last_update;
UpdateSeq next_update = cdata->_next_update;
nassertr(last_update != next_update, CDStageWriter(_cycler, pipeline_stage, cdata));
UpdateSeq last_bounds_update = cdata->_last_bounds_update;
nassertr(last_bounds_update != next_update &&
(!update_bounds || last_update != next_update),
CDStageWriter(_cycler, pipeline_stage, cdata));

// Start with a clean slate.
CollideMask net_collide_mask = cdata->_into_collide_mask;
@@ -3825,25 +3831,31 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
// working (since we're not holding a lock on our set of children
// right now). But we also need the regular pointers, to pass to
// BoundingVolume::around().
const BoundingVolume **child_volumes;
#if defined(HAVE_THREADS) && !defined(SIMPLE_THREADS)
pvector<CPT(BoundingVolume) > child_volumes_ref;
child_volumes_ref.reserve(num_children + 1);
if (update_bounds) {
child_volumes_ref.reserve(num_children + 1);
}
#endif
const BoundingVolume **child_volumes = (const BoundingVolume **)alloca(sizeof(BoundingVolume *) * (num_children + 1));
int child_volumes_i = 0;

bool all_box = true;
CPT(BoundingVolume) internal_bounds =
get_internal_bounds(pipeline_stage, current_thread);
CPT(BoundingVolume) internal_bounds = NULL;

if (!internal_bounds->is_empty()) {
if (update_bounds) {
child_volumes = (const BoundingVolume **)alloca(sizeof(BoundingVolume *) * (num_children + 1));
internal_bounds = get_internal_bounds(pipeline_stage, current_thread);

if (!internal_bounds->is_empty()) {
#if defined(HAVE_THREADS) && !defined(SIMPLE_THREADS)
child_volumes_ref.push_back(internal_bounds);
child_volumes_ref.push_back(internal_bounds);
#endif
nassertr(child_volumes_i < num_children + 1, CDStageWriter(_cycler, pipeline_stage, cdata));
child_volumes[child_volumes_i++] = internal_bounds;
if (internal_bounds->as_bounding_box() == NULL) {
all_box = false;
nassertr(child_volumes_i < num_children + 1, CDStageWriter(_cycler, pipeline_stage, cdata));
child_volumes[child_volumes_i++] = internal_bounds;
if (internal_bounds->as_bounding_box() == NULL) {
all_box = false;
}
}
}

@@ -3855,9 +3867,14 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
const ClipPlaneAttrib *orig_cp = DCAST(ClipPlaneAttrib, off_clip_planes);

CDLockedStageReader child_cdata(child->_cycler, pipeline_stage, current_thread);
if (child_cdata->_last_update != child_cdata->_next_update) {

UpdateSeq last_child_update = update_bounds
? child_cdata->_last_bounds_update
: child_cdata->_last_update;

if (last_child_update != child_cdata->_next_update) {
// Child needs update.
CDStageWriter child_cdataw = child->update_bounds(pipeline_stage, child_cdata);
CDStageWriter child_cdataw = child->update_cached(update_bounds, pipeline_stage, child_cdata);

net_collide_mask |= child_cdataw->_net_collide_mask;

@@ -3931,17 +3948,20 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
}

off_clip_planes = orig_cp->compose_off(child_cdataw->_off_clip_planes);
if (!child_cdataw->_external_bounds->is_empty()) {

if (update_bounds) {
if (!child_cdataw->_external_bounds->is_empty()) {
#if defined(HAVE_THREADS) && !defined(SIMPLE_THREADS)
child_volumes_ref.push_back(child_cdataw->_external_bounds);
child_volumes_ref.push_back(child_cdataw->_external_bounds);
#endif
nassertr(child_volumes_i < num_children + 1, CDStageWriter(_cycler, pipeline_stage, cdata));
child_volumes[child_volumes_i++] = child_cdataw->_external_bounds;
if (child_cdataw->_external_bounds->as_bounding_box() == NULL) {
all_box = false;
nassertr(child_volumes_i < num_children + 1, CDStageWriter(_cycler, pipeline_stage, cdata));
child_volumes[child_volumes_i++] = child_cdataw->_external_bounds;
if (child_cdataw->_external_bounds->as_bounding_box() == NULL) {
all_box = false;
}
}
num_vertices += child_cdataw->_nested_vertices;
}
num_vertices += child_cdataw->_nested_vertices;

} else {
// Child is good.
@@ -3983,17 +4003,20 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
}

off_clip_planes = orig_cp->compose_off(child_cdata->_off_clip_planes);
if (!child_cdata->_external_bounds->is_empty()) {

if (update_bounds) {
if (!child_cdata->_external_bounds->is_empty()) {
#if defined(HAVE_THREADS) && !defined(SIMPLE_THREADS)
child_volumes_ref.push_back(child_cdata->_external_bounds);
child_volumes_ref.push_back(child_cdata->_external_bounds);
#endif
nassertr(child_volumes_i < num_children + 1, CDStageWriter(_cycler, pipeline_stage, cdata));
child_volumes[child_volumes_i++] = child_cdata->_external_bounds;
if (child_cdata->_external_bounds->as_bounding_box() == NULL) {
all_box = false;
nassertr(child_volumes_i < num_children + 1, CDStageWriter(_cycler, pipeline_stage, cdata));
child_volumes[child_volumes_i++] = child_cdata->_external_bounds;
if (child_cdata->_external_bounds->as_bounding_box() == NULL) {
all_box = false;
}
}
num_vertices += child_cdata->_nested_vertices;
}
num_vertices += child_cdata->_nested_vertices;
}
}

@@ -4041,46 +4064,51 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
}

cdataw->_off_clip_planes = off_clip_planes;
cdataw->_nested_vertices = num_vertices;

CPT(TransformState) transform = get_transform(current_thread);
PT(GeometricBoundingVolume) gbv;
if (update_bounds) {
cdataw->_nested_vertices = num_vertices;

BoundingVolume::BoundsType btype = cdataw->_bounds_type;
if (btype == BoundingVolume::BT_default) {
btype = bounds_type;
CPT(TransformState) transform = get_transform(current_thread);
PT(GeometricBoundingVolume) gbv;

BoundingVolume::BoundsType btype = cdataw->_bounds_type;
if (btype == BoundingVolume::BT_default) {
btype = bounds_type;
}

if (btype == BoundingVolume::BT_box ||
(btype != BoundingVolume::BT_sphere && all_box && transform->is_identity())) {
// If all of the child volumes are a BoundingBox, and we
// have no transform, then our volume is also a
// BoundingBox.

gbv = new BoundingBox;
} else {
// Otherwise, it's a sphere.
gbv = new BoundingSphere;
}

if (child_volumes_i > 0) {
const BoundingVolume **child_begin = &child_volumes[0];
const BoundingVolume **child_end = child_begin + child_volumes_i;
((BoundingVolume *)gbv)->around(child_begin, child_end);
}

// If we have a transform, apply it to the bounding volume we
// just computed.
if (!transform->is_identity()) {
gbv->xform(transform->get_mat());
}

cdataw->_external_bounds = gbv;
cdataw->_last_bounds_update = next_update;
}

if (btype == BoundingVolume::BT_box ||
(btype != BoundingVolume::BT_sphere && all_box && transform->is_identity())) {
// If all of the child volumes are a BoundingBox, and we
// have no transform, then our volume is also a
// BoundingBox.

gbv = new BoundingBox;
} else {
// Otherwise, it's a sphere.
gbv = new BoundingSphere;
}

if (child_volumes_i > 0) {
const BoundingVolume **child_begin = &child_volumes[0];
const BoundingVolume **child_end = child_begin + child_volumes_i;
((BoundingVolume *)gbv)->around(child_begin, child_end);
}

// If we have a transform, apply it to the bounding volume we
// just computed.
if (!transform->is_identity()) {
gbv->xform(transform->get_mat());
}

cdataw->_external_bounds = gbv;
cdataw->_last_update = next_update;

if (drawmask_cat.is_debug()) {
drawmask_cat.debug(false)
<< "} " << *this << "::update_bounds();\n";
<< "} " << *this << "::update_cached();\n";
}

nassertr(cdataw->_last_update == cdataw->_next_update, cdataw);
@@ -4091,7 +4119,8 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
return cdataw;
}

if (cdataw->_last_update == cdataw->_next_update) {
if (cdataw->_last_update == cdataw->_next_update &&
(!update_bounds || cdataw->_last_bounds_update == cdataw->_next_update)) {
// Someone else has computed the cache for us. OK.
return cdataw;
}
@@ -4101,7 +4130,8 @@ update_bounds(int pipeline_stage, PandaNode::CDLockedStageReader &cdata) {
// the read lock back.
cdata = CDLockedStageReader(_cycler, pipeline_stage, current_thread);

if (cdata->_last_update == cdata->_next_update) {
if (cdata->_last_update == cdata->_next_update &&
(!update_bounds || cdata->_last_bounds_update == cdata->_next_update)) {
// Someone else has computed the cache for us while we were
// diddling with the locks. OK.
return CDStageWriter(_cycler, pipeline_stage, cdata);
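A condensed sketch of the bookkeeping introduced in update_cached() above (the field and type names come from this diff; the wrapper struct itself is illustrative): _next_update is bumped on any invalidation, _last_update records the last refresh of the cheap cached values, and _last_bounds_update lags behind until some caller pays for the bounding volume.

#include "updateSeq.h"

// Illustrative only: how the two counters relate after this change.
struct CachedCounters {
  UpdateSeq _last_update;         // last refresh of masks / clip planes
  UpdateSeq _last_bounds_update;  // last refresh of bounds / nested vertices
  UpdateSeq _next_update;         // bumped whenever anything is invalidated

  // The cheap part of the cache (masks, clip planes) is stale.
  bool cache_stale() const { return _last_update != _next_update; }

  // The bounding volume is stale; it is never fresher than the cheap cache,
  // since update_cached(true, ...) refreshes both at once.
  bool bounds_stale() const { return _last_bounds_update != _next_update; }
};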
@@ -4298,6 +4328,7 @@ CData(const PandaNode::CData &copy) :
_external_bounds(copy._external_bounds),
_last_update(copy._last_update),
_next_update(copy._next_update),
_last_bounds_update(copy._last_bounds_update),

_down(copy._down),
_stashed(copy._stashed),
@@ -4780,14 +4811,19 @@ fillin_down_list(PandaNode::Down &down_list, const string &tag,
}

////////////////////////////////////////////////////////////////////
// Function: PandaNodePipelineReader::check_bounds
// Function: PandaNodePipelineReader::check_cached
// Access: Public
// Description: Ensures that the bounding volume is properly computed
// on this node.
// Description: Ensures that the draw masks etc. are properly
// computed on this node. If update_bounds is true,
// also checks the bounding volume.
////////////////////////////////////////////////////////////////////
void PandaNodePipelineReader::
check_bounds() const {
if (_cdata->_last_update != _cdata->_next_update) {
check_cached(bool update_bounds) const {
UpdateSeq last_update = update_bounds
? _cdata->_last_bounds_update
: _cdata->_last_update;

if (last_update != _cdata->_next_update) {
// The cache is stale; it needs to be rebuilt.

// We'll need to get a fresh read pointer, since another thread
@@ -4799,7 +4835,8 @@ check_bounds() const {
((PandaNodePipelineReader *)this)->_cdata = NULL;
int pipeline_stage = _current_thread->get_pipeline_stage();
PandaNode::CDLockedStageReader fresh_cdata(_node->_cycler, pipeline_stage, _current_thread);
if (fresh_cdata->_last_update == fresh_cdata->_next_update) {
if (fresh_cdata->_last_update == fresh_cdata->_next_update &&
(!update_bounds || _cdata->_external_bounds != NULL)) {
// What luck, some other thread has already freshened the
// cache for us. Save the new pointer, and let the lock
// release itself.
@@ -4814,7 +4851,7 @@ check_bounds() const {
// No, the cache is still stale. We have to do the work of
// freshening it.
PStatTimer timer(PandaNode::_update_bounds_pcollector);
PandaNode::CDStageWriter cdataw = ((PandaNode *)_node)->update_bounds(pipeline_stage, fresh_cdata);
PandaNode::CDStageWriter cdataw = ((PandaNode *)_node)->update_cached(update_bounds, pipeline_stage, fresh_cdata);
nassertv(cdataw->_last_update == cdataw->_next_update);
// As above, we save the new pointer, and then let the lock
// release itself.
@@ -4828,4 +4865,5 @@ check_bounds() const {
}

nassertv(_cdata->_last_update == _cdata->_next_update);
nassertv(!update_bounds || _cdata->_last_bounds_update == _cdata->_next_update);
}

@@ -616,6 +616,11 @@ private:
// When _last_update != _next_update, this cache is stale.
UpdateSeq _last_update, _next_update;

// We don't always update the bounding volume and number of
// nested vertices. This indicates the last time they were changed.
// It is never higher than _last_update.
UpdateSeq _last_bounds_update;

public:
// This section stores the links to other nodes above and below
// this node in the graph.
@@ -669,7 +674,8 @@ private:
typedef CycleDataStageWriter<CData> CDStageWriter;

int do_find_child(PandaNode *node, const Down *down) const;
CDStageWriter update_bounds(int pipeline_stage, CDLockedStageReader &cdata);
CDStageWriter update_cached(bool update_bounds, int pipeline_stage,
CDLockedStageReader &cdata);

static DrawMask _overall_bit;

@@ -823,7 +829,7 @@ public:

INLINE void release();

void check_bounds() const;
void check_cached(bool update_bounds) const;

INLINE void compose_draw_mask(DrawMask &running_draw_mask) const;
INLINE bool compare_draw_mask(DrawMask running_draw_mask,

@@ -420,6 +420,13 @@ r_flatten(PandaNode *grandparent_node, PandaNode *parent_node,
<< *parent_node << ", " << hex << combine_siblings_bits << dec
<< ")\n";
}

if ((combine_siblings_bits & (CS_geom_node | CS_other | CS_recurse)) != 0) {
// Unset CS_within_radius, since we're going to flatten everything
// anyway. This avoids needlessly calculating the bounding volume.
combine_siblings_bits &= ~CS_within_radius;
}

int num_nodes = 0;

if (!parent_node->safe_to_flatten_below()) {

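The flatten change above has the same flavor as the rest of the commit: skip work whose result will never be read. A standalone sketch of that bit manipulation follows; the CS_* values are assumed to be SceneGraphReducer's combine-siblings flags, and prune_combine_bits is an illustrative helper, not library code.

#include "sceneGraphReducer.h"

// Illustrative helper: if siblings will be combined regardless of distance,
// the within-radius test -- the only consumer of the bounding volume at this
// point -- can be dropped up front.
static int prune_combine_bits(int combine_siblings_bits) {
  const int flatten_anyway =
    SceneGraphReducer::CS_geom_node |
    SceneGraphReducer::CS_other |
    SceneGraphReducer::CS_recurse;
  if ((combine_siblings_bits & flatten_anyway) != 0) {
    combine_siblings_bits &= ~SceneGraphReducer::CS_within_radius;
  }
  return combine_siblings_bits;
}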