detect leaked DelayDeletes, explicitly name and destroy DelayDeletes, base.cr.printDelayDeletes(), prevent DelayDeleted objs from being cached

Darren Ranalli 2008-02-20 22:30:38 +00:00
parent 61dc831678
commit a8a8869011
6 changed files with 104 additions and 79 deletions

View File

@@ -20,12 +20,28 @@ class CRCache:
         """
         assert self.checkCache()
         CRCache.notify.debug("Flushing the cache")
+        # give objects a chance to clean themselves up before checking for DelayDelete leaks
+        messenger.send('clientCleanup')
+        # some of these objects might be holding delayDeletes on others
+        # track each object that is delayDeleted after it gets its chance to delete,
+        # and check them after all objects have had a chance to delete
+        delayDeleted = []
         for distObj in self.dict.values():
             distObj.deleteOrDelay()
             if distObj.getDelayDeleteCount() != 0:
-                self.notify.warning(
-                    'CRCache.flush: could not delete %s (%s), delayDeleteCount=%s' %
-                    (safeRepr(distObj), itype(distObj), distObj.getDelayDeleteCount()))
+                delayDeleted.append(distObj)
+        # now that all objects have had a chance to delete, are there any objects left
+        # that are still delayDeleted?
+        delayDeleteLeaks = []
+        for distObj in delayDeleted:
+            if distObj.getDelayDeleteCount() != 0:
+                delayDeleteLeaks.append(distObj)
+        if len(delayDeleteLeaks):
+            s = 'CRCache.flush:'
+            for obj in delayDeleteLeaks:
+                s += ('\n could not delete %s (%s), delayDeletes=%s' %
+                      (safeRepr(obj), itype(obj), obj.getDelayDeleteNames()))
+            self.notify.error(s)
         # Null out all references to the objects so they will get gcd
         self.dict = {}
         self.fifo = []
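For context, here is a minimal sketch, not part of this commit, of how an object holding a DelayDelete can respond to the new 'clientCleanup' message so that flush() finds nothing left to report; the FollowCamHelper class and its method names are illustrative only.

from direct.showbase.DirectObject import DirectObject
from direct.distributed import DelayDelete

class FollowCamHelper(DirectObject):
    # hypothetical helper that keeps an avatar alive while following it
    def __init__(self, av):
        # naming the DelayDelete makes any leak readable in printDelayDeletes()
        self._avDelayDelete = DelayDelete.DelayDelete(av, 'FollowCamHelper')
        # drop the hold when the repository is about to check for leaks
        self.acceptOnce('clientCleanup', self.destroy)

    def destroy(self):
        self.ignoreAll()
        if self._avDelayDelete is not None:
            self._avDelayDelete.destroy()
            self._avDelayDelete = None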

View File

@@ -64,6 +64,8 @@ class ClientRepositoryBase(ConnectionRepository):
         self.heartbeatStarted = 0
         self.lastHeartbeat = 0
 
+        self._delayDeletedDOs = {}
+
     def setDeferInterval(self, deferInterval):
         """Specifies the minimum amount of time, in seconds, that must
         elapse before generating any two DistributedObjects whose
@@ -474,7 +476,8 @@ class ClientRepositoryBase(ConnectionRepository):
            # Only cache the object if it is a "cacheable" type
            # object; this way we don't clutter up the caches with
            # trivial objects that don't benefit from caching.
-            if distObj.getCacheable():
+            # also don't try to cache an object that is delayDeleted
+            if distObj.getCacheable() and distObj.getDelayDeleteCount() <= 0:
                 cache.cache(distObj)
             else:
                 distObj.deleteOrDelay()
@@ -683,3 +686,22 @@ class ClientRepositoryBase(ConnectionRepository):
         # By default, no ID's are local.  See also
         # ClientRepository.isLocalId().
         return 0
+
+    # methods for tracking delaydeletes
+    def _addDelayDeletedDO(self, do):
+        # use the id of the object, it's possible to have multiple DelayDeleted instances
+        # with identical doIds if an object gets deleted then re-generated
+        key = id(do)
+        assert key not in self._delayDeletedDOs
+        self._delayDeletedDOs[key] = do
+
+    def _removeDelayDeletedDO(self, do):
+        key = id(do)
+        del self._delayDeletedDOs[key]
+
+    def printDelayDeletes(self):
+        print 'DelayDeletes:'
+        print '============='
+        for obj in self._delayDeletedDOs.itervalues():
+            print '%s\t%s (%s)\tdelayDeletes=%s' % (
+                obj.doId, safeRepr(obj), itype(obj), obj.getDelayDeleteNames())
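Per the commit message, outstanding DelayDeletes can now be listed from the debug prompt via base.cr.printDelayDeletes(); the doId, repr, and name below are made up simply to show the format string above in use.

>>> base.cr.printDelayDeletes()
DelayDeletes:
=============
10123	<DistributedAvatar 10123> (DistributedAvatar)	delayDeletes=['FollowCamHelper']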

View File

@@ -25,9 +25,13 @@ class DelayDelete:
     DelayDelete object ceases to exist, it may be deleted.
     """
 
-    def __init__(self, distObj):
-        self.distObj = distObj
-        self.distObj.delayDelete(1)
+    def __init__(self, distObj, name):
+        self._distObj = distObj
+        self._token = self._distObj.acquireDelayDelete(name)
 
-    def __del__(self):
-        self.distObj.delayDelete(0)
+    def destroy(self):
+        token = self._token
+        # do this first to catch cases where releaseDelayDelete causes
+        # this method to be called again
+        del self._token
+        self._distObj.releaseDelayDelete(token)
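A minimal usage sketch under the new interface, assuming av is some generated DistributedObject and doSomething is a placeholder: the caller supplies a descriptive name and must call destroy() explicitly, since releasing the hold from __del__ is no longer supported.

from direct.distributed import DelayDelete

def useAvatarSafely(av):
    # hold the object open; 'exampleUse' is just a diagnostic label
    hold = DelayDelete.DelayDelete(av, 'exampleUse')
    try:
        av.doSomething()  # placeholder; av cannot fully delete while held
    finally:
        # always release the hold, even on error, or CRCache.flush()
        # will flag this DelayDelete as a leak
        hold.destroy()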

View File

@@ -29,6 +29,8 @@ class DistributedObject(DistributedObjectBase):
     # even to the quiet zone.
     neverDisable = 0
 
+    DelayDeleteSerialGen = SerialNumGen()
+
     def __init__(self, cr):
         assert self.notify.debugStateCall(self)
         try:
@@ -45,9 +47,7 @@ class DistributedObject(DistributedObjectBase):
             # it needs to be optimized in this way.
             self.setCacheable(0)
 
-            # This count tells whether the object can be deleted right away,
-            # or not.
-            self.delayDeleteCount = 0
+            self._token2delayDeleteName = {}
             # This flag tells whether a delete has been requested on this
             # object.
             self.deleteImminent = 0
@@ -143,43 +143,45 @@ class DistributedObject(DistributedObjectBase):
         return self.cacheable
 
     def deleteOrDelay(self):
-        if self.delayDeleteCount > 0:
+        if len(self._token2delayDeleteName) > 0:
             self.deleteImminent = 1
         else:
             self.disableAnnounceAndDelete()
 
     def getDelayDeleteCount(self):
-        return self.delayDeleteCount
+        return len(self._token2delayDeleteName)
 
-    def delayDelete(self, flag):
-        # Flag should be 0 or 1, meaning increment or decrement count
+    def acquireDelayDelete(self, name):
         # Also see DelayDelete.py
-        if (flag == 1):
-            self.delayDeleteCount += 1
-        elif (flag == 0):
-            self.delayDeleteCount -= 1
-        else:
-            self.notify.error("Invalid flag passed to delayDelete: " + str(flag))
-        if (self.delayDeleteCount < 0):
-            self.notify.error("Somebody decremented delayDelete for doId %s without incrementing"
-                              % (self.doId))
-        elif (self.delayDeleteCount == 0):
-            assert self.notify.debug(
-                "delayDeleteCount for doId %s now 0" % (self.doId))
+        if self.getDelayDeleteCount() == 0:
+            self.cr._addDelayDeletedDO(self)
+
+        token = DistributedObject.DelayDeleteSerialGen.next()
+        self._token2delayDeleteName[token] = name
+
+        assert self.notify.debug(
+            "delayDelete count for doId %s now %s" %
+            (self.doId, len(self._token2delayDeleteName)))
+
+        # Return the token, user must pass token to releaseDelayDelete
+        return token
+
+    def releaseDelayDelete(self, token):
+        name = self._token2delayDeleteName.pop(token)
+        assert self.notify.debug("releasing delayDelete '%s'" % name)
+
+        if len(self._token2delayDeleteName) == 0:
+            assert self.notify.debug(
+                "delayDelete count for doId %s now 0" % (self.doId))
+            self.cr._removeDelayDeletedDO(self)
             if self.deleteImminent:
                 assert self.notify.debug(
-                    "delayDeleteCount for doId %s -- deleteImminent" %
+                    "delayDelete count for doId %s -- deleteImminent" %
                     (self.doId))
                 self.disableAnnounceAndDelete()
-        else:
-            self.notify.debug(
-                "delayDeleteCount for doId %s now %s" %
-                (self.doId, self.delayDeleteCount))
-        # Return the count just for kicks
-        return self.delayDeleteCount
+
+    def getDelayDeleteNames(self):
+        return self._token2delayDeleteName.values()
 
     def disableAnnounceAndDelete(self):
         self.disableAndAnnounce()
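A short sketch of the token protocol itself, assuming distObj is a generated DistributedObject; most code should go through the DelayDelete wrapper above rather than calling these methods directly.

# each acquire returns a unique token; the name is only a diagnostic label
token = distObj.acquireDelayDelete('scriptedCutscene')

# if the object is disabled meanwhile, deleteOrDelay() only sets
# deleteImminent rather than deleting it

# releasing the last outstanding token runs the deferred delete
distObj.releaseDelayDelete(token)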

View File

@@ -29,13 +29,6 @@ class DistributedObjectOV(DistributedObjectBase):
             self.DistributedObjectOV_initialized = 1
             DistributedObjectBase.__init__(self, cr)
 
-            # This count tells whether the object can be deleted right away,
-            # or not.
-            self.delayDeleteCount = 0
-
-            # This flag tells whether a delete has been requested on this
-            # object.
-            self.deleteImminent = 0
             # Keep track of our state as a distributed object.  This
             # is only trustworthy if the inheriting class properly
             # calls up the chain for disable() and generate().
@@ -64,51 +57,19 @@ class DistributedObjectOV(DistributedObjectBase):
                 print
         except Exception, e: print "%serror printing status"%(spaces,), e
 
+    def getDelayDeleteCount(self):
+        # OV objects cannot be delayDeleted
+        return 0
+
     def deleteOrDelay(self):
-        if self.delayDeleteCount > 0:
-            self.deleteImminent = 1
-        else:
-            self.disableAnnounceAndDelete()
-
-    def delayDelete(self, flag):
-        # Flag should be 0 or 1, meaning increment or decrement count
-        # Also see DelayDelete.py
-        if (flag == 1):
-            self.delayDeleteCount += 1
-        elif (flag == 0):
-            self.delayDeleteCount -= 1
-        else:
-            self.notify.error("Invalid flag passed to delayDelete: " + str(flag))
-        if (self.delayDeleteCount < 0):
-            self.notify.error("Somebody decremented delayDelete for doId %s without incrementing"
-                              % (self.doId))
-        elif (self.delayDeleteCount == 0):
-            assert self.notify.debug(
-                "delayDeleteCount for doId %s now 0" %
-                (self.doId))
-            if self.deleteImminent:
-                assert self.notify.debug(
-                    "delayDeleteCount for doId %s -- deleteImminent" %
-                    (self.doId))
-                self.disableAnnounceAndDelete()
-        else:
-            self.notify.debug(
-                "delayDeleteCount for doId %s now %s" %
-                (self.doId, self.delayDeleteCount))
-        # Return the count just for kicks
-        return self.delayDeleteCount
+        self.disableAnnounceAndDelete()
 
     def disableAnnounceAndDelete(self):
         self.disableAndAnnounce()
         self.delete()
 
     def disableAndAnnounce(self):
-        """
-        Inheritors should *not* redefine this function.
-        """
         # We must send the disable announce message *before* we
         # actually disable the object.  That way, the various cleanup
         # tasks can run first and take care of restoring the object to

View File

@@ -18,3 +18,23 @@ from IntervalManager import *
 if __debug__:
     from TestInterval import *
 from pandac.PandaModules import WaitInterval
+
+# there is legacy code in Toontown that puts interval-related DelayDeletes directly
+# on the interval object, relying on Python to __del__ the DelayDelete when the interval
+# is garbage-collected. DelayDelete now requires .destroy() to be called.
+# To reduce code duplication, this method may be called to clean up delayDeletes that
+# have been placed on an interval.
+def cleanupDelayDeletes(interval):
+    if hasattr(interval, 'delayDelete'):
+        delayDelete = interval.delayDelete
+        # get rid of the reference before calling destroy in case destroy causes
+        # this function to be called again
+        del interval.delayDelete
+        delayDelete.destroy()
+    if hasattr(interval, 'delayDeletes'):
+        delayDeletes = interval.delayDeletes
+        # get rid of the reference before calling destroy in case destroy causes
+        # this function to be called again
+        del interval.delayDeletes
+        for i in delayDeletes:
+            i.destroy()
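A sketch of the legacy pattern this helper supports, assuming cleanupDelayDeletes is importable from IntervalGlobal as the surrounding imports suggest; the avatar, track name, and play call are illustrative.

from direct.interval.IntervalGlobal import Sequence, Func, cleanupDelayDeletes
from direct.distributed import DelayDelete

def playExitTrack(av):
    track = Sequence(Func(av.play, 'exit'), name='exitTrack')
    # legacy style: park the DelayDelete directly on the interval
    track.delayDelete = DelayDelete.DelayDelete(av, 'exitTrack')
    track.setDoneEvent(track.getName())
    # destroy the DelayDelete once the interval reports that it is done
    base.acceptOnce(track.getDoneEvent(), cleanupDelayDeletes, [track])
    track.start()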