Merge pull request #277 from nikitakit/nikita/py3
Allow read-only use of mceditlib in python3
commit 4652e537ae
@@ -10,8 +10,9 @@ import Cython.Compiler.Options
 Cython.Compiler.Options.annotate = True
 
 import numpy
+import sys
 
-with file("version.txt") as f:
+with open("version.txt") as f:
     version = f.read().strip()
 
 install_requires = [
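Nearly every hunk in this PR is the same mechanical fix: Python 3 removed the file() builtin, while open() behaves identically on both interpreters, so read-only use of the library only needs the portable spelling. A minimal sketch of the idiom (path mirrors the hunk above):

    # file() is gone on Python 3; open() is the portable spelling.
    # Python 2:  file("version.txt") and open("version.txt") are equivalent.
    # Python 3:  file(...) raises NameError, open(...) still works.
    with open("version.txt") as f:
        version = f.read().strip()
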
@@ -23,7 +24,9 @@ include_dirs = [numpy.get_include()]
 mceditlib_ext_modules = cythonize([
     "src/mceditlib/nbt.pyx",
     "src/mceditlib/relight/with_cython.pyx"
-])
+    ],
+    compile_time_env={'IS_PY2': sys.version_info[0] < 3},
+)
 
 for m in mceditlib_ext_modules:
     m.include_dirs = include_dirs
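Note on the build change: compile_time_env is Cython's documented way to hand constants to .pyx sources at cythonize time; IF/ELSE/DEF blocks are then resolved before any C is generated, so the compiled module contains only the branch for the interpreter that built it. A standalone sketch of the mechanism, with hypothetical module and file names:

    # build_sketch.py -- passing a compile-time constant into Cython code.
    # Module and file names here are hypothetical.
    import sys

    from Cython.Build import cythonize
    from setuptools import setup

    setup(
        name="example",
        ext_modules=cythonize(
            ["example.pyx"],
            # Inside example.pyx the constant is visible to compile-time code:
            #     IF IS_PY2:
            #         ...Python 2-only declarations...
            #     ELSE:
            #         ...Python 3 fallback...
            compile_time_env={'IS_PY2': sys.version_info[0] < 3},
        ),
    )
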
@@ -48,10 +51,12 @@ setup(name='mceditlib',
       url='https://github.com/mcedit/mcedit2',
       license='MIT License',
       packages=find_packages('src', include=["mceditlib*"]),
+      package_data={'mceditlib': ['blocktypes/*.json', 'anvil/biomes.csv']},
       package_dir={'': 'src'},
       ext_modules=mceditlib_ext_modules,
       include_dirs=include_dirs,
       include_package_data=True,
       zip_safe=False,
       install_requires=install_requires,
+      use_2to3=True,
       )
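use_2to3=True asks setuptools to run the 2to3 converter over the sources during the build, so the repository stays Python 2 code while the copy installed under Python 3 is converted automatically. (Setuptools of this era accepted the flag; modern setuptools has since removed it.) Roughly the kind of rewrite it performs, shown on this library's own iteritems idiom; the sample dict is hypothetical:

    # Sketch of the rewrite 2to3 applies at build time (sample data hypothetical).
    properties = {"level-name": "world", "gamemode": "0"}

    # Python 2 source as checked in to the repository:
    #     for k, v in properties.iteritems():
    #         print "%s=%s" % (k, v)

    # Python 3 output that actually gets installed when use_2to3=True:
    for k, v in properties.items():
        print("%s=%s" % (k, v))
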
@@ -780,7 +780,7 @@ class AnvilWorldAdapter(object):
         """
         lockfile = self.revisionHistory.rootFolder.getFilePath("session.lock")
         self.lockTime = int(time.time() * 1000)
-        with file(lockfile, "wb") as f:
+        with open(lockfile, "wb") as f:
             f.write(struct.pack(">q", self.lockTime))
             f.flush()
             os.fsync(f.fileno())
@@ -798,7 +798,7 @@ class AnvilWorldAdapter(object):
 
         lockfile = self.revisionHistory.rootFolder.getFilePath("session.lock")
         try:
-            (lock, ) = struct.unpack(">q", file(lockfile, "rb").read())
+            (lock, ) = struct.unpack(">q", open(lockfile, "rb").read())
         except struct.error:
             lock = -1
         if lock != self.lockTime:
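These two hunks are the two halves of Minecraft's session.lock protocol: on open, write the current time in milliseconds as a big-endian signed 64-bit integer (the ">q" struct format), and before saving, re-read the file to confirm no other program has taken the lock since. A self-contained sketch of the protocol, with a hypothetical lockfile path:

    import os
    import struct
    import time

    lockfile = "session.lock"  # hypothetical path, for illustration

    # Acquire: record our timestamp, big-endian signed 64-bit (">q").
    lock_time = int(time.time() * 1000)
    with open(lockfile, "wb") as f:
        f.write(struct.pack(">q", lock_time))
        f.flush()
        os.fsync(f.fileno())

    # Verify: if another program rewrote the file, the value changed.
    with open(lockfile, "rb") as f:
        (lock,) = struct.unpack(">q", f.read())
    if lock != lock_time:
        raise RuntimeError("session.lock was taken by another program")
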
@@ -63,7 +63,7 @@ class AnvilWorldFolder(object):
         return os.unlink(self.getFilePath(path))
 
     def readFile(self, path):
-        with file(self.getFilePath(path), "rb") as f:
+        with open(self.getFilePath(path), "rb") as f:
             return f.read()
 
     def writeFile(self, path, data):
@@ -71,7 +71,7 @@ class AnvilWorldFolder(object):
         dirpath = os.path.dirname(path)
         if not os.path.exists(dirpath):
             os.makedirs(dirpath)
-        with file(path, "wb") as f:
+        with open(path, "wb") as f:
             f.write(data)
 
     def listFolder(self, path):
@@ -23,7 +23,7 @@ def openResource(filename):
         log.exception("pkg_resources not available")
         raise
 
-    return file(path)
+    return open(path)
 
 def getJsonFile(filename):
     if filename in _cachedJsons:
@@ -79,7 +79,7 @@ def isLevel(cls, filename):
         return cls.canOpenFile(filename)
 
     if os.path.isfile(filename):
-        with file(filename, "rb") as f:
+        with open(filename, "rb") as f:
             data = f.read()
 
         if hasattr(cls, "_isDataLevel"):
@@ -87,7 +87,7 @@ class JavaLevel(FakeChunkedLevelAdapter):
             gz = gzip.GzipFile(filename)
             strdata = gz.read()
         except Exception:
-            strdata = file(filename, "rb").read()
+            strdata = open(filename, "rb").read()
 
         data = fromstring(strdata, dtype='uint8')
 
@@ -85,7 +85,7 @@ class ServerJarStorage(object):
             os.makedirs(self.cacheDir)
         readme = os.path.join(self.cacheDir, "README.TXT")
         if not os.path.exists(readme):
-            with file(readme, "w") as f:
+            with open(readme, "w") as f:
                 f.write("""
 About this folder:
@@ -163,7 +163,7 @@ this way.
 
     def checksumForVersion(self, v):
         jf = self.jarfileForVersion(v)
-        with file(jf, "rb") as f:
+        with open(jf, "rb") as f:
             import hashlib
             return hashlib.md5(f.read()).hexdigest()
 
@@ -198,14 +198,14 @@ def readProperties(filename):
     if not os.path.exists(filename):
         return {}
 
-    with file(filename) as f:
+    with open(filename) as f:
         properties = dict((line.split("=", 2) for line in (l.strip() for l in f) if not line.startswith("#")))
 
     return properties
 
 
 def saveProperties(filename, properties):
-    with file(filename, "w") as f:
+    with open(filename, "w") as f:
         for k, v in properties.iteritems():
             f.write("{0}={1}\n".format(k, v))
 
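For reference, the dict() one-liner in readProperties builds a mapping from key=value lines while skipping comments. An approximate, unrolled equivalent (hypothetical helper name; partition() keeps any extra '=' characters in the value, where the original's split('=', 2) would produce a third piece):

    def read_properties(filename):
        # Unrolled equivalent of the dict() one-liner above (hypothetical name).
        properties = {}
        with open(filename) as f:
            for raw in f:
                line = raw.strip()
                if not line or line.startswith("#"):
                    continue
                key, _, value = line.partition("=")
                properties[key] = value
        return properties
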
@@ -304,7 +304,7 @@ class MCServerChunkGenerator(object):
         readme = os.path.join(self.worldCacheDir, "README.TXT")
 
         if not os.path.exists(readme):
-            with file(readme, "w") as f:
+            with open(readme, "w") as f:
                 f.write("""
 About this folder:
 
@@ -38,25 +38,46 @@ DEF UNICODE_NAMES = True
 # For each NBT file loaded, cache all of the unicode strings used for tag names. Saves some hundred kilobytes per
 # file since tag names often appear multiple times
 
-DEF UNICODE_CACHE = True
+# The value of the IS_PY2 macro is received from the build script
+IF IS_PY2:
+    DEF UNICODE_CACHE = True
+ELSE:
+    # This codepath is currently unsupported in the python3 version
+    DEF UNICODE_CACHE = False
 
 import collections
 import gzip
 import zlib
 
-from cStringIO import StringIO
-from cpython cimport PyTypeObject, PyUnicode_DecodeUTF8, PyList_Append, PyString_FromStringAndSize
+from cpython cimport PyTypeObject, PyUnicode_DecodeUTF8, PyList_Append
+IF IS_PY2:
+    from cStringIO import StringIO
+    from cpython cimport PyString_FromStringAndSize
+    binary_type = str
+    cdef object iteritems(obj):
+        return obj.iteritems()
+ELSE:
+    from io import BytesIO as StringIO
+    binary_type = bytes
+    cdef object iteritems(obj):
+        return obj.items()
 
 import numpy
 
-cdef extern from "cStringIO.h":
-    struct PycStringIO_CAPI:
-        int cwrite(object o, char * buf, Py_ssize_t len)
-        PyTypeObject * OutputType
-cdef extern from "cobject.h":
-    void * PyCObject_Import(char * module_name, char * cobject_name)
+IF IS_PY2:
+    cdef extern from "cStringIO.h":
+        struct PycStringIO_CAPI:
+            int cwrite(object o, char * buf, Py_ssize_t len)
+            PyTypeObject * OutputType
+    cdef extern from "cobject.h":
+        void * PyCObject_Import(char * module_name, char * cobject_name)
 
-cdef PycStringIO_CAPI *PycStringIO = <PycStringIO_CAPI *> PyCObject_Import("cStringIO", "cStringIO_CAPI")
-cdef PyTypeObject * StringO = PycStringIO.OutputType
+    cdef PycStringIO_CAPI *PycStringIO = <PycStringIO_CAPI *> PyCObject_Import("cStringIO", "cStringIO_CAPI")
+    cdef PyTypeObject * StringO = PycStringIO.OutputType
+ELSE:
+    # The equivalent python3 code has not been written, so for now we fall back
+    # on a codepath that might have poor performance.
+    pass
 
 # Tag IDs
 
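All of the IF IS_PY2 blocks above amount to a small compatibility layer, the same shape as the six library's binary_type and iteritems helpers. A pure-Python rendering of the shim for reference:

    import sys

    IS_PY2 = sys.version_info[0] < 3

    if IS_PY2:
        from cStringIO import StringIO  # byte-oriented buffer on Python 2
        binary_type = str

        def iteritems(obj):
            return obj.iteritems()
    else:
        from io import BytesIO as StringIO  # plays the same role on Python 3
        binary_type = bytes

        def iteritems(obj):
            return obj.items()

    # Usage mirrors the .pyx code: isinstance(x, binary_type) detects values
    # that still need UTF-8 decoding, and iteritems(tag) walks compound tags.
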
@@ -131,7 +152,7 @@ cdef class TAG_Value:
 
         def __set__(self, val):
             IF UNICODE_NAMES:
-                if isinstance(val, str):
+                if isinstance(val, binary_type):
                     val = PyUnicode_DecodeUTF8(val, len(val), "strict")
             ELSE:
                 if isinstance(val, unicode):
@@ -327,7 +348,7 @@ cdef class TAG_String(TAG_Value):
             return self._value
 
         def __set__(self, value):
-            if isinstance(value, str):
+            if isinstance(value, binary_type):
                 value = PyUnicode_DecodeUTF8(value, len(value), "strict")
             self._value = value
 
@@ -509,7 +530,7 @@ cdef class _TAG_Compound(TAG_Value):
             return data
 
         if isinstance(filename_or_buf, basestring):
-            f = file(filename_or_buf, "wb")
+            f = open(filename_or_buf, "wb")
             f.write(data)
         else:
             filename_or_buf.write(data)
@@ -545,7 +566,7 @@ def load(filename="", buf=None):
     :rtype: TAG_Compound
     """
     if filename:
-        buf = file(filename, "rb")
+        buf = open(filename, "rb")
 
     if hasattr(buf, "read"):
         buf = buf.read()
@@ -807,7 +828,10 @@ def hexdump(src, length=8):
 
 cdef void cwrite(obj, char *buf, size_t len):
     #print "cwrite %s %s %d" % (map(ord, buf[:min(4, len)]), buf[:min(4, len)].decode('ascii', 'replace'), len)
-    PycStringIO.cwrite(obj, buf, len)
+    IF IS_PY2:
+        PycStringIO.cwrite(obj, buf, len)
+    ELSE:
+        obj.write(buf[:len])
 
 
 cdef void save_tag_id(char tagID, object buf):
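The cwrite change is the performance-sensitive spot the new comments warn about: on Python 2 it writes through cStringIO's C API, while the Python 3 branch slices the char buffer (which produces a bytes object in Cython) and calls an ordinary write(). A plain-Python sketch of the fallback path:

    from io import BytesIO

    def cwrite_fallback(obj, data, length):
        # Python-level fallback: slice to `length` bytes and go through the
        # ordinary file-object interface -- correct, but one interpreter-level
        # call per write instead of the cStringIO C API.
        obj.write(data[:length])

    buf = BytesIO()
    cwrite_fallback(buf, b"\x0a\x00\x04name", 7)
    assert buf.getvalue() == b"\x0a\x00\x04name"
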
@@ -920,7 +944,7 @@ def nested_string(tag, indent_string=" ", indent=0):
     if tag.tagID == _ID_COMPOUND:
         result += 'TAG_Compound({\n'
         indent += 1
-        for key, value in tag.iteritems():
+        for key, value in iteritems(tag):
             result += indent_string * indent + '"%s": %s,\n' % (key, nested_string(value, indent_string, indent))
         indent -= 1
         result += indent_string * indent + '})'
@@ -962,7 +986,7 @@ def walk(tag, path=None):
     if path is None:
         path = []
     if tag.isCompound():
-        for name, subtag in tag.iteritems():
+        for name, subtag in iteritems(tag):
             yield (name, subtag, path)
             for result in walk(subtag, path + [name]):
                 yield result
@@ -4,7 +4,7 @@
 Reads and writes chunks to *.mcr* (Minecraft Region)
 and *.mca* (Minecraft Anvil Region) files
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, division
 import logging
 import os
 import struct
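The division future-import makes / mean true division on Python 2, matching Python 3, so every spot that needs a whole number of sectors is rewritten with //, which floors on both interpreters. The difference the following hunks guard against:

    from __future__ import division  # "/" becomes true division on Python 2 too

    SECTOR_BYTES = 4096

    print(SECTOR_BYTES / 4)   # 1024.0 -- a float, wrong type for an array size
    print(SECTOR_BYTES // 4)  # 1024   -- floor division stays an integer
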
@@ -49,18 +49,18 @@ class RegionFile(object):
         if not os.path.exists(path):
             if readonly:
                 raise IOError("Region file not found: %r" % path)
-            file(path, "w").close()
+            open(path, "w").close()
             newFile = True
 
         filesize = os.path.getsize(path)
         mode = "rb" if readonly else "rb+"
-        with file(self.path, mode) as f:
+        with open(self.path, mode) as f:
 
             if newFile:
                 filesize = self.SECTOR_BYTES * 2
                 f.truncate(filesize)
-                self.offsets = numpy.zeros(self.SECTOR_BYTES/4, dtype='>u4')
-                self.modTimes = numpy.zeros(self.SECTOR_BYTES/4, dtype='>u4')
+                self.offsets = numpy.zeros(self.SECTOR_BYTES//4, dtype='>u4')
+                self.modTimes = numpy.zeros(self.SECTOR_BYTES//4, dtype='>u4')
             else:
 
                 if not readonly:
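Background for the offsets and modTimes tables: a region file begins with two 4096-byte header sectors, each an array of 1024 big-endian uint32 values, one per chunk in the 32x32 region; an offset entry packs the chunk's starting sector into the high three bytes and its sector count into the low byte, exactly the offset >> 8 and offset & 0xff seen later in this file. A sketch of decoding one entry under those assumptions (helper name hypothetical):

    import struct

    SECTOR_BYTES = 4096

    def read_header_entry(header_bytes, cx, cz):
        # header_bytes: the first 4096 bytes of a region file; cx, cz in 0..31.
        index = (cx & 0x1f) + (cz & 0x1f) * 32
        (entry,) = struct.unpack_from(">I", header_bytes, index * 4)
        sector_start = entry >> 8    # first 4096-byte sector of the chunk data
        sector_count = entry & 0xff  # number of sectors allocated to the chunk
        return sector_start, sector_count

    # A chunk stored at sector 2 and spanning 1 sector encodes as 0x00000201:
    header = struct.pack(">I", (2 << 8) | 1) + b"\x00" * (SECTOR_BYTES - 4)
    assert read_header_entry(header, 0, 0) == (2, 1)
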
@@ -81,7 +81,7 @@ class RegionFile(object):
             self.offsets = numpy.fromstring(offsetsData, dtype='>u4')
             self.modTimes = numpy.fromstring(modTimesData, dtype='>u4')
 
-            self.freeSectors = [True] * (filesize / self.SECTOR_BYTES)
+            self.freeSectors = [True] * (filesize // self.SECTOR_BYTES)
             self.freeSectors[0:2] = False, False
 
             if not newFile:
@@ -208,7 +208,7 @@ class RegionFile(object):
         if sectorStart + numSectors > len(self.freeSectors):
             raise ChunkNotPresent((cx, cz))
 
-        with file(self.path, "rb") as f:
+        with open(self.path, "rb") as f:
             f.seek(sectorStart * self.SECTOR_BYTES)
             data = f.read(numSectors * self.SECTOR_BYTES)
         if len(data) < 5:
@@ -251,7 +251,7 @@ class RegionFile(object):
         offset = self._getOffset(cx, cz)
         sectorNumber = offset >> 8
         sectorsAllocated = offset & 0xff
-        sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) / self.SECTOR_BYTES + 1
+        sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) // self.SECTOR_BYTES + 1
         if sectorsNeeded >= 256:
             err = RegionFormatError("Cannot save chunk %s with compressed length %s (exceeds 1 megabyte)" %
                                     ((cx, cz), len(data)))
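The sectorsNeeded arithmetic rounds the 5-byte chunk header plus compressed data up to whole 4096-byte sectors; floor division plus one gives an upper bound. Worked numbers, assuming the class's SECTOR_BYTES and CHUNK_HEADER_SIZE constants:

    SECTOR_BYTES = 4096
    CHUNK_HEADER_SIZE = 5  # 4-byte length prefix + 1-byte compression format

    # 8000 bytes of compressed chunk data plus its 5-byte header:
    sectors_needed = (8000 + CHUNK_HEADER_SIZE) // SECTOR_BYTES + 1
    assert sectors_needed == 2  # 8005 bytes fit in two 4096-byte sectors

    # The >= 256 check exists because the sector count must fit in the low
    # byte of the header entry (offset & 0xff), i.e. just under 1 megabyte.
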
@@ -301,7 +301,7 @@ class RegionFile(object):
 
         region_debug("REGION SAVE {0},{1}, growing by {2}b".format(cx, cz, len(data)))
 
-        with file(self.path, "rb+") as f:
+        with open(self.path, "rb+") as f:
             f.seek(0, 2)
             filesize = f.tell()
 
@@ -320,7 +320,7 @@ class RegionFile(object):
         self.setTimestamp(cx, cz)
 
     def writeSector(self, sectorNumber, data, format):
-        with file(self.path, "rb+") as f:
+        with open(self.path, "rb+") as f:
             region_debug("REGION: Writing sector {0}".format(sectorNumber))
 
             f.seek(sectorNumber * self.SECTOR_BYTES)
@@ -341,7 +341,7 @@ class RegionFile(object):
         cx &= 0x1f
         cz &= 0x1f
         self.offsets[cx + cz * 32] = offset
-        with file(self.path, "rb+") as f:
+        with open(self.path, "rb+") as f:
             f.seek(0)
             f.write(self.offsets.tostring())
 
@@ -366,8 +366,6 @@ class RegionFile(object):
         cx &= 0x1f
         cz &= 0x1f
         self.modTimes[cx + cz * 32] = timestamp
-        with file(self.path, "rb+") as f:
+        with open(self.path, "rb+") as f:
             f.seek(self.SECTOR_BYTES)
             f.write(self.modTimes.tostring())
-
-
@@ -30,7 +30,7 @@ class PocketChunksFile(object):
 
     @property
     def file(self):
-        openfile = lambda: file(self.path, "rb+")
+        openfile = lambda: open(self.path, "rb+")
         if PocketChunksFile.holdFileOpen:
             if self._file is None:
                 self._file = openfile()
@@ -47,7 +47,7 @@ class PocketChunksFile(object):
         self.path = path
         self._file = None
         if not os.path.exists(path):
-            file(path, "w").close()
+            open(path, "w").close()
 
         with self.file as f:
 
@@ -656,7 +656,7 @@ class RevisionHistoryNode(object):
         if self.worldFolder.containsChunk(cx, cz, dimName):
             self.worldFolder.deleteChunk(cx, cz, dimName)
         else:
-            with file(self._deadChunksFile(), "w") as f:
+            with open(self._deadChunksFile(), "w") as f:
                 f.write("%d, %d, %s\n" % (cx, cz, dimName))
                 f.close()
         self.deadChunks.add((cx, cz, dimName))
@@ -669,7 +669,7 @@ class RevisionHistoryNode(object):
         """
         if self.invalid:
             raise RuntimeError("Accessing invalid node: %r" % self)
-        with file(self._deadChunksFile()) as f:
+        with open(self._deadChunksFile()) as f:
             lines = f.read().split('\n')
 
         def _coords():
@@ -733,7 +733,7 @@ class RevisionHistoryNode(object):
         if self.worldFolder.containsFile(path):
             self.worldFolder.deleteFile(path)
         else:
-            with file(self._deadFilesFile(), "w") as f:
+            with open(self._deadFilesFile(), "w") as f:
                 f.write("%s\n" % path)
                 f.close()
         self.deadFiles.add(path)
@@ -746,7 +746,7 @@ class RevisionHistoryNode(object):
         """
         if self.invalid:
             raise RuntimeError("Accessing invalid node: %r" % self)
-        with file(self._deadFilesFile()) as f:
+        with open(self._deadFilesFile()) as f:
             return f.read().split('\n')
 
     def readFile(self, path):
@@ -23,12 +23,13 @@ class ISelection(object):
     """
     chunkPositions = NotImplemented
 
-    def __contains__(self, (x, y, z)):
+    def __contains__(self, xyz):
         """
         Return True if the given set of coordinates is within this selection.
 
         :rtype: bool
         """
+        (x, y, z) = xyz
 
     def contains_coords(self, x, y, z):
         """
@@ -544,7 +545,8 @@ class BoundingBox(SelectionBox):
         # return self.__class__(origin, size)
         return BoundingBox(origin, size)
 
-    def __contains__(self, (x, y, z)):
+    def __contains__(self, xyz):
+        (x, y, z) = xyz
         if x < self.minx or x >= self.maxx:
             return False
         if y < self.miny or y >= self.maxy:
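The last two hunks are PEP 3113 fixes: Python 3 dropped tuple parameters in function signatures, so def __contains__(self, (x, y, z)) is a syntax error there. The portable form accepts a single argument and unpacks it in the body, which keeps the usual "point in box" membership test working. A sketch with a hypothetical stand-in class:

    class Box(object):
        # Hypothetical stand-in for BoundingBox, showing the portable form.
        def __init__(self, minx, miny, minz, maxx, maxy, maxz):
            self.minx, self.miny, self.minz = minx, miny, minz
            self.maxx, self.maxy, self.maxz = maxx, maxy, maxz

        def __contains__(self, xyz):
            # PEP 3113: unpack in the body, not in the signature.
            (x, y, z) = xyz
            return (self.minx <= x < self.maxx and
                    self.miny <= y < self.maxy and
                    self.minz <= z < self.maxz)

    assert (1, 2, 3) in Box(0, 0, 0, 16, 16, 16)
    assert (99, 2, 3) not in Box(0, 0, 0, 16, 16, 16)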