deploy-ng: only include libraries referenced by used modules
This commit is contained in:
parent a686f501b6, commit 55ce23fbba
@@ -652,7 +652,7 @@ class Freezer:
         return 'ModuleDef(%s)' % (', '.join(args))
 
     def __init__(self, previous = None, debugLevel = 0,
-                 platform = None):
+                 platform = None, path=None):
         # Normally, we are freezing for our own platform. Change this
         # if untrue.
         self.platform = platform or PandaSystem.getPlatform()
@@ -663,6 +663,10 @@ class Freezer:
         # default object will be created when it is needed.
         self.cenv = None
 
+        # This is the search path to use for Python modules. Leave it
+        # to the default value of None to use sys.path.
+        self.path = path
+
         # The filename extension to append to the source file before
         # compiling.
         self.sourceExtension = '.c'
@@ -999,7 +1003,7 @@ class Freezer:
             else:
                 includes.append(mdef)
 
-        self.mf = PandaModuleFinder(excludes = list(excludeDict.keys()), suffixes=self.moduleSuffixes)
+        self.mf = PandaModuleFinder(excludes=list(excludeDict.keys()), suffixes=self.moduleSuffixes, path=self.path)
 
         # Attempt to import the explicit modules into the modulefinder.
 
@@ -1672,6 +1676,7 @@ class Freezer:
         f.write(struct.pack('<I', modsoffset))
         f.write(struct.pack('<I', len(moduleList)))
+        os.chmod(target, 0o755)
         return target
 
     def makeModuleDef(self, mangledName, code):
         result = ''
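Taken together, the FreezeTool changes let a caller hand the Freezer an explicit module search path instead of mutating sys.path. A minimal usage sketch (the wheel path is hypothetical):

```python
import sys
from direct.showutil import FreezeTool

# Search path = the interpreter's path plus a downloaded wheel, built
# without touching sys.path itself.
path = sys.path + ['build/__whl_cache__/panda3d-1.10.0-cp36-cp36m-win_amd64.whl']

freezer = FreezeTool.Freezer(platform='win_amd64', path=path)
freezer.addModule('__main__', filename='main.py')
freezer.done(addStartupModules=True)
```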
@@ -6,12 +6,15 @@ import pip
 import sys
 import subprocess
 import zipfile
+import struct
+import io
 
 import distutils.core
 import distutils.dir_util
 import distutils.file_util
 
-from direct.showutil import FreezeTool
+from . import FreezeTool
+from . import pefile
 import panda3d.core as p3d
 
 
@@ -19,27 +22,6 @@ if 'basestring' not in globals():
     basestring = str
 
 
-# TODO replace with Packager
-def find_packages(whlfile):
-    if whlfile is None:
-        dtool_fn = p3d.Filename(p3d.ExecutionEnvironment.get_dtool_name())
-        libdir = os.path.dirname(dtool_fn.to_os_specific())
-        filelist = [os.path.join(libdir, i) for i in os.listdir(libdir)]
-    else:
-        filelist = whlfile.namelist()
-
-    return [
-        i for i in filelist
-        if '.so.' in i or
-        #TODO: find a better way to exclude deploy_libs from this.
-        (i.endswith('.so') and not i.startswith('deploy_libs/')) or
-        i.endswith('.dll') or
-        i.endswith('.dylib') or
-        'libpandagl' in i or
-        'libpython' in i
-    ]
-
-
 class build_apps(distutils.core.Command):
     user_options = [] # TODO
 
@@ -57,6 +39,18 @@ class build_apps(distutils.core.Command):
         self.build_scripts = {
             '.egg': ('.bam', 'egg2bam -o {1} {0}'),
         }
+        self.exclude_dependencies = []
+
+        # We keep track of the zip files we've opened.
+        self._zip_files = {}
+
+    def _get_zip_file(self, path):
+        if path in self._zip_files:
+            return self._zip_files[path]
+
+        zip = zipfile.ZipFile(path)
+        self._zip_files[path] = zip
+        return zip
 
     def finalize_options(self):
         # TODO
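The cache means repeated dependency lookups into the same wheel reuse one open archive handle. A usage sketch, assuming `cmd` is a configured build_apps instance and the wheel path is hypothetical:

```python
whl = 'build/__whl_cache__/panda3d-1.10.0-cp36-cp36m-win_amd64.whl'

zf1 = cmd._get_zip_file(whl)
zf2 = cmd._get_zip_file(whl)
assert zf1 is zf2           # the second call hits the cache, no re-open
print(zf1.namelist()[:3])   # members can now be read cheaply many times
```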
@@ -69,252 +63,476 @@ class build_apps(distutils.core.Command):
         else:
             platforms = self.deploy_platforms
             use_wheels = True
-        print("Building platforms: {}".format(','.join(platforms)))
-
-        saved_path = sys.path[:]
+        print("Building platforms: {0}".format(','.join(platforms)))
 
         for platform in platforms:
-            builddir = os.path.join(self.build_base, platform)
+            self.build_runtimes(platform, use_wheels)
 
-            if os.path.exists(builddir):
-                distutils.dir_util.remove_tree(builddir)
-            distutils.dir_util.mkpath(builddir)
+    def download_wheels(self, platform):
+        """ Downloads panda3d wheels for the given platform using pip.
+        These are special wheels that are expected to contain a deploy_libs
+        directory containing the Python runtime libraries, which will be added
+        to sys.path."""
 
-        if use_wheels:
-            whldir = os.path.join(self.build_base, '__whl_cache__')
-            abi_tag = pip.pep425tags.get_abi_tag()
+        whldir = os.path.join(self.build_base, '__whl_cache__')
+        abi_tag = pip.pep425tags.get_abi_tag()
 
-            if 'u' in abi_tag and not platform.startswith('manylinux'):
-                abi_tag = abi_tag.replace('u', '')
+        if 'u' in abi_tag and not platform.startswith('manylinux'):
+            abi_tag = abi_tag.replace('u', '')
 
-            pip_args = [
-                'download',
-                '-d', whldir,
-                '-r', self.requirements_path,
-                '--only-binary', ':all:',
-                '--platform', platform,
-                '--abi', abi_tag
-            ]
+        pip_args = [
+            'download',
+            '-d', whldir,
+            '-r', self.requirements_path,
+            '--only-binary', ':all:',
+            '--platform', platform,
+            '--abi', abi_tag
+        ]
 
-            for index in self.pypi_extra_indexes:
-                pip_args += ['--extra-index-url', index]
+        for index in self.pypi_extra_indexes:
+            pip_args += ['--extra-index-url', index]
 
-            pip.main(args=pip_args)
+        pip.main(args=pip_args)
 
-            wheelpaths = [os.path.join(whldir,i) for i in os.listdir(whldir) if platform in i]
+        wheelpaths = [os.path.join(whldir,i) for i in os.listdir(whldir) if platform in i]
+        return wheelpaths
 
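The same download step can be reproduced outside distutils. A minimal sketch, assuming a requirements.txt exists; `pip.main` and `pip.pep425tags` are internal pip APIs that later pip releases removed, so this portable form shells out instead:

```python
import subprocess
import sys

def download_wheels(whldir, requirements_path, platform, abi_tag):
    # Fetch binary wheels only, for an explicit target platform/ABI,
    # into a local cache directory.
    subprocess.check_call([
        sys.executable, '-m', 'pip', 'download',
        '-d', whldir,
        '-r', requirements_path,
        '--only-binary', ':all:',
        '--platform', platform,
        '--abi', abi_tag,
    ])

# Hypothetical values:
download_wheels('build/__whl_cache__', 'requirements.txt',
                'manylinux1_x86_64', 'cp36m')
```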
-            p3dwhl = None
-            for whl in wheelpaths:
-                if 'panda3d-' in whl:
-                    p3dwhlfn = whl
-                    p3dwhl = zipfile.ZipFile(p3dwhlfn)
-                    break
-            else:
-                raise RuntimeError("Missing panda3d wheel")
+    def build_runtimes(self, platform, use_wheels):
+        """ Builds the distributions for the given platform. """
 
-            whlfiles = {whl: zipfile.ZipFile(whl) for whl in wheelpaths}
+        builddir = os.path.join(self.build_base, platform)
 
-            # Add whl files to the path so they are picked up by modulefinder
-            sys.path = saved_path[:]
-            for whl in wheelpaths:
-                sys.path.insert(0, whl)
+        if os.path.exists(builddir):
+            distutils.dir_util.remove_tree(builddir)
+        distutils.dir_util.mkpath(builddir)
 
-            # Add deploy_libs from panda3d whl to the path
-            sys.path.insert(0, os.path.join(p3dwhlfn, 'deploy_libs'))
+        path = sys.path[:]
+        p3dwhl = None
 
+        if use_wheels:
+            wheelpaths = self.download_wheels(platform)
 
-        # Create runtimes
-        freezer_extras = set()
-        freezer_modules = set()
-        def create_runtime(appname, mainscript, use_console):
-            freezer = FreezeTool.Freezer(platform=platform)
-            freezer.addModule('__main__', filename=mainscript)
-            for incmod in self.include_modules.get(appname, []) + self.include_modules.get('*', []):
-                freezer.addModule(incmod)
-            for exmod in self.exclude_modules.get(appname, []) + self.exclude_modules.get('*', []):
-                freezer.excludeModule(exmod)
-            freezer.done(addStartupModules=True)
-
-            stub_name = 'deploy-stub'
-            if platform.startswith('win'):
-                if not use_console:
-                    stub_name = 'deploy-stubw'
-                stub_name += '.exe'
-
-            if use_wheels:
-                stub_file = p3dwhl.open('panda3d_tools/{}'.format(stub_name))
-            else:
-                dtool_path = p3d.Filename(p3d.ExecutionEnvironment.get_dtool_name()).to_os_specific()
-                stub_path = os.path.join(os.path.dirname(dtool_path), '..', 'bin', stub_name)
-                stub_file = open(stub_path, 'rb')
-
-            freezer.generateRuntimeFromStub(os.path.join(builddir, appname), stub_file)
-            stub_file.close()
-
-            freezer_extras.update(freezer.extras)
-            freezer_modules.update(freezer.getAllModuleNames())
-
-        for appname, scriptname in self.gui_apps.items():
-            create_runtime(appname, scriptname, False)
-
-        for appname, scriptname in self.console_apps.items():
-            create_runtime(appname, scriptname, True)
-
-        # Copy extension modules
-        whl_modules = []
-        whl_modules_ext = ''
-        if use_wheels:
-            # Get the module libs
-            whl_modules = [
-                i.replace('deploy_libs/', '') for i in p3dwhl.namelist() if i.startswith('deploy_libs/')
-            ]
-
-            # Pull off extension
-            if whl_modules:
-                whl_modules_ext = '.'.join(whl_modules[0].split('.')[1:])
-            whl_modules = [i.split('.')[0] for i in whl_modules]
-
-        # Make sure to copy any builtins that have shared objects in the deploy libs
-        for mod in freezer_modules:
-            if mod in whl_modules:
-                freezer_extras.add((mod, None))
-
-        # Copy any shared objects we need
-        for module, source_path in freezer_extras:
-            if source_path is not None:
-                # Rename panda3d/core.pyd to panda3d.core.pyd
-                basename = os.path.basename(source_path)
-                if '.' in module:
-                    basename = module.rsplit('.', 1)[0] + '.' + basename
-
-                # Remove python version string
-                if sys.version_info >= (3, 0):
-                    parts = basename.split('.')
-                    parts = parts[:-2] + parts[-1:]
-                    basename = '.'.join(parts)
-            else:
-                # Builtin module, but might not be builtin in wheel libs, so double check
-                if module in whl_modules:
-                    source_path = '{}/deploy_libs/{}.{}'.format(p3dwhlfn, module, whl_modules_ext)
-                    basename = os.path.basename(source_path)
-                else:
-                    continue
-
-            target_path = os.path.join(builddir, basename)
-            if '.whl/' in source_path:
-                # This was found in a wheel, extract it
-                whl, wf = source_path.split('.whl/')
-                whl += '.whl'
-                whlfile = whlfiles[whl]
-                print("copying {} -> {}".format(os.path.join(whl, wf), target_path))
-                with open(target_path, 'wb') as f:
-                    f.write(whlfile.read(wf))
-            else:
-                # Regular file, copy it
-                distutils.file_util.copy_file(source_path, target_path)
-
-        # Find Panda3D libs
-        libs = find_packages(p3dwhl if use_wheels else None)
-
-        # Copy Panda3D files
-        etcdir = os.path.join(builddir, 'etc')
-        if not use_wheels:
-            # Libs
-            for lib in libs:
-                target_path = os.path.join(builddir, os.path.basename(lib))
-                if not os.path.islink(source_path):
-                    distutils.file_util.copy_file(lib, target_path)
-
-            # etc
-            dtool_fn = p3d.Filename(p3d.ExecutionEnvironment.get_dtool_name())
-            libdir = os.path.dirname(dtool_fn.to_os_specific())
-            src = os.path.join(libdir, '..', 'etc')
-            distutils.dir_util.copy_tree(src, etcdir)
+            for whl in wheelpaths:
+                if 'panda3d-' in whl:
+                    p3dwhlfn = whl
+                    p3dwhl = self._get_zip_file(p3dwhlfn)
+                    break
-        else:
-            distutils.dir_util.mkpath(etcdir)
+            else:
+                raise RuntimeError("Missing panda3d wheel")
 
-        # Combine prc files with libs and copy the whole list
-        panda_files = libs + [i for i in p3dwhl.namelist() if i.endswith('.prc')]
-        for pf in panda_files:
-            dstdir = etcdir if pf.endswith('.prc') else builddir
-            target_path = os.path.join(dstdir, os.path.basename(pf))
-            print("copying {} -> {}".format(os.path.join(p3dwhlfn, pf), target_path))
-            with open(target_path, 'wb') as f:
-                f.write(p3dwhl.read(pf))
+            #whlfiles = {whl: self._get_zip_file(whl) for whl in wheelpaths}
 
-        # Copy Game Files
-        ignore_copy_list = [
-            '__pycache__',
-            '*.pyc',
-            '*.py',
-        ]
+            # Add whl files to the path so they are picked up by modulefinder
+            for whl in wheelpaths:
+                path.insert(0, whl)
+
+            # Add deploy_libs from panda3d whl to the path
+            path.insert(0, os.path.join(p3dwhlfn, 'deploy_libs'))
+
+        # Create runtimes
+        freezer_extras = set()
+        freezer_modules = set()
+        def create_runtime(appname, mainscript, use_console):
+            freezer = FreezeTool.Freezer(platform=platform, path=path)
+            freezer.addModule('__main__', filename=mainscript)
+            for incmod in self.include_modules.get(appname, []) + self.include_modules.get('*', []):
+                freezer.addModule(incmod)
+            for exmod in self.exclude_modules.get(appname, []) + self.exclude_modules.get('*', []):
+                freezer.excludeModule(exmod)
+            freezer.done(addStartupModules=True)
+
+            stub_name = 'deploy-stub'
+            if platform.startswith('win'):
+                if not use_console:
+                    stub_name = 'deploy-stubw'
+                stub_name += '.exe'
+
+            if use_wheels:
+                stub_file = p3dwhl.open('panda3d_tools/{0}'.format(stub_name))
+            else:
+                dtool_path = p3d.Filename(p3d.ExecutionEnvironment.get_dtool_name()).to_os_specific()
+                stub_path = os.path.join(os.path.dirname(dtool_path), '..', 'bin', stub_name)
+                stub_file = open(stub_path, 'rb')
+
+            freezer.generateRuntimeFromStub(os.path.join(builddir, appname), stub_file)
+            stub_file.close()
+
+            freezer_extras.update(freezer.extras)
+            freezer_modules.update(freezer.getAllModuleNames())
+
+        for appname, scriptname in self.gui_apps.items():
+            create_runtime(appname, scriptname, False)
+
+        for appname, scriptname in self.console_apps.items():
+            create_runtime(appname, scriptname, True)
+
+        # Copy extension modules
+        whl_modules = []
+        whl_modules_ext = ''
+        if use_wheels:
+            # Get the module libs
+            whl_modules = [
+                i.replace('deploy_libs/', '') for i in p3dwhl.namelist() if i.startswith('deploy_libs/')
+            ]
-        ignore_copy_list += self.exclude_paths
-        ignore_copy_list = [p3d.GlobPattern(i) for i in ignore_copy_list]
-
-        def check_pattern(src):
-            for pattern in ignore_copy_list:
-                # Normalize file paths across platforms
-                path = p3d.Filename.from_os_specific(src).get_fullpath()
-                #print("check ignore:", pattern, src, pattern.matches(path))
-                if pattern.matches(path):
-                    return True
-            return False
+
+            # Pull off extension
+            if whl_modules:
+                whl_modules_ext = '.'.join(whl_modules[0].split('.')[1:])
+            whl_modules = [i.split('.')[0] for i in whl_modules]
 
-        def dir_has_files(directory):
-            files = [
-                i for i in os.listdir(directory)
-                if not check_pattern(os.path.join(directory, i))
-            ]
-            return bool(files)
+        # Make sure to copy any builtins that have shared objects in the deploy libs
+        for mod in freezer_modules:
+            if mod in whl_modules:
+                freezer_extras.add((mod, None))
 
-        def copy_file(src, dst):
-            src = os.path.normpath(src)
-            dst = os.path.normpath(dst)
+        #FIXME: this is a temporary hack to pick up libpandagl.
+        for lib in p3dwhl.namelist():
+            if lib.startswith('panda3d/libpandagl.'):
+                source_path = os.path.join(p3dwhlfn, lib)
+                target_path = os.path.join(builddir, os.path.basename(lib))
+                search_path = [os.path.dirname(source_path)]
+                self.copy_with_dependencies(source_path, target_path, search_path)
 
-            if check_pattern(src):
-                print("skipping file", src)
-                return
+        # Copy any shared objects we need
+        for module, source_path in freezer_extras:
+            if source_path is not None:
+                # Rename panda3d/core.pyd to panda3d.core.pyd
+                basename = os.path.basename(source_path)
+                if '.' in module:
+                    basename = module.rsplit('.', 1)[0] + '.' + basename
+
+                # Remove python version string
+                if sys.version_info >= (3, 0):
+                    parts = basename.split('.')
+                    parts = parts[:-2] + parts[-1:]
+                    basename = '.'.join(parts)
+            else:
+                # Builtin module, but might not be builtin in wheel libs, so double check
+                if module in whl_modules:
+                    source_path = '{0}/deploy_libs/{1}.{2}'.format(p3dwhlfn, module, whl_modules_ext)
+                    basename = os.path.basename(source_path)
+                else:
+                    continue
+
+            # If this is a dynamic library, search for dependencies.
+            search_path = [os.path.dirname(source_path)]
+            if use_wheels:
+                search_path.append(os.path.join(p3dwhlfn, 'deploy_libs'))
+
+            target_path = os.path.join(builddir, basename)
+            self.copy_with_dependencies(source_path, target_path, search_path)
+
+        # Copy Panda3D files
+        etcdir = os.path.join(builddir, 'etc')
+        if not use_wheels:
+            # etc
+            dtool_fn = p3d.Filename(p3d.ExecutionEnvironment.get_dtool_name())
+            libdir = os.path.dirname(dtool_fn.to_os_specific())
+            src = os.path.join(libdir, '..', 'etc')
+            distutils.dir_util.copy_tree(src, etcdir)
+        else:
+            distutils.dir_util.mkpath(etcdir)
+
+        # Combine prc files with libs and copy the whole list
+        panda_files = [i for i in p3dwhl.namelist() if i.endswith('.prc')]
+        for pf in panda_files:
+            dstdir = etcdir if pf.endswith('.prc') else builddir
+            target_path = os.path.join(dstdir, os.path.basename(pf))
+            source_path = os.path.join(p3dwhlfn, pf)
+
+            # If this is a dynamic library, search for dependencies.
+            search_path = [os.path.dirname(source_path)]
+            if use_wheels:
+                search_path.append(os.path.join(p3dwhlfn, 'deploy_libs'))
+
+            self.copy_with_dependencies(source_path, target_path, search_path)
+
+        # Copy Game Files
+        ignore_copy_list = [
+            '__pycache__',
+            '*.pyc',
+            '*.py',
+        ]
+        ignore_copy_list += self.exclude_paths
+        ignore_copy_list = [p3d.GlobPattern(i) for i in ignore_copy_list]
+
+        def check_pattern(src):
+            for pattern in ignore_copy_list:
+                # Normalize file paths across platforms
+                path = p3d.Filename.from_os_specific(src).get_fullpath()
+                #print("check ignore:", pattern, src, pattern.matches(path))
+                if pattern.matches(path):
+                    return True
+            return False
+
+        def dir_has_files(directory):
+            files = [
+                i for i in os.listdir(directory)
+                if not check_pattern(os.path.join(directory, i))
+            ]
+            return bool(files)
+
+        def copy_file(src, dst):
+            src = os.path.normpath(src)
+            dst = os.path.normpath(dst)
+
+            if check_pattern(src):
+                print("skipping file", src)
+                return
+
+            dst_dir = os.path.dirname(dst)
+            if not os.path.exists(dst_dir):
+                distutils.dir_util.mkpath(dst_dir)
+
+            ext = os.path.splitext(src)[1]
+            if not ext:
+                ext = os.path.basename(src)
+            dst_root = os.path.splitext(dst)[0]
+
+            if ext in self.build_scripts:
+                dst_ext, script = self.build_scripts[ext]
+                dst = dst_root + dst_ext
+                script = script.format(src, dst)
+                print("using script:", script)
+                subprocess.call(script.split())
+            else:
+                #print("Copy file", src, dst)
+                distutils.file_util.copy_file(src, dst)
+
+        def copy_dir(src, dst):
+            for item in os.listdir(src):
+                s = os.path.join(src, item)
+                d = os.path.join(dst, item)
+                if os.path.isfile(s):
+                    copy_file(s, d)
+                elif dir_has_files(s):
+                    copy_dir(s, d)
+
+        for path in self.copy_paths:
+            if isinstance(path, basestring):
+                src = dst = path
+            else:
+                src, dst = path
+            dst = os.path.join(builddir, dst)
+
+            if os.path.isfile(src):
+                copy_file(src, dst)
+            else:
+                copy_dir(src, dst)
 
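The ignore list is matched with `p3d.GlobPattern` against normalized forward-slash paths; the same filtering logic can be sketched with the standard library's `fnmatch` (an illustrative substitute, not what the commit uses):

```python
import fnmatch
import os

ignore_copy_list = ['__pycache__', '*.pyc', '*.py']

def check_pattern(src):
    # Normalize separators, mirroring Filename.from_os_specific().
    path = src.replace(os.sep, '/')
    return any(fnmatch.fnmatch(path, pattern) for pattern in ignore_copy_list)

assert check_pattern('game/main.py')             # sources are skipped
assert not check_pattern('game/models/env.bam')  # assets are copied
```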
+    def add_dependency(self, name, target_dir, search_path, referenced_by):
+        """ Searches for the given DLL on the search path. If it exists,
+        copies it to the target_dir. """
+
+        if os.path.exists(os.path.join(target_dir, name)):
+            # We've already added it earlier.
+            return
+
+        if name in self.exclude_dependencies:
+            return
+
+        for dir in search_path:
+            source_path = os.path.join(dir, name)
+
+            if os.path.isfile(source_path):
+                target_path = os.path.join(target_dir, name)
+                self.copy_with_dependencies(source_path, target_path, search_path)
+                return
+
+            elif '.whl/' in source_path:
+                # Check whether the file exists inside the wheel.
+                whl, wf = source_path.split('.whl/')
+                whl += '.whl'
+                whlfile = self._get_zip_file(whl)
+
+                # Look case-insensitively.
+                namelist = whlfile.namelist()
+                namelist_lower = [file.lower() for file in namelist]
+
+                if wf.lower() in namelist_lower:
+                    # We have a match. Change it to the correct case.
+                    wf = namelist[namelist_lower.index(wf.lower())]
+                    source_path = '/'.join((whl, wf))
+                    target_path = os.path.join(target_dir, os.path.basename(wf))
+                    self.copy_with_dependencies(source_path, target_path, search_path)
+                    return
 
-            dst_dir = os.path.dirname(dst)
-            if not os.path.exists(dst_dir):
-                distutils.dir_util.mkpath(dst_dir)
+        # If we didn't find it, look again, but case-insensitively.
+        name_lower = name.lower()
 
-            ext = os.path.splitext(src)[1]
-            if not ext:
-                ext = os.path.basename(src)
-            dst_root = os.path.splitext(dst)[0]
+        for dir in search_path:
+            if os.path.isdir(dir):
+                files = os.listdir(dir)
+                files_lower = [file.lower() for file in files]
 
-            if ext in self.build_scripts:
-                dst_ext, script = self.build_scripts[ext]
-                dst = dst_root + dst_ext
-                script = script.format(src, dst)
-                print("using script:", script)
-                subprocess.call(script.split())
-            else:
-                #print("Copy file", src, dst)
-                distutils.file_util.copy_file(src, dst)
+                if name_lower in files_lower:
+                    name = files[files_lower.index(name_lower)]
+                    source_path = os.path.join(dir, name)
+                    target_path = os.path.join(target_dir, name)
+                    self.copy_with_dependencies(source_path, target_path, search_path)
 
-        def copy_dir(src, dst):
-            for item in os.listdir(src):
-                s = os.path.join(src, item)
-                d = os.path.join(dst, item)
-                if os.path.isfile(s):
-                    copy_file(s, d)
-                elif dir_has_files(s):
-                    copy_dir(s, d)
+        # Warn if we can't find it, but only once.
+        self.warn("could not find dependency {0} (referenced by {1})".format(name, referenced_by))
+        self.exclude_dependencies.append(name)
 
-        for path in self.copy_paths:
-            if isinstance(path, basestring):
-                src = dst = path
-            else:
-                src, dst = path
-            dst = os.path.join(builddir, dst)
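The case-insensitive fallback matters because Windows import tables often record DLL names in a different case than the on-disk files; the lookup reduces to this pattern:

```python
import os

def find_case_insensitive(directory, name):
    # Return the on-disk spelling of `name` inside `directory`, or None.
    files = os.listdir(directory)
    files_lower = [f.lower() for f in files]
    name_lower = name.lower()
    if name_lower in files_lower:
        return files[files_lower.index(name_lower)]
    return None

# e.g. an import table entry 'LIBPANDA.dll' can resolve to the file
# 'libpanda.dll' on a case-sensitive filesystem.
```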
-            if os.path.isfile(src):
-                copy_file(src, dst)
-            else:
-                copy_dir(src, dst)
 
+    def copy_with_dependencies(self, source_path, target_path, search_path):
+        """ Copies source_path to target_path. It also scans source_path for
+        any dependencies, which are located along the given search_path and
+        copied to the same directory as target_path.
+
+        source_path may be located inside a .whl file. """
+
+        print("copying {0} -> {1}".format(os.path.relpath(source_path, self.build_base), os.path.relpath(target_path, self.build_base)))
+
+        # Copy the file, and open it for analysis.
+        if '.whl/' in source_path:
+            # This was found in a wheel, extract it
+            whl, wf = source_path.split('.whl/')
+            whl += '.whl'
+            whlfile = self._get_zip_file(whl)
+            data = whlfile.read(wf)
+            with open(target_path, 'wb') as f:
+                f.write(data)
+            # Wrap the data in a BytesIO, since we need to be able to seek in
+            # the file; the stream returned by whlfile.open won't let us seek.
+            fp = io.BytesIO(data)
+        else:
+            # Regular file, copy it
+            distutils.file_util.copy_file(source_path, target_path)
+            fp = open(target_path, 'rb')
+
+        # What kind of magic does the file contain?
+        deps = []
+        magic = fp.read(4)
+        if magic.startswith(b'MZ'):
+            # It's a Windows DLL or EXE file.
+            pe = pefile.PEFile()
+            pe.read(fp)
+            deps = pe.imports
+
+        elif magic == b'\x7FELF':
+            # Elf magic. Used on (among others) Linux and FreeBSD.
+            deps = self._read_dependencies_elf(fp)
+
+        elif magic in (b'\xFE\xED\xFA\xCE', b'\xCE\xFA\xED\xFE',
+                       b'\xFE\xED\xFA\xCF', b'\xCF\xFA\xED\xFE'):
+            # A Mach-O file, as used on macOS.
+            deps = self._read_dependencies_macho(fp)
+
+        elif magic in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\xCA'):
+            # A fat file, containing multiple Mach-O binaries. In the future,
+            # we may want to extract the one containing the architecture we
+            # are building for.
+            deps = self._read_dependencies_fat(fp)
+
+        # If we discovered any dependencies, recursively add those.
+        if deps:
+            target_dir = os.path.dirname(target_path)
+            base = os.path.basename(target_path)
+            for dep in deps:
+                self.add_dependency(dep, target_dir, search_path, base)
 
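The four-byte magic dispatch is the heart of the dependency walker; condensed into a standalone sketch of just the detection step:

```python
def detect_binary_format(fp):
    # Identify a binary by its magic number, as copy_with_dependencies does.
    magic = fp.read(4)
    if magic.startswith(b'MZ'):
        return 'pe'      # Windows DLL/EXE
    elif magic == b'\x7FELF':
        return 'elf'     # Linux, FreeBSD, ...
    elif magic in (b'\xFE\xED\xFA\xCE', b'\xCE\xFA\xED\xFE',
                   b'\xFE\xED\xFA\xCF', b'\xCF\xFA\xED\xFE'):
        return 'macho'   # macOS, thin 32/64-bit, either endianness
    elif magic in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\xCA'):
        return 'fat'     # macOS, multi-architecture container
    return None
```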
+    def _read_dependencies_elf(self, elf):
+        """ Having read the first 4 bytes of the ELF file, fetches the
+        dependent libraries and returns those as a list. """
+
+        ident = elf.read(12)
+
+        # Make sure we read in the correct endianness and integer size
+        byte_order = "<>"[ord(ident[1:2]) - 1]
+        elf_class = ord(ident[0:1]) - 1  # 0 = 32-bits, 1 = 64-bits
+        header_struct = byte_order + ("HHIIIIIHHHHHH", "HHIQQQIHHHHHH")[elf_class]
+        section_struct = byte_order + ("4xI8xIII8xI", "4xI16xQQI12xQ")[elf_class]
+        dynamic_struct = byte_order + ("iI", "qQ")[elf_class]
+
+        type, machine, version, entry, phoff, shoff, flags, ehsize, phentsize, phnum, shentsize, shnum, shstrndx \
+            = struct.unpack(header_struct, elf.read(struct.calcsize(header_struct)))
+        dynamic_sections = []
+        string_tables = {}
+
+        # Seek to the section header table and find the .dynamic section.
+        elf.seek(shoff)
+        for i in range(shnum):
+            type, offset, size, link, entsize = struct.unpack_from(section_struct, elf.read(shentsize))
+            if type == 6 and link != 0:  # DYNAMIC type, links to string table
+                dynamic_sections.append((offset, size, link, entsize))
+                string_tables[link] = None
+
+        # Read the relevant string tables.
+        for idx in string_tables.keys():
+            elf.seek(shoff + idx * shentsize)
+            type, offset, size, link, entsize = struct.unpack_from(section_struct, elf.read(shentsize))
+            if type != 3: continue
+            elf.seek(offset)
+            string_tables[idx] = elf.read(size)
+
+        # Loop through the dynamic sections and rewrite it if it has an rpath/runpath.
+        needed = []
+        rpath = []
+        for offset, size, link, entsize in dynamic_sections:
+            elf.seek(offset)
+            data = elf.read(entsize)
+            tag, val = struct.unpack_from(dynamic_struct, data)
+
+            # Read tags until we find a NULL tag.
+            while tag != 0:
+                if tag == 1:  # A NEEDED entry. Read it from the string table.
+                    string = string_tables[link][val : string_tables[link].find(b'\0', val)]
+                    needed.append(string.decode('utf-8'))
+
+                elif tag == 15 or tag == 29:
+                    # An RPATH or RUNPATH entry.
+                    string = string_tables[link][val : string_tables[link].find(b'\0', val)]
+                    rpath += string.split(b':')
+
+                data = elf.read(entsize)
+                tag, val = struct.unpack_from(dynamic_struct, data)
+        elf.close()
+
+        #TODO: should we respect the RPATH? Clear it? Warn about it?
+        return needed
 
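A usage sketch for the ELF reader (hypothetical paths, `cmd` assumed to be a build_apps instance); the result corresponds to the NEEDED entries that `readelf -d` would list:

```python
with open('build/manylinux1_x86_64/libpanda.so', 'rb') as fp:
    assert fp.read(4) == b'\x7FELF'  # the caller consumes the magic first
    deps = cmd._read_dependencies_elf(fp)

print(deps)  # e.g. ['libp3dtool.so.1.10', 'libpython3.6m.so.1.0', ...]
```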
+    def _read_dependencies_macho(self, fp):
+        """ Having read the first 4 bytes of the Mach-O file, fetches the
+        dependent libraries and returns those as a list. """
+
+        cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
+            struct.unpack('<IIIIII', fp.read(24))
+
+        is_64 = (cputype & 0x1000000) != 0
+        if is_64:
+            fp.read(4)
+
+        # After the header, we get a series of linker commands. We just
+        # iterate through them and gather up the LC_LOAD_DYLIB commands.
+        load_dylibs = []
+        for i in range(ncmds):
+            cmd, cmdsize = struct.unpack('<II', fp.read(8))
+            cmd_data = fp.read(cmdsize - 8)
+            cmd &= ~0x80000000
+
+            if cmd == 0x0c:  # LC_LOAD_DYLIB
+                dylib = cmd_data[16:].decode('ascii').split('\x00', 1)[0]
+                if dylib.startswith('@loader_path/'):
+                    dylib = dylib.replace('@loader_path/', '')
+                load_dylibs.append(dylib)
+
+        return load_dylibs
 
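Stripping `@loader_path/` treats libraries that sit next to the loading binary as bare names, which the search-path logic above can then resolve; other forms (absolute paths, `@rpath/`) pass through unchanged. A small illustration:

```python
def normalize_dylib(dylib):
    # '@loader_path/' means "relative to the binary being loaded";
    # dropping it leaves a bare name resolvable via the search path.
    if dylib.startswith('@loader_path/'):
        return dylib.replace('@loader_path/', '')
    return dylib

assert normalize_dylib('@loader_path/libp3dtool.1.10.dylib') == 'libp3dtool.1.10.dylib'
assert normalize_dylib('/usr/lib/libSystem.B.dylib') == '/usr/lib/libSystem.B.dylib'
```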
+    def _read_dependencies_fat(self, fp):
+        num_fat = struct.unpack('>I', fp.read(4))[0]
+        if num_fat == 0:
+            return []
+
+        # After the header we get a table of executables in this fat file,
+        # each one with a corresponding offset into the file.
+        # We are just interested in the first one for now.
+        cputype, cpusubtype, offset, size, align = \
+            struct.unpack('>IIIII', fp.read(20))
+
+        # Add 4, since it expects we've already read the magic.
+        fp.seek(offset + 4)
+        return self._read_dependencies_macho(fp)
 
 
 class bdist_apps(distutils.core.Command):
direct/src/showutil/pefile.py — new executable file (863 lines added)
@@ -0,0 +1,863 @@
""" Tools for manipulating Portable Executable files.
|
||||
|
||||
This can be used, for example, to extract a list of dependencies from an .exe
|
||||
or .dll file, or to add version information and an icon resource to it. """
|
||||
|
||||
__all__ = ["PEFile"]
|
||||
|
||||
from struct import Struct, unpack, pack, pack_into
|
||||
from collections import namedtuple
|
||||
from array import array
|
||||
import time
|
||||
from io import BytesIO
|
||||
import sys
|
||||
|
||||
if sys.version_info >= (3, 0):
|
||||
unicode = str
|
||||
unichr = chr
|
||||
|
||||
# Define some internally used structures.
|
||||
RVASize = namedtuple('RVASize', ('addr', 'size'))
|
||||
impdirtab = namedtuple('impdirtab', ('lookup', 'timdat', 'forward', 'name', 'impaddr'))
|
||||
|
||||
|
||||
def _unpack_zstring(mem, offs=0):
|
||||
"Read a zero-terminated string from memory."
|
||||
c = mem[offs]
|
||||
str = ""
|
||||
while c:
|
||||
str += chr(c)
|
||||
offs += 1
|
||||
c = mem[offs]
|
||||
return str
|
||||
|
||||
def _unpack_wstring(mem, offs=0):
|
||||
"Read a UCS-2 string from memory."
|
||||
name_len, = unpack('<H', mem[offs:offs+2])
|
||||
name = ""
|
||||
for i in range(name_len):
|
||||
offs += 2
|
||||
name += unichr(*unpack('<H', mem[offs:offs+2]))
|
||||
return name
|
||||
|
||||
def _padded(n, boundary):
|
||||
align = n % boundary
|
||||
if align:
|
||||
n += boundary - align
|
||||
return n
|
||||
|
||||
|
||||
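`_padded` rounds a length up to the next alignment boundary; the PE writer uses it for string-table, file and section alignment. For instance:

```python
assert _padded(13, 4) == 16            # 13 rounded up to a multiple of 4
assert _padded(16, 4) == 16            # already aligned, unchanged
assert _padded(0x301, 0x200) == 0x400  # file-alignment sized rounding
```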
class Section(object):
    _header = Struct('<8sIIIIIIHHI')

    modified = True

    def read_header(self, fp):
        name, vsize, vaddr, size, scnptr, relptr, lnnoptr, nreloc, nlnno, flags = \
            self._header.unpack(fp.read(40))

        self.name = name.rstrip(b'\x00')
        self.vaddr = vaddr    # Base virtual address to map to.
        self.vsize = vsize
        self.offset = scnptr  # Offset of the section in the file.
        self.size = size
        self.flags = flags

        self.modified = False

    def write_header(self, fp):
        fp.write(self._header.pack(self.name, self.vsize, self.vaddr,
                                   self.size, self.offset, 0, 0, 0, 0,
                                   self.flags))

    def __repr__(self):
        return "<section '%s' memory %x-%x>" % (self.name, self.vaddr, self.vaddr + self.vsize)

    def __gt__(self, other):
        return self.vaddr > other.vaddr

    def __lt__(self, other):
        return self.vaddr < other.vaddr
class DataResource(object):
    """ A resource entry in the resource table. """

    # Resource types.
    cursor = 1
    bitmap = 2
    icon = 3
    menu = 4
    dialog = 5
    string = 6
    font_directory = 7
    font = 8
    accelerator = 9
    rcdata = 10
    message_table = 11
    cursor_group = 12
    icon_group = 14
    version = 16
    dlg_include = 17
    plug_play = 19
    vxd = 20
    animated_cursor = 21
    animated_icon = 22
    html = 23
    manifest = 24

    def __init__(self):
        self._ident = ()
        self.data = None
        self.code_page = 0

    def get_data(self):
        if self.code_page:
            return self.data.encode('cp%d' % self.code_page)
        else:
            return self.data
class IconGroupResource(object):
    code_page = 0
    type = 14
    _entry = Struct('<BBBxHHIH')
    Icon = namedtuple('Icon', ('width', 'height', 'planes', 'bpp', 'size', 'id'))

    def __init__(self):
        self.icons = []

    def add_icon(self, *args, **kwargs):
        self.icons.append(self.Icon(*args, **kwargs))

    def get_data(self):
        data = bytearray(pack('<HHH', 0, 1, len(self.icons)))

        for width, height, planes, bpp, size, id in self.icons:
            colors = 1 << (planes * bpp)
            if colors >= 256:
                colors = 0
            if width >= 256:
                width = 0
            if height >= 256:
                height = 0
            data += self._entry.pack(width, height, colors, planes, bpp, size, id)
        return data

    def unpack_from(self, data, offs=0):
        type, count = unpack('<HH', data[offs+2:offs+6])
        offs += 6
        for i in range(count):
            width, height, colors, planes, bpp, size, id = \
                self._entry.unpack(data[offs:offs+14])
            if width == 0:
                width = 256
            if height == 0:
                height = 256
            self.icons.append(self.Icon(width, height, planes, bpp, size, id))
            offs += 14
class VersionInfoResource(object):
    code_page = 0
    type = 16

    def __init__(self):
        self.string_info = {}
        self.var_info = {}
        self.signature = 0xFEEF04BD
        self.struct_version = 0x10000
        self.file_version = (0, 0, 0, 0)
        self.product_version = (0, 0, 0, 0)
        self.file_flags_mask = 0x3f
        self.file_flags = 0
        self.file_os = 0x40004  # Windows NT
        self.file_type = 1  # Application
        self.file_subtype = 0
        self.file_date = (0, 0)

    def get_data(self):
        # The first part of the header is pretty much fixed - we'll go
        # back later to write the struct size.
        data = bytearray(b'\x00\x004\x00\x00\x00V\x00S\x00_\x00V\x00E\x00R\x00S\x00I\x00O\x00N\x00_\x00I\x00N\x00F\x00O\x00\x00\x00\x00\x00')
        data += pack('<13I', self.signature, self.struct_version,
                     self.file_version[1] | (self.file_version[0] << 16),
                     self.file_version[3] | (self.file_version[2] << 16),
                     self.product_version[1] | (self.product_version[0] << 16),
                     self.product_version[3] | (self.product_version[2] << 16),
                     self.file_flags_mask, self.file_flags,
                     self.file_os, self.file_type, self.file_subtype,
                     self.file_date[0], self.file_date[1])

        self._pack_info(data, 'StringFileInfo', self.string_info)
        self._pack_info(data, 'VarFileInfo', self.var_info)
        data[0:2] = pack('<H', len(data))
        return data

    def _pack_info(self, data, key, value):
        offset = len(data)

        if isinstance(value, dict):
            type = 1
            value_length = 0
        elif isinstance(value, bytes) or isinstance(value, unicode):
            type = 1
            value_length = len(value) * 2 + 2
        else:
            type = 0
            value_length = len(value)

        data += pack('<HHH', 0, value_length, type)

        for c in key:
            data += pack('<H', ord(c))
        data += b'\x00\x00'
        if len(data) & 2:
            data += b'\x00\x00'
        assert len(data) & 3 == 0

        if isinstance(value, dict):
            for key2, value2 in sorted(value.items(), key=lambda x:x[0]):
                self._pack_info(data, key2, value2)
        elif isinstance(value, bytes) or isinstance(value, unicode):
            for c in value:
                data += pack('<H', ord(c))
            data += b'\x00\x00'
        else:
            data += value
            if len(data) & 1:
                data += b'\x00'

        if len(data) & 2:
            data += b'\x00\x00'
        assert len(data) & 3 == 0

        data[offset:offset+2] = pack('<H', len(data) - offset)

    def unpack_from(self, data):
        length, value_length = unpack('<HH', data[0:4])
        offset = 40 + value_length + (value_length & 1)
        dwords = array('I')
        dwords.fromstring(bytes(data[40:offset]))
        if len(dwords) > 0:
            self.signature = dwords[0]
        if len(dwords) > 1:
            self.struct_version = dwords[1]
        if len(dwords) > 3:
            self.file_version = \
                (int(dwords[2] >> 16), int(dwords[2] & 0xffff),
                 int(dwords[3] >> 16), int(dwords[3] & 0xffff))
        if len(dwords) > 5:
            self.product_version = \
                (int(dwords[4] >> 16), int(dwords[4] & 0xffff),
                 int(dwords[5] >> 16), int(dwords[5] & 0xffff))
        if len(dwords) > 7:
            self.file_flags_mask = dwords[6]
            self.file_flags = dwords[7]
        if len(dwords) > 8:
            self.file_os = dwords[8]
        if len(dwords) > 9:
            self.file_type = dwords[9]
        if len(dwords) > 10:
            self.file_subtype = dwords[10]
        if len(dwords) > 12:
            self.file_date = (dwords[11], dwords[12])

        while offset < length:
            offset += self._unpack_info(self, data, offset)

    def __getitem__(self, key):
        if key == 'StringFileInfo':
            return self.string_info
        elif key == 'VarFileInfo':
            return self.var_info
        else:
            raise KeyError("%s does not exist" % (key))

    def __contains__(self, key):
        return key in ('StringFileInfo', 'VarFileInfo')

    def _unpack_info(self, dict, data, offset):
        length, value_length, type = unpack('<HHH', data[offset:offset+6])
        assert length > 0
        end = offset + length
        offset += 6
        key = ""
        c, = unpack('<H', data[offset:offset+2])
        offset += 2
        while c:
            key += unichr(c)
            c, = unpack('<H', data[offset:offset+2])
            offset += 2

        # Padding bytes to align value to 32-bit boundary.
        offset = _padded(offset, 4)

        if value_length > 0:
            # It contains a value.
            if type:
                # It's a wchar array value.
                value = u""
                c, = unpack('<H', data[offset:offset+2])
                offset += 2
                while c:
                    value += unichr(c)
                    c, = unpack('<H', data[offset:offset+2])
                    offset += 2
            else:
                # A binary value.
                value = bytes(data[offset:offset+value_length])
                offset += value_length
            dict[key] = value
        else:
            # It contains sub-entries.
            if key not in dict:
                dict[key] = {}
            subdict = dict[key]
            while offset < end:
                offset += self._unpack_info(subdict, data, offset)

        # Padding bytes to pad value to 32-bit boundary.
        return _padded(length, 4)
class ResourceTable(object):
    """ A table in the resource directory. """

    _header = Struct('<IIHHHH')

    def __init__(self, ident=()):
        self.flags = 0
        self.timdat = 0
        self.version = (0, 0)
        self._name_leaves = []
        self._id_leaves = []
        self._ident = ident
        self._strings_size = 0  # Amount of space occupied by table keys.
        self._descs_size = 0

    def __getitem__(self, key):
        if isinstance(key, int):
            leaves = self._id_leaves
        else:
            leaves = self._name_leaves

        i = 0
        while i < len(leaves):
            idname, leaf = leaves[i]
            if idname >= key:
                if key == idname:
                    return leaf
                break
            i += 1
        if not isinstance(key, int):
            self._strings_size += _padded(len(key) * 2 + 2, 4)
        leaf = ResourceTable(ident=self._ident + (key,))
        leaves.insert(i, (key, leaf))
        return leaf

    def __setitem__(self, key, value):
        """ Adds the given item to the table. Maintains sort order. """
        if isinstance(key, int):
            leaves = self._id_leaves
        else:
            leaves = self._name_leaves

        if not isinstance(value, ResourceTable):
            self._descs_size += 16

        value._ident = self._ident + (key,)
        i = 0
        while i < len(leaves):
            idname, leaf = leaves[i]
            if idname >= key:
                if key == idname:
                    if not isinstance(leaves[i][1], ResourceTable):
                        self._descs_size -= 16
                    leaves[i] = (key, value)
                    return
                break
            i += 1
        if not isinstance(key, int):
            self._strings_size += _padded(len(key) * 2 + 2, 4)
        leaves.insert(i, (key, value))

    def __len__(self):
        return len(self._name_leaves) + len(self._id_leaves)

    def __iter__(self):
        keys = []
        for name, leaf in self._name_leaves:
            keys.append(name)
        for id, leaf in self._id_leaves:
            keys.append(id)
        return iter(keys)

    def items(self):
        return self._name_leaves + self._id_leaves

    def count_resources(self):
        """Counts all of the resources."""
        count = 0
        for key, leaf in self._name_leaves + self._id_leaves:
            if isinstance(leaf, ResourceTable):
                count += leaf.count_resources()
            else:
                count += 1
        return count

    def get_nested_tables(self):
        """Returns all tables in this table and subtables."""
        # First we yield child tables, then nested tables. This is the
        # order in which pack_into assumes the tables will be written.
        for key, leaf in self._name_leaves + self._id_leaves:
            if isinstance(leaf, ResourceTable):
                yield leaf

        for key, leaf in self._name_leaves + self._id_leaves:
            if isinstance(leaf, ResourceTable):
                for table in leaf.get_nested_tables():
                    yield table

    def pack_header(self, data, offs):
        self._header.pack_into(data, offs, self.flags, self.timdat,
                               self.version[0], self.version[1],
                               len(self._name_leaves), len(self._id_leaves))

    def unpack_from(self, mem, addr=0, offs=0):
        start = addr + offs
        self.flags, self.timdat, majver, minver, nnames, nids = \
            self._header.unpack(mem[start:start+16])
        self.version = (majver, minver)
        start += 16

        # Subtables/entries specified by string name.
        self._name_leaves = []
        for i in range(nnames):
            name_p, data = unpack('<II', mem[start:start+8])
            if name_p & 0x80000000:
                name = _unpack_wstring(mem, addr + (name_p & 0x7fffffff))
            else:
                # Not sure what to do with this; I don't have a file with this.
                name = str(name_p)

            if data & 0x80000000:
                entry = ResourceTable(self._ident + (name,))
                entry.unpack_from(mem, addr, data & 0x7fffffff)
            else:
                entry = self._unpack_data_entry(mem, addr + data, ident=self._ident+(name,))
                self._descs_size += 16
            self._name_leaves.append((name, entry))
            self._strings_size += _padded(len(name) * 2 + 2, 4)
            start += 8

        # Subtables/entries specified by integer ID.
        self._id_leaves = []
        for i in range(nids):
            id, data = unpack('<II', mem[start:start+8])
            if data & 0x80000000:
                entry = ResourceTable(self._ident + (id,))
                entry.unpack_from(mem, addr, data & 0x7fffffff)
            else:
                entry = self._unpack_data_entry(mem, addr + data, ident=self._ident+(id,))
                self._descs_size += 16
            self._id_leaves.append((id, entry))
            start += 8

    def _unpack_data_entry(self, mem, addr, ident):
        rva, size, code_page = unpack('<III', mem[addr:addr+12])
        type, name, lang = ident
        #print("%s/%s/%s: %s [%s]" % (type, name, lang, size, code_page))

        data = mem[rva:rva+size]

        if type == VersionInfoResource.type:
            entry = VersionInfoResource()
            entry.unpack_from(data)
        elif type == IconGroupResource.type:
            entry = IconGroupResource()
            entry.unpack_from(data)
        else:
            if code_page != 0:
                # Decode the data using the provided code page.
                data = data.decode("cp%d" % code_page)

            entry = DataResource()
            entry.data = data
            entry.code_page = code_page

        return entry
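Resources in a PE file form a three-level tree (type, then name or ordinal, then language), and `ResourceTable` makes the levels read like nested dictionaries. A usage sketch, assuming `pe` is a parsed `PEFile` (defined below) that carries a version resource, with hypothetical values:

```python
pe = PEFile()
pe.open('deploy-stub.exe')  # hypothetical file

# Type 16 is RT_VERSION; ordinal 1; language 1033 (en-US).
version = pe.resources[16][1][1033]
print(version.file_version)  # e.g. (1, 10, 0, 0)
pe.close()
```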
class PEFile(object):

    imports = ()

    def open(self, fn, mode='r'):
        if 'b' not in mode:
            mode += 'b'
        self.fp = open(fn, mode)
        self.read(self.fp)

    def close(self):
        self.fp.close()

    def read(self, fp):
        """ Reads a PE file from the given file object, which must be opened
        in binary mode. """

        # Read position of header.
        fp.seek(0x3c)
        offset, = unpack('<I', fp.read(4))

        fp.seek(offset)
        if fp.read(4) != b'PE\0\0':
            raise ValueError("Invalid PE file.")

        # Read the COFF header.
        self.machine, nscns, timdat, symptr, nsyms, opthdr, flags = \
            unpack('<HHIIIHH', fp.read(20))

        if nscns == 0:
            raise ValueError("No sections found.")

        if not opthdr:
            raise ValueError("No opthdr found.")

        # Read part of the opthdr.
        magic, self.code_size, self.initialized_size, self.uninitialized_size = \
            unpack('<HxxIII', fp.read(16))

        # Read alignments.
        fp.seek(16, 1)
        self.section_alignment, self.file_alignment = unpack('<II', fp.read(8))

        # Read header/image sizes.
        fp.seek(16, 1)
        self.image_size, self.header_size = unpack('<II', fp.read(8))

        if magic == 0x010b:  # 32-bit.
            fp.seek(28, 1)
        elif magic == 0x20B:  # 64-bit.
            fp.seek(44, 1)
        else:
            raise ValueError("unknown type 0x%x" % (magic))

        self.rva_offset = fp.tell()
        numrvas, = unpack('<I', fp.read(4))

        self.exp_rva = RVASize(0, 0)
        self.imp_rva = RVASize(0, 0)
        self.res_rva = RVASize(0, 0)

        # Locate the relevant tables in memory.
        if numrvas >= 1:
            self.exp_rva = RVASize(*unpack('<II', fp.read(8)))
        if numrvas >= 2:
            self.imp_rva = RVASize(*unpack('<II', fp.read(8)))
        if numrvas >= 3:
            self.res_rva = RVASize(*unpack('<II', fp.read(8)))

        # Skip the rest of the tables.
        if numrvas >= 4:
            fp.seek((numrvas - 3) * 8, 1)

        # Loop through the sections to find the ones containing our tables.
        self.sections = []
        for i in range(nscns):
            section = Section()
            section.read_header(fp)
            self.sections.append(section)

        self.sections.sort()

        # Read the sections into some kind of virtual memory.
        self.vmem = bytearray(self.sections[-1].vaddr + self.sections[-1].size)

        for section in self.sections:
            fp.seek(section.offset)
            fp.readinto(memoryview(self.vmem)[section.vaddr:section.vaddr+section.size])

        # Read the import table.
        start = self.imp_rva.addr
        dir = impdirtab(*unpack('<IIIII', self.vmem[start:start+20]))

        imports = []
        while dir.name and dir.lookup:
            name = _unpack_zstring(self.vmem, dir.name)
            imports.append(name)

            start += 20
            dir = impdirtab(*unpack('<IIIII', self.vmem[start:start+20]))

        # Make it a tuple to indicate we don't support modifying it for now.
        self.imports = tuple(imports)

        # Read the resource tables from the .rsrc section.
        self.resources = ResourceTable()
        if self.res_rva.addr and self.res_rva.size:
            self.resources.unpack_from(self.vmem, self.res_rva.addr)
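With `read` in place, extracting the dependency list used by `copy_with_dependencies` is short (the path and printed output are hypothetical):

```python
pe = PEFile()
pe.open('build/win_amd64/panda3d.core.pyd')  # hypothetical path
print(pe.imports)  # e.g. ('libpanda.dll', 'python36.dll', 'KERNEL32.dll')
pe.close()
```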
    def add_icon(self, icon, ordinal=2):
        """ Adds an icon resource from the given Icon object. Requires
        calling add_resource_section() afterwards. """

        group = IconGroupResource()
        self.resources[group.type][ordinal][1033] = group

        images = sorted(icon.images.items(), key=lambda x:-x[0])
        id = 1

        # Write 8-bpp image headers for sizes under 256x256.
        for size, image in images:
            if size >= 256:
                continue

            xorsize = size
            if xorsize % 4 != 0:
                xorsize += 4 - (xorsize % 4)
            andsize = (size + 7) >> 3
            if andsize % 4 != 0:
                andsize += 4 - (andsize % 4)
            datasize = 40 + 256 * 4 + (xorsize + andsize) * size
            group.add_icon(size, size, 1, 8, datasize, id)

            buf = BytesIO()
            icon._write_bitmap(buf, image, size, 8)

            res = DataResource()
            res.data = buf.getvalue()
            self.resources[3][id][1033] = res
            id += 1

        # And now the 24/32 bpp versions.
        for size, image in images:
            if size > 256:
                continue

            # Calculate the size so we can write the offset within the file.
            if image.hasAlpha():
                bpp = 32
                xorsize = size * 4
            else:
                bpp = 24
                xorsize = size * 3 + (-(size * 3) & 3)
            andsize = (size + 7) >> 3
            if andsize % 4 != 0:
                andsize += 4 - (andsize % 4)
            datasize = 40 + (xorsize + andsize) * size

            buf = BytesIO()
            icon._write_bitmap(buf, image, size, bpp)

            res = DataResource()
            res.data = buf.getvalue()
            self.resources[3][id][1033] = res
            group.add_icon(size, size, 1, bpp, datasize, id)
            id += 1
    def add_section(self, name, flags, data):
        """ Adds a new section with the given name, flags and data. The
        virtual address space is automatically resized to fit the new data.

        Returns the newly created Section object. """

        if isinstance(name, unicode):
            name = name.encode('ascii')

        section = Section()
        section.name = name
        section.flags = flags

        # Put it at the end of all the other sections.
        section.offset = 0
        for s in self.sections:
            section.offset = max(section.offset, s.offset + s.size)

        # Align the offset.
        section.offset = _padded(section.offset, self.file_alignment)

        # Find a place to put it in the virtual address space.
        section.vaddr = len(self.vmem)
        align = section.vaddr % self.section_alignment
        if align:
            pad = self.section_alignment - align
            self.vmem += bytearray(pad)
            section.vaddr += pad

        section.vsize = len(data)
        section.size = _padded(section.vsize, self.file_alignment)
        self.vmem += data
        self.sections.append(section)

        # Update the size tallies from the opthdr.
        self.image_size += _padded(section.vsize, self.section_alignment)
        if flags & 0x20:
            self.code_size += section.size
        if flags & 0x40:
            self.initialized_size += section.size
        if flags & 0x80:
            self.uninitialized_size += section.size

        return section
    def add_version_info(self, file_ver, product_ver, data, lang=1033, codepage=1200):
        """ Adds a version info resource to the file. """

        if "FileVersion" not in data:
            data["FileVersion"] = '.'.join(file_ver)
        if "ProductVersion" not in data:
            data["ProductVersion"] = '.'.join(product_ver)

        assert len(file_ver) == 4
        assert len(product_ver) == 4

        res = VersionInfoResource()
        res.file_version = file_ver
        res.product_version = product_ver
        res.string_info = {
            "%04x%04x" % (lang, codepage): data
        }
        res.var_info = {
            "Translation": bytearray(pack("<HH", lang, codepage))
        }

        self.resources[16][1][lang] = res
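Putting the pieces together, the intended flow for stamping a stub executable appears to be the following sketch (paths and strings hypothetical; supplying FileVersion/ProductVersion strings directly sidesteps the `'.'.join` on the version tuples):

```python
pe = PEFile()
pe.open('build/win_amd64/mygame.exe', 'r+')  # hypothetical stub copy

pe.add_version_info(
    (1, 0, 0, 0), (1, 0, 0, 0),
    {"FileVersion": "1.0.0.0", "ProductVersion": "1.0.0.0",
     "ProductName": "MyGame", "FileDescription": "MyGame launcher"},
)
pe.add_resource_section()  # required after add_icon/add_version_info
pe.write_changes()
pe.close()
```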
    def add_resource_section(self):
        """ Adds a resource section to the file containing the resources that
        were previously added via add_icon et al. Assumes the file does not
        contain a resource section yet. """

        # Calculate how much space to reserve.
        tables = [self.resources] + list(self.resources.get_nested_tables())
        table_size = 0
        string_size = 0
        desc_size = 16 * self.resources.count_resources()

        for table in tables:
            table._offset = table_size
            table_size += 16 + 8 * len(table)
            string_size += table._strings_size
            desc_size += table._descs_size

        # Now write the actual data.
        tbl_offs = 0
        str_offs = table_size
        desc_offs = str_offs + string_size
        data_offs = desc_offs + desc_size
        data = bytearray(data_offs)
        data_addr = _padded(len(self.vmem), self.section_alignment) + data_offs

        for table in tables:
            table.pack_header(data, tbl_offs)

            tbl_offs += 16

            for name, leaf in table._name_leaves:
                if isinstance(leaf, ResourceTable):
                    pack_into('<II', data, tbl_offs, str_offs | 0x80000000, leaf._offset | 0x80000000)
                else:
                    pack_into('<II', data, tbl_offs, str_offs | 0x80000000, desc_offs)
                    resdata = leaf.get_data()
                    pack_into('<IIII', data, desc_offs, data_addr, len(resdata), leaf.code_page, 0)
                    data += resdata
                    desc_offs += 16
                    data_addr += len(resdata)
                    align = len(resdata) & 3
                    if align:
                        data += bytearray(4 - align)
                        data_addr += 4 - align
                tbl_offs += 8

                # Pack the name into the string table.
                pack_into('<H', data, str_offs, len(name))
                str_offs += 2
                for c in name:
                    pack_into('<H', data, str_offs, ord(c))
                    str_offs += 2
                str_offs = _padded(str_offs, 4)

            for id, leaf in table._id_leaves:
                if isinstance(leaf, ResourceTable):
                    pack_into('<II', data, tbl_offs, id, leaf._offset | 0x80000000)
                else:
                    pack_into('<II', data, tbl_offs, id, desc_offs)
                    resdata = leaf.get_data()
                    pack_into('<IIII', data, desc_offs, data_addr, len(resdata), leaf.code_page, 0)
                    data += resdata
                    desc_offs += 16
                    data_addr += len(resdata)
                    align = len(resdata) & 3
                    if align:
                        data += bytearray(4 - align)
                        data_addr += 4 - align
                tbl_offs += 8

        flags = 0x40000040  # readable, contains initialized data
        section = self.add_section('.rsrc', flags, data)
        self.res_rva = RVASize(section.vaddr, section.vsize)
    def write_changes(self):
        """ Assuming the file was opened in read-write mode, writes back the
        changes made via this class to the .exe file. """

        fp = self.fp
        # Read position of header.
        fp.seek(0x3c)
        offset, = unpack('<I', fp.read(4))

        fp.seek(offset)
        if fp.read(4) != b'PE\0\0':
            raise ValueError("Invalid PE file.")

        # Sync read/write pointer. Necessary before write. Bug in Python?
        fp.seek(fp.tell())

        # Rewrite the first part of the COFF header.
        timdat = int(time.time())
        fp.write(pack('<HHI', self.machine, len(self.sections), timdat))

        # Write calculated init and uninitialised sizes to the opthdr.
        fp.seek(16, 1)
        fp.write(pack('<III', self.code_size, self.initialized_size, self.uninitialized_size))

        # Same for the image and header size.
        fp.seek(40, 1)
        fp.write(pack('<II', self.image_size, self.header_size))

        # Write the modified RVA table.
        fp.seek(self.rva_offset)
        numrvas, = unpack('<I', fp.read(4))
        assert numrvas >= 3

        fp.seek(self.rva_offset + 4)
        if numrvas >= 1:
            fp.write(pack('<II', *self.exp_rva))
        if numrvas >= 2:
            fp.write(pack('<II', *self.imp_rva))
        if numrvas >= 3:
            fp.write(pack('<II', *self.res_rva))

        # Skip the rest of the tables.
        if numrvas >= 4:
            fp.seek((numrvas - 3) * 8, 1)

        # Write the modified section headers.
        for section in self.sections:
            section.write_header(fp)
        assert fp.tell() <= self.header_size

        # Write the section data of modified sections.
        for section in self.sections:
            if not section.modified:
                continue

            fp.seek(section.offset)
            size = min(section.vsize, section.size)
            fp.write(self.vmem[section.vaddr:section.vaddr+size])

            pad = section.size - size
            assert pad >= 0
            if pad > 0:
                fp.write(bytearray(pad))

            section.modified = False