Unpacked BAM wheel into a directory.

This avoids having binary data in the addons repository. I've
also added a script that automates this task. It:

- wipes any pre-existing unpacked BAM wheel,
- unpacks the new BAM wheel,
- copies some files from the extracted wheel directly into the
  add-on,
- updates the version number of the add-on so that it matches
  the version of BAM, and
- updates the path where the add-on searches for the unpacked
  wheel.
Sybren A. Stüvel 2017-06-10 10:42:03 +02:00
parent 940bd28434
commit 6e38b99641
24 changed files with 5507 additions and 10 deletions
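
In outline, a script automating the steps listed in the commit message could look like the sketch below (hypothetical: the committed install_whl.py is not shown in this excerpt, so the names and details here are assumptions):

#!/usr/bin/env python3
# Hypothetical sketch, not the committed install_whl.py.
import pathlib
import re
import shutil
import zipfile

ADDON_DIR = pathlib.Path(__file__).absolute().parent

def install(wheel_file: str):
    wheel = pathlib.Path(wheel_file)
    # wipe any pre-existing unpacked BAM wheel
    for old in ADDON_DIR.glob('blender_bam-*.whl'):
        if old.is_dir():
            shutil.rmtree(str(old))
    # unpack the new wheel; the directory name keeps the version and .whl suffix
    with zipfile.ZipFile(str(wheel)) as zf:
        zf.extractall(str(ADDON_DIR / wheel.name))
    # point the add-on at the new wheel directory; updating the bl_info
    # version tuple to match BAM's version would work the same way
    init = ADDON_DIR / '__init__.py'
    source = init.read_text(encoding='utf-8')
    source = re.sub(r"BAM_WHEEL_PATH = '[^']*'",
                    "BAM_WHEEL_PATH = '%s'" % wheel.name, source)
    init.write_text(source, encoding='utf-8')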

View File

@@ -10,17 +10,23 @@ Bundling BAM with Blender
-------------------------
Blender is bundled with a version of [BAM](https://pypi.python.org/pypi/blender-bam/).
-To update this version, first build a new `wheel <http://pythonwheels.com/>`_ file in
+To update this version, first build a new [wheel](http://pythonwheels.com/) file in
BAM itself:
python3 setup.py bdist_wheel
-Then copy this wheel to Blender:
+Since we do not want to have binaries in the addons repository, unpack this wheel to Blender
+by running:
-cp dist/blender_bam-xxx.whl /path/to/blender/release/scripts/addons/io_blend_utils/
+python3 install_whl.py /path/to/blender-asset-manager/dist/blender_bam-xxx.whl
-Remove old wheels that are still in `/path/to/blender/release/scripts/addons/io_blend_utils/`
-before committing.
+This script also updates `__init__.py` to update the version number and path of the extracted
+wheel, and removes any pre-existing older versions of the BAM wheels.
+The version number and `.whl` extension are maintained in the directory name on purpose.
+This way it is clear that it is not a directory to import directly into Blender itself.
+Furthermore, I (Sybren) hope that it helps to get changes made in the addons repository
+back into the BAM repository.
Running bam-pack from the wheel
@@ -29,4 +35,3 @@ Running bam-pack from the wheel
This is the way that Blender runs bam-pack:
PYTHONPATH=./path/to/blender_bam-xxx.whl python3 -m bam.pack

View File

@@ -29,7 +29,7 @@ bl_info = {
"category": "Import-Export",
}
-BAM_WHEEL_FILE = 'blender_bam-1.1.7-py3-none-any.whl'
+BAM_WHEEL_PATH = 'blender_bam-1.1.7-py3-none-any.whl'
import logging
@@ -117,11 +117,11 @@ def pythonpath() -> str:
log = logging.getLogger('%s.pythonpath' % __name__)
# Find the wheel to run.
-wheelpath = pathlib.Path(__file__).with_name(BAM_WHEEL_FILE)
+wheelpath = pathlib.Path(__file__).with_name(BAM_WHEEL_PATH)
if not wheelpath.exists():
-raise EnvironmentError('Wheel file %s does not exist!' % wheelpath)
+raise EnvironmentError('Wheel %s does not exist!' % wheelpath)
-log.info('Using wheel file %s to run BAM-Pack', wheelpath)
+log.info('Using wheel %s to run BAM-Pack', wheelpath)
# Update the PYTHONPATH to include that wheel.
existing_pypath = os.environ.get('PYTHONPATH', '')
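# Hypothetical sketch (not part of this diff): how a caller could use
# pythonpath() to launch BAM-Pack in a subprocess, given the names above.
#
#   import subprocess, sys
#   env = os.environ.copy()
#   env['PYTHONPATH'] = pythonpath()
#   subprocess.call([sys.executable, '-m', 'bam.pack'], env=env)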

View File

@@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-
__version__ = '1.1.7'
if __name__ == '__main__':
from .cli import main
main()

View File

@@ -0,0 +1,8 @@
"""Main module for running python -m bam.
Doesn't do much, except for printing general usage information.
"""
print("The 'bam' module cannot be run directly. The following subcommand is available:")
print()
print("python -m bam.pack")

View File

@@ -0,0 +1,956 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
#
# (c) 2009, At Mind B.V. - Jeroen Bakker
# (c) 2014, Blender Foundation - Campbell Barton
import gzip
import logging
import os
import struct
import tempfile
log = logging.getLogger("blendfile")
FILE_BUFFER_SIZE = 1024 * 1024
class BlendFileError(Exception):
"""Raised when there was an error reading/parsing a blend file."""
# -----------------------------------------------------------------------------
# module global routines
#
# read routines
# open a filename
# determine if the file is compressed
# and returns a handle
def open_blend(filename, access="rb"):
"""Opens a blend file for reading or writing pending on the access
supports 2 kind of blend files. Uncompressed and compressed.
Known issue: does not support packaged blend files
"""
handle = open(filename, access)
magic_test = b"BLENDER"
magic = handle.read(len(magic_test))
if magic == magic_test:
log.debug("normal blendfile detected")
handle.seek(0, os.SEEK_SET)
bfile = BlendFile(handle)
bfile.is_compressed = False
bfile.filepath_orig = filename
return bfile
elif magic[:2] == b'\x1f\x8b':
log.debug("gzip blendfile detected")
handle.close()
log.debug("decompressing started")
fs = gzip.open(filename, "rb")
data = fs.read(FILE_BUFFER_SIZE)
magic = data[:len(magic_test)]
if magic == magic_test:
handle = tempfile.TemporaryFile()
while data:
handle.write(data)
data = fs.read(FILE_BUFFER_SIZE)
log.debug("decompressing finished")
fs.close()
log.debug("resetting decompressed file")
handle.seek(0, os.SEEK_SET)
bfile = BlendFile(handle)
bfile.is_compressed = True
bfile.filepath_orig = filename
return bfile
else:
raise BlendFileError("filetype inside gzip not a blend")
else:
raise BlendFileError("filetype not a blend or a gzip blend")
def pad_up_4(offset):
return (offset + 3) & ~3
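# e.g. pad_up_4(0) == 0, pad_up_4(1) == 4, pad_up_4(5) == 8 (DNA data is 4-byte aligned)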
# -----------------------------------------------------------------------------
# module classes
class BlendFile:
"""
Blend file.
"""
__slots__ = (
# file (result of open())
"handle",
# str (original name of the file path)
"filepath_orig",
# BlendFileHeader
"header",
# struct.Struct
"block_header_struct",
# BlendFileBlock
"blocks",
# [DNAStruct, ...]
"structs",
# dict {b'StructName': sdna_index}
# (where the index is an index into 'structs')
"sdna_index_from_id",
# dict {addr_old: block}
"block_from_offset",
# int
"code_index",
# bool (did we make a change)
"is_modified",
# bool (is file gzipped)
"is_compressed",
)
def __init__(self, handle):
log.debug("initializing reading blend-file")
self.handle = handle
self.header = BlendFileHeader(handle)
self.block_header_struct = self.header.create_block_header_struct()
self.blocks = []
self.code_index = {}
self.structs = []
self.sdna_index_from_id = {}
block = BlendFileBlock(handle, self)
while block.code != b'ENDB':
if block.code == b'DNA1':
(self.structs,
self.sdna_index_from_id,
) = BlendFile.decode_structs(self.header, block, handle)
else:
handle.seek(block.size, os.SEEK_CUR)
self.blocks.append(block)
self.code_index.setdefault(block.code, []).append(block)
block = BlendFileBlock(handle, self)
self.is_modified = False
self.blocks.append(block)
if not self.structs:
raise BlendFileError("No DNA1 block in file, this is not a valid .blend file!")
# cache (could lazy init, in case we never use it?)
self.block_from_offset = {block.addr_old: block for block in self.blocks if block.code != b'ENDB'}
def __repr__(self):
return '<%s %r>' % (self.__class__.__qualname__, self.handle)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def find_blocks_from_code(self, code):
assert(type(code) == bytes)
if code not in self.code_index:
return []
return self.code_index[code]
def find_block_from_offset(self, offset):
# same as looping over all blocks,
# then checking ``block.addr_old == offset``
assert(type(offset) is int)
return self.block_from_offset.get(offset)
def close(self):
"""
Close the blend file;
writes the blend file to disk if changes have happened
"""
handle = self.handle
if self.is_modified:
if self.is_compressed:
log.debug("close compressed blend file")
handle.seek(0, os.SEEK_SET)
log.debug("compressing started")
fs = gzip.open(self.filepath_orig, "wb")
data = handle.read(FILE_BUFFER_SIZE)
while data:
fs.write(data)
data = handle.read(FILE_BUFFER_SIZE)
fs.close()
log.debug("compressing finished")
handle.close()
def ensure_subtype_smaller(self, sdna_index_curr, sdna_index_next):
# never refine to a smaller type
if (self.structs[sdna_index_curr].size >
self.structs[sdna_index_next].size):
raise RuntimeError("cant refine to smaller type (%s -> %s)" %
(self.structs[sdna_index_curr].dna_type_id.decode('ascii'),
self.structs[sdna_index_next].dna_type_id.decode('ascii')))
@staticmethod
def decode_structs(header, block, handle):
"""
DNACatalog is a catalog of all information in the DNA1 file-block
"""
log.debug("building DNA catalog")
shortstruct = DNA_IO.USHORT[header.endian_index]
shortstruct2 = struct.Struct(header.endian_str + b'HH')
intstruct = DNA_IO.UINT[header.endian_index]
data = handle.read(block.size)
types = []
names = []
structs = []
sdna_index_from_id = {}
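# DNA1 block layout, as consumed below:
#   b'SDNA' b'NAME' <count> <names> b'TYPE' <count> <types>
#   b'TLEN' <sizes> b'STRC' <count> <structs>
# the first 8 bytes skipped here are the b'SDNA' and b'NAME' markers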
offset = 8
names_len = intstruct.unpack_from(data, offset)[0]
offset += 4
log.debug("building #%d names" % names_len)
for i in range(names_len):
tName = DNA_IO.read_data0_offset(data, offset)
offset = offset + len(tName) + 1
names.append(DNAName(tName))
del names_len
offset = pad_up_4(offset)
offset += 4
types_len = intstruct.unpack_from(data, offset)[0]
offset += 4
log.debug("building #%d types" % types_len)
for i in range(types_len):
dna_type_id = DNA_IO.read_data0_offset(data, offset)
# None will be replaced by the DNAStruct, below
types.append(DNAStruct(dna_type_id))
offset += len(dna_type_id) + 1
offset = pad_up_4(offset)
offset += 4
log.debug("building #%d type-lengths" % types_len)
for i in range(types_len):
tLen = shortstruct.unpack_from(data, offset)[0]
offset = offset + 2
types[i].size = tLen
del types_len
offset = pad_up_4(offset)
offset += 4
structs_len = intstruct.unpack_from(data, offset)[0]
offset += 4
log.debug("building #%d structures" % structs_len)
for sdna_index in range(structs_len):
d = shortstruct2.unpack_from(data, offset)
struct_type_index = d[0]
offset += 4
dna_struct = types[struct_type_index]
sdna_index_from_id[dna_struct.dna_type_id] = sdna_index
structs.append(dna_struct)
fields_len = d[1]
dna_offset = 0
for field_index in range(fields_len):
d2 = shortstruct2.unpack_from(data, offset)
field_type_index = d2[0]
field_name_index = d2[1]
offset += 4
dna_type = types[field_type_index]
dna_name = names[field_name_index]
if dna_name.is_pointer or dna_name.is_method_pointer:
dna_size = header.pointer_size * dna_name.array_size
else:
dna_size = dna_type.size * dna_name.array_size
field = DNAField(dna_type, dna_name, dna_size, dna_offset)
dna_struct.fields.append(field)
dna_struct.field_from_name[dna_name.name_only] = field
dna_offset += dna_size
return structs, sdna_index_from_id
class BlendFileBlock:
"""
Instance of a struct.
"""
__slots__ = (
# BlendFile
"file",
"code",
"size",
"addr_old",
"sdna_index",
"count",
"file_offset",
"user_data",
)
def __str__(self):
return ("<%s.%s (%s), size=%d at %s>" %
# fields=[%s]
(self.__class__.__name__,
self.dna_type_name,
self.code.decode(),
self.size,
# b", ".join(f.dna_name.name_only for f in self.dna_type.fields).decode('ascii'),
hex(self.addr_old),
))
def __init__(self, handle, bfile):
OLDBLOCK = struct.Struct(b'4sI')
self.file = bfile
self.user_data = None
data = handle.read(bfile.block_header_struct.size)
if len(data) != bfile.block_header_struct.size:
print("WARNING! Blend file seems to be badly truncated!")
self.code = b'ENDB'
self.size = 0
self.addr_old = 0
self.sdna_index = 0
self.count = 0
self.file_offset = 0
return
# header size can be 8, 20, or 24 bytes long
# 8: old blend files ENDB block (exception)
# 20: normal headers 32 bit platform
# 24: normal headers 64 bit platform
if len(data) > 15:
blockheader = bfile.block_header_struct.unpack(data)
self.code = blockheader[0].partition(b'\0')[0]
if self.code != b'ENDB':
self.size = blockheader[1]
self.addr_old = blockheader[2]
self.sdna_index = blockheader[3]
self.count = blockheader[4]
self.file_offset = handle.tell()
else:
self.size = 0
self.addr_old = 0
self.sdna_index = 0
self.count = 0
self.file_offset = 0
else:
blockheader = OLDBLOCK.unpack(data)
self.code = DNA_IO.read_data0(blockheader[0])
self.size = 0
self.addr_old = 0
self.sdna_index = 0
self.count = 0
self.file_offset = 0
@property
def dna_type(self):
return self.file.structs[self.sdna_index]
@property
def dna_type_name(self):
return self.dna_type.dna_type_id.decode('ascii')
def refine_type_from_index(self, sdna_index_next):
assert(type(sdna_index_next) is int)
sdna_index_curr = self.sdna_index
self.file.ensure_subtype_smaller(sdna_index_curr, sdna_index_next)
self.sdna_index = sdna_index_next
def refine_type(self, dna_type_id):
assert(type(dna_type_id) is bytes)
self.refine_type_from_index(self.file.sdna_index_from_id[dna_type_id])
def get_file_offset(self, path,
default=...,
sdna_index_refine=None,
base_index=0,
):
"""
Return (offset, length)
"""
assert(type(path) is bytes)
ofs = self.file_offset
if base_index != 0:
assert(base_index < self.count)
ofs += (self.size // self.count) * base_index
self.file.handle.seek(ofs, os.SEEK_SET)
if sdna_index_refine is None:
sdna_index_refine = self.sdna_index
else:
self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
dna_struct = self.file.structs[sdna_index_refine]
field = dna_struct.field_from_path(
self.file.header, self.file.handle, path)
return (self.file.handle.tell(), field.dna_name.array_size)
def get(self, path,
default=...,
sdna_index_refine=None,
use_nil=True, use_str=True,
base_index=0,
):
ofs = self.file_offset
if base_index != 0:
assert(base_index < self.count)
ofs += (self.size // self.count) * base_index
self.file.handle.seek(ofs, os.SEEK_SET)
if sdna_index_refine is None:
sdna_index_refine = self.sdna_index
else:
self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
dna_struct = self.file.structs[sdna_index_refine]
return dna_struct.field_get(
self.file.header, self.file.handle, path,
default=default,
use_nil=use_nil, use_str=use_str,
)
def get_recursive_iter(self, path, path_root=b"",
default=...,
sdna_index_refine=None,
use_nil=True, use_str=True,
base_index=0,
):
if path_root:
path_full = (
(path_root if type(path_root) is tuple else (path_root, )) +
(path if type(path) is tuple else (path, )))
else:
path_full = path
try:
yield (path_full, self.get(path_full, default, sdna_index_refine, use_nil, use_str, base_index))
except NotImplementedError as ex:
msg, dna_name, dna_type = ex.args
struct_index = self.file.sdna_index_from_id.get(dna_type.dna_type_id, None)
if struct_index is None:
yield (path_full, "<%s>" % dna_type.dna_type_id.decode('ascii'))
else:
struct = self.file.structs[struct_index]
for f in struct.fields:
yield from self.get_recursive_iter(
f.dna_name.name_only, path_full, default, None, use_nil, use_str, 0)
def items_recursive_iter(self):
for k in self.keys():
yield from self.get_recursive_iter(k, use_str=False)
def get_data_hash(self):
"""
Generates a 'hash' that can be used instead of addr_old as block id, and that should be 'stable' across .blend
file load & save (i.e. it does not change due to pointer address variations).
"""
# TODO This implementation is most likely far from optimal... and Adler-32 (what is actually used below) is not
# renowned as the best hashing algo either. But for now it does the job!
import zlib
def _is_pointer(self, k):
return self.file.structs[self.sdna_index].field_from_path(
self.file.header, self.file.handle, k).dna_name.is_pointer
hsh = 1
for k, v in self.items_recursive_iter():
if not _is_pointer(self, k):
hsh = zlib.adler32(str(v).encode(), hsh)
return hsh
def set(self, path, value,
sdna_index_refine=None,
):
if sdna_index_refine is None:
sdna_index_refine = self.sdna_index
else:
self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
dna_struct = self.file.structs[sdna_index_refine]
self.file.handle.seek(self.file_offset, os.SEEK_SET)
self.file.is_modified = True
return dna_struct.field_set(
self.file.header, self.file.handle, path, value)
# ---------------
# Utility get/set
#
# avoid inline pointer casting
def get_pointer(
self, path,
default=...,
sdna_index_refine=None,
base_index=0,
):
if sdna_index_refine is None:
sdna_index_refine = self.sdna_index
result = self.get(path, default, sdna_index_refine=sdna_index_refine, base_index=base_index)
# default
if type(result) is not int:
return result
assert(self.file.structs[sdna_index_refine].field_from_path(
self.file.header, self.file.handle, path).dna_name.is_pointer)
if result != 0:
# possible (but unlikely)
# that this fails and returns None
# maybe we want to raise some exception in this case
return self.file.find_block_from_offset(result)
else:
return None
# ----------------------
# Python convenience API
# dict like access
def __getitem__(self, item):
return self.get(item, use_str=False)
def __setitem__(self, item, value):
self.set(item, value)
def keys(self):
return (f.dna_name.name_only for f in self.dna_type.fields)
def values(self):
for k in self.keys():
try:
yield self[k]
except NotImplementedError as ex:
msg, dna_name, dna_type = ex.args
yield "<%s>" % dna_type.dna_type_id.decode('ascii')
def items(self):
for k in self.keys():
try:
yield (k, self[k])
except NotImplementedError as ex:
msg, dna_name, dna_type = ex.args
yield (k, "<%s>" % dna_type.dna_type_id.decode('ascii'))
# -----------------------------------------------------------------------------
# Read Magic
#
# magic = str
# pointer_size = int
# is_little_endian = bool
# version = int
class BlendFileHeader:
"""
BlendFileHeader parses the first 12 bytes of a blend file;
it contains information about the hardware architecture
"""
__slots__ = (
# str
"magic",
# int 4/8
"pointer_size",
# bool
"is_little_endian",
# int
"version",
# str, used to pass to 'struct'
"endian_str",
# int, used to index common types
"endian_index",
)
def __init__(self, handle):
FILEHEADER = struct.Struct(b'7s1s1s3s')
log.debug("reading blend-file-header")
values = FILEHEADER.unpack(handle.read(FILEHEADER.size))
self.magic = values[0]
pointer_size_id = values[1]
if pointer_size_id == b'-':
self.pointer_size = 8
elif pointer_size_id == b'_':
self.pointer_size = 4
else:
assert(0)
endian_id = values[2]
if endian_id == b'v':
self.is_little_endian = True
self.endian_str = b'<'
self.endian_index = 0
elif endian_id == b'V':
self.is_little_endian = False
self.endian_index = 1
self.endian_str = b'>'
else:
assert(0)
version_id = values[3]
self.version = int(version_id)
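# e.g. a header of b'BLENDER-v279' yields pointer_size=8,
# is_little_endian=True, version=279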
def create_block_header_struct(self):
return struct.Struct(b''.join((
self.endian_str,
b'4sI',
b'I' if self.pointer_size == 4 else b'Q',
b'II',
)))
class DNAName:
"""
DNAName is a C-type name stored in the DNA
"""
__slots__ = (
"name_full",
"name_only",
"is_pointer",
"is_method_pointer",
"array_size",
)
def __init__(self, name_full):
self.name_full = name_full
self.name_only = self.calc_name_only()
self.is_pointer = self.calc_is_pointer()
self.is_method_pointer = self.calc_is_method_pointer()
self.array_size = self.calc_array_size()
def __repr__(self):
return '%s(%r)' % (type(self).__qualname__, self.name_full)
def as_reference(self, parent):
if parent is None:
result = b''
else:
result = parent + b'.'
result = result + self.name_only
return result
def calc_name_only(self):
result = self.name_full.strip(b'*()')
index = result.find(b'[')
if index != -1:
result = result[:index]
return result
def calc_is_pointer(self):
return (b'*' in self.name_full)
def calc_is_method_pointer(self):
return (b'(*' in self.name_full)
def calc_array_size(self):
result = 1
temp = self.name_full
index = temp.find(b'[')
while index != -1:
index_2 = temp.find(b']')
result *= int(temp[index + 1:index_2])
temp = temp[index_2 + 1:]
index = temp.find(b'[')
return result
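# e.g. DNAName(b'*mat[4][4]') gives name_only == b'mat',
# is_pointer == True and array_size == 16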
class DNAField:
"""
DNAField couples a DNAStruct and a DNAName,
and caches the offset for reuse
"""
__slots__ = (
# DNAName
"dna_name",
# tuple of 3 items
# [bytes (struct name), int (struct size), DNAStruct]
"dna_type",
# size on-disk
"dna_size",
# cached info (avoid looping over fields each time)
"dna_offset",
)
def __init__(self, dna_type, dna_name, dna_size, dna_offset):
self.dna_type = dna_type
self.dna_name = dna_name
self.dna_size = dna_size
self.dna_offset = dna_offset
class DNAStruct:
"""
DNAStruct is a C-type structure stored in the DNA
"""
__slots__ = (
"dna_type_id",
"size",
"fields",
"field_from_name",
"user_data",
)
def __init__(self, dna_type_id):
self.dna_type_id = dna_type_id
self.fields = []
self.field_from_name = {}
self.user_data = None
def __repr__(self):
return '%s(%r)' % (type(self).__qualname__, self.dna_type_id)
def field_from_path(self, header, handle, path):
"""
Support lookups as bytes or a tuple of bytes and optional index.
C style 'id.name' --> (b'id', b'name')
C style 'array[4]' --> ('array', 4)
"""
if type(path) is tuple:
name = path[0]
if len(path) >= 2 and type(path[1]) is not bytes:
name_tail = path[2:]
index = path[1]
assert(type(index) is int)
else:
name_tail = path[1:]
index = 0
else:
name = path
name_tail = None
index = 0
assert(type(name) is bytes)
field = self.field_from_name.get(name)
if field is not None:
handle.seek(field.dna_offset, os.SEEK_CUR)
if index != 0:
if field.dna_name.is_pointer:
index_offset = header.pointer_size * index
else:
index_offset = field.dna_type.size * index
assert(index_offset < field.dna_size)
handle.seek(index_offset, os.SEEK_CUR)
if not name_tail: # None or ()
return field
else:
return field.dna_type.field_from_path(header, handle, name_tail)
def field_get(self, header, handle, path,
default=...,
use_nil=True, use_str=True,
):
field = self.field_from_path(header, handle, path)
if field is None:
if default is not ...:
return default
else:
raise KeyError("%r not found in %r (%r)" %
(path, [f.dna_name.name_only for f in self.fields], self.dna_type_id))
dna_type = field.dna_type
dna_name = field.dna_name
dna_size = field.dna_size
if dna_name.is_pointer:
return DNA_IO.read_pointer(handle, header)
elif dna_type.dna_type_id == b'int':
if dna_name.array_size > 1:
return [DNA_IO.read_int(handle, header) for i in range(dna_name.array_size)]
return DNA_IO.read_int(handle, header)
elif dna_type.dna_type_id == b'short':
if dna_name.array_size > 1:
return [DNA_IO.read_short(handle, header) for i in range(dna_name.array_size)]
return DNA_IO.read_short(handle, header)
elif dna_type.dna_type_id == b'uint64_t':
if dna_name.array_size > 1:
return [DNA_IO.read_ulong(handle, header) for i in range(dna_name.array_size)]
return DNA_IO.read_ulong(handle, header)
elif dna_type.dna_type_id == b'float':
if dna_name.array_size > 1:
return [DNA_IO.read_float(handle, header) for i in range(dna_name.array_size)]
return DNA_IO.read_float(handle, header)
elif dna_type.dna_type_id == b'char':
if dna_size == 1:
# Single char, assume it's bitflag or int value, and not a string/bytes data...
return DNA_IO.read_char(handle, header)
if use_str:
if use_nil:
return DNA_IO.read_string0(handle, dna_name.array_size)
else:
return DNA_IO.read_string(handle, dna_name.array_size)
else:
if use_nil:
return DNA_IO.read_bytes0(handle, dna_name.array_size)
else:
return DNA_IO.read_bytes(handle, dna_name.array_size)
else:
raise NotImplementedError("%r exists but isn't pointer, can't resolve field %r" %
(path, dna_name.name_only), dna_name, dna_type)
def field_set(self, header, handle, path, value):
assert(type(path) == bytes)
field = self.field_from_path(header, handle, path)
if field is None:
raise KeyError("%r not found in %r" %
(path, [f.dna_name.name_only for f in self.fields]))
dna_type = field.dna_type
dna_name = field.dna_name
if dna_type.dna_type_id == b'char':
if type(value) is str:
return DNA_IO.write_string(handle, value, dna_name.array_size)
else:
return DNA_IO.write_bytes(handle, value, dna_name.array_size)
else:
raise NotImplementedError("Setting %r is not yet supported for %r" %
(dna_type, dna_name), dna_name, dna_type)
class DNA_IO:
"""
Module like class, for read-write utility functions.
Only stores static methods & constants.
"""
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def write_string(handle, astring, fieldlen):
assert(isinstance(astring, str))
if len(astring) >= fieldlen:
stringw = astring[0:fieldlen]
else:
stringw = astring + '\0'
handle.write(stringw.encode('utf-8'))
@staticmethod
def write_bytes(handle, astring, fieldlen):
assert(isinstance(astring, (bytes, bytearray)))
if len(astring) >= fieldlen:
stringw = astring[0:fieldlen]
else:
stringw = astring + b'\0'
handle.write(stringw)
@staticmethod
def read_bytes(handle, length):
data = handle.read(length)
return data
@staticmethod
def read_bytes0(handle, length):
data = handle.read(length)
return DNA_IO.read_data0(data)
@staticmethod
def read_string(handle, length):
return DNA_IO.read_bytes(handle, length).decode('utf-8')
@staticmethod
def read_string0(handle, length):
return DNA_IO.read_bytes0(handle, length).decode('utf-8')
@staticmethod
def read_data0_offset(data, offset):
add = data.find(b'\0', offset) - offset
return data[offset:offset + add]
@staticmethod
def read_data0(data):
add = data.find(b'\0')
return data[:add]
UCHAR = struct.Struct(b'<b'), struct.Struct(b'>b')
@staticmethod
def read_char(handle, fileheader):
st = DNA_IO.UCHAR[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
USHORT = struct.Struct(b'<H'), struct.Struct(b'>H')
@staticmethod
def read_ushort(handle, fileheader):
st = DNA_IO.USHORT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
SSHORT = struct.Struct(b'<h'), struct.Struct(b'>h')
@staticmethod
def read_short(handle, fileheader):
st = DNA_IO.SSHORT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
UINT = struct.Struct(b'<I'), struct.Struct(b'>I')
@staticmethod
def read_uint(handle, fileheader):
st = DNA_IO.UINT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
SINT = struct.Struct(b'<i'), struct.Struct(b'>i')
@staticmethod
def read_int(handle, fileheader):
st = DNA_IO.SINT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
FLOAT = struct.Struct(b'<f'), struct.Struct(b'>f')
@staticmethod
def read_float(handle, fileheader):
st = DNA_IO.FLOAT[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
ULONG = struct.Struct(b'<Q'), struct.Struct(b'>Q')
@staticmethod
def read_ulong(handle, fileheader):
st = DNA_IO.ULONG[fileheader.endian_index]
return st.unpack(handle.read(st.size))[0]
@staticmethod
def read_pointer(handle, header):
"""
reads a pointer from a file handle
the pointer size is given by the header (BlendFileHeader)
"""
if header.pointer_size == 4:
st = DNA_IO.UINT[header.endian_index]
return st.unpack(handle.read(st.size))[0]
if header.pointer_size == 8:
st = DNA_IO.ULONG[header.endian_index]
return st.unpack(handle.read(st.size))[0]

View File

@@ -0,0 +1,114 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
"""
A simple utility to copy blend files and their deps to a new location.
Similar to packing, but doesn't attempt any path remapping.
"""
from bam.blend import blendfile_path_walker
TIMEIT = False
# ------------------
# Ensure module path
import os
import sys
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "modules"))
if path not in sys.path:
sys.path.append(path)
del os, sys, path
# --------
def copy_paths(
paths,
output,
base,
# load every lib's deps, not just used deps.
all_deps=False,
# yield reports
report=None,
# Filename filter, allows excluding files from the pack;
# the function takes a string and returns True if the file should be included.
filename_filter=None,
):
import os
import shutil
from bam.utils.system import colorize, is_subdir
path_copy_files = set(paths)
# Avoid walking over same libs many times
lib_visit = {}
yield report("Reading %d blend file(s)\n" % len(paths))
for blendfile_src in paths:
yield report(" %s: %r\n" % (colorize("blend", color='blue'), blendfile_src))
for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
blendfile_src,
readonly=True,
recursive=True,
recursive_all=all_deps,
lib_visit=lib_visit,
):
f_abs = os.path.normpath(fp.filepath_absolute)
path_copy_files.add(f_abs)
# Source -> Dest Map
path_src_dst_map = {}
for path_src in sorted(path_copy_files):
if filename_filter and not filename_filter(path_src):
yield report(" %s: %r\n" % (colorize("exclude", color='yellow'), path_src))
continue
if not os.path.exists(path_src):
yield report(" %s: %r\n" % (colorize("missing path", color='red'), path_src))
continue
if not is_subdir(path_src, base):
yield report(" %s: %r\n" % (colorize("external path ignored", color='red'), path_src))
continue
path_rel = os.path.relpath(path_src, base)
path_dst = os.path.join(output, path_rel)
path_src_dst_map[path_src] = path_dst
# Create directories
path_dst_dir = {os.path.dirname(path_dst) for path_dst in path_src_dst_map.values()}
yield report("Creating %d directories in %r\n" % (len(path_dst_dir), output))
for path_dir in sorted(path_dst_dir):
os.makedirs(path_dir, exist_ok=True)
del path_dst_dir
# Copy files
yield report("Copying %d files to %r\n" % (len(path_src_dst_map), output))
for path_src, path_dst in sorted(path_src_dst_map.items()):
yield report(" %s: %r -> %r\n" % (colorize("copying", color='blue'), path_src, path_dst))
shutil.copy(path_src, path_dst)
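# Hypothetical driver: copy_paths() is a generator, so it only does work as
# its progress messages are consumed, e.g.:
#   import sys
#   for msg in copy_paths([b'/project/shot.blend'], b'/tmp/out', b'/project',
#                         report=lambda s: s):
#       sys.stdout.write(msg)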

View File

@@ -0,0 +1,673 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
import os
import sys
import shutil
from bam.blend import blendfile_path_walker
TIMEIT = False
# ------------------
# Ensure module path
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "modules"))
if path not in sys.path:
sys.path.append(path)
del path
# --------
# ----------------------
# debug low level output
#
# ... when internals _really_ fail & we want to know why
def _dbg(text):
from bam.utils.system import colorize
if type(text) is bytes:
text = text.decode('utf-8')
sys.__stdout__.write(colorize(text, color='red') + "\n")
sys.__stdout__.flush()
def _relpath_remap(
path_src,
base_dir_src,
fp_basedir,
blendfile_src_dir_fakeroot=None,
):
if not os.path.isabs(path_src):
# Absolute win32 paths on a unix system
# cause bad issues!
if len(path_src) >= 2:
if path_src[0] != b'/'[0] and path_src[1] == b':'[0]:
pass
else:
raise Exception("Internal error 'path_src' -> %r must be absolute" % path_src)
path_src = os.path.normpath(path_src)
if os.name != "nt":
path_dst = os.path.relpath(path_src, base_dir_src)
else:
# exception for windows, we need to support mapping between drives
try:
path_dst = os.path.relpath(path_src, base_dir_src)
except ValueError:
# include the absolute path when the file is on a different drive.
path_dst = os.path.relpath(
os.path.join(base_dir_src, b'__' + path_src.replace(b':', b'\\')),
base_dir_src,
)
if blendfile_src_dir_fakeroot is None:
# /foo/../bar.png --> /foo/__/bar.png
path_dst = path_dst.replace(b'..', b'__')
path_dst = os.path.normpath(path_dst)
else:
if b'..' in path_dst:
# remap, relative to project root
# paths
path_dst = os.path.join(blendfile_src_dir_fakeroot, path_dst)
path_dst = os.path.normpath(path_dst)
# if there are paths outside the root still...
# This means they are outside the project directory. We don't support this,
# so name accordingly
if b'..' in path_dst:
# SHOULD NEVER HAPPEN
path_dst = path_dst.replace(b'..', b'__nonproject__')
path_dst = b'_' + path_dst
# _dbg(b"FINAL A: " + path_dst)
path_dst_final = os.path.join(os.path.relpath(base_dir_src, fp_basedir), path_dst)
path_dst_final = os.path.normpath(path_dst_final)
# _dbg(b"FINAL B: " + path_dst_final)
return path_dst, path_dst_final
def pack(
# store the blendfile relative to this directory, can be:
# os.path.dirname(blendfile_src)
# but in some cases we want to use a path higher up.
# base_dir_src,
blendfile_src, blendfile_dst,
# the path to the top directory of the project's repository.
# the packed archive will reproduce the exact same hierarchy below that base path
# if set to None, it defaults to the given blendfile_src's directory.
# especially useful when used together with the warn_remap_externals option.
repository_base_path=None,
# type of archive to produce (either ZIP or plain usual directory).
mode='ZIP',
# optionally pass in the temp dir
base_dir_dst_temp=None,
paths_remap_relbase=None,
deps_remap=None, paths_remap=None, paths_uuid=None,
# load every lib's deps, not just used deps.
all_deps=False,
compress_level=-1,
# yield reports
report=None,
# The project path, eg:
# /home/me/myproject/mysession/path/to/blend/file.blend
# the path would be: b'path/to/blend'
#
# This is needed so we can choose to store paths
# relative to project or relative to the current file.
#
# When None, _all_ paths are mapped relative to the current blend.
# converting: '../../bar' --> '__/__/bar'
# so all paths are nested and not moved outside the session path.
blendfile_src_dir_fakeroot=None,
# Read variations from json files.
use_variations=False,
# do _everything_ except writing the paths.
# useful if we want to calculate deps to remap but postpone applying them.
readonly=False,
# Warn when we find a dependency external to the given repository_base_path.
warn_remap_externals=False,
# dict of binary_edits:
# {file: [(ofs, bytes), ...], ...}
# ... where the file is the relative 'packed' location.
binary_edits=None,
# Filename filter, allows excluding files from the pack;
# the function takes a string and returns True if the file should be included.
filename_filter=None,
):
"""
:param deps_remap: Store path deps_remap info as follows.
{"file.blend": {"path_new": "path_old", ...}, ...}
:type deps_remap: dict or None
"""
# Internal details:
# - we copy to a temp path before operating on the blend file
# so we can modify in-place.
# - temp files are only created once, (if we never touched them before),
# this way, for linked libraries - a single blend file may be used
# multiple times, each access will apply new edits on top of the old ones.
# - we track which libs we have touched (using 'lib_visit' arg),
# this means that the same libs won't be touched many times to modify the same data,
# and also prevents cyclic loops from crashing.
if sys.stdout.isatty():
from bam.utils.system import colorize
else:
from bam.utils.system import colorize_dummy as colorize
assert isinstance(blendfile_src, bytes)
assert isinstance(blendfile_dst, bytes)
# in case this is directly from the command line or user-input
blendfile_src = os.path.normpath(os.path.abspath(blendfile_src))
blendfile_dst = os.path.normpath(os.path.abspath(blendfile_dst))
assert blendfile_src != blendfile_dst
# first check args are OK
# fakeroot _can't_ start with a separator, since we prepend chars to it.
if blendfile_src_dir_fakeroot is not None:
assert isinstance(blendfile_src_dir_fakeroot, bytes)
assert not blendfile_src_dir_fakeroot.startswith(os.sep.encode('ascii'))
path_temp_files = set()
path_copy_files = set()
# path_temp_files --> original-location
path_temp_files_orig = {}
TEMP_SUFFIX = b'@'
if report is None:
def report(msg):
return msg
yield report("%s: %r...\n" % (colorize("\nscanning deps", color='bright_green'), blendfile_src))
if TIMEIT:
import time
t = time.time()
base_dir_src = os.path.dirname(blendfile_src) if repository_base_path is None \
else os.path.normpath(os.path.abspath(repository_base_path))
base_dir_dst = os.path.dirname(blendfile_dst)
# _dbg(blendfile_src)
# _dbg(blendfile_dst)
assert base_dir_src != base_dir_dst
if base_dir_dst_temp is None:
# Always try to pack using a unique folder name.
import uuid
suf = 'temp' if mode == 'ZIP' else 'pack'
while True:
unique = uuid.uuid4().hex
name = '__blendfile_%s_%s__' % (unique, suf)
base_dir_dst_temp = os.path.join(base_dir_dst, name.encode('ascii'))
if not os.path.exists(base_dir_dst_temp):
break
def temp_remap_cb(filepath, rootdir):
"""
Create temp files in the destination path.
"""
filepath = blendfile_path_walker.utils.compatpath(filepath)
if use_variations:
if blendfile_levels_dict_curr:
filepath = blendfile_levels_dict_curr.get(filepath, filepath)
# ...
# first remap this blend file to the location it will end up (so we can get images relative to _that_)
# TODO(cam) cache the results
fp_basedir_conv = _relpath_remap(os.path.join(rootdir, b'dummy'), base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))
# then get the file relative to the new location
filepath_tmp = _relpath_remap(filepath, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)[0]
filepath_tmp = os.path.normpath(os.path.join(base_dir_dst_temp, filepath_tmp)) + TEMP_SUFFIX
# only overwrite once (so we can write into a path already containing files)
if filepath_tmp not in path_temp_files:
if mode != 'NONE':
import shutil
os.makedirs(os.path.dirname(filepath_tmp), exist_ok=True)
shutil.copy(filepath, filepath_tmp)
path_temp_files.add(filepath_tmp)
path_temp_files_orig[filepath_tmp] = filepath
if mode != 'NONE':
return filepath_tmp
else:
return filepath
# -----------------
# Variation Support
#
# Use a json file to allow recursive-remapping of variations.
#
# file_a.blend
# file_a.json '{"variations": ["tree.blue.blend", ...]}'
# file_a.blend -> file_b.blend
# file_b.blend --> tree.blend
#
# the variation of `file_a.blend` causes `file_b.blend`
# to link in `tree.blue.blend`
if use_variations:
blendfile_levels = []
blendfile_levels_dict = []
blendfile_levels_dict_curr = {}
def blendfile_levels_rebuild():
# after changing blend file configurations,
# re-create current variation lookup table
blendfile_levels_dict_curr.clear()
for d in blendfile_levels_dict:
if d is not None:
blendfile_levels_dict_curr.update(d)
# use variations!
def blendfile_level_cb_enter(filepath):
import json
filepath_json = os.path.splitext(filepath)[0] + b".json"
if os.path.exists(filepath_json):
with open(filepath_json, encoding='utf-8') as f_handle:
variations = [f.encode("utf-8") for f in json.load(f_handle).get("variations")]
# convert to absolute paths
basepath = os.path.dirname(filepath)
variations = {
# Reverse lookup, from non-variation to variation we specify in this file.
# {"/abs/path/foo.png": "/abs/path/foo.variation.png", ...}
# .. where the input _is_ the variation,
# we just make it absolute and use the non-variation as
# the key to the variation value.
b".".join(f.rsplit(b".", 2)[0::2]): f for f_ in variations
for f in (os.path.normpath(os.path.join(basepath, f_)),)
}
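# e.g. (hypothetical) a variation b'tree.blue.blend' is keyed by b'tree.blend':
#   b'tree.blue.blend'.rsplit(b'.', 2) -> [b'tree', b'blue', b'blend']
#   b'.'.join([b'tree', b'blend'])     -> b'tree.blend'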
else:
variations = None
blendfile_levels.append(filepath)
blendfile_levels_dict.append(variations)
if variations:
blendfile_levels_rebuild()
def blendfile_level_cb_exit(filepath):
blendfile_levels.pop()
blendfile_levels_dict.pop()
if blendfile_levels_dict_curr:
blendfile_levels_rebuild()
else:
blendfile_level_cb_enter = blendfile_level_cb_exit = None
blendfile_levels_dict_curr = None
lib_visit = {}
fp_blend_basename_last = b''
for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
blendfile_src,
readonly=readonly,
temp_remap_cb=temp_remap_cb,
recursive=True,
recursive_all=all_deps,
lib_visit=lib_visit,
blendfile_level_cb=(
blendfile_level_cb_enter,
blendfile_level_cb_exit,
)
):
# we could pass this in!
fp_blend = os.path.join(fp.basedir, fp_blend_basename)
if fp_blend_basename_last != fp_blend_basename:
yield report(" %s: %r\n" % (colorize("blend", color='blue'), fp_blend))
fp_blend_basename_last = fp_blend_basename
if binary_edits is not None:
# TODO, temp_remap_cb makes paths, this isn't ideal,
# in this case we only want to remap!
if mode == 'NONE':
tmp = temp_remap_cb(fp_blend, base_dir_src)
tmp = os.path.relpath(tmp, base_dir_src)
else:
tmp = temp_remap_cb(fp_blend, base_dir_src)
tmp = os.path.relpath(tmp[:-len(TEMP_SUFFIX)], base_dir_dst_temp)
binary_edits_curr = binary_edits.setdefault(tmp, [])
del tmp
# assume the path might be relative
path_src_orig = fp.filepath
path_rel = blendfile_path_walker.utils.compatpath(path_src_orig)
path_src = blendfile_path_walker.utils.abspath(path_rel, fp.basedir)
path_src = os.path.normpath(path_src)
if warn_remap_externals and b".." in os.path.relpath(path_src, base_dir_src):
yield report(" %s: %r\n" % (colorize("non-local", color='bright_yellow'), path_src))
if filename_filter and not filename_filter(path_src):
yield report(" %s: %r\n" % (colorize("exclude", color='yellow'), path_src))
continue
# apply variation (if available)
if use_variations:
if blendfile_levels_dict_curr:
path_src_variation = blendfile_levels_dict_curr.get(path_src)
if path_src_variation is not None:
path_src = path_src_variation
path_rel = os.path.join(os.path.dirname(path_rel), os.path.basename(path_src))
del path_src_variation
# destination path relative to the root
# assert(b'..' not in path_src)
assert(b'..' not in base_dir_src)
# first remap this blend file to the location it will end up (so we can get images relative to _that_)
# TODO(cam) cache the results
fp_basedir_conv = _relpath_remap(fp_blend, base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))
# then get the file relative to the new location
path_dst, path_dst_final = _relpath_remap(path_src, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)
path_dst = os.path.join(base_dir_dst, path_dst)
path_dst_final = b'//' + path_dst_final
# Assign direct or add to edit-list (to apply later)
if not readonly:
fp.filepath = path_dst_final
if binary_edits is not None:
fp.filepath_assign_edits(path_dst_final, binary_edits_curr)
# add to copy-list
# never copy libs (handled separately)
if not isinstance(fp, blendfile_path_walker.FPElem_block_path) or fp.userdata[0].code != b'LI':
assert path_src != path_dst
path_copy_files.add((path_src, path_dst))
for file_list in (
blendfile_path_walker.utils.find_sequence_paths(path_src) if fp.is_sequence else (),
fp.files_siblings(),
):
_src_dir = os.path.dirname(path_src)
_dst_dir = os.path.dirname(path_dst)
path_copy_files.update(
{(os.path.join(_src_dir, f), os.path.join(_dst_dir, f))
for f in file_list
})
del _src_dir, _dst_dir
if deps_remap is not None:
# this needs to become JSON later... ugh, need to use strings
deps_remap.setdefault(
fp_blend_basename.decode('utf-8'),
{})[path_dst_final.decode('utf-8')] = path_src_orig.decode('utf-8')
del lib_visit, fp_blend_basename_last
if TIMEIT:
print(" Time: %.4f\n" % (time.time() - t))
yield report(("%s: %d files\n") %
(colorize("\narchiving", color='bright_green'), len(path_copy_files) + 1))
# handle deps_remap and file renaming
if deps_remap is not None:
blendfile_src_basename = os.path.basename(blendfile_src).decode('utf-8')
blendfile_dst_basename = os.path.basename(blendfile_dst).decode('utf-8')
if blendfile_src_basename != blendfile_dst_basename:
if mode == 'FILE':
deps_remap[blendfile_dst_basename] = deps_remap[blendfile_src_basename]
del deps_remap[blendfile_src_basename]
del blendfile_src_basename, blendfile_dst_basename
# store path mapping {dst: src}
if paths_remap is not None:
if paths_remap_relbase is not None:
def relbase(fn):
return os.path.relpath(fn, paths_remap_relbase)
else:
def relbase(fn):
return fn
for src, dst in path_copy_files:
# TODO. relative to project-basepath
paths_remap[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = relbase(src).decode('utf-8')
# main file XXX, should have better way!
paths_remap[os.path.basename(blendfile_src).decode('utf-8')] = relbase(blendfile_src).decode('utf-8')
# blend libs
for dst in path_temp_files:
src = path_temp_files_orig[dst]
k = os.path.relpath(dst[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8')
paths_remap[k] = relbase(src).decode('utf-8')
del k
del relbase
if paths_uuid is not None:
from bam.utils.system import uuid_from_file
for src, dst in path_copy_files:
# reports are handled again, later on.
if os.path.exists(src):
paths_uuid[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = uuid_from_file(src)
# XXX, better way to store temp target
blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)
paths_uuid[os.path.basename(blendfile_src).decode('utf-8')] = uuid_from_file(blendfile_dst_tmp)
# blend libs
for dst in path_temp_files:
k = os.path.relpath(dst[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8')
if k not in paths_uuid:
if mode == 'NONE':
dst = path_temp_files_orig[dst]
paths_uuid[k] = uuid_from_file(dst)
del k
del blendfile_dst_tmp
del uuid_from_file
# --------------------
# Handle File Copy/Zip
if mode == 'FILE':
blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)
shutil.move(blendfile_dst_tmp, blendfile_dst)
path_temp_files.remove(blendfile_dst_tmp)
# strip TEMP_SUFFIX and move to the destination directory.
for fn in path_temp_files:
dst_rel, _ = _relpath_remap(fn[:-len(TEMP_SUFFIX)], base_dir_dst_temp, base_dir_dst, None)
dst = os.path.join(base_dir_dst, dst_rel)
yield report(" %s: %r -> %r\n" % (colorize("moving", color='blue'), fn, dst))
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.move(fn, dst)
for src, dst in path_copy_files:
assert(b'.blend' not in dst)
assert src != dst
# in rare cases a filepath could point to a directory
if (not os.path.exists(src)) or os.path.isdir(src):
yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
else:
yield report(" %s: %r -> %r\n" % (colorize("copying", color='blue'), src, dst))
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copy(src, dst)
shutil.rmtree(base_dir_dst_temp)
yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
elif mode == 'ZIP':
import zipfile
# not awesome!
import zlib
assert(compress_level in range(-1, 10))
_compress_level_orig = zlib.Z_DEFAULT_COMPRESSION
zlib.Z_DEFAULT_COMPRESSION = compress_level
_compress_mode = zipfile.ZIP_STORED if (compress_level == 0) else zipfile.ZIP_DEFLATED
if _compress_mode == zipfile.ZIP_STORED:
def is_compressed_filetype(fn):
return False
else:
from bam.utils.system import is_compressed_filetype
with zipfile.ZipFile(blendfile_dst.decode('utf-8'), 'w', _compress_mode) as zip_handle:
for fn in path_temp_files:
yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), fn))
zip_handle.write(
fn.decode('utf-8'),
arcname=os.path.relpath(fn[:-1], base_dir_dst_temp).decode('utf-8'),
)
os.remove(fn)
shutil.rmtree(base_dir_dst_temp)
for src, dst in path_copy_files:
assert(not dst.endswith(b'.blend'))
# in rare cases a filepath could point to a directory
if (not os.path.exists(src)) or os.path.isdir(src):
yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
else:
yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), src))
zip_handle.write(
src.decode('utf-8'),
arcname=os.path.relpath(dst, base_dir_dst).decode('utf-8'),
compress_type=zipfile.ZIP_STORED if is_compressed_filetype(dst) else _compress_mode,
)
zlib.Z_DEFAULT_COMPRESSION = _compress_level_orig
del _compress_level_orig, _compress_mode
yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
elif mode == 'NONE':
pass
else:
raise Exception("%s not a known mode" % mode)
def create_argparse():
import argparse
parser = argparse.ArgumentParser(
description="Run this script to extract blend-files(s) and their dependencies "
"to a destination path.")
# for main_render() only, but validate args.
parser.add_argument(
"-i", "--input", dest="path_src", metavar='FILE', required=True,
help="Input path(s) or a wildcard to glob many files",
)
parser.add_argument(
"-e", "--exclude", dest="exclude", metavar='PATTERN', required=False,
help='Exclusion pattern, such as "*.abc;*.mov;*.mkv"')
parser.add_argument(
"-o", "--output", dest="path_dst", metavar='DIR', required=True,
help="Output file (must be a .blend for --mode=FILE or a .zip when --mode=ZIP), "
"or a directory when multiple inputs are passed",
)
parser.add_argument(
"-m", "--mode", dest="mode", metavar='MODE', required=False,
choices=('FILE', 'ZIP'), default='ZIP',
help="FILE copies the blend file(s) + dependencies to a directory, ZIP to an archive.",
)
parser.add_argument(
"-q", "--quiet", dest="use_quiet", action='store_true', required=False,
help="Suppress status output",
)
parser.add_argument(
"-t", "--temp", dest="temp_path", metavar='DIR', required=False,
help="Temporary directory to use. When not supplied, a unique directory is used.",
)
return parser
def exclusion_filter(exclude: str):
"""Converts a filter string "*.abc;*.def" to a function that can be passed to pack().
If 'exclude' is None or an empty string, returns None (which means "no filtering").
"""
if not exclude:
return None
import re
import fnmatch
# convert string into regex callback that operates on bytes
# "*.txt;*.png;*.rst" --> rb".*\.txt$|.*\.png$|.*\.rst$"
pattern = b'|'.join(fnmatch.translate(f).encode('utf-8')
for f in exclude.split(';')
if f)
compiled_pattern = re.compile(pattern, re.IGNORECASE)
def filename_filter(fname: bytes):
return not compiled_pattern.match(fname)
return filename_filter
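# e.g. (hypothetical values):
#   flt = exclusion_filter("*.mov;*.mkv")
#   flt(b'/shots/010/anim.mov')   -> False  (excluded from the pack)
#   flt(b'/shots/010/anim.blend') -> True   (included)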
def main():
parser = create_argparse()
args = parser.parse_args()
if args.use_quiet:
def report(msg):
pass
else:
def report(msg):
sys.stdout.write(msg)
sys.stdout.flush()
for msg in pack(
args.path_src.encode('utf8'),
args.path_dst.encode('utf8'),
mode=args.mode,
base_dir_dst_temp=args.temp_path,
filename_filter=exclusion_filter(args.exclude),
):
report(msg)
if __name__ == "__main__":
main()
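# Example invocation via the bundled wheel, matching the README above
# (hypothetical paths):
#   PYTHONPATH=./blender_bam-xxx.whl python3 -m bam.pack \
#       --input=/project/shot.blend --output=/tmp/shot.zip --mode=ZIP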

View File

@@ -0,0 +1,143 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
"""
This script takes blend-file(s) and remaps their paths to the original locations.
(needed for uploading to the server)
"""
VERBOSE = 1
from bam.blend import blendfile_path_walker
def blendfile_remap(
blendfile_src, blendpath_dst,
deps_remap=None, deps_remap_cb=None,
deps_remap_cb_userdata=None,
):
import os
def temp_remap_cb(filepath, level):
"""
Simply point to the output dir.
"""
basename = os.path.basename(blendfile_src)
filepath_tmp = os.path.join(blendpath_dst, basename)
# ideally we could avoid copying _ALL_ blends
# TODO(cam)
import shutil
shutil.copy(filepath, filepath_tmp)
return filepath_tmp
for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
blendfile_src,
readonly=False,
temp_remap_cb=temp_remap_cb,
recursive=False,
):
# path_dst_final - current path in blend.
# path_src_orig - original path from JSON.
path_dst_final_b = fp.filepath
# support 2 modes, callback or dictionary
if deps_remap_cb is not None:
path_src_orig = deps_remap_cb(path_dst_final_b, deps_remap_cb_userdata)
if path_src_orig is not None:
fp.filepath = path_src_orig
if VERBOSE:
print(" Remapping:", path_dst_final_b, "->", path_src_orig)
else:
path_dst_final = path_dst_final_b.decode('utf-8')
path_src_orig = deps_remap.get(path_dst_final)
if path_src_orig is not None:
fp.filepath = path_src_orig.encode('utf-8')
if VERBOSE:
print(" Remapping:", path_dst_final, "->", path_src_orig)
def pack_restore(blendfile_dir_src, blendfile_dir_dst, pathmap):
import os
for dirpath, dirnames, filenames in os.walk(blendfile_dir_src):
if dirpath.startswith(b"."):
continue
for filename in filenames:
if os.path.splitext(filename)[1].lower() == b".blend":
remap = pathmap.get(filename.decode('utf-8'))
if remap is not None:
filepath = os.path.join(dirpath, filename)
# main function call
blendfile_remap(filepath, blendfile_dir_dst, remap)
def create_argparse():
import os
import argparse
usage_text = (
"Run this script to remap blend-file(s) paths using a JSON file created by 'packer.py':" +
os.path.basename(__file__) +
"--input=DIR --remap=JSON [options]")
parser = argparse.ArgumentParser(description=usage_text)
# for main_render() only, but validate args.
parser.add_argument(
"-i", "--input", dest="path_src", metavar='DIR', required=True,
help="Input path(s) or a wildcard to glob many files")
parser.add_argument(
"-o", "--output", dest="path_dst", metavar='DIR', required=True,
help="Output directory ")
parser.add_argument(
"-r", "--deps_remap", dest="deps_remap", metavar='JSON', required=True,
help="JSON file containing the path remapping info")
return parser
def main():
import sys
import json
parser = create_argparse()
args = parser.parse_args(sys.argv[1:])
encoding = sys.getfilesystemencoding()
with open(args.deps_remap, 'r', encoding='utf-8') as f:
pathmap = json.load(f)
pack_restore(
args.path_src.encode(encoding),
args.path_dst.encode(encoding),
pathmap,
)
if __name__ == "__main__":
main()
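# Shape of the JSON file passed via --deps_remap (hypothetical example);
# it maps each blend file's current paths back to their original values:
#   {"shot.blend": {"//textures/wood.png": "//../lib/textures/wood.png"}}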

View File

@@ -0,0 +1,280 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
"""
Module for remapping paths from one directory to another.
"""
import os
# ----------------------------------------------------------------------------
# private utility functions
def _is_blend(f):
return f.lower().endswith(b'.blend')
def _warn__ascii(msg):
print(" warning: %s" % msg)
def _info__ascii(msg):
print(msg)
def _warn__json(msg):
import json
print(json.dumps(("warning", msg)), end=",\n")
def _info__json(msg):
import json
print(json.dumps(("info", msg)), end=",\n")
def _uuid_from_file(fn, block_size=1 << 20):
with open(fn, 'rb') as f:
# first get the size
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0, os.SEEK_SET)
# done!
import hashlib
sha1 = hashlib.new('sha512')
while True:
data = f.read(block_size)
if not data:
break
sha1.update(data)
return (hex(size)[2:] + sha1.hexdigest()).encode()
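# note: despite the 'sha1' variable name above, this is a SHA-512 digest;
# prefixing the file size keeps the identifier cheap to compare and makes
# collisions even less likely.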
def _iter_files(paths, check_ext=None):
# note, sorting isn't needed
# just gives predictable output
for p in paths:
p = os.path.abspath(p)
for dirpath, dirnames, filenames in sorted(os.walk(p)):
# skip '.svn'
if dirpath.startswith(b'.') and dirpath != b'.':
continue
for filename in sorted(filenames):
if check_ext is None or check_ext(filename):
filepath = os.path.join(dirpath, filename)
yield filepath
# ----------------------------------------------------------------------------
# Public Functions
def start(
paths,
is_quiet=False,
dry_run=False,
use_json=False,
):
if use_json:
warn = _warn__json
info = _info__json
else:
warn = _warn__ascii
info = _info__ascii
if use_json:
print("[")
# {(sha1, length): "filepath"}
remap_uuid = {}
# relative paths which don't exist,
# don't complain when they're missing on remap.
# {f_src: [relative path deps, ...]}
remap_lost = {}
# all files we need to map
# absolute paths
files_to_map = set()
# TODO, validate paths aren't nested! ["/foo", "/foo/bar"]
# it will cause problems touching files twice!
# ------------------------------------------------------------------------
# First walk over all blends
from bam.blend import blendfile_path_walker
for blendfile_src in _iter_files(paths, check_ext=_is_blend):
if not is_quiet:
info("blend read: %r" % blendfile_src)
remap_lost[blendfile_src] = remap_lost_blendfile_src = set()
for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
blendfile_src,
readonly=True,
recursive=False,
):
# TODO. warn when referencing files outside 'paths'
# so we can update the reference
f_abs = fp.filepath_absolute
f_abs = os.path.normpath(f_abs)
if os.path.exists(f_abs):
files_to_map.add(f_abs)
else:
if not is_quiet:
warn("file %r not found!" % f_abs)
# don't complain about this file being missing on remap
remap_lost_blendfile_src.add(fp.filepath)
        # so we can know where it's moved to
files_to_map.add(blendfile_src)
del blendfile_path_walker
# ------------------------------------------------------------------------
# Store UUID
#
# note, sorting is only to give predictable warnings/behavior
for f in sorted(files_to_map):
f_uuid = _uuid_from_file(f)
f_match = remap_uuid.get(f_uuid)
if f_match is not None:
if not is_quiet:
warn("duplicate file found! (%r, %r)" % (f_match, f))
remap_uuid[f_uuid] = f
# now find all deps
remap_data_args = (
remap_uuid,
remap_lost,
)
if use_json:
if not remap_uuid:
print("\"nothing to remap!\"")
else:
print("\"complete\"")
print("]")
else:
if not remap_uuid:
print("Nothing to remap!")
return remap_data_args
def finish(
paths, remap_data_args,
is_quiet=False,
force_relative=False,
dry_run=False,
use_json=False,
):
if use_json:
warn = _warn__json
info = _info__json
else:
warn = _warn__ascii
info = _info__ascii
if use_json:
print("[")
(remap_uuid,
remap_lost,
) = remap_data_args
remap_src_to_dst = {}
remap_dst_to_src = {}
for f_dst in _iter_files(paths):
f_uuid = _uuid_from_file(f_dst)
f_src = remap_uuid.get(f_uuid)
if f_src is not None:
remap_src_to_dst[f_src] = f_dst
remap_dst_to_src[f_dst] = f_src
# now the fun begins, remap _all_ paths
from bam.blend import blendfile_path_walker
for blendfile_dst in _iter_files(paths, check_ext=_is_blend):
blendfile_src = remap_dst_to_src.get(blendfile_dst)
if blendfile_src is None:
if not is_quiet:
warn("new blendfile added since beginning 'remap': %r" % blendfile_dst)
continue
# not essential, just so we can give more meaningful errors
remap_lost_blendfile_src = remap_lost[blendfile_src]
if not is_quiet:
info("blend write: %r -> %r" % (blendfile_src, blendfile_dst))
blendfile_src_basedir = os.path.dirname(blendfile_src)
blendfile_dst_basedir = os.path.dirname(blendfile_dst)
for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
blendfile_dst,
readonly=False,
recursive=False,
):
# TODO. warn when referencing files outside 'paths'
# so we can update the reference
f_src_orig = fp.filepath
if f_src_orig in remap_lost_blendfile_src:
# this file never existed, so we can't remap it
continue
is_relative = f_src_orig.startswith(b'//')
if is_relative:
f_src_abs = fp.filepath_absolute_resolve(basedir=blendfile_src_basedir)
else:
f_src_abs = f_src_orig
f_src_abs = os.path.normpath(f_src_abs)
f_dst_abs = remap_src_to_dst.get(f_src_abs)
if f_dst_abs is None:
if not is_quiet:
warn("file %r not found in map!" % f_src_abs)
continue
# now remap!
if is_relative or force_relative:
f_dst_final = b'//' + os.path.relpath(f_dst_abs, blendfile_dst_basedir)
else:
f_dst_final = f_dst_abs
if f_dst_final != f_src_orig:
if not dry_run:
fp.filepath = f_dst_final
if not is_quiet:
info("remap %r -> %r" % (f_src_abs, f_dst_abs))
del blendfile_path_walker
if use_json:
print("\"complete\"\n]")

View File

@ -0,0 +1,953 @@
#!/usr/bin/env python3
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
import os
import logging
from . import blendfile
# gives problems with scripts that use stdout, e.g. when testing 'bam deps'.
DEBUG = False
VERBOSE = DEBUG or False # os.environ.get('BAM_VERBOSE', False)
TIMEIT = False
USE_ALEMBIC_BRANCH = True
class C_defs:
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
# DNA_sequence_types.h (Sequence.type)
SEQ_TYPE_IMAGE = 0
SEQ_TYPE_META = 1
SEQ_TYPE_SCENE = 2
SEQ_TYPE_MOVIE = 3
SEQ_TYPE_SOUND_RAM = 4
SEQ_TYPE_SOUND_HD = 5
SEQ_TYPE_MOVIECLIP = 6
SEQ_TYPE_MASK = 7
SEQ_TYPE_EFFECT = 8
IMA_SRC_FILE = 1
IMA_SRC_SEQUENCE = 2
IMA_SRC_MOVIE = 3
# DNA_modifier_types.h
eModifierType_MeshCache = 46
# DNA_particle_types.h
PART_DRAW_OB = 7
PART_DRAW_GR = 8
# DNA_object_types.h
# Object.transflag
OB_DUPLIGROUP = 1 << 8
if USE_ALEMBIC_BRANCH:
CACHE_LIBRARY_SOURCE_CACHE = 1
log_deps = logging.getLogger("path_walker")
log_deps.setLevel({
(True, True): logging.DEBUG,
(False, True): logging.INFO,
(False, False): logging.WARNING
}[DEBUG, VERBOSE])
if VERBOSE:
def set_as_str(s):
if s is None:
return "None"
return ", ".join(sorted(str(i) for i in s))
class FPElem:
"""
Tiny filepath class to hide blendfile.
"""
__slots__ = (
"basedir",
# library link level
"level",
        # True when this is a part of a sequence (image or movieclip)
"is_sequence",
"userdata",
)
def __init__(self, basedir, level,
# subclasses get/set functions should use
userdata):
self.basedir = basedir
self.level = level
self.is_sequence = False
# subclass must call
self.userdata = userdata
def files_siblings(self):
return ()
# --------
# filepath
def filepath_absolute_resolve(self, basedir=None):
"""
Resolve the filepath, with the option to override the basedir.
"""
filepath = self.filepath
if filepath.startswith(b'//'):
if basedir is None:
basedir = self.basedir
return os.path.normpath(os.path.join(
basedir,
utils.compatpath(filepath[2:]),
))
else:
return utils.compatpath(filepath)
def filepath_assign_edits(self, filepath, binary_edits):
self._set_cb_edits(filepath, binary_edits)
@staticmethod
def _filepath_assign_edits(block, path, filepath, binary_edits):
"""
Record the write to a separate entry (binary file-like object),
this lets us replay the edits later.
        (so we can replay them onto the client's local cache without a file transfer).
"""
import struct
assert(type(filepath) is bytes)
assert(type(path) is bytes)
ofs, size = block.get_file_offset(path)
        # ensure we don't write past the field size & allow for \0
        filepath = filepath[:size - 1]
binary_edits.append((ofs, filepath + b'\0'))
@property
def filepath(self):
return self._get_cb()
@filepath.setter
def filepath(self, filepath):
self._set_cb(filepath)
@property
def filepath_absolute(self):
return self.filepath_absolute_resolve()
class FPElem_block_path(FPElem):
"""
Simple block-path:
userdata = (block, path)
"""
__slots__ = ()
def _get_cb(self):
block, path = self.userdata
return block[path]
def _set_cb(self, filepath):
block, path = self.userdata
block[path] = filepath
def _set_cb_edits(self, filepath, binary_edits):
block, path = self.userdata
self._filepath_assign_edits(block, path, filepath, binary_edits)
class FPElem_sequence_single(FPElem):
"""
Movie sequence
userdata = (block, path, sub_block, sub_path)
"""
__slots__ = ()
def _get_cb(self):
block, path, sub_block, sub_path = self.userdata
return block[path] + sub_block[sub_path]
def _set_cb(self, filepath):
block, path, sub_block, sub_path = self.userdata
head, sep, tail = utils.splitpath(filepath)
block[path] = head + sep
sub_block[sub_path] = tail
def _set_cb_edits(self, filepath, binary_edits):
block, path, sub_block, sub_path = self.userdata
head, sep, tail = utils.splitpath(filepath)
self._filepath_assign_edits(block, path, head + sep, binary_edits)
self._filepath_assign_edits(sub_block, sub_path, tail, binary_edits)
class FPElem_sequence_image_seq(FPElem_sequence_single):
"""
Image sequence
userdata = (block, path, sub_block, sub_path)
"""
__slots__ = ()
def files_siblings(self):
block, path, sub_block, sub_path = self.userdata
array = block.get_pointer(b'stripdata')
files = [array.get(b'name', use_str=False, base_index=i) for i in range(array.count)]
return files
class FilePath:
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
# ------------------------------------------------------------------------
# Main function to visit paths
@staticmethod
def visit_from_blend(
filepath,
# never modify the blend
readonly=True,
# callback that creates a temp file and returns its path.
temp_remap_cb=None,
# recursive options
recursive=False,
# recurse all indirectly linked data
# (not just from the initially referenced blend file)
recursive_all=False,
# list of ID block names we want to load, or None to load all
block_codes=None,
# root when we're loading libs indirectly
rootdir=None,
level=0,
# dict of id's used so we don't follow these links again
# prevents cyclic references too!
# {lib_path: set([block id's ...])}
lib_visit=None,
# optional blendfile callbacks
# These callbacks run on enter-exit blend files
# so you can keep track of what file and level you're at.
blendfile_level_cb=(None, None),
):
# print(level, block_codes)
import os
filepath = os.path.abspath(filepath)
indent_str = " " * level
# print(indent_str + "Opening:", filepath)
# print(indent_str + "... blocks:", block_codes)
log = log_deps.getChild('visit_from_blend')
log.info("~")
log.info("%sOpening: %s", indent_str, filepath)
if VERBOSE:
log.info("%s blocks: %s", indent_str, set_as_str(block_codes))
blendfile_level_cb_enter, blendfile_level_cb_exit = blendfile_level_cb
if blendfile_level_cb_enter is not None:
blendfile_level_cb_enter(filepath)
basedir = os.path.dirname(filepath)
if rootdir is None:
rootdir = basedir
if lib_visit is None:
lib_visit = {}
if recursive and (level > 0) and (block_codes is not None) and (recursive_all is False):
            # prevent expanding the
            # same datablock more than once
            # note: we could *almost* use id_name, however this isn't unique for libraries.
expand_addr_visit = set()
# {lib_id: {block_ids... }}
expand_codes_idlib = {}
# libraries used by this blend
block_codes_idlib = set()
# XXX, checking 'block_codes' isn't 100% reliable,
# but at least don't touch the same blocks twice.
# whereas block_codes is intended to only operate on blocks we requested.
lib_block_codes_existing = lib_visit.setdefault(filepath, set())
# only for this block
def _expand_codes_add_test(block, code):
# return True, if the ID should be searched further
#
# we could investigate a better way...
                # not ideal to be accessing ID blocks at this point, but it's harmless
if code == b'ID':
assert(code == block.code)
if recursive:
expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
return False
else:
id_name = block[b'id', b'name']
# if we touched this already, don't touch again
# (else we may modify the same path multiple times)
#
# FIXME, works in some cases but not others
# keep, without this we get errors
# Gooseberry r668
# bam pack scenes/01_island/01_meet_franck/01_01_01_A/01_01_01_A.comp.blend
# gives strange errors
'''
if id_name not in block_codes:
return False
'''
# instead just don't operate on blocks multiple times
# ... rather than attempt to check on what we need or not.
len_prev = len(lib_block_codes_existing)
lib_block_codes_existing.add(id_name)
if len_prev == len(lib_block_codes_existing):
return False
len_prev = len(expand_addr_visit)
expand_addr_visit.add(block.addr_old)
return (len_prev != len(expand_addr_visit))
def block_expand(block, code):
assert(block.code == code)
if _expand_codes_add_test(block, code):
yield block
assert(block.code == code)
fn = ExpandID.expand_funcs.get(code)
if fn is not None:
for sub_block in fn(block):
if sub_block is not None:
yield from block_expand(sub_block, sub_block.code)
else:
if code == b'ID':
yield block
else:
expand_addr_visit = None
# set below
expand_codes_idlib = None
# never set
block_codes_idlib = None
def block_expand(block, code):
assert(block.code == code)
yield block
# ------
# Define
#
# - iter_blocks_id(code)
# - iter_blocks_idlib()
if block_codes is None:
def iter_blocks_id(code):
return blend.find_blocks_from_code(code)
def iter_blocks_idlib():
return blend.find_blocks_from_code(b'LI')
else:
def iter_blocks_id(code):
for block in blend.find_blocks_from_code(code):
if block[b'id', b'name'] in block_codes:
yield from block_expand(block, code)
if block_codes_idlib is not None:
def iter_blocks_idlib():
for block in blend.find_blocks_from_code(b'LI'):
                        # TODO, this should work but in fact makes some libs not link correctly.
if block[b'name'] in block_codes_idlib:
yield from block_expand(block, b'LI')
else:
def iter_blocks_idlib():
return blend.find_blocks_from_code(b'LI')
if temp_remap_cb is not None:
filepath_tmp = temp_remap_cb(filepath, rootdir)
else:
filepath_tmp = filepath
# store info to pass along with each iteration
extra_info = rootdir, os.path.basename(filepath)
with blendfile.open_blend(filepath_tmp, "rb" if readonly else "r+b") as blend:
for code in blend.code_index.keys():
# handle library blocks as special case
if ((len(code) != 2) or
(code in {
# libraries handled below
b'LI',
b'ID',
# unneeded
b'WM',
b'SN', # bScreen
})):
continue
# if VERBOSE:
# print(" Scanning", code)
for block in iter_blocks_id(code):
yield from FilePath.from_block(block, basedir, extra_info, level)
# print("A:", expand_addr_visit)
# print("B:", block_codes)
if VERBOSE:
log.info("%s expand_addr_visit=%s", indent_str, set_as_str(expand_addr_visit))
if recursive:
if expand_codes_idlib is None:
expand_codes_idlib = {}
for block in blend.find_blocks_from_code(b'ID'):
expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
# look into libraries
lib_all = []
for lib_id, lib_block_codes in sorted(expand_codes_idlib.items()):
lib = blend.find_block_from_offset(lib_id)
lib_path = lib[b'name']
# get all data needed to read the blend files here (it will be freed!)
                    # lib is an address at the moment, we only use it as a way to group
lib_all.append((lib_path, lib_block_codes))
# import IPython; IPython.embed()
# ensure we expand indirect linked libs
if block_codes_idlib is not None:
block_codes_idlib.add(lib_path)
            # do this after, in case we mangle names above
for block in iter_blocks_idlib():
yield from FilePath.from_block(block, basedir, extra_info, level)
del blend
# ----------------
# Handle Recursive
if recursive:
# now we've closed the file, loop on other files
# note, sorting - isn't needed, it just gives predictable load-order.
for lib_path, lib_block_codes in lib_all:
lib_path_abs = os.path.normpath(utils.compatpath(utils.abspath(lib_path, basedir)))
# if we visited this before,
# check we don't follow the same links more than once
lib_block_codes_existing = lib_visit.setdefault(lib_path_abs, set())
lib_block_codes -= lib_block_codes_existing
# don't touch them again
# XXX, this is now maintained in "_expand_generic_material"
# lib_block_codes_existing.update(lib_block_codes)
# print("looking for", lib_block_codes)
if not lib_block_codes:
if VERBOSE:
print((indent_str + " "), "Library Skipped (visited): ", filepath, " -> ", lib_path_abs, sep="")
continue
if not os.path.exists(lib_path_abs):
if VERBOSE:
print((indent_str + " "), "Library Missing: ", filepath, " -> ", lib_path_abs, sep="")
continue
# import IPython; IPython.embed()
if VERBOSE:
print((indent_str + " "), "Library: ", filepath, " -> ", lib_path_abs, sep="")
# print((indent_str + " "), lib_block_codes)
yield from FilePath.visit_from_blend(
lib_path_abs,
readonly=readonly,
temp_remap_cb=temp_remap_cb,
recursive=True,
block_codes=lib_block_codes,
rootdir=rootdir,
level=level + 1,
lib_visit=lib_visit,
blendfile_level_cb=blendfile_level_cb,
)
if blendfile_level_cb_exit is not None:
blendfile_level_cb_exit(filepath)
# ------------------------------------------------------------------------
# Direct filepaths from Blocks
#
# (no expanding or following references)
@staticmethod
def from_block(block: blendfile.BlendFileBlock, basedir, extra_info, level):
assert(block.code != b'DATA')
fn = FilePath._from_block_dict.get(block.code)
if fn is None:
return
yield from fn(block, basedir, extra_info, level)
@staticmethod
def _from_block_OB(block, basedir, extra_info, level):
# 'ob->modifiers[...].filepath'
for block_mod in bf_utils.iter_ListBase(
block.get_pointer((b'modifiers', b'first')),
next_item=(b'modifier', b'next')):
item_md_type = block_mod[b'modifier', b'type']
if item_md_type == C_defs.eModifierType_MeshCache:
yield FPElem_block_path(basedir, level, (block_mod, b'filepath')), extra_info
@staticmethod
def _from_block_MC(block, basedir, extra_info, level):
# TODO, image sequence
fp = FPElem_block_path(basedir, level, (block, b'name'))
fp.is_sequence = True
yield fp, extra_info
@staticmethod
def _from_block_IM(block, basedir, extra_info, level):
# old files miss this
image_source = block.get(b'source', C_defs.IMA_SRC_FILE)
if image_source not in {C_defs.IMA_SRC_FILE, C_defs.IMA_SRC_SEQUENCE, C_defs.IMA_SRC_MOVIE}:
return
if block[b'packedfile']:
return
fp = FPElem_block_path(basedir, level, (block, b'name'))
if image_source == C_defs.IMA_SRC_SEQUENCE:
fp.is_sequence = True
yield fp, extra_info
@staticmethod
def _from_block_VF(block, basedir, extra_info, level):
if block[b'packedfile']:
return
if block[b'name'] != b'<builtin>': # builtin font
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
@staticmethod
def _from_block_SO(block, basedir, extra_info, level):
if block[b'packedfile']:
return
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
@staticmethod
def _from_block_ME(block, basedir, extra_info, level):
block_external = block.get_pointer((b'ldata', b'external'), None)
if block_external is None:
block_external = block.get_pointer((b'fdata', b'external'), None)
if block_external is not None:
yield FPElem_block_path(basedir, level, (block_external, b'filename')), extra_info
if USE_ALEMBIC_BRANCH:
@staticmethod
def _from_block_CL(block, basedir, extra_info, level):
if block[b'source_mode'] == C_defs.CACHE_LIBRARY_SOURCE_CACHE:
yield FPElem_block_path(basedir, level, (block, b'input_filepath')), extra_info
@staticmethod
def _from_block_CF(block, basedir, extra_info, level):
yield FPElem_block_path(basedir, level, (block, b'filepath')), extra_info
@staticmethod
def _from_block_SC(block, basedir, extra_info, level):
block_ed = block.get_pointer(b'ed')
if block_ed is not None:
sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']
def seqbase(someseq):
for item in someseq:
item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)
if item_type >= C_defs.SEQ_TYPE_EFFECT:
pass
elif item_type == C_defs.SEQ_TYPE_META:
yield from seqbase(bf_utils.iter_ListBase(
item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))
else:
item_strip = item.get_pointer(b'strip', sdna_index_refine=sdna_index_Sequence)
if item_strip is None: # unlikely!
continue
item_stripdata = item_strip.get_pointer(b'stripdata')
if item_type == C_defs.SEQ_TYPE_IMAGE:
yield FPElem_sequence_image_seq(
basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
elif item_type in {C_defs.SEQ_TYPE_MOVIE, C_defs.SEQ_TYPE_SOUND_RAM, C_defs.SEQ_TYPE_SOUND_HD}:
yield FPElem_sequence_single(
basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
yield from seqbase(bf_utils.iter_ListBase(block_ed.get_pointer((b'seqbase', b'first'))))
@staticmethod
def _from_block_LI(block, basedir, extra_info, level):
if block.get(b'packedfile', None):
return
yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
# _from_block_IM --> {b'IM': _from_block_IM, ...}
_from_block_dict = {
k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
if isinstance(s_fn, staticmethod)
if k.startswith("_from_block_")
}
class bf_utils:
@staticmethod
def iter_ListBase(block, next_item=b'next'):
while block:
yield block
block = block.file.find_block_from_offset(block[next_item])
    @staticmethod
    def iter_array(block, length=-1):
assert(block.code == b'DATA')
from . import blendfile
import os
handle = block.file.handle
header = block.file.header
for i in range(length):
block.file.handle.seek(block.file_offset + (header.pointer_size * i), os.SEEK_SET)
offset = blendfile.DNA_IO.read_pointer(handle, header)
sub_block = block.file.find_block_from_offset(offset)
yield sub_block
# -----------------------------------------------------------------------------
# ID Expand
class ExpandID:
# fake module
#
# TODO:
#
# Array lookups here are _WAY_ too complicated,
# we need some nicer way to represent pointer indirection (easy like in C!)
# but for now, use what we have.
#
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def _expand_generic_material(block):
array_len = block.get(b'totcol')
if array_len != 0:
array = block.get_pointer(b'mat')
for sub_block in bf_utils.iter_array(array, array_len):
yield sub_block
@staticmethod
def _expand_generic_mtex(block):
field = block.dna_type.field_from_name[b'mtex']
array_len = field.dna_size // block.file.header.pointer_size
for i in range(array_len):
item = block.get_pointer((b'mtex', i))
if item:
yield item.get_pointer(b'tex')
yield item.get_pointer(b'object')
@staticmethod
def _expand_generic_nodetree(block):
assert(block.dna_type.dna_type_id == b'bNodeTree')
sdna_index_bNode = block.file.sdna_index_from_id[b'bNode']
for item in bf_utils.iter_ListBase(block.get_pointer((b'nodes', b'first'))):
item_type = item.get(b'type', sdna_index_refine=sdna_index_bNode)
if item_type != 221: # CMP_NODE_R_LAYERS
yield item.get_pointer(b'id', sdna_index_refine=sdna_index_bNode)
    @staticmethod
    def _expand_generic_nodetree_id(block):
block_ntree = block.get_pointer(b'nodetree', None)
if block_ntree is not None:
yield from ExpandID._expand_generic_nodetree(block_ntree)
@staticmethod
def _expand_generic_animdata(block):
block_adt = block.get_pointer(b'adt')
if block_adt:
yield block_adt.get_pointer(b'action')
# TODO, NLA
@staticmethod
def expand_OB(block): # 'Object'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_material(block)
has_dup_group = False
yield block.get_pointer(b'data')
if block[b'transflag'] & C_defs.OB_DUPLIGROUP:
dup_group = block.get_pointer(b'dup_group')
if dup_group is not None:
has_dup_group = True
yield dup_group
del dup_group
yield block.get_pointer(b'proxy')
yield block.get_pointer(b'proxy_group')
if USE_ALEMBIC_BRANCH:
if has_dup_group:
sdna_index_CacheLibrary = block.file.sdna_index_from_id.get(b'CacheLibrary')
if sdna_index_CacheLibrary is not None:
yield block.get_pointer(b'cache_library')
# 'ob->pose->chanbase[...].custom'
block_pose = block.get_pointer(b'pose')
if block_pose is not None:
assert(block_pose.dna_type.dna_type_id == b'bPose')
sdna_index_bPoseChannel = block_pose.file.sdna_index_from_id[b'bPoseChannel']
for item in bf_utils.iter_ListBase(block_pose.get_pointer((b'chanbase', b'first'))):
item_custom = item.get_pointer(b'custom', sdna_index_refine=sdna_index_bPoseChannel)
if item_custom is not None:
yield item_custom
# Expand the objects 'ParticleSettings' via:
# 'ob->particlesystem[...].part'
sdna_index_ParticleSystem = block.file.sdna_index_from_id.get(b'ParticleSystem')
if sdna_index_ParticleSystem is not None:
for item in bf_utils.iter_ListBase(
block.get_pointer((b'particlesystem', b'first'))):
item_part = item.get_pointer(b'part', sdna_index_refine=sdna_index_ParticleSystem)
if item_part is not None:
yield item_part
@staticmethod
def expand_ME(block): # 'Mesh'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_material(block)
yield block.get_pointer(b'texcomesh')
# TODO, TexFace? - it will be slow, we could simply ignore :S
@staticmethod
def expand_CU(block): # 'Curve'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_material(block)
sub_block = block.get_pointer(b'vfont')
if sub_block is not None:
yield sub_block
yield block.get_pointer(b'vfontb')
yield block.get_pointer(b'vfonti')
yield block.get_pointer(b'vfontbi')
yield block.get_pointer(b'bevobj')
yield block.get_pointer(b'taperobj')
yield block.get_pointer(b'textoncurve')
@staticmethod
def expand_MB(block): # 'MBall'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_material(block)
@staticmethod
def expand_AR(block): # 'bArmature'
yield from ExpandID._expand_generic_animdata(block)
@staticmethod
def expand_LA(block): # 'Lamp'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield from ExpandID._expand_generic_mtex(block)
@staticmethod
def expand_MA(block): # 'Material'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield from ExpandID._expand_generic_mtex(block)
yield block.get_pointer(b'group')
@staticmethod
def expand_TE(block): # 'Tex'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield block.get_pointer(b'ima')
@staticmethod
def expand_WO(block): # 'World'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield from ExpandID._expand_generic_mtex(block)
@staticmethod
def expand_NT(block): # 'bNodeTree'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree(block)
@staticmethod
def expand_PA(block): # 'ParticleSettings'
yield from ExpandID._expand_generic_animdata(block)
block_ren_as = block[b'ren_as']
if block_ren_as == C_defs.PART_DRAW_GR:
yield block.get_pointer(b'dup_group')
elif block_ren_as == C_defs.PART_DRAW_OB:
yield block.get_pointer(b'dup_ob')
yield from ExpandID._expand_generic_mtex(block)
@staticmethod
def expand_SC(block): # 'Scene'
yield from ExpandID._expand_generic_animdata(block)
yield from ExpandID._expand_generic_nodetree_id(block)
yield block.get_pointer(b'camera')
yield block.get_pointer(b'world')
yield block.get_pointer(b'set', None)
yield block.get_pointer(b'clip', None)
sdna_index_Base = block.file.sdna_index_from_id[b'Base']
for item in bf_utils.iter_ListBase(block.get_pointer((b'base', b'first'))):
yield item.get_pointer(b'object', sdna_index_refine=sdna_index_Base)
block_ed = block.get_pointer(b'ed')
if block_ed is not None:
sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']
def seqbase(someseq):
for item in someseq:
item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)
if item_type >= C_defs.SEQ_TYPE_EFFECT:
pass
elif item_type == C_defs.SEQ_TYPE_META:
yield from seqbase(bf_utils.iter_ListBase(
                            item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))
else:
if item_type == C_defs.SEQ_TYPE_SCENE:
yield item.get_pointer(b'scene')
elif item_type == C_defs.SEQ_TYPE_MOVIECLIP:
yield item.get_pointer(b'clip')
elif item_type == C_defs.SEQ_TYPE_MASK:
yield item.get_pointer(b'mask')
elif item_type == C_defs.SEQ_TYPE_SOUND_RAM:
yield item.get_pointer(b'sound')
yield from seqbase(bf_utils.iter_ListBase(
block_ed.get_pointer((b'seqbase', b'first'))))
@staticmethod
def expand_GR(block): # 'Group'
sdna_index_GroupObject = block.file.sdna_index_from_id[b'GroupObject']
for item in bf_utils.iter_ListBase(block.get_pointer((b'gobject', b'first'))):
yield item.get_pointer(b'ob', sdna_index_refine=sdna_index_GroupObject)
# expand_GR --> {b'GR': expand_GR, ...}
expand_funcs = {
k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
if isinstance(s_fn, staticmethod)
if k.startswith("expand_")
}
# -----------------------------------------------------------------------------
# Packing Utility
class utils:
# fake module
__slots__ = ()
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def abspath(path, start, library=None):
import os
if path.startswith(b'//'):
# if library:
# start = os.path.dirname(abspath(library.filepath))
return os.path.join(start, path[2:])
return path
if __import__("os").sep == '/':
@staticmethod
def compatpath(path):
return path.replace(b'\\', b'/')
else:
@staticmethod
def compatpath(path):
# keep '//'
return path[:2] + path[2:].replace(b'/', b'\\')
@staticmethod
def splitpath(path):
"""
Splits the path using either slashes
"""
split1 = path.rpartition(b'/')
split2 = path.rpartition(b'\\')
if len(split1[0]) > len(split2[0]):
return split1
else:
return split2
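    # For example: splitpath(b'//textures/wood.png') -> (b'//textures', b'/', b'wood.png')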
def find_sequence_paths(filepath, use_fullpath=True):
# supports str, byte paths
basedir, filename = os.path.split(filepath)
if not os.path.exists(basedir):
return []
filename_noext, ext = os.path.splitext(filename)
from string import digits
if isinstance(filepath, bytes):
digits = digits.encode()
filename_nodigits = filename_noext.rstrip(digits)
if len(filename_nodigits) == len(filename_noext):
# input isn't from a sequence
return []
files = os.listdir(basedir)
files[:] = [
f for f in files
if f.startswith(filename_nodigits) and
f.endswith(ext) and
        f[len(filename_nodigits):-len(ext) if ext else None].isdigit()
]
if use_fullpath:
files[:] = [
os.path.join(basedir, f) for f in files
]
return files
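# Example (sketch): listing the external dependencies of a blend file with the
# walker above. '/path/to/file.blend' is a placeholder; paths are bytes here:
#
#   from bam.blend import blendfile_path_walker
#
#   for fp, (rootdir, basename) in blendfile_path_walker.FilePath.visit_from_blend(
#           b'/path/to/file.blend',
#           readonly=True,
#           recursive=True,
#   ):
#       # fp.filepath is the path as stored ('//' prefix = blend-relative),
#       # fp.filepath_absolute resolves it against the blend file's directory.
#       print(fp.filepath_absolute)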

File diff suppressed because it is too large

View File

@ -0,0 +1,10 @@
"""CLI interface to BAM-pack.
Run this using:
python -m bam.pack
"""
if __name__ == '__main__':
from bam.blend import blendfile_pack
blendfile_pack.main()

View File

@ -0,0 +1,143 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENCE BLOCK *****
def colorize_dummy(msg, color=None):
return msg
_USE_COLOR = True
if _USE_COLOR:
color_codes = {
'black': '\033[0;30m',
'bright_gray': '\033[0;37m',
'blue': '\033[0;34m',
'white': '\033[1;37m',
'green': '\033[0;32m',
'bright_blue': '\033[1;34m',
'cyan': '\033[0;36m',
'bright_green': '\033[1;32m',
'red': '\033[0;31m',
'bright_cyan': '\033[1;36m',
'purple': '\033[0;35m',
'bright_red': '\033[1;31m',
'yellow': '\033[0;33m',
        'bright_purple': '\033[1;35m',
        'dark_gray': '\033[1;30m',
        'bright_yellow': '\033[1;33m',
'normal': '\033[0m',
}
def colorize(msg, color=None):
return (color_codes[color] + msg + color_codes['normal'])
else:
colorize = colorize_dummy
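# Example: colorize() only wraps a message in ANSI escape codes, e.g.
#
#   print(colorize('pack complete', 'green'))  # green text on ANSI terminals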
def uuid_from_file(fn, block_size=1 << 20):
"""
Returns an arbitrary sized unique ASCII string based on the file contents.
(exact hashing method may change).
"""
with open(fn, 'rb') as f:
# first get the size
import os
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(0, os.SEEK_SET)
del os
        # hash the contents with SHA-512
        import hashlib
        hasher = hashlib.new('sha512')
        while True:
            data = f.read(block_size)
            if not data:
                break
            hasher.update(data)
        # prefix with the file size, skipping hex()'s '0x'
        return hex(size)[2:] + hasher.hexdigest()
def write_json_to_zip(zip_handle, path, data=None):
import json
zip_handle.writestr(
path,
json.dumps(
data,
check_circular=False,
# optional (pretty)
sort_keys=True, indent=4, separators=(',', ': '),
).encode('utf-8'))
def write_json_to_file(path, data):
import json
    with open(path, 'w', encoding='utf-8') as file_handle:
json.dump(
data, file_handle, ensure_ascii=False,
check_circular=False,
# optional (pretty)
sort_keys=True, indent=4, separators=(',', ': '),
)
def is_compressed_filetype(filepath):
"""
Use to check if we should compress files in a zip.
"""
# for now, only include files which Blender is likely to reference
import os
assert(isinstance(filepath, bytes))
return os.path.splitext(filepath)[1].lower() in {
# images
b'.exr',
b'.jpg', b'.jpeg',
b'.png',
# audio
b'.aif', b'.aiff',
b'.mp3',
b'.ogg', b'.ogv',
b'.wav',
# video
b'.avi',
b'.mkv',
b'.mov',
b'.mpg', b'.mpeg',
# archives
# '.bz2', '.tbz',
# '.gz', '.tgz',
# '.zip',
}
def is_subdir(path, directory):
"""
Returns true if *path* in a subdirectory of *directory*.
"""
import os
from os.path import normpath, normcase, sep
path = normpath(normcase(path))
directory = normpath(normcase(directory))
if len(path) > len(directory):
sep = sep.encode('ascii') if isinstance(directory, bytes) else sep
if path.startswith(directory.rstrip(sep) + sep):
return True
return False
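# Example (sketch): uuid_from_file() prefixes the file size (hex, without '0x')
# to a SHA-512 hex digest, so identical contents give identical IDs regardless
# of file name or location. The sample paths below are placeholders:
#
#   from bam.utils.system import uuid_from_file, is_subdir
#
#   uuid_from_file('/tmp/a.png') == uuid_from_file('/tmp/copy_of_a.png')  # True for equal contents
#   is_subdir('/project/textures/wood.png', '/project')                   # True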

View File

@ -0,0 +1,3 @@
Bam Asset Manager is a tool to manage assets in Blender.

View File

@ -0,0 +1,24 @@
Metadata-Version: 2.0
Name: blender-bam
Version: 1.1.7
Summary: Bam Asset Manager
Home-page: http://developer.blender.org/project/view/55
Author: Campbell Barton, Francesco Siddi
Author-email: ideasman42@gmail.com
License: GPLv2+
Download-URL: https://pypi.python.org/pypi/blender-bam
Platform: any
Classifier: Development Status :: 3 - Alpha
Classifier: Environment :: Console
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Utilities
Requires-Dist: requests (>=2.4)
Bam Asset Manager is a tool to manage assets in Blender.

View File

@ -0,0 +1,20 @@
bam/__init__.py,sha256=8dY6Im7e-qr6P60wCCOd2eipbQT3rfZ8De66BoEzmlA,113
bam/__main__.py,sha256=wHUbMNeJKFAT6tvd6-EiuT6gVpbHrXOFo0SypFjo3hk,237
bam/cli.py,sha256=jUZ30j4e2RHb6CKJjn3y6sj-2fTR70XliphQMAaETTc,71067
bam/pack.py,sha256=tAmpW_o1-m5TLXeNY4_FbZCdtqnIcg_bE2Uv_twABlE,166
bam/blend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
bam/blend/blendfile.py,sha256=AoufnaY7u9S7TIYPOz4IShu-goVdyafzoq-s5Kzifwo,31207
bam/blend/blendfile_copy.py,sha256=1Dm2xBZ6_49usDHI0xW6iR2tJ6c-UrfCuqO-mjCuhL4,3733
bam/blend/blendfile_pack.py,sha256=vJb_3bQdJ4jkG7R44TJm_WNqmt9-o_oW-n2w8WUTBYo,26053
bam/blend/blendfile_pack_restore.py,sha256=WRRbzq6iZj_nGPU1wqmiJl3rYNiXEEfF8F82m34iYDY,4531
bam/blend/blendfile_path_remap.py,sha256=KtluKRf8rfjYv8Lgd1ZTjcTBgtMw_iJmE-fpjxBN8xI,8102
bam/blend/blendfile_path_walker.py,sha256=tFDm4XQbir8lBipJmT-T_UF-Pct4sdx-3ir7CBt-m_Y,34344
bam/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
bam/utils/system.py,sha256=FdKBEpO6it1lg9u5QK9j1VN-sK9MZVTzvGLA1GW1xEk,4107
blender_bam-1.1.7.dist-info/DESCRIPTION.rst,sha256=gxRn9duvXLzZWddzDz36dsnzAkNwg1eMAbfz7KAMV9s,59
blender_bam-1.1.7.dist-info/METADATA,sha256=X0NTEUrOeWvEz-vJmDvprtKuKiByuVXM3q0OpfSnZPE,813
blender_bam-1.1.7.dist-info/RECORD,,
blender_bam-1.1.7.dist-info/WHEEL,sha256=dXGL5yz26tu5uNsUy9EBoBYhrvMYqmFH9Vm82OQUT-8,95
blender_bam-1.1.7.dist-info/entry_points.txt,sha256=yGjZcACWl4EQuQbVwuGgtURy1yYE2YvkH7c6Is6ADgQ,38
blender_bam-1.1.7.dist-info/metadata.json,sha256=g6R9wHmwtz2QQntiWKsPTy_Uqn2z6A9txtSr03a-gHA,1069
blender_bam-1.1.7.dist-info/top_level.txt,sha256=3Jh27QbVRbZ8nvfhcKPiJaOO3iyjKw8SSS5dabqINcw,4

View File

@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0.a0)
Root-Is-Purelib: true
Tag: py3-none-any

View File

@ -0,0 +1,3 @@
[console_scripts]
bam = bam.cli:main

View File

@ -0,0 +1 @@
{"classifiers": ["Development Status :: 3 - Alpha", "Environment :: Console", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Utilities"], "download_url": "https://pypi.python.org/pypi/blender-bam", "extensions": {"python.commands": {"wrap_console": {"bam": "bam.cli:main"}}, "python.details": {"contacts": [{"email": "ideasman42@gmail.com", "name": "Campbell Barton, Francesco Siddi", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://developer.blender.org/project/view/55"}}, "python.exports": {"console_scripts": {"bam": "bam.cli:main"}}}, "extras": [], "generator": "bdist_wheel (0.30.0.a0)", "license": "GPLv2+", "metadata_version": "2.0", "name": "blender-bam", "platform": "any", "run_requires": [{"requires": ["requests (>=2.4)"]}], "summary": "Bam Asset Manager", "version": "1.1.7"}

io_blend_utils/install_whl.py Executable file
View File

@ -0,0 +1,129 @@
#!/usr/bin/env python3.5
"""This Python script installs a new version of BAM here."""
import pathlib
my_dir = pathlib.Path(__file__).absolute().parent
def main():
import argparse
parser = argparse.ArgumentParser(description="This script installs a new version of BAM here.")
parser.add_argument('wheelfile', type=pathlib.Path,
help='Location of the wheel file to install.')
args = parser.parse_args()
install(args.wheelfile.expanduser())
def install(wheelfile: pathlib.Path):
    assert_is_zipfile(wheelfile)
wipe_preexisting()
print('Installing %s' % wheelfile)
target = my_dir / wheelfile.name
print('Creating target directory %s' % target)
target.mkdir(parents=True)
extract(wheelfile, target)
copy_files(target)
version = find_version(target)
print('This is BAM version %s' % (version, ))
update_init_file(wheelfile, version)
print('Done installing %s' % wheelfile.name)
def assert_is_zipfile(wheelfile: pathlib.Path):
import zipfile
# In Python 3.6 conversion to str is not necessary any more:
if not zipfile.is_zipfile(str(wheelfile)):
        raise SystemExit('%s is not a valid ZIP file!' % wheelfile)
def wipe_preexisting():
import shutil
for existing in sorted(my_dir.glob('blender_bam-*.whl')):
if existing.is_dir():
print('Wiping pre-existing directory %s' % existing)
# In Python 3.6 conversion to str is not necessary any more:
shutil.rmtree(str(existing))
else:
print('Wiping pre-existing file %s' % existing)
existing.unlink()
def extract(wheelfile: pathlib.Path, target: pathlib.Path):
import os
import zipfile
# In Python 3.6 conversion to str is not necessary any more:
os.chdir(str(target))
print('Extracting wheel')
# In Python 3.6 conversion to str is not necessary any more:
with zipfile.ZipFile(str(wheelfile)) as whlzip:
whlzip.extractall()
os.chdir(str(my_dir))
def copy_files(target: pathlib.Path):
import shutil
print('Copying some files from wheel to other locations')
# In Python 3.6 conversion to str is not necessary any more:
shutil.copy(str(target / 'bam' / 'blend' / 'blendfile_path_walker.py'), './blend')
shutil.copy(str(target / 'bam' / 'blend' / 'blendfile.py'), './blend')
shutil.copy(str(target / 'bam' / 'utils' / 'system.py'), './utils')
def find_version(target: pathlib.Path):
import json
print('Obtaining version number from wheel.')
distinfo = next(target.glob('*.dist-info'))
with (distinfo / 'metadata.json').open() as infofile:
metadata = json.load(infofile)
# "1.2.3" -> (1, 2, 3)
str_ver = metadata['version']
return tuple(int(x) for x in str_ver.split('.'))
def update_init_file(wheelfile: pathlib.Path, version: tuple):
import os
import re
print('Updating __init__.py to point to this wheel.')
path_line_re = re.compile(r'^BAM_WHEEL_PATH\s*=')
    version_line_re = re.compile(r'^\s+[\'"]version[\'"]: (\([0-9, ]+\)),')  # allow spaces in the tuple, e.g. (1, 1, 7)
with open('__init__.py', 'r') as infile, \
open('__init__.py~whl~installer~', 'w') as outfile:
        for line in infile:
            if version_line_re.match(line):
                outfile.write("    'version': %s,%s" % (version, os.linesep))
            elif path_line_re.match(line):
                outfile.write("BAM_WHEEL_PATH = '%s'%s" % (wheelfile.name, os.linesep))
            else:
                outfile.write(line)
os.unlink('__init__.py')
os.rename('__init__.py~whl~installer~', '__init__.py')
if __name__ == '__main__':
main()
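# Example (illustration): given a hypothetical new wheel
# blender_bam-1.1.8-py3-none-any.whl, update_init_file() rewrites exactly two
# kinds of lines in __init__.py:
#
#       'version': (1, 1, 7),                              # before
#       'version': (1, 1, 8),                              # after
#
#   BAM_WHEEL_PATH = 'blender_bam-1.1.7-py3-none-any.whl'  # before
#   BAM_WHEEL_PATH = 'blender_bam-1.1.8-py3-none-any.whl'  # after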