Moving BlenderKit to new repository on GitHub
The BlenderKit add-on now lives at https://github.com/BlenderKit/blenderkit. All release notes, docs, and other information will be published on the BlenderKit website: www.blenderkit.com. The move happened because the Blender Foundation ended the commercial add-on offering for all commercial add-ons. This means a bit less comfort for our users, but also many new possibilities!
Referenced by issue blender/blender#98970, blenderkit add-on fail to install in my blender Referenced by issue blender/blender#96723, Blenderkit doesn't load image icons after starting a new project. Referenced by issue blender/blender#94709, Blender Crash when using undo with BlenderKit add-on Referenced by issue #93287, Blenderkit login Referenced by issue #92497, BlenderKit preview thumbnails are black when turning on Limit Size in Preferences>Viewport>Textures
|
@ -1,3 +0,0 @@
|
|||
BlenderKit add-on is the official addon of the BlenderKit service for Blender 3d.
|
||||
It enables users to upload, search, download, and rate different assets for blender.
|
||||
It works together with BlenderKit server.
|
|
@ -1,403 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import utils, ui
|
||||
|
||||
import bpy
|
||||
import uuid
|
||||
|
||||
|
||||
def append_brush(file_name, brushname=None, link=False, fake_user=True):
    """Append a brush datablock from a .blend file.

    When brushname is None, the first brush found in the file is used.
    Returns the appended bpy.types.Brush.
    """
    with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
        for candidate in data_from.brushes:
            if brushname is None or candidate == brushname:
                data_to.brushes = [candidate]
                brushname = candidate
    appended = bpy.data.brushes[brushname]
    if fake_user:
        # protect the datablock from being purged on save
        appended.use_fake_user = True
    return appended
|
||||
|
||||
|
||||
def append_material(file_name, matname=None, link=False, fake_user=True):
    '''append a material type asset'''
    # first, we have to check if there is a material with same name
    # in previous step there's check if the imported material
    # is already in the scene, so we know same name != same material

    # snapshot of existing materials so we can spot the newly appended one,
    # even if Blender renamed it to avoid a collision (e.g. "Mat.001")
    mats_before = bpy.data.materials[:]
    try:
        with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
            found = False
            for m in data_from.materials:
                # matname is None means "take the first material in the file"
                if m == matname or matname is None:
                    data_to.materials = [m]
                    matname = m
                    found = True
                    break;

            #not found yet? probably some name inconsistency then.
            if not found and len(data_from.materials)>0:
                data_to.materials = [data_from.materials[0]]
                matname = data_from.materials[0]
                print(f"the material wasn't found under the exact name, appended another one: {matname}")

    except Exception as e:
        # best-effort: log and fall through; mat lookup below may still succeed
        print(e)
        print('failed to open the asset file')
    # we have to find the new material , due to possible name changes
    mat = None
    for m in bpy.data.materials:
        if m not in mats_before:
            mat = m
            break;
    #still not found?
    if mat is None:
        # fall back to a lookup by the (possibly unchanged) name
        mat = bpy.data.materials.get(matname)

    if fake_user:
        # NOTE(review): raises AttributeError if mat is still None — verify callers
        mat.use_fake_user = True
    return mat
|
||||
|
||||
|
||||
def append_scene(file_name, scenename=None, link=False, fake_user=False):
    """Append a scene datablock from a .blend file.

    When scenename is None, the first scene found in the file is used.
    Returns the appended bpy.types.Scene.
    """
    with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
        for candidate in data_from.scenes:
            if scenename is None or candidate == scenename:
                data_to.scenes = [candidate]
                scenename = candidate
    scene = bpy.data.scenes[scenename]
    if fake_user:
        scene.use_fake_user = True
    # scene has to have a new uuid, so user reports aren't screwed.
    scene['uuid'] = str(uuid.uuid4())

    # reset ui_props of the scene back to the default search mode
    ui_props = bpy.context.window_manager.blenderkitUI
    ui_props.down_up = 'SEARCH'

    return scene
|
||||
|
||||
|
||||
def get_node_sure(node_tree, ntype=''):
    """Return the first node of the given type in node_tree.

    If no node of that type exists yet, a new one is created and returned.
    """
    for existing in node_tree.nodes:
        if existing.bl_rna.identifier == ntype:
            return existing
    return node_tree.nodes.new(type=ntype)
|
||||
|
||||
def hdr_swap(name, hdr):
    '''
    Try to replace the hdr in current world setup. If this fails, create a new world.
    :param name: Name of the resulting world (renames the current one if swap is successful)
    :param hdr: Image type
    :return: None
    '''
    world = bpy.context.scene.world
    if world:
        world.use_nodes = True
        world.name = name
        for node in world.node_tree.nodes:
            if node.bl_rna.identifier == 'ShaderNodeTexEnvironment':
                # found an existing environment texture node — just swap the image
                node.image = hdr
                return
    # no world or no environment node: build a fresh world setup
    new_hdr_world(name, hdr)
|
||||
|
||||
|
||||
def new_hdr_world(name, hdr):
    '''
    creates a new world, links in the hdr with mapping node, and links the world to scene
    :param name: Name of the world datablock
    :param hdr: Image type
    :return: None
    '''
    world = bpy.data.worlds.new(name=name)
    world.use_nodes = True
    bpy.context.scene.world = world

    tree = world.node_tree
    env = tree.nodes.new(type='ShaderNodeTexEnvironment')
    env.image = hdr
    background = get_node_sure(tree, 'ShaderNodeBackground')
    coords = get_node_sure(tree, 'ShaderNodeTexCoord')
    mapping = get_node_sure(tree, 'ShaderNodeMapping')

    # TexCoord(Generated) -> Mapping -> Environment -> Background
    tree.links.new(env.outputs['Color'], background.inputs['Color'])
    tree.links.new(coords.outputs['Generated'], mapping.inputs['Vector'])
    tree.links.new(mapping.outputs['Vector'], env.inputs['Vector'])

    # lay the chain out left of the background node for readability
    env.location.x = -400
    mapping.location.x = -600
    coords.location.x = -800
|
||||
|
||||
|
||||
def load_HDR(file_name, name):
    """Load an HDR image (reusing it when already loaded) and link it to the scene world."""
    hdr = None
    for image in bpy.data.images:
        if image.filepath == file_name:
            # image already present — reuse instead of loading a duplicate
            hdr = image
            break
    if hdr is None:
        hdr = bpy.data.images.load(file_name)
    hdr_swap(name, hdr)
    return hdr
|
||||
|
||||
|
||||
def link_collection(file_name, obnames=None, location=(0, 0, 0), link=False, parent=None, **kwargs):
    """Link/append an instanced collection (model type asset).

    Creates an empty with instance_type='COLLECTION' pointing at the collection
    named kwargs['name'] loaded from file_name.

    :param file_name: path to the source .blend file
    :param obnames: unused here; kept for signature compatibility with the other
        append_* helpers (fixed: was a shared mutable default argument)
    :param location: world-space location of the instance empty
    :param link: link instead of append
    :param parent: name of an existing object to parent the empty to
    :param kwargs: 'name' (required, collection name), 'rotation' (optional euler)
    :return: (main_object, []) — the instance empty and an empty object list
    """
    if obnames is None:
        obnames = []
    sel = utils.selection_get()

    with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
        for col in data_from.collections:
            if col == kwargs['name']:
                data_to.collections = [col]

    rotation = (0, 0, 0)
    if kwargs.get('rotation') is not None:
        rotation = kwargs['rotation']

    bpy.ops.object.empty_add(type='PLAIN_AXES', location=location, rotation=rotation)
    main_object = bpy.context.view_layer.objects.active
    main_object.instance_type = 'COLLECTION'

    if parent is not None:
        main_object.parent = bpy.data.objects.get(parent)

    main_object.matrix_world.translation = location

    # find the collection that came from this exact file and instance it
    for col in bpy.data.collections:
        if col.library is not None:
            fp = bpy.path.abspath(col.library.filepath)
            fp1 = bpy.path.abspath(file_name)
            if fp == fp1:
                main_object.instance_collection = col
                break

    # sometimes, the lib might already be loaded without the actual link —
    # fall back to a lookup by collection name
    if not main_object.instance_collection and kwargs['name']:
        col = bpy.data.collections.get(kwargs['name'])
        if col:
            main_object.instance_collection = col

    main_object.name = main_object.instance_collection.name

    utils.selection_set(sel)
    return main_object, []
|
||||
|
||||
|
||||
def append_particle_system(file_name, obnames=[], location=(0, 0, 0), link=False, **kwargs):
    '''link an instanced group - model type asset'''
    # NOTE(review): obnames is a mutable default argument and is unused here;
    # the particle settings are applied to kwargs['target_object'] instead.

    pss = []
    with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
        # pull every particle-settings datablock from the source file
        for ps in data_from.particles:
            pss.append(ps)
        data_to.particles = pss

    s = bpy.context.scene
    sel = utils.selection_get()

    target_object = bpy.context.scene.objects.get(kwargs['target_object'])
    if target_object is not None and target_object.type == 'MESH':
        target_object.select_set(True)
        bpy.context.view_layer.objects.active = target_object

        for ps in pss:
            # now let's tune this ps to the particular objects area:
            totarea = 0
            for p in target_object.data.polygons:
                totarea += p.area
            # the asset's particle count is treated as a per-unit-area density
            count = int(ps.count * totarea)

            if ps.child_type in ('INTERPOLATED', 'SIMPLE'):
                total_count = count * ps.rendered_child_count
                disp_count = count * ps.child_nbr
            else:
                total_count = count

            # viewport-performance thresholds (total particle counts)
            bbox_threshold = 25000
            display_threshold = 200000
            total_max_threshold = 2000000
            # emitting too many parent particles just kills blender now.

            #1st level of optimizaton - switch t bounding boxes.
            if total_count>bbox_threshold:
                target_object.display_type = 'BOUNDS'
                # 2nd level of optimization - reduce percentage of displayed particles.
                ps.display_percentage = min(ps.display_percentage, max(1, int(100 * display_threshold / total_count)))
                #here we can also tune down number of children displayed.
            #set the count
            ps.count = count
            #add the modifier
            bpy.ops.object.particle_system_add()
            # 3rd level - hide particle system from viewport - is done on the modifier..
            if total_count > total_max_threshold:
                target_object.modifiers[-1].show_viewport = False

            target_object.particle_systems[-1].settings = ps

    # NOTE(review): raises AttributeError when target_object was not found or
    # not a mesh — confirm callers always pass a valid mesh name
    target_object.select_set(False)
    utils.selection_set(sel)
    return target_object, []
|
||||
|
||||
|
||||
def append_objects(file_name, obnames=[], location=(0, 0, 0), link=False, **kwargs):
    '''append objects into scene individually'''
    # NOTE(review): obnames is a mutable default argument; it is only read
    # (never mutated) in this function, so behavior is unaffected.
    #simplified version of append
    if kwargs.get('name'):
        # by now used for appending into scene
        scene = bpy.context.scene
        sel = utils.selection_get()
        bpy.ops.object.select_all(action='DESELECT')

        # wm.append expects a "filepath/Collection/" style directory
        path = file_name + "\\Collection\\"
        collection_name = kwargs.get('name')
        fc = utils.get_fake_context(bpy.context, area_type='VIEW_3D')
        bpy.ops.wm.append(fc, filename=collection_name, directory=path)

        return_obs = []
        to_hidden_collection = []
        collection = None
        # the append operator leaves the new objects selected
        for ob in bpy.context.scene.objects:
            if ob.select_get():
                return_obs.append(ob)
                if not ob.parent:
                    # top-level object of the asset hierarchy
                    main_object = ob
                    ob.location = location
                # check for object that should be hidden
                if ob.users_collection[0].name == collection_name:
                    collection = ob.users_collection[0]
                    #let collection also store info that it was created by BlenderKit, for purging reasons
                    collection['is_blenderkit_asset'] = True
                else:
                    to_hidden_collection.append(ob)

        if kwargs.get('rotation'):
            main_object.rotation_euler = kwargs['rotation']

        if kwargs.get('parent') is not None:
            main_object.parent = bpy.data.objects[kwargs['parent']]
            main_object.matrix_world.translation = location

        #move objects that should be hidden to a sub collection
        if len(to_hidden_collection)>0 and collection is not None:
            hidden_collection_name = collection_name+'_hidden'
            h_col = bpy.data.collections.new(name = hidden_collection_name)
            collection.children.link(h_col)
            for ob in to_hidden_collection:
                ob.users_collection[0].objects.unlink(ob)
                h_col.objects.link(ob)
            utils.exclude_collection(hidden_collection_name)

        bpy.ops.object.select_all(action='DESELECT')
        utils.selection_set(sel)

        return main_object, return_obs

    #this is used for uploads:
    with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
        sobs = []
        for ob in data_from.objects:
            # empty obnames means "take everything"
            if ob in obnames or obnames == []:
                sobs.append(ob)
        data_to.objects = sobs

    # link them to scene
    scene = bpy.context.scene
    sel = utils.selection_get()
    bpy.ops.object.select_all(action='DESELECT')

    return_obs = []  # this might not be needed, but better be sure to rewrite the list.
    main_object = None
    hidden_objects = []
    for obj in data_to.objects:
        if obj is not None:
            scene.collection.objects.link(obj)
            if obj.parent is None:
                obj.location = location
                main_object = obj
            obj.select_set(True)
            # we need to unhide object so make_local op can use those too.
            if link == True:
                if obj.hide_viewport:
                    hidden_objects.append(obj)
                    obj.hide_viewport = False
            return_obs.append(obj)

    # Only after all objects are in scene! Otherwise gets broken relationships
    if link == True:
        bpy.ops.object.make_local(type='SELECT_OBJECT')
        # restore visibility state changed above
        for ob in hidden_objects:
            ob.hide_viewport = True

    if kwargs.get('rotation') is not None:
        main_object.rotation_euler = kwargs['rotation']

    if kwargs.get('parent') is not None:
        main_object.parent = bpy.data.objects[kwargs['parent']]
        main_object.matrix_world.translation = location

    bpy.ops.object.select_all(action='DESELECT')

    utils.selection_set(sel)

    return main_object, return_obs
|
|
@ -1,393 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import utils
|
||||
|
||||
import bpy
|
||||
from object_print3d_utils import operators as ops
|
||||
|
||||
RENDER_OBTYPES = ['MESH', 'CURVE', 'SURFACE', 'METABALL', 'TEXT']
|
||||
|
||||
|
||||
def check_material(props, mat):
    """Analyze a material's node tree and write shader/texture statistics to props.

    Counts nodes and unique textures, tracks min/max texture resolution and
    total megapixels, and records a comma-separated list of shader names.
    Only does the node-tree walk when the render engine is Cycles.
    """
    e = bpy.context.scene.render.engine
    shaders = []
    textures = []
    # reset aggregates — they may not be refilled when no longer relevant
    props.texture_count = 0
    props.node_count = 0
    props.total_megapixels = 0
    props.is_procedural = True

    if e == 'CYCLES':

        if mat.node_tree is not None:
            # iterative walk over all nodes, descending into node groups
            checknodes = mat.node_tree.nodes[:]
            while len(checknodes) > 0:
                n = checknodes.pop()
                props.node_count += 1
                if n.type == 'GROUP':  # dive deeper here.
                    checknodes.extend(n.node_tree.nodes)
                # a non-group node with a single shader output is a shader node
                if len(n.outputs) == 1 and n.outputs[0].type == 'SHADER' and n.type != 'GROUP':
                    if n.type not in shaders:
                        shaders.append(n.type)
                if n.type == 'TEX_IMAGE':

                    if n.image is not None:
                        # NOTE(review): mattype is assigned but never used here
                        mattype = 'image based'
                        props.is_procedural = False
                        if n.image not in textures:
                            textures.append(n.image)
                            props.texture_count += 1
                            props.total_megapixels += (n.image.size[0] * n.image.size[1])

                            maxres = max(n.image.size[0], n.image.size[1])
                            props.texture_resolution_max = max(props.texture_resolution_max, maxres)
                            minres = min(n.image.size[0], n.image.size[1])
                            if props.texture_resolution_min == 0:
                                props.texture_resolution_min = minres
                            else:
                                props.texture_resolution_min = min(props.texture_resolution_min, minres)

    # human-readable shader list, e.g. "principled, diffuse, "
    props.shaders = ''
    for s in shaders:
        if s.startswith('BSDF_'):
            s = s[5:]
        s = s.lower().replace('_', ' ')
        props.shaders += (s + ', ')
|
||||
|
||||
|
||||
def check_render_engine(props, obs):
    """Detect the render engine and collect material/shader/texture stats for obs.

    Writes the engine name, comma-separated material and shader lists, texture
    counts, megapixels, min/max texture resolution and UV presence onto props.
    """
    ob = obs[0]
    m = None

    e = bpy.context.scene.render.engine
    mattype = None
    materials = []
    shaders = []
    textures = []
    # reset aggregates — they may not be refilled when no longer relevant
    props.uv = False
    props.texture_count = 0
    props.total_megapixels = 0
    props.node_count = 0
    for ob in obs:  # TODO , this is duplicated here for other engines, otherwise this should be more clever.
        for ms in ob.material_slots:
            if ms.material is not None:
                m = ms.material
                if m.name not in materials:
                    materials.append(m.name)
        if ob.type == 'MESH' and len(ob.data.uv_layers) > 0:
            props.uv = True

    if e == 'BLENDER_RENDER':
        props.engine = 'BLENDER_INTERNAL'
    elif e == 'CYCLES':

        props.engine = 'CYCLES'

        for mname in materials:
            m = bpy.data.materials[mname]
            if m is not None and m.node_tree is not None:
                # iterative walk over all nodes, descending into node groups
                checknodes = m.node_tree.nodes[:]
                while len(checknodes) > 0:
                    n = checknodes.pop()
                    props.node_count += 1
                    if n.type == 'GROUP':  # dive deeper here.
                        checknodes.extend(n.node_tree.nodes)
                    # a non-group node with a single shader output is a shader node
                    if len(n.outputs) == 1 and n.outputs[0].type == 'SHADER' and n.type != 'GROUP':
                        if n.type not in shaders:
                            shaders.append(n.type)
                    if n.type == 'TEX_IMAGE':

                        if n.image is not None and n.image not in textures:
                            props.is_procedural = False
                            mattype = 'image based'

                            textures.append(n.image)
                            props.texture_count += 1
                            props.total_megapixels += (n.image.size[0] * n.image.size[1])

                            maxres = max(n.image.size[0], n.image.size[1])
                            props.texture_resolution_max = max(props.texture_resolution_max, maxres)
                            minres = min(n.image.size[0], n.image.size[1])
                            if props.texture_resolution_min == 0:
                                props.texture_resolution_min = minres
                            else:
                                props.texture_resolution_min = min(props.texture_resolution_min, minres)

        # if mattype == None:
        #     mattype = 'procedural'
        # tags['material type'] = mattype

    elif e == 'BLENDER_GAME':
        props.engine = 'BLENDER_GAME'

    # write to object properties.
    props.materials = ''
    props.shaders = ''
    for m in materials:
        props.materials += (m + ', ')
    for s in shaders:
        if s.startswith('BSDF_'):
            s = s[5:]
        s = s.lower()
        s = s.replace('_', ' ')
        props.shaders += (s + ', ')
|
||||
|
||||
|
||||
def check_printable(props, obs):
    """Run the 3D-print add-on checks on a single object and set props.printable_3d.

    Only runs when exactly one object is selected; each check appends result
    strings to `info`, and a check counts as passed when its message ends
    with ' 0' (zero problems found).
    """
    if len(obs) == 1:
        # checks from the bundled object_print3d_utils add-on
        check_cls = (
            ops.Print3DCheckSolid,
            ops.Print3DCheckIntersections,
            ops.Print3DCheckDegenerate,
            ops.Print3DCheckDistorted,
            ops.Print3DCheckThick,
            ops.Print3DCheckSharp,
            # ops.Print3DCheckOverhang,
        )

        ob = obs[0]

        info = []
        for cls in check_cls:
            cls.main_check(ob, info)

        printable = True
        for item in info:
            # each report line ends with the number of problems found
            passed = item[0].endswith(' 0')
            if not passed:
                printable = False

        props.printable_3d = printable
|
||||
|
||||
|
||||
def check_rig(props, obs):
    """Flag props.rig when any object in obs is an armature."""
    if any(ob.type == 'ARMATURE' for ob in obs):
        props.rig = True
|
||||
|
||||
|
||||
def check_anim(props, obs):
    """Flag props.animated when any object's action has an fcurve with more than one keyframe."""
    for ob in obs:
        anim_data = ob.animation_data
        if anim_data is None:
            continue
        action = anim_data.action
        if action is None:
            continue
        if any(len(curve.keyframe_points) > 1 for curve in action.fcurves):
            # one animated fcurve is enough — props.animated is only ever set, never cleared
            props.animated = True
            return
|
||||
|
||||
|
||||
def check_meshprops(props, obs):
    '''Check polycount, render polycount estimate, polygon types and manifoldness.

    Writes face_count, face_count_render, mesh_poly_type and manifold to props.
    Fixes from review:
      - the TRI_DOMINANT branch compared `tris > quads` twice (instead of
        `tris > ngons`), which also made the pure-'TRI' branch unreachable;
      - the temporary mesh created for CURVE objects was never freed because
        `ob_eval` was checked but never assigned.
    '''
    fc = 0        # raw face count
    fcr = 0       # estimated render-time face count (generator modifiers applied)
    tris = 0
    quads = 0
    ngons = 0
    vc = 0        # vertex count

    edges_counts = {}
    manifold = True

    for ob in obs:
        if ob.type not in ('MESH', 'CURVE'):
            continue
        is_temp_mesh = False
        if ob.type == 'CURVE':
            # curves are converted to a temporary mesh for counting
            mesh = ob.to_mesh()
            is_temp_mesh = True
        else:
            mesh = ob.data
        fco = len(mesh.polygons)
        fc += fco
        vc += len(mesh.vertices)
        fcor = fco
        for f in mesh.polygons:
            # face sides counter
            nsides = len(f.vertices)
            if nsides == 3:
                tris += 1
            elif nsides == 4:
                quads += 1
            elif nsides > 4:
                ngons += 1

            # manifold counter: tally how many faces use each edge
            for i, v in enumerate(f.vertices):
                v1 = f.vertices[i - 1]
                e = (min(v, v1), max(v, v1))
                edges_counts[e] = edges_counts.get(e, 0) + 1

        # all meshes have to be manifold for this to work:
        # every edge of a manifold mesh is used by exactly 2 faces
        manifold = manifold and not any(i in edges_counts.values() for i in [0, 1, 3, 4])

        # rough render-face estimate from generator modifiers
        for m in ob.modifiers:
            if m.type == 'SUBSURF' or m.type == 'MULTIRES':
                fcor *= 4 ** m.render_levels
            if m.type == 'SOLIDIFY':  # rough estimate, not to waste time evaluating nonmanifold edges
                fcor *= 2
            if m.type == 'ARRAY':
                fcor *= m.count
            if m.type == 'MIRROR':
                fcor *= 2
            if m.type == 'DECIMATE':
                fcor *= m.ratio
        fcr += fcor

        if is_temp_mesh:
            # free the temporary curve mesh (was leaked before)
            ob.to_mesh_clear()

    # write out props
    props.face_count = fc
    props.face_count_render = fcr
    # pure-type checks first so 'TRI'/'QUAD' are reachable
    if quads > 0 and tris == 0 and ngons == 0:
        props.mesh_poly_type = 'QUAD'
    elif tris > 0 and quads == 0 and ngons == 0:
        props.mesh_poly_type = 'TRI'
    elif quads > tris and quads > ngons:
        props.mesh_poly_type = 'QUAD_DOMINANT'
    elif tris > quads and tris > ngons:
        props.mesh_poly_type = 'TRI_DOMINANT'
    elif ngons > quads and ngons > tris:
        props.mesh_poly_type = 'NGON'
    else:
        props.mesh_poly_type = 'OTHER'

    props.manifold = manifold
|
||||
|
||||
|
||||
def countObs(props, obs):
    """Store the number of objects on props.

    A per-type tally is computed as in the original implementation but is
    currently not written anywhere.
    """
    ob_types = {}
    for ob in obs:
        key = ob.type.lower()
        ob_types[key] = ob_types.get(key, 0) + 1
    props.object_count = len(obs)
|
||||
|
||||
|
||||
def check_modifiers(props, obs):
    """Collect a comma-separated list of modifier types used by obs onto props.modifiers.

    Also flags props.simulation when any effect/physics modifier or a rigid
    body is present.
    """
    # modif_mapping = {
    # }
    modifiers = []
    for ob in obs:
        for m in ob.modifiers:
            # normalize e.g. 'PARTICLE_SYSTEM' -> 'particle system'
            mtype = m.type
            mtype = mtype.replace('_', ' ')
            mtype = mtype.lower()
            # mtype = mtype.capitalize()
            if mtype not in modifiers:
                modifiers.append(mtype)
            if m.type == 'SMOKE':
                if m.smoke_type == 'FLOW':
                    smt = m.flow_settings.smoke_flow_type
                    # tag fire simulations separately from plain smoke
                    if smt == 'BOTH' or smt == 'FIRE':
                        modifiers.append('fire')

        # for mt in modifiers:
        # modifier names (post-normalization) that imply a simulation
        effectmodifiers = ['soft body', 'fluid simulation', 'particle system', 'collision', 'smoke', 'cloth',
                           'dynamic paint']
        for m in modifiers:
            if m in effectmodifiers:
                props.simulation = True
        if ob.rigid_body is not None:
            props.simulation = True
            modifiers.append('rigid body')
    # flatten into "type1,type2,..." for the props string field
    finalstr = ''
    for m in modifiers:
        finalstr += m
        finalstr += ','
    props.modifiers = finalstr
|
||||
|
||||
|
||||
def get_autotags():
    """ call all analysis functions """
    # dispatch on the asset type currently selected in the BlenderKit UI
    ui = bpy.context.window_manager.blenderkitUI
    if ui.asset_type == 'MODEL':
        ob = utils.get_active_model()
        obs = utils.get_hierarchy(ob)
        props = ob.blenderkit
        if props.name == "":
            # default the asset name to the object name
            props.name = ob.name

        # reset some properties here, because they might not get re-filled at all when they aren't needed anymore.
        props.texture_resolution_max = 0
        props.texture_resolution_min = 0
        # disabled printing checking, some 3d print addon bug.
        # check_printable( props, obs)
        check_render_engine(props, obs)

        dim, bbox_min, bbox_max = utils.get_dimensions(obs)
        props.dimensions = dim
        props.bbox_min = bbox_min
        props.bbox_max = bbox_max

        check_rig(props, obs)
        check_anim(props, obs)
        check_meshprops(props, obs)
        check_modifiers(props, obs)
        countObs(props, obs)
    elif ui.asset_type == 'MATERIAL':
        # reset some properties here, because they might not get re-filled at all when they aren't needed anymore.

        mat = utils.get_active_asset()
        props = mat.blenderkit
        props.texture_resolution_max = 0
        props.texture_resolution_min = 0
        check_material(props, mat)
    elif ui.asset_type == 'HDR':
        # reset some properties here, because they might not get re-filled at all when they aren't needed anymore.

        hdr = utils.get_active_asset()
        props = hdr.blenderkit
        # for HDRs, only the larger image dimension is recorded
        props.texture_resolution_max = max(hdr.size[0],hdr.size[1])
|
||||
|
||||
|
||||
class AutoFillTags(bpy.types.Operator):
    """Fill tags for asset. Now run before upload, no need to interact from user side"""
    bl_idname = "object.blenderkit_auto_tags"
    bl_label = "Generate Auto Tags for BlenderKit"
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # only available when the active asset is in an uploadable state
        return utils.uploadable_asset_poll()

    def execute(self, context):
        # run all analysis functions for the currently selected asset type
        get_autotags()
        return {'FINISHED'}
|
||||
|
||||
|
||||
def register_asset_inspector():
    # register the auto-tag operator; called from the add-on's main register()
    bpy.utils.register_class(AutoFillTags)
|
||||
|
||||
|
||||
def unregister_asset_inspector():
    # unregister the auto-tag operator; called from the add-on's main unregister()
    bpy.utils.unregister_class(AutoFillTags)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # fixed: `register` is not defined in this module — the registration entry
    # point defined above is register_asset_inspector()
    register_asset_inspector()
|
|
@ -1,8 +0,0 @@
|
|||
import sys
import json
from blenderkit import resolutions

# Path of the JSON file with export data, passed as the last CLI argument
# by the parent Blender process.
BLENDERKIT_EXPORT_DATA = sys.argv[-1]

if __name__ == "__main__":
    # use the named constant instead of re-reading sys.argv (same value,
    # but keeps the constant meaningful)
    resolutions.run_bg(BLENDERKIT_EXPORT_DATA)
|
|
@ -1,671 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
from blenderkit import paths, utils, bg_blender, ui_panels, icons, tasks_queue, download
|
||||
|
||||
import tempfile, os, subprocess, json, sys
|
||||
|
||||
import bpy
|
||||
from bpy.props import (
|
||||
FloatProperty,
|
||||
IntProperty,
|
||||
EnumProperty,
|
||||
BoolProperty,
|
||||
StringProperty,
|
||||
)
|
||||
|
||||
# name of the JSON file used to hand export parameters to the background thumbnailer
BLENDERKIT_EXPORT_DATA_FILE = "data.json"

# (identifier, UI label, description) triples for EnumProperty items:
# thumbnail render resolutions
thumbnail_resolutions = (
    ('256', '256', ''),
    ('512', '512', ''),
    ('1024', '1024 - minimum for public', ''),
    ('2048', '2048', ''),
)

# camera angle presets for thumbnail rendering
thumbnail_angles = (
    ('DEFAULT', 'default', ''),
    ('FRONT', 'front', ''),
    ('SIDE', 'side', ''),
    ('TOP', 'top', ''),
)

# how the asset is snapped into the thumbnail scene
thumbnail_snap = (
    ('GROUND', 'ground', ''),
    ('WALL', 'wall', ''),
    ('CEILING', 'ceiling', ''),
    ('FLOAT', 'floating', ''),
)
|
||||
|
||||
|
||||
def get_texture_ui(tpath, iname):
    """Return the hidden UI texture named iname if it already points at tpath.

    Otherwise schedule (re)loading of the image and texture on the task queue
    and return None until they are ready.
    """
    if tpath.startswith('//'):
        tpath = bpy.path.abspath(tpath)

    tex = bpy.data.textures.get(iname)
    is_ready = tex and tex.image and tex.image.filepath == tpath
    if not is_ready:
        # only_last=True collapses repeated requests for the same texture
        tasks_queue.add_task((utils.get_hidden_image, (tpath, iname)), only_last=True)
        tasks_queue.add_task((utils.get_hidden_texture, (iname,)), only_last=True)
        return None
    return tex
|
||||
|
||||
|
||||
def check_thumbnail(props, imgpath):
    """Validate the upload thumbnail at imgpath and update props accordingly.

    On success, sets props.has_thumbnail, clears the state message and returns
    the loaded image. On failure, stores a human-readable reason in
    props.thumbnail_generating_state and returns None.
    """
    img = utils.get_hidden_image(imgpath, 'upload_preview', force_reload=True)
    # size/format validation is intentionally disabled — it caused problems
    # on some platforms (square check, min 512px, JPEG/PNG check)
    if img is not None:
        props.has_thumbnail = True
        props.thumbnail_generating_state = ''
        tex = utils.get_hidden_texture(img.name)
        return img

    props.has_thumbnail = False
    output = ''
    if img is None or img.size[0] == 0 or img.filepath.find('thumbnail_notready.jpg') > -1:
        output += 'No thumbnail or wrong file path\n'
    props.thumbnail_generating_state = output
|
||||
|
||||
|
||||
def update_upload_model_preview(self, context):
    """Property-update callback: re-validate the active model's upload thumbnail."""
    asset_object = utils.get_active_model()
    if asset_object is None:
        return
    asset_props = asset_object.blenderkit
    check_thumbnail(asset_props, asset_props.thumbnail)
|
||||
|
||||
|
||||
def update_upload_scene_preview(self, context):
    """Property-update callback: re-validate the scene's upload thumbnail."""
    scene_props = bpy.context.scene.blenderkit
    check_thumbnail(scene_props, scene_props.thumbnail)
|
||||
|
||||
|
||||
def update_upload_material_preview(self, context):
    """Property-update callback: re-validate the active material's upload thumbnail."""
    # Guard clauses replace the original chained condition; semantics unchanged.
    if not hasattr(bpy.context, 'active_object'):
        return
    if bpy.context.view_layer.objects.active is None:
        return
    material = bpy.context.active_object.active_material
    if material is None:
        return
    material_props = material.blenderkit
    check_thumbnail(material_props, material_props.thumbnail)
|
||||
|
||||
|
||||
def update_upload_brush_preview(self, context):
    """Property-update callback: re-validate the active brush's upload thumbnail."""
    active_brush = utils.get_active_brush()
    if active_brush is None:
        return
    icon_path = bpy.path.abspath(active_brush.icon_filepath)
    check_thumbnail(active_brush.blenderkit, icon_path)
|
||||
|
||||
|
||||
def start_thumbnailer(self=None, json_args=None, props=None, wait=False, add_bg_process=True):
    """Launch a background Blender instance that renders a model thumbnail.

    Parameters
    ----------
    self - calling operator (optional); used only for error reporting
    json_args - arguments for the background script, dumped to a JSON data file
    props - blenderkit upload props (accepted for symmetry; not used here)
    wait - wait for the render to finish (accepted for symmetry; not used here)
    add_bg_process - register the subprocess with the bg_blender monitor

    Returns
    -------
    {'FINISHED'} always (errors are reported, not raised)
    """
    binary_path = bpy.app.binary_path
    script_path = os.path.dirname(os.path.realpath(__file__))

    tfpath = paths.get_thumbnailer_filepath()
    datafile = os.path.join(json_args['tempdir'], BLENDERKIT_EXPORT_DATA_FILE)
    try:
        # Hand the arguments over to the background script via a JSON file.
        with open(datafile, 'w', encoding='utf-8') as s:
            json.dump(json_args, s, ensure_ascii=False, indent=4)

        proc = subprocess.Popen([
            binary_path,
            "--background",
            "-noaudio",
            tfpath,
            "--python", os.path.join(script_path, "autothumb_model_bg.py"),
            "--", datafile,
        ], bufsize=1, stdout=subprocess.PIPE, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())

        # Paths the bg process monitor evaluates to read/write progress state.
        eval_path_computing = "bpy.data.objects['%s'].blenderkit.is_generating_thumbnail" % json_args['asset_name']
        eval_path_state = "bpy.data.objects['%s'].blenderkit.thumbnail_generating_state" % json_args['asset_name']
        eval_path = "bpy.data.objects['%s']" % json_args['asset_name']

        bg_blender.add_bg_process(name=f"{json_args['asset_name']} thumbnailer",
                                  eval_path_computing=eval_path_computing,
                                  eval_path_state=eval_path_state,
                                  eval_path=eval_path, process_type='THUMBNAILER', process=proc)

    except Exception as e:
        # self defaults to None, so guard before reporting (the material
        # thumbnailer below already does this; previously this path crashed).
        if self:
            self.report({'WARNING'}, "Error while exporting file: %s" % str(e))
        else:
            print(e)
    return {'FINISHED'}
|
||||
|
||||
|
||||
def start_material_thumbnailer(self=None, json_args=None, props=None, wait=False, add_bg_process=True):
    '''
    Launch a background Blender instance that renders a material thumbnail.

    Parameters
    ----------
    self - calling operator (optional); used only for error reporting
    json_args - all arguments for the background script, dumped to a JSON data file
    props - blenderkit upload props with thumbnail settings, to communicate back; if not present, not used.
    wait - wait for the rendering to finish (blocks while echoing subprocess output)
    add_bg_process - NOTE(review): accepted but not consulted in this body - confirm intended

    Returns
    -------
    {'FINISHED'} always (errors are reported, not raised)
    '''
    if props:
        props.is_generating_thumbnail = True
        props.thumbnail_generating_state = 'starting blender instance'

    binary_path = bpy.app.binary_path
    script_path = os.path.dirname(os.path.realpath(__file__))

    tfpath = paths.get_material_thumbnailer_filepath()
    datafile = os.path.join(json_args['tempdir'], BLENDERKIT_EXPORT_DATA_FILE)

    try:
        # Hand the arguments over to the background script via a JSON file.
        with open(datafile, 'w', encoding='utf-8') as s:
            json.dump(json_args, s, ensure_ascii=False, indent=4)

        proc = subprocess.Popen([
            binary_path,
            "--background",
            "-noaudio",
            tfpath,
            "--python", os.path.join(script_path, "autothumb_material_bg.py"),
            "--", datafile,
        ], bufsize=1, stdout=subprocess.PIPE, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())

        # Paths the bg process monitor evaluates to read/write progress state.
        eval_path_computing = "bpy.data.materials['%s'].blenderkit.is_generating_thumbnail" % json_args['asset_name']
        eval_path_state = "bpy.data.materials['%s'].blenderkit.thumbnail_generating_state" % json_args['asset_name']
        eval_path = "bpy.data.materials['%s']" % json_args['asset_name']

        bg_blender.add_bg_process(name=f"{json_args['asset_name']} thumbnailer", eval_path_computing=eval_path_computing,
                                  eval_path_state=eval_path_state,
                                  eval_path=eval_path, process_type='THUMBNAILER', process=proc)
        if props:
            props.thumbnail_generating_state = 'Saving .blend file'

        if wait:
            # Block until the background instance exits, echoing its output.
            while proc.poll() is None:
                stdout_data, stderr_data = proc.communicate()
                print(stdout_data)
    except Exception as e:
        if self:
            self.report({'WARNING'}, "Error while packing file: %s" % str(e))
        else:
            print(e)
    return {'FINISHED'}
|
||||
|
||||
|
||||
class GenerateThumbnailOperator(bpy.types.Operator):
    """Generate Cycles thumbnail for model assets"""
    bl_idname = "object.blenderkit_generate_thumbnail"
    bl_label = "BlenderKit Thumbnail Generator"
    bl_options = {'REGISTER', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # Needs an active object to locate the asset to thumbnail.
        return bpy.context.view_layer.objects.active is not None

    def draw(self, context):
        # Thumbnail settings are stored on the topmost parent (the asset root).
        ob = bpy.context.active_object
        while ob.parent is not None:
            ob = ob.parent
        props = ob.blenderkit
        layout = self.layout
        layout.label(text='thumbnailer settings')
        layout.prop(props, 'thumbnail_background_lightness')
        layout.prop(props, 'thumbnail_angle')
        layout.prop(props, 'thumbnail_snap_to')
        layout.prop(props, 'thumbnail_samples')
        layout.prop(props, 'thumbnail_resolution')
        layout.prop(props, 'thumbnail_denoising')
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        layout.prop(preferences, "thumbnail_use_gpu")

    def execute(self, context):
        # Save a copy of the scene and launch a background render of the asset.
        asset = utils.get_active_model()
        asset.blenderkit.is_generating_thumbnail = True
        asset.blenderkit.thumbnail_generating_state = 'starting blender instance'

        tempdir = tempfile.mkdtemp()
        ext = '.blend'
        filepath = os.path.join(tempdir, "thumbnailer_blenderkit" + ext)

        # Thumbnails are written next to the saved .blend; with an unsaved file
        # they go to the tempdir and the stored path cannot be Blender-relative.
        path_can_be_relative = True
        file_dir = os.path.dirname(bpy.data.filepath)
        if file_dir == '':
            file_dir = tempdir
            path_can_be_relative = False

        an_slug = paths.slugify(asset.name)
        thumb_path = os.path.join(file_dir, an_slug)
        if path_can_be_relative:
            rel_thumb_path = os.path.join('//', an_slug)
        else:
            rel_thumb_path = thumb_path

        # Auto-increment the file name so older thumbnails are not overwritten.
        i = 0
        while os.path.isfile(thumb_path + '.jpg'):
            thumb_path = os.path.join(file_dir, an_slug + '_' + str(i).zfill(4))
            rel_thumb_path = os.path.join('//', an_slug + '_' + str(i).zfill(4))
            i += 1
        bkit = asset.blenderkit

        bkit.thumbnail = rel_thumb_path + '.jpg'
        bkit.thumbnail_generating_state = 'Saving .blend file'

        # if this isn't here, blender crashes.
        bpy.context.preferences.filepaths.file_preview_type = 'NONE'
        # save a copy of actual scene but don't interfere with the users models
        bpy.ops.wm.save_as_mainfile(filepath=filepath, compress=False, copy=True)
        # get all included objects
        obs = utils.get_hierarchy(asset)
        obnames = []
        for ob in obs:
            obnames.append(ob.name)

        # NOTE(review): "type" is first set to "material" and immediately
        # overridden to "model" by the update() below - confirm intended.
        args_dict = {
            "type": "material",
            "asset_name": asset.name,
            "filepath": filepath,
            "thumbnail_path": thumb_path,
            "tempdir": tempdir,
        }
        thumbnail_args = {
            "type": "model",
            "models": str(obnames),
            "thumbnail_angle": bkit.thumbnail_angle,
            "thumbnail_snap_to": bkit.thumbnail_snap_to,
            "thumbnail_background_lightness": bkit.thumbnail_background_lightness,
            "thumbnail_resolution": bkit.thumbnail_resolution,
            "thumbnail_samples": bkit.thumbnail_samples,
            "thumbnail_denoising": bkit.thumbnail_denoising,
        }
        args_dict.update(thumbnail_args)

        start_thumbnailer(self,
                          json_args=args_dict,
                          props=asset.blenderkit, wait=False)
        return {'FINISHED'}

    def invoke(self, context, event):
        wm = context.window_manager
        # if bpy.data.filepath == '':
        #     ui_panels.ui_message(
        #         title="Can't render thumbnail",
        #         message="please save your file first")
        #
        #     return {'FINISHED'}

        return wm.invoke_props_dialog(self)
|
||||
|
||||
|
||||
class ReGenerateThumbnailOperator(bpy.types.Operator):
    """
    Generate default thumbnail with Cycles renderer and upload it.
    Works also for assets from search results, without being downloaded before
    """
    bl_idname = "object.blenderkit_regenerate_thumbnail"
    bl_label = "BlenderKit Thumbnail Re-generate"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Index of the asset in the current search results; -1 means "not set".
    asset_index: IntProperty(name="Asset Index", description='asset index in search results', default=-1)

    thumbnail_background_lightness: FloatProperty(name="Thumbnail Background Lightness",
                                                  description="set to make your material stand out", default=1.0,
                                                  min=0.01, max=10)

    thumbnail_angle: EnumProperty(
        name='Thumbnail Angle',
        items=thumbnail_angles,
        default='DEFAULT',
        description='thumbnailer angle',
    )

    thumbnail_snap_to: EnumProperty(
        name='Model Snaps To:',
        items=thumbnail_snap,
        default='GROUND',
        description='typical placing of the interior. Leave on ground for most objects that respect gravity :)',
    )

    thumbnail_resolution: EnumProperty(
        name="Resolution",
        items=thumbnail_resolutions,
        description="Thumbnail resolution",
        default="1024",
    )

    thumbnail_samples: IntProperty(name="Cycles Samples",
                                   description="cycles samples setting", default=100,
                                   min=5, max=5000)
    thumbnail_denoising: BoolProperty(name="Use Denoising",
                                      description="Use denoising", default=True)

    @classmethod
    def poll(cls, context):
        # Works on search results, so no active object is required.
        return True  # bpy.context.view_layer.objects.active is not None

    def draw(self, context):
        # Settings live on the operator itself (asset may not be downloaded).
        props = self
        layout = self.layout
        # layout.label('This will re-generate thumbnail and directly upload it to server. You should see your updated thumbnail online depending ')
        layout.label(text='thumbnailer settings')
        layout.prop(props, 'thumbnail_background_lightness')
        layout.prop(props, 'thumbnail_angle')
        layout.prop(props, 'thumbnail_snap_to')
        layout.prop(props, 'thumbnail_samples')
        layout.prop(props, 'thumbnail_resolution')
        layout.prop(props, 'thumbnail_denoising')
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        layout.prop(preferences, "thumbnail_use_gpu")

    def execute(self, context):
        if not self.asset_index > -1:
            return {'CANCELLED'}

        # either get the data from search results
        sr = bpy.context.window_manager['search results']
        asset_data = sr[self.asset_index].to_dict()

        tempdir = tempfile.mkdtemp()

        an_slug = paths.slugify(asset_data['name'])
        thumb_path = os.path.join(tempdir, an_slug)

        # NOTE(review): "type" is first set to "material" and immediately
        # overridden to "model" by the update() below - confirm intended.
        args_dict = {
            "type": "material",
            "asset_name": asset_data['name'],
            "asset_data": asset_data,
            # "filepath": filepath,
            "thumbnail_path": thumb_path,
            "tempdir": tempdir,
            "do_download": True,
            "upload_after_render": True,
        }
        thumbnail_args = {
            "type": "model",
            "thumbnail_angle": self.thumbnail_angle,
            "thumbnail_snap_to": self.thumbnail_snap_to,
            "thumbnail_background_lightness": self.thumbnail_background_lightness,
            "thumbnail_resolution": self.thumbnail_resolution,
            "thumbnail_samples": self.thumbnail_samples,
            "thumbnail_denoising": self.thumbnail_denoising,
        }
        args_dict.update(thumbnail_args)

        start_thumbnailer(self,
                          json_args=args_dict,
                          wait=False)
        return {'FINISHED'}

    def invoke(self, context, event):
        wm = context.window_manager
        # if bpy.data.filepath == '':
        #     ui_panels.ui_message(
        #         title="Can't render thumbnail",
        #         message="please save your file first")
        #
        #     return {'FINISHED'}

        return wm.invoke_props_dialog(self)
|
||||
|
||||
|
||||
class GenerateMaterialThumbnailOperator(bpy.types.Operator):
    """Generate default thumbnail with Cycles renderer"""
    bl_idname = "object.blenderkit_generate_material_thumbnail"
    bl_label = "BlenderKit Material Thumbnail Generator"
    bl_options = {'REGISTER', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # Needs an active object carrying the material to thumbnail.
        return bpy.context.view_layer.objects.active is not None

    def check(self, context):
        # Returning True makes the dialog redraw after every property change.
        return True

    def draw(self, context):
        layout = self.layout
        props = bpy.context.active_object.active_material.blenderkit
        layout.prop(props, 'thumbnail_generator_type')
        layout.prop(props, 'thumbnail_scale')
        layout.prop(props, 'thumbnail_background')
        if props.thumbnail_background:
            layout.prop(props, 'thumbnail_background_lightness')
        layout.prop(props, 'thumbnail_resolution')
        layout.prop(props, 'thumbnail_samples')
        layout.prop(props, 'thumbnail_denoising')
        layout.prop(props, 'adaptive_subdivision')
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        layout.prop(preferences, "thumbnail_use_gpu")

    def execute(self, context):
        # Save a scene copy and launch a background render of the material.
        asset = bpy.context.active_object.active_material
        tempdir = tempfile.mkdtemp()
        filepath = os.path.join(tempdir, "material_thumbnailer_cycles.blend")
        # if this isn't here, blender crashes.
        bpy.context.preferences.filepaths.file_preview_type = 'NONE'

        # save a copy of actual scene but don't interfere with the users models
        bpy.ops.wm.save_as_mainfile(filepath=filepath, compress=False, copy=True)

        thumb_dir = os.path.dirname(bpy.data.filepath)
        an_slug = paths.slugify(asset.name)

        thumb_path = os.path.join(thumb_dir, an_slug)
        rel_thumb_path = os.path.join('//', an_slug)

        # auto increase number of the generated thumbnail.
        i = 0
        while os.path.isfile(thumb_path + '.png'):
            thumb_path = os.path.join(thumb_dir, an_slug + '_' + str(i).zfill(4))
            rel_thumb_path = os.path.join('//', an_slug + '_' + str(i).zfill(4))
            i += 1

        asset.blenderkit.thumbnail = rel_thumb_path + '.png'
        bkit = asset.blenderkit

        args_dict = {
            "type": "material",
            "asset_name": asset.name,
            "filepath": filepath,
            "thumbnail_path": thumb_path,
            "tempdir": tempdir,
        }

        thumbnail_args = {
            "thumbnail_type": bkit.thumbnail_generator_type,
            "thumbnail_scale": bkit.thumbnail_scale,
            "thumbnail_background": bkit.thumbnail_background,
            "thumbnail_background_lightness": bkit.thumbnail_background_lightness,
            "thumbnail_resolution": bkit.thumbnail_resolution,
            "thumbnail_samples": bkit.thumbnail_samples,
            "thumbnail_denoising": bkit.thumbnail_denoising,
            "adaptive_subdivision": bkit.adaptive_subdivision,
            "texture_size_meters": bkit.texture_size_meters,
        }
        args_dict.update(thumbnail_args)
        start_material_thumbnailer(self,
                                   json_args=args_dict,
                                   props=asset.blenderkit, wait=False)

        return {'FINISHED'}

    def invoke(self, context, event):
        wm = context.window_manager
        return wm.invoke_props_dialog(self)
|
||||
|
||||
|
||||
class ReGenerateMaterialThumbnailOperator(bpy.types.Operator):
    """
    Generate default thumbnail with Cycles renderer and upload it.
    Works also for assets from search results, without being downloaded before
    """
    bl_idname = "object.blenderkit_regenerate_material_thumbnail"
    bl_label = "BlenderKit Material Thumbnail Re-Generator"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Index of the asset in the current search results; -1 means "not set".
    asset_index: IntProperty(name="Asset Index", description='asset index in search results', default=-1)

    thumbnail_scale: FloatProperty(name="Thumbnail Object Size",
                                   description="Size of material preview object in meters."
                                               "Change for materials that look better at sizes different than 1m",
                                   default=1, min=0.00001, max=10)
    thumbnail_background: BoolProperty(name="Thumbnail Background (for Glass only)",
                                       description="For refractive materials, you might need a background.\n"
                                                   "Don't use for other types of materials.\n"
                                                   "Transparent background is preferred",
                                       default=False)
    thumbnail_background_lightness: FloatProperty(name="Thumbnail Background Lightness",
                                                  description="Set to make your material stand out with enough contrast",
                                                  default=.9,
                                                  min=0.00001, max=1)
    thumbnail_samples: IntProperty(name="Cycles Samples",
                                   description="Cycles samples", default=100,
                                   min=5, max=5000)
    thumbnail_denoising: BoolProperty(name="Use Denoising",
                                      description="Use denoising", default=True)
    adaptive_subdivision: BoolProperty(name="Adaptive Subdivide",
                                       description="Use adaptive displacement subdivision", default=False)

    thumbnail_resolution: EnumProperty(
        name="Resolution",
        items=thumbnail_resolutions,
        description="Thumbnail resolution",
        default="1024",
    )

    thumbnail_generator_type: EnumProperty(
        name="Thumbnail Style",
        items=(
            ('BALL', 'Ball', ""),
            ('BALL_COMPLEX', 'Ball complex', 'Complex ball to highlight edgewear or material thickness'),
            ('FLUID', 'Fluid', 'Fluid'),
            ('CLOTH', 'Cloth', 'Cloth'),
            ('HAIR', 'Hair', 'Hair ')
        ),
        description="Style of asset",
        default="BALL",
    )

    @classmethod
    def poll(cls, context):
        # Works on search results, so no active object is required.
        return True  # bpy.context.view_layer.objects.active is not None

    def check(self, context):
        # Returning True makes the dialog redraw after every property change.
        return True

    def draw(self, context):
        layout = self.layout
        # Settings live on the operator itself (asset may not be downloaded).
        props = self
        layout.prop(props, 'thumbnail_generator_type')
        layout.prop(props, 'thumbnail_scale')
        layout.prop(props, 'thumbnail_background')
        if props.thumbnail_background:
            layout.prop(props, 'thumbnail_background_lightness')
        layout.prop(props, 'thumbnail_resolution')
        layout.prop(props, 'thumbnail_samples')
        layout.prop(props, 'thumbnail_denoising')
        layout.prop(props, 'adaptive_subdivision')
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        layout.prop(preferences, "thumbnail_use_gpu")

    def execute(self, context):

        if not self.asset_index > -1:
            return {'CANCELLED'}

        # either get the data from search results
        sr = bpy.context.window_manager['search results']
        asset_data = sr[self.asset_index].to_dict()
        an_slug = paths.slugify(asset_data['name'])

        tempdir = tempfile.mkdtemp()

        thumb_path = os.path.join(tempdir,an_slug)

        args_dict = {
            "type": "material",
            "asset_name": asset_data['name'],
            "asset_data": asset_data,
            "thumbnail_path": thumb_path,
            "tempdir": tempdir,
            "do_download": True,
            "upload_after_render": True,
        }
        thumbnail_args = {
            "thumbnail_type": self.thumbnail_generator_type,
            "thumbnail_scale": self.thumbnail_scale,
            "thumbnail_background": self.thumbnail_background,
            "thumbnail_background_lightness": self.thumbnail_background_lightness,
            "thumbnail_resolution": self.thumbnail_resolution,
            "thumbnail_samples": self.thumbnail_samples,
            "thumbnail_denoising": self.thumbnail_denoising,
            "adaptive_subdivision": self.adaptive_subdivision,
            # Asset is not local, so texture size is read from asset params.
            "texture_size_meters": utils.get_param(asset_data, 'textureSizeMeters', 1.0),
        }
        args_dict.update(thumbnail_args)
        start_material_thumbnailer(self,
                                   json_args=args_dict,
                                   wait=False)

        return {'FINISHED'}

    def invoke(self, context, event):
        # scene = bpy.context.scene
        # ui_props = bpy.context.window_manager.blenderkitUI
        # if ui_props.active_index > -1:
        #     sr = bpy.context.window_manager['search results']
        #     self.asset_data = dict(sr[ui_props.active_index])
        # else:
        #
        #     active_asset = utils.get_active_asset_by_type(asset_type = self.asset_type)
        #     self.asset_data = active_asset.get('asset_data')

        wm = context.window_manager
        return wm.invoke_props_dialog(self)
|
||||
|
||||
|
||||
def register_thumbnailer():
    """Register all thumbnailer operator classes with Blender."""
    for operator_class in (
            GenerateThumbnailOperator,
            ReGenerateThumbnailOperator,
            GenerateMaterialThumbnailOperator,
            ReGenerateMaterialThumbnailOperator,
    ):
        bpy.utils.register_class(operator_class)
|
||||
|
||||
|
||||
def unregister_thumbnailer():
    """Unregister all thumbnailer operator classes from Blender."""
    for operator_class in (
            GenerateThumbnailOperator,
            ReGenerateThumbnailOperator,
            GenerateMaterialThumbnailOperator,
            ReGenerateMaterialThumbnailOperator,
    ):
        bpy.utils.unregister_class(operator_class)
|
|
@ -1,171 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
|
||||
from blenderkit import utils, append_link, bg_blender, upload_bg, download
|
||||
|
||||
import sys, json, math, os
|
||||
import bpy
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
# Path of the JSON data file handed over by the parent Blender process
# (always the last CLI argument after '--').
BLENDERKIT_EXPORT_DATA = sys.argv[-1]
|
||||
|
||||
|
||||
def render_thumbnails():
    # Render the current frame to the scene's configured output filepath.
    bpy.ops.render.render(write_still=True, animation=False)
|
||||
|
||||
|
||||
def unhide_collection(cname):
    """Make scene child collection `cname` visible, renderable and selectable."""
    target = bpy.context.scene.collection.children[cname]
    for hide_flag in ('hide_viewport', 'hide_render', 'hide_select'):
        setattr(target, hide_flag, False)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Background entry point: configure the pre-made material-preview scene
    # from the JSON data file, render the thumbnail, optionally upload it.
    try:
        bg_blender.progress('preparing thumbnail scene')
        user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

        with open(BLENDERKIT_EXPORT_DATA, 'r', encoding='utf-8') as s:
            data = json.load(s)
        if data.get('do_download'):
            # need to save the file, so that asset doesn't get downloaded into addon directory
            temp_blend_path = os.path.join(data['tempdir'], 'temp.blend')

            # if this isn't here, blender crashes.
            bpy.context.preferences.filepaths.file_preview_type = 'NONE'

            bpy.ops.wm.save_as_mainfile(filepath=temp_blend_path)

            asset_data = data['asset_data']
            has_url = download.get_download_url(asset_data, download.get_scene_id(), user_preferences.api_key, tcom=None,
                                                resolution='blend')
            if not has_url:
                # (typo fixed: was "thumnbail")
                bg_blender.progress("couldn't download asset for thumbnail re-rendering")
                exit()
            # download first, or rather make sure if it's already downloaded
            bg_blender.progress('downloading asset')
            fpath = download.download_asset_file(asset_data)
            data['filepath'] = fpath

        mat = append_link.append_material(file_name=data['filepath'], matname=data["asset_name"], link=True,
                                          fake_user=False)

        s = bpy.context.scene

        # Reveal the preview-object collection matching the chosen style.
        colmapdict = {
            'BALL': 'Ball',
            'BALL_COMPLEX': 'Ball complex',
            'FLUID': 'Fluid',
            'CLOTH': 'Cloth',
            'HAIR': 'Hair'
        }
        unhide_collection(colmapdict[data["thumbnail_type"]])
        if data['thumbnail_background']:
            unhide_collection('Background')
            bpy.data.materials["bg checker colorable"].node_tree.nodes['input_level'].outputs['Value'].default_value \
                = data['thumbnail_background_lightness']

        # Scale the whole rig through the 'scaler' empty, then bake the scale.
        tscale = data["thumbnail_scale"]
        scaler = bpy.context.view_layer.objects['scaler']
        scaler.scale = (tscale, tscale, tscale)
        utils.activate(scaler)
        bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)

        bpy.context.view_layer.update()

        # Assign the appended material to every preview object and normalize
        # its texture space so textures keep their real-world size.
        for ob in bpy.context.visible_objects:
            if ob.name[:15] == 'MaterialPreview':
                utils.activate(ob)
                bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)

                ob.material_slots[0].material = mat
                ob.data.use_auto_texspace = False
                ob.data.texspace_size.x = 1  # / tscale
                ob.data.texspace_size.y = 1  # / tscale
                ob.data.texspace_size.z = 1  # / tscale
                if data["adaptive_subdivision"] == True:
                    ob.cycles.use_adaptive_subdivision = True
                else:
                    ob.cycles.use_adaptive_subdivision = False
                ts = data['texture_size_meters']
                if data["thumbnail_type"] in ['BALL', 'BALL_COMPLEX', 'CLOTH']:
                    utils.automap(ob.name, tex_size=ts / tscale, just_scale=True, bg_exception=True)
        bpy.context.view_layer.update()

        s.cycles.volume_step_size = tscale * .1

        if user_preferences.thumbnail_use_gpu:
            bpy.context.scene.cycles.device = 'GPU'

        s.cycles.samples = data['thumbnail_samples']
        bpy.context.view_layer.cycles.use_denoising = data['thumbnail_denoising']

        # import blender's HDR here
        hdr_path = Path('datafiles/studiolights/world/interior.exr')
        bpath = Path(bpy.utils.resource_path('LOCAL'))
        ipath = bpath / hdr_path
        ipath = str(ipath)

        # this stuff is for mac and possibly linux. For blender // means relative path.
        # for Mac, // means start of absolute path
        if ipath.startswith('//'):
            ipath = ipath[1:]

        img = bpy.data.images['interior.exr']
        img.filepath = ipath
        img.reload()

        bpy.context.scene.render.resolution_x = int(data['thumbnail_resolution'])
        bpy.context.scene.render.resolution_y = int(data['thumbnail_resolution'])

        bpy.context.scene.render.filepath = data['thumbnail_path']
        bg_blender.progress('rendering thumbnail')
        render_thumbnails()
        if data.get('upload_after_render') and data.get('asset_data'):
            bg_blender.progress('uploading thumbnail')
            preferences = bpy.context.preferences.addons['blenderkit'].preferences

            file = {
                "type": "thumbnail",
                "index": 0,
                "file_path": data['thumbnail_path'] + '.png'
            }
            upload_data = {
                "name": data['asset_data']['name'],
                "token": preferences.api_key,
                "id": data['asset_data']['id']
            }
            upload_bg.upload_file(upload_data, file)
        bg_blender.progress('background autothumbnailer finished successfully')

    except Exception as e:
        print(e)
        import traceback

        traceback.print_exc()

        sys.exit(1)
|
|
@ -1,207 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
|
||||
from blenderkit import utils, append_link, bg_blender, download, upload_bg, upload
|
||||
|
||||
import sys, json, math, os
|
||||
import bpy
|
||||
import mathutils
|
||||
|
||||
# Path of the JSON data file handed over by the parent Blender process
# (always the last CLI argument after '--').
BLENDERKIT_EXPORT_DATA = sys.argv[-1]
|
||||
|
||||
|
||||
def get_obnames():
    """Read the export data file and return the list of object names to append.

    The 'models' entry is the repr() of a Python list written by the parent
    process; parse it with ast.literal_eval instead of eval so content of the
    data file can never execute arbitrary code.
    """
    import ast
    with open(BLENDERKIT_EXPORT_DATA, 'r', encoding='utf-8') as s:
        data = json.load(s)
    obnames = ast.literal_eval(data['models'])
    return obnames
|
||||
|
||||
|
||||
def center_obs_for_thumbnail(obs):
    """Center the asset hierarchy at the world origin and fit the camera rig to it."""
    s = bpy.context.scene
    # obs = bpy.context.selected_objects
    parent = obs[0]
    # Linked assets come in as an empty instancing a collection; measure the
    # instanced objects rather than the empty itself.
    if parent.type == 'EMPTY' and parent.instance_collection is not None:
        obs = parent.instance_collection.objects[:]

    while parent.parent != None:
        parent = parent.parent
    # reset parent rotation, so we see how it really snaps.
    parent.rotation_euler = (0, 0, 0)
    bpy.context.view_layer.update()
    minx, miny, minz, maxx, maxy, maxz = utils.get_bounds_worldspace(obs)

    # Bounding-box center in X/Y; the asset is placed to rest on Z = 0.
    cx = (maxx - minx) / 2 + minx
    cy = (maxy - miny) / 2 + miny
    for ob in s.collection.objects:
        ob.select_set(False)

    bpy.context.view_layer.objects.active = parent
    parent.location += mathutils.Vector((-cx, -cy, -minz))

    # Camera rig: camZ holds the camera height, 'scaler' sizes the whole rig.
    camZ = s.camera.parent.parent
    camZ.location.z = (maxz - minz) / 2
    dx = (maxx - minx)
    dy = (maxy - miny)
    dz = (maxz - minz)
    # Bounding-box diagonal, used as the rig's reference size.
    r = math.sqrt(dx * dx + dy * dy + dz * dz)

    scaler = bpy.context.view_layer.objects['scaler']
    scaler.scale = (r, r, r)
    # Pull the camera slightly closer than the full diagonal for tighter framing.
    coef = .7
    r *= coef
    camZ.scale = (r, r, r)
    bpy.context.view_layer.update()
|
||||
|
||||
|
||||
def render_thumbnails():
    # Render the current frame to the scene's configured output filepath.
    bpy.ops.render.render(write_still=True, animation=False)
|
||||
|
||||
|
||||
# Entry point: this file is executed by a headless Blender instance spawned by
# the addon; BLENDERKIT_EXPORT_DATA points at the JSON job description.
if __name__ == "__main__":
    try:
        with open(BLENDERKIT_EXPORT_DATA, 'r',encoding='utf-8') as s:
            data = json.load(s)

        user_preferences = bpy.context.preferences.addons['blenderkit'].preferences

        if data.get('do_download'):
            # if this isn't here, blender crashes.
            bpy.context.preferences.filepaths.file_preview_type = 'NONE'

            #need to save the file, so that asset doesn't get downloaded into addon directory
            temp_blend_path = os.path.join(data['tempdir'], 'temp.blend')
            bpy.ops.wm.save_as_mainfile(filepath = temp_blend_path)

            bg_blender.progress('Downloading asset')
            asset_data = data['asset_data']
            has_url = download.get_download_url(asset_data, download.get_scene_id(), user_preferences.api_key, tcom=None,
                                                resolution='blend')
            # NOTE(review): when no URL is obtained only a message is reported;
            # execution still continues into the download below — confirm intent.
            if not has_url == True:
                bg_blender.progress("couldn't download asset for thumnbail re-rendering")
            # download first, or rather make sure if it's already downloaded
            bg_blender.progress('downloading asset')
            fpath = download.download_asset_file(asset_data)
            data['filepath'] = fpath
            main_object, allobs = append_link.link_collection(fpath,
                                                              location=(0,0,0),
                                                              rotation=(0,0,0),
                                                              link=True,
                                                              name=asset_data['name'],
                                                              parent=None)
            allobs = [main_object]
        else:
            bg_blender.progress('preparing thumbnail scene')

            # Append the asset objects listed in the job from the local file.
            obnames = get_obnames()
            main_object, allobs = append_link.append_objects(file_name=data['filepath'],
                                                             obnames=obnames,
                                                             link=True)
        bpy.context.view_layer.update()

        # Cameras in the thumbnailer template scene, one per snap-to mode.
        camdict = {
            'GROUND': 'camera ground',
            'WALL': 'camera wall',
            'CEILING': 'camera ceiling',
            'FLOAT': 'camera float'
        }

        bpy.context.scene.camera = bpy.data.objects[camdict[data['thumbnail_snap_to']]]
        center_obs_for_thumbnail(allobs)
        bpy.context.scene.render.filepath = data['thumbnail_path']
        if user_preferences.thumbnail_use_gpu:
            bpy.context.scene.cycles.device = 'GPU'

        # Camera angles are keyed to scene frame numbers in the template.
        fdict = {
            'DEFAULT': 1,
            'FRONT': 2,
            'SIDE': 3,
            'TOP': 4,
        }
        s = bpy.context.scene
        s.frame_set(fdict[data['thumbnail_angle']])

        # Background geometry collections, one per snap-to mode.
        snapdict = {
            'GROUND': 'Ground',
            'WALL': 'Wall',
            'CEILING': 'Ceiling',
            'FLOAT': 'Float'
        }

        # Reveal only the collection matching the requested snap mode.
        collection = bpy.context.scene.collection.children[snapdict[data['thumbnail_snap_to']]]
        collection.hide_viewport = False
        collection.hide_render = False
        collection.hide_select = False

        main_object.rotation_euler = (0, 0, 0)
        # Drive the background material brightness from the job settings.
        bpy.data.materials['bkit background'].node_tree.nodes['Value'].outputs['Value'].default_value \
            = data['thumbnail_background_lightness']
        s.cycles.samples = data['thumbnail_samples']
        bpy.context.view_layer.cycles.use_denoising = data['thumbnail_denoising']
        bpy.context.view_layer.update()

        # import blender's HDR here
        # hdr_path = Path('datafiles/studiolights/world/interior.exr')
        # bpath = Path(bpy.utils.resource_path('LOCAL'))
        # ipath = bpath / hdr_path
        # ipath = str(ipath)

        # this stuff is for mac and possibly linux. For blender // means relative path.
        # for Mac, // means start of absolute path
        # if ipath.startswith('//'):
        #     ipath = ipath[1:]
        #
        # img = bpy.data.images['interior.exr']
        # img.filepath = ipath
        # img.reload()

        bpy.context.scene.render.resolution_x = int(data['thumbnail_resolution'])
        bpy.context.scene.render.resolution_y = int(data['thumbnail_resolution'])

        bg_blender.progress('rendering thumbnail')
        render_thumbnails()
        # Rendered output gains a .jpg extension; optionally upload it.
        fpath = data['thumbnail_path'] + '.jpg'
        if data.get('upload_after_render') and data.get('asset_data'):
            # try to patch for the sake of older assets where thumbnail update doesn't work for the reasont
            # that original thumbnail files aren't available.
            # upload.patch_individual_metadata(data['asset_data']['id'], {}, user_preferences)
            bg_blender.progress('uploading thumbnail')
            file = {
                "type": "thumbnail",
                "index": 0,
                "file_path": fpath
            }
            upload_data = {
                "name": data['asset_data']['name'],
                "token": user_preferences.api_key,
                "id": data['asset_data']['id']
            }

            upload_bg.upload_file(upload_data, file)

        # The 'finished successfully' marker is what bg_update() watches for.
        bg_blender.progress('background autothumbnailer finished successfully')

    except:
        import traceback

        traceback.print_exc()
        # Non-zero exit code tells the parent process the thumbnail job failed.
        sys.exit(1)
|
|
@ -1,278 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import utils
|
||||
|
||||
import bpy
|
||||
import sys, threading, os
|
||||
import re
|
||||
|
||||
from bpy.props import (
|
||||
EnumProperty,
|
||||
)
|
||||
|
||||
bg_processes = []
|
||||
|
||||
|
||||
class threadCom:  # object passed to threads to read background process stdout info
    '''Mutable state shared between the UI side and a stdout-reader thread.'''

    def __init__(self, eval_path_computing, eval_path_state, eval_path, process_type, proc, location=None, name=''):
        # Python expressions later exec'd/eval'd by the monitor to reach the
        # owner's properties.
        self.eval_path_computing = eval_path_computing
        self.eval_path_state = eval_path_state
        self.eval_path = eval_path

        # Process bookkeeping.
        self.process_type = process_type
        self.proc = proc
        self.name = name
        self.location = location

        # Parsed output / status fields, written by the reader thread.
        self.outtext = ''
        self.lasttext = ''
        self.message = ''  # the message to be sent.
        self.progress = 0.0
        self.error = False
        self.log = ''
|
||||
|
||||
|
||||
def threadread(tcom):
    '''reads stdout of background process.
    this threads basically waits for a stdout line to come in,
    fills the data, dies.

    Blocks on tcom.proc.stdout.readline(); when a "progress{...}" marker or a
    "Remaining" estimate is found it is stored in tcom.outtext (and the
    percentage, if any, in tcom.progress), then the function returns.
    Returns immediately if the process has already terminated.
    '''
    found = False
    while not found:
        if tcom.proc.poll() is not None:
            #process terminated
            return
        inline = tcom.proc.stdout.readline()
        inline = str(inline)
        s = inline.find('progress{')
        if s > -1:
            e = inline.find('}')
            tcom.outtext = inline[s + 9:e]
            found = True
            if tcom.outtext.find('%') > -1:
                # Fix: raw string — '\d' is an invalid escape in a plain
                # string literal (SyntaxWarning on modern Python).
                tcom.progress = float(re.findall(r'\d+\.\d+|\d+', tcom.outtext)[0])
            return
        if s == -1:
            s = inline.find('Remaining')
            if s > -1:
                # Keep a fixed-width slice of the render-time estimate.
                tcom.outtext = inline[s: s + 18]
                found = True
                return
        if len(inline) > 3:
            # Pass any other non-trivial output through for debugging.
            print(inline, len(inline))
        # if inline.find('Error'):
        #     tcom.error = True
        #     tcom.outtext = inline[2:]
|
||||
|
||||
|
||||
def progress(text, n=None):
    '''function for reporting during the script, works for background operations in the header.

    Writes a "progress{<text> <n>% }" line to stdout in exactly the format
    that threadread() parses on the monitoring side.

    text -- message to show; coerced to str.
    n    -- optional numeric progress value; truncated to three decimal
            places and suffixed with '%', or omitted entirely when None.
    '''
    text = str(text)
    if n is None:
        n = ''
    else:
        n = ' ' + ' ' + str(int(n * 1000) / 1000) + '% '
    # (removed unused 'spaces' local that was never written anywhere)
    try:
        sys.stdout.write('progress{%s%s}\n' % (text, n))
        sys.stdout.flush()
    except Exception as e:
        # stdout can disappear mid-shutdown; reporting must never kill the job.
        print('background progress reporting race condition')
        print(e)
|
||||
|
||||
|
||||
# @bpy.app.handlers.persistent
|
||||
def bg_update():
|
||||
'''monitoring of background process'''
|
||||
text = ''
|
||||
#utils.p('timer search')
|
||||
# utils.p('start bg_blender timer bg_update')
|
||||
|
||||
s = bpy.context.scene
|
||||
|
||||
global bg_processes
|
||||
if len(bg_processes) == 0:
|
||||
# utils.p('end bg_blender timer bg_update')
|
||||
|
||||
return 2
|
||||
#cleanup dead processes first
|
||||
remove_processes = []
|
||||
for p in bg_processes:
|
||||
if p[1].proc.poll() is not None:
|
||||
remove_processes.append(p)
|
||||
for p in remove_processes:
|
||||
bg_processes.remove(p)
|
||||
|
||||
#Parse process output
|
||||
for p in bg_processes:
|
||||
# proc=p[1].proc
|
||||
readthread = p[0]
|
||||
tcom = p[1]
|
||||
if not readthread.is_alive():
|
||||
readthread.join()
|
||||
# readthread.
|
||||
estring = None
|
||||
if tcom.error:
|
||||
estring = tcom.eval_path_computing + ' = False'
|
||||
tcom.lasttext = tcom.outtext
|
||||
if tcom.outtext != '':
|
||||
tcom.outtext = ''
|
||||
text =tcom.lasttext.replace("'","")
|
||||
estring = tcom.eval_path_state + ' = text'
|
||||
# print(tcom.lasttext)
|
||||
if 'finished successfully' in tcom.lasttext:
|
||||
bg_processes.remove(p)
|
||||
estring = tcom.eval_path_computing + ' = False'
|
||||
else:
|
||||
readthread = threading.Thread(target=threadread, args=([tcom]), daemon=True)
|
||||
readthread.start()
|
||||
p[0] = readthread
|
||||
if estring:
|
||||
try:
|
||||
exec(estring)
|
||||
except Exception as e:
|
||||
print('Exception while reading from background process')
|
||||
print(e)
|
||||
|
||||
# if len(bg_processes) == 0:
|
||||
# bpy.app.timers.unregister(bg_update)
|
||||
if len(bg_processes) > 0:
|
||||
# utils.p('end bg_blender timer bg_update')
|
||||
|
||||
return .3
|
||||
# utils.p('end bg_blender timer bg_update')
|
||||
|
||||
return 1.
|
||||
|
||||
|
||||
# (identifier, UI label, description) triples used as EnumProperty items.
process_types = (
    ('UPLOAD', 'Upload', ''),
    ('THUMBNAILER', 'Thumbnailer', ''),
)

# Asset kinds a background process can operate on.
process_sources = (
    ('MODEL', 'Model', 'set of objects'),
    ('SCENE', 'Scene', 'set of scenes'),
    ('HDR', 'HDR', 'HDR image'),
    ('MATERIAL', 'Material', 'any .blend Material'),
    ('TEXTURE', 'Texture', 'a texture, or texture set'),
    ('BRUSH', 'Brush', 'brush, can be any type of blender brush'),
)
|
||||
|
||||
|
||||
class KillBgProcess(bpy.types.Operator):
    '''Remove processes in background'''
    bl_idname = "object.kill_bg_process"
    bl_label = "Kill Background Process"
    bl_options = {'REGISTER'}

    # Which kind of background job to kill (upload vs thumbnail render).
    process_type: EnumProperty(
        name="Type",
        items=process_types,
        description="Type of process",
        default="UPLOAD",
    )

    # Which asset type the job belongs to.
    process_source: EnumProperty(
        name="Source",
        items=process_sources,
        description="Source of process",
        default="MODEL",
    )

    def execute(self, context):
        # Clears the in-progress flags on the active asset's upload props and
        # then kills the matching monitored subprocess, if any.
        s = bpy.context.scene

        cls = bpy.ops.object.convert.__class__
        # first do the easy stuff...TODO all cases.
        props = utils.get_upload_props()
        if self.process_type == 'UPLOAD':
            props.uploading = False
        if self.process_type == 'THUMBNAILER':
            props.is_generating_thumbnail = False
        global blenderkit_bg_process
        # print('killing', self.process_source, self.process_type)
        # then go kill the process. this wasn't working for unsetting props and that was the reason for changing to the method above.

        processes = bg_processes
        for p in processes:

            tcom = p[1]
            # print(tcom.process_type, self.process_type)
            if tcom.process_type == self.process_type:
                # eval_path resolves to the datablock the job belongs to.
                source = eval(tcom.eval_path)
                kill = False
                #TODO HDR - add killing of process
                if source.bl_rna.name == 'Object' and self.process_source == 'MODEL':
                    if source.name == bpy.context.active_object.name:
                        kill = True
                if source.bl_rna.name == 'Scene' and self.process_source == 'SCENE':
                    if source.name == bpy.context.scene.name:
                        kill = True
                if source.bl_rna.name == 'Image' and self.process_source == 'HDR':
                    ui_props = bpy.context.window_manager.blenderkitUI
                    if source.name == ui_props.hdr_upload_image.name:
                        # NOTE(review): set to False on purpose per the TODO
                        # above — HDR jobs are matched but never killed; confirm.
                        kill = False

                if source.bl_rna.name == 'Material' and self.process_source == 'MATERIAL':
                    if source.name == bpy.context.active_object.active_material.name:
                        kill = True
                if source.bl_rna.name == 'Brush' and self.process_source == 'BRUSH':
                    brush = utils.get_active_brush()
                    if brush is not None and source.name == brush.name:
                        kill = True
                if kill:
                    # Flag the owner as no longer computing, drop the monitor
                    # entry, then terminate the subprocess itself.
                    estring = tcom.eval_path_computing + ' = False'
                    exec(estring)
                    processes.remove(p)
                    tcom.proc.kill()

        return {'FINISHED'}
|
||||
|
||||
|
||||
def add_bg_process(location=None, name=None, eval_path_computing='', eval_path_state='', eval_path='', process_type='',
                   process=None):
    '''Register a freshly spawned background process for monitoring.

    Builds the shared threadCom state, starts a daemon reader thread over the
    process stdout, and appends the [thread, state] pair to the global
    bg_processes list that the bg_update() timer polls.
    '''
    global bg_processes
    comm = threadCom(eval_path_computing, eval_path_state, eval_path, process_type, process, location, name)
    reader = threading.Thread(target=threadread, args=([comm]), daemon=True)
    reader.start()

    bg_processes.append([reader, comm])
    # if not bpy.app.timers.is_registered(bg_update):
    #     bpy.app.timers.register(bg_update, persistent=True)
|
||||
|
||||
|
||||
def register():
    # Register the kill operator; start the monitor timer only in interactive
    # sessions (never headless/background renders) and only if timers are
    # enabled in the addon preferences.
    bpy.utils.register_class(KillBgProcess)
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if user_preferences.use_timers and not bpy.app.background:
        bpy.app.timers.register(bg_update)
|
||||
|
||||
|
||||
def unregister():
    # Tear down the operator and stop the monitor timer if it is running.
    bpy.utils.unregister_class(KillBgProcess)
    if bpy.app.timers.is_registered(bg_update):
        bpy.app.timers.unregister(bg_update)
|
|
@ -1,201 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import tasks_queue, utils, paths, search, categories, oauth, ui, ui_panels, colors, reports
|
||||
|
||||
import bpy
|
||||
|
||||
import threading
|
||||
import requests
|
||||
import time
|
||||
import logging
|
||||
|
||||
|
||||
bk_logger = logging.getLogger('blenderkit')
|
||||
|
||||
from bpy.props import (
|
||||
BoolProperty,
|
||||
)
|
||||
|
||||
# OAuth2 client id registered for the BlenderKit addon on the server.
CLIENT_ID = "IdFRwa3SGA8eMpzhRVFMg5Ts8sPK93xBjif93x0F"
# Local ports tried in order when binding the OAuth redirect listener.
PORTS = [62485, 65425, 55428, 49452, 35452, 25152, 5152, 1234]

# Kept module-global so a hanging login attempt can later be cancelled by
# pinging its redirect URI (see CancelLoginOnline).
active_authenticator = None
|
||||
|
||||
|
||||
def login_thread(signup=False):
    '''Kick off the OAuth login flow in a background daemon thread.

    signup -- when True the server's registration page is opened instead of
    the plain login page.
    '''
    global active_authenticator
    r_url = paths.get_oauth_landing_url()
    url = paths.get_bkit_url()
    authenticator = oauth.SimpleOAuthAuthenticator(server_url=url, client_id=CLIENT_ID, ports=PORTS)
    # we store authenticator globally to be able to ping the server if connection fails.
    active_authenticator = authenticator
    thread = threading.Thread(target=login, args=([signup, url, r_url, authenticator]), daemon=True)
    thread.start()
|
||||
|
||||
|
||||
def login(signup, url, r_url, authenticator):
    '''Run the blocking OAuth login flow (in a worker thread) and queue the
    resulting tokens to be written on the main thread.

    signup -- open the registration flow instead of plain login.
    url / r_url -- server URL and post-login landing (redirect) URL.
    authenticator -- oauth.SimpleOAuthAuthenticator performing the flow.
    '''
    try:
        auth_token, refresh_token, oauth_response = authenticator.get_new_token(register=signup, redirect_url=r_url)
    except Exception as e:
        tasks_queue.add_task((reports.add_report, (e, 20, colors.RED)))
        # Bug fix: without this return the names below are unbound and the
        # thread would die with a NameError instead of just reporting.
        return
    bk_logger.debug('tokens retrieved')
    tasks_queue.add_task((write_tokens, (auth_token, refresh_token, oauth_response)))
|
||||
|
||||
|
||||
def refresh_token_thread():
    '''Start an API-token refresh in a background thread, unless a refresh is
    already in flight or no refresh token is stored.'''
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if len(preferences.api_key_refresh) > 0 and preferences.refresh_in_progress == False:
        # The guard flag is cleared again in write_tokens().
        preferences.refresh_in_progress = True
        url = paths.get_bkit_url()
        thread = threading.Thread(target=refresh_token, args=([preferences.api_key_refresh, url]), daemon=True)
        thread.start()
    else:
        reports.add_report('Already Refreshing token, will be ready soon. If this fails, please login again in Login panel.')
|
||||
|
||||
|
||||
def refresh_token(api_key_refresh, url):
    '''Exchange a refresh token for a new token pair and, on success, queue
    writing the tokens back to the addon preferences on the main thread.

    Returns the (auth_token, refresh_token, oauth_response) triple exactly as
    produced by the authenticator; elements may be None when refresh failed.
    '''
    authenticator = oauth.SimpleOAuthAuthenticator(server_url=url, client_id=CLIENT_ID, ports=PORTS)
    # Renamed locals so they no longer shadow this function's own name.
    new_token, new_refresh, oauth_response = authenticator.get_refreshed_token(api_key_refresh)
    if new_token is not None and new_refresh is not None:
        tasks_queue.add_task((write_tokens, (new_token, new_refresh, oauth_response)))
    return new_token, new_refresh, oauth_response
|
||||
|
||||
|
||||
def write_tokens(auth_token, refresh_token, oauth_response):
    '''Store freshly obtained OAuth tokens in the addon preferences and
    refresh dependent state (profile, categories).

    Scheduled through the tasks queue so it runs on the main thread, where
    writing preferences is safe.
    '''
    bk_logger.debug('writing tokens')
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    preferences.api_key_refresh = refresh_token
    preferences.api_key = auth_token
    # Absolute expiry timestamp plus the raw lifetime in seconds.
    preferences.api_key_timeout = time.time() + oauth_response['expires_in']
    preferences.api_key_life = oauth_response['expires_in']
    preferences.login_attempt = False
    preferences.refresh_in_progress = False
    props = utils.get_search_props()
    if props is not None:
        props.report = ''
    reports.add_report('BlenderKit Re-Login success')
    search.get_profile()

    categories.fetch_categories_thread(auth_token, force = False)
|
||||
|
||||
|
||||
class RegisterLoginOnline(bpy.types.Operator):
    """Login online on BlenderKit webpage"""

    bl_idname = "wm.blenderkit_login"
    bl_label = "BlenderKit login/signup"
    bl_options = {'REGISTER', 'UNDO'}

    # True opens the sign-up flow, False the plain login flow.
    signup: BoolProperty(
        name="create a new account",
        description="True for register, otherwise login",
        default=False,
        options={'SKIP_SAVE'}
    )

    # Text shown in the confirmation dialog before the browser is opened.
    message: bpy.props.StringProperty(
        name="Message",
        description="",
        default="You were logged out from BlenderKit.\n Clicking OK takes you to web login. ")

    @classmethod
    def poll(cls, context):
        return True

    def draw(self, context):
        layout = self.layout
        utils.label_multiline(layout, text=self.message, width = 300)

    def execute(self, context):
        # Mark the attempt (lets the UI offer a cancel button), then run the
        # browser-based OAuth flow in a background thread.
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        preferences.login_attempt = True
        login_thread(self.signup)
        return {'FINISHED'}

    def invoke(self, context, event):
        # Clear any stale tokens before showing the confirmation dialog.
        wm = bpy.context.window_manager
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        preferences.api_key_refresh = ''
        preferences.api_key = ''
        return wm.invoke_props_dialog(self)
|
||||
|
||||
|
||||
class Logout(bpy.types.Operator):
    """Logout from BlenderKit immediately"""

    bl_idname = "wm.blenderkit_logout"
    bl_label = "BlenderKit logout"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        # Drop both tokens and the cached profile data.
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        preferences.login_attempt = False
        preferences.api_key_refresh = ''
        preferences.api_key = ''
        if bpy.context.window_manager.get('bkit profile'):
            del (bpy.context.window_manager['bkit profile'])
        return {'FINISHED'}
|
||||
|
||||
|
||||
class CancelLoginOnline(bpy.types.Operator):
    """Cancel login attempt"""

    bl_idname = "wm.blenderkit_login_cancel"
    bl_label = "BlenderKit login cancel"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        global active_authenticator
        preferences = bpy.context.preferences.addons['blenderkit'].preferences
        preferences.login_attempt = False
        try:
            # Hitting the redirect URI unblocks the local OAuth listener so
            # its thread can shut down.
            if active_authenticator is not None:
                requests.get(active_authenticator.redirect_uri)
                active_authenticator = None
        except Exception as e:
            print('stopped login attempt')
            print(e)
        return {'FINISHED'}
|
||||
|
||||
|
||||
# Operator classes registered by this module.
classes = (
    RegisterLoginOnline,
    CancelLoginOnline,
    Logout,
)
|
||||
|
||||
|
||||
def register():
    # Register all login/logout operators with Blender.
    for c in classes:
        bpy.utils.register_class(c)
|
||||
|
||||
|
||||
def unregister():
    # Unregister all login/logout operators on addon disable/reload.
    for c in classes:
        bpy.utils.unregister_class(c)
|
|
@ -1,36 +0,0 @@
|
|||
# Add-on metadata for the vendored "BL UI Widgets" library (by Jayanam).
bl_info = {
    "name": "BL UI Widgets",
    "description": "UI Widgets to draw in the 3D view",
    "author": "Jayanam",
    "version": (0, 6, 4, 2),
    "blender": (2, 80, 0),
    "location": "View3D",
    "category": "Object"}
|
||||
|
||||
# Blender imports
|
||||
import bpy
|
||||
|
||||
from bpy.props import *
|
||||
|
||||
|
||||
# (keymap, keymap_item) pairs added in register(), removed in unregister().
addon_keymaps = []
|
||||
|
||||
def register():
    # Registers the draw operator and a 3D View keymap for it.
    # NOTE(review): `DP_OT_draw_operator` is not defined or imported in this
    # file, and `kmi` is appended below without ever being created — the
    # km.keymap_items.new(...) call appears to be missing. As written this
    # register() would raise NameError; confirm against upstream BL UI Widgets.

    bpy.utils.register_class(DP_OT_draw_operator)
    kcfg = bpy.context.window_manager.keyconfigs.addon
    if kcfg:
        km = kcfg.keymaps.new(name='3D View', space_type='VIEW_3D')


    addon_keymaps.append((km, kmi))
|
||||
|
||||
def unregister():
    # Remove every keymap item this module added, then drop the operator.
    # NOTE(review): `DP_OT_draw_operator` is not defined in this file — see
    # the matching note on register().
    for km, kmi in addon_keymaps:
        km.keymap_items.remove(kmi)
    addon_keymaps.clear()

    bpy.utils.unregister_class(DP_OT_draw_operator)
|
||||
|
||||
# Allow running the module directly, e.g. from Blender's text editor.
if __name__ == "__main__":
    register()
|
|
@ -1,205 +0,0 @@
|
|||
from . bl_ui_widget import *
|
||||
|
||||
import blf
|
||||
import bpy
|
||||
|
||||
class BL_UI_Button(BL_UI_Widget):
    """Clickable button widget with an optional icon image and centered label.

    Interaction state is tracked in self.__state:
    0 = normal, 1 = pressed, 2 = hovered.
    """

    def __init__(self, x, y, width, height):
        super().__init__(x, y, width, height)
        # Colors as RGBA tuples of floats in 0..1.
        self._text_color = (1.0, 1.0, 1.0, 1.0)
        self._hover_bg_color = (0.5, 0.5, 0.5, 1.0)
        self._select_bg_color = (0.7, 0.7, 0.7, 1.0)

        self._text = "Button"
        self._text_size = 16
        self._textpos = (x, y)

        self.__state = 0
        # Optional icon: a bpy.data.images datablock plus size/offset in px.
        self.__image = None
        self.__image_size = (24, 24)
        self.__image_position = (4, 2)

    @property
    def text_color(self):
        return self._text_color

    @text_color.setter
    def text_color(self, value):
        self._text_color = value

    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, value):
        self._text = value

    @property
    def text_size(self):
        return self._text_size

    @text_size.setter
    def text_size(self, value):
        self._text_size = value

    @property
    def hover_bg_color(self):
        return self._hover_bg_color

    @hover_bg_color.setter
    def hover_bg_color(self, value):
        self._hover_bg_color = value

    @property
    def select_bg_color(self):
        return self._select_bg_color

    @select_bg_color.setter
    def select_bg_color(self, value):
        self._select_bg_color = value

    def set_image_size(self, imgage_size):
        # (sic: parameter-name typo kept — renaming could break keyword callers)
        self.__image_size = imgage_size

    def set_image_position(self, image_position):
        self.__image_position = image_position

    def set_image(self, rel_filepath):
        #first try to access the image, for cases where it can get removed
        try:
            self.__image
            self.__image.filepath
            self.__image.pixels
        except:
            self.__image = None
        try:
            # Load (or reuse) the image datablock and push it to the GPU.
            if self.__image is None or self.__image.filepath != rel_filepath:
                self.__image = bpy.data.images.load(rel_filepath, check_existing=True)
                self.__image.gl_load()

            # Reload when the pixel buffer was freed (empty pixels array).
            if self.__image and len(self.__image.pixels) == 0:
                self.__image.reload()
                self.__image.gl_load()

        except Exception as e:
            # Any failure simply leaves the button without an icon.
            self.__image = None

    def update(self, x, y):
        # Keep the label anchor in sync with the widget position.
        super().update(x, y)
        self._textpos = [x, y]

    def draw(self):
        # Draw order: background quad, icon image, centered label.
        if not self._is_visible:
            return
        area_height = self.get_area_height()

        self.shader.bind()

        self.set_colors()

        bgl.glEnable(bgl.GL_BLEND)

        self.batch_panel.draw(self.shader)

        self.draw_image()

        bgl.glDisable(bgl.GL_BLEND)

        # Draw text
        self.draw_text(area_height)

    def set_colors(self):
        # Choose the background color for the current interaction state and
        # upload it to the shader.
        color = self._bg_color
        text_color = self._text_color

        # pressed
        if self.__state == 1:
            color = self._select_bg_color

        # hover
        elif self.__state == 2:
            color = self._hover_bg_color

        self.shader.uniform_float("color", color)

    def draw_text(self, area_height):
        font_id = 1
        blf.size(font_id, self._text_size, 72)
        size = blf.dimensions(0, self._text)

        # Center the label in the button; blf positions from the bottom-left,
        # widget coordinates run top-down, hence the area-height flip.
        textpos_y = area_height - self._textpos[1] - (self.height + size[1]) / 2.0
        blf.position(font_id, self._textpos[0] + (self.width - size[0]) / 2.0, textpos_y + 1, 0)

        r, g, b, a = self._text_color
        blf.color(font_id, r, g, b, a)

        blf.draw(font_id, self._text)

    def draw_image(self):
        # Draws the icon texture; returns True on success, False otherwise.
        if self.__image is not None:
            try:
                y_screen_flip = self.get_area_height() - self.y_screen

                off_x, off_y = self.__image_position
                sx, sy = self.__image_size

                # bottom left, top left, top right, bottom right
                vertices = (
                    (self.x_screen + off_x, y_screen_flip - off_y),
                    (self.x_screen + off_x, y_screen_flip - sy - off_y),
                    (self.x_screen + off_x + sx, y_screen_flip - sy - off_y),
                    (self.x_screen + off_x + sx, y_screen_flip - off_y))

                self.shader_img = gpu.shader.from_builtin('2D_IMAGE')
                self.batch_img = batch_for_shader(self.shader_img, 'TRI_FAN',
                                                  { "pos" : vertices,
                                                    "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1))
                                                  },)

                # send image to gpu if it isn't there already
                if self.__image.gl_load():
                    raise Exception()

                bgl.glActiveTexture(bgl.GL_TEXTURE0)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.__image.bindcode)

                self.shader_img.bind()
                self.shader_img.uniform_int("image", 0)
                self.batch_img.draw(self.shader_img)
                return True
            except:
                pass

        return False

    def set_mouse_down(self, mouse_down_func):
        # Callback invoked with this button instance when it is clicked.
        self.mouse_down_func = mouse_down_func

    def mouse_down(self, x, y):
        # Returns True when the click was inside the button (event consumed).
        if self.is_in_rect(x,y):
            self.__state = 1
            try:
                self.mouse_down_func(self)
            except Exception as e:
                # A failing callback must not break the modal UI loop.
                print(e)

            return True

        return False

    def mouse_move(self, x, y):
        if self.is_in_rect(x,y):
            if(self.__state != 1):

                # hover state
                self.__state = 2
        else:
            self.__state = 0

    def mouse_up(self, x, y):
        # Released over the button -> back to hover; elsewhere -> normal.
        if self.is_in_rect(x,y):
            self.__state = 2
        else:
            self.__state = 0
|
|
@ -1,59 +0,0 @@
|
|||
from . bl_ui_widget import *
|
||||
|
||||
class BL_UI_Drag_Panel(BL_UI_Widget):
    """Panel widget that can be dragged with the mouse and positions its
    child widgets relative to its own screen location."""

    def __init__(self, x, y, width, height):
        super().__init__(x,y, width, height)
        # Offset between the grab point and the panel origin while dragging.
        self.drag_offset_x = 0
        self.drag_offset_y = 0
        self.is_drag = False
        self.widgets = []

    def set_location(self, x, y):
        super().set_location(x,y)
        self.layout_widgets()

    def add_widget(self, widget):
        self.widgets.append(widget)

    def add_widgets(self, widgets):
        # Replaces (does not extend) the current child list, then lays out.
        self.widgets = widgets
        self.layout_widgets()


    def layout_widgets(self):
        # Children store panel-relative coordinates; convert to screen space.
        for widget in self.widgets:
            widget.update(self.x_screen + widget.x, self.y_screen + widget.y)

    def update(self, x, y):
        super().update(x - self.drag_offset_x, y + self.drag_offset_y)

    def child_widget_focused(self, x, y):
        # True when the point lies over any child widget (child gets the event).
        for widget in self.widgets:
            if widget.is_in_rect(x, y):
                return True
        return False

    def mouse_down(self, x, y):
        # Start dragging only when the press is on the panel body itself.
        if self.child_widget_focused(x, y):
            return False

        if self.is_in_rect(x,y):
            height = self.get_area_height()
            self.is_drag = True
            self.drag_offset_x = x - self.x_screen
            self.drag_offset_y = y - (height - self.y_screen)
            return True

        return False

    def mouse_move(self, x, y):
        if self.is_drag:
            height = self.get_area_height()
            # Widget coords are top-down, mouse coords bottom-up: flip y.
            self.update(x, height - y)
            self.layout_widgets()

    def mouse_up(self, x, y):
        # End the drag and reset the grab offsets.
        self.is_drag = False
        self.drag_offset_x = 0
        self.drag_offset_y = 0
|
|
@ -1,93 +0,0 @@
|
|||
import bpy
|
||||
|
||||
from bpy.types import Operator
|
||||
|
||||
class BL_UI_OT_draw_operator(Operator):
    # Modal base operator that owns a set of BL UI widgets: installs a
    # SpaceView3D draw handler plus a redraw timer, routes events to the
    # widgets, and cleans both up on finish.
    bl_idname = "object.bl_ui_ot_draw_operator"
    bl_label = "bl ui widgets operator"
    bl_description = "Operator for bl ui widgets"
    bl_options = {'REGISTER'}

    def __init__(self):
        # Handles set in register_handlers(), cleared in unregister_handlers().
        self.draw_handle = None
        self.draw_event = None
        self._finished = False

        self.widgets = []

    def init_widgets(self, context, widgets):
        # Adopt the widget list and let each widget build its draw resources.
        self.widgets = widgets
        for widget in self.widgets:
            widget.init(context)

    def on_invoke(self, context, event):
        # Hook for subclasses; called before handlers are registered.
        pass

    def on_finish(self, context):
        # Hook for subclasses; flags the modal loop to return FINISHED.
        self._finished = True

    def invoke(self, context, event):

        self.on_invoke(context, event)

        args = (self, context)

        self.register_handlers(args, context)

        context.window_manager.modal_handler_add(self)

        # Remember where we were invoked so draw_callback_px can skip redraws
        # coming from other areas/windows.
        self.active_window_pointer = context.window.as_pointer()
        self.active_area_pointer = context.area.as_pointer()
        self.active_region_pointer = context.region.as_pointer()
        return {"RUNNING_MODAL"}

    def register_handlers(self, args, context):
        self.draw_handle = bpy.types.SpaceView3D.draw_handler_add(self.draw_callback_px, args, "WINDOW", "POST_PIXEL")
        # 0.1s timer keeps the modal loop ticking so the view keeps redrawing.
        self.draw_event = context.window_manager.event_timer_add(0.1, window=context.window)

    def unregister_handlers(self, context):

        context.window_manager.event_timer_remove(self.draw_event)

        bpy.types.SpaceView3D.draw_handler_remove(self.draw_handle, "WINDOW")

        self.draw_handle = None
        self.draw_event = None

    def handle_widget_events(self, event):
        # Give every widget a chance to consume the event; True if any did.
        result = False
        for widget in self.widgets:
            if widget.handle_event(event):
                result = True
        return result

    def modal(self, context, event):

        if self._finished:
            return {'FINISHED'}

        if context.area:
            context.area.tag_redraw()

        if self.handle_widget_events(event):
            return {'RUNNING_MODAL'}

        if event.type in {"ESC"}:
            self.finish()

        return {"PASS_THROUGH"}

    def finish(self):
        self.unregister_handlers(bpy.context)
        self.on_finish(bpy.context)

    # Draw handler to paint onto the screen
    def draw_callback_px(self, op, context):
        try:
            # Only draw in the area the operator was invoked from.
            if context.area.as_pointer() == self.active_area_pointer:
                for widget in self.widgets:
                    widget.draw()
        except:
            pass;
        # context.window_manager.event_timer_remove(self.draw_event)
        # bpy.types.SpaceView3D.draw_handler_remove(self.draw_handle, "WINDOW")
|
|
@ -1,97 +0,0 @@
|
|||
from . bl_ui_widget import *
|
||||
|
||||
import blf
|
||||
import bpy
|
||||
|
||||
class BL_UI_Image(BL_UI_Widget):
    """Widget that draws its background panel with a texture image on top.

    Fix: the two bare ``except:`` clauses are narrowed to ``except Exception:``
    so SystemExit/KeyboardInterrupt are no longer swallowed; the best-effort
    behaviour (a missing/broken icon never breaks the UI) is preserved.
    """

    def __init__(self, x, y, width, height):
        super().__init__(x, y, width, height)

        self.__state = 0
        self.__image = None            # bpy image datablock, loaded lazily
        self.__image_size = (24, 24)
        self.__image_position = (4, 2)

    def set_image_size(self, imgage_size):
        # parameter keeps its historical typo ("imgage") so keyword callers
        # elsewhere in the add-on stay compatible
        self.__image_size = imgage_size

    def set_image_position(self, image_position):
        self.__image_position = image_position

    def set_image(self, rel_filepath):
        """Load (or reuse) the image at *rel_filepath* and upload it to the GPU.

        Failures are ignored on purpose - a missing icon must not break the UI.
        """
        try:
            if self.__image is None or self.__image.filepath != rel_filepath:
                self.__image = bpy.data.images.load(rel_filepath, check_existing=True)
                self.__image.gl_load()
        except Exception:
            pass

    def update(self, x, y):
        super().update(x, y)

    def draw(self):
        """Draw the background panel and then the image; skipped when hidden."""
        if not self._is_visible:
            return

        area_height = self.get_area_height()

        self.shader.bind()

        bgl.glEnable(bgl.GL_BLEND)

        self.batch_panel.draw(self.shader)

        self.draw_image()

        bgl.glDisable(bgl.GL_BLEND)

    def draw_image(self):
        """Draw the texture quad; returns True on success, False otherwise."""
        if self.__image is not None:
            try:
                y_screen_flip = self.get_area_height() - self.y_screen

                off_x, off_y = self.__image_position
                sx, sy = self.__image_size

                # bottom left, top left, top right, bottom right
                vertices = (
                    (self.x_screen + off_x, y_screen_flip - off_y),
                    (self.x_screen + off_x, y_screen_flip - sy - off_y),
                    (self.x_screen + off_x + sx, y_screen_flip - sy - off_y),
                    (self.x_screen + off_x + sx, y_screen_flip - off_y))

                self.shader_img = gpu.shader.from_builtin('2D_IMAGE')
                self.batch_img = batch_for_shader(self.shader_img, 'TRI_FAN',
                                                  {"pos": vertices,
                                                   "texCoord": ((0, 1), (0, 0), (1, 0), (1, 1))
                                                   },)

                # send image to gpu if it isn't there already; gl_load()
                # returns a nonzero error code on failure
                if self.__image.gl_load():
                    raise Exception()

                bgl.glActiveTexture(bgl.GL_TEXTURE0)
                bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.__image.bindcode)

                self.shader_img.bind()
                self.shader_img.uniform_int("image", 0)
                self.batch_img.draw(self.shader_img)
                return True
            except Exception:
                pass

        return False

    def set_mouse_down(self, mouse_down_func):
        self.mouse_down_func = mouse_down_func

    def mouse_down(self, x, y):
        # an image never consumes clicks
        return False

    def mouse_move(self, x, y):
        return

    def mouse_up(self, x, y):
        return
|
|
@ -1,72 +0,0 @@
|
|||
from . bl_ui_widget import *
|
||||
|
||||
import blf
|
||||
|
||||
class BL_UI_Label(BL_UI_Widget):
    """Text label widget; draws text only and never consumes mouse events."""

    def __init__(self, x, y, width, height):
        super().__init__(x, y, width, height)

        self._text_color = (1.0, 1.0, 1.0, 1.0)
        self._text = "Label"
        self._text_size = 16
        # BUGFIX: draw() reads self._halign, but this attribute was being
        # initialised as self._ralign (typo), which raised AttributeError on
        # the first draw. _ralign is kept as well for any external reader.
        self._halign = 'LEFT'
        self._ralign = 'LEFT'
        self._valign = 'TOP'

    @property
    def text_color(self):
        return self._text_color

    @text_color.setter
    def text_color(self, value):
        self._text_color = value

    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, value):
        self._text = value

    @property
    def text_size(self):
        return self._text_size

    @text_size.setter
    def text_size(self, value):
        self._text_size = value

    def is_in_rect(self, x, y):
        # a label never captures the mouse
        return False

    def draw(self):
        """Draw the label text with the configured size, color and alignment."""
        if not self._is_visible:
            return

        area_height = self.get_area_height()

        font_id = 1
        blf.size(font_id, self._text_size, 72)
        size = blf.dimensions(font_id, self._text)

        # flip y: widget coords are top-down, blf coords are bottom-up
        textpos_y = area_height - self.y_screen - self.height

        r, g, b, a = self._text_color
        x = self.x_screen
        y = textpos_y
        if self._halign != 'LEFT':
            width, height = blf.dimensions(font_id, self._text)
            if self._halign == 'RIGHT':
                x -= width
            elif self._halign == 'CENTER':
                x -= width // 2
            if self._valign == 'CENTER':
                y -= height // 2
            # bottom could be here but there's no reason for it
        blf.position(font_id, x, y, 0)

        blf.color(font_id, r, g, b, a)

        blf.draw(font_id, self._text)
|
|
@ -1,195 +0,0 @@
|
|||
import gpu
|
||||
import bgl
|
||||
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
|
||||
class BL_UI_Widget:
    """Base class for the custom OpenGL-drawn UI widgets.

    Widget coordinates are given from the top-left corner of the area; the
    draw code flips the y axis because the GPU origin is bottom-left.
    """

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.x_screen = x
        self.y_screen = y
        self.width = width
        self.height = height
        self._bg_color = (0.8, 0.8, 0.8, 1.0)
        self._tag = None
        self.context = None
        self.__inrect = False          # mouse currently inside the widget rect
        self._mouse_down = False
        self._mouse_down_right = False
        self._is_visible = True

    def set_location(self, x, y):
        # move the widget and rebuild its draw batch
        self.x = x
        self.y = y
        self.x_screen = x
        self.y_screen = y
        self.update(x,y)

    @property
    def bg_color(self):
        return self._bg_color

    @bg_color.setter
    def bg_color(self, value):
        self._bg_color = value

    @property
    def visible(self):
        return self._is_visible

    @visible.setter
    def visible(self, value):
        self._is_visible = value

    @property
    def tag(self):
        # free-form user data slot
        return self._tag

    @tag.setter
    def tag(self, value):
        self._tag = value

    def draw(self):
        """Draw the widget's background quad; skipped when hidden."""
        if not self._is_visible:
            return

        self.shader.bind()
        self.shader.uniform_float("color", self._bg_color)

        bgl.glEnable(bgl.GL_BLEND)
        self.batch_panel.draw(self.shader)
        bgl.glDisable(bgl.GL_BLEND)

    def init(self, context):
        """Bind the widget to *context* and build its first draw batch."""
        self.context = context
        self.update(self.x, self.y)

    def update(self, x, y):
        """Recompute the quad vertices for the new position and rebuild the
        shader and batch used by draw()."""
        area_height = self.get_area_height()
        self.x_screen = x
        self.y_screen = y

        indices = ((0, 1, 2), (0, 2, 3))

        # flip y: widget coords are top-down, GPU coords are bottom-up
        y_screen_flip = area_height - self.y_screen

        # bottom left, top left, top right, bottom right
        vertices = (
            (self.x_screen, y_screen_flip),
            (self.x_screen, y_screen_flip - self.height),
            (self.x_screen + self.width, y_screen_flip - self.height),
            (self.x_screen + self.width, y_screen_flip))

        self.shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
        self.batch_panel = batch_for_shader(self.shader, 'TRIS', {"pos" : vertices}, indices=indices)

    def handle_event(self, event):
        """Dispatch a Blender event to the widget.

        Returns True when the widget consumed the event (mouse press inside
        its rect, or keyboard input while hovered); False otherwise.
        """
        if not self._is_visible:
            return False
        x = event.mouse_region_x
        y = event.mouse_region_y

        if (event.type == 'LEFTMOUSE'):
            if (event.value == 'PRESS'):
                self._mouse_down = True
                return self.mouse_down(x, y)
            else:
                self._mouse_down = False
                self.mouse_up(x, y)

        elif (event.type == 'RIGHTMOUSE'):
            if (event.value == 'PRESS'):
                self._mouse_down_right = True
                return self.mouse_down_right(x, y)
            else:
                self._mouse_down_right = False
                self.mouse_up(x, y)

        elif (event.type == 'MOUSEMOVE'):
            self.mouse_move(x, y)
            inrect = self.is_in_rect(x, y)

            # we enter the rect
            if not self.__inrect and inrect:
                self.__inrect = True
                self.mouse_enter(event, x, y)

            # we are leaving the rect
            elif self.__inrect and not inrect:
                self.__inrect = False
                self.mouse_exit(event, x, y)

            return False

        elif event.value == 'PRESS' and self.__inrect and (event.ascii != '' or event.type in self.get_input_keys()):
            # keyboard input while the mouse hovers the widget
            return self.text_input(event)

        return False

    def get_input_keys(self) :
        # extra (non-printable) key types a widget wants to receive; override
        return []

    def get_area_height(self):
        return self.context.area.height

    def is_in_rect(self, x, y):
        """True when region coordinates (x, y) fall inside the widget rect."""
        area_height = self.get_area_height()

        # widget y is measured from the top, region y from the bottom
        widget_y = area_height - self.y_screen
        if (
                (self.x_screen <= x <= (self.x_screen + self.width)) and
                (widget_y >= y >= (widget_y - self.height))
        ):
            return True

        return False

    def text_input(self, event):
        # override to handle keyboard input; return True when consumed
        return False

    def mouse_down(self, x, y):
        return self.is_in_rect(x,y)

    def mouse_down_right(self, x, y):
        return self.is_in_rect(x,y)

    def mouse_up(self, x, y):
        pass

    def set_mouse_enter(self, mouse_enter_func):
        self.mouse_enter_func = mouse_enter_func

    def call_mouse_enter(self):
        # the hook may never have been set - ignore missing/broken hooks
        try:
            if self.mouse_enter_func:
                self.mouse_enter_func(self)
        except:
            pass

    def mouse_enter(self, event, x, y):
        self.call_mouse_enter()

    def set_mouse_exit(self, mouse_exit_func):
        self.mouse_exit_func = mouse_exit_func

    def call_mouse_exit(self):
        # the hook may never have been set - ignore missing/broken hooks
        try:
            if self.mouse_exit_func:
                self.mouse_exit_func(self)
        except:
            pass

    def mouse_exit(self, event, x, y):
        self.call_mouse_exit()

    def mouse_move(self, x, y):
        pass
|
|
@ -1,274 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import paths, utils, tasks_queue, rerequests, ui, colors, reports
|
||||
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
import bpy
|
||||
import time
|
||||
|
||||
import shutil
|
||||
import threading
|
||||
import logging
|
||||
|
||||
bk_logger = logging.getLogger('blenderkit')
|
||||
|
||||
|
||||
def count_to_parent(parent):
    """Recursively fold every descendant's assetCount into parent['assetCount']."""
    children = parent['children']
    for child in children:
        count_to_parent(child)
    parent['assetCount'] += sum(child['assetCount'] for child in children)
|
||||
|
||||
|
||||
def fix_category_counts(categories):
    """Make every top-level category's assetCount include all its descendants."""
    for root in categories:
        count_to_parent(root)
|
||||
|
||||
|
||||
def filter_category(category):
    ''' filter categories with no assets, so they aren't shown in search panel'''
    # Returns True when *category* itself is empty (caller should drop it);
    # otherwise prunes its empty children in place and returns None.
    if category['assetCount'] < 1:
        return True
    empty_children = [child for child in category['children'] if filter_category(child)]
    for child in empty_children:
        category['children'].remove(child)
|
||||
|
||||
|
||||
def filter_categories(categories):
    """Prune empty subcategories from every top-level category in place."""
    for root in categories:
        filter_category(root)
|
||||
|
||||
|
||||
def get_category_path(categories, category):
    '''finds the category in all possible subcategories and returns the path to it'''
    # Depth-first walk that records each child's parent slug; once the target
    # slug is found, the path is rebuilt by walking the parent map upwards.
    parent_of = {}
    stack = categories[:]
    while stack:
        node = stack.pop()
        children = node.get('children')
        if not children:
            continue
        for child in children:
            parent_of[child['slug']] = node['slug']
            if child['slug'] == category:
                path = [child['slug']]
                slug = child['slug']
                while slug in parent_of:
                    slug = parent_of[slug]
                    path.insert(0, slug)
                return path
            stack.append(child)
    return []
|
||||
|
||||
def get_category_name_path(categories, category):
    '''finds the category in all possible subcategories and returns the path to it'''
    # Same walk as get_category_path, but the parent map stores the whole
    # parent dict so the rebuilt path can use human-readable names.
    parent_of = {}
    stack = categories[:]
    while stack:
        node = stack.pop()
        children = node.get('children')
        if not children:
            continue
        for child in children:
            parent_of[child['slug']] = node
            if child['slug'] == category:
                path = [child['name']]
                slug = child['slug']
                while slug in parent_of:
                    parent = parent_of[slug]
                    slug = parent['slug']
                    path.insert(0, parent['name'])
                return path
            stack.append(child)
    return []
|
||||
|
||||
def get_category(categories, cat_path=()):
    """Walk *cat_path* (a sequence of slugs) down the category tree and return
    the dict of the final category; None when the path cannot be resolved."""
    for slug in cat_path:
        for node in categories:
            if node['slug'] == slug:
                categories = node['children']
                if slug == cat_path[-1]:
                    return node
                break
|
||||
|
||||
|
||||
# def get_upload_asset_type(self):
|
||||
# typemapper = {
|
||||
# bpy.types.Object.blenderkit: 'model',
|
||||
# bpy.types.Scene.blenderkit: 'scene',
|
||||
# bpy.types.Image.blenderkit: 'hdr',
|
||||
# bpy.types.Material.blenderkit: 'material',
|
||||
# bpy.types.Brush.blenderkit: 'brush'
|
||||
# }
|
||||
# asset_type = typemapper[type(self)]
|
||||
# return asset_type
|
||||
|
||||
def update_category_enums(self, context):
    '''Fixes if lower level is empty - sets it to None, because enum value can be higher.'''
    items = get_subcategory_enums(self, context)
    no_real_items = items[0][0] == 'NONE'
    if no_real_items and self.subcategory != 'NONE':
        self.subcategory = 'NONE'
|
||||
|
||||
|
||||
def update_subcategory_enums(self, context):
    '''Fixes if lower level is empty - sets it to None, because enum value can be higher.'''
    items = get_subcategory1_enums(self, context)
    no_real_items = items[0][0] == 'NONE'
    if no_real_items and self.subcategory1 != 'NONE':
        self.subcategory1 = 'NONE'
|
||||
|
||||
|
||||
def get_category_enums(self, context):
    """Build EnumProperty items for the top-level categories of the currently
    selected asset type; falls back to a single NONE item when empty."""
    wm = bpy.context.window_manager
    ui_props = bpy.context.window_manager.blenderkitUI
    asset_type = ui_props.asset_type.lower()
    asset_categories = get_category(wm['bkit_categories'], cat_path=(asset_type,))
    items = [(c['slug'], c['name'], c['description']) for c in asset_categories['children']]
    if not items:
        items.append(('NONE', '', 'no categories on this level defined'))
    return items
|
||||
|
||||
|
||||
def get_subcategory_enums(self, context):
    """Build EnumProperty items for the subcategories of self.category.

    Returns a NONE placeholder item when there are no children or the
    category path cannot be resolved.  Fix: get_category() returns None for
    an unresolvable path; the sibling get_subcategory1_enums already guarded
    against that, this function did not and raised TypeError.
    """
    wm = bpy.context.window_manager
    props = bpy.context.window_manager.blenderkitUI
    asset_type = props.asset_type.lower()
    items = []
    if self.category != '':
        asset_categories = get_category(wm['bkit_categories'], cat_path=(asset_type, self.category,))
        if asset_categories:
            for c in asset_categories['children']:
                items.append((c['slug'], c['name'], c['description']))
    if len(items) == 0:
        items.append(('NONE', '', 'no categories on this level defined'))
    return items
|
||||
|
||||
|
||||
def get_subcategory1_enums(self, context):
    """Build EnumProperty items for the third category level; falls back to a
    single NONE item when empty or unresolvable."""
    wm = bpy.context.window_manager
    props = bpy.context.window_manager.blenderkitUI
    asset_type = props.asset_type.lower()
    items = []
    if self.category != '' and self.subcategory != '':
        cats = get_category(wm['bkit_categories'],
                            cat_path=(asset_type, self.category, self.subcategory,))
        if cats:
            items = [(c['slug'], c['name'], c['description']) for c in cats['children']]
    if not items:
        items.append(('NONE', '', 'no categories on this level defined'))
    return items
|
||||
|
||||
|
||||
def copy_categories():
    """Seed the temp dir with the bundled categories.json when none exists yet."""
    tempdir = paths.get_temp_dir()
    target = os.path.join(tempdir, 'categories.json')
    if os.path.exists(target):
        return
    source = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
    try:
        shutil.copy(source, target)
    except:
        print("couldn't copy categories file")
|
||||
|
||||
|
||||
def load_categories():
    """Load categories.json from the temp dir into the window manager and
    reset the per-asset-type active category paths.

    Fix: the bare ``except:`` is narrowed to ``except Exception:`` so it no
    longer swallows SystemExit/KeyboardInterrupt; read failures are still
    tolerated (the UI works without categories).
    """
    copy_categories()
    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')

    wm = bpy.context.window_manager
    try:
        with open(categories_filepath, 'r', encoding='utf-8') as catfile:
            wm['bkit_categories'] = json.load(catfile)

        wm['active_category'] = {
            'MODEL': ['model'],
            'SCENE': ['scene'],
            'HDR': ['hdr'],
            'MATERIAL': ['material'],
            'BRUSH': ['brush'],
        }
    except Exception:
        print('categories failed to read')
|
||||
|
||||
|
||||
#
|
||||
catfetch_counter = 0
|
||||
|
||||
|
||||
def fetch_categories(API_key, force=False):
    """Download the category tree from the server, fix its counts, cache it to
    disk and schedule a reload on the main thread.

    The server is queried at most once per day unless *force* is set.  On
    failure a report is queued for the user and the bundled categories file
    is copied in place so the UI still works offline.
    """
    url = paths.get_api_url() + 'categories/'

    headers = utils.get_headers(API_key)

    tempdir = paths.get_temp_dir()
    categories_filepath = os.path.join(tempdir, 'categories.json')
    if os.path.exists(categories_filepath):
        catfile_age = time.time() - os.path.getmtime(categories_filepath)
    else:
        catfile_age = 10000000  # no cache yet - force a fetch

    try:
        # read categories only once per day maximum, or when forced to do so.
        if catfile_age > 86400 or force:
            bk_logger.debug('requesting categories from server')
            r = rerequests.get(url, headers=headers)
            rdata = r.json()
            categories = rdata['results']
            fix_category_counts(categories)
            # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
            with open(categories_filepath, 'w', encoding='utf-8') as s:
                json.dump(categories, s, ensure_ascii=False, indent=4)
            tasks_queue.add_task((load_categories, ()))
    except Exception as e:
        t = 'BlenderKit failed to download fresh categories from the server'
        # BUGFIX: add_report was being *called* here instead of being passed as
        # the task callable, so the report ran on this background thread and
        # the queued "task" was its return value.
        tasks_queue.add_task((reports.add_report, (t, 15, colors.RED)))
        bk_logger.debug(t)
        bk_logger.exception(e)
        if not os.path.exists(categories_filepath):
            source_path = paths.get_addon_file(subpath='data' + os.sep + 'categories.json')
            shutil.copy(source_path, categories_filepath)
|
||||
|
||||
|
||||
def fetch_categories_thread(API_key, force=False):
    """Run fetch_categories() on a daemon thread so the UI never blocks."""
    threading.Thread(target=fetch_categories, args=(API_key, force), daemon=True).start()
|
|
@ -1,25 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# this module defines color palette for BlenderKit UI

# RGBA tuples with components in the 0-1 range, consumed by the custom
# OpenGL/blf drawing code
WHITE = (1, 1, 1, .9)

TEXT = (.9, .9, .9, .6)
GREEN = (.9, 1, .9, .6)
RED = (1, .5, .5, .8)
|
|
@ -1,232 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# mainly update functions and callbacks for ratings properties, here to avoid circular imports.
|
||||
import bpy
|
||||
from blenderkit import utils, paths, tasks_queue, rerequests
|
||||
|
||||
import threading
|
||||
import requests
|
||||
import logging
|
||||
|
||||
bk_logger = logging.getLogger('blenderkit')
|
||||
|
||||
|
||||
def upload_comment_thread(url, comment='', api_key=None):
    '''Upload *comment* to the comment endpoint at *url* via PUT.

    Runs detached from Blender data so it is safe to call on a worker thread.
    '''
    headers = utils.get_headers(api_key)

    bk_logger.debug('upload comment ' + comment)

    # all other form fields are deliberately sent empty - presumably the
    # server derives content_type/object_pk etc. from the url; verify
    # against the server-side comment form
    data = {
        "content_type": "",
        "object_pk": "",
        "timestamp": "",
        "security_hash": "",
        "honeypot": "",
        "name": "",
        "email": "",
        "url": "",
        "comment": comment,
        "followup": False,
        "reply_to": None
    }

    r = rerequests.put(url, data=data, verify=True, headers=headers)
|
||||
|
||||
|
||||
def upload_comment_flag_thread( asset_id = '', comment_id='', flag='like', api_key=None):
    '''Upload a like/dislike flag for a comment.

    Runs detached from Blender data so it is safe to call on a worker thread.
    '''
    headers = utils.get_headers(api_key)

    bk_logger.debug('upload comment flag' + str(comment_id))

    data = {
        "comment": comment_id,
        "flag": flag,
    }
    url = paths.get_api_url() + 'comments/feedback/'

    r = rerequests.post(url, data=data, verify=True, headers=headers)

    #here it's important we read back, so likes are updated accordingly:
    get_comments(asset_id, api_key)
|
||||
|
||||
|
||||
def send_comment_flag_to_thread(asset_id = '', comment_id='', flag='like', api_key = None):
    """Start a thread uploading a like/dislike flag for a comment; intended
    to be scheduled through tasks_queue."""
    uploader = threading.Thread(target=upload_comment_flag_thread,
                                args=(asset_id, comment_id, flag, api_key))
    uploader.start()
|
||||
|
||||
def send_comment_to_thread(url, comment, api_key):
    """Start a thread uploading a comment; intended to be scheduled through
    tasks_queue."""
    uploader = threading.Thread(target=upload_comment_thread,
                                args=(url, comment, api_key))
    uploader.start()
|
||||
|
||||
|
||||
def store_comments_local(asset_id, comments):
    """Cache *comments* for *asset_id* in the window manager ('asset comments')."""
    context = bpy.context
    ac = context.window_manager.get('asset comments', {})
    ac[asset_id] = comments
    # write the dict back so the ID-property change is registered
    context.window_manager['asset comments'] = ac
|
||||
|
||||
|
||||
def get_comments_local(asset_id):
    """Return locally cached comments for *asset_id*, or None when absent."""
    wm = bpy.context.window_manager
    wm['asset comments'] = wm.get('asset comments', {})
    cached = wm['asset comments'].get(asset_id)
    if cached:
        return cached
    return None
|
||||
|
||||
def get_comments_thread(asset_id, api_key):
    """Fetch comments for *asset_id* on a daemon thread."""
    threading.Thread(target=get_comments, args=(asset_id, api_key), daemon=True).start()
|
||||
|
||||
def get_comments(asset_id, api_key):
    '''
    Retrieve comments from BlenderKit server. Can be run from a thread.

    Parameters
    ----------
    asset_id
        uuid of the asset whose comments to fetch
    api_key
        BlenderKit API key used to build the auth headers

    Returns
    -------
    None; on success the comment list is handed to the main thread through
    tasks_queue (store_comments_local).
    '''
    headers = utils.get_headers(api_key)

    url = paths.get_api_url() + 'comments/assets-uuidasset/' + asset_id + '/'
    params = {}
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    # rerequests.get may return None when the request failed entirely
    if r is None:
        return
    if r.status_code == 200:
        rj = r.json()
        # store comments - send them to task queue
        tasks_queue.add_task((store_comments_local, (asset_id, rj['results'])))
|
||||
|
||||
# if len(rj['results'])==0:
|
||||
# # store empty ratings too, so that server isn't checked repeatedly
|
||||
# tasks_queue.add_task((store_rating_local_empty,(asset_id,)))
|
||||
# return ratings
|
||||
|
||||
|
||||
def store_notifications_count_local(all_count):
    '''Store total count of notifications on server in preferences'''
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    user_preferences.notifications_counter = all_count
|
||||
|
||||
def store_notifications_local(notifications):
    '''Store notifications in Blender (window manager ID property).'''
    bpy.context.window_manager['bkit notifications'] = notifications
|
||||
|
||||
def count_all_notifications():
    '''Return the locally stored total notification count - the last value
    reported by the server, kept in the addon preferences.'''
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    return user_preferences.notifications_counter
|
||||
|
||||
|
||||
def check_notifications_read():
    '''checks if all notifications were already read, and removes them if so'''
    notifications = bpy.context.window_manager.get('bkit notifications')
    if notifications is None or notifications.get('count') == 0:
        return True
    for n in notifications['results']:
        # server marks unread notifications with unread == 1
        if n['unread'] == 1:
            return False
    # everything read - drop the cached list so the UI stops showing it
    bpy.context.window_manager['bkit notifications'] = None
    return True
|
||||
|
||||
def get_notifications_thread(api_key, all_count = 1000):
    """Fetch notifications on a daemon thread."""
    threading.Thread(target=get_notifications, args=(api_key, all_count), daemon=True).start()
|
||||
|
||||
def get_notifications(api_key, all_count = 1000):
    '''
    Retrieve notifications from BlenderKit server. Can be run from a thread.

    Parameters
    ----------
    api_key
        BlenderKit API key
    all_count
        total notification count already known locally; when the server
        reports no more than this, unread notifications are not re-fetched

    Returns
    -------
    None; results are handed to the main thread through tasks_queue.
    '''
    headers = utils.get_headers(api_key)

    params = {}

    url = paths.get_api_url() + 'notifications/all_count/'
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    # BUGFIX: rerequests.get may return None on a failed request; the second
    # request below already guarded against that, the first one did not and
    # raised AttributeError on r.status_code.
    if r is None:
        return
    if r.status_code == 200:
        rj = r.json()
        # no new notifications?
        if all_count >= rj['allCount']:
            tasks_queue.add_task((store_notifications_count_local, ([rj['allCount']])))

            return
    url = paths.get_api_url() + 'notifications/unread/'
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    if r is None:
        return
    if r.status_code == 200:
        rj = r.json()
        # store notifications - send them to task queue
        tasks_queue.add_task((store_notifications_local, ([rj])))
|
||||
|
||||
def mark_notification_read_thread(api_key, notification_id):
    """Mark a notification read on a daemon thread."""
    threading.Thread(target=mark_notification_read,
                     args=(api_key, notification_id), daemon=True).start()
|
||||
|
||||
def mark_notification_read(api_key, notification_id):
    '''
    Mark a single notification as read on the server.

    Parameters
    ----------
    api_key
        BlenderKit API key
    notification_id
        id of the notification, interpolated into the endpoint url
    '''
    headers = utils.get_headers(api_key)

    url = paths.get_api_url() + f'notifications/mark-as-read/{notification_id}/'
    params = {}
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    # rerequests.get returns None on a failed request; nothing to do then
    if r is None:
        return
|
||||
# print(r.text)
|
||||
# if r.status_code == 200:
|
||||
# rj = r.json()
|
||||
# # store notifications - send them to task queue
|
||||
# print(rj)
|
||||
# tasks_queue.add_task((mark_notification_read_local, ([notification_id])))
|
||||
|
|
@ -1,78 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
import os
|
||||
import bpy
|
||||
|
||||
# We can store multiple preview collections here,
# however in this example we only store "main"
icon_collections = {}

# thumbnail filename -> icon id used by the UI code
icons_read = {
    'fp.png': 'free',
    'flp.png': 'full',
    'trophy.png': 'trophy',
    'dumbbell.png': 'dumbbell',
    'cc0.png': 'cc0',
    'royalty_free.png': 'royalty_free',
    'filter.png': 'filter',
    'filter_active.png': 'filter_active',
    'bell.png': 'bell',
}

# icons for asset verification states; merged into icons_read below
verification_icons = {
    'vs_ready.png':'ready',
    'vs_deleted.png':'deleted' ,
    'vs_uploaded.png': 'uploaded',
    'vs_uploading.png': 'uploading',
    'vs_on_hold.png': 'on_hold',
    'vs_validated.png': 'validated',
    'vs_rejected.png': 'rejected'

}

icons_read.update(verification_icons)
|
||||
|
||||
def register_icons():
    """Load all icon thumbnails into bpy.utils.previews collections.

    Creates two collections: "main" holding the static UI icons listed in
    icons_read, and an empty "previews" collection that is filled elsewhere.
    """
    # Note that preview collections returned by bpy.utils.previews
    # are regular py objects - you can use them to store custom data.
    import bpy.utils.previews
    pcoll = bpy.utils.previews.new()

    # path to the folder where the icon is
    # the path is calculated relative to this py file inside the addon folder
    icons_dir = os.path.join(os.path.dirname(__file__), "thumbnails")

    # load a preview thumbnail of a file and store in the previews collection
    for ir in icons_read.keys():
        pcoll.load(icons_read[ir], os.path.join(icons_dir, ir), 'IMAGE')

    icon_collections["main"] = pcoll
    icon_collections["previews"] = bpy.utils.previews.new()
|
||||
|
||||
|
||||
def unregister_icons():
    """Release every preview collection created by register_icons()."""
    while icon_collections:
        _name, preview_collection = icon_collections.popitem()
        bpy.utils.previews.remove(preview_collection)
|
|
@ -1,505 +0,0 @@
|
|||
import bpy
|
||||
import os
|
||||
import time
|
||||
|
||||
|
||||
def get_orig_render_settings():
    """Snapshot the scene's image-output settings and view transform.

    Returns a dict that set_orig_render_settings() can later consume to
    restore the user's configuration after a temporary save-render.
    """
    rs = bpy.context.scene.render
    ims = rs.image_settings

    vs = bpy.context.scene.view_settings

    orig_settings = {
        'file_format': ims.file_format,
        'quality': ims.quality,
        'color_mode': ims.color_mode,
        'compression': ims.compression,
        'exr_codec': ims.exr_codec,
        'view_transform': vs.view_transform
    }
    return orig_settings
|
||||
|
||||
|
||||
def set_orig_render_settings(orig_settings):
    """Restore image-output settings previously captured by get_orig_render_settings().

    orig_settings: dict with keys 'file_format', 'quality', 'color_mode',
    'compression', 'exr_codec' and 'view_transform'.
    """
    rs = bpy.context.scene.render
    ims = rs.image_settings
    vs = bpy.context.scene.view_settings

    ims.file_format = orig_settings['file_format']
    ims.quality = orig_settings['quality']
    ims.color_mode = orig_settings['color_mode']
    ims.compression = orig_settings['compression']
    ims.exr_codec = orig_settings['exr_codec']

    vs.view_transform = orig_settings['view_transform']
|
||||
|
||||
|
||||
def img_save_as(img, filepath='//', file_format='JPEG', quality=90, color_mode='RGB', compression=15,
                view_transform='Raw', exr_codec='DWAA'):
    '''Save an image via Blender's 'save render' path with explicit output settings.

    Blender can't reliably save images with other methods, so this temporarily
    reconfigures the scene's render image settings, calls img.save_render(),
    and then restores the user's original settings.
    '''
    # Capture the user's settings first so they can be restored afterwards.
    ors = get_orig_render_settings()

    rs = bpy.context.scene.render
    vs = bpy.context.scene.view_settings

    ims = rs.image_settings
    ims.file_format = file_format
    ims.quality = quality
    ims.color_mode = color_mode
    ims.compression = compression
    ims.exr_codec = exr_codec
    vs.view_transform = view_transform

    img.save_render(filepath=bpy.path.abspath(filepath), scene=bpy.context.scene)

    set_orig_render_settings(ors)
|
||||
|
||||
|
||||
def set_colorspace(img, colorspace):
    '''Set an image's colorspace, tolerating missing colorspace names.

    Done in a try statement because some people replace the default
    colorspace settings (e.g. with the filmic add-on), and it can't be
    guessed which names exist in their configuration.
    '''
    try:
        if colorspace == 'Non-Color':
            # 'Non-Color' is expressed through the is_data flag, not a name.
            img.colorspace_settings.is_data = True
        else:
            img.colorspace_settings.name = colorspace
    except Exception:
        # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit propagate
        print(f'Colorspace {colorspace} not found.')
|
||||
|
||||
def analyze_image_is_true_hdr(image):
    """Flag the image as true HDR when any pixel component exceeds 1.05.

    Reads all pixels into a flat float32 buffer (assumes 4 channels per
    pixel) and stores the result on image.blenderkit.true_hdr.
    Removed: unused `scene` and `ui_props` locals that only touched
    bpy.context for no effect.
    """
    import numpy
    pixel_floats = image.size[0] * image.size[1] * 4
    buffer = numpy.empty(pixel_floats, dtype=numpy.float32)
    # foreach_get is the fast C-side bulk pixel copy.
    image.pixels.foreach_get(buffer)
    # 1.05 rather than 1.0 to tolerate slight overshoot in LDR sources.
    image.blenderkit.true_hdr = numpy.amax(buffer) > 1.05
|
||||
|
||||
def generate_hdr_thumbnail():
    """Create a .jpg thumbnail next to the HDR image selected for upload.

    Reads the image from the upload UI (window_manager.blenderkitUI), copies
    its pixels into a new scratch image, marks whether it is a true HDR
    (any component > 1.05), downscales the copy to at most 2048px wide and
    saves it as JPEG via img_save_as().
    """
    import numpy
    scene = bpy.context.scene
    ui_props = bpy.context.window_manager.blenderkitUI
    hdr_image = ui_props.hdr_upload_image  # bpy.data.images.get(ui_props.hdr_upload_image)

    # Thumbnail lives next to the source file, same base name, .jpg suffix.
    base, ext = os.path.splitext(hdr_image.filepath)
    thumb_path = base + '.jpg'
    thumb_name = os.path.basename(thumb_path)

    max_thumbnail_size = 2048
    size = hdr_image.size
    ratio = size[0] / size[1]

    imageWidth = size[0]
    imageHeight = size[1]
    # Clamp thumbnail dimensions while keeping the source aspect ratio.
    thumbnailWidth = min(size[0], max_thumbnail_size)
    thumbnailHeight = min(size[1], int(max_thumbnail_size / ratio))

    # Flat RGBA float buffer for the fast bulk pixel copy.
    tempBuffer = numpy.empty(imageWidth * imageHeight * 4, dtype=numpy.float32)
    inew = bpy.data.images.new(thumb_name, imageWidth, imageHeight, alpha=False, float_buffer=False)

    hdr_image.pixels.foreach_get(tempBuffer)

    # Record HDR-ness on the source image; 1.05 tolerates slight LDR overshoot.
    hdr_image.blenderkit.true_hdr = numpy.amax(tempBuffer) > 1.05

    inew.filepath = thumb_path
    set_colorspace(inew, 'Linear')
    inew.pixels.foreach_set(tempBuffer)

    bpy.context.view_layer.update()
    if thumbnailWidth < imageWidth:
        inew.scale(thumbnailWidth, thumbnailHeight)

    img_save_as(inew, filepath=inew.filepath)
|
||||
|
||||
|
||||
def find_color_mode(image):
    """Map an image's total bit depth to a Blender color_mode enum value.

    Returns 'BW', 'RGB' or 'RGBA' (defaults to 'RGB' for unknown depths).
    Raises TypeError when *image* is not a bpy.types.Image.
    Fixed: `raise (TypeError)` raised the bare class; now raises an
    instance with a message. Dropped the needless else-after-raise.
    """
    if not isinstance(image, bpy.types.Image):
        raise TypeError('find_color_mode expects a bpy.types.Image')
    # image.depth counts bits over all channels; 32 can also be BW+alpha,
    # but image.channels is unreliable so RGBA is the safe choice.
    depth_mapping = {
        8: 'BW',
        24: 'RGB',
        32: 'RGBA',
        96: 'RGB',
        128: 'RGBA',
    }
    return depth_mapping.get(image.depth, 'RGB')
|
||||
|
||||
|
||||
def find_image_depth(image):
    """Map an image's total bit depth to a per-channel color depth string.

    Returns '8' or '16' (defaults to '8' for unknown depths), suitable for
    ImageFormatSettings.color_depth. Raises TypeError when *image* is not
    a bpy.types.Image.
    Fixed: `raise (TypeError)` raised the bare class; now raises an
    instance with a message. Dropped the needless else-after-raise.
    """
    if not isinstance(image, bpy.types.Image):
        raise TypeError('find_image_depth expects a bpy.types.Image')
    # depth is total bits over all channels; 32 can also be bw+alpha,
    # but image.channels doesn't work reliably.
    depth_mapping = {
        8: '8',
        24: '8',
        32: '8',
        96: '16',
        128: '16',
    }
    return depth_mapping.get(image.depth, '8')
|
||||
|
||||
|
||||
def can_erase_alpha(na):
    """Return True when every alpha sample in the flat RGBA buffer *na* is 1.0.

    A fully opaque image can have its alpha channel dropped before saving.
    """
    alpha_channel = na[3::4]
    fully_opaque = alpha_channel.sum() == alpha_channel.size
    if fully_opaque:
        print('image can have alpha erased')
    return fully_opaque
|
||||
|
||||
|
||||
def is_image_black(na):
    """Return True when all RGB samples in the flat RGBA buffer *na* are zero.

    A pure-black image can be dropped from the asset entirely.
    """
    color_total = na[::4].sum() + na[1::4].sum() + na[2::4].sum()
    if color_total == 0:
        print('image can have alpha channel dropped')
    return color_total == 0
|
||||
|
||||
|
||||
def is_image_bw(na):
    """Return True when the flat RGBA buffer *na* is grayscale (R == G == B everywhere).

    A grayscale image can be saved with a single 'BW' channel.
    """
    reds = na[::4]
    greens = na[1::4]
    blues = na[2::4]

    channels_match = (reds == greens).all() and (greens == blues).all()
    if channels_match:
        print('image is black and white, can have channels reduced')

    return channels_match
|
||||
|
||||
|
||||
def numpytoimage(a, iname, width=0, height=0, channels=3):
    """Write the flat float array *a* into a Blender image named *iname*, creating it if needed.

    First tries to reuse an existing image whose name starts with iname;
    otherwise creates one via bpy.ops.image.new (3 or 4 channels) and looks
    it up again by width/height, falling back to bpy.data.images.new().
    NOTE(review): the reuse check compares image.size[0]/[1] against
    a.shape[0]/[1], but *a* is used as a flat 1-D buffer below — for a flat
    array this match can only succeed accidentally; confirm intent upstream.
    """
    t = time.time()
    foundimage = False

    for image in bpy.data.images:

        if image.name[:len(iname)] == iname and image.size[0] == a.shape[0] and image.size[1] == a.shape[1]:
            i = image
            foundimage = True
    if not foundimage:
        # Create through the operator so the image gets proper generated settings.
        if channels == 4:
            bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0, 1), alpha=True,
                              generated_type='BLANK', float=True)
        if channels == 3:
            bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0), alpha=False,
                              generated_type='BLANK', float=True)

        i = None

        # The operator may rename on collision, so search by prefix and size.
        for image in bpy.data.images:
            # print(image.name[:len(iname)],iname, image.size[0],a.shape[0],image.size[1],a.shape[1])
            if image.name[:len(iname)] == iname and image.size[0] == width and image.size[1] == height:
                i = image
        if i is None:
            i = bpy.data.images.new(iname, width, height, alpha=False, float_buffer=False, stereo3d=False, is_data=False,
                                    tiled=False)

    # dropping this re-shaping code - just doing flat array for speed and simplicity
    # d = a.shape[0] * a.shape[1]
    # a = a.swapaxes(0, 1)
    # a = a.reshape(d)
    # a = a.repeat(channels)
    # a[3::4] = 1
    i.pixels.foreach_set(a)  # this gives big speedup!
    print('\ntime ' + str(time.time() - t))
    return i
|
||||
|
||||
|
||||
def imagetonumpy_flat(i):
    """Return the image's pixels as a flat float32 numpy array (channel-interleaved).

    Length is width * height * i.channels. Uses pixels.foreach_get, the
    fast C-side bulk copy, instead of slicing i.pixels.
    Removed: an unused timing variable and dead commented-out reshaping code.
    """
    import numpy

    width = i.size[0]
    height = i.size[1]

    buffer_size = width * height * i.channels
    na = numpy.empty(buffer_size, numpy.float32)
    i.pixels.foreach_get(na)

    return na
|
||||
|
||||
|
||||
def imagetonumpy(i):
    """Read the image's pixels into a (width, height, channels) float32 numpy array."""
    start = time.time()

    import numpy as np

    w, h = i.size[0], i.size[1]

    # Bulk-copy the flat, channel-interleaved pixel buffer in one C call.
    flat = np.empty(w * h * i.channels, np.float32)
    i.pixels.foreach_get(flat)

    # Blender stores rows first; reshape then swap axes so indexing is [x, y, c].
    # print('\ntime of image to numpy ' + str(time.time() - start))
    return flat.reshape(h, w, i.channels).swapaxes(0, 1)
|
||||
|
||||
|
||||
def downscale(i):
    """Halve the image's resolution in place, unless either halved side would be <= 128px."""
    size_floor = 128

    half_w = round(i.size[0] / 2)
    half_h = round(i.size[1] / 2)

    if half_w > size_floor and half_h > size_floor:
        i.scale(half_w, half_h)
|
||||
|
||||
|
||||
def get_rgb_mean(i):
    '''Return the per-channel (r, g, b) means of the image's pixels.

    Used by the normal-map sanity checks. Assumes the flat pixel buffer is
    RGBA-interleaved.
    Removed: dead per-channel median computations whose results were never
    used (the median-based return was already commented out).
    '''
    na = imagetonumpy_flat(i)

    rmean = na[::4].mean()
    gmean = na[1::4].mean()
    bmean = na[2::4].mean()

    return (rmean, gmean, bmean)
|
||||
|
||||
|
||||
def check_nmap_mean_ok(i):
    '''Return True when the normal map's red and green means fall in the standard 0.45-0.55 band.'''
    rmean, gmean, _bmean = get_rgb_mean(i)

    # Blue is deliberately not checked: some OpenGL substance exports use the
    # full 0-1 blue range, while ~90% of normal maps use 0.5-1.
    red_ok = 0.45 < rmean < 0.55
    green_ok = 0.45 < gmean < 0.55

    return red_ok and green_ok
|
||||
|
||||
|
||||
def check_nmap_ogl_vs_dx(i, mask=None, generated_test_images=False):
    '''
    Guess whether a normal map uses the DirectX or OpenGL green-channel convention.

    Integrates a rough height field twice — once assuming OpenGL (-Y) and
    once assuming DirectX (+Y) — and picks the convention whose integrated
    height field has the smaller standard deviation (the wrong convention
    produces a noisier, higher-variance surface).

    i -- bpy image with the normal map.
    mask -- optional image; only pixels whose alpha > 0 in the mask are used
            (e.g. a UV-coverage mask).
    generated_test_images -- when True, also writes the integrated height
            fields into debug images named 'OpenGL' and 'DirectX'.

    Returns the string 'DirectX' or 'OpenGL'.
    '''
    import numpy
    width = i.size[0]
    height = i.size[1]

    # Channel means act as the neutral value for the slope estimates below.
    rmean, gmean, bmean = get_rgb_mean(i)

    na = imagetonumpy(i)

    if mask:
        mask = imagetonumpy(mask)

    # NOTE(review): these two comparison arrays are allocated but never
    # written or read — presumably leftovers from an older debug path.
    red_x_comparison = numpy.zeros((width, height), numpy.float32)
    green_y_comparison = numpy.zeros((width, height), numpy.float32)

    if generated_test_images:
        red_x_comparison_img = numpy.empty((width, height, 4), numpy.float32)  # images for debugging purposes
        green_y_comparison_img = numpy.empty((width, height, 4), numpy.float32)  # images for debugging purposes

    # Accumulated height fields under each convention.
    ogl = numpy.zeros((width, height), numpy.float32)
    dx = numpy.zeros((width, height), numpy.float32)

    if generated_test_images:
        ogl_img = numpy.empty((width, height, 4), numpy.float32)  # images for debugging purposes
        dx_img = numpy.empty((width, height, 4), numpy.float32)  # images for debugging purposes

    for y in range(0, height):
        for x in range(0, width):
            # try to mask with UV mask image
            if mask is None or mask[x, y, 3] > 0:

                # OpenGL pass: average the heights of the left/upper neighbours
                # and subtract the slopes implied by the R and G channels.
                # NOTE(review): (na[x, y, 2] - 0.5) can be zero for blue == 0.5,
                # which would divide by zero — confirm inputs upstream.
                last_height_x = ogl[max(x - 1, 0), min(y, height - 1)]
                last_height_y = ogl[max(x, 0), min(y - 1, height - 1)]

                diff_x = ((na[x, y, 0] - rmean) / ((na[x, y, 2] - 0.5)))
                diff_y = ((na[x, y, 1] - gmean) / ((na[x, y, 2] - 0.5)))
                calc_height = (last_height_x + last_height_y) \
                              - diff_x - diff_y
                calc_height = calc_height / 2
                ogl[x, y] = calc_height
                if generated_test_images:
                    rgb = calc_height * .1 + .5
                    ogl_img[x, y] = [rgb, rgb, rgb, 1]

                # green channel
                # DirectX pass: identical, except the green slope is added
                # (DirectX flips the green channel relative to OpenGL).
                last_height_x = dx[max(x - 1, 0), min(y, height - 1)]
                last_height_y = dx[max(x, 0), min(y - 1, height - 1)]

                diff_x = ((na[x, y, 0] - rmean) / ((na[x, y, 2] - 0.5)))
                diff_y = ((na[x, y, 1] - gmean) / ((na[x, y, 2] - 0.5)))
                calc_height = (last_height_x + last_height_y) \
                              - diff_x + diff_y
                calc_height = calc_height / 2
                dx[x, y] = calc_height
                if generated_test_images:
                    rgb = calc_height * .1 + .5
                    dx_img[x, y] = [rgb, rgb, rgb, 1]

    # Lower variance means the convention's slopes integrated consistently.
    ogl_std = ogl.std()
    dx_std = dx.std()

    # print(mean_ogl, mean_dx)
    # print(max_ogl, max_dx)
    print(ogl_std, dx_std)
    print(i.name)
    # if abs(mean_ogl) > abs(mean_dx):
    if abs(ogl_std) > abs(dx_std):
        print('this is probably a DirectX texture')
    else:
        print('this is probably an OpenGL texture')

    if generated_test_images:
        # red_x_comparison_img = red_x_comparison_img.swapaxes(0,1)
        # red_x_comparison_img = red_x_comparison_img.flatten()
        #
        # green_y_comparison_img = green_y_comparison_img.swapaxes(0,1)
        # green_y_comparison_img = green_y_comparison_img.flatten()
        #
        # numpytoimage(red_x_comparison_img, 'red_' + i.name, width=width, height=height, channels=1)
        # numpytoimage(green_y_comparison_img, 'green_' + i.name, width=width, height=height, channels=1)

        # Flatten to the layout numpytoimage expects for foreach_set.
        ogl_img = ogl_img.swapaxes(0, 1)
        ogl_img = ogl_img.flatten()

        dx_img = dx_img.swapaxes(0, 1)
        dx_img = dx_img.flatten()

        numpytoimage(ogl_img, 'OpenGL', width=width, height=height, channels=1)
        numpytoimage(dx_img, 'DirectX', width=width, height=height, channels=1)

    if abs(ogl_std) > abs(dx_std):
        return 'DirectX'
    return 'OpenGL'
|
||||
|
||||
|
||||
def make_possible_reductions_on_image(teximage, input_filepath, do_reductions=False, do_downscale=False):
    '''Check the image and save it to disk with possibly reduced channels/size.

    Optionally converts fully-opaque PNGs to JPEG, reduces grayscale images
    to a single 'BW' channel, and halves resolution. Temporarily changes the
    scene's render image settings and display device; image settings are
    restored at the end.

    teximage -- bpy image to process (saved in place, then reloaded).
    input_filepath -- path to save to (extension may change PNG->JPG).
    do_reductions -- analyze pixels and drop alpha / reduce channels.
    do_downscale -- halve resolution (never below 128px, see downscale()).

    Fixed: the JPEG-quality branch compared against 'JPG', which is not a
    valid ImageFormatSettings.file_format value ('JPEG' is), so already-JPEG
    images were never saved with the intended quality.
    '''
    # Save under Non-Color to avoid color-managed transforms on texture data.
    colorspace = teximage.colorspace_settings.name
    teximage.colorspace_settings.name = 'Non-Color'
    # teximage.colorspace_settings.name = 'sRGB' color correction mambo jambo.

    JPEG_QUALITY = 90

    rs = bpy.context.scene.render
    ims = rs.image_settings

    # Snapshot render image settings so the user's setup can be restored.
    orig_file_format = ims.file_format
    orig_quality = ims.quality
    orig_color_mode = ims.color_mode
    orig_compression = ims.compression
    orig_depth = ims.color_depth

    print(teximage.name)
    print(teximage.depth)
    print(teximage.channels)

    # NOTE(review): display device is changed but never restored here —
    # presumably restored by the caller; confirm.
    bpy.context.scene.display_settings.display_device = 'None'

    # setup image depth, 8 or 16 bit.
    # this should normally divide depth with number of channels, but blender
    # always states that number of channels is 4, even if there are only 3
    image_depth = find_image_depth(teximage)

    ims.color_mode = find_color_mode(teximage)
    print('resulting depth set to:', image_depth)

    fp = input_filepath
    if do_reductions:
        na = imagetonumpy_flat(teximage)

        if can_erase_alpha(na):
            print(teximage.file_format)
            if teximage.file_format == 'PNG':
                print('changing type of image to JPG')
                base, ext = os.path.splitext(fp)
                # Remember the original extension so uploads can report it.
                teximage['original_extension'] = ext

                fp = fp.replace('.png', '.jpg')
                fp = fp.replace('.PNG', '.jpg')

                teximage.name = teximage.name.replace('.png', '.jpg')
                teximage.name = teximage.name.replace('.PNG', '.jpg')

                teximage.file_format = 'JPEG'
                ims.quality = JPEG_QUALITY
                ims.color_mode = 'RGB'

            if is_image_bw(na):
                ims.color_mode = 'BW'

    ims.file_format = teximage.file_format
    ims.color_depth = image_depth

    # all pngs with max compression
    if ims.file_format == 'PNG':
        ims.compression = 100
    # all jpgs brought to reasonable quality
    if ims.file_format == 'JPEG':  # BUGFIX: was 'JPG', which never matches the enum
        ims.quality = JPEG_QUALITY

    if do_downscale:
        downscale(teximage)

    # it's actually very important not to try to change the image filepath and packed file filepath before saving,
    # blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
    teximage.save_render(filepath=bpy.path.abspath(fp), scene=bpy.context.scene)
    if len(teximage.packed_files) > 0:
        teximage.unpack(method='REMOVE')
    teximage.filepath = fp
    teximage.filepath_raw = fp
    teximage.reload()

    teximage.colorspace_settings.name = colorspace

    # Restore the user's render image settings.
    ims.file_format = orig_file_format
    ims.quality = orig_quality
    ims.color_mode = orig_color_mode
    ims.compression = orig_compression
    ims.color_depth = orig_depth
|
|
@ -1,117 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
import json
|
||||
import webbrowser
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
from urllib.parse import parse_qs, quote as urlquote, urlparse
|
||||
|
||||
import requests
|
||||
|
||||
|
||||
class PortsBlockedException(Exception):
    """Raised when none of the candidate local ports could be bound for the OAuth redirect server."""
    pass
|
||||
|
||||
|
||||
class SimpleOAuthAuthenticator(object):
    """Minimal OAuth2 authorization-code client for the BlenderKit server.

    Opens the system browser for the user to log in, captures the redirect
    on a local HTTP server, and exchanges the authorization code (or a
    refresh token) for access/refresh tokens.
    """

    def __init__(self, server_url, client_id, ports):
        # server_url: OAuth provider base URL, e.g. https://www.blenderkit.com
        # client_id: registered OAuth application id
        # ports: iterable of local port numbers to try for the redirect server
        self.server_url = server_url
        self.client_id = client_id
        self.ports = ports

    def _get_tokens(self, authorization_code=None, refresh_token=None, grant_type="authorization_code"):
        """POST to the token endpoint; returns (access_token, refresh_token, full_json).

        Returns (None, None, None) on a non-200 response. Pass either
        authorization_code (grant_type="authorization_code") or
        refresh_token (grant_type="refresh_token").
        """
        data = {
            "grant_type": grant_type,
            "state": "random_state_string",
            "client_id": self.client_id,
            "scopes": "read write",
        }
        # redirect_uri exists only after get_new_token() picked a port.
        if hasattr(self, 'redirect_uri'):
            data["redirect_uri"] = self.redirect_uri
        if authorization_code:
            data['code'] = authorization_code
        if refresh_token:
            data['refresh_token'] = refresh_token

        response = requests.post(
            '%s/o/token/' % self.server_url,
            data=data
        )
        if response.status_code != 200:
            print("error retrieving refresh tokens %s" % response.status_code)
            print(response.content)
            return None, None, None

        response_json = json.loads(response.content)
        refresh_token = response_json['refresh_token']
        access_token = response_json['access_token']
        return access_token, refresh_token, response_json

    def get_new_token(self, register=True, redirect_url=None):
        """Run the interactive browser login flow and return the token triple.

        register -- when True, send the user through the signup page first.
        redirect_url -- optional page to bounce the browser to after success.
        Raises PortsBlockedException when no local port can be bound.
        """
        class HTTPServerHandler(BaseHTTPRequestHandler):
            # Tiny one-shot handler that captures the ?code= query parameter
            # from the OAuth redirect and shows a close-this-window page.
            html_template = '<html>%(head)s<h1>%(message)s</h1></html>'

            def do_GET(self):
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                if 'code' in self.path:
                    self.auth_code = self.path.split('=')[1]
                    # Display to the user that they no longer need the browser window
                    if redirect_url:
                        redirect_string = (
                            '<head><meta http-equiv="refresh" content="0;url=%(redirect_url)s"></head>'
                            '<script> window.location.href="%(redirect_url)s"; </script>' % {'redirect_url': redirect_url}
                        )
                    else:
                        redirect_string = ""
                    self.wfile.write(bytes(self.html_template % {'head': redirect_string, 'message': 'You may now close this window.'}, 'utf-8'))
                    qs = parse_qs(urlparse(self.path).query)
                    # Hand the code to get_new_token() via the server object.
                    self.server.authorization_code = qs['code'][0]
                else:
                    self.wfile.write(bytes(self.html_template % {'head': '', 'message': 'Authorization failed.'}, 'utf-8'))

        # Bind the first free port from the configured list; the for/else
        # fires only when every port failed.
        for port in self.ports:
            try:
                httpServer = HTTPServer(('localhost', port), HTTPServerHandler)
            except Exception as e:
                print(f"Port {port}: {e}")
                continue
            break
        else:
            print("All available ports are blocked")
            raise PortsBlockedException(f"All available ports are blocked: {self.ports}")
        print(f"Choosen port {port}")
        self.redirect_uri = f"http://localhost:{port}/consumer/exchange/"
        authorize_url = (
            "/o/authorize?client_id=%s&state=random_state_string&response_type=code&"
            "redirect_uri=%s" % (self.client_id, self.redirect_uri)
        )
        if register:
            authorize_url = "%s/accounts/register/?next=%s" % (self.server_url, urlquote(authorize_url))
        else:
            authorize_url = "%s%s" % (self.server_url, authorize_url)
        webbrowser.open_new(authorize_url)

        # Block until the browser hits the redirect exactly once.
        httpServer.handle_request()
        authorization_code = httpServer.authorization_code
        return self._get_tokens(authorization_code=authorization_code)

    def get_refreshed_token(self, refresh_token):
        """Exchange a refresh token for a fresh (access, refresh, json) triple."""
        return self._get_tokens(refresh_token=refresh_token, grant_type="refresh_token")
|
|
@ -1,300 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import utils
|
||||
|
||||
import bpy, mathutils
|
||||
from bpy.types import (
|
||||
Operator)
|
||||
|
||||
|
||||
def getNodes(nt, node_type='OUTPUT_MATERIAL'):
    """Collect every node of *node_type* in node tree *nt*, descending into node groups."""
    pending = list(nt.nodes)
    found = []
    while pending:
        node = pending.pop()
        if node.type == node_type:
            found.append(node)
        if node.type == 'GROUP':
            # Group nodes wrap their own tree; search it too.
            pending.extend(node.node_tree.nodes)
    return found
|
||||
|
||||
|
||||
def getShadersCrawl(nt, chnodes):
    """Walk upstream from *chnodes* and collect the shader nodes feeding them.

    nt -- the node tree the crawl runs in (paired with each found shader).
    chnodes -- list of starting nodes; NOTE: consumed (popped) by the crawl.

    A node counts as a shader when it has a SHADER output but no SHADER
    input (mix/add shaders and group boundary nodes are excluded). Recurses
    into node groups via their GROUP_OUTPUT nodes.

    Returns a list of (shader_node, node_tree) tuples.
    """
    shaders = []
    # Track visited nodes so shared upstream nodes are crawled only once.
    done_nodes = chnodes[:]

    while len(chnodes) > 0:
        check_node = chnodes.pop()
        is_shader = False
        for o in check_node.outputs:
            if o.type == 'SHADER':
                is_shader = True
        for i in check_node.inputs:
            if i.type == 'SHADER':
                is_shader = False  # this is for mix nodes and group inputs..
            if len(i.links) > 0:
                for l in i.links:
                    fn = l.from_node
                    if fn not in done_nodes:
                        done_nodes.append(fn)
                        chnodes.append(fn)
                        if fn.type == 'GROUP':
                            # Crawl the group's interior starting at its outputs.
                            group_outputs = getNodes(fn.node_tree, node_type='GROUP_OUTPUT')
                            shaders.extend(getShadersCrawl(fn.node_tree, group_outputs))

        if check_node.type == 'GROUP':
            is_shader = False

        if is_shader:
            shaders.append((check_node, nt))

    return (shaders)
|
||||
|
||||
|
||||
def addColorCorrectors(material):
    """Insert the 'bkit_asset_tweaker' node group in front of every RGBA shader input of *material*.

    For each shader found by getShadersCrawl (transparent BSDFs excluded),
    every color (RGBA) input gets a tweaker group spliced into its link, or
    attached directly when the input is unlinked. Inputs that already go
    through a tweaker group are left alone.

    Returns the list of tweaker group nodes touched or created.
    Fixed: the already-linked branch appended to an undefined name
    `tweakers` (NameError); and the collected list was never returned even
    though the caller assigns the result.
    """
    nt = material.node_tree
    output = getNodes(nt, 'OUTPUT_MATERIAL')[0]
    shaders = getShadersCrawl(nt, [output])

    correctors = []
    for shader, nt in shaders:

        if shader.type != 'BSDF_TRANSPARENT':  # exclude transparent for color tweaks
            for i in shader.inputs:
                if i.type == 'RGBA':
                    if len(i.links) > 0:
                        l = i.links[0]
                        if not (l.from_node.type == 'GROUP' and l.from_node.node_tree.name == 'bkit_asset_tweaker'):
                            from_socket = l.from_socket
                            to_socket = l.to_socket

                            # Splice the tweaker group into the existing link.
                            g = nt.nodes.new(type='ShaderNodeGroup')
                            g.node_tree = bpy.data.node_groups['bkit_asset_tweaker']
                            g.location = shader.location
                            g.location.x -= 100

                            nt.links.new(from_socket, g.inputs[0])
                            nt.links.new(g.outputs[0], to_socket)
                        else:
                            # Input already runs through a tweaker; reuse it.
                            g = l.from_node
                            correctors.append(g)  # BUGFIX: was `tweakers.append(g)` (undefined name)
                    else:
                        # Unlinked color input: attach a tweaker directly.
                        g = nt.nodes.new(type='ShaderNodeGroup')
                        g.node_tree = bpy.data.node_groups['bkit_asset_tweaker']
                        g.location = shader.location
                        g.location.x -= 100

                        nt.links.new(g.outputs[0], i)
                        correctors.append(g)

    return correctors
|
||||
|
||||
|
||||
# def modelProxy():
|
||||
# utils.p('No proxies in Blender anymore')
|
||||
# return False
|
||||
#
|
||||
# s = bpy.context.scene
|
||||
# ao = bpy.context.active_object
|
||||
# if utils.is_linked_asset(ao):
|
||||
# utils.activate(ao)
|
||||
#
|
||||
# g = ao.instance_collection
|
||||
#
|
||||
# rigs = []
|
||||
#
|
||||
# for ob in g.objects:
|
||||
# if ob.type == 'ARMATURE':
|
||||
# rigs.append(ob)
|
||||
#
|
||||
# if len(rigs) == 1:
|
||||
#
|
||||
# ao.instance_collection = None
|
||||
# bpy.ops.object.duplicate()
|
||||
# new_ao = bpy.context.view_layer.objects.active
|
||||
# new_ao.instance_collection = g
|
||||
# new_ao.empty_display_type = 'SPHERE'
|
||||
# new_ao.empty_display_size *= 0.1
|
||||
#
|
||||
# # bpy.ops.object.proxy_make(object=rigs[0].name)
|
||||
# proxy = bpy.context.active_object
|
||||
# bpy.context.view_layer.objects.active = ao
|
||||
# ao.select_set(True)
|
||||
# new_ao.select_set(True)
|
||||
# new_ao.use_extra_recalc_object = True
|
||||
# new_ao.use_extra_recalc_data = True
|
||||
# bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
|
||||
# return True
|
||||
# else: # TODO report this to ui
|
||||
# utils.p('not sure what to proxify')
|
||||
# return False
|
||||
|
||||
|
||||
# Shader node types whose presence makes a material transparent or volumetric
# in Eevee; used by ensure_eevee_transparency() to decide whether the material
# needs an alpha-aware blend mode.
eevee_transp_nodes = [
    'BSDF_GLASS',
    'BSDF_REFRACTION',
    'BSDF_TRANSPARENT',
    'PRINCIPLED_VOLUME',
    'VOLUME_ABSORPTION',
    'VOLUME_SCATTER'
]
|
||||
|
||||
|
||||
def ensure_eevee_transparency(m):
    ''' ensures alpha for transparent materials when the user didn't set it up correctly

    m -- a material. Only materials still on the 'OPAQUE' blend method are
    examined; if any node implies transparency (see eevee_transp_nodes) or a
    Principled BSDF has non-zero / linked Transmission, the blend and shadow
    methods are switched to 'HASHED'.
    '''
    # if the blend mode is opaque, it means user probably ddidn't know or forgot to
    # set up material properly
    if m.blend_method == 'OPAQUE':
        alpha = False
        for n in m.node_tree.nodes:
            if n.type in eevee_transp_nodes:
                alpha = True
            elif n.type == 'BSDF_PRINCIPLED':
                i = n.inputs['Transmission']
                if i.default_value > 0 or len(i.links) > 0:
                    alpha = True
        if alpha:
            m.blend_method = 'HASHED'
            m.shadow_method = 'HASHED'
|
||||
|
||||
|
||||
class BringToScene(Operator):
    """Bring linked object hierarchy to scene and make it editable"""

    bl_idname = "object.blenderkit_bring_to_scene"
    bl_label = "BlenderKit bring objects to scene"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        # Needs an active object (expected to be a collection-instance empty).
        return bpy.context.view_layer.objects.active is not None

    def execute(self, context):
        """Unwrap the active object's instance collection into the scene.

        Moves the collection's objects into the scene collection, makes them
        local/editable, and re-parents every other empty that instanced the
        same collection to a (linked-duplicate) copy of the unwrapped
        hierarchy so all instances stay visually in place.
        """
        s = bpy.context.scene
        sobs = s.collection.all_objects
        aob = bpy.context.active_object
        dg = aob.instance_collection
        vlayer = bpy.context.view_layer
        instances_emptys = []

        # first, find instances of this collection in the scene
        for ob in sobs:
            if ob.instance_collection == dg and ob not in instances_emptys:
                instances_emptys.append(ob)
                # Stop the empty from instancing; its geometry comes back below.
                ob.instance_collection = None
                ob.instance_type = 'NONE'
        # dg.make_local
        parent = None
        obs = []
        # Move the collection's objects into the scene collection.
        # NOTE(review): dg.objects is mutated (unlink) while iterating — works
        # in practice here but is fragile; confirm before touching this loop.
        for ob in dg.objects:
            dg.objects.unlink(ob)
            try:
                s.collection.objects.link(ob)
                ob.select_set(True)
                obs.append(ob)
                if ob.parent == None:
                    # The root of the hierarchy becomes the active object.
                    parent = ob
                    bpy.context.view_layer.objects.active = parent
            except Exception as e:
                print(e)

        bpy.ops.object.make_local(type='ALL')

        # make_local may have produced replacement datablocks; re-resolve by name.
        for i, ob in enumerate(obs):
            if ob.name in vlayer.objects:
                obs[i] = vlayer.objects[ob.name]
            try:
                ob.select_set(True)
            except Exception as e:
                print('failed to select an object from the collection, getting a replacement.')
                print(e)

        related = []

        # One hierarchy per former instance-empty: the first empty reuses the
        # unwrapped objects, each further empty gets a linked duplicate.
        for i, ob in enumerate(instances_emptys):
            if i > 0:
                bpy.ops.object.duplicate(linked=True)

            related.append([ob, bpy.context.active_object, mathutils.Vector(bpy.context.active_object.scale)])

        # Snap each hierarchy to its empty's transform and parent it there.
        for relation in related:
            bpy.ops.object.select_all(action='DESELECT')
            bpy.context.view_layer.objects.active = relation[0]
            relation[0].select_set(True)
            relation[1].select_set(True)
            relation[1].matrix_world = relation[0].matrix_world
            # Combine the duplicate's own scale with the empty's scale.
            relation[1].scale.x = relation[2].x * relation[0].scale.x
            relation[1].scale.y = relation[2].y * relation[0].scale.y
            relation[1].scale.z = relation[2].z * relation[0].scale.z
            bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)

        return {'FINISHED'}
|
||||
|
||||
|
||||
# class ModelProxy(Operator):
|
||||
# """Attempt to create proxy armature from the asset"""
|
||||
# bl_idname = "object.blenderkit_make_proxy"
|
||||
# bl_label = "BlenderKit Make Proxy"
|
||||
#
|
||||
# @classmethod
|
||||
# def poll(cls, context):
|
||||
# return bpy.context.view_layer.objects.active is not None
|
||||
#
|
||||
# def execute(self, context):
|
||||
# result = modelProxy()
|
||||
# if not result:
|
||||
# self.report({'INFO'}, 'No proxy made.There is no armature or more than one in the model.')
|
||||
# return {'FINISHED'}
|
||||
|
||||
|
||||
class ColorCorrector(Operator):
    """Add color corrector to the asset."""
    bl_idname = "object.blenderkit_color_corrector"
    bl_label = "Add color corrector"

    @classmethod
    def poll(cls, context):
        # Needs an active object (expected to be a collection-instance empty).
        return bpy.context.view_layer.objects.active is not None

    def execute(self, context):
        """Attach 'bkit_asset_tweaker' color-corrector groups to every material
        used by the active object's instance collection.
        """
        ao = bpy.context.active_object
        g = ao.instance_collection
        ao['color correctors'] = []
        mats = []

        # Gather the unique materials across all objects in the collection.
        for o in g.objects:
            for ms in o.material_slots:
                if ms.material not in mats:
                    mats.append(ms.material)
        for mat in mats:
            correctors = addColorCorrectors(mat)

        # BUGFIX: was `return 'FINISHED'` (a string) — Blender operators must
        # return a set of enum strings, otherwise execution errors out.
        return {'FINISHED'}
|
||||
|
||||
|
||||
def register_overrides():
    """Register the override operators (ModelProxy remains disabled)."""
    for operator_cls in (BringToScene, ColorCorrector):
        bpy.utils.register_class(operator_cls)
|
||||
|
||||
|
||||
def unregister_overrides():
    """Unregister the override operators (ModelProxy remains disabled)."""
    for operator_cls in (BringToScene, ColorCorrector):
        bpy.utils.unregister_class(operator_cls)
|
|
@ -1,407 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
import bpy, os, sys, tempfile, shutil
|
||||
from blenderkit import tasks_queue, ui, utils, reports
|
||||
|
||||
# Blender's user preset directory; the add-on settings file lives there.
_presets = os.path.join(bpy.utils.user_resource('SCRIPTS'), "presets")
# Server roots - get_bkit_url() picks one based on bpy.app.debug_value.
BLENDERKIT_LOCAL = "http://localhost:8001"
BLENDERKIT_MAIN = "https://www.blenderkit.com"
BLENDERKIT_DEVEL = "https://devel.blenderkit.com"
# Path fragments appended to the chosen server root.
BLENDERKIT_API = "/api/v1/"
BLENDERKIT_REPORT_URL = "usage_report/"
BLENDERKIT_USER_ASSETS = "/my-assets"
BLENDERKIT_PLANS = "/plans/pricing/"
# External help / documentation links opened from the UI.
BLENDERKIT_MANUAL = "https://youtu.be/pSay3yaBWV0"
BLENDERKIT_MODEL_UPLOAD_INSTRUCTIONS_URL = "https://www.blenderkit.com/docs/upload/"
BLENDERKIT_MATERIAL_UPLOAD_INSTRUCTIONS_URL = "https://www.blenderkit.com/docs/uploading-material/"
BLENDERKIT_BRUSH_UPLOAD_INSTRUCTIONS_URL = "https://www.blenderkit.com/docs/uploading-brush/"
BLENDERKIT_HDR_UPLOAD_INSTRUCTIONS_URL = "https://www.blenderkit.com/docs/uploading-hdr/"
BLENDERKIT_SCENE_UPLOAD_INSTRUCTIONS_URL = "https://www.blenderkit.com/docs/uploading-scene/"
BLENDERKIT_LOGIN_URL = "https://www.blenderkit.com/accounts/login"
BLENDERKIT_OAUTH_LANDING_URL = "/oauth-landing/"
BLENDERKIT_SIGNUP_URL = "https://www.blenderkit.com/accounts/register"
# JSON file with persisted add-on settings, stored among Blender's presets.
BLENDERKIT_SETTINGS_FILENAME = os.path.join(_presets, "bkit.json")
|
||||
|
||||
|
||||
def cleanup_old_folders():
    """Remove folders used by historical versions of BlenderKit.

    Currently this only deletes the old ~/blenderkit_data/temp directory.
    Failures are printed, never raised.
    """
    legacy_temp = os.path.join(os.path.expanduser('~'), 'blenderkit_data', 'temp')
    if not os.path.isdir(legacy_temp):
        return
    try:
        shutil.rmtree(legacy_temp)
    except Exception as e:
        print(e)
        print("couldn't delete old temp directory")
|
||||
|
||||
|
||||
def get_bkit_url():
    """Return the server root selected by bpy.app.debug_value.

    1 -> local development server, 2 -> devel server, anything else -> production.
    """
    debug_level = bpy.app.debug_value
    if debug_level == 1:
        return BLENDERKIT_LOCAL
    if debug_level == 2:
        return BLENDERKIT_DEVEL
    return BLENDERKIT_MAIN
|
||||
|
||||
|
||||
def find_in_local(text=''):
    """Recursively list file names under the current directory whose name contains *text*."""
    return [file_name
            for _root, _dirs, files in os.walk('.')
            for file_name in files
            if text in file_name]
|
||||
|
||||
|
||||
def get_api_url():
    """Full REST API root for the currently selected server."""
    return f'{get_bkit_url()}{BLENDERKIT_API}'
|
||||
|
||||
|
||||
def get_oauth_landing_url():
    """OAuth landing page URL on the currently selected server."""
    return f'{get_bkit_url()}{BLENDERKIT_OAUTH_LANDING_URL}'
|
||||
|
||||
|
||||
def get_author_gallery_url(author_id):
    """Web gallery URL filtered to the given author's assets."""
    return get_bkit_url() + '/asset-gallery?query=author_id:' + str(author_id)
|
||||
|
||||
def get_asset_gallery_url(asset_id):
    """Web gallery detail-page URL for the given asset."""
    return get_bkit_url() + '/asset-gallery-detail/' + str(asset_id) + '/'
|
||||
|
||||
def default_global_dict():
    """Default asset-storage directory: <home>/blenderkit_data."""
    return os.path.expanduser("~") + os.sep + 'blenderkit_data'
|
||||
|
||||
|
||||
def get_categories_filepath():
    """Path of the cached category-tree JSON inside the temp directory."""
    return os.path.join(get_temp_dir(), 'categories.json')
|
||||
|
||||
dirs_exist_dict = {}  # cache these results since this is used very often
# this causes the function to fail if user deletes the directory while blender is running,
# but comes back when blender is restarted.
def get_temp_dir(subdir=None):
    # Return (creating it if needed) the add-on temp directory, or a named
    # subdirectory of it.  Successful lookups are cached in dirs_exist_dict;
    # on any failure the global directory preference is reset to its default
    # and the lookup retried once via recursion.
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    # first try cached results
    if subdir is not None:
        d = dirs_exist_dict.get(subdir)
        if d is not None:
            return d
    else:
        d = dirs_exist_dict.get('top')
        if d is not None:
            return d

    # tempdir = user_preferences.temp_dir
    tempdir = os.path.join(tempfile.gettempdir(), 'bkit_temp')
    if tempdir.startswith('//'):
        # '//' is Blender's marker for a blend-file-relative path
        tempdir = bpy.path.abspath(tempdir)
    try:
        if not os.path.exists(tempdir):
            os.makedirs(tempdir)
        dirs_exist_dict['top'] = tempdir

        if subdir is not None:
            tempdir = os.path.join(tempdir, subdir)
            if not os.path.exists(tempdir):
                os.makedirs(tempdir)
            dirs_exist_dict[subdir] = tempdir

        cleanup_old_folders()
    except:
        # NOTE(review): bare except - any failure (permissions, race with a
        # user deleting the folder) falls through to resetting the global
        # directory preference below.
        tasks_queue.add_task((reports.add_report, ('Cache directory not found. Resetting Cache folder path.',)))

        p = default_global_dict()
        if p == user_preferences.global_dir:
            # already at the default - nothing left to reset, give up
            message = 'Global dir was already default, plese set a global directory in addon preferences to a dir where you have write permissions.'
            tasks_queue.add_task((reports.add_report, (message,)))
            return None
        user_preferences.global_dir = p
        # retry once with the default directory
        tempdir = get_temp_dir(subdir=subdir)
    return tempdir
|
||||
|
||||
|
||||
|
||||
def get_download_dirs(asset_type):
    """Return the directories where assets of *asset_type* will be downloaded.

    Depending on the 'directory_behaviour' preference this is the global
    directory, the .blend-relative project directory, or both.  Missing
    directories are created on demand.

    It's important the local dir gets appended second: for the linking process
    only the last file name is taken, while for the download process the first
    name is taken and, if two names were returned, the file is copied to the
    second path.
    """
    subdmapping = {'brush': 'brushes', 'texture': 'textures', 'model': 'models', 'scene': 'scenes',
                   'material': 'materials', 'hdr': 'hdrs'}
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    dirs = []
    subd = subdmapping[asset_type]

    # BUGFIX: the original condition was `== 'BOTH' or 'GLOBAL'`, which is
    # always truthy ('GLOBAL' is a non-empty string), so the global directory
    # was used even when the preference was set to 'LOCAL'.
    if user_preferences.directory_behaviour in ('BOTH', 'GLOBAL'):
        ddir = user_preferences.global_dir
        if ddir.startswith('//'):
            ddir = bpy.path.abspath(ddir)
        subdir = os.path.join(ddir, subd)
        if not os.path.exists(subdir):
            # creates the parent global dir as well when missing
            os.makedirs(subdir)
        dirs.append(subdir)

    if user_preferences.directory_behaviour in ('BOTH', 'LOCAL') and bpy.data.is_saved:
        ddir = user_preferences.project_subdir
        if ddir.startswith('//'):
            ddir = bpy.path.abspath(ddir)
        subdir = os.path.join(ddir, subd)
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        dirs.append(subdir)

    return dirs
|
||||
|
||||
|
||||
def slugify(slug):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.
    """
    import unicodedata, re
    slug = slug.lower()

    characters = '<>:"/\\|?\*., ()#'
    # map every unsafe character to an underscore in a single pass
    slug = slug.translate(str.maketrans({ch: '_' for ch in characters}))
    slug = re.sub(r'[^a-z0-9]+.- ', '-', slug).strip('-')
    slug = re.sub(r'[-]+', '-', slug)
    slug = re.sub(r'/', '_', slug)
    slug = re.sub(r'\\\'\"', '_', slug)
    # cap the length so generated file names stay manageable
    return slug[:50] if len(slug) > 50 else slug
|
||||
|
||||
|
||||
def extract_filename_from_url(url):
    """Return the file-name component of *url*, with any query string stripped.

    Returns '' for a None url.
    """
    if url is None:
        return ''
    return url.rsplit('/', 1)[-1].split('?', 1)[0]
|
||||
|
||||
|
||||
# Suffix appended to local file / texture-directory names per server fileType;
# 'blend' is the original, unsuffixed upload.
resolution_suffix = {
    'blend': '',
    'resolution_0_5K': '_05k',
    'resolution_1K': '_1k',
    'resolution_2K': '_2k',
    'resolution_4K': '_4k',
    'resolution_8K': '_8k',
}
# Pixel size corresponding to each resolution fileType.
resolutions = {
    'resolution_0_5K': 512,
    'resolution_1K': 1024,
    'resolution_2K': 2048,
    'resolution_4K': 4096,
    'resolution_8K': 8192,
}


def round_to_closest_resolution(res):
    """Return the resolution key whose pixel size is nearest to *res*.

    Ties resolve to the smaller resolution (dict insertion order).
    """
    return min(resolutions, key=lambda key: abs(res - resolutions[key]))
|
||||
|
||||
|
||||
def get_res_file(asset_data, resolution, find_closest_with_url=False):
    """Return the file entry of *asset_data* closest to the requested resolution.

    Returns a tuple (file_dict, resolution_name) so callers can record which
    resolution was actually chosen:
      - the original 'blend' file when resolution == 'blend',
      - the exact resolution file when the asset offers it,
      - otherwise the closest available resolution,
      - the original file when no resolution files exist at all.

    params
    asset_data
    resolution - ideal resolution key (see `resolutions`)
    find_closest_with_url:
        returns only resolutions that already contain a url in the asset data,
        used in scenes where the asset is/was already present.
    """
    # NOTE: a dead local `res` (always None) was removed from the original;
    # the final fallback only ever depended on `closest`.
    orig = None
    closest = None
    target_resolution = resolutions.get(resolution)
    mindist = 100000000

    for f in asset_data['files']:
        if f['fileType'] == 'blend':
            orig = f
            if resolution == 'blend':
                # orig file found, return.
                return orig, 'blend'

        if f['fileType'] == resolution:
            # exact match found, return.
            return f, resolution
        # track the closest resolution in case no exact match is found
        rval = resolutions.get(f['fileType'])
        if rval and target_resolution:
            rdiff = abs(target_resolution - rval)
            if rdiff < mindist:
                closest = f
                mindist = rdiff

    if closest is None:
        # no resolution files at all - fall back to the original blend
        return orig, 'blend'
    return closest, closest['fileType']
|
||||
|
||||
def server_2_local_filename(asset_data, filename):
    """Convert a server-side file name to the local naming scheme.

    Strips the 'blend_'/'resolution_' prefixes and prepends the slugified
    asset name.  This should get replaced eventually.
    """
    stripped = filename.replace('blend_', '').replace('resolution_', '')
    return slugify(asset_data['name']) + '_' + stripped
|
||||
|
||||
def get_texture_directory(asset_data, resolution='blend'):
    """Blend-file-relative ('//') texture directory for the given resolution."""
    suffix = resolution_suffix[resolution]
    return f"//textures{suffix}{os.sep}"
|
||||
|
||||
def get_download_filepaths(asset_data, resolution='blend', can_return_others=False):
    '''Get all possible paths of the asset and resolution. Usually global and local directory.'''
    download_dirs = get_download_dirs(asset_data['assetType'])
    res_file, resolution = get_res_file(asset_data, resolution, find_closest_with_url=can_return_others)

    file_names = []
    if not res_file:
        return file_names
    if res_file.get('url') is None:
        utils.p('file paths', file_names)
        return file_names

    # local names drop the 'blend_'/'resolution_' prefixes used on the server
    local_name = server_2_local_filename(asset_data, extract_filename_from_url(res_file['url']))
    folder_name = f"{slugify(asset_data['name'])}_{asset_data['id']}"
    for base_dir in download_dirs:
        asset_folder = os.path.join(base_dir, folder_name)
        if not os.path.exists(asset_folder):
            os.makedirs(asset_folder)
        file_names.append(os.path.join(asset_folder, local_name))

    utils.p('file paths', file_names)
    return file_names
|
||||
|
||||
|
||||
def delete_asset_debug(asset_data):
    """Debug helper: remove the local copies of an asset so it re-downloads.

    TODO fix this for resolutions - should get ALL files from ALL resolutions.
    """
    from blenderkit import download
    preferences = bpy.context.preferences.addons['blenderkit'].preferences

    download.get_download_url(asset_data, download.get_scene_id(), preferences.api_key)

    for file_path in get_download_filepaths(asset_data):
        asset_dir = os.path.dirname(file_path)
        if not os.path.isdir(asset_dir):
            continue
        try:
            print(asset_dir)
            shutil.rmtree(asset_dir)
        except:
            # best-effort cleanup; report and carry on
            print(sys.exc_info()[0])
|
||||
|
||||
|
||||
def get_clean_filepath():
    """Path of the bundled 'cleaned.blend' template next to this module."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(module_dir, 'blendfiles', 'cleaned.blend')
|
||||
|
||||
|
||||
def get_thumbnailer_filepath():
    """Path of the bundled model-thumbnailer scene next to this module."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(module_dir, 'blendfiles', 'thumbnailer.blend')
|
||||
|
||||
|
||||
def get_material_thumbnailer_filepath():
    """Path of the bundled material-thumbnailer (Cycles) scene next to this module."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(module_dir, 'blendfiles', 'material_thumbnailer_cycles.blend')
"""
    for p in bpy.utils.script_paths():
        testfname= os.path.join(p, subpath)#p + '%saddons%sobject_fracture%sdata.blend' % (s,s,s)
        if os.path.isfile( testfname):
            fname=testfname
            return(fname)
    return None
"""
|
||||
|
||||
|
||||
def get_addon_file(subpath=''):
|
||||
script_path = os.path.dirname(os.path.realpath(__file__))
|
||||
# fpath = os.path.join(p, subpath)
|
||||
return os.path.join(script_path, subpath)
|
||||
|
||||
# Directory of this module, used to locate bundled thumbnail images.
script_path = os.path.dirname(os.path.realpath(__file__))


def get_addon_thumbnail_path(name):
    """Return the path of a bundled thumbnail, appending '.jpg' when *name*
    lacks a jpg/png extension."""
    global script_path
    suffix = '' if name.split('.')[-1] in ('jpg', 'png') else '.jpg'
    return os.path.join(script_path, "thumbnails" + os.sep + name + suffix)
|
|
@ -1,297 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
from blenderkit import paths, utils, rerequests, tasks_queue, ratings_utils, icons
|
||||
|
||||
import bpy
|
||||
import requests, threading
|
||||
import logging
|
||||
|
||||
bk_logger = logging.getLogger('blenderkit')
|
||||
|
||||
from bpy.props import (
|
||||
IntProperty,
|
||||
FloatProperty,
|
||||
StringProperty,
|
||||
EnumProperty,
|
||||
BoolProperty,
|
||||
PointerProperty,
|
||||
)
|
||||
from bpy.types import (
|
||||
Operator,
|
||||
Panel,
|
||||
)
|
||||
|
||||
|
||||
def pretty_print_POST(req):
    """
    pretty print a request
    """
    header_lines = '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items())
    print('{}\n{}\n{}\n\n{}'.format(
        '-----------START-----------',
        req.method + ' ' + req.url,
        header_lines,
        req.body,
    ))
|
||||
|
||||
|
||||
def upload_review_thread(url, reviews, headers):
    """Thread body: PUT the review payload to the server (fire and forget)."""
    rerequests.put(url, data=reviews, verify=True, headers=headers)
|
||||
|
||||
|
||||
|
||||
def upload_rating(asset):
    """Upload the quality / work-hours ratings and the text review of *asset*.

    Ratings are queued through tasks_queue (deduplicated via only_last) and
    additionally sent by a legacy direct thread; the review text goes in its
    own thread.  Marks the asset as rated in the current scene when both
    scores were given.
    """
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)

    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    ratings = []

    if bkit_ratings.rating_quality > 0.1:
        ratings = (('quality', bkit_ratings.rating_quality),)
        tasks_queue.add_task((ratings_utils.send_rating_to_thread_quality, (url, ratings, headers)), wait=2.5,
                             only_last=True)
    if bkit_ratings.rating_work_hours > 0.1:
        ratings = (('working_hours', round(bkit_ratings.rating_work_hours, 1)),)
        tasks_queue.add_task((ratings_utils.send_rating_to_thread_work_hours, (url, ratings, headers)), wait=2.5,
                             only_last=True)

    # NOTE(review): legacy direct upload - only sends whichever tuple was
    # assigned last; the per-property tasks above cover both ratings.
    thread = threading.Thread(target=ratings_utils.upload_rating_thread, args=(url, ratings, headers))
    thread.start()

    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/review'

    reviews = {
        'reviewText': bkit_ratings.rating_compliments,
        'reviewTextProblems': bkit_ratings.rating_problems,
    }
    # BUGFIX: the original tested rating_compliments twice, so a review with
    # only the "problems" field filled in was never uploaded.
    if not (bkit_ratings.rating_compliments == '' and bkit_ratings.rating_problems == ''):
        thread = threading.Thread(target=upload_review_thread, args=(url, reviews, headers))
        thread.start()

    # the info that the user rated an item is stored in the scene
    s = bpy.context.scene
    s['assets rated'] = s.get('assets rated', {})
    if bkit_ratings.rating_quality > 0.1 and bkit_ratings.rating_work_hours > 0.1:
        s['assets rated'][asset['asset_data']['assetBaseId']] = True
|
||||
|
||||
|
||||
def get_assets_for_rating():
    '''
    gets assets from scene that could/should be rated by the user.
    TODO this is only a draft.
    '''
    rateable = []
    containers = (bpy.context.scene.objects, bpy.data.materials, bpy.data.brushes)
    for container in containers:
        for item in container:
            if item.get('asset_data'):
                rateable.append(item)
    return rateable
|
||||
|
||||
|
||||
# Asset-type enum items (identifier, name, description) shared by the rating
# operators.
asset_types = (
    ('MODEL', 'Model', 'set of objects'),
    ('SCENE', 'Scene', 'scene'),
    ('HDR', 'HDR', 'hdr'),
    ('MATERIAL', 'Material', 'any .blend Material'),
    ('TEXTURE', 'Texture', 'a texture, or texture set'),
    ('BRUSH', 'Brush', 'brush, can be any type of blender brush'),
    # BUGFIX: user-facing description typo 'addnon' corrected.
    ('ADDON', 'Addon', 'addon'),
)
|
||||
|
||||
|
||||
# TODO drop this operator, not needed anymore.
class UploadRatingOperator(bpy.types.Operator):
    """Upload rating to the web db"""
    bl_idname = "object.blenderkit_rating_upload"
    bl_label = "Send Rating"
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    def draw(self, context):
        # shown in the confirmation dialog opened by invoke()
        self.layout.label(text='Rating sent to server. Thanks for rating!')

    def execute(self, context):
        return {'FINISHED'}

    def invoke(self, context, event):
        # upload immediately, then show the thank-you dialog
        wm = context.window_manager
        upload_rating(utils.get_active_asset())
        return wm.invoke_props_dialog(self)
|
||||
|
||||
|
||||
def draw_ratings_menu(self, context, layout):
    # Shared UI drawing for the ratings popup; `self` must provide the
    # rating_* properties (see ratings_utils.RatingsProperties) and asset_type.
    pcoll = icons.icon_collections["main"]

    # Personalize the thank-you messages with the user's first name when known.
    profile_name = ''
    profile = bpy.context.window_manager.get('bkit profile')
    if profile and len(profile['user']['firstName']) > 0:
        profile_name = ' ' + profile['user']['firstName']

    col = layout.column()
    # layout.template_icon_view(bkit_ratings, property, show_labels=False, scale=6.0, scale_popup=5.0)
    row = col.row()
    row.label(text='Quality:', icon='SOLO_ON')
    row = col.row()
    row.label(text='Please help the community by rating quality:')

    # star-style quality buttons
    row = col.row()
    row.prop(self, 'rating_quality_ui', expand=True, icon_only=True, emboss=False)
    if self.rating_quality > 0:
        # row = col.row()
        row.label(text=f' Thanks{profile_name}!', icon='FUND')
        # row.label(text=str(self.rating_quality))
    col.separator()
    col.separator()

    row = col.row()
    row.label(text='Complexity:', icon_value=pcoll['dumbbell'].icon_id)
    row = col.row()
    row.label(text=f"How many hours did this {self.asset_type} save you?")

    # validators additionally get a raw numeric field
    if utils.profile_is_validator():
        row = col.row()
        row.prop(self, 'rating_work_hours')

    if self.asset_type in ('model', 'scene'):
        row = col.row()
        row.prop(self, 'rating_work_hours_ui', expand=True, icon_only=False, emboss=True)
        # discourage casually giving extreme work-hour ratings
        if float(self.rating_work_hours_ui) > 100:
            utils.label_multiline(col,
                                  text=f"\nThat's huge! please be sure to give such rating only to godly {self.asset_type}s.\n",
                                  width=500)
        elif float(self.rating_work_hours_ui) > 18:
            col.separator()
            utils.label_multiline(col,
                                  text=f"\nThat's a lot! please be sure to give such rating only to amazing {self.asset_type}s.\n",
                                  width=500)
    elif self.asset_type == 'hdr':
        # HDRs use the 1-10 scale
        row = col.row()
        row.prop(self, 'rating_work_hours_ui_1_10', expand=True, icon_only=False, emboss=True)
    else:
        # materials, brushes etc. use the 1-5 scale
        row = col.row()
        row.prop(self, 'rating_work_hours_ui_1_5', expand=True, icon_only=False, emboss=True)

    if self.rating_work_hours > 0:
        row = col.row()
        row.label(text=f'Thanks{profile_name}, you are amazing!', icon='FUND')
|
||||
|
||||
|
||||
class FastRateMenu(Operator, ratings_utils.RatingsProperties):
    """Rating of the assets , also directly from the asset bar - without need to download assets"""
    bl_idname = "wm.blenderkit_menu_rating_upload"
    bl_label = "Ratings"
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # Always available; rating does not depend on a selection.
        # (Unused scene/ui_props lookups and a stray semicolon removed.)
        return True

    def draw(self, context):
        # Delegates to the shared ratings UI.
        draw_ratings_menu(self, context, self.layout)

    def execute(self, context):
        """Fill asset id/type from the highlighted search result, then open
        the ratings popup."""
        ui_props = bpy.context.window_manager.blenderkitUI
        # when invoked from the asset bar, take the asset under the cursor
        if ui_props.active_index > -1:
            sr = bpy.context.window_manager['search results']
            asset_data = dict(sr[ui_props.active_index])
            self.asset_id = asset_data['id']
            self.asset_type = asset_data['assetType']

        if self.asset_id == '':
            return {'CANCELLED'}

        wm = context.window_manager

        # pre-load any rating the user already gave this asset
        self.prefill_ratings()

        if self.asset_type in ('model', 'scene'):
            # spawn a wider popup for the work-hours enum buttons
            return wm.invoke_popup(self, width=500)
        return wm.invoke_popup(self)
|
||||
|
||||
|
||||
def rating_menu_draw(self, context):
    # Menu-draw callback appended to the asset context menu; adds an
    # "Add Rating" entry for the currently highlighted search result.
    layout = self.layout

    ui_props = context.window_manager.blenderkitUI
    sr = bpy.context.window_manager['search results']

    asset_search_index = ui_props.active_index
    if asset_search_index > -1:
        # NOTE(review): results are read here via sr['results'][...], while
        # FastRateMenu.execute indexes sr[...] directly - confirm which layout
        # of 'search results' is current; also asset_data is unbound below
        # when active_index == -1.
        asset_data = dict(sr['results'][asset_search_index])

    col = layout.column()
    layout.label(text='Admin rating Tools:')
    # INVOKE_DEFAULT so the operator opens its popup instead of executing directly
    col.operator_context = 'INVOKE_DEFAULT'

    op = col.operator('wm.blenderkit_menu_rating_upload', text='Add Rating')
    op.asset_id = asset_data['id']
    op.asset_name = asset_data['name']
    op.asset_type = asset_data['assetType']
|
||||
|
||||
|
||||
def register_ratings():
    """Register the rating operators (menu append stays disabled)."""
    for operator_cls in (UploadRatingOperator, FastRateMenu):
        bpy.utils.register_class(operator_cls)
    # bpy.types.OBJECT_MT_blenderkit_asset_menu.append(rating_menu_draw)
|
||||
|
||||
|
||||
def unregister_ratings():
    """Unregister the rating operators."""
    # bpy.utils.unregister_class(StarRatingOperator)
    for operator_cls in (UploadRatingOperator, FastRateMenu):
        bpy.utils.unregister_class(operator_cls)
|
|
@ -1,367 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
# mainly update functions and callbacks for ratings properties, here to avoid circular imports.
|
||||
import bpy
|
||||
from blenderkit import utils, paths, tasks_queue, rerequests
|
||||
|
||||
from bpy.props import (
|
||||
IntProperty,
|
||||
FloatProperty,
|
||||
FloatVectorProperty,
|
||||
StringProperty,
|
||||
EnumProperty,
|
||||
BoolProperty,
|
||||
PointerProperty,
|
||||
)
|
||||
|
||||
import threading
|
||||
import requests
|
||||
import logging
|
||||
|
||||
bk_logger = logging.getLogger('blenderkit')
|
||||
|
||||
|
||||
def upload_rating_thread(url, ratings, headers):
    ''' Upload rating thread function / disconnected from blender data.'''
    bk_logger.debug('upload rating ' + url + str(ratings))
    for rating_name, score in ratings:
        # skip unset sentinel values
        if score == -1 or score == 0:
            continue
        rating_url = url + rating_name + '/'
        # todo this kind of mixing is too much. Should have 2 bkit structures, upload, use
        payload = {"score": score}
        try:
            rerequests.put(rating_url, data=payload, verify=True, headers=headers)
        except requests.exceptions.RequestException as e:
            print('ratings upload failed: %s' % str(e))
|
||||
|
||||
|
||||
def send_rating_to_thread_quality(url, ratings, headers):
    '''Send a quality rating to an upload thread; main purpose is for tasks_queue.
    One function per property to avoid lost data due to stashing.'''
    threading.Thread(target=upload_rating_thread, args=(url, ratings, headers)).start()
|
||||
|
||||
|
||||
def send_rating_to_thread_work_hours(url, ratings, headers):
    '''Send a work-hours rating to an upload thread; main purpose is for tasks_queue.
    One function per property to avoid lost data due to stashing.'''
    threading.Thread(target=upload_rating_thread, args=(url, ratings, headers)).start()
|
||||
|
||||
|
||||
def store_rating_local_empty(asset_id):
    """Ensure a (possibly empty) local rating entry exists for *asset_id*,
    so the server isn't queried again for it."""
    ratings_store = bpy.context.window_manager['asset ratings']
    ratings_store[asset_id] = ratings_store.get(asset_id, {})
|
||||
|
||||
|
||||
def store_rating_local(asset_id, type='quality', value=0):
    """Cache a single rating value for *asset_id* in the window manager.

    NOTE: `type` shadows the builtin but is kept for keyword-arg compatibility.
    """
    ratings_store = bpy.context.window_manager['asset ratings']
    ratings_store[asset_id] = ratings_store.get(asset_id, {})
    ratings_store[asset_id][type] = value
|
||||
|
||||
|
||||
def get_rating(asset_id, headers):
    '''
    Retrieve ratings from BlenderKit server. Can be run from a thread

    Parameters
    ----------
    asset_id
    headers

    Returns
    -------
    None - results are stored locally via tasks_queue (safe off the main thread)
    '''
    url = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    params = {}
    r = rerequests.get(url, params=params, verify=True, headers=headers)
    if r is None:
        return
    if r.status_code == 200:
        rj = r.json()
        ratings = {}
        # BUGFIX(maintainability): the loop variable was also named `r`,
        # shadowing the response object - renamed to avoid latent bugs.
        for rating in rj['results']:
            ratings[rating['ratingType']] = rating['score']
            tasks_queue.add_task((store_rating_local, (asset_id, rating['ratingType'], rating['score'])))

        if len(rj['results']) == 0:
            # store empty ratings too, so that the server isn't checked repeatedly
            tasks_queue.add_task((store_rating_local_empty, (asset_id,)))
|
||||
|
||||
|
||||
def get_rating_local(asset_id):
    """Return the locally cached ratings dict for *asset_id*, or None when absent."""
    wm = bpy.context.window_manager
    wm['asset ratings'] = wm.get('asset ratings', {})
    cached = wm['asset ratings'].get(asset_id)
    return cached.to_dict() if cached else None
|
||||
|
||||
|
||||
def update_ratings_quality(self, context):
    """Property update callback: cache locally and queue upload of the quality rating."""
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    headers = utils.get_headers(preferences.api_key)

    if hasattr(self, 'rating_quality'):
        # operator-based rating
        bkit_ratings = self
        asset_id = self.asset_id
    else:
        # rating stored on an asset that lives in the scene
        asset = self.id_data
        bkit_ratings = asset.bkit_ratings
        asset_id = asset['asset_data']['id']

    if bkit_ratings.rating_quality > 0.1:
        url = paths.get_api_url() + f'assets/{asset_id}/rating/'

        store_rating_local(asset_id, type='quality', value=bkit_ratings.rating_quality)

        ratings = [('quality', bkit_ratings.rating_quality)]
        tasks_queue.add_task((send_rating_to_thread_quality, (url, ratings, headers)), wait=2.5, only_last=True)
|
||||
|
||||
|
||||
def update_ratings_work_hours(self, context):
    """Property-update callback: cache and schedule upload of a work-hours rating.

    Mirrors update_ratings_quality: supports both scene-asset ratings and the
    rating operator (properties directly on self).
    """
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    headers = utils.get_headers(prefs.api_key)
    if hasattr(self, 'rating_work_hours'):
        # Operator rating: properties live directly on this instance.
        bkit_ratings = self
        asset_id = self.asset_id
    else:
        # Rating of an asset that is present in the scene.
        asset = self.id_data
        bkit_ratings = asset.bkit_ratings
        asset_id = asset['asset_data']['id']

    if bkit_ratings.rating_work_hours <= 0.45:
        # Below the threshold counts as "not rated".
        return

    url = paths.get_api_url() + f'assets/{asset_id}/rating/'

    store_rating_local(asset_id, type='working_hours', value=bkit_ratings.rating_work_hours)

    payload = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
    # Debounced: within the 2.5s window only the last rating is sent.
    tasks_queue.add_task((send_rating_to_thread_work_hours, (url, payload, headers)),
                         wait=2.5, only_last=True)
|
||||
|
||||
|
||||
def update_quality_ui(self, context):
    """Convert the star-enum UI value into the numeric quality rating."""
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    if not prefs.api_key:
        # Anonymous users cannot rate - send them to the web login first.
        bpy.ops.wm.blenderkit_login(
            'INVOKE_DEFAULT',
            message='Please login/signup to rate assets. Clicking OK takes you to web login.')
    self.rating_quality = int(self.rating_quality_ui)
|
||||
|
||||
|
||||
def update_ratings_work_hours_ui(self, context):
    """Convert the work-hours enum (full value set) into the numeric rating."""
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    if not prefs.api_key:
        # Anonymous users cannot rate - send them to the web login first.
        bpy.ops.wm.blenderkit_login(
            'INVOKE_DEFAULT',
            message='Please login/signup to rate assets. Clicking OK takes you to web login.')
    self.rating_work_hours = float(self.rating_work_hours_ui)
|
||||
|
||||
|
||||
def update_ratings_work_hours_ui_1_5(self, context):
    """Convert the 0-5 work-hours enum into the numeric rating."""
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    if not prefs.api_key:
        # Anonymous users cannot rate - send them to the web login first.
        bpy.ops.wm.blenderkit_login(
            'INVOKE_DEFAULT',
            message='Please login/signup to rate assets. Clicking OK takes you to web login.')
    self.rating_work_hours = float(self.rating_work_hours_ui_1_5)
|
||||
|
||||
|
||||
def update_ratings_work_hours_ui_1_10(self, context):
    """Convert the 0-10 work-hours enum into the numeric rating."""
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    if not prefs.api_key:
        # Anonymous users cannot rate - send them to the web login first.
        bpy.ops.wm.blenderkit_login(
            'INVOKE_DEFAULT',
            message='Please login/signup to rate assets. Clicking OK takes you to web login.')
    self.rating_work_hours = float(self.rating_work_hours_ui_1_10)
|
||||
|
||||
|
||||
def stars_enum_callback(self, context):
    """Regenerate the rating-stars enum so stars up to the current quality
    rating draw filled and the rest draw empty."""
    entries = []
    for star in range(1, 11):
        filled = self.rating_quality >= star
        icon = 'SOLO_ON' if filled else 'SOLO_OFF'
        # identifier, name, description, icon, numeric value
        entries.append((str(star), str(star), '', icon, star))
    return entries
|
||||
|
||||
|
||||
class RatingsProperties():
    """Reusable property set for rating BlenderKit assets.

    Combines asset identification, the numeric ratings (quality, work hours)
    and several enum mirrors of those numbers that drive the star / value
    picker widgets. The *_ui enums write through to the numeric properties
    via their update callbacks, which send the rating to the server.
    """

    # Message shown in the rating dialog header.
    message: StringProperty(
        name="message",
        description="message",
        default="Rating asset",
        options={'SKIP_SAVE'})

    # Id of the asset being rated (not drawn in the UI).
    asset_id: StringProperty(
        name="Asset Base Id",
        description="Unique id of the asset (hidden)",
        default="",
        options={'SKIP_SAVE'})

    asset_name: StringProperty(
        name="Asset Name",
        description="Name of the asset (hidden)",
        default="",
        options={'SKIP_SAVE'})

    asset_type: StringProperty(
        name="Asset type",
        description="asset type",
        default="",
        options={'SKIP_SAVE'})

    # Numeric quality rating; the update callback uploads it to the server.
    rating_quality: IntProperty(name="Quality",
                                description="quality of the material",
                                default=0,
                                min=-1, max=10,
                                update=update_ratings_quality,
                                options={'SKIP_SAVE'})

    # the following enum is only to ease interaction - enums support 'drag over' and enable to draw the stars easily.
    rating_quality_ui: EnumProperty(name='rating_quality_ui',
                                    items=stars_enum_callback,
                                    description='Rating stars 0 - 10',
                                    default=0,
                                    update=update_quality_ui,
                                    options={'SKIP_SAVE'})

    # Numeric work-hours rating; the update callback uploads it to the server.
    rating_work_hours: FloatProperty(name="Work Hours",
                                     description="How many hours did this work take?",
                                     default=0.00,
                                     min=0.0, max=300,
                                     update=update_ratings_work_hours,
                                     options={'SKIP_SAVE'}
                                     )

    # Tooltip attached to the highest enum entries below.
    high_rating_warning = "This is a high rating, please be sure to give such rating only to amazing assets"

    # Values the full work-hours picker can express (used by prefill_ratings).
    possible_wh_values = [0,.5,1,2,3,4,5,6,8,10,15,20,30,50,100,150,200,250]
    items_models = [('0', '0', ''),
                    ('.5', '0.5', ''),
                    ('1', '1', ''),
                    ('2', '2', ''),
                    ('3', '3', ''),
                    ('4', '4', ''),
                    ('5', '5', ''),
                    ('6', '6', ''),
                    ('8', '8', ''),
                    ('10', '10', ''),
                    ('15', '15', ''),
                    ('20', '20', ''),
                    ('30', '30', high_rating_warning),
                    ('50', '50', high_rating_warning),
                    ('100', '100', high_rating_warning),
                    ('150', '150', high_rating_warning),
                    ('200', '200', high_rating_warning),
                    ('250', '250', high_rating_warning),
                    ]
    rating_work_hours_ui: EnumProperty(name="Work Hours",
                                       description="How many hours did this work take?",
                                       items=items_models,
                                       default='0', update=update_ratings_work_hours_ui,
                                       options={'SKIP_SAVE'}
                                       )
    # Reduced value set for small assets (e.g. materials).
    possible_wh_values_1_5 = [0,.2, .5,1,2,3,4,5]

    items_1_5 = [('0', '0', ''),
                 ('.2', '0.2', ''),
                 ('.5', '0.5', ''),
                 ('1', '1', ''),
                 ('2', '2', ''),
                 ('3', '3', ''),
                 ('4', '4', ''),
                 ('5', '5', '')
                 ]
    rating_work_hours_ui_1_5: EnumProperty(name="Work Hours",
                                           description="How many hours did this work take?",
                                           items=items_1_5,
                                           default='0',
                                           update=update_ratings_work_hours_ui_1_5,
                                           options={'SKIP_SAVE'}
                                           )
    # Integer-only value set for mid-size assets.
    possible_wh_values_1_10 = [0,1,2,3,4,5,6,7,8,9,10]

    items_1_10= [('0', '0', ''),
                 ('1', '1', ''),
                 ('2', '2', ''),
                 ('3', '3', ''),
                 ('4', '4', ''),
                 ('5', '5', ''),
                 ('6', '6', ''),
                 ('7', '7', ''),
                 ('8', '8', ''),
                 ('9', '9', ''),
                 ('10', '10', '')
                 ]
    rating_work_hours_ui_1_10: EnumProperty(name="Work Hours",
                                            description="How many hours did this work take?",
                                            items= items_1_10,
                                            default='0',
                                            update=update_ratings_work_hours_ui_1_10,
                                            options={'SKIP_SAVE'}
                                            )

    def prefill_ratings(self):
        """Pre-fill the UI enums from a locally cached rating, if one exists."""
        # pre-fill ratings
        ratings = get_rating_local(self.asset_id)
        if ratings and ratings.get('quality'):
            self.rating_quality = ratings['quality']
        if ratings and ratings.get('working_hours'):
            # Enum identifiers are strings; only set a picker when the cached
            # value is actually one of its entries.
            wh = int(ratings['working_hours'])
            whs = str(wh)
            if wh in self.possible_wh_values:
                self.rating_work_hours_ui = whs
            if wh < 6 and wh in self.possible_wh_values_1_5:
                self.rating_work_hours_ui_1_5 = whs
            if wh < 11 and wh in self.possible_wh_values_1_10:
                self.rating_work_hours_ui_1_10 = whs
|
|
@ -1,67 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
import time
|
||||
import bpy
|
||||
from blenderkit import colors, asset_bar_op, ui_bgl, utils
|
||||
|
||||
reports = []
|
||||
|
||||
|
||||
def add_report(text='', timeout=5, color=colors.GREEN):
    """Queue an on-screen report; duplicate texts only get their lifetime extended."""
    global reports
    for existing in reports:
        if existing.text != text:
            continue
        # Same message is already on screen - keep it alive longer instead of
        # stacking a duplicate.
        existing.timeout = existing.age + timeout
        return
    reports.append(Report(text=text, timeout=timeout, color=color))
|
||||
|
||||
|
||||
class Report():
    """A single transient message drawn into the 3D viewport.

    Reports fade out over the last second of their lifetime and remove
    themselves from the module-level `reports` list once expired.
    """

    def __init__(self, area_pointer=0, text='', timeout=5, color=(.5, 1, .5, 1)):
        # text: message to draw; timeout: lifetime in seconds; color: RGBA.
        self.text = text
        self.timeout = timeout
        self.start_time = time.time()
        self.color = color
        # draw_color is the live color; its alpha gets reduced while fading.
        self.draw_color = color
        self.age = 0
        # Pin the report to the area the asset bar runs in; when no asset bar
        # is active, fall back to the largest 3D viewport.
        if asset_bar_op.active_area_pointer == 0:
            w, a, r = utils.get_largest_area(area_type='VIEW_3D')

            self.active_area_pointer = a.as_pointer()
        else:
            self.active_area_pointer = asset_bar_op.active_area_pointer

    def fade(self):
        """Advance the report's age, fade alpha near the end, drop it when expired."""
        fade_time = 1
        self.age = time.time() - self.start_time
        if self.age + fade_time > self.timeout:
            # Linear alpha ramp over the final `fade_time` seconds.
            alpha_multiplier = (self.timeout - self.age) / fade_time
            self.draw_color = (self.color[0], self.color[1], self.color[2], self.color[3] * alpha_multiplier)
        if self.age > self.timeout:
            global reports
            try:
                reports.remove(self)
            except Exception as e:
                # Already removed elsewhere - nothing left to do.
                pass;

    def draw(self, x, y):
        # Only draw into the area this report belongs to.
        if (bpy.context.area is not None and bpy.context.area.as_pointer() == self.active_area_pointer):
            ui_bgl.draw_text(self.text, x, y + 8, 16, self.draw_color)
|
|
@ -1,115 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import ui, utils, paths, tasks_queue, bkit_oauth, reports
|
||||
|
||||
import requests
|
||||
import bpy
|
||||
import logging
|
||||
|
||||
bk_logger = logging.getLogger('rerequests')
|
||||
|
||||
|
||||
class FakeResponse():
    """Minimal stand-in for requests.Response, returned when the request never
    reached the server (e.g. connection error), so callers can still read
    .status_code / .text / .json()."""

    def __init__(self, text='', status_code=400):
        # Mirror the two attributes callers actually inspect.
        self.text = text
        self.status_code = status_code

    def json(self):
        """Return an empty payload, matching an absent server response."""
        return {}
|
||||
|
||||
def rerequest(method, url, recursion=0, **kwargs):
    """Wrapper around requests.request that transparently refreshes an expired
    OAuth token on a 401 'Invalid token.' response and retries once.

    Extra keyword argument (consumed here, not forwarded to requests):
        immediate (bool): when True, refreshed tokens are written straight
            into the add-on preferences. Only safe from non-threaded tasks
            (e.g. upload); otherwise tokens are written via the task queue.

    Returns the requests.Response, or a FakeResponse (status 400) when the
    connection itself failed.
    """
    # 'immediate' is our flag, not a requests option - strip it before
    # forwarding kwargs.
    immediate = kwargs.pop('immediate', False)

    # First, a plain attempt.
    try:
        response = requests.request(method, url, **kwargs)
    except Exception as e:
        print(e)
        tasks_queue.add_task((reports.add_report, (
            'Connection error.', 10)))
        return FakeResponse()

    bk_logger.debug(url + str(kwargs))
    bk_logger.debug(response.status_code)

    if response.status_code == 401:
        try:
            rdata = response.json()
        except Exception:
            # Body was not JSON; report with whatever detail we have (none).
            rdata = {}

        tasks_queue.add_task((reports.add_report, (method + ' request Failed.' + str(rdata.get('detail')),)))

        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth and user_preferences.api_key_refresh != '':
                    tasks_queue.add_task((reports.add_report, (
                        'refreshing token. If this fails, please login in BlenderKit Login panel.', 10)))
                    refresh_url = paths.get_bkit_url()
                    auth_token, refresh_token, oauth_response = bkit_oauth.refresh_token(
                        user_preferences.api_key_refresh, refresh_url)

                    if auth_token is not None:
                        if immediate:
                            # this can write tokens occasionally into prefs. used e.g. in upload. Only possible
                            # in non-threaded tasks
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key = auth_token
                            bpy.context.preferences.addons['blenderkit'].preferences.api_key_refresh = refresh_token
                        else:
                            tasks_queue.add_task((bkit_oauth.write_tokens, (auth_token, refresh_token, oauth_response)))

                        # Retry the original request with the fresh token.
                        kwargs['headers'] = utils.get_headers(auth_token)
                        response = requests.request(method, url, **kwargs)
                        # Use lazy %-style args: the original passed a bare
                        # extra argument with no placeholder, which makes the
                        # logging module raise a formatting error when the
                        # record is rendered.
                        bk_logger.debug('reresult %s', response.status_code)
                        if response.status_code >= 400:
                            bk_logger.debug('reresult %s', response.text)
                            tasks_queue.add_task((reports.add_report, (
                                response.text, 10)))

                else:
                    tasks_queue.add_task((reports.add_report, (
                        'Refreshing token failed.Please login manually.', 10)))
                    # tasks_queue.add_task((bkit_oauth.write_tokens, ('', '', '')))
                    tasks_queue.add_task((bpy.ops.wm.blenderkit_login, ('INVOKE_DEFAULT',)), fake_context=True)
    return response
|
||||
|
||||
|
||||
def get(url, **kwargs):
    """GET via rerequest (handles token refresh on 401)."""
    return rerequest('get', url, **kwargs)
|
||||
|
||||
|
||||
def post(url, **kwargs):
    """POST via rerequest (handles token refresh on 401)."""
    return rerequest('post', url, **kwargs)
|
||||
|
||||
|
||||
def put(url, **kwargs):
    """PUT via rerequest (handles token refresh on 401)."""
    return rerequest('put', url, **kwargs)
|
||||
|
||||
|
||||
def patch(url, **kwargs):
    """PATCH via rerequest (handles token refresh on 401)."""
    return rerequest('patch', url, **kwargs)
|
|
@ -1,716 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import paths, append_link, bg_blender, utils, download, search, rerequests, upload_bg, image_utils
|
||||
|
||||
import sys, json, os, time
|
||||
import subprocess
|
||||
import tempfile
|
||||
import bpy
|
||||
import requests
|
||||
import math
|
||||
import threading
|
||||
|
||||
resolutions = {
|
||||
'resolution_0_5K': 512,
|
||||
'resolution_1K': 1024,
|
||||
'resolution_2K': 2048,
|
||||
'resolution_4K': 4096,
|
||||
'resolution_8K': 8192,
|
||||
}
|
||||
rkeys = list(resolutions.keys())
|
||||
|
||||
resolution_props_to_server = {
|
||||
|
||||
'512': 'resolution_0_5K',
|
||||
'1024': 'resolution_1K',
|
||||
'2048': 'resolution_2K',
|
||||
'4096': 'resolution_4K',
|
||||
'8192': 'resolution_8K',
|
||||
'ORIGINAL': 'blend',
|
||||
}
|
||||
|
||||
|
||||
def get_current_resolution():
    """Return the largest texture dimension among all images in the open file,
    ignoring the 'Render Result' buffer. Returns 0 when there are no textures
    (e.g. purely procedural assets)."""
    largest = 0
    for img in bpy.data.images:
        if img.name == 'Render Result':
            continue
        largest = max(largest, img.size[0], img.size[1])
    return largest
|
||||
|
||||
|
||||
def save_image_safely(teximage, filepath):
    '''
    Blender makes it really hard to save images...
    Would be worth investigating PIL or similar instead

    Saves the image through the scene render image settings (temporarily
    reconfigured to match the image's format), then re-points the image and
    its packed files at the new path and reloads it. The original render
    image settings are restored at the end.

    Parameters
    ----------
    teximage : bpy.types.Image to save
    filepath : destination path (may be blend-relative, '//...')

    Returns
    -------
    None
    '''
    JPEG_QUALITY = 98

    rs = bpy.context.scene.render
    ims = rs.image_settings

    # Remember the scene's render image settings so they can be restored.
    orig_file_format = ims.file_format
    orig_quality = ims.quality
    orig_color_mode = ims.color_mode
    orig_compression = ims.compression

    # Match the output format/color mode to the source image.
    ims.file_format = teximage.file_format
    if teximage.file_format == 'PNG':
        ims.color_mode = 'RGBA'
    elif teximage.channels == 3:
        ims.color_mode = 'RGB'
    else:
        ims.color_mode = 'BW'

    # all pngs with max compression
    if ims.file_format == 'PNG':
        ims.compression = 100
    # all jpgs brought to reasonable quality
    if ims.file_format == 'JPG':
        ims.quality = JPEG_QUALITY
    # it's actually very important not to try to change the image filepath and packed file filepath before saving,
    # blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
    teximage.save_render(filepath=bpy.path.abspath(filepath), scene=bpy.context.scene)

    # Only after the file is on disk, retarget the datablock and reload.
    teximage.filepath = filepath
    for packed_file in teximage.packed_files:
        packed_file.filepath = filepath
    teximage.filepath_raw = filepath
    teximage.reload()

    # Restore the scene's original render image settings.
    ims.file_format = orig_file_format
    ims.quality = orig_quality
    ims.color_mode = orig_color_mode
    ims.compression = orig_compression
|
||||
|
||||
|
||||
def extxchange_to_resolution(filepath):
    # NOTE(review): this function computes an extension swap but never returns
    # or uses the result - as written it is a no-op stub. Kept unchanged;
    # confirm whether it was meant to return (base, ext) or can be removed.
    base, ext = os.path.splitext(filepath)
    if ext in ('.png', '.PNG'):
        ext = 'jpg'
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def upload_resolutions(files, asset_data):
    """Upload the generated resolution files of an asset to the server.

    files: list of dicts with 'type', 'index' and 'file_path' keys.
    asset_data: must contain at least 'name' and 'id'.
    """
    prefs = bpy.context.preferences.addons['blenderkit'].preferences

    upload_data = {
        "name": asset_data['name'],
        "token": prefs.api_key,
        "id": asset_data['id'],
    }

    ok = upload_bg.upload_files(upload_data, files)

    # Report progress back to the parent Blender process.
    message = 'upload finished successfully' if ok else 'upload failed.'
    bg_blender.progress(message)
|
||||
|
||||
|
||||
def unpack_asset(data):
    """Unpack all packed textures of the current .blend into the asset's
    per-resolution texture directory, mark datablocks for the asset browser,
    and re-save the file.

    Runs in a background Blender instance; `data` must contain 'asset_data'.
    """
    utils.p('unpacking asset')
    asset_data = data['asset_data']
    # utils.pprint(asset_data)

    blend_file_name = os.path.basename(bpy.data.filepath)
    ext = os.path.splitext(blend_file_name)[1]

    resolution = asset_data.get('resolution', 'blend')
    # TODO - passing resolution inside asset data might not be the best solution
    tex_dir_path = paths.get_texture_directory(asset_data, resolution=resolution)
    tex_dir_abs = bpy.path.abspath(tex_dir_path)
    if not os.path.exists(tex_dir_abs):
        try:
            os.mkdir(tex_dir_abs)
        except Exception as e:
            print(e)
    # Autopack would immediately re-pack what we are about to unpack.
    bpy.data.use_autopack = False
    for image in bpy.data.images:
        if image.name != 'Render Result':
            # suffix = paths.resolution_suffix(data['suffix'])
            fp = get_texture_filepath(tex_dir_path, image, resolution=resolution)
            utils.p('unpacking file', image.name)
            utils.p(image.filepath, fp)

            # Retarget all paths *before* unpacking so the written file lands
            # inside the texture directory.
            for pf in image.packed_files:
                pf.filepath = fp  # bpy.path.abspath(fp)
            image.filepath = fp  # bpy.path.abspath(fp)
            image.filepath_raw = fp  # bpy.path.abspath(fp)
            # image.save()
            if len(image.packed_files) > 0:
                # image.unpack(method='REMOVE')
                image.unpack(method='WRITE_ORIGINAL')

    # mark asset browser asset
    data_block = None
    if asset_data['assetType'] == 'model':
        for ob in bpy.data.objects:
            if ob.parent is None and ob in bpy.context.visible_objects:
                ob.asset_mark()
        # for c in bpy.data.collections:
        #     if c.get('asset_data') is not None:
        #         c.asset_mark()
        #         data_block = c
    elif asset_data['assetType'] == 'material':
        for m in bpy.data.materials:
            m.asset_mark()
            data_block = m
    elif asset_data['assetType'] == 'scene':
        bpy.context.scene.asset_mark()
    elif asset_data['assetType'] =='brush':
        for b in bpy.data.brushes:
            if b.get('asset_data') is not None:
                b.asset_mark()
                data_block = b
    # Copy description/tags onto the datablock that was marked last.
    if data_block is not None:
        tags = data_block.asset_data.tags
        for t in tags:
            tags.remove(t)
        tags.new('description: ' + asset_data['description'])
        tags.new('tags: ' + ','.join(asset_data['tags']))
    #
    # if this isn't here, blender crashes when saving file.
    bpy.context.preferences.filepaths.file_preview_type = 'NONE'

    bpy.ops.wm.save_as_mainfile(filepath = bpy.data.filepath, compress=False)
    # now try to delete the .blend1 file
    try:

        os.remove(bpy.data.filepath + '1')
    except Exception as e:
        print(e)
|
||||
|
||||
|
||||
def patch_asset_empty(asset_id, api_key):
    """Send an empty PATCH so the server reindexes the asset.

    Workaround only: should be removed once the server reindexes
    automatically after uploads of resolutions.

    Returns {'FINISHED'} on success, {'CANCELLED'} on a request error.
    """
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    upload_data = {}
    try:
        rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
|
||||
|
||||
|
||||
def reduce_all_images(target_scale=1024):
    """Downscale every texture so its longest side is at most `target_scale`
    pixels, then re-pack it into the .blend file."""
    for img in bpy.data.images:
        if img.name == 'Render Result':
            continue
        print('scaling ', img.name, img.size[0], img.size[1])
        # make_possible_reductions_on_image(i)
        if max(img.size) <= target_scale:
            continue
        ratio = float(target_scale) / float(max(img.size))
        print(ratio)
        # Point the image at a throwaway path so the original file on disk is
        # never overwritten by the scaled copy.
        fp = '//tempimagestorage'
        img.filepath = fp
        img.filepath_raw = fp
        print(int(img.size[0] * ratio), int(img.size[1] * ratio))
        img.scale(int(img.size[0] * ratio), int(img.size[1] * ratio))
        img.update()
        # Keep the scaled pixels inside the .blend.
        img.pack()
|
||||
|
||||
|
||||
def get_texture_filepath(tex_dir_path, image, resolution='blend'):
    """Build a unique file path for `image` inside `tex_dir_path`.

    If another image datablock already resolves to the same path (common with
    generated texture sets shared by several materials), a zero-padded
    counter is inserted before the extension until the path is unique.

    Note: the original computed `suffix = paths.resolution_suffix[resolution]`
    and never used it - that dead code is removed here; the `resolution`
    parameter is kept for interface compatibility.
    """
    image_file_name = bpy.path.basename(image.filepath)
    if image_file_name == '':
        # Packed/generated images can have an empty filepath - fall back to
        # the datablock name (without any fake extension).
        image_file_name = image.name.split('.')[0]

    fp = os.path.join(tex_dir_path, image_file_name)
    # check if there is allready an image with same name and thus also assigned path
    # (can happen easily with genearted tex sets and more materials)
    done = False
    fpn = fp
    i = 0
    while not done:
        is_solo = True
        for image1 in bpy.data.images:
            if image != image1 and image1.filepath == fpn:
                # Collision: derive a numbered candidate and re-check.
                is_solo = False
                fpleft, fpext = os.path.splitext(fp)
                fpn = fpleft + str(i).zfill(3) + fpext
                i += 1
        if is_solo:
            done = True

    return fpn
|
||||
|
||||
|
||||
def generate_lower_resolutions_hdr(asset_data, fpath):
    '''generates lower resolutions for HDR images'''
    # Walk down the power-of-two resolution ladder starting at the HDR's own
    # size, saving each step as a DWAA-compressed EXR, then upload the files
    # and poke the server to reindex.
    hdr = bpy.data.images.load(fpath)
    actres = max(hdr.size[0], hdr.size[1])
    p2res = paths.round_to_closest_resolution(actres)
    original_filesize = os.path.getsize(fpath)  # for comparison on the original level
    i = 0
    finished = False
    files = []
    while not finished:
        dirn = os.path.dirname(fpath)
        fn_strip, ext = os.path.splitext(fpath)
        ext = '.exr'
        if i>0:
            # First pass keeps the original size; later passes halve it.
            image_utils.downscale(hdr)


        hdr_resolution_filepath = fn_strip + paths.resolution_suffix[p2res] + ext
        image_utils.img_save_as(hdr, filepath=hdr_resolution_filepath, file_format='OPEN_EXR', quality=20, color_mode='RGB', compression=15,
                                view_transform='Raw', exr_codec = 'DWAA')

        if os.path.exists(hdr_resolution_filepath):
            reduced_filesize = os.path.getsize(hdr_resolution_filepath)

            # compare file sizes
            print(f'HDR size was reduced from {original_filesize} to {reduced_filesize}')
            if reduced_filesize < original_filesize:
                # this limits from uploaidng especially same-as-original resolution files in case when there is no advantage.
                # usually however the advantage can be big also for same as original resolution
                files.append({
                    "type": p2res,
                    "index": 0,
                    "file_path": hdr_resolution_filepath
                })

                print('prepared resolution file: ', p2res)

        # Step down to the next smaller resolution, or stop at the smallest.
        if rkeys.index(p2res) == 0:
            finished = True
        else:
            p2res = rkeys[rkeys.index(p2res) - 1]
        i+=1

    print('uploading resolution files')
    upload_resolutions(files, asset_data)

    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    # Empty PATCH forces a server reindex so the new files become searchable.
    patch_asset_empty(asset_data['id'], preferences.api_key)
|
||||
|
||||
|
||||
def generate_lower_resolutions(data):
    """Generate and upload lower-resolution texture variants of the current asset.

    Starting from the asset's largest texture size, walks down the
    power-of-two resolution ladder: on the first pass it applies lossless-ish
    reductions at original size, on later passes it downscales, saving a
    .blend copy per resolution. Files that ended up smaller than the original
    textures are uploaded, and an empty PATCH triggers a server reindex.

    Bug fix: the original guard was `if p2res != [0]:`, comparing a string
    resolution key to the list `[0]` - always True, so assets already at the
    lowest resolution were never skipped. The intended comparison (per the
    adjacent comment) is against the lowest resolution key, `rkeys[0]`.
    """
    asset_data = data['asset_data']
    actres = get_current_resolution()
    # first let's skip procedural assets
    base_fpath = bpy.data.filepath

    print('current resolution of the asset ', actres)
    if actres > 0:
        p2res = paths.round_to_closest_resolution(actres)
        orig_res = p2res
        print(p2res)
        finished = False
        files = []
        # now skip assets that have lowest possible resolution already
        if p2res != rkeys[0]:
            # Total size of the original textures, for a before/after report.
            original_textures_filesize = 0
            for i in bpy.data.images:
                abspath = bpy.path.abspath(i.filepath)
                if os.path.exists(abspath):
                    original_textures_filesize += os.path.getsize(abspath)

            while not finished:

                blend_file_name = os.path.basename(base_fpath)

                dirn = os.path.dirname(base_fpath)
                fn_strip, ext = os.path.splitext(blend_file_name)

                fn = fn_strip + paths.resolution_suffix[p2res] + ext
                fpath = os.path.join(dirn, fn)

                tex_dir_path = paths.get_texture_directory(asset_data, resolution=p2res)

                tex_dir_abs = bpy.path.abspath(tex_dir_path)
                if not os.path.exists(tex_dir_abs):
                    os.mkdir(tex_dir_abs)

                reduced_textures_filessize = 0
                for i in bpy.data.images:
                    if i.name != 'Render Result':

                        print('scaling ', i.name, i.size[0], i.size[1])
                        fp = get_texture_filepath(tex_dir_path, i, resolution=p2res)

                        if p2res == orig_res:
                            # first, let's link the image back to the original one.
                            i['blenderkit_original_path'] = i.filepath
                            # first round also makes reductions on the image, while keeping resolution
                            image_utils.make_possible_reductions_on_image(i, fp, do_reductions=True, do_downscale=False)

                        else:
                            # lower resolutions only downscale
                            image_utils.make_possible_reductions_on_image(i, fp, do_reductions=False, do_downscale=True)

                        abspath = bpy.path.abspath(i.filepath)
                        if os.path.exists(abspath):
                            reduced_textures_filessize += os.path.getsize(abspath)

                        i.pack()
                # save
                print(fpath)
                # if this isn't here, blender crashes.
                bpy.context.preferences.filepaths.file_preview_type = 'NONE'

                # save the file
                bpy.ops.wm.save_as_mainfile(filepath=fpath, compress=True, copy=True)
                # compare file sizes
                print(f'textures size was reduced from {original_textures_filesize} to {reduced_textures_filessize}')
                if reduced_textures_filessize < original_textures_filesize:
                    # this limits from uploaidng especially same-as-original resolution files in case when there is no advantage.
                    # usually however the advantage can be big also for same as original resolution
                    files.append({
                        "type": p2res,
                        "index": 0,
                        "file_path": fpath
                    })

                print('prepared resolution file: ', p2res)
                if rkeys.index(p2res) == 0:
                    finished = True
                else:
                    p2res = rkeys[rkeys.index(p2res) - 1]
            print('uploading resolution files')
            upload_resolutions(files, data['asset_data'])
            preferences = bpy.context.preferences.addons['blenderkit'].preferences
            patch_asset_empty(data['asset_data']['id'], preferences.api_key)
    return
|
||||
|
||||
|
||||
def regenerate_thumbnail_material(data):
    """Re-generate a material thumbnail in a background Blender instance.

    Experimental/unfinished: the re-upload step is not implemented yet.
    """
    # this should re-generate material thumbnail and re-upload it.
    # first let's skip procedural assets
    base_fpath = bpy.data.filepath
    blend_file_name = os.path.basename(base_fpath)
    # Create a holder object so the material has a user and a slot to render from.
    bpy.ops.mesh.primitive_cube_add()
    aob = bpy.context.active_object
    bpy.ops.object.material_slot_add()
    aob.material_slots[0].material = bpy.data.materials[0]
    props = aob.active_material.blenderkit
    props.thumbnail_generator_type = 'BALL'
    props.thumbnail_background = False
    props.thumbnail_resolution = '256'
    # layout.prop(props, 'thumbnail_generator_type')
    # layout.prop(props, 'thumbnail_scale')
    # layout.prop(props, 'thumbnail_background')
    # if props.thumbnail_background:
    #     layout.prop(props, 'thumbnail_background_lightness')
    # layout.prop(props, 'thumbnail_resolution')
    # layout.prop(props, 'thumbnail_samples')
    # layout.prop(props, 'thumbnail_denoising')
    # layout.prop(props, 'adaptive_subdivision')
    # preferences = bpy.context.preferences.addons['blenderkit'].preferences
    # layout.prop(preferences, "thumbnail_use_gpu")
    # TODO: here it should call start_material_thumbnailer , but with the wait property on, so it can upload afterwards.
    bpy.ops.object.blenderkit_generate_material_thumbnail()
    # NOTE(review): fixed-length sleep to let the thumbnail render finish -
    # fragile; should become a proper wait/callback.
    time.sleep(130)
    # save
    # this does the actual job

    return
|
||||
|
||||
|
||||
def assets_db_path():
    """Return the path of the all-assets JSON dump, stored next to the
    currently open .blend file."""
    blend_dir = os.path.dirname(bpy.data.filepath)
    return os.path.join(blend_dir, 'all_assets.json')
|
||||
|
||||
|
||||
def get_assets_search():
    """Fetch the complete asset list from the API 'search/all' endpoint.

    Follows pagination via the 'next' URL in each response and writes the
    accumulated results to the JSON file returned by assets_db_path().
    Each page is retried up to 3 times before paging is aborted.

    Fixes over the original: 'adata' could be referenced before assignment
    when the very first response failed to parse; '!= None' replaced with
    'is not None'; headers are computed once instead of per page.
    """
    results = []
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    headers = utils.get_headers(preferences.api_key)

    url = paths.get_api_url() + 'search/all'
    page = 0
    while url is not None:
        print('fetching assets from assets endpoint')
        print(url)
        for retry in range(3):
            r = rerequests.get(url, headers=headers)
            try:
                adata = r.json()
            except Exception as e:
                print(e)
                print('failed to get next')
                if retry == 2:
                    # out of retries for this page — stop paging entirely
                    url = None
                continue
            url = adata.get('next')
            page += 1
            if adata.get('results') is not None:
                results.extend(adata['results'])
            print(f'fetched page {page}')
            break

    fpath = assets_db_path()
    with open(fpath, 'w', encoding='utf-8') as s:
        json.dump(results, s, ensure_ascii=False, indent=4)
|
||||
|
||||
|
||||
def get_assets_for_resolutions(page_size=100, max_results=100000000):
    """Search for assets that are candidates for resolution generation.

    Writes the search results as 'assets_for_resolutions.json' next to the
    open .blend file and returns that path.
    """
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    target = os.path.join(os.path.dirname(bpy.data.filepath),
                          'assets_for_resolutions.json')
    query = {
        'order': '-created',
        'textureResolutionMax_gte': '100',
        # 'last_resolution_upload_lt':'2020-9-01'
    }
    search.get_search_simple(query, filepath=target, page_size=page_size,
                             max_results=max_results, api_key=prefs.api_key)
    return target
|
||||
|
||||
|
||||
def get_materials_for_validation(page_size=100, max_results=100000000):
    """Search for uploaded (not yet validated) materials.

    Writes the search results as 'materials_for_validation.json' next to the
    open .blend file and returns that path.
    """
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    target = os.path.join(os.path.dirname(bpy.data.filepath),
                          'materials_for_validation.json')
    query = {
        'order': '-created',
        'asset_type': 'material',
        'verification_status': 'uploaded'
    }
    search.get_search_simple(query, filepath=target, page_size=page_size,
                             max_results=max_results, api_key=prefs.api_key)
    return target
|
||||
|
||||
|
||||
|
||||
|
||||
def load_assets_list(filepath):
    """Load a JSON list of assets from *filepath*.

    Returns the parsed data, or None when the file does not exist.
    """
    if not os.path.exists(filepath):
        return None
    with open(filepath, 'r', encoding='utf-8') as src:
        return json.load(src)
|
||||
|
||||
|
||||
def check_needs_resolutions(a):
    """Return True when asset dict *a* still needs resolution files generated.

    Only validated assets of types that carry textures qualify; an asset
    that already has any 'resolution*' file attached is considered done.
    TODO (carried over): should also compare upload date against resolution
    upload date once the API provides it.
    """
    if a['verificationStatus'] != 'validated':
        return False
    if a['assetType'] not in ('material', 'model', 'scene', 'hdr'):
        return False
    # the search itself now picks the right assets, so beyond asset types we
    # only check whether resolution files are already present
    return all(f['fileType'].find('resolution') == -1 for f in a['files'])
|
||||
|
||||
|
||||
def download_asset(asset_data, resolution='blend', unpack=False, api_key=''):
    """Download an asset synchronously (non-threaded).

    Parameters
    ----------
    asset_data : search result from the elastic or assets API endpoints
    resolution : requested resolution (currently 'blend' is always fetched)
    unpack : when True, non-HDR assets are unpacked in a background Blender
    api_key : BlenderKit API key

    Returns
    -------
    Path to the downloaded asset file, or None when the asset is not
    accessible.
    """
    accessible = download.get_download_url(asset_data, download.get_scene_id(),
                                           api_key, tcom=None, resolution='blend')
    if not accessible:
        return None
    fpath = download.download_asset_file(asset_data, api_key=api_key)
    if fpath and unpack and asset_data['assetType'] != 'hdr':
        # HDRs are plain images and have nothing to unpack
        send_to_bg(asset_data, fpath, command='unpack', wait=True)
    return fpath
|
||||
|
||||
|
||||
def generate_resolution_thread(asset_data, api_key):
    """Download an asset and then generate its lower resolutions.

    HDRs are processed in-process; other asset types are handed to a
    background Blender instance. send_to_bg() blocks until that instance
    finishes, so no extra sleep is needed.

    Parameters
    ----------
    asset_data : parsed search result for one asset
    api_key : BlenderKit API key used for download
    """
    fpath = download_asset(asset_data, unpack=True, api_key=api_key)
    if not fpath:
        return
    if asset_data['assetType'] == 'hdr':
        generate_lower_resolutions_hdr(asset_data, fpath)
    else:
        print('send to bg ', fpath)
        send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True)
|
||||
|
||||
|
||||
def iterate_for_resolutions(filepath, process_count=12, api_key='', do_checks=True):
    """Iterate through all assets in *filepath* and generate resolutions for
    those which need it.

    Parameters
    ----------
    filepath : JSON file produced by e.g. get_assets_for_resolutions()
    process_count : kept for backward compatibility; processing is currently
        sequential, so it is unused (the threaded variant was disabled)
    api_key : BlenderKit API key
    do_checks : when False, resolutions are generated unconditionally

    Cleanup over the original: removed the never-used 'threads' list and the
    dead commented-out threading scaffold.
    """
    assets = load_assets_list(filepath)
    print(len(assets))
    for asset_data in assets:
        asset_data = search.parse_result(asset_data)
        if asset_data is None:
            continue
        if not do_checks or check_needs_resolutions(asset_data):
            print('downloading and generating resolution for %s' % asset_data['name'])
            # quick hack for not using original dirs in blenderkit;
            # runs sequentially — send_to_bg inside waits for each process
            generate_resolution_thread(asset_data, api_key)
        else:
            print('not generated resolutions:', asset_data['name'])
|
||||
|
||||
|
||||
def send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True):
    """Run a task in a new background Blender instance that exits when done.

    The task description is written to a JSON datafile inside a fresh temp
    dir and passed to resolutions_bg.py via '--'. The child inherits the
    current bpy.app.debug_value.

    Parameters
    ----------
    asset_data : asset dict forwarded to the background process
    fpath : .blend (or other) file the background instance opens
    command : task name dispatched by run_bg() in the child
    wait : block until the child exits (subprocess.run) or return immediately
        (subprocess.Popen)

    Returns
    -------
    The CompletedProcess (wait=True) or Popen object (wait=False).

    Fix: the original called os.path.join(tempdir + 'resdata.json'), which
    concatenated the names and wrote the datafile *next to* the temp dir
    instead of inside it.
    """
    data = {
        'fpath': fpath,
        'debug_value': bpy.app.debug_value,
        'asset_data': asset_data,
        'command': command,
    }
    tempdir = tempfile.mkdtemp()
    datafile = os.path.join(tempdir, 'resdata.json')
    script_path = os.path.dirname(os.path.realpath(__file__))
    with open(datafile, 'w', encoding='utf-8') as s:
        json.dump(data, s, ensure_ascii=False, indent=4)

    print('opening Blender instance to do processing - ', command)

    # identical argv for both modes — build it once
    args = [
        bpy.app.binary_path,
        "--background",
        "-noaudio",
        fpath,
        "--python", os.path.join(script_path, "resolutions_bg.py"),
        "--", datafile,
    ]
    if wait:
        proc = subprocess.run(args, bufsize=1, stdout=sys.stdout,
                              stdin=subprocess.PIPE,
                              creationflags=utils.get_process_flags())
    else:
        # TODO this should be fixed to allow multithreading.
        proc = subprocess.Popen(args, bufsize=1, stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                creationflags=utils.get_process_flags())
    return proc
|
||||
|
||||
|
||||
def write_data_back(asset_data):
    """Ensure the data in the resolution file matches the database.

    Not implemented yet — intentionally a no-op placeholder.
    """
    pass
|
||||
|
||||
|
||||
def run_bg(datafile):
    """Entry point for the background Blender instance.

    Loads the task file written by send_to_bg(), restores the caller's
    debug_value, and dispatches to the handler matching the task command.
    """
    print('background file operation')
    with open(datafile, 'r', encoding='utf-8') as f:
        data = json.load(f)
    bpy.app.debug_value = data['debug_value']
    write_data_back(data['asset_data'])

    command = data['command']
    if command == 'generate_resolutions':
        generate_lower_resolutions(data)
    elif command == 'unpack':
        unpack_asset(data)
    elif command == 'regen_thumbnail':
        regenerate_thumbnail_material(data)
|
||||
|
||||
# load_assets_list()
|
||||
# generate_lower_resolutions()
|
||||
# class TestOperator(bpy.types.Operator):
|
||||
# """Tooltip"""
|
||||
# bl_idname = "object.test_anything"
|
||||
# bl_label = "Test Operator"
|
||||
#
|
||||
# @classmethod
|
||||
# def poll(cls, context):
|
||||
# return True
|
||||
#
|
||||
# def execute(self, context):
|
||||
# iterate_for_resolutions()
|
||||
# return {'FINISHED'}
|
||||
#
|
||||
#
|
||||
# def register():
|
||||
# bpy.utils.register_class(TestOperator)
|
||||
#
|
||||
#
|
||||
# def unregister():
|
||||
# bpy.utils.unregister_class(TestOperator)
|
|
@ -1,8 +0,0 @@
|
|||
import sys
|
||||
import json
|
||||
from blenderkit import resolutions
|
||||
|
||||
BLENDERKIT_EXPORT_DATA = sys.argv[-1]
|
||||
|
||||
if __name__ == "__main__":
    # Use the module-level constant (assigned from sys.argv[-1] above) instead
    # of re-reading sys.argv here, so the datafile path comes from one place.
    resolutions.run_bg(BLENDERKIT_EXPORT_DATA)
|
1685
blenderkit/search.py
|
@ -1,125 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
from blenderkit import utils
|
||||
|
||||
import bpy
|
||||
from bpy.app.handlers import persistent
|
||||
|
||||
import queue
|
||||
import logging
|
||||
bk_logger = logging.getLogger('blenderkit')
|
||||
|
||||
@persistent
def scene_load(context):
    """load_post handler: (re)start the task-queue timer after a file load.

    The timer is not started in background mode or when timers are disabled
    in the add-on preferences.
    """
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    if not prefs.use_timers or bpy.app.background:
        return
    if not bpy.app.timers.is_registered(queue_worker):
        bpy.app.timers.register(queue_worker)
|
||||
|
||||
|
||||
def get_queue():
    """Return the global task queue, creating it on first access.

    The queue is stashed on bpy.types.Scene — an arbitrary Blender type
    picked only because attributes on it persist across add-on reloads.
    """
    holder = bpy.types.Scene
    if not hasattr(holder, 'task_queue'):
        holder.task_queue = queue.Queue()
    return holder.task_queue
|
||||
|
||||
class task_object:
    """One queued task: a callable plus its arguments and scheduling options.

    wait — seconds to delay before execution
    only_last — collapse duplicate tasks, keeping only the newest
    fake_context — execute with a fabricated context override
    fake_context_area — area type used when building the fake context
    """

    def __init__(self, command='', arguments=(), wait=0, only_last=False,
                 fake_context=False, fake_context_area='VIEW_3D'):
        self.command = command
        self.arguments = arguments
        self.wait = wait
        self.only_last = only_last
        self.fake_context = fake_context
        self.fake_context_area = fake_context_area
|
||||
|
||||
def add_task(task, wait=0, only_last=False, fake_context=False, fake_context_area='VIEW_3D'):
    """Enqueue a task given as a (callable, arguments) pair.

    Scheduling options are forwarded to task_object; see that class for
    their meaning.
    """
    new_task = task_object(task[0], task[1], wait=wait, only_last=only_last,
                           fake_context=fake_context,
                           fake_context_area=fake_context_area)
    get_queue().put(new_task)
|
||||
|
||||
|
||||
def queue_worker():
    """Timer callback that drains and executes the global task queue.

    Works in two passes. Pass 1 de-duplicates tasks flagged only_last: for
    each (command, first-argument) key only the most recently queued task
    survives. Pass 2 executes due tasks; tasks whose wait delay has not yet
    elapsed have the delay decremented and are re-queued.

    Returns the re-schedule interval in seconds for bpy.app.timers.
    """
    # utils.p('start queue worker timer')

    #bk_logger.debug('timer queue worker')
    time_step = 2.0
    q = get_queue()

    back_to_queue = [] #delayed events
    stashed = {}
    # first round we get all tasks that are supposed to be stashed and run only once (only_last option)
    # stashing finds tasks with the property only_last and same command and executes only the last one.
    while not q.empty():
        # print('queue while 1')

        task = q.get()
        if task.only_last:
            #this now makes the keys not only by task, but also first argument.
            # by now stashing is only used for ratings, where the first argument is url.
            # This enables fast rating of multiple assets while allowing larger delay for uploading of ratings.
            # this avoids a duplicate request error on the server
            stashed[str(task.command)+str(task.arguments[0])] = task
        else:
            back_to_queue.append(task)
    if len(stashed.keys())>1:
        bk_logger.debug('task queue stashed task:' +str(stashed))
    #return tasks to que except for stashed
    for task in back_to_queue:
        q.put(task)
    #return stashed tasks to queue
    for k in stashed.keys():
        q.put(stashed[k])
    #second round, execute or put back waiting tasks.
    back_to_queue = []
    while not q.empty():
        # print('window manager', bpy.context.window_manager)
        task = q.get()

        if task.wait>0:
            # not due yet — decrement by the timer step and re-queue below
            task.wait-=time_step
            back_to_queue.append(task)
        else:
            bk_logger.debug('task queue task:'+ str( task.command) +str( task.arguments))
            try:
                if task.fake_context:
                    # execute under a fabricated context override of the given area type
                    fc = utils.get_fake_context(bpy.context, area_type = task.fake_context_area)
                    task.command(fc,*task.arguments)
                else:
                    task.command(*task.arguments)
            except Exception as e:
                # a failing task is logged and dropped; the worker keeps running
                bk_logger.error('task queue failed task:'+ str(task.command)+str(task.arguments)+ str(e))
                # bk_logger.exception('Got exception on main handler')
                # raise
        # print('queue while 2')
    for task in back_to_queue:
        q.put(task)
    # utils.p('end queue worker timer')

    return 2.0
|
||||
|
||||
|
||||
def register():
    """Hook scene_load into Blender's load_post handlers."""
    handlers = bpy.app.handlers.load_post
    handlers.append(scene_load)
|
||||
|
||||
|
||||
def unregister():
    """Remove scene_load from Blender's load_post handlers."""
    handlers = bpy.app.handlers.load_post
    handlers.remove(scene_load)
|
Before Width: | Height: | Size: 2.1 KiB |
Before Width: | Height: | Size: 2.0 KiB |
Before Width: | Height: | Size: 861 B |
Before Width: | Height: | Size: 946 B |
Before Width: | Height: | Size: 5.3 KiB |
Before Width: | Height: | Size: 1.7 KiB |
Before Width: | Height: | Size: 525 B |
Before Width: | Height: | Size: 525 B |
Before Width: | Height: | Size: 540 B |
Before Width: | Height: | Size: 561 B |
Before Width: | Height: | Size: 65 KiB |
Before Width: | Height: | Size: 2.1 KiB |
Before Width: | Height: | Size: 540 B |
Before Width: | Height: | Size: 4.6 KiB |
Before Width: | Height: | Size: 1.7 KiB |
Before Width: | Height: | Size: 1.5 KiB |
Before Width: | Height: | Size: 8.8 KiB |
Before Width: | Height: | Size: 2.9 KiB |
Before Width: | Height: | Size: 3.0 KiB |
Before Width: | Height: | Size: 2.1 KiB |
Before Width: | Height: | Size: 1.5 KiB |
Before Width: | Height: | Size: 2.1 KiB |
Before Width: | Height: | Size: 2.1 KiB |
Before Width: | Height: | Size: 1.8 KiB |
Before Width: | Height: | Size: 1.4 KiB |
Before Width: | Height: | Size: 2.3 KiB |
1937
blenderkit/ui.py
|
@ -1,155 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
import bgl, blf
|
||||
|
||||
import bpy, blf
|
||||
import gpu
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
|
||||
def draw_rect(x, y, width, height, color):
    """Draw a filled 2D rectangle with its lower-left corner at (x, y)."""
    x2 = x + width
    y2 = y + height
    corners = ((x, y), (x, y2), (x2, y2), (x2, y))
    tris = ((0, 1, 2), (2, 3, 0))

    shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
    quad = batch_for_shader(shader, 'TRIS', {"pos": corners}, indices=tris)

    shader.bind()
    shader.uniform_float("color", color)
    bgl.glEnable(bgl.GL_BLEND)  # so the alpha channel of *color* is honoured
    quad.draw(shader)
|
||||
|
||||
|
||||
def draw_line2d(x1, y1, x2, y2, width, color):
    """Draw a single 2D line segment from (x1, y1) to (x2, y2).

    NOTE(review): the *width* parameter is accepted but never used — kept
    for signature compatibility with callers.
    """
    endpoints = ((x1, y1), (x2, y2))
    segment = ((0, 1),)
    bgl.glEnable(bgl.GL_BLEND)

    shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
    line = batch_for_shader(shader, 'LINES', {"pos": endpoints}, indices=segment)
    shader.bind()
    shader.uniform_float("color", color)
    line.draw(shader)
|
||||
|
||||
|
||||
def draw_lines(vertices, indices, color):
    """Draw a batch of 3D line segments given vertex coords and index pairs."""
    bgl.glEnable(bgl.GL_BLEND)

    shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
    lines = batch_for_shader(shader, 'LINES', {"pos": vertices}, indices=indices)
    shader.bind()
    shader.uniform_float("color", color)
    lines.draw(shader)
|
||||
|
||||
|
||||
def draw_rect_3d(coords, color):
    """Draw a filled quad in 3D space from four corner coordinates.

    NOTE(review): unlike the other draw helpers this one does not call
    shader.bind() — kept as-is to preserve behavior.
    """
    tri_indices = [(0, 1, 2), (2, 3, 0)]
    shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
    quad = batch_for_shader(shader, 'TRIS', {"pos": coords}, indices=tri_indices)
    shader.uniform_float("color", color)
    quad.draw(shader)
|
||||
|
||||
# cache of {image.filepath: {'x', 'y', 'batch', 'image_shader'}} so a batch
# is rebuilt only when the image moves to a different screen position
cached_images = {}
def draw_image(x, y, width, height, image, transparency, crop=(0, 0, 1, 1), batch = None):
    """Draw *image* as a textured 2D quad with its lower-left corner at (x, y).

    crop is (u_min, v_min, u_max, v_max) selecting the part of the texture
    to show. Batches are cached per image filepath and reused while the
    draw position stays the same. Returns the batch used (callers may pass
    it back in to skip the cache lookup).
    NOTE(review): *transparency* is accepted but not referenced in the body;
    blending is enabled unconditionally.
    """
    # draw_rect(x,y, width, height, (.5,0,0,.5))
    if not image:
        return;
    ci = cached_images.get(image.filepath)
    if ci is not None:
        # reuse the cached batch only if the image is drawn at the same spot
        if ci['x'] == x and ci['y'] ==y:
            batch = ci['batch']
            image_shader = ci['image_shader']
    if not batch:

        coords = [
            (x, y), (x + width, y),
            (x, y + height), (x + width, y + height)]

        uvs = [(crop[0], crop[1]),
               (crop[2], crop[1]),
               (crop[0], crop[3]),
               (crop[2], crop[3]),
               ]

        indices = [(0, 1, 2), (2, 1, 3)]

        image_shader = shader = gpu.shader.from_builtin('2D_IMAGE')
        batch = batch_for_shader(image_shader, 'TRIS',
                                 {"pos": coords,
                                  "texCoord": uvs},
                                 indices=indices)


        # tell shader to use the image that is bound to image unit 0
        image_shader.uniform_int("image", 0)
        cached_images[image.filepath] = {
            'x': x,
            'y': y,
            'batch': batch,
            'image_shader': image_shader
        }
    # send image to gpu if it isn't there already
    if image.gl_load():
        raise Exception()

    # texture identifier on gpu
    texture_id = image.bindcode

    # in case someone disabled it before
    bgl.glEnable(bgl.GL_BLEND)

    # bind texture to image unit 0
    bgl.glActiveTexture(bgl.GL_TEXTURE0)
    bgl.glBindTexture(bgl.GL_TEXTURE_2D, texture_id)

    image_shader.bind()

    batch.draw(image_shader)

    # bgl.glDisable(bgl.GL_TEXTURE_2D)
    return batch
|
||||
|
||||
|
||||
def draw_text(text, x, y, size, color=(1, 1, 1, 0.5), halign = 'LEFT', valign = 'TOP'):
    """Draw *text* at (x, y) with the given point size and RGBA color.

    halign: 'LEFT' (default), 'RIGHT' or 'CENTER'; valign: 'TOP' (default)
    or 'CENTER'. NOTE(review): as written, vertical centering only takes
    effect when halign != 'LEFT', because the dimensions lookup lives inside
    that branch — confirm whether that is intended.
    """
    font_id = 1
    # bgl.glColor4f(*color)
    # accept non-string values (numbers etc.) for convenience
    if type(text) != str:
        text = str(text)
    blf.color(font_id, color[0], color[1], color[2], color[3])
    blf.size(font_id, size, 72)
    if halign != 'LEFT':
        width,height = blf.dimensions(font_id, text)
        if halign == 'RIGHT':
            x-=width
        elif halign == 'CENTER':
            x-=width//2
        if valign=='CENTER':
            y-=height//2
        #bottom could be here but there's no reason for it
    blf.position(font_id, x, y, 0)

    blf.draw(font_id, text)
|
1387
blenderkit/upload.py
|
@ -1,187 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
|
||||
|
||||
from blenderkit import paths, append_link, bg_blender, utils, rerequests, tasks_queue, ui, reports
|
||||
|
||||
import sys, json, os, time
|
||||
import requests
|
||||
import logging
|
||||
|
||||
import bpy
|
||||
|
||||
BLENDERKIT_EXPORT_DATA = sys.argv[-1]
|
||||
|
||||
|
||||
def print_gap():
    """Print a blank gap (four empty lines) to separate console output."""
    print('\n' * 4)
|
||||
|
||||
|
||||
class upload_in_chunks(object):
    """Iterable file reader that reports upload progress while yielding chunks.

    requests accepts any iterable as a request body; wrapping the file in
    this class lets the transfer report percentage progress to the UI via
    the task queue as each chunk is read.
    """

    def __init__(self, filename, chunksize=1 << 13, report_name='file'):
        self.filename = filename
        self.chunksize = chunksize
        self.totalsize = os.path.getsize(filename)
        self.readsofar = 0
        self.report_name = report_name

    def __iter__(self):
        with open(self.filename, 'rb') as source:
            while True:
                chunk = source.read(self.chunksize)
                if not chunk:
                    sys.stderr.write("\n")
                    break
                self.readsofar += len(chunk)
                percent = self.readsofar * 1e2 / self.totalsize
                # progress goes through the task queue so it reaches the UI thread
                tasks_queue.add_task((reports.add_report, (f"Uploading {self.report_name} {percent}%",)))
                # bg_blender.progress('uploading %s' % self.report_name, percent)
                # sys.stderr.write("\r{percent:3.0f}%".format(percent=percent))
                yield chunk

    def __len__(self):
        # lets requests set the Content-Length header correctly
        return self.totalsize
|
||||
|
||||
|
||||
def upload_file(upload_data, f):
    """Upload one asset file to BlenderKit storage via S3.

    Creates an upload record on the API, PUTs the file body to the returned
    pre-signed S3 URL (streamed in 2 MiB chunks with UI progress reports),
    then confirms completion on the API. Retries the whole S3 PUT + confirm
    up to 5 times.

    Parameters
    ----------
    upload_data : dict with at least 'token' (API key) and 'id' (asset version)
    f : dict with 'type', 'index' and 'file_path' of the file to send

    Returns
    -------
    True on success, False when all attempts failed.
    """
    headers = utils.get_headers(upload_data['token'])
    version_id = upload_data['id']

    message = f"uploading {f['type']} {os.path.basename(f['file_path'])}"
    tasks_queue.add_task((reports.add_report, (message,)))

    upload_info = {
        'assetId': version_id,
        'fileType': f['type'],
        'fileIndex': f['index'],
        'originalFilename': os.path.basename(f['file_path'])
    }
    # register the upload with the API; the response carries the S3 URL
    upload_create_url = paths.get_api_url() + 'uploads/'
    upload = rerequests.post(upload_create_url, json=upload_info, headers=headers, verify=True)
    upload = upload.json()
    #
    chunk_size = 1024 * 1024 * 2
    # utils.pprint(upload)
    # file gets uploaded here:
    uploaded = False
    # s3 upload is now the only option
    for a in range(0, 5):
        if not uploaded:
            try:
                # stream the file body; upload_in_chunks reports progress to the UI
                upload_response = requests.put(upload['s3UploadUrl'],
                                               data=upload_in_chunks(f['file_path'], chunk_size, f['type']),
                                               stream=True, verify=True)

                # any 2xx status counts as success
                if 250 > upload_response.status_code > 199:
                    uploaded = True
                    # confirm single file upload to bkit server
                    upload_done_url = paths.get_api_url() + 'uploads_s3/' + upload['id'] + '/upload-file/'
                    upload_response = rerequests.post(upload_done_url, headers=headers, verify=True)
                    # print(upload_response)
                    # print(upload_response.text)
                    tasks_queue.add_task((reports.add_report, (f"Finished file upload: {os.path.basename(f['file_path'])}",)))
                    return True
                else:
                    print(upload_response.text)
                    message = f"Upload failed, retry. File : {f['type']} {os.path.basename(f['file_path'])}"
                    tasks_queue.add_task((reports.add_report, (message,)))

            except Exception as e:
                print(e)
                message = f"Upload failed, retry. File : {f['type']} {os.path.basename(f['file_path'])}"
                tasks_queue.add_task((reports.add_report, (message,)))
                time.sleep(1)

    return False
|
||||
|
||||
|
||||
def upload_files(upload_data, files):
    """Upload several files in one run.

    Every file is attempted even when an earlier one fails; the summary
    report is always queued. Returns True only when all uploads succeeded.
    """
    results = [upload_file(upload_data, f) for f in files]
    tasks_queue.add_task((reports.add_report, (f"Uploaded all files for asset {upload_data['name']}",)))
    return all(results)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Background-Blender entry point: read the export datafile written by the
    # add-on, rebuild a clean scene containing only the asset being uploaded,
    # pack all external data and save the result as <assetBaseId>.blend in
    # the temp dir. Exits non-zero on any failure.
    try:
        # bg_blender.progress('preparing scene - append data')
        with open(BLENDERKIT_EXPORT_DATA, 'r',encoding='utf-8') as s:
            data = json.load(s)

        bpy.app.debug_value = data.get('debug_value', 0)
        export_data = data['export_data']
        upload_data = data['upload_data']

        # start from a single empty scene named 'upload'
        bpy.data.scenes.new('upload')
        for s in bpy.data.scenes:
            if s.name != 'upload':
                bpy.data.scenes.remove(s)

        if upload_data['assetType'] == 'model':
            # append the exported objects and group them in one collection
            obnames = export_data['models']
            main_source, allobs = append_link.append_objects(file_name=export_data['source_filepath'],
                                                             obnames=obnames,
                                                             rotation=(0, 0, 0))
            g = bpy.data.collections.new(upload_data['name'])
            for o in allobs:
                g.objects.link(o)
            bpy.context.scene.collection.children.link(g)
        elif upload_data['assetType'] == 'scene':
            # the appended scene replaces the temporary 'upload' scene
            sname = export_data['scene']
            main_source = append_link.append_scene(file_name=export_data['source_filepath'],
                                                   scenename=sname)
            bpy.data.scenes.remove(bpy.data.scenes['upload'])
            main_source.name = sname
        elif upload_data['assetType'] == 'material':
            matname = export_data['material']
            main_source = append_link.append_material(file_name=export_data['source_filepath'], matname=matname)

        elif upload_data['assetType'] == 'brush':
            brushname = export_data['brush']
            main_source = append_link.append_brush(file_name=export_data['source_filepath'], brushname=brushname)

        # embed all external files (textures etc.) into the .blend
        bpy.ops.file.pack_all()

        main_source.blenderkit.uploading = False
        #write ID here.
        main_source.blenderkit.asset_base_id = export_data['assetBaseId']
        main_source.blenderkit.id = export_data['id']

        fpath = os.path.join(export_data['temp_dir'], upload_data['assetBaseId'] + '.blend')

        #if this isn't here, blender crashes.
        bpy.context.preferences.filepaths.file_preview_type = 'NONE'

        bpy.ops.wm.save_as_mainfile(filepath=fpath, compress=True, copy=False)
        # the source temp file is no longer needed once the packed copy exists
        os.remove(export_data['source_filepath'])

    except Exception as e:
        print(e)
        # bg_blender.progress(e)
        sys.exit(1)
|
1001
blenderkit/utils.py
|
@ -1,80 +0,0 @@
|
|||
# ##### BEGIN GPL LICENSE BLOCK #####
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or
|
||||
# modify it under the terms of the GNU General Public License
|
||||
# as published by the Free Software Foundation; either version 2
|
||||
# of the License, or (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
#
|
||||
# ##### END GPL LICENSE BLOCK #####
|
||||
|
||||
import bpy
|
||||
from blenderkit import paths
|
||||
|
||||
import requests, os, json, threading
|
||||
|
||||
|
||||
def get_addon_version():
    """Return the add-on version as an 'X.Y.Z' string.

    Since Blender 3.0 the bundled add-on version is synced with the Blender
    version, so this simply formats bpy.app.version.
    """
    major, minor, patch = bpy.app.version[:3]
    return '%i.%i.%i' % (major, minor, patch)
|
||||
|
||||
|
||||
# import blenderkit
|
||||
# ver = blenderkit.bl_info['version']
|
||||
# return '%i.%i.%i' % (ver[0], ver[1], ver[2])
|
||||
|
||||
|
||||
def check_version(url, api_key, module):
    """Fetch the latest published add-on version from *url* and cache it.

    Writes {'addonVersion2.8': <version>} to addon_version.json inside the
    temp dir, for later comparison by compare_versions(). Failures (network,
    parsing, disk) are reported but never raised, so the add-on keeps
    working offline.

    Fix: narrowed the bare 'except:' to 'except Exception' so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    headers = {
        "accept": "application/json",
        "Authorization": "Bearer %s" % api_key}

    print('checking online version of module %s' % str(module.bl_info['name']))
    try:
        r = requests.get(url, headers=headers)
        data = r.json()
        ver_online = {
            'addonVersion2.8': data['addonVersion']
        }
        tempdir = paths.get_temp_dir()

        ver_filepath = os.path.join(tempdir, 'addon_version.json')
        with open(ver_filepath, 'w', encoding='utf-8') as s:
            json.dump(ver_online, s, ensure_ascii=False, indent=4)
    except Exception:
        print("couldn't check online for version updates")
|
||||
|
||||
|
||||
def compare_versions(module):
    """Compare the installed add-on version with the cached online version.

    Reads addon_version.json (written by check_version()) and returns True
    when the online version is newer than module.bl_info['version'];
    False otherwise or on any error.

    Fix: narrowed the bare 'except:' to 'except Exception' so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        ver_local = module.bl_info['version']
        # encode (major, minor, patch) as one comparable float: M + 0.01*m + 0.0001*p
        ver_local_float = ver_local[0] + .01 * ver_local[1] + .0001 * ver_local[2]

        tempdir = paths.get_temp_dir()
        ver_filepath = os.path.join(tempdir, 'addon_version.json')
        with open(ver_filepath, 'r', encoding='utf-8') as s:
            data = json.load(s)

        ver_online = data['addonVersion2.8'].split('.')
        ver_online_float = int(ver_online[0]) + .01 * int(ver_online[1]) + .0001 * int(ver_online[2])

        # print('versions: installed-%s, online-%s' % (str(ver_local_float), str(ver_online_float)))
        if ver_online_float > ver_local_float:
            return True
    except Exception:
        print("couldn't compare addon versions")
    return False
|
||||
|
||||
|
||||
def check_version_thread(url, API_key, module):
    """Run check_version() on a daemon thread so the UI never blocks on it."""
    worker = threading.Thread(target=check_version,
                              args=(url, API_key, module),
                              daemon=True)
    worker.start()
|