Merge branch 'master' into sculpt-dev

This commit is contained in:
Pablo Dobarro 2020-12-22 16:30:14 +01:00
commit 554be8d96c
9 changed files with 98 additions and 53 deletions

View File

@@ -314,6 +314,14 @@ class OptiXDevice : public CUDADevice {
common_cflags += string_printf(" -I\"%s/include\"", optix_sdk_path);
}
// Specialization for shader raytracing
if (requested_features.use_shader_raytrace) {
common_cflags += " --keep-device-functions";
}
else {
common_cflags += " -D __NO_SHADER_RAYTRACE__";
}
return common_cflags;
}

View File

@@ -548,22 +548,23 @@ void SVMCompiler::generated_shared_closure_nodes(ShaderNode *root_node,
}
}
void SVMCompiler::generate_aov_node(ShaderNode *node, CompilerState *state)
void SVMCompiler::find_aov_nodes_and_dependencies(ShaderNodeSet &aov_nodes,
ShaderGraph *graph,
CompilerState *state)
{
/* execute dependencies for node */
foreach (ShaderInput *in, node->inputs) {
if (in->link != NULL) {
ShaderNodeSet dependencies;
find_dependencies(dependencies, state->nodes_done, in);
generate_svm_nodes(dependencies, state);
foreach (ShaderNode *node, graph->nodes) {
if (node->special_type == SHADER_SPECIAL_TYPE_OUTPUT_AOV) {
OutputAOVNode *aov_node = static_cast<OutputAOVNode *>(node);
if (aov_node->slot >= 0) {
aov_nodes.insert(aov_node);
foreach (ShaderInput *in, node->inputs) {
if (in->link != NULL) {
find_dependencies(aov_nodes, state->nodes_done, in);
}
}
}
}
}
/* compile node itself */
generate_node(node, state->nodes_done);
state->nodes_done.insert(node);
state->nodes_done_flag[node->id] = true;
}
void SVMCompiler::generate_multi_closure(ShaderNode *root_node,
@@ -631,6 +632,25 @@ void SVMCompiler::generate_multi_closure(ShaderNode *root_node,
}
}
/* For dependencies AOV nodes, prevent them from being categorized
* as exclusive deps of one or the other closure, since the need to
* execute them for AOV writing is not dependent on the closure
* weights. */
if (state->aov_nodes.size()) {
set_intersection(state->aov_nodes.begin(),
state->aov_nodes.end(),
cl1deps.begin(),
cl1deps.end(),
std::inserter(shareddeps, shareddeps.begin()),
node_id_comp);
set_intersection(state->aov_nodes.begin(),
state->aov_nodes.end(),
cl2deps.begin(),
cl2deps.end(),
std::inserter(shareddeps, shareddeps.begin()),
node_id_comp);
}
if (!shareddeps.empty()) {
if (cl1in->link) {
generated_shared_closure_nodes(root_node, cl1in->link->parent, state, shareddeps);
@@ -782,6 +802,9 @@ void SVMCompiler::compile_type(Shader *shader, ShaderGraph *graph, ShaderType ty
}
if (generate) {
if (type == SHADER_TYPE_SURFACE) {
find_aov_nodes_and_dependencies(state.aov_nodes, graph, &state);
}
generate_multi_closure(clin->link->parent, clin->link->parent, &state);
}
}
@@ -789,28 +812,15 @@ void SVMCompiler::compile_type(Shader *shader, ShaderGraph *graph, ShaderType ty
/* compile output node */
output->compile(*this);
if (type == SHADER_TYPE_SURFACE) {
vector<OutputAOVNode *> aov_outputs;
foreach (ShaderNode *node, graph->nodes) {
if (node->special_type == SHADER_SPECIAL_TYPE_OUTPUT_AOV) {
OutputAOVNode *aov_node = static_cast<OutputAOVNode *>(node);
if (aov_node->slot >= 0) {
aov_outputs.push_back(aov_node);
}
}
}
if (aov_outputs.size() > 0) {
/* AOV passes are only written if the object is directly visible, so
* there is no point in evaluating all the nodes generated only for the
* AOV outputs if that's not the case. Therefore, we insert
* NODE_AOV_START into the shader before the AOV-only nodes are
* generated which tells the kernel that it can stop evaluation
* early if AOVs will not be written. */
add_node(NODE_AOV_START, 0, 0, 0);
foreach (OutputAOVNode *node, aov_outputs) {
generate_aov_node(node, &state);
}
}
if (!state.aov_nodes.empty()) {
/* AOV passes are only written if the object is directly visible, so
* there is no point in evaluating all the nodes generated only for the
* AOV outputs if that's not the case. Therefore, we insert
* NODE_AOV_START into the shader before the AOV-only nodes are
* generated which tells the kernel that it can stop evaluation
* early if AOVs will not be written. */
add_node(NODE_AOV_START, 0, 0, 0);
generate_svm_nodes(state.aov_nodes, &state);
}
}

View File

@@ -176,6 +176,9 @@ class SVMCompiler {
/* Set of closures which were already compiled. */
ShaderNodeSet closure_done;
/* Set of nodes used for writing AOVs. */
ShaderNodeSet aov_nodes;
/* ** SVM nodes generation state ** */
/* Flag whether the node with corresponding ID was already compiled or
@@ -197,6 +200,9 @@ class SVMCompiler {
const ShaderNodeSet &done,
ShaderInput *input,
ShaderNode *skip_node = NULL);
void find_aov_nodes_and_dependencies(ShaderNodeSet &aov_nodes,
ShaderGraph *graph,
CompilerState *state);
void generate_node(ShaderNode *node, ShaderNodeSet &done);
void generate_aov_node(ShaderNode *node, CompilerState *state);
void generate_closure_node(ShaderNode *node, CompilerState *state);

View File

@@ -296,13 +296,10 @@ def bake_action_iter(
pbone.keyframe_insert("rotation_axis_angle", index=-1, frame=f, group=name)
else: # euler, XYZ, ZXY etc
if euler_prev is not None:
euler = pbone.rotation_euler.copy()
euler.make_compatible(euler_prev)
euler = pbone.matrix_basis.to_euler(obj.rotation_mode, euler_prev)
pbone.rotation_euler = euler
euler_prev = euler
del euler
else:
euler_prev = pbone.rotation_euler.copy()
euler_prev = pbone.rotation_euler.copy()
pbone.keyframe_insert("rotation_euler", index=-1, frame=f, group=name)
pbone.keyframe_insert("scale", index=-1, frame=f, group=name)

View File

@@ -200,7 +200,7 @@ void BKE_id_free(struct Main *bmain, void *idv);
void BKE_id_free_us(struct Main *bmain, void *idv) ATTR_NONNULL();
void BKE_id_delete(struct Main *bmain, void *idv) ATTR_NONNULL();
void BKE_id_multi_tagged_delete(struct Main *bmain) ATTR_NONNULL();
size_t BKE_id_multi_tagged_delete(struct Main *bmain) ATTR_NONNULL();
void BKE_libblock_management_main_add(struct Main *bmain, void *idv);
void BKE_libblock_management_main_remove(struct Main *bmain, void *idv);

View File

@@ -237,7 +237,7 @@ void BKE_id_free_us(Main *bmain, void *idv) /* test users */
}
}
static void id_delete(Main *bmain, const bool do_tagged_deletion)
static size_t id_delete(Main *bmain, const bool do_tagged_deletion)
{
const int tag = LIB_TAG_DOIT;
ListBase *lbarray[MAX_LIBARRAY];
@@ -346,6 +346,7 @@ static void id_delete(Main *bmain, const bool do_tagged_deletion)
* have been already cleared when we reach it
* (e.g. Objects being processed before meshes, they'll have already released their 'reference'
* over meshes when we come to freeing obdata). */
size_t num_datablocks_deleted = 0;
for (i = do_tagged_deletion ? 1 : base_count; i--;) {
ListBase *lb = lbarray[i];
ID *id, *id_next;
@@ -360,11 +361,13 @@ static void id_delete(Main *bmain, const bool do_tagged_deletion)
BLI_assert(id->us == 0);
}
BKE_id_free_ex(bmain, id, free_flag, !do_tagged_deletion);
++num_datablocks_deleted;
}
}
}
bmain->is_memfile_undo_written = false;
return num_datablocks_deleted;
}
/**
@@ -386,8 +389,9 @@ void BKE_id_delete(Main *bmain, void *idv)
*
* \warning Considered experimental for now, seems to be working OK but this is
* risky code in a complicated area.
* \return Number of deleted datablocks.
*/
void BKE_id_multi_tagged_delete(Main *bmain)
size_t BKE_id_multi_tagged_delete(Main *bmain)
{
id_delete(bmain, true);
return id_delete(bmain, true);
}

View File

@@ -113,7 +113,9 @@ static void seq_convert_transform_animation(const Scene *scene,
BezTriple *bezt = fcu->bezt;
for (int i = 0; i < fcu->totvert; i++, bezt++) {
/* Same math as with old_image_center_*, but simplified. */
bezt->vec[0][1] = image_size / 2 + bezt->vec[0][1] - scene->r.xsch / 2;
bezt->vec[1][1] = image_size / 2 + bezt->vec[1][1] - scene->r.xsch / 2;
bezt->vec[2][1] = image_size / 2 + bezt->vec[2][1] - scene->r.xsch / 2;
}
}
}
@@ -250,7 +252,9 @@ static void seq_convert_transform_animation_2(const Scene *scene,
BezTriple *bezt = fcu->bezt;
for (int i = 0; i < fcu->totvert; i++, bezt++) {
/* Same math as with old_image_center_*, but simplified. */
bezt->vec[0][1] *= scale_to_fit_factor;
bezt->vec[1][1] *= scale_to_fit_factor;
bezt->vec[2][1] *= scale_to_fit_factor;
}
}
}

View File

@@ -353,6 +353,8 @@ PyDoc_STRVAR(bpy_orphans_purge_doc,
"\n"
" Remove (delete) all IDs with no user.\n"
"\n"
" :return: The number of deleted IDs.\n"
"\n"
" WARNING: Considered experimental feature currently.\n");
static PyObject *bpy_orphans_purge(PyObject *UNUSED(self),
PyObject *UNUSED(args),
@@ -376,13 +378,11 @@ static PyObject *bpy_orphans_purge(PyObject *UNUSED(self),
}
FOREACH_MAIN_ID_END;
BKE_id_multi_tagged_delete(bmain);
const size_t num_datablocks_deleted = BKE_id_multi_tagged_delete(bmain);
/* Force full redraw, mandatory to avoid crashes when running this from UI... */
WM_main_add_notifier(NC_WINDOW, NULL);
Py_INCREF(Py_None);
return Py_None;
return PyLong_FromSize_t(num_datablocks_deleted);
}
PyMethodDef BPY_rna_id_collection_user_map_method_def = {

View File

@@ -425,7 +425,10 @@ static void seq_disk_cache_handle_versioning(SeqDiskCache *disk_cache)
FILE *file = BLI_fopen(path_version_file, "r");
if (file) {
fscanf(file, "%d", &version);
const int num_items_read = fscanf(file, "%d", &version);
if (num_items_read == 0) {
version = -1;
}
fclose(file);
}
@@ -510,10 +513,14 @@ static size_t inflate_file_to_imbuf(ImBuf *ibuf, FILE *file, DiskCacheHeaderEntr
ibuf->rect_float, header_entry->size_raw, file, header_entry->offset);
}
static void seq_disk_cache_read_header(FILE *file, DiskCacheHeader *header)
static bool seq_disk_cache_read_header(FILE *file, DiskCacheHeader *header)
{
fseek(file, 0, 0);
fread(header, sizeof(*header), 1, file);
const size_t num_items_read = fread(header, sizeof(*header), 1, file);
if (num_items_read < 1) {
perror("unable to read disk cache header");
return false;
}
for (int i = 0; i < DCACHE_IMAGES_PER_FILE; i++) {
if ((ENDIAN_ORDER == B_ENDIAN) && header->entry[i].encoding == 0) {
@@ -523,6 +530,8 @@ static void seq_disk_cache_read_header(FILE *file, DiskCacheHeader *header)
BLI_endian_switch_uint64(&header->entry[i].size_raw);
}
}
return true;
}
static size_t seq_disk_cache_write_header(FILE *file, DiskCacheHeader *header)
@@ -611,8 +620,12 @@ static bool seq_disk_cache_write_file(SeqDiskCache *disk_cache, SeqCacheKey *key
DiskCacheHeader header;
memset(&header, 0, sizeof(header));
seq_disk_cache_read_header(file, &header);
if (!seq_disk_cache_read_header(file, &header)) {
fclose(file);
return false;
}
int entry_index = seq_disk_cache_add_header_entry(key, ibuf, &header);
size_t bytes_written = deflate_imbuf_to_file(
ibuf, file, seq_disk_cache_compression_level(), &header.entry[entry_index]);
@@ -644,7 +657,10 @@ static ImBuf *seq_disk_cache_read_file(SeqDiskCache *disk_cache, SeqCacheKey *ke
return NULL;
}
seq_disk_cache_read_header(file, &header);
if (!seq_disk_cache_read_header(file, &header)) {
fclose(file);
return NULL;
}
int entry_index = seq_disk_cache_get_header_entry(key, &header);
/* Item not found. */