Nodes: Use topology cache for node exec node list

Instead of generating a dependency sorted node list whenever evaluating
texture or EEVEE/viewport shader nodes, use the existing sorted array
from the topology cache. This may be more efficient because, unlike the
previous approach, the cached toposort is not built with a quadratic
algorithm. It's also the second-to-last place to use
`node.runtime->level`, which can be removed soon.

Differential Revision: https://developer.blender.org/D16565
This commit is contained in:
Hans Goudey 2022-11-21 11:30:49 -06:00
parent b0e38f4d04
commit b391037424
6 changed files with 24 additions and 40 deletions

View File

@ -512,9 +512,6 @@ bool ntreeHasTree(const struct bNodeTree *ntree, const struct bNodeTree *lookup)
void ntreeUpdateAllNew(struct Main *main);
void ntreeUpdateAllUsers(struct Main *main, struct ID *id);
void ntreeGetDependencyList(struct bNodeTree *ntree,
struct bNode ***r_deplist,
int *r_deplist_len);
void ntreeUpdateNodeLevels(struct bNodeTree *ntree);
/**

View File

@ -289,6 +289,18 @@ inline blender::Span<const bNode *> bNodeTree::toposort_right_to_left() const
return this->runtime->toposort_right_to_left;
}
inline blender::Span<bNode *> bNodeTree::toposort_left_to_right()
{
  /* Mutable access to the cached left-to-right topological ordering of this
   * tree's nodes. Only valid while the topology cache is up to date. */
  BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this));
  return runtime->toposort_left_to_right;
}
inline blender::Span<bNode *> bNodeTree::toposort_right_to_left()
{
  /* Mutable access to the cached right-to-left topological ordering of this
   * tree's nodes. Only valid while the topology cache is up to date. */
  BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this));
  return runtime->toposort_right_to_left;
}
inline blender::Span<const bNode *> bNodeTree::all_nodes() const
{
BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this));

View File

@ -4118,32 +4118,6 @@ static int node_get_deplist_recurs(bNodeTree *ntree, bNode *node, bNode ***nsort
return level;
}
/**
 * Build a dependency-sorted (topologically ordered) array of all nodes in \a ntree.
 *
 * \param ntree: The node tree whose nodes are listed.
 * \param r_deplist: Receives a newly allocated array of node pointers in dependency
 * order, or null when the tree has no nodes. The caller owns the array and must free
 * it with #MEM_freeN.
 * \param r_deplist_len: Receives the number of nodes in the array.
 *
 * Also updates `node->runtime->level` for every node as a side effect of the
 * recursive traversal.
 */
void ntreeGetDependencyList(struct bNodeTree *ntree, struct bNode ***r_deplist, int *r_deplist_len)
{
*r_deplist_len = 0;
/* first clear data */
/* Reset the visited flag on every node and count the nodes in one pass. */
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
node->runtime->done = false;
(*r_deplist_len)++;
}
/* An empty tree yields no allocation; signal that with a null array. */
if (*r_deplist_len == 0) {
*r_deplist = nullptr;
return;
}
/* `nsort` is a write cursor into the output array, advanced by the
 * recursive helper as it appends nodes. */
bNode **nsort;
nsort = *r_deplist = (bNode **)MEM_callocN((*r_deplist_len) * sizeof(bNode *),
"sorted node array");
/* recursive check */
/* Visit each not-yet-done node; the helper recurses through dependencies,
 * filling the array and returning the node's depth, stored as its level. */
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
if (node->runtime->done == 0) {
node->runtime->level = node_get_deplist_recurs(ntree, node, &nsort);
}
}
}
/* only updates node->level for detecting cycles links */
void ntreeUpdateNodeLevels(bNodeTree *ntree)
{

View File

@ -620,7 +620,9 @@ typedef struct bNodeTree {
* toposort. However, if a connected component does not contain a cycle, this component is sorted
* correctly. Use #has_available_link_cycle to check for cycles.
*/
blender::Span<bNode *> toposort_left_to_right();
blender::Span<const bNode *> toposort_left_to_right() const;
blender::Span<bNode *> toposort_right_to_left();
blender::Span<const bNode *> toposort_right_to_left() const;
/** True when there are any cycles in the node tree. */
bool has_available_link_cycle() const;

View File

@ -144,6 +144,7 @@ bNodeTreeExec *ntree_exec_begin(bNodeExecContext *context,
bNodeTree *ntree,
bNodeInstanceKey parent_key)
{
using namespace blender;
bNodeTreeExec *exec;
bNode *node;
bNodeExec *nodeexec;
@ -151,8 +152,6 @@ bNodeTreeExec *ntree_exec_begin(bNodeExecContext *context,
bNodeSocket *sock;
bNodeStack *ns;
int index;
bNode **nodelist;
int totnodes, n;
/* XXX: texture-nodes have threading issues with muting, have to disable it there. */
/* ensure all sock->link pointers and node levels are correct */
@ -161,8 +160,8 @@ bNodeTreeExec *ntree_exec_begin(bNodeExecContext *context,
* since most of the time it won't be (thanks to ntree design)!!! */
BKE_ntree_update_main_tree(G.main, ntree, nullptr);
/* get a dependency-sorted list of nodes */
ntreeGetDependencyList(ntree, &nodelist, &totnodes);
ntree->ensure_topology_cache();
const Span<bNode *> nodelist = ntree->toposort_left_to_right();
/* XXX could let callbacks do this for specialized data */
exec = MEM_cnew<bNodeTreeExec>("node tree execution data");
@ -171,7 +170,7 @@ bNodeTreeExec *ntree_exec_begin(bNodeExecContext *context,
/* set stack indices */
index = 0;
for (n = 0; n < totnodes; n++) {
for (const int n : nodelist.index_range()) {
node = nodelist[n];
/* init node socket stack indexes */
@ -192,7 +191,7 @@ bNodeTreeExec *ntree_exec_begin(bNodeExecContext *context,
}
/* allocated exec data pointers for nodes */
exec->totnodes = totnodes;
exec->totnodes = nodelist.size();
exec->nodeexec = (bNodeExec *)MEM_callocN(exec->totnodes * sizeof(bNodeExec),
"node execution data");
/* allocate data pointer for node stack */
@ -200,12 +199,13 @@ bNodeTreeExec *ntree_exec_begin(bNodeExecContext *context,
exec->stack = (bNodeStack *)MEM_callocN(exec->stacksize * sizeof(bNodeStack), "bNodeStack");
/* all non-const results are considered inputs */
int n;
for (n = 0; n < exec->stacksize; n++) {
exec->stack[n].hasinput = 1;
}
/* prepare all nodes for execution */
for (n = 0, nodeexec = exec->nodeexec; n < totnodes; n++, nodeexec++) {
for (n = 0, nodeexec = exec->nodeexec; n < nodelist.size(); n++, nodeexec++) {
node = nodeexec->node = nodelist[n];
nodeexec->free_exec_fn = node->typeinfo->free_exec_fn;
@ -236,10 +236,6 @@ bNodeTreeExec *ntree_exec_begin(bNodeExecContext *context,
}
}
if (nodelist) {
MEM_freeN(nodelist);
}
return exec;
}

View File

@ -889,6 +889,9 @@ static void ntree_shader_weight_tree_invert(bNodeTree *ntree, bNode *output_node
case SH_NODE_VOLUME_PRINCIPLED:
case SH_NODE_VOLUME_SCATTER:
fromsock = ntree_shader_node_find_input(fromnode, "Weight");
/* Make "weight" sockets available so that links to it are available as well and are
* not ignored in other places. */
fromsock->flag &= ~SOCK_UNAVAIL;
if (fromsock->link) {
ntree_weight_tree_merge_weight(ntree, fromnode, fromsock, &tonode, &tosock);
}