Cleanup: Use topology cache of group output node

Using the topology cache greatly simplifies access to the active group output node.
I converted the most common and most straightforward cases.
The texture nodes were also updated because their code follows the same generic pattern.
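
In short, the lookup changes from scanning all nodes for the active NodeGroupOutput to a cached, null-checked accessor. A minimal sketch of the new pattern (the wrapper function is hypothetical; ensure_topology_cache() and group_output_node() are the accessors used in the diffs below):

    /* Sketch only: the lookup pattern this commit converts to.
     * `active_group_output` is a hypothetical wrapper for illustration. */
    static const bNode *active_group_output(const bNodeTree &tree)
    {
      /* Build or reuse the tree's cached topology. */
      tree.ensure_topology_cache();
      /* Cached active group output node, or null if the group has none. */
      return tree.group_output_node();
    }

Callers must handle the null result, which replaces the old silent fall-through when no output node is active.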

Differential Revision: https://developer.blender.org/D16699
Author: Iliya Katueshenock, 2022-12-09 15:35:38 -06:00 (committed by Hans Goudey)
Parent: ad05b78d09
Commit: fc5f7a1e2d
3 changed files with 39 additions and 43 deletions


@@ -509,20 +509,15 @@ static void determine_group_output_states(const bNodeTree &tree,
                                           FieldInferencingInterface &new_inferencing_interface,
                                           const Span<SocketFieldState> field_state_by_socket_id)
 {
-  for (const bNode *group_output_node : tree.nodes_by_type("NodeGroupOutput")) {
-    /* Ignore inactive group output nodes. */
-    if (!(group_output_node->flag & NODE_DO_OUTPUT)) {
-      continue;
-    }
-    /* Determine dependencies of all group outputs. */
-    for (const bNodeSocket *group_output_socket :
-         group_output_node->input_sockets().drop_back(1)) {
-      OutputFieldDependency field_dependency = find_group_output_dependencies(
-          *group_output_socket, field_state_by_socket_id);
-      new_inferencing_interface.outputs[group_output_socket->index()] = std::move(
-          field_dependency);
-    }
-    break;
+  const bNode *group_output_node = tree.group_output_node();
+  if (!group_output_node) {
+    return;
   }
+  for (const bNodeSocket *group_output_socket : group_output_node->input_sockets().drop_back(1)) {
+    OutputFieldDependency field_dependency = find_group_output_dependencies(
+        *group_output_socket, field_state_by_socket_id);
+    new_inferencing_interface.outputs[group_output_socket->index()] = std::move(field_dependency);
+  }
 }


@@ -43,19 +43,20 @@ static void group_gpu_copy_inputs(bNode *gnode, GPUNodeStack *in, bNodeStack *gstack)
  */
 static void group_gpu_move_outputs(bNode *gnode, GPUNodeStack *out, bNodeStack *gstack)
 {
-  bNodeTree *ngroup = (bNodeTree *)gnode->id;
+  const bNodeTree &ngroup = *reinterpret_cast<bNodeTree *>(gnode->id);
 
-  for (bNode *node : ngroup->all_nodes()) {
-    if (node->type == NODE_GROUP_OUTPUT && (node->flag & NODE_DO_OUTPUT)) {
-      int a;
-      LISTBASE_FOREACH_INDEX (bNodeSocket *, sock, &node->inputs, a) {
-        bNodeStack *ns = node_get_socket_stack(gstack, sock);
-        if (ns) {
-          /* convert the node stack data result back to gpu stack */
-          node_gpu_stack_from_data(&out[a], sock->type, ns);
-        }
-      }
-      break; /* only one active output node */
+  ngroup.ensure_topology_cache();
+  const bNode *group_output_node = ngroup.group_output_node();
+  if (!group_output_node) {
+    return;
+  }
+  int a;
+  LISTBASE_FOREACH_INDEX (bNodeSocket *, sock, &group_output_node->inputs, a) {
+    bNodeStack *ns = node_get_socket_stack(gstack, sock);
+    if (ns) {
+      /* convert the node stack data result back to gpu stack */
+      node_gpu_stack_from_data(&out[a], sock->type, ns);
     }
   }
 }


@@ -84,24 +84,24 @@ static void group_copy_inputs(bNode *gnode, bNodeStack **in, bNodeStack *gstack)
  */
 static void group_copy_outputs(bNode *gnode, bNodeStack **out, bNodeStack *gstack)
 {
-  bNodeTree *ngroup = (bNodeTree *)gnode->id;
-  bNode *node;
-  bNodeSocket *sock;
-  bNodeStack *ns;
-  int a;
+  const bNodeTree &ngroup = *reinterpret_cast<bNodeTree *>(gnode->id);
 
-  for (node = static_cast<bNode *>(ngroup->nodes.first); node; node = node->next) {
-    if (node->type == NODE_GROUP_OUTPUT && (node->flag & NODE_DO_OUTPUT)) {
-      for (sock = static_cast<bNodeSocket *>(node->inputs.first), a = 0; sock;
-           sock = sock->next, a++) {
-        if (out[a]) { /* shouldn't need to check this T36694. */
-          ns = node_get_socket_stack(gstack, sock);
-          if (ns) {
-            copy_stack(out[a], ns);
-          }
-        }
-      }
-      break; /* only one active output node */
+  ngroup.ensure_topology_cache();
+  const bNode *group_output_node = ngroup.group_output_node();
+  if (!group_output_node) {
+    return;
+  }
+  int a;
+  LISTBASE_FOREACH_INDEX (bNodeSocket *, sock, &group_output_node->inputs, a) {
+    if (!out[a]) {
+      /* shouldn't need to check this T36694. */
+      continue;
+    }
+    bNodeStack *ns = node_get_socket_stack(gstack, sock);
+    if (ns) {
+      copy_stack(out[a], ns);
     }
   }
 }