Cleanup: Add accessor for node index

Add `bNode::index()` to allow accessing node indices directly without
manually dereferencing the runtime struct. Also add some asserts to make
sure the access is valid and that the index stays consistent with the
tree's runtime `nodes_by_id` vector.

Eagerly maintain the node's index in the tree so it can be accessed
without relying on the topology cache.

Differential Revision: https://developer.blender.org/D16683
Hans Goudey, 2022-12-11 20:23:18 -06:00
commit 178eb5bac5 (parent 05bf5c4e0e)
9 changed files with 59 additions and 31 deletions
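
For illustration only (not part of this commit): the new accessor targets the common pattern of addressing a per-node array by the node's position in the tree, as the toposort and join/detach hunks below do. A minimal sketch, assuming Blender's `BKE_node_runtime.hh` and `BLI_array.hh` headers; the function name and the `visited` array are made up:

#include "BKE_node_runtime.hh"
#include "BLI_array.hh"

/* Hypothetical usage sketch: per-node scratch state addressed by
 * bNode::index() instead of node->runtime->index_in_tree. */
static void tag_all_nodes(const bNodeTree &ntree)
{
  blender::Array<bool> visited(ntree.all_nodes().size(), false);
  for (const bNode *node : ntree.all_nodes()) {
    /* Valid as long as the eagerly maintained nodes_by_id vector is in sync. */
    visited[node->index()] = true;
  }
}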


@@ -668,6 +668,10 @@ void nodeUnlinkNode(struct bNodeTree *ntree, struct bNode *node);
* Find the first available, non-duplicate name for a given node.
*/
void nodeUniqueName(struct bNodeTree *ntree, struct bNode *node);
/**
* Create a new unique integer identifier for the node. Also set the node's
* index in the tree, which is an eagerly maintained cache.
*/
void nodeUniqueID(struct bNodeTree *ntree, struct bNode *node);
/**


@@ -251,12 +251,14 @@ class bNodeRuntime : NonCopyable, NonMovable {
/** List of cached internal links (input to output), for muted nodes and operators. */
Vector<bNodeLink *> internal_links;
/** Eagerly maintained cache of the node's index in the tree. */
int index_in_tree = -1;
/** Only valid if #topology_cache_is_dirty is false. */
Vector<bNodeSocket *> inputs;
Vector<bNodeSocket *> outputs;
Map<StringRefNull, bNodeSocket *> inputs_by_identifier;
Map<StringRefNull, bNodeSocket *> outputs_by_identifier;
int index_in_tree = -1;
bool has_available_linked_inputs = false;
bool has_available_linked_outputs = false;
Vector<bNode *> direct_children_in_frame;
@@ -467,6 +469,15 @@ inline blender::Span<bNode *> bNodeTree::root_frames() const
/** \name #bNode Inline Methods
* \{ */
inline int bNode::index() const
{
const int index = this->runtime->index_in_tree;
/* The order of nodes should always be consistent with the `nodes_by_id` vector. */
BLI_assert(index ==
this->runtime->owner_tree->runtime->nodes_by_id.index_of_as(this->identifier));
return index;
}
inline blender::Span<bNodeSocket *> bNode::input_sockets()
{
BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this));


@@ -145,11 +145,13 @@ static void ntree_copy_data(Main * /*bmain*/, ID *id_dst, const ID *id_src, cons
dst_runtime.nodes_by_id.reserve(ntree_src->all_nodes().size());
BLI_listbase_clear(&ntree_dst->nodes);
LISTBASE_FOREACH (const bNode *, src_node, &ntree_src->nodes) {
int i;
LISTBASE_FOREACH_INDEX (const bNode *, src_node, &ntree_src->nodes, i) {
/* Don't find a unique name for every node, since they should have valid names already. */
bNode *new_node = blender::bke::node_copy_with_mapping(
ntree_dst, *src_node, flag_subdata, false, socket_map);
dst_runtime.nodes_by_id.add_new(new_node);
new_node->runtime->index_in_tree = i;
}
/* copy links */
@@ -673,9 +675,11 @@ void ntreeBlendReadData(BlendDataReader *reader, ID *owner_id, bNodeTree *ntree)
BKE_animdata_blend_read_data(reader, ntree->adt);
BLO_read_list(reader, &ntree->nodes);
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
int i;
LISTBASE_FOREACH_INDEX (bNode *, node, &ntree->nodes, i) {
node->runtime = MEM_new<bNodeRuntime>(__func__);
node->typeinfo = nullptr;
node->runtime->index_in_tree = i;
/* Create the `nodes_by_id` cache eagerly so it can be expected to be valid. Because
* we create it here we also have to check for zero identifiers from previous versions. */
@@ -2197,6 +2201,8 @@ void nodeUniqueID(bNodeTree *ntree, bNode *node)
node->identifier = new_id;
ntree->runtime->nodes_by_id.add_new(node);
node->runtime->index_in_tree = ntree->runtime->nodes_by_id.index_range().last();
BLI_assert(node->runtime->index_in_tree == ntree->runtime->nodes_by_id.index_of(node));
}
bNode *nodeAddNode(const bContext *C, bNodeTree *ntree, const char *idname)
@@ -2936,8 +2942,10 @@ void nodeRebuildIDVector(bNodeTree *node_tree)
{
/* Rebuild nodes #VectorSet which must have the same order as the list. */
node_tree->runtime->nodes_by_id.clear();
LISTBASE_FOREACH (bNode *, node, &node_tree->nodes) {
int i;
LISTBASE_FOREACH_INDEX (bNode *, node, &node_tree->nodes, i) {
node_tree->runtime->nodes_by_id.add_new(node);
node->runtime->index_in_tree = i;
}
}
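
For context (not part of this commit): the hunks above keep `index_in_tree` in sync with the `nodes_by_id` vector everywhere the vector is built, so the invariant is simply that a node's cached index equals its position in that vector. A hedged sketch of that check, mirroring the debug assert added to the tree updater further down; the helper name is hypothetical:

/* Hypothetical debug helper: every node's eagerly maintained index must
 * match its position in the tree's runtime nodes_by_id vector. */
static bool node_indices_valid(const bNodeTree &ntree)
{
  const blender::Span<const bNode *> nodes = ntree.all_nodes();
  for (const int i : nodes.index_range()) {
    if (nodes[i]->runtime->index_in_tree != i) {
      return false;
    }
  }
  return true;
}

Such a helper could then be used as `BLI_assert(node_indices_valid(ntree))` wherever the invariant is expected to hold.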


@@ -278,7 +278,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
Stack<Item, 64> nodes_to_check;
nodes_to_check.push({&start_node});
node_states[start_node.runtime->index_in_tree].is_in_stack = true;
node_states[start_node.index()].is_in_stack = true;
while (!nodes_to_check.is_empty()) {
Item &item = nodes_to_check.peek();
bNode &node = *item.node;
@@ -306,7 +306,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
}
bNodeSocket &linked_socket = *socket.runtime->directly_linked_sockets[item.link_index];
bNode &linked_node = *linked_socket.runtime->owner_node;
ToposortNodeState &linked_node_state = node_states[linked_node.runtime->index_in_tree];
ToposortNodeState &linked_node_state = node_states[linked_node.index()];
if (linked_node_state.is_done) {
/* The linked node has already been visited. */
item.link_index++;
@@ -324,7 +324,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
/* If no other element has been pushed, the current node can be pushed to the sorted list. */
if (&item == &nodes_to_check.peek()) {
ToposortNodeState &node_state = node_states[node.runtime->index_in_tree];
ToposortNodeState &node_state = node_states[node.index()];
node_state.is_done = true;
node_state.is_in_stack = false;
r_sorted_nodes.append(&node);
@@ -345,7 +345,7 @@ static void update_toposort(const bNodeTree &ntree,
Array<ToposortNodeState> node_states(tree_runtime.nodes_by_id.size());
for (bNode *node : tree_runtime.nodes_by_id) {
if (node_states[node->runtime->index_in_tree].is_done) {
if (node_states[node->index()].is_done) {
/* Ignore nodes that are done already. */
continue;
}
@@ -361,7 +361,7 @@ static void update_toposort(const bNodeTree &ntree,
if (r_sorted_nodes.size() < tree_runtime.nodes_by_id.size()) {
r_cycle_detected = true;
for (bNode *node : tree_runtime.nodes_by_id) {
if (node_states[node->runtime->index_in_tree].is_done) {
if (node_states[node->index()].is_done) {
/* Ignore nodes that are done already. */
continue;
}


@@ -492,9 +492,12 @@ class NodeTreeMainUpdater {
#ifdef DEBUG
/* Check the uniqueness of node identifiers. */
Set<int32_t> node_identifiers;
for (bNode *node : ntree.all_nodes()) {
BLI_assert(node->identifier > 0);
node_identifiers.add_new(node->identifier);
const Span<const bNode *> nodes = ntree.all_nodes();
for (const int i : nodes.index_range()) {
const bNode &node = *nodes[i];
BLI_assert(node.identifier > 0);
node_identifiers.add_new(node.identifier);
BLI_assert(node.runtime->index_in_tree == i);
}
#endif
@@ -761,15 +764,14 @@ class NodeTreeMainUpdater {
Array<int> toposort_indices(toposort.size());
for (const int i : toposort.index_range()) {
const bNode &node = *toposort[i];
toposort_indices[node.runtime->index_in_tree] = i;
toposort_indices[node.index()] = i;
}
LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
link->flag |= NODE_LINK_VALID;
const bNode &from_node = *link->fromnode;
const bNode &to_node = *link->tonode;
if (toposort_indices[from_node.runtime->index_in_tree] >
toposort_indices[to_node.runtime->index_in_tree]) {
if (toposort_indices[from_node.index()] > toposort_indices[to_node.index()]) {
link->flag &= ~NODE_LINK_VALID;
continue;
}


@@ -268,6 +268,7 @@ void node_sort(bNodeTree &ntree)
for (const int i : sort_nodes.index_range()) {
BLI_addtail(&ntree.nodes, sort_nodes[i]);
ntree.runtime->nodes_by_id.add_new(sort_nodes[i]);
sort_nodes[i]->runtime->index_in_tree = i;
}
}


@@ -1641,31 +1641,31 @@ static void node_join_attach_recursive(bNodeTree &ntree,
bNode *frame,
const VectorSet<bNode *> &selected_nodes)
{
join_states[node->runtime->index_in_tree].done = true;
join_states[node->index()].done = true;
if (node == frame) {
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
else if (node->parent) {
/* call recursively */
if (!join_states[node->parent->runtime->index_in_tree].done) {
if (!join_states[node->parent->index()].done) {
node_join_attach_recursive(ntree, join_states, node->parent, frame, selected_nodes);
}
/* in any case: if the parent is a descendant, so is the child */
if (join_states[node->parent->runtime->index_in_tree].descendent) {
join_states[node->runtime->index_in_tree].descendent = true;
if (join_states[node->parent->index()].descendent) {
join_states[node->index()].descendent = true;
}
else if (selected_nodes.contains(node)) {
/* if parent is not an descendant of the frame, reattach the node */
nodeDetachNode(&ntree, node);
nodeAttachNode(&ntree, node, frame);
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
}
else if (selected_nodes.contains(node)) {
nodeAttachNode(&ntree, node, frame);
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
}
@@ -1685,7 +1685,7 @@ static int node_join_exec(bContext *C, wmOperator * /*op*/)
Array<NodeJoinState> join_states(ntree.all_nodes().size(), NodeJoinState{false, false});
for (bNode *node : ntree.all_nodes()) {
if (!join_states[node->runtime->index_in_tree].done) {
if (!join_states[node->index()].done) {
node_join_attach_recursive(ntree, join_states, node, frame_node, selected_nodes);
}
}
@@ -1818,26 +1818,26 @@ static void node_detach_recursive(bNodeTree &ntree,
MutableSpan<NodeDetachstate> detach_states,
bNode *node)
{
detach_states[node->runtime->index_in_tree].done = true;
detach_states[node->index()].done = true;
if (node->parent) {
/* call recursively */
if (!detach_states[node->parent->runtime->index_in_tree].done) {
if (!detach_states[node->parent->index()].done) {
node_detach_recursive(ntree, detach_states, node->parent);
}
/* in any case: if the parent is a descendant, so is the child */
if (detach_states[node->parent->runtime->index_in_tree].descendent) {
detach_states[node->runtime->index_in_tree].descendent = true;
if (detach_states[node->parent->index()].descendent) {
detach_states[node->index()].descendent = true;
}
else if (node->flag & NODE_SELECT) {
/* if parent is not a descendant of a selected node, detach */
nodeDetachNode(&ntree, node);
detach_states[node->runtime->index_in_tree].descendent = true;
detach_states[node->index()].descendent = true;
}
}
else if (node->flag & NODE_SELECT) {
detach_states[node->runtime->index_in_tree].descendent = true;
detach_states[node->index()].descendent = true;
}
}
@@ -1853,7 +1853,7 @@ static int node_detach_exec(bContext *C, wmOperator * /*op*/)
* relative order is preserved here!
*/
for (bNode *node : ntree.all_nodes()) {
if (!detach_states[node->runtime->index_in_tree].done) {
if (!detach_states[node->index()].done) {
node_detach_recursive(ntree, detach_states, node);
}
}


@@ -1267,7 +1267,7 @@ static int node_select_same_type_step_exec(bContext *C, wmOperator *op)
}
}
bNode *new_active_node = node_tree.all_nodes()[toposort[new_index]->runtime->index_in_tree];
bNode *new_active_node = node_tree.all_nodes()[toposort[new_index]->index()];
if (new_active_node == &active_node) {
return OPERATOR_CANCELLED;
}


@@ -358,6 +358,8 @@ typedef struct bNode {
bNodeRuntimeHandle *runtime;
#ifdef __cplusplus
/** The index in the owner node tree. */
int index() const;
blender::StringRefNull label_or_name() const;
bool is_muted() const;
bool is_reroute() const;