Nodes: use CacheMutex to protect topology cache

No functional changes are expected.
Jacques Lucke 2022-12-20 13:05:02 +01:00
parent 105c0aa5b6
commit 844b6e3982
4 changed files with 34 additions and 57 deletions
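For readers who have not seen BLI_cache_mutex.hh: the diff only shows the call sites, but the removed double_checked_lock helpers further down make the intent clear. Below is a simplified, self-contained sketch of what a CacheMutex-style type looks like. It is an illustration of the idea (a mutex plus an atomic validity flag behind ensure()/tag_dirty()/is_dirty()/is_cached()), not the actual Blender implementation, which presumably also wraps the computation in task isolation, as the removed helper did.

```cpp
/* Simplified sketch of the CacheMutex idea: a mutex plus an atomic "cache is valid"
 * flag, with the double-checked locking hidden behind ensure(). This is NOT the
 * actual BLI_cache_mutex.hh code. */
#include <atomic>
#include <functional>
#include <mutex>

class MiniCacheMutex {
 private:
  std::mutex mutex_;
  std::atomic<bool> cache_valid_ = false;

 public:
  /* Run `compute_cache` if the cache is invalid; afterwards it is valid. Safe to
   * call concurrently; the computation runs at most once per invalidation. */
  void ensure(const std::function<void()> &compute_cache)
  {
    if (cache_valid_.load(std::memory_order_acquire)) {
      return;
    }
    std::lock_guard lock{mutex_};
    /* Double check: another thread may have built the cache while we waited. */
    if (cache_valid_.load()) {
      return;
    }
    compute_cache();
    cache_valid_.store(true, std::memory_order_release);
  }

  /* Mark the cache invalid; the next ensure() call recomputes it. */
  void tag_dirty()
  {
    cache_valid_.store(false);
  }

  bool is_dirty() const
  {
    return !cache_valid_.load();
  }

  bool is_cached() const
  {
    return cache_valid_.load();
  }
};
```

Blender's own helpers in this diff take a FunctionRef; std::function is used above only to keep the sketch dependency-free.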


@@ -5,6 +5,7 @@
 #include <memory>
 #include <mutex>
+#include "BLI_cache_mutex.hh"
 #include "BLI_multi_value_map.hh"
 #include "BLI_utility_mixins.hh"
 #include "BLI_vector.hh"
@@ -119,9 +120,8 @@ class bNodeTreeRuntime : NonCopyable, NonMovable {
    * Protects access to all topology cache variables below. This is necessary so that the cache can
    * be updated on a const #bNodeTree.
    */
-  std::mutex topology_cache_mutex;
-  bool topology_cache_is_dirty = true;
-  bool topology_cache_exists = false;
+  CacheMutex topology_cache_mutex;
+  std::atomic<bool> topology_cache_exists = false;
   /**
    * Under some circumstances, it can be useful to use the cached data while editing the
    * #bNodeTree. By default, this is protected against using an assert.
@@ -298,7 +298,7 @@ inline bool topology_cache_is_available(const bNodeTree &tree)
   if (tree.runtime->allow_use_dirty_topology_cache.load() > 0) {
     return true;
   }
-  if (tree.runtime->topology_cache_is_dirty) {
+  if (tree.runtime->topology_cache_mutex.is_dirty()) {
     return false;
   }
   return true;

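A side note on the "cache can be updated on a const #bNodeTree" comment above: this works because the runtime data is reached through a pointer member, so the const-ness of the tree does not propagate to *runtime. A minimal illustration with invented names, not Blender code:

```cpp
#include <atomic>

/* Hypothetical stand-ins for bNodeTree/bNodeTreeRuntime, for illustration only. */
struct Runtime {
  std::atomic<bool> topology_cache_dirty = true;
};

struct Tree {
  Runtime *runtime; /* Pointer member: a const Tree does not make *runtime const. */
};

/* Legal despite the const reference: only the pointer itself is const. */
void ensure_cache(const Tree &tree)
{
  if (tree.runtime->topology_cache_dirty.load()) {
    /* ... rebuild the cache here ... */
    tree.runtime->topology_cache_dirty.store(false);
  }
}
```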

@@ -2010,7 +2010,7 @@ bNode *nodeFindNodebyName(bNodeTree *ntree, const char *name)
 void nodeFindNode(bNodeTree *ntree, bNodeSocket *sock, bNode **r_node, int *r_sockindex)
 {
   *r_node = nullptr;
-  if (!ntree->runtime->topology_cache_is_dirty) {
+  if (ntree->runtime->topology_cache_mutex.is_cached()) {
     bNode *node = &sock->owner_node();
     *r_node = node;
     if (r_sockindex) {

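The is_cached() fast path above only helps once the topology cache has been built; the remainder of nodeFindNode (not shown in this hunk) presumably falls back to searching. A generic sketch of that "trust the cache or scan" shape, with invented types rather than Blender's:

```cpp
#include <vector>

/* Invented minimal node/socket model, only to show the shape of the lookup. */
struct Node;
struct Socket {
  Node *owner = nullptr; /* Back-pointer that a topology cache would maintain. */
};
struct Node {
  std::vector<Socket *> sockets;
};

Node *find_owner_node(const std::vector<Node *> &all_nodes,
                      Socket *sock,
                      const bool cache_is_valid)
{
  if (cache_is_valid) {
    /* Fast path: trust the cached back-pointer. */
    return sock->owner;
  }
  /* Cold path: linear scan over every node's sockets. */
  for (Node *node : all_nodes) {
    for (Socket *candidate : node->sockets) {
      if (candidate == sock) {
        return node;
      }
    }
  }
  return nullptr;
}
```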

@@ -22,26 +22,6 @@ void preprocess_geometry_node_tree_for_evaluation(bNodeTree &tree_cow)
   blender::nodes::ensure_geometry_nodes_lazy_function_graph(tree_cow);
 }

-static void double_checked_lock(std::mutex &mutex, bool &data_is_dirty, FunctionRef<void()> fn)
-{
-  if (!data_is_dirty) {
-    return;
-  }
-  std::lock_guard lock{mutex};
-  if (!data_is_dirty) {
-    return;
-  }
-  fn();
-  data_is_dirty = false;
-}
-
-static void double_checked_lock_with_task_isolation(std::mutex &mutex,
-                                                     bool &data_is_dirty,
-                                                     FunctionRef<void()> fn)
-{
-  double_checked_lock(mutex, data_is_dirty, [&]() { threading::isolate_task(fn); });
-}
-
 static void update_interface_sockets(const bNodeTree &ntree)
 {
   bNodeTreeRuntime &tree_runtime = *ntree.runtime;
@@ -434,37 +414,34 @@ static void update_group_output_node(const bNodeTree &ntree)
 static void ensure_topology_cache(const bNodeTree &ntree)
 {
   bNodeTreeRuntime &tree_runtime = *ntree.runtime;
-  double_checked_lock_with_task_isolation(
-      tree_runtime.topology_cache_mutex, tree_runtime.topology_cache_is_dirty, [&]() {
-        update_interface_sockets(ntree);
-        update_node_vector(ntree);
-        update_link_vector(ntree);
-        update_socket_vectors_and_owner_node(ntree);
-        update_internal_link_inputs(ntree);
-        update_directly_linked_links_and_sockets(ntree);
-        threading::parallel_invoke(
-            tree_runtime.nodes_by_id.size() > 32,
-            [&]() { update_logical_origins(ntree); },
-            [&]() { update_nodes_by_type(ntree); },
-            [&]() { update_sockets_by_identifier(ntree); },
-            [&]() {
-              update_toposort(ntree,
-                              ToposortDirection::LeftToRight,
-                              tree_runtime.toposort_left_to_right,
-                              tree_runtime.has_available_link_cycle);
-            },
-            [&]() {
-              bool dummy;
-              update_toposort(ntree,
-                              ToposortDirection::RightToLeft,
-                              tree_runtime.toposort_right_to_left,
-                              dummy);
-            },
-            [&]() { update_root_frames(ntree); },
-            [&]() { update_direct_frames_childrens(ntree); });
-        update_group_output_node(ntree);
-        tree_runtime.topology_cache_exists = true;
-      });
+  tree_runtime.topology_cache_mutex.ensure([&]() {
+    update_interface_sockets(ntree);
+    update_node_vector(ntree);
+    update_link_vector(ntree);
+    update_socket_vectors_and_owner_node(ntree);
+    update_internal_link_inputs(ntree);
+    update_directly_linked_links_and_sockets(ntree);
+    threading::parallel_invoke(
+        tree_runtime.nodes_by_id.size() > 32,
+        [&]() { update_logical_origins(ntree); },
+        [&]() { update_nodes_by_type(ntree); },
+        [&]() { update_sockets_by_identifier(ntree); },
+        [&]() {
+          update_toposort(ntree,
+                          ToposortDirection::LeftToRight,
+                          tree_runtime.toposort_left_to_right,
+                          tree_runtime.has_available_link_cycle);
+        },
+        [&]() {
+          bool dummy;
+          update_toposort(
+              ntree, ToposortDirection::RightToLeft, tree_runtime.toposort_right_to_left, dummy);
+        },
+        [&]() { update_root_frames(ntree); },
+        [&]() { update_direct_frames_childrens(ntree); });
+    update_group_output_node(ntree);
+    tree_runtime.topology_cache_exists = true;
+  });
 }
 } // namespace blender::bke::node_tree_runtime

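The rewrite above is the core of the commit: the hand-rolled double_checked_lock_with_task_isolation call becomes a single topology_cache_mutex.ensure(...) call with the same body. The practical guarantee is that any number of threads may request the cache on a const tree and the rebuild runs at most once per invalidation. A small usage sketch of that guarantee, reusing the hypothetical MiniCacheMutex from the first sketch rather than the real tree code:

```cpp
#include <thread>
#include <vector>

/* Assumes the MiniCacheMutex sketch from above; `expensive_result` stands in for
 * the topology cache. */
static MiniCacheMutex cache_mutex;
static int expensive_result = 0;

static void ensure_expensive_result()
{
  cache_mutex.ensure([]() {
    /* Runs at most once until the next tag_dirty(), even with concurrent callers. */
    expensive_result = 42;
  });
}

int main()
{
  std::vector<std::thread> threads;
  for (int i = 0; i < 8; i++) {
    threads.emplace_back(ensure_expensive_result);
  }
  for (std::thread &t : threads) {
    t.join();
  }
  return expensive_result == 42 ? 0 : 1;
}
```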

@@ -51,7 +51,7 @@ enum eNodeTreeChangedFlag {
 static void add_tree_tag(bNodeTree *ntree, const eNodeTreeChangedFlag flag)
 {
   ntree->runtime->changed_flag |= flag;
-  ntree->runtime->topology_cache_is_dirty = true;
+  ntree->runtime->topology_cache_mutex.tag_dirty();
 }

 static void add_node_tag(bNodeTree *ntree, bNode *node, const eNodeTreeChangedFlag flag)
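add_tree_tag() is the invalidation half of the pattern: structural edits tag the cache dirty, and the next reader pays for the rebuild inside ensure(). In terms of the hypothetical MiniCacheMutex sketched earlier, the lifecycle is simply:

```cpp
/* Invalidate-then-rebuild lifecycle, using the hypothetical MiniCacheMutex. */
MiniCacheMutex topology_cache_mutex;

void on_tree_edited()
{
  /* Cheap: just flips the atomic flag, no locking, no rebuild yet. */
  topology_cache_mutex.tag_dirty();
}

void on_tree_queried()
{
  topology_cache_mutex.ensure([]() {
    /* The rebuild runs here only if an edit happened since the last rebuild. */
  });
}
```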