Depsgraph: Avoid threading for trivial operations

Found this while looking into T70463; it solves the high spinning times
mentioned in T70463#791026.

It sounds logical that iterating over an array to modify a single
property is faster than doing it in threads. Strangely, though, doing
it for both ID nodes and their components is still faster in threads
here.

Gives an extra speedup with the file mentioned in the report.

Reviewed By: brecht, mont29

Differential Revision: https://developer.blender.org/D6017
Sergey Sharybin 2019-10-08 11:34:07 +02:00
parent abc36cad83
commit 054ab92f8b
3 changed files with 20 additions and 73 deletions
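As an illustration of the trade-off the commit message describes, here is a minimal standalone sketch; it is not Blender code, and the Node struct, element count, and thread count are invented for the example. When the per-element work is a single store, the fixed cost of spawning and joining threads can exceed the cost of the entire serial loop.

// Standalone sketch: trivial per-element work, serial vs. threaded.
// Everything here (Node, counts) is hypothetical, for illustration only.
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <thread>
#include <vector>

struct Node {
  bool scheduled = true;
};

/* Trivial operation: a single store per element, no contention. */
static void clear_serial(std::vector<Node> &nodes)
{
  for (Node &node : nodes) {
    node.scheduled = false;
  }
}

/* The same work split across short-lived worker threads. */
static void clear_threaded(std::vector<Node> &nodes, const int num_threads)
{
  std::vector<std::thread> workers;
  const size_t chunk = (nodes.size() + num_threads - 1) / num_threads;
  for (int t = 0; t < num_threads; t++) {
    const size_t begin = std::min(nodes.size(), t * chunk);
    const size_t end = std::min(nodes.size(), begin + chunk);
    workers.emplace_back([&nodes, begin, end]() {
      for (size_t i = begin; i < end; i++) {
        nodes[i].scheduled = false;
      }
    });
  }
  for (std::thread &worker : workers) {
    worker.join();
  }
}

int main()
{
  using Clock = std::chrono::steady_clock;
  std::vector<Node> nodes(100000); /* Stands in for "many operation nodes". */

  const auto t0 = Clock::now();
  clear_serial(nodes);
  const auto t1 = Clock::now();
  clear_threaded(nodes, 8);
  const auto t2 = Clock::now();

  const auto us = [](Clock::duration d) {
    return std::chrono::duration_cast<std::chrono::microseconds>(d).count();
  };
  std::printf("serial:   %lld us\n", static_cast<long long>(us(t1 - t0)));
  std::printf("threaded: %lld us\n", static_cast<long long>(us(t2 - t1)));
  return 0;
}

In Blender itself the threaded path went through BLI_task_parallel_range with min_iter_per_thread = 1024, as the removed lines in the diffs below show; the sketch uses plain std::thread only to stay self-contained.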

View File

@@ -809,24 +809,6 @@ static void deg_graph_clear_id_recalc_flags(ID *id)
   }
 }
 
-static void deg_graph_clear_id_node_func(void *__restrict data_v,
-                                         const int i,
-                                         const TaskParallelTLS *__restrict /*tls*/)
-{
-  /* TODO: we clear original ID recalc flags here, but this may not work
-   * correctly when there are multiple depsgraph with others still using
-   * the recalc flag. */
-  DEG::Depsgraph *deg_graph = reinterpret_cast<DEG::Depsgraph *>(data_v);
-  DEG::IDNode *id_node = deg_graph->id_nodes[i];
-  id_node->is_user_modified = false;
-  deg_graph_clear_id_recalc_flags(id_node->id_cow);
-  if (deg_graph->is_active) {
-    deg_graph_clear_id_recalc_flags(id_node->id_orig);
-  }
-}
-
 void DEG_ids_clear_recalc(Main *UNUSED(bmain), Depsgraph *depsgraph)
 {
   DEG::Depsgraph *deg_graph = reinterpret_cast<DEG::Depsgraph *>(depsgraph);
@@ -836,10 +818,14 @@ void DEG_ids_clear_recalc(Main *UNUSED(bmain), Depsgraph *depsgraph)
     return;
   }
   /* Go over all ID nodes nodes, clearing tags. */
-  const int num_id_nodes = deg_graph->id_nodes.size();
-  TaskParallelSettings settings;
-  BLI_parallel_range_settings_defaults(&settings);
-  settings.min_iter_per_thread = 1024;
-  BLI_task_parallel_range(0, num_id_nodes, deg_graph, deg_graph_clear_id_node_func, &settings);
+  for (DEG::IDNode *id_node : deg_graph->id_nodes) {
+    /* TODO: we clear original ID recalc flags here, but this may not work
+     * correctly when there are multiple depsgraph with others still using
+     * the recalc flag. */
+    id_node->is_user_modified = false;
+    deg_graph_clear_id_recalc_flags(id_node->id_cow);
+    if (deg_graph->is_active) {
+      deg_graph_clear_id_recalc_flags(id_node->id_orig);
+    }
+  }
   memset(deg_graph->id_type_updated, 0, sizeof(deg_graph->id_type_updated));
 }

View File

@@ -91,10 +91,6 @@ static void deg_task_run_func(TaskPool *pool, void *taskdata, int thread_id)
   BLI_task_pool_delayed_push_end(pool, thread_id);
 }
 
-struct CalculatePendingData {
-  Depsgraph *graph;
-};
-
 static bool check_operation_node_visible(OperationNode *op_node)
 {
   const ComponentNode *comp_node = op_node->owner;
@@ -106,13 +102,8 @@ static bool check_operation_node_visible(OperationNode *op_node)
   return comp_node->affects_directly_visible;
 }
 
-static void calculate_pending_func(void *__restrict data_v,
-                                   const int i,
-                                   const TaskParallelTLS *__restrict /*tls*/)
+static void calculate_pending_parents_for_node(OperationNode *node)
 {
-  CalculatePendingData *data = (CalculatePendingData *)data_v;
-  Depsgraph *graph = data->graph;
-  OperationNode *node = graph->operations[i];
   /* Update counters, applies for both visible and invisible IDs. */
   node->num_links_pending = 0;
   node->scheduled = false;
@@ -145,13 +136,9 @@
 static void calculate_pending_parents(Depsgraph *graph)
 {
-  const int num_operations = graph->operations.size();
-  CalculatePendingData data;
-  data.graph = graph;
-  TaskParallelSettings settings;
-  BLI_parallel_range_settings_defaults(&settings);
-  settings.min_iter_per_thread = 1024;
-  BLI_task_parallel_range(0, num_operations, &data, calculate_pending_func, &settings);
+  for (OperationNode *node : graph->operations) {
+    calculate_pending_parents_for_node(node);
+  }
 }
 
 static void initialize_execution(DepsgraphEvalState *state, Depsgraph *graph)

View File

@@ -87,15 +87,6 @@ typedef std::deque<OperationNode *> FlushQueue;
 namespace {
 
-void flush_init_operation_node_func(void *__restrict data_v,
-                                    const int i,
-                                    const TaskParallelTLS *__restrict /*tls*/)
-{
-  Depsgraph *graph = (Depsgraph *)data_v;
-  OperationNode *node = graph->operations[i];
-  node->scheduled = false;
-}
-
 void flush_init_id_node_func(void *__restrict data_v,
                              const int i,
                              const TaskParallelTLS *__restrict /*tls*/)
@@ -110,13 +101,10 @@ void flush_init_id_node_func(void *__restrict data_v,
 BLI_INLINE void flush_prepare(Depsgraph *graph)
 {
-  {
-    const int num_operations = graph->operations.size();
-    TaskParallelSettings settings;
-    BLI_parallel_range_settings_defaults(&settings);
-    settings.min_iter_per_thread = 1024;
-    BLI_task_parallel_range(0, num_operations, graph, flush_init_operation_node_func, &settings);
+  for (OperationNode *node : graph->operations) {
+    node->scheduled = false;
   }
   {
     const int num_id_nodes = graph->id_nodes.size();
     TaskParallelSettings settings;
@@ -395,27 +383,13 @@ void deg_graph_flush_updates(Main *bmain, Depsgraph *graph)
   invalidate_tagged_evaluated_data(graph);
 }
 
-static void graph_clear_operation_func(void *__restrict data_v,
-                                       const int i,
-                                       const TaskParallelTLS *__restrict /*tls*/)
-{
-  Depsgraph *graph = (Depsgraph *)data_v;
-  OperationNode *node = graph->operations[i];
-  /* Clear node's "pending update" settings. */
-  node->flag &= ~(DEPSOP_FLAG_DIRECTLY_MODIFIED | DEPSOP_FLAG_NEEDS_UPDATE |
-                  DEPSOP_FLAG_USER_MODIFIED);
-}
-
 /* Clear tags from all operation nodes. */
 void deg_graph_clear_tags(Depsgraph *graph)
 {
   /* Go over all operation nodes, clearing tags. */
-  {
-    const int num_operations = graph->operations.size();
-    TaskParallelSettings settings;
-    BLI_parallel_range_settings_defaults(&settings);
-    settings.min_iter_per_thread = 1024;
-    BLI_task_parallel_range(0, num_operations, graph, graph_clear_operation_func, &settings);
+  for (OperationNode *node : graph->operations) {
+    node->flag &= ~(DEPSOP_FLAG_DIRECTLY_MODIFIED | DEPSOP_FLAG_NEEDS_UPDATE |
+                    DEPSOP_FLAG_USER_MODIFIED);
   }
   /* Clear any entry tags which haven't been flushed. */
   BLI_gset_clear(graph->entry_tags, NULL);