Merge branch 'refactor-mesh-position-generic' into refactor-mesh-corners-generic

Hans Goudey 2022-12-11 23:56:50 -06:00
commit 8f773072a9
21 changed files with 190 additions and 185 deletions

View File

@ -218,7 +218,7 @@ harvest_rpath_lib(openvdb/lib openvdb/lib "*${SHAREDLIBEXT}*")
harvest_rpath_python(openvdb/lib/python${PYTHON_SHORT_VERSION} python/lib/python${PYTHON_SHORT_VERSION} "*pyopenvdb*")
harvest(xr_openxr_sdk/include/openxr xr_openxr_sdk/include/openxr "*.h")
harvest(xr_openxr_sdk/lib xr_openxr_sdk/lib "*.a")
harvest(osl/bin osl/bin "oslc")
harvest_rpath_bin(osl/bin osl/bin "oslc")
harvest(osl/include osl/include "*.h")
harvest(osl/lib osl/lib "*.a")
harvest(osl/share/OSL/shaders osl/share/OSL/shaders "*.h")

View File

@ -358,12 +358,12 @@ Array<Vector<int>> build_vert_to_loop_map(Span<int> corner_verts, int verts_num)
Array<Vector<int>> build_edge_to_loop_map(Span<int> corner_edges, int edges_num);
Vector<Vector<int>> build_edge_to_loop_map_resizable(Span<int> corner_edges, int edges_num);
inline int previous_poly_loop(const MPoly &poly, int loop_i)
inline int poly_loop_prev(const MPoly &poly, int loop_i)
{
return loop_i - 1 + (loop_i == poly.loopstart) * poly.totloop;
}
inline int next_poly_loop(const MPoly &poly, int loop_i)
inline int poly_loop_next(const MPoly &poly, int loop_i)
{
if (loop_i == poly.loopstart + poly.totloop - 1) {
return poly.loopstart;
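
The branchless wrap-around in poly_loop_prev() is easy to misread, so here is a minimal standalone sketch (with a hypothetical Poly struct standing in for MPoly) of how the comparison term restores the last corner when walking off the front of the polygon:

struct Poly {
  int loopstart; /* Index of the polygon's first corner. */
  int totloop;   /* Number of corners in the polygon. */
};

inline int poly_loop_prev(const Poly &poly, int loop_i)
{
  /* (loop_i == poly.loopstart) is 1 only at the first corner, where a plain
   * decrement would step out of the polygon; adding totloop wraps the result
   * to the last corner instead. */
  return loop_i - 1 + (loop_i == poly.loopstart) * poly.totloop;
}

/* For a quad with loopstart = 4 and totloop = 4 (corners 4..7):
 *   poly_loop_prev(poly, 6) == 5  (the common case, a plain decrement)
 *   poly_loop_prev(poly, 4) == 7  (wraps to the last corner) */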

View File

@ -668,6 +668,10 @@ void nodeUnlinkNode(struct bNodeTree *ntree, struct bNode *node);
* Find the first available, non-duplicate name for a given node.
*/
void nodeUniqueName(struct bNodeTree *ntree, struct bNode *node);
/**
* Create a new unique integer identifier for the node. Also set the node's
* index in the tree, which is an eagerly maintained cache.
*/
void nodeUniqueID(struct bNodeTree *ntree, struct bNode *node);
/**

View File

@ -251,12 +251,14 @@ class bNodeRuntime : NonCopyable, NonMovable {
/** List of cached internal links (input to output), for muted nodes and operators. */
Vector<bNodeLink *> internal_links;
/** Eagerly maintained cache of the node's index in the tree. */
int index_in_tree = -1;
/** Only valid if #topology_cache_is_dirty is false. */
Vector<bNodeSocket *> inputs;
Vector<bNodeSocket *> outputs;
Map<StringRefNull, bNodeSocket *> inputs_by_identifier;
Map<StringRefNull, bNodeSocket *> outputs_by_identifier;
int index_in_tree = -1;
bool has_available_linked_inputs = false;
bool has_available_linked_outputs = false;
Vector<bNode *> direct_children_in_frame;
@ -467,6 +469,15 @@ inline blender::Span<bNode *> bNodeTree::root_frames() const
/** \name #bNode Inline Methods
* \{ */
inline int bNode::index() const
{
const int index = this->runtime->index_in_tree;
/* The order of nodes should always be consistent with the `nodes_by_id` vector. */
BLI_assert(index ==
this->runtime->owner_tree->runtime->nodes_by_id.index_of_as(this->identifier));
return index;
}
inline blender::Span<bNodeSocket *> bNode::input_sockets()
{
BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this));

View File

@ -605,7 +605,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
/* For every corner, mix the values from the adjacent edges on the face. */
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::previous_poly_loop(poly, loop_index);
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int edge_i = corner_edges[loop_index];
const int edge_i_prev = corner_edges[loop_index_prev];
mixer.mix_in(loop_index, old_values[edge_i]);
@ -632,7 +632,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
for (const int poly_index : range) {
const MPoly &poly = polys[poly_index];
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::previous_poly_loop(poly, loop_index);
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int edge_i = corner_edges[loop_index];
const int edge_i_prev = corner_edges[loop_index_prev];
if (old_values[edge_i] && old_values[edge_i_prev]) {

View File

@ -27,6 +27,7 @@
#include "BLI_stack.h"
#include "BLI_task.h"
#include "BLI_task.hh"
#include "BLI_timeit.hh"
#include "BLI_utildefines.h"
#include "BKE_customdata.h"
@ -744,22 +745,20 @@ void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space,
#define LOOP_SPLIT_TASK_BLOCK_SIZE 1024
struct LoopSplitTaskData {
/* Specific to each instance (each task). */
enum class Type : int8_t {
BlockEnd = 0, /* Set implicitly by calloc. */
Fan = 1,
Single = 2,
};
/** We have to create those outside of tasks, since #MemArena is not thread-safe. */
MLoopNorSpace *lnor_space;
float3 *lnor;
int ml_curr_index;
int ml_prev_index;
/** Also used as a flag to switch between single and fan processing! */
const int *e2l_prev;
int ml_prev_index;
int mp_index;
/** This one is special: it's owned and managed by worker tasks,
* to avoid having to create it for each fan! */
BLI_Stack *edge_vectors;
char pad_c;
Type flag;
};
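
The BlockEnd = 0 value is the crux of this enum: task-data blocks are allocated zeroed, so unfilled trailing entries decode as the stopper without ever being written. A minimal sketch of the idea, using hypothetical TaskData and process_block names:

enum class Type : int8_t { BlockEnd = 0, Fan = 1, Single = 2 };

struct TaskData {
  Type flag;
  /* ... */
};

void process_block(const TaskData *block, const int block_size)
{
  for (int i = 0; i < block_size; i++) {
    /* Entries past the last one filled still hold the calloc'ed zero,
     * i.e. Type::BlockEnd, which terminates the loop. */
    if (block[i].flag == Type::BlockEnd) {
      break;
    }
    /* ... dispatch Fan / Single work ... */
  }
}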
struct LoopSplitTaskDataCommon {
@ -776,7 +775,7 @@ struct LoopSplitTaskDataCommon {
Span<int> corner_verts;
Span<int> corner_edges;
Span<MPoly> polys;
MutableSpan<int2> edge_to_loops;
Span<int2> edge_to_loops;
Span<int> loop_to_poly;
Span<float3> polynors;
Span<float3> vert_normals;
@ -910,7 +909,8 @@ static void loop_manifold_fan_around_vert_next(const Span<int> corner_verts,
int *r_mlfan_vert_index,
int *r_mpfan_curr_index)
{
const int fan_vert_curr = corner_verts[*r_mlfan_curr_index];
const int mlfan_curr_orig = *r_mlfan_curr_index;
const uint vert_fan_orig = corner_verts[mlfan_curr_orig];
/* WARNING: This is rather complex!
* We have to find our next edge around the vertex (fan mode).
@ -925,12 +925,10 @@ static void loop_manifold_fan_around_vert_next(const Span<int> corner_verts,
BLI_assert(*r_mlfan_curr_index >= 0);
BLI_assert(*r_mpfan_curr_index >= 0);
const int fan_vert_next = corner_verts[*r_mlfan_curr_index];
const uint vert_fan_next = corner_verts[*r_mlfan_curr_index];
const MPoly &mpfan_next = polys[*r_mpfan_curr_index];
if ((fan_vert_curr == fan_vert_next && fan_vert_curr == mv_pivot_index) ||
(fan_vert_curr != fan_vert_next && fan_vert_curr != mv_pivot_index)) {
if ((vert_fan_orig == vert_fan_next && vert_fan_orig == mv_pivot_index) ||
(vert_fan_orig != vert_fan_next && vert_fan_orig != mv_pivot_index)) {
/* We need the previous loop, but the current one is our vertex's loop. */
*r_mlfan_vert_index = *r_mlfan_curr_index;
if (--(*r_mlfan_curr_index) < mpfan_next.loopstart) {
@ -953,30 +951,26 @@ static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopS
const Span<float3> positions = common_data->positions;
const Span<MEdge> edges = common_data->edges;
const Span<float3> polynors = common_data->polynors;
const Span<int> corner_verts = common_data->corner_verts;
const Span<int> corner_edges = common_data->corner_edges;
const Span<float3> polynors = common_data->polynors;
MutableSpan<float3> loop_normals = common_data->loopnors;
MLoopNorSpace *lnor_space = data->lnor_space;
float3 *lnor = data->lnor;
const int ml_curr_index = data->ml_curr_index;
const int ml_prev_index = data->ml_prev_index;
#if 0 /* Not needed for 'single' loop. */
const int ml_prev_index = data->ml_prev_index;
const int *e2l_prev = data->e2l_prev;
#endif
const int mp_index = data->mp_index;
/* Simple case (both edges around that vertex are sharp in the current polygon):
* this loop just takes its poly normal.
*/
copy_v3_v3(*lnor, polynors[mp_index]);
loop_normals[ml_curr_index] = polynors[mp_index];
#if 0
printf("BASIC: handling loop %d / edge %d / vert %d / poly %d\n",
ml_curr_index,
ml_curr->e,
ml_curr->v,
loops[ml_curr_index].e,
loops[ml_curr_index].v,
mp_index);
#endif
@ -984,32 +978,32 @@ static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopS
if (lnors_spacearr) {
float vec_curr[3], vec_prev[3];
/* The vertex we are "fanning" around! */
const int mv_pivot_index = corner_verts[ml_curr_index];
const float3 &mv_pivot = positions[mv_pivot_index];
const uint mv_pivot_index =
corner_verts[ml_curr_index]; /* The vertex we are "fanning" around! */
const MEdge *me_curr = &edges[corner_edges[ml_curr_index]];
const float3 &mv_2 = (me_curr->v1 == mv_pivot_index) ? positions[me_curr->v2] :
positions[me_curr->v1];
const int vert_2 = me_curr->v1 == mv_pivot_index ? me_curr->v2 : me_curr->v1;
const MEdge *me_prev = &edges[corner_edges[ml_prev_index]];
const float3 &mv_3 = (me_prev->v1 == mv_pivot_index) ? positions[me_prev->v2] :
positions[me_prev->v1];
const int vert_3 = me_prev->v1 == mv_pivot_index ? me_prev->v2 : me_prev->v1;
sub_v3_v3v3(vec_curr, mv_2, mv_pivot);
sub_v3_v3v3(vec_curr, positions[vert_2], positions[mv_pivot_index]);
normalize_v3(vec_curr);
sub_v3_v3v3(vec_prev, mv_3, mv_pivot);
sub_v3_v3v3(vec_prev, positions[vert_3], positions[mv_pivot_index]);
normalize_v3(vec_prev);
BKE_lnor_space_define(lnor_space, *lnor, vec_curr, vec_prev, nullptr);
BKE_lnor_space_define(lnor_space, loop_normals[ml_curr_index], vec_curr, vec_prev, nullptr);
/* We know there is only one loop in this space, no need to create a linked list in this case. */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_curr_index, nullptr, true);
if (!clnors_data.is_empty()) {
BKE_lnor_space_custom_data_to_normal(lnor_space, clnors_data[ml_curr_index], *lnor);
BKE_lnor_space_custom_data_to_normal(
lnor_space, clnors_data[ml_curr_index], loop_normals[ml_curr_index]);
}
}
}
static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
LoopSplitTaskData *data,
BLI_Stack *edge_vectors)
{
MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
MutableSpan<float3> loopnors = common_data->loopnors;
@ -1031,9 +1025,6 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
const int ml_curr_index = data->ml_curr_index;
const int ml_prev_index = data->ml_prev_index;
const int mp_index = data->mp_index;
const int *e2l_prev = data->e2l_prev;
BLI_Stack *edge_vectors = data->edge_vectors;
/* Sigh! We have to fan around the current vertex, until we find the other non-smooth edge,
* and accumulate face normals into the vertex!
@ -1041,10 +1032,10 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
* same as the vertex normal, but I do not see any easy way to detect that (would need to count
* number of sharp edges per vertex, I doubt the additional memory usage would be worth it,
* especially as it should not be a common case in real-life meshes anyway). */
const int mv_pivot_index = corner_verts[ml_curr_index]; /* The vertex we are "fanning" around! */
const float3 &mv_pivot = positions[mv_pivot_index];
const uint mv_pivot_index =
corner_verts[ml_curr_index]; /* The vertex we are "fanning" around! */
/* `ml_curr` would be mlfan_prev if we needed that one. */
/* `ml_curr_index` would be mlfan_prev if we needed that one. */
const MEdge *me_org = &edges[corner_edges[ml_curr_index]];
float vec_curr[3], vec_prev[3], vec_org[3];
@ -1061,7 +1052,6 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
/* Temp clnors stack. */
BLI_SMALLSTACK_DECLARE(clnors, short *);
const int *e2lfan_curr = e2l_prev;
/* `mlfan_vert_index`: the loop of our current edge might not be the loop of our current vertex!
*/
int mlfan_curr_index = ml_prev_index;
@ -1077,7 +1067,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
const float3 &mv_2 = (me_org->v1 == mv_pivot_index) ? positions[me_org->v2] :
positions[me_org->v1];
sub_v3_v3v3(vec_org, mv_2, mv_pivot);
sub_v3_v3v3(vec_org, mv_2, positions[mv_pivot_index]);
normalize_v3(vec_org);
copy_v3_v3(vec_prev, vec_org);
@ -1099,7 +1089,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
const float3 &mv_2 = (me_curr->v1 == mv_pivot_index) ? positions[me_curr->v2] :
positions[me_curr->v1];
sub_v3_v3v3(vec_curr, mv_2, mv_pivot);
sub_v3_v3v3(vec_curr, mv_2, positions[mv_pivot_index]);
normalize_v3(vec_curr);
}
@ -1141,7 +1131,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
}
}
if (IS_EDGE_SHARP(e2lfan_curr) || (me_curr == me_org)) {
if (IS_EDGE_SHARP(edge_to_loops[corner_edges[mlfan_curr_index]]) || (me_curr == me_org)) {
/* Current edge is sharp and we have finished with this fan of faces around this vert,
* or this vert is smooth, and we have completed a full turn around it. */
// printf("FAN: Finished!\n");
@ -1154,13 +1144,11 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, LoopSpli
loop_manifold_fan_around_vert_next(corner_verts,
polys,
loop_to_poly,
e2lfan_curr,
edge_to_loops[corner_edges[mlfan_curr_index]],
mv_pivot_index,
&mlfan_curr_index,
&mlfan_vert_index,
&mpfan_curr_index);
e2lfan_curr = edge_to_loops[corner_edges[mlfan_curr_index]];
}
{
@ -1220,10 +1208,9 @@ static void loop_split_worker_do(LoopSplitTaskDataCommon *common_data,
LoopSplitTaskData *data,
BLI_Stack *edge_vectors)
{
if (data->e2l_prev) {
if (data->flag == LoopSplitTaskData::Type::Fan) {
BLI_assert((edge_vectors == nullptr) || BLI_stack_is_empty(edge_vectors));
data->edge_vectors = edge_vectors;
split_loop_nor_fan_do(common_data, data);
split_loop_nor_fan_do(common_data, data, edge_vectors);
}
else {
/* No need for edge_vectors for 'single' case! */
@ -1242,8 +1229,7 @@ static void loop_split_worker(TaskPool *__restrict pool, void *taskdata)
nullptr;
for (int i = 0; i < LOOP_SPLIT_TASK_BLOCK_SIZE; i++, data++) {
/* A -1 ml_curr_index is used to tag ended data! */
if (data->ml_curr_index == -1) {
if (data->flag == LoopSplitTaskData::Type::BlockEnd) {
break;
}
loop_split_worker_do(common_data, data, edge_vectors);
@ -1270,7 +1256,8 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<int> corner_
const int ml_prev_index,
const int mp_curr_index)
{
const int mv_pivot_index = corner_verts[ml_curr_index]; /* The vertex we are "fanning" around! */
/* The vertex we are "fanning" around! */
const uint mv_pivot_index = corner_verts[ml_curr_index];
const int *e2lfan_curr = e2l_prev;
if (IS_EDGE_SHARP(e2lfan_curr)) {
@ -1312,7 +1299,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<int> corner_
if (skip_loops[mlfan_vert_index]) {
if (mlfan_vert_index == ml_curr_index) {
/* We walked around a whole cyclic smooth fan without finding any already-processed loop,
* means we can use initial `ml_curr` / `ml_prev` edge as start for this smooth fan. */
* which means we can use the initial current / previous edge as the start for this smooth fan. */
return true;
}
/* Already checked in some previous looping, we can abort. */
@ -1326,8 +1313,9 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<int> corner_
static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common_data)
{
using namespace blender;
using namespace blender::bke;
MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
MutableSpan<float3> loopnors = common_data->loopnors;
const Span<int> corner_verts = common_data->corner_verts;
const Span<int> corner_edges = common_data->corner_edges;
@ -1359,22 +1347,16 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
*/
for (const int mp_index : polys.index_range()) {
const MPoly &poly = polys[mp_index];
const int ml_last_index = (poly.loopstart + poly.totloop) - 1;
int ml_curr_index = poly.loopstart;
int ml_prev_index = ml_last_index;
float3 *lnors = &loopnors[ml_curr_index];
for (; ml_curr_index <= ml_last_index; ml_curr_index++, lnors++) {
const int *e2l_curr = edge_to_loops[corner_edges[ml_curr_index]];
const int *e2l_prev = edge_to_loops[corner_edges[ml_prev_index]];
for (const int ml_curr_index : IndexRange(poly.loopstart, poly.totloop)) {
const int ml_prev_index = mesh_topology::poly_loop_prev(poly, ml_curr_index);
#if 0
printf("Checking loop %d / edge %u / vert %u (sharp edge: %d, skiploop: %d)",
ml_curr_index,
corner_edges[ml_curr_index],
corner_verts[ml_curr_index],
IS_EDGE_SHARP(e2l_curr),
loops[ml_curr_index].e,
loops[ml_curr_index].v,
IS_EDGE_SHARP(edge_to_loops[loops[ml_curr_index].e]),
skip_loops[ml_curr_index]);
#endif
@ -1388,17 +1370,18 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
* However, this would complicate the code, add more memory usage, and despite its logical
* complexity, #loop_manifold_fan_around_vert_next() is quite cheap in terms of CPU cycles,
* so I really think it's not worth it. */
if (!IS_EDGE_SHARP(e2l_curr) && (skip_loops[ml_curr_index] ||
!loop_split_generator_check_cyclic_smooth_fan(corner_verts,
corner_edges,
polys,
edge_to_loops,
loop_to_poly,
e2l_prev,
skip_loops,
ml_curr_index,
ml_prev_index,
mp_index))) {
if (!IS_EDGE_SHARP(edge_to_loops[corner_edges[ml_curr_index]]) &&
(skip_loops[ml_curr_index] || !loop_split_generator_check_cyclic_smooth_fan(
corner_verts,
corner_edges,
polys,
edge_to_loops,
loop_to_poly,
edge_to_loops[corner_edges[ml_prev_index]],
skip_loops,
ml_curr_index,
ml_prev_index,
mp_index))) {
// printf("SKIPPING!\n");
}
else {
@ -1422,34 +1405,27 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
memset(data, 0, sizeof(*data));
}
if (IS_EDGE_SHARP(e2l_curr) && IS_EDGE_SHARP(e2l_prev)) {
data->lnor = lnors;
if (IS_EDGE_SHARP(edge_to_loops[corner_edges[ml_curr_index]]) &&
IS_EDGE_SHARP(edge_to_loops[corner_edges[ml_prev_index]])) {
data->ml_curr_index = ml_curr_index;
#if 0 /* Not needed for 'single' loop. */
data->ml_prev_index = ml_prev_index;
data->e2l_prev = nullptr; /* Tag as 'single' task. */
#endif
data->flag = LoopSplitTaskData::Type::Single;
data->mp_index = mp_index;
if (lnors_spacearr) {
data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
}
}
/* We *do not need* to check/tag loops as already computed!
* Due to the fact a loop only links to one of its two edges,
* a same fan *will never be walked more than once!*
* Since we consider edges having neighbor polys with inverted
* (flipped) normals as sharp, we are sure that no fan will be skipped,
* even only considering the case (sharp curr_edge, smooth prev_edge),
* and not the alternative (smooth curr_edge, sharp prev_edge).
* All this due/thanks to link between normals and loop ordering (i.e. winding).
*/
else {
#if 0 /* Not needed for 'fan' loops. */
data->lnor = lnors;
#endif
/* We do not need to check/tag loops as already computed. Due to the fact that a loop
* only points to one of its two edges, the same fan will never be walked more than once.
* Since we consider edges that have neighbor polys with inverted (flipped) normals as
* sharp, we are sure that no fan will be skipped, even only considering the case (sharp
* current edge, smooth previous edge), and not the alternative (smooth current edge,
* sharp previous edge). All this due/thanks to the link between normals and loop
* ordering (i.e. winding). */
data->ml_curr_index = ml_curr_index;
data->ml_prev_index = ml_prev_index;
data->e2l_prev = e2l_prev; /* Also tag as 'fan' task. */
data->flag = LoopSplitTaskData::Type::Fan;
data->mp_index = mp_index;
if (lnors_spacearr) {
data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
@ -1467,13 +1443,9 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
loop_split_worker_do(common_data, data, edge_vectors);
}
}
ml_prev_index = ml_curr_index;
}
}
/* Last block of data. Since it is calloc'ed and the zeroed `Type::BlockEnd` flag is used as
* the stopper, everything is fine. */
if (pool && data_idx) {
BLI_task_pool_push(pool, loop_split_worker, data_buff, true, nullptr);
}

View File

@ -145,11 +145,13 @@ static void ntree_copy_data(Main * /*bmain*/, ID *id_dst, const ID *id_src, cons
dst_runtime.nodes_by_id.reserve(ntree_src->all_nodes().size());
BLI_listbase_clear(&ntree_dst->nodes);
LISTBASE_FOREACH (const bNode *, src_node, &ntree_src->nodes) {
int i;
LISTBASE_FOREACH_INDEX (const bNode *, src_node, &ntree_src->nodes, i) {
/* Don't find a unique name for every node, since they should have valid names already. */
bNode *new_node = blender::bke::node_copy_with_mapping(
ntree_dst, *src_node, flag_subdata, false, socket_map);
dst_runtime.nodes_by_id.add_new(new_node);
new_node->runtime->index_in_tree = i;
}
/* copy links */
@ -673,9 +675,11 @@ void ntreeBlendReadData(BlendDataReader *reader, ID *owner_id, bNodeTree *ntree)
BKE_animdata_blend_read_data(reader, ntree->adt);
BLO_read_list(reader, &ntree->nodes);
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
int i;
LISTBASE_FOREACH_INDEX (bNode *, node, &ntree->nodes, i) {
node->runtime = MEM_new<bNodeRuntime>(__func__);
node->typeinfo = nullptr;
node->runtime->index_in_tree = i;
/* Create the `nodes_by_id` cache eagerly so it can be expected to be valid. Because
* we create it here we also have to check for zero identifiers from previous versions. */
@ -1373,8 +1377,7 @@ void nodeRegisterType(bNodeType *nt)
if (nt->declare && !nt->declaration_is_dynamic) {
if (nt->fixed_declaration == nullptr) {
nt->fixed_declaration = new blender::nodes::NodeDeclaration();
blender::nodes::NodeDeclarationBuilder builder{*nt->fixed_declaration};
nt->declare(builder);
blender::nodes::build_node_declaration(*nt, *nt->fixed_declaration);
}
}
@ -2198,6 +2201,8 @@ void nodeUniqueID(bNodeTree *ntree, bNode *node)
node->identifier = new_id;
ntree->runtime->nodes_by_id.add_new(node);
node->runtime->index_in_tree = ntree->runtime->nodes_by_id.index_range().last();
BLI_assert(node->runtime->index_in_tree == ntree->runtime->nodes_by_id.index_of(node));
}
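
The new line above relies on add_new() appending: a freshly inserted node necessarily sits at the last index of nodes_by_id, so caching index_range().last() is free, while the assert re-derives the index the expensive way in debug builds. A minimal analogue with a plain blender::VectorSet (a hypothetical helper, not part of this commit):

#include "BLI_assert.h"
#include "BLI_vector_set.hh"

void add_with_cached_index(blender::VectorSet<int> &set, const int key, int &r_index)
{
  set.add_new(key);
  /* The appended key is always last; cache that position eagerly. */
  r_index = set.index_range().last();
  BLI_assert(r_index == set.index_of(key));
}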
bNode *nodeAddNode(const bContext *C, bNodeTree *ntree, const char *idname)
@ -2937,8 +2942,10 @@ void nodeRebuildIDVector(bNodeTree *node_tree)
{
/* Rebuild the nodes #VectorSet, which must have the same order as the list. */
node_tree->runtime->nodes_by_id.clear();
LISTBASE_FOREACH (bNode *, node, &node_tree->nodes) {
int i;
LISTBASE_FOREACH_INDEX (bNode *, node, &node_tree->nodes, i) {
node_tree->runtime->nodes_by_id.add_new(node);
node->runtime->index_in_tree = i;
}
}
@ -3607,8 +3614,7 @@ bool nodeDeclarationEnsureOnOutdatedNode(bNodeTree * /*ntree*/, bNode *node)
}
if (node->typeinfo->declaration_is_dynamic) {
node->runtime->declaration = new blender::nodes::NodeDeclaration();
blender::nodes::NodeDeclarationBuilder builder{*node->runtime->declaration};
node->typeinfo->declare(builder);
blender::nodes::build_node_declaration(*node->typeinfo, *node->runtime->declaration);
}
else {
/* Declaration should have been created in #nodeRegisterType. */

View File

@ -278,7 +278,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
Stack<Item, 64> nodes_to_check;
nodes_to_check.push({&start_node});
node_states[start_node.runtime->index_in_tree].is_in_stack = true;
node_states[start_node.index()].is_in_stack = true;
while (!nodes_to_check.is_empty()) {
Item &item = nodes_to_check.peek();
bNode &node = *item.node;
@ -306,7 +306,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
}
bNodeSocket &linked_socket = *socket.runtime->directly_linked_sockets[item.link_index];
bNode &linked_node = *linked_socket.runtime->owner_node;
ToposortNodeState &linked_node_state = node_states[linked_node.runtime->index_in_tree];
ToposortNodeState &linked_node_state = node_states[linked_node.index()];
if (linked_node_state.is_done) {
/* The linked node has already been visited. */
item.link_index++;
@ -324,7 +324,7 @@ static void toposort_from_start_node(const ToposortDirection direction,
/* If no other element has been pushed, the current node can be pushed to the sorted list. */
if (&item == &nodes_to_check.peek()) {
ToposortNodeState &node_state = node_states[node.runtime->index_in_tree];
ToposortNodeState &node_state = node_states[node.index()];
node_state.is_done = true;
node_state.is_in_stack = false;
r_sorted_nodes.append(&node);
@ -345,7 +345,7 @@ static void update_toposort(const bNodeTree &ntree,
Array<ToposortNodeState> node_states(tree_runtime.nodes_by_id.size());
for (bNode *node : tree_runtime.nodes_by_id) {
if (node_states[node->runtime->index_in_tree].is_done) {
if (node_states[node->index()].is_done) {
/* Ignore nodes that are done already. */
continue;
}
@ -361,7 +361,7 @@ static void update_toposort(const bNodeTree &ntree,
if (r_sorted_nodes.size() < tree_runtime.nodes_by_id.size()) {
r_cycle_detected = true;
for (bNode *node : tree_runtime.nodes_by_id) {
if (node_states[node->runtime->index_in_tree].is_done) {
if (node_states[node->index()].is_done) {
/* Ignore nodes that are done already. */
continue;
}

View File

@ -56,13 +56,6 @@ static InputSocketFieldType get_interface_input_field_type(const bNode &node,
/* Get the field type from the declaration. */
const SocketDeclaration &socket_decl = *node_decl->inputs()[socket.index()];
const InputSocketFieldType field_type = socket_decl.input_field_type();
if (field_type == InputSocketFieldType::Implicit) {
return field_type;
}
if (node_decl->is_function_node()) {
/* In a function node, every socket supports fields. */
return InputSocketFieldType::IsSupported;
}
return field_type;
}
@ -93,11 +86,6 @@ static OutputFieldDependency get_interface_output_field_dependency(const bNode &
/* Node declarations should be implemented for nodes involved here. */
BLI_assert(node_decl != nullptr);
if (node_decl->is_function_node()) {
/* In a generic function node, all outputs depend on all inputs. */
return OutputFieldDependency::ForDependentField();
}
/* Use the socket declaration. */
const SocketDeclaration &socket_decl = *node_decl->outputs()[socket.index()];
return socket_decl.output_field_dependency();

View File

@ -492,9 +492,12 @@ class NodeTreeMainUpdater {
#ifdef DEBUG
/* Check the uniqueness of node identifiers. */
Set<int32_t> node_identifiers;
for (bNode *node : ntree.all_nodes()) {
BLI_assert(node->identifier > 0);
node_identifiers.add_new(node->identifier);
const Span<const bNode *> nodes = ntree.all_nodes();
for (const int i : nodes.index_range()) {
const bNode &node = *nodes[i];
BLI_assert(node.identifier > 0);
node_identifiers.add_new(node.identifier);
BLI_assert(node.runtime->index_in_tree == i);
}
#endif
@ -761,15 +764,14 @@ class NodeTreeMainUpdater {
Array<int> toposort_indices(toposort.size());
for (const int i : toposort.index_range()) {
const bNode &node = *toposort[i];
toposort_indices[node.runtime->index_in_tree] = i;
toposort_indices[node.index()] = i;
}
LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
link->flag |= NODE_LINK_VALID;
const bNode &from_node = *link->fromnode;
const bNode &to_node = *link->tonode;
if (toposort_indices[from_node.runtime->index_in_tree] >
toposort_indices[to_node.runtime->index_in_tree]) {
if (toposort_indices[from_node.index()] > toposort_indices[to_node.index()]) {
link->flag &= ~NODE_LINK_VALID;
continue;
}
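
Put differently, a link is valid only if its source node precedes its destination in the toposort. A quick worked example with three hypothetical nodes:

/* Toposort order [A, B, C] yields toposort_indices: A -> 0, B -> 1, C -> 2.
 *   Link A -> C: 0 > 2 is false, so NODE_LINK_VALID stays set.
 *   Link C -> B: 2 > 1 is true, a backwards edge, so NODE_LINK_VALID
 *   is cleared and the link is skipped. */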

View File

@ -276,13 +276,22 @@ static void do_version_hue_sat_node(bNodeTree *ntree, bNode *node)
return;
}
/* Make sure new sockets are properly created. */
node_verify_sockets(ntree, node, false);
/* Convert value from old storage to new sockets. */
NodeHueSat *nhs = node->storage;
bNodeSocket *hue = nodeFindSocket(node, SOCK_IN, "Hue"),
*saturation = nodeFindSocket(node, SOCK_IN, "Saturation"),
*value = nodeFindSocket(node, SOCK_IN, "Value");
bNodeSocket *hue = nodeFindSocket(node, SOCK_IN, "Hue");
bNodeSocket *saturation = nodeFindSocket(node, SOCK_IN, "Saturation");
bNodeSocket *value = nodeFindSocket(node, SOCK_IN, "Value");
if (hue == NULL) {
hue = nodeAddStaticSocket(ntree, node, SOCK_IN, SOCK_FLOAT, PROP_FACTOR, "Hue", "Hue");
}
if (saturation == NULL) {
saturation = nodeAddStaticSocket(
ntree, node, SOCK_IN, SOCK_FLOAT, PROP_FACTOR, "Saturation", "Saturation");
}
if (value == NULL) {
value = nodeAddStaticSocket(ntree, node, SOCK_IN, SOCK_FLOAT, PROP_FACTOR, "Value", "Value");
}
((bNodeSocketValueFloat *)hue->default_value)->value = nhs->hue;
((bNodeSocketValueFloat *)saturation->default_value)->value = nhs->sat;
((bNodeSocketValueFloat *)value->default_value)->value = nhs->val;

View File

@ -268,6 +268,7 @@ void node_sort(bNodeTree &ntree)
for (const int i : sort_nodes.index_range()) {
BLI_addtail(&ntree.nodes, sort_nodes[i]);
ntree.runtime->nodes_by_id.add_new(sort_nodes[i]);
sort_nodes[i]->runtime->index_in_tree = i;
}
}

View File

@ -1641,31 +1641,31 @@ static void node_join_attach_recursive(bNodeTree &ntree,
bNode *frame,
const VectorSet<bNode *> &selected_nodes)
{
join_states[node->runtime->index_in_tree].done = true;
join_states[node->index()].done = true;
if (node == frame) {
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
else if (node->parent) {
/* call recursively */
if (!join_states[node->parent->runtime->index_in_tree].done) {
if (!join_states[node->parent->index()].done) {
node_join_attach_recursive(ntree, join_states, node->parent, frame, selected_nodes);
}
/* in any case: if the parent is a descendant, so is the child */
if (join_states[node->parent->runtime->index_in_tree].descendent) {
join_states[node->runtime->index_in_tree].descendent = true;
if (join_states[node->parent->index()].descendent) {
join_states[node->index()].descendent = true;
}
else if (selected_nodes.contains(node)) {
/* if parent is not a descendant of the frame, reattach the node */
nodeDetachNode(&ntree, node);
nodeAttachNode(&ntree, node, frame);
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
}
else if (selected_nodes.contains(node)) {
nodeAttachNode(&ntree, node, frame);
join_states[node->runtime->index_in_tree].descendent = true;
join_states[node->index()].descendent = true;
}
}
@ -1685,7 +1685,7 @@ static int node_join_exec(bContext *C, wmOperator * /*op*/)
Array<NodeJoinState> join_states(ntree.all_nodes().size(), NodeJoinState{false, false});
for (bNode *node : ntree.all_nodes()) {
if (!join_states[node->runtime->index_in_tree].done) {
if (!join_states[node->index()].done) {
node_join_attach_recursive(ntree, join_states, node, frame_node, selected_nodes);
}
}
@ -1818,26 +1818,26 @@ static void node_detach_recursive(bNodeTree &ntree,
MutableSpan<NodeDetachstate> detach_states,
bNode *node)
{
detach_states[node->runtime->index_in_tree].done = true;
detach_states[node->index()].done = true;
if (node->parent) {
/* call recursively */
if (!detach_states[node->parent->runtime->index_in_tree].done) {
if (!detach_states[node->parent->index()].done) {
node_detach_recursive(ntree, detach_states, node->parent);
}
/* in any case: if the parent is a descendant, so is the child */
if (detach_states[node->parent->runtime->index_in_tree].descendent) {
detach_states[node->runtime->index_in_tree].descendent = true;
if (detach_states[node->parent->index()].descendent) {
detach_states[node->index()].descendent = true;
}
else if (node->flag & NODE_SELECT) {
/* if parent is not a descendant of a selected node, detach */
nodeDetachNode(&ntree, node);
detach_states[node->runtime->index_in_tree].descendent = true;
detach_states[node->index()].descendent = true;
}
}
else if (node->flag & NODE_SELECT) {
detach_states[node->runtime->index_in_tree].descendent = true;
detach_states[node->index()].descendent = true;
}
}
@ -1853,7 +1853,7 @@ static int node_detach_exec(bContext *C, wmOperator * /*op*/)
* relative order is preserved here!
*/
for (bNode *node : ntree.all_nodes()) {
if (!detach_states[node->runtime->index_in_tree].done) {
if (!detach_states[node->index()].done) {
node_detach_recursive(ntree, detach_states, node);
}
}

View File

@ -1267,7 +1267,7 @@ static int node_select_same_type_step_exec(bContext *C, wmOperator *op)
}
}
bNode *new_active_node = node_tree.all_nodes()[toposort[new_index]->runtime->index_in_tree];
bNode *new_active_node = node_tree.all_nodes()[toposort[new_index]->index()];
if (new_active_node == &active_node) {
return OPERATOR_CANCELLED;
}

View File

@ -361,8 +361,7 @@ static Vector<NodeLinkItem> ui_node_link_items(NodeLinkArg *arg,
using namespace blender::nodes;
r_node_decl.emplace(NodeDeclaration());
NodeDeclarationBuilder node_decl_builder{*r_node_decl};
arg->node_type->declare(node_decl_builder);
blender::nodes::build_node_declaration(*arg->node_type, *r_node_decl);
Span<SocketDeclarationPtr> socket_decls = (in_out == SOCK_IN) ? r_node_decl->inputs() :
r_node_decl->outputs();
int index = 0;

View File

@ -358,6 +358,8 @@ typedef struct bNode {
bNodeRuntimeHandle *runtime;
#ifdef __cplusplus
/** The index in the owner node tree. */
int index() const;
blender::StringRefNull label_or_name() const;
bool is_muted() const;
bool is_reroute() const;

View File

@ -309,7 +309,6 @@ class NodeDeclaration {
private:
Vector<SocketDeclarationPtr> inputs_;
Vector<SocketDeclarationPtr> outputs_;
bool is_function_node_ = false;
friend NodeDeclarationBuilder;
@ -320,11 +319,6 @@ class NodeDeclaration {
Span<SocketDeclarationPtr> outputs() const;
Span<SocketDeclarationPtr> sockets(eNodeSocketInOut in_out) const;
bool is_function_node() const
{
return is_function_node_;
}
MEM_CXX_CLASS_ALLOC_FUNCS("NodeDeclaration")
};
@ -332,22 +326,22 @@ class NodeDeclarationBuilder {
private:
NodeDeclaration &declaration_;
Vector<std::unique_ptr<BaseSocketDeclarationBuilder>> builders_;
bool is_function_node_ = false;
public:
NodeDeclarationBuilder(NodeDeclaration &declaration);
/**
* All inputs support fields, and all outputs are fields if any of the inputs is a field.
* Calling field status definitions on each socket is unnecessary. Must be called before adding
* any sockets.
* Calling field status definitions on each socket is unnecessary.
*/
void is_function_node(bool value = true)
void is_function_node()
{
BLI_assert_msg(declaration_.inputs().is_empty() && declaration_.outputs().is_empty(),
"is_function_node() must be called before any socket is created");
declaration_.is_function_node_ = value;
is_function_node_ = true;
}
void finalize();
template<typename DeclType>
typename DeclType::Builder &add_input(StringRef name, StringRef identifier = "");
template<typename DeclType>
@ -367,6 +361,8 @@ void index(const bNode &node, void *r_value);
void id_or_index(const bNode &node, void *r_value);
} // namespace implicit_field_inputs
void build_node_declaration(const bNodeType &typeinfo, NodeDeclaration &r_declaration);
/* -------------------------------------------------------------------- */
/** \name #OutputFieldDependency Inline Methods
* \{ */
@ -551,10 +547,6 @@ inline typename DeclType::Builder &NodeDeclarationBuilder::add_socket(StringRef
socket_decl->name_ = name;
socket_decl->identifier_ = identifier.is_empty() ? name : identifier;
socket_decl->in_out_ = in_out;
if (declaration_.is_function_node()) {
socket_decl->input_field_type_ = InputSocketFieldType::IsSupported;
socket_decl->output_field_dependency_ = OutputFieldDependency::ForDependentField();
}
declarations.append(std::move(socket_decl));
Builder &socket_decl_builder_ref = *socket_decl_builder;
builders_.append(std::move(socket_decl_builder));

View File

@ -339,7 +339,7 @@ class GlareOperation : public NodeOperation {
* --------------- */
/* Not yet implemented. Unreachable code due to the is_identity method. */
Result execute_fog_glow(Result &highlights_result)
Result execute_fog_glow(Result & /*highlights_result*/)
{
BLI_assert_unreachable();
return Result(ResultType::Color, texture_pool());
@ -350,7 +350,7 @@ class GlareOperation : public NodeOperation {
* -------------- */
/* Not yet implemented. Unreachable code due to the is_identity method. */
Result execute_streaks(Result &highlights_result)
Result execute_streaks(Result & /*highlights_result*/)
{
BLI_assert_unreachable();
return Result(ResultType::Color, texture_pool());

View File

@ -255,8 +255,8 @@ static int adjacent_edge(const Span<int> corner_verts,
const int vertex)
{
const int adjacent_loop_i = (corner_verts[loop_i] == vertex) ?
bke::mesh_topology::previous_poly_loop(poly, loop_i) :
bke::mesh_topology::next_poly_loop(poly, loop_i);
bke::mesh_topology::poly_loop_prev(poly, loop_i) :
bke::mesh_topology::poly_loop_next(poly, loop_i);
return corner_edges[adjacent_loop_i];
}
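
A worked example of the ternary, assuming a hypothetical quad where corner i stores the edge running from corner i to corner i + 1:

/* corner_verts = {5, 6, 7, 8}, corners 0..3. For vertex 6 at loop_i = 1
 * (corner_verts[1] == 6), the edge stored at corner 1 runs 6 -> 7, so the
 * other edge adjacent to vertex 6 is the previous corner's edge 5 -> 6:
 * poly_loop_prev() is taken. When corner_verts[loop_i] != vertex, the
 * corner's edge instead ends at the vertex, and the next corner's edge is
 * the other adjacent one. */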

View File

@ -81,10 +81,8 @@ class CornerPreviousEdgeFieldInput final : public bke::MeshFieldInput {
return VArray<int>::ForFunc(
mesh.totloop,
[polys, corner_edges, loop_to_poly_map = std::move(loop_to_poly_map)](const int corner_i) {
const int poly_i = loop_to_poly_map[corner_i];
const MPoly &poly = polys[poly_i];
const int corner_i_prev = bke::mesh_topology::previous_poly_loop(poly, corner_i);
return corner_edges[corner_i_prev];
const MPoly &poly = polys[loop_to_poly_map[corner_i]];
return corner_edges[bke::mesh_topology::poly_loop_prev(poly, corner_i)];
});
}
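
VArray::ForFunc builds a lazily evaluated virtual array: the lambda runs per lookup, so no corner-count-sized buffer is materialized up front. A minimal sketch with hypothetical values:

const blender::VArray<int> doubled = blender::VArray<int>::ForFunc(
    10, [](const int64_t index) { return int(index * 2); });
/* doubled[3] invokes the lambda on demand and returns 6. */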

View File

@ -7,6 +7,27 @@
namespace blender::nodes {
void build_node_declaration(const bNodeType &typeinfo, NodeDeclaration &r_declaration)
{
NodeDeclarationBuilder node_decl_builder{r_declaration};
typeinfo.declare(node_decl_builder);
node_decl_builder.finalize();
}
void NodeDeclarationBuilder::finalize()
{
if (is_function_node_) {
for (SocketDeclarationPtr &socket_decl : declaration_.inputs_) {
if (socket_decl->input_field_type_ != InputSocketFieldType::Implicit) {
socket_decl->input_field_type_ = InputSocketFieldType::IsSupported;
}
}
for (SocketDeclarationPtr &socket_decl : declaration_.outputs_) {
socket_decl->output_field_dependency_ = OutputFieldDependency::ForDependentField();
}
}
}
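
With finalize() applying the function-node field behavior after the declare callback has run, the call order of is_function_node() no longer matters. A hedged sketch of a declaration going through this path (a hypothetical node; decl::Float is the usual float socket declaration type):

static void hypothetical_node_declare(blender::nodes::NodeDeclarationBuilder &b)
{
  using namespace blender::nodes;
  b.add_input<decl::Float>("Value");
  b.add_output<decl::Float>("Result");
  /* Previously this had to precede any add_input/add_output call. */
  b.is_function_node();
}

/* build_node_declaration() runs the callback above, then finalize() marks
 * "Value" as field-supporting (unless implicit) and "Result" as a dependent
 * field, replacing the function-node special cases removed from
 * node_field_inferencing.cc. */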
bool NodeDeclaration::matches(const bNode &node) const
{
auto check_sockets = [&](ListBase sockets, Span<SocketDeclarationPtr> socket_decls) {