Fix T53143: Knife Crash after Grid Fill
The BM_ELEM_INTERNAL_TAG flag was not guaranteed to be cleared.
This commit is contained in:
parent
868bde5920
commit
4f247ed07a
|
@ -341,9 +341,9 @@ enum {
|
|||
/* spare tag, assumed dirty, use define in each function to name based on use */
|
||||
// _BM_ELEM_TAG_ALT = (1 << 6), // UNUSED
|
||||
/**
|
||||
* for low level internal API tagging,
|
||||
* since tools may want to tag verts and
|
||||
* not have functions clobber them */
|
||||
* For low level internal API tagging,
|
||||
* since tools may want to tag verts and not have functions clobber them.
|
||||
* Leave cleared! */
|
||||
BM_ELEM_INTERNAL_TAG = (1 << 7),
|
||||
};
|
||||
|
||||
|
|
|
@ -2090,25 +2090,33 @@ BMFace *bmesh_kernel_join_face_kill_edge(BMesh *bm, BMFace *f1, BMFace *f2, BMEd
|
|||
}
|
||||
|
||||
/* validate no internal join */
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f1); i < f1len; i++, l_iter = l_iter->next) {
|
||||
BM_elem_flag_disable(l_iter->v, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f2); i < f2len; i++, l_iter = l_iter->next) {
|
||||
BM_elem_flag_disable(l_iter->v, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
{
|
||||
bool is_dupe = false;
|
||||
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f1); i < f1len; i++, l_iter = l_iter->next) {
|
||||
if (l_iter != l_f1) {
|
||||
BM_elem_flag_enable(l_iter->v, BM_ELEM_INTERNAL_TAG);
|
||||
/* TODO: skip clearing once this is ensured. */
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f2); i < f2len; i++, l_iter = l_iter->next) {
|
||||
BM_elem_flag_disable(l_iter->v, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
}
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f2); i < f2len; i++, l_iter = l_iter->next) {
|
||||
if (l_iter != l_f2) {
|
||||
/* as soon as a duplicate is found, bail out */
|
||||
if (BM_elem_flag_test(l_iter->v, BM_ELEM_INTERNAL_TAG)) {
|
||||
return NULL;
|
||||
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f1); i < f1len; i++, l_iter = l_iter->next) {
|
||||
BM_elem_flag_set(l_iter->v, BM_ELEM_INTERNAL_TAG, l_iter != l_f1);
|
||||
}
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f2); i < f2len; i++, l_iter = l_iter->next) {
|
||||
if (l_iter != l_f2) {
|
||||
/* as soon as a duplicate is found, bail out */
|
||||
if (BM_elem_flag_test(l_iter->v, BM_ELEM_INTERNAL_TAG)) {
|
||||
is_dupe = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
/* Cleanup tags. */
|
||||
for (i = 0, l_iter = BM_FACE_FIRST_LOOP(f1); i < f1len; i++, l_iter = l_iter->next) {
|
||||
BM_elem_flag_disable(l_iter->v, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
if (is_dupe) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* join the two loops */
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
#include "BLI_listbase.h"
|
||||
#include "BLI_mempool.h"
|
||||
#include "BLI_utildefines_iter.h"
|
||||
#include "BLI_stack.h"
|
||||
|
||||
#include "bmesh.h"
|
||||
|
||||
|
@ -141,28 +142,36 @@ int BM_mesh_edgeloops_find(
|
|||
}
|
||||
|
||||
/* first flush edges to tags, and tag verts */
|
||||
BLI_Stack *edge_stack = BLI_stack_new(sizeof(BMEdge *), __func__);
|
||||
BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
|
||||
BLI_assert(!BM_elem_flag_test(e, BM_ELEM_INTERNAL_TAG));
|
||||
if (test_fn(e, user_data)) {
|
||||
BM_elem_flag_enable(e, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_enable(e->v1, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_enable(e->v2, BM_ELEM_INTERNAL_TAG);
|
||||
BLI_stack_push(edge_stack, (void *)&e);
|
||||
}
|
||||
else {
|
||||
BM_elem_flag_disable(e, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
}
|
||||
|
||||
BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
|
||||
const uint edges_len = BLI_stack_count(edge_stack);
|
||||
BMEdge **edges = MEM_mallocN(sizeof(*edges) * edges_len, __func__);
|
||||
BLI_stack_pop_n_reverse(edge_stack, edges, BLI_stack_count(edge_stack));
|
||||
BLI_stack_free(edge_stack);
|
||||
|
||||
for (uint i = 0; i < edges_len; i += 1) {
|
||||
e = edges[i];
|
||||
if (BM_elem_flag_test(e, BM_ELEM_INTERNAL_TAG)) {
|
||||
BMEdgeLoopStore *el_store = MEM_callocN(sizeof(BMEdgeLoopStore), __func__);
|
||||
|
||||
/* add both directions */
|
||||
if (bm_loop_build(el_store, e->v1, e->v2, 1) &&
|
||||
bm_loop_build(el_store, e->v2, e->v1, -1) &&
|
||||
el_store->len > 1)
|
||||
bm_loop_build(el_store, e->v2, e->v1, -1) &&
|
||||
el_store->len > 1)
|
||||
{
|
||||
BLI_addtail(r_eloops, el_store);
|
||||
BM_elem_flag_disable(e, BM_ELEM_INTERNAL_TAG);
|
||||
count++;
|
||||
}
|
||||
else {
|
||||
|
@ -170,6 +179,15 @@ int BM_mesh_edgeloops_find(
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (uint i = 0; i < edges_len; i += 1) {
|
||||
e = edges[i];
|
||||
BM_elem_flag_disable(e, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_disable(e->v1, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_disable(e->v2, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
|
||||
MEM_freeN(edges);
|
||||
return count;
|
||||
}
|
||||
|
||||
|
@ -267,6 +285,7 @@ bool BM_mesh_edgeloops_find_path(
|
|||
{
|
||||
BMIter iter;
|
||||
BMEdge *e;
|
||||
bool found = false;
|
||||
|
||||
BLI_assert(v_src != v_dst);
|
||||
|
||||
|
@ -274,28 +293,43 @@ bool BM_mesh_edgeloops_find_path(
|
|||
BMVert *v;
|
||||
BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) {
|
||||
BM_elem_index_set(v, 0);
|
||||
BM_elem_flag_disable(v, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
}
|
||||
bm->elem_index_dirty |= BM_VERT;
|
||||
|
||||
/* first flush edges to tags, and tag verts */
|
||||
int edges_len;
|
||||
BMEdge **edges;
|
||||
|
||||
if (test_fn) {
|
||||
BLI_Stack *edge_stack = BLI_stack_new(sizeof(BMEdge *), __func__);
|
||||
BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
|
||||
if (test_fn(e, user_data)) {
|
||||
BM_elem_flag_enable(e, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_enable(e->v1, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_enable(e->v2, BM_ELEM_INTERNAL_TAG);
|
||||
BLI_stack_push(edge_stack, (void *)&e);
|
||||
}
|
||||
else {
|
||||
BM_elem_flag_disable(e, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
}
|
||||
edges_len = BLI_stack_count(edge_stack);
|
||||
edges = MEM_mallocN(sizeof(*edges) * edges_len, __func__);
|
||||
BLI_stack_pop_n_reverse(edge_stack, edges, BLI_stack_count(edge_stack));
|
||||
BLI_stack_free(edge_stack);
|
||||
}
|
||||
else {
|
||||
BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
|
||||
int i = 0;
|
||||
edges_len = bm->totedge;
|
||||
edges = MEM_mallocN(sizeof(*edges) * edges_len, __func__);
|
||||
|
||||
BM_ITER_MESH_INDEX (e, &iter, bm, BM_EDGES_OF_MESH, i) {
|
||||
BM_elem_flag_enable(e, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_enable(e->v1, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_enable(e->v2, BM_ELEM_INTERNAL_TAG);
|
||||
edges[i] = e;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -354,11 +388,19 @@ bool BM_mesh_edgeloops_find_path(
|
|||
|
||||
BLI_addtail(r_eloops, el_store);
|
||||
|
||||
return true;
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
for (uint i = 0; i < edges_len; i += 1) {
|
||||
e = edges[i];
|
||||
BM_elem_flag_disable(e, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_disable(e->v1, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_disable(e->v2, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
MEM_freeN(edges);
|
||||
|
||||
return found;
|
||||
}
|
||||
|
||||
|
||||
|
@ -753,19 +795,29 @@ bool BM_edgeloop_overlap_check(struct BMEdgeLoopStore *el_store_a, struct BMEdge
|
|||
{
|
||||
LinkData *node;
|
||||
|
||||
/* init */
|
||||
for (node = el_store_a->verts.first; node; node = node->next) {
|
||||
BM_elem_flag_disable((BMVert *)node->data, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
for (node = el_store_b->verts.first; node; node = node->next) {
|
||||
BM_elem_flag_enable((BMVert *)node->data, BM_ELEM_INTERNAL_TAG);
|
||||
/* A little more efficient if 'a' is smaller. */
|
||||
if (el_store_a->len > el_store_b->len) {
|
||||
SWAP(BMEdgeLoopStore *, el_store_a, el_store_b);
|
||||
}
|
||||
|
||||
/* check 'a' */
|
||||
/* init */
|
||||
for (node = el_store_a->verts.first; node; node = node->next) {
|
||||
if (BM_elem_flag_test((BMVert *)node->data, BM_ELEM_INTERNAL_TAG)) {
|
||||
BM_elem_flag_enable((BMVert *)node->data, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
for (node = el_store_b->verts.first; node; node = node->next) {
|
||||
BM_elem_flag_disable((BMVert *)node->data, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
|
||||
/* Check 'a' (clear as we go). */
|
||||
for (node = el_store_a->verts.first; node; node = node->next) {
|
||||
if (!BM_elem_flag_test((BMVert *)node->data, BM_ELEM_INTERNAL_TAG)) {
|
||||
/* Finish clearing 'a', leave tag clean. */
|
||||
while ((node = node->next)) {
|
||||
BM_elem_flag_disable((BMVert *)node->data, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
BM_elem_flag_disable((BMVert *)node->data, BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -957,7 +957,7 @@ static void bm_loop_walk_add(struct LoopWalkCtx *lwc, BMLoop *l)
|
|||
{
|
||||
const int i = BM_elem_index_get(l);
|
||||
const float w = lwc->loop_weights[i];
|
||||
BM_elem_flag_enable(l, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_disable(l, BM_ELEM_INTERNAL_TAG);
|
||||
lwc->data_array[lwc->data_len] = BM_ELEM_CD_GET_VOID_P(l, lwc->cd_layer_offset);
|
||||
lwc->data_index_array[lwc->data_len] = i;
|
||||
lwc->weight_array[lwc->data_len] = w;
|
||||
|
@ -976,7 +976,7 @@ static void bm_loop_walk_data(struct LoopWalkCtx *lwc, BMLoop *l_walk)
|
|||
int i;
|
||||
|
||||
BLI_assert(CustomData_data_equals(lwc->type, lwc->data_ref, BM_ELEM_CD_GET_VOID_P(l_walk, lwc->cd_layer_offset)));
|
||||
BLI_assert(BM_elem_flag_test(l_walk, BM_ELEM_INTERNAL_TAG) == false);
|
||||
BLI_assert(BM_elem_flag_test(l_walk, BM_ELEM_INTERNAL_TAG));
|
||||
|
||||
bm_loop_walk_add(lwc, l_walk);
|
||||
|
||||
|
@ -988,7 +988,7 @@ static void bm_loop_walk_data(struct LoopWalkCtx *lwc, BMLoop *l_walk)
|
|||
l_other = l_other->next;
|
||||
}
|
||||
BLI_assert(l_other->v == l_walk->v);
|
||||
if (!BM_elem_flag_test(l_other, BM_ELEM_INTERNAL_TAG)) {
|
||||
if (BM_elem_flag_test(l_other, BM_ELEM_INTERNAL_TAG)) {
|
||||
if (CustomData_data_equals(lwc->type, lwc->data_ref, BM_ELEM_CD_GET_VOID_P(l_other, lwc->cd_layer_offset))) {
|
||||
bm_loop_walk_data(lwc, l_other);
|
||||
}
|
||||
|
@ -1012,9 +1012,10 @@ LinkNode *BM_vert_loop_groups_data_layer_create(
|
|||
lwc.loop_weights = loop_weights;
|
||||
lwc.arena = arena;
|
||||
|
||||
/* Enable 'BM_ELEM_INTERNAL_TAG', leaving the flag clean on completion. */
|
||||
loop_num = 0;
|
||||
BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
|
||||
BM_elem_flag_disable(l, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_enable(l, BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_index_set(l, loop_num); /* set_dirty! */
|
||||
loop_num++;
|
||||
}
|
||||
|
@ -1026,7 +1027,7 @@ LinkNode *BM_vert_loop_groups_data_layer_create(
|
|||
lwc.weight_array = BLI_memarena_alloc(lwc.arena, sizeof(float) * loop_num);
|
||||
|
||||
BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
|
||||
if (!BM_elem_flag_test(l, BM_ELEM_INTERNAL_TAG)) {
|
||||
if (BM_elem_flag_test(l, BM_ELEM_INTERNAL_TAG)) {
|
||||
struct LoopGroupCD *lf = BLI_memarena_alloc(lwc.arena, sizeof(*lf));
|
||||
int len_prev = lwc.data_len;
|
||||
|
||||
|
|
|
@ -1236,6 +1236,8 @@ bool BM_face_split_edgenet_connect_islands(
|
|||
BMLoop *l_iter, *l_first;
|
||||
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
|
||||
do {
|
||||
BLI_assert(!BM_elem_flag_test(l_iter->v, VERT_NOT_IN_STACK));
|
||||
BLI_assert(!BM_elem_flag_test(l_iter->e, EDGE_NOT_IN_STACK));
|
||||
edge_arr[i++] = l_iter->e;
|
||||
} while ((l_iter = l_iter->next) != l_first);
|
||||
BLI_assert(i == edge_arr_len);
|
||||
|
|
|
@ -2109,7 +2109,8 @@ bool BM_face_exists_multi(BMVert **varr, BMEdge **earr, int len)
|
|||
|
||||
if (tot_tag == 0) {
|
||||
/* no faces use only boundary verts, quit early */
|
||||
return false;
|
||||
ok = false;
|
||||
goto finally;
|
||||
}
|
||||
|
||||
/* 2) loop over non-boundary edges that use boundary verts,
|
||||
|
@ -2144,6 +2145,12 @@ bool BM_face_exists_multi(BMVert **varr, BMEdge **earr, int len)
|
|||
}
|
||||
}
|
||||
|
||||
finally:
|
||||
/* Cleanup */
|
||||
for (i = 0; i < len; i++) {
|
||||
BM_elem_flag_disable(varr[i], BM_ELEM_INTERNAL_TAG);
|
||||
BM_elem_flag_disable(earr[i], BM_ELEM_INTERNAL_TAG);
|
||||
}
|
||||
return ok;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue