Edit Mesh: partial updates for normal and face tessellation

This patch exposes functionality for performing partial mesh updates
for normal calculation and face tessellation while transforming a mesh.

The partial update data only needs to be generated once,
afterwards the cached connectivity information can be reused
(with the exception of changing proportional editing radius).

Currently this is only used for transform, in the future it could be
used for other operators as well as the transform panel.

The best-case overall speedup while transforming geometry is about
1.45x since the time to update a small number of normals and faces is
negligible.

For an additional speedup partial face tessellation is multi-threaded,
this gives ~15x speedup on my system (timing tessellation alone).
Exact results depend on the number of CPU cores available.

Ref D11494

Reviewed By: mano-wii
This commit is contained in:
Campbell Barton 2021-05-28 18:18:13 +10:00
parent 00073651d4
commit c2fa36999f
Notes: blender-bot 2024-03-08 15:16:25 +01:00
Referenced by issue #89289, "Edit Mesh: partial updates" related crash in add-on
Referenced by issue #88550, Mesh Optimization Project Progress
13 changed files with 819 additions and 50 deletions

View File

@ -33,6 +33,7 @@ extern "C" {
struct BMLoop;
struct BMesh;
struct BMPartialUpdate;
struct BoundBox;
struct Depsgraph;
struct Mesh;
@ -85,6 +86,8 @@ typedef struct BMEditMesh {
/* editmesh.c */
void BKE_editmesh_looptri_calc(BMEditMesh *em);
void BKE_editmesh_looptri_calc_with_partial(BMEditMesh *em, struct BMPartialUpdate *bmpinfo);
BMEditMesh *BKE_editmesh_create(BMesh *bm, const bool do_tessellate);
BMEditMesh *BKE_editmesh_copy(BMEditMesh *em);
BMEditMesh *BKE_editmesh_from_object(struct Object *ob);

View File

@ -149,6 +149,14 @@ void BKE_editmesh_looptri_calc(BMEditMesh *em)
#endif
}
/**
 * A partial-update version of #BKE_editmesh_looptri_calc that only re-tessellates
 * the faces referenced by \a bmpinfo.
 *
 * \note The caller must have already allocated `em->looptris` at the full size
 * (asserted below); only the triangles of the tagged faces are overwritten.
 */
void BKE_editmesh_looptri_calc_with_partial(BMEditMesh *em, struct BMPartialUpdate *bmpinfo)
{
  /* A partial update only makes sense when the total triangle count is unchanged. */
  BLI_assert(em->tottri == poly_to_tri_count(em->bm->totface, em->bm->totloop));
  BLI_assert(em->looptris != NULL);

  BM_mesh_calc_tessellation_with_partial(em->bm, em->looptris, bmpinfo);
}
void BKE_editmesh_free_derivedmesh(BMEditMesh *em)
{
if (em->mesh_eval_cage) {

View File

@ -104,6 +104,8 @@ set(SRC
intern/bmesh_mesh_duplicate.h
intern/bmesh_mesh_tessellate.c
intern/bmesh_mesh_tessellate.h
intern/bmesh_mesh_partial_update.c
intern/bmesh_mesh_partial_update.h
intern/bmesh_mesh_validate.c
intern/bmesh_mesh_validate.h
intern/bmesh_mods.c

View File

@ -215,6 +215,7 @@ extern "C" {
#include "intern/bmesh_mesh.h"
#include "intern/bmesh_mesh_convert.h"
#include "intern/bmesh_mesh_duplicate.h"
#include "intern/bmesh_mesh_partial_update.h"
#include "intern/bmesh_mesh_tessellate.h"
#include "intern/bmesh_mesh_validate.h"
#include "intern/bmesh_mods.h"

View File

@ -373,15 +373,16 @@ typedef struct BMVertsCalcNormalsData {
float (*vnos)[3];
} BMVertsCalcNormalsData;
static void mesh_verts_calc_normals_accum_cb(void *userdata, MempoolIterData *mp_f)
static void mesh_verts_calc_normals_accum(
BMFace *f,
const float *f_no,
const float (*edgevec)[3],
/* Read-write data, protected by an atomic-based fake spin-lock like system. */
float (*vnos)[3])
{
#define FLT_EQ_NONAN(_fa, _fb) (*((const uint32_t *)&_fa) == *((const uint32_t *)&_fb))
BMVertsCalcNormalsData *data = userdata;
BMFace *f = (BMFace *)mp_f;
const float *f_no = data->fnos ? data->fnos[BM_elem_index_get(f)] : f->no;
BMLoop *l_first, *l_iter;
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
do {
@ -391,8 +392,8 @@ static void mesh_verts_calc_normals_accum_cb(void *userdata, MempoolIterData *mp
/* calculate the dot product of the two edges that
* meet at the loop's vertex */
e1diff = data->edgevec[BM_elem_index_get(l_iter->prev->e)];
e2diff = data->edgevec[BM_elem_index_get(l_iter->e)];
e1diff = edgevec[BM_elem_index_get(l_iter->prev->e)];
e2diff = edgevec[BM_elem_index_get(l_iter->e)];
dotprod = dot_v3v3(e1diff, e2diff);
/* edge vectors are calculated from e->v1 to e->v2, so
@ -410,7 +411,7 @@ static void mesh_verts_calc_normals_accum_cb(void *userdata, MempoolIterData *mp
}
/* accumulate weighted face normal into the vertex's normal */
float *v_no = data->vnos ? data->vnos[BM_elem_index_get(l_iter->v)] : l_iter->v->no;
float *v_no = vnos ? vnos[BM_elem_index_get(l_iter->v)] : l_iter->v->no;
/* This block is a lockless threadsafe madd_v3_v3fl.
* It uses the first float of the vector as a sort of cheap spin-lock,
@ -447,6 +448,14 @@ static void mesh_verts_calc_normals_accum_cb(void *userdata, MempoolIterData *mp
#undef FLT_EQ_NONAN
}
static void mesh_verts_calc_normals_accum_cb(void *userdata, MempoolIterData *mp_f)
{
BMVertsCalcNormalsData *data = userdata;
BMFace *f = (BMFace *)mp_f;
const float *f_no = data->fnos ? data->fnos[BM_elem_index_get(f)] : f->no;
mesh_verts_calc_normals_accum(f, f_no, data->edgevec, data->vnos);
}
static void mesh_verts_calc_normals_normalize_cb(void *userdata, MempoolIterData *mp_v)
{
BMVertsCalcNormalsData *data = userdata;
@ -529,6 +538,130 @@ void BM_mesh_normals_update(BMesh *bm)
MEM_freeN(edgevec);
}
/* Parallel range callback: recalculate the normal of a single tagged face. */
static void mesh_faces_parallel_range_calc_normals_cb(
    void *userdata, const int iter, const TaskParallelTLS *__restrict UNUSED(tls))
{
  BMFace **faces = userdata;
  BM_face_normal_update(faces[iter]);
}
/* Parallel range callback: compute the normalized direction vector of one edge.
 * `userdata` is a two element array: {BMEdge **edges, float (*edgevec)[3]}. */
static void mesh_edges_parallel_range_calc_vectors_cb(
    void *userdata, const int iter, const TaskParallelTLS *__restrict UNUSED(tls))
{
  void **data = userdata;
  BMEdge *e = ((BMEdge **)data[0])[iter];
  float *edge_dir = ((float(*)[3])data[1])[iter];
  sub_v3_v3v3(edge_dir, e->v1->co, e->v2->co);
  normalize_v3(edge_dir);
}
/* Parallel range callback: accumulate one face's weighted normal into the
 * normals of its vertices.
 * `userdata` is a two element array: {BMFace **faces, float (*edgevec)[3]}. */
static void mesh_verts_parallel_range_calc_normals_accum_cb(
    void *userdata, const int iter, const TaskParallelTLS *__restrict UNUSED(tls))
{
  void **data = userdata;
  BMFace *f = ((BMFace **)data[0])[iter];
  const float(*edgevec)[3] = (float(*)[3])data[1];
  mesh_verts_calc_normals_accum(f, f->no, edgevec, NULL);
}
/* Parallel range callback: normalize a single vertex normal after accumulation.
 * A zero-length accumulated normal falls back to the normalized vertex location
 * so the normal is never left as a zero vector. */
static void mesh_verts_parallel_range_calc_normals_normalize_cb(
    void *userdata, const int iter, const TaskParallelTLS *__restrict UNUSED(tls))
{
  BMVert *v = ((BMVert **)userdata)[iter];
  if (UNLIKELY(normalize_v3(v->no) == 0.0f)) {
    normalize_v3_v3(v->no, v->co);
  }
}
/**
 * A version of #BM_mesh_normals_update that updates a subset of geometry,
 * used to avoid the overhead of updating everything.
 *
 * \param bmpinfo: Cached connectivity created by #BM_mesh_partial_create_from_verts,
 * which must have been built with `params.do_normals` enabled (asserted below).
 */
void BM_mesh_normals_update_with_partial(BMesh *bm, const BMPartialUpdate *bmpinfo)
{
  BLI_assert(bmpinfo->params.do_normals);

  BMVert **verts = bmpinfo->verts;
  BMEdge **edges = bmpinfo->edges;
  BMFace **faces = bmpinfo->faces;
  const int verts_len = bmpinfo->verts_len;
  const int edges_len = bmpinfo->edges_len;
  const int faces_len = bmpinfo->faces_len;
  const int faces_len_normal_calc_accumulate = bmpinfo->faces_len_normal_calc_accumulate;

  /* Normalized per-edge direction vectors, used to weight face normals per-vertex. */
  float(*edgevec)[3] = MEM_mallocN(sizeof(*edgevec) * edges_len, __func__);

  /* Clear the normals that will be recalculated, ready for accumulation. */
  for (int i = 0; i < verts_len; i++) {
    zero_v3(verts[i]->no);
  }

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  {
    /* Faces. */
    BLI_task_parallel_range(
        0, faces_len, faces, mesh_faces_parallel_range_calc_normals_cb, &settings);
  }

  /* Temporarily override the edge indices,
   * storing the correct indices in the case they're not dirty.
   *
   * \note in most cases indices are modified and #BMesh.elem_index_dirty is set.
   * This is an exceptional case where indices are restored because the worst case downside
   * of marking the edge indices dirty would require a full loop over all edges to
   * correct the indices in other functions which need them to be valid.
   * When moving a few vertices on a high poly mesh setting and restoring connected
   * edges has very little overhead compared with restoring all edge indices. */
  int *edge_index_value = NULL;
  if ((bm->elem_index_dirty & BM_EDGE) == 0) {
    /* Indices are currently valid: remember them so they can be restored on exit. */
    edge_index_value = MEM_mallocN(sizeof(*edge_index_value) * edges_len, __func__);

    for (int i = 0; i < edges_len; i++) {
      BMEdge *e = edges[i];
      edge_index_value[i] = BM_elem_index_get(e);
      BM_elem_index_set(e, i); /* set_dirty! (restore before this function exits). */
    }
  }
  else {
    for (int i = 0; i < edges_len; i++) {
      BMEdge *e = edges[i];
      BM_elem_index_set(e, i); /* set_dirty! (already dirty) */
    }
  }

  {
    /* Verts. */

    /* Compute normalized direction vectors for each edge.
     * Directions will be used for calculating the weights of the face normals on the vertex
     * normals. */
    void *data[2] = {edges, edgevec};
    BLI_task_parallel_range(
        0, edges_len, data, mesh_edges_parallel_range_calc_vectors_cb, &settings);

    /* Add weighted face normals to vertices.
     * Note this covers the extra trailing faces (up to `faces_len_normal_calc_accumulate`)
     * whose own normals aren't recalculated but still contribute to vertex normals. */
    data[0] = faces;
    BLI_task_parallel_range(0,
                            faces_len_normal_calc_accumulate,
                            data,
                            mesh_verts_parallel_range_calc_normals_accum_cb,
                            &settings);

    /* Normalize the accumulated vertex normals. */
    BLI_task_parallel_range(
        0, verts_len, verts, mesh_verts_parallel_range_calc_normals_normalize_cb, &settings);
  }

  if (edge_index_value != NULL) {
    for (int i = 0; i < edges_len; i++) {
      BMEdge *e = edges[i];
      BM_elem_index_set(e, edge_index_value[i]); /* set_ok (restore) */
    }

    MEM_freeN(edge_index_value);
  }

  MEM_freeN(edgevec);
}
/**
* \brief BMesh Compute Normals from/to external data.
*

View File

@ -25,6 +25,7 @@
struct BMAllocTemplate;
struct BMLoopNorEditDataArray;
struct MLoopNorSpaceArray;
struct BMPartialUpdate;
void BM_mesh_elem_toolflags_ensure(BMesh *bm);
void BM_mesh_elem_toolflags_clear(BMesh *bm);
@ -41,6 +42,8 @@ void BM_mesh_data_free(BMesh *bm);
void BM_mesh_clear(BMesh *bm);
void BM_mesh_normals_update(BMesh *bm);
void BM_mesh_normals_update_with_partial(BMesh *bm, const struct BMPartialUpdate *bmpinfo);
void BM_verts_calc_normal_vcos(BMesh *bm,
const float (*fnos)[3],
const float (*vcos)[3],

View File

@ -0,0 +1,272 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bmesh
*
* Generate data needed for partially updating mesh information.
* Currently this is used for normals and tessellation.
*
* Transform is the obvious use case where there is no need to update normals or tessellation
* for geometry which has not been modified.
*
* In the future this could be integrated into GPU updates too.
*
* Potential Improvements
* ======================
*
 * Some calculations could be significantly limited in the case of affine transformations
 * (tessellation is an obvious candidate), where only faces that have a mix
 * of tagged and untagged vertices would need to be recalculated.
*
* In general this would work well besides some corner cases such as scaling to zero.
* Although the exact result may depend on the normal (for N-GONS),
* so for now update the tessellation of all tagged geometry.
*/
#include "DNA_object_types.h"
#include "MEM_guardedalloc.h"
#include "BLI_alloca.h"
#include "BLI_bitmap.h"
#include "BLI_math_vector.h"
#include "bmesh.h"
/**
* Grow by 1.5x (rounding up).
*
* \note Use conservative reallocation since the initial sizes reserved
* may be close to (or exactly) the number of elements needed.
*/
#define GROW(len_alloc) ((len_alloc) + ((len_alloc) - ((len_alloc) / 2)))
#define GROW_ARRAY(mem, len_alloc) \
{ \
mem = MEM_reallocN(mem, (sizeof(*mem)) * ((len_alloc) = GROW(len_alloc))); \
} \
((void)0)
#define GROW_ARRAY_AS_NEEDED(mem, len_alloc, index) \
if (UNLIKELY(len_alloc == index)) { \
GROW_ARRAY(mem, len_alloc); \
}
/**
 * Tag \a v and append it to the vertex array, unless it was added already.
 * \return true when the vertex was newly added.
 */
BLI_INLINE bool partial_elem_vert_ensure(BMPartialUpdate *bmpinfo,
                                         BLI_bitmap *verts_tag,
                                         BMVert *v)
{
  const int index = BM_elem_index_get(v);
  if (BLI_BITMAP_TEST(verts_tag, index)) {
    return false;
  }
  BLI_BITMAP_ENABLE(verts_tag, index);
  GROW_ARRAY_AS_NEEDED(bmpinfo->verts, bmpinfo->verts_len_alloc, bmpinfo->verts_len);
  bmpinfo->verts[bmpinfo->verts_len++] = v;
  return true;
}
/**
 * Tag \a e and append it to the edge array, unless it was added already.
 * \return true when the edge was newly added.
 */
BLI_INLINE bool partial_elem_edge_ensure(BMPartialUpdate *bmpinfo,
                                         BLI_bitmap *edges_tag,
                                         BMEdge *e)
{
  const int index = BM_elem_index_get(e);
  if (BLI_BITMAP_TEST(edges_tag, index)) {
    return false;
  }
  BLI_BITMAP_ENABLE(edges_tag, index);
  GROW_ARRAY_AS_NEEDED(bmpinfo->edges, bmpinfo->edges_len_alloc, bmpinfo->edges_len);
  bmpinfo->edges[bmpinfo->edges_len++] = e;
  return true;
}
/**
 * Tag \a f and append it to the face array, unless it was added already.
 * \return true when the face was newly added.
 */
BLI_INLINE bool partial_elem_face_ensure(BMPartialUpdate *bmpinfo,
                                         BLI_bitmap *faces_tag,
                                         BMFace *f)
{
  const int index = BM_elem_index_get(f);
  if (BLI_BITMAP_TEST(faces_tag, index)) {
    return false;
  }
  BLI_BITMAP_ENABLE(faces_tag, index);
  GROW_ARRAY_AS_NEEDED(bmpinfo->faces, bmpinfo->faces_len_alloc, bmpinfo->faces_len);
  bmpinfo->faces[bmpinfo->faces_len++] = f;
  return true;
}
/**
 * Create partial-update data for the vertices accepted by \a filter_fn.
 *
 * Collects the faces connected to the filtered vertices (sufficient for tessellation)
 * and, when `params->do_normals` is set, also the surrounding vertices, edges and
 * additional neighboring faces required to recalculate vertex normals.
 *
 * \param verts_len: The number of vertices expected to pass \a filter_fn,
 * used to reserve array space (must not exceed `bm->totvert`, asserted below).
 * \param filter_fn: Return true for vertices that are being modified.
 * \return Cached connectivity data, to be freed with #BM_mesh_partial_destroy.
 */
BMPartialUpdate *BM_mesh_partial_create_from_verts(BMesh *bm,
                                                   const BMPartialUpdate_Params *params,
                                                   const int verts_len,
                                                   bool (*filter_fn)(BMVert *, void *user_data),
                                                   void *user_data)
{
  /* The caller is doing something wrong if this isn't the case. */
  BLI_assert(verts_len <= bm->totvert);

  BMPartialUpdate *bmpinfo = MEM_callocN(sizeof(*bmpinfo), __func__);

  /* Reserve more edges than vertices since it's common for a grid topology
   * to use around twice as many edges as vertices. */
  const int default_verts_len_alloc = verts_len;
  const int default_edges_len_alloc = min_ii(bm->totedge, verts_len * 2);
  const int default_faces_len_alloc = min_ii(bm->totface, verts_len);

  /* Allocate tags instead of using #BM_ELEM_TAG because the caller may already be using tags.
   * Further, walking over all geometry to clear the tags isn't so efficient. */
  BLI_bitmap *verts_tag = NULL;
  BLI_bitmap *edges_tag = NULL;
  BLI_bitmap *faces_tag = NULL;

  /* Set vert inline. */
  BM_mesh_elem_index_ensure(bm, (BM_EDGE | BM_FACE));

  if (params->do_normals || params->do_tessellate) {
    /* - Extend to all vertices connected faces:
     *   In the case of tessellation this is enough.
     *
     *   In the case of vertex normal calculation,
     *   All the relevant connectivity data can be accessed from the faces
     *   (there is no advantage in storing connected edges or vertices in this pass).
     *
     * NOTE: In the future it may be useful to differentiate between vertices
     * that are directly marked (by the filter function when looping over all vertices).
     * And vertices marked from indirect connections.
     * This would require an extra tag array, so avoid this unless it's needed. */

    /* Faces. */
    if (bmpinfo->faces == NULL) {
      bmpinfo->faces_len_alloc = default_faces_len_alloc;
      bmpinfo->faces = MEM_mallocN((sizeof(BMFace *) * bmpinfo->faces_len_alloc), __func__);
      faces_tag = BLI_BITMAP_NEW((size_t)bm->totface, __func__);
    }

    BMVert *v;
    BMIter iter;
    int i;
    BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
      BM_elem_index_set(v, i); /* set_inline */
      if (!filter_fn(v, user_data)) {
        continue;
      }
      BMEdge *e_iter = v->e;
      if (e_iter != NULL) {
        /* Loop over edges. */
        BMEdge *e_first = v->e;
        do {
          BMLoop *l_iter = e_iter->l;
          if (e_iter->l != NULL) {
            BMLoop *l_first = e_iter->l;
            /* Loop over radial loops. */
            do {
              if (l_iter->v == v) {
                partial_elem_face_ensure(bmpinfo, faces_tag, l_iter->f);
              }
            } while ((l_iter = l_iter->radial_next) != l_first);
          }
        } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
      }
    }
  }

  /* Number of faces collected directly from the filtered vertices,
   * restored into `faces_len` before returning (the extra faces added below
   * only participate in vertex-normal accumulation). */
  const int faces_len_direct = bmpinfo->faces_len;

  if (params->do_normals) {
    /* - Extend to all the faces' vertices:
     *   Any changes to the faces normal needs to update all surrounding vertices.
     *
     * - Extend to all these vertices connected edges:
     *   These are needed to access those vertices' edge vectors
     *   in the normal calculation logic. */

    /* Vertices. */
    if (bmpinfo->verts == NULL) {
      bmpinfo->verts_len_alloc = default_verts_len_alloc;
      bmpinfo->verts = MEM_mallocN((sizeof(BMVert *) * bmpinfo->verts_len_alloc), __func__);
      verts_tag = BLI_BITMAP_NEW((size_t)bm->totvert, __func__);
    }

    /* Edges. */
    if (bmpinfo->edges == NULL) {
      bmpinfo->edges_len_alloc = default_edges_len_alloc;
      bmpinfo->edges = MEM_mallocN((sizeof(BMEdge *) * bmpinfo->edges_len_alloc), __func__);
      edges_tag = BLI_BITMAP_NEW((size_t)bm->totedge, __func__);
    }

    for (int i = 0; i < bmpinfo->faces_len; i++) {
      BMFace *f = bmpinfo->faces[i];
      BMLoop *l_iter, *l_first;
      l_iter = l_first = BM_FACE_FIRST_LOOP(f);
      do {
        if (!partial_elem_vert_ensure(bmpinfo, verts_tag, l_iter->v)) {
          continue;
        }
        BMVert *v = l_iter->v;
        BMEdge *e_first = v->e;
        BMEdge *e_iter = e_first;
        do {
          if (e_iter->l) {
            partial_elem_edge_ensure(bmpinfo, edges_tag, e_iter);
            /* These faces need to be taken into account when weighting vertex normals
             * but aren't needed for tessellation nor do their normals need to be recalculated.
             * These faces end up between `faces_len` and `faces_len_normal_calc_accumulate`
             * in the faces array. */
            BMLoop *l_first_radial = e_iter->l;
            BMLoop *l_iter_radial = l_first_radial;
            /* Loop over radial loops. */
            do {
              if (l_iter_radial->v == v) {
                partial_elem_face_ensure(bmpinfo, faces_tag, l_iter_radial->f);
              }
            } while ((l_iter_radial = l_iter_radial->radial_next) != l_first_radial);
          }
        } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
      } while ((l_iter = l_iter->next) != l_first);
    }
  }

  bmpinfo->faces_len_normal_calc_accumulate = bmpinfo->faces_len;
  bmpinfo->faces_len = faces_len_direct;

  if (verts_tag) {
    MEM_freeN(verts_tag);
  }
  if (edges_tag) {
    MEM_freeN(edges_tag);
  }
  if (faces_tag) {
    MEM_freeN(faces_tag);
  }

  bmpinfo->params = *params;

  return bmpinfo;
}
/**
 * Free a #BMPartialUpdate created by #BM_mesh_partial_create_from_verts.
 *
 * \note The element arrays may be NULL when they weren't needed for the
 * parameters used at creation time, hence the guards.
 */
void BM_mesh_partial_destroy(BMPartialUpdate *bmpinfo)
{
  if (bmpinfo->verts) {
    MEM_freeN(bmpinfo->verts);
  }
  if (bmpinfo->edges) {
    MEM_freeN(bmpinfo->edges);
  }
  if (bmpinfo->faces) {
    MEM_freeN(bmpinfo->faces);
  }
  MEM_freeN(bmpinfo);
}

View File

@ -0,0 +1,69 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#pragma once
/** \file
* \ingroup bmesh
*/
#include "BLI_compiler_attrs.h"
/**
* Parameters used to determine which kinds of data needs to be generated.
*/
typedef struct BMPartialUpdate_Params {
bool do_normals;
bool do_tessellate;
} BMPartialUpdate_Params;
/**
* Cached data to speed up partial updates.
*
* Hints:
*
* - Avoid creating this data for single updates,
* it should be created and reused across multiple updates to gain a significant benefit
* (while transforming geometry for example).
*
* - Partial normal updates use face & loop indices,
* setting them to dirty values between updates will slow down normal recalculation.
*/
typedef struct BMPartialUpdate {
  /** Elements to include in the partial update (owned by this struct). */
  BMVert **verts;
  BMEdge **edges;
  BMFace **faces;
  int verts_len, verts_len_alloc;
  int edges_len, edges_len_alloc;
  int faces_len, faces_len_alloc;
  /**
   * Faces at the end of `faces` that don't need to have the normals recalculated
   * but must be included when weighting the vertex normals.
   */
  int faces_len_normal_calc_accumulate;

  /** Store the parameters used in creation so invalid use can be asserted. */
  BMPartialUpdate_Params params;
} BMPartialUpdate;
BMPartialUpdate *BM_mesh_partial_create_from_verts(BMesh *bm,
const BMPartialUpdate_Params *params,
const int verts_len,
bool (*filter_fn)(BMVert *, void *user_data),
void *user_data)
ATTR_NONNULL(1, 2, 4) ATTR_WARN_UNUSED_RESULT;
void BM_mesh_partial_destroy(BMPartialUpdate *bmpinfo) ATTR_NONNULL(1);

View File

@ -32,6 +32,7 @@
#include "BLI_memarena.h"
#include "BLI_polyfill_2d.h"
#include "BLI_polyfill_2d_beautify.h"
#include "BLI_task.h"
#include "bmesh.h"
#include "bmesh_tools.h"
@ -151,6 +152,105 @@ void BM_mesh_calc_tessellation(BMesh *bm, BMLoop *(*looptris)[3])
/** \} */
/* -------------------------------------------------------------------- */
/** \name Default Tessellation (Partial Updates)
* \{ */
/** Data shared by all worker threads during partial tessellation. */
struct PartialTessellationUserData {
  /** Faces to re-tessellate. */
  BMFace **faces;
  /** Destination triangle array for the whole mesh (written at per-face offsets). */
  BMLoop *(*looptris)[3];
};

/** Per-thread state: a lazily-created arena passed to #mesh_calc_tessellation_for_face. */
struct PartialTessellationUserTLS {
  MemArena *pf_arena;
};
/* Parallel range callback: tessellate a single face into its slots of the
 * full `looptris` array. */
static void mesh_calc_tessellation_for_face_partial_fn(void *__restrict userdata,
                                                       const int index,
                                                       const TaskParallelTLS *__restrict tls)
{
  struct PartialTessellationUserTLS *tls_data = tls->userdata_chunk;
  struct PartialTessellationUserData *data = userdata;
  BMFace *f = data->faces[index];
  BMLoop *l = BM_FACE_FIRST_LOOP(f);
  /* Every face with N loops yields N - 2 triangles, so the triangle offset of
   * this face equals its first loop's index minus twice its face index
   * (requires valid loop & face indices, ensured by the caller). */
  const int offset = BM_elem_index_get(l) - (BM_elem_index_get(f) * 2);
  mesh_calc_tessellation_for_face(data->looptris + offset, f, &tls_data->pf_arena);
}
static void mesh_calc_tessellation_for_face_partial_free_fn(
const void *__restrict UNUSED(userdata), void *__restrict tls_v)
{
struct PartialTessellationUserTLS *tls_data = tls_v;
if (tls_data->pf_arena) {
BLI_memarena_free(tls_data->pf_arena);
}
}
/* Multi-threaded partial tessellation: each worker gets its own TLS copy
 * containing a memory arena, freed via the `func_free` callback. */
static void bm_mesh_calc_tessellation_with_partial__multi_threaded(BMLoop *(*looptris)[3],
                                                                   const BMPartialUpdate *bmpinfo)
{
  const int faces_len = bmpinfo->faces_len;
  BMFace **faces = bmpinfo->faces;

  struct PartialTessellationUserData data = {
      .faces = faces,
      .looptris = looptris,
  };
  struct PartialTessellationUserTLS tls_dummy = {NULL};
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.use_threading = true;
  /* Workers receive a copy of `tls_dummy` as their thread-local state. */
  settings.userdata_chunk = &tls_dummy;
  settings.userdata_chunk_size = sizeof(tls_dummy);
  settings.func_free = mesh_calc_tessellation_for_face_partial_free_fn;

  BLI_task_parallel_range(
      0, faces_len, &data, mesh_calc_tessellation_for_face_partial_fn, &settings);
}
/* Single-threaded partial tessellation, sharing one lazily-created arena. */
static void bm_mesh_calc_tessellation_with_partial__single_threaded(BMLoop *(*looptris)[3],
                                                                    const BMPartialUpdate *bmpinfo)
{
  MemArena *pf_arena = NULL;

  for (int i = 0; i < bmpinfo->faces_len; i++) {
    BMFace *f = bmpinfo->faces[i];
    BMLoop *l = BM_FACE_FIRST_LOOP(f);
    /* Triangle offset of this face within the full `looptris` array. */
    const int looptri_index = BM_elem_index_get(l) - (BM_elem_index_get(f) * 2);
    mesh_calc_tessellation_for_face(looptris + looptri_index, f, &pf_arena);
  }

  if (pf_arena != NULL) {
    BLI_memarena_free(pf_arena);
  }
}
/**
 * A version of #BM_mesh_calc_tessellation that only updates the triangles of
 * the faces listed in \a bmpinfo, choosing a single or multi-threaded
 * implementation based on the number of faces.
 *
 * \param bmpinfo: Must have been created with `params.do_tessellate` enabled.
 */
void BM_mesh_calc_tessellation_with_partial(BMesh *bm,
                                            BMLoop *(*looptris)[3],
                                            const BMPartialUpdate *bmpinfo)
{
  BLI_assert(bmpinfo->params.do_tessellate);
  /* Valid loop & face indices are needed to locate each face's triangles
   * within `looptris`. */
  BM_mesh_elem_index_ensure(bm, BM_LOOP | BM_FACE);

  /* On systems with 32+ cores,
   * only a very small number of faces has any advantage single threading (in the 100's).
   * Note that between 500-2000 quads, the difference isn't so much
   * (tessellation isn't a bottleneck in this case anyway).
   * Avoid the slight overhead of using threads in this case. */
  if (bmpinfo->faces_len < 1024) {
    bm_mesh_calc_tessellation_with_partial__single_threaded(looptris, bmpinfo);
  }
  else {
    bm_mesh_calc_tessellation_with_partial__multi_threaded(looptris, bmpinfo);
  }
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Beauty Mesh Tessellation
*

View File

@ -20,5 +20,11 @@
* \ingroup bmesh
*/
struct BMPartialUpdate;
void BM_mesh_calc_tessellation(BMesh *bm, BMLoop *(*looptris)[3]);
void BM_mesh_calc_tessellation_beauty(BMesh *bm, BMLoop *(*looptris)[3]);
void BM_mesh_calc_tessellation_with_partial(BMesh *bm,
BMLoop *(*looptris)[3],
const struct BMPartialUpdate *bmpinfo);

View File

@ -21,6 +21,7 @@
*/
struct Heap;
struct BMPartialUpdate;
#include "BLI_compiler_attrs.h"

View File

@ -46,6 +46,7 @@
* \{ */
struct ARegion;
struct BMPartialUpdate;
struct Depsgraph;
struct NumInput;
struct Object;

View File

@ -52,6 +52,66 @@
/** \name Container TransCustomData Creation
* \{ */
static void tc_mesh_customdata_free_fn(struct TransInfo *t,
struct TransDataContainer *tc,
struct TransCustomData *custom_data);
struct TransCustomDataLayer;
static void tc_mesh_customdatacorrect_free(struct TransCustomDataLayer *tcld);
/**
 * Per-container custom data for mesh transform, bundling the custom-data
 * correction state with the partial update cache.
 */
struct TransCustomDataMesh {
  struct TransCustomDataLayer *cd_layer_correct;
  struct {
    /** Cached connectivity, created by #tc_mesh_ensure_partial_update. */
    struct BMPartialUpdate *cache;

    /** The size of proportional editing used for `partial_update_cache`. */
    float prop_size;
    /** The size of proportional editing for the last update. */
    float prop_size_prev;
  } partial_update;
};
/**
 * Lazily create the #TransCustomDataMesh for this container
 * (installing its free callback), or return the existing one.
 */
static struct TransCustomDataMesh *tc_mesh_customdata_ensure(TransDataContainer *tc)
{
  struct TransCustomDataMesh *tcmd = tc->custom.type.data;
  /* If data exists it must have been created here (same free callback). */
  BLI_assert(tc->custom.type.data == NULL ||
             tc->custom.type.free_cb == tc_mesh_customdata_free_fn);
  if (tc->custom.type.data == NULL) {
    tc->custom.type.data = MEM_callocN(sizeof(struct TransCustomDataMesh), __func__);
    tc->custom.type.free_cb = tc_mesh_customdata_free_fn;
    tcmd = tc->custom.type.data;
  }
  return tcmd;
}
/** Free a #TransCustomDataMesh along with everything it owns. */
static void tc_mesh_customdata_free(struct TransCustomDataMesh *tcmd)
{
  if (tcmd->cd_layer_correct != NULL) {
    tc_mesh_customdatacorrect_free(tcmd->cd_layer_correct);
  }

  if (tcmd->partial_update.cache != NULL) {
    BM_mesh_partial_destroy(tcmd->partial_update.cache);
  }

  MEM_freeN(tcmd);
}
/** #TransCustomData `free_cb` for mesh containers. */
static void tc_mesh_customdata_free_fn(struct TransInfo *UNUSED(t),
                                       struct TransDataContainer *UNUSED(tc),
                                       struct TransCustomData *custom_data)
{
  struct TransCustomDataMesh *tcmd = custom_data->data;
  tc_mesh_customdata_free(tcmd);
  custom_data->data = NULL;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name CustomData TransCustomDataLayer Creation
* \{ */
struct TransCustomDataMergeGroup {
/** map {BMVert: TransCustomDataLayerVert} */
struct LinkNode **cd_loop_groups;
@ -83,33 +143,6 @@ struct TransCustomDataLayer {
bool use_merge_group;
};
static void tc_mesh_customdatacorrect_free_fn(struct TransInfo *UNUSED(t),
struct TransDataContainer *UNUSED(tc),
struct TransCustomData *custom_data)
{
struct TransCustomDataLayer *tcld = custom_data->data;
bmesh_edit_end(tcld->bm, BMO_OPTYPE_FLAG_UNTAN_MULTIRES);
if (tcld->bm_origfaces) {
BM_mesh_free(tcld->bm_origfaces);
}
if (tcld->origfaces) {
BLI_ghash_free(tcld->origfaces, NULL, NULL);
}
if (tcld->merge_group.origverts) {
BLI_ghash_free(tcld->merge_group.origverts, NULL, NULL);
}
if (tcld->arena) {
BLI_memarena_free(tcld->arena);
}
if (tcld->merge_group.customdatalayer_map) {
MEM_freeN(tcld->merge_group.customdatalayer_map);
}
MEM_freeN(tcld);
custom_data->data = NULL;
}
#define USE_FACE_SUBSTITUTE
#ifdef USE_FACE_SUBSTITUTE
# define FACE_SUBSTITUTE_INDEX INT_MIN
@ -292,8 +325,8 @@ static void tc_mesh_customdatacorrect_init_container_merge_group(TransDataContai
tcld->arena, tcld->merge_group.data_len * sizeof(*tcld->merge_group.data));
}
static struct TransCustomDataLayer *tc_mesh_customdatacorrect_create(TransDataContainer *tc,
const bool use_merge_group)
static struct TransCustomDataLayer *tc_mesh_customdatacorrect_create_impl(
TransDataContainer *tc, const bool use_merge_group)
{
BMEditMesh *em = BKE_editmesh_from_object(tc->obedit);
BMesh *bm = em->bm;
@ -341,18 +374,41 @@ static struct TransCustomDataLayer *tc_mesh_customdatacorrect_create(TransDataCo
return tcld;
}
static void tc_mesh_customdata_create(TransDataContainer *tc, const bool use_merge_group)
static void tc_mesh_customdatacorrect_create(TransDataContainer *tc, const bool use_merge_group)
{
struct TransCustomDataLayer *customdatacorrect;
customdatacorrect = tc_mesh_customdatacorrect_create(tc, use_merge_group);
customdatacorrect = tc_mesh_customdatacorrect_create_impl(tc, use_merge_group);
if (!customdatacorrect) {
return;
}
BLI_assert(tc->custom.type.data == NULL);
tc->custom.type.data = customdatacorrect;
tc->custom.type.free_cb = tc_mesh_customdatacorrect_free_fn;
struct TransCustomDataMesh *tcmd = tc_mesh_customdata_ensure(tc);
BLI_assert(tcmd->cd_layer_correct == NULL);
tcmd->cd_layer_correct = customdatacorrect;
}
/** Free a #TransCustomDataLayer and all the resources it references. */
static void tc_mesh_customdatacorrect_free(struct TransCustomDataLayer *tcld)
{
  bmesh_edit_end(tcld->bm, BMO_OPTYPE_FLAG_UNTAN_MULTIRES);

  if (tcld->bm_origfaces) {
    BM_mesh_free(tcld->bm_origfaces);
  }
  if (tcld->origfaces) {
    BLI_ghash_free(tcld->origfaces, NULL, NULL);
  }
  if (tcld->merge_group.origverts) {
    BLI_ghash_free(tcld->merge_group.origverts, NULL, NULL);
  }
  if (tcld->arena) {
    BLI_memarena_free(tcld->arena);
  }
  if (tcld->merge_group.customdatalayer_map) {
    MEM_freeN(tcld->merge_group.customdatalayer_map);
  }

  MEM_freeN(tcld);
}
void transform_convert_mesh_customdatacorrect_init(TransInfo *t)
@ -390,10 +446,14 @@ void transform_convert_mesh_customdatacorrect_init(TransInfo *t)
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
if (tc->custom.type.data != NULL) {
tc_mesh_customdatacorrect_free_fn(t, tc, &tc->custom.type);
struct TransCustomDataMesh *tcmd = tc->custom.type.data;
if (tcmd && tcmd->cd_layer_correct) {
tc_mesh_customdatacorrect_free(tcmd->cd_layer_correct);
tcmd->cd_layer_correct = NULL;
}
}
tc_mesh_customdata_create(tc, use_merge_group);
tc_mesh_customdatacorrect_create(tc, use_merge_group);
}
}
@ -555,10 +615,11 @@ static void tc_mesh_customdatacorrect_apply_vert(struct TransCustomDataLayer *tc
static void tc_mesh_customdatacorrect_apply(TransDataContainer *tc, bool is_final)
{
if (!tc->custom.type.data) {
struct TransCustomDataMesh *tcmd = tc->custom.type.data;
struct TransCustomDataLayer *tcld = tcmd ? tcmd->cd_layer_correct : NULL;
if (tcld == NULL) {
return;
}
struct TransCustomDataLayer *tcld = tc->custom.type.data;
const bool use_merge_group = tcld->use_merge_group;
struct TransCustomDataMergeGroup *merge_data = tcld->merge_group.data;
@ -590,7 +651,8 @@ static void tc_mesh_customdatacorrect_apply(TransDataContainer *tc, bool is_fina
static void tc_mesh_customdatacorrect_restore(struct TransInfo *t)
{
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
struct TransCustomDataLayer *tcld = tc->custom.type.data;
struct TransCustomDataMesh *tcmd = tc->custom.type.data;
struct TransCustomDataLayer *tcld = tcmd ? tcmd->cd_layer_correct : NULL;
if (!tcld) {
continue;
}
@ -1617,6 +1679,100 @@ void createTransEditVerts(TransInfo *t)
/** \name Recalc Mesh Data
* \{ */
/** Filter callback for #BM_mesh_partial_create_from_verts:
 * accept vertices tagged (with #BM_ELEM_TAG) for transform. */
static bool bm_vert_tag_filter_fn(BMVert *v, void *UNUSED(user_data))
{
  return BM_elem_flag_test(v, BM_ELEM_TAG);
}
/**
 * Return the partial update cache for this container, creating (or recreating)
 * it when needed — tagging every transformed vertex (including mirror copies)
 * with #BM_ELEM_TAG and building the connectivity data from those vertices.
 */
static BMPartialUpdate *tc_mesh_ensure_partial_update(TransInfo *t, TransDataContainer *tc)
{
  struct TransCustomDataMesh *tcmd = tc_mesh_customdata_ensure(tc);

  if (tcmd->partial_update.cache) {

    /* Recalculate partial update data when the proportional editing size changes.
     *
     * Note that decreasing the proportional editing size requires the existing
     * partial data is used before recreating this partial data at the smaller size.
     * Since excluding geometry from being transformed requires an update.
     *
     * Extra logic is needed to account for this situation. */

    bool recalc;
    if (tcmd->partial_update.prop_size_prev < t->prop_size) {
      /* Size increase, simply recalculate. */
      recalc = true;
    }
    else if (tcmd->partial_update.prop_size_prev > t->prop_size) {
      /* Size decreased, first use this partial data since reducing the size will transform
       * geometry which needs recalculating. */
      tcmd->partial_update.prop_size_prev = t->prop_size;
      recalc = false;
    }
    else if (tcmd->partial_update.prop_size != t->prop_size) {
      BLI_assert(tcmd->partial_update.prop_size > tcmd->partial_update.prop_size_prev);
      recalc = true;
    }
    else {
      BLI_assert(t->prop_size == tcmd->partial_update.prop_size_prev);
      recalc = false;
    }

    if (!recalc) {
      return tcmd->partial_update.cache;
    }

    BM_mesh_partial_destroy(tcmd->partial_update.cache);
    tcmd->partial_update.cache = NULL;
  }

  BMEditMesh *em = BKE_editmesh_from_object(tc->obedit);
  /* Clear all vertex tags so only the vertices tagged below are considered. */
  BM_mesh_elem_hflag_disable_all(em->bm, BM_VERT, BM_ELEM_TAG, false);

  int verts_len = 0;
  int i;
  TransData *td;
  for (i = 0, td = tc->data; i < tc->data_len; i++, td++) {
    if (td->factor != 0.0f) {
      BMVert *v = (BMVert *)td->extra;
      BM_elem_flag_enable(v, BM_ELEM_TAG);
      verts_len += 1;
    }
  }

  TransDataMirror *td_mirror = tc->data_mirror;
  for (i = 0; i < tc->data_mirror_len; i++, td_mirror++) {
    BMVert *v_mirr = (BMVert *)POINTER_OFFSET(td_mirror->loc_src, -offsetof(BMVert, co));
    /* The equality check is to account for the case when topology mirror moves
     * the vertex from its original location to match its symmetrical position,
     * with proportional editing enabled. */
    if (BM_elem_flag_test(v_mirr, BM_ELEM_TAG) || !equals_v3v3(td_mirror->loc, td_mirror->iloc)) {
      BMVert *v_mirr_other = (BMVert *)td_mirror->extra;
      /* This assert should never fail since there is no overlap
       * between mirrored vertices and non-mirrored. */
      BLI_assert(!BM_elem_flag_test(v_mirr_other, BM_ELEM_TAG));
      BM_elem_flag_enable(v_mirr_other, BM_ELEM_TAG);
      verts_len += 1;
    }
  }

  tcmd->partial_update.cache = BM_mesh_partial_create_from_verts(em->bm,
                                                                 &(BMPartialUpdate_Params){
                                                                     .do_tessellate = true,
                                                                     .do_normals = true,
                                                                 },
                                                                 verts_len,
                                                                 bm_vert_tag_filter_fn,
                                                                 NULL);

  tcmd->partial_update.prop_size_prev = t->prop_size;
  tcmd->partial_update.prop_size = t->prop_size;

  return tcmd->partial_update.cache;
}
static void tc_mesh_transdata_mirror_apply(TransDataContainer *tc)
{
if (tc->use_mirror_axis_any) {
@ -1678,8 +1834,22 @@ void recalcData_mesh(TransInfo *t)
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
DEG_id_tag_update(tc->obedit->data, ID_RECALC_GEOMETRY);
BMEditMesh *em = BKE_editmesh_from_object(tc->obedit);
EDBM_mesh_normals_update(em);
BKE_editmesh_looptri_calc(em);
/* The additional cost of generating the partial connectivity data isn't justified
* when all data needs to be updated.
*
* While proportional editing can cause all geometry to need updating with a partial selection.
* It's impractical to calculate this ahead of time.
 * Further, the downside of using partial updates when they're not needed is negligible. */
if (em->bm->totvert == em->bm->totvertsel) {
EDBM_mesh_normals_update(em);
BKE_editmesh_looptri_calc(em);
}
else {
BMPartialUpdate *partial_update_cache = tc_mesh_ensure_partial_update(t, tc);
BM_mesh_normals_update_with_partial(em->bm, partial_update_cache);
BKE_editmesh_looptri_calc_with_partial(em, partial_update_cache);
}
}
}
/** \} */