Dyntopo: do a little bit of code cleanup

Joseph Eagar 2021-05-31 10:53:22 -07:00
parent 552e44bd25
commit 4adc0a7798
3 changed files with 139 additions and 95 deletions

View File

@ -1077,6 +1077,8 @@ Object *multires_dump_grids_bmesh(Object *bmob, BMesh *bm)
return ob;
}
//#define LIMIT_MAX_DISPLACEMENT
static void multires_bmesh_space_set_cb(void *__restrict userdata,
const int pidx,
const TaskParallelTLS *__restrict UNUSED(tls))
@ -1084,7 +1086,6 @@ static void multires_bmesh_space_set_cb(void *__restrict userdata,
MultiresThreadedData *tdata = userdata;
int cd_mdisps_off = tdata->cd_mdisps_off;
BMesh *bm = tdata->bm;
MultiResSpace op = tdata->bmop;
BMFace *f = bm->ftable[pidx];
@ -1092,7 +1093,10 @@ static void multires_bmesh_space_set_cb(void *__restrict userdata,
int S, x, y;
BMLoop *l;
#ifdef LIMIT_MAX_DISPLACEMENT
l = f->l_first;
float cent[3];
int tot = 0;
@ -1105,20 +1109,22 @@ static void multires_bmesh_space_set_cb(void *__restrict userdata,
} while (l != f->l_first);
mul_v3_fl(cent, 1.0f / (float)tot);
#endif
l = f->l_first;
S = 0;
do {
MDisps *mdisp = BM_ELEM_CD_GET_VOID_P(l, cd_mdisps_off);
float(*dispgrid)[3] = NULL;
dispgrid = mdisp->disps;
/* Try to limit numerical instability by clamping max displacement. */
#ifdef LIMIT_MAX_DISPLACEMENT
float maxlen = len_v3v3(l->v->co, cent) * 15.0f;
maxlen = MAX2(maxlen, 0.00001f);
#endif
for (y = 0; y < gridSize; y++) {
for (x = 0; x < gridSize; x++) {
@ -1161,11 +1167,17 @@ static void multires_bmesh_space_set_cb(void *__restrict userdata,
// Try to prevent numerical errors (NaN or runaway displacement).
float len = len_v3(data);
#ifdef LIMIT_MAX_DISPLACEMENT
if (len > maxlen) {
mul_v3_fl(data, maxlen / len);
} else if (isnan(len)) {
zero_v3(data);
}
#else
if (isnan(len)) {
zero_v3(data);
}
#endif
break;
}
}
@ -1176,15 +1188,18 @@ static void multires_bmesh_space_set_cb(void *__restrict userdata,
} while (l != f->l_first);
}
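To make the guard in the hunk above easier to follow outside the diff, here is a minimal self-contained sketch of the same clamp-or-zero logic. The helper name is hypothetical, and plain math.h calls stand in for Blender's len_v3/mul_v3_fl/zero_v3:

#include <math.h>

/* Illustrative stand-in for the guard in multires_bmesh_space_set_cb():
 * clamp runaway displacements when LIMIT_MAX_DISPLACEMENT is defined,
 * and zero NaN displacements either way. */
static void guard_displacement(float data[3], float maxlen)
{
  const float len = sqrtf(data[0] * data[0] + data[1] * data[1] + data[2] * data[2]);

  if (isnan(len)) {
    data[0] = data[1] = data[2] = 0.0f; /* Matches zero_v3(data). */
    return;
  }

#ifdef LIMIT_MAX_DISPLACEMENT
  if (len > maxlen) {
    const float fac = maxlen / len; /* Matches mul_v3_fl(data, maxlen / len). */
    data[0] *= fac;
    data[1] *= fac;
    data[2] *= fac;
  }
#else
  (void)maxlen; /* Unused when clamping is disabled. */
#endif
}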
/* The original version of this function was broken (and subsequently removed)
 * because it didn't properly set the subdivision level; it also used the old
 * multires system. The new subdiv API is used instead. */
void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
{
if (!bm->totface || !CustomData_has_layer(&bm->ldata, CD_MDISPS)) {
return;
}
// Get the multires settings.
MultiresModifierData *mmd = bm->haveMultiResSettings ? &bm->multires : NULL;
if (!mmd && ob) {
@ -1194,7 +1209,8 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
if (!mmd || !CustomData_has_layer(&bm->ldata, CD_MDISPS)) {
return;
}
// Cache the multires settings in the bmesh.
bm->multiresSpace = mode;
Mesh _me, *me = &_me;
@ -1212,14 +1228,17 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
BM_mesh_bm_to_me_for_eval(bm, me, &extra);
SubdivSettings settings2;
// Copy the settings, then set the subdivision level to max.
MultiresModifierData mmdcpy = *mmd;
mmdcpy.lvl = mmdcpy.sculptlvl = mmdcpy.renderlvl = mmdcpy.totlvl;
// Set up the subdivision surface.
BKE_multires_subdiv_settings_init(&settings2, &mmdcpy);
Subdiv *sd = BKE_subdiv_new_from_mesh(&settings2, me);
BKE_subdiv_eval_begin_from_mesh(sd, me, NULL);
// Create a fake object with .sculpt set to NULL.
Object fakeob;
if (ob) {
fakeob = *ob;
@ -1231,10 +1250,10 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
BLI_addtail(&fakeob.modifiers, &mmdcpy);
}
int i, gridSize;
int totpoly = bm->totface;
// Force a paranoid recalc of indices and lookup tables.
bm->elem_index_dirty |= BM_FACE;
bm->elem_table_dirty |= BM_FACE;
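The dirty flags above normally pair with an explicit rebuild before indexed access, since the task callback reads bm->ftable[pidx]. A hedged sketch of that pattern; BM_mesh_elem_index_ensure and BM_mesh_elem_table_ensure are the standard BMesh calls, though the actual rebuild site is not visible in this diff:

#include "bmesh.h" /* Assumed header; provides BMesh, BM_FACE and the ensure calls. */

/* Hypothetical wrapper: mark face indices and the face lookup table dirty,
 * then rebuild them so bm->ftable[i] access is valid. */
static void force_face_table_rebuild(BMesh *bm)
{
  bm->elem_index_dirty |= BM_FACE;
  bm->elem_table_dirty |= BM_FACE;

  /* Rebuild before any indexed access (assumed call site, not shown here). */
  BM_mesh_elem_index_ensure(bm, BM_FACE);
  BM_mesh_elem_table_ensure(bm, BM_FACE);
}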
@ -1243,12 +1262,14 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
gridSize = multires_side_tot[mmd->totlvl];
/* when adding new faces in edit mode, need to allocate disps */
int cd_disp_off = CustomData_get_offset(&bm->ldata, CD_MDISPS);
BMFace *f;
BMIter iter;
i = 0;
/* Check that all grids are allocated, and set up indices. */
BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
BMIter iter2;
BMLoop *l;
@ -1257,6 +1278,8 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
BM_ITER_ELEM (l, &iter2, f, BM_LOOPS_OF_FACE) {
MDisps *mdisp = BM_ELEM_CD_GET_VOID_P(l, cd_disp_off);
/* Allocate new disps; this can happen with newly created faces. */
if (!mdisp->disps) {
multires_reallocate_mdisps(1, mdisp, mmd->totlvl);
}
@ -1273,6 +1296,8 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
}
}
// Do the space conversion.
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.min_iter_per_thread = CCG_TASK_LIMIT;
@ -1291,13 +1316,7 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode)
};
BLI_task_parallel_range(0, totpoly, &data, multires_bmesh_space_set_cb, &settings);
BKE_mesh_free(me);
BKE_subdiv_free(sd);
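For context, a hedged usage sketch of the rewritten function. MULTIRES_SPACE_TANGENT is the mode this file works with; MULTIRES_SPACE_ABSOLUTE, the wrapper name, and the surrounding setup are assumptions for illustration:

#include "BKE_multires.h" /* Assumed header for BKE_multires_bmesh_space_set(). */
#include "bmesh.h"

/* Hypothetical call site: convert the BMesh's MDisps into tangent space,
 * do topology-changing work, then convert back to absolute space.
 * `ob` and `bm` are assumed to be a valid multires object and its BMesh. */
static void example_space_roundtrip(Object *ob, BMesh *bm)
{
  BKE_multires_bmesh_space_set(ob, bm, MULTIRES_SPACE_TANGENT);
  /* ... dyntopo edits, writing back to the Mesh, etc. ... */
  BKE_multires_bmesh_space_set(ob, bm, MULTIRES_SPACE_ABSOLUTE);
}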

View File

@ -35,6 +35,7 @@
#include "BKE_DerivedMesh.h"
#include "BKE_ccg.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_subdiv.h"
#include "BKE_subdiv_eval.h"
@ -1040,8 +1041,8 @@ typedef struct AverageGridsBoundariesData {
SubdivCCG *subdiv_ccg;
CCGKey *key;
/* Optional lookup table. Maps task index to index in `subdiv_ccg->adjacent_edges`. */
int *adjacent_edge_index_map;
} AverageGridsBoundariesData;
typedef struct AverageGridsBoundariesTLSData {
@ -1093,7 +1094,9 @@ static void subdiv_ccg_average_grids_boundaries_task(void *__restrict userdata_v
const TaskParallelTLS *__restrict tls_v)
{
AverageGridsBoundariesData *data = userdata_v;
const int adjacent_edge_index = data->adjacent_edge_index_map ?
data->adjacent_edge_index_map[n] :
n;
AverageGridsBoundariesTLSData *tls = tls_v->userdata_chunk;
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
@ -1114,7 +1117,7 @@ typedef struct AverageGridsCornerData {
CCGKey *key;
/* Optional lookup table. Maps task index to index in `subdiv_ccg->adjacent_vertices`. */
int *adjacent_vert_index_map;
} AverageGridsCornerData;
static void subdiv_ccg_average_grids_corners(SubdivCCG *subdiv_ccg,
@ -1147,41 +1150,59 @@ static void subdiv_ccg_average_grids_corners_task(void *__restrict userdata_v,
const TaskParallelTLS *__restrict UNUSED(tls_v))
{
AverageGridsCornerData *data = userdata_v;
const int adjacent_vertex_index = data->adjacent_vert_index_map ?
data->adjacent_vert_index_map[n] :
n;
SubdivCCG *subdiv_ccg = data->subdiv_ccg;
CCGKey *key = data->key;
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[adjacent_vertex_index];
subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
}
static void subdiv_ccg_average_boundaries(SubdivCCG *subdiv_ccg,
CCGKey *key,
int *adjacent_edge_index_map,
int num_adjacent_edges)
{
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
AverageGridsBoundariesData boundaries_data = {
.subdiv_ccg = subdiv_ccg, .key = key, .adjacent_edge_index_map = adjacent_edge_index_map};
AverageGridsBoundariesTLSData tls_data = {NULL};
parallel_range_settings.userdata_chunk = &tls_data;
parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
parallel_range_settings.func_free = subdiv_ccg_average_grids_boundaries_free;
BLI_task_parallel_range(0,
num_adjacent_edges,
&boundaries_data,
subdiv_ccg_average_grids_boundaries_task,
&parallel_range_settings);
}
static void subdiv_ccg_average_all_boundaries(SubdivCCG *subdiv_ccg, CCGKey *key)
{
subdiv_ccg_average_boundaries(subdiv_ccg, key, NULL, subdiv_ccg->num_adjacent_edges);
}
static void subdiv_ccg_average_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
int *adjacent_vert_index_map,
int num_adjacent_vertices)
{
TaskParallelSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
AverageGridsCornerData corner_data = {
.subdiv_ccg = subdiv_ccg, .key = key, .adjacent_vert_index_map = adjacent_vert_index_map};
BLI_task_parallel_range(0,
num_adjacent_vertices,
&corner_data,
subdiv_ccg_average_grids_corners_task,
&parallel_range_settings);
}
static void subdiv_ccg_average_all_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
subdiv_ccg_average_corners(subdiv_ccg, key, NULL, subdiv_ccg->num_adjacent_vertices);
}
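The change above is a parameterize-and-wrap refactor: the core function takes an optional index map plus an explicit count, and the old _all entry points become one-line wrappers. A generic sketch of the shape, with all names hypothetical:

typedef struct Ctx {
  int num_items;
} Ctx;

static void average_one_item(Ctx *ctx, int item_index)
{
  (void)ctx;
  (void)item_index; /* Real code would average grid data here. */
}

/* Core: operates on a subset described by an optional index map. */
static void average_items(Ctx *ctx, const int *index_map, int num_items)
{
  for (int n = 0; n < num_items; n++) {
    const int item_index = index_map ? index_map[n] : n;
    average_one_item(ctx, item_index);
  }
}

/* Thin wrapper: the old "all items" behavior, now a single call. */
static void average_all_items(Ctx *ctx)
{
  average_items(ctx, NULL, ctx->num_items);
}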
static void subdiv_ccg_average_all_boundaries_and_corners(SubdivCCG *subdiv_ccg, CCGKey *key)
{
@ -1209,19 +1230,18 @@ void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
subdiv_ccg_average_all_boundaries_and_corners(subdiv_ccg, &key);
}
static void subdiv_ccg_affected_face_adjacency(SubdivCCG *subdiv_ccg,
struct CCGFace **effected_faces,
int num_effected_faces,
GSet *r_adjacent_vertices,
GSet *r_adjacent_edges)
{
Subdiv *subdiv = subdiv_ccg->subdiv;
OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner;
GSetIterator gi;
StaticOrHeapIntStorage face_vertices_storage;
StaticOrHeapIntStorage face_edges_storage;
static_or_heap_storage_init(&face_vertices_storage);
static_or_heap_storage_init(&face_edges_storage);
@ -1243,72 +1263,63 @@ void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
int edge_vertices[2];
topology_refiner->getEdgeVertices(topology_refiner, edge_index, edge_vertices);
const bool is_edge_flipped = (edge_vertices[0] != vertex_index);
SubdivCCGAdjacentEdge *adjacent_edge = &subdiv_ccg->adjacent_edges[edge_index];
/* Grid which is adjacent to the current corner. */
const int grid_index = face->start_grid_index + corner;
BLI_gset_add(r_adjacent_edges, adjacent_edge);
SubdivCCGAdjacentVertex *adjacent_vertex = &subdiv_ccg->adjacent_vertices[vertex_index];
BLI_gset_add(r_adjacent_vertices, adjacent_vertex);
}
}
static_or_heap_storage_free(&face_vertices_storage);
static_or_heap_storage_free(&face_edges_storage);
}
void subdiv_ccg_average_faces_boundaries_and_corners(SubdivCCG *subdiv_ccg,
CCGKey *key,
struct CCGFace **effected_faces,
int num_effected_faces)
{
GSet *adjacent_vertices = BLI_gset_ptr_new(__func__);
GSet *adjacent_edges = BLI_gset_ptr_new(__func__);
GSetIterator gi;
subdiv_ccg_affected_face_adjacency(
subdiv_ccg, effected_faces, num_effected_faces, adjacent_vertices, adjacent_edges);
int *adjacent_vertex_index_map;
int *adjacent_edge_index_map;
StaticOrHeapIntStorage index_heap;
static_or_heap_storage_init(&index_heap);
int i = 0;
/* Average boundaries. */
adjacent_edge_index_map = static_or_heap_storage_get(&index_heap, BLI_gset_len(adjacent_edges));
GSET_ITER_INDEX (gi, adjacent_edges, i) {
SubdivCCGAdjacentEdge *adjacent_edge = BLI_gsetIterator_getKey(&gi);
adjacent_edge_index_map[i] = adjacent_edge - subdiv_ccg->adjacent_edges;
}
subdiv_ccg_average_boundaries(
subdiv_ccg, key, adjacent_edge_index_map, BLI_gset_len(adjacent_edges));
/* Average corners. */
adjacent_vertex_index_map = static_or_heap_storage_get(&index_heap,
BLI_gset_len(adjacent_vertices));
GSET_ITER_INDEX (gi, adjacent_vertices, i) {
SubdivCCGAdjacentVertex *adjacent_vertex = BLI_gsetIterator_getKey(&gi);
adjacent_vertex_index_map[i] = adjacent_vertex - subdiv_ccg->adjacent_vertices;
}
subdiv_ccg_average_corners(
subdiv_ccg, key, adjacent_vertex_index_map, BLI_gset_len(adjacent_vertices));
BLI_gset_free(adjacent_vertices, NULL);
BLI_gset_free(adjacent_edges, NULL);
static_or_heap_storage_free(&index_heap);
}
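The index maps built above rely on pointer subtraction against the base of the adjacency arrays to recover array indices. A tiny self-contained demonstration in plain C, with hypothetical types:

#include <stdio.h>

typedef struct Item {
  int value;
} Item;

int main(void)
{
  Item items[8];
  const Item *picked = &items[5];

  /* Pointer subtraction against the array base yields the element index,
   * just like `adjacent_edge - subdiv_ccg->adjacent_edges` above. */
  const int index = (int)(picked - items);
  printf("index = %d\n", index); /* Prints: index = 5 */
  return 0;
}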
typedef struct StitchFacesInnerGridsData {

View File

@ -846,20 +846,30 @@ static bool modifier_apply_obdata(
return true;
}
ATTR_NO_OPT bool ED_object_modifier_apply(Main *bmain,
ReportList *reports,
Depsgraph *depsgraph,
Scene *scene,
Object *ob,
ModifierData *md,
int mode,
bool keep_modifier)
{
if (BKE_object_is_in_editmode(ob)) {
BKE_report(reports, RPT_ERROR, "Modifiers cannot be applied in edit mode");
return false;
}
bool allow_multi_user = mode == MODIFIER_APPLY_SHAPE;
if (md) {
const ModifierTypeInfo *mti = BKE_modifier_get_info(md->type);
allow_multi_user |= ELEM(
mti->type, eModifierTypeType_NonGeometrical, eModifierTypeType_OnlyDeform);
}
// bool allow_multi_user = md && md->type == eModifierType_DataTransfer || md->flag & ;
if (!allow_multi_user && ID_REAL_USERS(ob->data) > 1) {
BKE_report(reports, RPT_ERROR, "Modifiers cannot be applied to multi-user data");
return false;
}
@ -1385,7 +1395,8 @@ void OBJECT_OT_modifier_move_to_index(wmOperatorType *ot)
/** \name Apply Modifier Operator
* \{ */
#include "BLI_compiler_attrs.h"
ATTR_NO_OPT static bool modifier_apply_poll_ex(bContext *C, bool allow_shared)
{
if (!edit_modifier_poll_generic(C, &RNA_Modifier, 0, false, false)) {
return false;
@ -1396,6 +1407,9 @@ static bool modifier_apply_poll_ex(bContext *C, bool allow_shared)
Object *ob = (ptr.owner_id != NULL) ? (Object *)ptr.owner_id : ED_object_active_context(C);
ModifierData *md = ptr.data; /* May be NULL. */
allow_shared = true;
// allow_shared = allow_shared || (md && md->type == eModifierType_DataTransfer);
if (ID_IS_OVERRIDE_LIBRARY(ob) || ((ob->data != NULL) && ID_IS_OVERRIDE_LIBRARY(ob->data))) {
CTX_wm_operator_poll_msg_set(C, "Modifiers cannot be applied on override data");
return false;