Sculpt: various fixes
* Fixed a very nasty customdata bug: if the first layer of a given type was CD_TEMPORARY, then stripping it out of the customdata set corrupted the active layer references. I think I tracked down where this happened, but just to be safe I also wrote code to detect the corruption and regenerate the active refs. * Fixed the rotate brush. * Fixed the mesh filter crashing with automasking enabled.
This commit is contained in:
parent
0b5b8d0beb
commit
3d4e14b7e1
File diff suppressed because it is too large
Load Diff
|
@ -255,6 +255,13 @@ static bool check_builtin_init()
|
|||
SUBTYPE_SET(autosmooth_radius_scale, BRUSH_CHANNEL_PERCENT);
|
||||
SUBTYPE_SET(topology_rake_radius_scale, BRUSH_CHANNEL_PERCENT);
|
||||
|
||||
SETCAT(concave_mask_factor, "Automasking");
|
||||
SETCAT(automasking, "Automasking");
|
||||
SETCAT(automasking_boundary_edges_propagation_steps, "Automasking");
|
||||
|
||||
BrushChannelType *def = GETDEF(concave_mask_factor);
|
||||
def->mappings.pressure.inv = true;
|
||||
|
||||
// don't group strength/radius/direction in subpanels
|
||||
// SETCAT(strength, "Basic");
|
||||
// SETCAT(radius, "Basic");
|
||||
|
|
|
@ -2240,6 +2240,65 @@ void CustomData_update_typemap(CustomData *data)
|
|||
}
|
||||
}
|
||||
|
||||
/**
 * Regenerate the per-type active layer references (active, clone, mask, random)
 * after layers have been added, removed or reordered, repairing refs that have
 * become corrupted (e.g. when the first layer of a type was stripped).
 *
 * NOTE(review): assumes data->typemap is up to date (callers run
 * CustomData_update_typemap() first) and that all layers of one type are
 * contiguous, starting at data->typemap[type] -- TODO confirm against
 * CustomData invariants.
 */
static void customdata_regen_active_refs(CustomData *data)
{
  int i;

  /* Snapshot the active indices stored in each type's base (first) layer
   * BEFORE flagging. The flagging loop below overwrites base->active with a
   * boolean flag; comparing later layers of the same type against the
   * already-overwritten base would corrupt every active index != 0. */
  int active[CD_NUMTYPES] = {0};
  int active_rnd[CD_NUMTYPES] = {0};
  int active_clone[CD_NUMTYPES] = {0};
  int active_mask[CD_NUMTYPES] = {0};

  for (i = 0; i < CD_NUMTYPES; i++) {
    if (data->typemap[i] != -1) {
      CustomDataLayer *base = data->layers + data->typemap[i];

      active[i] = base->active;
      active_rnd[i] = base->active_rnd;
      active_clone[i] = base->active_clone;
      active_mask[i] = base->active_mask;
    }
  }

  /* Explicitly flag active layers: a layer is active if its offset from the
   * base layer of its type matches the snapshotted active index. */
  for (i = 0; i < data->totlayer; i++) {
    CustomDataLayer *layer = &data->layers[i];
    CustomDataLayer *base = data->layers + data->typemap[layer->type];
    int n = (int)(layer - base);

    layer->active = n == active[layer->type];
    layer->active_clone = n == active_clone[layer->type];
    layer->active_mask = n == active_mask[layer->type];
    layer->active_rnd = n == active_rnd[layer->type];
  }

  /* Reset the refs in each base layer before rebuilding them. */
  for (i = 0; i < CD_NUMTYPES; i++) {
    if (data->typemap[i] != -1) {
      CustomDataLayer *base = data->layers + data->typemap[i];

      base->active = base->active_clone = base->active_mask = base->active_rnd = 0;
    }
  }

  /* Rebuild: store each flagged layer's offset in its type's base layer. */
  for (i = 0; i < data->totlayer; i++) {
    CustomDataLayer *layer = &data->layers[i];
    CustomDataLayer *base = data->layers + data->typemap[layer->type];
    int n = (int)(layer - base);

    if (layer->active) {
      base->active = n;
    }
    if (layer->active_mask) {
      base->active_mask = n;
    }
    if (layer->active_clone) {
      base->active_clone = n;
    }
    if (layer->active_rnd) {
      base->active_rnd = n;
    }
  }

  /* Propagate the rebuilt refs into every layer so all layers of a type
   * agree with their base layer. */
  for (i = 0; i < data->totlayer; i++) {
    CustomDataLayer *layer = &data->layers[i];
    CustomDataLayer *base = data->layers + data->typemap[layer->type];

    layer->active = base->active;
    layer->active_mask = base->active_mask;
    layer->active_clone = base->active_clone;
    layer->active_rnd = base->active_rnd;
  }
}
|
||||
|
||||
/* currently only used in BLI_assert */
|
||||
#ifndef NDEBUG
|
||||
static bool customdata_typemap_is_valid(const CustomData *data)
|
||||
|
@ -2268,6 +2327,8 @@ void CustomData_copy_all_layout(const struct CustomData *source, struct CustomDa
|
|||
dest->layers[i].data = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
customdata_regen_active_refs(dest);
|
||||
}
|
||||
|
||||
bool CustomData_merge(const struct CustomData *source,
|
||||
|
@ -2289,6 +2350,10 @@ bool CustomData_merge(const struct CustomData *source,
|
|||
int type = layer->type;
|
||||
int flag = layer->flag;
|
||||
|
||||
if (flag & CD_FLAG_NOCOPY) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type != lasttype) {
|
||||
number = 0;
|
||||
maxnumber = CustomData_layertype_layers_max(type);
|
||||
|
@ -2302,9 +2367,6 @@ bool CustomData_merge(const struct CustomData *source,
|
|||
number++;
|
||||
}
|
||||
|
||||
if (flag & CD_FLAG_NOCOPY) {
|
||||
continue;
|
||||
}
|
||||
if (layer->anonymous_id &&
|
||||
!BKE_anonymous_attribute_id_has_strong_references(layer->anonymous_id)) {
|
||||
/* This attribute is not referenced anymore, so it can be treated as if it didn't exist. */
|
||||
|
@ -2358,6 +2420,8 @@ bool CustomData_merge(const struct CustomData *source,
|
|||
}
|
||||
|
||||
CustomData_update_typemap(dest);
|
||||
customdata_regen_active_refs(dest);
|
||||
|
||||
return changed;
|
||||
}
|
||||
|
||||
|
@ -3116,6 +3180,20 @@ void CustomData_free_temporary(CustomData *data, int totelem)
|
|||
{
|
||||
int i, j;
|
||||
bool changed = false;
|
||||
|
||||
/* explicitly flag active layers */
|
||||
for (i = 0, j = 0; i < data->totlayer; i++) {
|
||||
CustomDataLayer *layer = &data->layers[i];
|
||||
CustomDataLayer *base = data->layers + data->typemap[layer->type];
|
||||
int n = layer - base;
|
||||
|
||||
layer->active = n == base->active;
|
||||
layer->active_clone = n == base->active_clone;
|
||||
layer->active_mask = n == base->active_mask;
|
||||
layer->active_rnd = n == base->active_rnd;
|
||||
}
|
||||
|
||||
/* free temp layers */
|
||||
for (i = 0, j = 0; i < data->totlayer; i++) {
|
||||
CustomDataLayer *layer = &data->layers[i];
|
||||
|
||||
|
@ -3126,12 +3204,56 @@ void CustomData_free_temporary(CustomData *data, int totelem)
|
|||
if ((layer->flag & CD_FLAG_TEMPORARY) == CD_FLAG_TEMPORARY) {
|
||||
customData_free_layer__internal(layer, totelem);
|
||||
changed = true;
|
||||
|
||||
// compact data->layers by not incrementing j here
|
||||
}
|
||||
else {
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
CustomData_update_typemap(data);
|
||||
|
||||
/* regenerate active refs */
|
||||
for (int i = 0; i < CD_NUMTYPES; i++) {
|
||||
if (data->typemap[i] != -1) {
|
||||
CustomDataLayer *base = data->layers + data->typemap[i];
|
||||
base->active = base->active_clone = base->active_mask = base->active_rnd = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* set active n in base layer for all types */
|
||||
for (i = 0; i < data->totlayer; i++) {
|
||||
CustomDataLayer *layer = &data->layers[i];
|
||||
CustomDataLayer *base = data->layers + data->typemap[layer->type];
|
||||
|
||||
int n = layer - base;
|
||||
|
||||
if (layer->active) {
|
||||
base->active = n;
|
||||
}
|
||||
if (layer->active_mask) {
|
||||
base->active_mask = n;
|
||||
}
|
||||
if (layer->active_clone) {
|
||||
base->active_clone = n;
|
||||
}
|
||||
if (layer->active_rnd) {
|
||||
base->active_rnd = n;
|
||||
}
|
||||
}
|
||||
|
||||
/* set active n in all layers */
|
||||
for (i = 0; i < data->totlayer; i++) {
|
||||
CustomDataLayer *layer = &data->layers[i];
|
||||
CustomDataLayer *base = data->layers + data->typemap[layer->type];
|
||||
|
||||
layer->active = base->active;
|
||||
layer->active_mask = base->active_mask;
|
||||
layer->active_clone = base->active_clone;
|
||||
layer->active_rnd = base->active_rnd;
|
||||
}
|
||||
|
||||
data->totlayer = j;
|
||||
|
||||
if (data->totlayer <= data->maxlayer - CUSTOMDATA_GROW) {
|
||||
|
@ -5656,4 +5778,5 @@ void CustomData_blend_read(BlendDataReader *reader, CustomData *data, int count)
|
|||
}
|
||||
|
||||
CustomData_update_typemap(data);
|
||||
customdata_regen_active_refs(data); // check for corrupted active layer refs
|
||||
}
|
||||
|
|
|
@ -2431,6 +2431,8 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
|
|||
Mesh *mesh_orig = BKE_object_get_original_mesh(ob);
|
||||
bool is_dyntopo = (mesh_orig->flag & ME_SCULPT_DYNAMIC_TOPOLOGY);
|
||||
|
||||
void SCULPT_update_customdata_refs(SculptSession * ss);
|
||||
|
||||
if (is_dyntopo) {
|
||||
BMesh *bm = SCULPT_dyntopo_empty_bmesh();
|
||||
|
||||
|
@ -2447,10 +2449,11 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
|
|||
.cd_mask_extra = CD_MASK_DYNTOPO_VERT}));
|
||||
|
||||
SCULPT_dyntopo_node_layers_add(ob->sculpt);
|
||||
|
||||
SCULPT_undo_ensure_bmlog(ob);
|
||||
|
||||
pbvh = build_pbvh_for_dynamic_topology(ob);
|
||||
|
||||
SCULPT_update_customdata_refs(ob->sculpt);
|
||||
}
|
||||
else {
|
||||
Object *object_eval = DEG_get_evaluated_object(depsgraph, ob);
|
||||
|
|
|
@ -3337,6 +3337,10 @@ void BKE_pbvh_update_offsets(PBVH *pbvh,
|
|||
const int cd_dyn_vert,
|
||||
const int cd_face_areas)
|
||||
{
|
||||
if (pbvh->bm) {
|
||||
pbvh->cd_vcol_offset = CustomData_get_offset(&pbvh->bm->vdata, CD_PROP_COLOR);
|
||||
}
|
||||
|
||||
pbvh->cd_face_node_offset = cd_face_node_offset;
|
||||
pbvh->cd_vert_node_offset = cd_vert_node_offset;
|
||||
pbvh->cd_face_area = cd_face_areas;
|
||||
|
|
|
@ -70,7 +70,7 @@ void bm_free_ids_check(BMesh *bm, uint id)
|
|||
}
|
||||
}
|
||||
|
||||
static void bm_id_freelist_take(BMesh *bm, uint id)
|
||||
void bm_id_freelist_take(BMesh *bm, uint id)
|
||||
{
|
||||
bm_free_ids_check(bm, id);
|
||||
|
||||
|
|
|
@ -92,15 +92,15 @@ typedef struct myiter {
|
|||
|
||||
# define GHashIterator myiter
|
||||
|
||||
#ifdef __GNUC__
|
||||
# ifdef __GNUC__
|
||||
/* I can't even *cast* signed ints in gcc's sign-conversion warning? gcc 10.3.0 -joeedh */
|
||||
#pragma GCC diagnostic ignored "-Wsign-conversion"
|
||||
#endif
|
||||
# pragma GCC diagnostic ignored "-Wsign-conversion"
|
||||
# endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
# ifdef __GNUC__
|
||||
/* I can't even *cast* signed ints in gcc's sign-conversion warning? gcc 10.3.0 -joeedh */
|
||||
#pragma GCC diagnostic ignored "-Wsign-conversion"
|
||||
#endif
|
||||
# pragma GCC diagnostic ignored "-Wsign-conversion"
|
||||
# endif
|
||||
|
||||
# define BLI_ghash_free(sh, a, b) free_smallhash(sh)
|
||||
# define BLI_ghash_int_new_ex(a, b) new_smallhash()
|
||||
|
@ -462,6 +462,10 @@ static uint bm_log_vert_id_get(BMLog *log, BMVert *v)
|
|||
/* Get a vertex from its unique ID */
|
||||
static BMVert *bm_log_vert_from_id(BMLog *log, uint id)
|
||||
{
|
||||
if (log->bm->idmap.map && id >= ((unsigned int)log->bm->idmap.map_size)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return (BMVert *)BM_ELEM_FROM_ID(log->bm, id);
|
||||
}
|
||||
|
||||
|
@ -2170,6 +2174,8 @@ static void full_copy_swap(BMesh *bm, BMLog *log, BMLogEntry *entry)
|
|||
static void bm_log_undo_intern(
|
||||
BMesh *bm, BMLog *log, BMLogEntry *entry, BMLogCallbacks *callbacks, const char *node_layer_id)
|
||||
{
|
||||
log->bm = bm;
|
||||
|
||||
bm->elem_index_dirty |= BM_VERT | BM_EDGE | BM_FACE;
|
||||
bm->elem_table_dirty |= BM_VERT | BM_EDGE | BM_FACE;
|
||||
|
||||
|
|
|
@ -718,6 +718,10 @@ void BM_mesh_bm_from_me(Object *ob,
|
|||
|
||||
int iters[] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, -1, BM_FACES_OF_MESH};
|
||||
|
||||
#ifdef WITH_BM_ID_FREELIST
|
||||
uint max_id = 0;
|
||||
#endif
|
||||
|
||||
// find first element in each id run and assign to map
|
||||
for (int i = 0; i < 4; i++) {
|
||||
int type = 1 << i;
|
||||
|
@ -740,7 +744,9 @@ void BM_mesh_bm_from_me(Object *ob,
|
|||
|
||||
do {
|
||||
uint id = (uint)BM_ELEM_GET_ID(bm, (BMElem *)l);
|
||||
|
||||
#ifdef WITH_BM_ID_FREELIST
|
||||
max_id = MAX2(max_id, i);
|
||||
#endif
|
||||
if (!BM_ELEM_FROM_ID(bm, id)) {
|
||||
bm_assign_id_intern(bm, (BMElem *)l, id);
|
||||
}
|
||||
|
@ -748,6 +754,9 @@ void BM_mesh_bm_from_me(Object *ob,
|
|||
}
|
||||
else {
|
||||
uint id = (uint)BM_ELEM_GET_ID(bm, elem);
|
||||
#ifdef WITH_BM_ID_FREELIST
|
||||
max_id = MAX2(max_id, i);
|
||||
#endif
|
||||
|
||||
if (!BM_ELEM_FROM_ID(bm, id)) {
|
||||
bm_assign_id_intern(bm, elem, id);
|
||||
|
@ -791,11 +800,61 @@ void BM_mesh_bm_from_me(Object *ob,
|
|||
|
||||
if (BM_ELEM_FROM_ID(bm, id) != elem) {
|
||||
bm_alloc_id(bm, elem);
|
||||
|
||||
id = (uint)BM_ELEM_GET_ID(bm, elem);
|
||||
#ifdef WITH_BM_ID_FREELIST
|
||||
max_id = MAX2(max_id, id);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef WITH_BM_ID_FREELIST
|
||||
max_id = MAX2(bm->idmap.maxid, max_id);
|
||||
bm->idmap.maxid = max_id;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef WITH_BM_ID_FREELIST
|
||||
/*ensure correct id freelist*/
|
||||
if (bm->idmap.flag & BM_HAS_IDS) {
|
||||
bm_free_ids_check(bm, bm->idmap.max_id);
|
||||
|
||||
MEM_SAFE_FREE(bm->idmap.freelist);
|
||||
bm->idmap.freelist_len = 0;
|
||||
bm->idmap.freelist_size = 0;
|
||||
bm->idmap.freelist = NULL;
|
||||
|
||||
memset(bm->idmap.free_ids, 0, bm->idmap.free_ids_size * sizeof(*bm->idmap.free_ids));
|
||||
|
||||
BLI_mempool_iter miter;
|
||||
for (int i = 0; i < 4; i++) {
|
||||
int htype = 1 << i;
|
||||
|
||||
if (!(bm->idmap.flag & htype)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
BLI_mempool *pool = (&bm->vpool)[i];
|
||||
BLI_mempool_iternew(pool, &miter);
|
||||
BMElem *elem = (BMElem *)BLI_mempool_iterstep(&miter);
|
||||
|
||||
for (; elem; elem = (BMElem *)BLI_mempool_iterstep(&miter)) {
|
||||
uint id = (uint)BM_ELEM_GET_ID(bm, elem);
|
||||
|
||||
BLI_BITMAP_SET(bm->idmap.free_ids, id, true);
|
||||
}
|
||||
}
|
||||
|
||||
for (uint i = 0; i < max_id; i++) {
|
||||
if (!BLI_BITMAP_TEST(bm->idmap.free_ids, id)) {
|
||||
bm_id_freelist_push(bm, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
/* MSelect clears the array elements (avoid adding multiple times).
|
||||
*
|
||||
|
|
|
@ -79,6 +79,11 @@ static void extract_vcol_init(const MeshRenderData *mr,
|
|||
if (svcol_layers & (1 << i)) {
|
||||
char attr_name[32], attr_safe_name[GPU_MAX_SAFE_ATTR_NAME];
|
||||
const char *layer_name = CustomData_get_layer_name(cd_vdata, CD_PROP_COLOR, i);
|
||||
|
||||
if (!layer_name) {
|
||||
continue;
|
||||
}
|
||||
|
||||
GPU_vertformat_safe_attr_name(layer_name, attr_safe_name, GPU_MAX_SAFE_ATTR_NAME);
|
||||
|
||||
BLI_snprintf(attr_name, sizeof(attr_name), "c%s", attr_safe_name);
|
||||
|
@ -160,7 +165,8 @@ static void extract_vcol_init(const MeshRenderData *mr,
|
|||
}
|
||||
else {
|
||||
MPropCol *vcol = (MPropCol *)CustomData_get_layer_n(cd_vdata, CD_PROP_COLOR, i);
|
||||
for (int ml_index = 0; ml_index < mr->loop_len; ml_index++, vcol_data++) {
|
||||
|
||||
for (int ml_index = 0; vcol && ml_index < mr->loop_len; ml_index++, vcol_data++) {
|
||||
vcol_data->r = unit_float_to_ushort_clamp(vcol[loops[ml_index].v].color[0]);
|
||||
vcol_data->g = unit_float_to_ushort_clamp(vcol[loops[ml_index].v].color[1]);
|
||||
vcol_data->b = unit_float_to_ushort_clamp(vcol[loops[ml_index].v].color[2]);
|
||||
|
|
|
@ -1656,69 +1656,11 @@ static bool sculpt_check_unique_face_set_for_edge_in_base_mesh(const SculptSessi
|
|||
return true;
|
||||
}
|
||||
|
||||
bool SCULPT_vertex_has_unique_face_set(const SculptSession *ss, SculptVertRef index)
|
||||
bool SCULPT_vertex_has_unique_face_set(const SculptSession *ss, SculptVertRef vertex)
|
||||
{
|
||||
switch (BKE_pbvh_type(ss->pbvh)) {
|
||||
case PBVH_FACES: {
|
||||
return sculpt_check_unique_face_set_in_base_mesh(ss, index);
|
||||
}
|
||||
case PBVH_BMESH: {
|
||||
BMVert *v = (BMVert *)index.i;
|
||||
MDynTopoVert *mv = BKE_PBVH_DYNVERT(ss->cd_dyn_vert, v);
|
||||
MDynTopoVert *mv = SCULPT_vertex_get_mdyntopo(ss, vertex);
|
||||
|
||||
if (mv->flag & DYNVERT_NEED_BOUNDARY) {
|
||||
BKE_pbvh_update_vert_boundary(ss->cd_dyn_vert,
|
||||
ss->cd_faceset_offset,
|
||||
ss->cd_vert_node_offset,
|
||||
ss->cd_face_node_offset,
|
||||
v,
|
||||
ss->boundary_symmetry);
|
||||
}
|
||||
|
||||
return !(mv->flag & DYNVERT_FSET_BOUNDARY);
|
||||
|
||||
#if 0
|
||||
int face_set = 0;
|
||||
bool first = true;
|
||||
if (ss->cd_faceset_offset == -1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
BM_ITER_ELEM (l, &iter, v, BM_LOOPS_OF_VERT) {
|
||||
BMFace *f = l->f;
|
||||
int face_set2 = BM_ELEM_CD_GET_INT(f, ss->cd_faceset_offset);
|
||||
|
||||
if (!first && abs(face_set2) != abs(face_set)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
first = false;
|
||||
face_set = face_set2;
|
||||
}
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
case PBVH_GRIDS: {
|
||||
const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh);
|
||||
const int grid_index = index.i / key->grid_area;
|
||||
const int vertex_index = index.i - grid_index * key->grid_area;
|
||||
const SubdivCCGCoord coord = {.grid_index = grid_index,
|
||||
.x = vertex_index % key->grid_size,
|
||||
.y = vertex_index / key->grid_size};
|
||||
int v1, v2;
|
||||
const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
|
||||
ss->subdiv_ccg, &coord, ss->mloop, ss->mpoly, &v1, &v2);
|
||||
switch (adjacency) {
|
||||
case SUBDIV_CCG_ADJACENT_VERTEX:
|
||||
return sculpt_check_unique_face_set_in_base_mesh(ss, BKE_pbvh_make_vref(v1));
|
||||
case SUBDIV_CCG_ADJACENT_EDGE:
|
||||
return sculpt_check_unique_face_set_for_edge_in_base_mesh(ss, v1, v2);
|
||||
case SUBDIV_CCG_ADJACENT_NONE:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
return !SCULPT_vertex_is_boundary(ss, vertex, SCULPT_BOUNDARY_FACE_SET);
|
||||
}
|
||||
|
||||
int SCULPT_face_set_next_available_get(SculptSession *ss)
|
||||
|
@ -2181,6 +2123,49 @@ static bool sculpt_check_boundary_vertex_in_base_mesh(const SculptSession *ss,
|
|||
return BLI_BITMAP_TEST(ss->vertex_info.boundary,
|
||||
BKE_pbvh_vertex_index_to_table(ss->pbvh, index));
|
||||
}
|
||||
|
||||
/* Recompute the cached boundary/corner flags of a multires (grids) vertex by
 * mapping it back to the coarse base mesh and testing its adjacency there. */
static void grids_update_boundary_flags(const SculptSession *ss, SculptVertRef vertex)
{
  MDynTopoVert *dv = ss->mdyntopo_verts + vertex.i;

  /* Clear all boundary/corner state before recomputing it. */
  dv->flag &= ~(DYNVERT_CORNER | DYNVERT_BOUNDARY | DYNVERT_NEED_BOUNDARY | DYNVERT_FSET_BOUNDARY |
                DYNVERT_FSET_CORNER);

  /* Convert the flat vertex index into a grid coordinate. */
  const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh);
  const int index = (int)vertex.i;
  const int grid_index = index / key->grid_area;
  const int grid_vert = index - grid_index * key->grid_area;
  const SubdivCCGCoord coord = {.grid_index = grid_index,
                                .x = grid_vert % key->grid_size,
                                .y = grid_vert / key->grid_size};

  int v1, v2;
  const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
      ss->subdiv_ccg, &coord, ss->mloop, ss->mpoly, &v1, &v2);

  if (adjacency == SUBDIV_CCG_ADJACENT_VERTEX) {
    /* Grid vertex coincides with a base-mesh vertex: test that vertex. */
    if (sculpt_check_unique_face_set_in_base_mesh(ss, BKE_pbvh_make_vref(v1))) {
      dv->flag |= DYNVERT_FSET_BOUNDARY;
    }
    if (sculpt_check_boundary_vertex_in_base_mesh(ss, BKE_pbvh_make_vref(v1))) {
      dv->flag |= DYNVERT_BOUNDARY;
    }
  }
  else if (adjacency == SUBDIV_CCG_ADJACENT_EDGE) {
    /* Grid vertex lies on a base-mesh edge: test both endpoints. */
    if (sculpt_check_unique_face_set_for_edge_in_base_mesh(ss, v1, v2)) {
      dv->flag |= DYNVERT_FSET_BOUNDARY;
    }

    if (sculpt_check_boundary_vertex_in_base_mesh(ss, BKE_pbvh_make_vref(v1)) &&
        sculpt_check_boundary_vertex_in_base_mesh(ss, BKE_pbvh_make_vref(v2))) {
      dv->flag |= DYNVERT_BOUNDARY;
    }
  }
  /* SUBDIV_CCG_ADJACENT_NONE: interior vertex, no flags to set. */
}
|
||||
|
||||
static void faces_update_boundary_flags(const SculptSession *ss, const SculptVertRef vertex)
|
||||
{
|
||||
BKE_pbvh_update_vert_boundary_faces(ss->face_sets,
|
||||
|
@ -2249,25 +2234,12 @@ SculptCornerType SCULPT_vertex_is_corner(const SculptSession *ss,
|
|||
}
|
||||
break;
|
||||
case PBVH_GRIDS: {
|
||||
const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh);
|
||||
const int grid_index = vertex.i / key->grid_area;
|
||||
const int vertex_index = vertex.i - grid_index * key->grid_area;
|
||||
const SubdivCCGCoord coord = {.grid_index = grid_index,
|
||||
.x = vertex_index % key->grid_size,
|
||||
.y = vertex_index / key->grid_size};
|
||||
int v1, v2;
|
||||
const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
|
||||
ss->subdiv_ccg, &coord, ss->mloop, ss->mpoly, &v1, &v2);
|
||||
switch (adjacency) {
|
||||
case SUBDIV_CCG_ADJACENT_VERTEX:
|
||||
return sculpt_check_corner_in_base_mesh(ss, BKE_pbvh_make_vref(v1), check_facesets);
|
||||
case SUBDIV_CCG_ADJACENT_EDGE:
|
||||
return false; // sculpt_check_unique_face_set_for_edge_in_base_mesh(ss, v1, v2);
|
||||
case SUBDIV_CCG_ADJACENT_NONE:
|
||||
return false;
|
||||
}
|
||||
mv = ss->mdyntopo_verts + vertex.i;
|
||||
|
||||
return 0;
|
||||
if (mv->flag & DYNVERT_NEED_BOUNDARY) {
|
||||
grids_update_boundary_flags(ss, vertex);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2331,6 +2303,7 @@ SculptBoundaryType SCULPT_vertex_is_boundary(const SculptSession *ss,
|
|||
int v1, v2;
|
||||
const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
|
||||
ss->subdiv_ccg, &coord, ss->mloop, ss->mpoly, &v1, &v2);
|
||||
|
||||
switch (adjacency) {
|
||||
case SUBDIV_CCG_ADJACENT_VERTEX:
|
||||
return sculpt_check_boundary_vertex_in_base_mesh(ss, BKE_pbvh_make_vref(v1)) ?
|
||||
|
@ -5667,21 +5640,19 @@ static void do_topology_slide_task_cb_ex(void *__restrict userdata,
|
|||
void SCULPT_relax_vertex(SculptSession *ss,
|
||||
PBVHVertexIter *vd,
|
||||
float factor,
|
||||
bool filter_boundary_face_sets,
|
||||
SculptBoundaryType boundary_mask,
|
||||
float *r_final_pos)
|
||||
{
|
||||
float smooth_pos[3];
|
||||
float final_disp[3];
|
||||
float boundary_normal[3];
|
||||
int avg_count = 0;
|
||||
int neighbor_count = 0;
|
||||
zero_v3(smooth_pos);
|
||||
zero_v3(boundary_normal);
|
||||
|
||||
int bset = SCULPT_BOUNDARY_MESH | SCULPT_BOUNDARY_SHARP;
|
||||
int bset = boundary_mask;
|
||||
|
||||
// forcibly enable if no ss->cache
|
||||
if (!ss->cache || (ss->cache->brush->flag2 & BRUSH_SMOOTH_PRESERVE_FACE_SETS)) {
|
||||
if (ss->cache && (ss->cache->brush->flag2 & BRUSH_SMOOTH_PRESERVE_FACE_SETS)) {
|
||||
bset |= SCULPT_BOUNDARY_FACE_SET;
|
||||
}
|
||||
|
||||
|
@ -5692,41 +5663,43 @@ void SCULPT_relax_vertex(SculptSession *ss,
|
|||
|
||||
const int is_boundary = SCULPT_vertex_is_boundary(ss, vd->vertex, bset);
|
||||
|
||||
float boundary_tan_a[3];
|
||||
float boundary_tan_b[3];
|
||||
bool have_boundary_tan_a = false;
|
||||
|
||||
SculptVertexNeighborIter ni;
|
||||
SCULPT_VERTEX_NEIGHBORS_ITER_BEGIN (ss, vd->vertex, ni) {
|
||||
neighbor_count++;
|
||||
if (!filter_boundary_face_sets ||
|
||||
(filter_boundary_face_sets && !SCULPT_vertex_has_unique_face_set(ss, ni.vertex))) {
|
||||
|
||||
/* When the vertex to relax is boundary, use only connected boundary vertices for the
|
||||
* average position. */
|
||||
if (is_boundary) {
|
||||
if (!SCULPT_vertex_is_boundary(ss, ni.vertex, bset)) {
|
||||
continue;
|
||||
}
|
||||
add_v3_v3(smooth_pos, SCULPT_vertex_co_get(ss, ni.vertex));
|
||||
avg_count++;
|
||||
/* When the vertex to relax is boundary, use only connected boundary vertices for the
|
||||
* average position. */
|
||||
if (is_boundary) {
|
||||
if (!SCULPT_vertex_is_boundary(ss, ni.vertex, bset)) {
|
||||
continue;
|
||||
}
|
||||
add_v3_v3(smooth_pos, SCULPT_vertex_co_get(ss, ni.vertex));
|
||||
avg_count++;
|
||||
|
||||
/* Calculate a normal for the constraint plane using the edges of the boundary. */
|
||||
float to_neighbor[3];
|
||||
sub_v3_v3v3(to_neighbor, SCULPT_vertex_co_get(ss, ni.vertex), vd->co);
|
||||
normalize_v3(to_neighbor);
|
||||
add_v3_v3(boundary_normal, to_neighbor);
|
||||
/* Calculate a normal for the constraint plane using the edges of the boundary. */
|
||||
float to_neighbor[3];
|
||||
sub_v3_v3v3(to_neighbor, SCULPT_vertex_co_get(ss, ni.vertex), vd->co);
|
||||
normalize_v3(to_neighbor);
|
||||
|
||||
if (!have_boundary_tan_a) {
|
||||
copy_v3_v3(boundary_tan_a, to_neighbor);
|
||||
have_boundary_tan_a = true;
|
||||
}
|
||||
else {
|
||||
add_v3_v3(smooth_pos, SCULPT_vertex_co_get(ss, ni.vertex));
|
||||
avg_count++;
|
||||
copy_v3_v3(boundary_tan_b, to_neighbor);
|
||||
}
|
||||
}
|
||||
else {
|
||||
add_v3_v3(smooth_pos, SCULPT_vertex_co_get(ss, ni.vertex));
|
||||
avg_count++;
|
||||
}
|
||||
}
|
||||
SCULPT_VERTEX_NEIGHBORS_ITER_END(ni);
|
||||
|
||||
/* Don't modify corner vertices. */
|
||||
if (neighbor_count <= 2) {
|
||||
copy_v3_v3(r_final_pos, vd->co);
|
||||
return;
|
||||
}
|
||||
|
||||
if (avg_count > 0) {
|
||||
mul_v3_fl(smooth_pos, 1.0f / avg_count);
|
||||
}
|
||||
|
@ -5739,8 +5712,9 @@ void SCULPT_relax_vertex(SculptSession *ss,
|
|||
float smooth_closest_plane[3];
|
||||
float vno[3];
|
||||
|
||||
if ((is_boundary & SCULPT_BOUNDARY_MESH) && avg_count == 2) {
|
||||
normalize_v3_v3(vno, boundary_normal);
|
||||
if ((is_boundary) && avg_count == 2 && fabsf(dot_v3v3(boundary_tan_a, boundary_tan_b)) < 0.99f) {
|
||||
cross_v3_v3v3(vno, boundary_tan_a, boundary_tan_b);
|
||||
normalize_v3(vno);
|
||||
}
|
||||
else {
|
||||
SCULPT_vertex_normal_get(ss, vd->vertex, vno);
|
||||
|
@ -5796,7 +5770,8 @@ static void do_topology_relax_task_cb_ex(void *__restrict userdata,
|
|||
vd.vertex,
|
||||
thread_id);
|
||||
|
||||
SCULPT_relax_vertex(ss, &vd, fade * bstrength, false, vd.co);
|
||||
SCULPT_relax_vertex(ss, &vd, fade * bstrength, SCULPT_BOUNDARY_DEFAULT, vd.co);
|
||||
|
||||
if (vd.mvert) {
|
||||
vd.mvert->flag |= ME_VERT_PBVH_UPDATE;
|
||||
}
|
||||
|
@ -7925,7 +7900,8 @@ static void do_twist_brush_post_smooth_task_cb_ex(void *__restrict userdata,
|
|||
*/
|
||||
|
||||
float final_co[3];
|
||||
SCULPT_relax_vertex(ss, &vd, clamp_f(smooth_fade, 0.0f, 1.0f), false, final_co);
|
||||
SCULPT_relax_vertex(
|
||||
ss, &vd, clamp_f(smooth_fade, 0.0f, 1.0f), SCULPT_BOUNDARY_DEFAULT, final_co);
|
||||
|
||||
sub_v3_v3v3(disp, final_co, vd.co);
|
||||
add_v3_v3(vd.co, disp);
|
||||
|
@ -9408,10 +9384,13 @@ static void SCULPT_run_command_list(
|
|||
* zero radius, thus we have no pbvh nodes on the first brush step. */
|
||||
if (totnode ||
|
||||
((brush->falloff_shape == PAINT_FALLOFF_SHAPE_SPHERE) && (brush->flag & BRUSH_ANCHORED))) {
|
||||
if (SCULPT_stroke_is_first_brush_step(ss->cache)) {
|
||||
if (SCULPT_is_automasking_enabled(sd, ss, brush)) {
|
||||
if (SCULPT_is_automasking_enabled(sd, ss, brush)) {
|
||||
if (SCULPT_stroke_is_first_brush_step(ss->cache)) {
|
||||
ss->cache->automasking = SCULPT_automasking_cache_init(sd, brush, ob);
|
||||
}
|
||||
else {
|
||||
SCULPT_automasking_step_update(ss->cache->automasking, ss, sd, brush);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -119,11 +119,9 @@ static float sculpt_concavity_factor(AutomaskingCache *automasking, float fac)
|
|||
return fac;
|
||||
}
|
||||
|
||||
static int automasking_get_propegation(SculptSession *ss)
|
||||
static int automasking_get_propegation(SculptSession *ss, Sculpt *sd, const Brush *brush)
|
||||
{
|
||||
return BKE_brush_channelset_get_int(ss->cache->channels_final,
|
||||
"automasking_boundary_edges_propagation_steps",
|
||||
&ss->cache->input_mapping);
|
||||
return SCULPT_get_int(ss, automasking_boundary_edges_propagation_steps, sd, brush);
|
||||
}
|
||||
|
||||
static bool SCULPT_automasking_needs_factors_cache(SculptSession *ss,
|
||||
|
@ -136,10 +134,10 @@ static bool SCULPT_automasking_needs_factors_cache(SculptSession *ss,
|
|||
return true;
|
||||
}
|
||||
if (automasking_flags & BRUSH_AUTOMASKING_BOUNDARY_EDGES) {
|
||||
return brush && automasking_get_propegation(ss) != 1;
|
||||
return brush && automasking_get_propegation(ss, sd, brush) != 1;
|
||||
}
|
||||
if (automasking_flags & BRUSH_AUTOMASKING_BOUNDARY_FACE_SETS) {
|
||||
return brush && automasking_get_propegation(ss) != 1;
|
||||
return brush && automasking_get_propegation(ss, sd, brush) != 1;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
@ -155,8 +153,7 @@ float SCULPT_automasking_factor_get(AutomaskingCache *automasking,
|
|||
return mask;
|
||||
}
|
||||
|
||||
float concave_factor = BKE_brush_channelset_get_float(
|
||||
ss->cache->channels_final, "concave_mask_factor", &ss->cache->input_mapping);
|
||||
float concave_factor = automasking->settings.concave_factor;
|
||||
|
||||
do_concave = ss->cache && concave_factor > 0.0f &&
|
||||
(automasking->settings.flags & BRUSH_AUTOMASKING_CONCAVITY);
|
||||
|
@ -396,8 +393,15 @@ static void SCULPT_automasking_cache_settings_update(AutomaskingCache *automaski
|
|||
automasking->settings.flags = sculpt_automasking_mode_effective_bits(ss, sd, brush);
|
||||
|
||||
automasking->settings.initial_face_set = SCULPT_active_face_set_get(ss);
|
||||
automasking->settings.concave_factor = BKE_brush_channelset_get_float(
|
||||
ss->cache->channels_final, "concave_mask_factor", &ss->cache->input_mapping);
|
||||
automasking->settings.concave_factor = SCULPT_get_float(ss, concave_mask_factor, sd, brush);
|
||||
}
|
||||
|
||||
/* Refresh per-step automasking settings during a stroke.
 * Re-reads the concavity factor each brush step so brush-channel input
 * mappings (e.g. pen pressure, see the inverted pressure mapping set on
 * concave_mask_factor at channel registration) keep affecting concavity
 * automasking mid-stroke instead of being frozen at cache-init time.
 * NOTE(review): presumably SCULPT_get_float resolves the brush channel named
 * by the bare concave_mask_factor token -- confirm against the channel API. */
void SCULPT_automasking_step_update(AutomaskingCache *automasking,
                                    SculptSession *ss,
                                    Sculpt *sd,
                                    const Brush *brush)
{
  automasking->settings.concave_factor = SCULPT_get_float(ss, concave_mask_factor, sd, brush);
}
|
||||
|
||||
float SCULPT_calc_concavity(SculptSession *ss, SculptVertRef vref)
|
||||
|
@ -500,7 +504,7 @@ AutomaskingCache *SCULPT_automasking_cache_init(Sculpt *sd, const Brush *brush,
|
|||
*f = 1.0f;
|
||||
}
|
||||
|
||||
const int boundary_propagation_steps = automasking_get_propegation(ss);
|
||||
const int boundary_propagation_steps = automasking_get_propegation(ss, sd, brush);
|
||||
|
||||
if (SCULPT_is_automasking_mode_enabled(ss, sd, brush, BRUSH_AUTOMASKING_TOPOLOGY)) {
|
||||
SCULPT_vertex_random_access_ensure(ss);
|
||||
|
|
|
@ -394,6 +394,10 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
|
|||
MVert *v = &ss->mvert[ml->v];
|
||||
float fno[3];
|
||||
|
||||
MDynTopoVert *mv = ss->mdyntopo_verts + i;
|
||||
|
||||
mv->flag |= DYNVERT_NEED_BOUNDARY;
|
||||
|
||||
normal_short_to_float_v3(fno, v->no);
|
||||
float mask = ss->vmask ? ss->vmask[ml->v] : 0.0f;
|
||||
|
||||
|
@ -560,17 +564,20 @@ static void do_relax_face_sets_brush_task_cb_ex(void *__restrict userdata,
|
|||
continue;
|
||||
}
|
||||
|
||||
const float fade = bstrength * SCULPT_brush_strength_factor(ss,
|
||||
brush,
|
||||
vd.co,
|
||||
sqrtf(test.dist),
|
||||
vd.no,
|
||||
vd.fno,
|
||||
vd.mask ? *vd.mask : 0.0f,
|
||||
vd.vertex,
|
||||
thread_id);
|
||||
float fade = bstrength * SCULPT_brush_strength_factor(ss,
|
||||
brush,
|
||||
vd.co,
|
||||
sqrtf(test.dist),
|
||||
vd.no,
|
||||
vd.fno,
|
||||
vd.mask ? *vd.mask : 0.0f,
|
||||
vd.vertex,
|
||||
thread_id);
|
||||
|
||||
SCULPT_relax_vertex(ss, &vd, fade * bstrength, relax_face_sets, vd.co);
|
||||
CLAMP(fade, 0.0f, 1.0f);
|
||||
|
||||
SCULPT_relax_vertex(
|
||||
ss, &vd, fade * bstrength, SCULPT_BOUNDARY_DEFAULT | SCULPT_BOUNDARY_FACE_SET, vd.co);
|
||||
if (vd.mvert) {
|
||||
vd.mvert->flag |= ME_VERT_PBVH_UPDATE;
|
||||
}
|
||||
|
|
|
@ -429,12 +429,16 @@ static void mesh_filter_task_cb(void *__restrict userdata,
|
|||
break;
|
||||
}
|
||||
case MESH_FILTER_RELAX: {
|
||||
SCULPT_relax_vertex(ss, &vd, clamp_f(fade, 0.0f, 1.0f), false, val);
|
||||
SCULPT_relax_vertex(ss, &vd, clamp_f(fade, 0.0f, 1.0f), SCULPT_BOUNDARY_DEFAULT, val);
|
||||
sub_v3_v3v3(disp, val, vd.co);
|
||||
break;
|
||||
}
|
||||
case MESH_FILTER_RELAX_FACE_SETS: {
|
||||
SCULPT_relax_vertex(ss, &vd, clamp_f(fade, 0.0f, 1.0f), relax_face_sets, val);
|
||||
SCULPT_relax_vertex(ss,
|
||||
&vd,
|
||||
clamp_f(fade, 0.0f, 1.0f),
|
||||
SCULPT_BOUNDARY_DEFAULT | SCULPT_BOUNDARY_FACE_SET,
|
||||
val);
|
||||
sub_v3_v3v3(disp, val, vd.co);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -301,7 +301,8 @@ typedef enum SculptBoundaryType {
|
|||
SCULPT_BOUNDARY_FACE_SET = 1 << 1,
|
||||
SCULPT_BOUNDARY_SEAM = 1 << 2,
|
||||
SCULPT_BOUNDARY_SHARP = 1 << 3,
|
||||
SCULPT_BOUNDARY_ALL = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)
|
||||
SCULPT_BOUNDARY_ALL = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3),
|
||||
SCULPT_BOUNDARY_DEFAULT = (1 << 0) | (1 << 3) // mesh and sharp
|
||||
} SculptBoundaryType;
|
||||
|
||||
/* Boundary Info needs to be initialized in order to use this function. */
|
||||
|
@ -525,6 +526,10 @@ bool SCULPT_is_automasking_mode_enabled(const SculptSession *ss,
|
|||
const Brush *br,
|
||||
const eAutomasking_flag mode);
|
||||
bool SCULPT_is_automasking_enabled(Sculpt *sd, const SculptSession *ss, const Brush *br);
|
||||
void SCULPT_automasking_step_update(struct AutomaskingCache *automasking,
|
||||
SculptSession *ss,
|
||||
Sculpt *sd,
|
||||
const Brush *brush);
|
||||
|
||||
typedef enum eBoundaryAutomaskMode {
|
||||
AUTOMASK_INIT_BOUNDARY_EDGES = 1,
|
||||
|
@ -819,7 +824,7 @@ void SCULPT_do_uniform_weights_smooth_brush(Sculpt *sd, Object *ob, PBVHNode **n
|
|||
void SCULPT_relax_vertex(struct SculptSession *ss,
|
||||
struct PBVHVertexIter *vd,
|
||||
float factor,
|
||||
bool filter_boundary_face_sets,
|
||||
SculptBoundaryType boundary_mask,
|
||||
float *r_final_pos);
|
||||
|
||||
/* Symmetrize Map. */
|
||||
|
@ -1176,7 +1181,6 @@ typedef struct AutomaskingCache {
|
|||
* initialized in #SCULPT_automasking_cache_init when needed. */
|
||||
// float *factor;
|
||||
SculptCustomLayer *factorlayer;
|
||||
float concave_mask_factor;
|
||||
} AutomaskingCache;
|
||||
|
||||
typedef struct StrokeCache {
|
||||
|
|
Loading…
Reference in New Issue