Sculpt: fix set limit surface crashing

Also tweaked smallhash hash function.
This commit is contained in:
Joseph Eagar 2021-10-04 10:24:48 -07:00
parent 23db569df7
commit ea19c3a4b8
8 changed files with 63 additions and 68 deletions

View File

@ -1531,6 +1531,14 @@ void BKE_builtin_apply_hard_edge_mode(BrushChannelSet *chset, bool do_apply)
// make sure preserve faceset boundaries is on
ch = BRUSHSET_LOOKUP(chset, preserve_faceset_boundary);
if (ch) {
ch->flag &= ~BRUSH_CHANNEL_INHERIT;
ch->ivalue = 1;
}
// Turn off dyntopo surface smoothing.
ch = BRUSHSET_LOOKUP(chset, dyntopo_disable_smooth);
if (ch) {
ch->flag &= ~BRUSH_CHANNEL_INHERIT;
ch->ivalue = 1;

View File

@ -45,6 +45,7 @@ typedef struct SmallHash {
SmallHashEntry *buckets;
SmallHashEntry buckets_stack[SMSTACKSIZE];
bool use_pointer_hash;
bool using_stack;
} SmallHash;

View File

@ -58,16 +58,6 @@
#include "BLI_asan.h"
#include "BLI_strict_flags.h"
#ifdef BLI_asan_poison
# undef BLI_asan_poison
#endif
#ifdef BLI_asan_unpoison
# undef BLI_asan_unpoison
#endif
#define BLI_asan_poison(a, b)
#define BLI_asan_unpoison(a, b)
/* NOTE: copied from BLO_blend_defs.h, don't use here because we're in BLI. */
#ifdef __BIG_ENDIAN__
/* Big Endian */
@ -123,19 +113,27 @@ int BLI_smallhash_memuse(SmallHash *sh)
return (int)sh->nbuckets * sizeof(SmallHashEntry) + (int)sizeof(SmallHash);
}
/* Old identity/rotate pointer-hash implementation, kept for reference (disabled). */
#if 0
BLI_INLINE uintptr_t smallhash_key(const uintptr_t key)
{
#  if 1
  return key;
#  else
  uintptr_t y = (size_t)key;
  /* Bottom 3 or 4 bits are likely to be 0; rotate y by 4 to avoid
   * excessive hash collisions for dicts and sets. */
  return (uintptr_t)(y >> 4) | ((uintptr_t)y << (sizeof(uintptr_t[8]) - 4));
#  endif
}
#endif

#ifdef _keyrot
#  undef _keyrot
#endif

/* Rotate the key right by 4 bits, folding the (typically zero) low pointer
 * bits into the high bits to reduce collisions for pointer keys.
 * NOTE: `sizeof(uintptr_t[8])` equals the bit width of `uintptr_t`
 * (8 * sizeof(uintptr_t)), so the left shift amount is `bits - 4`.
 * Fully parenthesized so the macro is safe inside larger expressions
 * (e.g. `_keyrot(k) % n`); beware `y` is evaluated twice. */
#define _keyrot(y) (((uintptr_t)(y) >> 4) | ((uintptr_t)(y) << (sizeof(uintptr_t[8]) - 4)))

/* Hash a key: apply the rotation only when the table stores pointers.
 * Relies on a `SmallHash *sh` being in scope at the expansion site.
 * Outer parentheses keep e.g. `smallhash_key(k) % nbuckets` grouping
 * correctly (`?:` binds looser than `%`). */
#define smallhash_key(key) (sh->use_pointer_hash ? _keyrot(key) : (key))
/**
* Check if the number of items in the smallhash is large enough to require more buckets.
*/
@ -155,14 +153,10 @@ BLI_INLINE void smallhash_init_empty(SmallHash *sh)
{
uint i;
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
for (i = 0; i < sh->nbuckets; i++) {
sh->buckets[i].key = SMHASH_KEY_UNUSED;
sh->buckets[i].val = SMHASH_CELL_FREE;
}
BLI_asan_poison(&sh->buckets, sizeof(void *));
}
/**
@ -186,8 +180,6 @@ BLI_INLINE SmallHashEntry *smallhash_lookup(SmallHash *sh, const uintptr_t key)
BLI_assert(key != SMHASH_KEY_UNUSED);
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
/* NOTE: there are always more buckets than entries,
* so we know there will always be a free bucket if the key isn't found. */
for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;
@ -199,8 +191,6 @@ BLI_INLINE SmallHashEntry *smallhash_lookup(SmallHash *sh, const uintptr_t key)
}
}
BLI_asan_poison(&sh->buckets, sizeof(void *));
return NULL;
}
@ -212,15 +202,11 @@ BLI_INLINE SmallHashEntry *smallhash_lookup_first_free(SmallHash *sh, const uint
uintptr_t h = smallhash_key(key);
uintptr_t hoff = 1;
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
for (e = &sh->buckets[h % sh->nbuckets]; smallhash_val_is_used(e->val);
h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
/* pass */
}
BLI_asan_poison(&sh->buckets, sizeof(void *));
return e;
}
@ -228,8 +214,6 @@ BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
{
check_stack_move(sh);
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
SmallHashEntry *buckets_old = sh->buckets;
const uint nbuckets_old = sh->nbuckets;
const bool was_alloc = (buckets_old != sh->buckets_stack);
@ -252,7 +236,6 @@ BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
sh->nfreecells = nbuckets;
sh->nentries = 0;
BLI_asan_poison(&sh->buckets, sizeof(void *));
smallhash_init_empty(sh);
for (i = 0; i < nbuckets_old; i++) {
@ -284,15 +267,11 @@ void BLI_smallhash_init_ex(SmallHash *sh, const uint nentries_reserve)
sh->buckets = sh->buckets_stack;
BLI_asan_poison(&sh->buckets, sizeof(void *));
if (nentries_reserve) {
smallhash_buckets_reserve(sh, nentries_reserve);
if (sh->nbuckets > SMSTACKSIZE) {
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
sh->buckets = MEM_mallocN(sizeof(*sh->buckets) * sh->nbuckets, __func__);
BLI_asan_poison(&sh->buckets, sizeof(void *));
sh->using_stack = false;
}
@ -311,8 +290,6 @@ void BLI_smallhash_release(SmallHash *sh)
{
check_stack_move(sh);
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
if (sh->buckets && sh->buckets != sh->buckets_stack) {
MEM_freeN(sh->buckets);
}
@ -332,8 +309,6 @@ bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***item)
BLI_assert(key != SMHASH_KEY_UNUSED);
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
/* NOTE: there are always more buckets than entries,
* so we know there will always be a free bucket if the key isn't found. */
for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;
@ -345,8 +320,6 @@ bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***item)
}
}
BLI_asan_poison(&sh->buckets, sizeof(void *));
bool ret;
if (e->val == SMHASH_CELL_FREE || e->val == SMHASH_CELL_UNUSED) {
@ -484,8 +457,6 @@ void BLI_smallhash_clear(SmallHash *sh, uintptr_t key)
{
check_stack_move(sh);
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
SmallHashEntry *e = sh->buckets;
for (uint i = 0; i < sh->nbuckets; i++, e++) {
@ -494,8 +465,6 @@ void BLI_smallhash_clear(SmallHash *sh, uintptr_t key)
}
sh->nentries = 0;
BLI_asan_poison(&sh->buckets, sizeof(void *));
}
bool BLI_smallhash_haskey(SmallHash *sh, uintptr_t key)
@ -512,8 +481,6 @@ int BLI_smallhash_len(SmallHash *sh)
BLI_INLINE SmallHashEntry *smallhash_iternext(SmallHashIter *iter, uintptr_t *key)
{
BLI_asan_unpoison(&iter->sh->buckets, sizeof(void *));
while (iter->i < iter->sh->nbuckets) {
if (smallhash_val_is_used(iter->sh->buckets[iter->i].val)) {
if (key) {
@ -526,7 +493,6 @@ BLI_INLINE SmallHashEntry *smallhash_iternext(SmallHashIter *iter, uintptr_t *ke
iter->i++;
}
BLI_asan_poison(&iter->sh->buckets, sizeof(void *));
return NULL;
}

View File

@ -13729,9 +13729,13 @@ static int sculpt_set_limit_surface_exec(bContext *C, wmOperator *UNUSED(op))
SCULPT_vertex_random_access_ensure(ss);
if (ss->limit_surface) {
SCULPT_temp_customlayer_release(ss, ss->limit_surface);
}
MEM_SAFE_FREE(ss->limit_surface);
ss->limit_surface = MEM_callocN(sizeof(*ss->limit_surface), "ss->limit_surface");
ss->limit_surface = MEM_callocN(sizeof(SculptCustomLayer), "ss->limit_surface");
SculptLayerParams params = {.permanent = false, .simple_array = false};
SCULPT_temp_customlayer_ensure(
@ -13740,12 +13744,12 @@ static int sculpt_set_limit_surface_exec(bContext *C, wmOperator *UNUSED(op))
ss, ATTR_DOMAIN_POINT, CD_PROP_FLOAT3, "_sculpt_limit_surface", ss->limit_surface, &params);
const int totvert = SCULPT_vertex_count_get(ss);
const bool weighted = false;
for (int i = 0; i < totvert; i++) {
SculptVertRef vertex = BKE_pbvh_table_index_to_vertex(ss->pbvh, i);
float *f = SCULPT_temp_cdata_get(vertex, ss->limit_surface);
SCULPT_neighbor_coords_average(ss, f, vertex, 0.0, true);
SCULPT_neighbor_coords_average(ss, f, vertex, 0.0, true, weighted);
}
return OPERATOR_FINISHED;

View File

@ -681,6 +681,7 @@ static void do_array_smooth_task_cb_ex(void *__restrict userdata,
bool any_modified = false;
bool check_fsets = ss->cache->brush->flag2 & BRUSH_SMOOTH_PRESERVE_FACE_SETS;
const bool weighted = (ss->cache->brush->flag2 & BRUSH_SMOOTH_USE_AREA_WEIGHT);
PBVHVertexIter vd;
BKE_pbvh_vertex_iter_begin (ss->pbvh, data->nodes[n], vd, PBVH_ITER_UNIQUE) {
@ -696,7 +697,7 @@ static void do_array_smooth_task_cb_ex(void *__restrict userdata,
float smooth_co[3];
SCULPT_neighbor_coords_average(
ss, smooth_co, vd.vertex, ss->cache->brush->autosmooth_projection, check_fsets);
ss, smooth_co, vd.vertex, ss->cache->brush->autosmooth_projection, check_fsets, weighted);
float disp[3];
sub_v3_v3v3(disp, smooth_co, vd.co);
mul_v3_fl(disp, fade);

View File

@ -331,6 +331,7 @@ static void mesh_filter_task_cb(void *__restrict userdata,
/* This produces better results as the relax operation is not completely focused on the
 * boundaries. */
const bool relax_face_sets = !(ss->filter_cache->iteration_count % 3 == 0);
const bool weighted = false;
PBVHVertexIter vd;
BKE_pbvh_vertex_iter_begin (ss->pbvh, node, vd, PBVH_ITER_UNIQUE) {
@ -453,6 +454,7 @@ static void mesh_filter_task_cb(void *__restrict userdata,
orig_data.co,
ss->filter_cache->surface_smooth_shape_preservation,
0.0f,
false,
false);
break;
}
@ -478,7 +480,7 @@ static void mesh_filter_task_cb(void *__restrict userdata,
float disp_avg[3];
float avg_co[3];
SCULPT_neighbor_coords_average(ss, avg_co, vd.vertex, 0.0f, false);
SCULPT_neighbor_coords_average(ss, avg_co, vd.vertex, 0.0f, false, weighted);
sub_v3_v3v3(disp_avg, avg_co, vd.co);
mul_v3_v3fl(
disp_avg, disp_avg, smooth_ratio * pow2f(ss->filter_cache->sharpen_factor[vd.index]));
@ -537,13 +539,15 @@ static void mesh_filter_enhance_details_init_directions(SculptSession *ss)
const int totvert = SCULPT_vertex_count_get(ss);
FilterCache *filter_cache = ss->filter_cache;
bool weighted = false;
filter_cache->detail_directions = MEM_malloc_arrayN(
totvert, sizeof(float[3]), "detail directions");
for (int i = 0; i < totvert; i++) {
SculptVertRef vertex = BKE_pbvh_table_index_to_vertex(ss->pbvh, i);
float avg[3];
SCULPT_neighbor_coords_average(ss, avg, vertex, 0.0f, false);
SCULPT_neighbor_coords_average(ss, avg, vertex, 0.0f, false, weighted);
sub_v3_v3v3(filter_cache->detail_directions[i], avg, SCULPT_vertex_co_get(ss, vertex));
}
}
@ -624,11 +628,13 @@ static void mesh_filter_sharpen_init(SculptSession *ss,
filter_cache->detail_directions = MEM_malloc_arrayN(
totvert, sizeof(float[3]), "sharpen detail direction");
const bool weighted = false;
for (int i = 0; i < totvert; i++) {
float avg[3];
SculptVertRef vertex = BKE_pbvh_table_index_to_vertex(ss->pbvh, i);
SCULPT_neighbor_coords_average(ss, avg, vertex, 0.0f, false);
SCULPT_neighbor_coords_average(ss, avg, vertex, 0.0f, false, weighted);
sub_v3_v3v3(filter_cache->detail_directions[i], avg, SCULPT_vertex_co_get(ss, vertex));
filter_cache->sharpen_factor[i] = len_v3(filter_cache->detail_directions[i]);
}

View File

@ -744,8 +744,12 @@ void SCULPT_bmesh_four_neighbor_average(SculptSession *ss,
bool do_origco);
/* Smoothing api */
void SCULPT_neighbor_coords_average(
SculptSession *ss, float result[3], SculptVertRef index, float projection, bool check_fsets);
void SCULPT_neighbor_coords_average(SculptSession *ss,
float result[3],
SculptVertRef index,
float projection,
bool check_fsets,
bool weighted);
float SCULPT_neighbor_mask_average(SculptSession *ss, SculptVertRef index);
void SCULPT_neighbor_color_average(SculptSession *ss, float result[4], SculptVertRef index);
@ -792,7 +796,8 @@ void SCULPT_surface_smooth_laplacian_step(SculptSession *ss,
const float origco[3],
const float alpha,
const float projection,
bool check_fsets);
bool check_fsets,
bool weighted);
void SCULPT_surface_smooth_displace_step(SculptSession *ss,
float *co,

View File

@ -622,10 +622,8 @@ void SCULPT_bmesh_four_neighbor_average(SculptSession *ss,
}
}
static void sculpt_neighbor_coords_average_fset(SculptSession *ss,
float result[3],
SculptVertRef vertex,
float projection)
static void sculpt_neighbor_coords_average_fset(
SculptSession *ss, float result[3], SculptVertRef vertex, float projection, bool weighted)
{
float avg[3] = {0.0f, 0.0f, 0.0f};
float *co, no[3];
@ -638,7 +636,6 @@ static void sculpt_neighbor_coords_average_fset(SculptSession *ss,
SCULPT_vertex_normal_get(ss, vertex, no);
}
const bool weighted = (ss->cache->brush->flag2 & BRUSH_SMOOTH_USE_AREA_WEIGHT) && !boundary;
float *areas;
if (weighted) {
@ -694,11 +691,15 @@ static void sculpt_neighbor_coords_average_fset(SculptSession *ss,
/* Generic functions for laplacian smoothing. These functions do not take boundary vertices into
* account. */
void SCULPT_neighbor_coords_average(
SculptSession *ss, float result[3], SculptVertRef vertex, float projection, bool check_fsets)
void SCULPT_neighbor_coords_average(SculptSession *ss,
float result[3],
SculptVertRef vertex,
float projection,
bool check_fsets,
bool weighted)
{
if (check_fsets) {
sculpt_neighbor_coords_average_fset(ss, result, vertex, projection);
sculpt_neighbor_coords_average_fset(ss, result, vertex, projection, weighted);
return;
}
@ -711,7 +712,6 @@ void SCULPT_neighbor_coords_average(
SCULPT_vertex_normal_get(ss, vertex, no);
}
const bool weighted = ss->cache ? ss->cache->brush->flag2 & BRUSH_SMOOTH_USE_AREA_WEIGHT : false;
float *areas;
if (weighted) {
@ -856,6 +856,7 @@ static void SCULPT_enhance_details_brush(Sculpt *sd,
SculptCustomLayer scl;
SculptLayerParams params = {.permanent = false, .simple_array = false};
bool weighted = SCULPT_get_int(ss, use_weighted_smooth, sd, brush);
SCULPT_temp_customlayer_ensure(
ss, ATTR_DOMAIN_POINT, CD_PROP_FLOAT3, "__dyntopo_detail_dir", &params);
@ -878,7 +879,7 @@ static void SCULPT_enhance_details_brush(Sculpt *sd,
SculptVertRef vertex = BKE_pbvh_table_index_to_vertex(ss->pbvh, i);
float *dir = SCULPT_temp_cdata_get(vertex, &scl);
SCULPT_neighbor_coords_average(ss, avg, vertex, 0.0f, false);
SCULPT_neighbor_coords_average(ss, avg, vertex, 0.0f, false, weighted);
sub_v3_v3v3(dir, avg, SCULPT_vertex_co_get(ss, vertex));
}
}
@ -1220,11 +1221,13 @@ void SCULPT_surface_smooth_laplacian_step(SculptSession *ss,
const float origco[3],
const float alpha,
const float projection,
bool check_fsets)
bool check_fsets,
bool weighted)
{
float laplacian_smooth_co[3];
float weigthed_o[3], weigthed_q[3], d[3];
SCULPT_neighbor_coords_average(ss, laplacian_smooth_co, v_index, projection, check_fsets);
SCULPT_neighbor_coords_average(
ss, laplacian_smooth_co, v_index, projection, check_fsets, weighted);
// int index = BKE_pbvh_vertex_index_to_table(ss->pbvh, v_index);
@ -1316,7 +1319,8 @@ static void SCULPT_do_surface_smooth_brush_laplacian_task_cb_ex(
orig_data.co,
alpha,
data->smooth_projection,
check_fsets);
check_fsets,
weighted);
madd_v3_v3fl(vd.co, disp, clamp_f(fade, 0.0f, 1.0f));
if (vd.mvert) {
vd.mvert->flag |= ME_VERT_PBVH_UPDATE;