Sculpt dyntopo:

* BLI_table_gset now internally uses a SmallHash instead of
  a GHash.  Profiling revealed this to be quite a bit
  faster.
* This is something of a stopgap until C++-ification of
  pbvh, when we'll have our pick of a bunch of
  really nice C++ hashmap libs.
* pbvh_collapse_edge bites the dust; dyntopo now uses
  BM_collapse_edge.  Of the three topology operations
  (subdivide edge, collapse edge, dissolve 3/4 valence
  vertex) only dissolve still has a dyntopo-specific
  implementation.
* Fixed a bunch of annoying memory corruption bugs.
* Non-manifold triangles are now detected in more
  places.

SmallHash changes:

* Enabled removal
* Fixed infinite loop bug caused by
  improperly counting free*d* cells
  versus free cells.
* Added a BLI_smallhash_ensure_p method
  that works just like the GHash version.

Sculpt replay system:

* Roughed out a simple system to record and
  play back sculpt strokes using a simple
  text format.
* This is exclusively for
  performance profiling and unit tests.
* For each brush stroke the system saves a copy
  of the active StrokeCache and PaintStroke
  and parts of Sculpt.

This should make profiling DRAM thrashing a lot
easier.
This commit is contained in:
Joseph Eagar 2021-09-07 23:49:54 -08:00
parent 37bce7b701
commit 3e6edf5278
19 changed files with 2440 additions and 387 deletions

View File

@ -29,7 +29,7 @@
#include "BKE_ccg.h"
#include <stdint.h>
#define DEFRAGMENT_MEMORY
//#define DEFRAGMENT_MEMORY
#ifdef __cplusplus
extern "C" {

File diff suppressed because it is too large Load Diff

View File

@ -3996,6 +3996,11 @@ void BKE_pbvh_get_vert_face_areas(PBVH *pbvh, SculptVertRef vertex, float *r_are
w += BM_ELEM_CD_GET_FLOAT(e->l->radial_next->f, cd_face_area) * 0.5f;
}
if (j >= valence) {
printf("%s: error, corrupt edge cycle\n", __func__);
break;
}
r_areas[j++] = w;
e = v == e->v1 ? e->v1_disk_link.next : e->v2_disk_link.next;

View File

@ -59,6 +59,8 @@ Topology rake:
#include "BKE_DerivedMesh.h"
#include "BKE_ccg.h"
#include "BKE_context.h"
#include "BKE_global.h"
#include "BKE_pbvh.h"
#include "GPU_buffers.h"
@ -1402,26 +1404,24 @@ void bke_pbvh_update_vert_boundary(int cd_dyn_vert,
if (!e) {
mv->flag |= DYNVERT_BOUNDARY;
mv->valence = 0;
return;
}
int fset1 = 0;
int fset2 = 0;
int fset3 = 0;
BMVert *vfset1 = NULL;
BMVert *vfset2 = NULL;
int fset2_count = 0;
int fset1_count = 0;
bool first = true;
int val = 0;
FSetTemp *fsets = NULL;
BLI_array_staticdeclare(fsets, 32);
int sharpcount = 0;
int seamcount = 0;
int fsetcount = 0;
#if 0
struct FaceSetRef {
int fset;
BMVert *v2;
BMEdge *e;
} *fsets = NULL;
#endif
int *fsets = NULL;
BLI_array_staticdeclare(fsets, 16);
do {
BMVert *v2 = v == e->v1 ? e->v2 : e->v1;
@ -1452,73 +1452,38 @@ void bke_pbvh_update_vert_boundary(int cd_dyn_vert,
mv->flag |= DYNVERT_NEED_TRIANGULATE;
}
if (!fset1) {
fset1 = fset;
vfset1 = v2;
}
else if (!fset2 && fset != fset1) {
fset2 = fset;
}
else if (!fset3 && fset != fset1 && fset != fset2) {
fset3 = fset;
bool ok = true;
for (int i = 0; i < BLI_array_len(fsets); i++) {
if (fsets[i] == fset) {
ok = false;
}
}
if (!vfset2 && fset == fset2 && v2 != vfset1) {
vfset2 = v2;
if (ok) {
BLI_array_append(fsets, fset);
}
if (fset == fset1) {
fset1_count++;
}
if (fset == fset2) {
fset2_count++;
}
first = false;
bool bound = (e->l == e->l->radial_next) ||
(abs(BM_ELEM_CD_GET_INT(e->l->f, cd_faceset_offset)) !=
abs(BM_ELEM_CD_GET_INT(e->l->radial_next->f, cd_faceset_offset)));
FSetTemp fs = {.fset = fset, .v = v2, .boundary = bound};
BLI_array_append(fsets, fs);
// also check e->l->radial_next, in case we are not manifold
// which can mess up the loop order
if (e->l->radial_next != e->l) {
// fset = abs(BM_ELEM_CD_GET_INT(e->l->radial_next->f, cd_faceset_offset));
fset = BKE_pbvh_do_fset_symmetry(
int fset2 = BKE_pbvh_do_fset_symmetry(
BM_ELEM_CD_GET_INT(e->l->radial_next->f, cd_faceset_offset), bound_symmetry, v2->co);
bool ok2 = true;
for (int i = 0; i < BLI_array_len(fsets); i++) {
if (fsets[i] == fset2) {
ok2 = false;
}
}
if (ok2) {
BLI_array_append(fsets, fset2);
}
if (e->l->radial_next->f->len > 3) {
mv->flag |= DYNVERT_NEED_TRIANGULATE;
}
if (!fset1) {
fset1 = fset;
}
else if (!fset2 && fset != fset1) {
fset2 = fset;
}
else if (!fset3 && fset != fset1 && fset != fset2) {
fset3 = fset;
}
if (fset == fset1) {
fset1_count++;
}
if (fset == fset2) {
fset2_count++;
}
if (!vfset2 && fset == fset2 && v2 != vfset1) {
vfset2 = v2;
}
FSetTemp fs = {.fset = fset, .v = v2, .boundary = bound};
BLI_array_append(fsets, fs);
}
}
@ -1527,60 +1492,16 @@ void bke_pbvh_update_vert_boundary(int cd_dyn_vert,
}
val++;
e = e->v1 == v ? e->v1_disk_link.next : e->v2_disk_link.next;
} while (e != v->e);
} while ((e = BM_DISK_EDGE_NEXT(e, v)) != v->e);
#if 0
if (fset2 && !fset3) {
int n = MIN2(fset1_count, fset2_count);
float maxth = 0;
float maxth2 = 0;
// find widest angle
for (int i = 0; n < 7 && i < BLI_array_len(fsets); i++) {
if (!fsets[i].boundary) {
continue;
}
for (int j = 0; j < BLI_array_len(fsets); j++) {
if (!fsets[j].boundary) {
continue;
}
if (i != j && fsets[j].fset == fset1 && fsets[i].v != fsets[j].v) {
float th = sculpt_corner_angle(v->co, fsets[i].v->co, fsets[j].v->co);
if (th > maxth) {
maxth = th;
}
}
if (i != j && fsets[j].fset == fset2 && fsets[i].v != fsets[j].v) {
float th = sculpt_corner_angle(v->co, fsets[i].v->co, fsets[j].v->co);
if (th > maxth2) {
maxth2 = th;
}
}
}
}
bool ok = maxth > 0.25 && maxth < M_PI * 0.55;
ok = ok || (maxth2 > 0.25 && maxth2 < M_PI * 0.55);
// 45 degrees
if (ok) {
mv->flag |= DYNVERT_FSET_CORNER;
}
}
else
#endif
if (fset2 && fset3) {
mv->flag |= DYNVERT_FSET_CORNER;
}
if (fset1 && fset2) {
if (BLI_array_len(fsets) > 1) {
mv->flag |= DYNVERT_FSET_BOUNDARY;
}
if (BLI_array_len(fsets) > 2) {
mv->flag |= DYNVERT_FSET_CORNER;
}
if (sharpcount == 1) {
mv->flag |= DYNVERT_SHARP_CORNER;
}
@ -2043,7 +1964,7 @@ static bool pbvh_bmesh_split_tris(PBVH *pbvh, PBVHNode *node)
return true;
}
ATTR_NO_OPT BLI_INLINE PBVHTri *pbvh_tribuf_add_tri(PBVHTriBuf *tribuf)
BLI_INLINE PBVHTri *pbvh_tribuf_add_tri(PBVHTriBuf *tribuf)
{
tribuf->tottri++;
@ -2157,7 +2078,7 @@ static void pbvh_init_tribuf(PBVHNode *node, PBVHTriBuf *tribuf)
* (currently just raycast), store the node's triangles and vertices.
*
* Skips triangles that are hidden. */
ATTR_NO_OPT bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node)
bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node)
{
BMesh *bm = pbvh->bm;
@ -2281,7 +2202,7 @@ ATTR_NO_OPT bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node)
tri->l[j] = (intptr_t)l;
val = NULL;
if (!BLI_smallhash_ensure_p(&tribufs[mat_nr].vertmap, l->v, &val)) {
if (!BLI_smallhash_ensure_p(&mat_tribuf->vertmap, l->v, &val)) {
SculptVertRef sv = {(intptr_t)l->v};
minmax_v3v3_v3(min, max, l->v->co);
@ -2371,8 +2292,8 @@ ATTR_NO_OPT bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node)
pbvh_tribuf_add_edge(node->tribuf, v1, v2);
v1 = (int)BLI_smallhash_lookup(&tribufs[mat_nr].vertmap, (void *)l->e->v1);
v2 = (int)BLI_smallhash_lookup(&tribufs[mat_nr].vertmap, (void *)l->e->v2);
v1 = (int)BLI_smallhash_lookup(&mat_tribuf->vertmap, (void *)l->e->v1);
v2 = (int)BLI_smallhash_lookup(&mat_tribuf->vertmap, (void *)l->e->v2);
pbvh_tribuf_add_edge(mat_tribuf, v1, v2);
} while ((l = l->next) != f->l_first);
@ -2503,10 +2424,20 @@ void BKE_pbvh_bmesh_update_valence(int cd_dyn_vert, SculptVertRef vertex)
mv->valence = 0;
e = v->e;
if (!e) {
return;
}
do {
mv->valence++;
e = v == e->v1 ? e->v1_disk_link.next : e->v2_disk_link.next;
if (!e) {
printf("bmesh error!\n");
break;
}
} while (e != v->e);
}
@ -3217,7 +3148,7 @@ void BKE_pbvh_bmesh_after_stroke(PBVH *pbvh)
BKE_pbvh_update_bounds(pbvh, (PBVH_UpdateBB | PBVH_UpdateOriginalBB | PBVH_UpdateRedraw));
if (pbvh->balance_counter++ == 5) {
if (pbvh->balance_counter++ == 10) {
pbvh_bmesh_balance_tree(pbvh);
pbvh_bmesh_check_nodes(pbvh);
pbvh->balance_counter = 0;
@ -5004,7 +4935,7 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out)
#include "BLI_smallhash.h"
ATTR_NO_OPT static void hash_test()
static void hash_test()
{
const int count = 1024 * 1024 * 4;

View File

@ -141,6 +141,8 @@ struct PBVH {
int idgen;
bool dyntopo_stop;
PBVHNode *nodes;
int node_mem_count, totnode;

View File

@ -39,7 +39,7 @@ typedef struct {
#define SMSTACKSIZE 131
typedef struct SmallHash {
unsigned int nbuckets;
unsigned int nentries;
unsigned int nentries, nfreecells;
unsigned int cursize;
SmallHashEntry *buckets;

View File

@ -2,19 +2,21 @@
#include "BLI_compiler_attrs.h"
#include "BLI_compiler_compat.h"
#
#include "BLI_smallhash.h"
#include "BLI_utildefines.h"
#include "BLI_ghash.h"
#define PTR_TO_IDX(ts) ((GHash *)ts->ptr_to_idx.buckets)
//#define PTR_TO_IDX(ts) ((GHash *)ts->ptr_to_idx.buckets)
#define PTR_TO_IDX(ts) &(ts)->ptr_to_idx
TableGSet *BLI_table_gset_new(const char *info)
{
TableGSet *ts = MEM_callocN(sizeof(TableGSet), info);
ts->ptr_to_idx.buckets = (void *)BLI_ghash_ptr_new(info);
// ts->ptr_to_idx.buckets = (void *)BLI_ghash_ptr_new(info);
BLI_smallhash_init(&ts->ptr_to_idx);
return ts;
}
@ -23,7 +25,9 @@ TableGSet *BLI_table_gset_new_ex(const char *info, int size)
{
TableGSet *ts = MEM_callocN(sizeof(TableGSet), info);
ts->ptr_to_idx.buckets = (void *)BLI_ghash_ptr_new_ex(info, (uint)size);
// ts->ptr_to_idx.buckets = (void *)BLI_ghash_ptr_new_ex(info, (uint)size);
BLI_smallhash_init(&ts->ptr_to_idx);
if (size) {
ts->elems = MEM_callocN(sizeof(void *) * (uint)size, info);
ts->size = size;
@ -44,22 +48,13 @@ void BLI_table_gset_free(TableGSet *ts, GHashKeyFreeFP freefp)
MEM_freeN(ts->elems);
}
BLI_ghash_free(PTR_TO_IDX(ts), freefp, NULL);
// BLI_ghash_free(PTR_TO_IDX(ts), freefp, NULL);
BLI_smallhash_release(&ts->ptr_to_idx);
MEM_freeN(ts);
}
bool BLI_table_gset_add(TableGSet *ts, void *elem)
{
if (BLI_table_gset_haskey(ts, elem)) {
return true;
}
BLI_table_gset_insert(ts, elem);
return false;
}
void BLI_table_gset_insert(TableGSet *ts, void *elem)
static void table_gset_resize(TableGSet *ts)
{
if (ts->cur >= ts->size) {
uint newsize = (uint)(ts->cur + 1);
@ -73,7 +68,7 @@ void BLI_table_gset_insert(TableGSet *ts, void *elem)
ts->elems = (void *)MEM_reallocN(ts->elems, newsize * sizeof(void *));
}
BLI_ghash_clear(PTR_TO_IDX(ts), NULL, NULL);
// BLI_smallhash_clear(PTR_TO_IDX(ts), 0ULL);
// compact
int i = 0, j = 0;
@ -81,7 +76,12 @@ void BLI_table_gset_insert(TableGSet *ts, void *elem)
void *elem2 = ts->elems[i];
if (elem2) {
BLI_ghash_insert(PTR_TO_IDX(ts), elem2, (void *)j);
void **val;
BLI_smallhash_ensure_p(PTR_TO_IDX(ts), (uintptr_t)elem2, &val);
// BLI_smallhash_insert(PTR_TO_IDX(ts), elem2, (void *)j);
*val = POINTER_FROM_INT(j);
ts->elems[j++] = elem2;
}
}
@ -89,8 +89,32 @@ void BLI_table_gset_insert(TableGSet *ts, void *elem)
ts->size = (int)newsize;
ts->cur = j;
}
}
bool BLI_table_gset_add(TableGSet *ts, void *elem)
{
void **val;
table_gset_resize(ts);
bool ret = BLI_smallhash_ensure_p(PTR_TO_IDX(ts), (uintptr_t)elem, &val);
if (!ret) {
*val = ts->cur;
ts->elems[ts->cur++] = elem;
ts->length++;
}
return ret;
}
void BLI_table_gset_insert(TableGSet *ts, void *elem)
{
table_gset_resize(ts);
BLI_smallhash_insert(PTR_TO_IDX(ts), elem, (void *)ts->cur);
BLI_ghash_insert(PTR_TO_IDX(ts), elem, (void *)ts->cur);
ts->elems[ts->cur++] = elem;
ts->length++;
}
@ -101,14 +125,14 @@ void BLI_table_gset_remove(TableGSet *ts, void *elem, GHashKeyFreeFP freefp)
return;
}
int *idx = (int *)BLI_ghash_lookup_p(PTR_TO_IDX(ts), elem);
int *idx = (int *)BLI_smallhash_lookup_p(PTR_TO_IDX(ts), elem);
if (!idx) {
return;
}
int idx2 = *idx;
BLI_ghash_remove(PTR_TO_IDX(ts), elem, freefp, NULL);
BLI_smallhash_remove(PTR_TO_IDX(ts), elem);
if (!ts->elems || ts->elems[idx2] != elem) {
return;
@ -120,7 +144,7 @@ void BLI_table_gset_remove(TableGSet *ts, void *elem, GHashKeyFreeFP freefp)
bool BLI_table_gset_haskey(TableGSet *ts, void *elem)
{
return BLI_ghash_haskey(PTR_TO_IDX(ts), elem);
return BLI_smallhash_haskey(PTR_TO_IDX(ts), elem);
}
int BLI_table_gset_len(TableGSet *ts)

View File

@ -134,10 +134,16 @@ BLI_INLINE uintptr_t smallhash_key(const uintptr_t key)
/**
* Check if the number of items in the smallhash is large enough to require more buckets.
*/
BLI_INLINE bool smallhash_test_expand_buckets(const uint nentries, const uint nbuckets)
BLI_INLINE bool smallhash_test_expand_buckets(const uint nentries,
const uint nbuckets,
const uint nfreecells)
{
if (nfreecells < 3) {
return true;
}
/* (approx * 1.5) */
return (nentries + (nentries >> 1)) > nbuckets;
return (nentries + (nentries >> 1)) > nbuckets || nfreecells < 3;
}
BLI_INLINE void smallhash_init_empty(SmallHash *sh)
@ -159,8 +165,9 @@ BLI_INLINE void smallhash_init_empty(SmallHash *sh)
*/
BLI_INLINE void smallhash_buckets_reserve(SmallHash *sh, const uint nentries_reserve)
{
while (smallhash_test_expand_buckets(nentries_reserve, sh->nbuckets)) {
while (smallhash_test_expand_buckets(nentries_reserve, sh->nbuckets, sh->nbuckets + 5)) {
sh->nbuckets = hashsizes[++sh->cursize];
sh->nfreecells = sh->nbuckets;
}
}
@ -192,8 +199,7 @@ BLI_INLINE SmallHashEntry *smallhash_lookup(SmallHash *sh, const uintptr_t key)
return NULL;
}
ATTR_NO_OPT BLI_INLINE SmallHashEntry *smallhash_lookup_first_free(SmallHash *sh,
const uintptr_t key)
BLI_INLINE SmallHashEntry *smallhash_lookup_first_free(SmallHash *sh, const uintptr_t key)
{
check_stack_move(sh);
@ -238,6 +244,8 @@ BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
}
sh->nbuckets = nbuckets;
sh->nfreecells = nbuckets;
sh->nentries = 0;
BLI_asan_poison(&sh->buckets, sizeof(void *));
smallhash_init_empty(sh);
@ -245,8 +253,12 @@ BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
for (i = 0; i < nbuckets_old; i++) {
if (smallhash_val_is_used(buckets_old[i].val)) {
SmallHashEntry *e = smallhash_lookup_first_free(sh, buckets_old[i].key);
e->key = buckets_old[i].key;
e->val = buckets_old[i].val;
sh->nfreecells--;
sh->nentries++;
}
}
@ -263,6 +275,7 @@ void BLI_smallhash_init_ex(SmallHash *sh, const uint nentries_reserve)
sh->cursize = 2;
sh->using_stack = true;
sh->nbuckets = hashsizes[sh->cursize];
sh->nfreecells = sh->nbuckets;
sh->buckets = sh->buckets_stack;
@ -295,12 +308,12 @@ void BLI_smallhash_release(SmallHash *sh)
BLI_asan_unpoison(&sh->buckets, sizeof(void *));
if (sh->buckets != sh->buckets_stack) {
if (sh->buckets && sh->buckets != sh->buckets_stack) {
MEM_freeN(sh->buckets);
}
}
ATTR_NO_OPT bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***item)
bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***item)
{
check_stack_move(sh);
@ -308,7 +321,7 @@ ATTR_NO_OPT bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***it
uintptr_t h = smallhash_key(key);
uintptr_t hoff = 1;
if (UNLIKELY(smallhash_test_expand_buckets(sh->nentries, sh->nbuckets))) {
if (UNLIKELY(smallhash_test_expand_buckets(sh->nentries, sh->nbuckets, sh->nfreecells))) {
smallhash_resize_buckets(sh, hashsizes[++sh->cursize]);
}
@ -333,6 +346,7 @@ ATTR_NO_OPT bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***it
if (e->val == SMHASH_CELL_FREE || e->val == SMHASH_CELL_UNUSED) {
sh->nentries++;
sh->nfreecells--;
ret = false;
}
else {
@ -355,11 +369,20 @@ void BLI_smallhash_insert(SmallHash *sh, uintptr_t key, void *item)
BLI_assert(smallhash_val_is_used(item));
BLI_assert(BLI_smallhash_haskey(sh, key) == false);
if (UNLIKELY(smallhash_test_expand_buckets(++sh->nentries, sh->nbuckets))) {
if (UNLIKELY(smallhash_test_expand_buckets(sh->nentries, sh->nbuckets, sh->nfreecells))) {
smallhash_resize_buckets(sh, hashsizes[++sh->cursize]);
}
e = smallhash_lookup_first_free(sh, key);
if (e->val == SMHASH_CELL_FREE) {
sh->nentries++;
sh->nfreecells--;
}
else if (e->val == SMHASH_CELL_UNUSED) {
sh->nentries++;
}
e->key = key;
e->val = item;
}
@ -386,11 +409,30 @@ bool BLI_smallhash_reinsert(SmallHash *sh, uintptr_t key, void *item)
#ifdef USE_REMOVE
bool BLI_smallhash_remove(SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
check_stack_move(sh);
// SmallHashEntry *e = smallhash_lookup(sh, key);
SmallHashEntry *e;
uintptr_t h = smallhash_key(key);
uintptr_t hoff = 1;
for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;
h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
if (e->key == key) {
/* should never happen because unused keys are zero'd */
BLI_assert(e->val != SMHASH_CELL_UNUSED);
break;
}
}
if (e && e->key == key) {
h = SMHASH_NEXT(h, hoff);
SmallHashEntry *e2 = &sh->buckets[h & sh->nbuckets];
if (e) {
e->key = SMHASH_KEY_UNUSED;
e->val = SMHASH_CELL_UNUSED;
sh->nentries--;
return true;
@ -446,6 +488,8 @@ void BLI_smallhash_clear(SmallHash *sh, uintptr_t key)
e->val = SMHASH_CELL_FREE;
}
sh->nentries = 0;
BLI_asan_poison(&sh->buckets, sizeof(void *));
}

View File

@ -955,6 +955,11 @@ static void bm_log_vert_values_swap(
continue;
}
if (v->head.htype != BM_VERT) {
printf("not a vertex: %d\n", v->head.htype);
continue;
}
swap_v3_v3(v->co, lv->co);
swap_v3_v3(v->no, lv->no);
@ -1942,6 +1947,11 @@ BMLogEntry *BM_log_all_ids(BMesh *bm, BMLog *log, BMLogEntry *entry)
entry = bm_log_entry_add_ex(bm, log, true, LOG_ENTRY_MESH_IDS, entry);
}
if (!entry) {
// log was dead
return NULL;
}
log_idmap_save(bm, log, entry);
return entry;
}
@ -2042,6 +2052,26 @@ void BM_log_redo_skip(BMesh *bm, BMLog *log)
}
}
/* Undo exactly one BMLog entry (a single sub-step) instead of walking the
 * whole combined-entry chain like BM_log_undo does.
 *
 * bm:            mesh the log operates on (stored into log->bm).
 * log:           undo log; log->current_entry is advanced backwards.
 * callbacks:     per-element callbacks invoked by bm_log_undo_intern.
 * node_layer_id: name of the PBVH node-index customdata layer passed through
 *                to bm_log_undo_intern.
 *
 * No-op when there is no current entry. */
void BM_log_undo_single(BMesh *bm,
BMLog *log,
BMLogCallbacks *callbacks,
const char *node_layer_id)
{
BMLogEntry *entry = log->current_entry;
log->bm = bm;
if (!entry) {
return;
}
/* Remember the plain previous entry as a fallback: if this entry has no
 * combined_prev (i.e. it was the first sub-step of its combined group),
 * stay on preventry rather than setting current_entry to NULL. */
BMLogEntry *preventry = entry->prev;
bm_log_undo_intern(bm, log, entry, callbacks, node_layer_id);
/* Step back within the combined-entry chain if possible. */
entry = entry->combined_prev;
log->current_entry = entry ? entry : preventry;
}
void BM_log_undo(BMesh *bm, BMLog *log, BMLogCallbacks *callbacks, const char *node_layer_id)
{
BMLogEntry *entry = log->current_entry;
@ -2152,6 +2182,7 @@ void BM_log_redo(BMesh *bm, BMLog *log, BMLogCallbacks *callbacks, const char *n
log->current_entry = nextentry;
}
/* Log a vertex before it is modified
*
* Before modifying vertex coordinates, masks, or hflags, call this

View File

@ -386,6 +386,10 @@ void BM_mesh_clear(BMesh *bm)
bm->idmap.flag = idmap_flags;
if (bm->idmap.flag & BM_HAS_IDS) {
bm->idmap.map = NULL;
bm->idmap.ghash = NULL;
bm->idmap.map_size = 0;
#ifndef WITH_BM_ID_FREELIST
bm->idmap.idtree = range_tree_uint_alloc(0, (uint)-1);
#else
@ -1970,7 +1974,7 @@ bool BM_defragment_vertex(BMesh *bm,
{
BMEdge *e = v->e;
#if 0
#if 1
int cd_vcol = CustomData_get_offset(&bm->vdata, CD_PROP_COLOR);
if (cd_vcol >= 0) {
@ -1985,7 +1989,7 @@ bool BM_defragment_vertex(BMesh *bm,
}
#endif
return false;
// return false;
// return false;

View File

@ -79,6 +79,7 @@ set(SRC
sculpt_uv.c
sculpt_displacement.c
sculpt_displacement.h
sculpt_replay.c
paint_intern.h
sculpt_intern.h

View File

@ -1735,7 +1735,7 @@ SculptCornerType SCULPT_vertex_is_corner(const SculptSession *ss,
SculptCornerType cornertype)
{
bool check_facesets = cornertype & SCULPT_CORNER_FACE_SET;
bool ret = false;
SculptCornerType ret = 0;
switch (BKE_pbvh_type(ss->pbvh)) {
case PBVH_BMESH: {
@ -2221,7 +2221,7 @@ static bool sculpt_tool_needs_original(const char sculpt_tool)
SCULPT_TOOL_POSE);
}
static bool sculpt_tool_is_proxy_used(const char sculpt_tool)
bool sculpt_tool_is_proxy_used(const char sculpt_tool)
{
return ELEM(sculpt_tool,
SCULPT_TOOL_SMOOTH,
@ -7190,6 +7190,8 @@ int SCULPT_get_symmetry_pass(const SculptSession *ss)
typedef struct DynTopoAutomaskState {
AutomaskingCache *cache;
SculptSession *ss;
AutomaskingCache _fixed;
bool free_automasking;
} DynTopoAutomaskState;
static float sculpt_topology_automasking_cb(SculptVertRef vertex, void *vdata)
@ -7208,8 +7210,9 @@ static float sculpt_topology_automasking_mask_cb(SculptVertRef vertex, void *vda
}
bool SCULPT_dyntopo_automasking_init(const SculptSession *ss,
Sculpt *sd,
const Brush *br,
const Sculpt *sd,
Object *ob,
DyntopoMaskCB *r_mask_cb,
void **r_mask_cb_data)
{
@ -7217,7 +7220,14 @@ bool SCULPT_dyntopo_automasking_init(const SculptSession *ss,
if (CustomData_has_layer(&ss->bm->vdata, CD_PAINT_MASK)) {
DynTopoAutomaskState *state = MEM_callocN(sizeof(DynTopoAutomaskState),
"DynTopoAutomaskState");
state->cache = ss->cache->automasking;
if (!ss->cache) {
state->cache = SCULPT_automasking_cache_init(sd, br, ob);
}
else {
state->cache = ss->cache->automasking;
}
state->ss = (SculptSession *)ss;
*r_mask_cb_data = (void *)state;
@ -7233,7 +7243,14 @@ bool SCULPT_dyntopo_automasking_init(const SculptSession *ss,
}
DynTopoAutomaskState *state = MEM_callocN(sizeof(DynTopoAutomaskState), "DynTopoAutomaskState");
state->cache = ss->cache->automasking;
if (!ss->cache) {
state->cache = SCULPT_automasking_cache_init(sd, br, ob);
state->free_automasking = true;
}
else {
state->cache = ss->cache->automasking;
}
state->ss = (SculptSession *)ss;
*r_mask_cb_data = (void *)state;
@ -7316,7 +7333,7 @@ static void sculpt_topology_update(Sculpt *sd,
void *mask_cb_data;
DyntopoMaskCB mask_cb;
SCULPT_dyntopo_automasking_init(ss, brush, sd, &mask_cb, &mask_cb_data);
SCULPT_dyntopo_automasking_init(ss, sd, brush, ob, &mask_cb, &mask_cb_data);
/* do nodes under the brush cursor */
modified = BKE_pbvh_bmesh_update_topology_nodes(
@ -7384,7 +7401,7 @@ static void do_brush_action_task_cb(void *__restrict userdata,
}
}
static void do_brush_action(Sculpt *sd, Object *ob, Brush *brush, UnifiedPaintSettings *ups)
void do_brush_action(Sculpt *sd, Object *ob, Brush *brush, UnifiedPaintSettings *ups)
{
SculptSession *ss = ob->sculpt;
int totnode;
@ -7535,6 +7552,8 @@ static void do_brush_action(Sculpt *sd, Object *ob, Brush *brush, UnifiedPaintSe
bool invert = ss->cache->pen_flip || ss->cache->invert || brush->flag & BRUSH_DIR_IN;
SCULPT_replay_log_append(sd, ss, ob);
/* Apply one type of brush action. */
switch (brush->sculpt_tool) {
case SCULPT_TOOL_DRAW:
@ -7839,7 +7858,7 @@ static void sculpt_combine_proxies_task_cb(void *__restrict userdata,
BKE_pbvh_node_free_proxies(data->nodes[n]);
}
static void sculpt_combine_proxies(Sculpt *sd, Object *ob)
void sculpt_combine_proxies(Sculpt *sd, Object *ob)
{
SculptSession *ss = ob->sculpt;
Brush *brush = BKE_paint_brush(&sd->paint);
@ -8904,7 +8923,7 @@ static void sculpt_update_cache_variants(bContext *C, Sculpt *sd, Object *ob, Po
/* Returns true if any of the smoothing modes are active (currently
* one of smooth brush, autosmooth, mask smooth, or shift-key
* smooth). */
static bool sculpt_needs_connectivity_info(const Sculpt *sd,
static bool sculpt_needs_connectivity_info(Sculpt *sd,
const Brush *brush,
SculptSession *ss,
int stroke_mode)
@ -9401,7 +9420,8 @@ void SCULPT_flush_update_step(bContext *C, SculptUpdateType update_flags)
SCULPT_update_object_bounding_box(ob);
}
if (SCULPT_get_redraw_rect(region, CTX_wm_region_view3d(C), ob, &r)) {
if (CTX_wm_region_view3d(C) &&
SCULPT_get_redraw_rect(region, CTX_wm_region_view3d(C), ob, &r)) {
if (ss->cache) {
ss->cache->current_r = r;
}
@ -9561,7 +9581,7 @@ static bool sculpt_stroke_test_start(bContext *C, struct wmOperator *op, const f
return false;
}
static void sculpt_stroke_update_step(bContext *C, struct PaintStroke *stroke, PointerRNA *itemptr)
void sculpt_stroke_update_step(bContext *C, struct PaintStroke *stroke, PointerRNA *itemptr)
{
UnifiedPaintSettings *ups = &CTX_data_tool_settings(C)->unified_paint_settings;
Sculpt *sd = CTX_data_tool_settings(C)->sculpt;
@ -9575,6 +9595,7 @@ static void sculpt_stroke_update_step(bContext *C, struct PaintStroke *stroke, P
if (SCULPT_stroke_is_first_brush_step(ss->cache)) {
ss->cache->last_dyntopo_t = 0.0f;
memset((void *)ss->cache->last_smooth_t, 0, sizeof(ss->cache->last_smooth_t));
memset((void *)ss->cache->last_rake_t, 0, sizeof(ss->cache->last_rake_t));
}
@ -9582,7 +9603,9 @@ static void sculpt_stroke_update_step(bContext *C, struct PaintStroke *stroke, P
BKE_brush_get_dyntopo(brush, sd, &brush->cached_dyntopo);
SCULPT_stroke_modifiers_check(C, ob, brush);
sculpt_update_cache_variants(C, sd, ob, itemptr);
if (itemptr) {
sculpt_update_cache_variants(C, sd, ob, itemptr);
}
sculpt_restore_mesh(sd, ob);
int boundsym = BKE_get_fset_boundary_symflag(ob);

View File

@ -73,9 +73,7 @@ AutomaskingCache *SCULPT_automasking_active_cache_get(SculptSession *ss)
return NULL;
}
bool SCULPT_is_automasking_mode_enabled(const Sculpt *sd,
const Brush *br,
const eAutomasking_flag mode)
bool SCULPT_is_automasking_mode_enabled(Sculpt *sd, const Brush *br, const eAutomasking_flag mode)
{
if (br) {
return br->automasking_flags & mode || sd->automasking_flags & mode;
@ -83,7 +81,7 @@ bool SCULPT_is_automasking_mode_enabled(const Sculpt *sd,
return sd->automasking_flags & mode;
}
bool SCULPT_is_automasking_enabled(const Sculpt *sd, const SculptSession *ss, const Brush *br)
bool SCULPT_is_automasking_enabled(Sculpt *sd, const SculptSession *ss, const Brush *br)
{
/*
if (br && SCULPT_stroke_is_dynamic_topology(ss, br)) {
@ -112,7 +110,7 @@ bool SCULPT_is_automasking_enabled(const Sculpt *sd, const SculptSession *ss, co
return false;
}
static int sculpt_automasking_mode_effective_bits(const Sculpt *sculpt, const Brush *brush)
static int sculpt_automasking_mode_effective_bits(Sculpt *sculpt, const Brush *brush)
{
if (brush) {
return sculpt->automasking_flags | brush->automasking_flags;
@ -132,7 +130,7 @@ static float sculpt_concavity_factor(AutomaskingCache *automasking, float fac)
return fac;
}
static bool SCULPT_automasking_needs_factors_cache(const Sculpt *sd, const Brush *brush)
static bool SCULPT_automasking_needs_factors_cache(Sculpt *sd, const Brush *brush)
{
const int automasking_flags = sculpt_automasking_mode_effective_bits(sd, brush);
@ -215,7 +213,7 @@ void SCULPT_automasking_cache_free(AutomaskingCache *automasking)
MEM_SAFE_FREE(automasking);
}
static bool sculpt_automasking_is_constrained_by_radius(Brush *br)
static bool sculpt_automasking_is_constrained_by_radius(const Brush *br)
{
/* 2D falloff is not constrained by radius. */
if (br->falloff_shape == PAINT_FALLOFF_SHAPE_TUBE) {
@ -257,7 +255,7 @@ static void SCULPT_topology_automasking_init(Sculpt *sd,
SculptCustomLayer *factorlayer)
{
SculptSession *ss = ob->sculpt;
Brush *brush = BKE_paint_brush(&sd->paint);
const Brush *brush = BKE_paint_brush(&sd->paint);
if (BKE_pbvh_type(ss->pbvh) == PBVH_FACES && !ss->pmap) {
BLI_assert_msg(0, "Topology masking: pmap missing");
@ -389,7 +387,7 @@ void SCULPT_boundary_automasking_init(Object *ob,
static void SCULPT_automasking_cache_settings_update(AutomaskingCache *automasking,
SculptSession *ss,
Sculpt *sd,
Brush *brush)
const Brush *brush)
{
automasking->settings.flags = sculpt_automasking_mode_effective_bits(sd, brush);
@ -431,7 +429,7 @@ float SCULPT_calc_concavity(SculptSession *ss, SculptVertRef vref)
}
static void SCULPT_concavity_automasking_init(Object *ob,
Brush *brush,
const Brush *brush,
AutomaskingCache *automasking,
SculptCustomLayer *factorlayer)
{
@ -453,7 +451,7 @@ static void SCULPT_concavity_automasking_init(Object *ob,
// BKE_pbvh_vertex_iter_begin
}
AutomaskingCache *SCULPT_automasking_cache_init(Sculpt *sd, Brush *brush, Object *ob)
AutomaskingCache *SCULPT_automasking_cache_init(Sculpt *sd, const Brush *brush, Object *ob)
{
SculptSession *ss = ob->sculpt;
const int totvert = SCULPT_vertex_count_get(ss);

View File

@ -119,7 +119,7 @@ static int sculpt_detail_flood_fill_exec(bContext *C, wmOperator *UNUSED(op))
DyntopoMaskCB mask_cb;
void *mask_cb_data;
SCULPT_dyntopo_automasking_init(ss, NULL, sd, &mask_cb, &mask_cb_data);
SCULPT_dyntopo_automasking_init(ss, sd, NULL, ob, &mask_cb, &mask_cb_data);
while (BKE_pbvh_bmesh_update_topology(ss->pbvh,
PBVH_Collapse | PBVH_Subdivide,

View File

@ -84,12 +84,12 @@ typedef enum SculptUpdateType {
SCULPT_UPDATE_COLOR = 1 << 3,
} SculptUpdateType;
void SCULPT_flush_update_step(bContext *C, SculptUpdateType update_flags);
void SCULPT_flush_update_done(const bContext *C, Object *ob, SculptUpdateType update_flags);
void SCULPT_flush_update_step(struct bContext *C, SculptUpdateType update_flags);
void SCULPT_flush_update_done(const struct bContext *C, Object *ob, SculptUpdateType update_flags);
void SCULPT_flush_stroke_deform(struct Sculpt *sd, Object *ob, bool is_proxy_used);
/* Should be used after modifying the mask or Face Sets IDs. */
void SCULPT_tag_update_overlays(bContext *C);
void SCULPT_tag_update_overlays(struct bContext *C);
/* Stroke */
@ -437,13 +437,11 @@ float SCULPT_automasking_factor_get(struct AutomaskingCache *automasking,
* brushes and filter. */
struct AutomaskingCache *SCULPT_automasking_active_cache_get(SculptSession *ss);
struct AutomaskingCache *SCULPT_automasking_cache_init(Sculpt *sd, Brush *brush, Object *ob);
struct AutomaskingCache *SCULPT_automasking_cache_init(Sculpt *sd, const Brush *brush, Object *ob);
void SCULPT_automasking_cache_free(struct AutomaskingCache *automasking);
bool SCULPT_is_automasking_mode_enabled(const Sculpt *sd,
const Brush *br,
const eAutomasking_flag mode);
bool SCULPT_is_automasking_enabled(const Sculpt *sd, const SculptSession *ss, const Brush *br);
bool SCULPT_is_automasking_mode_enabled(Sculpt *sd, const Brush *br, const eAutomasking_flag mode);
bool SCULPT_is_automasking_enabled(Sculpt *sd, const SculptSession *ss, const Brush *br);
typedef enum eBoundaryAutomaskMode {
AUTOMASK_INIT_BOUNDARY_EDGES = 1,
@ -1623,8 +1621,9 @@ bool SCULPT_temp_customlayer_get(
SculptSession *ss, AttributeDomain domain, int proptype, char *name, SculptCustomLayer *scl);
bool SCULPT_dyntopo_automasking_init(const SculptSession *ss,
Sculpt *sd,
const Brush *br,
const Sculpt *sd,
Object *ob,
DyntopoMaskCB *r_mask_cb,
void **r_mask_cb_data);
void SCULPT_dyntopo_automasking_end(void *mask_data);
@ -1672,3 +1671,24 @@ void SCULPT_edge_get_verts(const SculptSession *ss,
SculptVertRef SCULPT_edge_other_vertex(const SculptSession *ss,
const SculptEdgeRef edge,
const SculptVertRef vertex);
#define SCULPT_REPLAY
#ifdef SCULPT_REPLAY
struct SculptReplayLog;
struct SculptBrushSample;
# ifdef WIN32
# define REPLAY_EXPORT __declspec(dllexport)
# else
# define REPLAY_EXPORT
# endif
void SCULPT_replay_log_free(struct SculptReplayLog *log);
struct SculptReplayLog *SCULPT_replay_log_create();
void SCULPT_replay_log_end();
void SCULPT_replay_log_start();
char *SCULPT_replay_serialize();
void SCULPT_replay_log_append(struct Sculpt *sd, struct SculptSession *ss, struct Object *ob);
void SCULPT_replay_test(void);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -296,9 +296,21 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
add_v3_v3(result, co);
}
if (SCULPT_vertex_is_corner(ss, vertex, ctype)) {
interp_v3_v3v3(result, result, co, 1.0f - slide_fset);
SculptCornerType c = SCULPT_vertex_is_corner(ss, vertex, ctype);
float corner_smooth;
if (c == 0) {
return;
}
if (c & SCULPT_CORNER_FACE_SET) {
corner_smooth = MAX2(slide_fset, bound_smooth);
}
else {
corner_smooth = bound_smooth;
}
interp_v3_v3v3(result, result, co, 1.0f - corner_smooth);
}
void SCULPT_neighbor_coords_average_interior_velocity(SculptSession *ss,
@ -485,7 +497,7 @@ void SCULPT_bmesh_four_neighbor_average(SculptSession *ss,
SculptVertRef vertex = {.i = (intptr_t)v};
int val = SCULPT_vertex_valence_get(ss, vertex);
areas = BLI_array_alloca(areas, val);
areas = BLI_array_alloca(areas, val * 2);
BKE_pbvh_get_vert_face_areas(ss->pbvh, vertex, areas, val);
}
@ -1061,9 +1073,9 @@ static void do_smooth_brush_task_cb_ex(void *__restrict userdata,
else {
float avg[3], val[3];
if (SCULPT_vertex_is_corner(ss, vd.vertex, ctype) & ~SCULPT_CORNER_FACE_SET) {
// continue;
}
// if (SCULPT_vertex_is_corner(ss, vd.vertex, ctype) & ~SCULPT_CORNER_FACE_SET) {
// continue;
//}
SCULPT_neighbor_coords_average_interior(ss, avg, vd.vertex, projection, bound_scl);
sub_v3_v3v3(val, avg, vd.co);

View File

@ -2479,3 +2479,61 @@ void sculpt_undo_print_nodes(void *active)
#endif
}
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
void BM_log_undo_single(BMesh *bm,
BMLog *log,
BMLogCallbacks *callbacks,
const char *node_layer_id);
/* Debug helper (exposed through RNA as `debug_substep_undo`): undoes a single
 * BMLog sub-step while in dyntopo sculpt mode, then re-evaluates the object so
 * the viewport reflects the change.
 *
 * NOTE(review): `dir` is currently unused in this body; presumably it is meant
 * to select undo vs. redo direction -- TODO confirm/implement. */
void SCULPT_substep_undo(bContext *C, int dir)
{
Scene *scene = CTX_data_scene(C);
Object *ob = CTX_data_active_object(C);
Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C);
/* Guard: needs an active sculpt-mode object. */
if (!scene || !ob || !ob->sculpt) {
printf("not in sculpt mode\n");
return;
}
SculptSession *ss = ob->sculpt;
/* BMLog-based undo only exists for dyntopo (bmesh) sculpting. */
if (!ss->bm) {
printf("not in dyntopo mode\n");
return;
}
/* NOTE(review): positional initialization -- the order here is assumed to
 * match the BmeshUndoData field declaration order; verify if that struct
 * ever changes. */
BmeshUndoData data = {ss->pbvh,
ss->bm,
false,
false,
ss->cd_face_node_offset,
ss->cd_vert_node_offset,
ss->cd_dyn_vert,
false,
false};
/* Callback table routes each BMLog element event back into the PBVH so its
 * nodes stay in sync with the bmesh edits; `data` is the shared userdata. */
BMLogCallbacks callbacks = {bmesh_undo_on_vert_add,
bmesh_undo_on_vert_kill,
bmesh_undo_on_vert_change,
bmesh_undo_on_edge_add,
bmesh_undo_on_edge_kill,
bmesh_undo_on_edge_change,
bmesh_undo_on_face_add,
bmesh_undo_on_face_kill,
bmesh_undo_on_face_change,
bmesh_undo_full_mesh,
NULL,
(void *)&data};
BM_log_undo_single(ss->bm, ss->bm_log, &callbacks, dyntopop_node_idx_layer_id);
/* Re-run object evaluation and tag for redraw. */
BKE_sculpt_update_object_for_edit(depsgraph, ob, true, false, false);
DEG_id_tag_update(&ob->id, ID_RECALC_SHADING);
}

View File

@ -542,6 +542,58 @@ static bool rna_ImaPaint_detect_data(ImagePaintSettings *imapaint)
return imapaint->missing_data == 0;
}
/* Forward declarations of the sculpt-replay API (profiling/test tooling);
 * the RNA wrappers below forward to these.
 *
 * No-argument functions are declared `(void)`: in C, `()` declares a function
 * with an unspecified parameter list, not an empty one. */
void SCULPT_replay_log_free(struct SculptReplayLog *log);
struct SculptReplayLog *SCULPT_replay_log_create(void);
void SCULPT_replay_log_end(void);
void SCULPT_replay_log_start(void);
char *SCULPT_replay_serialize(void);
void SCULPT_replay_log_append(struct Sculpt *sd, struct SculptSession *ss, struct Object *ob);
void SCULPT_replay_test(void);
void SCULPT_replay_parse(const char *buf);
void SCULPT_replay(bContext *ctx);
/* RNA glue: runs the sculpt-replay serialization self-test.  The Sculpt
 * pointer is required by the RNA callback signature but not used here. */
static void rna_SCULPT_replay_test(Sculpt *sculpt)
{
  (void)sculpt; /* Unused; silences -Wunused-parameter (file also uses UNUSED()). */
  SCULPT_replay_test();
}
/* RNA glue: starts recording a sculpt-replay log.  The Sculpt pointer is
 * required by the RNA callback signature but not used here. */
static void rna_SCULPT_replay_start(Sculpt *sculpt)
{
  (void)sculpt; /* Unused; silences -Wunused-parameter (file also uses UNUSED()). */
  SCULPT_replay_log_start();
}
/* RNA glue: serializes the recorded sculpt-replay log to its text format.
 *
 * NOTE(review): SCULPT_replay_serialize() is declared as returning `char *`
 * (presumably an allocated buffer); returning it through RNA as `const char *`
 * may leak -- verify ownership against the RNA string-return convention. */
static const char *rna_SCULPT_replay_serialize(Sculpt *sculpt)
{
  (void)sculpt; /* Unused; silences -Wunused-parameter (file also uses UNUSED()). */
  return SCULPT_replay_serialize();
}
/* RNA glue: parses a serialized sculpt-replay log from `buf`.  The Sculpt
 * pointer is required by the RNA callback signature but not used here. */
static void rna_SCULPT_replay_parse(Sculpt *sculpt, const char *buf)
{
  (void)sculpt; /* Unused; silences -Wunused-parameter (file also uses UNUSED()). */
  SCULPT_replay_parse(buf);
}
/* RNA glue: ends recording and frees the sculpt-replay log.  The Sculpt
 * pointer is required by the RNA callback signature but not used here. */
static void rna_SCULPT_replay_free(Sculpt *sculpt)
{
  (void)sculpt; /* Unused; silences -Wunused-parameter (file also uses UNUSED()). */
  SCULPT_replay_log_end();
}
/* RNA glue: plays back the recorded sculpt-replay log.  Takes the full
 * context (registered with FUNC_NO_SELF | FUNC_USE_CONTEXT). */
static void rna_SCULPT_replay_replay(bContext *ctx)
{
SCULPT_replay(ctx);
}
void SCULPT_replay_make_cube(struct bContext *C, int steps);
/* RNA glue: builds a test cube with `steps` subdivisions for replay/profiling
 * runs (registered with FUNC_NO_SELF | FUNC_USE_CONTEXT). */
static void rna_SCULPT_replay_make_cube(bContext *ctx, int steps)
{
SCULPT_replay_make_cube(ctx, steps);
}
void SCULPT_substep_undo(bContext *ctx, int dir);
/* RNA glue: debug hook stepping the dyntopo BMLog undo by one sub-step
 * (registered as `debug_substep_undo`, FUNC_NO_SELF | FUNC_USE_CONTEXT). */
static void rna_SCULPT_substep_undo(bContext *ctx, int dir)
{
SCULPT_substep_undo(ctx, dir);
}
static char *rna_GPencilSculptSettings_path(PointerRNA *UNUSED(ptr))
{
return BLI_strdup("tool_settings.gpencil_sculpt");
@ -914,6 +966,41 @@ static void rna_def_sculpt(BlenderRNA *brna)
RNA_def_property_ui_text(
prop, "Orientation", "Object whose Z axis defines orientation of gravity");
RNA_def_property_update(prop, NC_SCENE | ND_TOOLSETTINGS, NULL);
/* functions */
FunctionRNA *func;
func = RNA_def_function(srna, "test_replay", "rna_SCULPT_replay_test");
RNA_def_function_ui_description(func, "Test sculpt replay serialization");
func = RNA_def_function(srna, "replay_start", "rna_SCULPT_replay_start");
RNA_def_function_ui_description(func, "Test sculpt replay serialization");
func = RNA_def_function(srna, "replay_free", "rna_SCULPT_replay_free");
RNA_def_function_ui_description(func, "Test sculpt replay serialization");
func = RNA_def_function(srna, "replay_replay", "rna_SCULPT_replay_replay");
RNA_def_function_ui_description(func, "Test sculpt replay serialization");
RNA_def_function_flag(func, FUNC_NO_SELF | FUNC_USE_CONTEXT);
func = RNA_def_function(srna, "replay_make_cube", "rna_SCULPT_replay_make_cube");
RNA_def_function_ui_description(func, "Test sculpt replay serialization");
RNA_def_function_flag(func, FUNC_NO_SELF | FUNC_USE_CONTEXT);
RNA_def_int(func, "steps", 15, 1, 500, "steps", "steps", 1, 250);
func = RNA_def_function(srna, "debug_substep_undo", "rna_SCULPT_substep_undo");
RNA_def_function_ui_description(func, "Test function");
RNA_def_function_flag(func, FUNC_NO_SELF | FUNC_USE_CONTEXT);
RNA_def_int(func, "dir", -1, -1, 1, "dir", "dir", -1, 1);
func = RNA_def_function(srna, "replay_serialize", "rna_SCULPT_replay_serialize");
RNA_def_function_ui_description(func, "Test sculpt replay serialization");
RNA_def_function_return(func, RNA_def_string(func, "ret", NULL, 1024 * 32, "return", "return"));
func = RNA_def_function(srna, "replay_parse", "rna_SCULPT_replay_parse");
RNA_def_string(func, "buf", NULL, 1024 * 32, "buf", "buf");
RNA_def_function_ui_description(func, "Test sculpt replay serialization");
}
static void rna_def_uv_sculpt(BlenderRNA *brna)