Sculpt: revert the past several commits

Note that I never pushed any of these to the remote.

I decided to work on this in a local branch. I am keeping
the lock-free mempool code, though.
Joseph Eagar 2021-10-20 17:22:40 -07:00
parent 6e74907f4f
commit c563169e32
27 changed files with 212 additions and 895 deletions
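For reference, most of the changes below follow one pattern: calls through the BM_mempool_* wrapper layer (which could resolve to either the lock-free BLI_lfmempool or the regular BLI_mempool, depending on BM_LOCKFREE_MEMPOOL) are reverted to direct BLI_mempool_* calls, and the WITH_DYNTOPO_EDGE_LOCKS ticket-mutex/CAS locking paths are deleted. A minimal before/after sketch of that pattern, taken from the CustomData_bmesh_alloc_block hunk further down (editorial illustration, not part of the diff):

/* Before the revert: allocation routed through the BM_mempool shim,
 * which maps to the lock-free pool when BM_LOCKFREE_MEMPOOL is defined. */
*block = BM_mempool_alloc((BM_mempool *)data->pool);

/* After the revert: a plain BLI_mempool call, as in mainline Blender. */
*block = BLI_mempool_alloc(data->pool);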

View File

@ -601,11 +601,11 @@ static void armature_deform_coords_impl(const Object *ob_arm,
BLI_parallel_mempool_settings_defaults(&settings);
if (use_dverts) {
BM_task_parallel_mempool(
BLI_task_parallel_mempool(
em_target->bm->vpool, &data, armature_vert_task_editmesh, &settings);
}
else {
BM_task_parallel_mempool(
BLI_task_parallel_mempool(
em_target->bm->vpool, &data, armature_vert_task_editmesh_no_dvert, &settings);
}
}

View File

@ -163,15 +163,6 @@ void BKE_curvemapping_copy_data_tag_ex(CurveMapping *target,
}
}
static void *debug_dupalloc_id(const void *mem, const char *id)
{
int len = MEM_allocN_len(mem);
void *cpy = MEM_mallocN(len, id);
memcpy(cpy, mem, len);
return cpy;
}
void BKE_curvemapping_copy_data(CurveMapping *target, const CurveMapping *cumap)
{
int a;
@ -186,14 +177,13 @@ void BKE_curvemapping_copy_data(CurveMapping *target, const CurveMapping *cumap)
for (a = 0; a < CM_TOT; a++) {
if (cumap->cm[a].curve) {
target->cm[a].curve = debug_dupalloc_id(cumap->cm[a].curve, "curvemapping.curve");
target->cm[a].curve = MEM_dupallocN(cumap->cm[a].curve);
}
if (cumap->cm[a].table) {
target->cm[a].table = debug_dupalloc_id(cumap->cm[a].table, "curvemapping.table");
target->cm[a].table = MEM_dupallocN(cumap->cm[a].table);
}
if (cumap->cm[a].premultable) {
target->cm[a].premultable = debug_dupalloc_id(cumap->cm[a].premultable,
"curvemapping.premultable");
target->cm[a].premultable = MEM_dupallocN(cumap->cm[a].premultable);
}
}
}
@ -201,7 +191,7 @@ void BKE_curvemapping_copy_data(CurveMapping *target, const CurveMapping *cumap)
CurveMapping *BKE_curvemapping_copy(const CurveMapping *cumap)
{
if (cumap) {
CurveMapping *cumapn = debug_dupalloc_id(cumap, "CurveMapping");
CurveMapping *cumapn = MEM_dupallocN(cumap);
BKE_curvemapping_copy_data(cumapn, cumap);
cumapn->flag &= ~CUMA_PART_OF_CACHE;
return cumapn;

View File

@ -3980,12 +3980,7 @@ void CustomData_bmesh_init_pool_ex(CustomData *data,
/* If there are no layers, no pool is needed just yet */
if (data->totlayer) {
#ifndef BM_LOCKFREE_MEMPOOL
data->pool = BLI_mempool_create_ex(data->totsize, totelem, chunksize, BLI_MEMPOOL_NOP, memtag);
#else
data->pool = (BLI_mempool *)BM_mempool_create(
data->totsize, totelem, chunksize, BLI_MEMPOOL_NOP);
#endif
}
}
@ -4073,7 +4068,7 @@ bool CustomData_bmesh_merge(const CustomData *source,
}
if (destold.pool) {
BM_mempool_destroy(destold.pool);
BLI_mempool_destroy(destold.pool);
}
if (destold.layers) {
MEM_freeN(destold.layers);
@ -4101,7 +4096,7 @@ void CustomData_bmesh_free_block(CustomData *data, void **block)
}
if (data->totsize) {
BM_mempool_free((BM_mempool *)data->pool, *block);
BLI_mempool_free(data->pool, *block);
}
*block = NULL;
@ -4142,7 +4137,7 @@ static void CustomData_bmesh_alloc_block(CustomData *data, void **block)
}
if (data->totsize > 0) {
*block = BM_mempool_alloc((BM_mempool *)data->pool);
*block = BLI_mempool_alloc(data->pool);
CustomData_bmesh_asan_poison(data, *block);

View File

@ -190,273 +190,6 @@ void bmesh_radial_loop_append(BMEdge *e, BMLoop *l);
void bm_kill_only_edge(BMesh *bm, BMEdge *e);
void bm_kill_only_loop(BMesh *bm, BMLoop *l);
void bm_kill_only_face(BMesh *bm, BMFace *f);
static bool bm_elem_is_free(BMElem *elem, int htype);
extern char dyntopop_node_idx_layer_id[];
extern char dyntopop_faces_areas_layer_id[];
#ifdef WITH_DYNTOPO_EDGE_LOCKS
char *cdlayer_lock_attr_name = "__bm_lock";
ATTR_NO_OPT static int cdlayer_lock_begin(PBVH *pbvh, BMesh *bm)
{
int idx = CustomData_get_named_layer_index(&bm->edata, CD_PROP_INT32, cdlayer_lock_attr_name);
if (idx == -1) {
BM_data_layer_add_named(bm, &bm->edata, CD_PROP_INT32, cdlayer_lock_attr_name);
idx = CustomData_get_named_layer_index(&bm->edata, CD_PROP_INT32, cdlayer_lock_attr_name);
bm->edata.layers[idx].flag |= CD_FLAG_TEMPORARY | CD_FLAG_ELEM_NOCOPY | CD_FLAG_ELEM_NOINTERP;
pbvh->cd_vert_node_offset = CustomData_get_named_layer_index(
&pbvh->bm->vdata, CD_PROP_INT32, dyntopop_node_idx_layer_id);
pbvh->cd_face_node_offset = CustomData_get_named_layer_index(
&pbvh->bm->pdata, CD_PROP_INT32, dyntopop_node_idx_layer_id);
pbvh->cd_vert_node_offset = bm->vdata.layers[pbvh->cd_vert_node_offset].offset;
pbvh->cd_face_node_offset = bm->pdata.layers[pbvh->cd_face_node_offset].offset;
}
return bm->edata.layers[idx].offset;
}
ATTR_NO_OPT static bool cdlayer_elem_lock(BMElem *elem, int cd_lock, int thread_nr)
{
thread_nr++;
if (bm_elem_is_free(elem, BM_EDGE)) {
return false;
}
int *lock = BM_ELEM_CD_GET_VOID_P(elem, cd_lock);
int old = *lock;
if (old == thread_nr) {
return true;
}
int i = 0;
while (old > 0 || old != atomic_cas_int32(lock, old, thread_nr)) {
if (bm_elem_is_free(elem, BM_EDGE)) {
return false;
}
old = *lock;
if (i++ > 100000) {
return false;
}
}
return true;
}
static void cdlayer_elem_unlock(BMElem *elem, int cd_lock, int thread_nr)
{
thread_nr++;
int *lock = BM_ELEM_CD_GET_VOID_P(elem, cd_lock);
// int old = *lock;
*lock = 0;
}
ATTR_NO_OPT static bool cdlayer_lock_edge(BMEdge *e, int cd_lock, int thread_nr)
{
if (!cdlayer_elem_lock((BMElem *)e, cd_lock, thread_nr)) {
return false;
}
BMEdge **es = NULL;
BLI_array_staticdeclare(es, 32);
for (int i = 0; i < 2; i++) {
BMVert *v = i ? e->v2 : e->v1;
BMEdge *e2 = v->e;
do {
BMLoop *l = e2->l;
if (!l) {
if (!cdlayer_elem_lock((BMElem *)e2, cd_lock, thread_nr)) {
return false;
}
BLI_array_append(es, e2);
continue;
}
do {
BMLoop *l2 = l;
do {
if (BM_elem_is_free((BMElem *)l2, BM_LOOP)) {
return false;
}
if (!cdlayer_elem_lock((BMElem *)l2->e, cd_lock, thread_nr)) {
return false;
}
BLI_array_append(es, l2->e);
} while ((l2 = l2->next) != l);
} while ((l = l->radial_next) != e2->l);
} while ((e2 = BM_DISK_EDGE_NEXT(e2, v)) != v->e);
}
BLI_array_free(es);
return true;
error:
for (int i = 0; i < BLI_array_len(es); i++) {
if (!es[i]) {
continue;
}
// eliminate duplicates
for (int j = i + 1; j < BLI_array_len(es); j++) {
if (es[i] == es[j]) {
es[j] = NULL;
}
}
cdlayer_elem_unlock((BMElem *)es[i], cd_lock, thread_nr);
}
BLI_array_free(es);
return false;
}
static void cdlayer_unlock_edge(BMEdge *e, int cd_lock, int thread_nr)
{
if (BM_ELEM_CD_GET_INT(e, cd_lock) == thread_nr + 1) {
return;
}
BMEdge **es = NULL;
BLI_array_staticdeclare(es, 32);
const int tag = BM_ELEM_TAG_ALT;
for (int i = 0; i < 2; i++) {
BMVert *v = i ? e->v2 : e->v1;
BMEdge *e2 = v->e;
do {
BMLoop *l = e2->l;
if (!l) {
BLI_array_append(es, e2);
continue;
}
do {
BMLoop *l2 = l;
do {
l2->e->head.hflag &= ~tag;
} while ((l2 = l2->next) != l);
} while ((l = l->radial_next) != e2->l);
} while ((e2 = BM_DISK_EDGE_NEXT(e2, v)) != v->e);
}
for (int i = 0; i < 2; i++) {
BMVert *v = i ? e->v2 : e->v1;
BMEdge *e2 = v->e;
do {
BMLoop *l = e2->l;
if (!l) {
BLI_array_append(es, e2);
continue;
}
do {
BMLoop *l2 = l;
do {
if (!(l2->e->head.hflag & tag)) {
l2->e->head.hflag |= tag;
BLI_array_append(es, l2->e);
}
} while ((l2 = l2->next) != l);
} while ((l = l->radial_next) != e2->l);
} while ((e2 = BM_DISK_EDGE_NEXT(e2, v)) != v->e);
}
for (int i = 0; i < BLI_array_len(es); i++) {
BMEdge *e2 = es[i];
if (!bm_elem_is_free((BMElem *)e2, BM_EDGE) &&
BM_ELEM_CD_GET_INT(e2, cd_lock) == thread_nr + 1) {
cdlayer_elem_unlock((BMElem *)e2, cd_lock, thread_nr);
}
}
BLI_array_free(es);
}
static void cdlayer_unlock_vert(BMVert *v, int cd_lock, int thread_nr)
{
BMEdge **es = NULL;
BLI_array_staticdeclare(es, 32);
if (!v->e) {
return;
}
const int tag = BM_ELEM_TAG_ALT;
BMEdge *e2 = v->e;
do {
BMLoop *l = e2->l;
if (!l) {
e2->head.hflag &= ~tag;
continue;
}
do {
BMLoop *l2 = l;
do {
l2->e->head.hflag &= ~tag;
} while ((l2 = l2->next) != l);
} while ((l = l->radial_next) != e2->l);
} while ((e2 = BM_DISK_EDGE_NEXT(e2, v)) != v->e);
e2 = v->e;
do {
BMLoop *l = e2->l;
if (!l) {
e2->head.hflag |= tag;
BLI_array_append(es, e2);
continue;
}
do {
BMLoop *l2 = l;
do {
if (!(l2->e->head.hflag & tag)) {
l2->e->head.hflag |= tag;
BLI_array_append(es, l2->e);
}
} while ((l2 = l2->next) != l);
} while ((l = l->radial_next) != e2->l);
} while ((e2 = BM_DISK_EDGE_NEXT(e2, v)) != v->e);
for (int i = 0; i < BLI_array_len(es); i++) {
BMEdge *e2 = es[i];
if (!bm_elem_is_free((BMElem *)e2, BM_EDGE) &&
BM_ELEM_CD_GET_INT(e2, cd_lock) == thread_nr + 1) {
cdlayer_elem_unlock((BMElem *)e2, cd_lock, thread_nr);
}
}
BLI_array_free(es);
}
#endif
static void fix_mesh(PBVH *pbvh, BMesh *bm)
{
@ -1085,10 +818,6 @@ static BMVert *pbvh_bmesh_vert_create(PBVH *pbvh,
BLI_assert((pbvh->totnode == 1 || node_index) && node_index <= pbvh->totnode);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_lock(node->lock);
#endif
/* avoid initializing customdata because its quite involved */
BMVert *v = BM_vert_create(pbvh->bm, co, NULL, BM_CREATE_NOP);
MSculptVert *mv = BKE_PBVH_SCULPTVERT(pbvh->cd_sculpt_vert, v);
@ -1126,10 +855,6 @@ static BMVert *pbvh_bmesh_vert_create(PBVH *pbvh,
BM_log_vert_added(pbvh->bm_log, v, cd_vert_mask_offset);
v->head.index = pbvh->bm->totvert; // set provisional index
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_unlock(node->lock);
#endif
return v;
}
@ -1456,7 +1181,6 @@ static bool pbvh_bmesh_vert_relink(PBVH *pbvh, BMVert *v)
static void pbvh_bmesh_vert_remove(PBVH *pbvh, BMVert *v)
{
/* never match for first time */
int f_node_index_prev = DYNTOPO_NODE_NONE;
const int updateflag = PBVH_UpdateDrawBuffers | PBVH_UpdateBB | PBVH_UpdateTris |
@ -1465,10 +1189,6 @@ static void pbvh_bmesh_vert_remove(PBVH *pbvh, BMVert *v)
PBVHNode *v_node = pbvh_bmesh_node_from_vert(pbvh, v);
if (v_node) {
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_lock(v_node->lock);
#endif
BLI_table_gset_remove(v_node->bm_unique_verts, v, NULL);
v_node->flag |= updateflag;
}
@ -1490,23 +1210,12 @@ static void pbvh_bmesh_vert_remove(PBVH *pbvh, BMVert *v)
f_node_index_prev = f_node_index;
PBVHNode *f_node = &pbvh->nodes[f_node_index];
// int flag = f_node->flag | updateflag;
// flag update of bm_other_verts
// atomic_add_and_fetch_int32((int32_t *)(&f_node->flag), flag);
f_node->flag |= updateflag;
f_node->flag |= updateflag; // flag update of bm_other_verts
BLI_assert(!BLI_table_gset_haskey(f_node->bm_unique_verts, v));
}
}
BM_FACES_OF_VERT_ITER_END;
#ifdef WITH_DYNTOPO_EDGE_LOCKS
if (v_node) {
BLI_ticket_mutex_unlock(v_node->lock);
}
#endif
}
static void pbvh_bmesh_face_remove(
@ -1515,15 +1224,11 @@ static void pbvh_bmesh_face_remove(
PBVHNode *f_node = pbvh_bmesh_node_from_face(pbvh, f);
if (!f_node || !(f_node->flag & PBVH_Leaf)) {
printf("%s: pbvh corruption; node: %p\n", __func__, f_node);
printf("pbvh corruption\n");
fflush(stdout);
return;
}
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_lock(f_node->lock);
#endif
bm_logstack_push();
/* Check if any of this face's vertices need to be removed
@ -1543,15 +1248,7 @@ static void pbvh_bmesh_face_remove(
// BLI_assert(new_node || BM_vert_face_count_is_equal(v, 1));
if (new_node) {
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_lock(new_node->lock);
#endif
pbvh_bmesh_vert_ownership_transfer(pbvh, new_node, v);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_unlock(new_node->lock);
#endif
}
else if (ensure_ownership_transfer && !BM_vert_face_count_is_equal(v, 1)) {
pbvh_bmesh_vert_remove(pbvh, v);
@ -1578,9 +1275,6 @@ static void pbvh_bmesh_face_remove(
PBVH_UpdateOtherVerts;
bm_logstack_pop();
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_unlock(f_node->lock);
#endif
}
void BKE_pbvh_bmesh_remove_face(PBVH *pbvh, BMFace *f, bool log_face)
@ -2055,7 +1749,6 @@ typedef struct EdgeQueueThreadData {
int size;
bool is_collapse;
int seed;
int cd_lock;
} EdgeQueueThreadData;
static void edge_thread_data_insert(EdgeQueueThreadData *tdata, BMEdge *e)
@ -3844,10 +3537,8 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
return NULL;
}
#ifndef WITH_DYNTOPO_EDGE_LOCKS
pbvh_check_vert_boundary(pbvh, v1);
pbvh_check_vert_boundary(pbvh, v2);
#endif
const int mupdateflag = SCULPTVERT_NEED_VALENCE | SCULPTVERT_NEED_BOUNDARY |
SCULPTVERT_NEED_DISK_SORT;
@ -3855,10 +3546,8 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
validate_edge(pbvh, pbvh->bm, e, true, true);
#ifndef WITH_DYNTOPO_EDGE_LOCKS
check_vert_fan_are_tris(pbvh, e->v1);
check_vert_fan_are_tris(pbvh, e->v2);
#endif
MSculptVert *mv1 = BKE_PBVH_SCULPTVERT(pbvh->cd_sculpt_vert, v1);
MSculptVert *mv2 = BKE_PBVH_SCULPTVERT(pbvh->cd_sculpt_vert, v2);
@ -4096,9 +3785,7 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
BMLoop *l = e2->l;
if (e2 != e && !(e2->head.hflag & tag)) {
#ifndef WITH_DYNTOPO_EDGE_LOCKS
BM_log_edge_topo_pre(pbvh->bm_log, e2);
#endif
}
e2->head.hflag |= tag;
@ -4110,9 +3797,7 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
do {
if (BM_ELEM_CD_GET_INT(l->f, pbvh->cd_face_node_offset) != DYNTOPO_NODE_NONE) {
pbvh_bmesh_face_remove(pbvh, l->f, false, false, false);
#ifndef WITH_DYNTOPO_EDGE_LOCKS
BM_log_face_topo_pre(pbvh->bm_log, l->f);
#endif
}
} while ((l = l->radial_next) != e2->l);
} while ((e2 = BM_DISK_EDGE_NEXT(e2, v_step)) != v_step->e);
@ -4120,10 +3805,8 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
pbvh_bmesh_vert_remove(pbvh, v_del);
#ifndef WITH_DYNTOPO_EDGE_LOCKS
BM_log_edge_topo_pre(pbvh->bm_log, e);
BM_log_vert_removed(pbvh->bm_log, v_del, pbvh->cd_vert_mask_offset);
#endif
BLI_ghash_insert(deleted_verts, (void *)v_del, NULL);
@ -4248,9 +3931,7 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
if (e2->head.hflag & tag) {
e2->head.hflag &= ~tag;
#ifndef WITH_DYNTOPO_EDGE_LOCKS
BM_log_edge_topo_post(pbvh->bm_log, e2);
#endif
}
BMLoop *lnext;
@ -4281,9 +3962,7 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
if (!fbad && BM_ELEM_CD_GET_INT(l->f, pbvh->cd_face_node_offset) == DYNTOPO_NODE_NONE) {
BKE_pbvh_bmesh_add_face(pbvh, l->f, false, false);
#ifndef WITH_DYNTOPO_EDGE_LOCKS
BM_log_face_topo_post(pbvh->bm_log, l->f);
#endif
}
if (!lnext) {
@ -4328,37 +4007,6 @@ static BMVert *pbvh_bmesh_collapse_edge(PBVH *pbvh,
return v_conn;
}
#ifdef WITH_DYNTOPO_EDGE_LOCKS
ATTR_NO_OPT static void pbvh_bmesh_collapse_short_edges_cb(void *__restrict userdata,
const int n,
const TaskParallelTLS *__restrict tls)
{
EdgeQueueThreadData *tdata = ((EdgeQueueThreadData *)userdata) + n;
const int thread_nr = n;
const int cd_lock = tdata->cd_lock;
GHash *unused = BLI_ghash_ptr_new("unused");
BLI_buffer_declare_static(BMFace *, deleted_faces, BLI_BUFFER_NOP, 32);
for (int i = 0; i < tdata->totedge; i++) {
BMEdge *e = tdata->edges[i];
if (BM_elem_is_free((BMElem *)e, BM_EDGE) || !cdlayer_lock_edge(e, cd_lock, thread_nr)) {
continue;
}
BMVert *v_conn = pbvh_bmesh_collapse_edge(
tdata->pbvh, e, e->v1, e->v2, unused, &deleted_faces, tdata->eq_ctx);
if (v_conn) {
cdlayer_unlock_vert(v_conn, cd_lock, thread_nr);
}
}
BLI_ghash_free(unused, NULL, NULL);
BLI_buffer_free(&deleted_faces);
}
#endif
static bool pbvh_bmesh_collapse_short_edges(EdgeQueueContext *eq_ctx,
PBVH *pbvh,
BLI_Buffer *deleted_faces,
@ -4387,28 +4035,6 @@ static bool pbvh_bmesh_collapse_short_edges(EdgeQueueContext *eq_ctx,
BMVert **checkvs = NULL;
BLI_array_declare(checkvs);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
int cd_lock = cdlayer_lock_begin(pbvh, pbvh->bm);
const int totthread = 8;
EdgeQueueThreadData *tdata = MEM_callocN(sizeof(EdgeQueueThreadData) * totthread,
"EdgeQueueThreadData");
int totedge = max_steps / totthread + 1;
int curthread = 0;
if (totedge * totthread < max_steps) {
totedge += ((totedge * totthread) % max_steps) + 100;
}
for (int i = 0; i < totthread; i++) {
tdata[i].edges = MEM_mallocN(sizeof(void *) * totedge, "edge queue thread data edges");
tdata[i].pbvh = pbvh;
tdata[i].eq_ctx = eq_ctx;
tdata[i].cd_lock = cd_lock;
}
#endif
while (!BLI_heapsimple_is_empty(eq_ctx->q->heap)) {
if (step++ > max_steps) {
break;
@ -4459,14 +4085,9 @@ static bool pbvh_bmesh_collapse_short_edges(EdgeQueueContext *eq_ctx,
continue;
}
#ifdef WITH_DYNTOPO_EDGE_LOCKS
tdata[curthread].edges[tdata[curthread].totedge++] = e;
curthread = (curthread + 1) % totthread;
#else
# ifdef USE_EDGEQUEUE_TAG
#ifdef USE_EDGEQUEUE_TAG
EDGE_QUEUE_DISABLE(e);
# endif
#endif
if (calc_weighted_edge_collapse(eq_ctx, v1, v2) >= limit_len_squared) {
continue;
@ -4491,28 +4112,13 @@ static bool pbvh_bmesh_collapse_short_edges(EdgeQueueContext *eq_ctx,
BLI_array_append(checkvs, v_conn);
}
# ifdef TEST_COLLAPSE
#ifdef TEST_COLLAPSE
if (_i++ > 10) {
break;
}
# endif
#endif
}
#ifdef WITH_DYNTOPO_EDGE_LOCKS
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = true;
BLI_task_parallel_range(0, totthread, tdata, pbvh_bmesh_collapse_short_edges_cb, &settings);
for (int i = 0; i < totthread; i++) {
MEM_SAFE_FREE(tdata[i].edges);
}
MEM_SAFE_FREE(tdata);
#endif
// add log subentry
BM_log_entry_add_ex(pbvh->bm, pbvh->bm_log, true);
@ -6054,6 +5660,9 @@ static void pbvh_split_edges(EdgeQueueContext *eq_ctx,
#endif
}
extern char dyntopop_node_idx_layer_id[];
extern char dyntopop_faces_areas_layer_id[];
typedef struct DynTopoState {
PBVH *pbvh;
bool is_fake_pbvh;
@ -6078,9 +5687,6 @@ DynTopoState *BKE_dyntopo_init(BMesh *bm, PBVH *existing_pbvh)
node->flag = PBVH_Leaf | PBVH_UpdateTris | PBVH_UpdateTriAreas;
node->bm_faces = BLI_table_gset_new_ex("node->bm_faces", bm->totface);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
node->lock = BLI_ticket_mutex_alloc();
#endif
node->bm_unique_verts = BLI_table_gset_new_ex("node->bm_unique_verts", bm->totvert);
}
else {

View File

@ -405,10 +405,11 @@ static void lattice_deform_coords_impl(const Object *ob_lattice,
BLI_parallel_mempool_settings_defaults(&settings);
if (cd_dvert_offset != -1) {
BM_task_parallel_mempool(em_target->bm->vpool, &data, lattice_vert_task_editmesh, &settings);
BLI_task_parallel_mempool(
em_target->bm->vpool, &data, lattice_vert_task_editmesh, &settings);
}
else {
BM_task_parallel_mempool(
BLI_task_parallel_mempool(
em_target->bm->vpool, &data, lattice_vert_task_editmesh_no_dvert, &settings);
}
}

View File

@ -365,11 +365,6 @@ static void pbvh_bmesh_node_split(
c1->bm_other_verts = c2->bm_other_verts = NULL;
#ifdef WITH_DYNTOPO_EDGE_LOCKS
c1->lock = BLI_ticket_mutex_alloc();
c2->lock = BLI_ticket_mutex_alloc();
#endif
/* Partition the parent node's faces between the two children */
TGSET_ITER (f, n->bm_faces) {
const BBC *bbc = &bbc_array[BM_elem_index_get(f)];
@ -548,10 +543,6 @@ void bke_pbvh_insert_face_finalize(PBVH *pbvh, BMFace *f, const int ni)
return;
}
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_lock(node->lock);
#endif
BLI_table_gset_add(node->bm_faces, f);
int updateflag = PBVH_UpdateTris | PBVH_UpdateBB | PBVH_UpdateDrawBuffers |
@ -578,9 +569,6 @@ void bke_pbvh_insert_face_finalize(PBVH *pbvh, BMFace *f, const int ni)
PBVHNode *node2 = pbvh->nodes + ni2;
if (ni != ni2) {
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_lock(node2->lock);
#endif
BLI_table_gset_add(node->bm_other_verts, l->v);
}
@ -588,19 +576,9 @@ void bke_pbvh_insert_face_finalize(PBVH *pbvh, BMFace *f, const int ni)
BB_expand(&node2->vb, l->v->co);
BB_expand(&node2->orig_vb, mv->origco);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
if (ni != ni2) {
BLI_ticket_mutex_unlock(node2->lock);
}
#endif
}
l = l->next;
} while (l != f->l_first);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BLI_ticket_mutex_unlock(node->lock);
#endif
}
void bke_pbvh_insert_face(PBVH *pbvh, struct BMFace *f)
@ -1409,10 +1387,6 @@ static void pbvh_bmesh_create_nodes_fast_recursive(
n->flag = PBVH_Leaf | PBVH_UpdateTris;
n->bm_faces = BLI_table_gset_new_ex("bm_faces", node->totface);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
n->lock = BLI_ticket_mutex_alloc();
#endif
/* Create vert hash sets */
n->bm_unique_verts = BLI_table_gset_new("bm_unique_verts");
n->bm_other_verts = BLI_table_gset_new("bm_other_verts");
@ -2797,10 +2771,6 @@ static void BKE_pbvh_bmesh_correct_tree(PBVH *pbvh, PBVHNode *node, PBVHNode *pa
node->bm_other_verts = BLI_table_gset_new("bm_other_verts");
node->bm_faces = BLI_table_gset_new("bm_faces");
#ifdef WITH_DYNTOPO_EDGE_LOCKS
node->lock = BLI_ticket_mutex_alloc();
#endif
pbvh_bmesh_join_subnodes(pbvh, pbvh->nodes + node->children_offset, node);
pbvh_bmesh_join_subnodes(pbvh, pbvh->nodes + node->children_offset + 1, node);
@ -2885,10 +2855,6 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh)
n3->bm_other_verts = BLI_table_gset_new("bm_other_verts");
n3->bm_faces = BLI_table_gset_new("bm_faces");
n3->tribuf = NULL;
#ifdef WITH_DYNTOPO_EDGE_LOCKS
n3->lock = BLI_ticket_mutex_alloc();
#endif
}
else if ((n1->flag & PBVH_Delete) && (n2->flag & PBVH_Delete)) {
n->children_offset = 0;
@ -2900,9 +2866,6 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh)
n->bm_other_verts = BLI_table_gset_new("bm_other_verts");
n->bm_faces = BLI_table_gset_new("bm_faces");
n->tribuf = NULL;
#ifdef WITH_DYNTOPO_EDGE_LOCKS
n->lock = BLI_ticket_mutex_alloc();
#endif
}
}
}
@ -3018,9 +2981,6 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh)
n->bm_unique_verts = BLI_table_gset_new("bleh");
n->bm_other_verts = BLI_table_gset_new("bleh");
n->bm_faces = BLI_table_gset_new("bleh");
#ifdef WITH_DYNTOPO_EDGE_LOCKS
n->lock = BLI_ticket_mutex_alloc();
#endif
}
BMVert *v;
@ -3297,9 +3257,6 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh)
n3->bm_unique_verts = BLI_table_gset_new("bm_unique_verts");
n3->bm_other_verts = BLI_table_gset_new("bm_other_verts");
n3->bm_faces = BLI_table_gset_new("bm_faces");
#ifdef WITH_DYNTOPO_EDGE_LOCKS
n3->lock = BLI_ticket_mutex_alloc();
#endif
n3->tribuf = NULL;
}
else if ((n1->flag & PBVH_Delete) && (n2->flag & PBVH_Delete)) {
@ -3311,9 +3268,6 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh)
n->bm_unique_verts = BLI_table_gset_new("bm_unique_verts");
n->bm_other_verts = BLI_table_gset_new("bm_other_verts");
n->bm_faces = BLI_table_gset_new("bm_faces");
#ifdef WITH_DYNTOPO_EDGE_LOCKS
n->lock = BLI_ticket_mutex_alloc();
#endif
n->tribuf = NULL;
}
}
@ -3430,9 +3384,6 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh)
n->bm_unique_verts = BLI_table_gset_new("bleh");
n->bm_other_verts = BLI_table_gset_new("bleh");
n->bm_faces = BLI_table_gset_new("bleh");
#ifdef WITH_DYNTOPO_EDGE_LOCKS
n->lock = BLI_ticket_mutex_alloc();
#endif
}
BMVert *v;
@ -3614,18 +3565,18 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge)
for (int i = 0; i < totedge; i++) {
BMEdge *e = edges[i];
BMVert *v2 = BM_mempool_alloc(bm->vpool);
BMVert *v2 = BLI_mempool_alloc(bm->vpool);
memset(v2, 0, sizeof(*v2));
v2->head.data = (BLI_mempool *)BM_mempool_alloc((BM_mempool *)bm->vdata.pool);
v2->head.data = BLI_mempool_alloc(bm->vdata.pool);
BLI_array_append(newverts, v2);
BMEdge *e2 = BM_mempool_alloc(bm->epool);
BMEdge *e2 = BLI_mempool_alloc(bm->epool);
BLI_array_append(newedges, e2);
memset(e2, 0, sizeof(*e2));
if (bm->edata.pool) {
e2->head.data = (BLI_mempool *)BM_mempool_alloc((BM_mempool *)bm->edata.pool);
e2->head.data = BLI_mempool_alloc(bm->edata.pool);
}
BMLoop *l = e->l;
@ -3636,7 +3587,7 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge)
do {
BLI_array_append(faces, l->f);
BMFace *f2 = BM_mempool_alloc(bm->fpool);
BMFace *f2 = BLI_mempool_alloc(bm->fpool);
BLI_array_append(faces, l->f);
BLI_array_append(fmap, v2);
@ -3647,15 +3598,15 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge)
BLI_array_append(emap, i);
memset(f2, 0, sizeof(*f2));
f2->head.data = (BLI_mempool *)BM_mempool_alloc((BM_mempool *)bm->ldata.pool);
f2->head.data = BLI_mempool_alloc(bm->ldata.pool);
BMLoop *prev = NULL;
BMLoop *l2 = NULL;
for (int j = 0; j < 3; j++) {
l2 = BM_mempool_alloc(bm->lpool);
l2 = BLI_mempool_alloc(bm->lpool);
memset(l2, 0, sizeof(*l2));
l2->head.data = (BLI_mempool *)BM_mempool_alloc((BM_mempool *)bm->ldata.pool);
l2->head.data = BLI_mempool_alloc(bm->ldata.pool);
l2->prev = prev;
@ -3878,13 +3829,13 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh)
}
}
BM_mempool_iter loopiter;
BM_mempool_iternew((BM_mempool *)pbvh->bm->lpool, &loopiter);
BMLoop *l = BM_mempool_iterstep(&loopiter);
BLI_mempool_iter loopiter;
BLI_mempool_iternew(pbvh->bm->lpool, &loopiter);
BMLoop *l = BLI_mempool_iterstep(&loopiter);
BMEdge *e;
BMFace *f;
for (i = 0; l; l = BM_mempool_iterstep(&loopiter), i++) {
for (i = 0; l; l = BLI_mempool_iterstep(&loopiter), i++) {
l->head.hflag &= ~flag;
}
BM_ITER_MESH (e, &iter, pbvh->bm, BM_EDGES_OF_MESH) {
@ -4011,10 +3962,10 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh)
fidx[i] = (uint)f->head.index;
}
BM_mempool_iternew(pbvh->bm->lpool, &loopiter);
l = BM_mempool_iterstep(&loopiter);
BLI_mempool_iternew(pbvh->bm->lpool, &loopiter);
l = BLI_mempool_iterstep(&loopiter);
for (i = 0; l; l = BM_mempool_iterstep(&loopiter), i++) {
for (i = 0; l; l = BLI_mempool_iterstep(&loopiter), i++) {
// handle orphaned loops
if (!(l->head.hflag & flag)) {
printf("warning in %s: orphaned loop!\n", __func__);
@ -5034,15 +4985,15 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out)
.no_reuse_ids = false}));
// reinit pools
BM_mempool_destroy(bm->vpool);
BM_mempool_destroy(bm->epool);
BM_mempool_destroy(bm->lpool);
BM_mempool_destroy(bm->fpool);
BLI_mempool_destroy(bm->vpool);
BLI_mempool_destroy(bm->epool);
BLI_mempool_destroy(bm->lpool);
BLI_mempool_destroy(bm->fpool);
bm->vpool = BM_mempool_create(sizeof(BMVert), 0, (int)params->vchunk, BLI_MEMPOOL_ALLOW_ITER);
bm->epool = BM_mempool_create(sizeof(BMEdge), 0, (int)params->echunk, BLI_MEMPOOL_ALLOW_ITER);
bm->lpool = BM_mempool_create(sizeof(BMLoop), 0, (int)params->lchunk, BLI_MEMPOOL_ALLOW_ITER);
bm->fpool = BM_mempool_create(sizeof(BMFace), 0, (int)params->pchunk, BLI_MEMPOOL_ALLOW_ITER);
bm->vpool = BLI_mempool_create(sizeof(BMVert), 0, (int)params->vchunk, BLI_MEMPOOL_ALLOW_ITER);
bm->epool = BLI_mempool_create(sizeof(BMEdge), 0, (int)params->echunk, BLI_MEMPOOL_ALLOW_ITER);
bm->lpool = BLI_mempool_create(sizeof(BMLoop), 0, (int)params->lchunk, BLI_MEMPOOL_ALLOW_ITER);
bm->fpool = BLI_mempool_create(sizeof(BMFace), 0, (int)params->pchunk, BLI_MEMPOOL_ALLOW_ITER);
GHash *vhash = BLI_ghash_ptr_new("vhash");

View File

@ -38,18 +38,6 @@ typedef struct {
float bmin[3], bmax[3], bcentroid[3];
} BBC;
//#define WITH_DYNTOPO_EDGE_LOCKS
#ifdef WITH_DYNTOPO_EDGE_LOCKS
# ifndef BM_LOCKFREE_MEMPOOL
# error \
"Cannot have WITH_DYNTOPO_EDGE_LOCKS without BM_LOCKFREE_MEMPOOL (set it in bmesh_class.h)"
# endif
#endif
#ifdef WITH_DYNTOPO_EDGE_LOCKS
# include "BLI_threads.h"
#endif
/* NOTE: this structure is getting large, might want to split it into
* union'd structs */
struct PBVHNode {
@ -111,7 +99,7 @@ struct PBVHNode {
/* Indicates whether this node is a leaf or not; also used for
* marking various updates that need to be applied. */
PBVHNodeFlags flag;
PBVHNodeFlags flag : 32;
/* Used for raycasting: how close bb is to the ray point. */
float tmin;
@ -138,10 +126,6 @@ struct PBVHNode {
#ifdef PROXY_ADVANCED
ProxyVertArray proxyverts;
#endif
#ifdef WITH_DYNTOPO_EDGE_LOCKS
TicketMutex *lock;
#endif
};
typedef enum {

View File

@ -34,7 +34,6 @@ extern "C" {
#endif
struct BLI_mempool;
struct BLI_lfmempool;
/* Task Scheduler
*
@ -237,10 +236,6 @@ void BLI_task_parallel_mempool(struct BLI_mempool *mempool,
void *userdata,
TaskParallelMempoolFunc func,
const TaskParallelSettings *settings);
void BLI_task_parallel_lfmempool(struct BLI_lfmempool *mempool,
void *userdata,
TaskParallelMempoolFunc func,
const TaskParallelSettings *settings);
/* TODO(sergey): Think of a better place for this. */
BLI_INLINE void BLI_parallel_range_settings_defaults(TaskParallelSettings *settings)

View File

@ -90,9 +90,9 @@ set(SRC
intern/kdtree_2d.c
intern/kdtree_3d.c
intern/kdtree_4d.c
intern/lockfree_mempool.cc
intern/lasso_2d.c
intern/listbase.c
intern/lockfree_mempool.cc
intern/math_base.c
intern/math_base_inline.c
intern/math_base_safe_inline.c
@ -263,6 +263,7 @@ set(SRC
BLI_memory_utils.h
BLI_memory_utils.hh
BLI_mempool.h
BLI_mempool_lockfree.h
BLI_mesh_boolean.hh
BLI_mesh_intersect.hh
BLI_mmap.h

View File

@ -29,7 +29,6 @@
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_mempool.h"
#include "BLI_mempool_lockfree.h"
#include "BLI_mempool_private.h"
#include "BLI_task.h"
#include "BLI_threads.h"
@ -389,18 +388,6 @@ static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata)
}
}
static void parallel_lfmempool_func(TaskPool *__restrict pool, void *taskdata)
{
ParallelMempoolState *__restrict state = BLI_task_pool_user_data(pool);
BLI_lfmempool_iter *iter = &((ParallelLFMempoolTaskData *)taskdata)->ts_iter;
TaskParallelTLS *tls = &((ParallelLFMempoolTaskData *)taskdata)->tls;
MempoolIterData *item;
while ((item = BLI_lfmempool_iterstep(iter)) != NULL) {
state->func(state->userdata, item, tls);
}
}
/**
* This function allows to parallelize for loops over Mempool items.
*
@ -508,109 +495,6 @@ void BLI_task_parallel_mempool(BLI_mempool *mempool,
mempool_iter_threadsafe_destroy(mempool_iterator_data);
}
/**
* This function allows to parallelize for loops over Mempool items.
*
* \param mempool: The iterable BLI_mempool to loop over.
* \param userdata: Common userdata passed to all instances of \a func.
* \param func: Callback function.
* \param settings: See public API doc of TaskParallelSettings for description of all settings.
*
* \note There is no static scheduling here.
*/
void BLI_task_parallel_lfmempool(BLI_lfmempool *mempool,
void *userdata,
TaskParallelMempoolFunc func,
const TaskParallelSettings *settings)
{
void *userdata_chunk = settings->userdata_chunk;
const size_t userdata_chunk_size = settings->userdata_chunk_size;
void *userdata_chunk_array = NULL;
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
if (!settings->use_threading) {
TaskParallelTLS tls = {NULL};
if (use_userdata_chunk) {
if (settings->func_init != NULL) {
settings->func_init(userdata, userdata_chunk);
}
tls.userdata_chunk = userdata_chunk;
}
BLI_lfmempool_iter iter;
BLI_lfmempool_iternew(mempool, &iter);
void *item;
while ((item = BLI_lfmempool_iterstep(&iter))) {
func(userdata, item, &tls);
}
if (use_userdata_chunk) {
if (settings->func_free != NULL) {
/* `func_free` should only free data that was created during execution of `func`. */
settings->func_free(userdata, userdata_chunk);
}
}
return;
}
ParallelMempoolState state;
TaskPool *task_pool = BLI_task_pool_create(&state, TASK_PRIORITY_HIGH);
const int num_threads = BLI_task_scheduler_num_threads();
/* The idea here is to prevent creating task for each of the loop iterations
* and instead have tasks which are evenly distributed across CPU cores and
* pull next item to be crunched using the threaded-aware BLI_mempool_iter.
*/
const int num_tasks = num_threads + 2;
state.userdata = userdata;
state.func = func;
if (use_userdata_chunk) {
userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
}
ParallelLFMempoolTaskData *mempool_iterator_data = lfmempool_iter_threadsafe_create(
mempool, (size_t)num_tasks);
for (int i = 0; i < num_tasks; i++) {
void *userdata_chunk_local = NULL;
if (use_userdata_chunk) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
if (settings->func_init != NULL) {
settings->func_init(userdata, userdata_chunk_local);
}
}
mempool_iterator_data[i].tls.userdata_chunk = userdata_chunk_local;
/* Use this pool's pre-allocated tasks. */
BLI_task_pool_push(task_pool, parallel_lfmempool_func, &mempool_iterator_data[i], false, NULL);
}
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
if (use_userdata_chunk) {
if ((settings->func_free != NULL) || (settings->func_reduce != NULL)) {
for (int i = 0; i < num_tasks; i++) {
if (settings->func_reduce) {
settings->func_reduce(
userdata, userdata_chunk, mempool_iterator_data[i].tls.userdata_chunk);
}
if (settings->func_free) {
settings->func_free(userdata, mempool_iterator_data[i].tls.userdata_chunk);
}
}
}
MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
}
lfmempool_iter_threadsafe_destroy(mempool_iterator_data);
}
#undef MALLOCA
#undef MALLOCA_FREE

View File

@ -32,7 +32,6 @@
* Usage: msgfmt input.po output.po
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

View File

@ -34,38 +34,6 @@
* these are ifdef'd because they use more memory and can't be saved in DNA currently */
// #define USE_BMESH_HOLES
//#define BM_LOCKFREE_MEMPOOL
#ifdef BM_LOCKFREE_MEMPOOL
# include "BLI_mempool_lockfree.h"
# include "BLI_threads.h"
# define BM_task_parallel_mempool BLI_task_parallel_lfmempool
# define BM_mempool BLI_lfmempool
# define BM_mempool_create(esize, totelem, pchunk, flag) BLI_lfmempool_create(esize, pchunk)
# define BM_mempool_alloc(pool) BLI_lfmempool_alloc(pool)
# define BM_mempool_calloc(pool) BLI_lfmempool_calloc(pool)
# define BM_mempool_destroy(pool) BLI_lfmempool_destroy((BLI_lfmempool *)pool)
# define BM_mempool_iter BLI_lfmempool_iter
# define BM_mempool_iternew(pool, iter) BLI_lfmempool_iternew(pool, iter)
# define BM_mempool_iterstep(iter) BLI_lfmempool_iterstep(iter)
# define BM_mempool_free(pool, elem) BLI_lfmempool_free(pool, elem)
# define BM_mempool_findelem(pool, elem) BLI_lfmempool_findelem(pool, elem)
#else
# define BM_task_parallel_mempool BLI_task_parallel_mempool
# define BM_mempool BLI_mempool
# define BM_mempool_create(esize, totelem, pchunk, flag) \
BLI_mempool_create(esize, totelem, pchunk, flag)
# define BM_mempool_alloc(pool) BLI_mempool_alloc(pool)
# define BM_mempool_calloc(pool) BLI_mempool_calloc(pool)
# define BM_mempool_destroy(pool) BLI_mempool_destroy(pool)
# define BM_mempool_iter BLI_mempool_iter
# define BM_mempool_iternew(pool, iter) BLI_mempool_iternew(pool, iter)
# define BM_mempool_iterstep(iter) BLI_mempool_iterstep(iter)
# define BM_mempool_free(pool, elem) BLI_mempool_free(pool, elem)
# define BM_mempool_findelem(pool, elem) BLI_mempool_findelem(pool, elem)
#endif
struct BMEdge;
struct BMFace;
struct BMLoop;
@ -344,7 +312,7 @@ typedef struct BMesh {
char elem_table_dirty;
/* element pools */
struct BM_mempool *vpool, *epool, *lpool, *fpool;
struct BLI_mempool *vpool, *epool, *lpool, *fpool;
/* mempool lookup tables (optional)
* index tables, to map indices to elements via
@ -361,7 +329,7 @@ typedef struct BMesh {
int ftable_tot;
/* operator api stuff (must be all NULL or all alloc'd) */
struct BM_mempool *vtoolflagpool, *etoolflagpool, *ftoolflagpool;
struct BLI_mempool *vtoolflagpool, *etoolflagpool, *ftoolflagpool;
uint use_toolflags : 1;
@ -427,13 +395,7 @@ typedef struct BMesh {
struct GHash *ghash; // used if BM_NO_REUSE_IDS is true
int map_size;
int cd_id_off[15];
#ifdef BM_LOCKFREE_MEMPOOL
// XXX locks!
TicketMutex *lock;
#endif
} idmap;
} BMesh;
enum {

View File

@ -43,14 +43,6 @@
#define SELECT 1
#ifdef BM_LOCKFREE_MEMPOOL
# define BM_ID_LOCK(bm) BLI_ticket_mutex_lock(bm->idmap.lock)
# define BM_ID_UNLOCK(bm) BLI_ticket_mutex_unlock(bm->idmap.lock)
#else
# define BM_ID_LOCK(bm)
# define BM_ID_UNLOCK(bm)
#endif
#ifdef WITH_BM_ID_FREELIST
static uint bm_id_freelist_pop(BMesh *bm)
{
@ -173,8 +165,6 @@ void bm_assign_id_intern(BMesh *bm, BMElem *elem, uint id)
void bm_assign_id(BMesh *bm, BMElem *elem, uint id, bool check_unqiue)
{
BM_ID_LOCK(bm);
if (check_unqiue && (bm->idmap.flag & BM_HAS_ID_MAP)) {
if (BM_ELEM_FROM_ID(bm, id)) {
@ -188,8 +178,6 @@ void bm_assign_id(BMesh *bm, BMElem *elem, uint id, bool check_unqiue)
range_tree_uint_retake(bm->idmap.idtree, id);
#endif
bm_assign_id_intern(bm, elem, id);
BM_ID_UNLOCK(bm);
}
void bm_alloc_id(BMesh *bm, BMElem *elem)
@ -198,8 +186,6 @@ void bm_alloc_id(BMesh *bm, BMElem *elem)
return;
}
BM_ID_LOCK(bm);
#ifdef WITH_BM_ID_FREELIST
uint id;
@ -214,7 +200,6 @@ void bm_alloc_id(BMesh *bm, BMElem *elem)
#endif
bm_assign_id_intern(bm, elem, id);
BM_ID_UNLOCK(bm);
}
void bm_free_id(BMesh *bm, BMElem *elem)
@ -223,8 +208,6 @@ void bm_free_id(BMesh *bm, BMElem *elem)
return;
}
BM_ID_LOCK(bm);
uint id = (uint)BM_ELEM_CD_GET_INT(elem, bm->idmap.cd_id_off[elem->head.htype]);
#ifndef WITH_BM_ID_FREELIST
@ -244,8 +227,6 @@ void bm_free_id(BMesh *bm, BMElem *elem)
BLI_ghash_remove(bm->idmap.ghash, POINTER_FROM_UINT(id), NULL, NULL);
}
}
BM_ID_UNLOCK(bm);
}
/**
@ -1307,11 +1288,11 @@ void bm_rebuild_idmap(BMesh *bm)
cd_off = CustomData_get_offset(cdatas[i], CD_MESH_ID);
if (bm->idmap.flag & BM_NO_REUSE_IDS) {
BM_mempool_iter iter;
BLI_mempool_iter iter;
BM_mempool_iternew((&bm->vpool)[i], &iter);
BMElem *elem = (BMElem *)BM_mempool_iterstep(&iter);
for (; elem; elem = (BMElem *)BM_mempool_iterstep(&iter)) {
BLI_mempool_iternew((&bm->vpool)[i], &iter);
BMElem *elem = (BMElem *)BLI_mempool_iterstep(&iter);
for (; elem; elem = (BMElem *)BLI_mempool_iterstep(&iter)) {
void **val;
if (!BLI_ghash_ensure_p(bm->idmap.ghash, (void *)elem, &val)) {
@ -1320,11 +1301,11 @@ void bm_rebuild_idmap(BMesh *bm)
}
}
else {
BM_mempool_iter iter;
BLI_mempool_iter iter;
BM_mempool_iternew((&bm->vpool)[i], &iter);
BMElem *elem = (BMElem *)BM_mempool_iterstep(&iter);
for (; elem; elem = (BMElem *)BM_mempool_iterstep(&iter)) {
BLI_mempool_iternew((&bm->vpool)[i], &iter);
BMElem *elem = (BMElem *)BLI_mempool_iterstep(&iter);
for (; elem; elem = (BMElem *)BLI_mempool_iterstep(&iter)) {
void **val;
int id = BM_ELEM_CD_GET_INT(elem, cd_off);

View File

@ -61,7 +61,7 @@ void bm_elem_check_toolflags(BMesh *bm, BMElem *elem)
{
int cd_off = -1;
MToolFlags *flags;
BM_mempool *flagpool;
BLI_mempool *flagpool;
switch (elem->head.htype) {
case BM_VERT:
@ -87,7 +87,7 @@ void bm_elem_check_toolflags(BMesh *bm, BMElem *elem)
flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(elem, cd_off);
if (!flags->flag) {
flags->flag = BM_mempool_calloc(flagpool);
flags->flag = BLI_mempool_calloc(flagpool);
}
}
@ -99,7 +99,7 @@ BMVert *BM_vert_create(BMesh *bm,
const BMVert *v_example,
const eBMCreateFlag create_flag)
{
BMVert *v = BM_mempool_alloc(bm->vpool);
BMVert *v = BLI_mempool_alloc(bm->vpool);
BLI_assert((v_example == NULL) || (v_example->head.htype == BM_VERT));
BLI_assert(!(create_flag & 1));
@ -174,7 +174,7 @@ BMVert *BM_vert_create(BMesh *bm,
if (bm->use_toolflags && v->head.data) {
int cd_tflags = bm->vdata.layers[bm->vdata.typemap[CD_TOOLFLAGS]].offset;
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(v, cd_tflags);
flags->flag = BM_mempool_calloc(bm->vtoolflagpool);
flags->flag = BLI_mempool_calloc(bm->vtoolflagpool);
}
BM_CHECK_ELEMENT(v);
@ -203,7 +203,7 @@ BMEdge *BM_edge_create(
return e;
}
e = BM_mempool_alloc(bm->epool);
e = BLI_mempool_alloc(bm->epool);
/* --- assign all members --- */
e->head.data = NULL;
@ -252,7 +252,7 @@ BMEdge *BM_edge_create(
if (bm->use_toolflags && e->head.data) {
int cd_tflags = bm->edata.layers[bm->edata.typemap[CD_TOOLFLAGS]].offset;
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(e, cd_tflags);
flags->flag = BM_mempool_calloc(bm->etoolflagpool);
flags->flag = BLI_mempool_calloc(bm->etoolflagpool);
}
BM_CHECK_ELEMENT(e);
@ -274,7 +274,7 @@ static BMLoop *bm_loop_create(BMesh *bm,
{
BMLoop *l = NULL;
l = BM_mempool_alloc(bm->lpool);
l = BLI_mempool_alloc(bm->lpool);
BLI_assert((l_example == NULL) || (l_example->head.htype == BM_LOOP));
BLI_assert(!(create_flag & 1));
@ -339,7 +339,7 @@ static BMLoop *bm_face_boundary_add(
BMesh *bm, BMFace *f, BMVert *startv, BMEdge *starte, const eBMCreateFlag create_flag)
{
#ifdef USE_BMESH_HOLES
BMLoopList *lst = BM_mempool_calloc(bm->looplistpool);
BMLoopList *lst = BLI_mempool_calloc(bm->looplistpool);
#endif
BMLoop *l = bm_loop_create(bm, startv, starte, f, NULL /* starte->l */, create_flag);
@ -432,7 +432,7 @@ BLI_INLINE BMFace *bm_face_create__internal(BMesh *bm)
{
BMFace *f;
f = BM_mempool_alloc(bm->fpool);
f = BLI_mempool_alloc(bm->fpool);
/* --- assign all members --- */
f->head.data = NULL;
@ -552,7 +552,7 @@ BMFace *BM_face_create(BMesh *bm,
if (bm->use_toolflags && f->head.data) {
int cd_tflags = bm->pdata.layers[bm->pdata.typemap[CD_TOOLFLAGS]].offset;
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(f, cd_tflags);
flags->flag = BM_mempool_calloc(bm->ftoolflagpool);
flags->flag = BLI_mempool_calloc(bm->ftoolflagpool);
}
BM_CHECK_ELEMENT(f);
@ -833,7 +833,7 @@ static void bm_kill_only_vert(BMesh *bm, BMVert *v)
MToolFlags *flags = BM_ELEM_CD_GET_VOID_P(
v, bm->vdata.layers[bm->vdata.typemap[CD_TOOLFLAGS]].offset);
BM_mempool_free(bm->vtoolflagpool, flags->flag);
BLI_mempool_free(bm->vtoolflagpool, flags->flag);
flags->flag = NULL;
if (bleh) {
@ -845,7 +845,7 @@ static void bm_kill_only_vert(BMesh *bm, BMVert *v)
CustomData_bmesh_free_block(&bm->vdata, &v->head.data);
}
BM_mempool_free(bm->vpool, v);
BLI_mempool_free(bm->vpool, v);
}
#ifdef WITH_BM_ID_FREELIST
@ -943,14 +943,14 @@ void bm_kill_only_edge(BMesh *bm, BMEdge *e)
MToolFlags *flags = BM_ELEM_CD_GET_VOID_P(
e, bm->edata.layers[bm->edata.typemap[CD_TOOLFLAGS]].offset);
BM_mempool_free(bm->etoolflagpool, flags->flag);
BLI_mempool_free(bm->etoolflagpool, flags->flag);
}
if (e->head.data) {
CustomData_bmesh_free_block(&bm->edata, &e->head.data);
}
BM_mempool_free(bm->epool, e);
BLI_mempool_free(bm->epool, e);
}
/**
@ -976,14 +976,14 @@ void bm_kill_only_face(BMesh *bm, BMFace *f)
MToolFlags *flags = BM_ELEM_CD_GET_VOID_P(
f, bm->pdata.layers[bm->pdata.typemap[CD_TOOLFLAGS]].offset);
BM_mempool_free(bm->ftoolflagpool, flags->flag);
BLI_mempool_free(bm->ftoolflagpool, flags->flag);
}
if (f->head.data) {
CustomData_bmesh_free_block(&bm->pdata, &f->head.data);
}
BM_mempool_free(bm->fpool, f);
BLI_mempool_free(bm->fpool, f);
}
/**
@ -1002,7 +1002,7 @@ void bm_kill_only_loop(BMesh *bm, BMLoop *l)
CustomData_bmesh_free_block(&bm->ldata, &l->head.data);
}
BM_mempool_free(bm->lpool, l);
BLI_mempool_free(bm->lpool, l);
}
/**
@ -1087,7 +1087,7 @@ void BM_face_kill(BMesh *bm, BMFace *f)
} while ((l_iter = l_next) != l_first);
#ifdef USE_BMESH_HOLES
BM_mempool_free(bm->looplistpool, ls);
BLI_mempool_free(bm->looplistpool, ls);
#endif
}
@ -1146,7 +1146,7 @@ void BM_face_kill_loose(BMesh *bm, BMFace *f)
} while ((l_iter = l_next) != l_first);
#ifdef USE_BMESH_HOLES
BM_mempool_free(bm->looplistpool, ls);
BLI_mempool_free(bm->looplistpool, ls);
#endif
}
@ -1581,7 +1581,7 @@ static BMFace *bm_face_create__sfme(BMesh *bm, BMFace *f_example)
f = bm_face_create__internal(bm);
#ifdef USE_BMESH_HOLES
lst = BM_mempool_calloc(bm->looplistpool);
lst = BLI_mempool_calloc(bm->looplistpool);
BLI_addtail(&f->loops, lst);
#endif
@ -1595,7 +1595,7 @@ static BMFace *bm_face_create__sfme(BMesh *bm, BMFace *f_example)
if (bm->use_toolflags && f->head.data) {
int cd_tflags = bm->pdata.layers[bm->pdata.typemap[CD_TOOLFLAGS]].offset;
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(f, cd_tflags);
flags->flag = BM_mempool_calloc(bm->ftoolflagpool);
flags->flag = BLI_mempool_calloc(bm->ftoolflagpool);
}
return f;
@ -1766,7 +1766,7 @@ BMFace *bmesh_kernel_split_face_make_edge(BMesh *bm,
// printf("warning: call to split face euler without holes argument; holes will be tossed.\n");
for (lst = f->loops.last; lst != f->loops.first; lst = lst2) {
lst2 = lst->prev;
BM_mempool_free(bm->looplistpool, lst);
BLI_mempool_free(bm->looplistpool, lst);
}
}
#endif
@ -2512,8 +2512,7 @@ static void trigger_jvke_error(int err, char *obj_text)
printf("========= ERROR %s============\n\n%s\n\n", get_err_str(err), obj_text);
}
//#define JVKE_DEBUG
#ifdef JVKE_DEBUG
#if 0
# define JVKE_CHECK_ELEMENT(elem) \
{ \
int err = 0; \
@ -2530,11 +2529,9 @@ BMVert *bmesh_kernel_join_vert_kill_edge(
{
BMVert *v_conn = BM_edge_other_vert(e, v_kill);
#ifdef JVKE_DEBUG
char buf[LOCAL_OBJ_SIZE];
char *saved_obj = bm_save_local_obj_text(bm, 2, buf, "e", e);
bm_local_obj_free(saved_obj, buf);
#endif
BMFace **fs = NULL;
BMEdge **deles = NULL;
@ -3015,23 +3012,23 @@ BMFace *bmesh_kernel_join_face_kill_edge(BMesh *bm, BMFace *f1, BMFace *f2, BMEd
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(
l_f1->e, bm->edata.layers[bm->edata.typemap[CD_TOOLFLAGS]].offset);
BM_mempool_free(bm->etoolflagpool, flags->flag);
BLI_mempool_free(bm->etoolflagpool, flags->flag);
}
BM_mempool_free(bm->epool, l_f1->e);
BLI_mempool_free(bm->epool, l_f1->e);
bm->totedge--;
BM_mempool_free(bm->lpool, l_f1);
BLI_mempool_free(bm->lpool, l_f1);
bm->totloop--;
BM_mempool_free(bm->lpool, l_f2);
BLI_mempool_free(bm->lpool, l_f2);
bm->totloop--;
if (bm->ftoolflagpool) {
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(
f2, bm->pdata.layers[bm->pdata.typemap[CD_TOOLFLAGS]].offset);
BM_mempool_free(bm->ftoolflagpool, flags->flag);
BLI_mempool_free(bm->ftoolflagpool, flags->flag);
}
BM_mempool_free(bm->fpool, f2);
BLI_mempool_free(bm->fpool, f2);
bm->totface--;
/* account for both above */
bm->elem_index_dirty |= BM_EDGE | BM_LOOP | BM_FACE;

View File

@ -1292,7 +1292,7 @@ void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
{
BMIter iter;
BM_mempool *oldpool = (BM_mempool *)olddata->pool;
BLI_mempool *oldpool = olddata->pool;
void *block;
CustomDataLayer **nocopy_layers = NULL;
@ -1376,7 +1376,7 @@ static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
/* this should never happen but can when dissolve fails - T28960. */
BLI_assert(data->pool != oldpool);
BM_mempool_destroy((BM_mempool *)oldpool);
BLI_mempool_destroy(oldpool);
}
}

View File

@ -421,17 +421,17 @@ int BM_iter_mesh_count_flag(const char itype, BMesh *bm, const char hflag, const
void bmiter__elem_of_mesh_begin(struct BMIter__elem_of_mesh *iter)
{
#ifdef USE_IMMUTABLE_ASSERT
((BMIter *)iter)->count = BM_mempool_len(iter->pooliter.pool);
((BMIter *)iter)->count = BLI_mempool_len(iter->pooliter.pool);
#endif
BM_mempool_iternew(iter->pooliter.pool, &iter->pooliter);
BLI_mempool_iternew(iter->pooliter.pool, &iter->pooliter);
}
void *bmiter__elem_of_mesh_step(struct BMIter__elem_of_mesh *iter)
{
#ifdef USE_IMMUTABLE_ASSERT
BLI_assert(((BMIter *)iter)->count <= BM_mempool_len(iter->pooliter.pool));
BLI_assert(((BMIter *)iter)->count <= BLI_mempool_len(iter->pooliter.pool));
#endif
return BM_mempool_iterstep(&iter->pooliter);
return BLI_mempool_iterstep(&iter->pooliter);
}
#ifdef USE_IMMUTABLE_ASSERT

View File

@ -110,7 +110,7 @@ extern const char bm_iter_itype_htype_map[BM_ITYPE_MAX];
/* iterator type structs */
struct BMIter__elem_of_mesh {
BM_mempool_iter pooliter;
BLI_mempool_iter pooliter;
};
struct BMIter__edge_of_vert {
BMVert *vdata;

View File

@ -192,13 +192,13 @@ BLI_INLINE void BM_iter_parallel(BMesh *bm,
/* inlining optimizes out this switch when called with the defined type */
switch ((BMIterType)itype) {
case BM_VERTS_OF_MESH:
BM_task_parallel_mempool(bm->vpool, userdata, func, settings);
BLI_task_parallel_mempool(bm->vpool, userdata, func, settings);
break;
case BM_EDGES_OF_MESH:
BM_task_parallel_mempool(bm->epool, userdata, func, settings);
BLI_task_parallel_mempool(bm->epool, userdata, func, settings);
break;
case BM_FACES_OF_MESH:
BM_task_parallel_mempool(bm->fpool, userdata, func, settings);
BLI_task_parallel_mempool(bm->fpool, userdata, func, settings);
break;
default:
/* should never happen */

View File

@ -689,7 +689,7 @@ static void bm_log_vert_customdata(
if (lv->customdata) {
CustomData_bmesh_asan_unpoison(&entry->vdata, lv->customdata);
BM_mempool_free((BM_mempool *)entry->vdata.pool, lv->customdata);
BLI_mempool_free(entry->vdata.pool, lv->customdata);
lv->customdata = NULL;
}
@ -706,7 +706,7 @@ static void bm_log_edge_customdata(
{
if (le->customdata) {
CustomData_bmesh_asan_unpoison(&entry->edata, le->customdata);
BM_mempool_free((BM_mempool *)entry->edata.pool, le->customdata);
BLI_mempool_free(entry->edata.pool, le->customdata);
le->customdata = NULL;
}
@ -724,7 +724,7 @@ static void bm_log_face_customdata(BMesh *bm, BMLog *log, BMFace *f, BMLogFace *
if (lf->customdata_f) {
CustomData_bmesh_asan_unpoison(&entry->pdata, lf->customdata_f);
BM_mempool_free((BM_mempool *)entry->pdata.pool, lf->customdata_f);
BLI_mempool_free(entry->pdata.pool, lf->customdata_f);
lf->customdata_f = NULL;
}
@ -738,7 +738,7 @@ static void bm_log_face_customdata(BMesh *bm, BMLog *log, BMFace *f, BMLogFace *
do {
if (lf->customdata[i]) {
CustomData_bmesh_asan_unpoison(&entry->ldata, lf->customdata[i]);
BM_mempool_free((BM_mempool *)entry->ldata.pool, lf->customdata[i]);
BLI_mempool_free(entry->ldata.pool, lf->customdata[i]);
lf->customdata[i] = NULL;
}
@ -910,14 +910,14 @@ static void bm_log_face_bmface_copy(
if (lf->customdata_f) {
CustomData_bmesh_asan_unpoison(&entry->pdata, lf->customdata_f);
BM_mempool_free((BM_mempool *)entry->pdata.pool, lf->customdata_f);
BLI_mempool_free(entry->pdata.pool, lf->customdata_f);
lf->customdata_f = NULL;
}
for (uint i = 0; i < lf->len; i++) {
if (lf->customdata[i]) {
CustomData_bmesh_asan_unpoison(&entry->ldata, lf->customdata[i]);
BM_mempool_free((BM_mempool *)entry->ldata.pool, lf->customdata[i]);
BLI_mempool_free(entry->ldata.pool, lf->customdata[i]);
lf->customdata[i] = NULL;
}
}
@ -1413,7 +1413,7 @@ static void bm_log_faces_restore(
static void bm_log_vert_values_swap(
BMesh *bm, BMLog *log, GHash *verts, BMLogEntry *entry, BMLogCallbacks *callbacks)
{
void *scratch = bm->vdata.pool ? BM_mempool_alloc((BM_mempool *)bm->vdata.pool) : NULL;
void *scratch = bm->vdata.pool ? BLI_mempool_alloc(bm->vdata.pool) : NULL;
GHashIterator gh_iter;
GHASH_ITER (gh_iter, verts) {
@ -1462,14 +1462,14 @@ static void bm_log_vert_values_swap(
}
if (scratch) {
BM_mempool_free((BM_mempool *)bm->vdata.pool, scratch);
BLI_mempool_free(bm->vdata.pool, scratch);
}
}
static void bm_log_edge_values_swap(
BMesh *bm, BMLog *log, GHash *edges, BMLogEntry *entry, BMLogCallbacks *callbacks)
{
void *scratch = bm->edata.pool ? BM_mempool_alloc((BM_mempool *)bm->edata.pool) : NULL;
void *scratch = bm->edata.pool ? BLI_mempool_alloc(bm->edata.pool) : NULL;
GHashIterator gh_iter;
GHASH_ITER (gh_iter, edges) {
@ -1497,7 +1497,7 @@ static void bm_log_edge_values_swap(
}
if (scratch) {
BM_mempool_free((BM_mempool *)bm->edata.pool, scratch);
BLI_mempool_free(bm->edata.pool, scratch);
}
}
@ -1506,7 +1506,7 @@ static void bm_log_face_values_swap(BMLog *log,
BMLogEntry *entry,
BMLogCallbacks *callbacks)
{
void *scratch = log->bm->pdata.pool ? BM_mempool_alloc((BM_mempool *)log->bm->pdata.pool) : NULL;
void *scratch = log->bm->pdata.pool ? BLI_mempool_alloc(log->bm->pdata.pool) : NULL;
GHashIterator gh_iter;
GHASH_ITER (gh_iter, faces) {
@ -1545,7 +1545,7 @@ static void bm_log_face_values_swap(BMLog *log,
}
if (scratch) {
BM_mempool_free((BM_mempool *)log->bm->pdata.pool, scratch);
BLI_mempool_free(log->bm->pdata.pool, scratch);
}
}
@ -1657,11 +1657,11 @@ static void bm_log_entry_free_direct(BMLogEntry *entry)
int cd_mdisps = CustomData_get_offset(&entry->ldata, CD_MDISPS);
/* iterate over cdata blocks directly */
BM_mempool_iter iter;
BM_mempool_iternew((BM_mempool *)entry->ldata.pool, &iter);
void *block = BM_mempool_iterstep(&iter);
BLI_mempool_iter iter;
BLI_mempool_iternew(entry->ldata.pool, &iter);
void *block = BLI_mempool_iterstep(&iter);
for (; block; block = BM_mempool_iterstep(&iter)) {
for (; block; block = BLI_mempool_iterstep(&iter)) {
BMElem elem;
elem.head.data = block;
@ -1673,16 +1673,16 @@ static void bm_log_entry_free_direct(BMLogEntry *entry)
}
if (entry->vdata.pool) {
BM_mempool_destroy(entry->vdata.pool);
BLI_mempool_destroy(entry->vdata.pool);
}
if (entry->edata.pool) {
BM_mempool_destroy(entry->edata.pool);
BLI_mempool_destroy(entry->edata.pool);
}
if (entry->ldata.pool) {
BM_mempool_destroy(entry->ldata.pool);
BLI_mempool_destroy(entry->ldata.pool);
}
if (entry->pdata.pool) {
BM_mempool_destroy(entry->pdata.pool);
BLI_mempool_destroy(entry->pdata.pool);
}
CustomData_free(&entry->vdata, 0);
@ -3426,12 +3426,11 @@ static int bmlog_entry_memsize(BMLogEntry *entry)
ret += (int)BLI_mempool_get_size(entry->pool_verts);
ret += (int)BLI_mempool_get_size(entry->pool_edges);
ret += (int)BLI_mempool_get_size(entry->pool_faces);
#ifndef BM_LOCKFREE_MEMPOOL
ret += entry->vdata.pool ? (int)BLI_mempool_get_size(entry->vdata.pool) : 0;
ret += entry->edata.pool ? (int)BLI_mempool_get_size(entry->edata.pool) : 0;
ret += entry->ldata.pool ? (int)BLI_mempool_get_size(entry->ldata.pool) : 0;
ret += entry->pdata.pool ? (int)BLI_mempool_get_size(entry->pdata.pool) : 0;
#endif
ret += BLI_memarena_size(entry->arena);
if (BLI_memarena_size(entry->arena)) {

View File

@ -49,10 +49,10 @@ static void bm_alloc_toolflags(BMesh *bm);
static void bm_mempool_init_ex(const BMAllocTemplate *allocsize,
const bool use_toolflags,
BM_mempool **r_vpool,
BM_mempool **r_epool,
BM_mempool **r_lpool,
BM_mempool **r_fpool)
BLI_mempool **r_vpool,
BLI_mempool **r_epool,
BLI_mempool **r_lpool,
BLI_mempool **r_fpool)
{
size_t vert_size, edge_size, loop_size, face_size;
@ -70,19 +70,19 @@ static void bm_mempool_init_ex(const BMAllocTemplate *allocsize,
}
if (r_vpool) {
*r_vpool = BM_mempool_create(
*r_vpool = BLI_mempool_create(
vert_size, allocsize->totvert, bm_mesh_chunksize_default.totvert, BLI_MEMPOOL_ALLOW_ITER);
}
if (r_epool) {
*r_epool = BM_mempool_create(
*r_epool = BLI_mempool_create(
edge_size, allocsize->totedge, bm_mesh_chunksize_default.totedge, BLI_MEMPOOL_ALLOW_ITER);
}
if (r_lpool) {
*r_lpool = BM_mempool_create(
*r_lpool = BLI_mempool_create(
loop_size, allocsize->totloop, bm_mesh_chunksize_default.totloop, BLI_MEMPOOL_ALLOW_ITER);
}
if (r_fpool) {
*r_fpool = BM_mempool_create(
*r_fpool = BLI_mempool_create(
face_size, allocsize->totface, bm_mesh_chunksize_default.totface, BLI_MEMPOOL_ALLOW_ITER);
}
}
@ -92,7 +92,7 @@ static void bm_mempool_init(BMesh *bm, const BMAllocTemplate *allocsize, const b
bm_mempool_init_ex(allocsize, use_toolflags, &bm->vpool, &bm->epool, &bm->lpool, &bm->fpool);
#ifdef USE_BMESH_HOLES
bm->looplistpool = BM_mempool_create(sizeof(BMLoopList), 512, 512, BLI_MEMPOOL_NOP);
bm->looplistpool = BLI_mempool_create(sizeof(BMLoopList), 512, 512, BLI_MEMPOOL_NOP);
#endif
}
@ -104,9 +104,9 @@ void BM_mesh_elem_toolflags_ensure(BMesh *bm)
return;
}
bm->vtoolflagpool = BM_mempool_create(sizeof(BMFlagLayer), bm->totvert, 512, BLI_MEMPOOL_NOP);
bm->etoolflagpool = BM_mempool_create(sizeof(BMFlagLayer), bm->totedge, 512, BLI_MEMPOOL_NOP);
bm->ftoolflagpool = BM_mempool_create(sizeof(BMFlagLayer), bm->totface, 512, BLI_MEMPOOL_NOP);
bm->vtoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totvert, 512, BLI_MEMPOOL_NOP);
bm->etoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totedge, 512, BLI_MEMPOOL_NOP);
bm->ftoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totface, 512, BLI_MEMPOOL_NOP);
bm_alloc_toolflags(bm);
@ -118,15 +118,15 @@ void BM_mesh_elem_toolflags_clear(BMesh *bm)
bool haveflags = bm->vtoolflagpool || bm->etoolflagpool || bm->ftoolflagpool;
if (bm->vtoolflagpool) {
BM_mempool_destroy(bm->vtoolflagpool);
BLI_mempool_destroy(bm->vtoolflagpool);
bm->vtoolflagpool = NULL;
}
if (bm->etoolflagpool) {
BM_mempool_destroy(bm->etoolflagpool);
BLI_mempool_destroy(bm->etoolflagpool);
bm->etoolflagpool = NULL;
}
if (bm->ftoolflagpool) {
BM_mempool_destroy(bm->ftoolflagpool);
BLI_mempool_destroy(bm->ftoolflagpool);
bm->ftoolflagpool = NULL;
}
@ -168,10 +168,6 @@ BMesh *BM_mesh_create(const BMAllocTemplate *allocsize, const struct BMeshCreate
/* allocate the structure */
BMesh *bm = MEM_callocN(sizeof(BMesh), __func__);
#ifdef BM_LOCKFREE_MEMPOOL
bm->idmap.lock = BLI_ticket_mutex_alloc();
#endif
/* allocate the memory pools for the mesh elements */
bm_mempool_init(bm, allocsize, params->use_toolflags);
@ -318,16 +314,16 @@ void BM_mesh_data_free(BMesh *bm)
/* Free custom data pools, This should probably go in CustomData_free? */
if (bm->vdata.totlayer) {
BM_mempool_destroy(bm->vdata.pool);
BLI_mempool_destroy(bm->vdata.pool);
}
if (bm->edata.totlayer) {
BM_mempool_destroy(bm->edata.pool);
BLI_mempool_destroy(bm->edata.pool);
}
if (bm->ldata.totlayer) {
BM_mempool_destroy(bm->ldata.pool);
BLI_mempool_destroy(bm->ldata.pool);
}
if (bm->pdata.totlayer) {
BM_mempool_destroy(bm->pdata.pool);
BLI_mempool_destroy(bm->pdata.pool);
}
/* free custom data */
@ -337,10 +333,10 @@ void BM_mesh_data_free(BMesh *bm)
CustomData_free(&bm->pdata, 0);
/* destroy element pools */
BM_mempool_destroy(bm->vpool);
BM_mempool_destroy(bm->epool);
BM_mempool_destroy(bm->lpool);
BM_mempool_destroy(bm->fpool);
BLI_mempool_destroy(bm->vpool);
BLI_mempool_destroy(bm->epool);
BLI_mempool_destroy(bm->lpool);
BLI_mempool_destroy(bm->fpool);
if (bm->vtable) {
MEM_freeN(bm->vtable);
@ -355,20 +351,20 @@ void BM_mesh_data_free(BMesh *bm)
/* destroy flag pools */
if (bm->vtoolflagpool) {
BM_mempool_destroy(bm->vtoolflagpool);
BLI_mempool_destroy(bm->vtoolflagpool);
bm->vtoolflagpool = NULL;
}
if (bm->etoolflagpool) {
BM_mempool_destroy(bm->etoolflagpool);
BLI_mempool_destroy(bm->etoolflagpool);
bm->etoolflagpool = NULL;
}
if (bm->ftoolflagpool) {
BM_mempool_destroy(bm->ftoolflagpool);
BLI_mempool_destroy(bm->ftoolflagpool);
bm->ftoolflagpool = NULL;
}
#ifdef USE_BMESH_HOLES
BM_mempool_destroy(bm->looplistpool);
BLI_mempool_destroy(bm->looplistpool);
#endif
BLI_freelistN(&bm->selected);
@ -842,17 +838,17 @@ void BM_mesh_elem_table_free(BMesh *bm, const char htype)
BMVert *BM_vert_at_index_find(BMesh *bm, const int index)
{
return BM_mempool_findelem(bm->vpool, index);
return BLI_mempool_findelem(bm->vpool, index);
}
BMEdge *BM_edge_at_index_find(BMesh *bm, const int index)
{
return BM_mempool_findelem(bm->epool, index);
return BLI_mempool_findelem(bm->epool, index);
}
BMFace *BM_face_at_index_find(BMesh *bm, const int index)
{
return BM_mempool_findelem(bm->fpool, index);
return BLI_mempool_findelem(bm->fpool, index);
}
BMLoop *BM_loop_at_index_find(BMesh *bm, const int index)
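
The *_at_index_find lookups above now call BLI_mempool_findelem directly, which walks the pool instead of using the cached element tables. A small sketch, assuming bm->vpool was created with BLI_MEMPOOL_ALLOW_ITER (as the element pools are):

#include "BLI_mempool.h"
#include "bmesh.h"

/* Illustrative: fetch the vertex at a given pool index without the vertex table. */
static BMVert *vert_at_pool_index(BMesh *bm, int index)
{
  /* O(n) over the pool; returns NULL when the index is out of range. */
  return BLI_mempool_findelem(bm->vpool, index);
}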
@ -1040,12 +1036,12 @@ void BM_mesh_remap(BMesh *bm,
BMLoop **ltable = MEM_malloc_arrayN(bm->totloop, sizeof(*ltable), "ltable");
BMLoop *ed;
BM_mempool_iter liter;
BM_mempool_iternew(bm->lpool, &liter);
BMLoop *l = (BMLoop *)BM_mempool_iterstep(&liter);
BLI_mempool_iter liter;
BLI_mempool_iternew(bm->lpool, &liter);
BMLoop *l = (BMLoop *)BLI_mempool_iterstep(&liter);
int i = 0;
for (; l; l = (BMLoop *)BM_mempool_iterstep(&liter), i++) {
for (; l; l = (BMLoop *)BLI_mempool_iterstep(&liter), i++) {
l->head.index = i;
ltable[i] = l;
}
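
The loop-table rebuild above uses the stock BLI_mempool iterator. A minimal sketch of that iteration pattern, assuming the pool allows iteration; the per-element work is a placeholder:

#include "BLI_mempool.h"
#include "bmesh.h"

static void index_all_loops(BMesh *bm)
{
  BLI_mempool_iter liter;
  BLI_mempool_iternew(bm->lpool, &liter);

  BMLoop *l = (BMLoop *)BLI_mempool_iterstep(&liter);
  for (int i = 0; l; l = (BMLoop *)BLI_mempool_iterstep(&liter), i++) {
    l->head.index = i; /* same indexing idiom as the remap code above */
  }
}

Iteration only works on pools created with BLI_MEMPOOL_ALLOW_ITER, which is why the element pools pass that flag at creation.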
@ -1416,10 +1412,10 @@ void BM_mesh_remap(BMesh *bm,
*/
void BM_mesh_rebuild(BMesh *bm,
const struct BMeshCreateParams *params,
BM_mempool *vpool_dst,
BM_mempool *epool_dst,
BM_mempool *lpool_dst,
BM_mempool *fpool_dst)
BLI_mempool *vpool_dst,
BLI_mempool *epool_dst,
BLI_mempool *lpool_dst,
BLI_mempool *fpool_dst)
{
const char remap = (vpool_dst ? BM_VERT : 0) | (epool_dst ? BM_EDGE : 0) |
(lpool_dst ? BM_LOOP : 0) | (fpool_dst ? BM_FACE : 0);
@ -1440,13 +1436,13 @@ void BM_mesh_rebuild(BMesh *bm,
int index;
BMVert *v_src;
BM_ITER_MESH_INDEX (v_src, &iter, bm, BM_VERTS_OF_MESH, index) {
BMVert *v_dst = BM_mempool_alloc(vpool_dst);
BMVert *v_dst = BLI_mempool_alloc(vpool_dst);
memcpy(v_dst, v_src, sizeof(BMVert));
if (use_toolflags) {
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(
v_dst, bm->vdata.layers[bm->vdata.typemap[CD_TOOLFLAGS]].offset);
flags->flag = bm->vtoolflagpool ? BM_mempool_calloc(bm->vtoolflagpool) : NULL;
flags->flag = bm->vtoolflagpool ? BLI_mempool_calloc(bm->vtoolflagpool) : NULL;
}
vtable_dst[index] = v_dst;
@ -1459,13 +1455,13 @@ void BM_mesh_rebuild(BMesh *bm,
int index;
BMEdge *e_src;
BM_ITER_MESH_INDEX (e_src, &iter, bm, BM_EDGES_OF_MESH, index) {
BMEdge *e_dst = BM_mempool_alloc(epool_dst);
BMEdge *e_dst = BLI_mempool_alloc(epool_dst);
memcpy(e_dst, e_src, sizeof(BMEdge));
if (use_toolflags) {
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(
e_dst, bm->edata.layers[bm->edata.typemap[CD_TOOLFLAGS]].offset);
flags->flag = bm->etoolflagpool ? BM_mempool_calloc(bm->etoolflagpool) : NULL;
flags->flag = bm->etoolflagpool ? BLI_mempool_calloc(bm->etoolflagpool) : NULL;
}
etable_dst[index] = e_dst;
@ -1480,14 +1476,14 @@ void BM_mesh_rebuild(BMesh *bm,
BM_ITER_MESH_INDEX (f_src, &iter, bm, BM_FACES_OF_MESH, index) {
if (remap & BM_FACE) {
BMFace *f_dst = BM_mempool_alloc(fpool_dst);
BMFace *f_dst = BLI_mempool_alloc(fpool_dst);
memcpy(f_dst, f_src, sizeof(BMFace));
if (use_toolflags) {
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(
f_dst, bm->pdata.layers[bm->pdata.typemap[CD_TOOLFLAGS]].offset);
flags->flag = bm->ftoolflagpool ? BM_mempool_calloc(bm->ftoolflagpool) : NULL;
flags->flag = bm->ftoolflagpool ? BLI_mempool_calloc(bm->ftoolflagpool) : NULL;
}
ftable_dst[index] = f_dst;
@ -1499,7 +1495,7 @@ void BM_mesh_rebuild(BMesh *bm,
BMLoop *l_iter_src, *l_first_src;
l_iter_src = l_first_src = BM_FACE_FIRST_LOOP((BMFace *)f_src);
do {
BMLoop *l_dst = BM_mempool_alloc(lpool_dst);
BMLoop *l_dst = BLI_mempool_alloc(lpool_dst);
memcpy(l_dst, l_iter_src, sizeof(BMLoop));
ltable_dst[index_loop] = l_dst;
BM_elem_index_set(l_iter_src, index_loop++); /* set_ok */
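
Each rebuild loop above has the same shape: allocate a slot in the destination pool, memcpy the source element into it, and record the new pointer in a temporary table. A condensed sketch for vertices only (toolflag re-seating and the dirty-table bookkeeping omitted):

#include <string.h>

#include "MEM_guardedalloc.h"
#include "BLI_mempool.h"
#include "bmesh.h"

static BMVert **copy_verts_to_pool(BMesh *bm, BLI_mempool *vpool_dst)
{
  BMVert **vtable_dst = MEM_malloc_arrayN(bm->totvert, sizeof(*vtable_dst), __func__);
  BMIter iter;
  BMVert *v_src;
  int index;

  BM_ITER_MESH_INDEX (v_src, &iter, bm, BM_VERTS_OF_MESH, index) {
    BMVert *v_dst = BLI_mempool_alloc(vpool_dst);
    memcpy(v_dst, v_src, sizeof(BMVert));
    vtable_dst[index] = v_dst;
  }
  return vtable_dst;
}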
@ -1635,7 +1631,7 @@ void BM_mesh_rebuild(BMesh *bm,
bm->elem_table_dirty &= ~BM_VERT;
}
MEM_freeN(vtable_dst);
BM_mempool_destroy(bm->vpool);
BLI_mempool_destroy(bm->vpool);
bm->vpool = vpool_dst;
}
@ -1646,14 +1642,14 @@ void BM_mesh_rebuild(BMesh *bm,
bm->elem_table_dirty &= ~BM_EDGE;
}
MEM_freeN(etable_dst);
BM_mempool_destroy(bm->epool);
BLI_mempool_destroy(bm->epool);
bm->epool = epool_dst;
}
if (remap & BM_LOOP) {
/* no loop table */
MEM_freeN(ltable_dst);
BM_mempool_destroy(bm->lpool);
BLI_mempool_destroy(bm->lpool);
bm->lpool = lpool_dst;
}
@ -1664,7 +1660,7 @@ void BM_mesh_rebuild(BMesh *bm,
bm->elem_table_dirty &= ~BM_FACE;
}
MEM_freeN(ftable_dst);
BM_mempool_destroy(bm->fpool);
BLI_mempool_destroy(bm->fpool);
bm->fpool = fpool_dst;
}
@ -1712,21 +1708,21 @@ static void bm_alloc_toolflags(BMesh *bm)
bm_alloc_toolflags_cdlayers(bm, true);
CustomData *cdatas[3] = {&bm->vdata, &bm->edata, &bm->pdata};
BM_mempool *flagpools[3] = {bm->vtoolflagpool, bm->etoolflagpool, bm->ftoolflagpool};
BM_mempool *elempools[3] = {bm->vpool, bm->epool, bm->fpool};
BLI_mempool *flagpools[3] = {bm->vtoolflagpool, bm->etoolflagpool, bm->ftoolflagpool};
BLI_mempool *elempools[3] = {bm->vpool, bm->epool, bm->fpool};
for (int i = 0; i < 3; i++) {
CustomData *cdata = cdatas[i];
int cd_tflags = CustomData_get_offset(cdata, CD_TOOLFLAGS);
BM_mempool_iter iter;
BM_mempool_iternew(elempools[i], &iter);
BMElem *elem = (BMElem *)BM_mempool_iterstep(&iter);
BLI_mempool_iter iter;
BLI_mempool_iternew(elempools[i], &iter);
BMElem *elem = (BMElem *)BLI_mempool_iterstep(&iter);
for (; elem; elem = (BMElem *)BM_mempool_iterstep(&iter)) {
for (; elem; elem = (BMElem *)BLI_mempool_iterstep(&iter)) {
MToolFlags *flags = (MToolFlags *)BM_ELEM_CD_GET_VOID_P(elem, cd_tflags);
flags->flag = BM_mempool_calloc(flagpools[i]);
flags->flag = BLI_mempool_calloc(flagpools[i]);
}
}
}
@ -1741,9 +1737,9 @@ void BM_mesh_toolflags_set(BMesh *bm, bool use_toolflags)
}
if (use_toolflags == false) {
BM_mempool_destroy(bm->vtoolflagpool);
BM_mempool_destroy(bm->etoolflagpool);
BM_mempool_destroy(bm->ftoolflagpool);
BLI_mempool_destroy(bm->vtoolflagpool);
BLI_mempool_destroy(bm->etoolflagpool);
BLI_mempool_destroy(bm->ftoolflagpool);
bm->vtoolflagpool = NULL;
bm->etoolflagpool = NULL;
@ -2054,15 +2050,14 @@ bool BM_defragment_vertex(BMesh *bm,
void (*on_vert_swap)(BMVert *a, BMVert *b, void *userdata),
void *userdata)
{
#if 0
BMEdge *e = v->e;
# if 1
#if 1
int cd_vcol = CustomData_get_offset(&bm->vdata, CD_PROP_COLOR);
if (cd_vcol >= 0) {
float *color = BM_ELEM_CD_GET_VOID_P(v, cd_vcol);
int idx = BM_mempool_find_real_index(bm->vpool, (void *)v);
int idx = BLI_mempool_find_real_index(bm->vpool, (void *)v);
int size = BLI_mempool_get_size(bm->vpool);
float f = (float)idx / (float)size / 2.0f;
@ -2070,7 +2065,7 @@ bool BM_defragment_vertex(BMesh *bm,
color[0] = color[1] = color[2] = f;
color[3] = 1.0f;
}
# endif
#endif
// return false;
@ -2179,7 +2174,7 @@ bool BM_defragment_vertex(BMesh *bm,
on_vert_swap(v2, elems[i], userdata);
BM_swap_verts(bm, v2, elems[i]);
# if 0
#if 0
BMIter iter;
BMEdge *et;
int f = 0;
@ -2191,7 +2186,7 @@ bool BM_defragment_vertex(BMesh *bm,
BM_ITER_ELEM (et, &iter, v, BM_EDGES_OF_VERT) {
printf("an 1edge %d\n", f++);
}
# endif
#endif
// BM_swap_verts(bm, v2, elems[i]);
@ -2203,7 +2198,7 @@ bool BM_defragment_vertex(BMesh *bm,
break;
}
} while ((e = BM_DISK_EDGE_NEXT(e, v)) != v->e);
#endif
return true;
}
/** \} */

View File

@ -116,10 +116,10 @@ void BM_mesh_remap(BMesh *bm,
void BM_mesh_rebuild(BMesh *bm,
const struct BMeshCreateParams *params,
struct BM_mempool *vpool,
struct BM_mempool *epool,
struct BM_mempool *lpool,
struct BM_mempool *fpool);
struct BLI_mempool *vpool,
struct BLI_mempool *epool,
struct BLI_mempool *lpool,
struct BLI_mempool *fpool);
typedef struct BMAllocTemplate {
int totvert, totedge, totloop, totface;

View File

@ -100,16 +100,16 @@
static void bm_free_cd_pools(BMesh *bm)
{
if (bm->vdata.pool) {
BM_mempool_destroy(bm->vdata.pool);
BLI_mempool_destroy(bm->vdata.pool);
}
if (bm->edata.pool) {
BM_mempool_destroy(bm->edata.pool);
BLI_mempool_destroy(bm->edata.pool);
}
if (bm->ldata.pool) {
BM_mempool_destroy(bm->ldata.pool);
BLI_mempool_destroy(bm->ldata.pool);
}
if (bm->pdata.pool) {
BM_mempool_destroy(bm->pdata.pool);
BLI_mempool_destroy(bm->pdata.pool);
}
}
@ -138,7 +138,7 @@ static void bm_mark_temp_cdlayers(BMesh *bm)
CustomDataLayer *cl = (srcdata)->layers, *cl2 = (destdata)->layers; \
int size = 0; \
if (!*block) { \
*block = BM_mempool_alloc((destdata)->pool); \
*block = BLI_mempool_alloc((destdata)->pool); \
} \
for (int j = 0; j < (srcdata)->totlayer; j++, cl++) { \
if ((destdata)->typemap[cl->type] < 0) { \
@ -280,9 +280,9 @@ void BM_enter_multires_space(Object *ob, BMesh *bm, int space)
*/
void BM_mesh_bm_from_me(Object *ob,
BMesh *bm,
const Mesh *me,
const struct BMeshFromMeshParams *params)
BMesh *bm,
const Mesh *me,
const struct BMeshFromMeshParams *params)
{
const bool is_new = !(bm->totvert || (bm->vdata.totlayer || bm->edata.totlayer ||
bm->pdata.totlayer || bm->ldata.totlayer));
@ -480,11 +480,11 @@ void BM_mesh_bm_from_me(Object *ob,
bm_alloc_toolflags_cdlayers(bm, !is_new);
if (!bm->vtoolflagpool) {
bm->vtoolflagpool = BM_mempool_create(
bm->vtoolflagpool = BLI_mempool_create(
sizeof(BMFlagLayer), bm->totvert, 512, BLI_MEMPOOL_NOP);
bm->etoolflagpool = BM_mempool_create(
bm->etoolflagpool = BLI_mempool_create(
sizeof(BMFlagLayer), bm->totedge, 512, BLI_MEMPOOL_NOP);
bm->ftoolflagpool = BM_mempool_create(
bm->ftoolflagpool = BLI_mempool_create(
sizeof(BMFlagLayer), bm->totface, 512, BLI_MEMPOOL_NOP);
bm->totflags = 1;
@ -838,7 +838,7 @@ void BM_mesh_bm_from_me(Object *ob,
memset(bm->idmap.free_ids, 0, bm->idmap.free_ids_size * sizeof(*bm->idmap.free_ids));
BM_mempool_iter miter;
BLI_mempool_iter miter;
for (int i = 0; i < 4; i++) {
int htype = 1 << i;
@ -846,11 +846,11 @@ void BM_mesh_bm_from_me(Object *ob,
continue;
}
BM_mempool *pool = (&bm->vpool)[i];
BM_mempool_iternew(pool, &miter);
BMElem *elem = (BMElem *)BM_mempool_iterstep(&miter);
BLI_mempool *pool = (&bm->vpool)[i];
BLI_mempool_iternew(pool, &miter);
BMElem *elem = (BMElem *)BLI_mempool_iterstep(&miter);
for (; elem; elem = (BMElem *)BM_mempool_iterstep(&miter)) {
for (; elem; elem = (BMElem *)BLI_mempool_iterstep(&miter)) {
uint id = (uint)BM_ELEM_GET_ID(bm, elem);
BLI_BITMAP_SET(bm->idmap.free_ids, id, true);
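
The id-map pass above records which ids are present using Blender's BLI_bitmap macros. For reference, a standalone sketch of that bitmap API; the names and indices here are illustrative:

#include <stdbool.h>

#include "BLI_bitmap.h"
#include "MEM_guardedalloc.h"

static void id_bitmap_example(unsigned int tot_ids)
{
  BLI_bitmap *ids = BLI_BITMAP_NEW(tot_ids, __func__); /* zero-initialized */

  BLI_BITMAP_SET(ids, 3, true);  /* mark id 3 */
  if (BLI_BITMAP_TEST(ids, 7)) { /* query id 7 */
    /* ... */
  }

  MEM_freeN(ids);
}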

View File

@ -318,10 +318,6 @@ BMesh *BM_mesh_bm_from_me_threaded(BMesh *bm,
const Mesh *me,
const struct BMeshFromMeshParams *params)
{
# ifdef BM_LOCKFREE_MEMPOOL
BM_mesh_bm_from_me(ob, bm, me, params);
return bm;
# else
if (!bm) {
bm = MEM_callocN(sizeof(BMesh), "BM_mesh_bm_from_me_threaded bm");
}
@ -589,7 +585,6 @@ BMesh *BM_mesh_bm_from_me_threaded(BMesh *bm,
bm->elem_table_dirty = BM_VERT | BM_EDGE | BM_FACE;
return bm;
# endif
}
static void bm_unmark_temp_cdlayers(BMesh *bm)

View File

@ -26,7 +26,6 @@
#include "BLI_math.h"
#include "BLI_memarena.h"
#include "BLI_mempool.h"
#include "BLI_mempool_lockfree.h"
#include "BLI_string.h"
#include "BLI_utildefines.h"
@ -1261,13 +1260,13 @@ static void bmo_flag_layer_do(BMesh *bm,
int htype,
int totelem,
int new_totflags,
BM_mempool **pool_ptr))
BLI_mempool **pool_ptr))
{
int iters[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH};
int types[3] = {BM_VERT, BM_EDGE, BM_FACE};
int tots[3] = {bm->totvert, bm->totedge, bm->totface};
BM_mempool **pools[3] = {&bm->vtoolflagpool, &bm->etoolflagpool, &bm->ftoolflagpool};
BLI_mempool **pools[3] = {&bm->vtoolflagpool, &bm->etoolflagpool, &bm->ftoolflagpool};
CustomData *cdatas[3] = {&bm->vdata, &bm->edata, &bm->pdata};
for (int i = 0; i < 3; i++) {
@ -1289,22 +1288,22 @@ static void bmo_flag_layer_alloc_do(BMesh *bm,
int htype,
int totelem,
int new_totflags,
BM_mempool **pool_ptr)
BLI_mempool **pool_ptr)
{
BMIter iter;
BMElem *elem;
int i;
const size_t old_totflags_size = (bm->totflags * sizeof(BMFlagLayer));
BM_mempool *oldpool = *pool_ptr;
BM_mempool *newpool = BM_mempool_create(
BLI_mempool *oldpool = *pool_ptr;
BLI_mempool *newpool = BLI_mempool_create(
sizeof(BMFlagLayer) * new_totflags, totelem, 512, BLI_MEMPOOL_NOP);
BM_ITER_MESH_INDEX (elem, &iter, bm, itertype, i) {
MToolFlags *flags = BM_ELEM_CD_GET_VOID_P(elem, cd_tflags);
short *oldflags = flags->flag;
flags->flag = BM_mempool_calloc(newpool);
flags->flag = BLI_mempool_calloc(newpool);
memcpy(flags->flag, oldflags, old_totflags_size);
BM_elem_index_set(elem, i); /* set_inline */
@ -1312,7 +1311,7 @@ static void bmo_flag_layer_alloc_do(BMesh *bm,
}
*pool_ptr = newpool;
BM_mempool_destroy(oldpool);
BLI_mempool_destroy(oldpool);
}
/**
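
bmo_flag_layer_alloc_do above swaps the flag pool for a wider one: create a pool with a larger element size, copy each element's old flag block into a freshly calloc'd slot, then destroy the old pool in one go. A reduced, self-contained sketch of that swap; the short-based flag layout, pointer array, and 512 chunk size are illustrative:

#include <string.h>

#include "BLI_mempool.h"

/* flag_ptrs[i] points at element i's current flag block inside *pool_ptr. */
static void grow_flag_pool(BLI_mempool **pool_ptr, short **flag_ptrs, int totelem,
                           int old_totflag, int new_totflag)
{
  BLI_mempool *oldpool = *pool_ptr;
  BLI_mempool *newpool = BLI_mempool_create(
      sizeof(short) * new_totflag, totelem, 512, BLI_MEMPOOL_NOP);

  for (int i = 0; i < totelem; i++) {
    short *oldflags = flag_ptrs[i];
    flag_ptrs[i] = BLI_mempool_calloc(newpool);
    memcpy(flag_ptrs[i], oldflags, sizeof(short) * old_totflag);
  }

  *pool_ptr = newpool;
  BLI_mempool_destroy(oldpool); /* frees every old flag block at once */
}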
@ -1347,7 +1346,7 @@ static void bmo_flag_layer_clear_do(BMesh *bm,
int htype,
int totelem,
int totflag,
BM_mempool **pool_ptr)
BLI_mempool **pool_ptr)
{
BMIter iter;
BMElem *elem;

View File

@ -5059,7 +5059,6 @@ static void sculpt_topology_update(Sculpt *sd,
SCULPT_get_int(ss, dyntopo_disable_smooth, sd, brush));
SCULPT_dyntopo_automasking_end(mask_cb_data);
SCULPT_update_customdata_refs(ss);
/* Update average stroke position. */
copy_v3_v3(location, ss->cache->true_location);

View File

@ -587,9 +587,6 @@ int SCULPT_dyntopo_get_templayer(SculptSession *ss, int type, const char *name)
}
char dyntopop_faces_areas_layer_id[] = "__dyntopo_face_areas";
#ifdef WITH_DYNTOPO_EDGE_LOCKS
extern char *cdlayer_lock_attr_name;
#endif
void SCULPT_dyntopo_node_layers_add(SculptSession *ss)
{
@ -603,24 +600,15 @@ void SCULPT_dyntopo_node_layers_add(SculptSession *ss)
{CD_DYNTOPO_VERT, NULL, CD_FLAG_TEMPORARY | CD_FLAG_NOCOPY},
{CD_PROP_INT32, dyntopop_node_idx_layer_id, CD_FLAG_TEMPORARY | CD_FLAG_NOCOPY}};
BM_data_layers_ensure(ss->bm, &ss->bm->vdata, vlayers, ARRAY_SIZE(vlayers));
BM_data_layers_ensure(ss->bm, &ss->bm->vdata, vlayers, 3);
ss->cd_vert_mask_offset = CustomData_get_offset(&ss->bm->vdata, CD_PAINT_MASK);
#ifdef WITH_DYNTOPO_EDGE_LOCKS
BMCustomLayerReq elayers[] = {CD_PROP_INT32,
cdlayer_lock_attr_name,
CD_FLAG_TEMPORARY | CD_FLAG_ELEM_NOCOPY | CD_FLAG_ELEM_NOINTERP};
BM_data_layers_ensure(ss->bm, &ss->bm->edata, elayers, 1);
#endif
BMCustomLayerReq flayers[] = {
{CD_PROP_INT32, dyntopop_node_idx_layer_id, CD_FLAG_TEMPORARY | CD_FLAG_NOCOPY},
{CD_PROP_FLOAT, dyntopop_faces_areas_layer_id, CD_FLAG_TEMPORARY | CD_FLAG_NOCOPY},
};
BM_data_layers_ensure(ss->bm, &ss->bm->pdata, flayers, ARRAY_SIZE(flayers));
BM_data_layers_ensure(ss->bm, &ss->bm->pdata, flayers, 2);
// get indices again, as they might have changed after adding new layers
cd_node_layer_index = CustomData_get_named_layer_index(

View File

@ -1536,9 +1536,9 @@ void SCULPT_smooth(Sculpt *sd,
}
}
#ifdef PROXY_ADVANCED
nodes = nodes ? MEM_dupallocN(nodes) : NULL;
#ifdef PROXY_ADVANCED
int datamask = PV_CO | PV_NEIGHBORS | PV_NO | PV_INDEX | PV_MASK;
BKE_pbvh_ensure_proxyarrays(ss, ss->pbvh, nodes, totnode, datamask);
@ -1584,10 +1584,6 @@ void SCULPT_smooth(Sculpt *sd,
BKE_pbvh_gather_proxyarray(ss->pbvh, nodes, totnode);
#endif
}
#ifdef PROXY_ADVANCED
MEM_SAFE_FREE(nodes);
#endif
}
void SCULPT_do_smooth_brush(
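
The PROXY_ADVANCED lines touched here pair MEM_dupallocN (take a private copy of the node array) with MEM_SAFE_FREE (free it and NULL the pointer). A minimal sketch of that pair, assuming the source pointer was itself allocated through MEM_* so its length is known:

#include "MEM_guardedalloc.h"

static void nodes_copy_example(int *nodes_src)
{
  /* Copy has the same length as the original MEM_* allocation (or stays NULL). */
  int *nodes = nodes_src ? MEM_dupallocN(nodes_src) : NULL;

  /* ... use the private copy ... */

  MEM_SAFE_FREE(nodes); /* no-op on NULL; otherwise frees and NULLs the pointer */
}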