UV Island support for vertex & edge slide

This ensures that sliding with `Correct UVs` enabled keeps UVs (and any other loop custom-data) contiguous.

Adds two key functions: `BM_vert_loop_groups_data_layer_create` and `BM_vert_loop_groups_data_layer_merge`.

They work by collecting matching loop custom-data around each vertex's loop-fan,
and merging the custom-data back after edits are made.
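
For illustration, a minimal sketch of the intended call pattern (the two function signatures are from this commit; the surrounding setup is assumed):

```c
/* Sketch only: assumes 'bm' is a valid BMesh, 'v' a vertex about to be slid,
 * and 'layer_n' the index of a loop CustomData layer (e.g. a UV layer). */
MemArena *arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);

/* snapshot each contiguous fan of matching custom-data around 'v' */
LinkNode *groups = BM_vert_loop_groups_data_layer_create(bm, v, layer_n, arena);

/* ... edit the vertex's loops; this may scatter the fan's values ... */

/* average each fan's values back together, keeping islands contiguous */
BM_vert_loop_groups_data_layer_merge(bm, groups, layer_n);

BLI_memarena_free(arena);
```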

Thanks to @mont29 for review!
Campbell Barton 2015-02-11 19:40:54 +11:00
parent f2951c23cf
commit 2d1e072686
4 changed files with 289 additions and 178 deletions

source/blender/bmesh/intern/bmesh_interp.c

@@ -40,6 +40,8 @@
#include "BKE_customdata.h"
#include "BKE_multires.h"
#include "BLI_memarena.h"
#include "BLI_linklist.h"
#include "bmesh.h"
#include "intern/bmesh_private.h"
@@ -893,3 +895,179 @@ void BM_elem_float_data_set(CustomData *cd, void *element, int type, const float
float *f = CustomData_bmesh_get(cd, ((BMHeader *)element)->data, type);
if (f) *f = val;
}
/** \name Loop interpolation functions: BM_vert_loop_groups_data_layer_***
 *
 * Handling loop custom-data such as UVs, while keeping fans contiguous, is rather tedious.
 * Especially since a vertex's loops can have multiple CustomData layers,
 * and each layer can have multiple (different) contiguous fans.
 * Said differently, a single vertex's loops may span multiple UV islands.
 *
 * These functions snapshot a vertex's loops, storing each contiguous fan in its own group.
 * The caller can manipulate the loops, then re-combine the CustomData values.
 *
 * While these functions don't explicitly handle multiple layers at once,
 * the caller can simply store its own list (one group-list per layer).
 *
 * \note Currently the values are averaged back together (weighted by loop angle),
 * but other methods to re-combine CustomData loop-fans could be added.
 *
 * \{ */
struct LoopWalkCtx {
/* same for all groups */
int type;
int cd_layer_offset;
MemArena *arena;
/* --- Per loop fan vars --- */
/* reference for this contiguous fan */
const void *data_ref;
int data_len;
/* both arrays are the size of 'BM_vert_face_count(v)',
 * each contiguous fan gets a slice of these arrays */
void **data_array;
float *weight_array;
/* accumulate 'LoopGroupCD.weight' to make unit length */
float weight_accum;
};
/* Store vars to pass into 'CustomData_bmesh_interp' */
struct LoopGroupCD {
/* direct customdata pointer array */
void **data;
/* weights (aligned with 'data') */
float *data_weights;
/* number of loops in the fan */
int data_len;
};
static void bm_loop_walk_add(struct LoopWalkCtx *lwc, BMLoop *l)
{
const float w = BM_loop_calc_face_angle(l);
BM_elem_flag_enable(l, BM_ELEM_INTERNAL_TAG);
lwc->data_array[lwc->data_len] = BM_ELEM_CD_GET_VOID_P(l, lwc->cd_layer_offset);
lwc->weight_array[lwc->data_len] = w;
lwc->weight_accum += w;
lwc->data_len += 1;
}
/**
 * Called recursively, keep stack-usage minimal.
 *
 * \note Only called for fan matching, so we're pretty much safe from overflowing the stack.
 */
static void bm_loop_walk_data(struct LoopWalkCtx *lwc, BMLoop *l_walk)
{
BLI_assert(CustomData_data_equals(lwc->type, lwc->data_ref, BM_ELEM_CD_GET_VOID_P(l_walk, lwc->cd_layer_offset)));
BLI_assert(BM_elem_flag_test(l_walk, BM_ELEM_INTERNAL_TAG) == false);
bm_loop_walk_add(lwc, l_walk);
#define WALK_LOOP(l_test) \
{ \
BMLoop *l_other = l_test; \
if (l_other->v != l_walk->v) { \
l_other = l_other->next; \
} \
BLI_assert(l_other->v == l_walk->v); \
if (!BM_elem_flag_test(l_other, BM_ELEM_INTERNAL_TAG)) { \
if (CustomData_data_equals(lwc->type, lwc->data_ref, BM_ELEM_CD_GET_VOID_P(l_other, lwc->cd_layer_offset))) { \
bm_loop_walk_data(lwc, l_other); \
} \
} \
} (void)0
if (l_walk->radial_next != l_walk) {
WALK_LOOP(l_walk->radial_next);
}
if (l_walk->prev->radial_next != l_walk->prev) {
WALK_LOOP(l_walk->prev->radial_next);
}
}
LinkNode *BM_vert_loop_groups_data_layer_create(BMesh *bm, BMVert *v, int layer_n, MemArena *arena)
{
struct LoopWalkCtx lwc;
LinkNode *groups = NULL;
BMLoop *l;
BMIter liter;
int loop_num;
lwc.type = bm->ldata.layers[layer_n].type;
lwc.cd_layer_offset = bm->ldata.layers[layer_n].offset;
lwc.arena = arena;
loop_num = 0;
BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
BM_elem_flag_disable(l, BM_ELEM_INTERNAL_TAG);
loop_num++;
}
lwc.data_len = 0;
lwc.data_array = BLI_memarena_alloc(lwc.arena, sizeof(void *) * loop_num);
lwc.weight_array = BLI_memarena_alloc(lwc.arena, sizeof(float) * loop_num);
BM_ITER_ELEM (l, &liter, v, BM_LOOPS_OF_VERT) {
if (!BM_elem_flag_test(l, BM_ELEM_INTERNAL_TAG)) {
struct LoopGroupCD *lf = BLI_memarena_alloc(lwc.arena, sizeof(*lf));
int len_prev = lwc.data_len;
lwc.data_ref = BM_ELEM_CD_GET_VOID_P(l, lwc.cd_layer_offset);
/* assign len-last */
lf->data = &lwc.data_array[lwc.data_len];
lf->data_weights = &lwc.weight_array[lwc.data_len];
lwc.weight_accum = 0.0f;
/* new group */
bm_loop_walk_data(&lwc, l);
lf->data_len = lwc.data_len - len_prev;
if (LIKELY(lwc.weight_accum != 0.0f)) {
mul_vn_fl(lf->data_weights, lf->data_len, 1.0f / lwc.weight_accum);
}
else {
fill_vn_fl(lf->data_weights, lf->data_len, 1.0f / (float)lf->data_len);
}
BLI_linklist_prepend_arena(&groups, lf, lwc.arena);
}
}
BLI_assert(lwc.data_len == loop_num);
return groups;
}
static void bm_vert_loop_groups_data_layer_merge__single(BMesh *bm, void *lf_p, void *data, int type)
{
struct LoopGroupCD *lf = lf_p;
int i;
CustomData_bmesh_interp(&bm->ldata, lf->data, lf->data_weights, NULL, lf->data_len, data);
for (i = 0; i < lf->data_len; i++) {
CustomData_copy_elements(type, data, lf->data[i], 1);
}
}
/**
* Take existing custom data and merge each fan's data.
*/
void BM_vert_loop_groups_data_layer_merge(BMesh *bm, LinkNode *groups, int layer_n)
{
int type = bm->ldata.layers[layer_n].type;
int size = CustomData_sizeof(type);
void *data = alloca(size);
do {
bm_vert_loop_groups_data_layer_merge__single(bm, groups->link, data, type);
} while ((groups = groups->next));
}
/** \} */
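
The doc-comment above notes that callers handling several 'math' layers (UVs, vertex colors) simply keep one group-list per layer. A rough sketch of that bookkeeping, assuming `bm`, `v` and `arena` already exist (the transform.c changes below do essentially this inside `slide_origdata_create_data`):

```c
/* Sketch only: collect the indices of loop layers that support interpolation,
 * then snapshot and later merge each one independently. */
int j, layer_math_num = 0;
int *layer_math_map = MEM_mallocN(bm->ldata.totlayer * sizeof(int), __func__);
LinkNode **cd_loop_groups = BLI_memarena_alloc(arena, bm->ldata.totlayer * sizeof(void *));

for (j = 0; j < bm->ldata.totlayer; j++) {
	if (CustomData_layer_has_math(&bm->ldata, j)) {
		layer_math_map[layer_math_num++] = j;
	}
}

for (j = 0; j < layer_math_num; j++) {
	cd_loop_groups[j] = BM_vert_loop_groups_data_layer_create(bm, v, layer_math_map[j], arena);
}

/* ... after the vertex is slid ... */

for (j = 0; j < layer_math_num; j++) {
	BM_vert_loop_groups_data_layer_merge(bm, cd_loop_groups[j], layer_math_map[j]);
}

MEM_freeN(layer_math_map);
```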

source/blender/bmesh/intern/bmesh_interp.h

@@ -27,6 +27,9 @@
* \ingroup bmesh
*/
struct LinkNode;
struct MemArena;
void BM_loop_interp_multires(BMesh *bm, BMLoop *target, BMFace *source);
void BM_vert_interp_from_face(BMesh *bm, BMVert *v, BMFace *source);
@@ -49,5 +52,7 @@ void BM_loop_interp_from_face(BMesh *bm, BMLoop *target, BMFace *source,
const bool do_vertex, const bool do_multires);
void BM_face_multires_bounds_smooth(BMesh *bm, BMFace *f);
struct LinkNode *BM_vert_loop_groups_data_layer_create(BMesh *bm, BMVert *v, int layer_n, struct MemArena *arena);
void BM_vert_loop_groups_data_layer_merge(BMesh *bm, struct LinkNode *groups, int layer_n);
#endif /* __BMESH_INTERP_H__ */

source/blender/editors/transform/transform.c

@@ -50,6 +50,7 @@
#include "BLI_listbase.h"
#include "BLI_string.h"
#include "BLI_ghash.h"
#include "BLI_memarena.h"
#include "BKE_nla.h"
#include "BKE_editmesh_bvh.h"
@@ -5132,7 +5133,8 @@ static void slide_origdata_init_flag(
if ((t->settings->uvcalc_flag & UVCALC_TRANSFORM_CORRECT) &&
/* don't do this at all for non-basis shape keys, too easy to
* accidentally break uv maps or vertex colors then */
(bm->shapenr <= 1))
(bm->shapenr <= 1) &&
CustomData_has_math(&bm->ldata))
{
sod->use_origfaces = true;
}
@@ -5155,26 +5157,89 @@ static void slide_origdata_init_data(
}
}
static void slide_origdata_create_date(
static void slide_origdata_create_data(
TransInfo *t, SlideOrigData *sod,
BMVert **v_pt, unsigned int v_stride, unsigned int v_num)
TransDataGenericSlideVert *sv, unsigned int v_stride, unsigned int v_num)
{
if (sod->use_origfaces) {
BMEditMesh *em = BKE_editmesh_from_object(t->obedit);
BMesh *bm = em->bm;
unsigned int i;
for (i = 0; i < v_num; i++, v_pt = (void *)(((char *)v_pt) + v_stride)) {
const int *layer_math_map;
int layer_index_dst;
int layer_groups_array_size;
int j;
/* over alloc, only 'math' layers are indexed */
sod->layer_math_map = MEM_mallocN(bm->ldata.totlayer * sizeof(int), __func__);
layer_index_dst = 0;
for (j = 0; j < bm->ldata.totlayer; j++) {
if (CustomData_layer_has_math(&bm->ldata, j)) {
sod->layer_math_map[layer_index_dst++] = j;
}
}
BLI_assert(layer_index_dst != 0);
layer_math_map = sod->layer_math_map;
layer_groups_array_size = layer_index_dst * sizeof(void *);
sod->layer_math_map_num = layer_index_dst;
sod->arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
for (i = 0; i < v_num; i++, sv = (void *)(((char *)sv) + v_stride)) {
BMIter fiter;
BMFace *f;
BMVert *v = *v_pt;
BM_ITER_ELEM (f, &fiter, v, BM_FACES_OF_VERT) {
/* copy face data */
BM_ITER_ELEM (f, &fiter, sv->v, BM_FACES_OF_VERT) {
if (!BLI_ghash_haskey(sod->origfaces, f)) {
BMFace *f_copy = BM_face_copy(sod->bm_origfaces, bm, f, true, true);
BLI_ghash_insert(sod->origfaces, f, f_copy);
}
}
/* store cd_loop_groups */
sv->cd_loop_groups = BLI_memarena_alloc(sod->arena, layer_groups_array_size);
for (j = 0; j < layer_index_dst; j++) {
const int layer_nr = layer_math_map[j];
sv->cd_loop_groups[j] = BM_vert_loop_groups_data_layer_create(bm, sv->v, layer_nr, sod->arena);
}
}
}
}
static void slide_origdata_interp_data(
TransInfo *t, SlideOrigData *sod,
TransDataGenericSlideVert *sv, unsigned int v_stride, unsigned int v_num,
bool is_final)
{
if (sod->use_origfaces) {
BMEditMesh *em = BKE_editmesh_from_object(t->obedit);
unsigned int i;
const int *layer_math_map = sod->layer_math_map;
for (i = 0; i < v_num; i++, sv = (void *)(((char *)sv) + v_stride)) {
BMIter fiter;
BMLoop *l;
int j;
BM_ITER_ELEM (l, &fiter, sv->v, BM_LOOPS_OF_VERT) {
BMFace *f_copy; /* the copy of 'f' */
f_copy = BLI_ghash_lookup(sod->origfaces, l->f);
/* only loop data, no vertex data since that contains shape keys,
* and we do not want to mess up other shape keys */
BM_loop_interp_from_face(em->bm, l, f_copy, false, is_final);
/* make sure face-attributes are correct (e.g. MTexPoly) */
BM_elem_attrs_copy(sod->bm_origfaces, em->bm, f_copy, l->f);
}
for (j = 0; j < sod->layer_math_map_num; j++) {
BM_vert_loop_groups_data_layer_merge(em->bm, sv->cd_loop_groups[j], layer_math_map[j]);
}
}
}
}
@@ -5192,6 +5257,13 @@ static void slide_origdata_free_date(
BLI_ghash_free(sod->origfaces, NULL, NULL);
sod->origfaces = NULL;
}
if (sod->arena) {
BLI_memarena_free(sod->arena);
sod->arena = NULL;
}
MEM_SAFE_FREE(sod->layer_math_map);
}
}
@@ -5847,7 +5919,7 @@ static bool createEdgeSlideVerts(TransInfo *t)
bmesh_edit_begin(bm, BMO_OPTYPE_FLAG_UNTAN_MULTIRES);
slide_origdata_init_data(t, &sld->orig_data);
slide_origdata_create_date(t, &sld->orig_data, &sld->sv->v, sizeof(*sld->sv), sld->totsv);
slide_origdata_create_data(t, &sld->orig_data, (TransDataGenericSlideVert *)sld->sv, sizeof(*sld->sv), sld->totsv);
/*create copies of faces for customdata projection*/
sv_array = sld->sv;
@@ -5896,147 +5968,12 @@ void projectEdgeSlideData(TransInfo *t, bool is_final)
{
EdgeSlideData *sld = t->customData;
SlideOrigData *sod = &sld->orig_data;
TransDataEdgeSlideVert *sv;
BMEditMesh *em = sld->em;
int i;
if (sod->use_origfaces == false) {
return;
}
for (i = 0, sv = sld->sv; i < sld->totsv; sv++, i++) {
BMIter fiter;
BMLoop *l;
BM_ITER_ELEM (l, &fiter, sv->v, BM_LOOPS_OF_VERT) {
BMFace *f_copy; /* the copy of 'f' */
BMFace *f_copy_flip; /* the copy of 'f', or the face we flip to on the shorter side. */
f_copy = BLI_ghash_lookup(sod->origfaces, l->f);
/* project onto copied projection face */
f_copy_flip = f_copy;
if (BM_elem_flag_test(l->e, BM_ELEM_SELECT) || BM_elem_flag_test(l->prev->e, BM_ELEM_SELECT)) {
/* the loop is attached to one of the selected edges that are sliding */
BMLoop *l_ed_sel = l;
if (!BM_elem_flag_test(l->e, BM_ELEM_SELECT))
l_ed_sel = l_ed_sel->prev;
if (sld->perc < 0.0f) {
if (BM_vert_in_face(sv->v_b, l_ed_sel->radial_next->f)) {
f_copy_flip = BLI_ghash_lookup(sod->origfaces, l_ed_sel->radial_next->f);
}
}
else if (sld->perc > 0.0f) {
if (BM_vert_in_face(sv->v_a, l_ed_sel->radial_next->f)) {
f_copy_flip = BLI_ghash_lookup(sod->origfaces, l_ed_sel->radial_next->f);
}
}
BLI_assert(f_copy_flip != NULL);
if (!f_copy_flip) {
continue; /* shouldn't happen, but protection */
}
}
else {
/* the loop is attached to only one vertex and not a selected edge,
 * this means we have to find a selected edge's face going in the right direction
 * to copy from, else we get bad distortion, see: [#31080] */
BMIter eiter;
BMEdge *e_sel;
BLI_assert(l->v == sv->v);
BM_ITER_ELEM (e_sel, &eiter, sv->v, BM_EDGES_OF_VERT) {
if (BM_elem_flag_test(e_sel, BM_ELEM_SELECT)) {
break;
}
}
if (e_sel) {
/* warning: if the UVs are not contiguous, this will copy from the _wrong_ UVs.
 * In fact whenever the face being copied is not 'f_copy' this can happen.
 * We could be a lot smarter about this, but would need to deal with every UV channel,
 * or add a way to mask out layers when calling #BM_loop_interp_from_face() */
/*
* + +----------------+
* \ | |
* (this) l_adj| |
* \ | |
* \| e_sel |
* +----------+----------------+ <- the edge we are sliding.
* /|sv->v |
* / | |
* (or) l_adj| |
* / | |
* + +----------------+
* (above)
* 'other connected loops', attached to sv->v slide faces.
*
* NOTE: The faces connected to the edge may not have contiguous UVs,
* so step around the loops to find l_adj.
* However if the 'other loops' are not contiguous it will still give problems.
*
* A full solution to this would have to store
* per-customdata-layer map of which loops are contiguous
* and take this into account when interpolating.
*
* NOTE: If l_adj's edge isn't manifold then interpolate
* the loop from its own face.
* This can happen when the 'other connected loops' are disconnected from the face-fan.
*/
BMLoop *l_adj = NULL;
if (sld->perc < 0.0f) {
if (BM_vert_in_face(sv->v_b, e_sel->l->f)) {
l_adj = e_sel->l;
}
else if (BM_vert_in_face(sv->v_b, e_sel->l->radial_next->f)) {
l_adj = e_sel->l->radial_next;
}
}
else if (sld->perc > 0.0f) {
if (BM_vert_in_face(sv->v_a, e_sel->l->f)) {
l_adj = e_sel->l;
}
else if (BM_vert_in_face(sv->v_a, e_sel->l->radial_next->f)) {
l_adj = e_sel->l->radial_next;
}
}
/* step across to the face */
if (l_adj) {
l_adj = BM_loop_other_edge_loop(l_adj, sv->v);
if (!BM_edge_is_boundary(l_adj->e)) {
l_adj = l_adj->radial_next;
}
else {
/* disconnected face-fan, fallback to self */
l_adj = l;
}
f_copy_flip = BLI_ghash_lookup(sod->origfaces, l_adj->f);
}
}
}
/* only loop data, no vertex data since that contains shape keys,
* and we do not want to mess up other shape keys */
BM_loop_interp_from_face(em->bm, l, f_copy_flip, false, false);
if (is_final) {
BM_loop_interp_multires(em->bm, l, f_copy_flip);
if (f_copy != f_copy_flip) {
BM_loop_interp_multires(em->bm, l, f_copy);
}
}
/* make sure face-attributes are correct (e.g. MTexPoly) */
BM_elem_attrs_copy(sod->bm_origfaces, em->bm, f_copy, l->f);
}
}
slide_origdata_interp_data(t, sod, (TransDataGenericSlideVert *)sld->sv, sizeof(*sld->sv), sld->totsv, is_final);
}
void freeEdgeSlideTempFaces(EdgeSlideData *sld)
@@ -6544,7 +6481,7 @@ static bool createVertSlideVerts(TransInfo *t)
bmesh_edit_begin(bm, BMO_OPTYPE_FLAG_UNTAN_MULTIRES);
slide_origdata_init_data(t, &sld->orig_data);
slide_origdata_create_date(t, &sld->orig_data, &sld->sv->v, sizeof(*sld->sv), sld->totsv);
slide_origdata_create_data(t, &sld->orig_data, (TransDataGenericSlideVert *)sld->sv, sizeof(*sld->sv), sld->totsv);
sld->em = em;
@@ -6564,42 +6501,12 @@ void projectVertSlideData(TransInfo *t, bool is_final)
{
VertSlideData *sld = t->customData;
SlideOrigData *sod = &sld->orig_data;
TransDataVertSlideVert *sv;
BMEditMesh *em = sld->em;
int i;
if (sod->use_origfaces == false) {
return;
}
for (i = 0, sv = sld->sv; i < sld->totsv; sv++, i++) {
BMIter fiter;
BMLoop *l;
BM_ITER_ELEM (l, &fiter, sv->v, BM_LOOPS_OF_VERT) {
BMFace *f_copy; /* the copy of 'f' */
BMFace *f_copy_flip; /* the copy of 'f', or the face we flip to on the shorter side. */
f_copy = BLI_ghash_lookup(sod->origfaces, l->f);
/* project onto copied projection face */
f_copy_flip = f_copy;
/* only loop data, no vertex data since that contains shape keys,
* and we do not want to mess up other shape keys */
BM_loop_interp_from_face(em->bm, l, f_copy_flip, false, false);
if (is_final) {
BM_loop_interp_multires(em->bm, l, f_copy_flip);
if (f_copy != f_copy_flip) {
BM_loop_interp_multires(em->bm, l, f_copy);
}
}
/* make sure face-attributes are correct (e.g. MTexPoly) */
BM_elem_attrs_copy(sod->bm_origfaces, em->bm, f_copy, l->f);
}
}
slide_origdata_interp_data(t, sod, (TransDataGenericSlideVert *)sld->sv, sizeof(*sld->sv), sld->totsv, is_final);
}
void freeVertSlideTempFaces(VertSlideData *sld)

source/blender/editors/transform/transform.h

@@ -200,9 +200,19 @@ typedef struct TransDataNla {
struct LinkNode;
struct GHash;
typedef struct TransDataEdgeSlideVert {
struct BMVert *v_a, *v_b;
/* header of TransDataEdgeSlideVert, TransDataVertSlideVert */
typedef struct TransDataGenericSlideVert {
struct BMVert *v;
struct LinkNode **cd_loop_groups;
} TransDataGenericSlideVert;
typedef struct TransDataEdgeSlideVert {
/* TransDataGenericSlideVert */
struct BMVert *v;
struct LinkNode **cd_loop_groups;
/* end generic */
struct BMVert *v_a, *v_b;
float v_co_orig[3];
float edge_len;
@@ -220,6 +230,13 @@ typedef struct SlideOrigData {
bool use_origfaces;
struct GHash *origfaces;
struct BMesh *bm_origfaces;
struct MemArena *arena;
/* number of math BMLoop layers */
int layer_math_map_num;
/* array of size 'layer_math_map_num',
 * maps TransDataGenericSlideVert.cd_loop_groups index to the absolute CustomData layer index */
int *layer_math_map;
} SlideOrigData;
typedef struct EdgeSlideData {
@@ -241,7 +258,11 @@ typedef struct EdgeSlideData {
typedef struct TransDataVertSlideVert {
/* TransDataGenericSlideVert */
BMVert *v;
struct LinkNode **cd_loop_groups;
/* end generic */
float co_orig_3d[3];
float co_orig_2d[2];
float (*co_link_orig_3d)[3];
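
The `TransDataGenericSlideVert` header above is what lets `slide_origdata_create_data` and `slide_origdata_interp_data` service both edge and vert slide: each concrete slide-vert struct begins with the same two members, so shared code casts to the generic type and steps through the array by the concrete element size. A sketch of the pattern (illustrative function name, not from the commit):

```c
/* Sketch only: both TransDataEdgeSlideVert and TransDataVertSlideVert begin
 * with the TransDataGenericSlideVert members, so one walker handles both. */
static void slide_vert_walk_example(TransDataGenericSlideVert *sv,
                                    unsigned int v_stride, unsigned int v_num)
{
	unsigned int i;
	for (i = 0; i < v_num; i++, sv = (void *)(((char *)sv) + v_stride)) {
		/* 'sv->v' and 'sv->cd_loop_groups' are valid for either type */
	}
}

/* e.g. for edge slide, pass the concrete element size as the stride:
 *   slide_vert_walk_example((TransDataGenericSlideVert *)sld->sv,
 *                           sizeof(*sld->sv), sld->totsv);
 */
```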