Sculpt-dev: cleanup SCULPT_neighbor_coords_average_interior

* The "boundary smooth" (pseudo-bevel) functionality of
  SCULPT_neighbor_coords_average_interior is now its
  own function.
* Fixed a few more pedantic gcc errors.
This commit is contained in:
Joseph Eagar 2021-12-11 13:10:44 -08:00
parent cde09a4b24
commit 792b39e193
3 changed files with 317 additions and 401 deletions

View File

@ -112,7 +112,7 @@ namespace bassrelief {
struct ReliefVertex {
float co[3], origco[3];
float no[3], origno[3];
int index;
uint index;
float targetco[3];
float targetno[3];
float ray[3], ray_dist, origray[3], origray_dist;
@ -148,23 +148,23 @@ struct ReliefOptimizer {
float normalScale, boundWidth;
ReliefOptimizer(const float (*cos)[3],
const MVert *mvert,
int totvert,
MEdge *medge,
int totedge,
const MVert *mvert_,
int totvert_,
MEdge *medge_,
int totedge_,
MPropCol *_debugColors[MAX_BASSRELIEF_DEBUG_COLORS],
const MLoopTri *_mlooptri,
int totlooptri,
const MLoop *mloop,
int totlooptri_,
const MLoop *mloop_,
float optimizeNormalsScale,
float boundSmoothScale,
int boundSmoothSteps)
: totvert(totvert),
totlooptri(totlooptri),
mloop(mloop),
boundSmoothSteps(boundSmoothSteps),
float boundSmoothScale_,
int boundSmoothSteps_)
: totvert(totvert_),
totlooptri(totlooptri_),
mloop(mloop_),
boundSmoothSteps(boundSmoothSteps_),
normalScale(optimizeNormalsScale),
boundWidth(boundSmoothScale)
boundWidth(boundSmoothScale_)
{
rmindis = rmaxdis = bmindis = bmaxdis = 0.0f;
rdis_scale = bdis_scale = 0.0f;
@ -185,10 +185,10 @@ struct ReliefOptimizer {
verts = new ReliefVertex[totvert];
compress_ratio = 0.5f;
const MVert *mv = mvert;
const MVert *mv = mvert_;
ReliefVertex *rv = verts;
for (int i = 0; i < totvert; i++, rv++, mv++) {
for (uint i = 0; i < (uint)totvert; i++, rv++, mv++) {
memset(static_cast<void *>(rv), 0, sizeof(ReliefVertex));
copy_v3_v3(rv->co, cos[i]);
@ -213,8 +213,8 @@ struct ReliefOptimizer {
rv->flag = 0;
}
const MEdge *me = medge;
for (int i = 0; i < totedge; i++, me++) {
const MEdge *me = medge_;
for (int i = 0; i < totedge_; i++, me++) {
verts[me->v1].totneighbor++;
verts[me->v2].totneighbor++;
}
@ -227,8 +227,8 @@ struct ReliefOptimizer {
BLI_memarena_alloc(arena, sizeof(float) * rv->totneighbor * 2));
}
me = medge;
for (int i = 0; i < totedge; i++, me++) {
me = medge_;
for (int i = 0; i < totedge_; i++, me++) {
for (int j = 0; j < 2; j++) {
ReliefVertex *rv = j ? verts + me->v2 : verts + me->v1;
ReliefVertex *rv_other = j ? verts + me->v1 : verts + me->v2;
@ -579,7 +579,7 @@ struct ReliefOptimizer {
void smooth_geodesic()
{
std::vector<float> dists(totvert);
std::vector<float> dists((size_t)totvert);
blender::threading::parallel_for(IndexRange(totvert), 512, [&](IndexRange subrange) {
for (auto i : subrange) {
@ -619,7 +619,7 @@ struct ReliefOptimizer {
void smooth_tangent_field()
{
std::vector<float> dists(totvert);
std::vector<float> dists((size_t)totvert);
blender::threading::parallel_for(IndexRange(totvert), 512, [&](IndexRange subrange) {
for (auto i : subrange) {

View File

@ -397,16 +397,11 @@ MINLINE float safe_shell_angle_to_dist(const float angle)
return (UNLIKELY(angle < 1.e-8f)) ? 1.0f : fabsf(1.0f / th);
}
void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
float result[3],
SculptVertRef vertex,
SculptSmoothArgs *args)
static void SCULPT_neighbor_coords_average_interior_boundary(SculptSession *ss,
float result[3],
SculptVertRef vertex,
SculptSmoothArgs *args)
{
if (args->do_origco) {
// copy_v3_v3(result, SCULPT_vertex_co_get(ss, vertex));
// return;
}
float avg[3] = {0.0f, 0.0f, 0.0f};
const float bevel_smooth_factor = 1.0f - args->bevel_smooth_factor;
@ -467,39 +462,32 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
// bool have_bmesh = ss->bm;
if (weighted || bound_scl) {
int val = SCULPT_vertex_valence_get(ss, vertex);
areas = BLI_array_alloca(areas, val);
int val = SCULPT_vertex_valence_get(ss, vertex);
areas = BLI_array_alloca(areas, val);
BKE_pbvh_get_vert_face_areas(ss->pbvh, vertex, areas, val);
BKE_pbvh_get_vert_face_areas(ss->pbvh, vertex, areas, val);
/* normalize areas, then apply a 0.25/val floor */
/* normalize areas, then apply a 0.25/val floor */
float totarea = 0.0f;
float totarea = 0.0f;
for (int i = 0; i < val; i++) {
totarea += areas[i];
}
for (int i = 0; i < val; i++) {
totarea += areas[i];
}
totarea = totarea != 0.0f ? 1.0f / totarea : 0.0f;
totarea = totarea != 0.0f ? 1.0f / totarea : 0.0f;
float df = 0.25f / (float)val;
float df = 0.25f / (float)val;
for (int i = 0; i < val; i++) {
areas[i] = (areas[i] * totarea) + df;
}
for (int i = 0; i < val; i++) {
areas[i] = (areas[i] * totarea) + df;
}
float *b1 = NULL, btot = 0.0f, b1_orig;
if (bound_scl) {
b1 = SCULPT_temp_cdata_get(vertex, bound_scl);
b1_orig = *b1;
if (1 || is_boundary) {
*b1 = 0.0f;
}
}
b1 = SCULPT_temp_cdata_get(vertex, bound_scl);
b1_orig = *b1;
*b1 = 0.0f;
float vel[3] = {0.0f, 0.0f, 0.0f};
int totvel = 0;
@ -509,13 +497,6 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
MSculptVert *mv2 = SCULPT_vertex_get_sculptvert(ss, ni.vertex);
const float *co2;
if (args->vel_scl) {
// propagate velocities
float *vel2 = SCULPT_temp_cdata_get(ni.vertex, args->vel_scl);
add_v3_v3(vel, vel2);
totvel++;
}
if (!do_origco || mv2->stroke_id != ss->stroke_id) {
co2 = SCULPT_vertex_co_get(ss, ni.vertex);
}
@ -562,12 +543,7 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
do_diffuse = bound_scl != NULL;
if (final_boundary) {
if (totbound == 0) {
copy_v3_v3(bound1, co2);
}
else {
copy_v3_v3(bound2, co2);
}
copy_v3_v3(!totbound ? bound1 : bound2, co2);
totbound++;
}
@ -661,11 +637,6 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
SCULPT_vertex_color_set(ss, ni.vertex, color);
#endif
/* jump above the v,no2 plane, using distance from plane (which doubles after this)*/
// sub_v3_v3(tmp, co);
// madd_v3_v3fl(tmp, no2, th * dot_v3v3(no2, tmp));
// add_v3_v3(tmp, co);
float th = min_ff(b1_orig / radius, bevel_smooth_factor);
/*smooth bevel edges slightly to avoid artifacts.
@ -702,7 +673,6 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
if (btot != 0.0f) {
*b1 /= btot;
//*b1 += (b1_orig - *b1) * 0.95f;
}
else if (b1) {
*b1 = b1_orig;
@ -720,24 +690,9 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
cross_v3_v3v3(tan, bound, no);
normalize_v3(tan);
#if 0
float tan1[3],tan2[3];
cross_v3_v3v3(tan1,bound1,no);
cross_v3_v3v3(tan2,bound2,no);
normalize_v3(tan1);
normalize_v3(tan2);
interp_v3_v3v3(tan1,tan1,tan,0.5f);
interp_v3_v3v3(tan2,tan2,tan,0.5f);
madd_v3_v3fl(avg,tan1,-dot_v3v3(bound1,tan) * 0.975);
madd_v3_v3fl(avg,tan2,dot_v3v3(bound2,tan) * 0.975);
#else
// project to plane, remember we negated bound2 earlier
madd_v3_v3fl(avg, tan, -dot_v3v3(bound1, tan) * 0.75);
madd_v3_v3fl(avg, tan, dot_v3v3(bound2, tan) * 0.75);
#endif
}
if (args->vel_scl && totvel > 1) {
@ -777,7 +732,7 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
}
if (c & (SCULPT_CORNER_FACE_SET | SCULPT_CORNER_SEAM | SCULPT_CORNER_UV)) {
corner_smooth = MAX2(slide_fset, bound_smooth);
corner_smooth = MAX2(slide_fset, 2.0f * bound_smooth);
}
else {
corner_smooth = 2.0f * bound_smooth;
@ -787,6 +742,267 @@ void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
PBVH_CHECK_NAN(co);
}
/* Smooth `vertex` by averaging the coordinates of its neighbors, writing the
 * smoothed position into `result`.  Mesh/sharp boundaries (and, when
 * args->preserve_fset_boundaries is set, face set/seam/UV boundaries) get
 * special treatment so they are not eroded, and corner vertices are kept
 * (mostly) in place.
 *
 * When boundary smoothing is enabled (args->bound_smooth with a bound_scl
 * layer), the work is delegated to the split-out pseudo-bevel variant.
 */
void SCULPT_neighbor_coords_average_interior(SculptSession *ss,
float result[3],
SculptVertRef vertex,
SculptSmoothArgs *args)
{
/* The "boundary smooth" (pseudo-bevel) path lives in its own function now. */
if (args->bound_smooth > 0.0f && args->bound_scl) {
SCULPT_neighbor_coords_average_interior_boundary(ss, result, vertex, args);
return;
}
float avg[3] = {0.0f, 0.0f, 0.0f};
float projection = args->projection;
float slide_fset = args->slide_fset;
float bound_smooth = args->bound_smooth;
bool do_origco = args->do_origco;
MSculptVert *mv = SCULPT_vertex_get_sculptvert(ss, vertex);
/* Coordinates of the first two boundary neighbors found; used after the
 * loop to counteract shrinkage of smooth closed boundaries. */
float bound1[3], bound2[3];
int totbound = 0;
if (do_origco) {
SCULPT_vertex_check_origdata(ss, vertex);
}
float total = 0.0f;
int neighbor_count = 0;
bool check_fsets = args->preserve_fset_boundaries;
int bflag = SCULPT_BOUNDARY_MESH | SCULPT_BOUNDARY_SHARP;
/* Face set sliding must be at least as strong as boundary smoothing. */
slide_fset = MAX2(slide_fset, bound_smooth);
if (check_fsets) {
bflag |= SCULPT_BOUNDARY_FACE_SET | SCULPT_BOUNDARY_SEAM | SCULPT_BOUNDARY_UV;
}
const SculptBoundaryType is_boundary = SCULPT_vertex_is_boundary(ss, vertex, bflag);
/* Operate on original (pre-stroke) coordinates/normals when requested. */
const float *co = do_origco ? mv->origco : SCULPT_vertex_co_get(ss, vertex);
float no[3];
PBVH_CHECK_NAN(co);
if (do_origco) {
copy_v3_v3(no, mv->origno);
}
else {
SCULPT_vertex_normal_get(ss, vertex, no);
}
float startco[3], startno[3];
copy_v3_v3(startco, co);
copy_v3_v3(startno, no);
/* Area weighting is disabled on boundary vertices. */
const bool weighted = args->do_weighted_smooth && !is_boundary;
float *areas = NULL;
SculptCornerType ctype = SCULPT_CORNER_MESH | SCULPT_CORNER_SHARP;
if (check_fsets) {
ctype |= SCULPT_CORNER_FACE_SET | SCULPT_CORNER_SEAM | SCULPT_CORNER_UV;
}
/* Build per-neighbor weights from the areas of the surrounding faces. */
if (weighted) {
int val = SCULPT_vertex_valence_get(ss, vertex);
areas = BLI_array_alloca(areas, val);
BKE_pbvh_get_vert_face_areas(ss->pbvh, vertex, areas, val);
/* normalize areas, then apply a 0.25/val floor */
float totarea = 0.0f;
for (int i = 0; i < val; i++) {
totarea += areas[i];
}
totarea = totarea != 0.0f ? 1.0f / totarea : 0.0f;
float df = 0.25f / (float)val;
for (int i = 0; i < val; i++) {
areas[i] = (areas[i] * totarea) + df;
}
}
/* Accumulated neighbor velocity, used for args->vel_scl smoothing below. */
float vel[3] = {0.0f, 0.0f, 0.0f};
int totvel = 0;
SculptVertexNeighborIter ni;
SCULPT_VERTEX_NEIGHBORS_ITER_BEGIN (ss, vertex, ni) {
MSculptVert *mv2 = SCULPT_vertex_get_sculptvert(ss, ni.vertex);
const float *co2;
if (args->vel_scl) {
/* propagate velocities */
float *vel2 = SCULPT_temp_cdata_get(ni.vertex, args->vel_scl);
add_v3_v3(vel, vel2);
totvel++;
}
/* Neighbors not touched by the current stroke contribute their live
 * coordinates even in origco mode. */
if (!do_origco || mv2->stroke_id != ss->stroke_id) {
co2 = SCULPT_vertex_co_get(ss, ni.vertex);
}
else {
co2 = mv2->origco;
}
neighbor_count++;
float tmp[3], w;
bool ok = false;
if (weighted) {
w = areas[ni.i];
}
else {
w = 1.0f;
}
/* use the new edge api if edges are available, if not estimate boundary
from verts
*/
SculptBoundaryType final_boundary = 0;
if (ni.has_edge) {
final_boundary = SCULPT_edge_is_boundary(ss, ni.edge, bflag);
#ifdef SCULPT_DIAGONAL_EDGE_MARKS
/* Skip (and uncount) BMesh edges not flagged for drawing. */
if (ss->bm) {
BMEdge *e = (BMEdge *)ni.edge.i;
if (!(e->head.hflag & BM_ELEM_DRAW)) {
neighbor_count--;
continue;
}
}
#endif
}
else {
final_boundary = is_boundary & SCULPT_vertex_is_boundary(ss, ni.vertex, bflag);
}
/* Record boundary neighbor positions (first into bound1, second into
 * bound2) for the anti-shrinkage correction after the loop. */
if (final_boundary) {
copy_v3_v3(!totbound ? bound1 : bound2, co2);
totbound++;
}
if (is_boundary) {
/*
Boundary rules:
Hard edges: Boundary vertices use only other boundary vertices.
Slide: Boundary vertices use normal component of non-boundary vertices
*/
bool slide = slide_fset > 0.0f &&
(is_boundary &
(SCULPT_BOUNDARY_FACE_SET | SCULPT_BOUNDARY_SEAM | SCULPT_BOUNDARY_UV));
slide = slide && !final_boundary;
if (slide) {
/* project non-boundary offset onto boundary normal*/
float t[3];
w *= slide_fset;
sub_v3_v3v3(t, co2, co);
madd_v3_v3v3fl(tmp, co, no, dot_v3v3(t, no));
ok = true;
}
else if (final_boundary & is_boundary) {
/* Hard edge: take boundary neighbors verbatim. */
copy_v3_v3(tmp, co2);
ok = true;
}
else {
/* Non-boundary neighbor of a boundary vertex with no sliding:
 * contributes nothing. */
ok = false;
}
}
else {
copy_v3_v3(tmp, co2);
ok = true;
}
if (!ok) {
continue;
}
/* Optionally remove (projection-scaled) normal component; note that in
 * this branch avg accumulates offsets from co, not absolute positions. */
if (projection > 0.0f) {
sub_v3_v3(tmp, co);
float fac = dot_v3v3(tmp, no);
madd_v3_v3fl(tmp, no, -fac * projection);
madd_v3_v3fl(avg, tmp, w);
}
else {
madd_v3_v3fl(avg, tmp, w);
}
total += w;
}
SCULPT_VERTEX_NEIGHBORS_ITER_END(ni);
PBVH_CHECK_NAN(co);
PBVH_CHECK_NAN(avg);
/*try to prevent shrinkage of smooth closed boundaries like circles*/
if (totbound == 2) {
/* find tangent to boundary */
sub_v3_v3(bound1, co);
sub_v3_v3(bound2, co);
negate_v3(bound2);
float bound[3];
add_v3_v3v3(bound, bound1, bound2);
float tan[3];
cross_v3_v3v3(tan, bound, no);
normalize_v3(tan);
/* project to plane, remember we negated bound2 earlier */
madd_v3_v3fl(avg, tan, -dot_v3v3(bound1, tan) * 0.75);
madd_v3_v3fl(avg, tan, dot_v3v3(bound2, tan) * 0.75);
}
/* Blend the averaged neighbor velocity into this vertex's velocity layer. */
if (args->vel_scl && totvel > 1) {
float *final_vel = SCULPT_temp_cdata_get(vertex, args->vel_scl);
mul_v3_fl(vel, 1.0f / (float)totvel);
interp_v3_v3v3(final_vel, final_vel, vel, args->vel_smooth_fac);
}
/* Do not modify corner vertices. */
if (total == 0.0f || (neighbor_count <= 2 && is_boundary)) {
copy_v3_v3(result, co);
return;
}
mul_v3_v3fl(result, avg, 1.0f / total);
PBVH_CHECK_NAN(co);
/* In projection mode avg held offsets from co, so translate back. */
if (projection > 0.0f) {
add_v3_v3(result, co);
}
PBVH_CHECK_NAN(co);
SculptCornerType c = SCULPT_vertex_is_corner(ss, vertex, ctype);
if (!c) {
return;
}
/* Pull face set/seam/UV corners back toward their original position,
 * scaled by the slide factor. */
if (c & (SCULPT_CORNER_FACE_SET | SCULPT_CORNER_SEAM | SCULPT_CORNER_UV)) {
interp_v3_v3v3(result, result, co, 1.0f - slide_fset);
}
PBVH_CHECK_NAN(result);
}
int closest_vec_to_perp(float dir[3], float r_dir2[3], float no[3], float *buckets, float w)
{
int bits = 0;

View File

@ -123,26 +123,6 @@ struct GPU_PBVH_Buffers {
bool show_overlay;
};
#ifdef NEW_ATTR_SYSTEM
typedef struct CDLayerType {
int type;
char gpu_attr_name[32];
GPUVertCompType source_type;
GPUVertCompType comp_type;
uint comp_len;
GPUVertFetchMode fetch_mode;
char gpu_attr_code[8];
} CDLayerType;
typedef struct CDAttrLayers {
CDLayerType type;
uint totlayer;
uint *layers;
int *offsets;
uint *attrs;
} CDAttrLayers;
#endif
typedef struct GPUAttrRef {
uchar domain, type;
ushort cd_offset;
@ -158,13 +138,6 @@ typedef struct PBVHGPUFormat {
uint uv[MAX_GPU_ATTR];
int totcol, totuv;
#ifdef NEW_ATTR_SYSTEM
CDAttrLayers *vertex_attrs;
CDAttrLayers *loop_attrs;
int vertex_attrs_len;
int loop_attrs_len;
#endif
bool active_vcol_only;
bool need_full_render;
bool fast_mode;
@ -172,7 +145,7 @@ typedef struct PBVHGPUFormat {
static PBVHGPUFormat g_vbo_id = {{0}};
static int gpu_pbvh_gpu_make_attr_offs(AttributeDomainMask domain_mask,
static int gpu_pbvh_make_attr_offs(AttributeDomainMask domain_mask,
CustomDataMask type_mask,
const CustomData *vdata,
const CustomData *edata,
@ -185,193 +158,6 @@ static int gpu_pbvh_gpu_make_attr_offs(AttributeDomainMask domain_mask,
const CustomDataLayer *active_vcol_layer,
const CustomDataLayer *render_vcol_layer);
#ifdef NEW_ATTR_SYSTEM
static CDLayerType cd_vert_layers[] = {
{CD_PROP_COLOR, "c", GPU_COMP_F32, GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT, "c"}};
static CDLayerType cd_loop_layers[] = {
{CD_MLOOPUV, "uvs", GPU_COMP_F32, GPU_COMP_F32, 2, GPU_FETCH_FLOAT, "u"}};
static void build_cd_layers(GPUVertFormat *format,
CDAttrLayers *cdattr,
CustomData *cd,
CDLayerType *type)
{
uint *layers = NULL;
int *offsets = NULL;
uint *attrs = NULL;
BLI_array_declare(layers);
BLI_array_declare(offsets);
BLI_array_declare(attrs);
cdattr->type = *type;
cdattr->totlayer = 0;
int act = 0;
int actidx = CustomData_get_active_layer_index(cd, type->type);
for (int i = 0; i < cd->totlayer; i++) {
CustomDataLayer *cl = cd->layers + i;
if (cl->type != type->type || (cl->flag & CD_FLAG_TEMPORARY)) {
continue;
}
cdattr->totlayer++;
/*
g_vbo_id.col[ci++] = GPU_vertformat_attr_add(
&g_vbo_id.format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
g_vbo_id.totcol++;
DRW_make_cdlayer_attr_aliases(&g_vbo_id.format, "c", vdata, cl);
if (idx == act) {
GPU_vertformat_alias_add(&g_vbo_id.format, "ac");
}
*/
uint attr = GPU_vertformat_attr_add(
format, type->gpu_attr_name, type->comp_type, type->comp_len, type->fetch_mode);
BLI_array_append(layers, i);
BLI_array_append(offsets, cl->offset);
BLI_array_append(attrs, attr);
DRW_make_cdlayer_attr_aliases(format, type->gpu_attr_code, cd, cl);
if (i == actidx) {
char buf[128];
BLI_snprintf(buf, sizeof(buf), "a%s", type->gpu_attr_code);
GPU_vertformat_alias_add(&g_vbo_id.format, buf);
}
}
cdattr->offsets = offsets;
cdattr->layers = layers;
cdattr->attrs = attrs;
}
/*must match GPUVertCompType*/
static int gpu_comp_map[] = {
1, // GPU_COMP_I8 = 0,
1, // GPU_COMP_U8,
2, // GPU_COMP_I16,
2, // GPU_COMP_U16,
4, // GPU_COMP_I32,
4, // GPU_COMP_U32,
4, // GPU_COMP_F32,
4 // GPU_COMP_I10,
};
static void convert_gpu_data(void *src,
void *dst,
GPUVertCompType srcType,
GPUVertCompType dstType)
{
if (srcType == dstType) {
memcpy(dst, src, gpu_comp_map[(int)srcType]);
return;
}
double val = 0;
switch (srcType) {
case GPU_COMP_I8:
val = ((float)*((signed char *)(src))) / 127.0;
break;
case GPU_COMP_U8:
val = ((float)*((unsigned char *)(src))) / 255.0;
break;
case GPU_COMP_I16:
val = ((float)*((unsigned short *)(src))) / 32767.0;
break;
case GPU_COMP_U16:
val = ((float)*((signed short *)(src))) / 65535.0;
break;
case GPU_COMP_I32:
val = ((float)*((signed int *)(src))) / 2147483647.0;
break;
case GPU_COMP_U32:
val = ((float)*((unsigned int *)(src))) / 4294967295.0;
break;
case GPU_COMP_F32:
val = *(float *)src;
break;
case GPU_COMP_I10: // handle elsewhere
break;
}
switch (dstType) {
case GPU_COMP_I8:
*((signed char *)dst) = (signed char)(val * 127.0);
break;
case GPU_COMP_U8:
*((unsigned char *)dst) = (unsigned char)(val * 255.0);
break;
case GPU_COMP_I16:
*((signed short *)dst) = (signed short)(val * 32767.0);
break;
case GPU_COMP_U16:
*((unsigned short *)dst) = (unsigned short)(val * 65535.0);
break;
case GPU_COMP_I32:
*((signed int *)dst) = (signed int)(val * 2147483647.0);
break;
case GPU_COMP_U32:
*((unsigned int *)dst) = (unsigned int)(val * 4294967295.0);
break;
case GPU_COMP_F32:
*((float *)dst) = (float)val;
break;
case GPU_COMP_I10: // handle elsewhere
break;
}
}
/*
GPUVertBuf *vert_buf
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, v_index, v->co);
*/
static void set_cd_data_bmesh(
GPUVertBuf *vert_buf, CDAttrLayers *attr_array, int attr_array_len, BMElem *elem, int vertex)
{
for (int i = 0; i < attr_array_len; i++) {
CDAttrLayers *attr = attr_array + i;
int dst_size = gpu_comp_map[(int)attr->type.comp_type];
int src_size = gpu_comp_map[(int)attr->type.source_type];
void *dest = alloca(dst_size *
attr->type.comp_len); // ensure proper alignment by making this a
void *dest2 = dest;
for (int j = 0; j < attr->totlayer; j++) {
void *data = BM_ELEM_CD_GET_VOID_P(elem, attr->offsets[j]);
for (int k = 0; k < attr->type.comp_len; k++) {
convert_gpu_data(data, dest2, attr->type.source_type, attr->type.comp_type);
data = (void *)(((char *)data) + src_size);
dest2 = (void *)(((char *)dest2) + dst_size);
}
GPU_vertbuf_attr_set(vert_buf, attr->attrs[j], vertex, dest);
}
}
}
static void free_cd_layers(CDAttrLayers *cdattr)
{
MEM_SAFE_FREE(cdattr->layers);
MEM_SAFE_FREE(cdattr->offsets);
MEM_SAFE_FREE(cdattr->attrs);
}
#endif
/** \} */
@ -408,17 +194,7 @@ static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
/* Keep so we can test #GPU_USAGE_DYNAMIC buffer use.
* Not that format initialization match in both blocks.
* Do this to keep braces balanced - otherwise indentation breaks. */
#if 0
if (buffers->vert_buf == NULL) {
/* Initialize vertex buffer (match 'VertexBufferFormat'). */
buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&g_vbo_id.format, GPU_USAGE_DYNAMIC);
}
if (GPU_vertbuf_get_data(buffers->vert_buf) == NULL ||
GPU_vertbuf_get_vertex_len(buffers->vert_buf) != vert_len) {
/* Allocate buffer if not allocated yet or size changed. */
GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
}
#else
if (buffers->vert_buf == NULL) {
/* Initialize vertex buffer (match 'VertexBufferFormat'). */
buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&g_vbo_id.format, GPU_USAGE_STATIC);
@ -428,7 +204,6 @@ static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
/* Allocate buffer if not allocated yet or size changed. */
GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
}
#endif
return GPU_vertbuf_get_data(buffers->vert_buf) != NULL;
}
@ -496,7 +271,7 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
GPUAttrRef vcol_refs[MAX_GPU_ATTR];
GPUAttrRef cd_uvs[MAX_GPU_ATTR];
int totcol = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
int totcol = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
CD_MASK_PROP_COLOR | CD_MASK_MLOOPCOL,
vdata,
NULL,
@ -509,7 +284,7 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
active_vcol_layer,
render_vcol_layer);
int cd_uv_count = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
int cd_uv_count = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
CD_MASK_MLOOPUV,
NULL,
NULL,
@ -1281,13 +1056,6 @@ static void gpu_bmesh_vert_to_buffer_copy(BMesh *bm,
/* Vertex should always be visible if it's used by a visible face. */
BLI_assert(!BM_elem_flag_test(v, BM_ELEM_HIDDEN));
#ifdef NEW_ATTR_SYSTEM
// static void set_cd_data_bmesh(GPUVertBuf *vert_buf, CDAttrLayers *attr, BMElem *elem, int
// vertex)
set_cd_data_bmesh(
vert_buf, g_vbo_id.vertex_attrs, g_vbo_id.vertex_attrs_len, (BMElem *)v, v_index);
#endif
short no_short[3];
/* Set coord, normal, and mask */
@ -1305,7 +1073,6 @@ static void gpu_bmesh_vert_to_buffer_copy(BMesh *bm,
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, v_index, no_short);
#ifndef GPU_PERF_TEST
if (show_mask) {
float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
@ -1320,7 +1087,6 @@ static void gpu_bmesh_vert_to_buffer_copy(BMesh *bm,
*empty_mask = *empty_mask && (cmask == 0);
}
# ifndef NEW_ATTR_SYSTEM
if (show_vcol && totvcol > 0) {
for (int i = 0; i < totvcol; i++) {
float color[4];
@ -1341,19 +1107,12 @@ static void gpu_bmesh_vert_to_buffer_copy(BMesh *bm,
const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col[0], v_index, vcol);
}
# else
if (show_vcol && totvcol == 0) { // ensure first vcol attribute is not zero
const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col[0], v_index, vcol);
}
# endif
if (!g_vbo_id.fast_mode) {
/* Add default face sets color to avoid artifacts. */
const uchar face_set[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.fset, v_index, &face_set);
}
#endif
}
/* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
@ -1409,7 +1168,7 @@ void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
}
static int gpu_pbvh_gpu_make_attr_offs(AttributeDomainMask domain_mask,
static int gpu_pbvh_make_attr_offs(AttributeDomainMask domain_mask,
CustomDataMask type_mask,
const CustomData *vdata,
const CustomData *edata,
@ -1501,11 +1260,6 @@ static bool gpu_pbvh_format_equals(PBVHGPUFormat *a, PBVHGPUFormat *b)
bad |= a->fast_mode != b->fast_mode;
bad |= a->need_full_render != b->need_full_render;
#ifdef NEW_ATTR_SYSTEM
bad |= a->vertex_attrs_len != b->vertex_attrs_len;
bad |= a->loop_attrs_len != b->loop_attrs_len;
#endif
bad |= a->pos != b->pos;
bad |= a->fset != b->fset;
bad |= a->msk != b->msk;
@ -1560,50 +1314,11 @@ bool GPU_pbvh_update_attribute_names(CustomData *vdata,
g_vbo_id.totcol = 0;
#ifdef NEW_ATTR_SYSTEM
if (g_vbo_id.loop_attrs) {
free_cd_layers(g_vbo_id.loop_attrs);
}
if (g_vbo_id.vertex_attrs) {
free_cd_layers(g_vbo_id.vertex_attrs);
}
CDAttrLayers *vattr = NULL, *lattr = NULL;
BLI_array_declare(vattr);
BLI_array_declare(lattr);
for (int i = 0; vdata && i < ARRAY_SIZE(cd_vert_layers); i++) {
if (!CustomData_has_layer(vdata, cd_vert_layers[i].type)) {
continue;
}
CDAttrLayers attr;
build_cd_layers(&g_vbo_id.format, &attr, vdata, cd_vert_layers + i);
BLI_array_append(vattr, attr);
}
for (int i = 0; ldata && i < ARRAY_SIZE(cd_loop_layers); i++) {
if (!CustomData_has_layer(ldata, cd_loop_layers[i].type)) {
continue;
}
CDAttrLayers attr;
build_cd_layers(&g_vbo_id.format, &attr, ldata, cd_loop_layers + i);
BLI_array_append(lattr, attr);
}
g_vbo_id.vertex_attrs = vattr;
g_vbo_id.loop_attrs = lattr;
g_vbo_id.vertex_attrs_len = BLI_array_len(vattr);
g_vbo_id.loop_attrs_len = BLI_array_len(lattr);
#endif
#if !defined(NEW_ATTR_SYSTEM) && !defined(GPU_PERF_TEST)
if (active_vcol_type != -1) {
int ci = 0;
GPUAttrRef vcol_layers[MAX_GPU_ATTR];
int totlayer = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
int totlayer = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
CD_MASK_PROP_COLOR | CD_MASK_MLOOPCOL,
vdata,
NULL,
@ -1643,19 +1358,7 @@ bool GPU_pbvh_update_attribute_names(CustomData *vdata,
GPU_vertformat_alias_add(&g_vbo_id.format, "ac");
}
#elif !defined(GPU_PERF_TEST)
// ensure at least one vertex color layer
if (!vdata || !CustomData_has_layer(vdata, CD_PROP_COLOR)) {
g_vbo_id.col[0] = GPU_vertformat_attr_add(
&g_vbo_id.format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
g_vbo_id.totcol = 1;
GPU_vertformat_alias_add(&g_vbo_id.format, "ac");
}
#endif
#ifndef GPU_PERF_TEST
if (!fast_mode) {
g_vbo_id.fset = GPU_vertformat_attr_add(
&g_vbo_id.format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
@ -1676,7 +1379,7 @@ bool GPU_pbvh_update_attribute_names(CustomData *vdata,
render = ldata->layers + idx;
}
int totlayer = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
int totlayer = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
CD_MASK_MLOOPUV,
NULL,
NULL,
@ -1703,7 +1406,6 @@ bool GPU_pbvh_update_attribute_names(CustomData *vdata,
DRW_make_cdlayer_attr_aliases(&g_vbo_id.format, "u", ldata, cl, cl == render, is_active);
}
}
#endif
}
if (!gpu_pbvh_format_equals(&old_format, &g_vbo_id)) {
@ -1780,7 +1482,7 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
GPUAttrRef cd_vcols[MAX_GPU_ATTR];
GPUAttrRef cd_uvs[MAX_GPU_ATTR];
const int cd_vcol_count = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_POINT |
const int cd_vcol_count = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_POINT |
ATTR_DOMAIN_MASK_CORNER,
CD_MASK_PROP_COLOR | CD_MASK_MLOOPCOL,
&bm->vdata,
@ -1794,7 +1496,7 @@ static void GPU_pbvh_bmesh_buffers_update_flat_vcol(GPU_PBVH_Buffers *buffers,
active_vcol_layer,
render_vcol_layer);
int cd_uv_count = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
int cd_uv_count = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
CD_MASK_MLOOPUV,
NULL,
NULL,
@ -2038,7 +1740,7 @@ static void GPU_pbvh_bmesh_buffers_update_indexed(GPU_PBVH_Buffers *buffers,
GPUAttrRef cd_vcols[MAX_GPU_ATTR];
GPUAttrRef cd_uvs[MAX_GPU_ATTR];
int cd_vcol_count = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
int cd_vcol_count = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
CD_MASK_PROP_COLOR | CD_MASK_MLOOPCOL,
&bm->vdata,
NULL,
@ -2051,7 +1753,7 @@ static void GPU_pbvh_bmesh_buffers_update_indexed(GPU_PBVH_Buffers *buffers,
active_vcol_layer,
render_vcol_layer);
int cd_uv_count = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
int cd_uv_count = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
CD_MASK_MLOOPUV,
NULL,
NULL,
@ -2234,7 +1936,7 @@ void GPU_pbvh_bmesh_buffers_update(PBVHGPUBuildArgs *args)
GPUAttrRef cd_vcols[MAX_GPU_ATTR];
GPUAttrRef cd_uvs[MAX_GPU_ATTR];
int cd_vcol_count = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
int cd_vcol_count = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_POINT | ATTR_DOMAIN_MASK_CORNER,
CD_MASK_PROP_COLOR | CD_MASK_MLOOPCOL,
&bm->vdata,
NULL,
@ -2247,7 +1949,7 @@ void GPU_pbvh_bmesh_buffers_update(PBVHGPUBuildArgs *args)
args->active_vcol_layer,
args->render_vcol_layer);
int cd_uv_count = gpu_pbvh_gpu_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
int cd_uv_count = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
CD_MASK_MLOOPUV,
NULL,
NULL,
@ -2377,7 +2079,6 @@ void GPU_pbvh_bmesh_buffers_update(PBVHGPUBuildArgs *args)
&empty_mask,
cd_vcols,
cd_vcol_count);
#ifndef GPU_PERF_TEST
if (have_uv) {
for (int k = 0; k < cd_uv_count; k++) {
@ -2389,7 +2090,6 @@ void GPU_pbvh_bmesh_buffers_update(PBVHGPUBuildArgs *args)
if (show_face_sets) {
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, v_index, face_set_color);
}
#endif
v_index++;
}
}