Sculpt: PBVH Draw Support for EEVEE

This patch adds support for PBVH drawing in EEVEE.

Notes:
  # PBVH_FACES only.  For Multires we'll need an API to get/cache attributes.  DynTopo support will be merged in later with sculpt-dev's DynTopo implementation.
  # Supports vertex color and UV attributes only; other types can be added fairly easily though.
  # Workbench only sends the active vertex color and UV layers to the GPU.
  # Added a new draw engine API method, DRW_cdlayer_attr_aliases_add.  Please review.
  # The vertex format object is now stored in the pbvh.

Reviewed By: Clément Foucault & Brecht Van Lommel & Jeroen Bakker
Differential Revision: https://developer.blender.org/D13897
Ref D13897
This commit is contained in:
Joseph Eagar 2022-06-08 12:30:01 -07:00
parent 9c28f0eb37
commit 285a68b7bb
Notes: blender-bot 2023-02-14 00:29:15 +01:00
Referenced by issue #104040, Regression: contact shadows doesn't update in sculpt mode
Referenced by issue #101925, Regression: Cycles Don`t updated Color Attributes
Referenced by issue #100906, Regression: Lag in sculpt mode when navigating around an object with modifiers
Referenced by issue #99196, Regression: Rendering in the viewport is constantly restarted by some sculpting brushes and the position of mouse cursor
Referenced by issue #98886, Regression: Smooth shading issues with multiresolution
Referenced by issue #98842, Regression: Switching shading color type crashes blender (using the sculpting template)
Referenced by issue #98040, PBVH Draw Support for EEVEE
12 changed files with 661 additions and 184 deletions

View File

@ -221,7 +221,8 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
void (*draw_fn)(void *user_data, struct GPU_PBVH_Buffers *buffers),
void *user_data);
void *user_data,
bool full_render);
void BKE_pbvh_draw_debug_cb(
PBVH *pbvh,

View File

@ -2293,8 +2293,8 @@ bool BKE_sculptsession_use_pbvh_draw(const Object *ob, const View3D *v3d)
if (BKE_pbvh_type(ss->pbvh) == PBVH_FACES) {
/* Regular mesh only draws from PBVH without modifiers and shape keys. */
const bool full_shading = (v3d && (v3d->shading.type > OB_SOLID));
return !(ss->shapekey_active || ss->deform_modifiers_active || full_shading);
return !(ss->shapekey_active || ss->deform_modifiers_active);
}
/* Multires and dyntopo always draw directly from the PBVH. */

View File

@ -714,6 +714,10 @@ void BKE_pbvh_free(PBVH *pbvh)
MEM_SAFE_FREE(pbvh->vert_bitmap);
if (pbvh->vbo_id) {
GPU_pbvh_free_format(pbvh->vbo_id);
}
MEM_freeN(pbvh);
}
@ -1299,6 +1303,17 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
PBVH *pbvh = data->pbvh;
PBVHNode *node = data->nodes[n];
CustomData *vdata, *ldata;
if (!pbvh->bm) {
vdata = pbvh->vdata;
ldata = pbvh->ldata;
}
else {
vdata = &pbvh->bm->vdata;
ldata = &pbvh->bm->ldata;
}
if (node->flag & PBVH_RebuildDrawBuffers) {
switch (pbvh->type) {
case PBVH_GRIDS:
@ -1326,7 +1341,8 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
const int update_flags = pbvh_get_buffers_update_flags(pbvh);
switch (pbvh->type) {
case PBVH_GRIDS:
GPU_pbvh_grid_buffers_update(node->draw_buffers,
GPU_pbvh_grid_buffers_update(pbvh->vbo_id,
node->draw_buffers,
pbvh->subdiv_ccg,
pbvh->grids,
pbvh->grid_flag_mats,
@ -1339,26 +1355,22 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
update_flags);
break;
case PBVH_FACES: {
CustomDataLayer *layer = NULL;
eAttrDomain domain;
BKE_pbvh_get_color_layer(pbvh->mesh, &layer, &domain);
GPU_pbvh_mesh_buffers_update(node->draw_buffers,
GPU_pbvh_mesh_buffers_update(pbvh->vbo_id,
node->draw_buffers,
pbvh->verts,
pbvh->vert_normals,
vdata,
ldata,
CustomData_get_layer(pbvh->vdata, CD_PAINT_MASK),
layer ? layer->data : NULL,
layer ? layer->type : -1,
layer ? domain : ATTR_DOMAIN_AUTO,
CustomData_get_layer(pbvh->pdata, CD_SCULPT_FACE_SETS),
pbvh->face_sets_color_seed,
pbvh->face_sets_color_default,
update_flags);
update_flags,
pbvh->vert_normals);
break;
}
case PBVH_BMESH:
GPU_pbvh_bmesh_buffers_update(node->draw_buffers,
GPU_pbvh_bmesh_buffers_update(pbvh->vbo_id,
node->draw_buffers,
pbvh->bm,
node->bm_faces,
node->bm_unique_verts,
@ -1379,8 +1391,49 @@ void pbvh_free_draw_buffers(PBVH *pbvh, PBVHNode *node)
}
}
static void pbvh_update_draw_buffers(PBVH *pbvh, PBVHNode **nodes, int totnode, int update_flag)
static void pbvh_update_draw_buffers(
PBVH *pbvh, PBVHNode **nodes, int totnode, int update_flag, bool full_render)
{
const CustomData *vdata;
const CustomData *ldata;
if (!pbvh->vbo_id) {
pbvh->vbo_id = GPU_pbvh_make_format();
}
switch (pbvh->type) {
case PBVH_BMESH:
if (!pbvh->bm) {
/* BMesh hasn't been created yet */
return;
}
vdata = &pbvh->bm->vdata;
ldata = &pbvh->bm->ldata;
break;
case PBVH_FACES:
vdata = pbvh->vdata;
ldata = pbvh->ldata;
break;
case PBVH_GRIDS:
ldata = vdata = NULL;
break;
}
const bool active_attrs_only = !full_render;
/* rebuild all draw buffers if attribute layout changed */
if (GPU_pbvh_attribute_names_update(pbvh->type, pbvh->vbo_id, vdata, ldata, active_attrs_only)) {
/* attribute layout changed; force rebuild */
for (int i = 0; i < pbvh->totnode; i++) {
PBVHNode *node = pbvh->nodes + i;
if (node->flag & PBVH_Leaf) {
node->flag |= PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw;
}
}
}
if ((update_flag & PBVH_RebuildDrawBuffers) || ELEM(pbvh->type, PBVH_GRIDS, PBVH_BMESH)) {
/* Free buffers uses OpenGL, so not in parallel. */
for (int n = 0; n < totnode; n++) {
@ -2783,8 +2836,11 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
void (*draw_fn)(void *user_data, GPU_PBVH_Buffers *buffers),
void *user_data)
void *user_data,
bool full_render)
{
pbvh->draw_cache_invalid = false;
PBVHNode **nodes;
int totnode;
int update_flag = 0;
@ -2808,7 +2864,7 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
/* Update draw buffers. */
if (totnode != 0 && (update_flag & (PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers))) {
pbvh_update_draw_buffers(pbvh, nodes, totnode, update_flag);
pbvh_update_draw_buffers(pbvh, nodes, totnode, update_flag, full_render);
}
MEM_SAFE_FREE(nodes);
@ -3156,6 +3212,11 @@ bool BKE_pbvh_is_drawing(const PBVH *pbvh)
return pbvh->is_drawing;
}
bool BKE_pbvh_draw_cache_invalid(const PBVH *pbvh)
{
return pbvh->draw_cache_invalid;
}
void BKE_pbvh_is_drawing_set(PBVH *pbvh, bool val)
{
pbvh->is_drawing = val;
@ -3229,8 +3290,3 @@ void BKE_pbvh_ensure_node_loops(PBVH *pbvh)
MEM_SAFE_FREE(visit);
}
bool BKE_pbvh_draw_cache_invalid(const PBVH *pbvh)
{
return pbvh->draw_cache_invalid;
}

View File

@ -376,6 +376,9 @@ static bool pbvh_bmesh_node_limit_ensure(PBVH *pbvh, int node_index)
return false;
}
/* Trigger draw manager cache invalidation. */
pbvh->draw_cache_invalid = true;
/* For each BMFace, store the AABB and AABB centroid */
BBC *bbc_array = MEM_mallocN(sizeof(BBC) * bm_faces_size, "BBC");

View File

@ -2,6 +2,8 @@
#pragma once
struct PBVHGPUFormat;
/** \file
* \ingroup bke
*/
@ -123,9 +125,7 @@ struct PBVHNode {
PBVHPixelsNode pixels;
};
typedef enum {
PBVH_DYNTOPO_SMOOTH_SHADING = 1,
} PBVHFlags;
typedef enum { PBVH_DYNTOPO_SMOOTH_SHADING = 1 } PBVHFlags;
typedef struct PBVHBMeshLog PBVHBMeshLog;
@ -204,6 +204,8 @@ struct PBVH {
/* Used by DynTopo to invalidate the draw cache. */
bool draw_cache_invalid;
struct PBVHGPUFormat *vbo_id;
};
/* pbvh.c */

View File

@ -23,6 +23,9 @@ struct DrawEngineType;
struct GHash;
struct GPUMaterial;
struct GPUOffScreen;
struct GPUVertFormat;
struct CustomDataLayer;
struct CustomData;
struct GPUViewport;
struct ID;
struct Main;
@ -218,6 +221,12 @@ void DRW_opengl_context_activate(bool drw_state);
*/
void DRW_draw_cursor_2d_ex(const struct ARegion *region, const float cursor[2]);
void DRW_cdlayer_attr_aliases_add(struct GPUVertFormat *format,
char *base_name,
const struct CustomData *data,
const struct CustomDataLayer *cl,
bool is_active_render,
bool is_active_layer);
#ifdef __cplusplus
}
#endif

View File

@ -97,7 +97,7 @@ static void workbench_cache_sculpt_populate(WORKBENCH_PrivateData *wpd,
{
const bool use_single_drawcall = !ELEM(color_type, V3D_SHADING_MATERIAL_COLOR);
if (use_single_drawcall) {
DRWShadingGroup *grp = workbench_material_setup(wpd, ob, 0, color_type, NULL);
DRWShadingGroup *grp = workbench_material_setup(wpd, ob, ob->actcol, color_type, NULL);
DRW_shgroup_call_sculpt(grp, ob, false, false);
}
else {
@ -323,7 +323,8 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
}
}
if (is_sculpt_pbvh && color_type == V3D_SHADING_TEXTURE_COLOR) {
if (is_sculpt_pbvh && color_type == V3D_SHADING_TEXTURE_COLOR &&
BKE_pbvh_type(ob->sculpt->pbvh) != PBVH_FACES) {
/* Force use of material color for sculpt. */
color_type = V3D_SHADING_MATERIAL_COLOR;
}

View File

@ -3410,3 +3410,35 @@ void DRW_batch_cache_free_old(Object *ob, int ctime)
}
/** \} */
/* Register GPU vertex-format aliases for a CustomData attribute layer so that
 * shaders can look the attribute up under its generic ("<base><safe-name>"),
 * auto ("a<safe-name>"), active-render ("<base>") and active-display
 * ("a<base>") names. Note: `data` is currently unused here. */
void DRW_cdlayer_attr_aliases_add(GPUVertFormat *format,
                                  char *base_name,
                                  const CustomData *data,
                                  const CustomDataLayer *cl,
                                  bool is_active_render,
                                  bool is_active_layer)
{
  char alias[32], safe_name[GPU_MAX_SAFE_ATTR_NAME];

  GPU_vertformat_safe_attr_name(cl->name, safe_name, GPU_MAX_SAFE_ATTR_NAME);

  /* Attribute layer name. */
  BLI_snprintf(alias, sizeof(alias), "%s%s", base_name, safe_name);
  GPU_vertformat_alias_add(format, alias);

  /* Auto layer name. */
  BLI_snprintf(alias, sizeof(alias), "a%s", safe_name);
  GPU_vertformat_alias_add(format, alias);

  /* Active render layer name. */
  if (is_active_render) {
    GPU_vertformat_alias_add(format, base_name);
  }

  /* Active display layer name. */
  if (is_active_layer) {
    BLI_snprintf(alias, sizeof(alias), "a%s", base_name);
    GPU_vertformat_alias_add(format, alias);
  }
}

View File

@ -744,7 +744,8 @@ static bool mesh_batch_cache_valid(Object *object, Mesh *me)
}
if (object->sculpt && object->sculpt->pbvh) {
if (cache->pbvh_is_drawing != BKE_pbvh_is_drawing(object->sculpt->pbvh)) {
if (cache->pbvh_is_drawing != BKE_pbvh_is_drawing(object->sculpt->pbvh) ||
BKE_pbvh_draw_cache_invalid(object->sculpt->pbvh)) {
return false;
}

View File

@ -1238,7 +1238,7 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
&update_frustum,
&draw_frustum,
(void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb,
scd);
scd, scd->use_mats);
if (SCULPT_DEBUG_BUFFERS) {
int debug_node_nr = 0;

View File

@ -10,6 +10,7 @@
#include <stddef.h>
#include "BKE_attribute.h"
#include "BKE_pbvh.h"
#ifdef __cplusplus
extern "C" {
@ -20,6 +21,7 @@ struct CCGElem;
struct CCGKey;
struct DMFlagMat;
struct GSet;
struct TableGSet;
struct MLoop;
struct MLoopCol;
struct MLoopTri;
@ -29,6 +31,9 @@ struct MVert;
struct Mesh;
struct PBVH;
struct SubdivCCG;
struct CustomData;
typedef struct PBVHGPUFormat PBVHGPUFormat;
/**
* Buffers for drawing from PBVH grids.
@ -78,36 +83,46 @@ enum {
};
/**
* Creates a vertex buffer (coordinate, normal, color) and,
* if smooth shading, an element index buffer.
* Threaded: do not call any functions that use OpenGL calls!
*/
void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id,
GPU_PBVH_Buffers *buffers,
const struct MVert *mvert,
const float (*vert_normals)[3],
const CustomData *vdata,
const CustomData *ldata,
const float *vmask,
const void *vcol_data,
int vcol_type,
eAttrDomain vcol_domain,
const int *sculpt_face_sets,
int face_sets_color_seed,
int face_sets_color_default,
int update_flags);
const int face_sets_color_seed,
const int face_sets_color_default,
const int update_flags,
const float (*vert_normals)[3]);
bool GPU_pbvh_attribute_names_update(PBVHType pbvh_type,
PBVHGPUFormat *vbo_id,
const struct CustomData *vdata,
const struct CustomData *ldata,
bool active_attrs_only);
/**
* Creates a vertex buffer (coordinate, normal, color) and,
* if smooth shading, an element index buffer.
* Threaded: do not call any functions that use OpenGL calls!
*/
void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
void GPU_pbvh_bmesh_buffers_update(PBVHGPUFormat *vbo_id,
struct GPU_PBVH_Buffers *buffers,
struct BMesh *bm,
struct GSet *bm_faces,
struct GSet *bm_unique_verts,
struct GSet *bm_other_verts,
int update_flags);
const int update_flags);
/**
* Threaded: do not call any functions that use OpenGL calls!
*/
void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
void GPU_pbvh_grid_buffers_update(PBVHGPUFormat *vbo_id,
GPU_PBVH_Buffers *buffers,
struct SubdivCCG *subdiv_ccg,
struct CCGElem **grids,
const struct DMFlagMat *grid_flag_mats,
@ -120,7 +135,8 @@ void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
int update_flags);
/**
* Finish update. Not thread safe, must run in OpenGL main thread.
* Finish update. Not thread safe, must run in OpenGL main
* thread.
*/
void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers);
@ -133,9 +149,11 @@ void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers);
struct GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires);
short GPU_pbvh_buffers_material_index_get(GPU_PBVH_Buffers *buffers);
bool GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers *buffers);
PBVHGPUFormat *GPU_pbvh_make_format();
void GPU_pbvh_free_format(PBVHGPUFormat *vbo_id);
#ifdef __cplusplus
}
#endif

View File

@ -9,18 +9,23 @@
#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include "MEM_guardedalloc.h"
#include "BLI_alloca.h"
#include "BLI_array.h"
#include "BLI_bitmap.h"
#include "BLI_ghash.h"
#include "BLI_hash.h"
#include "BLI_math.h"
#include "BLI_math_color.h"
#include "BLI_math_color_blend.h"
#include "BLI_string.h"
#include "BLI_utildefines.h"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_userdef_types.h"
@ -28,6 +33,7 @@
#include "BKE_attribute.h"
#include "BKE_ccg.h"
#include "BKE_customdata.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"
@ -36,17 +42,12 @@
#include "GPU_batch.h"
#include "GPU_buffers.h"
#include "DRW_engine.h"
#include "gpu_private.h"
#include "bmesh.h"
/* XXX: the rest of the code in this file is used for optimized PBVH
* drawing and doesn't interact at all with the buffer code above */
/* -------------------------------------------------------------------- */
/** \name Private Types
* \{ */
struct GPU_PBVH_Buffers {
GPUIndexBuf *index_buf, *index_buf_fast;
GPUIndexBuf *index_lines_buf, *index_lines_buf_fast;
@ -88,10 +89,52 @@ struct GPU_PBVH_Buffers {
bool show_overlay;
};
static struct {
typedef struct GPUAttrRef {
uchar domain, type;
ushort cd_offset;
int layer_idx;
} GPUAttrRef;
#define MAX_GPU_ATTR 256
typedef struct PBVHGPUFormat {
GPUVertFormat format;
uint pos, nor, msk, col, fset;
} g_vbo_id = {{0}};
uint pos, nor, msk, fset;
uint col[MAX_GPU_ATTR];
uint uv[MAX_GPU_ATTR];
int totcol, totuv;
/* Upload only the active color and UV attributes,
* used for workbench mode. */
bool active_attrs_only;
} PBVHGPUFormat;
/* Allocate a zero-initialized PBVH GPU vertex-format descriptor and seed it
 * with the default PBVH_FACES layout (no custom attribute layers).
 * The caller owns the result; free it with GPU_pbvh_free_format().
 *
 * Note: declared with `(void)` — an empty parameter list in a C function
 * definition is an obsolescent unprototyped declarator. */
PBVHGPUFormat *GPU_pbvh_make_format(void)
{
  PBVHGPUFormat *vbo_id = MEM_callocN(sizeof(PBVHGPUFormat), "PBVHGPUFormat");

  GPU_pbvh_attribute_names_update(PBVH_FACES, vbo_id, NULL, NULL, false);

  return vbo_id;
}
/* Free a format descriptor created by GPU_pbvh_make_format().
 * MEM_SAFE_FREE is presumed NULL-safe, so passing NULL is a no-op. */
void GPU_pbvh_free_format(PBVHGPUFormat *vbo_id)
{
  MEM_SAFE_FREE(vbo_id);
}
static int gpu_pbvh_make_attr_offs(eAttrDomainMask domain_mask,
eCustomDataMask type_mask,
const CustomData *vdata,
const CustomData *edata,
const CustomData *ldata,
const CustomData *pdata,
GPUAttrRef r_cd_attrs[MAX_GPU_ATTR],
bool active_only,
int active_type,
int active_domain,
const CustomDataLayer *active_layer,
const CustomDataLayer *render_layer);
/** \} */
@ -101,20 +144,6 @@ static struct {
void gpu_pbvh_init()
{
/* Initialize vertex buffer (match 'VertexBufferFormat'). */
if (g_vbo_id.format.attr_len == 0) {
g_vbo_id.pos = GPU_vertformat_attr_add(
&g_vbo_id.format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
g_vbo_id.nor = GPU_vertformat_attr_add(
&g_vbo_id.format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
/* TODO: Do not allocate these `.msk` and `.col` when they are not used. */
g_vbo_id.msk = GPU_vertformat_attr_add(
&g_vbo_id.format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
g_vbo_id.col = GPU_vertformat_attr_add(
&g_vbo_id.format, "ac", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
g_vbo_id.fset = GPU_vertformat_attr_add(
&g_vbo_id.format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
}
void gpu_pbvh_exit()
@ -122,33 +151,37 @@ void gpu_pbvh_exit()
/* Nothing to do. */
}
/* Return the active CustomData layer of the given type, or NULL if none. */
static CustomDataLayer *get_active_layer(const CustomData *cdata, int type)
{
  const int index = CustomData_get_active_layer_index(cdata, type);

  if (index == -1) {
    return NULL;
  }

  return cdata->layers + index;
}
/* Return the render CustomData layer of the given type, or NULL if none. */
static CustomDataLayer *get_render_layer(const CustomData *cdata, int type)
{
  const int index = CustomData_get_render_layer_index(cdata, type);

  if (index == -1) {
    return NULL;
  }

  return cdata->layers + index;
}
/* Allocates a non-initialized buffer to be sent to the GPU.
 * A return value of false indicates that the memory map failed. */
static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
static bool gpu_pbvh_vert_buf_data_set(PBVHGPUFormat *vbo_id,
GPU_PBVH_Buffers *buffers,
uint vert_len)
{
/* Keep so we can test #GPU_USAGE_DYNAMIC buffer use.
 * Note that format initialization must match in both blocks.
 * Do this to keep braces balanced - otherwise indentation breaks. */
#if 0
if (buffers->vert_buf == NULL) {
/* Initialize vertex buffer (match 'VertexBufferFormat'). */
buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&g_vbo_id.format, GPU_USAGE_DYNAMIC);
GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
}
else if (vert_len != buffers->vert_buf->vertex_len) {
GPU_vertbuf_data_resize(buffers->vert_buf, vert_len);
}
#else
if (buffers->vert_buf == NULL) {
/* Initialize vertex buffer (match 'VertexBufferFormat'). */
buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&g_vbo_id.format, GPU_USAGE_STATIC);
buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&vbo_id->format, GPU_USAGE_STATIC);
}
if (GPU_vertbuf_get_data(buffers->vert_buf) == NULL ||
GPU_vertbuf_get_vertex_len(buffers->vert_buf) != vert_len) {
/* Allocate buffer if not allocated yet or size changed. */
GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
}
#endif
return GPU_vertbuf_get_data(buffers->vert_buf) != NULL;
}
@ -194,25 +227,62 @@ static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt,
sculpt_face_sets[lt->poly] > SCULPT_FACE_SET_NONE);
}
void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id,
GPU_PBVH_Buffers *buffers,
const MVert *mvert,
const float (*vert_normals)[3],
const CustomData *vdata,
const CustomData *ldata,
const float *vmask,
const void *vcol_data,
int vcol_type,
eAttrDomain vcol_domain,
const int *sculpt_face_sets,
int face_sets_color_seed,
int face_sets_color_default,
int update_flags)
int update_flags,
const float (*vert_normals)[3])
{
const MPropCol *vtcol = vcol_type == CD_PROP_COLOR ? vcol_data : NULL;
const MLoopCol *vcol = vcol_type == CD_PROP_BYTE_COLOR ? vcol_data : NULL;
const float(*f3col)[3] = vcol_type == CD_PROP_FLOAT3 ? vcol_data : NULL;
GPUAttrRef vcol_refs[MAX_GPU_ATTR];
GPUAttrRef cd_uvs[MAX_GPU_ATTR];
const bool color_loops = vcol_domain == ATTR_DOMAIN_CORNER;
const bool show_vcol = (vtcol || vcol || f3col) &&
(update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
Mesh me_query;
BKE_id_attribute_copy_domains_temp(ID_ME, vdata, NULL, ldata, NULL, NULL, &me_query.id);
CustomDataLayer *actcol = BKE_id_attributes_active_color_get(&me_query.id);
eAttrDomain actcol_domain = actcol ? BKE_id_attribute_domain(&me_query.id, actcol) :
ATTR_DOMAIN_AUTO;
CustomDataLayer *rendercol = BKE_id_attributes_render_color_get(&me_query.id);
int totcol;
if (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) {
totcol = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_COLOR,
CD_MASK_COLOR_ALL,
vdata,
NULL,
ldata,
NULL,
vcol_refs,
vbo_id->active_attrs_only,
actcol ? actcol->type : 0,
actcol_domain,
actcol,
rendercol);
}
else {
totcol = 0;
}
int totuv = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
CD_MASK_MLOOPUV,
NULL,
NULL,
ldata,
NULL,
cd_uvs,
vbo_id->active_attrs_only,
CD_MLOOPUV,
ATTR_DOMAIN_CORNER,
get_active_layer(ldata, CD_MLOOPUV),
get_render_layer(ldata, CD_MLOOPUV));
const bool show_mask = vmask && (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
const bool show_face_sets = sculpt_face_sets &&
@ -224,25 +294,106 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
const int totelem = buffers->tot_tri * 3;
/* Build VBO */
if (gpu_pbvh_vert_buf_data_set(buffers, totelem)) {
if (gpu_pbvh_vert_buf_data_set(vbo_id, buffers, totelem)) {
GPUVertBufRaw pos_step = {0};
GPUVertBufRaw nor_step = {0};
GPUVertBufRaw msk_step = {0};
GPUVertBufRaw fset_step = {0};
GPUVertBufRaw col_step = {0};
GPUVertBufRaw uv_step = {0};
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.pos, &pos_step);
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.nor, &nor_step);
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.msk, &msk_step);
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.fset, &fset_step);
if (show_vcol) {
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.col, &col_step);
}
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->pos, &pos_step);
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->nor, &nor_step);
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->msk, &msk_step);
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->fset, &fset_step);
/* calculate normal for each polygon only once */
uint mpoly_prev = UINT_MAX;
short no[3] = {0, 0, 0};
if (totuv > 0) {
for (int uv_i = 0; uv_i < totuv; uv_i++) {
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->uv[uv_i], &uv_step);
GPUAttrRef *ref = cd_uvs + uv_i;
CustomDataLayer *layer = ldata->layers + ref->layer_idx;
MLoopUV *muv = layer->data;
for (uint i = 0; i < buffers->face_indices_len; i++) {
const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
if (!gpu_pbvh_is_looptri_visible(lt, mvert, buffers->mloop, sculpt_face_sets)) {
continue;
}
for (uint j = 0; j < 3; j++) {
MLoopUV *muv2 = muv + lt->tri[j];
memcpy(GPU_vertbuf_raw_step(&uv_step), muv2->uv, sizeof(muv2->uv));
}
}
}
}
for (int col_i = 0; col_i < totcol; col_i++) {
GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->col[col_i], &col_step);
MPropCol *pcol = NULL;
MLoopCol *mcol = NULL;
GPUAttrRef *ref = vcol_refs + col_i;
const CustomData *cdata = ref->domain == ATTR_DOMAIN_POINT ? vdata : ldata;
CustomDataLayer *layer = cdata->layers + ref->layer_idx;
bool color_loops = ref->domain == ATTR_DOMAIN_CORNER;
if (layer->type == CD_PROP_COLOR) {
pcol = (MPropCol *)layer->data;
}
else {
mcol = (MLoopCol *)layer->data;
}
for (uint i = 0; i < buffers->face_indices_len; i++) {
const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
const uint vtri[3] = {
buffers->mloop[lt->tri[0]].v,
buffers->mloop[lt->tri[1]].v,
buffers->mloop[lt->tri[2]].v,
};
if (!gpu_pbvh_is_looptri_visible(lt, mvert, buffers->mloop, sculpt_face_sets)) {
continue;
}
for (uint j = 0; j < 3; j++) {
/* Vertex Colors. */
const uint loop_index = lt->tri[j];
ushort scol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
if (pcol) {
MPropCol *pcol2 = pcol + (color_loops ? loop_index : vtri[j]);
scol[0] = unit_float_to_ushort_clamp(pcol2->color[0]);
scol[1] = unit_float_to_ushort_clamp(pcol2->color[1]);
scol[2] = unit_float_to_ushort_clamp(pcol2->color[2]);
scol[3] = unit_float_to_ushort_clamp(pcol2->color[3]);
}
else {
const MLoopCol *mcol2 = mcol + (color_loops ? loop_index : vtri[j]);
scol[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol2->r]);
scol[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol2->g]);
scol[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol2->b]);
scol[3] = unit_float_to_ushort_clamp(mcol2->a * (1.0f / 255.0f));
}
memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
}
}
}
for (uint i = 0; i < buffers->face_indices_len; i++) {
const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
const uint vtri[3] = {
@ -296,50 +447,6 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
*(uchar *)GPU_vertbuf_raw_step(&msk_step) = cmask;
empty_mask = empty_mask && (cmask == 0);
/* Vertex Colors. */
if (show_vcol) {
ushort scol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
if (vtcol) {
if (color_loops) {
scol[0] = unit_float_to_ushort_clamp(vtcol[lt->tri[j]].color[0]);
scol[1] = unit_float_to_ushort_clamp(vtcol[lt->tri[j]].color[1]);
scol[2] = unit_float_to_ushort_clamp(vtcol[lt->tri[j]].color[2]);
scol[3] = unit_float_to_ushort_clamp(vtcol[lt->tri[j]].color[3]);
}
else {
scol[0] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[0]);
scol[1] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[1]);
scol[2] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[2]);
scol[3] = unit_float_to_ushort_clamp(vtcol[vtri[j]].color[3]);
}
memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
}
else if (f3col) {
if (color_loops) {
scol[0] = unit_float_to_ushort_clamp(f3col[lt->tri[j]][0]);
scol[1] = unit_float_to_ushort_clamp(f3col[lt->tri[j]][1]);
scol[2] = unit_float_to_ushort_clamp(f3col[lt->tri[j]][2]);
scol[3] = USHRT_MAX;
}
else {
scol[0] = unit_float_to_ushort_clamp(f3col[vtri[j]][0]);
scol[1] = unit_float_to_ushort_clamp(f3col[vtri[j]][1]);
scol[2] = unit_float_to_ushort_clamp(f3col[vtri[j]][2]);
scol[3] = USHRT_MAX;
}
memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
}
else if (vcol) {
const uint loop_index = lt->tri[j];
const MLoopCol *mcol = vcol + (color_loops ? loop_index : vtri[j]);
scol[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->r]);
scol[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->g]);
scol[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->b]);
scol[3] = unit_float_to_ushort_clamp(mcol->a * (1.0f / 255.0f));
memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
}
}
/* Face Sets. */
memcpy(GPU_vertbuf_raw_step(&fset_step), face_set_color, sizeof(uchar[3]));
}
@ -604,7 +711,8 @@ void GPU_pbvh_grid_buffers_update_free(GPU_PBVH_Buffers *buffers,
}
}
void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
void GPU_pbvh_grid_buffers_update(PBVHGPUFormat *vbo_id,
GPU_PBVH_Buffers *buffers,
SubdivCCG *subdiv_ccg,
CCGElem **grids,
const struct DMFlagMat *grid_flag_mats,
@ -628,8 +736,6 @@ void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
/* Build VBO */
const int has_mask = key->has_mask;
buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
uint vert_per_grid = (buffers->smooth) ? key->grid_area : (square_i(key->grid_size - 1) * 4);
uint vert_count = totgrid * vert_per_grid;
@ -653,7 +759,7 @@ void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
uint vbo_index_offset = 0;
/* Build VBO */
if (gpu_pbvh_vert_buf_data_set(buffers, vert_count)) {
if (gpu_pbvh_vert_buf_data_set(vbo_id, buffers, vert_count)) {
GPUIndexBufBuilder elb_lines;
if (buffers->index_lines_buf == NULL) {
@ -683,25 +789,25 @@ void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
for (x = 0; x < key->grid_size; x++) {
CCGElem *elem = CCG_grid_elem(key, grid, x, y);
GPU_vertbuf_attr_set(
buffers->vert_buf, g_vbo_id.pos, vbo_index, CCG_elem_co(key, elem));
buffers->vert_buf, vbo_id->pos, vbo_index, CCG_elem_co(key, elem));
short no_short[3];
normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index, no_short);
if (has_mask && show_mask) {
float fmask = *CCG_elem_mask(key, elem);
uchar cmask = (uchar)(fmask * 255);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index, &cmask);
empty_mask = empty_mask && (cmask == 0);
}
if (show_vcol) {
const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index, &vcol);
}
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index, &face_set_color);
vbo_index += 1;
}
@ -730,37 +836,37 @@ void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
normal_quad_v3(fno, co[3], co[2], co[1], co[0]);
normal_float_to_short_v3(no_short, fno);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 0, co[0]);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 0, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 1, co[1]);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 1, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 2, co[2]);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 2, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index + 3, co[3]);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index + 3, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 0, co[0]);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 0, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 1, co[1]);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 1, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 2, co[2]);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 2, no_short);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 3, co[3]);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 3, no_short);
if (has_mask && show_mask) {
float fmask = (*CCG_elem_mask(key, elems[0]) + *CCG_elem_mask(key, elems[1]) +
*CCG_elem_mask(key, elems[2]) + *CCG_elem_mask(key, elems[3])) *
0.25f;
uchar cmask = (uchar)(fmask * 255);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 0, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 1, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 2, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index + 3, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 0, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 1, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 2, &cmask);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 3, &cmask);
empty_mask = empty_mask && (cmask == 0);
}
const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 0, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 1, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 2, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index + 3, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 0, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 1, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 2, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 3, &vcol);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 0, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 1, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 2, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.fset, vbo_index + 3, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 0, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 1, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 2, &face_set_color);
GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 3, &face_set_color);
vbo_index += 4;
}
@ -805,7 +911,8 @@ GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(int totgrid, BLI_bitmap **grid_hid
* \{ */
/* Output a BMVert into a VertexBufferFormat array at v_index. */
static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
static void gpu_bmesh_vert_to_buffer_copy(PBVHGPUFormat *vbo_id,
BMVert *v,
GPUVertBuf *vert_buf,
int v_index,
const float fno[3],
@ -819,27 +926,27 @@ static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
BLI_assert(!BM_elem_flag_test(v, BM_ELEM_HIDDEN));
/* Set coord, normal, and mask */
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, v_index, v->co);
GPU_vertbuf_attr_set(vert_buf, vbo_id->pos, v_index, v->co);
short no_short[3];
normal_float_to_short_v3(no_short, fno ? fno : v->no);
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, v_index, no_short);
GPU_vertbuf_attr_set(vert_buf, vbo_id->nor, v_index, no_short);
if (show_mask) {
float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
uchar cmask = (uchar)(effective_mask * 255);
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, v_index, &cmask);
GPU_vertbuf_attr_set(vert_buf, vbo_id->msk, v_index, &cmask);
*empty_mask = *empty_mask && (cmask == 0);
}
if (show_vcol) {
const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, &vcol);
GPU_vertbuf_attr_set(vert_buf, vbo_id->col[0], v_index, &vcol);
}
/* Add default face sets color to avoid artifacts. */
const uchar face_set[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.fset, v_index, &face_set);
GPU_vertbuf_attr_set(vert_buf, vbo_id->fset, v_index, &face_set);
}
/* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
@ -896,7 +1003,8 @@ void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
}
}
void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
void GPU_pbvh_bmesh_buffers_update(PBVHGPUFormat *vbo_id,
GPU_PBVH_Buffers *buffers,
BMesh *bm,
GSet *bm_faces,
GSet *bm_unique_verts,
@ -935,7 +1043,7 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
/* Fill vertex buffer */
if (!gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
if (!gpu_pbvh_vert_buf_data_set(vbo_id, buffers, totvert)) {
/* Memory map failed */
return;
}
@ -965,7 +1073,8 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
/* Add vertex to the vertex buffer each time a new one is encountered */
*idx_p = POINTER_FROM_UINT(v_index);
gpu_bmesh_vert_to_buffer_copy(v[i],
gpu_bmesh_vert_to_buffer_copy(vbo_id,
v[i],
buffers->vert_buf,
v_index,
NULL,
@ -1032,7 +1141,8 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
for (i = 0; i < 3; i++) {
gpu_bmesh_vert_to_buffer_copy(v[i],
gpu_bmesh_vert_to_buffer_copy(vbo_id,
v[i],
buffers->vert_buf,
v_index++,
f->no,
@ -1075,6 +1185,250 @@ GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
return buffers;
}
/**
 * Builds a list of attributes from a set of domains and a set of
 * customdata types.
 *
 * \param active_only Returns only one item, a #GPUAttrRef to \a active_layer.
 * \param active_layer #CustomDataLayer to use for the active layer.
 * \param render_layer #CustomDataLayer to use for the render layer.
 * \return Number of entries written into \a r_cd_attrs (at most #MAX_GPU_ATTR).
 */
static int gpu_pbvh_make_attr_offs(eAttrDomainMask domain_mask,
                                   eCustomDataMask type_mask,
                                   const CustomData *vdata,
                                   const CustomData *edata,
                                   const CustomData *ldata,
                                   const CustomData *pdata,
                                   GPUAttrRef r_cd_attrs[MAX_GPU_ATTR],
                                   bool active_only,
                                   int active_type,
                                   int active_domain,
                                   const CustomDataLayer *active_layer,
                                   const CustomDataLayer *render_layer)
{
  /* Customdata that holds the active layer, chosen by its domain.
   * NOTE(review): a NULL customdata here bails out even for the non-active
   * path below; callers currently pass matching data, verify if reused. */
  const CustomData *active_cdata = active_domain == ATTR_DOMAIN_POINT ? vdata : ldata;

  if (!active_cdata) {
    return 0;
  }

  if (active_only) {
    /* Pointer arithmetic assumes active_layer points into active_cdata->layers. */
    const int idx = active_layer ? (int)(active_layer - active_cdata->layers) : -1;

    if (idx >= 0 && idx < active_cdata->totlayer) {
      r_cd_attrs[0].cd_offset = active_cdata->layers[idx].offset;
      r_cd_attrs[0].domain = active_domain;
      r_cd_attrs[0].type = active_type;
      r_cd_attrs[0].layer_idx = idx;

      return 1;
    }

    return 0;
  }

  /* Indexed by domain; must stay in sync with the eAttrDomain values used below. */
  const CustomData *datas[4] = {vdata, edata, pdata, ldata};

  int count = 0;

  /* Gather every matching, non-temporary layer from each requested domain. */
  for (eAttrDomain domain = 0; domain < 4; domain++) {
    const CustomData *cdata = datas[domain];

    if (!cdata || !((1 << domain) & domain_mask)) {
      continue;
    }

    const CustomDataLayer *cl = cdata->layers;

    for (int i = 0; count < MAX_GPU_ATTR && i < cdata->totlayer; i++, cl++) {
      if ((CD_TYPE_AS_MASK(cl->type) & type_mask) && !(cl->flag & CD_FLAG_TEMPORARY)) {
        GPUAttrRef *ref = r_cd_attrs + count;

        ref->cd_offset = cl->offset;
        ref->type = cl->type;
        ref->layer_idx = i;
        ref->domain = domain;

        count++;
      }
    }
  }

  /* Ensure the render layer is last; the draw cache code seems to need this. */
  for (int i = 0; i < count; i++) {
    GPUAttrRef *ref = r_cd_attrs + i;
    const CustomData *layer_cdata = datas[ref->domain];

    if (layer_cdata->layers + ref->layer_idx == render_layer) {
      SWAP(GPUAttrRef, r_cd_attrs[i], r_cd_attrs[count - 1]);
      break;
    }
  }

  return count;
}
/* Return true when the two GPU vertex format descriptions are identical:
 * same settings, same fixed attribute ids, and the same per-layer ids. */
static bool gpu_pbvh_format_equals(PBVHGPUFormat *a, PBVHGPUFormat *b)
{
  if (a->active_attrs_only != b->active_attrs_only) {
    return false;
  }

  /* Fixed attributes. */
  if (a->pos != b->pos || a->nor != b->nor || a->msk != b->msk || a->fset != b->fset) {
    return false;
  }

  /* Layer counts must match before the per-layer ids can be compared. */
  if (a->totuv != b->totuv || a->totcol != b->totcol) {
    return false;
  }

  for (int i = 0; i < a->totuv; i++) {
    if (a->uv[i] != b->uv[i]) {
      return false;
    }
  }

  for (int i = 0; i < a->totcol; i++) {
    if (a->col[i] != b->col[i]) {
      return false;
    }
  }

  return true;
}
/**
 * Rebuild the GPU vertex format stored in \a vbo_id for PBVH drawing:
 * position, normal, mask, face-set, plus vertex color and UV attributes
 * gathered from \a vdata / \a ldata (colors and UVs only for #PBVH_FACES).
 *
 * NOTE: the registration order below determines the attribute ids recorded in
 * \a vbo_id and which layers the "c"/"u" aliases bind to — do not reorder.
 *
 * \param active_attrs_only Only register the active color/UV layers instead of all of them.
 * \return true when the resulting format differs from the previous contents of \a vbo_id.
 */
bool GPU_pbvh_attribute_names_update(PBVHType pbvh_type,
                                     PBVHGPUFormat *vbo_id,
                                     const CustomData *vdata,
                                     const CustomData *ldata,
                                     bool active_attrs_only)
{
  const bool active_only = active_attrs_only;
  /* Snapshot the previous format so the return value can report changes. */
  PBVHGPUFormat old_format = *vbo_id;

  GPU_vertformat_clear(&vbo_id->format);

  vbo_id->active_attrs_only = active_attrs_only;

  /* NOTE(review): always true right after GPU_vertformat_clear() above;
   * kept as-is since removing it is a behavior question for the author. */
  if (vbo_id->format.attr_len == 0) {
    vbo_id->pos = GPU_vertformat_attr_add(
        &vbo_id->format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    vbo_id->nor = GPU_vertformat_attr_add(
        &vbo_id->format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    /* TODO: Do not allocate these `.msk` and `.col` when they are not used. */
    vbo_id->msk = GPU_vertformat_attr_add(
        &vbo_id->format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);

    vbo_id->totcol = 0;
    if (pbvh_type == PBVH_FACES) {
      int ci = 0;

      /* Temporary Mesh wrapper so the ID attribute API can resolve the
       * active/render color layers from the raw customdata. */
      Mesh me_query;

      BKE_id_attribute_copy_domains_temp(ID_ME, vdata, NULL, ldata, NULL, NULL, &me_query.id);

      CustomDataLayer *active_color_layer = BKE_id_attributes_active_color_get(&me_query.id);
      CustomDataLayer *render_color_layer = BKE_id_attributes_render_color_get(&me_query.id);
      eAttrDomain active_color_domain = active_color_layer ?
                                            BKE_id_attribute_domain(&me_query.id,
                                                                    active_color_layer) :
                                            ATTR_DOMAIN_NUM;

      GPUAttrRef vcol_layers[MAX_GPU_ATTR];
      int totlayer = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_COLOR,
                                             CD_MASK_COLOR_ALL,
                                             vdata,
                                             NULL,
                                             ldata,
                                             NULL,
                                             vcol_layers,
                                             active_only,
                                             active_color_layer ? active_color_layer->type : -1,
                                             active_color_domain,
                                             active_color_layer,
                                             render_color_layer);

      /* Register one "c" attribute per color layer, with aliases marking the
       * active/render layers for the draw engine. */
      for (int i = 0; i < totlayer; i++) {
        GPUAttrRef *ref = vcol_layers + i;
        const CustomData *cdata = ref->domain == ATTR_DOMAIN_POINT ? vdata : ldata;
        const CustomDataLayer *layer = cdata->layers + ref->layer_idx;

        if (vbo_id->totcol < MAX_GPU_ATTR) {
          vbo_id->col[ci++] = GPU_vertformat_attr_add(
              &vbo_id->format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
          vbo_id->totcol++;

          bool is_render = render_color_layer == layer;
          bool is_active = active_color_layer == layer;

          DRW_cdlayer_attr_aliases_add(&vbo_id->format, "c", cdata, layer, is_render, is_active);
        }
      }
    }

    /* Ensure at least one vertex color layer so shaders always have an input. */
    if (vbo_id->totcol == 0) {
      vbo_id->col[0] = GPU_vertformat_attr_add(
          &vbo_id->format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
      vbo_id->totcol = 1;

      GPU_vertformat_alias_add(&vbo_id->format, "ac");
    }

    vbo_id->fset = GPU_vertformat_attr_add(
        &vbo_id->format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);

    vbo_id->totuv = 0;
    if (pbvh_type == PBVH_FACES && ldata && CustomData_has_layer(ldata, CD_MLOOPUV)) {
      GPUAttrRef uv_layers[MAX_GPU_ATTR];
      CustomDataLayer *active = NULL, *render = NULL;

      active = get_active_layer(ldata, CD_MLOOPUV);
      render = get_render_layer(ldata, CD_MLOOPUV);

      int totlayer = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
                                             CD_MASK_MLOOPUV,
                                             NULL,
                                             NULL,
                                             ldata,
                                             NULL,
                                             uv_layers,
                                             active_only,
                                             CD_MLOOPUV,
                                             ATTR_DOMAIN_CORNER,
                                             active,
                                             render);

      vbo_id->totuv = totlayer;

      /* Register one "uvs" attribute per UV layer, with the same aliasing
       * scheme the draw cache extractor uses. */
      for (int i = 0; i < totlayer; i++) {
        GPUAttrRef *ref = uv_layers + i;

        vbo_id->uv[i] = GPU_vertformat_attr_add(
            &vbo_id->format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);

        CustomDataLayer *cl = ldata->layers + ref->layer_idx;
        bool is_active = ref->layer_idx == CustomData_get_active_layer_index(ldata, CD_MLOOPUV);

        DRW_cdlayer_attr_aliases_add(&vbo_id->format, "u", ldata, cl, cl == render, is_active);

        /* Apparently the render attribute is 'a' while active is 'au',
         * at least going by the draw cache extractor code. */
        if (cl == render) {
          GPU_vertformat_alias_add(&vbo_id->format, "a");
        }
      }
    }
  }

  /* Report whether the format actually changed. */
  if (!gpu_pbvh_format_equals(&old_format, vbo_id)) {
    return true;
  }

  return false;
}
GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
{
if (wires) {