Eevee: support accessing custom mesh attributes

This adds generic attribute rendering support for meshes for Eevee and
Workbench. Each attribute is stored in the `MeshBufferList` as a
separate VBO, with a maximum of `GPU_MAX_ATTR` VBOs for consistency with
the GPU shader compilation code.
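
In terms of layout, this amounts to the following condensed sketch of
the `MeshBufferList` change (the full diff is below):

    typedef struct MeshBufferList {
      struct {
        /* ... existing VBOs (pos_nor, uv, vcol, ...). */
        GPUVertBuf *attr[GPU_MAX_ATTR]; /* One VBO per generic attribute. */
      } vbo;
      /* ... index buffers ... */
    } MeshBufferList;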

Since `DRW_MeshCDMask` is not general enough, attribute requests are
stored in new `DRW_AttributeRequest` structures inside a convenient
`DRW_MeshAttributes` structure. The latter is used in a similar manner
to `DRW_MeshCDMask`, with the `MeshBatchCache` keeping track of needed,
used, and used-over-time attributes. Again, `GPU_MAX_ATTR` is used in
`DRW_MeshAttributes` to prevent too many attributes from being used.
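
Condensed from the diff below, the bookkeeping looks like this:

    typedef struct DRW_AttributeRequest {
      CustomDataType cd_type; /* e.g. CD_PROP_FLOAT3. */
      int layer_index;        /* Index of the layer in the CustomData. */
      AttributeDomain domain; /* Point, edge, corner, or face. */
    } DRW_AttributeRequest;

    typedef struct DRW_MeshAttributes {
      DRW_AttributeRequest requests[GPU_MAX_ATTR];
      int num_requests;
    } DRW_MeshAttributes;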

To ensure thread safety when updating the list of used attributes, a
mutex is added to the Mesh runtime. This mutex will also be used in the
future, when other parts of the render pre-processing are
multi-threaded.
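
Updates to the shared request lists go through this mutex, as in
`drw_mesh_attributes_merge()` from the diff below:

    static void drw_mesh_attributes_merge(DRW_MeshAttributes *dst,
                                          const DRW_MeshAttributes *src,
                                          ThreadMutex *mesh_render_mutex)
    {
      BLI_mutex_lock(mesh_render_mutex);
      /* Copies requests from `src` to `dst`, skipping duplicates and
       * respecting the GPU_MAX_ATTR cap. */
      mesh_attrs_merge_requests(src, dst);
      BLI_mutex_unlock(mesh_render_mutex);
    }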

`GPU_BATCH_VBO_MAX_LEN` was increased to 16 in order to accommodate
this design.

Since `CD_PROP_COLOR` is a valid attribute type, sculpt vertex colors
are now handled through this system to avoid complicating things. In
the future, regular vertex colors will use it as well. As a result of
this change, bit operations for `DRW_MeshCDMask` now use `uint32_t` (to
match the representation now used by the compiler).
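
With the mask fitting in 32 bits, the comparisons become single-word
operations, as in `mesh_cd_layers_type_overlap()` from the diff below:

    /* Return true if all layers in `b` are also in `a`. */
    BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
    {
      return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
    }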

Implicit type conversion for scalar types behaves differently in
OpenGL than users expect: OpenGL converts a scalar `s` to
`vec4(s, 0, 0, 1)`, whereas Blender's various node graphs expect
`vec4(s, s, s, 1)`. For now, all scalar types therefore use a float3
internally, which increases memory usage. This will be resolved during
or after the EEVEE rewrite, as handling this properly involves much
deeper changes.
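
Concretely, the scalar is replicated on the CPU so that the implicit
vec4 expansion on the GPU produces the expected value. A simplified
sketch of what the new extractor does for a float attribute (the
helper name here is illustrative, not part of the patch):

    /* Upload a scalar attribute as a float3 so that the GPU-side vec4
     * expansion yields (s, s, s, 1) instead of (s, 0, 0, 1). */
    static void fill_scalar_as_float3(const float *src, float (*dst)[3], int len)
    {
      for (int i = 0; i < len; i++) {
        dst[i][0] = dst[i][1] = dst[i][2] = src[i];
      }
    }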

Ref T85075

Reviewed By: fclem

Maniphest Tasks: T85075

Differential Revision: https://developer.blender.org/D12969
Kévin Dietrich 2021-10-26 18:16:22 +02:00
parent 8ddfdfd2b2
commit 03013d19d1
Notes: blender-bot 2023-02-13 19:46:40 +01:00
Referenced by commit 622e6f05f1, Fix T92750: sculpt vertex colors missing in object mode
Referenced by commit e7fedf6dba, Revert "Eevee: support accessing custom mesh attributes"
Referenced by issue #94898, Vertex Color doesn't show up (in Eevee using the Vertex Color shader node or in workbench vertex shading mode)
Referenced by issue #93123, Massive lag when enabling Viewport Shading -> Color -> Vertex and using Sculpt Vertex Colors
Referenced by issue #92750, Regression: Sculpt Vertex Colors shown as black
Referenced by issue #85075, Geometry Nodes: Support rendering generated attributes.
10 changed files with 741 additions and 123 deletions


@@ -52,6 +52,8 @@ void BKE_mesh_runtime_reset(Mesh *mesh)
memset(&mesh->runtime, 0, sizeof(mesh->runtime));
mesh->runtime.eval_mutex = MEM_mallocN(sizeof(ThreadMutex), "mesh runtime eval_mutex");
BLI_mutex_init(mesh->runtime.eval_mutex);
mesh->runtime.render_mutex = MEM_mallocN(sizeof(ThreadMutex), "mesh runtime render_mutex");
BLI_mutex_init(mesh->runtime.render_mutex);
}
/* Clear all pointers which we don't want to be shared on copying the datablock.
@@ -71,6 +73,9 @@ void BKE_mesh_runtime_reset_on_copy(Mesh *mesh, const int UNUSED(flag))
mesh->runtime.eval_mutex = MEM_mallocN(sizeof(ThreadMutex), "mesh runtime eval_mutex");
BLI_mutex_init(mesh->runtime.eval_mutex);
mesh->runtime.render_mutex = MEM_mallocN(sizeof(ThreadMutex), "mesh runtime render_mutex");
BLI_mutex_init(mesh->runtime.render_mutex);
}
void BKE_mesh_runtime_clear_cache(Mesh *mesh)
@@ -80,6 +85,11 @@ void BKE_mesh_runtime_clear_cache(Mesh *mesh)
MEM_freeN(mesh->runtime.eval_mutex);
mesh->runtime.eval_mutex = NULL;
}
if (mesh->runtime.render_mutex != NULL) {
BLI_mutex_end(mesh->runtime.render_mutex);
MEM_freeN(mesh->runtime.render_mutex);
mesh->runtime.render_mutex = NULL;
}
if (mesh->runtime.mesh_eval != NULL) {
mesh->runtime.mesh_eval->edit_mesh = NULL;
BKE_id_free(NULL, mesh->runtime.mesh_eval);


@@ -61,6 +61,7 @@ set(SRC
intern/mesh_extractors/extract_mesh_ibo_lines_paint_mask.cc
intern/mesh_extractors/extract_mesh_ibo_points.cc
intern/mesh_extractors/extract_mesh_ibo_tris.cc
intern/mesh_extractors/extract_mesh_vbo_attributes.cc
intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
intern/mesh_extractors/extract_mesh_vbo_edit_data.cc
intern/mesh_extractors/extract_mesh_vbo_edituv_data.cc


@@ -24,6 +24,10 @@
struct TaskGraph;
#include "DNA_customdata_types.h"
#include "BKE_attribute.h"
#include "GPU_batch.h"
#include "GPU_index_buffer.h"
#include "GPU_vertex_buffer.h"
@@ -56,7 +60,6 @@ typedef struct DRW_MeshCDMask {
uint32_t uv : 8;
uint32_t tan : 8;
uint32_t vcol : 8;
uint32_t sculpt_vcol : 8;
uint32_t orco : 1;
uint32_t tan_orco : 1;
uint32_t sculpt_overlays : 1;
@@ -64,10 +67,10 @@
* modifiers could remove it. (see T68857) */
uint32_t edit_uv : 1;
} DRW_MeshCDMask;
/* Keep `DRW_MeshCDMask` struct within an `uint64_t`.
/* Keep `DRW_MeshCDMask` struct within a `uint32_t`.
* bit-wise and atomic operations are used to compare and update the struct.
* See `mesh_cd_layers_type_*` functions. */
BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint64_t), "DRW_MeshCDMask exceeds 64 bits")
BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits")
typedef enum eMRIterType {
MR_ITER_LOOPTRI = 1 << 0,
MR_ITER_POLY = 1 << 1,
@@ -76,6 +79,17 @@ } eMRIterType;
} eMRIterType;
ENUM_OPERATORS(eMRIterType, MR_ITER_LVERT)
typedef struct DRW_AttributeRequest {
CustomDataType cd_type;
int layer_index;
AttributeDomain domain;
} DRW_AttributeRequest;
typedef struct DRW_MeshAttributes {
DRW_AttributeRequest requests[GPU_MAX_ATTR];
int num_requests;
} DRW_MeshAttributes;
typedef enum eMRDataType {
MR_DATA_NONE = 0,
MR_DATA_POLY_NOR = 1 << 1,
@@ -133,6 +147,7 @@ typedef struct MeshBufferList {
GPUVertBuf *edge_idx; /* extend */
GPUVertBuf *poly_idx;
GPUVertBuf *fdot_idx;
GPUVertBuf *attr[GPU_MAX_ATTR];
} vbo;
/* Index Buffers:
* Only need to be updated when topology changes. */
@@ -285,6 +300,8 @@ typedef struct MeshBatchCache {
DRW_MeshCDMask cd_used, cd_needed, cd_used_over_time;
DRW_MeshAttributes attr_used, attr_needed, attr_used_over_time;
int lastmatch;
/* Valid only if edge_detection is up to date. */


@@ -650,6 +650,9 @@ static void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
EXTRACT_ADD_REQUESTED(vbo, vert_idx);
EXTRACT_ADD_REQUESTED(vbo, fdot_idx);
EXTRACT_ADD_REQUESTED(vbo, skin_roots);
for (int i = 0; i < GPU_MAX_ATTR; i++) {
EXTRACT_ADD_REQUESTED(vbo, attr[i]);
}
EXTRACT_ADD_REQUESTED(ibo, tris);
if (DRW_ibo_requested(mbuflist->ibo.lines_loose)) {


@@ -41,6 +41,7 @@
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "BKE_attribute.h"
#include "BKE_customdata.h"
#include "BKE_deform.h"
#include "BKE_editmesh.h"
@@ -121,6 +122,8 @@
# define _MDEPS_ASSERT6(b, n1, n2, n3, n4, n5) _MDEPS_ASSERT5(b, n1, n2, n3, n4); _MDEPS_ASSERT2(b, n5)
# define _MDEPS_ASSERT7(b, n1, n2, n3, n4, n5, n6) _MDEPS_ASSERT6(b, n1, n2, n3, n4, n5); _MDEPS_ASSERT2(b, n6)
# define _MDEPS_ASSERT8(b, n1, n2, n3, n4, n5, n6, n7) _MDEPS_ASSERT7(b, n1, n2, n3, n4, n5, n6); _MDEPS_ASSERT2(b, n7)
# define _MDEPS_ASSERT21(b, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13, n14, n15, n16, n17, n18, n19, n20) _MDEPS_ASSERT8(b, n1, n2, n3, n4, n5, n6, n7); _MDEPS_ASSERT8(b, n8, n9, n10, n11, n12, n13, n14); _MDEPS_ASSERT7(b, n15, n16, n17, n18, n19, n20)
# define _MDEPS_ASSERT22(b, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13, n14, n15, n16, n17, n18, n19, n20, n21) _MDEPS_ASSERT21(b, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13, n14, n15, n16, n17, n18, n19, n20); _MDEPS_ASSERT2(b, n21);
# define MDEPS_ASSERT_FLAG(...) VA_NARGS_CALL_OVERLOAD(_MDEPS_ASSERT, __VA_ARGS__)
# define MDEPS_ASSERT(batch_name, ...) MDEPS_ASSERT_FLAG(BATCH_FLAG(batch_name), __VA_ARGS__)
@@ -192,6 +195,21 @@ static const DRWBatchFlag g_buffer_deps[] = {
[BUFFER_INDEX(vbo.edge_idx)] = BATCH_FLAG(edit_selection_edges),
[BUFFER_INDEX(vbo.poly_idx)] = BATCH_FLAG(edit_selection_faces),
[BUFFER_INDEX(vbo.fdot_idx)] = BATCH_FLAG(edit_selection_fdots),
[BUFFER_INDEX(vbo.attr[0])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[1])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[2])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[3])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[4])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[5])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[6])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[7])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[8])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[9])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[10])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[11])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[12])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[13])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(vbo.attr[14])] = BATCH_FLAG(surface) | SURFACE_PER_MAT_FLAG,
[BUFFER_INDEX(ibo.tris)] = BATCH_FLAG(surface,
surface_weights,
@@ -240,12 +258,12 @@ static void mesh_batch_cache_discard_batch(MeshBatchCache *cache, const DRWBatch
/* Return true if all layers in _b_ are inside _a_. */
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
return (*((uint64_t *)&a) & *((uint64_t *)&b)) == *((uint64_t *)&b);
return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
}
BLI_INLINE bool mesh_cd_layers_type_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
return *((uint64_t *)&a) == *((uint64_t *)&b);
return *((uint32_t *)&a) == *((uint32_t *)&b);
}
BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
@@ -253,12 +271,11 @@ BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
uint32_t *a_p = (uint32_t *)a;
uint32_t *b_p = (uint32_t *)&b;
atomic_fetch_and_or_uint32(a_p, *b_p);
atomic_fetch_and_or_uint32(a_p + 1, *(b_p + 1));
}
BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
{
*((uint64_t *)a) = 0;
*((uint32_t *)a) = 0;
}
BLI_INLINE const Mesh *editmesh_final_or_this(const Mesh *me)
@@ -271,6 +288,95 @@ static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *c
cd_used->edit_uv = 1;
}
/** \name DRW_MeshAttributes
*
* Utilities for handling requested attributes.
* \{ */
/* Return true if the given DRW_AttributeRequest is already in the requests. */
static bool has_request(const DRW_MeshAttributes *requests, DRW_AttributeRequest req)
{
for (int i = 0; i < requests->num_requests; i++) {
const DRW_AttributeRequest src_req = requests->requests[i];
if (src_req.domain != req.domain) {
continue;
}
if (src_req.layer_index != req.layer_index) {
continue;
}
if (src_req.cd_type != req.cd_type) {
continue;
}
return true;
}
return false;
}
static void mesh_attrs_merge_requests(const DRW_MeshAttributes *src_requests,
DRW_MeshAttributes *dst_requests)
{
for (int i = 0; i < src_requests->num_requests; i++) {
if (dst_requests->num_requests == GPU_MAX_ATTR) {
return;
}
if (has_request(dst_requests, src_requests->requests[i])) {
continue;
}
dst_requests->requests[dst_requests->num_requests] = src_requests->requests[i];
dst_requests->num_requests += 1;
}
}
static void drw_mesh_attributes_clear(DRW_MeshAttributes *attributes)
{
memset(attributes, 0, sizeof(DRW_MeshAttributes));
}
static void drw_mesh_attributes_merge(DRW_MeshAttributes *dst,
const DRW_MeshAttributes *src,
ThreadMutex *mesh_render_mutex)
{
BLI_mutex_lock(mesh_render_mutex);
mesh_attrs_merge_requests(src, dst);
BLI_mutex_unlock(mesh_render_mutex);
}
/* Return true if all requests in b are in a. */
static bool drw_mesh_attributes_overlap(DRW_MeshAttributes *a, DRW_MeshAttributes *b)
{
if (a->num_requests != b->num_requests) {
return false;
}
for (int i = 0; i < a->num_requests; i++) {
if (!has_request(a, b->requests[i])) {
return false;
}
}
return true;
}
static void drw_mesh_attributes_add_request(DRW_MeshAttributes *attrs,
CustomDataType type,
int layer,
AttributeDomain domain)
{
if (attrs->num_requests >= GPU_MAX_ATTR) {
return;
}
DRW_AttributeRequest *req = &attrs->requests[attrs->num_requests];
req->cd_type = type;
req->layer_index = layer;
req->domain = domain;
attrs->num_requests += 1;
}
/** \} */
BLI_INLINE const CustomData *mesh_cd_ldata_get_from_mesh(const Mesh *me)
{
switch ((eMeshWrapperType)me->runtime.wrapper_type) {
@@ -286,6 +392,36 @@ BLI_INLINE const CustomData *mesh_cd_ldata_get_from_mesh(const Mesh *me)
return &me->ldata;
}
BLI_INLINE const CustomData *mesh_cd_pdata_get_from_mesh(const Mesh *me)
{
switch ((eMeshWrapperType)me->runtime.wrapper_type) {
case ME_WRAPPER_TYPE_MDATA:
return &me->pdata;
break;
case ME_WRAPPER_TYPE_BMESH:
return &me->edit_mesh->bm->pdata;
break;
}
BLI_assert(0);
return &me->pdata;
}
BLI_INLINE const CustomData *mesh_cd_edata_get_from_mesh(const Mesh *me)
{
switch ((eMeshWrapperType)me->runtime.wrapper_type) {
case ME_WRAPPER_TYPE_MDATA:
return &me->edata;
break;
case ME_WRAPPER_TYPE_BMESH:
return &me->edit_mesh->bm->edata;
break;
}
BLI_assert(0);
return &me->edata;
}
BLI_INLINE const CustomData *mesh_cd_vdata_get_from_mesh(const Mesh *me)
{
switch ((eMeshWrapperType)me->runtime.wrapper_type) {
@@ -321,14 +457,14 @@ static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd
}
}
static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshAttributes *attrs_used)
{
const Mesh *me_final = editmesh_final_or_this(me);
const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
int layer = CustomData_get_active_layer(cd_vdata, CD_PROP_COLOR);
if (layer != -1) {
cd_used->sculpt_vcol |= (1 << layer);
drw_mesh_attributes_add_request(attrs_used, CD_PROP_COLOR, layer, ATTR_DOMAIN_POINT);
}
}
@@ -343,13 +479,45 @@ static void mesh_cd_calc_active_mloopcol_layer(const Mesh *me, DRW_MeshCDMask *c
}
}
static bool custom_data_match_attribute(const CustomData *custom_data,
const char *name,
int *r_layer_index,
int *r_type)
{
const int possible_attribute_types[6] = {
CD_PROP_BOOL,
CD_PROP_INT32,
CD_PROP_FLOAT,
CD_PROP_FLOAT2,
CD_PROP_FLOAT3,
CD_PROP_COLOR,
};
for (int i = 0; i < ARRAY_SIZE(possible_attribute_types); i++) {
const int attr_type = possible_attribute_types[i];
int layer_index = CustomData_get_named_layer(custom_data, attr_type, name);
if (layer_index == -1) {
continue;
}
*r_layer_index = layer_index;
*r_type = attr_type;
return true;
}
return false;
}
static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
struct GPUMaterial **gpumat_array,
int gpumat_array_len)
int gpumat_array_len,
DRW_MeshAttributes *attributes)
{
const Mesh *me_final = editmesh_final_or_this(me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
const CustomData *cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
const CustomData *cd_edata = mesh_cd_edata_get_from_mesh(me_final);
/* See: DM_vertex_attributes_from_gpu for similar logic */
DRW_MeshCDMask cd_used;
@@ -363,6 +531,8 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
const char *name = gpu_attr->name;
int type = gpu_attr->type;
int layer = -1;
/* ATTR_DOMAIN_NUM is standard for "invalid value". */
AttributeDomain domain = ATTR_DOMAIN_NUM;
if (type == CD_AUTO_FROM_NAME) {
/* We need to deduce what exact layer is used.
@@ -373,13 +543,6 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
type = CD_MTFACE;
if (layer == -1) {
if (U.experimental.use_sculpt_vertex_colors) {
layer = CustomData_get_named_layer(cd_vdata, CD_PROP_COLOR, name);
type = CD_PROP_COLOR;
}
}
if (layer == -1) {
layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name);
type = CD_MCOL;
@@ -391,6 +554,27 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
type = CD_TANGENT;
}
#endif
if (layer == -1) {
/* Try to match a generic attribute; we use the first attribute domain with a
* matching name. */
if (custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
domain = ATTR_DOMAIN_POINT;
}
else if (custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
domain = ATTR_DOMAIN_CORNER;
}
else if (custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
domain = ATTR_DOMAIN_FACE;
}
else if (custom_data_match_attribute(cd_edata, name, &layer, &type)) {
domain = ATTR_DOMAIN_EDGE;
}
else {
layer = -1;
domain = ATTR_DOMAIN_NUM;
}
}
if (layer == -1) {
continue;
}
@@ -432,31 +616,6 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
}
break;
}
case CD_PROP_COLOR: {
/* Sculpt Vertex Colors */
bool use_mloop_cols = false;
if (layer == -1) {
layer = (name[0] != '\0') ?
CustomData_get_named_layer(cd_vdata, CD_PROP_COLOR, name) :
CustomData_get_render_layer(cd_vdata, CD_PROP_COLOR);
/* Fallback to Vertex Color data */
if (layer == -1) {
layer = (name[0] != '\0') ?
CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
CustomData_get_render_layer(cd_ldata, CD_MLOOPCOL);
use_mloop_cols = true;
}
}
if (layer != -1) {
if (use_mloop_cols) {
cd_used.vcol |= (1 << layer);
}
else {
cd_used.sculpt_vcol |= (1 << layer);
}
}
break;
}
case CD_MCOL: {
/* Vertex Color Data */
if (layer == -1) {
@@ -473,6 +632,17 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
cd_used.orco = 1;
break;
}
case CD_PROP_BOOL:
case CD_PROP_INT32:
case CD_PROP_FLOAT:
case CD_PROP_FLOAT2:
case CD_PROP_FLOAT3:
case CD_PROP_COLOR: {
if (layer != -1 && domain != ATTR_DOMAIN_NUM) {
drw_mesh_attributes_add_request(attributes, type, layer, domain);
}
break;
}
}
}
}
@@ -935,14 +1105,14 @@ static void texpaint_request_active_vcol(MeshBatchCache *cache, Mesh *me)
static void sculpt_request_active_vcol(MeshBatchCache *cache, Mesh *me)
{
DRW_MeshCDMask cd_needed;
mesh_cd_layers_type_clear(&cd_needed);
mesh_cd_calc_active_vcol_layer(me, &cd_needed);
DRW_MeshAttributes attrs_needed;
drw_mesh_attributes_clear(&attrs_needed);
mesh_cd_calc_active_vcol_layer(me, &attrs_needed);
BLI_assert(cd_needed.sculpt_vcol != 0 &&
BLI_assert(attrs_needed.num_requests != 0 &&
"No MPropCol layer available in Sculpt, but batches requested anyway!");
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
drw_mesh_attributes_merge(&cache->attr_needed, &attrs_needed, me->runtime.render_mutex);
}
GPUBatch *DRW_mesh_batch_cache_get_all_verts(Mesh *me)
@@ -1015,11 +1185,16 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
uint gpumat_array_len)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(me, gpumat_array, gpumat_array_len);
DRW_MeshAttributes attrs_needed;
drw_mesh_attributes_clear(&attrs_needed);
DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(
me, gpumat_array, gpumat_array_len, &attrs_needed);
BLI_assert(gpumat_array_len == cache->mat_len);
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
drw_mesh_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
@@ -1296,11 +1471,25 @@ void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
cache->lastmatch = ctime;
}
if (drw_mesh_attributes_overlap(&cache->attr_used_over_time, &cache->attr_used)) {
cache->lastmatch = ctime;
}
if (ctime - cache->lastmatch > U.vbotimeout) {
mesh_batch_cache_discard_shaded_tri(cache);
}
mesh_cd_layers_type_clear(&cache->cd_used_over_time);
drw_mesh_attributes_clear(&cache->attr_used_over_time);
}
static void drw_add_attributes_vbo(GPUBatch *batch,
MeshBufferList *mbuflist,
DRW_MeshAttributes *attr_used)
{
for (int i = 0; i < attr_used->num_requests; i++) {
DRW_vbo_request(batch, &mbuflist->vbo.attr[i]);
}
}
#ifdef DEBUG
@@ -1409,12 +1598,15 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
}
ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
/* Verify that all surface batches have needed attribute layers. */
/* TODO(fclem): We could be a bit smarter here and only do it per
* material. */
bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
if (cd_overlap == false) {
bool attr_overlap = drw_mesh_attributes_overlap(&cache->attr_used, &cache->attr_needed);
if (cd_overlap == false || attr_overlap == false) {
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.uv);
@@ -1430,11 +1622,14 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
if (cache->cd_used.sculpt_overlays != cache->cd_needed.sculpt_overlays) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.sculpt_data);
}
if (((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) ||
((cache->cd_used.sculpt_vcol & cache->cd_needed.sculpt_vcol) !=
cache->cd_needed.sculpt_vcol)) {
if ((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.vcol);
}
if (!drw_mesh_attributes_overlap(&cache->attr_used, &cache->attr_needed)) {
for (int i = 0; i < GPU_MAX_ATTR; i++) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.attr[i]);
}
}
}
/* We can't discard batches at this point as they have been
* referenced for drawing. Just clear them in place. */
@@ -1445,9 +1640,13 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
cache->batch_ready &= ~(MBC_SURFACE);
mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
drw_mesh_attributes_merge(&cache->attr_used, &cache->attr_needed, mesh_render_mutex);
}
mesh_cd_layers_type_merge(&cache->cd_used_over_time, cache->cd_needed);
mesh_cd_layers_type_clear(&cache->cd_needed);
drw_mesh_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, mesh_render_mutex);
drw_mesh_attributes_clear(&cache->attr_needed);
}
if (batch_requested & MBC_EDITUV) {
@@ -1506,7 +1705,27 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
MeshBufferList *mbuflist = &cache->final.buff;
/* Initialize batches and request VBO's & IBO's. */
MDEPS_ASSERT(surface, ibo.tris, vbo.lnor, vbo.pos_nor, vbo.uv, vbo.vcol);
MDEPS_ASSERT(surface,
ibo.tris,
vbo.lnor,
vbo.pos_nor,
vbo.uv,
vbo.vcol,
vbo.attr[0],
vbo.attr[1],
vbo.attr[2],
vbo.attr[3],
vbo.attr[4],
vbo.attr[5],
vbo.attr[6],
vbo.attr[7],
vbo.attr[8],
vbo.attr[9],
vbo.attr[10],
vbo.attr[11],
vbo.attr[12],
vbo.attr[13],
vbo.attr[14]);
if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.surface, &mbuflist->ibo.tris);
/* Order matters. First ones override latest VBO's attributes. */
@@ -1515,9 +1734,10 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
if (cache->cd_used.uv != 0) {
DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.uv);
}
if (cache->cd_used.vcol != 0 || cache->cd_used.sculpt_vcol != 0) {
if (cache->cd_used.vcol != 0) {
DRW_vbo_request(cache->batch.surface, &mbuflist->vbo.vcol);
}
drw_add_attributes_vbo(cache->batch.surface, mbuflist, &cache->attr_used);
}
MDEPS_ASSERT(all_verts, vbo.pos_nor);
if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
@@ -1580,8 +1800,28 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
/* Per Material */
MDEPS_ASSERT_FLAG(
SURFACE_PER_MAT_FLAG, vbo.lnor, vbo.pos_nor, vbo.uv, vbo.tan, vbo.vcol, vbo.orco);
MDEPS_ASSERT_FLAG(SURFACE_PER_MAT_FLAG,
vbo.lnor,
vbo.pos_nor,
vbo.uv,
vbo.tan,
vbo.vcol,
vbo.orco,
vbo.attr[0],
vbo.attr[1],
vbo.attr[2],
vbo.attr[3],
vbo.attr[4],
vbo.attr[5],
vbo.attr[6],
vbo.attr[7],
vbo.attr[8],
vbo.attr[9],
vbo.attr[10],
vbo.attr[11],
vbo.attr[12],
vbo.attr[13],
vbo.attr[14]);
MDEPS_ASSERT_INDEX(TRIS_PER_MAT_INDEX, SURFACE_PER_MAT_FLAG);
for (int i = 0; i < cache->mat_len; i++) {
if (DRW_batch_requested(cache->surface_per_mat[i], GPU_PRIM_TRIS)) {
@@ -1595,12 +1835,13 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
if ((cache->cd_used.tan != 0) || (cache->cd_used.tan_orco != 0)) {
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.tan);
}
if (cache->cd_used.vcol != 0 || cache->cd_used.sculpt_vcol != 0) {
if (cache->cd_used.vcol != 0) {
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.vcol);
}
if (cache->cd_used.orco != 0) {
DRW_vbo_request(cache->surface_per_mat[i], &mbuflist->vbo.orco);
}
drw_add_attributes_vbo(cache->surface_per_mat[i], mbuflist, &cache->attr_used);
}
}
@@ -1751,6 +1992,9 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
MDEPS_ASSERT_MAP(vbo.edituv_stretch_angle);
MDEPS_ASSERT_MAP(vbo.fdots_uv);
MDEPS_ASSERT_MAP(vbo.fdots_edituv_data);
for (int i = 0; i < GPU_MAX_ATTR; i++) {
MDEPS_ASSERT_MAP(vbo.attr[i]);
}
MDEPS_ASSERT_MAP(ibo.tris);
MDEPS_ASSERT_MAP(ibo.lines);


@@ -328,6 +328,7 @@ extern const MeshExtract extract_poly_idx;
extern const MeshExtract extract_edge_idx;
extern const MeshExtract extract_vert_idx;
extern const MeshExtract extract_fdot_idx;
extern const MeshExtract extract_attr[GPU_MAX_ATTR];
#ifdef __cplusplus
}


@@ -0,0 +1,398 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2021 by Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup draw
*/
#include "MEM_guardedalloc.h"
#include <functional>
#include "BLI_float2.hh"
#include "BLI_float3.hh"
#include "BLI_float4.hh"
#include "BLI_string.h"
#include "BKE_attribute.h"
#include "extract_mesh.h"
namespace blender::draw {
/* ---------------------------------------------------------------------- */
/** \name Extract Attributes
* \{ */
static CustomData *get_custom_data_for_domain(const MeshRenderData *mr, AttributeDomain domain)
{
switch (domain) {
default: {
return nullptr;
}
case ATTR_DOMAIN_POINT: {
return (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->vdata : &mr->me->vdata;
}
case ATTR_DOMAIN_CORNER: {
return (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->ldata : &mr->me->ldata;
}
case ATTR_DOMAIN_FACE: {
return (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->pdata : &mr->me->pdata;
}
case ATTR_DOMAIN_EDGE: {
return (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->edata : &mr->me->edata;
}
}
}
/* Utility to convert from the type used in the attributes to the types for the VBO.
* This is mostly used to promote integers and booleans to floats, as other types (float, float2,
etc.) directly map to available GPU types. Booleans are still converted as attributes are vec4
* in the shader.
*/
template<typename AttributeType, typename VBOType> struct attribute_type_converter {
static VBOType convert_value(AttributeType value)
{
if constexpr (std::is_same_v<AttributeType, VBOType>) {
return value;
}
/* This should only concern bools which are converted to floats. */
return static_cast<VBOType>(value);
}
};
/* Similar to the one in #extract_mesh_vcol_vbo.cc */
struct gpuMeshCol {
ushort r, g, b, a;
};
template<> struct attribute_type_converter<MPropCol, gpuMeshCol> {
static gpuMeshCol convert_value(MPropCol value)
{
gpuMeshCol result;
result.r = unit_float_to_ushort_clamp(value.color[0]);
result.g = unit_float_to_ushort_clamp(value.color[1]);
result.b = unit_float_to_ushort_clamp(value.color[2]);
result.a = unit_float_to_ushort_clamp(value.color[3]);
return result;
}
};
/* Return the number of components for the attribute's value type, or 0 if it is unsupported. */
static uint gpu_component_size_for_attribute_type(CustomDataType type)
{
switch (type) {
case CD_PROP_BOOL:
case CD_PROP_INT32:
case CD_PROP_FLOAT: {
/* TODO(kevindietrich): should be 1 when scalar attribute conversion is handled by us. See
* comment #extract_attr_init. */
return 3;
}
case CD_PROP_FLOAT2: {
return 2;
}
case CD_PROP_FLOAT3: {
return 3;
}
case CD_PROP_COLOR: {
return 4;
}
default: {
return 0;
}
}
}
static GPUVertFetchMode get_fetch_mode_for_type(CustomDataType type)
{
switch (type) {
case CD_PROP_INT32: {
return GPU_FETCH_INT_TO_FLOAT;
}
case CD_PROP_COLOR: {
return GPU_FETCH_INT_TO_FLOAT_UNIT;
}
default: {
return GPU_FETCH_FLOAT;
}
}
}
static GPUVertCompType get_comp_type_for_type(CustomDataType type)
{
switch (type) {
case CD_PROP_INT32: {
return GPU_COMP_I32;
}
case CD_PROP_COLOR: {
return GPU_COMP_U16;
}
default: {
return GPU_COMP_F32;
}
}
}
static void init_vbo_for_attribute(const MeshRenderData *mr,
GPUVertBuf *vbo,
const DRW_AttributeRequest &request)
{
GPUVertCompType comp_type = get_comp_type_for_type(request.cd_type);
GPUVertFetchMode fetch_mode = get_fetch_mode_for_type(request.cd_type);
const uint comp_size = gpu_component_size_for_attribute_type(request.cd_type);
/* We should not be here if the attribute type is not supported. */
BLI_assert(comp_size != 0);
const CustomData *custom_data = get_custom_data_for_domain(mr, request.domain);
char attr_name[32], attr_safe_name[GPU_MAX_SAFE_ATTR_NAME];
const char *layer_name = CustomData_get_layer_name(
custom_data, request.cd_type, request.layer_index);
GPU_vertformat_safe_attr_name(layer_name, attr_safe_name, GPU_MAX_SAFE_ATTR_NAME);
/* Attributes use auto-name. */
BLI_snprintf(attr_name, sizeof(attr_name), "a%s", attr_safe_name);
GPUVertFormat format = {0};
GPU_vertformat_deinterleave(&format);
GPU_vertformat_attr_add(&format, attr_name, comp_type, comp_size, fetch_mode);
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, static_cast<uint32_t>(mr->loop_len));
}
template<typename AttributeType, typename VBOType>
static void fill_vertbuf_with_attribute(const MeshRenderData *mr,
VBOType *vbo_data,
const DRW_AttributeRequest &request)
{
const CustomData *custom_data = get_custom_data_for_domain(mr, request.domain);
BLI_assert(custom_data);
const int layer_index = request.layer_index;
const MPoly *mpoly = mr->mpoly;
const MLoop *mloop = mr->mloop;
const AttributeType *attr_data = static_cast<AttributeType *>(
CustomData_get_layer_n(custom_data, request.cd_type, layer_index));
using converter = attribute_type_converter<AttributeType, VBOType>;
switch (request.domain) {
default: {
BLI_assert(false);
break;
}
case ATTR_DOMAIN_POINT: {
for (int ml_index = 0; ml_index < mr->loop_len; ml_index++, vbo_data++, mloop++) {
*vbo_data = converter::convert_value(attr_data[mloop->v]);
}
break;
}
case ATTR_DOMAIN_CORNER: {
for (int ml_index = 0; ml_index < mr->loop_len; ml_index++, vbo_data++) {
*vbo_data = converter::convert_value(attr_data[ml_index]);
}
break;
}
case ATTR_DOMAIN_EDGE: {
for (int ml_index = 0; ml_index < mr->loop_len; ml_index++, vbo_data++, mloop++) {
*vbo_data = converter::convert_value(attr_data[mloop->e]);
}
break;
}
case ATTR_DOMAIN_FACE: {
for (int mp_index = 0; mp_index < mr->poly_len; mp_index++) {
const MPoly &poly = mpoly[mp_index];
const VBOType value = converter::convert_value(attr_data[mp_index]);
for (int l = 0; l < poly.totloop; l++) {
*vbo_data++ = value;
}
}
break;
}
}
}
template<typename AttributeType, typename VBOType>
static void fill_vertbuf_with_attribute_bm(const MeshRenderData *mr,
VBOType *&vbo_data,
const DRW_AttributeRequest &request)
{
const CustomData *custom_data = get_custom_data_for_domain(mr, request.domain);
BLI_assert(custom_data);
const int layer_index = request.layer_index;
int cd_ofs = CustomData_get_n_offset(custom_data, request.cd_type, layer_index);
using converter = attribute_type_converter<AttributeType, VBOType>;
BMIter f_iter;
BMFace *efa;
BM_ITER_MESH (efa, &f_iter, mr->bm, BM_FACES_OF_MESH) {
BMLoop *l_iter, *l_first;
l_iter = l_first = BM_FACE_FIRST_LOOP(efa);
do {
const AttributeType *attr_data = nullptr;
if (request.domain == ATTR_DOMAIN_POINT) {
attr_data = static_cast<const AttributeType *>(BM_ELEM_CD_GET_VOID_P(l_iter->v, cd_ofs));
}
else if (request.domain == ATTR_DOMAIN_CORNER) {
attr_data = static_cast<const AttributeType *>(BM_ELEM_CD_GET_VOID_P(l_iter, cd_ofs));
}
else if (request.domain == ATTR_DOMAIN_FACE) {
attr_data = static_cast<const AttributeType *>(BM_ELEM_CD_GET_VOID_P(efa, cd_ofs));
}
else if (request.domain == ATTR_DOMAIN_EDGE) {
attr_data = static_cast<const AttributeType *>(BM_ELEM_CD_GET_VOID_P(l_iter->e, cd_ofs));
}
else {
BLI_assert(false);
continue;
}
*vbo_data = converter::convert_value(*attr_data);
vbo_data++;
} while ((l_iter = l_iter->next) != l_first);
}
}
template<typename AttributeType, typename VBOType = AttributeType>
static void extract_attr_generic(const MeshRenderData *mr,
GPUVertBuf *vbo,
const DRW_AttributeRequest &request)
{
VBOType *vbo_data = static_cast<VBOType *>(GPU_vertbuf_get_data(vbo));
if (mr->extract_type == MR_EXTRACT_BMESH) {
fill_vertbuf_with_attribute_bm<AttributeType>(mr, vbo_data, request);
}
else {
fill_vertbuf_with_attribute<AttributeType>(mr, vbo_data, request);
}
}
static void extract_attr_init(const MeshRenderData *mr,
struct MeshBatchCache *cache,
void *buf,
void *UNUSED(tls_data),
int index)
{
const DRW_MeshAttributes *attrs_used = &cache->attr_used;
const DRW_AttributeRequest &request = attrs_used->requests[index];
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
init_vbo_for_attribute(mr, vbo, request);
/* TODO(kevindietrich): float3 is used for scalar attributes as the implicit conversion done by
* OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following the
* Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a similar
* texture as for volume attributes, so we can control the conversion ourselves. */
switch (request.cd_type) {
case CD_PROP_BOOL: {
extract_attr_generic<bool, float3>(mr, vbo, request);
break;
}
case CD_PROP_INT32: {
extract_attr_generic<int32_t, float3>(mr, vbo, request);
break;
}
case CD_PROP_FLOAT: {
extract_attr_generic<float, float3>(mr, vbo, request);
break;
}
case CD_PROP_FLOAT2: {
extract_attr_generic<float2>(mr, vbo, request);
break;
}
case CD_PROP_FLOAT3: {
extract_attr_generic<float3>(mr, vbo, request);
break;
}
case CD_PROP_COLOR: {
extract_attr_generic<MPropCol, gpuMeshCol>(mr, vbo, request);
break;
}
default: {
BLI_assert(false);
}
}
}
/* Wrappers around extract_attr_init so we can pass the index of the attribute that we want to
* extract. The overall API does not allow us to pass this in a convenient way. */
#define EXTRACT_INIT_WRAPPER(index) \
static void extract_attr_init##index( \
const MeshRenderData *mr, struct MeshBatchCache *cache, void *buf, void *tls_data) \
{ \
extract_attr_init(mr, cache, buf, tls_data, index); \
}
EXTRACT_INIT_WRAPPER(0)
EXTRACT_INIT_WRAPPER(1)
EXTRACT_INIT_WRAPPER(2)
EXTRACT_INIT_WRAPPER(3)
EXTRACT_INIT_WRAPPER(4)
EXTRACT_INIT_WRAPPER(5)
EXTRACT_INIT_WRAPPER(6)
EXTRACT_INIT_WRAPPER(7)
EXTRACT_INIT_WRAPPER(8)
EXTRACT_INIT_WRAPPER(9)
EXTRACT_INIT_WRAPPER(10)
EXTRACT_INIT_WRAPPER(11)
EXTRACT_INIT_WRAPPER(12)
EXTRACT_INIT_WRAPPER(13)
EXTRACT_INIT_WRAPPER(14)
template<int index> constexpr MeshExtract create_extractor_attr(ExtractInitFn fn)
{
MeshExtract extractor = {nullptr};
extractor.init = fn;
extractor.data_type = MR_DATA_NONE;
extractor.data_size = 0;
extractor.use_threading = false;
extractor.mesh_buffer_offset = offsetof(MeshBufferList, vbo.attr[index]);
return extractor;
}
/** \} */
} // namespace blender::draw
extern "C" {
#define CREATE_EXTRACTOR_ATTR(index) \
blender::draw::create_extractor_attr<index>(blender::draw::extract_attr_init##index)
const MeshExtract extract_attr[GPU_MAX_ATTR] = {
CREATE_EXTRACTOR_ATTR(0),
CREATE_EXTRACTOR_ATTR(1),
CREATE_EXTRACTOR_ATTR(2),
CREATE_EXTRACTOR_ATTR(3),
CREATE_EXTRACTOR_ATTR(4),
CREATE_EXTRACTOR_ATTR(5),
CREATE_EXTRACTOR_ATTR(6),
CREATE_EXTRACTOR_ATTR(7),
CREATE_EXTRACTOR_ATTR(8),
CREATE_EXTRACTOR_ATTR(9),
CREATE_EXTRACTOR_ATTR(10),
CREATE_EXTRACTOR_ATTR(11),
CREATE_EXTRACTOR_ATTR(12),
CREATE_EXTRACTOR_ATTR(13),
CREATE_EXTRACTOR_ATTR(14),
};
}


@@ -43,9 +43,7 @@ static void extract_vcol_init(const MeshRenderData *mr,
GPU_vertformat_deinterleave(&format);
CustomData *cd_ldata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->ldata : &mr->me->ldata;
CustomData *cd_vdata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->vdata : &mr->me->vdata;
uint32_t vcol_layers = cache->cd_used.vcol;
uint32_t svcol_layers = cache->cd_used.sculpt_vcol;
for (int i = 0; i < MAX_MCOL; i++) {
if (vcol_layers & (1 << i)) {
@@ -64,42 +62,14 @@
}
/* Gather number of auto layers. */
/* We only do `vcols` that are not overridden by `uvs` and sculpt vertex colors. */
if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, layer_name) == -1 &&
CustomData_get_named_layer_index(cd_vdata, CD_PROP_COLOR, layer_name) == -1) {
/* We only do `vcols` that are not overridden by `uvs`. */
if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, layer_name) == -1) {
BLI_snprintf(attr_name, sizeof(attr_name), "a%s", attr_safe_name);
GPU_vertformat_alias_add(&format, attr_name);
}
}
}
/* Sculpt Vertex Colors */
if (U.experimental.use_sculpt_vertex_colors) {
for (int i = 0; i < 8; i++) {
if (svcol_layers & (1 << i)) {
char attr_name[32], attr_safe_name[GPU_MAX_SAFE_ATTR_NAME];
const char *layer_name = CustomData_get_layer_name(cd_vdata, CD_PROP_COLOR, i);
GPU_vertformat_safe_attr_name(layer_name, attr_safe_name, GPU_MAX_SAFE_ATTR_NAME);
BLI_snprintf(attr_name, sizeof(attr_name), "c%s", attr_safe_name);
GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
if (i == CustomData_get_render_layer(cd_vdata, CD_PROP_COLOR)) {
GPU_vertformat_alias_add(&format, "c");
}
if (i == CustomData_get_active_layer(cd_vdata, CD_PROP_COLOR)) {
GPU_vertformat_alias_add(&format, "ac");
}
/* Gather number of auto layers. */
/* We only do `vcols` that are not overridden by `uvs`. */
if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, layer_name) == -1) {
BLI_snprintf(attr_name, sizeof(attr_name), "a%s", attr_safe_name);
GPU_vertformat_alias_add(&format, attr_name);
}
}
}
}
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->loop_len);
@@ -108,7 +78,6 @@ static void extract_vcol_init(const MeshRenderData *mr,
};
gpuMeshVcol *vcol_data = (gpuMeshVcol *)GPU_vertbuf_get_data(vbo);
MLoop *loops = (MLoop *)CustomData_get_layer(cd_ldata, CD_MLOOP);
for (int i = 0; i < MAX_MCOL; i++) {
if (vcol_layers & (1 << i)) {
@@ -139,35 +108,6 @@ static void extract_vcol_init(const MeshRenderData *mr,
}
}
}
if (svcol_layers & (1 << i) && U.experimental.use_sculpt_vertex_colors) {
if (mr->extract_type == MR_EXTRACT_BMESH) {
int cd_ofs = CustomData_get_n_offset(cd_vdata, CD_PROP_COLOR, i);
BMIter f_iter;
BMFace *efa;
BM_ITER_MESH (efa, &f_iter, mr->bm, BM_FACES_OF_MESH) {
BMLoop *l_iter, *l_first;
l_iter = l_first = BM_FACE_FIRST_LOOP(efa);
do {
const MPropCol *prop_col = (const MPropCol *)BM_ELEM_CD_GET_VOID_P(l_iter->v, cd_ofs);
vcol_data->r = unit_float_to_ushort_clamp(prop_col->color[0]);
vcol_data->g = unit_float_to_ushort_clamp(prop_col->color[1]);
vcol_data->b = unit_float_to_ushort_clamp(prop_col->color[2]);
vcol_data->a = unit_float_to_ushort_clamp(prop_col->color[3]);
vcol_data++;
} while ((l_iter = l_iter->next) != l_first);
}
}
else {
MPropCol *vcol = (MPropCol *)CustomData_get_layer_n(cd_vdata, CD_PROP_COLOR, i);
for (int ml_index = 0; ml_index < mr->loop_len; ml_index++, vcol_data++) {
vcol_data->r = unit_float_to_ushort_clamp(vcol[loops[ml_index].v].color[0]);
vcol_data->g = unit_float_to_ushort_clamp(vcol[loops[ml_index].v].color[1]);
vcol_data->b = unit_float_to_ushort_clamp(vcol[loops[ml_index].v].color[2]);
vcol_data->a = unit_float_to_ushort_clamp(vcol[loops[ml_index].v].color[3]);
}
}
}
}
}


@@ -32,7 +32,7 @@
#include "GPU_shader.h"
#include "GPU_vertex_buffer.h"
#define GPU_BATCH_VBO_MAX_LEN 6
#define GPU_BATCH_VBO_MAX_LEN 16
#define GPU_BATCH_INST_VBO_MAX_LEN 2
#define GPU_BATCH_VAO_STATIC_LEN 3
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
@@ -54,11 +54,11 @@ typedef enum eGPUBatchFlag {
GPU_BATCH_OWNS_INDEX = (GPU_BATCH_OWNS_INST_VBO_MAX << 1),
/** Has been initialized. At least one VBO is set. */
GPU_BATCH_INIT = (1 << 16),
GPU_BATCH_INIT = (1 << 26),
/** Batch is initialized but its VBOs are still being populated. (optional) */
GPU_BATCH_BUILDING = (1 << 16),
GPU_BATCH_BUILDING = (1 << 26),
/** Cached data needs to be rebuilt. (VAO, PSO, ...) */
GPU_BATCH_DIRTY = (1 << 17),
GPU_BATCH_DIRTY = (1 << 27),
} eGPUBatchFlag;
#define GPU_BATCH_OWNS_NONE GPU_BATCH_INVALID


@@ -127,6 +127,10 @@ typedef struct Mesh_Runtime {
/** Needed in case we need to lazily initialize the mesh. */
CustomData_MeshMasks cd_mask_extra;
/** Needed to ensure some thread-safety during render data pre-processing. */
void *render_mutex;
void *_pad3;
} Mesh_Runtime;
typedef struct Mesh {