Mesh Batch Cache: Split UV and tangent into 2 distinct VBOs
This is done because they don't have the same update frequency. UVs can remain persistent even on a geometry update (e.g. a skinned object), but tangents can change whenever the normals change. Also, the per-VBO name buffer was too small to contain all layer names.
This commit is contained in:
parent
deb5416a1a
commit
80e9eb66d5
|
@ -86,7 +86,8 @@ typedef struct MeshBufferCache {
|
|||
GPUVertBuf *lnor; /* extend */
|
||||
GPUVertBuf *edge_fac; /* extend */
|
||||
GPUVertBuf *weights; /* extend */
|
||||
GPUVertBuf *uv_tan;
|
||||
GPUVertBuf *uv;
|
||||
GPUVertBuf *tan;
|
||||
GPUVertBuf *vcol;
|
||||
GPUVertBuf *orco;
|
||||
/* Only for edit mode. */
|
||||
|
|
|
@ -1554,21 +1554,16 @@ const MeshExtract extract_lnor = {extract_lnor_init,
|
|||
/** \} */
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/** \name Extract UV / Tangent layers
|
||||
/** \name Extract UV layers
|
||||
* \{ */
|
||||
|
||||
static void *extract_uv_tan_init(const MeshRenderData *mr, void *buf)
|
||||
static void *extract_uv_init(const MeshRenderData *mr, void *buf)
|
||||
{
|
||||
GPUVertFormat format = {0};
|
||||
GPU_vertformat_deinterleave(&format);
|
||||
|
||||
CustomData *cd_ldata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->ldata : &mr->me->ldata;
|
||||
CustomData *cd_vdata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->vdata : &mr->me->vdata;
|
||||
uint32_t uv_layers = mr->cache->cd_used.uv;
|
||||
uint32_t tan_layers = mr->cache->cd_used.tan;
|
||||
float(*orco)[3] = CustomData_get_layer(cd_vdata, CD_ORCO);
|
||||
bool orco_allocated = false;
|
||||
const bool use_orco_tan = mr->cache->cd_used.tan_orco != 0;
|
||||
|
||||
for (int i = 0; i < MAX_MTFACE; i++) {
|
||||
if (uv_layers & (1 << i)) {
|
||||
|
@ -1599,6 +1594,65 @@ static void *extract_uv_tan_init(const MeshRenderData *mr, void *buf)
|
|||
}
|
||||
}
|
||||
|
||||
int v_len = mr->loop_len;
|
||||
if (format.attr_len == 0) {
|
||||
GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
|
||||
/* VBO will not be used, only allocate minimum of memory. */
|
||||
v_len = 1;
|
||||
}
|
||||
|
||||
GPUVertBuf *vbo = buf;
|
||||
GPU_vertbuf_init_with_format(vbo, &format);
|
||||
GPU_vertbuf_data_alloc(vbo, v_len);
|
||||
|
||||
float(*uv_data)[2] = (float(*)[2])vbo->data;
|
||||
for (int i = 0; i < MAX_MTFACE; i++) {
|
||||
if (uv_layers & (1 << i)) {
|
||||
if (mr->extract_type == MR_EXTRACT_BMESH) {
|
||||
int cd_ofs = CustomData_get_n_offset(cd_ldata, CD_MLOOPUV, i);
|
||||
BMIter f_iter, l_iter;
|
||||
BMFace *efa;
|
||||
BMLoop *loop;
|
||||
BM_ITER_MESH (efa, &f_iter, mr->bm, BM_FACES_OF_MESH) {
|
||||
BM_ITER_ELEM (loop, &l_iter, efa, BM_LOOPS_OF_FACE) {
|
||||
MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(loop, cd_ofs);
|
||||
memcpy(uv_data, luv->uv, sizeof(*uv_data));
|
||||
uv_data++;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
MLoopUV *layer_data = CustomData_get_layer_n(cd_ldata, CD_MLOOPUV, i);
|
||||
for (int l = 0; l < mr->loop_len; l++, uv_data++, layer_data++) {
|
||||
memcpy(uv_data, layer_data->uv, sizeof(*uv_data));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
const MeshExtract extract_uv = {
|
||||
extract_uv_init, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, false};
|
||||
/** \} */
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
/** \name Extract Tangent layers
|
||||
* \{ */
|
||||
|
||||
static void *extract_tan_init(const MeshRenderData *mr, void *buf)
|
||||
{
|
||||
GPUVertFormat format = {0};
|
||||
GPU_vertformat_deinterleave(&format);
|
||||
|
||||
CustomData *cd_ldata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->ldata : &mr->me->ldata;
|
||||
CustomData *cd_vdata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->vdata : &mr->me->vdata;
|
||||
uint32_t tan_layers = mr->cache->cd_used.tan;
|
||||
float(*orco)[3] = CustomData_get_layer(cd_vdata, CD_ORCO);
|
||||
bool orco_allocated = false;
|
||||
const bool use_orco_tan = mr->cache->cd_used.tan_orco != 0;
|
||||
|
||||
int tan_len = 0;
|
||||
char tangent_names[MAX_MTFACE][MAX_CUSTOMDATA_LAYER_NAME];
|
||||
|
||||
|
@ -1705,32 +1759,7 @@ static void *extract_uv_tan_init(const MeshRenderData *mr, void *buf)
|
|||
GPU_vertbuf_init_with_format(vbo, &format);
|
||||
GPU_vertbuf_data_alloc(vbo, v_len);
|
||||
|
||||
float(*uv_data)[2] = (float(*)[2])vbo->data;
|
||||
for (int i = 0; i < MAX_MTFACE; i++) {
|
||||
if (uv_layers & (1 << i)) {
|
||||
if (mr->extract_type == MR_EXTRACT_BMESH) {
|
||||
int cd_ofs = CustomData_get_n_offset(cd_ldata, CD_MLOOPUV, i);
|
||||
BMIter f_iter, l_iter;
|
||||
BMFace *efa;
|
||||
BMLoop *loop;
|
||||
BM_ITER_MESH (efa, &f_iter, mr->bm, BM_FACES_OF_MESH) {
|
||||
BM_ITER_ELEM (loop, &l_iter, efa, BM_LOOPS_OF_FACE) {
|
||||
MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(loop, cd_ofs);
|
||||
memcpy(uv_data, luv->uv, sizeof(*uv_data));
|
||||
uv_data++;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
MLoopUV *layer_data = CustomData_get_layer_n(cd_ldata, CD_MLOOPUV, i);
|
||||
for (int l = 0; l < mr->loop_len; l++, uv_data++, layer_data++) {
|
||||
memcpy(uv_data, layer_data->uv, sizeof(*uv_data));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/* Start tan_data after uv_data. */
|
||||
float(*tan_data)[4] = (float(*)[4])uv_data;
|
||||
float(*tan_data)[4] = (float(*)[4])vbo->data;
|
||||
for (int i = 0; i < tan_len; i++) {
|
||||
void *layer_data = CustomData_get_layer_named(cd_ldata, CD_TANGENT, tangent_names[i]);
|
||||
memcpy(tan_data, layer_data, sizeof(*tan_data) * mr->loop_len);
|
||||
|
@ -1746,18 +1775,18 @@ static void *extract_uv_tan_init(const MeshRenderData *mr, void *buf)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
const MeshExtract extract_uv_tan = {extract_uv_tan_init,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
MR_DATA_POLY_NOR | MR_DATA_TAN_LOOP_NOR | MR_DATA_LOOPTRI,
|
||||
false};
|
||||
const MeshExtract extract_tan = {extract_tan_init,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
MR_DATA_POLY_NOR | MR_DATA_TAN_LOOP_NOR | MR_DATA_LOOPTRI,
|
||||
false};
|
||||
|
||||
/** \} */
|
||||
|
||||
|
@ -4149,7 +4178,8 @@ void mesh_buffer_cache_create_requested(MeshBatchCache *cache,
|
|||
|
||||
TEST_ASSIGN(VBO, vbo, pos_nor);
|
||||
TEST_ASSIGN(VBO, vbo, lnor);
|
||||
TEST_ASSIGN(VBO, vbo, uv_tan);
|
||||
TEST_ASSIGN(VBO, vbo, uv);
|
||||
TEST_ASSIGN(VBO, vbo, tan);
|
||||
TEST_ASSIGN(VBO, vbo, vcol);
|
||||
TEST_ASSIGN(VBO, vbo, orco);
|
||||
TEST_ASSIGN(VBO, vbo, edge_fac);
|
||||
|
@ -4214,7 +4244,8 @@ void mesh_buffer_cache_create_requested(MeshBatchCache *cache,
|
|||
|
||||
EXTRACT(vbo, pos_nor);
|
||||
EXTRACT(vbo, lnor);
|
||||
EXTRACT(vbo, uv_tan);
|
||||
EXTRACT(vbo, uv);
|
||||
EXTRACT(vbo, tan);
|
||||
EXTRACT(vbo, vcol);
|
||||
EXTRACT(vbo, orco);
|
||||
EXTRACT(vbo, edge_fac);
|
||||
|
|
|
@ -464,7 +464,8 @@ static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
|
|||
FOREACH_MESH_BUFFER_CACHE(cache, mbufcache)
|
||||
{
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.pos_nor);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv_tan);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.tan);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.vcol);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.orco);
|
||||
}
|
||||
|
@ -492,7 +493,7 @@ static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
|
|||
{
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.stretch_angle);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.stretch_area);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv_tan);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.uv);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.edituv_data);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_uv);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.fdots_edituv_data);
|
||||
|
@ -1013,10 +1014,12 @@ void DRW_mesh_batch_cache_create_requested(
|
|||
if (cd_overlap == false) {
|
||||
FOREACH_MESH_BUFFER_CACHE(cache, mbuffercache)
|
||||
{
|
||||
if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv ||
|
||||
(cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
|
||||
if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv) {
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.uv);
|
||||
}
|
||||
if ((cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
|
||||
cache->cd_used.tan_orco != cache->cd_needed.tan_orco) {
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.uv_tan);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.tan);
|
||||
}
|
||||
if (cache->cd_used.orco != cache->cd_needed.orco) {
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.orco);
|
||||
|
@ -1050,7 +1053,7 @@ void DRW_mesh_batch_cache_create_requested(
|
|||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.edituv_data);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.stretch_angle);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.stretch_area);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.uv_tan);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.uv);
|
||||
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.fdots_uv);
|
||||
GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_tris);
|
||||
GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_lines);
|
||||
|
@ -1094,7 +1097,7 @@ void DRW_mesh_batch_cache_create_requested(
|
|||
DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.lnor);
|
||||
DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.pos_nor);
|
||||
if (cache->cd_used.uv != 0) {
|
||||
DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.uv_tan);
|
||||
DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.uv);
|
||||
}
|
||||
if (cache->cd_used.vcol != 0) {
|
||||
DRW_vbo_request(cache->batch.surface, &mbufcache->vbo.vcol);
|
||||
|
@ -1133,7 +1136,7 @@ void DRW_mesh_batch_cache_create_requested(
|
|||
DRW_ibo_request(cache->batch.wire_loops_uvs, &mbufcache->ibo.edituv_lines);
|
||||
/* For paint overlay. Active layer should have been queried. */
|
||||
if (cache->cd_used.uv != 0) {
|
||||
DRW_vbo_request(cache->batch.wire_loops_uvs, &mbufcache->vbo.uv_tan);
|
||||
DRW_vbo_request(cache->batch.wire_loops_uvs, &mbufcache->vbo.uv);
|
||||
}
|
||||
}
|
||||
if (DRW_batch_requested(cache->batch.edit_mesh_analysis, GPU_PRIM_TRIS)) {
|
||||
|
@ -1149,9 +1152,11 @@ void DRW_mesh_batch_cache_create_requested(
|
|||
/* Order matters. First ones override latest vbos' attribs. */
|
||||
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.lnor);
|
||||
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.pos_nor);
|
||||
if ((cache->cd_used.uv != 0) || (cache->cd_used.tan != 0) ||
|
||||
(cache->cd_used.tan_orco != 0)) {
|
||||
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.uv_tan);
|
||||
if (cache->cd_used.uv != 0) {
|
||||
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.uv);
|
||||
}
|
||||
if ((cache->cd_used.tan != 0) || (cache->cd_used.tan_orco != 0)) {
|
||||
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.tan);
|
||||
}
|
||||
if (cache->cd_used.vcol != 0) {
|
||||
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.vcol);
|
||||
|
@ -1227,29 +1232,29 @@ void DRW_mesh_batch_cache_create_requested(
|
|||
/* Edit UV */
|
||||
if (DRW_batch_requested(cache->batch.edituv_faces, GPU_PRIM_TRIS)) {
|
||||
DRW_ibo_request(cache->batch.edituv_faces, &mbufcache->ibo.edituv_tris);
|
||||
DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.uv_tan);
|
||||
DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.uv);
|
||||
DRW_vbo_request(cache->batch.edituv_faces, &mbufcache->vbo.edituv_data);
|
||||
}
|
||||
if (DRW_batch_requested(cache->batch.edituv_faces_strech_area, GPU_PRIM_TRIS)) {
|
||||
DRW_ibo_request(cache->batch.edituv_faces_strech_area, &mbufcache->ibo.edituv_tris);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_area, &mbufcache->vbo.uv_tan);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_area, &mbufcache->vbo.uv);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_area, &mbufcache->vbo.edituv_data);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_area, &mbufcache->vbo.stretch_area);
|
||||
}
|
||||
if (DRW_batch_requested(cache->batch.edituv_faces_strech_angle, GPU_PRIM_TRIS)) {
|
||||
DRW_ibo_request(cache->batch.edituv_faces_strech_angle, &mbufcache->ibo.edituv_tris);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &mbufcache->vbo.uv_tan);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &mbufcache->vbo.uv);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &mbufcache->vbo.edituv_data);
|
||||
DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &mbufcache->vbo.stretch_angle);
|
||||
}
|
||||
if (DRW_batch_requested(cache->batch.edituv_edges, GPU_PRIM_LINES)) {
|
||||
DRW_ibo_request(cache->batch.edituv_edges, &mbufcache->ibo.edituv_lines);
|
||||
DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.uv_tan);
|
||||
DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.uv);
|
||||
DRW_vbo_request(cache->batch.edituv_edges, &mbufcache->vbo.edituv_data);
|
||||
}
|
||||
if (DRW_batch_requested(cache->batch.edituv_verts, GPU_PRIM_POINTS)) {
|
||||
DRW_ibo_request(cache->batch.edituv_verts, &mbufcache->ibo.edituv_points);
|
||||
DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.uv_tan);
|
||||
DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.uv);
|
||||
DRW_vbo_request(cache->batch.edituv_verts, &mbufcache->vbo.edituv_data);
|
||||
}
|
||||
if (DRW_batch_requested(cache->batch.edituv_fdots, GPU_PRIM_POINTS)) {
|
||||
|
|
|
@ -40,7 +40,7 @@ typedef enum {
|
|||
GPU_BATCH_READY_TO_DRAW,
|
||||
} GPUBatchPhase;
|
||||
|
||||
#define GPU_BATCH_VBO_MAX_LEN 5
|
||||
#define GPU_BATCH_VBO_MAX_LEN 6
|
||||
#define GPU_BATCH_VAO_STATIC_LEN 3
|
||||
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
|
||||
|
||||
|
|
Loading…
Reference in New Issue