GPUVertBuf: Make GPUVertBuf private to the GPU module

This is just a cleanup to isolate the internals of the vertbuf.
This adds some getters to avoid refactor of existing code.
This commit is contained in:
Clément Foucault 2020-09-06 16:40:07 +02:00
parent 98fc3f263c
commit 7ffff04e49
24 changed files with 211 additions and 116 deletions

View File

@ -480,7 +480,7 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
for (int i = 0; i < MB_CURR; i++) {
GPUVertBuf *vbo = mb_geom->vbo[i];
if (vbo && batch) {
if (vbo->vertex_len != batch->verts[0]->vertex_len) {
if (GPU_vertbuf_get_vertex_len(vbo) != GPU_vertbuf_get_vertex_len(batch->verts[0])) {
/* Vertex count mismatch, disable deform motion blur. */
mb_geom->use_deform = false;
}
@ -506,9 +506,9 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
/* Perform a copy to avoid losing it after RE_engine_frame_set(). */
mb_geom->vbo[mb_step] = vbo = GPU_vertbuf_duplicate(vbo);
/* Find and replace "pos" attrib name. */
int attrib_id = GPU_vertformat_attr_id_get(&vbo->format, "pos");
GPU_vertformat_attr_rename(
&vbo->format, attrib_id, (mb_step == MB_PREV) ? "prv" : "nxt");
GPUVertFormat *format = (GPUVertFormat *)GPU_vertbuf_get_format(vbo);
int attrib_id = GPU_vertformat_attr_id_get(format, "pos");
GPU_vertformat_attr_rename(format, attrib_id, (mb_step == MB_PREV) ? "prv" : "nxt");
}
}
break;
@ -573,8 +573,9 @@ void EEVEE_motion_blur_swap_data(EEVEE_Data *vedata)
if (mb_geom->vbo[MB_NEXT]) {
GPUVertBuf *vbo = mb_geom->vbo[MB_NEXT];
int attrib_id = GPU_vertformat_attr_id_get(&vbo->format, "nxt");
GPU_vertformat_attr_rename(&vbo->format, attrib_id, "prv");
GPUVertFormat *format = (GPUVertFormat *)GPU_vertbuf_get_format(vbo);
int attrib_id = GPU_vertformat_attr_id_get(format, "nxt");
GPU_vertformat_attr_rename(format, attrib_id, "prv");
}
break;

View File

@ -68,7 +68,9 @@ static GPUVertBuf *mpath_vbo_get(bMotionPath *mpath)
mpath->points_vbo = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(mpath->points_vbo, mpath->length);
/* meh... a useless memcpy. */
memcpy(mpath->points_vbo->data, mpath->points, sizeof(bMotionPathVert) * mpath->length);
memcpy(GPU_vertbuf_get_data(mpath->points_vbo),
mpath->points,
sizeof(bMotionPathVert) * mpath->length);
}
return mpath->points_vbo;
}

View File

@ -1923,7 +1923,7 @@ static void *extract_pos_nor_init(const MeshRenderData *mr,
/* Pack normals per vert, reduce amount of computation. */
size_t packed_nor_len = sizeof(GPUPackedNormal) * mr->vert_len;
MeshExtract_PosNor_Data *data = MEM_mallocN(sizeof(*data) + packed_nor_len, __func__);
data->vbo_data = (PosNorLoop *)vbo->data;
data->vbo_data = (PosNorLoop *)GPU_vertbuf_get_data(vbo);
/* Quicker than doing it for each loop. */
if (mr->extract_type == MR_EXTRACT_BMESH) {
@ -2096,7 +2096,7 @@ static void *extract_lnor_hq_init(const MeshRenderData *mr,
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->loop_len);
return vbo->data;
return GPU_vertbuf_get_data(vbo);
}
static void extract_lnor_hq_iter_poly_bm(const MeshRenderData *mr,
@ -2184,7 +2184,7 @@ static void *extract_lnor_init(const MeshRenderData *mr,
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->loop_len);
return vbo->data;
return GPU_vertbuf_get_data(vbo);
}
static void extract_lnor_iter_poly_bm(const MeshRenderData *mr,
@ -2319,7 +2319,7 @@ static void *extract_uv_init(const MeshRenderData *mr, struct MeshBatchCache *ca
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, v_len);
float(*uv_data)[2] = (float(*)[2])vbo->data;
float(*uv_data)[2] = (float(*)[2])GPU_vertbuf_get_data(vbo);
for (int i = 0; i < MAX_MTFACE; i++) {
if (uv_layers & (1 << i)) {
if (mr->extract_type == MR_EXTRACT_BMESH) {
@ -2487,7 +2487,7 @@ static void extract_tan_ex(const MeshRenderData *mr,
GPU_vertbuf_data_alloc(vbo, v_len);
if (do_hq) {
short(*tan_data)[4] = (short(*)[4])vbo->data;
short(*tan_data)[4] = (short(*)[4])GPU_vertbuf_get_data(vbo);
for (int i = 0; i < tan_len; i++) {
const char *name = tangent_names[i];
float(*layer_data)[4] = (float(*)[4])CustomData_get_layer_named(
@ -2508,7 +2508,7 @@ static void extract_tan_ex(const MeshRenderData *mr,
}
}
else {
GPUPackedNormal *tan_data = (GPUPackedNormal *)vbo->data;
GPUPackedNormal *tan_data = (GPUPackedNormal *)GPU_vertbuf_get_data(vbo);
for (int i = 0; i < tan_len; i++) {
const char *name = tangent_names[i];
float(*layer_data)[4] = (float(*)[4])CustomData_get_layer_named(
@ -2639,7 +2639,7 @@ static void *extract_vcol_init(const MeshRenderData *mr, struct MeshBatchCache *
ushort r, g, b, a;
} gpuMeshVcol;
gpuMeshVcol *vcol_data = (gpuMeshVcol *)vbo->data;
gpuMeshVcol *vcol_data = (gpuMeshVcol *)GPU_vertbuf_get_data(vbo);
MLoop *loops = CustomData_get_layer(cd_ldata, CD_MLOOP);
for (int i = 0; i < MAX_MCOL; i++) {
@ -2743,7 +2743,7 @@ static void *extract_orco_init(const MeshRenderData *mr,
CustomData *cd_vdata = &mr->me->vdata;
MeshExtract_Orco_Data *data = MEM_mallocN(sizeof(*data), __func__);
data->vbo_data = (float(*)[4])vbo->data;
data->vbo_data = (float(*)[4])GPU_vertbuf_get_data(vbo);
data->orco = CustomData_get_layer(cd_vdata, CD_ORCO);
/* Make sure `orco` layer was requested only if needed! */
BLI_assert(data->orco);
@ -2859,7 +2859,7 @@ static void *extract_edge_fac_init(const MeshRenderData *mr,
data->use_edge_render = true;
}
data->vbo_data = vbo->data;
data->vbo_data = GPU_vertbuf_get_data(vbo);
return data;
}
@ -2969,14 +2969,14 @@ static void extract_edge_fac_finish(const MeshRenderData *mr,
GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
}
/* We keep the data reference in data->vbo_data. */
vbo->data = NULL;
data->vbo_data = GPU_vertbuf_steal_data(vbo);
GPU_vertbuf_clear(vbo);
int buf_len = mr->loop_len + mr->loop_loose_len;
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, buf_len);
float *fdata = (float *)vbo->data;
float *fdata = (float *)GPU_vertbuf_get_data(vbo);
for (int ml_index = 0; ml_index < buf_len; ml_index++, fdata++) {
*fdata = data->vbo_data[ml_index] / 255.0f;
}
@ -3075,7 +3075,7 @@ static void *extract_weights_init(const MeshRenderData *mr,
GPU_vertbuf_data_alloc(vbo, mr->loop_len + mr->loop_loose_len);
MeshExtract_Weight_Data *data = MEM_callocN(sizeof(*data), __func__);
data->vbo_data = (float *)vbo->data;
data->vbo_data = (float *)GPU_vertbuf_get_data(vbo);
data->wstate = &cache->weight_state;
if (data->wstate->defgroup_active == -1) {
@ -3313,7 +3313,7 @@ static void *extract_edit_data_init(const MeshRenderData *mr,
GPUVertBuf *vbo = buf;
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->loop_len + mr->loop_loose_len);
return vbo->data;
return GPU_vertbuf_get_data(vbo);
}
static void extract_edit_data_iter_poly_bm(const MeshRenderData *mr,
@ -3471,7 +3471,7 @@ static void *extract_edituv_data_init(const MeshRenderData *mr,
CustomData *cd_ldata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->ldata : &mr->me->ldata;
MeshExtract_EditUVData_Data *data = MEM_callocN(sizeof(*data), __func__);
data->vbo_data = (EditLoopData *)vbo->data;
data->vbo_data = (EditLoopData *)GPU_vertbuf_get_data(vbo);
data->cd_ofs = CustomData_get_offset(cd_ldata, CD_MLOOPUV);
return data;
}
@ -3635,7 +3635,7 @@ static void mesh_stretch_area_finish(const MeshRenderData *mr,
/* Copy face data for each loop. */
GPUVertBuf *vbo = buf;
uint16_t *loop_stretch = (uint16_t *)vbo->data;
uint16_t *loop_stretch = (uint16_t *)GPU_vertbuf_get_data(vbo);
if (mr->extract_type == MR_EXTRACT_BMESH) {
BMFace *efa;
@ -3746,7 +3746,7 @@ static void *extract_stretch_angle_init(const MeshRenderData *mr,
GPU_vertbuf_data_alloc(vbo, mr->loop_len);
MeshExtract_StretchAngle_Data *data = MEM_callocN(sizeof(*data), __func__);
data->vbo_data = (UVStretchAngle *)vbo->data;
data->vbo_data = (UVStretchAngle *)GPU_vertbuf_get_data(vbo);
/* Special iterator needed to save about half of the computing cost. */
if (mr->extract_type == MR_EXTRACT_BMESH) {
@ -4446,7 +4446,7 @@ static void extract_mesh_analysis_finish(const MeshRenderData *mr,
BLI_assert(mr->edit_bmesh);
GPUVertBuf *vbo = buf;
float *l_weight = (float *)vbo->data;
float *l_weight = (float *)GPU_vertbuf_get_data(vbo);
switch (mr->toolsettings->statvis.type) {
case SCE_STATVIS_OVERHANG:
@ -4493,7 +4493,7 @@ static void *extract_fdots_pos_init(const MeshRenderData *mr,
GPUVertBuf *vbo = buf;
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->poly_len);
return vbo->data;
return GPU_vertbuf_get_data(vbo);
}
static void extract_fdots_pos_iter_poly_bm(const MeshRenderData *mr,
@ -4592,7 +4592,7 @@ static void extract_fdots_nor_finish(const MeshRenderData *mr,
{
static float invalid_normal[3] = {0.0f, 0.0f, 0.0f};
GPUVertBuf *vbo = buf;
GPUPackedNormal *nor = (GPUPackedNormal *)vbo->data;
GPUPackedNormal *nor = (GPUPackedNormal *)GPU_vertbuf_get_data(vbo);
BMFace *efa;
/* Quicker than doing it for each loop. */
@ -4669,11 +4669,11 @@ static void *extract_fdots_uv_init(const MeshRenderData *mr,
if (!mr->use_subsurf_fdots) {
/* Clear so we can accumulate on it. */
memset(vbo->data, 0x0, mr->poly_len * vbo->format.stride);
memset(GPU_vertbuf_get_data(vbo), 0x0, mr->poly_len * GPU_vertbuf_get_format(vbo)->stride);
}
MeshExtract_FdotUV_Data *data = MEM_callocN(sizeof(*data), __func__);
data->vbo_data = (float(*)[2])vbo->data;
data->vbo_data = (float(*)[2])GPU_vertbuf_get_data(vbo);
if (mr->extract_type == MR_EXTRACT_BMESH) {
data->cd_ofs = CustomData_get_offset(&mr->bm->ldata, CD_MLOOPUV);
@ -4763,7 +4763,7 @@ static void *extract_fdots_edituv_data_init(const MeshRenderData *mr,
GPU_vertbuf_data_alloc(vbo, mr->poly_len);
MeshExtract_EditUVFdotData_Data *data = MEM_callocN(sizeof(*data), __func__);
data->vbo_data = (EditLoopData *)vbo->data;
data->vbo_data = (EditLoopData *)GPU_vertbuf_get_data(vbo);
data->cd_ofs = CustomData_get_offset(&mr->bm->ldata, CD_MLOOPUV);
return data;
}
@ -4842,7 +4842,7 @@ static void *extract_skin_roots_init(const MeshRenderData *mr,
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->bm->totvert);
SkinRootData *vbo_data = (SkinRootData *)vbo->data;
SkinRootData *vbo_data = (SkinRootData *)GPU_vertbuf_get_data(vbo);
int root_len = 0;
int cd_ofs = CustomData_get_offset(&mr->bm->vdata, CD_MVERT_SKIN);
@ -4889,7 +4889,7 @@ static void *extract_select_idx_init(const MeshRenderData *mr,
GPUVertBuf *vbo = buf;
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->loop_len + mr->loop_loose_len);
return vbo->data;
return GPU_vertbuf_get_data(vbo);
}
/* TODO Use #glVertexID to get loop index and use the data structure on the CPU to retrieve the
@ -5083,7 +5083,7 @@ static void *extract_select_fdot_idx_init(const MeshRenderData *mr,
GPUVertBuf *vbo = buf;
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, mr->poly_len);
return vbo->data;
return GPU_vertbuf_get_data(vbo);
}
static void extract_fdot_idx_iter_poly_bm(const MeshRenderData *mr,

View File

@ -236,8 +236,8 @@ void DRW_vertbuf_create_wiredata(GPUVertBuf *vbo, const int vert_len)
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, vert_len);
if (vbo->format.stride == 1) {
memset(vbo->data, 0xFF, (size_t)vert_len);
if (GPU_vertbuf_get_format(vbo)->stride == 1) {
memset(GPU_vertbuf_get_data(vbo), 0xFF, (size_t)vert_len);
}
else {
GPUVertBufRaw wd_step;

View File

@ -395,8 +395,8 @@ static void gpencil_batches_ensure(Object *ob, GpencilBatchCache *cache, int cfr
/* Add extra space at the end of the buffer because of quad load. */
GPU_vertbuf_data_alloc(cache->vbo, iter.vert_len + 2);
GPU_vertbuf_data_alloc(cache->vbo_col, iter.vert_len + 2);
iter.verts = (gpStrokeVert *)cache->vbo->data;
iter.cols = (gpColorVert *)cache->vbo_col->data;
iter.verts = (gpStrokeVert *)GPU_vertbuf_get_data(cache->vbo);
iter.cols = (gpColorVert *)GPU_vertbuf_get_data(cache->vbo_col);
/* Create IBO. */
GPU_indexbuf_init(&iter.ibo, GPU_PRIM_TRIS, iter.tri_len, iter.vert_len);
@ -471,7 +471,8 @@ GPUBatch *DRW_cache_gpencil_face_wireframe_get(Object *ob)
.ibo = {0},
};
GPU_indexbuf_init_ex(&iter.ibo, GPU_PRIM_LINE_STRIP, vbo->vertex_len, vbo->vertex_len);
uint vert_len = GPU_vertbuf_get_vertex_len(vbo);
GPU_indexbuf_init_ex(&iter.ibo, GPU_PRIM_LINE_STRIP, vert_len, vert_len);
/* IMPORTANT: Keep in sync with gpencil_edit_batches_ensure() */
bool do_onion = true;
@ -558,8 +559,8 @@ static void gpencil_sbuffer_stroke_ensure(bGPdata *gpd, bool do_stroke, bool do_
/* Add extra space at the end (and start) of the buffer because of quad load and cyclic. */
GPU_vertbuf_data_alloc(vbo, 1 + vert_len + 1 + 2);
GPU_vertbuf_data_alloc(vbo_col, 1 + vert_len + 1 + 2);
gpStrokeVert *verts = (gpStrokeVert *)vbo->data;
gpColorVert *cols = (gpColorVert *)vbo_col->data;
gpStrokeVert *verts = (gpStrokeVert *)GPU_vertbuf_get_data(vbo);
gpColorVert *cols = (gpColorVert *)GPU_vertbuf_get_data(vbo_col);
/* Fill buffers with data. */
gpencil_buffer_add_stroke(verts, cols, gps);
@ -711,7 +712,7 @@ static void gpencil_edit_batches_ensure(Object *ob, GpencilBatchCache *cache, in
/* Vertex counting has already been done for cache->vbo. */
BLI_assert(cache->vbo);
int vert_len = cache->vbo->vertex_len;
int vert_len = GPU_vertbuf_get_vertex_len(cache->vbo);
gpEditIterData iter;
iter.vgindex = ob->actdef - 1;
@ -724,7 +725,7 @@ static void gpencil_edit_batches_ensure(Object *ob, GpencilBatchCache *cache, in
cache->edit_vbo = GPU_vertbuf_create_with_format(format);
/* Add extra space at the end of the buffer because of quad load. */
GPU_vertbuf_data_alloc(cache->edit_vbo, vert_len);
iter.verts = (gpEditVert *)cache->edit_vbo->data;
iter.verts = (gpEditVert *)GPU_vertbuf_get_data(cache->edit_vbo);
/* Fill buffers with data. */
BKE_gpencil_visible_stroke_iter(

View File

@ -176,7 +176,7 @@ static void pointcloud_batch_cache_ensure_pos(Object *ob, PointCloudBatchCache *
GPU_vertbuf_data_alloc(cache->pos, pointcloud->totpoint);
if (has_radius) {
float(*vbo_data)[4] = (float(*)[4])cache->pos->data;
float(*vbo_data)[4] = (float(*)[4])GPU_vertbuf_get_data(cache->pos);
for (int i = 0; i < pointcloud->totpoint; i++) {
copy_v3_v3(vbo_data[i], pointcloud->co[i]);
/* TODO(fclem) remove multiplication here. Here only for keeping the size correct for now. */

View File

@ -89,21 +89,12 @@ BLI_INLINE void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
*vbo = GPU_vertbuf_create(GPU_USAGE_STATIC);
}
if (batch != NULL) {
/* HACK set first vbo if not init. */
if (batch->verts[0] == NULL) {
GPU_batch_vertbuf_add(batch, *vbo);
}
else {
/* HACK: bypass assert */
int vbo_vert_len = (*vbo)->vertex_len;
(*vbo)->vertex_len = batch->verts[0]->vertex_len;
GPU_batch_vertbuf_add(batch, *vbo);
(*vbo)->vertex_len = vbo_vert_len;
}
/* HACK we set vbos that may not yet be valid. */
GPU_batch_vertbuf_add(batch, *vbo);
}
}
BLI_INLINE bool DRW_vbo_requested(GPUVertBuf *vbo)
{
return (vbo != NULL && vbo->format.attr_len == 0);
return (vbo != NULL && (GPU_vertbuf_get_status(vbo) & GPU_VERTBUF_INIT) == 0);
}

View File

@ -150,7 +150,8 @@ GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
}
GPUBatch *batch = handle->batch;
bool instancer_compat = buf ? ((batch->inst[0] == buf) && (buf->vbo_id != 0)) :
bool instancer_compat = buf ? ((batch->inst[0] == buf) &&
(GPU_vertbuf_get_status(buf) & GPU_VERTBUF_DATA_UPLOADED)) :
((batch->inst[0] == instancer->verts[0]) &&
(batch->inst[1] == instancer->verts[1]));
bool is_compatible = (batch->prim_type == geom->prim_type) && instancer_compat &&
@ -184,8 +185,8 @@ GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
}
GPUBatch *batch = *batch_ptr;
bool is_compatible = (batch->verts[0] == buf) && (buf->vbo_id != 0) &&
(batch->prim_type == prim_type);
bool is_compatible = (batch->verts[0] == buf) && (batch->prim_type == prim_type) &&
(GPU_vertbuf_get_status(buf) & GPU_VERTBUF_DATA_UPLOADED);
if (!is_compatible) {
GPU_batch_clear(batch);
GPU_batch_init(batch, prim_type, buf, NULL);
@ -220,7 +221,7 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
if (handle->vert_len != NULL) {
uint vert_len = *(handle->vert_len);
uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
if (target_buf_size < handle->buf->vertex_alloc) {
if (target_buf_size < GPU_vertbuf_get_vertex_alloc(handle->buf)) {
GPU_vertbuf_data_resize(handle->buf, target_buf_size);
}
GPU_vertbuf_data_len_set(handle->buf, vert_len);

View File

@ -1115,7 +1115,7 @@ DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup,
void DRW_buffer_add_entry_struct(DRWCallBuffer *callbuf, const void *data)
{
GPUVertBuf *buf = callbuf->buf;
const bool resize = (callbuf->count == buf->vertex_alloc);
const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
if (UNLIKELY(resize)) {
GPU_vertbuf_data_resize(buf, callbuf->count + DRW_BUFFER_VERTS_CHUNK);
@ -1136,9 +1136,9 @@ void DRW_buffer_add_entry_struct(DRWCallBuffer *callbuf, const void *data)
void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
{
GPUVertBuf *buf = callbuf->buf;
const bool resize = (callbuf->count == buf->vertex_alloc);
const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
BLI_assert(attr_len == buf->format.attr_len);
BLI_assert(attr_len == GPU_vertbuf_get_format(buf)->attr_len);
UNUSED_VARS_NDEBUG(attr_len);
if (UNLIKELY(resize)) {

View File

@ -646,9 +646,10 @@ BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
const bool is_instancing = (batch->inst[0] != NULL);
int start = 0;
int count = 1;
int tot = is_instancing ? batch->inst[0]->vertex_len : batch->verts[0]->vertex_len;
int tot = is_instancing ? GPU_vertbuf_get_vertex_len(batch->inst[0]) :
GPU_vertbuf_get_vertex_len(batch->verts[0]);
/* Hack : get "vbo" data without actually drawing. */
int *select_id = (void *)state->select_buf->data;
int *select_id = (void *)GPU_vertbuf_get_data(state->select_buf);
/* Batching */
if (!is_instancing) {

View File

@ -148,6 +148,7 @@ set(SRC
intern/gpu_state_private.hh
intern/gpu_texture_private.hh
intern/gpu_uniform_buffer_private.hh
intern/gpu_vertex_buffer_private.hh
intern/gpu_vertex_format_private.h
opengl/gl_backend.hh

View File

@ -25,8 +25,23 @@
#pragma once
#include "BLI_utildefines.h"
#include "GPU_vertex_format.h"
typedef enum {
/** Initial state. */
GPU_VERTBUF_INVALID = 0,
/** Was init with a vertex format. */
GPU_VERTBUF_INIT = (1 << 0),
/** Data has been touched and need to be reuploaded. */
GPU_VERTBUF_DATA_DIRTY = (1 << 1),
/** The buffer has been created inside GPU memory. */
GPU_VERTBUF_DATA_UPLOADED = (1 << 2),
} GPUVertBufStatus;
ENUM_OPERATORS(GPUVertBufStatus)
#ifdef __cplusplus
extern "C" {
#endif
@ -40,8 +55,6 @@ extern "C" {
* 4) GPU_vertbuf_attr_fill(verts, pos, application_pos_buffer)
*/
/* Is GPUVertBuf always used as part of a GPUBatch? */
typedef enum {
/* can be extended to support more types */
GPU_USAGE_STREAM,
@ -49,22 +62,7 @@ typedef enum {
GPU_USAGE_DYNAMIC,
} GPUUsageType;
typedef struct GPUVertBuf {
GPUVertFormat format;
/** Number of verts we want to draw. */
uint vertex_len;
/** Number of verts data. */
uint vertex_alloc;
/** 0 indicates not yet allocated. */
uint32_t vbo_id;
/** Usage hint for GL optimisation. */
GPUUsageType usage;
/** This counter will only avoid freeing the GPUVertBuf, not the data. */
char handle_refcount;
/** Data has been touched and need to be reuploaded to GPU. */
bool dirty;
uchar *data; /* NULL indicates data in VRAM (unmapped) */
} GPUVertBuf;
typedef struct GPUVertBuf GPUVertBuf;
GPUVertBuf *GPU_vertbuf_create(GPUUsageType);
GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *, GPUUsageType);
@ -135,6 +133,14 @@ GPU_INLINE uint GPU_vertbuf_raw_used(GPUVertBufRaw *a)
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *, uint a_idx, GPUVertBufRaw *access);
void *GPU_vertbuf_steal_data(GPUVertBuf *verts);
void *GPU_vertbuf_get_data(const GPUVertBuf *verts);
const GPUVertFormat *GPU_vertbuf_get_format(const GPUVertBuf *verts);
uint GPU_vertbuf_get_vertex_alloc(const GPUVertBuf *verts);
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts);
GPUVertBufStatus GPU_vertbuf_get_status(const GPUVertBuf *verts);
void GPU_vertbuf_use(GPUVertBuf *);
/* Metrics */

View File

@ -36,11 +36,12 @@
#include "GPU_shader.h"
#include "gpu_backend.hh"
#include "gpu_batch_private.hh"
#include "gpu_context_private.hh"
#include "gpu_index_buffer_private.hh"
#include "gpu_shader_private.hh"
#include "gpu_vertex_format_private.h"
#include "gpu_vertex_buffer_private.hh"
#include "gpu_batch_private.hh"
#include <string.h>
@ -198,7 +199,8 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
if (batch->verts[v] == NULL) {
/* for now all VertexBuffers must have same vertex_len */
if (batch->verts[0] != NULL) {
BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
/* This is an issue for the HACK inside DRW_vbo_request(). */
// BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
}
batch->verts[v] = verts;
SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_VBO << v));

View File

@ -153,13 +153,14 @@ static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, uint vert_len)
/* Initialize vertex buffer (match 'VertexBufferFormat'). */
buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&g_vbo_id.format, GPU_USAGE_STATIC);
}
if (buffers->vert_buf->data == NULL || buffers->vert_buf->vertex_len != vert_len) {
if (GPU_vertbuf_get_data(buffers->vert_buf) == NULL ||
GPU_vertbuf_get_vertex_len(buffers->vert_buf) != vert_len) {
/* Allocate buffer if not allocated yet or size changed. */
GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
}
#endif
return buffers->vert_buf->data != NULL;
return GPU_vertbuf_get_data(buffers->vert_buf) != NULL;
}
static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
@ -1119,7 +1120,7 @@ void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers)
}
/* Force flushing to the GPU. */
if (buffers->vert_buf && buffers->vert_buf->data) {
if (buffers->vert_buf && GPU_vertbuf_get_data(buffers->vert_buf)) {
GPU_vertbuf_use(buffers->vert_buf);
}
}

View File

@ -34,6 +34,7 @@
#include "gpu_context_private.hh"
#include "gpu_immediate_private.hh"
#include "gpu_shader_private.hh"
#include "gpu_vertex_buffer_private.hh"
#include "gpu_vertex_format_private.h"
using namespace blender::gpu;

View File

@ -23,7 +23,7 @@
#include "BLI_span.hh"
#include "GPU_shader.h"
#include "GPU_vertex_buffer.h"
#include "gpu_vertex_buffer_private.hh"
#include "gpu_shader_interface.hh"
namespace blender {

View File

@ -29,6 +29,7 @@
#include "gpu_backend.hh"
#include "gpu_context_private.hh"
#include "gpu_framebuffer_private.hh"
#include "gpu_vertex_buffer_private.hh"
#include "gpu_texture_private.hh"

View File

@ -25,13 +25,10 @@
#include "MEM_guardedalloc.h"
#include "GPU_vertex_buffer.h"
#include "gpu_context_private.hh"
#include "gpu_vertex_format_private.h"
#include <stdlib.h>
#include <string.h>
#include "gpu_vertex_buffer_private.hh"
#define KEEP_SINGLE_COPY 1
@ -66,6 +63,7 @@ GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *format, GPUUs
if (!format->packed) {
VertexFormat_pack(&verts->format);
}
verts->flag |= GPU_VERTBUF_INIT;
return verts;
/* this function might seem redundant, but there is potential for memory savings here... */
@ -76,7 +74,7 @@ void GPU_vertbuf_init(GPUVertBuf *verts, GPUUsageType usage)
{
memset(verts, 0, sizeof(GPUVertBuf));
verts->usage = usage;
verts->dirty = true;
verts->flag = GPU_VERTBUF_DATA_DIRTY;
verts->handle_refcount = 1;
}
@ -89,6 +87,7 @@ void GPU_vertbuf_init_with_format_ex(GPUVertBuf *verts,
if (!format->packed) {
VertexFormat_pack(&verts->format);
}
verts->flag |= GPU_VERTBUF_INIT;
}
GPUVertBuf *GPU_vertbuf_duplicate(GPUVertBuf *verts)
@ -181,7 +180,7 @@ void GPU_vertbuf_data_alloc(GPUVertBuf *verts, uint v_len)
uint new_size = vertex_buffer_size(&verts->format, v_len);
vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
verts->dirty = true;
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
verts->vertex_len = verts->vertex_alloc = v_len;
verts->data = (uchar *)MEM_mallocN(sizeof(GLubyte) * GPU_vertbuf_size_get(verts), __func__);
}
@ -198,7 +197,7 @@ void GPU_vertbuf_data_resize(GPUVertBuf *verts, uint v_len)
uint new_size = vertex_buffer_size(&verts->format, v_len);
vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
verts->dirty = true;
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
verts->vertex_len = verts->vertex_alloc = v_len;
verts->data = (uchar *)MEM_reallocN(verts->data, sizeof(GLubyte) * GPU_vertbuf_size_get(verts));
}
@ -230,7 +229,7 @@ void GPU_vertbuf_attr_set(GPUVertBuf *verts, uint a_idx, uint v_idx, const void
assert(v_idx < verts->vertex_alloc);
assert(verts->data != NULL);
#endif
verts->dirty = true;
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
memcpy((GLubyte *)verts->data + a->offset + v_idx * format->stride, data, a->sz);
}
@ -256,7 +255,7 @@ void GPU_vertbuf_vert_set(GPUVertBuf *verts, uint v_idx, const void *data)
assert(v_idx < verts->vertex_alloc);
assert(verts->data != NULL);
#endif
verts->dirty = true;
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
memcpy((GLubyte *)verts->data + v_idx * format->stride, data, format->stride);
}
@ -269,7 +268,7 @@ void GPU_vertbuf_attr_fill_stride(GPUVertBuf *verts, uint a_idx, uint stride, co
assert(a_idx < format->attr_len);
assert(verts->data != NULL);
#endif
verts->dirty = true;
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
const uint vertex_len = verts->vertex_len;
if (format->attr_len == 1 && stride == format->stride) {
@ -296,7 +295,7 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts, uint a_idx, GPUVertBufRaw
assert(verts->data != NULL);
#endif
verts->dirty = true;
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
access->size = a->sz;
access->stride = format->stride;
@ -307,6 +306,44 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts, uint a_idx, GPUVertBufRaw
#endif
}
/* NOTE: Be careful when using this. The data needs to match the expected format. */
/* Read access to the CPU-side data buffer of \a verts. */
void *GPU_vertbuf_get_data(const GPUVertBuf *verts)
{
/* TODO Assert that the format has no padding. */
/* NULL indicates the data lives only in VRAM (see GPUVertBuf::data). */
return verts->data;
}
/* Returns the data buffer and set it to null internally to avoid freeing.
 * NOTE: Be careful when using this. The data needs to match the expected format. */
void *GPU_vertbuf_steal_data(GPUVertBuf *verts)
{
/* TODO Assert that the format has no padding. */
/* Stealing from a buffer whose data already moved to VRAM is a misuse. */
BLI_assert(verts->data);
void *data = verts->data;
/* Ownership is transferred to the caller; clearing prevents a double free. */
verts->data = nullptr;
return data;
}
/* Getter for the vertex format of \a verts (returned const; callers that need to
 * mutate it cast away const, e.g. for GPU_vertformat_attr_rename). */
const GPUVertFormat *GPU_vertbuf_get_format(const GPUVertBuf *verts)
{
return &verts->format;
}
/* Number of vertices the buffer was allocated for (may exceed the draw count). */
uint GPU_vertbuf_get_vertex_alloc(const GPUVertBuf *verts)
{
return verts->vertex_alloc;
}
/* Number of vertices to draw (see GPUVertBuf::vertex_len). */
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
{
return verts->vertex_len;
}
/* Current status flags of the buffer (init / data dirty / data uploaded). */
GPUVertBufStatus GPU_vertbuf_get_status(const GPUVertBuf *verts)
{
return verts->flag;
}
static void VertBuffer_upload_data(GPUVertBuf *verts)
{
uint buffer_sz = GPU_vertbuf_size_get(verts);
@ -320,7 +357,8 @@ static void VertBuffer_upload_data(GPUVertBuf *verts)
MEM_freeN(verts->data);
verts->data = NULL;
}
verts->dirty = false;
verts->flag &= ~GPU_VERTBUF_DATA_DIRTY;
verts->flag |= GPU_VERTBUF_DATA_UPLOADED;
}
void GPU_vertbuf_use(GPUVertBuf *verts)
@ -330,7 +368,7 @@ void GPU_vertbuf_use(GPUVertBuf *verts)
verts->vbo_id = GPU_buf_alloc();
}
glBindBuffer(GL_ARRAY_BUFFER, verts->vbo_id);
if (verts->dirty) {
if (verts->flag & GPU_VERTBUF_DATA_DIRTY) {
VertBuffer_upload_data(verts);
}
}

View File

@ -0,0 +1,46 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2016 by Mike Erwin.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*
* GPU vertex buffer
*/
#pragma once
#include "GPU_vertex_buffer.h"
/**
 * Private implementation of a vertex buffer. Code outside the GPU module must
 * use the GPU_vertbuf_* getters instead of accessing these fields directly.
 */
struct GPUVertBuf {
GPUVertFormat format;
/** Number of verts we want to draw. */
uint vertex_len;
/** Number of verts data. */
uint vertex_alloc;
/** 0 indicates not yet allocated. */
uint32_t vbo_id;
/** Usage hint for GL optimisation. */
GPUUsageType usage;
/** Status flag (see GPUVertBufStatus: init / dirty / uploaded). */
GPUVertBufStatus flag;
/** This counter will only avoid freeing the GPUVertBuf, not the data. */
char handle_refcount;
/** NULL indicates data in VRAM (unmapped) */
uchar *data;
};

View File

@ -33,6 +33,7 @@
#include "gpu_context_private.hh"
#include "gpu_drawlist_private.hh"
#include "gpu_vertex_buffer_private.hh"
#include "gl_backend.hh"
#include "gl_drawlist.hh"

View File

@ -32,6 +32,7 @@
#include "gl_backend.hh"
#include "gl_debug.hh"
#include "gl_state.hh"
#include "gpu_vertex_buffer_private.hh" /* TODO should be gl_vertex_buffer.hh */
#include "gl_texture.hh"

View File

@ -21,11 +21,8 @@
* \ingroup gpu
*/
#include "GPU_glew.h"
#include "GPU_vertex_buffer.h"
#include "gpu_shader_interface.hh"
#include "gpu_vertex_buffer_private.hh"
#include "gpu_vertex_format_private.h"
#include "gl_batch.hh"

View File

@ -141,11 +141,12 @@ static PyObject *bpygpu_Batch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_b
return NULL;
}
if (self->batch->verts[0]->vertex_len != py_buf->buf->vertex_len) {
if (GPU_vertbuf_get_vertex_len(self->batch->verts[0]) !=
GPU_vertbuf_get_vertex_len(py_buf->buf)) {
PyErr_Format(PyExc_TypeError,
"Expected %d length, got %d",
self->batch->verts[0]->vertex_len,
py_buf->buf->vertex_len);
GPU_vertbuf_get_vertex_len(self->batch->verts[0]),
GPU_vertbuf_get_vertex_len(py_buf->buf));
return NULL;
}

View File

@ -124,7 +124,8 @@ static bool bpygpu_vertbuf_fill_impl(GPUVertBuf *vbo,
const char *exc_str_size_mismatch = "Expected a %s of size %d, got %u";
bool ok = true;
const GPUVertAttr *attr = &vbo->format.attrs[data_id];
const GPUVertAttr *attr = &GPU_vertbuf_get_format(vbo)->attrs[data_id];
uint vert_len = GPU_vertbuf_get_vertex_len(vbo);
if (PyObject_CheckBuffer(seq)) {
Py_buffer pybuffer;
@ -136,9 +137,9 @@ static bool bpygpu_vertbuf_fill_impl(GPUVertBuf *vbo,
const uint comp_len = pybuffer.ndim == 1 ? 1 : (uint)pybuffer.shape[1];
if (pybuffer.shape[0] != vbo->vertex_len) {
if (pybuffer.shape[0] != vert_len) {
PyErr_Format(
PyExc_ValueError, exc_str_size_mismatch, "sequence", vbo->vertex_len, pybuffer.shape[0]);
PyExc_ValueError, exc_str_size_mismatch, "sequence", vert_len, pybuffer.shape[0]);
ok = false;
}
else if (comp_len != attr->comp_len) {
@ -162,8 +163,8 @@ static bool bpygpu_vertbuf_fill_impl(GPUVertBuf *vbo,
const uint seq_len = PySequence_Fast_GET_SIZE(seq_fast);
if (seq_len != vbo->vertex_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "sequence", vbo->vertex_len, seq_len);
if (seq_len != vert_len) {
PyErr_Format(PyExc_ValueError, exc_str_size_mismatch, "sequence", vert_len, seq_len);
}
PyObject **seq_items = PySequence_Fast_ITEMS(seq_fast);
@ -217,12 +218,12 @@ static int bpygpu_attr_fill(GPUVertBuf *buf,
PyObject *py_seq_data,
const char *error_prefix)
{
if (id < 0 || id >= buf->format.attr_len) {
if (id < 0 || id >= GPU_vertbuf_get_format(buf)->attr_len) {
PyErr_Format(PyExc_ValueError, "Format id %d out of range", id);
return 0;
}
if (buf->data == NULL) {
if (GPU_vertbuf_get_data(buf) == NULL) {
PyErr_SetString(PyExc_ValueError, "Can't fill, static buffer already in use");
return 0;
}
@ -288,8 +289,9 @@ static PyObject *bpygpu_VertBuf_attr_fill(BPyGPUVertBuf *self, PyObject *args, P
id = PyLong_AsLong(identifier);
}
else if (PyUnicode_Check(identifier)) {
const GPUVertFormat *format = GPU_vertbuf_get_format(self->buf);
const char *name = PyUnicode_AsUTF8(identifier);
id = GPU_vertformat_attr_id_get(&self->buf->format, name);
id = GPU_vertformat_attr_id_get(format, name);
if (id == -1) {
PyErr_SetString(PyExc_ValueError, "Unknown attribute name");
return NULL;