GPUVertBuf: Rename GPUVertBuf to VertBuf and add some getters

to avoid more typecasts.
Clément Foucault 2020-09-06 23:45:51 +02:00
parent 99e3541d3b
commit bb2aeb4504
20 changed files with 359 additions and 166 deletions

View File

@ -100,6 +100,7 @@ set(SRC
opengl/gl_texture.cc
opengl/gl_uniform_buffer.cc
opengl/gl_vertex_array.cc
opengl/gl_vertex_buffer.cc
GPU_batch.h
GPU_batch_presets.h
@ -166,6 +167,7 @@ set(SRC
opengl/gl_texture.hh
opengl/gl_uniform_buffer.hh
opengl/gl_vertex_array.hh
opengl/gl_vertex_buffer.hh
)
set(LIB

View File

@ -46,7 +46,6 @@ ENUM_OPERATORS(GPUVertBufStatus)
extern "C" {
#endif
#define VRAM_USAGE 1
/**
* How to create a #GPUVertBuf:
* 1) verts = GPU_vertbuf_calloc()
@ -64,9 +63,7 @@ typedef enum {
typedef struct GPUVertBuf GPUVertBuf;
#define GPU_vertbuf_calloc() GPU_vertbuf_create(GPU_USAGE_STATIC);
GPUVertBuf *GPU_vertbuf_create(GPUUsageType);
GPUVertBuf *GPU_vertbuf_calloc(void);
GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *, GPUUsageType);
#define GPU_vertbuf_create_with_format(format) \
@ -79,7 +76,6 @@ void GPU_vertbuf_discard(GPUVertBuf *);
void GPU_vertbuf_handle_ref_add(GPUVertBuf *verts);
void GPU_vertbuf_handle_ref_remove(GPUVertBuf *verts);
void GPU_vertbuf_init(GPUVertBuf *, GPUUsageType);
void GPU_vertbuf_init_with_format_ex(GPUVertBuf *, const GPUVertFormat *, GPUUsageType);
#define GPU_vertbuf_init_with_format(verts, format) \
@ -87,7 +83,6 @@ void GPU_vertbuf_init_with_format_ex(GPUVertBuf *, const GPUVertFormat *, GPUUsa
GPUVertBuf *GPU_vertbuf_duplicate(GPUVertBuf *verts);
uint GPU_vertbuf_size_get(const GPUVertBuf *);
void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len);
void GPU_vertbuf_data_resize(GPUVertBuf *, uint v_len);
void GPU_vertbuf_data_len_set(GPUVertBuf *, uint v_len);
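For reference, a minimal usage sketch of the C API declared in this header, following the "How to create a #GPUVertBuf" comment above. The attribute name, component layout, and vertex data are illustrative, not part of the commit.

/* Assumes "GPU_vertex_buffer.h" and "GPU_vertex_format.h" are included. */
GPUVertFormat format = {0};
uint pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

GPUVertBuf *verts = GPU_vertbuf_create_with_format(&format); /* GPU_USAGE_STATIC by default */
GPU_vertbuf_data_alloc(verts, 3);                            /* room for 3 vertices */

const float co[3][3] = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}};
for (int v = 0; v < 3; v++) {
  GPU_vertbuf_attr_set(verts, pos_id, v, co[v]);
}
/* ... build a batch or texture from it ... */
GPU_vertbuf_discard(verts);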

View File

@ -37,6 +37,7 @@ class IndexBuf;
class Shader;
class Texture;
class UniformBuf;
class VertBuf;
class GPUBackend {
public:
@ -55,6 +56,7 @@ class GPUBackend {
virtual Shader *shader_alloc(const char *name) = 0;
virtual Texture *texture_alloc(const char *name) = 0;
virtual UniformBuf *uniformbuf_alloc(int size, const char *name) = 0;
virtual VertBuf *vertbuf_alloc(void) = 0;
};
} // namespace gpu

View File

@ -267,14 +267,14 @@ void GPU_batch_draw_advanced(
v_count = batch->elem_()->index_len_get();
}
else {
v_count = batch->verts[0]->vertex_len;
v_count = batch->verts_(0)->vertex_len;
}
}
if (i_count == 0) {
i_count = (batch->inst[0]) ? batch->inst[0]->vertex_len : 1;
i_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
/* Meh. This is to be able to use different numbers of verts in instance vbos. */
if (batch->inst[1] != NULL) {
i_count = min_ii(i_count, batch->inst[1]->vertex_len);
i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
}
}

View File

@ -30,6 +30,7 @@
#include "GPU_context.h"
#include "gpu_index_buffer_private.hh"
#include "gpu_vertex_buffer_private.hh"
namespace blender {
namespace gpu {
@ -46,10 +47,18 @@ class Batch : public GPUBatch {
virtual void draw(int v_first, int v_count, int i_first, int i_count) = 0;
/* Convenience casts. */
IndexBuf *elem_(void)
IndexBuf *elem_(void) const
{
return unwrap(elem);
};
}
VertBuf *verts_(const int index) const
{
return unwrap(verts[index]);
}
VertBuf *inst_(const int index) const
{
return unwrap(inst[index]);
}
};
} // namespace gpu

View File

@ -165,7 +165,7 @@ GPUBatch *immBeginBatch(GPUPrimType prim_type, uint vertex_len)
GPUVertBuf *verts = GPU_vertbuf_create_with_format(&imm->vertex_format);
GPU_vertbuf_data_alloc(verts, vertex_len);
imm->vertex_data = verts->data;
imm->vertex_data = (uchar *)GPU_vertbuf_get_data(verts);
imm->batch = GPU_batch_create_ex(prim_type, verts, NULL, GPU_BATCH_OWNS_VBO);
imm->batch->flag |= GPU_BATCH_BUILDING;

View File

@ -124,7 +124,7 @@ bool Texture::init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format)
if (format == GPU_DEPTH_COMPONENT24) {
return false;
}
w_ = vbo->vertex_len;
w_ = GPU_vertbuf_get_vertex_len(vbo);
h_ = 0;
d_ = 0;
format_ = format;
@ -303,7 +303,7 @@ GPUTexture *GPU_texture_create_compressed_2d(
GPUTexture *GPU_texture_create_from_vertbuf(const char *name, GPUVertBuf *vert)
{
eGPUTextureFormat tex_format = to_texture_format(&vert->format);
eGPUTextureFormat tex_format = to_texture_format(GPU_vertbuf_get_format(vert));
Texture *tex = GPUBackend::get()->texture_alloc(name);
bool success = tex->init_buffer(vert, tex_format);
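The change above shows the pattern callers outside the GPU module now follow: the new getters replace direct member access on the now-opaque GPUVertBuf. A small sketch of that pattern; texture_fits_vbo and max_width are hypothetical names, not from the commit.

/* Hypothetical helper: reads VBO properties through the new getters instead of
 * dereferencing GPUVertBuf members directly. */
static bool texture_fits_vbo(const GPUVertBuf *vbo, uint max_width)
{
  const GPUVertFormat *format = GPU_vertbuf_get_format(vbo); /* was: &vbo->format */
  const uint vertex_len = GPU_vertbuf_get_vertex_len(vbo);   /* was: vbo->vertex_len */
  return format->stride > 0 && vertex_len <= max_width;
}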

View File

@ -25,64 +25,61 @@
#include "MEM_guardedalloc.h"
#include "gpu_context_private.hh"
#include "gpu_backend.hh"
#include "gpu_vertex_format_private.h"
#include "gl_vertex_buffer.hh" /* TODO remove */
#include "gpu_context_private.hh" /* TODO remove */
#include "gpu_vertex_buffer_private.hh"
#define KEEP_SINGLE_COPY 1
#include <cstring>
static uint vbo_memory_usage;
#define VRAM_USAGE 1
static GLenum convert_usage_type_to_gl(GPUUsageType type)
/* -------------------------------------------------------------------- */
/** \name VertBuf
* \{ */
namespace blender::gpu {
size_t VertBuf::memory_usage = 0;
} // namespace blender::gpu
/** \} */
/* -------------------------------------------------------------------- */
/** \name C-API
* \{ */
using namespace blender;
using namespace blender::gpu;
static uint GPU_vertbuf_size_get(const VertBuf *verts);
/* -------- Creation & deletion -------- */
GPUVertBuf *GPU_vertbuf_calloc(void)
{
switch (type) {
case GPU_USAGE_STREAM:
return GL_STREAM_DRAW;
case GPU_USAGE_DYNAMIC:
return GL_DYNAMIC_DRAW;
case GPU_USAGE_STATIC:
return GL_STATIC_DRAW;
default:
BLI_assert(0);
return GL_STATIC_DRAW;
}
}
GPUVertBuf *GPU_vertbuf_create(GPUUsageType usage)
{
GPUVertBuf *verts = (GPUVertBuf *)MEM_mallocN(sizeof(GPUVertBuf), "GPUVertBuf");
GPU_vertbuf_init(verts, usage);
return verts;
return wrap(GPUBackend::get()->vertbuf_alloc());
}
GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *format, GPUUsageType usage)
{
GPUVertBuf *verts = GPU_vertbuf_create(usage);
GPU_vertformat_copy(&verts->format, format);
if (!format->packed) {
VertexFormat_pack(&verts->format);
}
verts->flag |= GPU_VERTBUF_INIT;
GPUVertBuf *verts = GPU_vertbuf_calloc();
GPU_vertbuf_init_with_format_ex(verts, format, usage);
return verts;
/* this function might seem redundant, but there is potential for memory savings here... */
/* TODO: implement those memory savings */
}
void GPU_vertbuf_init(GPUVertBuf *verts, GPUUsageType usage)
{
memset(verts, 0, sizeof(GPUVertBuf));
verts->usage = usage;
verts->flag = GPU_VERTBUF_DATA_DIRTY;
verts->handle_refcount = 1;
}
void GPU_vertbuf_init_with_format_ex(GPUVertBuf *verts,
void GPU_vertbuf_init_with_format_ex(GPUVertBuf *verts_,
const GPUVertFormat *format,
GPUUsageType usage)
{
GPU_vertbuf_init(verts, usage);
VertBuf *verts = unwrap(verts_);
verts->usage = usage;
verts->flag = GPU_VERTBUF_DATA_DIRTY;
verts->handle_refcount = 1;
GPU_vertformat_copy(&verts->format, format);
if (!format->packed) {
VertexFormat_pack(&verts->format);
@ -90,11 +87,13 @@ void GPU_vertbuf_init_with_format_ex(GPUVertBuf *verts,
verts->flag |= GPU_VERTBUF_INIT;
}
GPUVertBuf *GPU_vertbuf_duplicate(GPUVertBuf *verts)
GPUVertBuf *GPU_vertbuf_duplicate(GPUVertBuf *verts_)
{
GPUVertBuf *verts_dst = GPU_vertbuf_create(GPU_USAGE_STATIC);
VertBuf *verts = unwrap(verts_);
VertBuf *verts_dst = unwrap(GPU_vertbuf_calloc());
/* Full copy. */
*verts_dst = *verts;
verts_dst->handle_refcount = 1;
GPU_vertformat_copy(&verts_dst->format, &verts->format);
if (verts->vbo_id) {
@ -105,33 +104,32 @@ GPUVertBuf *GPU_vertbuf_duplicate(GPUVertBuf *verts)
glBindBuffer(GL_COPY_READ_BUFFER, verts->vbo_id);
glBindBuffer(GL_COPY_WRITE_BUFFER, verts_dst->vbo_id);
glBufferData(GL_COPY_WRITE_BUFFER, buffer_sz, NULL, convert_usage_type_to_gl(verts->usage));
glBufferData(GL_COPY_WRITE_BUFFER, buffer_sz, NULL, to_gl(verts->usage));
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, buffer_sz);
#if VRAM_USAGE
vbo_memory_usage += GPU_vertbuf_size_get(verts);
#endif
VertBuf::memory_usage += GPU_vertbuf_size_get(verts);
}
if (verts->data) {
verts_dst->data = (uchar *)MEM_dupallocN(verts->data);
}
return verts_dst;
return wrap(verts_dst);
}
/** Same as discard but does not free. */
void GPU_vertbuf_clear(GPUVertBuf *verts)
void GPU_vertbuf_clear(GPUVertBuf *verts_)
{
VertBuf *verts = unwrap(verts_);
if (verts->vbo_id) {
GPU_buf_free(verts->vbo_id);
verts->vbo_id = 0;
#if VRAM_USAGE
vbo_memory_usage -= GPU_vertbuf_size_get(verts);
#endif
VertBuf::memory_usage -= GPU_vertbuf_size_get(verts);
}
if (verts->data) {
MEM_SAFE_FREE(verts->data);
}
verts->flag = GPU_VERTBUF_INVALID;
}
void GPU_vertbuf_discard(GPUVertBuf *verts)
@ -140,13 +138,15 @@ void GPU_vertbuf_discard(GPUVertBuf *verts)
GPU_vertbuf_handle_ref_remove(verts);
}
void GPU_vertbuf_handle_ref_add(GPUVertBuf *verts)
void GPU_vertbuf_handle_ref_add(GPUVertBuf *verts_)
{
VertBuf *verts = unwrap(verts_);
verts->handle_refcount++;
}
void GPU_vertbuf_handle_ref_remove(GPUVertBuf *verts)
void GPU_vertbuf_handle_ref_remove(GPUVertBuf *verts_)
{
VertBuf *verts = unwrap(verts_);
BLI_assert(verts->handle_refcount > 0);
verts->handle_refcount--;
if (verts->handle_refcount == 0) {
@ -156,14 +156,17 @@ void GPU_vertbuf_handle_ref_remove(GPUVertBuf *verts)
}
}
uint GPU_vertbuf_size_get(const GPUVertBuf *verts)
uint GPU_vertbuf_size_get(const VertBuf *verts)
{
return vertex_buffer_size(&verts->format, verts->vertex_len);
}
/* -------- Data update -------- */
/* create a new allocation, discarding any existing data */
void GPU_vertbuf_data_alloc(GPUVertBuf *verts, uint v_len)
void GPU_vertbuf_data_alloc(GPUVertBuf *verts_, uint v_len)
{
VertBuf *verts = unwrap(verts_);
GPUVertFormat *format = &verts->format;
if (!format->packed) {
VertexFormat_pack(format);
@ -176,27 +179,27 @@ void GPU_vertbuf_data_alloc(GPUVertBuf *verts, uint v_len)
if (verts->data) {
MEM_freeN(verts->data);
}
#if VRAM_USAGE
uint new_size = vertex_buffer_size(&verts->format, v_len);
vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
VertBuf::memory_usage += new_size - GPU_vertbuf_size_get(verts);
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
verts->vertex_len = verts->vertex_alloc = v_len;
verts->data = (uchar *)MEM_mallocN(sizeof(GLubyte) * GPU_vertbuf_size_get(verts), __func__);
}
/* resize buffer keeping existing data */
void GPU_vertbuf_data_resize(GPUVertBuf *verts, uint v_len)
void GPU_vertbuf_data_resize(GPUVertBuf *verts_, uint v_len)
{
VertBuf *verts = unwrap(verts_);
#if TRUST_NO_ONE
assert(verts->data != NULL);
assert(verts->vertex_alloc != v_len);
#endif
#if VRAM_USAGE
uint new_size = vertex_buffer_size(&verts->format, v_len);
vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
VertBuf::memory_usage += new_size - GPU_vertbuf_size_get(verts);
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
verts->vertex_len = verts->vertex_alloc = v_len;
verts->data = (uchar *)MEM_reallocN(verts->data, sizeof(GLubyte) * GPU_vertbuf_size_get(verts));
@ -205,22 +208,21 @@ void GPU_vertbuf_data_resize(GPUVertBuf *verts, uint v_len)
/* Set vertex count but does not change allocation.
* Only this many verts will be uploaded to the GPU and rendered.
* This is useful for streaming data. */
void GPU_vertbuf_data_len_set(GPUVertBuf *verts, uint v_len)
void GPU_vertbuf_data_len_set(GPUVertBuf *verts_, uint v_len)
{
#if TRUST_NO_ONE
assert(verts->data != NULL); /* only for dynamic data */
assert(v_len <= verts->vertex_alloc);
#endif
VertBuf *verts = unwrap(verts_);
BLI_assert(verts->data != NULL); /* Only for dynamic data. */
BLI_assert(v_len <= verts->vertex_alloc);
#if VRAM_USAGE
uint new_size = vertex_buffer_size(&verts->format, v_len);
vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
VertBuf::memory_usage += new_size - GPU_vertbuf_size_get(verts);
verts->vertex_len = v_len;
}
void GPU_vertbuf_attr_set(GPUVertBuf *verts, uint a_idx, uint v_idx, const void *data)
void GPU_vertbuf_attr_set(GPUVertBuf *verts_, uint a_idx, uint v_idx, const void *data)
{
VertBuf *verts = unwrap(verts_);
const GPUVertFormat *format = &verts->format;
const GPUVertAttr *a = &format->attrs[a_idx];
@ -230,11 +232,12 @@ void GPU_vertbuf_attr_set(GPUVertBuf *verts, uint a_idx, uint v_idx, const void
assert(verts->data != NULL);
#endif
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
memcpy((GLubyte *)verts->data + a->offset + v_idx * format->stride, data, a->sz);
memcpy((uchar *)verts->data + a->offset + v_idx * format->stride, data, a->sz);
}
void GPU_vertbuf_attr_fill(GPUVertBuf *verts, uint a_idx, const void *data)
void GPU_vertbuf_attr_fill(GPUVertBuf *verts_, uint a_idx, const void *data)
{
VertBuf *verts = unwrap(verts_);
const GPUVertFormat *format = &verts->format;
const GPUVertAttr *a = &format->attrs[a_idx];
@ -242,13 +245,15 @@ void GPU_vertbuf_attr_fill(GPUVertBuf *verts, uint a_idx, const void *data)
assert(a_idx < format->attr_len);
#endif
const uint stride = a->sz; /* tightly packed input data */
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
GPU_vertbuf_attr_fill_stride(verts, a_idx, stride, data);
GPU_vertbuf_attr_fill_stride(verts_, a_idx, stride, data);
}
/** Fills a whole vertex (all attributes). Data must match packed layout. */
void GPU_vertbuf_vert_set(GPUVertBuf *verts, uint v_idx, const void *data)
void GPU_vertbuf_vert_set(GPUVertBuf *verts_, uint v_idx, const void *data)
{
VertBuf *verts = unwrap(verts_);
const GPUVertFormat *format = &verts->format;
#if TRUST_NO_ONE
@ -256,11 +261,12 @@ void GPU_vertbuf_vert_set(GPUVertBuf *verts, uint v_idx, const void *data)
assert(verts->data != NULL);
#endif
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
memcpy((GLubyte *)verts->data + v_idx * format->stride, data, format->stride);
memcpy((uchar *)verts->data + v_idx * format->stride, data, format->stride);
}
void GPU_vertbuf_attr_fill_stride(GPUVertBuf *verts, uint a_idx, uint stride, const void *data)
void GPU_vertbuf_attr_fill_stride(GPUVertBuf *verts_, uint a_idx, uint stride, const void *data)
{
VertBuf *verts = unwrap(verts_);
const GPUVertFormat *format = &verts->format;
const GPUVertAttr *a = &format->attrs[a_idx];
@ -278,15 +284,16 @@ void GPU_vertbuf_attr_fill_stride(GPUVertBuf *verts, uint a_idx, uint stride, co
else {
/* we must copy it per vertex */
for (uint v = 0; v < vertex_len; v++) {
memcpy((GLubyte *)verts->data + a->offset + v * format->stride,
(const GLubyte *)data + v * stride,
memcpy((uchar *)verts->data + a->offset + v * format->stride,
(const uchar *)data + v * stride,
a->sz);
}
}
}
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts, uint a_idx, GPUVertBufRaw *access)
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts_, uint a_idx, GPUVertBufRaw *access)
{
VertBuf *verts = unwrap(verts_);
const GPUVertFormat *format = &verts->format;
const GPUVertAttr *a = &format->attrs[a_idx];
@ -296,27 +303,30 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts, uint a_idx, GPUVertBufRaw
#endif
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
verts->flag &= ~GPU_VERTBUF_DATA_UPLOADED;
access->size = a->sz;
access->stride = format->stride;
access->data = (GLubyte *)verts->data + a->offset;
access->data = (uchar *)verts->data + a->offset;
access->data_init = access->data;
#if TRUST_NO_ONE
access->_data_end = access->data_init + (size_t)(verts->vertex_alloc * format->stride);
#endif
}
/* -------- Getters -------- */
/* NOTE: Be careful when using this. The data needs to match the expected format. */
void *GPU_vertbuf_get_data(const GPUVertBuf *verts)
{
/* TODO Assert that the format has no padding. */
return verts->data;
return unwrap(verts)->data;
}
/* Returns the data buffer and set it to null internally to avoid freeing.
* NOTE: Be careful when using this. The data needs to match the expected format. */
void *GPU_vertbuf_steal_data(GPUVertBuf *verts)
void *GPU_vertbuf_steal_data(GPUVertBuf *verts_)
{
VertBuf *verts = unwrap(verts_);
/* TODO Assert that the format has no padding. */
BLI_assert(verts->data);
void *data = verts->data;
@ -326,54 +336,57 @@ void *GPU_vertbuf_steal_data(GPUVertBuf *verts)
const GPUVertFormat *GPU_vertbuf_get_format(const GPUVertBuf *verts)
{
return &verts->format;
return &unwrap(verts)->format;
}
uint GPU_vertbuf_get_vertex_alloc(const GPUVertBuf *verts)
{
return verts->vertex_alloc;
return unwrap(verts)->vertex_alloc;
}
uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
{
return verts->vertex_len;
return unwrap(verts)->vertex_len;
}
GPUVertBufStatus GPU_vertbuf_get_status(const GPUVertBuf *verts)
{
return verts->flag;
return unwrap(verts)->flag;
}
static void VertBuffer_upload_data(GPUVertBuf *verts)
uint GPU_vertbuf_get_memory_usage(void)
{
return VertBuf::memory_usage;
}
static void VertBuffer_upload_data(GPUVertBuf *verts_)
{
VertBuf *verts = unwrap(verts_);
uint buffer_sz = GPU_vertbuf_size_get(verts);
/* orphan the vbo to avoid sync */
glBufferData(GL_ARRAY_BUFFER, buffer_sz, NULL, convert_usage_type_to_gl(verts->usage));
glBufferData(GL_ARRAY_BUFFER, buffer_sz, NULL, to_gl(verts->usage));
/* upload data */
glBufferSubData(GL_ARRAY_BUFFER, 0, buffer_sz, verts->data);
if (verts->usage == GPU_USAGE_STATIC) {
MEM_freeN(verts->data);
verts->data = NULL;
MEM_SAFE_FREE(verts->data);
}
verts->flag &= ~GPU_VERTBUF_DATA_DIRTY;
verts->flag |= GPU_VERTBUF_DATA_UPLOADED;
}
void GPU_vertbuf_use(GPUVertBuf *verts)
void GPU_vertbuf_use(GPUVertBuf *verts_)
{
VertBuf *verts = unwrap(verts_);
/* only create the buffer the 1st time */
if (verts->vbo_id == 0) {
verts->vbo_id = GPU_buf_alloc();
}
glBindBuffer(GL_ARRAY_BUFFER, verts->vbo_id);
if (verts->flag & GPU_VERTBUF_DATA_DIRTY) {
VertBuffer_upload_data(verts);
VertBuffer_upload_data(verts_);
}
}
uint GPU_vertbuf_get_memory_usage(void)
{
return vbo_memory_usage;
}
/** \} */
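The comment on GPU_vertbuf_data_len_set above describes a streaming use case: allocate once, then draw only the prefix that was actually written. A minimal sketch of that pattern; max_vert_len, pos_id and the fill helpers are illustrative.

/* Allocate the worst-case size once. */
GPU_vertbuf_data_alloc(verts, max_vert_len);

/* Per frame: write only the vertices actually produced... */
uint used_len = 0;
for (; used_len < max_vert_len && has_more_points(); used_len++) {   /* hypothetical source */
  GPU_vertbuf_attr_set(verts, pos_id, used_len, next_point());       /* hypothetical source */
}
/* ...and draw only that prefix. No reallocation or data copy happens here. */
GPU_vertbuf_data_len_set(verts, used_len);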

View File

@ -27,20 +27,40 @@
#include "GPU_vertex_buffer.h"
struct GPUVertBuf {
GPUVertFormat format;
namespace blender::gpu {
struct VertBuf {
static size_t memory_usage;
GPUVertFormat format = {};
/** Number of verts we want to draw. */
uint vertex_len;
uint vertex_len = 0;
/** Number of verts data. */
uint vertex_alloc;
uint vertex_alloc = 0;
/** 0 indicates not yet allocated. */
uint32_t vbo_id;
uint32_t vbo_id = 0;
/** Usage hint for GL optimisation. */
GPUUsageType usage;
GPUUsageType usage = GPU_USAGE_STATIC;
/** Status flag. */
GPUVertBufStatus flag;
GPUVertBufStatus flag = GPU_VERTBUF_INVALID;
/** This counter will only avoid freeing the GPUVertBuf, not the data. */
char handle_refcount;
char handle_refcount = 0;
/** NULL indicates data in VRAM (unmapped) */
uchar *data;
};
uchar *data = NULL;
};
/* Syntactic sugar. */
static inline GPUVertBuf *wrap(VertBuf *vert)
{
return reinterpret_cast<GPUVertBuf *>(vert);
}
static inline VertBuf *unwrap(GPUVertBuf *vert)
{
return reinterpret_cast<VertBuf *>(vert);
}
static inline const VertBuf *unwrap(const GPUVertBuf *vert)
{
return reinterpret_cast<const VertBuf *>(vert);
}
} // namespace blender::gpu
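The wrap()/unwrap() helpers encode the opaque-handle pattern: C callers only ever see the forward-declared GPUVertBuf, while code inside the module casts to the C++ VertBuf and back at zero cost. A sketch of using both directions inside the module; make_static_vbo is a hypothetical helper.

/* Inside blender::gpu: C++ object from the backend, opaque handle out to C code. */
static GPUVertBuf *make_static_vbo(const GPUVertFormat *format) /* hypothetical */
{
  VertBuf *vbo = GPUBackend::get()->vertbuf_alloc();
  GPU_vertbuf_init_with_format_ex(wrap(vbo), format, GPU_USAGE_STATIC);
  return wrap(vbo); /* reinterpret_cast only: no allocation or copy */
}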

View File

@ -64,31 +64,6 @@ void GPU_vertformat_copy(GPUVertFormat *dest, const GPUVertFormat *src)
memcpy(dest, src, sizeof(GPUVertFormat));
}
GLenum convert_comp_type_to_gl(GPUVertCompType type)
{
switch (type) {
case GPU_COMP_I8:
return GL_BYTE;
case GPU_COMP_U8:
return GL_UNSIGNED_BYTE;
case GPU_COMP_I16:
return GL_SHORT;
case GPU_COMP_U16:
return GL_UNSIGNED_SHORT;
case GPU_COMP_I32:
return GL_INT;
case GPU_COMP_U32:
return GL_UNSIGNED_INT;
case GPU_COMP_F32:
return GL_FLOAT;
case GPU_COMP_I10:
return GL_INT_2_10_10_10_REV;
default:
BLI_assert(0);
return GL_FLOAT;
}
}
static uint comp_sz(GPUVertCompType type)
{
#if TRUST_NO_ONE

View File

@ -29,10 +29,11 @@
extern "C" {
#endif
void VertexFormat_pack(GPUVertFormat *format);
struct GPUVertFormat;
void VertexFormat_pack(struct GPUVertFormat *format);
uint padding(uint offset, uint alignment);
uint vertex_buffer_size(const GPUVertFormat *format, uint vertex_len);
GLenum convert_comp_type_to_gl(GPUVertCompType type);
uint vertex_buffer_size(const struct GPUVertFormat *format, uint vertex_len);
#ifdef __cplusplus
}

View File

@ -35,6 +35,7 @@
#include "gl_shader.hh"
#include "gl_texture.hh"
#include "gl_uniform_buffer.hh"
#include "gl_vertex_buffer.hh"
namespace blender {
namespace gpu {
@ -103,6 +104,11 @@ class GLBackend : public GPUBackend {
return new GLUniformBuf(size, name);
};
VertBuf *vertbuf_alloc(void) override
{
return new GLVertBuf();
};
/* TODO remove */
void buf_free(GLuint buf_id);
void tex_free(GLuint tex_id);

View File

@ -309,7 +309,7 @@ void GLBatch::bind(int i_first)
#if GPU_TRACK_INDEX_RANGE
/* Can be removed if GL 4.3 is required. */
if (!GLEW_ARB_ES3_compatibility && (elem != NULL)) {
glPrimitiveRestartIndex(this->gl_elem()->restart_index());
glPrimitiveRestartIndex(this->elem_()->restart_index());
}
#endif
@ -334,7 +334,7 @@ void GLBatch::draw(int v_first, int v_count, int i_first, int i_count)
GLenum gl_type = to_gl(prim_type);
if (elem) {
const GLIndexBuf *el = this->gl_elem();
const GLIndexBuf *el = this->elem_();
GLenum index_type = to_gl(el->index_type_);
GLint base_index = el->index_base_;
void *v_first_ofs = el->offset_ptr(v_first);

View File

@ -31,6 +31,7 @@
#include "gpu_batch_private.hh"
#include "gl_index_buffer.hh"
#include "gl_vertex_buffer.hh"
#include "glew-mx.h"
@ -102,10 +103,18 @@ class GLBatch : public Batch {
void bind(int i_first);
/* Convenience getters. */
GLIndexBuf *gl_elem(void)
GLIndexBuf *elem_(void) const
{
return static_cast<GLIndexBuf *>(unwrap(elem));
}
GLVertBuf *verts_(const int index) const
{
return static_cast<GLVertBuf *>(unwrap(verts[index]));
}
GLVertBuf *inst_(const int index) const
{
return static_cast<GLVertBuf *>(unwrap(inst[index]));
}
MEM_CXX_CLASS_ALLOC_FUNCS("GLBatch");
};

View File

@ -139,10 +139,10 @@ void GLDrawList::append(GPUBatch *gpu_batch, int i_first, int i_count)
this->submit();
batch_ = batch;
/* Cached for faster access. */
GLIndexBuf *el = batch_->gl_elem();
GLIndexBuf *el = batch_->elem_();
base_index_ = el ? el->index_base_ : UINT_MAX;
v_first_ = el ? el->index_start_ : 0;
v_count_ = el ? el->index_len_ : batch->verts[0]->vertex_len;
v_count_ = el ? el->index_len_ : batch->verts_(0)->vertex_len;
}
if (v_count_ == 0) {
@ -202,7 +202,7 @@ void GLDrawList::submit(void)
batch_->bind(0);
if (MDI_INDEXED) {
GLenum gl_type = to_gl(batch_->gl_elem()->index_type_);
GLenum gl_type = to_gl(batch_->elem_()->index_type_);
glMultiDrawElementsIndirect(prim, gl_type, offset, command_len_, 0);
}
else {

View File

@ -28,6 +28,8 @@
#include "GPU_extensions.h"
#include "GPU_platform.h"
#include "gl_vertex_buffer.hh"
#include "gl_shader.hh"
#include "gl_shader_interface.hh"
@ -260,12 +262,14 @@ void GLShader::transform_feedback_names_set(Span<const char *> name_list,
transform_feedback_type_ = geom_type;
}
bool GLShader::transform_feedback_enable(GPUVertBuf *buf)
bool GLShader::transform_feedback_enable(GPUVertBuf *buf_)
{
if (transform_feedback_type_ == GPU_SHADER_TFB_NONE) {
return false;
}
GLVertBuf *buf = static_cast<GLVertBuf *>(unwrap(buf_));
BLI_assert(buf->vbo_id != 0);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, buf->vbo_id);

View File

@ -111,6 +111,7 @@ bool GLTexture::init_internal(void)
/* Return true on success. */
bool GLTexture::init_internal(GPUVertBuf *vbo)
{
GLVertBuf *gl_vbo = static_cast<GLVertBuf *>(unwrap(vbo));
target_ = to_gl_target(type_);
/* We need to bind once to define the texture type. */
@ -119,10 +120,10 @@ bool GLTexture::init_internal(GPUVertBuf *vbo)
GLenum internal_format = to_gl_internal_format(format_);
if (GLEW_ARB_direct_state_access) {
glTextureBuffer(tex_id_, internal_format, vbo->vbo_id);
glTextureBuffer(tex_id_, internal_format, gl_vbo->vbo_id);
}
else {
glTexBuffer(target_, internal_format, vbo->vbo_id);
glTexBuffer(target_, internal_format, gl_vbo->vbo_id);
}
#ifndef __APPLE__

View File

@ -28,6 +28,7 @@
#include "gl_batch.hh"
#include "gl_context.hh"
#include "gl_index_buffer.hh"
#include "gl_vertex_buffer.hh"
#include "gl_vertex_array.hh"
@ -62,7 +63,7 @@ static uint16_t vbo_bind(const ShaderInterface *interface,
}
const GLvoid *pointer = (const GLubyte *)0 + offset + v_first * stride;
const GLenum type = convert_comp_type_to_gl(static_cast<GPUVertCompType>(a->comp_type));
const GLenum type = to_gl(static_cast<GPUVertCompType>(a->comp_type));
for (uint n_idx = 0; n_idx < a->name_len; n_idx++) {
const char *name = GPU_vertformat_attr_name_get(format, a, n_idx);
@ -108,27 +109,28 @@ static uint16_t vbo_bind(const ShaderInterface *interface,
/* Update the Attrib Binding of the currently bound VAO. */
void GLVertArray::update_bindings(const GLuint vao,
const GPUBatch *batch,
const GPUBatch *batch_, /* Should be GLBatch. */
const ShaderInterface *interface,
const int base_instance)
{
const GLBatch *batch = static_cast<const GLBatch *>(batch_);
uint16_t attr_mask = interface->enabled_attr_mask_;
glBindVertexArray(vao);
/* Reverse order so first VBOs have more prevalence (in terms of attribute override). */
for (int v = GPU_BATCH_VBO_MAX_LEN - 1; v > -1; v--) {
GPUVertBuf *vbo = batch->verts[v];
GLVertBuf *vbo = batch->verts_(v);
if (vbo) {
GPU_vertbuf_use(vbo);
GPU_vertbuf_use(batch->verts[v]);
attr_mask &= ~vbo_bind(interface, &vbo->format, 0, vbo->vertex_len, false);
}
}
for (int v = GPU_BATCH_INST_VBO_MAX_LEN - 1; v > -1; v--) {
GPUVertBuf *vbo = batch->inst[v];
GLVertBuf *vbo = batch->inst_(v);
if (vbo) {
GPU_vertbuf_use(vbo);
GPU_vertbuf_use(batch->inst[v]);
attr_mask &= ~vbo_bind(interface, &vbo->format, base_instance, vbo->vertex_len, true);
}
}

View File

@ -0,0 +1,56 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2016 by Mike Erwin.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*/
#include "gl_backend.hh"
#include "gl_vertex_buffer.hh"
namespace blender::gpu {
void GLVertBuf::bind(void)
{
}
void GLVertBuf::upload_data(void)
{
}
uchar *GLVertBuf::acquire_data(void)
{
return nullptr;
}
uchar *GLVertBuf::resize_data(void)
{
return nullptr;
}
void GLVertBuf::release_data(void)
{
}
void GLVertBuf::duplicate_data(VertBuf *)
{
}
} // namespace blender::gpu

View File

@ -0,0 +1,98 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*/
#pragma once
#include "MEM_guardedalloc.h"
#include "glew-mx.h"
#include "gpu_vertex_buffer_private.hh"
namespace blender {
namespace gpu {
class GLVertBuf : public VertBuf {
friend class GLTexture; /* For buffer texture. */
friend class GLShader; /* For transform feedback. */
private:
// GLuint vbo_id_ = 0;
/** Size on the GPU. */
// size_t vbo_size_ = 0;
public:
void bind(void);
protected:
uchar *acquire_data(void);
uchar *resize_data(void);
void release_data(void);
void upload_data(void);
void duplicate_data(VertBuf *dst);
MEM_CXX_CLASS_ALLOC_FUNCS("GLVertBuf");
};
static inline GLenum to_gl(GPUUsageType type)
{
switch (type) {
case GPU_USAGE_STREAM:
return GL_STREAM_DRAW;
case GPU_USAGE_DYNAMIC:
return GL_DYNAMIC_DRAW;
case GPU_USAGE_STATIC:
return GL_STATIC_DRAW;
default:
BLI_assert(0);
return GL_STATIC_DRAW;
}
}
static inline GLenum to_gl(GPUVertCompType type)
{
switch (type) {
case GPU_COMP_I8:
return GL_BYTE;
case GPU_COMP_U8:
return GL_UNSIGNED_BYTE;
case GPU_COMP_I16:
return GL_SHORT;
case GPU_COMP_U16:
return GL_UNSIGNED_SHORT;
case GPU_COMP_I32:
return GL_INT;
case GPU_COMP_U32:
return GL_UNSIGNED_INT;
case GPU_COMP_F32:
return GL_FLOAT;
case GPU_COMP_I10:
return GL_INT_2_10_10_10_REV;
default:
BLI_assert(0);
return GL_FLOAT;
}
}
} // namespace gpu
} // namespace blender
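The GLVertBuf stubs are left empty by this commit. For orientation only, a hypothetical sketch of what GLVertBuf::upload_data() could look like if it mirrored the existing VertBuffer_upload_data() in gpu_vertex_buffer.cc, using the to_gl() helper defined above; this is not the commit's implementation.

void GLVertBuf::upload_data(void)
{
  /* Hypothetical: mirrors VertBuffer_upload_data(), the stub above is empty. */
  uint size = vertex_buffer_size(&format, vertex_len);
  /* Orphan the buffer storage to avoid a sync, then upload the CPU copy. */
  glBufferData(GL_ARRAY_BUFFER, size, NULL, to_gl(usage));
  glBufferSubData(GL_ARRAY_BUFFER, 0, size, data);
  if (usage == GPU_USAGE_STATIC) {
    MEM_SAFE_FREE(data); /* Static data now lives only in VRAM. */
  }
  flag &= ~GPU_VERTBUF_DATA_DIRTY;
  flag |= GPU_VERTBUF_DATA_UPLOADED;
}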