GPUImmediate: GL backend isolation

This is part of the Vulkan backend task T68990.

This is mostly a cleanup; however, there is one small change:
we no longer use a dedicated Vertex Array binding function for
Immediate and instead reuse the one used for batches.
This might introduce a few more state changes, but it could easily
be fixed if it causes a performance regression.
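
In practice, the GL implementation now goes through the same GLVertArray helper
as the batch path. The lines below are a condensed sketch of the new
GLImmediate::end() binding code taken from the diff further down, not a
complete listing:

    /* Condensed from gl_immediate.cc in this diff: attribute binding now uses
     * the shared GLVertArray helper instead of a dedicated immediate-mode loop. */
    const uint v_first = buffer_offset() / vertex_format.stride;
    GLVertArray::update_bindings(
        vao_id_, v_first, &vertex_format, reinterpret_cast<Shader *>(shader)->interface);
    glDrawArrays(to_gl(prim_type), 0, vertex_len);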

# Conflicts:
#	source/blender/gpu/intern/gpu_context.cc
Clément Foucault 2020-08-31 15:14:47 +02:00
parent 1b3a0ae231
commit 1804eb57fd
Notes: blender-bot 2023-02-14 04:56:36 +01:00
Referenced by issue #80538, Knife Tool Glitches
Referenced by issue #80327, Glitches in UI drawing
11 changed files with 558 additions and 352 deletions

View File

@@ -93,6 +93,7 @@ set(SRC
opengl/gl_context.cc
opengl/gl_drawlist.cc
opengl/gl_framebuffer.cc
opengl/gl_immediate.cc
opengl/gl_shader.cc
opengl/gl_shader_interface.cc
opengl/gl_state.cc
@@ -136,6 +137,7 @@ set(SRC
intern/gpu_context_private.hh
intern/gpu_drawlist_private.hh
intern/gpu_framebuffer_private.hh
intern/gpu_immediate_private.hh
intern/gpu_material_library.h
intern/gpu_matrix_private.h
intern/gpu_node_graph.h
@@ -153,6 +155,8 @@ set(SRC
opengl/gl_context.hh
opengl/gl_drawlist.hh
opengl/gl_framebuffer.hh
opengl/gl_immediate.hh
opengl/gl_primitive.hh
opengl/gl_shader.hh
opengl/gl_shader_interface.hh
opengl/gl_state.hh

View File

@@ -75,6 +75,7 @@ GPUContext::~GPUContext()
delete back_left;
delete front_right;
delete back_right;
delete imm;
}
bool GPUContext::is_active_on_thread(void)

View File

@@ -30,6 +30,7 @@
#include "GPU_context.h"
#include "gpu_framebuffer_private.hh"
#include "gpu_immediate_private.hh"
#include "gpu_shader_private.hh"
#include "gpu_state_private.hh"
@@ -48,6 +49,7 @@ struct GPUContext {
blender::gpu::FrameBuffer *active_fb = NULL;
GPUMatrixState *matrix_state = NULL;
blender::gpu::GPUStateManager *state_manager = NULL;
blender::gpu::Immediate *imm = NULL;
/**
* All 4 window framebuffers.

View File

@@ -20,7 +20,7 @@
/** \file
* \ingroup gpu
*
* GPU immediate mode work-alike
* Mimics old-style OpenGL immediate mode drawing.
*/
#ifndef GPU_STANDALONE
@@ -34,122 +34,53 @@
#include "gpu_attr_binding_private.h"
#include "gpu_context_private.hh"
#include "gpu_immediate_private.hh"
#include "gpu_primitive_private.h"
#include "gpu_shader_private.hh"
#include "gpu_vertex_format_private.h"
#include <stdlib.h>
#include <string.h>
using namespace blender::gpu;
typedef struct ImmediateDrawBuffer {
GLuint vbo_id;
GLubyte *buffer_data;
uint buffer_offset;
uint buffer_size;
} ImmediateDrawBuffer;
typedef struct {
GPUBatch *batch;
GPUContext *context;
/* current draw call */
bool strict_vertex_len;
uint vertex_len;
uint buffer_bytes_mapped;
ImmediateDrawBuffer *active_buffer;
GPUPrimType prim_type;
GPUVertFormat vertex_format;
ImmediateDrawBuffer draw_buffer;
ImmediateDrawBuffer draw_buffer_strict;
/* current vertex */
uint vertex_idx;
GLubyte *vertex_data;
/** which attributes of current vertex have not been given values? */
uint16_t unassigned_attr_bits;
GLuint vao_id;
GPUShader *bound_program;
GPUAttrBinding attr_binding;
uint16_t prev_enabled_attr_bits; /* <-- only affects this VAO, so we're ok */
} Immediate;
/* size of internal buffer */
#define DEFAULT_INTERNAL_BUFFER_SIZE (4 * 1024 * 1024)
static bool initialized = false;
static Immediate imm;
static Immediate *imm = NULL;
void immInit(void)
{
BLI_assert(!initialized);
memset(&imm, 0, sizeof(Immediate));
imm.draw_buffer.vbo_id = GPU_buf_alloc();
imm.draw_buffer.buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
glBindBuffer(GL_ARRAY_BUFFER, imm.draw_buffer.vbo_id);
glBufferData(GL_ARRAY_BUFFER, imm.draw_buffer.buffer_size, NULL, GL_DYNAMIC_DRAW);
imm.draw_buffer_strict.vbo_id = GPU_buf_alloc();
imm.draw_buffer_strict.buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
glBindBuffer(GL_ARRAY_BUFFER, imm.draw_buffer_strict.vbo_id);
glBufferData(GL_ARRAY_BUFFER, imm.draw_buffer_strict.buffer_size, NULL, GL_DYNAMIC_DRAW);
imm.prim_type = GPU_PRIM_NONE;
imm.strict_vertex_len = true;
glBindBuffer(GL_ARRAY_BUFFER, 0);
initialized = true;
/* TODO Remove */
}
void immActivate(void)
{
BLI_assert(initialized);
BLI_assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we're not between a Begin/End pair */
BLI_assert(imm.vao_id == 0);
imm.vao_id = GPU_vao_alloc();
imm.context = GPU_context_active_get();
imm = GPU_context_active_get()->imm;
}
void immDeactivate(void)
{
BLI_assert(initialized);
BLI_assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we're not between a Begin/End pair */
BLI_assert(imm.vao_id != 0);
GPU_vao_free(imm.vao_id, imm.context);
imm.vao_id = 0;
imm.prev_enabled_attr_bits = 0;
imm = NULL;
}
void immDestroy(void)
{
GPU_buf_free(imm.draw_buffer.vbo_id);
GPU_buf_free(imm.draw_buffer_strict.vbo_id);
initialized = false;
/* TODO Remove */
}
GPUVertFormat *immVertexFormat(void)
{
GPU_vertformat_clear(&imm.vertex_format);
return &imm.vertex_format;
GPU_vertformat_clear(&imm->vertex_format);
return &imm->vertex_format;
}
void immBindShader(GPUShader *shader)
{
BLI_assert(imm.bound_program == NULL);
BLI_assert(imm->shader == NULL);
imm.bound_program = shader;
imm->shader = shader;
if (!imm.vertex_format.packed) {
VertexFormat_pack(&imm.vertex_format);
if (!imm->vertex_format.packed) {
VertexFormat_pack(&imm->vertex_format);
imm->enabled_attr_bits = 0xFFFFu & ~(0xFFFFu << imm->vertex_format.attr_len);
}
GPU_shader_bind(shader);
get_attr_locations(&imm.vertex_format, &imm.attr_binding, shader);
GPU_matrix_bind(shader);
GPU_shader_set_srgb_uniform(shader);
}
@@ -162,16 +93,16 @@ void immBindBuiltinProgram(eGPUBuiltinShader shader_id)
void immUnbindProgram(void)
{
BLI_assert(imm.bound_program != NULL);
BLI_assert(imm->shader != NULL);
GPU_shader_unbind();
imm.bound_program = NULL;
imm->shader = NULL;
}
/* XXX do not use it. Special hack to use OCIO with batch API. */
GPUShader *immGetShader(void)
{
return imm.bound_program;
return imm->shader;
}
#ifndef NDEBUG
@@ -205,267 +136,122 @@ static bool vertex_count_makes_sense_for_primitive(uint vertex_len, GPUPrimType
void immBegin(GPUPrimType prim_type, uint vertex_len)
{
BLI_assert(initialized);
BLI_assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we haven't already begun */
BLI_assert(imm->prim_type == GPU_PRIM_NONE); /* Make sure we haven't already begun. */
BLI_assert(vertex_count_makes_sense_for_primitive(vertex_len, prim_type));
BLI_assert(imm.active_buffer == NULL);
GPU_context_active_get()->state_manager->apply_state();
imm->prim_type = prim_type;
imm->vertex_len = vertex_len;
imm->vertex_idx = 0;
imm->unassigned_attr_bits = imm->enabled_attr_bits;
imm.prim_type = prim_type;
imm.vertex_len = vertex_len;
imm.vertex_idx = 0;
imm.unassigned_attr_bits = imm.attr_binding.enabled_bits;
/* how many bytes do we need for this draw call? */
const uint bytes_needed = vertex_buffer_size(&imm.vertex_format, vertex_len);
ImmediateDrawBuffer *active_buffer = imm.strict_vertex_len ? &imm.draw_buffer_strict :
&imm.draw_buffer;
imm.active_buffer = active_buffer;
glBindBuffer(GL_ARRAY_BUFFER, active_buffer->vbo_id);
/* does the current buffer have enough room? */
const uint available_bytes = active_buffer->buffer_size - active_buffer->buffer_offset;
bool recreate_buffer = false;
if (bytes_needed > active_buffer->buffer_size) {
/* expand the internal buffer */
active_buffer->buffer_size = bytes_needed;
recreate_buffer = true;
}
else if (bytes_needed < DEFAULT_INTERNAL_BUFFER_SIZE &&
active_buffer->buffer_size > DEFAULT_INTERNAL_BUFFER_SIZE) {
/* shrink the internal buffer */
active_buffer->buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
recreate_buffer = true;
}
/* ensure vertex data is aligned */
/* Might waste a little space, but it's safe. */
const uint pre_padding = padding(active_buffer->buffer_offset, imm.vertex_format.stride);
if (!recreate_buffer && ((bytes_needed + pre_padding) <= available_bytes)) {
active_buffer->buffer_offset += pre_padding;
}
else {
/* orphan this buffer & start with a fresh one */
/* this method works on all platforms, old & new */
glBufferData(GL_ARRAY_BUFFER, active_buffer->buffer_size, NULL, GL_DYNAMIC_DRAW);
active_buffer->buffer_offset = 0;
}
/* printf("mapping %u to %u\n", imm.buffer_offset, imm.buffer_offset + bytes_needed - 1); */
#ifndef NDEBUG
{
GLint bufsize;
glGetBufferParameteriv(GL_ARRAY_BUFFER, GL_BUFFER_SIZE, &bufsize);
BLI_assert(active_buffer->buffer_offset + bytes_needed <= bufsize);
}
#endif
active_buffer->buffer_data = (GLubyte *)glMapBufferRange(
GL_ARRAY_BUFFER,
active_buffer->buffer_offset,
bytes_needed,
GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT |
(imm.strict_vertex_len ? 0 : GL_MAP_FLUSH_EXPLICIT_BIT));
BLI_assert(active_buffer->buffer_data != NULL);
imm.buffer_bytes_mapped = bytes_needed;
imm.vertex_data = active_buffer->buffer_data;
imm->vertex_data = imm->begin();
}
void immBeginAtMost(GPUPrimType prim_type, uint vertex_len)
{
BLI_assert(vertex_len > 0);
imm.strict_vertex_len = false;
imm->strict_vertex_len = false;
immBegin(prim_type, vertex_len);
}
GPUBatch *immBeginBatch(GPUPrimType prim_type, uint vertex_len)
{
BLI_assert(initialized);
BLI_assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we haven't already begun */
BLI_assert(imm->prim_type == GPU_PRIM_NONE); /* Make sure we haven't already begun. */
BLI_assert(vertex_count_makes_sense_for_primitive(vertex_len, prim_type));
imm.prim_type = prim_type;
imm.vertex_len = vertex_len;
imm.vertex_idx = 0;
imm.unassigned_attr_bits = imm.attr_binding.enabled_bits;
imm->prim_type = prim_type;
imm->vertex_len = vertex_len;
imm->vertex_idx = 0;
imm->unassigned_attr_bits = imm->enabled_attr_bits;
GPUVertBuf *verts = GPU_vertbuf_create_with_format(&imm.vertex_format);
GPUVertBuf *verts = GPU_vertbuf_create_with_format(&imm->vertex_format);
GPU_vertbuf_data_alloc(verts, vertex_len);
imm.buffer_bytes_mapped = GPU_vertbuf_size_get(verts);
imm.vertex_data = verts->data;
imm->vertex_data = verts->data;
imm.batch = GPU_batch_create_ex(prim_type, verts, NULL, GPU_BATCH_OWNS_VBO);
imm.batch->flag |= GPU_BATCH_BUILDING;
imm->batch = GPU_batch_create_ex(prim_type, verts, NULL, GPU_BATCH_OWNS_VBO);
imm->batch->flag |= GPU_BATCH_BUILDING;
return imm.batch;
return imm->batch;
}
GPUBatch *immBeginBatchAtMost(GPUPrimType prim_type, uint vertex_len)
{
imm.strict_vertex_len = false;
BLI_assert(vertex_len > 0);
imm->strict_vertex_len = false;
return immBeginBatch(prim_type, vertex_len);
}
static void immDrawSetup(void)
{
/* set up VAO -- can be done during Begin or End really */
glBindVertexArray(imm.vao_id);
/* Enable/Disable vertex attributes as needed. */
if (imm.attr_binding.enabled_bits != imm.prev_enabled_attr_bits) {
for (uint loc = 0; loc < GPU_VERT_ATTR_MAX_LEN; loc++) {
bool is_enabled = imm.attr_binding.enabled_bits & (1 << loc);
bool was_enabled = imm.prev_enabled_attr_bits & (1 << loc);
if (is_enabled && !was_enabled) {
glEnableVertexAttribArray(loc);
}
else if (was_enabled && !is_enabled) {
glDisableVertexAttribArray(loc);
}
}
imm.prev_enabled_attr_bits = imm.attr_binding.enabled_bits;
}
const uint stride = imm.vertex_format.stride;
for (uint a_idx = 0; a_idx < imm.vertex_format.attr_len; a_idx++) {
const GPUVertAttr *a = &imm.vertex_format.attrs[a_idx];
const uint offset = imm.active_buffer->buffer_offset + a->offset;
const GLvoid *pointer = (const GLubyte *)0 + offset;
const uint loc = read_attr_location(&imm.attr_binding, a_idx);
const GLenum type = convert_comp_type_to_gl(static_cast<GPUVertCompType>(a->comp_type));
switch (a->fetch_mode) {
case GPU_FETCH_FLOAT:
case GPU_FETCH_INT_TO_FLOAT:
glVertexAttribPointer(loc, a->comp_len, type, GL_FALSE, stride, pointer);
break;
case GPU_FETCH_INT_TO_FLOAT_UNIT:
glVertexAttribPointer(loc, a->comp_len, type, GL_TRUE, stride, pointer);
break;
case GPU_FETCH_INT:
glVertexAttribIPointer(loc, a->comp_len, type, stride, pointer);
}
}
if (GPU_matrix_dirty_get()) {
GPU_matrix_bind(imm.bound_program);
}
}
void immEnd(void)
{
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm.active_buffer || imm.batch);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* Make sure we're between a Begin/End pair. */
BLI_assert(imm->vertex_data || imm->batch);
uint buffer_bytes_used;
if (imm.strict_vertex_len) {
BLI_assert(imm.vertex_idx == imm.vertex_len); /* with all vertices defined */
buffer_bytes_used = imm.buffer_bytes_mapped;
if (imm->strict_vertex_len) {
BLI_assert(imm->vertex_idx == imm->vertex_len); /* With all vertices defined. */
}
else {
BLI_assert(imm.vertex_idx <= imm.vertex_len);
if (imm.vertex_idx == imm.vertex_len) {
buffer_bytes_used = imm.buffer_bytes_mapped;
}
else {
BLI_assert(imm.vertex_idx == 0 ||
vertex_count_makes_sense_for_primitive(imm.vertex_idx, imm.prim_type));
imm.vertex_len = imm.vertex_idx;
buffer_bytes_used = vertex_buffer_size(&imm.vertex_format, imm.vertex_len);
/* unused buffer bytes are available to the next immBegin */
}
/* tell OpenGL what range was modified so it doesn't copy the whole mapped range */
glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, buffer_bytes_used);
BLI_assert(imm->vertex_idx <= imm->vertex_len);
BLI_assert(imm->vertex_idx == 0 ||
vertex_count_makes_sense_for_primitive(imm->vertex_idx, imm->prim_type));
}
if (imm.batch) {
if (buffer_bytes_used != imm.buffer_bytes_mapped) {
GPU_vertbuf_data_resize(imm.batch->verts[0], imm.vertex_len);
if (imm->batch) {
if (imm->vertex_idx < imm->vertex_len) {
GPU_vertbuf_data_resize(imm->batch->verts[0], imm->vertex_len);
/* TODO: resize only if vertex count is much smaller */
}
GPU_batch_set_shader(imm.batch, imm.bound_program);
imm.batch->flag &= ~GPU_BATCH_BUILDING;
imm.batch = NULL; /* don't free, batch belongs to caller */
GPU_batch_set_shader(imm->batch, imm->shader);
imm->batch->flag &= ~GPU_BATCH_BUILDING;
imm->batch = NULL; /* don't free, batch belongs to caller */
}
else {
glUnmapBuffer(GL_ARRAY_BUFFER);
if (imm.vertex_len > 0) {
immDrawSetup();
#ifdef __APPLE__
glDisable(GL_PRIMITIVE_RESTART);
#endif
glDrawArrays(convert_prim_type_to_gl(imm.prim_type), 0, imm.vertex_len);
#ifdef __APPLE__
glEnable(GL_PRIMITIVE_RESTART);
#endif
}
/* These lines are causing crash on startup on some old GPU + drivers.
* They are not required so just comment them. (T55722) */
// glBindBuffer(GL_ARRAY_BUFFER, 0);
// glBindVertexArray(0);
/* prep for next immBegin */
imm.active_buffer->buffer_offset += buffer_bytes_used;
imm->end();
}
/* prep for next immBegin */
imm.prim_type = GPU_PRIM_NONE;
imm.strict_vertex_len = true;
imm.active_buffer = NULL;
/* Prepare for next immBegin. */
imm->prim_type = GPU_PRIM_NONE;
imm->strict_vertex_len = true;
imm->vertex_data = NULL;
}
static void setAttrValueBit(uint attr_id)
{
uint16_t mask = 1 << attr_id;
BLI_assert(imm.unassigned_attr_bits & mask); /* not already set */
imm.unassigned_attr_bits &= ~mask;
BLI_assert(imm->unassigned_attr_bits & mask); /* not already set */
imm->unassigned_attr_bits &= ~mask;
}
/* --- generic attribute functions --- */
void immAttr1f(uint attr_id, float x)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_F32);
BLI_assert(attr->comp_len == 1);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
float *data = (float *)(imm.vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */
float *data = (float *)(imm->vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */
data[0] = x;
}
void immAttr2f(uint attr_id, float x, float y)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_F32);
BLI_assert(attr->comp_len == 2);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
float *data = (float *)(imm.vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */
float *data = (float *)(imm->vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */
data[0] = x;
data[1] = y;
@@ -473,16 +259,16 @@ void immAttr2f(uint attr_id, float x, float y)
void immAttr3f(uint attr_id, float x, float y, float z)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_F32);
BLI_assert(attr->comp_len == 3);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
float *data = (float *)(imm.vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */
float *data = (float *)(imm->vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */
data[0] = x;
data[1] = y;
@@ -491,16 +277,16 @@ void immAttr3f(uint attr_id, float x, float y, float z)
void immAttr4f(uint attr_id, float x, float y, float z, float w)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_F32);
BLI_assert(attr->comp_len == 4);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
float *data = (float *)(imm.vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */
float *data = (float *)(imm->vertex_data + attr->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */
data[0] = x;
data[1] = y;
@@ -510,30 +296,30 @@ void immAttr4f(uint attr_id, float x, float y, float z, float w)
void immAttr1u(uint attr_id, uint x)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_U32);
BLI_assert(attr->comp_len == 1);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
uint *data = (uint *)(imm.vertex_data + attr->offset);
uint *data = (uint *)(imm->vertex_data + attr->offset);
data[0] = x;
}
void immAttr2i(uint attr_id, int x, int y)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_I32);
BLI_assert(attr->comp_len == 2);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
int *data = (int *)(imm.vertex_data + attr->offset);
int *data = (int *)(imm->vertex_data + attr->offset);
data[0] = x;
data[1] = y;
@@ -541,15 +327,15 @@ void immAttr2i(uint attr_id, int x, int y)
void immAttr2s(uint attr_id, short x, short y)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_I16);
BLI_assert(attr->comp_len == 2);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
short *data = (short *)(imm.vertex_data + attr->offset);
short *data = (short *)(imm->vertex_data + attr->offset);
data[0] = x;
data[1] = y;
@@ -572,16 +358,16 @@ void immAttr4fv(uint attr_id, const float data[4])
void immAttr3ub(uint attr_id, uchar r, uchar g, uchar b)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_U8);
BLI_assert(attr->comp_len == 3);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
GLubyte *data = imm.vertex_data + attr->offset;
/* printf("%s %td %p\n", __FUNCTION__, data - imm.buffer_data, data); */
uchar *data = imm->vertex_data + attr->offset;
/* printf("%s %td %p\n", __FUNCTION__, data - imm->buffer_data, data); */
data[0] = r;
data[1] = g;
@@ -590,16 +376,16 @@ void immAttr3ub(uint attr_id, uchar r, uchar g, uchar b)
void immAttr4ub(uint attr_id, uchar r, uchar g, uchar b, uchar a)
{
GPUVertAttr *attr = &imm.vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm.vertex_format.attr_len);
GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(attr->comp_type == GPU_COMP_U8);
BLI_assert(attr->comp_len == 4);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
GLubyte *data = imm.vertex_data + attr->offset;
/* printf("%s %td %p\n", __FUNCTION__, data - imm.buffer_data, data); */
uchar *data = imm->vertex_data + attr->offset;
/* printf("%s %td %p\n", __FUNCTION__, data - imm->buffer_data, data); */
data[0] = r;
data[1] = g;
@@ -619,39 +405,39 @@ void immAttr4ubv(uint attr_id, const uchar data[4])
void immAttrSkip(uint attr_id)
{
BLI_assert(attr_id < imm.vertex_format.attr_len);
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(attr_id < imm->vertex_format.attr_len);
BLI_assert(imm->vertex_idx < imm->vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
setAttrValueBit(attr_id);
}
static void immEndVertex(void) /* and move on to the next vertex */
{
BLI_assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm.vertex_idx < imm.vertex_len);
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
/* Have all attributes been assigned values?
* If not, copy value from previous vertex. */
if (imm.unassigned_attr_bits) {
BLI_assert(imm.vertex_idx > 0); /* first vertex must have all attributes specified */
for (uint a_idx = 0; a_idx < imm.vertex_format.attr_len; a_idx++) {
if ((imm.unassigned_attr_bits >> a_idx) & 1) {
const GPUVertAttr *a = &imm.vertex_format.attrs[a_idx];
if (imm->unassigned_attr_bits) {
BLI_assert(imm->vertex_idx > 0); /* first vertex must have all attributes specified */
for (uint a_idx = 0; a_idx < imm->vertex_format.attr_len; a_idx++) {
if ((imm->unassigned_attr_bits >> a_idx) & 1) {
const GPUVertAttr *a = &imm->vertex_format.attrs[a_idx];
#if 0
printf("copying %s from vertex %u to %u\n", a->name, imm.vertex_idx - 1, imm.vertex_idx);
printf("copying %s from vertex %u to %u\n", a->name, imm->vertex_idx - 1, imm->vertex_idx);
#endif
GLubyte *data = imm.vertex_data + a->offset;
memcpy(data, data - imm.vertex_format.stride, a->sz);
GLubyte *data = imm->vertex_data + a->offset;
memcpy(data, data - imm->vertex_format.stride, a->sz);
/* TODO: consolidate copy of adjacent attributes */
}
}
}
imm.vertex_idx++;
imm.vertex_data += imm.vertex_format.stride;
imm.unassigned_attr_bits = imm.attr_binding.enabled_bits;
imm->vertex_idx++;
imm->vertex_data += imm->vertex_format.stride;
imm->unassigned_attr_bits = imm->enabled_attr_bits;
}
void immVertex2f(uint attr_id, float x, float y)
@@ -706,64 +492,64 @@ void immVertex2iv(uint attr_id, const int data[2])
void immUniform1f(const char *name, float x)
{
GPU_shader_uniform_1f(imm.bound_program, name, x);
GPU_shader_uniform_1f(imm->shader, name, x);
}
void immUniform2f(const char *name, float x, float y)
{
GPU_shader_uniform_2f(imm.bound_program, name, x, y);
GPU_shader_uniform_2f(imm->shader, name, x, y);
}
void immUniform2fv(const char *name, const float data[2])
{
GPU_shader_uniform_2fv(imm.bound_program, name, data);
GPU_shader_uniform_2fv(imm->shader, name, data);
}
void immUniform3f(const char *name, float x, float y, float z)
{
GPU_shader_uniform_3f(imm.bound_program, name, x, y, z);
GPU_shader_uniform_3f(imm->shader, name, x, y, z);
}
void immUniform3fv(const char *name, const float data[3])
{
GPU_shader_uniform_3fv(imm.bound_program, name, data);
GPU_shader_uniform_3fv(imm->shader, name, data);
}
void immUniform4f(const char *name, float x, float y, float z, float w)
{
GPU_shader_uniform_4f(imm.bound_program, name, x, y, z, w);
GPU_shader_uniform_4f(imm->shader, name, x, y, z, w);
}
void immUniform4fv(const char *name, const float data[4])
{
GPU_shader_uniform_4fv(imm.bound_program, name, data);
GPU_shader_uniform_4fv(imm->shader, name, data);
}
/* Note array index is not supported for name (i.e: "array[0]"). */
void immUniformArray4fv(const char *name, const float *data, int count)
{
GPU_shader_uniform_4fv_array(imm.bound_program, name, count, (float(*)[4])data);
GPU_shader_uniform_4fv_array(imm->shader, name, count, (float(*)[4])data);
}
void immUniformMatrix4fv(const char *name, const float data[4][4])
{
GPU_shader_uniform_mat4(imm.bound_program, name, data);
GPU_shader_uniform_mat4(imm->shader, name, data);
}
void immUniform1i(const char *name, int x)
{
GPU_shader_uniform_1i(imm.bound_program, name, x);
GPU_shader_uniform_1i(imm->shader, name, x);
}
void immBindTexture(const char *name, GPUTexture *tex)
{
int binding = GPU_shader_get_texture_binding(imm.bound_program, name);
int binding = GPU_shader_get_texture_binding(imm->shader, name);
GPU_texture_bind(tex, binding);
}
void immBindTextureSampler(const char *name, GPUTexture *tex, eGPUSamplerState state)
{
int binding = GPU_shader_get_texture_binding(imm.bound_program, name);
int binding = GPU_shader_get_texture_binding(imm->shader, name);
GPU_texture_bind_ex(tex, state, binding, true);
}
@@ -771,10 +557,10 @@ void immBindTextureSampler(const char *name, GPUTexture *tex, eGPUSamplerState s
void immUniformColor4f(float r, float g, float b, float a)
{
int32_t uniform_loc = GPU_shader_get_builtin_uniform(imm.bound_program, GPU_UNIFORM_COLOR);
int32_t uniform_loc = GPU_shader_get_builtin_uniform(imm->shader, GPU_UNIFORM_COLOR);
BLI_assert(uniform_loc != -1);
float data[4] = {r, g, b, a};
GPU_shader_uniform_vector(imm.bound_program, uniform_loc, 4, 1, data);
GPU_shader_uniform_vector(imm->shader, uniform_loc, 4, 1, data);
}
void immUniformColor4fv(const float rgba[4])

View File

@@ -0,0 +1,66 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2016 by Mike Erwin.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*
* Mimics old-style OpenGL immediate mode drawing.
*/
#pragma once
#include "GPU_batch.h"
#include "GPU_primitive.h"
#include "GPU_shader.h"
#include "GPU_vertex_format.h"
namespace blender::gpu {
class Immediate {
public:
/** Pointer to the mapped buffer data for the current vertex. */
uchar *vertex_data = NULL;
/** Current vertex index. */
uint vertex_idx = 0;
/** Length of the buffer in vertices. */
uint vertex_len = 0;
/** Which attributes of current vertex have not been given values? */
uint16_t unassigned_attr_bits = 0;
/** Attributes that need to be set. One bit per attribute. */
uint16_t enabled_attr_bits = 0;
/** Current draw call specification. */
GPUPrimType prim_type = GPU_PRIM_NONE;
GPUVertFormat vertex_format;
GPUShader *shader = NULL;
/** Enforce strict vertex count (disabled when using immBeginAtMost). */
bool strict_vertex_len = true;
/** Batch in construction when using immBeginBatch. */
GPUBatch *batch = NULL;
public:
Immediate(){};
virtual ~Immediate(){};
virtual uchar *begin(void) = 0;
virtual void end(void) = 0;
};
} // namespace blender::gpu
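
A backend only has to provide begin() and end() around this shared state. The
stub below is a hypothetical illustration of that minimal contract; the class
name and allocation calls are not part of this commit, and it assumes
MEM_guardedalloc.h and gpu_vertex_format_private.h are included:

    /* Hypothetical illustration only: a backend-side stub of the Immediate
     * interface. Real backends (such as the GL one added in this commit)
     * map a buffer in begin() and submit the draw call in end(). */
    class DummyImmediate : public Immediate {
     private:
      uchar *data_ = NULL;

     public:
      uchar *begin(void) override
      {
        /* Hand back storage large enough for vertex_len vertices of vertex_format. */
        data_ = (uchar *)MEM_mallocN(vertex_buffer_size(&vertex_format, vertex_len), __func__);
        return data_;
      }
      void end(void) override
      {
        /* A real backend would flush the written vertices and draw here. */
        MEM_freeN(data_);
        data_ = NULL;
      }
    };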

View File

@@ -31,6 +31,7 @@
#include "gpu_context_private.hh"
#include "gl_immediate.hh"
#include "gl_state.hh"
#include "gl_backend.hh" /* TODO remove */
@@ -55,6 +56,7 @@ GLContext::GLContext(void *ghost_window, GLSharedOrphanLists &shared_orphan_list
glBindBuffer(GL_ARRAY_BUFFER, 0);
state_manager = new GLStateManager();
imm = new GLImmediate();
ghost_window_ = ghost_window;
if (ghost_window) {

View File

@@ -0,0 +1,183 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2016 by Mike Erwin.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*
* Mimics old-style OpenGL immediate mode drawing.
*/
#include "BKE_global.h"
#include "gpu_context_private.hh"
#include "gpu_shader_private.hh"
#include "gpu_vertex_format_private.h"
#include "gl_context.hh"
#include "gl_primitive.hh"
#include "gl_vertex_array.hh"
#include "gl_immediate.hh"
namespace blender::gpu {
/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
* \{ */
GLImmediate::GLImmediate()
{
glGenVertexArrays(1, &vao_id_);
buffer.buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
glGenBuffers(1, &buffer.vbo_id);
glBindBuffer(GL_ARRAY_BUFFER, buffer.vbo_id);
glBufferData(GL_ARRAY_BUFFER, buffer.buffer_size, NULL, GL_DYNAMIC_DRAW);
buffer_strict.buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
glGenBuffers(1, &buffer_strict.vbo_id);
glBindBuffer(GL_ARRAY_BUFFER, buffer_strict.vbo_id);
glBufferData(GL_ARRAY_BUFFER, buffer_strict.buffer_size, NULL, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
#ifndef __APPLE__
if ((G.debug & G_DEBUG_GPU) && (GLEW_VERSION_4_3 || GLEW_KHR_debug)) {
glObjectLabel(GL_VERTEX_ARRAY, vao_id_, -1, "VAO-Immediate");
glObjectLabel(GL_BUFFER, buffer.vbo_id, -1, "VBO-ImmediateBuffer");
glObjectLabel(GL_BUFFER, buffer_strict.vbo_id, -1, "VBO-ImmediateBufferStrict");
}
#endif
}
GLImmediate::~GLImmediate()
{
glDeleteVertexArrays(1, &vao_id_);
glDeleteBuffers(1, &buffer.vbo_id);
glDeleteBuffers(1, &buffer_strict.vbo_id);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Buffer management
* \{ */
uchar *GLImmediate::begin()
{
/* How many bytes do we need for this draw call? */
const size_t bytes_needed = vertex_buffer_size(&vertex_format, vertex_len);
/* Does the current buffer have enough room? */
const size_t available_bytes = buffer_size() - buffer_offset();
glBindBuffer(GL_ARRAY_BUFFER, vbo_id());
bool recreate_buffer = false;
if (bytes_needed > buffer_size()) {
/* expand the internal buffer */
buffer_size() = bytes_needed;
recreate_buffer = true;
}
else if (bytes_needed < DEFAULT_INTERNAL_BUFFER_SIZE &&
buffer_size() > DEFAULT_INTERNAL_BUFFER_SIZE) {
/* shrink the internal buffer */
buffer_size() = DEFAULT_INTERNAL_BUFFER_SIZE;
recreate_buffer = true;
}
/* ensure vertex data is aligned */
/* Might waste a little space, but it's safe. */
const uint pre_padding = padding(buffer_offset(), vertex_format.stride);
if (!recreate_buffer && ((bytes_needed + pre_padding) <= available_bytes)) {
buffer_offset() += pre_padding;
}
else {
/* orphan this buffer & start with a fresh one */
glBufferData(GL_ARRAY_BUFFER, buffer_size(), NULL, GL_DYNAMIC_DRAW);
buffer_offset() = 0;
}
#ifndef NDEBUG
{
GLint bufsize;
glGetBufferParameteriv(GL_ARRAY_BUFFER, GL_BUFFER_SIZE, &bufsize);
BLI_assert(buffer_offset() + bytes_needed <= bufsize);
}
#endif
GLbitfield access = GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT;
if (!strict_vertex_len) {
access |= GL_MAP_FLUSH_EXPLICIT_BIT;
}
void *data = glMapBufferRange(GL_ARRAY_BUFFER, buffer_offset(), bytes_needed, access);
BLI_assert(data != NULL);
bytes_mapped_ = bytes_needed;
return (uchar *)data;
}
void GLImmediate::end(void)
{
BLI_assert(prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
uint buffer_bytes_used = bytes_mapped_;
if (!strict_vertex_len) {
if (vertex_idx != vertex_len) {
vertex_len = vertex_idx;
buffer_bytes_used = vertex_buffer_size(&vertex_format, vertex_len);
/* unused buffer bytes are available to the next immBegin */
}
/* tell OpenGL what range was modified so it doesn't copy the whole mapped range */
glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, buffer_bytes_used);
}
glUnmapBuffer(GL_ARRAY_BUFFER);
if (vertex_len > 0) {
GPU_context_active_get()->state_manager->apply_state();
/* We convert the byte offset into a vertex offset from the buffer's start.
* This works because we added padding to align the first vertex. */
uint v_first = buffer_offset() / vertex_format.stride;
GLVertArray::update_bindings(
vao_id_, v_first, &vertex_format, reinterpret_cast<Shader *>(shader)->interface);
/* Update matrices. */
GPU_shader_bind(shader);
#ifdef __APPLE__
glDisable(GL_PRIMITIVE_RESTART);
#endif
glDrawArrays(to_gl(prim_type), 0, vertex_len);
#ifdef __APPLE__
glEnable(GL_PRIMITIVE_RESTART);
#endif
/* These lines are causing crash on startup on some old GPU + drivers.
* They are not required so just comment them. (T55722) */
// glBindBuffer(GL_ARRAY_BUFFER, 0);
// glBindVertexArray(0);
}
buffer_offset() += buffer_bytes_used;
}
/** \} */
} // namespace blender::gpu

View File

@@ -0,0 +1,81 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2016 by Mike Erwin.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*
* Mimics old-style OpenGL immediate mode drawing.
*/
#pragma once
#include "MEM_guardedalloc.h"
#include "glew-mx.h"
#include "gpu_immediate_private.hh"
namespace blender::gpu {
/* size of internal buffer */
#define DEFAULT_INTERNAL_BUFFER_SIZE (4 * 1024 * 1024)
class GLImmediate : public Immediate {
private:
/* Use two buffers for strict and unstrict vertex count to
* avoid some huge driver slowdown (see T70922).
* Use accessor functions to get / modify. */
struct {
/** Opengl Handle for this buffer. */
GLuint vbo_id = 0;
/** Offset in bytes of the mapped data within the buffer. */
size_t buffer_offset = 0;
/** Size of the whole buffer in bytes. */
size_t buffer_size = 0;
} buffer, buffer_strict;
/** Size in bytes of the mapped region. */
size_t bytes_mapped_ = 0;
/** Vertex array for this immediate mode instance. */
GLuint vao_id_ = 0;
public:
GLImmediate();
~GLImmediate();
uchar *begin(void) override;
void end(void) override;
private:
GLuint &vbo_id(void)
{
return strict_vertex_len ? buffer_strict.vbo_id : buffer.vbo_id;
};
size_t &buffer_offset(void)
{
return strict_vertex_len ? buffer_strict.buffer_offset : buffer.buffer_offset;
};
size_t &buffer_size(void)
{
return strict_vertex_len ? buffer_strict.buffer_size : buffer.buffer_size;
};
};
} // namespace blender::gpu

View File

@@ -0,0 +1,65 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*
* Conversion of GPU primitive types (GPUPrimType) to OpenGL enums.
*/
#pragma once
#include "BLI_assert.h"
#include "GPU_primitive.h"
#include "glew-mx.h"
namespace blender::gpu {
static inline GLenum to_gl(GPUPrimType prim_type)
{
BLI_assert(prim_type != GPU_PRIM_NONE);
switch (prim_type) {
default:
case GPU_PRIM_POINTS:
return GL_POINTS;
case GPU_PRIM_LINES:
return GL_LINES;
case GPU_PRIM_LINE_STRIP:
return GL_LINE_STRIP;
case GPU_PRIM_LINE_LOOP:
return GL_LINE_LOOP;
case GPU_PRIM_TRIS:
return GL_TRIANGLES;
case GPU_PRIM_TRI_STRIP:
return GL_TRIANGLE_STRIP;
case GPU_PRIM_TRI_FAN:
return GL_TRIANGLE_FAN;
case GPU_PRIM_LINES_ADJ:
return GL_LINES_ADJACENCY;
case GPU_PRIM_LINE_STRIP_ADJ:
return GL_LINE_STRIP_ADJACENCY;
case GPU_PRIM_TRIS_ADJ:
return GL_TRIANGLES_ADJACENCY;
};
}
} // namespace blender::gpu

View File

@@ -155,6 +155,17 @@ void GLVertArray::update_bindings(const GLuint vao,
}
}
/* Another version of update_bindings for Immediate mode. */
void GLVertArray::update_bindings(const GLuint vao,
const uint v_first,
const GPUVertFormat *format,
const ShaderInterface *interface)
{
glBindVertexArray(vao);
vbo_bind(interface, format, v_first, 0, false);
}
/** \} */
} // namespace blender::gpu

View File

@@ -38,6 +38,11 @@ void update_bindings(const GLuint vao,
const ShaderInterface *interface,
const int base_instance);
void update_bindings(const GLuint vao,
const uint v_first,
const GPUVertFormat *format,
const ShaderInterface *interface);
} // namespace GLVertArray
} // namespace gpu