GPUShader: Change shader state tracking to be part of the GPUContext
This removes the use of batch->program and replaces it with batch->shader. This will allow GL abstraction later.
This commit is contained in:
parent
186abf7d3b
commit
854c999d82
Notes:
blender-bot
2023-02-14 08:42:53 +01:00
Referenced by issue #89318, Weird glitch at some models Referenced by issue #83022, Transparency in Solid View Causes Glitch Artifacts
|
@ -661,18 +661,9 @@ BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
|
|||
|
||||
BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, GPUBatch *geom)
|
||||
{
|
||||
/* XXX hacking #GPUBatch. we don't want to call glUseProgram! (huge performance loss) */
|
||||
if (DST.batch) {
|
||||
DST.batch->program_in_use = false;
|
||||
}
|
||||
|
||||
DST.batch = geom;
|
||||
|
||||
GPU_batch_set_shader_no_bind(geom, shgroup->shader);
|
||||
|
||||
geom->program_in_use = true; /* XXX hacking #GPUBatch */
|
||||
|
||||
GPU_batch_bind(geom);
|
||||
GPU_batch_set_shader(geom, shgroup->shader);
|
||||
}
|
||||
|
||||
BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup,
|
||||
|
@ -1096,10 +1087,6 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
|
|||
}
|
||||
GPU_shader_bind(shgroup->shader);
|
||||
DST.shader = shgroup->shader;
|
||||
/* XXX hacking gawain */
|
||||
if (DST.batch) {
|
||||
DST.batch->program_in_use = false;
|
||||
}
|
||||
DST.batch = NULL;
|
||||
}
|
||||
|
||||
|
@ -1290,7 +1277,6 @@ static void drw_draw_pass_ex(DRWPass *pass,
|
|||
}
|
||||
|
||||
if (DST.batch) {
|
||||
DST.batch->program_in_use = false;
|
||||
DST.batch = NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -1185,8 +1185,6 @@ void UI_widgetbase_draw_cache_flush(void)
|
|||
GPU_shader_set_srgb_uniform(batch->interface);
|
||||
GPU_batch_bind(batch);
|
||||
GPU_batch_draw_advanced(batch, 0, 0, 0, g_widget_base_batch.count);
|
||||
|
||||
GPU_batch_program_use_end(batch);
|
||||
}
|
||||
g_widget_base_batch.count = 0;
|
||||
}
|
||||
|
|
|
@ -1167,7 +1167,6 @@ static void knifetool_draw(const bContext *UNUSED(C), ARegion *UNUSED(region), v
|
|||
GPU_batch_draw_advanced(batch, snapped_verts_count, other_verts_count, 0, 0);
|
||||
}
|
||||
|
||||
GPU_batch_program_use_end(batch);
|
||||
GPU_batch_discard(batch);
|
||||
|
||||
GPU_blend(false);
|
||||
|
|
|
@ -343,6 +343,7 @@ static void drawscredge_area_draw(
|
|||
}
|
||||
|
||||
GPUBatch *batch = batch_screen_edges_get(NULL);
|
||||
GPU_batch_program_set_builtin(batch, GPU_SHADER_2D_AREA_EDGES);
|
||||
GPU_batch_uniform_4fv(batch, "rect", (float *)&rect);
|
||||
GPU_batch_draw(batch);
|
||||
}
|
||||
|
|
|
@ -273,8 +273,6 @@ static void draw_line_loop(const float coords[][3], int coords_len, const float
|
|||
|
||||
GPU_batch_draw(batch);
|
||||
|
||||
GPU_batch_program_use_end(batch);
|
||||
|
||||
GPU_batch_discard(batch);
|
||||
GPU_blend(false);
|
||||
}
|
||||
|
@ -310,8 +308,6 @@ static void draw_line_pairs(const float coords_a[][3],
|
|||
|
||||
GPU_batch_draw(batch);
|
||||
|
||||
GPU_batch_program_use_end(batch);
|
||||
|
||||
GPU_batch_discard(batch);
|
||||
GPU_blend(false);
|
||||
}
|
||||
|
@ -362,8 +358,6 @@ static void draw_line_bounds(const BoundBox *bounds, const float color[4])
|
|||
|
||||
GPU_batch_draw(batch);
|
||||
|
||||
GPU_batch_program_use_end(batch);
|
||||
|
||||
GPU_batch_discard(batch);
|
||||
GPU_blend(false);
|
||||
}
|
||||
|
|
|
@ -311,8 +311,6 @@ static void draw_uvs_texpaint(const Scene *scene, Object *ob, Depsgraph *depsgra
|
|||
if (prev_ma_match == true) {
|
||||
GPU_batch_draw_advanced(geom, draw_start, idx - draw_start, 0, 0);
|
||||
}
|
||||
|
||||
GPU_batch_program_use_end(geom);
|
||||
}
|
||||
else {
|
||||
GPU_batch_draw(geom);
|
||||
|
|
|
@ -57,11 +57,13 @@ typedef struct GPUBatch {
|
|||
GPUVertBuf *inst[GPU_BATCH_INST_VBO_MAX_LEN];
|
||||
/** NULL if element list not needed */
|
||||
GPUIndexBuf *elem;
|
||||
|
||||
GPUShader *shader;
|
||||
|
||||
GPUPrimType prim_type;
|
||||
|
||||
/* cached values (avoid dereferencing later) */
|
||||
uint32_t vao_id;
|
||||
uint32_t program;
|
||||
const struct GPUShaderInterface *interface;
|
||||
|
||||
/* book-keeping */
|
||||
|
@ -69,7 +71,6 @@ typedef struct GPUBatch {
|
|||
/** used to free all vaos. this implies all vaos were created under the same context. */
|
||||
struct GPUContext *context;
|
||||
GPUBatchPhase phase;
|
||||
bool program_in_use;
|
||||
|
||||
/* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
|
||||
* for each shader interface. Start with a static number of vaos and fallback to dynamic count
|
||||
|
@ -128,17 +129,11 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
|
|||
#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)
|
||||
|
||||
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader);
|
||||
void GPU_batch_set_shader_no_bind(GPUBatch *batch, GPUShader *shader);
|
||||
void GPU_batch_program_set_imm_shader(GPUBatch *batch);
|
||||
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
|
||||
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
|
||||
eGPUBuiltinShader shader_id,
|
||||
eGPUShaderConfig sh_cfg);
|
||||
/* Entire batch draws with one shader program, but can be redrawn later with another program. */
|
||||
/* Vertex shader's inputs must be compatible with the batch's vertex format. */
|
||||
|
||||
void GPU_batch_program_use_begin(GPUBatch *); /* call before Batch_Uniform (temp hack?) */
|
||||
void GPU_batch_program_use_end(GPUBatch *);
|
||||
|
||||
void GPU_batch_uniform_1ui(GPUBatch *, const char *name, uint value);
|
||||
void GPU_batch_uniform_1i(GPUBatch *, const char *name, int value);
|
||||
|
|
|
@ -376,21 +376,13 @@ static GLuint batch_vao_get(GPUBatch *batch)
|
|||
return new_vao;
|
||||
}
|
||||
|
||||
void GPU_batch_set_shader_no_bind(GPUBatch *batch, GPUShader *shader)
|
||||
{
|
||||
#if TRUST_NO_ONE
|
||||
assert(glIsProgram(shader->program));
|
||||
assert(batch->program_in_use == 0);
|
||||
#endif
|
||||
batch->interface = shader->interface;
|
||||
batch->program = shader->program;
|
||||
batch->vao_id = batch_vao_get(batch);
|
||||
}
|
||||
|
||||
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
|
||||
{
|
||||
GPU_batch_set_shader_no_bind(batch, shader);
|
||||
GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
|
||||
batch->interface = shader->interface;
|
||||
batch->shader = shader;
|
||||
batch->vao_id = batch_vao_get(batch);
|
||||
GPU_shader_bind(batch->shader); /* hack! to make Batch_Uniform* simpler */
|
||||
GPU_batch_bind(batch);
|
||||
}
|
||||
|
||||
void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
|
||||
|
@ -523,29 +515,6 @@ static void batch_update_program_bindings(GPUBatch *batch, uint i_first)
|
|||
}
|
||||
}
|
||||
|
||||
void GPU_batch_program_use_begin(GPUBatch *batch)
|
||||
{
|
||||
/* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
|
||||
* the GL context's active program.
|
||||
* use_program doesn't mark other programs as "not used". */
|
||||
/* TODO: make not fragile (somehow) */
|
||||
|
||||
if (!batch->program_in_use) {
|
||||
glUseProgram(batch->program);
|
||||
batch->program_in_use = true;
|
||||
}
|
||||
}
|
||||
|
||||
void GPU_batch_program_use_end(GPUBatch *batch)
|
||||
{
|
||||
if (batch->program_in_use) {
|
||||
#if PROGRAM_NO_OPTI
|
||||
glUseProgram(0);
|
||||
#endif
|
||||
batch->program_in_use = false;
|
||||
}
|
||||
}
|
||||
|
||||
#if TRUST_NO_ONE
|
||||
# define GET_UNIFORM \
|
||||
const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->interface, name); \
|
||||
|
@ -670,14 +639,14 @@ void GPU_batch_draw(GPUBatch *batch)
|
|||
assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
|
||||
assert(batch->verts[0]->vbo_id != 0);
|
||||
#endif
|
||||
GPU_batch_program_use_begin(batch);
|
||||
GPU_shader_bind(batch->shader);
|
||||
GPU_matrix_bind(batch->interface); // external call.
|
||||
GPU_shader_set_srgb_uniform(batch->interface);
|
||||
|
||||
GPU_batch_bind(batch);
|
||||
GPU_batch_draw_advanced(batch, 0, 0, 0, 0);
|
||||
|
||||
GPU_batch_program_use_end(batch);
|
||||
GPU_shader_unbind();
|
||||
}
|
||||
|
||||
#if GPU_TRACK_INDEX_RANGE
|
||||
|
@ -690,7 +659,7 @@ void GPU_batch_draw(GPUBatch *batch)
|
|||
|
||||
void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count)
|
||||
{
|
||||
BLI_assert(batch->program_in_use);
|
||||
BLI_assert(GPU_context_active_get()->shader != NULL);
|
||||
/* TODO could assert that VAO is bound. */
|
||||
|
||||
if (v_count == 0) {
|
||||
|
|
|
@ -41,6 +41,7 @@ struct GPUMatrixState;
|
|||
struct GPUContext {
|
||||
public:
|
||||
/** State managment */
|
||||
GPUShader *shader = NULL;
|
||||
GPUFrameBuffer *current_fbo = NULL;
|
||||
GPUMatrixState *matrix_state = NULL;
|
||||
|
||||
|
|
|
@ -171,12 +171,8 @@ void immBindBuiltinProgram(eGPUBuiltinShader shader_id)
|
|||
|
||||
void immUnbindProgram(void)
|
||||
{
|
||||
#if TRUST_NO_ONE
|
||||
assert(imm.bound_program != NULL);
|
||||
#endif
|
||||
#if PROGRAM_NO_OPTI
|
||||
glUseProgram(0);
|
||||
#endif
|
||||
BLI_assert(imm.bound_program != NULL);
|
||||
GPU_shader_unbind();
|
||||
imm.bound_program = NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -42,6 +42,7 @@
|
|||
#include "GPU_texture.h"
|
||||
#include "GPU_uniformbuffer.h"
|
||||
|
||||
#include "gpu_context_private.hh"
|
||||
#include "gpu_shader_private.h"
|
||||
|
||||
extern "C" char datatoc_gpu_shader_colorspace_lib_glsl[];
|
||||
|
@ -598,14 +599,23 @@ void GPU_shader_bind(GPUShader *shader)
|
|||
{
|
||||
BLI_assert(shader && shader->program);
|
||||
|
||||
glUseProgram(shader->program);
|
||||
GPU_matrix_bind(shader->interface);
|
||||
GPU_shader_set_srgb_uniform(shader->interface);
|
||||
GPUContext *ctx = GPU_context_active_get();
|
||||
|
||||
if (ctx->shader != shader) {
|
||||
ctx->shader = shader;
|
||||
glUseProgram(shader->program);
|
||||
GPU_matrix_bind(shader->interface);
|
||||
GPU_shader_set_srgb_uniform(shader->interface);
|
||||
}
|
||||
}
|
||||
|
||||
void GPU_shader_unbind(void)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
GPUContext *ctx = GPU_context_active_get();
|
||||
ctx->shader = NULL;
|
||||
glUseProgram(0);
|
||||
#endif
|
||||
}
|
||||
|
||||
/** \} */
|
||||
|
|
|
@ -184,8 +184,7 @@ void GLDrawList::submit(void)
|
|||
/* Something's wrong if we get here without MDI support. */
|
||||
BLI_assert(MDI_ENABLED);
|
||||
BLI_assert(data_);
|
||||
/* TODO fix this assert */
|
||||
// BLI_assert(batch_->program_in_use);
|
||||
BLI_assert(GPU_context_active_get()->shader != NULL);
|
||||
/* TODO could assert that VAO is bound. */
|
||||
|
||||
/* Only do multi-draw indirect if doing more than 2 drawcall. This avoids the overhead of
|
||||
|
|
|
@ -50,7 +50,7 @@
|
|||
|
||||
static bool bpygpu_batch_is_program_or_error(BPyGPUBatch *self)
|
||||
{
|
||||
if (!glIsProgram(self->batch->program)) {
|
||||
if (!self->batch->shader) {
|
||||
PyErr_SetString(PyExc_RuntimeError, "batch does not have any program assigned to it");
|
||||
return false;
|
||||
}
|
||||
|
@ -227,7 +227,7 @@ static PyObject *bpygpu_Batch_draw(BPyGPUBatch *self, PyObject *args)
|
|||
return NULL;
|
||||
}
|
||||
}
|
||||
else if (self->batch->program != GPU_shader_get_program(py_program->shader)) {
|
||||
else if (self->batch->shader != py_program->shader) {
|
||||
GPU_batch_set_shader(self->batch, py_program->shader);
|
||||
}
|
||||
|
||||
|
@ -240,7 +240,7 @@ static PyObject *bpygpu_Batch_program_use_begin(BPyGPUBatch *self)
|
|||
if (!bpygpu_batch_is_program_or_error(self)) {
|
||||
return NULL;
|
||||
}
|
||||
GPU_batch_program_use_begin(self->batch);
|
||||
GPU_shader_bind(self->batch->shader);
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
|
@ -249,7 +249,7 @@ static PyObject *bpygpu_Batch_program_use_end(BPyGPUBatch *self)
|
|||
if (!bpygpu_batch_is_program_or_error(self)) {
|
||||
return NULL;
|
||||
}
|
||||
GPU_batch_program_use_end(self->batch);
|
||||
GPU_shader_unbind();
|
||||
Py_RETURN_NONE;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue