DRW: Remove automatic bind locations and use hardcoded locations for textures

This cleanup uses the recent changes to the shader interface to allow querying
the binding location a texture should use.

This should alleviate all the issues we have with texture state changes
recompiling the shaders at draw time.

All binds are now treated like persistent binds and will stick until a new
shading group binds a different shader. The only difference is that they can
still be changed by a new sub-group, or by another shading group using the
same shader.

Since unbinding can be costly, we only do it when running with `--debug-gpu`.
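
To illustrate, here is a minimal sketch of the new binding flow, using the two
APIs this commit introduces (`GPU_shader_get_texture_binding` and
`GPU_texture_bind_ex`); the sampler name is hypothetical:

```c
#include "GPU_shader.h"
#include "GPU_texture.h"

/* Bind a texture at the binding point baked into the shader interface,
 * instead of searching for a free slot at draw time. */
static void bind_at_fixed_location(GPUShader *shader, GPUTexture *tex)
{
  /* Binding point comes from the shader interface, not from slot tracking. */
  int binding = GPU_shader_get_texture_binding(shader, "image"); /* hypothetical name */
  if (binding != -1) {
    /* set_number = false: leave the unit cached on the texture untouched. */
    GPU_texture_bind_ex(tex, binding, false);
  }
}
```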
Clément Foucault 2020-06-02 18:14:28 +02:00
parent 47eccac21c
commit a1f9eebc0b
Notes: blender-bot 2023-02-14 08:06:35 +01:00
Referenced by issue #77315, Selection Highlighting (Outline Selected) not working in complex scene
Referenced by issue #77296, Image viewer shows colour banding when the denoise node is used.
Referenced by issue #66231, Performance degradation of Principled Shader in Eevee when metallic is turned off
8 changed files with 120 additions and 272 deletions

View File

@@ -591,9 +591,6 @@ static void drw_viewport_var_init(void)
ED_view3d_init_mats_rv3d(DST.draw_ctx.object_edit, rv3d);
}
/* Alloc array of texture reference. */
memset(&DST.RST, 0x0, sizeof(DST.RST));
if (G_draw.view_ubo == NULL) {
G_draw.view_ubo = DRW_uniformbuffer_create(sizeof(DRWViewUboStorage), NULL);
}

View File

@@ -277,13 +277,9 @@ typedef enum {
DRW_UNIFORM_FLOAT,
DRW_UNIFORM_FLOAT_COPY,
DRW_UNIFORM_TEXTURE,
DRW_UNIFORM_TEXTURE_PERSIST,
DRW_UNIFORM_TEXTURE_REF,
DRW_UNIFORM_TEXTURE_REF_PERSIST,
DRW_UNIFORM_BLOCK,
DRW_UNIFORM_BLOCK_PERSIST,
DRW_UNIFORM_BLOCK_REF,
DRW_UNIFORM_BLOCK_REF_PERSIST,
DRW_UNIFORM_TFEEDBACK_TARGET,
/** Per drawcall uniforms/UBO */
DRW_UNIFORM_BLOCK_OBMATS,
@@ -303,11 +299,25 @@ struct DRWUniform {
union {
/* For reference or array/vector types. */
const void *pvalue;
/* Single values. */
/* DRW_UNIFORM_TEXTURE */
struct {
union {
GPUTexture *texture;
GPUTexture **texture_ref;
};
eGPUSamplerState sampler_state;
};
/* DRW_UNIFORM_BLOCK */
union {
GPUUniformBuffer *block;
GPUUniformBuffer **block_ref;
};
/* DRW_UNIFORM_FLOAT_COPY */
float fvalue[4];
/* DRW_UNIFORM_INT_COPY */
int ivalue[4];
};
int location;
int location; /* Use as binding point for textures and ubos. */
uint32_t type : 5; /* DRWUniformType */
uint32_t length : 5; /* cannot be more than 16 */
uint32_t arraysize : 5; /* cannot be more than 16 too */
@@ -549,20 +559,6 @@ typedef struct DRWManager {
GPUDrawList *draw_list;
/** GPU Resource State: Memory storage between drawing. */
struct {
/* High end GPUs supports up to 32 binds per shader stage.
* We only use textures during the vertex and fragment stage,
* so 2 * 32 slots is a nice limit. */
GPUTexture *bound_texs[DST_MAX_SLOTS];
uint64_t bound_tex_slots;
uint64_t bound_tex_slots_persist;
GPUUniformBuffer *bound_ubos[DST_MAX_SLOTS];
uint64_t bound_ubo_slots;
uint64_t bound_ubo_slots_persist;
} RST;
struct {
/* TODO(fclem) optimize: use chunks. */
DRWDebugLine *lines;

View File

@@ -202,6 +202,20 @@ static DRWUniform *drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
BLI_assert(length <= 4);
memcpy(uni->fvalue, value, sizeof(float) * length);
break;
case DRW_UNIFORM_BLOCK:
uni->block = (GPUUniformBuffer *)value;
break;
case DRW_UNIFORM_BLOCK_REF:
uni->block_ref = (GPUUniformBuffer **)value;
break;
case DRW_UNIFORM_TEXTURE:
uni->texture = (GPUTexture *)value;
uni->sampler_state = GPU_SAMPLER_MAX; /* Use texture state for now. */
break;
case DRW_UNIFORM_TEXTURE_REF:
uni->texture_ref = (GPUTexture **)value;
uni->sampler_state = GPU_SAMPLER_MAX; /* Use texture state for now. */
break;
default:
uni->pvalue = (const float *)value;
break;
@@ -228,12 +242,11 @@ static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
int arraysize)
{
int location;
if (ELEM(type,
DRW_UNIFORM_BLOCK,
DRW_UNIFORM_BLOCK_PERSIST,
DRW_UNIFORM_BLOCK_REF,
DRW_UNIFORM_BLOCK_REF_PERSIST)) {
location = GPU_shader_get_uniform_block(shgroup->shader, name);
if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_REF)) {
location = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
}
else if (ELEM(type, DRW_UNIFORM_TEXTURE, DRW_UNIFORM_TEXTURE_REF)) {
location = GPU_shader_get_texture_binding(shgroup->shader, name);
}
else {
location = GPU_shader_get_uniform(shgroup->shader, name);
@@ -259,12 +272,13 @@ void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, con
/* Same as DRW_shgroup_uniform_texture but is guaranteed to be bound if shader does not change
* between shgrp. */
/* TODO remove */
void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup,
const char *name,
const GPUTexture *tex)
{
BLI_assert(tex != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}
void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
@@ -275,12 +289,13 @@ void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name,
/* Same as DRW_shgroup_uniform_texture_ref but is guaranteed to be bound if shader does not change
* between shgrp. */
/* TODO remove */
void DRW_shgroup_uniform_texture_ref_persistent(DRWShadingGroup *shgroup,
const char *name,
GPUTexture **tex)
{
BLI_assert(tex != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF_PERSIST, tex, 0, 1);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
}
void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
@@ -293,12 +308,13 @@ void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
/* Same as DRW_shgroup_uniform_block but is guaranteed to be bound if shader does not change
* between shgrp. */
/* TODO remove */
void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup,
const char *name,
const GPUUniformBuffer *ubo)
{
BLI_assert(ubo != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}
void DRW_shgroup_uniform_block_ref(DRWShadingGroup *shgroup,
@@ -311,12 +327,13 @@ void DRW_shgroup_uniform_block_ref(DRWShadingGroup *shgroup,
/* Same as DRW_shgroup_uniform_block_ref but is guaranteed to be bound if shader does not change
* between shgrp. */
/* TODO remove */
void DRW_shgroup_uniform_block_ref_persistent(DRWShadingGroup *shgroup,
const char *name,
GPUUniformBuffer **ubo)
{
BLI_assert(ubo != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_REF_PERSIST, ubo, 0, 1);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_REF, ubo, 0, 1);
}
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
@@ -1208,9 +1225,9 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
shgroup->uniforms = NULL;
/* TODO(fclem) make them builtin. */
int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
int model_ubo_location = GPU_shader_get_uniform_block(shader, "modelBlock");
int info_ubo_location = GPU_shader_get_uniform_block(shader, "infoBlock");
int view_ubo_location = GPU_shader_get_uniform_block_binding(shader, "viewBlock");
int model_ubo_location = GPU_shader_get_uniform_block_binding(shader, "modelBlock");
int info_ubo_location = GPU_shader_get_uniform_block_binding(shader, "infoBlock");
int baseinst_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_BASE_INSTANCE);
int chunkid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_CHUNK);
int resourceid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_ID);
@@ -1264,7 +1281,7 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
if (view_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, G_draw.view_ubo, 0, 1);
shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, 0, 1);
}
else {
/* Only here to support builtin shaders. This should not be used by engines. */
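Seen from engine code, the `*_persistent` setters above are now thin aliases
kept only until callers are updated (hence the TODO remove comments). A hedged
sketch of what a caller's setup looks like after this change, assuming the
usual `DRW_render.h` engine API and hypothetical resource names:

```c
#include "DRW_render.h"

static void my_engine_cache_init(DRWShadingGroup *shgrp,
                                 GPUTexture *tex,
                                 GPUUniformBuffer *ubo)
{
  /* Both setters resolve their binding point once via the shader interface
   * and now stick across draws, like the former persistent binds. */
  DRW_shgroup_uniform_texture(shgrp, "sourceBuffer", tex); /* hypothetical sampler */
  DRW_shgroup_uniform_block(shgrp, "commonBlock", ubo);    /* hypothetical UBO */
}
```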

View File

@@ -22,6 +22,7 @@
#include "draw_manager.h"
#include "BLI_alloca.h"
#include "BLI_math.h"
#include "BLI_math_bits.h"
#include "BLI_memblock.h"
@@ -744,107 +745,6 @@ BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *s
}
}
enum {
BIND_NONE = 0,
BIND_TEMP = 1, /* Release slot after this shading group. */
BIND_PERSIST = 2, /* Release slot only after the next shader change. */
};
static void set_bound_flags(uint64_t *slots, uint64_t *persist_slots, int slot_idx, char bind_type)
{
uint64_t slot = 1llu << (unsigned long)slot_idx;
*slots |= slot;
if (bind_type == BIND_PERSIST) {
*persist_slots |= slot;
}
}
static int get_empty_slot_index(uint64_t slots)
{
uint64_t empty_slots = ~slots;
/* Find first empty slot using bitscan. */
if (empty_slots != 0) {
if ((empty_slots & 0xFFFFFFFFlu) != 0) {
return (int)bitscan_forward_uint(empty_slots);
}
else {
return (int)bitscan_forward_uint(empty_slots >> 32) + 32;
}
}
else {
/* Greater than GPU_max_textures() */
return 99999;
}
}
static void bind_texture(GPUTexture *tex, char bind_type)
{
int idx = GPU_texture_bound_number(tex);
if (idx == -1) {
/* Texture isn't bound yet. Find an empty slot and bind it. */
idx = get_empty_slot_index(DST.RST.bound_tex_slots);
if (idx < GPU_max_textures()) {
GPUTexture **gpu_tex_slot = &DST.RST.bound_texs[idx];
/* Unbind any previous texture. */
if (*gpu_tex_slot != NULL) {
GPU_texture_unbind(*gpu_tex_slot);
}
GPU_texture_bind(tex, idx);
*gpu_tex_slot = tex;
}
else {
printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
return;
}
}
else {
/* This texture slot was released but the tex
* is still bound. Just flag the slot again. */
BLI_assert(DST.RST.bound_texs[idx] == tex);
}
set_bound_flags(&DST.RST.bound_tex_slots, &DST.RST.bound_tex_slots_persist, idx, bind_type);
}
static void bind_ubo(GPUUniformBuffer *ubo, char bind_type)
{
int idx = GPU_uniformbuffer_bindpoint(ubo);
if (idx == -1) {
/* UBO isn't bound yet. Find an empty slot and bind it. */
idx = get_empty_slot_index(DST.RST.bound_ubo_slots);
/* [0..1] are reserved ubo slots. */
idx += 2;
if (idx < GPU_max_ubo_binds()) {
GPUUniformBuffer **gpu_ubo_slot = &DST.RST.bound_ubos[idx];
/* Unbind any previous UBO. */
if (*gpu_ubo_slot != NULL) {
GPU_uniformbuffer_unbind(*gpu_ubo_slot);
}
GPU_uniformbuffer_bind(ubo, idx);
*gpu_ubo_slot = ubo;
}
else {
/* printf so user can report bad behavior */
printf("Not enough ubo slots! This should not happen!\n");
/* This is not depending on user input.
* It is our responsibility to make sure there is enough slots. */
BLI_assert(0);
return;
}
}
else {
BLI_assert(idx < 64);
/* This UBO slot was released but the UBO is
* still bound here. Just flag the slot again. */
BLI_assert(DST.RST.bound_ubos[idx] == ubo);
}
/* Remove offset for flag bitfield. */
idx -= 2;
set_bound_flags(&DST.RST.bound_ubo_slots, &DST.RST.bound_ubo_slots_persist, idx, bind_type);
}
#ifndef NDEBUG
/**
* Opengl specification is strict on buffer binding.
@@ -900,28 +800,6 @@ static bool ubo_bindings_validate(DRWShadingGroup *shgroup)
}
#endif
static void release_texture_slots(bool with_persist)
{
if (with_persist) {
DST.RST.bound_tex_slots = 0;
DST.RST.bound_tex_slots_persist = 0;
}
else {
DST.RST.bound_tex_slots &= DST.RST.bound_tex_slots_persist;
}
}
static void release_ubo_slots(bool with_persist)
{
if (with_persist) {
DST.RST.bound_ubo_slots = 0;
DST.RST.bound_ubo_slots_persist = 0;
}
else {
DST.RST.bound_ubo_slots &= DST.RST.bound_ubo_slots_persist;
}
}
static void draw_update_uniforms(DRWShadingGroup *shgroup,
DRWCommandsState *state,
bool *use_tfeedback)
@@ -929,78 +807,42 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
for (DRWUniformChunk *unichunk = shgroup->uniforms; unichunk; unichunk = unichunk->next) {
DRWUniform *uni = unichunk->uniforms;
for (int i = 0; i < unichunk->uniform_used; i++, uni++) {
GPUTexture *tex;
GPUUniformBuffer *ubo;
const void *data = uni->pvalue;
if (ELEM(uni->type, DRW_UNIFORM_INT_COPY, DRW_UNIFORM_FLOAT_COPY)) {
data = uni->fvalue;
}
switch (uni->type) {
case DRW_UNIFORM_INT_COPY:
GPU_shader_uniform_vector_int(
shgroup->shader, uni->location, uni->length, uni->arraysize, uni->ivalue);
break;
case DRW_UNIFORM_INT:
GPU_shader_uniform_vector_int(
shgroup->shader, uni->location, uni->length, uni->arraysize, data);
shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
break;
case DRW_UNIFORM_FLOAT_COPY:
GPU_shader_uniform_vector(
shgroup->shader, uni->location, uni->length, uni->arraysize, uni->fvalue);
break;
case DRW_UNIFORM_FLOAT:
GPU_shader_uniform_vector(
shgroup->shader, uni->location, uni->length, uni->arraysize, data);
shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
break;
case DRW_UNIFORM_TEXTURE:
tex = (GPUTexture *)uni->pvalue;
BLI_assert(tex);
bind_texture(tex, BIND_TEMP);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_TEXTURE_PERSIST:
tex = (GPUTexture *)uni->pvalue;
BLI_assert(tex);
bind_texture(tex, BIND_PERSIST);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
GPU_texture_bind_ex(uni->texture, uni->location, false);
break;
case DRW_UNIFORM_TEXTURE_REF:
tex = *((GPUTexture **)uni->pvalue);
BLI_assert(tex);
bind_texture(tex, BIND_TEMP);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_TEXTURE_REF_PERSIST:
tex = *((GPUTexture **)uni->pvalue);
BLI_assert(tex);
bind_texture(tex, BIND_PERSIST);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
GPU_texture_bind_ex(*uni->texture_ref, uni->location, false);
break;
case DRW_UNIFORM_BLOCK:
ubo = (GPUUniformBuffer *)uni->pvalue;
bind_ubo(ubo, BIND_TEMP);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
break;
case DRW_UNIFORM_BLOCK_PERSIST:
ubo = (GPUUniformBuffer *)uni->pvalue;
bind_ubo(ubo, BIND_PERSIST);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
GPU_uniformbuffer_bind(uni->block, uni->location);
break;
case DRW_UNIFORM_BLOCK_REF:
ubo = *((GPUUniformBuffer **)uni->pvalue);
bind_ubo(ubo, BIND_TEMP);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
break;
case DRW_UNIFORM_BLOCK_REF_PERSIST:
ubo = *((GPUUniformBuffer **)uni->pvalue);
bind_ubo(ubo, BIND_PERSIST);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
GPU_uniformbuffer_bind(*uni->block_ref, uni->location);
break;
case DRW_UNIFORM_BLOCK_OBMATS:
state->obmats_loc = uni->location;
ubo = DST.vmempool->matrices_ubo[0];
GPU_uniformbuffer_bind(ubo, 0);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
GPU_uniformbuffer_bind(DST.vmempool->matrices_ubo[0], uni->location);
break;
case DRW_UNIFORM_BLOCK_OBINFOS:
state->obinfos_loc = uni->location;
ubo = DST.vmempool->obinfos_ubo[0];
GPU_uniformbuffer_bind(ubo, 1);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
GPU_uniformbuffer_bind(DST.vmempool->obinfos_ubo[0], uni->location);
break;
case DRW_UNIFORM_RESOURCE_CHUNK:
state->chunkid_loc = uni->location;
@@ -1010,9 +852,9 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
state->resourceid_loc = uni->location;
break;
case DRW_UNIFORM_TFEEDBACK_TARGET:
BLI_assert(data && (*use_tfeedback == false));
*use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
((GPUVertBuf *)data)->vbo_id);
BLI_assert(uni->pvalue && (*use_tfeedback == false));
*use_tfeedback = GPU_shader_transform_feedback_enable(
shgroup->shader, ((GPUVertBuf *)uni->pvalue)->vbo_id);
break;
/* Legacy/Fallback support. */
case DRW_UNIFORM_BASE_INSTANCE:
@@ -1119,7 +961,7 @@ static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHa
}
if (state->obmats_loc != -1) {
GPU_uniformbuffer_unbind(DST.vmempool->matrices_ubo[state->resource_chunk]);
GPU_uniformbuffer_bind(DST.vmempool->matrices_ubo[chunk], 0);
GPU_uniformbuffer_bind(DST.vmempool->matrices_ubo[chunk], state->obmats_loc);
}
if (state->obinfos_loc != -1) {
GPU_uniformbuffer_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
@@ -1282,6 +1124,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
if (shader_changed) {
if (DST.shader) {
GPU_shader_unbind();
GPU_texture_unbind_all();
}
GPU_shader_bind(shgroup->shader);
DST.shader = shgroup->shader;
@@ -1292,9 +1135,6 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
DST.batch = NULL;
}
release_ubo_slots(shader_changed);
release_texture_slots(shader_changed);
draw_update_uniforms(shgroup, &state, &use_tfeedback);
drw_state_set(pass_state);
@@ -1476,24 +1316,9 @@ static void drw_draw_pass_ex(DRWPass *pass,
}
}
/* Clear Bound textures */
for (int i = 0; i < DST_MAX_SLOTS; i++) {
if (DST.RST.bound_texs[i] != NULL) {
GPU_texture_unbind(DST.RST.bound_texs[i]);
DST.RST.bound_texs[i] = NULL;
}
}
/* Clear Bound Ubos */
for (int i = 0; i < DST_MAX_SLOTS; i++) {
if (DST.RST.bound_ubos[i] != NULL) {
GPU_uniformbuffer_unbind(DST.RST.bound_ubos[i]);
DST.RST.bound_ubos[i] = NULL;
}
}
if (DST.shader) {
GPU_shader_unbind();
GPU_texture_unbind_all();
DST.shader = NULL;
}
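
Condensed, the bind path left in draw_update_uniforms() after removing the
slot tracking reduces to the following (a sketch, assuming the DRWUniform
layout shown in draw_manager.h above):

```c
#include "GPU_texture.h"
#include "GPU_uniformbuffer.h"

/* Sketch: per-uniform bind, with uni->location doubling as the binding point. */
static void draw_uniform_bind(const DRWUniform *uni)
{
  switch (uni->type) {
    case DRW_UNIFORM_TEXTURE:
      GPU_texture_bind_ex(uni->texture, uni->location, false);
      break;
    case DRW_UNIFORM_TEXTURE_REF:
      GPU_texture_bind_ex(*uni->texture_ref, uni->location, false);
      break;
    case DRW_UNIFORM_BLOCK:
      GPU_uniformbuffer_bind(uni->block, uni->location);
      break;
    case DRW_UNIFORM_BLOCK_REF:
      GPU_uniformbuffer_bind(*uni->block_ref, uni->location);
      break;
    default:
      break;
  }
}
```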

View File

@@ -94,13 +94,15 @@ void GPU_shader_set_srgb_uniform(const struct GPUShaderInterface *interface);
int GPU_shader_get_uniform(GPUShader *shader, const char *name);
int GPU_shader_get_builtin_uniform(GPUShader *shader, int builtin);
int GPU_shader_get_uniform_block(GPUShader *shader, const char *name);
int GPU_shader_get_uniform_block_binding(GPUShader *shader, const char *name);
int GPU_shader_get_texture_binding(GPUShader *shader, const char *name);
void GPU_shader_uniform_vector(
GPUShader *shader, int location, int length, int arraysize, const float *value);
void GPU_shader_uniform_vector_int(
GPUShader *shader, int location, int length, int arraysize, const int *value);
void GPU_shader_uniform_buffer(GPUShader *shader, int location, struct GPUUniformBuffer *ubo);
void GPU_shader_uniform_texture(GPUShader *shader, int location, struct GPUTexture *tex);
void GPU_shader_uniform_float(GPUShader *shader, int location, float value);
void GPU_shader_uniform_int(GPUShader *shader, int location, int value);

View File

@@ -261,8 +261,9 @@ void GPU_texture_free(GPUTexture *tex);
void GPU_texture_ref(GPUTexture *tex);
void GPU_texture_bind(GPUTexture *tex, int number);
void GPU_texture_bind_ex(GPUTexture *tex, int unit, const bool set_number);
void GPU_texture_unbind(GPUTexture *tex);
int GPU_texture_bound_number(GPUTexture *tex);
void GPU_texture_unbind_all(void);
void GPU_texture_copy(GPUTexture *dst, GPUTexture *src);

View File

@@ -746,6 +746,20 @@ int GPU_shader_get_uniform_block(GPUShader *shader, const char *name)
return ubo ? ubo->location : -1;
}
int GPU_shader_get_uniform_block_binding(GPUShader *shader, const char *name)
{
BLI_assert(shader && shader->program);
const GPUShaderInput *ubo = GPU_shaderinterface_ubo(shader->interface, name);
return ubo ? ubo->binding : -1;
}
int GPU_shader_get_texture_binding(GPUShader *shader, const char *name)
{
BLI_assert(shader && shader->program);
const GPUShaderInput *tex = GPU_shaderinterface_uniform(shader->interface, name);
return tex ? tex->binding : -1;
}
void *GPU_shader_get_interface(GPUShader *shader)
{
return shader->interface;
@@ -833,34 +847,6 @@ void GPU_shader_uniform_int(GPUShader *UNUSED(shader), int location, int value)
glUniform1i(location, value);
}
void GPU_shader_uniform_buffer(GPUShader *shader, int location, GPUUniformBuffer *ubo)
{
int bindpoint = GPU_uniformbuffer_bindpoint(ubo);
if (location == -1) {
return;
}
glUniformBlockBinding(shader->program, location, bindpoint);
}
void GPU_shader_uniform_texture(GPUShader *UNUSED(shader), int location, GPUTexture *tex)
{
int number = GPU_texture_bound_number(tex);
if (number == -1) {
fprintf(stderr, "Texture is not bound.\n");
BLI_assert(0);
return;
}
if (location == -1) {
return;
}
glUniform1i(location, number);
}
void GPU_shader_set_srgb_uniform(const GPUShaderInterface *interface)
{
const GPUShaderInput *srgb_uniform = GPU_shaderinterface_uniform_builtin(

View File

@@ -1712,7 +1712,8 @@ void GPU_invalid_tex_free(void)
}
}
void GPU_texture_bind(GPUTexture *tex, int unit)
/* set_number is to save the texture unit for setting texture parameters. */
void GPU_texture_bind_ex(GPUTexture *tex, int unit, const bool set_number)
{
BLI_assert(unit >= 0);
@@ -1721,7 +1722,7 @@ void GPU_texture_bind(GPUTexture *tex, int unit)
return;
}
if ((G.debug & G_DEBUG)) {
if (G.debug & G_DEBUG) {
for (int i = 0; i < GPU_TEX_MAX_FBO_ATTACHED; i++) {
if (tex->fb[i] && GPU_framebuffer_bound(tex->fb[i])) {
fprintf(stderr,
@@ -1733,7 +1734,10 @@ void GPU_texture_bind(GPUTexture *tex, int unit)
}
}
tex->number = unit;
if (set_number) {
tex->number = unit;
}
glActiveTexture(GL_TEXTURE0 + unit);
if (tex->bindcode != 0) {
@@ -1746,6 +1750,11 @@ void GPU_texture_bind(GPUTexture *tex, int unit)
}
}
void GPU_texture_bind(GPUTexture *tex, int unit)
{
GPU_texture_bind_ex(tex, unit, true);
}
void GPU_texture_unbind(GPUTexture *tex)
{
if (tex->number == -1) {
@@ -1758,10 +1767,25 @@ void GPU_texture_unbind(GPUTexture *tex)
tex->number = -1;
}
int GPU_texture_bound_number(GPUTexture *tex)
void GPU_texture_unbind_all(void)
{
/* TODO remove. Makes no sense now. */
return tex->number;
/* Unbinding can be costly. Skip in normal condition. */
if (G.debug & G_DEBUG_GPU) {
for (int i = 0; i < GPU_max_textures(); i++) {
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture(GL_TEXTURE_2D, 0);
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
glBindTexture(GL_TEXTURE_1D, 0);
glBindTexture(GL_TEXTURE_1D_ARRAY, 0);
glBindTexture(GL_TEXTURE_3D, 0);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
glBindTexture(GL_TEXTURE_BUFFER, 0);
if (GPU_arb_texture_cube_map_array_is_supported()) {
glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY_ARB, 0);
}
glBindSampler(i, 0);
}
}
}
#define WARN_NOT_BOUND(_tex) \
@@ -1785,8 +1809,8 @@ void GPU_texture_generate_mipmap(GPUTexture *tex)
if (GPU_texture_depth(tex)) {
/* Some drivers have bugs when using glGenerateMipmap with depth textures (see T56789).
* In this case we just create a complete texture with mipmaps manually without down-sampling.
* You must initialize the texture levels using other methods like
* In this case we just create a complete texture with mipmaps manually without
* down-sampling. You must initialize the texture levels using other methods like
* GPU_framebuffer_recursive_downsample(). */
eGPUDataFormat data_format = gpu_get_data_format_from_tex_format(tex->format);
for (int i = 1; i < levels; i++) {