Eevee: Fix T52738: Probes are black.

This fixes the broken binding logic.

Note the current method does a lot of useless binding. We should order the textures so that reused textures are already bound most of the time.
This commit is contained in:
Clément Foucault 2017-09-15 20:08:31 +02:00
parent 208d6f28c8
commit f565d8c4ae
Notes: blender-bot 2023-02-14 06:57:56 +01:00
Referenced by issue #52738, Probes are black
2 changed files with 63 additions and 28 deletions

View File

@ -364,8 +364,8 @@ static struct DRWResourceState {
GPUTexture **bound_texs;
GPUUniformBuffer **bound_ubos;
int bind_tex_inc;
int bind_ubo_inc;
bool *bound_tex_slots;
bool *bound_ubo_slots;
} RST = {NULL};
static struct DRWMatrixOveride {
@ -1833,35 +1833,55 @@ static void draw_geometry(DRWShadingGroup *shgroup, Gwn_Batch *geom, const float
draw_geometry_execute(shgroup, geom);
}
/**
 * Bind `tex` to a texture slot for the current shading group.
 *
 * If the texture is already bound (GPU_texture_bound_number() returns its
 * slot), only flag that slot as in-use so it is not recycled by another
 * texture of the same group. Otherwise search for a free slot, bind there,
 * and remember the texture so reused textures stay bound across groups.
 *
 * NOTE(review): added an explicit `return` on the "no free slot" path —
 * falling through with bind_num == -1 would write RST.bound_tex_slots[-1]
 * (out-of-bounds).
 */
static void bind_texture(GPUTexture *tex)
{
	int bind_num = GPU_texture_bound_number(tex);
	if (bind_num == -1) {
		for (int i = 0; i < GPU_max_textures(); ++i) {
			if (RST.bound_tex_slots[i] == false) {
				GPU_texture_bind(tex, i);
				RST.bound_texs[i] = tex;
				RST.bound_tex_slots[i] = true;
				return;
			}
		}
		printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
		return;
	}
	RST.bound_tex_slots[bind_num] = true;
}
/**
 * Bind `ubo` to a UBO binding point for the current shading group.
 *
 * Mirrors bind_texture(): if the UBO already has a binding point, just flag
 * its slot as in-use; otherwise grab the first free slot and bind there.
 *
 * NOTE(review): added an explicit `return` on the "no free slot" path —
 * falling through with bind_num == -1 would write RST.bound_ubo_slots[-1]
 * (out-of-bounds).
 */
static void bind_ubo(GPUUniformBuffer *ubo)
{
	int bind_num = GPU_uniformbuffer_bindpoint(ubo);
	if (bind_num == -1) {
		for (int i = 0; i < GPU_max_ubo_binds(); ++i) {
			if (RST.bound_ubo_slots[i] == false) {
				GPU_uniformbuffer_bind(ubo, i);
				RST.bound_ubos[i] = ubo;
				RST.bound_ubo_slots[i] = true;
				return;
			}
		}
		/* This does not depend on user input.
		 * It is our responsibility to make sure there are enough slots. */
		BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");
		/* printf so user can report bad behaviour */
		printf("Not enough ubo slots! This should not happen!\n");
		return;
	}
	RST.bound_ubo_slots[bind_num] = true;
}
static void release_texture_slots(void)
{
memset(RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures());
}
static void release_ubo_slots(void)
{
memset(RST.bound_ubo_slots, 0x0, sizeof(bool) * GPU_max_ubo_binds());
}
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
@ -1869,8 +1889,6 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
BLI_assert(shgroup->shader);
BLI_assert(shgroup->interface);
RST.bind_tex_inc = GPU_max_textures() - 1; /* Reset texture counter. */
RST.bind_ubo_inc = GPU_max_ubo_binds() - 1; /* Reset UBO counter. */
DRWInterface *interface = shgroup->interface;
GPUTexture *tex;
GPUUniformBuffer *ubo;
@ -1889,8 +1907,12 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
shgroup_dynamic_batch_from_calls(shgroup);
}
release_texture_slots();
release_ubo_slots();
DRW_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
/* Binding Uniform */
/* Don't check anything, Interface should already contain the least uniform as possible */
for (DRWUniform *uni = interface->uniforms.first; uni; uni = uni->next) {
@ -1919,7 +1941,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
case DRW_UNIFORM_TEXTURE:
tex = (GPUTexture *)uni->value;
BLI_assert(tex);
draw_bind_texture(tex);
bind_texture(tex);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_BUFFER:
@ -1928,12 +1950,12 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
tex = *((GPUTexture **)uni->value);
BLI_assert(tex);
draw_bind_texture(tex);
bind_texture(tex);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_BLOCK:
ubo = (GPUUniformBuffer *)uni->value;
draw_bind_ubo(ubo);
bind_ubo(ubo);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
break;
}
@ -2054,7 +2076,10 @@ static void DRW_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWSha
/* Clear Bound Ubos */
for (int i = 0; i < GPU_max_ubo_binds(); i++) {
RST.bound_ubos[i] = NULL;
if (RST.bound_ubos[i] != NULL) {
GPU_uniformbuffer_unbind(RST.bound_ubos[i]);
RST.bound_ubos[i] = NULL;
}
}
if (DST.shader) {
@ -2527,11 +2552,17 @@ static void DRW_viewport_var_init(void)
if (RST.bound_texs == NULL) {
RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
}
if (RST.bound_tex_slots == NULL) {
	/* Slot arrays are bool arrays: allocate sizeof(bool) per slot,
	 * not sizeof(GPUUniformBuffer *) as before (over-allocation). */
	RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots");
}
/* Alloc array of ubos reference. */
if (RST.bound_ubos == NULL) {
	RST.bound_ubos = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_ubo_binds(), "Bound GPUUniformBuffer refs");
}
if (RST.bound_ubo_slots == NULL) {
	/* Must be sized by GPU_max_ubo_binds(): release_ubo_slots() memsets
	 * that many entries, so sizing by GPU_max_textures() could overflow
	 * the allocation when there are more UBO binds than texture slots. */
	RST.bound_ubo_slots = MEM_callocN(sizeof(bool) * GPU_max_ubo_binds(), "Bound UBO Slots");
}
}
void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
@ -3671,6 +3702,8 @@ void DRW_engines_free(void)
MEM_SAFE_FREE(RST.bound_texs);
MEM_SAFE_FREE(RST.bound_ubos);
MEM_SAFE_FREE(RST.bound_tex_slots);
MEM_SAFE_FREE(RST.bound_ubo_slots);
#ifdef WITH_CLAY_ENGINE
BLI_remlink(&R_engines, &DRW_engine_viewport_clay_type);

View File

@ -97,6 +97,7 @@ GPUUniformBuffer *GPU_uniformbuffer_create(int size, const void *data, char err_
{
GPUUniformBuffer *ubo = MEM_callocN(sizeof(GPUUniformBufferStatic), "GPUUniformBufferStatic");
ubo->size = size;
ubo->bindpoint = -1;
/* Generate Buffer object */
glGenBuffers(1, &ubo->bindcode);
@ -134,6 +135,7 @@ GPUUniformBuffer *GPU_uniformbuffer_dynamic_create(ListBase *inputs, char err_ou
GPUUniformBufferDynamic *ubo = MEM_callocN(sizeof(GPUUniformBufferDynamic), "GPUUniformBufferDynamic");
ubo->buffer.type = GPU_UBO_DYNAMIC;
ubo->buffer.bindpoint = -1;
ubo->flag = GPU_UBO_FLAG_DIRTY;
/* Generate Buffer object. */