DRW: Fix leak (2nd attempt) caused by texture "state" tracking
This should get rid of the leak once and for all.
This commit is contained in:
parent
2b2277ecbc
commit
e9d8b780ae
|
@ -323,10 +323,6 @@ enum {
|
|||
static struct DRWGlobalState {
|
||||
/* Rendering state */
|
||||
GPUShader *shader;
|
||||
GPUTexture **bound_texs;
|
||||
GPUUniformBuffer **bound_ubos;
|
||||
int bind_tex_inc;
|
||||
int bind_ubo_inc;
|
||||
|
||||
/* Managed by `DRW_state_set`, `DRW_state_reset` */
|
||||
DRWState state;
|
||||
|
@ -363,6 +359,15 @@ static struct DRWGlobalState {
|
|||
double cache_time;
|
||||
} DST = {NULL};
|
||||
|
||||
/* GPU Resource State */
|
||||
static struct DRWResourceState {
|
||||
GPUTexture **bound_texs;
|
||||
GPUUniformBuffer **bound_ubos;
|
||||
|
||||
int bind_tex_inc;
|
||||
int bind_ubo_inc;
|
||||
} RST = {NULL};
|
||||
|
||||
static struct DRWMatrixOveride {
|
||||
float mat[6][4][4];
|
||||
bool override[6];
|
||||
|
@ -1832,14 +1837,14 @@ static void draw_bind_texture(GPUTexture *tex)
|
|||
{
|
||||
int bind = GPU_texture_bound_number(tex);
|
||||
if (bind == -1) {
|
||||
if (DST.bind_tex_inc >= 0) {
|
||||
if (DST.bound_texs[DST.bind_tex_inc] != NULL) {
|
||||
GPU_texture_unbind(DST.bound_texs[DST.bind_tex_inc]);
|
||||
if (RST.bind_tex_inc >= 0) {
|
||||
if (RST.bound_texs[RST.bind_tex_inc] != NULL) {
|
||||
GPU_texture_unbind(RST.bound_texs[RST.bind_tex_inc]);
|
||||
}
|
||||
DST.bound_texs[DST.bind_tex_inc] = tex;
|
||||
GPU_texture_bind(tex, DST.bind_tex_inc);
|
||||
RST.bound_texs[RST.bind_tex_inc] = tex;
|
||||
GPU_texture_bind(tex, RST.bind_tex_inc);
|
||||
|
||||
DST.bind_tex_inc--;
|
||||
RST.bind_tex_inc--;
|
||||
}
|
||||
else {
|
||||
printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
|
||||
|
@ -1849,16 +1854,16 @@ static void draw_bind_texture(GPUTexture *tex)
|
|||
|
||||
static void draw_bind_ubo(GPUUniformBuffer *ubo)
|
||||
{
|
||||
if (DST.bound_ubos[DST.bind_ubo_inc] != ubo) {
|
||||
if (DST.bind_ubo_inc >= 0) {
|
||||
DST.bound_ubos[DST.bind_ubo_inc] = ubo;
|
||||
GPU_uniformbuffer_bind(ubo, DST.bind_ubo_inc);
|
||||
if (RST.bound_ubos[RST.bind_ubo_inc] != ubo) {
|
||||
if (RST.bind_ubo_inc >= 0) {
|
||||
RST.bound_ubos[RST.bind_ubo_inc] = ubo;
|
||||
GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc);
|
||||
}
|
||||
else {
|
||||
printf("Not enough ubo slots!\n");
|
||||
}
|
||||
}
|
||||
DST.bind_ubo_inc--;
|
||||
RST.bind_ubo_inc--;
|
||||
}
|
||||
|
||||
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
|
||||
|
@ -1866,8 +1871,8 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
|
|||
BLI_assert(shgroup->shader);
|
||||
BLI_assert(shgroup->interface);
|
||||
|
||||
DST.bind_tex_inc = GPU_max_textures() - 1; /* Reset texture counter. */
|
||||
DST.bind_ubo_inc = GPU_max_ubo_binds() - 1; /* Reset UBO counter. */
|
||||
RST.bind_tex_inc = GPU_max_textures() - 1; /* Reset texture counter. */
|
||||
RST.bind_ubo_inc = GPU_max_ubo_binds() - 1; /* Reset UBO counter. */
|
||||
DRWInterface *interface = shgroup->interface;
|
||||
GPUTexture *tex;
|
||||
GPUUniformBuffer *ubo;
|
||||
|
@ -2043,9 +2048,9 @@ static void DRW_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWSha
|
|||
|
||||
/* Clear Bound textures */
|
||||
for (int i = 0; i < GPU_max_textures(); i++) {
|
||||
if (DST.bound_texs[i] != NULL) {
|
||||
GPU_texture_unbind(DST.bound_texs[i]);
|
||||
DST.bound_texs[i] = NULL;
|
||||
if (RST.bound_texs[i] != NULL) {
|
||||
GPU_texture_unbind(RST.bound_texs[i]);
|
||||
RST.bound_texs[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2516,13 +2521,13 @@ static void DRW_viewport_var_init(void)
|
|||
glFrontFace(DST.frontface);
|
||||
|
||||
/* Alloc array of texture reference. */
|
||||
if (DST.bound_texs == NULL) {
|
||||
DST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
|
||||
if (RST.bound_texs == NULL) {
|
||||
RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
|
||||
}
|
||||
|
||||
/* Alloc array of ubos reference. */
|
||||
if (DST.bound_ubos == NULL) {
|
||||
DST.bound_ubos = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_ubo_binds(), "Bound GPUUniformBuffer refs");
|
||||
if (RST.bound_ubos == NULL) {
|
||||
RST.bound_ubos = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_ubo_binds(), "Bound GPUUniformBuffer refs");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3234,8 +3239,6 @@ void DRW_draw_render_loop_ex(
|
|||
|
||||
#ifdef DEBUG
|
||||
/* Avoid accidental reuse. */
|
||||
MEM_freeN(DST.bound_texs);
|
||||
MEM_freeN(DST.bound_ubos);
|
||||
memset(&DST, 0xFF, sizeof(DST));
|
||||
#endif
|
||||
}
|
||||
|
@ -3388,8 +3391,6 @@ void DRW_draw_select_loop(
|
|||
|
||||
#ifdef DEBUG
|
||||
/* Avoid accidental reuse. */
|
||||
MEM_freeN(DST.bound_texs);
|
||||
MEM_freeN(DST.bound_ubos);
|
||||
memset(&DST, 0xFF, sizeof(DST));
|
||||
#endif
|
||||
|
||||
|
@ -3478,8 +3479,6 @@ void DRW_draw_depth_loop(
|
|||
|
||||
#ifdef DEBUG
|
||||
/* Avoid accidental reuse. */
|
||||
MEM_freeN(DST.bound_texs);
|
||||
MEM_freeN(DST.bound_ubos);
|
||||
memset(&DST, 0xFF, sizeof(DST));
|
||||
#endif
|
||||
|
||||
|
@ -3667,13 +3666,13 @@ void DRW_engines_free(void)
|
|||
if (globals_ramp)
|
||||
GPU_texture_free(globals_ramp);
|
||||
|
||||
if (DST.bound_texs) {
|
||||
MEM_freeN(DST.bound_texs);
|
||||
DST.bound_texs = NULL;
|
||||
if (RST.bound_texs) {
|
||||
MEM_freeN(RST.bound_texs);
|
||||
RST.bound_texs = NULL;
|
||||
}
|
||||
if (DST.bound_ubos) {
|
||||
MEM_freeN(DST.bound_ubos);
|
||||
DST.bound_ubos = NULL;
|
||||
if (RST.bound_ubos) {
|
||||
MEM_freeN(RST.bound_ubos);
|
||||
RST.bound_ubos = NULL;
|
||||
}
|
||||
|
||||
#ifdef WITH_CLAY_ENGINE
|
||||
|
|
Loading…
Reference in New Issue