Eevee: Render: Fix softlock if rendering before lazy shader compilation ends.

Calling the render operator seems to kill any other running WM_job, leaving
uncompiled materials stuck in the GPU_MAT_QUEUED state. This then made the probe
update loop indefinitely (all_materials_updated stayed false).

To fix this, we resume compilation for materials that are in this state.
Cancelling the render before all materials have finished compiling could leave
some of them uncompiled; fortunately, this is not allowed as of now.
Clément Foucault 2018-03-13 03:58:00 +01:00
parent 0acccda4a4
commit 0f1d7a5796
3 changed files with 59 additions and 14 deletions
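For context, the softlock boils down to a wait-on-a-flag loop that can never terminate once the compile job is gone. A minimal, stand-alone sketch of that situation (the enum, names and loop below are illustrative, not the actual probe-update code):

/* Minimal, hypothetical sketch of the softlock described above -- not Blender code.
 * If a material never leaves the QUEUED state (because the job that would compile
 * it was killed), the flag below never becomes true and the probe update waits forever. */
#include <stdbool.h>
#include <stddef.h>

typedef enum { MAT_STATUS_QUEUED, MAT_STATUS_SUCCESS, MAT_STATUS_FAILED } MatStatus;

static bool all_materials_updated(const MatStatus *status, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		if (status[i] == MAT_STATUS_QUEUED) {
			return false;  /* Still waiting on a compile job that no longer exists. */
		}
	}
	return true;
}

/* The probe update effectively spins on this condition:
 *     while (!all_materials_updated(status, count)) { ... }   -> never exits. */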


@@ -634,7 +634,7 @@ struct GPUMaterial *EEVEE_material_world_lightprobe_get(struct Scene *scene, Wor
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	const int options = VAR_WORLD_PROBE;
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options);
 	if (mat != NULL) {
 		return mat;
 	}
@@ -649,7 +649,7 @@ struct GPUMaterial *EEVEE_material_world_background_get(struct Scene *scene, Wor
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	int options = VAR_WORLD_BACKGROUND;
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options);
 	if (mat != NULL) {
 		return mat;
 	}
@@ -664,7 +664,7 @@ struct GPUMaterial *EEVEE_material_world_volume_get(struct Scene *scene, World *
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	int options = VAR_WORLD_VOLUME;
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options);
 	if (mat != NULL) {
 		return mat;
 	}
@@ -699,7 +699,7 @@ struct GPUMaterial *EEVEE_material_mesh_get(
 	options |= eevee_material_shadow_option(shadow_method);
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
 	if (mat) {
 		return mat;
 	}
@@ -721,7 +721,7 @@ struct GPUMaterial *EEVEE_material_mesh_volume_get(struct Scene *scene, Material
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	int options = VAR_MAT_VOLUME;
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
 	if (mat != NULL) {
 		return mat;
 	}
@@ -755,7 +755,7 @@ struct GPUMaterial *EEVEE_material_mesh_depth_get(
 	if (is_shadow)
 		options |= VAR_MAT_SHADOW;
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
 	if (mat) {
 		return mat;
 	}
@@ -787,7 +787,7 @@ struct GPUMaterial *EEVEE_material_hair_get(
 	options |= eevee_material_shadow_option(shadow_method);
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
 	if (mat) {
 		return mat;
 	}
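All of the EEVEE getters above now share the same find-or-create shape. A condensed, self-contained sketch of that call-site pattern, with stand-in names for the find and create helpers (the real create call follows each hunk but is outside the diff context):

#include <stddef.h>

/* Stand-in for DRW_shader_find_from_world/material(): pretend nothing usable is cached. */
static void *find_material(int options) { (void)options; return NULL; }
/* Stand-in for DRW_shader_create_from_world/material(): would build (or resume) the material. */
static void *create_material(int options) { (void)options; return NULL; }

static void *material_get(int options)
{
	void *mat = find_material(options);
	if (mat != NULL) {
		return mat;  /* Compiled already, or legitimately queued for the viewport. */
	}
	/* NULL also covers "queued during an image render": fall through so the
	 * create path can resume the compilation synchronously. */
	return create_material(options);
}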


@@ -273,6 +273,8 @@ struct GPUShader *DRW_shader_create_2D(const char *frag, const char *defines);
 struct GPUShader *DRW_shader_create_3D(const char *frag, const char *defines);
 struct GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines);
 struct GPUShader *DRW_shader_create_3D_depth_only(void);
+struct GPUMaterial *DRW_shader_find_from_world(struct World *wo, const void *engine_type, int options);
+struct GPUMaterial *DRW_shader_find_from_material(struct Material *ma, const void *engine_type, int options);
 struct GPUMaterial *DRW_shader_create_from_world(
 		struct Scene *scene, struct World *wo, const void *engine_type, int options,
 		const char *vert, const char *geom, const char *frag_lib, const char *defines);


@@ -162,8 +162,11 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
 static void drw_deferred_shader_add(
 		GPUMaterial *mat, const char *vert, const char *geom, const char *frag_lib, const char *defines)
 {
-	/* Do not deferre the compilation if we are rendering for image. */
 	if (DRW_state_is_image_render()) {
+		/* Do not defer the compilation if we are rendering for an image. */
+		/* Double check that this GPUMaterial is not going to be
+		 * compiled by another thread. */
+		DRW_deferred_shader_remove(mat);
 		GPU_material_generate_pass(mat, vert, geom, frag_lib, defines);
 		return;
 	}
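The image-render branch now dequeues the material before compiling it synchronously, so a compile thread cannot pick it up at the same time. A tiny stand-alone illustration of that ordering, with hypothetical stand-ins for the two calls:

#include <stdbool.h>

typedef struct { bool queued; bool compiled; } Mat;

static void dequeue(Mat *m)     { m->queued = false; }   /* stand-in for DRW_deferred_shader_remove() */
static void compile_now(Mat *m) { m->compiled = true; }  /* stand-in for GPU_material_generate_pass() */

static void compile_for_image_render(Mat *m)
{
	dequeue(m);      /* First make sure no compile thread can still claim it... */
	compile_now(m);  /* ...then compile it right here, on the render thread. */
}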
@@ -302,13 +305,46 @@ GPUShader *DRW_shader_create_3D_depth_only(void)
 	return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
 }
 
+GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options)
+{
+	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+	if (DRW_state_is_image_render()) {
+		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
+			 * with the shader code and we will resume the compilation from there. */
+			return NULL;
+		}
+	}
+	return mat;
+}
+
+GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options)
+{
+	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+	if (DRW_state_is_image_render()) {
+		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
+			 * with the shader code and we will resume the compilation from there. */
+			return NULL;
+		}
+	}
+	return mat;
+}
+
 GPUMaterial *DRW_shader_create_from_world(
 		struct Scene *scene, World *wo, const void *engine_type, int options,
 		const char *vert, const char *geom, const char *frag_lib, const char *defines)
 {
-	GPUMaterial *mat = GPU_material_from_nodetree(
-	        scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
-	        vert, geom, frag_lib, defines, true);
+	GPUMaterial *mat = NULL;
+	if (DRW_state_is_image_render()) {
+		mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+	}
+	if (mat == NULL) {
+		mat = GPU_material_from_nodetree(
+		        scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
+		        vert, geom, frag_lib, defines, true);
+	}
 
 	drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);
@@ -319,9 +355,16 @@ GPUMaterial *DRW_shader_create_from_material(
 		struct Scene *scene, Material *ma, const void *engine_type, int options,
 		const char *vert, const char *geom, const char *frag_lib, const char *defines)
 {
-	GPUMaterial *mat = GPU_material_from_nodetree(
-	        scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
-	        vert, geom, frag_lib, defines, true);
+	GPUMaterial *mat = NULL;
+	if (DRW_state_is_image_render()) {
+		mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+	}
+	if (mat == NULL) {
+		mat = GPU_material_from_nodetree(
+		        scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
+		        vert, geom, frag_lib, defines, true);
+	}
 
 	drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);
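Taken together, the image-render path now reuses an already-queued GPUMaterial and finishes its compilation on the render thread instead of waiting on a job that was killed. A hypothetical, condensed sketch of that flow (stand-in types and helpers, not the real API):

#include <stdbool.h>
#include <stddef.h>

typedef enum { MAT_QUEUED, MAT_SUCCESS } MatStatus;
typedef struct { MatStatus status; } Mat;

static bool image_render = true;  /* stand-in for DRW_state_is_image_render() */

/* Stand-in for DRW_shader_find_from_world/material(). */
static Mat *shader_find(Mat *cached)
{
	if (image_render && cached != NULL && cached->status == MAT_QUEUED) {
		return NULL;  /* Pretend it does not exist: push the engine into the create path. */
	}
	return cached;
}

/* Stand-in for DRW_shader_create_from_world/material() + drw_deferred_shader_add(). */
static Mat *shader_create_or_resume(Mat *cached)
{
	Mat *mat = image_render ? cached : NULL;  /* Reuse the queued material if there is one. */
	if (mat == NULL) {
		return NULL;  /* A real implementation would build a new GPUMaterial here. */
	}
	mat->status = MAT_SUCCESS;  /* Compile synchronously instead of re-queueing. */
	return mat;
}

/* Usage: the engine calls shader_find() first; a NULL result (queued material during
 * an image render) sends it into shader_create_or_resume(), so the material finally
 * leaves MAT_QUEUED and the probe update loop can end. */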