DRW: Add new features from lightcache branch

- Change gl_context_mutex to a ticket mutex, ensuring interactivity even
  when rendering.
- Add DRW_custom_pipeline for using DRW outside of the viewport and render
  pipeline (a usage sketch follows below).
- Add DRW_opengl_render_context_*** and DRW_gawain_render_context_*** for
  use with DRW_custom_pipeline.
- Add the possibility to bypass deferred compilation on demand (API-level
  demand, not user demand).
- Add a union to access DRWMatrixState members more easily.
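A minimal usage sketch of the new entry points, assuming a render thread that
obtained re_gl_context and re_gwn_context from the render pipeline (as
DRW_render_to_image does below); my_engine_type, my_callback and
my_offscreen_job are hypothetical stand-ins, not part of this commit:

/* Hypothetical render-thread job built on the new APIs. */
extern DrawEngineType my_engine_type;

static void my_callback(void *vedata, void *user_data)
{
	/* Build passes and draw with the regular DRW API.
	 * The default lists (dfbl & dtxl) are not usable here. */
}

static void my_offscreen_job(void *re_gl_context, void *re_gwn_context,
                             struct Depsgraph *depsgraph)
{
	/* Takes the DRW ticket mutex and binds the GL context. */
	DRW_opengl_render_context_enable(re_gl_context);
	/* Must come AFTER a GL context is bound. */
	DRW_gawain_render_context_enable(re_gwn_context);

	DRW_custom_pipeline(&my_engine_type, depsgraph, my_callback, NULL);

	/* Release in reverse order. */
	DRW_gawain_render_context_disable(re_gwn_context);
	DRW_opengl_render_context_disable(re_gl_context);
}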
Commit: c90a0d5dda (parent: 873d7f7e14)
Author: Clément Foucault
Date:   2018-07-10 14:40:28 +02:00

6 changed files with 150 additions and 49 deletions

--- a/source/blender/draw/DRW_engine.h
+++ b/source/blender/draw/DRW_engine.h

@@ -136,6 +136,11 @@ void DRW_opengl_context_destroy(void);
 void DRW_opengl_context_enable(void);
 void DRW_opengl_context_disable(void);
+void DRW_opengl_render_context_enable(void *re_gl_context);
+void DRW_opengl_render_context_disable(void *re_gl_context);
+void DRW_gawain_render_context_enable(void *re_gwn_context);
+void DRW_gawain_render_context_disable(void *re_gwn_context);
 void DRW_deferred_shader_remove(struct GPUMaterial *mat);
 struct DrawDataList *DRW_drawdatalist_from_id(struct ID *id);

--- a/source/blender/draw/engines/eevee/eevee_materials.c
+++ b/source/blender/draw/engines/eevee/eevee_materials.c

@@ -654,14 +654,14 @@ struct GPUMaterial *EEVEE_material_world_lightprobe_get(struct Scene *scene, Wor
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	const int options = VAR_WORLD_PROBE;
-	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options, false);
 	if (mat != NULL) {
 		return mat;
 	}
 	return DRW_shader_create_from_world(
 	        scene, wo, engine, options,
 	        datatoc_background_vert_glsl, NULL, e_data.frag_shader_lib,
-	        SHADER_DEFINES "#define PROBE_CAPTURE\n");
+	        SHADER_DEFINES "#define PROBE_CAPTURE\n", false);
 }
 struct GPUMaterial *EEVEE_material_world_background_get(struct Scene *scene, World *wo)
@@ -669,14 +669,14 @@ struct GPUMaterial *EEVEE_material_world_background_get(struct Scene *scene, Wor
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	int options = VAR_WORLD_BACKGROUND;
-	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options, true);
 	if (mat != NULL) {
 		return mat;
 	}
 	return DRW_shader_create_from_world(
 	        scene, wo, engine, options,
 	        datatoc_background_vert_glsl, NULL, e_data.frag_shader_lib,
-	        SHADER_DEFINES "#define WORLD_BACKGROUND\n");
+	        SHADER_DEFINES "#define WORLD_BACKGROUND\n", true);
 }
 struct GPUMaterial *EEVEE_material_world_volume_get(struct Scene *scene, World *wo)
@@ -684,7 +684,7 @@ struct GPUMaterial *EEVEE_material_world_volume_get(struct Scene *scene, World *
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	int options = VAR_WORLD_VOLUME;
-	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options, true);
 	if (mat != NULL) {
 		return mat;
 	}
@@ -694,7 +694,7 @@ struct GPUMaterial *EEVEE_material_world_volume_get(struct Scene *scene, World *
 	mat = DRW_shader_create_from_world(
 	        scene, wo, engine, options,
 	        datatoc_volumetric_vert_glsl, datatoc_volumetric_geom_glsl, e_data.volume_shader_lib,
-	        defines);
+	        defines, true);
 	MEM_freeN(defines);
@@ -719,7 +719,7 @@ struct GPUMaterial *EEVEE_material_mesh_get(
 	options |= eevee_material_shadow_option(shadow_method);
-	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options, true);
 	if (mat) {
 		return mat;
 	}
@@ -729,7 +729,7 @@ struct GPUMaterial *EEVEE_material_mesh_get(
 	mat = DRW_shader_create_from_material(
 	        scene, ma, engine, options,
 	        e_data.vert_shader_str, NULL, e_data.frag_shader_lib,
-	        defines);
+	        defines, true);
 	MEM_freeN(defines);
@@ -741,7 +741,7 @@ struct GPUMaterial *EEVEE_material_mesh_volume_get(struct Scene *scene, Material
 	const void *engine = &DRW_engine_viewport_eevee_type;
 	int options = VAR_MAT_VOLUME;
-	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options, true);
 	if (mat != NULL) {
 		return mat;
 	}
@@ -751,7 +751,7 @@ struct GPUMaterial *EEVEE_material_mesh_volume_get(struct Scene *scene, Material
 	mat = DRW_shader_create_from_material(
 	        scene, ma, engine, options,
 	        datatoc_volumetric_vert_glsl, datatoc_volumetric_geom_glsl, e_data.volume_shader_lib,
-	        defines);
+	        defines, true);
 	MEM_freeN(defines);
@@ -775,7 +775,7 @@ struct GPUMaterial *EEVEE_material_mesh_depth_get(
 	if (is_shadow)
 		options |= VAR_MAT_SHADOW;
-	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options, true);
 	if (mat) {
 		return mat;
 	}
@@ -791,7 +791,8 @@ struct GPUMaterial *EEVEE_material_mesh_depth_get(
 	        (is_shadow) ? datatoc_shadow_vert_glsl : e_data.vert_shader_str,
 	        NULL,
 	        frag_str,
-	        defines);
+	        defines,
+	        true);
 	MEM_freeN(frag_str);
 	MEM_freeN(defines);
@@ -807,7 +808,7 @@ struct GPUMaterial *EEVEE_material_hair_get(
 	options |= eevee_material_shadow_option(shadow_method);
-	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options);
+	GPUMaterial *mat = DRW_shader_find_from_material(ma, engine, options, true);
 	if (mat) {
 		return mat;
 	}
@@ -817,7 +818,7 @@ struct GPUMaterial *EEVEE_material_hair_get(
 	mat = DRW_shader_create_from_material(
 	        scene, ma, engine, options,
 	        e_data.vert_shader_str, NULL, e_data.frag_shader_lib,
-	        defines);
+	        defines, true);
 	MEM_freeN(defines);
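Note the pattern above: only EEVEE_material_world_lightprobe_get passes false
for the new deferred argument, presumably because lightprobe (light cache)
baking needs its shader compiled synchronously before it can render; the
interactive material paths keep passing true and stay on the deferred path.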

--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h

@@ -240,14 +240,14 @@ struct GPUShader *DRW_shader_create_2D(const char *frag, const char *defines);
 struct GPUShader *DRW_shader_create_3D(const char *frag, const char *defines);
 struct GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines);
 struct GPUShader *DRW_shader_create_3D_depth_only(void);
-struct GPUMaterial *DRW_shader_find_from_world(struct World *wo, const void *engine_type, int options);
-struct GPUMaterial *DRW_shader_find_from_material(struct Material *ma, const void *engine_type, int options);
+struct GPUMaterial *DRW_shader_find_from_world(struct World *wo, const void *engine_type, int options, bool deferred);
+struct GPUMaterial *DRW_shader_find_from_material(struct Material *ma, const void *engine_type, int options, bool deferred);
 struct GPUMaterial *DRW_shader_create_from_world(
         struct Scene *scene, struct World *wo, const void *engine_type, int options,
-        const char *vert, const char *geom, const char *frag_lib, const char *defines);
+        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred);
 struct GPUMaterial *DRW_shader_create_from_material(
         struct Scene *scene, struct Material *ma, const void *engine_type, int options,
-        const char *vert, const char *geom, const char *frag_lib, const char *defines);
+        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred);
 void DRW_shader_free(struct GPUShader *shader);
 #define DRW_SHADER_FREE_SAFE(shader) do { \
 	if (shader != NULL) { \
@@ -410,6 +410,7 @@ void DRW_pass_sort_shgroup_z(DRWPass *pass);
 /* Viewport */
 typedef enum {
+	/* Keep in sync with the union in struct DRWMatrixState. */
 	DRW_MAT_PERS = 0,
 	DRW_MAT_PERSINV,
 	DRW_MAT_VIEW,
@@ -421,7 +422,18 @@ typedef enum {
 } DRWViewportMatrixType;
 typedef struct DRWMatrixState {
-	float mat[DRW_MAT_COUNT][4][4];
+	union {
+		float mat[DRW_MAT_COUNT][4][4];
+		struct {
+			/* Keep in sync with the enum DRWViewportMatrixType. */
+			float persmat[4][4];
+			float persinv[4][4];
+			float viewmat[4][4];
+			float viewinv[4][4];
+			float winmat[4][4];
+			float wininv[4][4];
+		};
+	};
 } DRWMatrixState;
 void DRW_viewport_init(const bContext *C);
@@ -454,6 +466,12 @@ void DRW_render_object_iter(
         void (*callback)(void *vedata, struct Object *ob, struct RenderEngine *engine, struct Depsgraph *depsgraph));
 void DRW_render_instance_buffer_finish(void);
+void DRW_custom_pipeline(
+        DrawEngineType *draw_engine_type,
+        struct Depsgraph *depsgraph,
+        void (*callback)(void *vedata, void *user_data),
+        void *user_data);
 /* ViewLayers */
 void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type);
 void **DRW_view_layer_engine_data_ensure_ex(
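The union keeps the existing indexed access working while adding named
members; a small illustration (a sketch of mine, not from the commit) of why
the two "keep in sync" comments matter:

/* Sketch: named and indexed access alias the same storage, provided the
 * enum order matches the member order. */
void matrix_state_example(DRWMatrixState *state)
{
	float (*named)[4] = state->persmat;
	float (*indexed)[4] = state->mat[DRW_MAT_PERS];
	BLI_assert(named == indexed);
	/* So engines can now write state->winmat instead of
	 * state->mat[DRW_MAT_WIN]. */
}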

--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c

@@ -1256,7 +1256,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
 	/* XXX Really nasty locking. Otherwise this could
 	 * be executed by the material preview thread
 	 * while a viewport is rendering. */
-	BLI_mutex_lock(&DST.gl_context_mutex);
+	BLI_ticket_mutex_lock(DST.gl_context_mutex);
 	/* Reset before using it. */
 	drw_state_prepare_clean_for_draw(&DST);
@@ -1284,7 +1284,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
 		drw_engines_disable();
-		BLI_mutex_unlock(&DST.gl_context_mutex);
+		BLI_ticket_mutex_unlock(DST.gl_context_mutex);
 	}
 }
@@ -1554,14 +1554,10 @@ void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph)
 	/* Changing Context */
 	if (re_gl_context != NULL) {
-		/* TODO get rid of the blocking. Only here because of the static global DST. */
-		BLI_mutex_lock(&DST.gl_context_mutex);
-		WM_opengl_context_activate(re_gl_context);
+		DRW_opengl_render_context_enable(re_gl_context);
 		/* We need to query the gwn context after a gl context has been bound. */
 		re_gwn_context = RE_gwn_context_get(render);
-		if (GWN_context_active_get() == NULL) {
-			GWN_context_active_set(re_gwn_context);
-		}
-		DRW_shape_cache_reset(); /* XXX fix that too. */
+		DRW_gawain_render_context_enable(re_gwn_context);
 	}
 	else {
 		DRW_opengl_context_enable();
@@ -1640,12 +1636,8 @@ void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph)
 	/* Changing Context */
 	if (re_gl_context != NULL) {
-		DRW_shape_cache_reset(); /* XXX fix that too. */
-		glFlush();
-		GWN_context_active_set(NULL);
-		WM_opengl_context_release(re_gl_context);
-		/* TODO get rid of the blocking. */
-		BLI_mutex_unlock(&DST.gl_context_mutex);
+		DRW_gawain_render_context_disable(re_gwn_context);
+		DRW_opengl_render_context_disable(re_gl_context);
 	}
 	else {
 		DRW_opengl_context_disable();
@@ -1671,6 +1663,56 @@ void DRW_render_object_iter(
 	DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END
 }
+/* Assumes a valid GL context is bound (and that the gl_context_mutex has
+ * been acquired). This function only sets up DST and executes the given
+ * callback.
+ * Warning: as in DRW_render_to_image, you cannot use the default lists
+ * (dfbl & dtxl). */
+void DRW_custom_pipeline(
+        DrawEngineType *draw_engine_type,
+        struct Depsgraph *depsgraph,
+        void (*callback)(void *vedata, void *user_data),
+        void *user_data)
+{
+	Scene *scene = DEG_get_evaluated_scene(depsgraph);
+	ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+	/* Reset before using it. */
+	drw_state_prepare_clean_for_draw(&DST);
+	DST.options.is_image_render = true;
+	DST.options.is_scene_render = true;
+	DST.options.draw_background = false;
+	DST.draw_ctx = (DRWContextState){
+	    .scene = scene,
+	    .view_layer = view_layer,
+	    .engine_type = NULL,
+	    .depsgraph = depsgraph,
+	    .object_mode = OB_MODE_OBJECT,
+	};
+	drw_context_state_init();
+	DST.viewport = GPU_viewport_create();
+	const int size[2] = {1, 1};
+	GPU_viewport_size_set(DST.viewport, size);
+	drw_viewport_var_init();
+	DRW_hair_init();
+	ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine_type);
+	/* Execute the callback. */
+	callback(data, user_data);
+	DST.buffer_finish_called = false;
+	GPU_viewport_free(DST.viewport);
+	GPU_framebuffer_restore();
+#ifdef DEBUG
+	/* Avoid accidental reuse. */
+	drw_state_ensure_not_reused(&DST);
+#endif
+}
 static struct DRWSelectBuffer {
 	struct GPUFrameBuffer *framebuffer;
 	struct GPUTexture *texture_depth;
@@ -2280,7 +2322,7 @@ void DRW_opengl_context_create(void)
 {
 	BLI_assert(DST.gl_context == NULL); /* Ensure it's called once */
-	BLI_mutex_init(&DST.gl_context_mutex);
+	DST.gl_context_mutex = BLI_ticket_mutex_alloc();
 	if (!G.background) {
 		immDeactivate();
 	}
@@ -2305,7 +2347,7 @@ void DRW_opengl_context_destroy(void)
 		GWN_context_active_set(DST.gwn_context);
 		GWN_context_discard(DST.gwn_context);
 		WM_opengl_context_dispose(DST.gl_context);
-		BLI_mutex_end(&DST.gl_context_mutex);
+		BLI_ticket_mutex_free(DST.gl_context_mutex);
 	}
 }
@@ -2315,7 +2357,7 @@ void DRW_opengl_context_enable(void)
 		/* IMPORTANT: We don't support immediate mode in render mode!
 		 * This shall remain in effect until immediate mode supports
 		 * multiple threads. */
-		BLI_mutex_lock(&DST.gl_context_mutex);
+		BLI_ticket_mutex_lock(DST.gl_context_mutex);
 		if (BLI_thread_is_main()) {
 			if (!G.background) {
 				immDeactivate();
@@ -2349,8 +2391,43 @@ void DRW_opengl_context_disable(void)
 			GWN_context_active_set(NULL);
 		}
-		BLI_mutex_unlock(&DST.gl_context_mutex);
+		BLI_ticket_mutex_unlock(DST.gl_context_mutex);
 	}
 }
+void DRW_opengl_render_context_enable(void *re_gl_context)
+{
+	/* If the thread is the main one, use DRW_opengl_context_enable() instead. */
+	BLI_assert(!BLI_thread_is_main());
+	/* TODO get rid of the blocking. Only here because of the static global DST. */
+	BLI_ticket_mutex_lock(DST.gl_context_mutex);
+	WM_opengl_context_activate(re_gl_context);
+}
+void DRW_opengl_render_context_disable(void *re_gl_context)
+{
+	glFlush();
+	WM_opengl_context_release(re_gl_context);
+	/* TODO get rid of the blocking. */
+	BLI_ticket_mutex_unlock(DST.gl_context_mutex);
+}
+/* Needs to be called AFTER DRW_opengl_render_context_enable(). */
+void DRW_gawain_render_context_enable(void *re_gwn_context)
+{
+	/* If the thread is the main one, use DRW_opengl_context_enable() instead. */
+	BLI_assert(!BLI_thread_is_main());
+	GWN_context_active_set(re_gwn_context);
+	DRW_shape_cache_reset(); /* XXX fix that too. */
+}
+/* Needs to be called BEFORE DRW_opengl_render_context_disable(). */
+void DRW_gawain_render_context_disable(void *UNUSED(re_gwn_context))
+{
+	DRW_shape_cache_reset(); /* XXX fix that too. */
+	GWN_context_active_set(NULL);
+}
 /** \} */
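Why a ticket mutex helps interactivity: a plain mutex makes no fairness
guarantee, so a render thread that unlocks and immediately re-locks the
context between tasks can starve the main thread indefinitely; a ticket
mutex hands the lock to waiters in FIFO order, so the UI thread gets the
context on its next turn. A condensed sketch of the idea (assumed semantics,
not Blender's actual BLI implementation):

#include <pthread.h>

/* Each locker takes a numbered ticket and waits until that number is
 * being served, which enforces strict FIFO handover. */
typedef struct TicketMutexSketch {
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned int next_ticket; /* next ticket to hand out */
	unsigned int serving;     /* ticket currently holding the lock */
} TicketMutexSketch;

static void ticket_mutex_lock(TicketMutexSketch *t)
{
	pthread_mutex_lock(&t->mutex);
	const unsigned int ticket = t->next_ticket++;
	while (ticket != t->serving) {
		pthread_cond_wait(&t->cond, &t->mutex);
	}
	pthread_mutex_unlock(&t->mutex);
}

static void ticket_mutex_unlock(TicketMutexSketch *t)
{
	pthread_mutex_lock(&t->mutex);
	t->serving++;
	pthread_cond_broadcast(&t->cond);
	pthread_mutex_unlock(&t->mutex);
}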

--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h

@@ -373,7 +373,7 @@ typedef struct DRWManager {
 	 * the top portion of the struct so DO NOT MOVE IT! */
 	void *gl_context; /* Unique ghost context used by the draw manager. */
 	Gwn_Context *gwn_context;
-	ThreadMutex gl_context_mutex; /* Mutex to lock the drw manager and avoid concurent context usage. */
+	TicketMutex *gl_context_mutex; /* Mutex to lock the draw manager and avoid concurrent context usage. */
 	/** GPU Resource State: Memory storage between drawing. */
 	struct {

--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c

@@ -154,10 +154,10 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
 	MEM_freeN(comp);
 }
-static void drw_deferred_shader_add(GPUMaterial *mat)
+static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
 {
 	/* Do not defer the compilation if we are rendering an image. */
-	if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
+	if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
 		/* Double checking that this GPUMaterial is not going to be
 		 * compiled by another thread. */
 		DRW_deferred_shader_remove(mat);
@@ -308,10 +308,10 @@ GPUShader *DRW_shader_create_3D_depth_only(void)
 	return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
 }
-GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options)
+GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options, bool deferred)
 {
 	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
-	if (DRW_state_is_image_render()) {
+	if (DRW_state_is_image_render() || !deferred) {
 		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
 			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
 			 * with the shader code and we will resume the compilation from there. */
@@ -321,10 +321,10 @@ GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int
 	return mat;
 }
-GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options)
+GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options, bool deferred)
 {
 	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
-	if (DRW_state_is_image_render()) {
+	if (DRW_state_is_image_render() || !deferred) {
 		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
 			/* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
 			 * with the shader code and we will resume the compilation from there. */
@@ -336,7 +336,7 @@ GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type
 GPUMaterial *DRW_shader_create_from_world(
         struct Scene *scene, World *wo, const void *engine_type, int options,
-        const char *vert, const char *geom, const char *frag_lib, const char *defines)
+        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
 {
 	GPUMaterial *mat = NULL;
 	if (DRW_state_is_image_render()) {
@@ -350,7 +350,7 @@ GPUMaterial *DRW_shader_create_from_world(
 	}
 	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
-		drw_deferred_shader_add(mat);
+		drw_deferred_shader_add(mat, deferred);
 	}
 	return mat;
@@ -358,7 +358,7 @@ GPUMaterial *DRW_shader_create_from_world(
 GPUMaterial *DRW_shader_create_from_material(
         struct Scene *scene, Material *ma, const void *engine_type, int options,
-        const char *vert, const char *geom, const char *frag_lib, const char *defines)
+        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
 {
 	GPUMaterial *mat = NULL;
 	if (DRW_state_is_image_render()) {
@@ -372,7 +372,7 @@ GPUMaterial *DRW_shader_create_from_material(
 	}
 	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
-		drw_deferred_shader_add(mat);
+		drw_deferred_shader_add(mat, deferred);
 	}
 	return mat;
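From the caller's side the new flag works like this: with deferred == false,
DRW_shader_find_from_world() refuses to return a still-queued material
(returning NULL instead), and the follow-up DRW_shader_create_from_world()
call then resumes and finishes the compilation immediately instead of
queueing it. A sketch, where engine, options and the GLSL strings are
placeholders:

/* Force synchronous compilation of a world shader (sketch). */
GPUMaterial *mat = DRW_shader_find_from_world(wo, engine, options, false);
if (mat == NULL) {
	/* NULL can mean "queued elsewhere"; re-creating with
	 * deferred == false compiles it on the spot. */
	mat = DRW_shader_create_from_world(
	        scene, wo, engine, options,
	        vert_glsl, NULL, frag_lib, defines, false);
}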