EEVEE: Motion Blur: Add accumulation motion blur for better precision

This revisits the render pipeline to support time slicing for better motion
blur.

We support accumulation with or without the post-process motion blur.

When using the post-process, we reuse the last step's next motion data to
avoid another scene re-evaluation.
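
In practice the accumulation loop evaluates the scene at the previous, next and
current sub-frame times for every step; from the second step onwards the
previous data is obtained by swapping instead of re-evaluating. A simplified
sketch of the loop in eevee_render_to_image() (see the eevee_engine.c diff
below for the full logic):

  for (int i = 0; i < time_steps_tot; i++) {
    if (do_motion_blur_fx) {
      if (i > 0) {
        /* The previous step of iteration N is the next step of iteration N-1:
         * swap the cached motion data instead of re-evaluating the scene. */
        EEVEE_motion_blur_swap_data(vedata);
      }
      else {
        /* First iteration only: evaluate the scene at time_prev for MB_PREV. */
      }
      /* Evaluate the scene at time_next for MB_NEXT. */
    }
    /* Evaluate the scene at time_curr for MB_CURR and accumulate
     * render_tot_samples samples into the render result. */
  }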

This also adds support for hair motion blur, which is handled in a similar
way to mesh motion blur.
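
For hair, the refined point positions of the previous and next steps are kept
as buffer textures and read by a dedicated velocity shader at the current
step. Roughly (assembled from the eevee_motion_blur.c hunks below, not a
literal excerpt):

  /* At MB_PREV / MB_NEXT, after DRW_hair_update() has refined the strands: */
  mb_geom->hair_pos[mb_step] = GPU_vertbuf_duplicate(DRW_hair_pos_buffer_get(ob, psys, md));
  GPU_vertbuf_use(mb_geom->hair_pos[mb_step]); /* Create the VBO so it can back a texture. */
  mb_geom->hair_pos_tx[mb_step] = GPU_texture_create_from_vertbuf(mb_geom->hair_pos[mb_step]);

  /* At MB_CURR, the hair velocity shading group samples them as prvBuffer / nxtBuffer: */
  DRW_shgroup_uniform_texture(grp, "prvBuffer", mb_geom->hair_pos_tx[MB_PREV]);
  DRW_shgroup_uniform_texture(grp, "nxtBuffer", mb_geom->hair_pos_tx[MB_NEXT]);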

The total number of samples is distributed evenly across all time steps to
avoid sample weighting issues. For this reason, the sample count is
(internally) rounded up to the next multiple of the step count.
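
The per-step count is a simple ceiling division (see eevee_engine.c below);
the numbers in the comment are an illustrative example:

  int steps = max_ii(1, scene->eevee.motion_blur_steps);
  int time_steps_tot = (do_motion_blur) ? steps : 1;
  /* Samples rendered per time step, rounded up. */
  g_data->render_tot_samples = divide_ceil_u(scene->eevee.taa_render_samples, time_steps_tot);
  /* e.g. 100 render samples with 4 steps -> 25 per step;
   * 100 samples with 3 steps -> 34 per step, i.e. 102 samples in total. */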

Only FX Motion Blur: {F8632258}

FX Motion Blur + 4 time steps: {F8632260}

FX Motion Blur + 32 time steps: {F8632261}

Reviewed By: jbakker

Differential Revision: https://developer.blender.org/D8079
Clément Foucault 2020-06-23 13:59:55 +02:00
parent cc3e808ab4
commit 439b40e601
Notes: blender-bot 2023-02-14 09:44:56 +01:00
Referenced by issue #78321, Eevee Motion blur crash rendering animation with steps set to higher value
Referenced by issue #78190, 2.90 broken Eevee shadow pass
20 changed files with 663 additions and 273 deletions

View File

@ -176,6 +176,7 @@ class RENDER_PT_eevee_motion_blur(RenderButtonsPanel, Panel):
col.prop(props, "motion_blur_shutter")
col.prop(props, "motion_blur_depth_scale")
col.prop(props, "motion_blur_max")
col.prop(props, "motion_blur_steps", text="Steps")
class RENDER_PT_eevee_depth_of_field(RenderButtonsPanel, Panel):

View File

@ -291,6 +291,12 @@ void blo_do_versions_290(FileData *fd, Library *UNUSED(lib), Main *bmain)
}
}
if (!DNA_struct_elem_find(fd->filesdna, "SceneEEVEE", "int", "motion_blur_steps")) {
LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
scene->eevee.motion_blur_steps = 1;
}
}
/* Transition to saving expansion for all of a constraint's subpanels. */
if (!DNA_struct_elem_find(fd->filesdna, "bConstraint", "short", "ui_expand_flag")) {
for (Object *object = bmain->objects.first; object != NULL; object = object->id.next) {

View File

@ -41,8 +41,19 @@
static void eevee_motion_blur_mesh_data_free(void *val)
{
EEVEE_GeometryMotionData *geom_mb = (EEVEE_GeometryMotionData *)val;
for (int i = 0; i < ARRAY_SIZE(geom_mb->vbo); i++) {
GPU_VERTBUF_DISCARD_SAFE(geom_mb->vbo[i]);
switch (geom_mb->type) {
case EEVEE_HAIR_GEOM_MOTION_DATA:
for (int i = 0; i < ARRAY_SIZE(geom_mb->vbo); i++) {
GPU_VERTBUF_DISCARD_SAFE(geom_mb->hair_pos[i]);
DRW_TEXTURE_FREE_SAFE(geom_mb->hair_pos_tx[i]);
}
break;
case EEVEE_MESH_GEOM_MOTION_DATA:
for (int i = 0; i < ARRAY_SIZE(geom_mb->vbo); i++) {
GPU_VERTBUF_DISCARD_SAFE(geom_mb->vbo[i]);
}
break;
}
MEM_freeN(val);
}
@ -103,14 +114,17 @@ void EEVEE_motion_blur_data_free(EEVEE_MotionBlurData *mb)
}
}
EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb, Object *ob)
EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb,
Object *ob,
bool hair)
{
if (mb->object == NULL) {
return NULL;
}
EEVEE_ObjectKey key, *key_p;
key.ob = ob;
/* Small hack to avoid another comparison. */
key.ob = (Object *)((char *)ob + hair);
DupliObject *dup = DRW_object_get_dupli(ob);
if (dup) {
key.parent = DRW_object_get_dupli_parent(ob);
@ -133,7 +147,9 @@ EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *
return ob_step;
}
EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_MotionBlurData *mb, Object *ob)
EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_MotionBlurData *mb,
Object *ob,
bool hair)
{
if (mb->geom == NULL) {
return NULL;
@ -142,10 +158,12 @@ EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_MotionBlurDa
/* Use original data as key to ensure matching across updates. */
Object *ob_orig = DEG_get_original_object(ob);
EEVEE_GeometryMotionData *geom_step = BLI_ghash_lookup(mb->geom, ob_orig->data);
void *key = (char *)ob_orig->data + hair;
EEVEE_GeometryMotionData *geom_step = BLI_ghash_lookup(mb->geom, key);
if (geom_step == NULL) {
geom_step = MEM_callocN(sizeof(EEVEE_GeometryMotionData), __func__);
BLI_ghash_insert(mb->geom, ob_orig->data, geom_step);
geom_step->type = (hair) ? EEVEE_HAIR_GEOM_MOTION_DATA : EEVEE_MESH_GEOM_MOTION_DATA;
BLI_ghash_insert(mb->geom, key, geom_step);
}
return geom_step;
@ -229,6 +247,8 @@ static void eevee_object_data_init(DrawData *dd)
{
EEVEE_ObjectEngineData *eevee_data = (EEVEE_ObjectEngineData *)dd;
eevee_data->shadow_caster_id = -1;
eevee_data->need_update = false;
eevee_data->geom_update = false;
}
EEVEE_ObjectEngineData *EEVEE_object_data_get(Object *ob)

View File

@ -153,7 +153,7 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
effects->enabled_effects = 0;
effects->enabled_effects |= (G.debug_value == 9) ? EFFECT_VELOCITY_BUFFER : 0;
effects->enabled_effects |= EEVEE_motion_blur_init(sldata, vedata, camera);
effects->enabled_effects |= EEVEE_motion_blur_init(sldata, vedata);
effects->enabled_effects |= EEVEE_bloom_init(sldata, vedata);
effects->enabled_effects |= EEVEE_depth_of_field_init(sldata, vedata, camera);
effects->enabled_effects |= EEVEE_temporal_sampling_init(sldata, vedata);

View File

@ -167,6 +167,7 @@ static void eevee_cache_finish(void *vedata)
EEVEE_lights_cache_finish(sldata, vedata);
EEVEE_lightprobes_cache_finish(sldata, vedata);
EEVEE_subsurface_draw_init(sldata, vedata);
EEVEE_effects_draw_init(sldata, vedata);
EEVEE_volumes_draw_init(sldata, vedata);
@ -381,6 +382,7 @@ static void eevee_id_object_update(void *UNUSED(vedata), Object *object)
EEVEE_ObjectEngineData *oedata = EEVEE_object_data_get(object);
if (oedata != NULL && oedata->dd.recalc != 0) {
oedata->need_update = true;
oedata->geom_update = (oedata->dd.recalc & (ID_RECALC_GEOMETRY)) != 0;
oedata->dd.recalc = 0;
}
}
@ -400,7 +402,7 @@ static void eevee_id_world_update(void *vedata, World *wo)
}
}
static void eevee_id_update(void *vedata, ID *id)
void eevee_id_update(void *vedata, ID *id)
{
/* Handle updates based on ID type. */
switch (GS(id->name)) {
@ -416,6 +418,12 @@ static void eevee_id_update(void *vedata, ID *id)
}
}
static void eevee_render_reset_passes(EEVEE_Data *vedata)
{
/* Reset passlist. This is safe as they are stored into managed memory chunks. */
memset(vedata->psl, 0, sizeof(*vedata->psl));
}
static void eevee_render_to_image(void *vedata,
RenderEngine *engine,
struct RenderLayer *render_layer,
@ -423,74 +431,114 @@ static void eevee_render_to_image(void *vedata,
{
EEVEE_Data *ved = (EEVEE_Data *)vedata;
const DRWContextState *draw_ctx = DRW_context_state_get();
Depsgraph *depsgraph = draw_ctx->depsgraph;
Scene *scene = DEG_get_evaluated_scene(depsgraph);
EEVEE_ViewLayerData *sldata = EEVEE_view_layer_data_ensure();
const bool do_motion_blur = (scene->eevee.flag & SCE_EEVEE_MOTION_BLUR_ENABLED) != 0;
const bool do_motion_blur_fx = do_motion_blur && (scene->eevee.motion_blur_max > 0);
if (EEVEE_render_do_motion_blur(draw_ctx->depsgraph)) {
Scene *scene = DEG_get_evaluated_scene(draw_ctx->depsgraph);
float shutter = scene->eevee.motion_blur_shutter * 0.5f;
float time = CFRA;
/* Centered on frame for now. */
float start_time = time - shutter;
float end_time = time + shutter;
{
EEVEE_motion_blur_step_set(ved, MB_PREV);
RE_engine_frame_set(engine, floorf(start_time), fractf(start_time));
if (!EEVEE_render_init(vedata, engine, draw_ctx->depsgraph)) {
return;
}
if (RE_engine_test_break(engine)) {
return;
}
DRW_render_object_iter(vedata, engine, draw_ctx->depsgraph, EEVEE_render_cache);
EEVEE_motion_blur_cache_finish(vedata);
/* Reset passlist. This is safe as they are stored into managed memory chunks. */
memset(ved->psl, 0, sizeof(*ved->psl));
/* Fix memleak */
BLI_ghash_free(ved->stl->g_data->material_hash, NULL, NULL);
ved->stl->g_data->material_hash = NULL;
}
{
EEVEE_motion_blur_step_set(ved, MB_NEXT);
RE_engine_frame_set(engine, floorf(end_time), fractf(end_time));
EEVEE_render_init(vedata, engine, draw_ctx->depsgraph);
DRW_render_object_iter(vedata, engine, draw_ctx->depsgraph, EEVEE_render_cache);
EEVEE_motion_blur_cache_finish(vedata);
/* Reset passlist. This is safe as they are stored into managed memory chunks. */
memset(ved->psl, 0, sizeof(*ved->psl));
/* Fix memleak */
BLI_ghash_free(ved->stl->g_data->material_hash, NULL, NULL);
ved->stl->g_data->material_hash = NULL;
}
/* Current frame. */
EEVEE_motion_blur_step_set(ved, MB_CURR);
RE_engine_frame_set(engine, time, 0.0f);
}
if (!EEVEE_render_init(vedata, engine, draw_ctx->depsgraph)) {
if (!EEVEE_render_init(vedata, engine, depsgraph)) {
return;
}
EEVEE_PrivateData *g_data = ved->stl->g_data;
int steps = max_ii(1, scene->eevee.motion_blur_steps);
int time_steps_tot = (do_motion_blur) ? steps : 1;
g_data->render_tot_samples = divide_ceil_u(scene->eevee.taa_render_samples, time_steps_tot);
/* Centered on frame for now. */
float time = CFRA - scene->eevee.motion_blur_shutter / 2.0f;
float time_step = scene->eevee.motion_blur_shutter / time_steps_tot;
for (int i = 0; i < time_steps_tot && !RE_engine_test_break(engine); i++) {
float time_prev = time;
float time_curr = time + time_step * 0.5f;
float time_next = time + time_step;
time += time_step;
/* Previous motion step. */
if (do_motion_blur_fx) {
if (i > 0) {
/* The previous step of this iteration N is exactly the next step of iteration N - 1.
* So we just swap the resources to avoid too much re-evaluation. */
EEVEE_motion_blur_swap_data(vedata);
}
else {
EEVEE_motion_blur_step_set(ved, MB_PREV);
RE_engine_frame_set(engine, floorf(time_prev), fractf(time_prev));
EEVEE_render_view_sync(vedata, engine, depsgraph);
EEVEE_render_cache_init(sldata, vedata);
DRW_render_object_iter(vedata, engine, depsgraph, EEVEE_render_cache);
EEVEE_motion_blur_cache_finish(vedata);
EEVEE_materials_cache_finish(sldata, vedata);
eevee_render_reset_passes(vedata);
}
}
/* Next motion step. */
if (do_motion_blur_fx) {
EEVEE_motion_blur_step_set(ved, MB_NEXT);
RE_engine_frame_set(engine, floorf(time_next), fractf(time_next));
EEVEE_render_view_sync(vedata, engine, depsgraph);
EEVEE_render_cache_init(sldata, vedata);
DRW_render_object_iter(vedata, engine, depsgraph, EEVEE_render_cache);
EEVEE_motion_blur_cache_finish(vedata);
EEVEE_materials_cache_finish(sldata, vedata);
eevee_render_reset_passes(vedata);
}
/* Current motion step. */
{
if (do_motion_blur) {
EEVEE_motion_blur_step_set(ved, MB_CURR);
RE_engine_frame_set(engine, floorf(time_curr), fractf(time_curr));
}
EEVEE_render_view_sync(vedata, engine, depsgraph);
EEVEE_render_cache_init(sldata, vedata);
DRW_render_object_iter(vedata, engine, depsgraph, EEVEE_render_cache);
EEVEE_motion_blur_cache_finish(vedata);
EEVEE_volumes_cache_finish(sldata, vedata);
EEVEE_materials_cache_finish(sldata, vedata);
EEVEE_lights_cache_finish(sldata, vedata);
EEVEE_lightprobes_cache_finish(sldata, vedata);
EEVEE_subsurface_draw_init(sldata, vedata);
EEVEE_effects_draw_init(sldata, vedata);
EEVEE_volumes_draw_init(sldata, vedata);
}
/* Actual drawing. */
{
if (i == 0) {
EEVEE_renderpasses_output_init(
sldata, vedata, g_data->render_tot_samples * time_steps_tot);
}
EEVEE_temporal_sampling_create_view(vedata);
EEVEE_render_draw(vedata, engine, render_layer, rect);
DRW_cache_restart();
}
}
EEVEE_volumes_free_smoke_textures();
EEVEE_motion_blur_data_free(&ved->stl->effects->motion_blur);
if (RE_engine_test_break(engine)) {
return;
}
DRW_render_object_iter(vedata, engine, draw_ctx->depsgraph, EEVEE_render_cache);
EEVEE_render_read_result(vedata, engine, render_layer, rect);
EEVEE_motion_blur_cache_finish(vedata);
/* Actually do the rendering. */
EEVEE_render_draw(vedata, engine, render_layer, rect);
EEVEE_volumes_free_smoke_textures();
EEVEE_motion_blur_data_free(&ved->stl->effects->motion_blur);
/* Restore original viewport size. */
DRW_render_viewport_size_set((int[2]){g_data->size_orig[0], g_data->size_orig[1]});
}
static void eevee_engine_free(void)

View File

@ -849,6 +849,7 @@ static void eevee_lightbake_cache_create(EEVEE_Data *vedata, EEVEE_LightBake *lb
/* Disable volumetrics when baking. */
stl->effects->enabled_effects &= ~EFFECT_VOLUMETRIC;
EEVEE_subsurface_draw_init(sldata, vedata);
EEVEE_effects_draw_init(sldata, vedata);
EEVEE_volumes_draw_init(sldata, vedata);

View File

@ -801,6 +801,8 @@ static void eevee_hair_cache_populate(EEVEE_Data *vedata,
*matcache.shadow_grp_p = DRW_shgroup_hair_create_sub(ob, psys, md, matcache.shadow_grp);
*cast_shadow = true;
}
EEVEE_motion_blur_hair_cache_populate(sldata, vedata, ob, psys, md);
}
#define ADD_SHGROUP_CALL(shgrp, ob, geom, oedata) \
@ -949,17 +951,15 @@ void EEVEE_object_hair_cache_populate(EEVEE_Data *vedata,
eevee_hair_cache_populate(vedata, sldata, ob, NULL, NULL, HAIR_MATERIAL_NR, cast_shadow);
}
void EEVEE_materials_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
void EEVEE_materials_cache_finish(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
{
EEVEE_PrivateData *pd = vedata->stl->g_data;
EEVEE_EffectsInfo *effects = vedata->stl->effects;
BLI_ghash_free(pd->material_hash, NULL, NULL);
pd->material_hash = NULL;
SET_FLAG_FROM_TEST(effects->enabled_effects, effects->sss_surface_count > 0, EFFECT_SSS);
/* TODO(fclem) this is not really clean. Init should not be done in cache finish. */
EEVEE_subsurface_draw_init(sldata, vedata);
}
void EEVEE_materials_free(void)

View File

@ -25,6 +25,7 @@
#include "DRW_render.h"
#include "BLI_rand.h"
#include "BLI_string_utils.h"
#include "BKE_animsys.h"
#include "BKE_camera.h"
@ -34,6 +35,8 @@
#include "DNA_anim_types.h"
#include "DNA_camera_types.h"
#include "DNA_mesh_types.h"
#include "DNA_modifier_types.h"
#include "DNA_particle_types.h"
#include "DNA_screen_types.h"
#include "ED_screen.h"
@ -49,6 +52,7 @@ static struct {
/* Motion Blur */
struct GPUShader *motion_blur_sh;
struct GPUShader *motion_blur_object_sh;
struct GPUShader *motion_blur_hair_sh;
struct GPUShader *velocity_tiles_sh;
struct GPUShader *velocity_tiles_expand_sh;
} e_data = {NULL}; /* Engine data */
@ -57,6 +61,7 @@ extern char datatoc_effect_velocity_tile_frag_glsl[];
extern char datatoc_effect_motion_blur_frag_glsl[];
extern char datatoc_object_motion_frag_glsl[];
extern char datatoc_object_motion_vert_glsl[];
extern char datatoc_common_hair_lib_glsl[];
extern char datatoc_common_view_lib_glsl[];
#define EEVEE_VELOCITY_TILE_SIZE 32
@ -79,9 +84,14 @@ static void eevee_create_shader_motion_blur(void)
datatoc_effect_velocity_tile_frag_glsl,
"#define TILE_EXPANSION\n"
"#define EEVEE_VELOCITY_TILE_SIZE " STRINGIFY(EEVEE_VELOCITY_TILE_SIZE) "\n");
char *vert = BLI_string_joinN(datatoc_common_hair_lib_glsl, datatoc_object_motion_vert_glsl);
e_data.motion_blur_hair_sh = DRW_shader_create_with_lib(
vert, NULL, datatoc_object_motion_frag_glsl, datatoc_common_view_lib_glsl, "#define HAIR\n");
MEM_freeN(vert);
}
int EEVEE_motion_blur_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata, Object *camera)
int EEVEE_motion_blur_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
{
EEVEE_StorageList *stl = vedata->stl;
EEVEE_FramebufferList *fbl = vedata->fbl;
@ -95,7 +105,9 @@ int EEVEE_motion_blur_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *veda
return 0;
}
if (scene->eevee.flag & SCE_EEVEE_MOTION_BLUR_ENABLED) {
effects->motion_blur_max = max_ii(0, scene->eevee.motion_blur_max);
if ((effects->motion_blur_max > 0) && (scene->eevee.flag & SCE_EEVEE_MOTION_BLUR_ENABLED)) {
if (!e_data.motion_blur_sh) {
eevee_create_shader_motion_blur();
}
@ -107,17 +119,6 @@ int EEVEE_motion_blur_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *veda
DRW_view_persmat_get(NULL, effects->motion_blur.camera[mb_step].persinv, true);
}
if (camera != NULL) {
Camera *cam = camera->data;
effects->motion_blur_near_far[0] = cam->clip_start;
effects->motion_blur_near_far[1] = cam->clip_end;
}
else {
/* Not supported yet. */
BLI_assert(0);
}
effects->motion_blur_max = max_ii(0, scene->eevee.motion_blur_max);
const float *fs_size = DRW_viewport_size_get();
int tx_size[2] = {1 + ((int)fs_size[0] / EEVEE_VELOCITY_TILE_SIZE),
1 + ((int)fs_size[1] / EEVEE_VELOCITY_TILE_SIZE)};
@ -146,13 +147,23 @@ int EEVEE_motion_blur_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *veda
void EEVEE_motion_blur_step_set(EEVEE_Data *vedata, int step)
{
BLI_assert(step < 3);
/* Meh, code duplication. Could be avoided if render init did not contain cache init. */
if (vedata->stl->effects == NULL) {
vedata->stl->effects = MEM_callocN(sizeof(*vedata->stl->effects), __func__);
}
vedata->stl->effects->motion_blur_step = step;
}
static void eevee_motion_blur_sync_camera(EEVEE_Data *vedata)
{
EEVEE_EffectsInfo *effects = vedata->stl->effects;
if (DRW_state_is_scene_render()) {
int mb_step = effects->motion_blur_step;
DRW_view_viewmat_get(NULL, effects->motion_blur.camera[mb_step].viewmat, false);
DRW_view_persmat_get(NULL, effects->motion_blur.camera[mb_step].persmat, false);
DRW_view_persmat_get(NULL, effects->motion_blur.camera[mb_step].persinv, true);
}
effects->motion_blur_near_far[0] = fabsf(DRW_view_near_distance_get(NULL));
effects->motion_blur_near_far[1] = fabsf(DRW_view_far_distance_get(NULL));
}
void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
{
EEVEE_PassList *psl = vedata->psl;
@ -167,6 +178,9 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
const float *fs_size = DRW_viewport_size_get();
int tx_size[2] = {GPU_texture_width(effects->velocity_tiles_tx),
GPU_texture_height(effects->velocity_tiles_tx)};
eevee_motion_blur_sync_camera(vedata);
DRWShadingGroup *grp;
{
DRW_PASS_CREATE(psl->velocity_tiles_x, DRW_STATE_WRITE_COLOR);
@ -230,6 +244,15 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
DRW_shgroup_uniform_mat4(grp, "prevViewProjMatrix", mb_data->camera[MB_PREV].persmat);
DRW_shgroup_uniform_mat4(grp, "currViewProjMatrix", mb_data->camera[MB_CURR].persmat);
DRW_shgroup_uniform_mat4(grp, "nextViewProjMatrix", mb_data->camera[MB_NEXT].persmat);
DRW_PASS_CREATE(psl->velocity_hair, DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
mb_data->hair_grp = grp = DRW_shgroup_create(e_data.motion_blur_hair_sh, psl->velocity_hair);
DRW_shgroup_uniform_mat4(grp, "prevViewProjMatrix", mb_data->camera[MB_PREV].persmat);
DRW_shgroup_uniform_mat4(grp, "currViewProjMatrix", mb_data->camera[MB_CURR].persmat);
DRW_shgroup_uniform_mat4(grp, "nextViewProjMatrix", mb_data->camera[MB_NEXT].persmat);
DRW_pass_link(psl->velocity_object, psl->velocity_hair);
}
EEVEE_motion_blur_data_init(mb_data);
@ -237,6 +260,59 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
else {
psl->motion_blur = NULL;
psl->velocity_object = NULL;
psl->velocity_hair = NULL;
}
}
void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
EEVEE_Data *vedata,
Object *ob,
ParticleSystem *psys,
ModifierData *md)
{
EEVEE_PassList *psl = vedata->psl;
EEVEE_StorageList *stl = vedata->stl;
EEVEE_EffectsInfo *effects = stl->effects;
DRWShadingGroup *grp = NULL;
if (!DRW_state_is_scene_render() || psl->velocity_hair == NULL) {
return;
}
/* For now we assume hair objects are always moving. */
EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(
&effects->motion_blur, ob, true);
if (mb_data) {
int mb_step = effects->motion_blur_step;
/* Store transform */
DRW_hair_duplimat_get(ob, psys, md, mb_data->obmat[mb_step]);
EEVEE_GeometryMotionData *mb_geom = EEVEE_motion_blur_geometry_data_get(
&effects->motion_blur, ob, true);
if (mb_step == MB_CURR) {
/* Fill missing matrices if the object was hidden in previous or next frame. */
if (is_zero_m4(mb_data->obmat[MB_PREV])) {
copy_m4_m4(mb_data->obmat[MB_PREV], mb_data->obmat[MB_CURR]);
}
if (is_zero_m4(mb_data->obmat[MB_NEXT])) {
copy_m4_m4(mb_data->obmat[MB_NEXT], mb_data->obmat[MB_CURR]);
}
grp = DRW_shgroup_hair_create_sub(ob, psys, md, effects->motion_blur.hair_grp);
DRW_shgroup_uniform_mat4(grp, "prevModelMatrix", mb_data->obmat[MB_PREV]);
DRW_shgroup_uniform_mat4(grp, "currModelMatrix", mb_data->obmat[MB_CURR]);
DRW_shgroup_uniform_mat4(grp, "nextModelMatrix", mb_data->obmat[MB_NEXT]);
DRW_shgroup_uniform_texture(grp, "prvBuffer", mb_geom->hair_pos_tx[MB_PREV]);
DRW_shgroup_uniform_texture(grp, "nxtBuffer", mb_geom->hair_pos_tx[MB_NEXT]);
DRW_shgroup_uniform_bool(grp, "useDeform", &mb_geom->use_deform, 1);
}
else {
/* Store vertex position buffer. */
mb_geom->hair_pos[mb_step] = DRW_hair_pos_buffer_get(ob, psys, md);
mb_geom->use_deform = true;
}
}
}
@ -262,15 +338,16 @@ void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
return;
}
EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(&effects->motion_blur, ob);
EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(
&effects->motion_blur, ob, false);
if (mb_data) {
int mb_step = effects->motion_blur_step;
/* Store transform */
copy_m4_m4(mb_data->obmat[mb_step], ob->obmat);
EEVEE_GeometryMotionData *mb_geom = EEVEE_motion_blur_geometry_data_get(&effects->motion_blur,
ob);
EEVEE_GeometryMotionData *mb_geom = EEVEE_motion_blur_geometry_data_get(
&effects->motion_blur, ob, false);
if (mb_step == MB_CURR) {
GPUBatch *batch = DRW_cache_object_surface_get(ob);
@ -295,6 +372,17 @@ void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
DRW_shgroup_call(grp, batch, ob);
if (mb_geom->use_deform) {
EEVEE_ObjectEngineData *oedata = EEVEE_object_data_ensure(ob);
if (!oedata->geom_update) {
/* FIXME(fclem) There can be false positives where the actual mesh is not updated.
* This avoids a crash but removes the motion blur from some objects.
* Maybe an issue with depsgraph tagging. */
mb_geom->use_deform = false;
oedata->geom_update = false;
GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_PREV]);
GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_NEXT]);
}
/* Keep to modify later (after init). */
mb_geom->batch = batch;
}
@ -321,52 +409,151 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
return;
}
int mb_step = effects->motion_blur_step;
if (mb_step != MB_CURR) {
/* Push instances attributes to the GPU. */
DRW_render_instance_buffer_finish();
/* Need to be called after DRW_render_instance_buffer_finish() */
/* Also we need to have a correct fbo bound for DRW_hair_update */
GPU_framebuffer_bind(vedata->fbl->main_fb);
DRW_hair_update();
DRW_cache_restart();
}
for (BLI_ghashIterator_init(&ghi, effects->motion_blur.geom);
BLI_ghashIterator_done(&ghi) == false;
BLI_ghashIterator_step(&ghi)) {
EEVEE_GeometryMotionData *mb_geom = BLI_ghashIterator_getValue(&ghi);
int mb_step = effects->motion_blur_step;
if (!mb_geom->use_deform) {
continue;
}
if (mb_step == MB_CURR) {
/* Modify batch to have data from adjacent frames. */
GPUBatch *batch = mb_geom->batch;
for (int i = 0; i < MB_CURR; i++) {
GPUVertBuf *vbo = mb_geom->vbo[i];
if (vbo && batch) {
if (vbo->vertex_len != batch->verts[0]->vertex_len) {
/* Vertex count mismatch, disable deform motion blur. */
mb_geom->use_deform = false;
GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_PREV]);
GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_NEXT]);
break;
}
/* Modify the batch to include the previous position. */
GPU_batch_vertbuf_add_ex(batch, vbo, true);
/* TODO(fclem) keep the vbo around for next (sub)frames. */
/* Only do once. */
mb_geom->vbo[i] = NULL;
switch (mb_geom->type) {
case EEVEE_HAIR_GEOM_MOTION_DATA:
if (mb_step == MB_CURR) {
/* TODO(fclem) Check for vertex count mismatch. */
mb_geom->use_deform = true;
}
}
else {
mb_geom->hair_pos[mb_step] = GPU_vertbuf_duplicate(mb_geom->hair_pos[mb_step]);
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(mb_geom->hair_pos[mb_step]);
mb_geom->hair_pos_tx[mb_step] = GPU_texture_create_from_vertbuf(
mb_geom->hair_pos[mb_step]);
}
break;
case EEVEE_MESH_GEOM_MOTION_DATA:
if (mb_step == MB_CURR) {
/* Modify batch to have data from adjacent frames. */
GPUBatch *batch = mb_geom->batch;
for (int i = 0; i < MB_CURR; i++) {
GPUVertBuf *vbo = mb_geom->vbo[i];
if (vbo && batch) {
if (vbo->vertex_len != batch->verts[0]->vertex_len) {
/* Vertex count mismatch, disable deform motion blur. */
mb_geom->use_deform = false;
}
if (mb_geom->use_deform == false) {
GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_PREV]);
GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_NEXT]);
break;
}
else {
/* Modify the batch to include the previous & next position. */
if (i == MB_PREV) {
GPU_batch_vertbuf_add_ex(batch, vbo, true);
mb_geom->vbo[i] = NULL;
}
else {
/* This VBO can be reused by the next time step. Don't pass ownership. */
GPU_batch_vertbuf_add_ex(batch, vbo, false);
}
}
}
}
}
else {
GPUVertBuf *vbo = mb_geom->vbo[mb_step];
/* If this assert fails, it means that different EEVEE_GeometryMotionDatas
* have been used for each motion blur step. */
BLI_assert(vbo);
if (vbo) {
/* Use the vbo to perform the copy on the GPU. */
GPU_vertbuf_use(vbo);
/* Perform a copy to avoid losing it after RE_engine_frame_set(). */
mb_geom->vbo[mb_step] = vbo = GPU_vertbuf_duplicate(vbo);
/* Find and replace "pos" attrib name. */
int attrib_id = GPU_vertformat_attr_id_get(&vbo->format, "pos");
GPU_vertformat_attr_rename(
&vbo->format, attrib_id, (mb_step == MB_PREV) ? "prv" : "nxt");
}
}
break;
default:
BLI_assert(0);
break;
}
else {
GPUVertBuf *vbo = mb_geom->vbo[mb_step];
/* If this assert fails, it means that different EEVEE_GeometryMotionDatas
* have been used for each motion blur step. */
BLI_assert(vbo);
if (vbo) {
/* Use the vbo to perform the copy on the GPU. */
GPU_vertbuf_use(vbo);
/* Perform a copy to avoid losing it after RE_engine_frame_set(). */
mb_geom->vbo[mb_step] = vbo = GPU_vertbuf_duplicate(vbo);
/* Find and replace "pos" attrib name. */
int attrib_id = GPU_vertformat_attr_id_get(&vbo->format, "pos");
GPU_vertformat_attr_rename(&vbo->format, attrib_id, (mb_step == MB_PREV) ? "prv" : "nxt");
}
}
}
void EEVEE_motion_blur_swap_data(EEVEE_Data *vedata)
{
EEVEE_StorageList *stl = vedata->stl;
EEVEE_EffectsInfo *effects = stl->effects;
GHashIterator ghi;
BLI_assert((effects->enabled_effects & EFFECT_MOTION_BLUR) != 0);
/* Camera Data. */
effects->motion_blur.camera[MB_PREV] = effects->motion_blur.camera[MB_CURR];
/* Object Data. */
for (BLI_ghashIterator_init(&ghi, effects->motion_blur.object);
BLI_ghashIterator_done(&ghi) == false;
BLI_ghashIterator_step(&ghi)) {
EEVEE_ObjectMotionData *mb_data = BLI_ghashIterator_getValue(&ghi);
copy_m4_m4(mb_data->obmat[MB_PREV], mb_data->obmat[MB_NEXT]);
}
/* Deformation Data. */
for (BLI_ghashIterator_init(&ghi, effects->motion_blur.geom);
BLI_ghashIterator_done(&ghi) == false;
BLI_ghashIterator_step(&ghi)) {
EEVEE_GeometryMotionData *mb_geom = BLI_ghashIterator_getValue(&ghi);
switch (mb_geom->type) {
case EEVEE_HAIR_GEOM_MOTION_DATA:
GPU_VERTBUF_DISCARD_SAFE(mb_geom->hair_pos[MB_PREV]);
DRW_TEXTURE_FREE_SAFE(mb_geom->hair_pos_tx[MB_PREV]);
mb_geom->hair_pos[MB_PREV] = mb_geom->hair_pos[MB_NEXT];
mb_geom->hair_pos_tx[MB_PREV] = mb_geom->hair_pos_tx[MB_NEXT];
break;
case EEVEE_MESH_GEOM_MOTION_DATA:
GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_PREV]);
mb_geom->vbo[MB_PREV] = mb_geom->vbo[MB_NEXT];
if (mb_geom->vbo[MB_NEXT]) {
GPUVertBuf *vbo = mb_geom->vbo[MB_NEXT];
int attrib_id = GPU_vertformat_attr_id_get(&vbo->format, "nxt");
GPU_vertformat_attr_rename(&vbo->format, attrib_id, "prv");
}
break;
default:
BLI_assert(0);
break;
}
}
}
@ -381,12 +568,6 @@ void EEVEE_motion_blur_draw(EEVEE_Data *vedata)
/* Motion Blur */
if ((effects->enabled_effects & EFFECT_MOTION_BLUR) != 0) {
int sample = DRW_state_is_image_render() ? effects->taa_render_sample :
effects->taa_current_sample;
double r;
BLI_halton_1d(2, 0.0, sample - 1, &r);
effects->motion_blur_sample_offset = r;
/* Create velocity max tiles in 2 passes. One for each dimension. */
GPU_framebuffer_bind(fbl->velocity_tiles_fb[0]);
DRW_draw_pass(psl->velocity_tiles_x);
@ -421,6 +602,7 @@ void EEVEE_motion_blur_free(void)
{
DRW_SHADER_FREE_SAFE(e_data.motion_blur_sh);
DRW_SHADER_FREE_SAFE(e_data.motion_blur_object_sh);
DRW_SHADER_FREE_SAFE(e_data.motion_blur_hair_sh);
DRW_SHADER_FREE_SAFE(e_data.velocity_tiles_sh);
DRW_SHADER_FREE_SAFE(e_data.velocity_tiles_expand_sh);
}

View File

@ -259,6 +259,7 @@ typedef struct EEVEE_PassList {
struct DRWPass *color_downsample_ps;
struct DRWPass *color_downsample_cube_ps;
struct DRWPass *velocity_object;
struct DRWPass *velocity_hair;
struct DRWPass *velocity_resolve;
struct DRWPass *velocity_tiles_x;
struct DRWPass *velocity_tiles;
@ -578,6 +579,7 @@ typedef struct EEVEE_MotionBlurData {
float persmat[4][4];
float persinv[4][4];
} camera[3];
DRWShadingGroup *hair_grp;
} EEVEE_MotionBlurData;
typedef struct EEVEE_ObjectKey {
@ -593,10 +595,26 @@ typedef struct EEVEE_ObjectMotionData {
float obmat[3][4][4];
} EEVEE_ObjectMotionData;
typedef enum eEEVEEMotionData {
EEVEE_MESH_GEOM_MOTION_DATA = 0,
EEVEE_HAIR_GEOM_MOTION_DATA,
} eEEVEEMotionData;
typedef struct EEVEE_GeometryMotionData {
struct GPUBatch *batch; /* Batch for time = t. */
struct GPUVertBuf *vbo[2]; /* Vbo for time = t +/- step. */
int use_deform; /* To disable deform mb if vertcount mismatch. */
eEEVEEMotionData type;
int use_deform; /* To disable deform mb if vertcount mismatch. */
union {
struct {
/* Mesh */
struct GPUBatch *batch; /* Batch for time = t. */
struct GPUVertBuf *vbo[2]; /* Vbo for time = t +/- step. */
};
struct {
/* Hair */
struct GPUVertBuf *hair_pos[2]; /* Position buffer for time = t +/- step. */
struct GPUTexture *hair_pos_tx[2]; /* Buffer Texture of the corresponding VBO. */
};
};
} EEVEE_GeometryMotionData;
/* ************ EFFECTS DATA ************* */
@ -667,7 +685,6 @@ typedef struct EEVEE_EffectsInfo {
float past_world_to_view[4][4];
CameraParams past_cam_params;
CameraParams current_cam_params;
float motion_blur_sample_offset;
char motion_blur_step; /* Which step we are evaluating. */
int motion_blur_max; /* Maximum distance in pixels a motion blurred pixel can cover. */
float motion_blur_near_far[2]; /* Camera near/far clip distances (positive). */
@ -858,6 +875,7 @@ typedef struct EEVEE_ObjectEngineData {
bool ob_vis, ob_vis_dirty;
bool need_update;
bool geom_update;
uint shadow_caster_id;
} EEVEE_ObjectEngineData;
@ -933,6 +951,8 @@ typedef struct EEVEE_PrivateData {
struct DRWView *world_views[6];
/** For rendering planar reflections. */
struct DRWView *planar_views[MAX_PLANAR];
int render_tot_samples;
} EEVEE_PrivateData; /* Transient data */
/* eevee_data.c */
@ -944,9 +964,12 @@ EEVEE_ViewLayerData *EEVEE_view_layer_data_ensure_ex(struct ViewLayer *view_laye
EEVEE_ViewLayerData *EEVEE_view_layer_data_ensure(void);
EEVEE_ObjectEngineData *EEVEE_object_data_get(Object *ob);
EEVEE_ObjectEngineData *EEVEE_object_data_ensure(Object *ob);
EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb, Object *ob);
EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb,
Object *ob,
bool hair);
EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_MotionBlurData *mb,
Object *ob);
Object *ob,
bool hair);
EEVEE_LightProbeEngineData *EEVEE_lightprobe_data_get(Object *ob);
EEVEE_LightProbeEngineData *EEVEE_lightprobe_data_ensure(Object *ob);
EEVEE_LightEngineData *EEVEE_light_data_get(Object *ob);
@ -954,6 +977,8 @@ EEVEE_LightEngineData *EEVEE_light_data_ensure(Object *ob);
EEVEE_WorldEngineData *EEVEE_world_data_get(World *wo);
EEVEE_WorldEngineData *EEVEE_world_data_ensure(World *wo);
void eevee_id_update(void *vedata, ID *id);
/* eevee_materials.c */
struct GPUTexture *EEVEE_materials_get_util_tex(void); /* XXX */
void EEVEE_materials_init(EEVEE_ViewLayerData *sldata,
@ -1170,10 +1195,16 @@ void EEVEE_subsurface_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data
void EEVEE_subsurface_free(void);
/* eevee_motion_blur.c */
int EEVEE_motion_blur_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, Object *camera);
int EEVEE_motion_blur_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_motion_blur_step_set(EEVEE_Data *vedata, int step);
void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, Object *ob);
void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
Object *ob,
struct ParticleSystem *psys,
struct ModifierData *md);
void EEVEE_motion_blur_swap_data(EEVEE_Data *vedata);
void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata);
void EEVEE_motion_blur_draw(EEVEE_Data *vedata);
void EEVEE_motion_blur_free(void);
@ -1201,6 +1232,7 @@ bool EEVEE_renderpasses_only_first_sample_pass_active(EEVEE_Data *vedata);
/* eevee_temporal_sampling.c */
void EEVEE_temporal_sampling_reset(EEVEE_Data *vedata);
void EEVEE_temporal_sampling_create_view(EEVEE_Data *vedata);
int EEVEE_temporal_sampling_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_temporal_sampling_offset_calc(const double ht_point[2],
const float filter_size,
@ -1244,6 +1276,10 @@ void EEVEE_effects_free(void);
bool EEVEE_render_init(EEVEE_Data *vedata,
struct RenderEngine *engine,
struct Depsgraph *depsgraph);
void EEVEE_render_view_sync(EEVEE_Data *vedata,
struct RenderEngine *engine,
struct Depsgraph *depsgraph);
void EEVEE_render_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_render_cache(void *vedata,
struct Object *ob,
struct RenderEngine *engine,
@ -1252,10 +1288,13 @@ void EEVEE_render_draw(EEVEE_Data *vedata,
struct RenderEngine *engine,
struct RenderLayer *render_layer,
const struct rcti *rect);
void EEVEE_render_read_result(EEVEE_Data *vedata,
struct RenderEngine *engine,
struct RenderLayer *rl,
const rcti *rect);
void EEVEE_render_update_passes(struct RenderEngine *engine,
struct Scene *scene,
struct ViewLayer *view_layer);
bool EEVEE_render_do_motion_blur(const struct Depsgraph *depsgraph);
/** eevee_lookdev.c */
void EEVEE_lookdev_cache_init(EEVEE_Data *vedata,

View File

@ -46,12 +46,6 @@
#include "eevee_private.h"
bool EEVEE_render_do_motion_blur(const struct Depsgraph *depsgraph)
{
Scene *scene = DEG_get_evaluated_scene(depsgraph);
return (scene->eevee.flag & SCE_EEVEE_MOTION_BLUR_ENABLED) != 0;
}
/* Return true if init properly. */
bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, struct Depsgraph *depsgraph)
{
@ -137,6 +131,29 @@ bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, struct Depsgraph *
&sldata->common_data);
}
EEVEE_render_view_sync(vedata, engine, depsgraph);
/* TODO(sergey): Shall render hold pointer to an evaluated camera instead? */
struct Object *ob_camera_eval = DEG_get_evaluated_object(depsgraph, RE_GetCamera(engine->re));
DRWView *view = (DRWView *)DRW_view_default_get();
DRW_view_camtexco_set(view, camtexcofac);
/* `EEVEE_renderpasses_init` will set the active render passes used by `EEVEE_effects_init`.
* `EEVEE_effects_init` needs to go second for TAA. */
EEVEE_renderpasses_init(vedata);
EEVEE_effects_init(sldata, vedata, ob_camera_eval, false);
EEVEE_materials_init(sldata, vedata, stl, fbl);
EEVEE_shadows_init(sldata);
EEVEE_lightprobes_init(sldata, vedata);
return true;
}
void EEVEE_render_view_sync(EEVEE_Data *vedata, RenderEngine *engine, struct Depsgraph *depsgraph)
{
EEVEE_PrivateData *g_data = vedata->stl->g_data;
/* Set the pers & view matrix. */
float winmat[4][4], viewmat[4][4], viewinv[4][4];
/* TODO(sergey): Shall render hold pointer to an evaluated camera instead? */
@ -149,20 +166,13 @@ bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, struct Depsgraph *
invert_m4_m4(viewmat, viewinv);
DRWView *view = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
DRW_view_camtexco_set(view, camtexcofac);
DRW_view_reset();
DRW_view_default_set(view);
DRW_view_set_active(view);
}
/* `EEVEE_renderpasses_init` will set the active render passes used by `EEVEE_effects_init`.
* `EEVEE_effects_init` needs to go second for TAA. */
EEVEE_renderpasses_init(vedata);
EEVEE_effects_init(sldata, vedata, ob_camera_eval, false);
EEVEE_materials_init(sldata, vedata, stl, fbl);
EEVEE_shadows_init(sldata);
EEVEE_lightprobes_init(sldata, vedata);
/* INIT CACHE */
void EEVEE_render_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
{
EEVEE_bloom_cache_init(sldata, vedata);
EEVEE_depth_of_field_cache_init(sldata, vedata);
EEVEE_effects_cache_init(sldata, vedata);
@ -175,8 +185,6 @@ bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, struct Depsgraph *
EEVEE_subsurface_cache_init(sldata, vedata);
EEVEE_temporal_sampling_cache_init(sldata, vedata);
EEVEE_volumes_cache_init(sldata, vedata);
return true;
}
/* Used by light cache. in this case engine is NULL. */
@ -189,6 +197,8 @@ void EEVEE_render_cache(void *vedata,
EEVEE_LightProbesInfo *pinfo = sldata->probes;
bool cast_shadow = false;
eevee_id_update(vedata, &ob->id);
if (pinfo->vis_data.collection) {
/* Used for rendering probe with visibility groups. */
bool ob_vis = BKE_collection_has_object_recursive(pinfo->vis_data.collection, ob);
@ -485,27 +495,12 @@ static void eevee_render_draw_background(EEVEE_Data *vedata)
void EEVEE_render_draw(EEVEE_Data *vedata, RenderEngine *engine, RenderLayer *rl, const rcti *rect)
{
const DRWContextState *draw_ctx = DRW_context_state_get();
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);
const char *viewname = RE_GetActiveRenderView(engine->re);
EEVEE_PassList *psl = vedata->psl;
EEVEE_StorageList *stl = vedata->stl;
EEVEE_FramebufferList *fbl = vedata->fbl;
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
EEVEE_ViewLayerData *sldata = EEVEE_view_layer_data_ensure();
EEVEE_PrivateData *g_data = stl->g_data;
/* FINISH CACHE */
EEVEE_volumes_cache_finish(sldata, vedata);
EEVEE_materials_cache_finish(sldata, vedata);
EEVEE_lights_cache_finish(sldata, vedata);
EEVEE_lightprobes_cache_finish(sldata, vedata);
EEVEE_effects_draw_init(sldata, vedata);
EEVEE_volumes_draw_init(sldata, vedata);
/* Sort transparents before the loop. */
DRW_pass_sort_shgroup_z(psl->transparent_pass);
/* Push instances attributes to the GPU. */
DRW_render_instance_buffer_finish();
@ -515,20 +510,17 @@ void EEVEE_render_draw(EEVEE_Data *vedata, RenderEngine *engine, RenderLayer *rl
GPU_framebuffer_bind(fbl->main_fb);
DRW_hair_update();
uint tot_sample = scene_eval->eevee.taa_render_samples;
/* Sort transparents before the loop. */
DRW_pass_sort_shgroup_z(psl->transparent_pass);
uint tot_sample = stl->g_data->render_tot_samples;
uint render_samples = 0;
/* SSR needs one iteration to start properly. */
if (stl->effects->enabled_effects & EFFECT_SSR) {
if ((stl->effects->enabled_effects & EFFECT_SSR) && !stl->effects->ssr_was_valid_double_buffer) {
tot_sample += 1;
}
EEVEE_renderpasses_output_init(sldata, vedata, tot_sample);
if (RE_engine_test_break(engine)) {
return;
}
while (render_samples < tot_sample && !RE_engine_test_break(engine)) {
float clear_col[4] = {0.0f, 0.0f, 0.0f, 0.0f};
float clear_depth = 1.0f;
@ -624,6 +616,15 @@ void EEVEE_render_draw(EEVEE_Data *vedata, RenderEngine *engine, RenderLayer *rl
RE_engine_update_progress(engine, (float)(render_samples++) / (float)tot_sample);
}
}
void EEVEE_render_read_result(EEVEE_Data *vedata,
RenderEngine *engine,
RenderLayer *rl,
const rcti *rect)
{
const char *viewname = RE_GetActiveRenderView(engine->re);
EEVEE_ViewLayerData *sldata = EEVEE_view_layer_data_ensure();
eevee_render_result_combined(rl, viewname, rect, vedata, sldata);
eevee_render_result_mist(rl, viewname, rect, vedata, sldata);
@ -638,9 +639,6 @@ void EEVEE_render_draw(EEVEE_Data *vedata, RenderEngine *engine, RenderLayer *rl
eevee_render_result_bloom(rl, viewname, rect, vedata, sldata);
eevee_render_result_volume_scatter(rl, viewname, rect, vedata, sldata);
eevee_render_result_volume_transmittance(rl, viewname, rect, vedata, sldata);
/* Restore original viewport size. */
DRW_render_viewport_size_set((int[2]){g_data->size_orig[0], g_data->size_orig[1]});
}
void EEVEE_render_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer)

View File

@ -75,18 +75,8 @@ static void eevee_create_shader_subsurface(void)
MEM_freeN(frag_str);
}
void EEVEE_subsurface_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
void EEVEE_subsurface_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *UNUSED(vedata))
{
EEVEE_CommonUniformBuffer *common_data = &sldata->common_data;
EEVEE_StorageList *stl = vedata->stl;
EEVEE_EffectsInfo *effects = stl->effects;
const DRWContextState *draw_ctx = DRW_context_state_get();
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);
effects->sss_sample_count = 1 + scene_eval->eevee.sss_samples * 2;
effects->sss_surface_count = 0;
common_data->sss_jitter_threshold = scene_eval->eevee.sss_jitter_threshold;
}
void EEVEE_subsurface_draw_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
@ -204,15 +194,24 @@ void EEVEE_subsurface_output_init(EEVEE_ViewLayerData *UNUSED(sldata),
}
}
void EEVEE_subsurface_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
void EEVEE_subsurface_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
{
EEVEE_CommonUniformBuffer *common_data = &sldata->common_data;
EEVEE_EffectsInfo *effects = vedata->stl->effects;
EEVEE_PassList *psl = vedata->psl;
const DRWContextState *draw_ctx = DRW_context_state_get();
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);
/* Shaders */
if (!e_data.sss_sh[0]) {
eevee_create_shader_subsurface();
}
effects->sss_sample_count = 1 + scene_eval->eevee.sss_samples * 2;
effects->sss_surface_count = 0;
common_data->sss_jitter_threshold = scene_eval->eevee.sss_jitter_threshold;
/** Screen Space SubSurface Scattering overview
* TODO
*/

View File

@ -186,6 +186,18 @@ void EEVEE_temporal_sampling_reset(EEVEE_Data *vedata)
vedata->stl->effects->taa_current_sample = 1;
}
void EEVEE_temporal_sampling_create_view(EEVEE_Data *vedata)
{
EEVEE_EffectsInfo *effects = vedata->stl->effects;
/* Create a sub view to disable clipping planes (if any). */
const DRWView *default_view = DRW_view_default_get();
float viewmat[4][4], winmat[4][4];
DRW_view_viewmat_get(default_view, viewmat, false);
DRW_view_winmat_get(default_view, winmat, false);
effects->taa_view = DRW_view_create_sub(default_view, viewmat, winmat);
DRW_view_clip_planes_set(effects->taa_view, NULL, 0);
}
int EEVEE_temporal_sampling_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data *vedata)
{
EEVEE_StorageList *stl = vedata->stl;
@ -201,15 +213,8 @@ int EEVEE_temporal_sampling_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Data
* we accumulate the redraw inside the drawing loop in eevee_draw_scene().
**/
effects->taa_render_sample = 1;
effects->taa_view = NULL;
/* Create a sub view to disable clipping planes (if any). */
const DRWView *default_view = DRW_view_default_get();
float viewmat[4][4], winmat[4][4];
DRW_view_viewmat_get(default_view, viewmat, false);
DRW_view_winmat_get(default_view, winmat, false);
effects->taa_view = DRW_view_create_sub(default_view, viewmat, winmat);
DRW_view_clip_planes_set(effects->taa_view, NULL, 0);
EEVEE_temporal_sampling_create_view(vedata);
const DRWContextState *draw_ctx = DRW_context_state_get();
const Scene *scene_eval = DEG_get_evaluated_scene(draw_ctx->depsgraph);

View File

@ -4,9 +4,14 @@ uniform mat4 prevModelMatrix;
uniform mat4 nextModelMatrix;
uniform bool useDeform;
#ifdef HAIR
uniform samplerBuffer prvBuffer; /* RGBA32F */
uniform samplerBuffer nxtBuffer; /* RGBA32F */
#else
in vec3 pos;
in vec3 prv; /* Previous frame position. */
in vec3 nxt; /* Next frame position. */
#endif
out vec3 currWorldPos;
out vec3 prevWorldPos;
@ -14,10 +19,36 @@ out vec3 nextWorldPos;
void main()
{
#ifdef HAIR
bool is_persp = (ProjectionMatrix[3][3] == 0.0);
float time, thick_time, thickness;
vec3 tan, binor;
vec3 wpos;
hair_get_pos_tan_binor_time(is_persp,
ModelMatrixInverse,
ViewMatrixInverse[3].xyz,
ViewMatrixInverse[2].xyz,
wpos,
tan,
binor,
time,
thickness,
thick_time);
int id = hair_get_base_id();
vec3 pos = texelFetch(hairPointBuffer, id).point_position;
vec3 prv = texelFetch(prvBuffer, id).point_position;
vec3 nxt = texelFetch(nxtBuffer, id).point_position;
#endif
prevWorldPos = (prevModelMatrix * vec4(useDeform ? prv : pos, 1.0)).xyz;
currWorldPos = (currModelMatrix * vec4(pos, 1.0)).xyz;
nextWorldPos = (nextModelMatrix * vec4(useDeform ? nxt : pos, 1.0)).xyz;
/* Use jittered projmatrix to be able to match exact sample depth (depth equal test).
* Note that currModelMatrix needs to also be equal to ModelMatrix for the samples to match. */
#ifndef HAIR
gl_Position = ViewProjectionMatrix * vec4(currWorldPos, 1.0);
#else
gl_Position = ViewProjectionMatrix * vec4(wpos, 1.0);
#endif
}

View File

@ -630,6 +630,8 @@ void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
void (*callback)(void *vedata, void *user_data),
void *user_data);
void DRW_cache_restart(void);
/* ViewLayers */
void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type);
void **DRW_view_layer_engine_data_ensure_ex(struct ViewLayer *view_layer,

View File

@ -176,6 +176,13 @@ struct DRWShadingGroup *DRW_shgroup_hair_create_sub(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md,
struct DRWShadingGroup *shgrp);
struct GPUVertBuf *DRW_hair_pos_buffer_get(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md);
void DRW_hair_duplimat_get(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md,
float (*dupli_mat)[4]);
void DRW_hair_init(void);
void DRW_hair_update(void);

View File

@ -124,32 +124,109 @@ void DRW_hair_init(void)
}
}
static ParticleHairCache *drw_hair_particle_cache_get(
Object *object, ParticleSystem *psys, ModifierData *md, int subdiv, int thickness_res)
{
bool update;
ParticleHairCache *cache;
if (psys) {
/* Old particle hair. */
update = particles_ensure_procedural_data(object, psys, md, &cache, subdiv, thickness_res);
}
else {
/* New hair object. */
update = hair_ensure_procedural_data(object, &cache, subdiv, thickness_res);
}
if (update) {
int final_points_len = cache->final[subdiv].strands_res * cache->strands_len;
if (final_points_len > 0) {
GPUShader *tf_shader = hair_refine_shader_get(PART_REFINE_CATMULL_ROM);
#ifdef USE_TRANSFORM_FEEDBACK
DRWShadingGroup *tf_shgrp = DRW_shgroup_transform_feedback_create(
tf_shader, g_tf_pass, cache->final[subdiv].proc_buf);
#else
DRWShadingGroup *tf_shgrp = DRW_shgroup_create(tf_shader, g_tf_pass);
ParticleRefineCall *pr_call = MEM_mallocN(sizeof(*pr_call), __func__);
pr_call->next = g_tf_calls;
pr_call->vbo = cache->final[subdiv].proc_buf;
pr_call->shgrp = tf_shgrp;
pr_call->vert_len = final_points_len;
g_tf_calls = pr_call;
DRW_shgroup_uniform_int(tf_shgrp, "targetHeight", &g_tf_target_height, 1);
DRW_shgroup_uniform_int(tf_shgrp, "targetWidth", &g_tf_target_width, 1);
DRW_shgroup_uniform_int(tf_shgrp, "idOffset", &g_tf_id_offset, 1);
#endif
DRW_shgroup_uniform_texture(tf_shgrp, "hairPointBuffer", cache->point_tex);
DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandBuffer", cache->strand_tex);
DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandSegBuffer", cache->strand_seg_tex);
DRW_shgroup_uniform_int(tf_shgrp, "hairStrandsRes", &cache->final[subdiv].strands_res, 1);
DRW_shgroup_call_procedural_points(tf_shgrp, NULL, final_points_len);
}
}
return cache;
}
/* Note: Only valid after DRW_hair_update(). */
GPUVertBuf *DRW_hair_pos_buffer_get(Object *object, ParticleSystem *psys, ModifierData *md)
{
const DRWContextState *draw_ctx = DRW_context_state_get();
Scene *scene = draw_ctx->scene;
int subdiv = scene->r.hair_subdiv;
int thickness_res = (scene->r.hair_type == SCE_HAIR_SHAPE_STRAND) ? 1 : 2;
ParticleHairCache *cache = drw_hair_particle_cache_get(object, psys, md, subdiv, thickness_res);
return cache->final[subdiv].proc_buf;
}
void DRW_hair_duplimat_get(Object *object,
ParticleSystem *psys,
ModifierData *UNUSED(md),
float (*dupli_mat)[4])
{
Object *dupli_parent = DRW_object_get_dupli_parent(object);
DupliObject *dupli_object = DRW_object_get_dupli(object);
if (psys) {
if ((dupli_parent != NULL) && (dupli_object != NULL)) {
if (dupli_object->type & OB_DUPLICOLLECTION) {
copy_m4_m4(dupli_mat, dupli_parent->obmat);
}
else {
copy_m4_m4(dupli_mat, dupli_object->ob->obmat);
invert_m4(dupli_mat);
mul_m4_m4m4(dupli_mat, object->obmat, dupli_mat);
}
}
else {
unit_m4(dupli_mat);
}
}
else {
/* New hair object. */
copy_m4_m4(dupli_mat, object->obmat);
}
}
DRWShadingGroup *DRW_shgroup_hair_create_sub(Object *object,
ParticleSystem *psys,
ModifierData *md,
DRWShadingGroup *shgrp_parent)
{
/* TODO(fclem): Pass the scene as parameter */
const DRWContextState *draw_ctx = DRW_context_state_get();
Scene *scene = draw_ctx->scene;
float dupli_mat[4][4];
Object *dupli_parent = DRW_object_get_dupli_parent(object);
DupliObject *dupli_object = DRW_object_get_dupli(object);
int subdiv = scene->r.hair_subdiv;
int thickness_res = (scene->r.hair_type == SCE_HAIR_SHAPE_STRAND) ? 1 : 2;
ParticleHairCache *hair_cache;
bool need_ft_update;
if (psys) {
/* Old particle hair. */
need_ft_update = particles_ensure_procedural_data(
object, psys, md, &hair_cache, subdiv, thickness_res);
}
else {
/* New hair object. */
need_ft_update = hair_ensure_procedural_data(object, &hair_cache, subdiv, thickness_res);
}
ParticleHairCache *hair_cache = drw_hair_particle_cache_get(
object, psys, md, subdiv, thickness_res);
DRWShadingGroup *shgrp = DRW_shgroup_create_sub(shgrp_parent);
@ -177,25 +254,7 @@ DRWShadingGroup *DRW_shgroup_hair_create_sub(Object *object,
DRW_shgroup_uniform_texture(shgrp, "ac", g_dummy_texture);
}
if (psys) {
if ((dupli_parent != NULL) && (dupli_object != NULL)) {
if (dupli_object->type & OB_DUPLICOLLECTION) {
copy_m4_m4(dupli_mat, dupli_parent->obmat);
}
else {
copy_m4_m4(dupli_mat, dupli_object->ob->obmat);
invert_m4(dupli_mat);
mul_m4_m4m4(dupli_mat, object->obmat, dupli_mat);
}
}
else {
unit_m4(dupli_mat);
}
}
else {
/* New hair object. */
copy_m4_m4(dupli_mat, object->obmat);
}
DRW_hair_duplimat_get(object, psys, md, dupli_mat);
/* Get hair shape parameters. */
float hair_rad_shape, hair_rad_root, hair_rad_tip;
@ -229,38 +288,6 @@ DRWShadingGroup *DRW_shgroup_hair_create_sub(Object *object,
GPUBatch *geom = hair_cache->final[subdiv].proc_hairs[thickness_res - 1];
DRW_shgroup_call_no_cull(shgrp, geom, object);
/* Transform Feedback subdiv. */
if (need_ft_update) {
int final_points_len = hair_cache->final[subdiv].strands_res * hair_cache->strands_len;
if (final_points_len) {
GPUShader *tf_shader = hair_refine_shader_get(PART_REFINE_CATMULL_ROM);
#ifdef USE_TRANSFORM_FEEDBACK
DRWShadingGroup *tf_shgrp = DRW_shgroup_transform_feedback_create(
tf_shader, g_tf_pass, hair_cache->final[subdiv].proc_buf);
#else
DRWShadingGroup *tf_shgrp = DRW_shgroup_create(tf_shader, g_tf_pass);
ParticleRefineCall *pr_call = MEM_mallocN(sizeof(*pr_call), __func__);
pr_call->next = g_tf_calls;
pr_call->vbo = hair_cache->final[subdiv].proc_buf;
pr_call->shgrp = tf_shgrp;
pr_call->vert_len = final_points_len;
g_tf_calls = pr_call;
DRW_shgroup_uniform_int(tf_shgrp, "targetHeight", &g_tf_target_height, 1);
DRW_shgroup_uniform_int(tf_shgrp, "targetWidth", &g_tf_target_width, 1);
DRW_shgroup_uniform_int(tf_shgrp, "idOffset", &g_tf_id_offset, 1);
#endif
DRW_shgroup_uniform_texture(tf_shgrp, "hairPointBuffer", hair_cache->point_tex);
DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandBuffer", hair_cache->strand_tex);
DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandSegBuffer", hair_cache->strand_seg_tex);
DRW_shgroup_uniform_int(
tf_shgrp, "hairStrandsRes", &hair_cache->final[subdiv].strands_res, 1);
DRW_shgroup_call_procedural_points(tf_shgrp, NULL, final_points_len);
}
}
return shgrp;
}

View File

@ -1942,6 +1942,19 @@ void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
#endif
}
/* Used when the render engine wants to redo another cache populate inside the same render frame. */
void DRW_cache_restart(void)
{
/* Force cache to reset. */
drw_viewport_cache_resize();
drw_viewport_var_init();
DST.buffer_finish_called = false;
DRW_hair_init();
}
static struct DRWSelectBuffer {
struct GPUFrameBuffer *framebuffer_depth_only;
struct GPUTexture *texture_depth;

View File

@ -222,6 +222,7 @@
.motion_blur_shutter = 0.5f, \
.motion_blur_depth_scale = 100.0f, \
.motion_blur_max = 32, \
.motion_blur_steps = 1, \
\
.shadow_cube_size = 512, \
.shadow_cascade_size = 1024, \

View File

@ -1590,7 +1590,6 @@ typedef struct SceneEEVEE {
float gi_irradiance_smoothing;
float gi_glossy_clamp;
float gi_filter_quality;
char _pad[4];
float gi_cubemap_draw_size;
float gi_irradiance_draw_size;
@ -1630,6 +1629,7 @@ typedef struct SceneEEVEE {
int motion_blur_samples DNA_DEPRECATED;
int motion_blur_max;
int motion_blur_steps;
float motion_blur_shutter;
float motion_blur_depth_scale;

View File

@ -7169,8 +7169,18 @@ static void rna_def_scene_eevee(BlenderRNA *brna)
prop = RNA_def_property(srna, "motion_blur_max", PROP_INT, PROP_PIXEL);
RNA_def_property_ui_text(prop, "Max Blur", "Maximum blur distance a pixel can spread over");
RNA_def_property_range(prop, 1, 2048);
RNA_def_property_ui_range(prop, 1, 512, 1, -1);
RNA_def_property_range(prop, 0, 2048);
RNA_def_property_ui_range(prop, 0, 512, 1, -1);
RNA_def_property_override_flag(prop, PROPOVERRIDE_OVERRIDABLE_LIBRARY);
RNA_def_property_update(prop, NC_SCENE | ND_RENDER_OPTIONS, NULL);
prop = RNA_def_property(srna, "motion_blur_steps", PROP_INT, PROP_NONE);
RNA_def_property_ui_text(prop,
"Motion steps",
"Controls accuracy of motion blur, "
"more steps means longer render time");
RNA_def_property_range(prop, 1, INT_MAX);
RNA_def_property_ui_range(prop, 1, 64, 1, -1);
RNA_def_property_override_flag(prop, PROPOVERRIDE_OVERRIDABLE_LIBRARY);
RNA_def_property_update(prop, NC_SCENE | ND_RENDER_OPTIONS, NULL);