EEVEE: Rewrite: Implement nodetree support for all geometry types

This commit reintroduces support for all geometry types and full nodetree support.
Only the forward shading pipeline is implemented for now.

Vertex Displacement is automatically enabled for now.

Lighting & Shading are placeholders.

Related Task: T93220

# Conflicts:
#	source/blender/draw/engines/eevee_next/eevee_engine.cc
#	source/blender/gpu/CMakeLists.txt
Clément Foucault 2022-05-02 09:22:14 +02:00
parent f0f44fd92f
commit 8ece0816d9
Notes: blender-bot 2023-02-14 08:28:46 +01:00
Referenced by issue #98663, Crash when rendering with cryptomatte enable
39 changed files with 4285 additions and 9 deletions

View File

@ -543,6 +543,18 @@ Container &move_assign_container(Container &dst, Container &&src) noexcept(
return dst;
}
/**
* Returns true if the value is different and was assigned.
*/
template<typename T> inline bool assign_if_different(T &old_value, T new_value)
{
if (old_value != new_value) {
old_value = std::move(new_value);
return true;
}
return false;
}
} // namespace blender
namespace blender::detail {
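A minimal usage sketch of the new helper (illustration only, not part of this diff); the function and member names are hypothetical:

/* Hypothetical caller: only the call to blender::assign_if_different() is real. */
bool sync_film_resolution(blender::int2 &cached_resolution, const blender::int2 &new_resolution)
{
  /* Returns true only when the value actually changed, so dependent work
   * (e.g. reallocating render targets or resetting sampling) runs once, not on every redraw. */
  return blender::assign_if_different(cached_resolution, new_resolution);
}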

View File

@ -134,6 +134,13 @@ set(SRC
engines/eevee/eevee_temporal_sampling.c
engines/eevee/eevee_volumes.c
engines/eevee_next/eevee_engine.cc
engines/eevee_next/eevee_instance.cc
engines/eevee_next/eevee_material.cc
engines/eevee_next/eevee_pipeline.cc
engines/eevee_next/eevee_shader.cc
engines/eevee_next/eevee_sync.cc
engines/eevee_next/eevee_view.cc
engines/eevee_next/eevee_world.cc
engines/workbench/workbench_data.c
engines/workbench/workbench_effect_antialiasing.c
engines/workbench/workbench_effect_cavity.c
@ -343,6 +350,18 @@ set(GLSL_SRC
engines/eevee/shaders/volumetric_integration_frag.glsl
engines/eevee/shaders/world_vert.glsl
engines/eevee_next/shaders/eevee_attributes_lib.glsl
engines/eevee_next/shaders/eevee_geom_curves_vert.glsl
engines/eevee_next/shaders/eevee_geom_gpencil_vert.glsl
engines/eevee_next/shaders/eevee_geom_mesh_vert.glsl
engines/eevee_next/shaders/eevee_geom_world_vert.glsl
engines/eevee_next/shaders/eevee_nodetree_lib.glsl
engines/eevee_next/shaders/eevee_surf_deferred_frag.glsl
engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
engines/eevee_next/shaders/eevee_surf_lib.glsl
engines/eevee_next/shaders/eevee_surf_world_frag.glsl
engines/workbench/shaders/workbench_cavity_lib.glsl
engines/workbench/shaders/workbench_common_lib.glsl
engines/workbench/shaders/workbench_composite_frag.glsl

View File

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*/
namespace blender::eevee {
class Instance;
static const float cubeface_mat[6][4][4] = {
/* Pos X */
{{0.0f, 0.0f, -1.0f, 0.0f},
{0.0f, -1.0f, 0.0f, 0.0f},
{-1.0f, 0.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 1.0f}},
/* Neg X */
{{0.0f, 0.0f, 1.0f, 0.0f},
{0.0f, -1.0f, 0.0f, 0.0f},
{1.0f, 0.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 1.0f}},
/* Pos Y */
{{1.0f, 0.0f, 0.0f, 0.0f},
{0.0f, 0.0f, -1.0f, 0.0f},
{0.0f, 1.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 1.0f}},
/* Neg Y */
{{1.0f, 0.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 1.0f, 0.0f},
{0.0f, -1.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 1.0f}},
/* Pos Z */
{{1.0f, 0.0f, 0.0f, 0.0f},
{0.0f, -1.0f, 0.0f, 0.0f},
{0.0f, 0.0f, -1.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 1.0f}},
/* Neg Z */
{{-1.0f, 0.0f, 0.0f, 0.0f},
{0.0f, -1.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 1.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 1.0f}},
};
} // namespace blender::eevee
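As an aside (not part of this commit), each face matrix above is a pure axis-aligned rotation, so a small self-check like the following sketch could validate the table:

#include <cmath>

/* Illustration only: verify the 3x3 block of a face matrix is orthonormal
 * (rows are unit length and mutually perpendicular). */
static bool face_matrix_is_orthonormal(const float m[4][4])
{
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      float dot = 0.0f;
      for (int k = 0; k < 3; k++) {
        dot += m[i][k] * m[j][k];
      }
      if (std::fabs(dot - ((i == j) ? 1.0f : 0.0f)) > 1e-6f) {
        return false;
      }
    }
  }
  return true;
}
/* Usage: for (const auto &face : cubeface_mat) { assert(face_matrix_is_orthonormal(face)); } */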

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* List of defines that are shared with the GPUShaderCreateInfos. We do this to avoid
* dragging larger headers into the createInfo pipeline which would cause problems.
*/
#pragma once
/* Number of items in a culling batch. Needs to be a power of 2. Must be <= 65536. */
/* The current limiting factor is the sorting phase, which is a single pass and only sorts within
* a threadgroup whose maximum size is 1024. */
#define CULLING_BATCH_SIZE 1024
/**
* IMPORTANT: Some data packing is tweaked for these values.
* Be sure to update them accordingly.
* SHADOW_TILEMAP_RES max is 32 because of the shared bitmaps used for LOD tagging.
* It is also limited by the maximum thread group size (1024).
*/
#define SHADOW_TILEMAP_RES 16
#define SHADOW_TILEMAP_LOD 4 /* LOG2(SHADOW_TILEMAP_RES) */
#define SHADOW_TILEMAP_PER_ROW 64
#define SHADOW_PAGE_COPY_GROUP_SIZE 32
#define SHADOW_DEPTH_SCAN_GROUP_SIZE 32
#define SHADOW_AABB_TAG_GROUP_SIZE 64
#define SHADOW_MAX_TILEMAP 4096
#define SHADOW_MAX_PAGE 4096
#define SHADOW_PAGE_PER_ROW 64
#define HIZ_MIP_COUNT 6u
/* Group size is 2x smaller because we simply copy the level 0. */
#define HIZ_GROUP_SIZE 1u << (HIZ_MIP_COUNT - 2u)
#define RAYTRACE_GROUP_SIZE 16
#define RAYTRACE_MAX_TILES (16384 / RAYTRACE_GROUP_SIZE) * (16384 / RAYTRACE_GROUP_SIZE)
/* Minimum visibility size. */
#define LIGHTPROBE_FILTER_VIS_GROUP_SIZE 16
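The constraints stated in the comments above could be enforced at compile time; an illustrative sketch (not part of this commit), valid wherever these defines are visible from C++:

static_assert((CULLING_BATCH_SIZE & (CULLING_BATCH_SIZE - 1)) == 0,
              "CULLING_BATCH_SIZE must be a power of two");
static_assert(CULLING_BATCH_SIZE <= 65536, "Limited by the single-pass sorting phase");
static_assert(SHADOW_TILEMAP_RES <= 32, "Limited by the shared bitmaps used for LOD tagging");
static_assert((1 << SHADOW_TILEMAP_LOD) == SHADOW_TILEMAP_RES,
              "SHADOW_TILEMAP_LOD must remain LOG2(SHADOW_TILEMAP_RES)");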

View File

@ -13,38 +13,91 @@
#include "eevee_engine.h" /* Own include. */
#include "eevee_instance.hh"
using namespace blender;
struct EEVEE_Data {
DrawEngineType *engine_type;
DRWViewportEmptyList *fbl;
DRWViewportEmptyList *txl;
DRWViewportEmptyList *psl;
DRWViewportEmptyList *stl;
void *instance;
eevee::Instance *instance;
};
static void eevee_engine_init(void *vedata)
{
UNUSED_VARS(vedata);
EEVEE_Data *ved = reinterpret_cast<EEVEE_Data *>(vedata);
if (ved->instance == nullptr) {
ved->instance = new eevee::Instance();
}
const DRWContextState *ctx_state = DRW_context_state_get();
Depsgraph *depsgraph = ctx_state->depsgraph;
Scene *scene = ctx_state->scene;
View3D *v3d = ctx_state->v3d;
const ARegion *region = ctx_state->region;
RegionView3D *rv3d = ctx_state->rv3d;
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
int2 size = int2(GPU_texture_width(dtxl->color), GPU_texture_height(dtxl->color));
const DRWView *default_view = DRW_view_default_get();
Object *camera = nullptr;
/* Get render borders. */
rcti rect;
BLI_rcti_init(&rect, 0, size[0], 0, size[1]);
if (v3d) {
if (rv3d && (rv3d->persp == RV3D_CAMOB)) {
camera = v3d->camera;
}
if (v3d->flag2 & V3D_RENDER_BORDER) {
if (camera) {
rctf viewborder;
/* TODO(fclem) Might be better to get it from DRW. */
ED_view3d_calc_camera_border(scene, depsgraph, region, v3d, rv3d, &viewborder, false);
float viewborder_sizex = BLI_rctf_size_x(&viewborder);
float viewborder_sizey = BLI_rctf_size_y(&viewborder);
rect.xmin = floorf(viewborder.xmin + (scene->r.border.xmin * viewborder_sizex));
rect.ymin = floorf(viewborder.ymin + (scene->r.border.ymin * viewborder_sizey));
rect.xmax = floorf(viewborder.xmin + (scene->r.border.xmax * viewborder_sizex));
rect.ymax = floorf(viewborder.ymin + (scene->r.border.ymax * viewborder_sizey));
}
else {
rect.xmin = v3d->render_border.xmin * size[0];
rect.ymin = v3d->render_border.ymin * size[1];
rect.xmax = v3d->render_border.xmax * size[0];
rect.ymax = v3d->render_border.ymax * size[1];
}
}
}
ved->instance->init(
size, &rect, nullptr, depsgraph, nullptr, camera, nullptr, default_view, v3d, rv3d);
}
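For reference, a small numeric sketch (illustration only) of the non-camera border path above, using hypothetical values: a 1920x1080 viewport and a normalized render border of (0.25, 0.25)-(0.75, 1.0) resolve to the pixel rect shown in the comments.

rcti rect;
const int2 size(1920, 1080);
/* Equivalent of v3d->render_border, in normalized viewport space: xmin, xmax, ymin, ymax. */
const rctf border = {0.25f, 0.75f, 0.25f, 1.0f};
rect.xmin = border.xmin * size[0]; /* 480 */
rect.ymin = border.ymin * size[1]; /* 270 */
rect.xmax = border.xmax * size[0]; /* 1440 */
rect.ymax = border.ymax * size[1]; /* 1080 */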
static void eevee_draw_scene(void *vedata)
{
UNUSED_VARS(vedata);
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
reinterpret_cast<EEVEE_Data *>(vedata)->instance->draw_viewport(dfbl);
}
static void eevee_cache_init(void *vedata)
{
UNUSED_VARS(vedata);
reinterpret_cast<EEVEE_Data *>(vedata)->instance->begin_sync();
}
static void eevee_cache_populate(void *vedata, Object *object)
{
UNUSED_VARS(vedata, object);
reinterpret_cast<EEVEE_Data *>(vedata)->instance->object_sync(object);
}
static void eevee_cache_finish(void *vedata)
{
UNUSED_VARS(vedata);
reinterpret_cast<EEVEE_Data *>(vedata)->instance->end_sync();
}
static void eevee_engine_free()
@ -53,7 +106,7 @@ static void eevee_engine_free()
static void eevee_instance_free(void *instance)
{
UNUSED_VARS(instance);
delete reinterpret_cast<eevee::Instance *>(instance);
}
static void eevee_render_to_image(void *UNUSED(vedata),

View File

@ -7,6 +7,9 @@
#pragma once
#include "DRW_render.h"
#include "RE_engine.h"
#ifdef __cplusplus
extern "C" {
#endif

View File

@ -0,0 +1,175 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* An instance contains all structures needed to do a complete render.
*/
#include "BKE_global.h"
#include "BKE_object.h"
#include "BLI_rect.h"
#include "DEG_depsgraph_query.h"
#include "DNA_ID.h"
#include "DNA_lightprobe_types.h"
#include "DNA_modifier_types.h"
#include "eevee_instance.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name Init
*
* Init functions need to be called once at the start of a frame.
* Active camera, render extent and enabled render passes are immutable until next init.
* This takes care of resizing output buffers and view in case a parameter changed.
* IMPORTANT: xxx.init() functions are NOT meant to acquire and allocate DRW resources.
* Any attempt to do so will likely produce use after free situations.
* \{ */
void Instance::init(const int2 &output_res,
const rcti *output_rect,
RenderEngine *render_,
Depsgraph *depsgraph_,
const LightProbe *light_probe_,
Object *camera_object_,
const RenderLayer *render_layer_,
const DRWView *drw_view_,
const View3D *v3d_,
const RegionView3D *rv3d_)
{
render = render_;
depsgraph = depsgraph_;
render_layer = render_layer_;
drw_view = drw_view_;
v3d = v3d_;
rv3d = rv3d_;
update_eval_members();
main_view.init(output_res);
}
void Instance::update_eval_members(void)
{
scene = DEG_get_evaluated_scene(depsgraph);
view_layer = DEG_get_evaluated_view_layer(depsgraph);
// camera_eval_object = (camera_orig_object) ?
// DEG_get_evaluated_object(depsgraph, camera_orig_object) :
// nullptr;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sync
*
* Sync will gather data from the scene that can change over a time step (i.e: motion steps).
* IMPORTANT: xxx.sync() functions are responsible for creating DRW resources (i.e: DRWView) as
* well as querying the temp texture pool. All DRWPasses should be ready by the end of end_sync().
* \{ */
void Instance::begin_sync()
{
materials.begin_sync();
pipelines.sync();
main_view.sync();
world.sync();
}
void Instance::object_sync(Object *ob)
{
const bool is_renderable_type = ELEM(ob->type, OB_CURVES, OB_GPENCIL, OB_MESH);
const int ob_visibility = DRW_object_visibility_in_active_context(ob);
const bool partsys_is_visible = (ob_visibility & OB_VISIBLE_PARTICLES) != 0 &&
(ob->type == OB_MESH);
const bool object_is_visible = DRW_object_is_renderable(ob) &&
(ob_visibility & OB_VISIBLE_SELF) != 0;
if (!is_renderable_type || (!partsys_is_visible && !object_is_visible)) {
return;
}
ObjectHandle &ob_handle = sync.sync_object(ob);
if (partsys_is_visible && ob != DRW_context_state_get()->object_edit) {
LISTBASE_FOREACH (ModifierData *, md, &ob->modifiers) {
if (md->type == eModifierType_ParticleSystem) {
sync.sync_curves(ob, ob_handle, md);
}
}
}
if (object_is_visible) {
switch (ob->type) {
case OB_LAMP:
break;
case OB_MESH:
case OB_CURVES_LEGACY:
case OB_SURF:
case OB_FONT:
case OB_MBALL: {
sync.sync_mesh(ob, ob_handle);
break;
}
case OB_VOLUME:
break;
case OB_CURVES:
sync.sync_curves(ob, ob_handle);
break;
case OB_GPENCIL:
sync.sync_gpencil(ob, ob_handle);
break;
default:
break;
}
}
ob_handle.reset_recalc_flag();
}
void Instance::end_sync(void)
{
}
void Instance::render_sync(void)
{
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Rendering
* \{ */
/**
* Conceptually renders one sample per pixel.
* Everything based on random sampling should be done here (i.e: DRWViews jitter)
**/
void Instance::render_sample(void)
{
main_view.render();
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Interface
* \{ */
void Instance::render_frame(RenderLayer *render_layer, const char *view_name)
{
}
void Instance::draw_viewport(DefaultFramebufferList *dfbl)
{
render_sample();
}
/** \} */
} // namespace blender::eevee
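For orientation, a minimal sketch (not part of this commit) of the order in which the draw manager callbacks in eevee_engine.cc drive an Instance; the surrounding variables are assumed to be set up by the caller:

instance.init(size, &rect, nullptr, depsgraph, nullptr, camera, nullptr, drw_view, v3d, rv3d);
instance.begin_sync();
/* In practice DRW iterates the visible objects and calls the cache_populate callback. */
for (Object *ob : visible_objects) {
  instance.object_sync(ob);
}
instance.end_sync();
/* Viewport drawing; internally calls render_sample(). */
instance.draw_viewport(dfbl);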

View File

@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* A renderer instance that contains all data to render a full frame.
*/
#pragma once
#include "BKE_object.h"
#include "DEG_depsgraph.h"
#include "DNA_lightprobe_types.h"
#include "DRW_render.h"
#include "eevee_material.hh"
#include "eevee_pipeline.hh"
#include "eevee_shader.hh"
#include "eevee_sync.hh"
#include "eevee_view.hh"
#include "eevee_world.hh"
namespace blender::eevee {
/**
* \class Instance
* \brief A running instance of the engine.
*/
class Instance {
public:
ShaderModule &shaders;
SyncModule sync;
MaterialModule materials;
PipelineModule pipelines;
MainView main_view;
World world;
/** Input data. */
Depsgraph *depsgraph;
/** Evaluated IDs. */
Scene *scene;
ViewLayer *view_layer;
/** Only available when rendering for final render. */
const RenderLayer *render_layer;
RenderEngine *render;
/** Only available when rendering for viewport. */
const DRWView *drw_view;
const View3D *v3d;
const RegionView3D *rv3d;
/* Info string displayed at the top of the render / viewport. */
char info[64];
public:
Instance()
: shaders(*ShaderModule::module_get()),
sync(*this),
materials(*this),
pipelines(*this),
main_view(*this),
world(*this){};
~Instance(){};
void init(const int2 &output_res,
const rcti *output_rect,
RenderEngine *render,
Depsgraph *depsgraph,
const LightProbe *light_probe_ = nullptr,
Object *camera_object = nullptr,
const RenderLayer *render_layer = nullptr,
const DRWView *drw_view = nullptr,
const View3D *v3d = nullptr,
const RegionView3D *rv3d = nullptr);
void begin_sync(void);
void object_sync(Object *ob);
void end_sync(void);
void render_sync(void);
void render_frame(RenderLayer *render_layer, const char *view_name);
void draw_viewport(DefaultFramebufferList *dfbl);
private:
void render_sample(void);
void mesh_sync(Object *ob, ObjectHandle &ob_handle);
void update_eval_members(void);
};
} // namespace blender::eevee

View File

@ -0,0 +1,320 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*/
#include "DNA_material_types.h"
#include "BKE_lib_id.h"
#include "BKE_material.h"
#include "BKE_node.h"
#include "NOD_shader.h"
#include "eevee_instance.hh"
#include "eevee_material.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name Default Material
*
* \{ */
DefaultSurfaceNodeTree::DefaultSurfaceNodeTree()
{
bNodeTree *ntree = ntreeAddTree(nullptr, "Shader Nodetree", ntreeType_Shader->idname);
bNode *bsdf = nodeAddStaticNode(nullptr, ntree, SH_NODE_BSDF_PRINCIPLED);
bNode *output = nodeAddStaticNode(nullptr, ntree, SH_NODE_OUTPUT_MATERIAL);
bNodeSocket *bsdf_out = nodeFindSocket(bsdf, SOCK_OUT, "BSDF");
bNodeSocket *output_in = nodeFindSocket(output, SOCK_IN, "Surface");
nodeAddLink(ntree, bsdf, bsdf_out, output, output_in);
nodeSetActive(ntree, output);
color_socket_ =
(bNodeSocketValueRGBA *)nodeFindSocket(bsdf, SOCK_IN, "Base Color")->default_value;
metallic_socket_ =
(bNodeSocketValueFloat *)nodeFindSocket(bsdf, SOCK_IN, "Metallic")->default_value;
roughness_socket_ =
(bNodeSocketValueFloat *)nodeFindSocket(bsdf, SOCK_IN, "Roughness")->default_value;
specular_socket_ =
(bNodeSocketValueFloat *)nodeFindSocket(bsdf, SOCK_IN, "Specular")->default_value;
ntree_ = ntree;
}
DefaultSurfaceNodeTree::~DefaultSurfaceNodeTree()
{
ntreeFreeEmbeddedTree(ntree_);
MEM_SAFE_FREE(ntree_);
}
/* Configure a default nodetree with the given material. */
bNodeTree *DefaultSurfaceNodeTree::nodetree_get(::Material *ma)
{
/* WARNING: This function is not threadsafe. Which is not a problem for the moment. */
copy_v3_fl3(color_socket_->value, ma->r, ma->g, ma->b);
metallic_socket_->value = ma->metallic;
roughness_socket_->value = ma->roughness;
specular_socket_->value = ma->spec;
return ntree_;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Material
*
* \{ */
MaterialModule::MaterialModule(Instance &inst) : inst_(inst)
{
{
bNodeTree *ntree = ntreeAddTree(nullptr, "Shader Nodetree", ntreeType_Shader->idname);
diffuse_mat_ = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default diffuse");
diffuse_mat_->nodetree = ntree;
diffuse_mat_->use_nodes = true;
/* To use the forward pipeline. */
diffuse_mat_->blend_method = MA_BM_BLEND;
bNode *bsdf = nodeAddStaticNode(nullptr, ntree, SH_NODE_BSDF_DIFFUSE);
bNodeSocket *base_color = nodeFindSocket(bsdf, SOCK_IN, "Color");
copy_v3_fl(((bNodeSocketValueRGBA *)base_color->default_value)->value, 0.8f);
bNode *output = nodeAddStaticNode(nullptr, ntree, SH_NODE_OUTPUT_MATERIAL);
nodeAddLink(ntree,
bsdf,
nodeFindSocket(bsdf, SOCK_OUT, "BSDF"),
output,
nodeFindSocket(output, SOCK_IN, "Surface"));
nodeSetActive(ntree, output);
}
{
bNodeTree *ntree = ntreeAddTree(nullptr, "Shader Nodetree", ntreeType_Shader->idname);
glossy_mat_ = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default metal");
glossy_mat_->nodetree = ntree;
glossy_mat_->use_nodes = true;
/* To use the forward pipeline. */
glossy_mat_->blend_method = MA_BM_BLEND;
bNode *bsdf = nodeAddStaticNode(nullptr, ntree, SH_NODE_BSDF_GLOSSY);
bNodeSocket *base_color = nodeFindSocket(bsdf, SOCK_IN, "Color");
copy_v3_fl(((bNodeSocketValueRGBA *)base_color->default_value)->value, 1.0f);
bNodeSocket *roughness = nodeFindSocket(bsdf, SOCK_IN, "Roughness");
((bNodeSocketValueFloat *)roughness->default_value)->value = 0.0f;
bNode *output = nodeAddStaticNode(nullptr, ntree, SH_NODE_OUTPUT_MATERIAL);
nodeAddLink(ntree,
bsdf,
nodeFindSocket(bsdf, SOCK_OUT, "BSDF"),
output,
nodeFindSocket(output, SOCK_IN, "Surface"));
nodeSetActive(ntree, output);
}
{
bNodeTree *ntree = ntreeAddTree(nullptr, "Shader Nodetree", ntreeType_Shader->idname);
error_mat_ = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default error");
error_mat_->nodetree = ntree;
error_mat_->use_nodes = true;
/* Use emission and output material to be compatible with both World and Material. */
bNode *bsdf = nodeAddStaticNode(nullptr, ntree, SH_NODE_EMISSION);
bNodeSocket *color = nodeFindSocket(bsdf, SOCK_IN, "Color");
copy_v3_fl3(((bNodeSocketValueRGBA *)color->default_value)->value, 1.0f, 0.0f, 1.0f);
bNode *output = nodeAddStaticNode(nullptr, ntree, SH_NODE_OUTPUT_MATERIAL);
nodeAddLink(ntree,
bsdf,
nodeFindSocket(bsdf, SOCK_OUT, "Emission"),
output,
nodeFindSocket(output, SOCK_IN, "Surface"));
nodeSetActive(ntree, output);
}
}
MaterialModule::~MaterialModule()
{
for (Material *mat : material_map_.values()) {
delete mat;
}
BKE_id_free(nullptr, glossy_mat_);
BKE_id_free(nullptr, diffuse_mat_);
BKE_id_free(nullptr, error_mat_);
}
void MaterialModule::begin_sync(void)
{
queued_shaders_count_ = 0;
for (Material *mat : material_map_.values()) {
mat->init = false;
}
shader_map_.clear();
}
MaterialPass MaterialModule::material_pass_get(::Material *blender_mat,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type)
{
bNodeTree *ntree = (blender_mat->use_nodes && blender_mat->nodetree != nullptr) ?
blender_mat->nodetree :
default_surface_ntree_.nodetree_get(blender_mat);
MaterialPass matpass;
matpass.gpumat = inst_.shaders.material_shader_get(
blender_mat, ntree, pipeline_type, geometry_type, true);
switch (GPU_material_status(matpass.gpumat)) {
case GPU_MAT_SUCCESS:
break;
case GPU_MAT_QUEUED:
queued_shaders_count_++;
blender_mat = (geometry_type == MAT_GEOM_VOLUME) ? BKE_material_default_volume() :
BKE_material_default_surface();
matpass.gpumat = inst_.shaders.material_shader_get(
blender_mat, blender_mat->nodetree, pipeline_type, geometry_type, false);
break;
case GPU_MAT_FAILED:
default:
matpass.gpumat = inst_.shaders.material_shader_get(
error_mat_, error_mat_->nodetree, pipeline_type, geometry_type, false);
break;
}
/* Returned material should be ready to be drawn. */
BLI_assert(GPU_material_status(matpass.gpumat) == GPU_MAT_SUCCESS);
if (GPU_material_recalc_flag_get(matpass.gpumat)) {
// inst_.sampling.reset();
}
if ((pipeline_type == MAT_PIPE_DEFERRED) &&
GPU_material_flag_get(matpass.gpumat, GPU_MATFLAG_SHADER_TO_RGBA)) {
pipeline_type = MAT_PIPE_FORWARD;
}
if ((pipeline_type == MAT_PIPE_FORWARD) &&
GPU_material_flag_get(matpass.gpumat, GPU_MATFLAG_TRANSPARENT)) {
/* Transparent needs to use one shgroup per object to support reordering. */
matpass.shgrp = inst_.pipelines.material_add(blender_mat, matpass.gpumat, pipeline_type);
}
else {
ShaderKey shader_key(matpass.gpumat, geometry_type, pipeline_type);
auto add_cb = [&]() -> DRWShadingGroup * {
/* First time encountering this shader. Create a shading group. */
return inst_.pipelines.material_add(blender_mat, matpass.gpumat, pipeline_type);
};
DRWShadingGroup *grp = shader_map_.lookup_or_add_cb(shader_key, add_cb);
if (grp != nullptr) {
/* Shading group for this shader already exists. Create a sub one for this material. */
/* IMPORTANT: We always create a subgroup so that all subgroups are inserted after the
* first "empty" shgroup. This avoids messing up the order of subgroups when there are more
* nested subgroups (i.e: hair drawing). */
/* TODO(fclem) Remove material resource binding from the first group creation. */
matpass.shgrp = DRW_shgroup_create_sub(grp);
DRW_shgroup_add_material_resources(matpass.shgrp, matpass.gpumat);
}
}
return matpass;
}
Material &MaterialModule::material_sync(::Material *blender_mat, eMaterialGeometry geometry_type)
{
eMaterialPipeline surface_pipe = (blender_mat->blend_method == MA_BM_BLEND) ? MAT_PIPE_FORWARD :
MAT_PIPE_DEFERRED;
eMaterialPipeline prepass_pipe = (blender_mat->blend_method == MA_BM_BLEND) ?
MAT_PIPE_FORWARD_PREPASS :
MAT_PIPE_DEFERRED_PREPASS;
/* Test */
surface_pipe = MAT_PIPE_FORWARD;
prepass_pipe = MAT_PIPE_FORWARD_PREPASS;
MaterialKey material_key(blender_mat, geometry_type, surface_pipe);
/* TODO allocate in blocks to avoid memory fragmentation. */
auto add_cb = [&]() { return new Material(); };
Material &mat = *material_map_.lookup_or_add_cb(material_key, add_cb);
/* Forward pipeline needs to use one shgroup per object. */
if (mat.init == false || (surface_pipe == MAT_PIPE_FORWARD)) {
mat.init = true;
/* Order is important for transparent. */
mat.prepass = material_pass_get(blender_mat, prepass_pipe, geometry_type);
mat.shading = material_pass_get(blender_mat, surface_pipe, geometry_type);
if (blender_mat->blend_shadow == MA_BS_NONE) {
mat.shadow = MaterialPass();
}
else {
mat.shadow = material_pass_get(blender_mat, MAT_PIPE_SHADOW, geometry_type);
}
mat.is_alpha_blend_transparent = (blender_mat->blend_method == MA_BM_BLEND) &&
GPU_material_flag_get(mat.prepass.gpumat,
GPU_MATFLAG_TRANSPARENT);
}
return mat;
}
/* Return correct material or empty default material if slot is empty. */
::Material *MaterialModule::material_from_slot(Object *ob, int slot)
{
if (ob->base_flag & BASE_HOLDOUT) {
return BKE_material_default_holdout();
}
::Material *ma = BKE_object_material_get(ob, slot + 1);
if (ma == nullptr) {
if (ob->type == OB_VOLUME) {
return BKE_material_default_volume();
}
else {
return BKE_material_default_surface();
}
}
return ma;
}
/* Returned Material references are valid until the next call to this function or
* material_get(). */
MaterialArray &MaterialModule::material_array_get(Object *ob)
{
material_array_.materials.clear();
material_array_.gpu_materials.clear();
const int materials_len = DRW_cache_object_material_count_get(ob);
for (auto i : IndexRange(materials_len)) {
::Material *blender_mat = material_from_slot(ob, i);
Material &mat = material_sync(blender_mat, to_material_geometry(ob));
material_array_.materials.append(&mat);
material_array_.gpu_materials.append(mat.shading.gpumat);
}
return material_array_;
}
/* Returned Material references are valid until the next call to this function or
* material_array_get(). */
Material &MaterialModule::material_get(Object *ob, int mat_nr, eMaterialGeometry geometry_type)
{
::Material *blender_mat = material_from_slot(ob, mat_nr);
Material &mat = material_sync(blender_mat, geometry_type);
return mat;
}
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,257 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*/
#pragma once
#include "DRW_render.h"
#include "BLI_map.hh"
#include "BLI_vector.hh"
#include "GPU_material.h"
#include "eevee_sync.hh"
namespace blender::eevee {
class Instance;
/* -------------------------------------------------------------------- */
/** \name MaterialKey
*
* \{ */
enum eMaterialPipeline {
MAT_PIPE_DEFERRED = 0,
MAT_PIPE_FORWARD = 1,
MAT_PIPE_DEFERRED_PREPASS = 2,
MAT_PIPE_FORWARD_PREPASS = 3,
MAT_PIPE_VOLUME = 4,
MAT_PIPE_SHADOW = 5,
};
enum eMaterialGeometry {
MAT_GEOM_MESH = 0,
MAT_GEOM_CURVES = 1,
MAT_GEOM_GPENCIL = 2,
MAT_GEOM_VOLUME = 3,
MAT_GEOM_WORLD = 4,
};
static inline void material_type_from_shader_uuid(uint64_t shader_uuid,
eMaterialPipeline &pipeline_type,
eMaterialGeometry &geometry_type)
{
const uint64_t geometry_mask = ((1u << 3u) - 1u);
const uint64_t pipeline_mask = ((1u << 3u) - 1u);
geometry_type = static_cast<eMaterialGeometry>(shader_uuid & geometry_mask);
pipeline_type = static_cast<eMaterialPipeline>((shader_uuid >> 3u) & pipeline_mask);
}
static inline uint64_t shader_uuid_from_material_type(eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type)
{
return geometry_type | (pipeline_type << 3);
}
ENUM_OPERATORS(eClosureBits, CLOSURE_AMBIENT_OCCLUSION)
static inline eClosureBits shader_closure_bits_from_flag(const GPUMaterial *gpumat)
{
eClosureBits closure_bits = eClosureBits(0);
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_DIFFUSE)) {
closure_bits |= CLOSURE_DIFFUSE;
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
closure_bits |= CLOSURE_TRANSPARENCY;
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_EMISSION)) {
closure_bits |= CLOSURE_EMISSION;
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_GLOSSY)) {
closure_bits |= CLOSURE_REFLECTION;
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_SUBSURFACE)) {
closure_bits |= CLOSURE_SSS;
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_REFRACT)) {
closure_bits |= CLOSURE_REFRACTION;
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_HOLDOUT)) {
closure_bits |= CLOSURE_HOLDOUT;
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_AO)) {
closure_bits |= CLOSURE_AMBIENT_OCCLUSION;
}
return closure_bits;
}
static inline eMaterialGeometry to_material_geometry(const Object *ob)
{
switch (ob->type) {
case OB_CURVES:
return MAT_GEOM_CURVES;
case OB_VOLUME:
return MAT_GEOM_VOLUME;
case OB_GPENCIL:
return MAT_GEOM_GPENCIL;
default:
return MAT_GEOM_MESH;
}
}
/** Unique key to identify each material in the hashmap. */
struct MaterialKey {
Material *mat;
uint64_t options;
MaterialKey(::Material *mat_, eMaterialGeometry geometry, eMaterialPipeline surface_pipeline)
: mat(mat_)
{
options = shader_uuid_from_material_type(surface_pipeline, geometry);
}
uint64_t hash(void) const
{
BLI_assert(options < sizeof(*mat));
return (uint64_t)mat + options;
}
bool operator<(const MaterialKey &k) const
{
return (mat < k.mat) || (options < k.options);
}
bool operator==(const MaterialKey &k) const
{
return (mat == k.mat) && (options == k.options);
}
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name ShaderKey
*
* \{ */
struct ShaderKey {
GPUShader *shader;
uint64_t options;
ShaderKey(GPUMaterial *gpumat, eMaterialGeometry geometry, eMaterialPipeline pipeline)
{
shader = GPU_material_get_shader(gpumat);
options = shader_uuid_from_material_type(pipeline, geometry);
options = (options << 16u) | shader_closure_bits_from_flag(gpumat);
}
uint64_t hash(void) const
{
return (uint64_t)shader + options;
}
bool operator<(const ShaderKey &k) const
{
return (shader == k.shader) ? (options < k.options) : (shader < k.shader);
}
bool operator==(const ShaderKey &k) const
{
return (shader == k.shader) && (options == k.options);
}
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Default Material Nodetree
*
* In order to support materials without a nodetree, we reuse and configure a standalone nodetree that
* we pass for shader generation. The GPUMaterial is still stored inside the Material even if
* it does not use the same nodetree.
*
* \{ */
class DefaultSurfaceNodeTree {
private:
bNodeTree *ntree_;
bNodeSocketValueRGBA *color_socket_;
bNodeSocketValueFloat *metallic_socket_;
bNodeSocketValueFloat *roughness_socket_;
bNodeSocketValueFloat *specular_socket_;
public:
DefaultSurfaceNodeTree();
~DefaultSurfaceNodeTree();
bNodeTree *nodetree_get(::Material *ma);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Material
*
* \{ */
struct MaterialPass {
GPUMaterial *gpumat = nullptr;
DRWShadingGroup *shgrp = nullptr;
};
struct Material {
bool init = false;
bool is_alpha_blend_transparent;
MaterialPass shadow, shading, prepass;
};
struct MaterialArray {
Vector<Material *> materials;
Vector<GPUMaterial *> gpu_materials;
};
class MaterialModule {
public:
::Material *diffuse_mat_;
::Material *glossy_mat_;
private:
Instance &inst_;
Map<MaterialKey, Material *> material_map_;
Map<ShaderKey, DRWShadingGroup *> shader_map_;
MaterialArray material_array_;
DefaultSurfaceNodeTree default_surface_ntree_;
::Material *error_mat_;
int64_t queued_shaders_count_ = 0;
public:
MaterialModule(Instance &inst);
~MaterialModule();
void begin_sync(void);
MaterialArray &material_array_get(Object *ob);
Material &material_get(Object *ob, int mat_nr, eMaterialGeometry geometry_type);
private:
Material &material_sync(::Material *blender_mat, eMaterialGeometry geometry_type);
::Material *material_from_slot(Object *ob, int slot);
MaterialPass material_pass_get(::Material *blender_mat,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type);
};
/** \} */
} // namespace blender::eevee
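A round-trip sketch (illustration only) of the uuid packing helpers above: the geometry type occupies the low 3 bits and the pipeline type the next 3, so both enums must keep fewer than 8 entries.

uint64_t uuid = shader_uuid_from_material_type(MAT_PIPE_FORWARD, MAT_GEOM_CURVES);
/* uuid == (MAT_PIPE_FORWARD << 3) | MAT_GEOM_CURVES == 9 */
eMaterialPipeline pipeline_type;
eMaterialGeometry geometry_type;
material_type_from_shader_uuid(uuid, pipeline_type, geometry_type);
BLI_assert(pipeline_type == MAT_PIPE_FORWARD && geometry_type == MAT_GEOM_CURVES);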

View File

@ -0,0 +1,220 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* Shading passes contain drawcalls specific to shading pipelines.
* They are to be shared across views.
* This file is only for shading passes. Other passes are declared in their own module.
*/
#include "eevee_instance.hh"
#include "eevee_pipeline.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name World Pipeline
*
* Used to draw background.
* \{ */
void WorldPipeline::sync(GPUMaterial *gpumat)
{
DRWState state = DRW_STATE_WRITE_COLOR;
world_ps_ = DRW_pass_create("World", state);
/* Push a matrix at the same location as the camera. */
float4x4 camera_mat = float4x4::identity();
// copy_v3_v3(camera_mat[3], inst_.camera.data_get().viewinv[3]);
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, world_ps_);
DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
DRW_shgroup_call_obmat(grp, DRW_cache_fullscreen_quad_get(), camera_mat.ptr());
}
void WorldPipeline::render(void)
{
DRW_draw_pass(world_ps_);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Forward Pass
*
* NPR materials (using Closure to RGBA) or materials using ALPHA_BLEND.
* \{ */
void ForwardPipeline::sync(void)
{
{
DRWState state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS;
prepass_ps_ = DRW_pass_create("Forward.Opaque.Prepass", state);
state |= DRW_STATE_CULL_BACK;
prepass_culled_ps_ = DRW_pass_create("Forward.Opaque.Prepass.Culled", state);
DRW_pass_link(prepass_ps_, prepass_culled_ps_);
}
{
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL;
opaque_ps_ = DRW_pass_create("Forward.Opaque", state);
state |= DRW_STATE_CULL_BACK;
opaque_culled_ps_ = DRW_pass_create("Forward.Opaque.Culled", state);
DRW_pass_link(opaque_ps_, opaque_culled_ps_);
}
{
DRWState state = DRW_STATE_DEPTH_LESS_EQUAL;
transparent_ps_ = DRW_pass_create("Forward.Transparent", state);
}
}
DRWShadingGroup *ForwardPipeline::material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat)
{
DRWPass *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ? opaque_culled_ps_ : opaque_ps_;
// LightModule &lights = inst_.lights;
// LightProbeModule &lightprobes = inst_.lightprobes;
// RaytracingModule &raytracing = inst_.raytracing;
// eGPUSamplerState no_interp = GPU_SAMPLER_DEFAULT;
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, pass);
// lights.shgroup_resources(grp);
// DRW_shgroup_uniform_block(grp, "sampling_buf", inst_.sampling.ubo_get());
// DRW_shgroup_uniform_block(grp, "grids_buf", lightprobes.grid_ubo_get());
// DRW_shgroup_uniform_block(grp, "cubes_buf", lightprobes.cube_ubo_get());
// DRW_shgroup_uniform_block(grp, "probes_buf", lightprobes.info_ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_grid_tx", lightprobes.grid_tx_ref_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_cube_tx", lightprobes.cube_tx_ref_get());
DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
/* TODO(fclem): Make this only needed if material uses it ... somehow. */
// if (true) {
// DRW_shgroup_uniform_texture_ref(
// grp, "sss_transmittance_tx", inst_.subsurface.transmittance_ref_get());
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "rt_diffuse_buf", raytracing.diffuse_data);
// DRW_shgroup_uniform_block(grp, "rt_reflection_buf", raytracing.reflection_data);
// DRW_shgroup_uniform_block(grp, "rt_refraction_buf", raytracing.refraction_data);
// DRW_shgroup_uniform_texture_ref_ex(grp, "radiance_tx", &input_screen_radiance_tx_,
// no_interp);
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "hiz_buf", inst_.hiz.ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "hiz_tx", inst_.hiz_front.texture_ref_get());
// }
return grp;
}
DRWShadingGroup *ForwardPipeline::prepass_opaque_add(::Material *blender_mat, GPUMaterial *gpumat)
{
DRWPass *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ? prepass_culled_ps_ :
prepass_ps_;
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, pass);
return grp;
}
DRWShadingGroup *ForwardPipeline::material_transparent_add(::Material *blender_mat,
GPUMaterial *gpumat)
{
// LightModule &lights = inst_.lights;
// LightProbeModule &lightprobes = inst_.lightprobes;
// RaytracingModule &raytracing = inst_.raytracing;
// eGPUSamplerState no_interp = GPU_SAMPLER_DEFAULT;
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, transparent_ps_);
// lights.shgroup_resources(grp);
// DRW_shgroup_uniform_block(grp, "sampling_buf", inst_.sampling.ubo_get());
// DRW_shgroup_uniform_block(grp, "grids_buf", lightprobes.grid_ubo_get());
// DRW_shgroup_uniform_block(grp, "cubes_buf", lightprobes.cube_ubo_get());
// DRW_shgroup_uniform_block(grp, "probes_buf", lightprobes.info_ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_grid_tx", lightprobes.grid_tx_ref_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_cube_tx", lightprobes.cube_tx_ref_get());
// DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
/* TODO(fclem): Make this only needed if material uses it ... somehow. */
// if (true) {
// DRW_shgroup_uniform_texture_ref(
// grp, "sss_transmittance_tx", inst_.subsurface.transmittance_ref_get());
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "rt_diffuse_buf", raytracing.diffuse_data);
// DRW_shgroup_uniform_block(grp, "rt_reflection_buf", raytracing.reflection_data);
// DRW_shgroup_uniform_block(grp, "rt_refraction_buf", raytracing.refraction_data);
// DRW_shgroup_uniform_texture_ref_ex(
// grp, "rt_radiance_tx", &input_screen_radiance_tx_, no_interp);
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "hiz_buf", inst_.hiz.ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "hiz_tx", inst_.hiz_front.texture_ref_get());
// }
DRWState state_disable = DRW_STATE_WRITE_DEPTH;
DRWState state_enable = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
if (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) {
state_enable |= DRW_STATE_CULL_BACK;
}
DRW_shgroup_state_disable(grp, state_disable);
DRW_shgroup_state_enable(grp, state_enable);
return grp;
}
DRWShadingGroup *ForwardPipeline::prepass_transparent_add(::Material *blender_mat,
GPUMaterial *gpumat)
{
if ((blender_mat->blend_flag & MA_BL_HIDE_BACKFACE) == 0) {
return nullptr;
}
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, transparent_ps_);
DRWState state_disable = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
DRWState state_enable = DRW_STATE_WRITE_DEPTH;
if (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) {
state_enable |= DRW_STATE_CULL_BACK;
}
DRW_shgroup_state_disable(grp, state_disable);
DRW_shgroup_state_enable(grp, state_enable);
return grp;
}
void ForwardPipeline::render(const DRWView *view,
GPUTexture *depth_tx,
GPUTexture *UNUSED(combined_tx))
{
// HiZBuffer &hiz = inst_.hiz_front;
DRW_stats_group_start("ForwardOpaque");
DRW_draw_pass(prepass_ps_);
// hiz.set_dirty();
// if (inst_.raytracing.enabled()) {
// rt_buffer.radiance_copy(combined_tx);
// hiz.update(depth_tx);
// }
// inst_.shadows.set_view(view, depth_tx);
DRW_draw_pass(opaque_ps_);
DRW_stats_group_end();
DRW_stats_group_start("ForwardTransparent");
/* TODO(fclem) This is suboptimal. We could sort during sync. */
/* FIXME(fclem) This won't work for panoramic, where we need
* to sort by distance to camera, not by z. */
DRW_pass_sort_shgroup_z(transparent_ps_);
DRW_draw_pass(transparent_ps_);
DRW_stats_group_end();
// if (inst_.raytracing.enabled()) {
// gbuffer.ray_radiance_tx.release();
// }
}
/** \} */
} // namespace blender::eevee
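Regarding the FIXME above: one possible direction (sketch only, names hypothetical) would be a per-shgroup sort key based on distance to the camera position instead of view-space z:

#include <cmath>

/* Sketch: back-to-front key for panoramic projections. Larger distances sort first. */
static float transparent_sort_key(const float3 &object_center, const float3 &camera_position)
{
  const float3 delta = object_center - camera_position;
  return -std::sqrt(delta.x * delta.x + delta.y * delta.y + delta.z * delta.z);
}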

View File

@ -0,0 +1,216 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* Shading passes contain drawcalls specific to shading pipelines.
* They are shared across views.
* This file is only for shading passes. Other passes are declared in their own module.
*/
#pragma once
#include "DRW_render.h"
/* TODO(fclem): Move it to GPU/DRAW. */
#include "../eevee/eevee_lut.h"
namespace blender::eevee {
class Instance;
/* -------------------------------------------------------------------- */
/** \name World Pipeline
*
* Render world values.
* \{ */
class WorldPipeline {
private:
Instance &inst_;
DRWPass *world_ps_ = nullptr;
public:
WorldPipeline(Instance &inst) : inst_(inst){};
void sync(GPUMaterial *gpumat);
void render(void);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Forward Pass
*
* Handles alpha blended surfaces and NPR materials (using Closure to RGBA).
* \{ */
class ForwardPipeline {
private:
Instance &inst_;
DRWPass *prepass_ps_ = nullptr;
DRWPass *prepass_culled_ps_ = nullptr;
DRWPass *opaque_ps_ = nullptr;
DRWPass *opaque_culled_ps_ = nullptr;
DRWPass *transparent_ps_ = nullptr;
// GPUTexture *input_screen_radiance_tx_ = nullptr;
public:
ForwardPipeline(Instance &inst) : inst_(inst){};
void sync(void);
DRWShadingGroup *material_add(::Material *blender_mat, GPUMaterial *gpumat)
{
return (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) ?
material_transparent_add(blender_mat, gpumat) :
material_opaque_add(blender_mat, gpumat);
}
DRWShadingGroup *prepass_add(::Material *blender_mat, GPUMaterial *gpumat)
{
return (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) ?
prepass_transparent_add(blender_mat, gpumat) :
prepass_opaque_add(blender_mat, gpumat);
}
DRWShadingGroup *material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *prepass_opaque_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *material_transparent_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *prepass_transparent_add(::Material *blender_mat, GPUMaterial *gpumat);
void render(const DRWView *view, GPUTexture *depth_tx, GPUTexture *combined_tx);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utility texture
*
* 64x64 2D array texture containing LUTs and blue noise.
* \{ */
class UtilityTexture : public Texture {
struct Layer {
float data[UTIL_TEX_SIZE * UTIL_TEX_SIZE][4];
};
static constexpr int lut_size = UTIL_TEX_SIZE;
static constexpr int lut_size_sqr = lut_size * lut_size;
static constexpr int layer_count = 4 + UTIL_BTDF_LAYER_COUNT;
public:
UtilityTexture() : Texture("UtilityTx", GPU_RGBA16F, int2(lut_size), layer_count, nullptr)
{
#ifdef RUNTIME_LUT_CREATION
float *bsdf_ggx_lut = EEVEE_lut_update_ggx_brdf(lut_size);
float(*btdf_ggx_lut)[lut_size_sqr * 2] = (float(*)[lut_size_sqr * 2])
EEVEE_lut_update_ggx_btdf(lut_size, UTIL_BTDF_LAYER_COUNT);
#else
const float *bsdf_ggx_lut = bsdf_split_sum_ggx;
const float(*btdf_ggx_lut)[lut_size_sqr * 2] = btdf_split_sum_ggx;
#endif
Vector<Layer> data(layer_count);
{
Layer &layer = data[UTIL_BLUE_NOISE_LAYER];
memcpy(layer.data, blue_noise, sizeof(layer));
}
{
Layer &layer = data[UTIL_LTC_MAT_LAYER];
memcpy(layer.data, ltc_mat_ggx, sizeof(layer));
}
{
Layer &layer = data[UTIL_LTC_MAG_LAYER];
for (auto i : IndexRange(lut_size_sqr)) {
layer.data[i][0] = bsdf_ggx_lut[i * 2 + 0];
layer.data[i][1] = bsdf_ggx_lut[i * 2 + 1];
layer.data[i][2] = ltc_mag_ggx[i * 2 + 0];
layer.data[i][3] = ltc_mag_ggx[i * 2 + 1];
}
BLI_assert(UTIL_LTC_MAG_LAYER == UTIL_BSDF_LAYER);
}
{
Layer &layer = data[UTIL_DISK_INTEGRAL_LAYER];
for (auto i : IndexRange(lut_size_sqr)) {
layer.data[i][UTIL_DISK_INTEGRAL_COMP] = ltc_disk_integral[i];
}
}
{
for (auto layer_id : IndexRange(16)) {
Layer &layer = data[3 + layer_id];
for (auto i : IndexRange(lut_size_sqr)) {
layer.data[i][0] = btdf_ggx_lut[layer_id][i * 2 + 0];
layer.data[i][1] = btdf_ggx_lut[layer_id][i * 2 + 1];
}
}
}
GPU_texture_update_mipmap(*this, 0, GPU_DATA_FLOAT, data.data());
}
~UtilityTexture(){};
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Pipelines
*
* Contains Shading passes. Shared between views. Objects will subscribe to at least one of them.
* \{ */
class PipelineModule {
public:
WorldPipeline world;
// DeferredPipeline deferred;
ForwardPipeline forward;
// ShadowPipeline shadow;
// VelocityPipeline velocity;
UtilityTexture utility_tx;
public:
PipelineModule(Instance &inst) : world(inst), forward(inst){};
void sync()
{
// deferred.sync();
forward.sync();
// shadow.sync();
// velocity.sync();
}
DRWShadingGroup *material_add(::Material *blender_mat,
GPUMaterial *gpumat,
eMaterialPipeline pipeline_type)
{
switch (pipeline_type) {
case MAT_PIPE_DEFERRED_PREPASS:
// return deferred.prepass_add(blender_mat, gpumat);
break;
case MAT_PIPE_FORWARD_PREPASS:
return forward.prepass_add(blender_mat, gpumat);
case MAT_PIPE_DEFERRED:
// return deferred.material_add(blender_mat, gpumat);
break;
case MAT_PIPE_FORWARD:
return forward.material_add(blender_mat, gpumat);
case MAT_PIPE_VOLUME:
/* TODO(fclem) volume pass. */
return nullptr;
case MAT_PIPE_SHADOW:
// return shadow.material_add(blender_mat, gpumat);
break;
}
return nullptr;
}
};
/** \} */
} // namespace blender::eevee
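A summary (illustration only) of the layer layout that the UtilityTexture constructor above produces, assuming the UTIL_* constants from the shared shader header are visible here:

/* Layer 0 (UTIL_BLUE_NOISE_LAYER): blue noise.
 * Layer 1 (UTIL_LTC_MAT_LAYER):    LTC matrix.
 * Layer 2 (UTIL_BSDF_LAYER):       GGX split-sum BRDF in RG, LTC magnitude in BA.
 * Layer 3 (UTIL_BTDF_LAYER):       first GGX BTDF slice in RG, disk integral in B.
 * Layers 3..3+15:                  one GGX BTDF slice per layer (UTIL_BTDF_LAYER_COUNT of them). */
static_assert(UTIL_LTC_MAG_LAYER == UTIL_BSDF_LAYER, "Shared layer, different components");
static_assert(UTIL_DISK_INTEGRAL_LAYER == UTIL_BTDF_LAYER, "Shared layer, different components");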

View File

@ -0,0 +1,381 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* Shader module that manages shader libraries, deferred compilation,
* and static shader usage.
*/
#include "gpu_shader_create_info.hh"
#include "eevee_shader.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name Module
*
* \{ */
ShaderModule *ShaderModule::g_shader_module = nullptr;
ShaderModule *ShaderModule::module_get()
{
if (g_shader_module == nullptr) {
/* TODO(fclem) threadsafety. */
g_shader_module = new ShaderModule();
}
return g_shader_module;
}
void ShaderModule::module_free()
{
if (g_shader_module != nullptr) {
/* TODO(fclem) threadsafety. */
delete g_shader_module;
g_shader_module = nullptr;
}
}
ShaderModule::ShaderModule()
{
for (GPUShader *&shader : shaders_) {
shader = nullptr;
}
#ifdef DEBUG
/* Ensure all shader are described. */
for (auto i : IndexRange(MAX_SHADER_TYPE)) {
const char *name = static_shader_create_info_name_get(eShaderType(i));
if (name == nullptr) {
std::cerr << "EEVEE: Missing case for eShaderType(" << i
<< ") in static_shader_create_info_name_get().";
BLI_assert(0);
}
const GPUShaderCreateInfo *create_info = GPU_shader_create_info_get(name);
BLI_assert_msg(create_info != nullptr, "EEVEE: Missing create info for static shader.");
}
#endif
}
ShaderModule::~ShaderModule()
{
for (GPUShader *&shader : shaders_) {
DRW_SHADER_FREE_SAFE(shader);
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Static shaders
*
* \{ */
const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_type)
{
switch (shader_type) {
/* To avoid compiler warning about missing case. */
case MAX_SHADER_TYPE:
return "";
}
return "";
}
GPUShader *ShaderModule::static_shader_get(eShaderType shader_type)
{
if (shaders_[shader_type] == nullptr) {
const char *shader_name = static_shader_create_info_name_get(shader_type);
shaders_[shader_type] = GPU_shader_create_from_info_name(shader_name);
if (shaders_[shader_type] == nullptr) {
fprintf(stderr, "EEVEE: error: Could not compile static shader \"%s\"\n", shader_name);
}
BLI_assert(shaders_[shader_type] != nullptr);
}
return shaders_[shader_type];
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPU Materials
*
* \{ */
void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOutput *codegen_)
{
using namespace blender::gpu::shader;
uint64_t shader_uuid = GPU_material_uuid_get(gpumat);
eMaterialPipeline pipeline_type;
eMaterialGeometry geometry_type;
material_type_from_shader_uuid(shader_uuid, pipeline_type, geometry_type);
GPUCodegenOutput &codegen = *codegen_;
ShaderCreateInfo &info = *reinterpret_cast<ShaderCreateInfo *>(codegen.create_info);
info.auto_resource_location(true);
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
info.define("MAT_TRANSPARENT");
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_BARYCENTRIC)) {
switch (geometry_type) {
case MAT_GEOM_MESH:
/* Support using gpu builtin barycentrics. */
info.define("USE_BARYCENTRICS");
info.builtins(BuiltinBits::BARYCENTRIC_COORD);
break;
case MAT_GEOM_CURVES:
/* Support using one vec2 attribute. See #hair_get_barycentric(). */
info.define("USE_BARYCENTRICS");
break;
default:
/* No support */
break;
}
}
std::stringstream global_vars;
switch (geometry_type) {
case MAT_GEOM_MESH:
/** Noop. */
break;
case MAT_GEOM_CURVES:
/** Hair attributes come from the sampler buffer. Transfer attributes to samplers. */
for (auto &input : info.vertex_inputs_) {
if (input.name == "orco") {
/** NOTE: Orco is generated from strand position for now. */
global_vars << input.type << " " << input.name << ";\n";
}
else {
info.sampler(0, ImageType::FLOAT_BUFFER, input.name, Frequency::BATCH);
}
}
info.vertex_inputs_.clear();
break;
case MAT_GEOM_WORLD:
/**
* Only the orco layer is supported by the world and it is procedurally generated. These are
* here to make the attrib_load function calls valid.
*/
ATTR_FALLTHROUGH;
case MAT_GEOM_GPENCIL:
/**
* Only one uv and one color attribute layer are supported by gpencil objects and they are
* already declared in another createInfo. These are here to make the attrib_load
* function calls valid.
*/
for (auto &input : info.vertex_inputs_) {
global_vars << input.type << " " << input.name << ";\n";
}
info.vertex_inputs_.clear();
break;
case MAT_GEOM_VOLUME:
/** No attributes supported. */
info.vertex_inputs_.clear();
break;
}
const bool do_fragment_attrib_load = (geometry_type == MAT_GEOM_WORLD);
if (do_fragment_attrib_load && !info.vertex_out_interfaces_.is_empty()) {
/* Codegen outputs only one interface. */
const StageInterfaceInfo &iface = *info.vertex_out_interfaces_.first();
/* Globals the attrib_load() can write to when it is in the fragment shader. */
global_vars << "struct " << iface.name << " {\n";
for (auto &inout : iface.inouts) {
global_vars << " " << inout.type << " " << inout.name << ";\n";
}
global_vars << "};\n";
global_vars << iface.name << " " << iface.instance_name << ";\n";
info.vertex_out_interfaces_.clear();
}
std::stringstream attr_load;
attr_load << "void attrib_load()\n";
attr_load << "{\n";
attr_load << ((codegen.attr_load) ? codegen.attr_load : "");
attr_load << "}\n\n";
std::stringstream vert_gen, frag_gen;
if (do_fragment_attrib_load) {
frag_gen << global_vars.str() << attr_load.str();
}
else {
vert_gen << global_vars.str() << attr_load.str();
}
{
/* Only mesh, curves and gpencil support vertex displacement for now. */
if (ELEM(geometry_type, MAT_GEOM_MESH, MAT_GEOM_CURVES, MAT_GEOM_GPENCIL)) {
vert_gen << "vec3 nodetree_displacement()\n";
vert_gen << "{\n";
vert_gen << ((codegen.displacement) ? codegen.displacement : "return vec3(0);\n");
vert_gen << "}\n\n";
}
info.vertex_source_generated = vert_gen.str();
}
{
frag_gen << ((codegen.material_functions) ? codegen.material_functions : "\n");
if (codegen.displacement) {
/* Bump displacement. Needed to recompute normals after displacement. */
info.define("MAT_DISPLACEMENT_BUMP");
frag_gen << "vec3 nodetree_displacement()\n";
frag_gen << "{\n";
frag_gen << codegen.displacement;
frag_gen << "}\n\n";
}
frag_gen << "Closure nodetree_surface()\n";
frag_gen << "{\n";
frag_gen << " closure_weights_reset();\n";
frag_gen << ((codegen.surface) ? codegen.surface : "return Closure(0);\n");
frag_gen << "}\n\n";
frag_gen << "Closure nodetree_volume()\n";
frag_gen << "{\n";
frag_gen << " closure_weights_reset();\n";
frag_gen << ((codegen.volume) ? codegen.volume : "return Closure(0);\n");
frag_gen << "}\n\n";
frag_gen << "float nodetree_thickness()\n";
frag_gen << "{\n";
/* TODO(fclem): Better default. */
frag_gen << ((codegen.thickness) ? codegen.thickness : "return 0.1;\n");
frag_gen << "}\n\n";
info.fragment_source_generated = frag_gen.str();
}
/* Geometry Info. */
switch (geometry_type) {
case MAT_GEOM_WORLD:
info.additional_info("eevee_geom_world");
break;
case MAT_GEOM_VOLUME:
info.additional_info("eevee_geom_volume");
break;
case MAT_GEOM_GPENCIL:
info.additional_info("eevee_geom_gpencil");
break;
case MAT_GEOM_CURVES:
info.additional_info("eevee_geom_curves");
break;
case MAT_GEOM_MESH:
default:
info.additional_info("eevee_geom_mesh");
break;
}
/* Pipeline Info. */
switch (geometry_type) {
case MAT_GEOM_WORLD:
info.additional_info("eevee_surf_world");
break;
case MAT_GEOM_VOLUME:
break;
default:
switch (pipeline_type) {
case MAT_PIPE_FORWARD_PREPASS:
case MAT_PIPE_DEFERRED_PREPASS:
case MAT_PIPE_SHADOW:
info.additional_info("eevee_surf_depth");
break;
case MAT_PIPE_DEFERRED:
info.additional_info("eevee_surf_deferred");
break;
case MAT_PIPE_FORWARD:
info.additional_info("eevee_surf_forward");
break;
default:
BLI_assert(0);
break;
}
break;
}
}
/* WATCH: This can be called from another thread! Needs to not touch the shader module in any
* thread unsafe manner. */
static void codegen_callback(void *thunk, GPUMaterial *mat, GPUCodegenOutput *codegen)
{
reinterpret_cast<ShaderModule *>(thunk)->material_create_info_ammend(mat, codegen);
}
GPUMaterial *ShaderModule::material_shader_get(::Material *blender_mat,
struct bNodeTree *nodetree,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type,
bool deferred_compilation)
{
uint64_t shader_uuid = shader_uuid_from_material_type(pipeline_type, geometry_type);
bool is_volume = (pipeline_type == MAT_PIPE_VOLUME);
return DRW_shader_from_material(
blender_mat, nodetree, shader_uuid, is_volume, deferred_compilation, codegen_callback, this);
}
GPUMaterial *ShaderModule::world_shader_get(::World *blender_world, struct bNodeTree *nodetree)
{
eMaterialPipeline pipeline_type = MAT_PIPE_DEFERRED; /* Unused. */
eMaterialGeometry geometry_type = MAT_GEOM_WORLD;
uint64_t shader_uuid = shader_uuid_from_material_type(pipeline_type, geometry_type);
bool is_volume = (pipeline_type == MAT_PIPE_VOLUME);
bool deferred_compilation = false;
return DRW_shader_from_world(blender_world,
nodetree,
shader_uuid,
is_volume,
deferred_compilation,
codegen_callback,
this);
}
/* Variation to compile a material only with a nodetree. Caller needs to maintain the list of
* materials and call GPU_material_free on it to update the material. */
GPUMaterial *ShaderModule::material_shader_get(const char *name,
ListBase &materials,
struct bNodeTree *nodetree,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type,
bool is_lookdev)
{
uint64_t shader_uuid = shader_uuid_from_material_type(pipeline_type, geometry_type);
bool is_volume = (pipeline_type == MAT_PIPE_VOLUME);
GPUMaterial *gpumat = GPU_material_from_nodetree(nullptr,
nullptr,
nodetree,
&materials,
name,
shader_uuid,
is_volume,
is_lookdev,
codegen_callback,
this);
GPU_material_status_set(gpumat, GPU_MAT_QUEUED);
GPU_material_compile(gpumat);
return gpumat;
}
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* Shader module that manages shader libraries, deferred compilation,
* and static shader usage.
*/
#pragma once
#include <array>
#include <string>
#include "BLI_string_ref.hh"
#include "DRW_render.h"
#include "GPU_material.h"
#include "GPU_shader.h"
#include "eevee_material.hh"
#include "eevee_sync.hh"
namespace blender::eevee {
/* Keep alphabetical order and clean prefix. */
enum eShaderType {
MAX_SHADER_TYPE = 0,
};
/**
* Shader module, shared between instances.
*/
class ShaderModule {
private:
std::array<GPUShader *, MAX_SHADER_TYPE> shaders_;
/** Shared shader module across all engine instances. */
static ShaderModule *g_shader_module;
public:
ShaderModule();
~ShaderModule();
GPUShader *static_shader_get(eShaderType shader_type);
GPUMaterial *material_shader_get(::Material *blender_mat,
struct bNodeTree *nodetree,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type,
bool deferred_compilation);
GPUMaterial *world_shader_get(::World *blender_world, struct bNodeTree *nodetree);
GPUMaterial *material_shader_get(const char *name,
ListBase &materials,
struct bNodeTree *nodetree,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type,
bool is_lookdev);
void material_create_info_ammend(GPUMaterial *mat, GPUCodegenOutput *codegen);
/** Only to be used by Instance constructor. */
static ShaderModule *module_get();
static void module_free();
private:
const char *static_shader_create_info_name_get(eShaderType shader_type);
};
} // namespace blender::eevee
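Since the eShaderType enum above is still empty, a sketch (hypothetical names, not part of this commit) of how a static shader would eventually be registered:

enum eShaderType {
  FILM_FRAG = 0, /* Hypothetical entry. */

  MAX_SHADER_TYPE,
};

/* ...with the matching case added to static_shader_create_info_name_get():
 *   case FILM_FRAG:
 *     return "eevee_film_frag";  (hypothetical create-info name)
 */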

View File

@ -0,0 +1,86 @@
/**
* Shared structures, enums & defines between C++ and GLSL.
* Can also include some math functions but they need to be simple enough to be valid in both
* languages.
*/
#ifndef USE_GPU_SHADER_CREATE_INFO
# pragma once
# include "BLI_memory_utils.hh"
# include "DRW_gpu_wrapper.hh"
// # include "eevee_defines.hh"
# include "GPU_shader_shared.h"
namespace blender::eevee {
using draw::Framebuffer;
using draw::Texture;
using draw::TextureFromPool;
#endif
#define UBO_MIN_MAX_SUPPORTED_SIZE 1 << 14
/* -------------------------------------------------------------------- */
/** \name Raytracing
* \{ */
enum eClosureBits : uint32_t {
/** NOTE: These are used as stencil bits, so we are limited to 8 bits. */
CLOSURE_DIFFUSE = (1u << 0u),
CLOSURE_SSS = (1u << 1u),
CLOSURE_REFLECTION = (1u << 2u),
CLOSURE_REFRACTION = (1u << 3u),
/* Non-stencil bits. */
CLOSURE_TRANSPARENCY = (1u << 8u),
CLOSURE_EMISSION = (1u << 9u),
CLOSURE_HOLDOUT = (1u << 10u),
CLOSURE_VOLUME = (1u << 11u),
CLOSURE_AMBIENT_OCCLUSION = (1u << 12u),
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utility Texture
* \{ */
#define UTIL_TEX_SIZE 64
#define UTIL_BTDF_LAYER_COUNT 16
/* Scale and bias to avoid interpolation of the border pixel.
* Remap UVs to the border pixel centers. */
#define UTIL_TEX_UV_SCALE ((UTIL_TEX_SIZE - 1.0f) / UTIL_TEX_SIZE)
#define UTIL_TEX_UV_BIAS (0.5f / UTIL_TEX_SIZE)
#define UTIL_BLUE_NOISE_LAYER 0
#define UTIL_LTC_MAT_LAYER 1
#define UTIL_LTC_MAG_LAYER 2
#define UTIL_BSDF_LAYER 2
#define UTIL_BTDF_LAYER 3
#define UTIL_DISK_INTEGRAL_LAYER 3
#define UTIL_DISK_INTEGRAL_COMP 2
#ifndef __cplusplus
/* Fetch texel. Wrapping if above range. */
float4 utility_tx_fetch(sampler2DArray util_tx, float2 texel, float layer)
{
return texelFetch(util_tx, int3(int2(texel) % UTIL_TEX_SIZE, layer), 0);
}
/* Sample at uv position. Filtered & Wrapping enabled. */
float4 utility_tx_sample(sampler2DArray util_tx, float2 uv, float layer)
{
return textureLod(util_tx, float3(uv, layer), 0.0);
}
#endif
/** \} */
#ifdef __cplusplus
} // namespace blender::eevee
#endif
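A small numeric sketch (illustration only) of the UV remapping constants above, with UTIL_TEX_SIZE = 64:

float u = 0.0f;
float remapped = u * UTIL_TEX_UV_SCALE + UTIL_TEX_UV_BIAS; /* 0.5 / 64: center of texel 0. */
u = 1.0f;
remapped = u * UTIL_TEX_UV_SCALE + UTIL_TEX_UV_BIAS; /* 63.5 / 64: center of texel 63. */
/* Filtered lookups at the extremes therefore hit texel centers instead of
 * interpolating across the texture border. */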

View File

@ -0,0 +1,319 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* Converts the different renderable object types to drawcalls.
*/
#include "eevee_engine.h"
#include "BKE_gpencil.h"
#include "BKE_object.h"
#include "DEG_depsgraph_query.h"
#include "DNA_curves_types.h"
#include "DNA_gpencil_types.h"
#include "DNA_modifier_types.h"
#include "DNA_particle_types.h"
#include "eevee_instance.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name Draw Data
*
* \{ */
static void draw_data_init_cb(struct DrawData *dd)
{
/* Object has just been created or was never evaluated by the engine. */
dd->recalc = ID_RECALC_ALL;
}
ObjectHandle &SyncModule::sync_object(Object *ob)
{
DrawEngineType *owner = (DrawEngineType *)&DRW_engine_viewport_eevee_next_type;
struct DrawData *dd = DRW_drawdata_ensure(
(ID *)ob, owner, sizeof(eevee::ObjectHandle), draw_data_init_cb, nullptr);
ObjectHandle &eevee_dd = *reinterpret_cast<ObjectHandle *>(dd);
if (eevee_dd.object_key.ob == nullptr) {
eevee_dd.object_key = ObjectKey(ob);
}
const int recalc_flags = ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_SHADING |
ID_RECALC_GEOMETRY;
if ((eevee_dd.recalc & recalc_flags) != 0) {
// inst_.sampling.reset();
UNUSED_VARS(inst_);
}
return eevee_dd;
}
WorldHandle &SyncModule::sync_world(::World *world)
{
DrawEngineType *owner = (DrawEngineType *)&DRW_engine_viewport_eevee_next_type;
struct DrawData *dd = DRW_drawdata_ensure(
(ID *)world, owner, sizeof(eevee::WorldHandle), draw_data_init_cb, nullptr);
WorldHandle &eevee_dd = *reinterpret_cast<WorldHandle *>(dd);
const int recalc_flags = ID_RECALC_ALL;
if ((eevee_dd.recalc & recalc_flags) != 0) {
// inst_.sampling.reset();
}
return eevee_dd;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Common
* \{ */
static inline void shgroup_geometry_call(DRWShadingGroup *grp,
Object *ob,
GPUBatch *geom,
int v_first = -1,
int v_count = -1,
bool use_instancing = false)
{
if (grp == nullptr) {
return;
}
if (v_first == -1) {
DRW_shgroup_call(grp, geom, ob);
}
else if (use_instancing) {
DRW_shgroup_call_instance_range(grp, ob, geom, v_first, v_count);
}
else {
DRW_shgroup_call_range(grp, ob, geom, v_first, v_count);
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Mesh
* \{ */
void SyncModule::sync_mesh(Object *ob, ObjectHandle &ob_handle)
{
MaterialArray &material_array = inst_.materials.material_array_get(ob);
GPUBatch **mat_geom = DRW_cache_object_surface_material_get(
ob, material_array.gpu_materials.data(), material_array.gpu_materials.size());
if (mat_geom == nullptr) {
return;
}
bool is_shadow_caster = false;
bool is_alpha_blend = false;
for (auto i : material_array.gpu_materials.index_range()) {
GPUBatch *geom = mat_geom[i];
if (geom == nullptr) {
continue;
}
Material *material = material_array.materials[i];
shgroup_geometry_call(material->shading.shgrp, ob, geom);
shgroup_geometry_call(material->prepass.shgrp, ob, geom);
shgroup_geometry_call(material->shadow.shgrp, ob, geom);
is_shadow_caster = is_shadow_caster || material->shadow.shgrp != nullptr;
is_alpha_blend = is_alpha_blend || material->is_alpha_blend_transparent;
}
// shading_passes.velocity.mesh_add(ob, ob_handle);
// shadows.sync_object(ob, ob_handle, is_shadow_caster, is_alpha_blend);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPencil
* \{ */
#define DO_BATCHING true
struct gpIterData {
Instance &inst;
Object *ob;
MaterialArray &material_array;
int cfra;
/* Drawcall batching. */
GPUBatch *geom = nullptr;
Material *material = nullptr;
int vfirst = 0;
int vcount = 0;
bool instancing = false;
gpIterData(Instance &inst_, Object *ob_)
: inst(inst_), ob(ob_), material_array(inst_.materials.material_array_get(ob_))
{
cfra = DEG_get_ctime(inst.depsgraph);
};
};
static void gpencil_drawcall_flush(gpIterData &iter)
{
if (iter.geom != nullptr) {
shgroup_geometry_call(iter.material->shading.shgrp,
iter.ob,
iter.geom,
iter.vfirst,
iter.vcount,
iter.instancing);
shgroup_geometry_call(iter.material->prepass.shgrp,
iter.ob,
iter.geom,
iter.vfirst,
iter.vcount,
iter.instancing);
shgroup_geometry_call(iter.material->shadow.shgrp,
iter.ob,
iter.geom,
iter.vfirst,
iter.vcount,
iter.instancing);
}
iter.geom = nullptr;
iter.vfirst = -1;
iter.vcount = 0;
}
/* Group draw-calls that are consecutive and with the same type. Reduces GPU driver overhead. */
static void gpencil_drawcall_add(gpIterData &iter,
GPUBatch *geom,
Material *material,
int v_first,
int v_count,
bool instancing)
{
int last = iter.vfirst + iter.vcount;
/* Interrupt draw-call grouping if the sequence is not consecutive. */
if (!DO_BATCHING || (geom != iter.geom) || (material != iter.material) || (v_first - last > 3)) {
gpencil_drawcall_flush(iter);
}
iter.geom = geom;
iter.material = material;
iter.instancing = instancing;
if (iter.vfirst == -1) {
iter.vfirst = v_first;
}
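/* Extend the batched range. Example: a pending range covering verts [12, 30) followed by
 * a stroke at [30, 45) becomes a single call of 45 - 12 = 33 verts starting at 12. */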
iter.vcount = v_first + v_count - iter.vfirst;
}
static void gpencil_stroke_sync(bGPDlayer *UNUSED(gpl),
bGPDframe *UNUSED(gpf),
bGPDstroke *gps,
void *thunk)
{
gpIterData &iter = *(gpIterData *)thunk;
Material *material = iter.material_array.materials[gps->mat_nr];
MaterialGPencilStyle *gp_style = BKE_gpencil_material_settings(iter.ob, gps->mat_nr + 1);
bool hide_material = (gp_style->flag & GP_MATERIAL_HIDE) != 0;
bool show_stroke = ((gp_style->flag & GP_MATERIAL_STROKE_SHOW) != 0) ||
(!DRW_state_is_image_render() && ((gps->flag & GP_STROKE_NOFILL) != 0));
bool show_fill = (gps->tot_triangles > 0) && ((gp_style->flag & GP_MATERIAL_FILL_SHOW) != 0);
if (hide_material) {
return;
}
if (show_fill) {
GPUBatch *geom = DRW_cache_gpencil_fills_get(iter.ob, iter.cfra);
int vfirst = gps->runtime.fill_start * 3;
int vcount = gps->tot_triangles * 3;
gpencil_drawcall_add(iter, geom, material, vfirst, vcount, false);
}
if (show_stroke) {
GPUBatch *geom = DRW_cache_gpencil_strokes_get(iter.ob, iter.cfra);
/* Start one vert before to have gl_InstanceID > 0 (see shader). */
int vfirst = gps->runtime.stroke_start - 1;
/* Include "potential" cyclic vertex and start adj vertex (see shader). */
int vcount = gps->totpoints + 1 + 1;
gpencil_drawcall_add(iter, geom, material, vfirst, vcount, true);
}
}
void SyncModule::sync_gpencil(Object *ob, ObjectHandle &ob_handle)
{
/* TODO(fclem): Waiting for a user option to use the render engine instead of gpencil engine. */
return;
gpIterData iter(inst_, ob);
BKE_gpencil_visible_stroke_iter((bGPdata *)ob->data, nullptr, gpencil_stroke_sync, &iter);
gpencil_drawcall_flush(iter);
/* TODO(fclem) Gpencil velocity. */
// shading_passes.velocity.gpencil_add(ob, ob_handle);
// bool is_caster = true; /* TODO material.shadow.shgrp. */
// bool is_alpha_blend = true; /* TODO material.is_alpha_blend. */
// shadows.sync_object(ob, ob_handle, is_caster, is_alpha_blend);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Hair
* \{ */
static void shgroup_curves_call(MaterialPass &matpass,
Object *ob,
ParticleSystem *part_sys = nullptr,
ModifierData *modifier_data = nullptr)
{
if (matpass.shgrp == nullptr) {
return;
}
DRW_shgroup_hair_create_sub(ob, part_sys, modifier_data, matpass.shgrp, matpass.gpumat);
}
void SyncModule::sync_curves(Object *ob, ObjectHandle &ob_handle, ModifierData *modifier_data)
{
int mat_nr = CURVES_MATERIAL_NR;
ParticleSystem *part_sys = nullptr;
if (modifier_data != nullptr) {
part_sys = reinterpret_cast<ParticleSystemModifierData *>(modifier_data)->psys;
if (!DRW_object_is_visible_psys_in_active_context(ob, part_sys)) {
return;
}
ParticleSettings *part_settings = part_sys->part;
const int draw_as = (part_settings->draw_as == PART_DRAW_REND) ? part_settings->ren_as :
part_settings->draw_as;
if (draw_as != PART_DRAW_PATH) {
return;
}
mat_nr = part_settings->omat;
}
Material &material = inst_.materials.material_get(ob, mat_nr - 1, MAT_GEOM_CURVES);
shgroup_curves_call(material.shading, ob, part_sys, modifier_data);
shgroup_curves_call(material.prepass, ob, part_sys, modifier_data);
shgroup_curves_call(material.shadow, ob, part_sys, modifier_data);
/* TODO(fclem) Hair velocity. */
// shading_passes.velocity.gpencil_add(ob, ob_handle);
// bool is_caster = material.shadow.shgrp != nullptr;
// bool is_alpha_blend = material.is_alpha_blend_transparent;
// shadows.sync_object(ob, ob_handle, is_caster, is_alpha_blend);
}
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,159 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* Structures to identify unique data blocks. The keys are unique so we are able to
* match ids across frame updates.
*/
#pragma once
#include "BKE_duplilist.h"
#include "BLI_ghash.h"
#include "BLI_map.hh"
#include "DNA_object_types.h"
#include "DRW_render.h"
#include "GPU_material.h"
#include "eevee_shader_shared.hh"
namespace blender::eevee {
class Instance;
/* -------------------------------------------------------------------- */
/** \name ObjectKey
*
* Unique key to identify each object in the hashmap.
* \{ */
struct ObjectKey {
/** Hash value of the key. */
uint64_t hash_value;
/** Original Object or source object for duplis. */
Object *ob;
/** Original Parent object for duplis. */
Object *parent;
/** Dupli object's recursive unique identifier. */
int id[MAX_DUPLI_RECUR];
/** If object uses particle system hair. */
bool use_particle_hair;
#ifdef DEBUG
char name[64];
#endif
ObjectKey() : ob(nullptr), parent(nullptr){};
ObjectKey(Object *ob_, Object *parent_, int id_[MAX_DUPLI_RECUR], bool use_particle_hair_)
: ob(ob_), parent(parent_), use_particle_hair(use_particle_hair_)
{
if (id_) {
memcpy(id, id_, sizeof(id));
}
else {
memset(id, 0, sizeof(id));
}
/* Compute hash on creation so we avoid the cost of it for every sync. */
hash_value = BLI_ghashutil_ptrhash(ob);
hash_value = BLI_ghashutil_combine_hash(hash_value, BLI_ghashutil_ptrhash(parent));
for (int i = 0; i < MAX_DUPLI_RECUR; i++) {
if (id[i] != 0) {
hash_value = BLI_ghashutil_combine_hash(hash_value, BLI_ghashutil_inthash(id[i]));
}
else {
break;
}
}
#ifdef DEBUG
STRNCPY(name, ob->id.name);
#endif
}
ObjectKey(Object *ob, DupliObject *dupli, Object *parent)
: ObjectKey(ob, parent, dupli ? dupli->persistent_id : nullptr, false){};
ObjectKey(Object *ob)
: ObjectKey(ob, DRW_object_get_dupli(ob), DRW_object_get_dupli_parent(ob)){};
uint64_t hash(void) const
{
return hash_value;
}
bool operator<(const ObjectKey &k) const
{
if (ob != k.ob) {
return (ob < k.ob);
}
if (parent != k.parent) {
return (parent < k.parent);
}
if (use_particle_hair != k.use_particle_hair) {
return (use_particle_hair < k.use_particle_hair);
}
return memcmp(id, k.id, sizeof(id)) < 0;
}
bool operator==(const ObjectKey &k) const
{
if (ob != k.ob) {
return false;
}
if (parent != k.parent) {
return false;
}
if (use_particle_hair != k.use_particle_hair) {
return false;
}
return memcmp(id, k.id, sizeof(id)) == 0;
}
};
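/* Usage sketch (hypothetical container and payload type, not part of this commit): the
 * pre-computed hash makes ObjectKey cheap to use as a key in blender::Map during sync, e.g.
 *
 *   Map<ObjectKey, ObjectData> objects_;
 *   objects_.lookup_or_add_default(ObjectKey(ob));
 */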
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sync Module
*
* \{ */
struct ObjectHandle : public DrawData {
ObjectKey object_key;
void reset_recalc_flag(void)
{
if (recalc != 0) {
recalc = 0;
}
}
};
struct WorldHandle : public DrawData {
void reset_recalc_flag(void)
{
if (recalc != 0) {
recalc = 0;
}
}
};
class SyncModule {
private:
Instance &inst_;
public:
SyncModule(Instance &inst) : inst_(inst){};
~SyncModule(){};
ObjectHandle &sync_object(Object *ob);
WorldHandle &sync_world(::World *world);
void sync_mesh(Object *ob, ObjectHandle &ob_handle);
void sync_gpencil(Object *ob, ObjectHandle &ob_handle);
void sync_curves(Object *ob, ObjectHandle &ob_handle, ModifierData *modifier_data = nullptr);
};
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,207 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* A view is either:
* - The entire main view.
* - A fragment of the main view (for panoramic projections).
* - A shadow map view.
* - A lightprobe view (either planar, cubemap, irradiance grid).
*
* A pass is a container for scene data. It is view agnostic but has specific logic depending on
* its type. Passes are shared between views.
*/
#include "BKE_global.h"
#include "DRW_render.h"
#include "eevee_instance.hh"
#include "eevee_view.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name ShadingView
* \{ */
void ShadingView::init()
{
// dof_.init();
// mb_.init();
}
void ShadingView::sync(int2 render_extent_)
{
if (false /* inst_.camera.is_panoramic() */) {
int64_t render_pixel_count = render_extent_.x * (int64_t)render_extent_.y;
/* Divide pixel count between the 6 views. Rendering to a square target. */
extent_[0] = extent_[1] = ceilf(sqrtf(1 + (render_pixel_count / 6)));
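/* Example: a 1920x960 panoramic render has 1843200 pixels, so each of the 6 cube faces
 * renders to a 555x555 target (ceil(sqrt(1 + 1843200 / 6))). */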
/* TODO(fclem) Clip unused views here. */
is_enabled_ = true;
}
else {
extent_ = render_extent_;
/* Only enable -Z view. */
is_enabled_ = (StringRefNull(name_) == "negZ_view");
}
if (!is_enabled_) {
return;
}
/* Create views. */
// const CameraData &data = inst_.camera.data_get();
float4x4 viewmat, winmat;
const float(*viewmat_p)[4] = viewmat.ptr(), (*winmat_p)[4] = winmat.ptr();
#if 0
if (false /* inst_.camera.is_panoramic() */) {
/* TODO(fclem) Overscans. */
/* For now a mandatory 5% overscan for DoF. */
float side = data.clip_near * 1.05f;
float near = data.clip_near;
float far = data.clip_far;
perspective_m4(winmat.ptr(), -side, side, -side, side, near, far);
viewmat = face_matrix_ * data.viewmat;
}
else {
viewmat_p = data.viewmat.ptr();
winmat_p = data.winmat.ptr();
}
#else
/* TEMP */
const DRWView *default_view = DRW_view_default_get();
DRW_view_winmat_get(default_view, winmat.ptr(), false);
DRW_view_viewmat_get(default_view, viewmat.ptr(), false);
#endif
main_view_ = DRW_view_create(viewmat_p, winmat_p, nullptr, nullptr, nullptr);
sub_view_ = DRW_view_create_sub(main_view_, viewmat_p, winmat_p);
render_view_ = DRW_view_create_sub(main_view_, viewmat_p, winmat_p);
// dof_.sync(winmat_p, extent_);
// mb_.sync(extent_);
// velocity_.sync(extent_);
// rt_buffer_opaque_.sync(extent_);
// rt_buffer_refract_.sync(extent_);
// inst_.hiz_back.view_sync(extent_);
// inst_.hiz_front.view_sync(extent_);
// inst_.gbuffer.view_sync(extent_);
combined_tx_.sync();
postfx_tx_.sync();
}
void ShadingView::render(void)
{
if (!is_enabled_) {
return;
}
/* Query temp textures and create framebuffers. */
/* HACK: View name should be unique and static.
* With this, we can reuse the same texture across views. */
DrawEngineType *owner = (DrawEngineType *)name_;
depth_tx_.ensure_2d(GPU_DEPTH24_STENCIL8, extent_);
combined_tx_.acquire(extent_, GPU_RGBA16F, owner);
view_fb_.ensure(GPU_ATTACHMENT_TEXTURE(depth_tx_), GPU_ATTACHMENT_TEXTURE(combined_tx_));
update_view();
DRW_stats_group_start(name_);
// DRW_view_set_active(render_view_);
/* Alpha stores transmittance. So start at 1. */
float4 clear_color = {0.0f, 0.0f, 0.0f, 1.0f};
// GPU_framebuffer_bind(view_fb_);
// GPU_framebuffer_clear_color_depth(view_fb_, clear_color, 1.0f);
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
GPU_framebuffer_bind(dfbl->default_fb);
GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_color, 1.0f);
inst_.pipelines.world.render();
// inst_.pipelines.deferred.render(
// render_view_, rt_buffer_opaque_, rt_buffer_refract_, depth_tx_, combined_tx_);
// inst_.lightprobes.draw_cache_display();
// inst_.lookdev.render_overlay(view_fb_);
inst_.pipelines.forward.render(render_view_, depth_tx_, combined_tx_);
// inst_.lights.debug_draw(view_fb_);
// inst_.shadows.debug_draw(view_fb_);
// velocity_.render(depth_tx_);
// if (inst_.render_passes.vector) {
// inst_.render_passes.vector->accumulate(velocity_.camera_vectors_get(), sub_view_);
// }
// GPUTexture *final_radiance_tx = render_post(combined_tx_);
// if (inst_.render_passes.combined) {
// inst_.render_passes.combined->accumulate(final_radiance_tx, sub_view_);
// }
// if (inst_.render_passes.depth) {
// inst_.render_passes.depth->accumulate(depth_tx_, sub_view_);
// }
DRW_stats_group_end();
combined_tx_.release();
postfx_tx_.release();
}
GPUTexture *ShadingView::render_post(GPUTexture *input_tx)
{
#if 0
if (!dof_.postfx_enabled() && !mb_.enabled()) {
return input_tx;
}
/* HACK: View name should be unique and static.
* With this, we can reuse the same texture across views. */
postfx_tx_.acquire(extent_, GPU_RGBA16F, (void *)name_);
GPUTexture *velocity_tx = velocity_.view_vectors_get();
GPUTexture *output_tx = postfx_tx_;
/* Swapping is done internally. Actual output is set to the next input. */
dof_.render(depth_tx_, &input_tx, &output_tx);
mb_.render(depth_tx_, velocity_tx, &input_tx, &output_tx);
#endif
return input_tx;
}
void ShadingView::update_view(void)
{
float4x4 viewmat, winmat;
DRW_view_viewmat_get(main_view_, viewmat.ptr(), false);
DRW_view_winmat_get(main_view_, winmat.ptr(), false);
/* Anti-Aliasing / Super-Sampling jitter. */
// float jitter_u = 2.0f * (inst_.sampling.rng_get(SAMPLING_FILTER_U) - 0.5f) / extent_[0];
// float jitter_v = 2.0f * (inst_.sampling.rng_get(SAMPLING_FILTER_V) - 0.5f) / extent_[1];
// window_translate_m4(winmat.ptr(), winmat.ptr(), jitter_u, jitter_v);
DRW_view_update_sub(sub_view_, viewmat.ptr(), winmat.ptr());
/* FIXME(fclem): The offset may be noticeably large and the culling might make objects pop
* out of the blurring radius. To fix this, use custom enlarged culling matrix. */
// dof_.jitter_apply(winmat, viewmat);
DRW_view_update_sub(render_view_, viewmat.ptr(), winmat.ptr());
// inst_.lightprobes.set_view(render_view_, extent_);
// inst_.lights.set_view(render_view_, extent_, !inst_.use_scene_lights());
}
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,157 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* A view is either:
* - The entire main view.
* - A portion of the main view (for panoramic projections).
* - A lightprobe view (either planar, cubemap, irradiance grid).
*
* A pass is a container for scene data. It is view agnostic but has specific logic depending on
* its type. Passes are shared between views.
*/
#pragma once
#include "DRW_render.h"
#include "eevee_camera.hh"
#include "eevee_pipeline.hh"
#include "eevee_shader.hh"
namespace blender::eevee {
class Instance;
/* -------------------------------------------------------------------- */
/** \name ShadingView
*
* Render the scene and fill all render passes data.
* \{ */
class ShadingView {
private:
Instance &inst_;
/** Static string pointer. Used as debug name and as UUID for texture pool. */
const char *name_;
/** Matrix to apply to the viewmat. */
const float (*face_matrix_)[4];
/** Post-fx modules. */
// DepthOfField dof_;
// MotionBlur mb_;
// Velocity velocity_;
/** Raytracing persistent buffers. Only opaque and refraction can have surface tracing. */
// RaytraceBuffer rt_buffer_opaque_;
// RaytraceBuffer rt_buffer_refract_;
Framebuffer view_fb_;
Texture depth_tx_;
TextureFromPool combined_tx_;
TextureFromPool postfx_tx_;
/** Main view is created from the camera (or from the viewport). It is not jittered. */
DRWView *main_view_ = nullptr;
/** Sub view is a jittered version of the main view. This allows jitter updates without trashing
* the visibility culling cache. */
DRWView *sub_view_ = nullptr;
/** Same as sub_view_ but has Depth Of Field jitter applied. */
DRWView *render_view_ = nullptr;
/** Render size of the view. Can change between scene sample eval. */
int2 extent_ = {-1, -1};
bool is_enabled_ = false;
public:
ShadingView(Instance &inst, const char *name, const float (*face_matrix)[4])
: inst_(inst), name_(name), face_matrix_(face_matrix){};
~ShadingView(){};
void init(void);
void sync(int2 render_extent_);
void render(void);
GPUTexture *render_post(GPUTexture *input_tx);
private:
void update_view(void);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Main View
*
* Container for all views needed to render the final image.
* We might need up to 6 views for panoramic cameras.
* All views are always available but only enabled if needed.
* \{ */
class MainView {
private:
/* WORKAROUND: Defining this as an array does not seem to work on GCC < 9.4.
* It tries to use the copy constructor and fails because ShadingView is non-copyable and
* non-movable. */
ShadingView shading_views_0;
ShadingView shading_views_1;
ShadingView shading_views_2;
ShadingView shading_views_3;
ShadingView shading_views_4;
ShadingView shading_views_5;
#define shading_views_ (&shading_views_0)
/** Internal render size. */
int render_extent_[2];
public:
MainView(Instance &inst)
: shading_views_0(inst, "posX_view", cubeface_mat[0]),
shading_views_1(inst, "negX_view", cubeface_mat[1]),
shading_views_2(inst, "posY_view", cubeface_mat[2]),
shading_views_3(inst, "negY_view", cubeface_mat[3]),
shading_views_4(inst, "posZ_view", cubeface_mat[4]),
shading_views_5(inst, "negZ_view", cubeface_mat[5])
{
}
void init(const int2 full_extent_)
{
/* TODO(fclem) parameter hidden in experimental. We need to figure out mipmap bias to preserve
* texture crispiness. */
float resolution_scale = 1.0f;
for (int i = 0; i < 2; i++) {
render_extent_[i] = max_ii(1, roundf(full_extent_[i] * resolution_scale));
}
for (auto i : IndexRange(6)) {
shading_views_[i].init();
}
}
void sync(void)
{
for (auto i : IndexRange(6)) {
shading_views_[i].sync(render_extent_);
}
}
void render(void)
{
for (auto i : IndexRange(6)) {
shading_views_[i].render();
}
}
#undef shading_views_
};
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,97 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*/
#include "BKE_lib_id.h"
#include "BKE_node.h"
#include "BKE_world.h"
#include "DEG_depsgraph_query.h"
#include "NOD_shader.h"
#include "eevee_instance.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name Default Material
*
* \{ */
DefaultWorldNodeTree::DefaultWorldNodeTree()
{
bNodeTree *ntree = ntreeAddTree(nullptr, "World Nodetree", ntreeType_Shader->idname);
bNode *background = nodeAddStaticNode(nullptr, ntree, SH_NODE_BACKGROUND);
bNode *output = nodeAddStaticNode(nullptr, ntree, SH_NODE_OUTPUT_WORLD);
bNodeSocket *background_out = nodeFindSocket(background, SOCK_OUT, "Background");
bNodeSocket *output_in = nodeFindSocket(output, SOCK_IN, "Surface");
nodeAddLink(ntree, background, background_out, output, output_in);
nodeSetActive(ntree, output);
color_socket_ =
(bNodeSocketValueRGBA *)nodeFindSocket(background, SOCK_IN, "Color")->default_value;
ntree_ = ntree;
}
DefaultWorldNodeTree::~DefaultWorldNodeTree()
{
ntreeFreeEmbeddedTree(ntree_);
MEM_SAFE_FREE(ntree_);
}
/* Configure a default nodetree with the given world. */
bNodeTree *DefaultWorldNodeTree::nodetree_get(::World *wo)
{
/* WARNING: This function is not thread-safe, which is not a problem for the moment. */
copy_v3_fl3(color_socket_->value, wo->horr, wo->horg, wo->horb);
return ntree_;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name World
*
* \{ */
void World::sync()
{
// if (inst_.lookdev.sync_world()) {
// return;
// }
::World *bl_world = inst_.scene->world;
if (bl_world == nullptr) {
// bl_world = BKE_world_default();
return;
}
else {
WorldHandle &wo_handle = inst_.sync.sync_world(bl_world);
if (wo_handle.recalc != 0) {
// inst_.lightprobes.set_world_dirty();
}
wo_handle.reset_recalc_flag();
}
/* TODO(fclem) This should be detected to scene level. */
::World *orig_world = (::World *)DEG_get_original_id(&bl_world->id);
if (assign_if_different(prev_original_world, orig_world)) {
// inst_.sampling.reset();
}
bNodeTree *ntree = (bl_world->nodetree && bl_world->use_nodes) ?
bl_world->nodetree :
default_tree.nodetree_get(bl_world);
GPUMaterial *gpumat = inst_.shaders.world_shader_get(bl_world, ntree);
inst_.pipelines.world.sync(gpumat);
}
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* World rendering with material handling. Also takes care of the lookdev
* HDRI and default material.
*/
#pragma once
#include "DNA_world_types.h"
namespace blender::eevee {
class Instance;
/* -------------------------------------------------------------------- */
/** \name Default World Nodetree
*
* In order to support worlds without a nodetree, we reuse and configure a standalone nodetree that
* we pass for shader generation. The GPUMaterial is still stored inside the World even if
* it does not use a nodetree.
* \{ */
class DefaultWorldNodeTree {
private:
bNodeTree *ntree_;
bNodeSocketValueRGBA *color_socket_;
public:
DefaultWorldNodeTree();
~DefaultWorldNodeTree();
bNodeTree *nodetree_get(::World *world);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name World
*
* \{ */
class World {
private:
Instance &inst_;
DefaultWorldNodeTree default_tree;
/* Used to detect if the world changed. */
::World *prev_original_world = nullptr;
public:
World(Instance &inst) : inst_(inst){};
void sync(void);
};
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,305 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_codegen_lib.glsl)
#if defined(MAT_GEOM_MESH)
/* -------------------------------------------------------------------- */
/** \name Mesh
*
* Mesh objects attributes are loaded using vertex input attributes.
* \{ */
# ifdef OBINFO_LIB
vec3 attr_load_orco(vec4 orco)
{
/* We know there is no orco layer when orco.w is 1.0 because it uses the generic vertex
* attrib (which is [0,0,0,1]). */
if (orco.w == 1.0) {
/* If the object does not have any deformation, the orco layer calculation is done on the fly
* using the orco_madd factors. */
return OrcoTexCoFactors[0].xyz + pos * OrcoTexCoFactors[1].xyz;
}
return orco.xyz * 0.5 + 0.5;
}
# endif
vec4 attr_load_tangent(vec4 tangent)
{
tangent.xyz = safe_normalize(normal_object_to_world(tangent.xyz));
return tangent;
}
vec4 attr_load_vec4(vec4 attr)
{
return attr;
}
vec3 attr_load_vec3(vec3 attr)
{
return attr;
}
vec2 attr_load_vec2(vec2 attr)
{
return attr;
}
float attr_load_float(float attr)
{
return attr;
}
vec4 attr_load_color(vec4 attr)
{
return attr;
}
vec3 attr_load_uv(vec3 attr)
{
return attr;
}
/** \} */
#elif defined(MAT_GEOM_GPENCIL)
/* -------------------------------------------------------------------- */
/** \name Grease Pencil
*
* Grease Pencil objects have one uv and one color attribute layer.
* \{ */
/* Globals to feed the load functions. */
vec2 g_uvs;
vec4 g_color;
# ifdef OBINFO_LIB
vec3 attr_load_orco(vec4 orco)
{
vec3 lP = point_world_to_object(interp.P);
return OrcoTexCoFactors[0].xyz + lP * OrcoTexCoFactors[1].xyz;
}
# endif
vec4 attr_load_tangent(vec4 tangent)
{
return vec4(0.0, 0.0, 0.0, 1.0);
}
vec3 attr_load_uv(vec3 dummy)
{
return vec3(g_uvs, 0.0);
}
vec4 attr_load_color(vec4 dummy)
{
return g_color;
}
vec4 attr_load_vec4(vec4 attr)
{
return vec4(0.0);
}
vec3 attr_load_vec3(vec3 attr)
{
return vec3(0.0);
}
vec2 attr_load_vec2(vec2 attr)
{
return vec2(0.0);
}
float attr_load_float(float attr)
{
return 0.0;
}
/** \} */
#elif defined(MAT_GEOM_CURVES)
/* -------------------------------------------------------------------- */
/** \name Curve
*
* Curve objects load attributes from buffers through sampler buffers.
* \{ */
# ifdef OBINFO_LIB
vec3 attr_load_orco(vec4 orco)
{
vec3 P = hair_get_strand_pos();
vec3 lP = transform_point(ModelMatrixInverse, P);
return OrcoTexCoFactors[0].xyz + lP * OrcoTexCoFactors[1].xyz;
}
# endif
vec4 attr_load_tangent(samplerBuffer cd_buf)
{
/* Not supported for the moment. */
return vec4(0.0, 0.0, 0.0, 1.0);
}
vec3 attr_load_uv(samplerBuffer cd_buf)
{
return texelFetch(cd_buf, interp.curves_strand_id).rgb;
}
vec4 attr_load_color(samplerBuffer cd_buf)
{
return texelFetch(cd_buf, interp.curves_strand_id).rgba;
}
vec4 attr_load_vec4(samplerBuffer cd_buf)
{
return texelFetch(cd_buf, interp.curves_strand_id).rgba;
}
vec3 attr_load_vec3(samplerBuffer cd_buf)
{
return texelFetch(cd_buf, interp.curves_strand_id).rgb;
}
vec2 attr_load_vec2(samplerBuffer cd_buf)
{
return texelFetch(cd_buf, interp.curves_strand_id).rg;
}
float attr_load_float(samplerBuffer cd_buf)
{
return texelFetch(cd_buf, interp.curves_strand_id).r;
}
/** \} */
#elif defined(MAT_GEOM_VOLUME)
/* -------------------------------------------------------------------- */
/** \name Volume
*
* Volume objects load attributes from "grids" in the form of 3D textures.
* The per-grid transform order follows the attribute loading order.
* \{ */
# ifndef OBINFO_LIB
# error "draw_object_infos is mandatory for volume objects"
# endif
vec3 g_orco;
int g_attr_id = 0;
vec3 grid_coordinates()
{
vec3 co = g_orco;
/* Optional per-grid transform. */
if (drw_volume.grids_xform[g_attr_id][3][3] != 0.0) {
co = (drw_volume.grids_xform[g_attr_id] * vec4(objectPosition, 1.0)).xyz;
}
g_attr_id += 1;
return co;
}
vec3 attr_load_orco(sampler3D tex)
{
g_attr_id += 1;
return g_orco;
}
vec4 attr_load_tangent(sampler3D tex)
{
g_attr_id += 1;
return vec4(0);
}
vec4 attr_load_vec4(sampler3D tex)
{
return texture(tex, grid_coordinates());
}
vec3 attr_load_vec3(sampler3D tex)
{
return texture(tex, grid_coordinates()).rgb;
}
vec2 attr_load_vec2(sampler3D tex)
{
return texture(tex, grid_coordinates()).rg;
}
float attr_load_float(sampler3D tex)
{
return texture(tex, grid_coordinates()).r;
}
vec4 attr_load_color(sampler3D tex)
{
return texture(tex, grid_coordinates());
}
vec3 attr_load_uv(sampler3D attr)
{
g_attr_id += 1;
return vec3(0);
}
/** \} */
#elif defined(MAT_GEOM_WORLD)
/* -------------------------------------------------------------------- */
/** \name World
*
* World has no attributes other than orco.
* \{ */
vec3 attr_load_orco(vec4 orco)
{
return -g_data.N;
}
vec4 attr_load_tangent(vec4 tangent)
{
return vec4(0);
}
vec4 attr_load_vec4(vec4 attr)
{
return vec4(0);
}
vec3 attr_load_vec3(vec3 attr)
{
return vec3(0);
}
vec2 attr_load_vec2(vec2 attr)
{
return vec2(0);
}
float attr_load_float(float attr)
{
return 0.0;
}
vec4 attr_load_color(vec4 attr)
{
return vec4(0);
}
vec3 attr_load_uv(vec3 attr)
{
return vec3(0);
}
/** \} */
#endif
/* -------------------------------------------------------------------- */
/** \name Volume Attribute post
*
* TODO(@fclem): These implementation details should concern the DRWManager and not be a fix on
* the engine side. But as of now, the engines are responsible for loading the attributes.
*
* \{ */
#if defined(MAT_GEOM_VOLUME)
float attr_load_temperature_post(float attr)
{
/* Bring the temperature into standard range without having to modify the grid values. */
attr = (attr > 0.01) ? (attr * drw_volume.temperature_mul + drw_volume.temperature_bias) : 0.0;
return attr;
}
vec4 attr_load_color_post(vec4 attr)
{
/* Density is premultiplied for interpolation, divide it out here. */
attr.rgb *= safe_rcp(attr.a);
attr.rgb *= drw_volume.color_mul.rgb;
attr.a = 1.0;
return attr;
}
#else /* Noop for any other surface. */
float attr_load_temperature_post(float attr)
{
return attr;
}
vec4 attr_load_color_post(vec4 attr)
{
return attr;
}
#endif
/** \} */

View File

@ -0,0 +1,37 @@
#pragma BLENDER_REQUIRE(common_hair_lib.glsl) /* TODO rename to curve. */
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_attributes_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
void main(void)
{
init_interface();
vec3 T;
bool is_persp = (ProjectionMatrix[3][3] == 0.0);
hair_get_pos_tan_binor_time(is_persp,
ModelMatrixInverse,
ViewMatrixInverse[3].xyz,
ViewMatrixInverse[2].xyz,
interp.P,
T,
interp.curves_binormal,
interp.curves_time,
interp.curves_thickness,
interp.curves_time_width);
interp.N = cross(T, interp.curves_binormal);
interp.curves_strand_id = hair_get_strand_id();
interp.barycentric_coords = hair_get_barycentric();
init_globals();
attrib_load();
interp.P += nodetree_displacement();
gl_Position = point_world_to_ndc(interp.P);
}

View File

@ -0,0 +1,46 @@
#pragma BLENDER_REQUIRE(common_gpencil_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_attributes_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
void main(void)
{
init_interface();
/* TODO(fclem): Expose through a node? */
vec4 sspos;
vec2 aspect;
float strength;
float hardness;
vec2 thickness;
gl_Position = gpencil_vertex(ma,
ma1,
ma2,
ma3,
pos,
pos1,
pos2,
pos3,
uv1,
uv2,
col1,
col2,
fcol1,
vec4(drw_view.viewport_size, drw_view.viewport_size_inverse),
interp.P,
interp.N,
g_color,
strength,
g_uvs,
sspos,
aspect,
thickness,
hardness);
init_globals();
attrib_load();
interp.P += nodetree_displacement();
}

View File

@ -0,0 +1,20 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_attributes_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
void main(void)
{
init_interface();
interp.P = point_object_to_world(pos);
interp.N = normal_object_to_world(nor);
init_globals();
attrib_load();
interp.P += nodetree_displacement();
gl_Position = point_world_to_ndc(interp.P);
}

View File

@ -0,0 +1,21 @@
/**
* Custom fullscreen triangle with placeholder varyings.
**/
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
void main(void)
{
/* Fullscreen triangle. */
int v = gl_VertexID % 3;
float x = float((v & 1) << 2) - 1.0;
float y = float((v & 2) << 1) - 1.0;
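/* gl_VertexID 0, 1, 2 yields (-1,-1), (3,-1), (-1,3): one oversized triangle that covers
 * the whole [-1,1] NDC square without needing a vertex buffer. */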
gl_Position = vec4(x, y, 1.0, 1.0);
/* Pass view position to keep accuracy. */
interp.P = project_point(ProjectionMatrixInverse, gl_Position.xyz);
interp.N = vec3(1);
}

View File

@ -0,0 +1,360 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_codegen_lib.glsl)
vec3 g_emission;
vec3 g_transmittance;
float g_holdout;
/* The Closure type is never used. Use float as dummy type. */
#define Closure float
/* Sampled closure parameters. */
ClosureDiffuse g_diffuse_data;
ClosureReflection g_reflection_data;
ClosureRefraction g_refraction_data;
/* Random number per sampled closure type. */
float g_diffuse_rand;
float g_reflection_rand;
float g_refraction_rand;
/**
* Returns true if the closure is to be selected based on the input weight.
*/
bool closure_select(float weight, inout float total_weight, inout float r)
{
total_weight += weight;
float x = weight / total_weight;
bool chosen = (r < x);
/* Assuming r is still uniformly distributed within the chosen interval ([0,x] or [x,1]),
 * remap it back to [0,1] so it can be reused for the next closure selection. */
r = (chosen) ? (r / x) : ((r - x) / (1.0 - x));
return chosen;
}
#define SELECT_CLOSURE(destination, random, candidate) \
if (closure_select(candidate.weight, destination.weight, random)) { \
destination = candidate; \
}
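/* This is weighted reservoir sampling with one random number per closure slot.
 * Worked example: candidates with weights 1, 2, 1 are kept with probabilities 1, 2/3 and
 * 1/4 at their respective steps, giving final selection probabilities of 1/4, 1/2 and 1/4,
 * i.e. proportional to the weights. */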
void closure_weights_reset()
{
g_diffuse_data.weight = 0.0;
g_diffuse_data.color = vec3(0.0);
g_diffuse_data.N = vec3(0.0);
g_diffuse_data.sss_radius = vec3(0.0);
g_diffuse_data.sss_id = uint(0);
g_reflection_data.weight = 0.0;
g_reflection_data.color = vec3(0.0);
g_reflection_data.N = vec3(0.0);
g_reflection_data.roughness = 0.0;
g_refraction_data.weight = 0.0;
g_refraction_data.color = vec3(0.0);
g_refraction_data.N = vec3(0.0);
g_refraction_data.roughness = 0.0;
g_refraction_data.ior = 0.0;
/* TEMP */
#define P(x) ((x + 0.5) / 16.0)
const vec4 dither_mat4x4[4] = vec4[4](vec4(P(0.0), P(8.0), P(2.0), P(10.0)),
vec4(P(12.0), P(4.0), P(14.0), P(6.0)),
vec4(P(3.0), P(11.0), P(1.0), P(9.0)),
vec4(P(15.0), P(7.0), P(13.0), P(5.0)));
#undef P
#if defined(GPU_FRAGMENT_SHADER)
ivec2 pix = ivec2(gl_FragCoord.xy) % ivec2(4);
g_diffuse_rand = dither_mat4x4[pix.x][pix.y];
g_reflection_rand = dither_mat4x4[pix.x][pix.y];
g_refraction_rand = dither_mat4x4[pix.x][pix.y];
#else
g_diffuse_rand = 0.0;
g_reflection_rand = 0.0;
g_refraction_rand = 0.0;
#endif
g_emission = vec3(0.0);
g_transmittance = vec3(0.0);
g_holdout = 0.0;
}
/* Single BSDFs. */
Closure closure_eval(ClosureDiffuse diffuse)
{
SELECT_CLOSURE(g_diffuse_data, g_diffuse_rand, diffuse);
return Closure(0);
}
Closure closure_eval(ClosureTranslucent translucent)
{
/* TODO */
return Closure(0);
}
Closure closure_eval(ClosureReflection reflection)
{
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, reflection);
return Closure(0);
}
Closure closure_eval(ClosureRefraction refraction)
{
SELECT_CLOSURE(g_refraction_data, g_refraction_rand, refraction);
return Closure(0);
}
Closure closure_eval(ClosureEmission emission)
{
g_emission += emission.emission * emission.weight;
return Closure(0);
}
Closure closure_eval(ClosureTransparency transparency)
{
g_transmittance += transparency.transmittance * transparency.weight;
g_holdout += transparency.holdout * transparency.weight;
return Closure(0);
}
Closure closure_eval(ClosureVolumeScatter volume_scatter)
{
/* TODO */
return Closure(0);
}
Closure closure_eval(ClosureVolumeAbsorption volume_absorption)
{
/* TODO */
return Closure(0);
}
Closure closure_eval(ClosureHair hair)
{
/* TODO */
return Closure(0);
}
/* Glass BSDF. */
Closure closure_eval(ClosureReflection reflection, ClosureRefraction refraction)
{
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, reflection);
SELECT_CLOSURE(g_refraction_data, g_refraction_rand, refraction);
return Closure(0);
}
/* Dielectric BSDF. */
Closure closure_eval(ClosureDiffuse diffuse, ClosureReflection reflection)
{
SELECT_CLOSURE(g_diffuse_data, g_diffuse_rand, diffuse);
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, reflection);
return Closure(0);
}
/* ClearCoat BSDF. */
Closure closure_eval(ClosureReflection reflection, ClosureReflection clearcoat)
{
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, reflection);
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, clearcoat);
return Closure(0);
}
/* Volume BSDF. */
Closure closure_eval(ClosureVolumeScatter volume_scatter,
ClosureVolumeAbsorption volume_absorption,
ClosureEmission emission)
{
/* TODO */
return Closure(0);
}
/* Specular BSDF. */
Closure closure_eval(ClosureDiffuse diffuse,
ClosureReflection reflection,
ClosureReflection clearcoat)
{
SELECT_CLOSURE(g_diffuse_data, g_diffuse_rand, diffuse);
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, reflection);
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, clearcoat);
return Closure(0);
}
/* Principled BSDF. */
Closure closure_eval(ClosureDiffuse diffuse,
ClosureReflection reflection,
ClosureReflection clearcoat,
ClosureRefraction refraction)
{
SELECT_CLOSURE(g_diffuse_data, g_diffuse_rand, diffuse);
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, reflection);
SELECT_CLOSURE(g_reflection_data, g_reflection_rand, clearcoat);
SELECT_CLOSURE(g_refraction_data, g_refraction_rand, refraction);
return Closure(0);
}
/* Noop since we are sampling closures. */
Closure closure_add(Closure cl1, Closure cl2)
{
return Closure(0);
}
Closure closure_mix(Closure cl1, Closure cl2, float fac)
{
return Closure(0);
}
float ambient_occlusion_eval(vec3 normal,
float distance,
const float inverted,
const float sample_count)
{
/* TODO */
return 1.0;
}
#ifndef GPU_METAL
void attrib_load();
Closure nodetree_surface();
Closure nodetree_volume();
vec3 nodetree_displacement();
float nodetree_thickness();
vec4 closure_to_rgba(Closure cl);
#endif
/* Stubs. */
vec2 btdf_lut(float a, float b, float c)
{
return vec2(1, 0);
}
vec2 brdf_lut(float a, float b)
{
return vec2(1, 0);
}
vec3 F_brdf_multi_scatter(vec3 a, vec3 b, vec2 c)
{
return a;
}
vec3 F_brdf_single_scatter(vec3 a, vec3 b, vec2 c)
{
return a;
}
float F_eta(float a, float b)
{
return a;
}
void output_aov(vec4 color, float value, uint hash)
{
}
#ifdef EEVEE_MATERIAL_STUBS
# define attrib_load()
# define nodetree_displacement() vec3(0.0)
# define nodetree_surface() Closure(0)
# define nodetree_volume() Closure(0)
# define nodetree_thickness() 0.1
#endif
/* -------------------------------------------------------------------- */
/** \name Fragment Displacement
*
* Displacement happening in the fragment shader.
* Can be used in conjunction with a per vertex displacement.
*
* \{ */
#ifdef MAT_DISPLACEMENT_BUMP
/* Return new shading normal. */
vec3 displacement_bump()
{
# ifdef GPU_FRAGMENT_SHADER
vec2 dHd;
dF_branch(dot(nodetree_displacement(), g_data.N + dF_impl(g_data.N)), dHd);
vec3 dPdx = dFdx(g_data.P);
vec3 dPdy = dFdy(g_data.P);
/* Get surface tangents from normal. */
vec3 Rx = cross(dPdy, g_data.N);
vec3 Ry = cross(g_data.N, dPdx);
/* Compute surface gradient and determinant. */
float det = dot(dPdx, Rx);
vec3 surfgrad = dHd.x * Rx + dHd.y * Ry;
float facing = FrontFacing ? 1.0 : -1.0;
return normalize(abs(det) * g_data.N - facing * sign(det) * surfgrad);
# else
return g_data.N;
# endif
}
#endif
void fragment_displacement()
{
#ifdef MAT_DISPLACEMENT_BUMP
g_data.N = displacement_bump();
#endif
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Coordinate implementations
*
* Callbacks for the texture coordinate node.
*
* \{ */
vec3 coordinate_camera(vec3 P)
{
vec3 vP;
if (false /* probe */) {
/* Unsupported. It would make the probe camera-dependent. */
vP = P;
}
else {
#ifdef MAT_WORLD
vP = transform_direction(ViewMatrix, P);
#else
vP = transform_point(ViewMatrix, P);
#endif
}
vP.z = -vP.z;
return vP;
}
vec3 coordinate_screen(vec3 P)
{
vec3 window = vec3(0.0);
if (false /* probe */) {
/* Unsupported. It would make the probe camera-dependent. */
window.xy = vec2(0.5);
}
else {
/* TODO(fclem): Actual camera transform. */
window.xy = project_point(ViewProjectionMatrix, P).xy * 0.5 + 0.5;
window.xy = window.xy * CameraTexCoFactors.xy + CameraTexCoFactors.zw;
}
return window;
}
vec3 coordinate_reflect(vec3 P, vec3 N)
{
#ifdef MAT_WORLD
return N;
#else
return -reflect(cameraVec(P), N);
#endif
}
vec3 coordinate_incoming(vec3 P)
{
#ifdef MAT_WORLD
return -P;
#else
return cameraVec(P);
#endif
}
/** \} */

View File

@ -0,0 +1,19 @@
/**
* Deferred lighting evaluation: Lighting is evaluated in a separate pass.
*
* Outputs shading parameter per pixel using a randomized set of BSDFs.
**/
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
void main(void)
{
init_globals();
nodetree_surface();
}

View File

@ -0,0 +1,72 @@
/**
* Depth shader that can stochastically discard transparent pixels.
*/
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
/* From the paper "Hashed Alpha Testing" by Chris Wyman and Morgan McGuire. */
float hash(vec2 a)
{
return fract(1e4 * sin(17.0 * a.x + 0.1 * a.y) * (0.1 + abs(sin(13.0 * a.y + a.x))));
}
float hash3d(vec3 a)
{
return hash(vec2(hash(a.xy), a.z));
}
float hashed_alpha_threshold(float hash_scale, float hash_offset, vec3 P)
{
/* Find the discretized derivatives of our coordinates. */
float max_deriv = max(length(dFdx(P)), length(dFdy(P)));
float pix_scale = 1.0 / (hash_scale * max_deriv);
/* Find two nearest log-discretized noise scales. */
float pix_scale_log = log2(pix_scale);
vec2 pix_scales;
pix_scales.x = exp2(floor(pix_scale_log));
pix_scales.y = exp2(ceil(pix_scale_log));
/* Compute alpha thresholds at our two noise scales. */
vec2 alpha;
alpha.x = hash3d(floor(pix_scales.x * P));
alpha.y = hash3d(floor(pix_scales.y * P));
/* Factor to interpolate between the two noise scales. */
float fac = fract(log2(pix_scale));
/* Interpolate alpha threshold from noise at two scales. */
float x = mix(alpha.x, alpha.y, fac);
/* Pass into CDF to compute a uniformly distributed threshold. */
float a = min(fac, 1.0 - fac);
float one_a = 1.0 - a;
float denom = 1.0 / (2 * a * one_a);
float one_x = (1 - x);
vec3 cases = vec3((x * x) * denom, (x - 0.5 * a) / one_a, 1.0 - (one_x * one_x * denom));
/* Find our final, uniformly distributed alpha threshold. */
float threshold = (x < one_a) ? ((x < a) ? cases.x : cases.y) : cases.z;
/* Jitter the threshold for TAA accumulation. */
threshold = fract(threshold + hash_offset);
/* Avoids threshold == 0. */
threshold = clamp(threshold, 1.0e-6, 1.0);
return threshold;
}
void main(void)
{
#ifdef MAT_TRANSPARENT
init_globals();
nodetree_surface();
// float noise_offset = sampling_rng_1D_get(sampling_buf, SAMPLING_TRANSPARENCY);
float noise_offset = 0.5;
float random_threshold = hashed_alpha_threshold(1.0, noise_offset, g_data.P);
float transparency = avg(g_transmittance);
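/* Stochastic transparency: each fragment is kept or discarded against the hashed
 * threshold; averaged over the accumulated samples this converges to the true coverage. */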
if (transparency > random_threshold) {
discard;
}
#endif
}

View File

@ -0,0 +1,73 @@
/**
* Forward lighting evaluation: Lighting is evaluated during the geometry rasterization.
*
* This is used by alpha blended materials and materials using Shader to RGB nodes.
**/
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
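/* Placeholder specular: a single fixed light direction with the roughness remapped to a
 * Blinn-Phong exponent, until real light loops are implemented. */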
float spec_light(ClosureReflection ref)
{
float gloss = saturate(1.0 - ref.roughness);
float shininess = exp2(10.0 * gloss + 1.0);
vec3 N = ref.N;
vec3 L = vec3(0.0, 0.0, 1.0);
vec3 H = normalize(L + cameraVec(g_data.P));
float spec_angle = saturate(dot(N, H));
float normalization_factor = shininess * 0.125 + 1.0;
float spec_light = pow(spec_angle, shininess) * saturate(dot(N, L)) * normalization_factor;
return spec_light;
}
vec4 closure_to_rgba(Closure cl)
{
vec4 out_color;
out_color.rgb = g_emission;
out_color.rgb += g_diffuse_data.color * g_diffuse_data.weight *
saturate(g_diffuse_data.N.z * 0.5 + 0.5);
out_color.rgb += g_reflection_data.color * g_reflection_data.weight *
spec_light(g_reflection_data);
out_color.rgb += g_refraction_data.color * g_refraction_data.weight *
saturate(g_refraction_data.N.z * 0.5 + 0.5);
out_color.a = saturate(1.0 - avg(g_transmittance));
/* Reset for the next closure tree. */
closure_weights_reset();
return out_color;
}
void main(void)
{
init_globals();
fragment_displacement();
nodetree_surface();
g_holdout = saturate(g_holdout);
out_radiance.rgb = g_emission;
out_radiance.rgb += g_diffuse_data.color * g_diffuse_data.weight *
saturate(g_diffuse_data.N.z * 0.5 + 0.5);
out_radiance.rgb += g_reflection_data.color * g_reflection_data.weight *
spec_light(g_reflection_data);
out_radiance.rgb += g_refraction_data.color * g_refraction_data.weight *
saturate(g_refraction_data.N.z * 0.5 + 0.5);
out_radiance.a = 0.0;
out_radiance.rgb *= 1.0 - g_holdout;
out_transmittance.rgb = g_transmittance;
out_transmittance.a = saturate(avg(g_transmittance));
/* Test */
out_transmittance.a = 1.0 - out_transmittance.a;
out_radiance.a = 1.0 - out_radiance.a;
}

View File

@ -0,0 +1,104 @@
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_codegen_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#if defined(USE_BARYCENTRICS) && defined(GPU_FRAGMENT_SHADER) && defined(MAT_GEOM_MESH)
vec3 barycentric_distances_get()
{
/* NOTE: No need to undo perspective divide since it has not been applied. */
vec3 pos0 = (ProjectionMatrixInverse * gpu_position_at_vertex(0)).xyz;
vec3 pos1 = (ProjectionMatrixInverse * gpu_position_at_vertex(1)).xyz;
vec3 pos2 = (ProjectionMatrixInverse * gpu_position_at_vertex(2)).xyz;
vec3 edge21 = pos2 - pos1;
vec3 edge10 = pos1 - pos0;
vec3 edge02 = pos0 - pos2;
vec3 d21 = safe_normalize(edge21);
vec3 d10 = safe_normalize(edge10);
vec3 d02 = safe_normalize(edge02);
vec3 dists;
float d = dot(d21, edge02);
dists.x = sqrt(dot(edge02, edge02) - d * d);
d = dot(d02, edge10);
dists.y = sqrt(dot(edge10, edge10) - d * d);
d = dot(d10, edge21);
dists.z = sqrt(dot(edge21, edge21) - d * d);
return dists.xyz;
}
#endif
void init_globals_mesh(void)
{
#if defined(USE_BARYCENTRICS) && defined(GPU_FRAGMENT_SHADER) && defined(MAT_GEOM_MESH)
g_data.barycentric_coords = gpu_BaryCoord.xy;
g_data.barycentric_dists = barycentric_distances_get();
#endif
}
void init_globals_curves(void)
{
/* Shade as a cylinder. */
float cos_theta = interp.curves_time_width / interp.curves_thickness;
float sin_theta = sqrt(max(0.0, 1.0 - cos_theta * cos_theta));
g_data.N = normalize(interp.N * sin_theta + interp.curves_binormal * cos_theta);
g_data.is_strand = true;
g_data.hair_time = interp.curves_time;
g_data.hair_thickness = interp.curves_thickness;
g_data.hair_strand_id = interp.curves_strand_id;
#if defined(USE_BARYCENTRICS) && defined(GPU_FRAGMENT_SHADER) && defined(MAT_GEOM_CURVES)
g_data.barycentric_coords = hair_resolve_barycentric(interp.barycentric_coords);
#endif
}
void init_globals_gpencil(void)
{
/* Undo backface flip as the gpencil normal is already pointing towards the camera. */
g_data.N = interp.N;
}
void init_globals(void)
{
/* Default values. */
g_data.P = interp.P;
g_data.N = safe_normalize(interp.N);
g_data.Ng = g_data.N;
g_data.is_strand = false;
g_data.hair_time = 0.0;
g_data.hair_thickness = 0.0;
g_data.hair_strand_id = 0;
g_data.ray_type = RAY_TYPE_CAMERA; /* TODO */
g_data.ray_depth = 0.0;
g_data.ray_length = distance(g_data.P, cameraPos);
g_data.barycentric_coords = vec2(0.0);
g_data.barycentric_dists = vec3(0.0);
#ifdef GPU_FRAGMENT_SHADER
g_data.N = (FrontFacing) ? g_data.N : -g_data.N;
g_data.Ng = safe_normalize(cross(dFdx(g_data.P), dFdy(g_data.P)));
#endif
#if defined(MAT_GEOM_MESH)
init_globals_mesh();
#elif defined(MAT_GEOM_CURVES)
init_globals_curves();
#elif defined(MAT_GEOM_GPENCIL)
init_globals_gpencil();
#endif
}
/* Avoid some compiler issues with unset interface parameters. */
void init_interface()
{
#ifdef GPU_VERTEX_SHADER
interp.P = vec3(0.0);
interp.N = vec3(0.0);
interp.barycentric_coords = vec2(0.0);
interp.curves_binormal = vec3(0.0);
interp.curves_time = 0.0;
interp.curves_time_width = 0.0;
interp.curves_thickness = 0.0;
interp.curves_strand_id = 0;
drw_ResourceID_iface.resource_index = resource_id;
#endif
}

View File

@ -0,0 +1,29 @@
/**
* Background used to shade the world.
*
* Outputs shading parameter per pixel using a set of randomized BSDFs.
**/
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_attributes_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
void main(void)
{
init_globals();
/* View position is passed to keep accuracy. */
g_data.N = normal_view_to_world(viewCameraVec(interp.P));
g_data.Ng = g_data.N;
g_data.P = -g_data.N + cameraPos;
attrib_load();
nodetree_surface();
g_holdout = saturate(g_holdout);
out_background.rgb = safe_color(g_emission) * (1.0 - g_holdout);
out_background.a = saturate(avg(g_transmittance)) * g_holdout;
}

View File

@ -0,0 +1,176 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
/** \name Common
* \{ */
/* TODO(@fclem): This is a bit out of place at the moment. */
GPU_SHADER_CREATE_INFO(eevee_shared)
.typedef_source("eevee_defines.hh")
.typedef_source("eevee_shader_shared.hh");
GPU_SHADER_CREATE_INFO(eevee_sampling_data)
.additional_info("eevee_shared")
.uniform_buf(14, "SamplingData", "sampling_buf");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Surface Mesh Type
* \{ */
GPU_SHADER_CREATE_INFO(eevee_geom_mesh)
.define("MAT_GEOM_MESH")
.vertex_in(0, Type::VEC3, "pos")
.vertex_in(1, Type::VEC3, "nor")
.vertex_source("eevee_geom_mesh_vert.glsl")
.additional_info("draw_mesh", "draw_resource_id_varying", "draw_resource_handle");
GPU_SHADER_CREATE_INFO(eevee_geom_gpencil)
.define("MAT_GEOM_GPENCIL")
.vertex_source("eevee_geom_gpencil_vert.glsl")
.additional_info("draw_gpencil", "draw_resource_id_varying", "draw_resource_handle");
GPU_SHADER_CREATE_INFO(eevee_geom_curves)
.define("MAT_GEOM_CURVES")
.vertex_source("eevee_geom_curves_vert.glsl")
.additional_info("draw_hair", "draw_resource_id_varying", "draw_resource_handle");
GPU_SHADER_CREATE_INFO(eevee_geom_world)
.define("MAT_GEOM_WORLD")
.builtins(BuiltinBits::VERTEX_ID)
.vertex_source("eevee_geom_world_vert.glsl")
.additional_info("draw_modelmat", "draw_resource_id_varying", "draw_resource_handle");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Surface
* \{ */
GPU_SHADER_INTERFACE_INFO(eevee_surf_iface, "interp")
.smooth(Type::VEC3, "P")
.smooth(Type::VEC3, "N")
.smooth(Type::VEC2, "barycentric_coords")
.smooth(Type::VEC3, "curves_binormal")
.smooth(Type::FLOAT, "curves_time")
.smooth(Type::FLOAT, "curves_time_width")
.smooth(Type::FLOAT, "curves_thickness")
.flat(Type::INT, "curves_strand_id");
#define image_out(slot, qualifier, format, name) \
image(slot, format, qualifier, ImageType::FLOAT_2D, name, Frequency::PASS)
GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
.vertex_out(eevee_surf_iface)
/* Note: This removes the possibility of using gl_FragDepth. */
// .early_fragment_test(true)
/* Direct output. */
.fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1)
/* Gbuffer. */
// .image_out(0, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_transmit_color")
// .image_out(1, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_transmit_data")
// .image_out(2, Qualifier::WRITE, GPU_RGBA16F, "gbuff_transmit_normal")
// .image_out(3, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_reflection_color")
// .image_out(4, Qualifier::WRITE, GPU_RGBA16F, "gbuff_reflection_normal")
// .image_out(5, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_emission")
/* Renderpasses. */
// .image_out(6, Qualifier::READ_WRITE, GPU_RGBA16F, "rpass_volume_light")
/* TODO: AOVs maybe? */
.fragment_source("eevee_surf_deferred_frag.glsl")
// .additional_info("eevee_sampling_data", "eevee_utility_texture")
;
#undef image_out
GPU_SHADER_CREATE_INFO(eevee_surf_forward)
.auto_resource_location(true)
.vertex_out(eevee_surf_iface)
.fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1)
.fragment_source("eevee_surf_forward_frag.glsl")
// .additional_info("eevee_sampling_data",
// "eevee_lightprobe_data",
/* Optionally added depending on the material. */
// "eevee_raytrace_data",
// "eevee_transmittance_data",
// "eevee_utility_texture",
// "eevee_light_data",
// "eevee_shadow_data"
// )
;
GPU_SHADER_CREATE_INFO(eevee_surf_depth)
.vertex_out(eevee_surf_iface)
.fragment_source("eevee_surf_depth_frag.glsl")
// .additional_info("eevee_sampling_data", "eevee_utility_texture")
;
GPU_SHADER_CREATE_INFO(eevee_surf_world)
.vertex_out(eevee_surf_iface)
.fragment_out(0, Type::VEC4, "out_background")
.fragment_source("eevee_surf_world_frag.glsl")
// .additional_info("eevee_utility_texture")
;
/** \} */
/* -------------------------------------------------------------------- */
/** \name Volume
* \{ */
#if 0 /* TODO */
GPU_SHADER_INTERFACE_INFO(eevee_volume_iface, "interp")
.smooth(Type::VEC3, "P_start")
.smooth(Type::VEC3, "P_end");
GPU_SHADER_CREATE_INFO(eevee_volume_deferred)
.sampler(0, ImageType::DEPTH_2D, "depth_max_tx")
.vertex_in(0, Type::VEC3, "pos")
.vertex_out(eevee_volume_iface)
.fragment_out(0, Type::UVEC4, "out_volume_data")
.fragment_out(1, Type::VEC4, "out_transparency_data")
.additional_info("eevee_shared")
.vertex_source("eevee_volume_vert.glsl")
.fragment_source("eevee_volume_deferred_frag.glsl")
.additional_info("draw_fullscreen");
#endif
/** \} */
/* -------------------------------------------------------------------- */
/** \name Test shaders
*
* Variations that are only there to test shaders at compile time.
* \{ */
#ifdef DEBUG
/* Stub functions defined by the material evaluation. */
GPU_SHADER_CREATE_INFO(eevee_material_stub).define("EEVEE_MATERIAL_STUBS");
# define EEVEE_MAT_FINAL_VARIATION(name, ...) \
GPU_SHADER_CREATE_INFO(name) \
.additional_info(__VA_ARGS__) \
.auto_resource_location(true) \
.do_static_compilation(true);
# define EEVEE_MAT_GEOM_VARIATIONS(prefix, ...) \
EEVEE_MAT_FINAL_VARIATION(prefix##_world, "eevee_geom_world", __VA_ARGS__) \
EEVEE_MAT_FINAL_VARIATION(prefix##_gpencil, "eevee_geom_gpencil", __VA_ARGS__) \
EEVEE_MAT_FINAL_VARIATION(prefix##_hair, "eevee_geom_curves", __VA_ARGS__) \
EEVEE_MAT_FINAL_VARIATION(prefix##_mesh, "eevee_geom_mesh", __VA_ARGS__)
# define EEVEE_MAT_PIPE_VARIATIONS(name, ...) \
EEVEE_MAT_GEOM_VARIATIONS(name##_world, "eevee_surf_world", __VA_ARGS__) \
EEVEE_MAT_GEOM_VARIATIONS(name##_depth, "eevee_surf_depth", __VA_ARGS__) \
EEVEE_MAT_GEOM_VARIATIONS(name##_deferred, "eevee_surf_deferred", __VA_ARGS__) \
EEVEE_MAT_GEOM_VARIATIONS(name##_forward, "eevee_surf_forward", __VA_ARGS__)
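/* Expands to one test variation per (pipeline, geometry) pair, e.g. "eevee_surface_forward_mesh"
 * combining eevee_geom_mesh + eevee_surf_forward + eevee_material_stub. */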
EEVEE_MAT_PIPE_VARIATIONS(eevee_surface, "eevee_material_stub")
#endif
/** \} */

View File

@ -960,6 +960,8 @@ int DRW_cache_object_material_count_get(struct Object *ob)
return DRW_pointcloud_material_count_get(ob->data);
case OB_VOLUME:
return DRW_volume_material_count_get(ob->data);
case OB_GPENCIL:
return DRW_gpencil_material_count_get(ob->data);
default:
BLI_assert(0);
return 0;

View File

@ -20,6 +20,7 @@ struct ParticleSystem;
struct Volume;
struct VolumeGrid;
struct bGPDstroke;
struct bGPdata;
/**
* Shape resolution level of detail.
@ -270,6 +271,7 @@ struct GPUBatch *DRW_cache_gpencil_edit_curve_handles_get(struct Object *ob, int
struct GPUBatch *DRW_cache_gpencil_edit_curve_points_get(struct Object *ob, int cfra);
struct GPUBatch *DRW_cache_gpencil_sbuffer_stroke_get(struct Object *ob);
struct GPUBatch *DRW_cache_gpencil_sbuffer_fill_get(struct Object *ob);
int DRW_gpencil_material_count_get(struct bGPdata *gpd);
struct GPUBatch *DRW_cache_gpencil_face_wireframe_get(struct Object *ob);

View File

@ -954,4 +954,9 @@ GPUBatch *DRW_cache_gpencil_edit_curve_points_get(Object *ob, int cfra)
return cache->edit_curve_points_batch;
}
int DRW_gpencil_material_count_get(bGPdata *gpd)
{
return max_ii(1, gpd->totcol);
}
/** \} */

View File

@ -7,6 +7,10 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
struct rcti;
void DRW_stats_free(void);
@ -27,3 +31,7 @@ void DRW_stats_query_start(const char *name);
void DRW_stats_query_end(void);
void DRW_stats_draw(const rcti *rect);
#ifdef __cplusplus
}
#endif

View File

@ -92,8 +92,6 @@ GPU_SHADER_CREATE_INFO(draw_mesh).additional_info("draw_modelmat", "draw_resourc
GPU_SHADER_CREATE_INFO(draw_hair)
.sampler(15, ImageType::FLOAT_BUFFER, "hairPointBuffer")
.sampler(14, ImageType::UINT_BUFFER, "hairStrandBuffer")
.sampler(13, ImageType::UINT_BUFFER, "hairStrandSegBuffer")
/* TODO(@fclem): Pack these into one UBO. */
.push_constant(Type::INT, "hairStrandsRes")
.push_constant(Type::INT, "hairThicknessRes")

View File

@ -442,6 +442,7 @@ list(APPEND INC ${CMAKE_CURRENT_BINARY_DIR})
set(SRC_SHADER_CREATE_INFOS
../draw/engines/basic/shaders/infos/basic_depth_info.hh
../draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
../draw/engines/gpencil/shaders/infos/gpencil_info.hh
../draw/engines/gpencil/shaders/infos/gpencil_vfx_info.hh
../draw/engines/overlay/shaders/infos/antialiasing_info.hh