Materials: add custom object properties as uniform attributes.

This patch allows the user to type a property name into the
Attribute node, which then outputs the value of that property
for each individual object, making it possible to e.g. customize
shaders per object without duplicating the shader.

In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown on the Attribute node. The dropdown also allows
choosing whether instancing should be taken into account.

The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum into a name prefix that
cannot be entered using the keyboard (it starts with a control
character).
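
For illustration, the following standalone C sketch mirrors the encoding used by
blender_attribute_name_add_type / blender_attribute_name_split_type in the diff
below (the "tint" property name is purely hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
  const char *user_name = "tint"; /* hypothetical property name typed into the node */
  char encoded[64];

  /* "Object" mode prepends a reserved prefix before handing the name to Cycles.
   * The "\x01" control character cannot be typed into the name field, so user
   * names can never collide with a prefix. */
  snprintf(encoded, sizeof(encoded), "\x01object:%s", user_name);

  /* Splitting on the Cycles side recovers both the type and the real name. */
  const char *prefix = "\x01object:";
  if (strncmp(encoded, prefix, strlen(prefix)) == 0) {
    printf("OBJECT attribute, real name: %s\n", encoded + strlen(prefix));
  }
  return 0;
}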

In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike
Object Info, however, a separate buffer has to be maintained for
every requested combination of attributes.
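
As a rough conceptual sketch of the addressing (not the actual Blender API; the
chunk length and the handle decomposition are assumptions modelled on the draw
manager's DRW_RESOURCE_CHUNK_LEN), the shader reads
uniform_attrs[resource_id].attrN from the UBO chunk bound for the object:

#include <stdio.h>

#define DRW_RESOURCE_CHUNK_LEN 512 /* assumed chunk length */

/* Which UBO chunk to bind for an object, and which slot inside it holds
 * that object's vec4 attribute values. */
static void sketch_locate(unsigned int resource_handle,
                          unsigned int *r_chunk,
                          unsigned int *r_item)
{
  *r_chunk = resource_handle / DRW_RESOURCE_CHUNK_LEN;
  *r_item = resource_handle % DRW_RESOURCE_CHUNK_LEN;
}

int main(void)
{
  unsigned int chunk, item;
  sketch_locate(1234, &chunk, &item);
  printf("handle 1234 -> chunk %u, item %u\n", chunk, item); /* chunk 2, item 210 */
  return 0;
}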

This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
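
A back-of-the-envelope sizing sketch, assuming 512-object chunks and one std140
vec4 per attribute, shows why the sparse pool and the GPU_MAX_UNIFORM_ATTR cap
keep memory bounded:

#include <stdio.h>

int main(void)
{
  const unsigned int chunk_len = 512;       /* objects per chunk (assumed) */
  const unsigned int vec4_bytes = 16;       /* one std140 vec4 per attribute */
  const unsigned int attrs_in_material = 2; /* hypothetical material using 2 attributes */

  /* Only chunks that actually received a value are allocated and uploaded. */
  printf("bytes per allocated chunk: %u\n", attrs_in_material * vec4_bytes * chunk_len); /* 16384 */

  /* With the cap of 8 attributes, a full chunk stays within a common 64 KiB UBO limit. */
  printf("worst-case chunk: %u KiB\n", (8u * vec4_bytes * chunk_len) / 1024u); /* 64 */
  return 0;
}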

The back-end Cycles code was already refactored and committed by Brecht.

Differential Revision: https://developer.blender.org/D2057
Alexander Gavrilov 2020-08-05 19:14:40 +03:00
parent 91d320edc3
commit 6fdcca8de6
Notes: blender-bot 2023-02-14 04:39:18 +01:00
Referenced by issue #82521, Bump OpenImageIO minimum version 1.8 > 2.2.1 for install_deps.sh
25 changed files with 904 additions and 4 deletions


@ -234,6 +234,10 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
/* special case not tracked by object update flags */
if (sync_object_attributes(b_instance, object)) {
object_updated = true;
}
/* holdout */
if (use_holdout != object->use_holdout) {
object->use_holdout = use_holdout;
@ -343,6 +347,132 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
return object;
}
/* This function mirrors drw_uniform_property_lookup in draw_instance_data.cpp */
static bool lookup_property(BL::ID b_id, const string &name, float4 *r_value)
{
PointerRNA ptr;
PropertyRNA *prop;
if (!RNA_path_resolve(&b_id.ptr, name.c_str(), &ptr, &prop)) {
return false;
}
PropertyType type = RNA_property_type(prop);
int arraylen = RNA_property_array_length(&ptr, prop);
if (arraylen == 0) {
float value;
if (type == PROP_FLOAT)
value = RNA_property_float_get(&ptr, prop);
else if (type == PROP_INT)
value = RNA_property_int_get(&ptr, prop);
else
return false;
*r_value = make_float4(value, value, value, 1.0f);
return true;
}
else if (type == PROP_FLOAT && arraylen <= 4) {
*r_value = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
RNA_property_float_get_array(&ptr, prop, &r_value->x);
return true;
}
return false;
}
/* This function mirrors drw_uniform_attribute_lookup in draw_instance_data.cpp */
static float4 lookup_instance_property(BL::DepsgraphObjectInstance &b_instance,
const string &name,
bool use_instancer)
{
string idprop_name = string_printf("[\"%s\"]", name.c_str());
float4 value;
/* If requesting instance data, check the parent particle system and object. */
if (use_instancer && b_instance.is_instance()) {
BL::ParticleSystem b_psys = b_instance.particle_system();
if (b_psys) {
if (lookup_property(b_psys.settings(), idprop_name, &value) ||
lookup_property(b_psys.settings(), name, &value)) {
return value;
}
}
if (lookup_property(b_instance.parent(), idprop_name, &value) ||
lookup_property(b_instance.parent(), name, &value)) {
return value;
}
}
/* Check the object and mesh. */
BL::Object b_ob = b_instance.object();
BL::ID b_data = b_ob.data();
if (lookup_property(b_ob, idprop_name, &value) || lookup_property(b_ob, name, &value) ||
lookup_property(b_data, idprop_name, &value) || lookup_property(b_data, name, &value)) {
return value;
}
return make_float4(0.0f);
}
bool BlenderSync::sync_object_attributes(BL::DepsgraphObjectInstance &b_instance, Object *object)
{
/* Find which attributes are needed. */
AttributeRequestSet requests = object->geometry->needed_attributes();
/* Delete attributes that became unnecessary. */
vector<ParamValue> &attributes = object->attributes;
bool changed = false;
for (int i = attributes.size() - 1; i >= 0; i--) {
if (!requests.find(attributes[i].name())) {
attributes.erase(attributes.begin() + i);
changed = true;
}
}
/* Update attribute values. */
foreach (AttributeRequest &req, requests.requests) {
ustring name = req.name;
std::string real_name;
BlenderAttributeType type = blender_attribute_name_split_type(name, &real_name);
if (type != BL::ShaderNodeAttribute::attribute_type_GEOMETRY) {
bool use_instancer = (type == BL::ShaderNodeAttribute::attribute_type_INSTANCER);
float4 value = lookup_instance_property(b_instance, real_name, use_instancer);
/* Try finding the existing attribute value. */
ParamValue *param = NULL;
for (size_t i = 0; i < attributes.size(); i++) {
if (attributes[i].name() == name) {
param = &attributes[i];
break;
}
}
/* Replace or add the value. */
ParamValue new_param(name, TypeDesc::TypeFloat4, 1, &value);
assert(new_param.datasize() == sizeof(value));
if (!param) {
changed = true;
attributes.push_back(new_param);
}
else if (memcmp(param->data(), &value, sizeof(value)) != 0) {
changed = true;
*param = new_param;
}
}
}
return changed;
}
/* Object Loop */
void BlenderSync::sync_objects(BL::Depsgraph &b_depsgraph,


@ -97,6 +97,53 @@ static ImageAlphaType get_image_alpha_type(BL::Image &b_image)
return (ImageAlphaType)validate_enum_value(value, IMAGE_ALPHA_NUM_TYPES, IMAGE_ALPHA_AUTO);
}
/* Attribute name translation utilities */
/* Since Eevee needs to know whether the attribute is uniform or varying
* at the time it compiles the shader for the material, Blender had to
* introduce different namespaces (types) in its attribute node. However,
* Cycles already has object attributes that form a uniform namespace with
* the more common varying attributes. Without completely reworking the
* attribute handling in Cycles to introduce separate namespaces (this could
* be especially hard for OSL which directly uses the name string), the
* space identifier has to be added to the attribute name as a prefix.
*
* The prefixes include a control character to ensure the user specified
* name can't accidentally include a special prefix.
*/
static const string_view object_attr_prefix("\x01object:");
static const string_view instancer_attr_prefix("\x01instancer:");
static ustring blender_attribute_name_add_type(const string &name, BlenderAttributeType type)
{
switch (type) {
case BL::ShaderNodeAttribute::attribute_type_OBJECT:
return ustring::concat(object_attr_prefix, name);
case BL::ShaderNodeAttribute::attribute_type_INSTANCER:
return ustring::concat(instancer_attr_prefix, name);
default:
return ustring(name);
}
}
BlenderAttributeType blender_attribute_name_split_type(ustring name, string *r_real_name)
{
string_view sname(name);
if (sname.substr(0, object_attr_prefix.size()) == object_attr_prefix) {
*r_real_name = sname.substr(object_attr_prefix.size());
return BL::ShaderNodeAttribute::attribute_type_OBJECT;
}
if (sname.substr(0, instancer_attr_prefix.size()) == instancer_attr_prefix) {
*r_real_name = sname.substr(instancer_attr_prefix.size());
return BL::ShaderNodeAttribute::attribute_type_INSTANCER;
}
return BL::ShaderNodeAttribute::attribute_type_GEOMETRY;
}
/* Graph */
static BL::NodeSocket get_node_output(BL::Node &b_node, const string &name)
@ -369,7 +416,8 @@ static ShaderNode *add_node(Scene *scene,
else if (b_node.is_a(&RNA_ShaderNodeAttribute)) {
BL::ShaderNodeAttribute b_attr_node(b_node);
AttributeNode *attr = graph->create_node<AttributeNode>();
attr->attribute = b_attr_node.attribute_name();
attr->attribute = blender_attribute_name_add_type(b_attr_node.attribute_name(),
b_attr_node.attribute_type());
node = attr;
}
else if (b_node.is_a(&RNA_ShaderNodeBackground)) {


@ -149,6 +149,8 @@ class BlenderSync {
bool *use_portal,
TaskPool *geom_task_pool);
bool sync_object_attributes(BL::DepsgraphObjectInstance &b_instance, Object *object);
/* Volume */
void sync_volume(BL::Object &b_ob, Volume *volume);


@ -40,6 +40,9 @@ float *BKE_image_get_float_pixels_for_frame(void *image, int frame, int tile);
CCL_NAMESPACE_BEGIN
typedef BL::ShaderNodeAttribute::attribute_type_enum BlenderAttributeType;
BlenderAttributeType blender_attribute_name_split_type(ustring name, string *r_real_name);
void python_thread_state_save(void **python_thread_state);
void python_thread_state_restore(void **python_thread_state);


@ -106,7 +106,7 @@ typedef unsigned int BLI_bitmap;
#define BLI_BITMAP_RESIZE(_bitmap, _tot) \
{ \
CHECK_TYPE(_bitmap, BLI_bitmap *); \
(_bitmap) = MEM_reallocN(_bitmap, BLI_BITMAP_SIZE(_tot)); \
(_bitmap) = MEM_recallocN(_bitmap, BLI_BITMAP_SIZE(_tot)); \
} \
(void)0


@ -36,6 +36,7 @@ struct ARegion;
struct DRWInstanceDataList;
struct Depsgraph;
struct DrawEngineType;
struct GHash;
struct GPUMaterial;
struct GPUOffScreen;
struct GPUViewport;
@ -140,6 +141,7 @@ void DRW_render_gpencil(struct RenderEngine *engine, struct Depsgraph *depsgraph
/* This is here because GPUViewport needs it */
struct DRWInstanceDataList *DRW_instance_data_list_create(void);
void DRW_instance_data_list_free(struct DRWInstanceDataList *idatalist);
void DRW_uniform_attrs_pool_free(struct GHash *table);
void DRW_render_context_enable(struct Render *render);
void DRW_render_context_disable(struct Render *render);


@ -30,9 +30,20 @@
*/
#include "draw_instance_data.h"
#include "draw_manager.h"
#include "DRW_engine.h"
#include "DRW_render.h" /* For DRW_shgroup_get_instance_count() */
#include "GPU_material.h"
#include "DNA_particle_types.h"
#include "BKE_duplilist.h"
#include "RNA_access.h"
#include "BLI_bitmap.h"
#include "BLI_memblock.h"
#include "BLI_mempool.h"
#include "BLI_utildefines.h"
@ -408,3 +419,359 @@ void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist)
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sparse Uniform Buffer
* \{ */
#define CHUNK_LIST_STEP (1 << 4)
/** A chunked UBO manager that doesn't actually allocate unneeded chunks. */
typedef struct DRWSparseUniformBuf {
/* Memory buffers used to stage chunk data before transfer to UBOs. */
char **chunk_buffers;
/* Uniform buffer objects with flushed data. */
struct GPUUniformBuf **chunk_ubos;
/* True if the relevant chunk contains data (distinct from simply being allocated). */
BLI_bitmap *chunk_used;
int num_chunks;
unsigned int item_size, chunk_size, chunk_bytes;
} DRWSparseUniformBuf;
static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
unsigned int item_size,
unsigned int chunk_size)
{
buffer->chunk_buffers = NULL;
buffer->chunk_used = NULL;
buffer->chunk_ubos = NULL;
buffer->num_chunks = 0;
buffer->item_size = item_size;
buffer->chunk_size = chunk_size;
buffer->chunk_bytes = item_size * chunk_size;
}
/** Allocate a chunked UBO with the specified item and chunk size. */
DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size, unsigned int chunk_size)
{
DRWSparseUniformBuf *buf = MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__);
drw_sparse_uniform_buffer_init(buf, item_size, chunk_size);
return buf;
}
/** Flush data from ordinary memory to UBOs. */
void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer)
{
for (int i = 0; i < buffer->num_chunks; i++) {
if (BLI_BITMAP_TEST(buffer->chunk_used, i)) {
if (buffer->chunk_ubos[i] == NULL) {
buffer->chunk_ubos[i] = GPU_uniformbuf_create(buffer->chunk_bytes);
}
GPU_uniformbuf_update(buffer->chunk_ubos[i], buffer->chunk_buffers[i]);
}
}
}
/** Clean all buffers and free unused ones. */
void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
{
int max_used_chunk = 0;
for (int i = 0; i < buffer->num_chunks; i++) {
/* Delete buffers that were not used since the last clear call. */
if (free_all || !BLI_BITMAP_TEST(buffer->chunk_used, i)) {
MEM_SAFE_FREE(buffer->chunk_buffers[i]);
if (buffer->chunk_ubos[i]) {
GPU_uniformbuf_free(buffer->chunk_ubos[i]);
buffer->chunk_ubos[i] = NULL;
}
}
else {
max_used_chunk = i + 1;
}
}
/* Shrink the chunk array if appropriate. */
const int old_num_chunks = buffer->num_chunks;
buffer->num_chunks = (max_used_chunk + CHUNK_LIST_STEP - 1) & ~(CHUNK_LIST_STEP - 1);
if (buffer->num_chunks == 0) {
/* Ensure that an empty pool holds no memory allocations. */
MEM_SAFE_FREE(buffer->chunk_buffers);
MEM_SAFE_FREE(buffer->chunk_used);
MEM_SAFE_FREE(buffer->chunk_ubos);
return;
}
if (buffer->num_chunks != old_num_chunks) {
buffer->chunk_buffers = MEM_recallocN(buffer->chunk_buffers,
buffer->num_chunks * sizeof(void *));
buffer->chunk_ubos = MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *));
BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
}
BLI_bitmap_set_all(buffer->chunk_used, false, buffer->num_chunks);
}
/** Frees the buffer. */
void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer)
{
DRW_sparse_uniform_buffer_clear(buffer, true);
MEM_freeN(buffer);
}
/** Checks if the buffer contains any allocated chunks. */
bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer)
{
return buffer->num_chunks == 0;
}
static GPUUniformBuf *drw_sparse_uniform_buffer_get_ubo(DRWSparseUniformBuf *buffer, int chunk)
{
if (buffer && chunk < buffer->num_chunks && BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
return buffer->chunk_ubos[chunk];
}
else {
return NULL;
}
}
/** Bind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
{
GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
if (ubo) {
GPU_uniformbuf_bind(ubo, location);
}
}
/** Unbind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
{
GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
if (ubo) {
GPU_uniformbuf_unbind(ubo);
}
}
/** Returns a pointer to the given item of the given chunk, allocating memory if necessary. */
void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *pool, int chunk, int item)
{
if (chunk >= pool->num_chunks) {
pool->num_chunks = (chunk + CHUNK_LIST_STEP) & ~(CHUNK_LIST_STEP - 1);
pool->chunk_buffers = MEM_recallocN(pool->chunk_buffers, pool->num_chunks * sizeof(void *));
pool->chunk_ubos = MEM_recallocN(pool->chunk_ubos, pool->num_chunks * sizeof(void *));
BLI_BITMAP_RESIZE(pool->chunk_used, pool->num_chunks);
}
char *buffer = pool->chunk_buffers[chunk];
if (buffer == NULL) {
pool->chunk_buffers[chunk] = buffer = MEM_callocN(pool->chunk_bytes, __func__);
}
else if (!BLI_BITMAP_TEST(pool->chunk_used, chunk)) {
memset(buffer, 0, pool->chunk_bytes);
}
BLI_BITMAP_ENABLE(pool->chunk_used, chunk);
return buffer + pool->item_size * item;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Uniform Attribute Buffers
* \{ */
/** Sparse UBO buffer for a specific uniform attribute list. */
typedef struct DRWUniformAttrBuf {
/* Attribute list (also used as hash table key) handled by this buffer. */
GPUUniformAttrList key;
/* Sparse UBO buffer containing the attribute values. */
DRWSparseUniformBuf ubos;
/* Last handle used to update the buffer, checked for avoiding redundant updates. */
DRWResourceHandle last_handle;
/* Linked list pointer used for freeing the empty unneeded buffers. */
struct DRWUniformAttrBuf *next_empty;
} DRWUniformAttrBuf;
static DRWUniformAttrBuf *drw_uniform_attrs_pool_ensure(GHash *table, GPUUniformAttrList *key)
{
void **pkey, **pval;
if (!BLI_ghash_ensure_p_ex(table, key, &pkey, &pval)) {
DRWUniformAttrBuf *buffer = MEM_callocN(sizeof(*buffer), __func__);
*pkey = &buffer->key;
*pval = buffer;
GPU_uniform_attr_list_copy(&buffer->key, key);
drw_sparse_uniform_buffer_init(
&buffer->ubos, key->count * sizeof(float[4]), DRW_RESOURCE_CHUNK_LEN);
buffer->last_handle = (DRWResourceHandle)-1;
}
return (DRWUniformAttrBuf *)*pval;
}
/* This function mirrors lookup_property in cycles/blender/blender_object.cpp */
static bool drw_uniform_property_lookup(ID *id, const char *name, float r_data[4])
{
PointerRNA ptr, id_ptr;
PropertyRNA *prop;
if (!id) {
return false;
}
RNA_id_pointer_create(id, &id_ptr);
if (!RNA_path_resolve(&id_ptr, name, &ptr, &prop)) {
return false;
}
PropertyType type = RNA_property_type(prop);
int arraylen = RNA_property_array_length(&ptr, prop);
if (arraylen == 0) {
float value;
if (type == PROP_FLOAT)
value = RNA_property_float_get(&ptr, prop);
else if (type == PROP_INT)
value = RNA_property_int_get(&ptr, prop);
else
return false;
copy_v4_fl4(r_data, value, value, value, 1);
return true;
}
else if (type == PROP_FLOAT && arraylen <= 4) {
copy_v4_fl4(r_data, 0, 0, 0, 1);
RNA_property_float_get_array(&ptr, prop, r_data);
return true;
}
return false;
}
/* This function mirrors lookup_instance_property in cycles/blender/blender_object.cpp */
static void drw_uniform_attribute_lookup(GPUUniformAttr *attr,
Object *ob,
Object *dupli_parent,
DupliObject *dupli_source,
float r_data[4])
{
char idprop_name[sizeof(attr->name) + 4];
copy_v4_fl(r_data, 0);
sprintf(idprop_name, "[\"%s\"]", attr->name);
/* If requesting instance data, check the parent particle system and object. */
if (attr->use_dupli) {
if (dupli_source && dupli_source->particle_system) {
ParticleSettings *settings = dupli_source->particle_system->part;
if (drw_uniform_property_lookup((ID *)settings, idprop_name, r_data) ||
drw_uniform_property_lookup((ID *)settings, attr->name, r_data)) {
return;
}
}
if (drw_uniform_property_lookup((ID *)dupli_parent, idprop_name, r_data) ||
drw_uniform_property_lookup((ID *)dupli_parent, attr->name, r_data)) {
return;
}
}
/* Check the object and mesh. */
if (ob) {
if (drw_uniform_property_lookup((ID *)ob, idprop_name, r_data) ||
drw_uniform_property_lookup((ID *)ob, attr->name, r_data) ||
drw_uniform_property_lookup((ID *)ob->data, idprop_name, r_data) ||
drw_uniform_property_lookup((ID *)ob->data, attr->name, r_data)) {
return;
}
}
}
void drw_uniform_attrs_pool_update(GHash *table,
GPUUniformAttrList *key,
DRWResourceHandle *handle,
Object *ob,
Object *dupli_parent,
DupliObject *dupli_source)
{
DRWUniformAttrBuf *buffer = drw_uniform_attrs_pool_ensure(table, key);
if (buffer->last_handle != *handle) {
buffer->last_handle = *handle;
int chunk = DRW_handle_chunk_get(handle);
int item = DRW_handle_id_get(handle);
float(*values)[4] = DRW_sparse_uniform_buffer_ensure_item(&buffer->ubos, chunk, item);
LISTBASE_FOREACH (GPUUniformAttr *, attr, &buffer->key.list) {
drw_uniform_attribute_lookup(attr, ob, dupli_parent, dupli_source, *values++);
}
}
}
DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(GHash *table, struct GPUUniformAttrList *key)
{
DRWUniformAttrBuf *buffer = BLI_ghash_lookup(table, key);
return buffer ? &buffer->ubos : NULL;
}
GHash *DRW_uniform_attrs_pool_new()
{
return GPU_uniform_attr_list_hash_new("obattr_hash");
}
void DRW_uniform_attrs_pool_flush_all(GHash *table)
{
GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, buffer, table) {
DRW_sparse_uniform_buffer_flush(&buffer->ubos);
}
GHASH_FOREACH_END();
}
static void drw_uniform_attrs_pool_free_cb(void *ptr)
{
DRWUniformAttrBuf *buffer = ptr;
GPU_uniform_attr_list_free(&buffer->key);
DRW_sparse_uniform_buffer_clear(&buffer->ubos, true);
MEM_freeN(buffer);
}
void DRW_uniform_attrs_pool_clear_all(GHash *table)
{
DRWUniformAttrBuf *remove_list = NULL;
GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, buffer, table) {
buffer->last_handle = (DRWResourceHandle)-1;
DRW_sparse_uniform_buffer_clear(&buffer->ubos, false);
if (DRW_sparse_uniform_buffer_is_empty(&buffer->ubos)) {
buffer->next_empty = remove_list;
remove_list = buffer;
}
}
GHASH_FOREACH_END();
while (remove_list) {
DRWUniformAttrBuf *buffer = remove_list;
remove_list = buffer->next_empty;
BLI_ghash_remove(table, &buffer->key, NULL, drw_uniform_attrs_pool_free_cb);
}
}
void DRW_uniform_attrs_pool_free(GHash *table)
{
BLI_ghash_free(table, NULL, drw_uniform_attrs_pool_free_cb);
}
/** \} */


@ -31,8 +31,12 @@
#define DRW_BUFFER_VERTS_CHUNK 128
struct GHash;
struct GPUUniformAttrList;
typedef struct DRWInstanceData DRWInstanceData;
typedef struct DRWInstanceDataList DRWInstanceDataList;
typedef struct DRWSparseUniformBuf DRWSparseUniformBuf;
void *DRW_instance_data_next(DRWInstanceData *idata);
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size);
@ -54,3 +58,21 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist);
/* Sparse chunked UBO manager. */
DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size,
unsigned int chunk_size);
void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer);
void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all);
void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer);
bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer);
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location);
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk);
void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk, int item);
/* Uniform attribute UBO management. */
struct GHash *DRW_uniform_attrs_pool_new(void);
void DRW_uniform_attrs_pool_flush_all(struct GHash *table);
void DRW_uniform_attrs_pool_clear_all(struct GHash *table);
struct DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(struct GHash *table,
struct GPUUniformAttrList *key);


@ -467,6 +467,8 @@ static void drw_viewport_cache_resize(void)
BLI_memblock_clear(DST.vmempool->passes, NULL);
BLI_memblock_clear(DST.vmempool->views, NULL);
BLI_memblock_clear(DST.vmempool->images, NULL);
DRW_uniform_attrs_pool_clear_all(DST.vmempool->obattrs_ubo_pool);
}
DRW_instance_data_list_free_unused(DST.idatalist);
@ -593,6 +595,9 @@ static void drw_viewport_var_init(void)
if (DST.vmempool->images == NULL) {
DST.vmempool->images = BLI_memblock_create(sizeof(GPUTexture *));
}
if (DST.vmempool->obattrs_ubo_pool == NULL) {
DST.vmempool->obattrs_ubo_pool = DRW_uniform_attrs_pool_new();
}
DST.resource_handle = 0;
DST.pass_handle = 0;


@ -43,6 +43,9 @@
#include "draw_instance_data.h"
struct Object;
struct DupliObject;
/* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
#define USE_GPU_SELECT
@ -286,6 +289,7 @@ typedef enum {
/** Per drawcall uniforms/UBO */
DRW_UNIFORM_BLOCK_OBMATS,
DRW_UNIFORM_BLOCK_OBINFOS,
DRW_UNIFORM_BLOCK_OBATTRS,
DRW_UNIFORM_RESOURCE_CHUNK,
DRW_UNIFORM_RESOURCE_ID,
/** Legacy / Fallback */
@ -317,6 +321,8 @@ struct DRWUniform {
float fvalue[4];
/* DRW_UNIFORM_INT_COPY */
int ivalue[4];
/* DRW_UNIFORM_BLOCK_OBATTRS */
struct GPUUniformAttrList *uniform_attrs;
};
int location; /* Uniform location or binding point for textures and ubos. */
uint8_t type; /* DRWUniformType */
@ -340,6 +346,9 @@ struct DRWShadingGroup {
struct {
int objectinfo; /* Equal to 1 if the shader needs obinfos. */
DRWResourceHandle pass_handle; /* Memblock key to parent pass. */
/* Set of uniform attributes used by this shader. */
struct GPUUniformAttrList *uniform_attrs;
};
/* This struct is used after cache populate if using the Z sorting.
* It will not conflict with the above struct. */
@ -598,3 +607,10 @@ void drw_resource_buffer_finish(ViewportMemoryPool *vmempool);
GPUBatch *drw_cache_procedural_points_get(void);
GPUBatch *drw_cache_procedural_lines_get(void);
GPUBatch *drw_cache_procedural_triangles_get(void);
void drw_uniform_attrs_pool_update(struct GHash *table,
struct GPUUniformAttrList *key,
DRWResourceHandle *handle,
struct Object *ob,
struct Object *dupli_parent,
struct DupliObject *dupli_source);


@ -128,6 +128,8 @@ void drw_resource_buffer_finish(ViewportMemoryPool *vmempool)
GPU_uniformbuf_update(vmempool->obinfos_ubo[i], data_infos);
}
DRW_uniform_attrs_pool_flush_all(vmempool->obattrs_ubo_pool);
/* Aligned alloc to avoid unaligned memcpy. */
DRWCommandChunk *chunk_tmp = MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, "tmp call chunk");
DRWCommandChunk *chunk;
@ -209,6 +211,9 @@ static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
uni->texture_ref = (GPUTexture **)value;
uni->sampler_state = sampler_state;
break;
case DRW_UNIFORM_BLOCK_OBATTRS:
uni->uniform_attrs = (GPUUniformAttrList *)value;
break;
default:
uni->pvalue = (const float *)value;
break;
@ -611,6 +616,15 @@ static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
}
}
if (shgroup->uniform_attrs) {
drw_uniform_attrs_pool_update(DST.vmempool->obattrs_ubo_pool,
shgroup->uniform_attrs,
&DST.ob_handle,
ob,
DST.dupli_parent,
DST.dupli_source);
}
return DST.ob_handle;
}
@ -1184,6 +1198,7 @@ void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
shgroup->uniforms = NULL;
shgroup->uniform_attrs = NULL;
int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW);
int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL);
@ -1329,6 +1344,13 @@ void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial
if (ubo != NULL) {
DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
}
GPUUniformAttrList *uattrs = GPU_material_uniform_attributes(material);
if (uattrs != NULL) {
int loc = GPU_shader_get_uniform_block_binding(grp->shader, GPU_ATTRIBUTE_UBO_BLOCK_NAME);
drw_shgroup_uniform_create_ex(grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, 0, 0, 1);
grp->uniform_attrs = uattrs;
}
}
GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[],


@ -57,12 +57,15 @@ typedef struct DRWCommandsState {
/* Resource location. */
int obmats_loc;
int obinfos_loc;
int obattrs_loc;
int baseinst_loc;
int chunkid_loc;
int resourceid_loc;
/* Legacy matrix support. */
int obmat_loc;
int obinv_loc;
/* Uniform Attributes. */
DRWSparseUniformBuf *obattrs_ubo;
/* Selection ID state. */
GPUVertBuf *select_buf;
uint select_id;
@ -648,6 +651,12 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
state->obinfos_loc = uni->location;
GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[0], uni->location);
break;
case DRW_UNIFORM_BLOCK_OBATTRS:
state->obattrs_loc = uni->location;
state->obattrs_ubo = DRW_uniform_attrs_pool_find_ubo(DST.vmempool->obattrs_ubo_pool,
uni->uniform_attrs);
DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, uni->location);
break;
case DRW_UNIFORM_RESOURCE_CHUNK:
state->chunkid_loc = uni->location;
GPU_shader_uniform_int(shgroup->shader, uni->location, 0);
@ -762,6 +771,10 @@ static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHa
GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[chunk], state->obinfos_loc);
}
if (state->obattrs_loc != -1) {
DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
}
state->resource_chunk = chunk;
}
@ -884,6 +897,9 @@ static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState
if (state->obinfos_loc != -1) {
GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
}
if (state->obattrs_loc != -1) {
DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
}
}
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
@ -893,11 +909,13 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
DRWCommandsState state = {
.obmats_loc = -1,
.obinfos_loc = -1,
.obattrs_loc = -1,
.baseinst_loc = -1,
.chunkid_loc = -1,
.resourceid_loc = -1,
.obmat_loc = -1,
.obinv_loc = -1,
.obattrs_ubo = NULL,
.drw_state_enabled = 0,
.drw_state_disabled = 0,
};


@ -748,6 +748,7 @@ static void node_shader_buts_vect_transform(uiLayout *layout, bContext *UNUSED(C
static void node_shader_buts_attribute(uiLayout *layout, bContext *UNUSED(C), PointerRNA *ptr)
{
uiItemR(layout, ptr, "attribute_type", DEFAULT_FLAGS, IFACE_("Type"), ICON_NONE);
uiItemR(layout, ptr, "attribute_name", DEFAULT_FLAGS, IFACE_("Name"), ICON_NONE);
}


@ -34,6 +34,7 @@
extern "C" {
#endif
struct GHash;
struct GPUMaterial;
struct GPUNode;
struct GPUNodeLink;
@ -143,6 +144,7 @@ typedef void (*GPUMaterialEvalCallbackFn)(GPUMaterial *mat,
GPUNodeLink *GPU_constant(const float *num);
GPUNodeLink *GPU_uniform(const float *num);
GPUNodeLink *GPU_attribute(GPUMaterial *mat, CustomDataType type, const char *name);
GPUNodeLink *GPU_uniform_attribute(GPUMaterial *mat, const char *name, bool use_dupli);
GPUNodeLink *GPU_image(GPUMaterial *mat,
struct Image *ima,
struct ImageUser *iuser,
@ -259,6 +261,31 @@ ListBase GPU_material_attributes(GPUMaterial *material);
ListBase GPU_material_textures(GPUMaterial *material);
ListBase GPU_material_volume_grids(GPUMaterial *material);
typedef struct GPUUniformAttr {
struct GPUUniformAttr *next, *prev;
/* Meaningful part of the attribute set key. */
char name[64]; /* MAX_CUSTOMDATA_LAYER_NAME */
bool use_dupli;
/* Helper fields used by code generation. */
short id;
int users;
} GPUUniformAttr;
typedef struct GPUUniformAttrList {
ListBase list; /* GPUUniformAttr */
/* List length and hash code precomputed for fast lookup and comparison. */
unsigned int count, hash_code;
} GPUUniformAttrList;
GPUUniformAttrList *GPU_material_uniform_attributes(GPUMaterial *material);
struct GHash *GPU_uniform_attr_list_hash_new(const char *info);
void GPU_uniform_attr_list_copy(GPUUniformAttrList *dest, GPUUniformAttrList *src);
void GPU_uniform_attr_list_free(GPUUniformAttrList *set);
#ifdef __cplusplus
}
#endif


@ -413,6 +413,9 @@ void GPU_shader_free_builtin_shaders(void);
* This makes sure the GPUVertexFormat name buffer does not overflow. */
#define GPU_MAX_ATTR 15
/* Determined by the maximum uniform buffer size divided by chunk size. */
#define GPU_MAX_UNIFORM_ATTR 8
#ifdef __cplusplus
}
#endif


@ -53,6 +53,7 @@ void GPU_uniformbuf_unbind(GPUUniformBuf *ubo);
void GPU_uniformbuf_unbind_all(void);
#define GPU_UBO_BLOCK_NAME "nodeTree"
#define GPU_ATTRIBUTE_UBO_BLOCK_NAME "uniformAttrs"
#ifdef __cplusplus
}


@ -38,6 +38,7 @@ extern "C" {
#define GPU_INFO_SIZE 512 /* IMA_MAX_RENDER_TEXT */
#define GLA_PIXEL_OFS 0.375f
typedef struct GHash GHash;
typedef struct GPUViewport GPUViewport;
struct GPUFrameBuffer;
@ -57,6 +58,7 @@ typedef struct ViewportMemoryPool {
struct BLI_memblock *images;
struct GPUUniformBuf **matrices_ubo;
struct GPUUniformBuf **obinfos_ubo;
struct GHash *obattrs_ubo_pool;
uint ubo_len;
} ViewportMemoryPool;


@ -377,6 +377,19 @@ static int codegen_process_uniforms_functions(GPUMaterial *material,
BLI_freelistN(&ubo_inputs);
}
/* Generate the uniform attribute UBO if necessary. */
if (!BLI_listbase_is_empty(&graph->uniform_attrs.list)) {
BLI_dynstr_append(ds, "\nstruct UniformAttributes {\n");
LISTBASE_FOREACH (GPUUniformAttr *, attr, &graph->uniform_attrs.list) {
BLI_dynstr_appendf(ds, " vec4 attr%d;\n", attr->id);
}
BLI_dynstr_append(ds, "};\n");
BLI_dynstr_appendf(ds, "layout (std140) uniform %s {\n", GPU_ATTRIBUTE_UBO_BLOCK_NAME);
BLI_dynstr_append(ds, " UniformAttributes uniform_attrs[DRW_RESOURCE_CHUNK_LEN];\n");
BLI_dynstr_append(ds, "};\n");
BLI_dynstr_append(ds, "#define GET_UNIFORM_ATTR(name) (uniform_attrs[resource_id].name)\n");
}
BLI_dynstr_append(ds, "\n");
return builtins;
@ -480,6 +493,9 @@ static void codegen_call_functions(DynStr *ds, GPUNodeGraph *graph, GPUOutput *f
else if (input->source == GPU_SOURCE_ATTR) {
codegen_convert_datatype(ds, input->attr->gputype, input->type, "var", input->attr->id);
}
else if (input->source == GPU_SOURCE_UNIFORM_ATTR) {
BLI_dynstr_appendf(ds, "GET_UNIFORM_ATTR(attr%d)", input->uniform_attr->id);
}
BLI_dynstr_append(ds, ", ");
}
@ -799,6 +815,7 @@ GPUPass *GPU_generate_pass(GPUMaterial *material,
/* Prune the unused nodes and extract attributes before compiling so the
* generated VBOs are ready to accept the future shader. */
gpu_node_graph_prune_unused(graph);
gpu_node_graph_finalize_uniform_attrs(graph);
int builtins = 0;
LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {


@ -584,6 +584,12 @@ ListBase GPU_material_volume_grids(GPUMaterial *material)
return material->graph.volume_grids;
}
GPUUniformAttrList *GPU_material_uniform_attributes(GPUMaterial *material)
{
GPUUniformAttrList *attrs = &material->graph.uniform_attrs;
return attrs->count > 0 ? attrs : NULL;
}
void GPU_material_output_link(GPUMaterial *material, GPUNodeLink *link)
{
if (!material->graph.outlink) {


@ -137,6 +137,10 @@ static void gpu_node_input_link(GPUNode *node, GPUNodeLink *link, const eGPUType
* can happen if mixing the generic Attribute node with specialized ones. */
CLAMP_MIN(input->attr->gputype, type);
break;
case GPU_NODE_LINK_UNIFORM_ATTR:
input->source = GPU_SOURCE_UNIFORM_ATTR;
input->uniform_attr = link->uniform_attr;
break;
case GPU_NODE_LINK_CONSTANT:
input->source = (type == GPU_CLOSURE) ? GPU_SOURCE_STRUCT : GPU_SOURCE_CONSTANT;
break;
@ -262,8 +266,90 @@ static void gpu_node_output(GPUNode *node, const eGPUType type, GPUNodeLink **li
BLI_addtail(&node->outputs, output);
}
/* Uniform Attribute Functions */
static int uniform_attr_sort_cmp(const void *a, const void *b)
{
const GPUUniformAttr *attr_a = a, *attr_b = b;
int cmps = strcmp(attr_a->name, attr_b->name);
if (cmps != 0) {
return cmps > 0 ? 1 : 0;
}
return (attr_a->use_dupli && !attr_b->use_dupli);
}
static unsigned int uniform_attr_list_hash(const void *key)
{
const GPUUniformAttrList *attrs = key;
return attrs->hash_code;
}
static bool uniform_attr_list_cmp(const void *a, const void *b)
{
const GPUUniformAttrList *set_a = a, *set_b = b;
if (set_a->hash_code != set_b->hash_code || set_a->count != set_b->count) {
return true;
}
GPUUniformAttr *attr_a = set_a->list.first, *attr_b = set_b->list.first;
for (; attr_a && attr_b; attr_a = attr_a->next, attr_b = attr_b->next) {
if (!STREQ(attr_a->name, attr_b->name) || attr_a->use_dupli != attr_b->use_dupli) {
return true;
}
}
return attr_a || attr_b;
}
struct GHash *GPU_uniform_attr_list_hash_new(const char *info)
{
return BLI_ghash_new(uniform_attr_list_hash, uniform_attr_list_cmp, info);
}
void GPU_uniform_attr_list_copy(GPUUniformAttrList *dest, GPUUniformAttrList *src)
{
dest->count = src->count;
dest->hash_code = src->hash_code;
BLI_duplicatelist(&dest->list, &src->list);
}
void GPU_uniform_attr_list_free(GPUUniformAttrList *set)
{
set->count = 0;
set->hash_code = 0;
BLI_freelistN(&set->list);
}
void gpu_node_graph_finalize_uniform_attrs(GPUNodeGraph *graph)
{
GPUUniformAttrList *attrs = &graph->uniform_attrs;
BLI_assert(attrs->count == BLI_listbase_count(&attrs->list));
/* Sort the attributes by name to ensure a stable order. */
BLI_listbase_sort(&attrs->list, uniform_attr_sort_cmp);
/* Compute the indices and the hash code. */
int next_id = 0;
attrs->hash_code = 0;
LISTBASE_FOREACH (GPUUniformAttr *, attr, &attrs->list) {
attr->id = next_id++;
attrs->hash_code ^= BLI_ghashutil_strhash_p(attr->name);
if (attr->use_dupli) {
attrs->hash_code ^= BLI_ghashutil_uinthash(attr->id);
}
}
}
/* Attributes and Textures */
/** Add a new varying attribute of given type and name. Returns NULL if out of slots. */
static GPUMaterialAttribute *gpu_node_graph_add_attribute(GPUNodeGraph *graph,
CustomDataType type,
const char *name)
@ -299,6 +385,38 @@ static GPUMaterialAttribute *gpu_node_graph_add_attribute(GPUNodeGraph *graph,
return attr;
}
/** Add a new uniform attribute of given type and name. Returns NULL if out of slots. */
static GPUUniformAttr *gpu_node_graph_add_uniform_attribute(GPUNodeGraph *graph,
const char *name,
bool use_dupli)
{
/* Find existing attribute. */
GPUUniformAttrList *attrs = &graph->uniform_attrs;
GPUUniformAttr *attr = attrs->list.first;
for (; attr; attr = attr->next) {
if (STREQ(attr->name, name) && attr->use_dupli == use_dupli) {
break;
}
}
/* Add new requested attribute if it's within GPU limits. */
if (attr == NULL && attrs->count < GPU_MAX_UNIFORM_ATTR) {
attr = MEM_callocN(sizeof(*attr), __func__);
STRNCPY(attr->name, name);
attr->use_dupli = use_dupli;
attr->id = -1;
BLI_addtail(&attrs->list, attr);
attrs->count++;
}
if (attr != NULL) {
attr->users++;
}
return attr;
}
static GPUMaterialTexture *gpu_node_graph_add_texture(GPUNodeGraph *graph,
Image *ima,
ImageUser *iuser,
@ -372,6 +490,7 @@ GPUNodeLink *GPU_attribute(GPUMaterial *mat, const CustomDataType type, const ch
GPUNodeGraph *graph = gpu_material_node_graph(mat);
GPUMaterialAttribute *attr = gpu_node_graph_add_attribute(graph, type, name);
/* Dummy fallback if out of slots. */
if (attr == NULL) {
static const float zero_data[GPU_MAX_CONSTANT_DATA] = {0.0f};
return GPU_constant(zero_data);
@ -383,6 +502,23 @@ GPUNodeLink *GPU_attribute(GPUMaterial *mat, const CustomDataType type, const ch
return link;
}
GPUNodeLink *GPU_uniform_attribute(GPUMaterial *mat, const char *name, bool use_dupli)
{
GPUNodeGraph *graph = gpu_material_node_graph(mat);
GPUUniformAttr *attr = gpu_node_graph_add_uniform_attribute(graph, name, use_dupli);
/* Dummy fallback if out of slots. */
if (attr == NULL) {
static const float zero_data[GPU_MAX_CONSTANT_DATA] = {0.0f};
return GPU_constant(zero_data);
}
GPUNodeLink *link = gpu_node_link_create();
link->link_type = GPU_NODE_LINK_UNIFORM_ATTR;
link->uniform_attr = attr;
return link;
}
GPUNodeLink *GPU_constant(const float *num)
{
GPUNodeLink *link = gpu_node_link_create();
@ -619,6 +755,9 @@ static void gpu_inputs_free(ListBase *inputs)
if (input->source == GPU_SOURCE_ATTR) {
input->attr->users--;
}
else if (input->source == GPU_SOURCE_UNIFORM_ATTR) {
input->uniform_attr->users--;
}
else if (ELEM(input->source, GPU_SOURCE_TEX, GPU_SOURCE_TEX_TILED_MAPPING)) {
input->texture->users--;
}
@ -674,6 +813,7 @@ void gpu_node_graph_free(GPUNodeGraph *graph)
BLI_freelistN(&graph->volume_grids);
BLI_freelistN(&graph->textures);
BLI_freelistN(&graph->attributes);
GPU_uniform_attr_list_free(&graph->uniform_attrs);
}
/* Prune Unused Nodes */
@ -738,4 +878,13 @@ void gpu_node_graph_prune_unused(GPUNodeGraph *graph)
BLI_freelinkN(&graph->volume_grids, grid);
}
}
GPUUniformAttrList *uattrs = &graph->uniform_attrs;
LISTBASE_FOREACH_MUTABLE (GPUUniformAttr *, attr, &uattrs->list) {
if (attr->users == 0) {
BLI_freelinkN(&uattrs->list, attr);
uattrs->count--;
}
}
}


@ -42,6 +42,7 @@ typedef enum eGPUDataSource {
GPU_SOURCE_CONSTANT,
GPU_SOURCE_UNIFORM,
GPU_SOURCE_ATTR,
GPU_SOURCE_UNIFORM_ATTR,
GPU_SOURCE_BUILTIN,
GPU_SOURCE_STRUCT,
GPU_SOURCE_TEX,
@ -53,6 +54,7 @@ typedef enum eGPUDataSource {
typedef enum {
GPU_NODE_LINK_NONE = 0,
GPU_NODE_LINK_ATTR,
GPU_NODE_LINK_UNIFORM_ATTR,
GPU_NODE_LINK_BUILTIN,
GPU_NODE_LINK_COLORBAND,
GPU_NODE_LINK_CONSTANT,
@ -96,6 +98,8 @@ struct GPUNodeLink {
struct GPUOutput *output;
/* GPU_NODE_LINK_ATTR */
struct GPUMaterialAttribute *attr;
/* GPU_NODE_LINK_UNIFORM_ATTR */
struct GPUUniformAttr *uniform_attr;
/* GPU_NODE_LINK_IMAGE_BLENDER */
struct GPUMaterialTexture *texture;
};
@ -130,6 +134,8 @@ typedef struct GPUInput {
struct GPUMaterialTexture *texture;
/* GPU_SOURCE_ATTR */
struct GPUMaterialAttribute *attr;
/* GPU_SOURCE_UNIFORM_ATTR */
struct GPUUniformAttr *uniform_attr;
/* GPU_SOURCE_VOLUME_GRID | GPU_SOURCE_VOLUME_GRID_TRANSFORM */
struct GPUMaterialVolumeGrid *volume_grid;
};
@ -146,11 +152,15 @@ typedef struct GPUNodeGraph {
ListBase attributes;
ListBase textures;
ListBase volume_grids;
/* The list of uniform attributes. */
GPUUniformAttrList uniform_attrs;
} GPUNodeGraph;
/* Node Graph */
void gpu_node_graph_prune_unused(GPUNodeGraph *graph);
void gpu_node_graph_finalize_uniform_attrs(GPUNodeGraph *graph);
void gpu_node_graph_free_nodes(GPUNodeGraph *graph);
void gpu_node_graph_free(GPUNodeGraph *graph);


@ -1023,6 +1023,9 @@ void GPU_viewport_free(GPUViewport *viewport)
}
BLI_memblock_destroy(viewport->vmempool.images, NULL);
}
if (viewport->vmempool.obattrs_ubo_pool != NULL) {
DRW_uniform_attrs_pool_free(viewport->vmempool.obattrs_ubo_pool);
}
for (int i = 0; i < viewport->vmempool.ubo_len; i++) {
GPU_uniformbuf_free(viewport->vmempool.matrices_ubo[i]);


@ -923,6 +923,8 @@ typedef struct NodeTexMagic {
typedef struct NodeShaderAttribute {
char name[64];
int type;
char _pad[4];
} NodeShaderAttribute;
typedef struct NodeShaderVectTransform {
@ -1092,6 +1094,13 @@ typedef struct NodeDenoise {
#define SHD_VECT_TRANSFORM_SPACE_OBJECT 1
#define SHD_VECT_TRANSFORM_SPACE_CAMERA 2
/* attribute */
enum {
SHD_ATTRIBUTE_GEOMETRY = 0,
SHD_ATTRIBUTE_OBJECT = 1,
SHD_ATTRIBUTE_INSTANCER = 2,
};
/* toon modes */
#define SHD_TOON_DIFFUSE 0
#define SHD_TOON_GLOSSY 1


@ -4362,10 +4362,38 @@ static void def_sh_vector_rotate(StructRNA *srna)
static void def_sh_attribute(StructRNA *srna)
{
static const EnumPropertyItem prop_attribute_type[] = {
{SHD_ATTRIBUTE_GEOMETRY,
"GEOMETRY",
0,
"Geometry",
"The attribute is associated with the object geometry, and its value "
"varies from vertex to vertex, or within the object volume"},
{SHD_ATTRIBUTE_OBJECT,
"OBJECT",
0,
"Object",
"The attribute is associated with the object or mesh datablock itself, "
"and its value is uniform"},
{SHD_ATTRIBUTE_INSTANCER,
"INSTANCER",
0,
"Instancer",
"The attribute is associated with the instancer particle system or object, "
"falling back to the Object mode if the attribute isn't found, or the object "
"is not instanced"},
{0, NULL, 0, NULL, NULL},
};
PropertyRNA *prop;
RNA_def_struct_sdna_from(srna, "NodeShaderAttribute", "storage");
prop = RNA_def_property(srna, "attribute_type", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_sdna(prop, NULL, "type");
RNA_def_property_enum_items(prop, prop_attribute_type);
RNA_def_property_ui_text(prop, "Attribute Type", "General type of the attribute");
RNA_def_property_update(prop, NC_NODE | NA_EDITED, "rna_Node_update");
prop = RNA_def_property(srna, "attribute_name", PROP_STRING, PROP_NONE);
RNA_def_property_string_sdna(prop, NULL, "name");
RNA_def_property_ui_text(prop, "Attribute Name", "");


@ -42,8 +42,9 @@ static int node_shader_gpu_attribute(GPUMaterial *mat,
GPUNodeStack *out)
{
NodeShaderAttribute *attr = node->storage;
bool is_varying = attr->type == SHD_ATTRIBUTE_GEOMETRY;
if (GPU_material_is_volume_shader(mat)) {
if (GPU_material_is_volume_shader(mat) && is_varying) {
if (out[0].hasoutput) {
out[0].link = GPU_volume_grid(mat, attr->name, GPU_VOLUME_DEFAULT_0);
}
@ -61,7 +62,15 @@ static int node_shader_gpu_attribute(GPUMaterial *mat,
return 1;
}
GPUNodeLink *cd_attr = GPU_attribute(mat, CD_AUTO_FROM_NAME, attr->name);
GPUNodeLink *cd_attr;
if (is_varying) {
cd_attr = GPU_attribute(mat, CD_AUTO_FROM_NAME, attr->name);
}
else {
cd_attr = GPU_uniform_attribute(mat, attr->name, attr->type == SHD_ATTRIBUTE_INSTANCER);
}
GPU_stack_link(mat, node, "node_attribute", in, out, cd_attr);
/* for each output. */