Cleanup: Move draw_cache_impl_pointcloud.c to C++

Hans Goudey 2022-08-09 11:10:44 -05:00
parent d832d993c5
commit 4ab6baefb1
3 changed files with 286 additions and 3 deletions
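Note on the change: compiling this file as C++ mainly means spelling out conversions that C performs implicitly. The new `.cc` file casts the `void *` batch-cache pointer with `static_cast`, allocates the cache through the typed `MEM_cnew` helper, and (in the GPU_batch.h hunk below) casts the literal `0` to `eGPUBatchFlag`. A minimal sketch of the pointer-cast difference, using a hypothetical `Cache` struct that stands in for `PointCloudBatchCache`:

/* Hypothetical stand-ins: `Cache` and `runtime_data` are illustration only,
 * not Blender API. */
struct Cache {
  int value;
};

void *runtime_data = nullptr;

void example()
{
  /* Valid C, but a compile error in C++ (no implicit void* -> Cache*): */
  /* Cache *cache = runtime_data; */

  /* C++ requires the conversion to be explicit, which is what this commit
   * adds at each access to the `void *` cache pointer: */
  Cache *cache = static_cast<Cache *>(runtime_data);
  (void)cache;
}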

source/blender/draw/CMakeLists.txt

@@ -77,7 +77,7 @@ set(SRC
   intern/draw_cache_impl_mesh.cc
   intern/draw_cache_impl_metaball.c
   intern/draw_cache_impl_particles.c
-  intern/draw_cache_impl_pointcloud.c
+  intern/draw_cache_impl_pointcloud.cc
   intern/draw_cache_impl_subdivision.cc
   intern/draw_cache_impl_volume.c
   intern/draw_color_management.cc

source/blender/draw/intern/draw_cache_impl_pointcloud.cc

@@ -0,0 +1,281 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2017 Blender Foundation. All rights reserved. */

/** \file
 * \ingroup draw
 *
 * \brief PointCloud API for render engines
 */

#include <string.h>

#include "MEM_guardedalloc.h"

#include "BLI_math_base.h"
#include "BLI_math_vector.h"
#include "BLI_utildefines.h"

#include "DNA_object_types.h"
#include "DNA_pointcloud_types.h"

#include "BKE_customdata.h"
#include "BKE_pointcloud.h"

#include "GPU_batch.h"

#include "draw_cache_impl.h" /* own include */

static void pointcloud_batch_cache_clear(PointCloud *pointcloud);

/* ---------------------------------------------------------------------- */
/* PointCloud GPUBatch Cache */

struct PointCloudBatchCache {
  GPUVertBuf *pos;  /* Position and radius. */
  GPUVertBuf *geom; /* Instanced geometry for each point in the cloud (small sphere). */
  GPUIndexBuf *geom_indices;

  GPUBatch *dots;
  GPUBatch *surface;
  GPUBatch **surface_per_mat;

  /* settings to determine if cache is invalid */
  bool is_dirty;

  int mat_len;
};

/* GPUBatch cache management. */

static PointCloudBatchCache *pointcloud_batch_cache_get(PointCloud *pointcloud)
{
  return static_cast<PointCloudBatchCache *>(pointcloud->batch_cache);
}

static bool pointcloud_batch_cache_valid(PointCloud *pointcloud)
{
  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);

  if (cache == NULL) {
    return false;
  }
  if (cache->mat_len != DRW_pointcloud_material_count_get(pointcloud)) {
    return false;
  }
  return cache->is_dirty == false;
}

static void pointcloud_batch_cache_init(PointCloud *pointcloud)
{
  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);

  if (!cache) {
    cache = MEM_cnew<PointCloudBatchCache>(__func__);
    pointcloud->batch_cache = cache;
  }
  else {
    memset(cache, 0, sizeof(*cache));
  }

  cache->mat_len = DRW_pointcloud_material_count_get(pointcloud);
  cache->surface_per_mat = static_cast<GPUBatch **>(
      MEM_callocN(sizeof(GPUBatch *) * cache->mat_len, __func__));

  cache->is_dirty = false;
}

void DRW_pointcloud_batch_cache_validate(PointCloud *pointcloud)
{
  if (!pointcloud_batch_cache_valid(pointcloud)) {
    pointcloud_batch_cache_clear(pointcloud);
    pointcloud_batch_cache_init(pointcloud);
  }
}

void DRW_pointcloud_batch_cache_dirty_tag(PointCloud *pointcloud, int mode)
{
  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
  if (cache == NULL) {
    return;
  }
  switch (mode) {
    case BKE_POINTCLOUD_BATCH_DIRTY_ALL:
      cache->is_dirty = true;
      break;
    default:
      BLI_assert(0);
  }
}

static void pointcloud_batch_cache_clear(PointCloud *pointcloud)
{
  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
  if (!cache) {
    return;
  }

  GPU_BATCH_DISCARD_SAFE(cache->dots);
  GPU_BATCH_DISCARD_SAFE(cache->surface);
  GPU_VERTBUF_DISCARD_SAFE(cache->pos);
  GPU_VERTBUF_DISCARD_SAFE(cache->geom);
  GPU_INDEXBUF_DISCARD_SAFE(cache->geom_indices);

  if (cache->surface_per_mat) {
    for (int i = 0; i < cache->mat_len; i++) {
      GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
    }
  }
  MEM_SAFE_FREE(cache->surface_per_mat);
}

void DRW_pointcloud_batch_cache_free(PointCloud *pointcloud)
{
  pointcloud_batch_cache_clear(pointcloud);
  MEM_SAFE_FREE(pointcloud->batch_cache);
}

static void pointcloud_batch_cache_ensure_pos(Object *ob, PointCloudBatchCache *cache)
{
  if (cache->pos != NULL) {
    return;
  }

  PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
  const float(*positions)[3] = (float(*)[3])CustomData_get_layer_named(
      &pointcloud->pdata, CD_PROP_FLOAT3, "position");
  const float *radii = (float *)CustomData_get_layer_named(
      &pointcloud->pdata, CD_PROP_FLOAT, "radius");
  const bool has_radius = radii != NULL;

  static GPUVertFormat format = {0};
  static GPUVertFormat format_no_radius = {0};
  static uint pos;
  if (format.attr_len == 0) {
    /* initialize vertex format */
    /* From the opengl wiki:
     * Note that size does not have to exactly match the size used by the vertex shader. If the
     * vertex shader has fewer components than the attribute provides, then the extras are ignored.
     * If the vertex shader has more components than the array provides, the extras are given
     * values from the vector (0, 0, 0, 1) for the missing XYZW components.
     */
    pos = GPU_vertformat_attr_add(&format_no_radius, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
  }

  cache->pos = GPU_vertbuf_create_with_format(has_radius ? &format : &format_no_radius);
  GPU_vertbuf_data_alloc(cache->pos, pointcloud->totpoint);

  if (has_radius) {
    float(*vbo_data)[4] = (float(*)[4])GPU_vertbuf_get_data(cache->pos);
    for (int i = 0; i < pointcloud->totpoint; i++) {
      copy_v3_v3(vbo_data[i], positions[i]);
      /* TODO(fclem): remove multiplication here.
       * Here only for keeping the size correct for now. */
      vbo_data[i][3] = radii[i] * 100.0f;
    }
  }
  else {
    GPU_vertbuf_attr_fill(cache->pos, pos, positions);
  }
}

static const float half_octahedron_normals[5][3] = {
    {0.0f, 0.0f, 1.0f},
    {1.0f, 0.0f, 0.0f},
    {0.0f, 1.0f, 0.0f},
    {-1.0f, 0.0f, 0.0f},
    {0.0f, -1.0f, 0.0f},
};

static const uint half_octahedron_tris[4][3] = {
    {0, 1, 2},
    {0, 2, 3},
    {0, 3, 4},
    {0, 4, 1},
};

static void pointcloud_batch_cache_ensure_geom(Object *UNUSED(ob), PointCloudBatchCache *cache)
{
  if (cache->geom != NULL) {
    return;
  }

  static GPUVertFormat format = {0};
  static uint pos;
  if (format.attr_len == 0) {
    /* initialize vertex format */
    pos = GPU_vertformat_attr_add(&format, "pos_inst", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPU_vertformat_alias_add(&format, "nor");
  }

  cache->geom = GPU_vertbuf_create_with_format(&format);
  GPU_vertbuf_data_alloc(cache->geom, ARRAY_SIZE(half_octahedron_normals));

  GPU_vertbuf_attr_fill(cache->geom, pos, half_octahedron_normals);

  GPUIndexBufBuilder builder;
  GPU_indexbuf_init(&builder,
                    GPU_PRIM_TRIS,
                    ARRAY_SIZE(half_octahedron_tris),
                    ARRAY_SIZE(half_octahedron_normals));

  for (int i = 0; i < ARRAY_SIZE(half_octahedron_tris); i++) {
    GPU_indexbuf_add_tri_verts(&builder, UNPACK3(half_octahedron_tris[i]));
  }

  cache->geom_indices = GPU_indexbuf_build(&builder);
}

GPUBatch *DRW_pointcloud_batch_cache_get_dots(Object *ob)
{
  PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);

  if (cache->dots == NULL) {
    pointcloud_batch_cache_ensure_pos(ob, cache);
    cache->dots = GPU_batch_create(GPU_PRIM_POINTS, cache->pos, NULL);
  }

  return cache->dots;
}

GPUBatch *DRW_pointcloud_batch_cache_get_surface(Object *ob)
{
  PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);

  if (cache->surface == NULL) {
    pointcloud_batch_cache_ensure_pos(ob, cache);
    pointcloud_batch_cache_ensure_geom(ob, cache);

    cache->surface = GPU_batch_create(GPU_PRIM_TRIS, cache->geom, cache->geom_indices);
    GPU_batch_instbuf_add_ex(cache->surface, cache->pos, false);
  }

  return cache->surface;
}

GPUBatch **DRW_cache_pointcloud_surface_shaded_get(Object *ob,
                                                   struct GPUMaterial **UNUSED(gpumat_array),
                                                   uint gpumat_array_len)
{
  PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
  BLI_assert(cache->mat_len == gpumat_array_len);
  UNUSED_VARS(gpumat_array_len);

  if (cache->surface_per_mat[0] == NULL) {
    pointcloud_batch_cache_ensure_pos(ob, cache);
    pointcloud_batch_cache_ensure_geom(ob, cache);

    cache->surface_per_mat[0] = GPU_batch_create(GPU_PRIM_TRIS, cache->geom, cache->geom_indices);
    GPU_batch_instbuf_add_ex(cache->surface_per_mat[0], cache->pos, false);
  }

  return cache->surface_per_mat;
}

int DRW_pointcloud_material_count_get(PointCloud *pointcloud)
{
  return max_ii(1, pointcloud->totcol);
}
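
For orientation, a sketch of how a caller might drive this API: validate the cache for a point-cloud object, then request the lazily built batches. The wrapper function and its name are assumptions for illustration; the DRW_* calls are the ones defined in the file above.

/* Sketch only: `draw_pointcloud_example` is hypothetical; like the functions
 * above, it assumes `ob` is an object whose data is a PointCloud. */
static void draw_pointcloud_example(Object *ob)
{
  PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);

  /* Rebuild the cache if it was tagged dirty or the material count changed. */
  DRW_pointcloud_batch_cache_validate(pointcloud);

  /* Batches and their vertex/index buffers are created on first request. */
  GPUBatch *dots = DRW_pointcloud_batch_cache_get_dots(ob);
  GPUBatch *surface = DRW_pointcloud_batch_cache_get_surface(ob);
  UNUSED_VARS(dots, surface);
}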

source/blender/gpu/GPU_batch.h

@@ -93,8 +93,10 @@ void GPU_batch_init_ex(GPUBatch *batch,
  */
 void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
 
-#define GPU_batch_create(prim, verts, elem) GPU_batch_create_ex(prim, verts, elem, 0)
-#define GPU_batch_init(batch, prim, verts, elem) GPU_batch_init_ex(batch, prim, verts, elem, 0)
+#define GPU_batch_create(prim, verts, elem) \
+  GPU_batch_create_ex(prim, verts, elem, (eGPUBatchFlag)0)
+#define GPU_batch_init(batch, prim, verts, elem) \
+  GPU_batch_init_ex(batch, prim, verts, elem, (eGPUBatchFlag)0)
 
 /**
  * Same as discard but does not free. (does not call free callback).
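
The macro rewrite above is the part of this commit that keeps GPU_batch.h usable from C++: C lets an integer constant convert implicitly to an enum parameter, while C++ rejects that, so the `0` flag argument must be cast to `eGPUBatchFlag` explicitly. A reduced, self-contained illustration (the `Flag`/`take_flag` names are hypothetical, not GPU module API):

enum Flag {
  FLAG_NONE = 0,
  FLAG_A = (1 << 0),
};

static void take_flag(Flag flag)
{
  (void)flag;
}

void example()
{
  /* take_flag(0); */   /* Accepted by C, a compile error in C++. */
  take_flag((Flag)0);   /* Explicit cast, as in the updated macros. */
  take_flag(FLAG_NONE); /* Equivalent: name the zero value directly. */
}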