Mesh Batch Cache: Port Texture paint batches to new batch request

Clément Foucault 2018-12-17 11:37:27 +01:00
parent 9bb70e59b8
commit 7ac49a07c6
7 changed files with 95 additions and 173 deletions
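The gist of the port: the texture-paint cache getters no longer build geometry on the spot; they only flag the customdata layers they need and request (initially empty) batches, which DRW_mesh_batch_cache_create_requested() fills in later. A condensed sketch of that flow, assembled from the hunks below (simplified, not the verbatim code):

/* Cache getter (draw manager side): record that the active UV layer is
 * needed and hand back a batch request. Nothing is built yet. */
GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
{
	MeshBatchCache *cache = mesh_batch_cache_get(me);
	texpaint_request_active_uv(cache, me);
	return DRW_batch_request(&cache->batch.surface);
}

/* Later, inside DRW_mesh_batch_cache_create_requested(), once every engine
 * has populated its cache, the requested buffers are attached and built. */
if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
	DRW_ibo_request(cache->batch.surface, &cache->ibo.surface_tris);
	DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_pos_nor);
	if (cache->cd_lused[CD_MLOOPUV] != 0) {
		/* Active UV layer, flagged above by texpaint_request_active_uv(). */
		DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_uv_tan);
	}
}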

View File

@@ -8,7 +8,8 @@ uniform mat3 NormalMatrix;
#ifndef HAIR_SHADER
in vec3 pos;
in vec3 nor;
in vec2 uv;
in vec2 u; /* active texture layer */
#define uv u
#else /* HAIR_SHADER */
# ifdef V3D_SHADING_TEXTURE_COLOR
uniform samplerBuffer u; /* active texture layer */

View File

@@ -822,23 +822,16 @@ void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
const Mesh *me = ob->data;
if (me->mloopuv) {
const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
struct GPUBatch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
if (materials_len > 0 && geom_array) {
for (int i = 0; i < materials_len; i++) {
if (geom_array[i] == NULL) {
continue;
}
Material *mat = give_current_material(ob, i + 1);
Image *image;
ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
int color_type = workbench_material_determine_color_type(wpd, image, ob);
material = get_or_create_material_data(vedata, ob, mat, image, color_type);
DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
}
is_drawn = true;
struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
for (int i = 0; i < materials_len; i++) {
Material *mat = give_current_material(ob, i + 1);
Image *image;
ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
int color_type = workbench_material_determine_color_type(wpd, image, ob);
material = get_or_create_material_data(vedata, ob, mat, image, color_type);
DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
}
is_drawn = true;
}
}

View File

@@ -503,33 +503,26 @@ void workbench_forward_cache_populate(WORKBENCH_Data *vedata, Object *ob)
const Mesh *me = ob->data;
if (me->mloopuv) {
const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
struct GPUBatch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
if (materials_len > 0 && geom_array) {
for (int i = 0; i < materials_len; i++) {
if (geom_array[i] == NULL) {
continue;
}
struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
for (int i = 0; i < materials_len; i++) {
Material *mat = give_current_material(ob, i + 1);
Image *image;
ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
/* use OB_SOLID when no texture could be determined */
Material *mat = give_current_material(ob, i + 1);
Image *image;
ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
int color_type = wpd->shading.color_type;
if (color_type == V3D_SHADING_TEXTURE_COLOR) {
/* use OB_SOLID when no texture could be determined */
int color_type = wpd->shading.color_type;
if (color_type == V3D_SHADING_TEXTURE_COLOR) {
/* use OB_SOLID when no texture could be determined */
if (image == NULL) {
color_type = V3D_SHADING_MATERIAL_COLOR;
}
if (image == NULL) {
color_type = V3D_SHADING_MATERIAL_COLOR;
}
material = get_or_create_material_data(vedata, ob, mat, image, color_type);
DRW_shgroup_call_object_add(material->shgrp_object_outline, geom_array[i], ob);
DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
}
is_drawn = true;
material = get_or_create_material_data(vedata, ob, mat, image, color_type);
DRW_shgroup_call_object_add(material->shgrp_object_outline, geom_array[i], ob);
DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
}
is_drawn = true;
}
}

View File

@@ -323,6 +323,17 @@ static void mesh_cd_layers_type_merge(
}
}
static void mesh_cd_calc_active_uv_layer(
const Mesh *me, ushort cd_lused[CD_NUMTYPES])
{
const CustomData *cd_ldata = (me->edit_btmesh) ? &me->edit_btmesh->bm->ldata : &me->ldata;
int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
if (layer != -1) {
cd_lused[CD_MLOOPUV] |= (1 << layer);
}
}
static void mesh_cd_calc_used_gpu_layers(
const Mesh *me, uchar cd_vused[CD_NUMTYPES], ushort cd_lused[CD_NUMTYPES],
struct GPUMaterial **gpumat_array, int gpumat_array_len)
@@ -2625,84 +2636,6 @@ void DRW_mesh_batch_cache_free(Mesh *me)
/* GPUBatch cache usage. */
static GPUVertBuf *mesh_batch_cache_get_tri_uv_active(
MeshRenderData *rdata, MeshBatchCache *cache)
{
BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_LOOPUV));
if (cache->tri_aligned_uv == NULL) {
const MLoopUV *mloopuv = rdata->mloopuv;
int layer_offset;
BMEditMesh *embm = rdata->edit_bmesh;
/* edit mode */
if (rdata->edit_bmesh) {
BMesh *bm = embm->bm;
layer_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
if (layer_offset == -1) {
return NULL;
}
}
else if (mloopuv == NULL) {
return NULL;
}
uint vidx = 0;
static GPUVertFormat format = { 0 };
static struct { uint uv; } attr_id;
if (format.attr_len == 0) {
attr_id.uv = GPU_vertformat_attr_add(&format, "uv", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
}
const int tri_len = mesh_render_data_looptri_len_get(rdata);
GPUVertBuf *vbo = cache->tri_aligned_uv = GPU_vertbuf_create_with_format(&format);
const int vbo_len_capacity = tri_len * 3;
int vbo_len_used = 0;
GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
/* get UVs from the active UVMap */
if (rdata->edit_bmesh) {
for (uint i = 0; i < tri_len; i++) {
const BMLoop **bm_looptri = (const BMLoop **)embm->looptris[i];
if (BM_elem_flag_test(bm_looptri[0]->f, BM_ELEM_HIDDEN)) {
continue;
}
for (uint t = 0; t < 3; t++) {
const BMLoop *loop = bm_looptri[t];
const int index = BM_elem_index_get(loop);
if (index != -1) {
const float *elem = ((MLoopUV *)BM_ELEM_CD_GET_VOID_P(loop, layer_offset))->uv;
GPU_vertbuf_attr_set(vbo, attr_id.uv, vidx++, elem);
}
}
}
}
else {
/* object mode */
for (int i = 0; i < tri_len; i++) {
const MLoopTri *mlt = &rdata->mlooptri[i];
GPU_vertbuf_attr_set(vbo, attr_id.uv, vidx++, mloopuv[mlt->tri[0]].uv);
GPU_vertbuf_attr_set(vbo, attr_id.uv, vidx++, mloopuv[mlt->tri[1]].uv);
GPU_vertbuf_attr_set(vbo, attr_id.uv, vidx++, mloopuv[mlt->tri[2]].uv);
}
}
vbo_len_used = vidx;
if (vbo_len_capacity != vbo_len_used) {
GPU_vertbuf_data_resize(vbo, vbo_len_used);
}
UNUSED_VARS_NDEBUG(vbo_len_used);
}
return cache->tri_aligned_uv;
}
static void mesh_create_pos_and_nor_tess(MeshRenderData *rdata, GPUVertBuf *vbo, bool use_hide)
{
static GPUVertFormat format = { 0 };
@@ -5155,13 +5088,12 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(
char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
BLI_assert(gpumat_array_len == cache->mat_len);
uchar cd_vneeded[CD_NUMTYPES] = {0};
ushort cd_lneeded[CD_NUMTYPES] = {0};
mesh_cd_calc_used_gpu_layers(me, cd_vneeded, cd_lneeded, gpumat_array, gpumat_array_len);
BLI_assert(gpumat_array_len == cache->mat_len);
bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_vused, cache->cd_lused,
cd_vneeded, cd_lneeded);
if (cd_overlap == false) {
@@ -5175,48 +5107,50 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(
&cache->auto_layer_is_srgb,
&cache->auto_layer_len);
}
if (auto_layer_names) {
*auto_layer_names = cache->auto_layer_names;
*auto_layer_is_srgb = cache->auto_layer_is_srgb;
*auto_layer_count = cache->auto_layer_len;
}
for (int i = 0; i < cache->mat_len; ++i) {
DRW_batch_request(&cache->surf_per_mat[i]);
}
return cache->surf_per_mat;
}
static void texpaint_request_active_uv(MeshBatchCache *cache, Mesh *me)
{
uchar cd_vneeded[CD_NUMTYPES] = {0};
ushort cd_lneeded[CD_NUMTYPES] = {0};
mesh_cd_calc_active_uv_layer(me, cd_lneeded);
if (cd_lneeded[CD_MLOOPUV] == 0) {
/* This should not happen. */
BLI_assert(!"No uv layer available in texpaint, but batches requested anyway!");
}
bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_vused, cache->cd_lused,
cd_vneeded, cd_lneeded);
if (cd_overlap == false) {
/* XXX TODO(fclem): We are writing to the batch cache here. Need to make this thread safe. */
mesh_cd_layers_type_merge(cache->cd_vneeded, cache->cd_lneeded,
cd_vneeded, cd_lneeded);
}
}
GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
UNUSED_VARS(cache);
/* TODO */
return NULL;
texpaint_request_active_uv(cache, me);
for (int i = 0; i < cache->mat_len; ++i) {
DRW_batch_request(&cache->surf_per_mat[i]);
}
return cache->surf_per_mat;
}
GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
if (cache->texpaint_triangles_single == NULL) {
/* create batch from DM */
const int datatype =
MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOPUV;
MeshRenderData *rdata = mesh_render_data_create(me, datatype);
GPUVertBuf *vbo = mesh_batch_cache_get_tri_pos_and_normals_final(rdata, cache, false);
cache->texpaint_triangles_single = GPU_batch_create(
GPU_PRIM_TRIS, vbo, NULL);
GPUVertBuf *vbo_uv = mesh_batch_cache_get_tri_uv_active(rdata, cache);
if (vbo_uv) {
GPU_batch_vertbuf_add(cache->texpaint_triangles_single, vbo_uv);
}
mesh_render_data_free(rdata);
}
return cache->texpaint_triangles_single;
texpaint_request_active_uv(cache, me);
return DRW_batch_request(&cache->batch.surface);
}
GPUBatch *DRW_mesh_batch_cache_get_texpaint_loop_wire(Mesh *me)
@@ -5799,12 +5733,12 @@ void DRW_mesh_batch_cache_create_requested(Object *UNUSED(ob), Mesh *me)
}
}
}
/* We can't discard batches at this point as they have been
* referenced for drawing. Just clear them in place. */
for (int i = 0; i < cache->mat_len; ++i) {
/* We can't discard batches at this point as they have been
* referenced for drawing. Just clear them in place. */
GPU_batch_clear(cache->surf_per_mat[i]);
memset(cache->surf_per_mat[i], 0, sizeof(*cache->surf_per_mat[i]));
GPU_BATCH_CLEAR_SAFE(cache->surf_per_mat[i]);
}
GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
mesh_cd_layers_type_merge(cache->cd_vused, cache->cd_lused,
cache->cd_vneeded, cache->cd_lneeded);
@@ -5818,6 +5752,10 @@ void DRW_mesh_batch_cache_create_requested(Object *UNUSED(ob), Mesh *me)
if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.surface, &cache->ibo.surface_tris);
DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_pos_nor);
/* For paint overlay. Active layer should have been queried. */
if (cache->cd_lused[CD_MLOOPUV] != 0) {
DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_uv_tan);
}
}
if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
DRW_vbo_request(cache->batch.all_verts, &cache->ordered.pos_nor);

View File

@@ -295,40 +295,31 @@ static void PAINT_TEXTURE_cache_populate(void *vedata, Object *ob)
const bool use_surface = draw_ctx->v3d->overlay.texture_paint_mode_opacity != 0.0; //DRW_object_is_mode_shade(ob) == true;
const bool use_material_slots = (scene->toolsettings->imapaint.mode == IMAGEPAINT_MODE_MATERIAL);
const bool use_face_sel = (me_orig->editflag & ME_EDIT_PAINT_FACE_SEL) != 0;
bool ok = false;
if (use_surface) {
if (me->mloopuv != NULL) {
if (use_material_slots || use_face_sel) {
struct GPUBatch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
if ((me->totcol == 0) || (geom_array == NULL)) {
struct GPUBatch *geom = DRW_cache_mesh_surface_get(ob);
DRW_shgroup_call_add(stl->g_data->shgroup_fallback, geom, ob->obmat);
ok = true;
}
else {
for (int i = 0; i < me->totcol; i++) {
const int index = use_material_slots ? i : 0;
if (stl->g_data->shgroup_image_array[index]) {
DRW_shgroup_call_add(stl->g_data->shgroup_image_array[index], geom_array[i], ob->obmat);
}
else {
DRW_shgroup_call_add(stl->g_data->shgroup_fallback, geom_array[i], ob->obmat);
}
ok = true;
if (use_material_slots) {
int mat_nr = max_ii(1, me->totcol);
struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
for (int i = 0; i < mat_nr; i++) {
const int index = use_material_slots ? i : 0;
if (stl->g_data->shgroup_image_array[index]) {
DRW_shgroup_call_add(stl->g_data->shgroup_image_array[index], geom_array[i], ob->obmat);
}
else {
DRW_shgroup_call_add(stl->g_data->shgroup_fallback, geom_array[i], ob->obmat);
}
}
}
else {
struct GPUBatch *geom = DRW_cache_mesh_surface_texpaint_single_get(ob);
if (geom && stl->g_data->shgroup_image_array[0]) {
if (stl->g_data->shgroup_image_array[0]) {
struct GPUBatch *geom = DRW_cache_mesh_surface_texpaint_single_get(ob);
DRW_shgroup_call_add(stl->g_data->shgroup_image_array[0], geom, ob->obmat);
ok = true;
}
}
}
if (!ok) {
else {
struct GPUBatch *geom = DRW_cache_mesh_surface_get(ob);
DRW_shgroup_call_add(stl->g_data->shgroup_fallback, geom, ob->obmat);
}

View File

@@ -1,7 +1,7 @@
uniform mat4 ModelViewProjectionMatrix;
in vec2 uv;
in vec2 u; /* active uv map */
in vec3 pos;
out vec2 uv_interp;
@@ -10,6 +10,5 @@ void main()
{
gl_Position = ModelViewProjectionMatrix * vec4(pos, 1.0);
uv_interp = uv;
uv_interp = u;
}

View File

@@ -197,6 +197,13 @@ void gpu_batch_exit(void);
} \
} while (0)
#define GPU_BATCH_CLEAR_SAFE(batch) do { \
if (batch != NULL) { \
GPU_batch_clear(batch); \
memset(batch, 0, sizeof(*(batch))); \
} \
} while (0)
#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) do { \
if (_batch_array != NULL) { \
BLI_assert(_len > 0); \