GPUTexture: Add support for binding textures with custom sampler states

Clément Foucault 2020-06-03 13:03:36 +02:00
parent 4184371fb0
commit 866e067d65
4 changed files with 52 additions and 50 deletions
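In short: GPU_texture_bind_ex() gains an eGPUSamplerState parameter, and DRW uniforms now carry a per-uniform sampler state instead of always deferring to the texture's own state. A minimal sketch of a caller (the helper below is hypothetical and not part of this commit; GPU_SAMPLER_MAX still means "use the sampler state stored on the texture", which is what the unchanged GPU_texture_bind() keeps passing):

  #include "GPU_texture.h"

  /* Hypothetical helper: bind `tex` to a texture unit with an explicit sampler
   * state. Any eGPUSamplerState value can be passed; GPU_SAMPLER_MAX preserves
   * the old behavior of using tex->sampler_state. */
  static void bind_with_custom_sampler(GPUTexture *tex, eGPUSamplerState state, int unit)
  {
    GPU_texture_bind_ex(tex, state, unit, true);
  }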

@@ -170,13 +170,20 @@ void drw_resource_buffer_finish(ViewportMemoryPool *vmempool)
/** \name Uniforms (DRW_shgroup_uniform)
* \{ */
static DRWUniform *drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
int loc,
DRWUniformType type,
const void *value,
int length,
int arraysize)
static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
int loc,
DRWUniformType type,
const void *value,
eGPUSamplerState sampler_state,
int length,
int arraysize)
{
if (loc == -1) {
/* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
// BLI_assert(0);
return;
}
DRWUniformChunk *unichunk = shgroup->uniforms;
/* Happens on first uniform or if chunk is full. */
if (!unichunk || unichunk->uniform_used == unichunk->uniform_len) {
@@ -210,18 +217,16 @@ static DRWUniform *drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
break;
case DRW_UNIFORM_TEXTURE:
uni->texture = (GPUTexture *)value;
uni->sampler_state = GPU_SAMPLER_MAX; /* Use texture state for now. */
uni->sampler_state = sampler_state;
break;
case DRW_UNIFORM_TEXTURE_REF:
uni->texture_ref = (GPUTexture **)value;
uni->sampler_state = GPU_SAMPLER_MAX; /* Use texture state for now. */
uni->sampler_state = sampler_state;
break;
default:
uni->pvalue = (const float *)value;
break;
}
return uni;
}
static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
@@ -231,39 +236,29 @@ static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
int length,
int arraysize)
{
int location;
if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_REF)) {
location = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
}
else if (ELEM(type, DRW_UNIFORM_TEXTURE, DRW_UNIFORM_TEXTURE_REF)) {
location = GPU_shader_get_texture_binding(shgroup->shader, name);
}
else {
location = GPU_shader_get_uniform(shgroup->shader, name);
}
if (location == -1) {
/* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
// BLI_assert(0);
return;
}
BLI_assert(arraysize > 0 && arraysize <= 16);
BLI_assert(length >= 0 && length <= 16);
drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);
BLI_assert(!ELEM(type,
DRW_UNIFORM_BLOCK,
DRW_UNIFORM_BLOCK_REF,
DRW_UNIFORM_TEXTURE,
DRW_UNIFORM_TEXTURE_REF));
int location = GPU_shader_get_uniform(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, location, type, value, 0, length, arraysize);
}
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
BLI_assert(tex != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE, tex, GPU_SAMPLER_MAX, 0, 1);
}
void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
BLI_assert(tex != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE_REF, tex, GPU_SAMPLER_MAX, 0, 1);
}
void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
@@ -271,7 +266,8 @@ void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
const GPUUniformBuffer *ubo)
{
BLI_assert(ubo != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, 0, 0, 1);
}
void DRW_shgroup_uniform_block_ref(DRWShadingGroup *shgroup,
@@ -279,7 +275,8 @@ void DRW_shgroup_uniform_block_ref(DRWShadingGroup *shgroup,
GPUUniformBuffer **ubo)
{
BLI_assert(ubo != NULL);
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_REF, ubo, 0, 1);
int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, 0, 0, 1);
}
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
@@ -425,7 +422,8 @@ void DRW_shgroup_uniform_vec4_array_copy(DRWShadingGroup *shgroup,
}
for (int i = 0; i < arraysize; i++) {
drw_shgroup_uniform_create_ex(shgroup, location + i, DRW_UNIFORM_FLOAT_COPY, &value[i], 4, 1);
drw_shgroup_uniform_create_ex(
shgroup, location + i, DRW_UNIFORM_FLOAT_COPY, &value[i], 0, 4, 1);
}
}
@@ -1180,22 +1178,22 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
if (chunkid_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, NULL, 0, 1);
shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, NULL, 0, 0, 1);
}
if (resourceid_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, NULL, 0, 1);
shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, NULL, 0, 0, 1);
}
if (baseinst_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, NULL, 0, 1);
shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, NULL, 0, 0, 1);
}
if (model_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, NULL, 0, 1);
shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, NULL, 0, 0, 1);
}
else {
/* Note: This is only here to support old hardware fallback where uniform buffer is still
@@ -1203,17 +1201,17 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
int model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
if (model != -1) {
drw_shgroup_uniform_create_ex(shgroup, model, DRW_UNIFORM_MODEL_MATRIX, NULL, 0, 1);
drw_shgroup_uniform_create_ex(shgroup, model, DRW_UNIFORM_MODEL_MATRIX, NULL, 0, 0, 1);
}
if (modelinverse != -1) {
drw_shgroup_uniform_create_ex(
shgroup, modelinverse, DRW_UNIFORM_MODEL_MATRIX_INVERSE, NULL, 0, 1);
shgroup, modelinverse, DRW_UNIFORM_MODEL_MATRIX_INVERSE, NULL, 0, 0, 1);
}
}
if (info_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, NULL, 0, 1);
shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, NULL, 0, 0, 1);
/* Abusing this loc to tell shgroup we need the obinfos. */
shgroup->objectinfo = 1;
@@ -1224,7 +1222,7 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
if (view_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, 0, 1);
shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, 0, 0, 1);
}
/* Not supported. */
@@ -1279,7 +1277,9 @@ static void drw_shgroup_material_texture(DRWShadingGroup *grp,
int textarget)
{
GPUTexture *gputex = GPU_texture_from_blender(tex->ima, tex->iuser, NULL, textarget);
DRW_shgroup_uniform_texture(grp, name, gputex);
int loc = GPU_shader_get_texture_binding(grp->shader, name);
drw_shgroup_uniform_create_ex(grp, loc, DRW_UNIFORM_TEXTURE, gputex, GPU_SAMPLER_MAX, 0, 1);
GPUTexture **gputex_ref = BLI_memblock_alloc(DST.vmempool->images);
*gputex_ref = gputex;
@@ -1355,7 +1355,7 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
BLI_assert(tf_target != NULL);
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
drw_shgroup_init(shgroup, shader);
drw_shgroup_uniform_create_ex(shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, 0, 1);
drw_shgroup_uniform_create_ex(shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, 0, 0, 1);
return shgroup;
}
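The public DRW_shgroup_uniform_texture()/_texture_ref() wrappers keep their signatures and pass GPU_SAMPLER_MAX, so existing call sites still get the texture's own sampler state. Within draw_manager_data.c a custom state can be attached through the (static) extended helper, roughly as sketched below; `custom_state` and the uniform name are placeholders, and any public wrapper exposing the sampler argument would be a separate change:

  /* Sketch only: attach `tex` to `shgroup` with an explicit sampler state. */
  int loc = GPU_shader_get_texture_binding(shgroup->shader, "my_tex");
  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE, tex, custom_state, 0, 1);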

@@ -816,10 +816,10 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
shgroup->shader, uni->location, uni->length, uni->arraysize, uni->pvalue);
break;
case DRW_UNIFORM_TEXTURE:
GPU_texture_bind_ex(uni->texture, uni->location, false);
GPU_texture_bind_ex(uni->texture, uni->sampler_state, uni->location, false);
break;
case DRW_UNIFORM_TEXTURE_REF:
GPU_texture_bind_ex(*uni->texture_ref, uni->location, false);
GPU_texture_bind_ex(*uni->texture_ref, uni->sampler_state, uni->location, false);
break;
case DRW_UNIFORM_BLOCK:
GPU_uniformbuffer_bind(uni->block, uni->location);

@@ -261,7 +261,7 @@ void GPU_texture_free(GPUTexture *tex);
void GPU_texture_ref(GPUTexture *tex);
void GPU_texture_bind(GPUTexture *tex, int number);
void GPU_texture_bind_ex(GPUTexture *tex, int unit, const bool set_number);
void GPU_texture_bind_ex(GPUTexture *tex, eGPUSamplerState state, int unit, const bool set_number);
void GPU_texture_unbind(GPUTexture *tex);
void GPU_texture_unbind_all(void);

@@ -1713,7 +1713,7 @@ void GPU_invalid_tex_free(void)
}
/* set_number is to save the texture unit for setting texture parameters. */
void GPU_texture_bind_ex(GPUTexture *tex, int unit, const bool set_number)
void GPU_texture_bind_ex(GPUTexture *tex, eGPUSamplerState state, int unit, const bool set_number)
{
BLI_assert(unit >= 0);
@@ -1740,9 +1740,11 @@ void GPU_texture_bind_ex(GPUTexture *tex, int unit, const bool set_number)
glActiveTexture(GL_TEXTURE0 + unit);
state = (state < GPU_SAMPLER_MAX) ? state : tex->sampler_state;
if (tex->bindcode != 0) {
glBindTexture(tex->target, tex->bindcode);
glBindSampler(unit, GG.samplers[tex->sampler_state]);
glBindSampler(unit, GG.samplers[state]);
}
else {
GPU_invalid_tex_bind(tex->target_base);
@@ -1752,7 +1754,7 @@ void GPU_texture_bind_ex(GPUTexture *tex, int unit, const bool set_number)
void GPU_texture_bind(GPUTexture *tex, int unit)
{
GPU_texture_bind_ex(tex, unit, true);
GPU_texture_bind_ex(tex, GPU_SAMPLER_MAX, unit, true);
}
void GPU_texture_unbind(GPUTexture *tex)