Merge branch 'blender-v2.82-release'

Lukas Stockner 2020-01-16 02:21:32 +01:00
commit 7f571aad22
32 changed files with 959 additions and 320 deletions

View File

@ -2621,7 +2621,7 @@ void MANTA::updatePointers()
mantaFloatObjects.push_back(std::make_tuple(
&mColorGHigh, "color_g" + noise_ext, func, mUsingSmoke && mUsingNoise && mUsingColors));
mantaFloatObjects.push_back(std::make_tuple(
&mColorRHigh, "color_b" + noise_ext, func, mUsingSmoke && mUsingNoise && mUsingColors));
&mColorBHigh, "color_b" + noise_ext, func, mUsingSmoke && mUsingNoise && mUsingColors));
std::vector<std::tuple<std::vector<pData> **, std::string, std::string, bool>> mantaPDataObjects;
mantaPDataObjects.push_back(

View File

@ -2859,7 +2859,7 @@ static void update_flowsfluids(struct Depsgraph *depsgraph,
color_b_in,
phiout_in);
}
else if (mfs->behavior == FLUID_FLOW_BEHAVIOR_GEOMETRY && mmd2->time > 2) {
else if (mfs->behavior == FLUID_FLOW_BEHAVIOR_GEOMETRY && !is_first_frame) {
apply_inflow_fields(mfs,
0.0f,
FLT_MAX,

View File

@ -441,10 +441,9 @@ void BKE_image_copy_data(Main *UNUSED(bmain), Image *ima_dst, const Image *ima_s
BLI_listbase_clear(&ima_dst->anims);
BLI_duplicatelist(&ima_dst->tiles, &ima_src->tiles);
LISTBASE_FOREACH (ImageTile *, tile, &ima_dst->tiles) {
for (int i = 0; i < TEXTARGET_COUNT; i++) {
tile->gputexture[i] = NULL;
}
for (int i = 0; i < TEXTARGET_COUNT; i++) {
ima_dst->gputexture[i] = NULL;
}
if ((flag & LIB_ID_COPY_NO_PREVIEW) == 0) {
@ -510,11 +509,9 @@ bool BKE_image_scale(Image *image, int width, int height)
bool BKE_image_has_opengl_texture(Image *ima)
{
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
for (int i = 0; i < TEXTARGET_COUNT; i++) {
if (tile->gputexture[i] != NULL) {
return true;
}
for (int i = 0; i < TEXTARGET_COUNT; i++) {
if (ima->gputexture[i] != NULL) {
return true;
}
}
return false;
@ -3293,9 +3290,16 @@ void BKE_image_init_imageuser(Image *ima, ImageUser *iuser)
static void image_free_tile(Image *ima, ImageTile *tile)
{
for (int i = 0; i < TEXTARGET_COUNT; i++) {
if (tile->gputexture[i] != NULL) {
GPU_texture_free(tile->gputexture[i]);
tile->gputexture[i] = NULL;
/* Only two of the texture targets depend on all tiles (the 2D array and the tile mapping),
* so if this is a secondary tile we can keep the other two. */
if (tile != ima->tiles.first &&
!(ELEM(i, TEXTARGET_TEXTURE_2D_ARRAY, TEXTARGET_TEXTURE_TILE_MAPPING))) {
continue;
}
if (ima->gputexture[i] != NULL) {
GPU_texture_free(ima->gputexture[i]);
ima->gputexture[i] = NULL;
}
}
@ -3560,6 +3564,16 @@ ImageTile *BKE_image_add_tile(struct Image *ima, int tile_number, const char *la
BLI_strncpy(tile->label, label, sizeof(tile->label));
}
/* Reallocate GPU tile array. */
if (ima->gputexture[TEXTARGET_TEXTURE_2D_ARRAY] != NULL) {
GPU_texture_free(ima->gputexture[TEXTARGET_TEXTURE_2D_ARRAY]);
ima->gputexture[TEXTARGET_TEXTURE_2D_ARRAY] = NULL;
}
if (ima->gputexture[TEXTARGET_TEXTURE_TILE_MAPPING] != NULL) {
GPU_texture_free(ima->gputexture[TEXTARGET_TEXTURE_TILE_MAPPING]);
ima->gputexture[TEXTARGET_TEXTURE_TILE_MAPPING] = NULL;
}
return tile;
}

View File

@ -24,6 +24,8 @@
* \ingroup bli
*/
struct ListBase;
/* Box Packer */
typedef struct BoxPack {
@ -44,4 +46,15 @@ void BLI_box_pack_2d(BoxPack *boxarray,
float *tot_width,
float *tot_height);
typedef struct FixedSizeBoxPack {
struct FixedSizeBoxPack *next, *prev;
int x, y;
int w, h;
} FixedSizeBoxPack;
void BLI_box_pack_2d_fixedarea(struct ListBase *boxes,
int width,
int height,
struct ListBase *packed);
#endif /* __BLI_BOXPACK_2D_H__ */

View File

@ -24,6 +24,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_listbase.h"
#include "BLI_boxpack_2d.h" /* own include */
#include "BLI_sort.h" /* qsort_r */
@ -673,3 +674,110 @@ void BLI_box_pack_2d(BoxPack *boxarray, const uint len, float *r_tot_x, float *r
MEM_freeN(vertex_pack_indices);
MEM_freeN(vs_ctx.vertarray);
}
/* Packs boxes into a fixed area.
* boxes and packed are linked lists containing structs that can be cast to
* FixedSizeBoxPack (i.e. contain a FixedSizeBoxPack as their first element).
* Boxes that were packed successfully are placed into *packed and removed from *boxes.
*
* The algorithm is a simplified version of https://github.com/TeamHypersomnia/rectpack2D.
* Better ones could be used, but for the current use case (packing Image tiles into GPU
* textures) this is fine.
*
* Note that packing efficiency depends on the order of the input boxes. Generally speaking,
* larger boxes should come first, though how exactly size is best defined (e.g. area,
* perimeter) depends on the particular application. */
void BLI_box_pack_2d_fixedarea(ListBase *boxes, int width, int height, ListBase *packed)
{
ListBase spaces = {NULL};
FixedSizeBoxPack *full_rect = MEM_callocN(sizeof(FixedSizeBoxPack), __func__);
full_rect->w = width;
full_rect->h = height;
BLI_addhead(&spaces, full_rect);
/* The basic idea of the algorithm is to keep a list of free spaces in the packing area.
* Then, for each box to be packed, we try to find a space that can contain it.
* The found space is then split into the area that is occupied by the box and the
* remaining area, which is reinserted into the free space list.
* By inserting the smaller remaining spaces first, the algorithm tries to use these
* smaller spaces first instead of "wasting" a large space. */
LISTBASE_FOREACH_MUTABLE (FixedSizeBoxPack *, box, boxes) {
LISTBASE_FOREACH (FixedSizeBoxPack *, space, &spaces) {
/* Skip this space if it's too small. */
if (box->w > space->w || box->h > space->h) {
continue;
}
/* Pack this box into this space. */
box->x = space->x;
box->y = space->y;
BLI_remlink(boxes, box);
BLI_addtail(packed, box);
if (box->w == space->w && box->h == space->h) {
/* Box exactly fills space, so just remove the space. */
BLI_remlink(&spaces, space);
MEM_freeN(space);
}
else if (box->w == space->w) {
/* Box fills the entire width, so we can just contract the space
* to the upper part that remains. */
space->y += box->h;
space->h -= box->h;
}
else if (box->h == space->h) {
/* Box fills the entire height, so we can just contract the space
* to the right part that remains. */
space->x += box->w;
space->w -= box->w;
}
else {
/* Split the remaining L-shaped space into two spaces.
* There are two ways to do so, we pick the one that produces the biggest
* remaining space:
*
*  Horizontal Split          Vertical Split
* ###################      ###################
* #                 #      #       -         #
* #      Large      #      # Small -         #
* #                 #      #       -         #
* #********---------#      #********  Large  #
* #  Box  *  Small  #      #  Box  *         #
* #       *         #      #       *         #
* ###################      ###################
*
*/
int area_hsplit_large = space->w * (space->h - box->h);
int area_vsplit_large = (space->w - box->w) * space->h;
/* Perform split. This space becomes the larger space,
* while the new smaller space is inserted _before_ it. */
FixedSizeBoxPack *new_space = MEM_callocN(sizeof(FixedSizeBoxPack), __func__);
if (area_hsplit_large > area_vsplit_large) {
new_space->x = space->x + box->w;
new_space->y = space->y;
new_space->w = space->w - box->w;
new_space->h = box->h;
space->y += box->h;
space->h -= box->h;
}
else {
new_space->x = space->x;
new_space->y = space->y + box->h;
new_space->w = box->w;
new_space->h = space->h - box->h;
space->x += box->w;
space->w -= box->w;
}
BLI_addhead(&spaces, new_space);
}
break;
}
}
BLI_freelistN(&spaces);
}
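For orientation, a minimal caller sketch for the new packer (an illustrative example, not part of this commit; it assumes Blender's BLI_listbase and MEM_guardedalloc APIs). Boxes that do not fit in the fixed area simply stay in the input list:

#include <stdio.h>

#include "BLI_boxpack_2d.h"
#include "BLI_listbase.h"
#include "MEM_guardedalloc.h"

static void pack_example(void)
{
  ListBase boxes = {NULL, NULL}, packed = {NULL, NULL};
  const int sizes[3][2] = {{128, 128}, {200, 60}, {64, 64}};

  for (int i = 0; i < 3; i++) {
    FixedSizeBoxPack *box = MEM_callocN(sizeof(FixedSizeBoxPack), __func__);
    box->w = sizes[i][0];
    box->h = sizes[i][1];
    BLI_addtail(&boxes, box);
  }

  /* Pack into a 256x256 area; successfully packed boxes are moved to `packed`. */
  BLI_box_pack_2d_fixedarea(&boxes, 256, 256, &packed);

  LISTBASE_FOREACH (FixedSizeBoxPack *, box, &packed) {
    printf("packed at (%d, %d), size %dx%d\n", box->x, box->y, box->w, box->h);
  }

  BLI_freelistN(&packed);
  BLI_freelistN(&boxes); /* Whatever did not fit. */
}

As for the split decision above: for a 100x100 space and a 40x20 box, the horizontal split keeps a 100x80 space (area 8000) while the vertical split keeps a 60x100 space (area 6000), so the horizontal split is chosen and the 60x20 remainder becomes the new smaller space.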

View File

@ -169,6 +169,12 @@ MINLINE void copy_v4_v4_short(short r[4], const short a[4])
}
/* int */
MINLINE void zero_v2_int(int r[2])
{
r[0] = 0;
r[1] = 0;
}
MINLINE void zero_v3_int(int r[3])
{
r[0] = 0;

View File

@ -1910,11 +1910,9 @@ void blo_make_image_pointer_map(FileData *fd, Main *oldmain)
if (ima->cache) {
oldnewmap_insert(fd->imamap, ima->cache, ima->cache, 0);
}
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
for (a = 0; a < TEXTARGET_COUNT; a++) {
if (tile->gputexture[a] != NULL) {
oldnewmap_insert(fd->imamap, tile->gputexture[a], tile->gputexture[a], 0);
}
for (a = 0; a < TEXTARGET_COUNT; a++) {
if (ima->gputexture[a] != NULL) {
oldnewmap_insert(fd->imamap, ima->gputexture[a], ima->gputexture[a], 0);
}
}
if (ima->rr) {
@ -1958,10 +1956,8 @@ void blo_end_image_pointer_map(FileData *fd, Main *oldmain)
if (ima->cache == NULL) {
ima->gpuflag = 0;
ima->gpuframenr = INT_MAX;
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
for (i = 0; i < TEXTARGET_COUNT; i++) {
tile->gputexture[i] = NULL;
}
for (i = 0; i < TEXTARGET_COUNT; i++) {
ima->gputexture[i] = NULL;
}
ima->rr = NULL;
}
@ -1969,10 +1965,8 @@ void blo_end_image_pointer_map(FileData *fd, Main *oldmain)
slot->render = newimaadr(fd, slot->render);
}
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
for (i = 0; i < TEXTARGET_COUNT; i++) {
tile->gputexture[i] = newimaadr(fd, tile->gputexture[i]);
}
for (i = 0; i < TEXTARGET_COUNT; i++) {
ima->gputexture[i] = newimaadr(fd, ima->gputexture[i]);
}
ima->rr = newimaadr(fd, ima->rr);
}
@ -4274,18 +4268,14 @@ static void direct_link_image(FileData *fd, Image *ima)
if (!ima->cache) {
ima->gpuflag = 0;
ima->gpuframenr = INT_MAX;
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
for (int i = 0; i < TEXTARGET_COUNT; i++) {
tile->gputexture[i] = NULL;
}
for (int i = 0; i < TEXTARGET_COUNT; i++) {
ima->gputexture[i] = NULL;
}
ima->rr = NULL;
}
else {
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
for (int i = 0; i < TEXTARGET_COUNT; i++) {
tile->gputexture[i] = newimaadr(fd, tile->gputexture[i]);
}
for (int i = 0; i < TEXTARGET_COUNT; i++) {
ima->gputexture[i] = newimaadr(fd, ima->gputexture[i]);
}
ima->rr = newimaadr(fd, ima->rr);
}

View File

@ -402,6 +402,7 @@ static void OVERLAY_draw_scene(void *vedata)
OVERLAY_image_draw(vedata);
OVERLAY_facing_draw(vedata);
OVERLAY_extra_blend_draw(vedata);
if (DRW_state_is_fbo()) {
GPU_framebuffer_bind(fbl->overlay_line_fb);

View File

@ -157,13 +157,13 @@ void OVERLAY_extra_cache_init(OVERLAY_Data *vedata)
DRW_shgroup_uniform_block_persistent(grp, "globalsBlock", G_draw.block_ubo);
grp_sub = DRW_shgroup_create_sub(grp);
DRW_shgroup_state_enable(grp_sub, DRW_STATE_CULL_BACK);
DRW_shgroup_state_enable(grp_sub, DRW_STATE_DEPTH_LESS_EQUAL | DRW_STATE_CULL_BACK);
cb->camera_volume = BUF_INSTANCE(grp_sub, format, DRW_cache_camera_volume_get());
cb->camera_volume_frame = BUF_INSTANCE(grp_sub, format, DRW_cache_camera_volume_wire_get());
cb->light_spot_cone_back = BUF_INSTANCE(grp_sub, format, DRW_cache_light_spot_volume_get());
grp_sub = DRW_shgroup_create_sub(grp);
DRW_shgroup_state_enable(grp_sub, DRW_STATE_CULL_FRONT);
DRW_shgroup_state_enable(grp_sub, DRW_STATE_DEPTH_LESS_EQUAL | DRW_STATE_CULL_FRONT);
cb->light_spot_cone_front = BUF_INSTANCE(grp_sub, format, DRW_cache_light_spot_volume_get());
}
{
@ -637,6 +637,9 @@ void OVERLAY_light_cache_populate(OVERLAY_Data *vedata, Object *ob)
DRW_buffer_add_entry(cb->light_sun, color, &instdata);
}
else if (la->type == LA_SPOT) {
/* The previous implementation used the clipend distance as the cone size.
* We cannot do this anymore so we use a fixed size of 10. (see T72871) */
rescale_m4(instdata.mat, (float[3]){10.0f, 10.0f, 10.0f});
/* For cycles and eevee the spot attenuation is
* y = (1/(1 + x^2) - a)/((1 - a) b)
* We solve the case where spot attenuation y = 1 and y = 0

View File

@ -156,9 +156,10 @@ static void select_cache_init(void *vedata)
{
SELECTID_PassList *psl = ((SELECTID_Data *)vedata)->psl;
SELECTID_StorageList *stl = ((SELECTID_Data *)vedata)->stl;
SELECTID_PrivateData *pd = stl->g_data;
const DRWContextState *draw_ctx = DRW_context_state_get();
SELECTID_Shaders *sh_data = &e_data.sh_data[draw_ctx->sh_cfg];
SELECTID_Shaders *sh = &e_data.sh_data[draw_ctx->sh_cfg];
if (e_data.context.select_mode == -1) {
e_data.context.select_mode = select_id_get_object_select_mode(draw_ctx->scene,
@ -166,57 +167,32 @@ static void select_cache_init(void *vedata)
BLI_assert(e_data.context.select_mode != 0);
}
DRWState state = DRW_STATE_DEFAULT;
state |= RV3D_CLIPPING_ENABLED(draw_ctx->v3d, draw_ctx->rv3d) ? DRW_STATE_CLIP_PLANES : 0;
{
psl->depth_only_pass = DRW_pass_create("Depth Only Pass", DRW_STATE_DEFAULT);
stl->g_data->shgrp_depth_only = DRW_shgroup_create(sh_data->select_id_uniform,
psl->depth_only_pass);
if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_state_enable(stl->g_data->shgrp_depth_only, DRW_STATE_CLIP_PLANES);
}
psl->select_id_face_pass = DRW_pass_create("Face Pass", DRW_STATE_DEFAULT);
DRW_PASS_CREATE(psl->depth_only_pass, state);
pd->shgrp_depth_only = DRW_shgroup_create(sh->select_id_uniform, psl->depth_only_pass);
DRW_PASS_CREATE(psl->select_id_face_pass, state);
if (e_data.context.select_mode & SCE_SELECT_FACE) {
stl->g_data->shgrp_face_flat = DRW_shgroup_create(sh_data->select_id_flat,
psl->select_id_face_pass);
if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_state_enable(stl->g_data->shgrp_face_flat, DRW_STATE_CLIP_PLANES);
}
pd->shgrp_face_flat = DRW_shgroup_create(sh->select_id_flat, psl->select_id_face_pass);
}
else {
stl->g_data->shgrp_face_unif = DRW_shgroup_create(sh_data->select_id_uniform,
psl->select_id_face_pass);
DRW_shgroup_uniform_int_copy(stl->g_data->shgrp_face_unif, "id", 0);
if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_state_enable(stl->g_data->shgrp_face_unif, DRW_STATE_CLIP_PLANES);
}
pd->shgrp_face_unif = DRW_shgroup_create(sh->select_id_uniform, psl->select_id_face_pass);
DRW_shgroup_uniform_int_copy(pd->shgrp_face_unif, "id", 0);
}
if (e_data.context.select_mode & SCE_SELECT_EDGE) {
psl->select_id_edge_pass = DRW_pass_create(
"Edge Pass", DRW_STATE_DEFAULT | DRW_STATE_FIRST_VERTEX_CONVENTION);
DRW_PASS_CREATE(psl->select_id_edge_pass, state | DRW_STATE_FIRST_VERTEX_CONVENTION);
stl->g_data->shgrp_edge = DRW_shgroup_create(sh_data->select_id_flat,
psl->select_id_edge_pass);
if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_state_enable(stl->g_data->shgrp_edge, DRW_STATE_CLIP_PLANES);
}
pd->shgrp_edge = DRW_shgroup_create(sh->select_id_flat, psl->select_id_edge_pass);
}
if (e_data.context.select_mode & SCE_SELECT_VERTEX) {
psl->select_id_vert_pass = DRW_pass_create("Vert Pass", DRW_STATE_DEFAULT);
stl->g_data->shgrp_vert = DRW_shgroup_create(sh_data->select_id_flat,
psl->select_id_vert_pass);
DRW_shgroup_uniform_float_copy(
stl->g_data->shgrp_vert, "sizeVertex", G_draw.block.sizeVertex);
if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_state_enable(stl->g_data->shgrp_vert, DRW_STATE_CLIP_PLANES);
}
DRW_PASS_CREATE(psl->select_id_vert_pass, state);
pd->shgrp_vert = DRW_shgroup_create(sh->select_id_flat, psl->select_id_vert_pass);
DRW_shgroup_uniform_float_copy(pd->shgrp_vert, "sizeVertex", G_draw.block.sizeVertex);
}
}

View File

@ -140,6 +140,28 @@ vec2 matcap_uv_compute(vec3 I, vec3 N, bool flipped)
return matcap_uv * 0.496 + 0.5;
}
bool node_tex_tile_lookup(inout vec3 co, sampler2DArray ima, sampler1DArray map)
{
vec2 tile_pos = floor(co.xy);
if (tile_pos.x < 0 || tile_pos.y < 0 || tile_pos.x >= 10)
return false;
float tile = 10.0 * tile_pos.y + tile_pos.x;
if (tile >= textureSize(map, 0).x)
return false;
/* Fetch tile information. */
float tile_layer = texelFetch(map, ivec2(tile, 0), 0).x;
if (tile_layer < 0.0)
return false;
vec4 tile_info = texelFetch(map, ivec2(tile, 1), 0);
co = vec3(((co.xy - tile_pos) * tile_info.zw) + tile_info.xy, tile_layer);
return true;
}
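As a worked example of the lookup above: UDIM tile 1012 sits at tile coordinates (1, 1), so tile_pos = (1, 1) and tile = 10.0 * 1 + 1 = 11, matching tile_number - 1001; texel 11 of row 0 in the mapping texture then gives that tile's layer in the 2D array (or -1 if the tile is missing), and texel 11 of row 1 gives the normalized offset and scale used to remap co.xy into the packed array.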
vec4 workbench_sample_texture(sampler2D image,
vec2 coord,
bool nearest_sampling,
@ -158,3 +180,28 @@ vec4 workbench_sample_texture(sampler2D image,
return color;
}
vec4 workbench_sample_texture_array(sampler2DArray tile_array,
sampler1DArray tile_data,
vec2 coord,
bool nearest_sampling,
bool premultiplied)
{
vec2 tex_size = vec2(textureSize(tile_array, 0).xy);
vec3 uv = vec3(coord, 0);
if (!node_tex_tile_lookup(uv, tile_array, tile_data))
return vec4(1.0, 0.0, 1.0, 1.0);
/* TODO(fclem) We could do the same with sampler objects.
* But this is a quick workaround instead of messing with the GPUTexture itself. */
uv.xy = nearest_sampling ? (floor(uv.xy * tex_size) + 0.5) / tex_size : uv.xy;
vec4 color = texture(tile_array, uv);
/* Unpremultiply if stored multiplied, since straight alpha is expected by shaders. */
if (premultiplied && !(color.a == 0.0 || color.a == 1.0)) {
color.rgb = color.rgb / color.a;
}
return color;
}

View File

@ -1,6 +1,11 @@
uniform float ImageTransparencyCutoff = 0.1;
#ifdef TEXTURE_IMAGE_ARRAY
uniform sampler2DArray image_tile_array;
uniform sampler1DArray image_tile_data;
#else
uniform sampler2D image;
#endif
uniform bool imageNearest;
uniform bool imagePremultiplied;
@ -44,7 +49,12 @@ void main()
vec4 base_color;
#if defined(V3D_SHADING_TEXTURE_COLOR)
# ifdef TEXTURE_IMAGE_ARRAY
base_color = workbench_sample_texture_array(
image_tile_array, image_tile_data, uv_interp, imageNearest, imagePremultiplied);
# else
base_color = workbench_sample_texture(image, uv_interp, imageNearest, imagePremultiplied);
# endif
if (base_color.a < ImageTransparencyCutoff) {
discard;
}

View File

@ -2,7 +2,12 @@
uniform vec4 materialColorAndMetal;
uniform float materialRoughness;
#ifdef TEXTURE_IMAGE_ARRAY
uniform sampler2DArray image_tile_array;
uniform sampler1DArray image_tile_data;
#else
uniform sampler2D image;
#endif
uniform float ImageTransparencyCutoff = 0.1;
uniform bool imageNearest;
uniform bool imagePremultiplied;
@ -39,7 +44,12 @@ void main()
vec4 color;
# if defined(V3D_SHADING_TEXTURE_COLOR)
# ifdef TEXTURE_IMAGE_ARRAY
color = workbench_sample_texture_array(
image_tile_array, image_tile_data, uv_interp, imageNearest, imagePremultiplied);
# else
color = workbench_sample_texture(image, uv_interp, imageNearest, imagePremultiplied);
# endif
if (color.a < ImageTransparencyCutoff) {
discard;
}

View File

@ -214,16 +214,17 @@ static GPUShader *workbench_cavity_shader_get(bool cavity, bool curvature)
static GPUShader *ensure_deferred_prepass_shader(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override,
eGPUShaderConfig sh_cfg)
{
WORKBENCH_DEFERRED_Shaders *sh_data = &e_data.sh_data[sh_cfg];
int index = workbench_material_get_prepass_shader_index(
wpd, is_uniform_color, is_hair, color_override);
wpd, is_uniform_color, is_hair, is_tiled, color_override);
if (sh_data->prepass_sh_cache[index] == NULL) {
const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
char *defines = workbench_material_build_defines(
wpd, is_uniform_color, is_hair, color_override);
wpd, is_uniform_color, is_hair, is_tiled, color_override);
char *prepass_vert = workbench_build_prepass_vert(is_hair);
char *prepass_frag = workbench_build_prepass_frag();
sh_data->prepass_sh_cache[index] = GPU_shader_create_from_arrays({
@ -243,7 +244,7 @@ static GPUShader *ensure_deferred_composite_shader(WORKBENCH_PrivateData *wpd)
int index = workbench_material_get_composite_shader_index(wpd);
if (e_data.composite_sh_cache[index] == NULL) {
char *defines = workbench_material_build_defines(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
char *composite_frag = workbench_build_composite_frag(wpd);
e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
MEM_freeN(composite_frag);
@ -271,17 +272,19 @@ static GPUShader *ensure_background_shader(WORKBENCH_PrivateData *wpd)
static void select_deferred_shaders(WORKBENCH_PrivateData *wpd, eGPUShaderConfig sh_cfg)
{
wpd->prepass_sh = ensure_deferred_prepass_shader(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->prepass_hair_sh = ensure_deferred_prepass_shader(
wpd, false, true, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, false, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->prepass_uniform_sh = ensure_deferred_prepass_shader(
wpd, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, true, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->prepass_uniform_hair_sh = ensure_deferred_prepass_shader(
wpd, true, true, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, true, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->prepass_textured_sh = ensure_deferred_prepass_shader(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
wpd->prepass_textured_array_sh = ensure_deferred_prepass_shader(
wpd, false, false, true, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
wpd->prepass_vertex_sh = ensure_deferred_prepass_shader(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_VERTEX, sh_cfg);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_VERTEX, sh_cfg);
wpd->composite_sh = ensure_deferred_composite_shader(wpd);
wpd->background_sh = ensure_background_shader(wpd);
}
@ -873,8 +876,9 @@ static WORKBENCH_MaterialData *get_or_create_material_data(WORKBENCH_Data *vedat
/* select the correct prepass shader */
GPUShader *shader = (wpd->shading.color_type == color_type) ? wpd->prepass_sh :
wpd->prepass_uniform_sh;
const bool is_tiled = (ima && ima->source == IMA_SRC_TILED);
if (color_type == V3D_SHADING_TEXTURE_COLOR) {
shader = wpd->prepass_textured_sh;
shader = is_tiled ? wpd->prepass_textured_array_sh : wpd->prepass_textured_sh;
}
if (color_type == V3D_SHADING_VERTEX_COLOR) {
shader = wpd->prepass_vertex_sh;
@ -883,7 +887,7 @@ static WORKBENCH_MaterialData *get_or_create_material_data(WORKBENCH_Data *vedat
shader, (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_pass : psl->prepass_pass);
workbench_material_copy(material, &material_template);
DRW_shgroup_stencil_mask(material->shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
workbench_material_shgroup_uniform(wpd, material->shgrp, material, ob, true, interp);
workbench_material_shgroup_uniform(wpd, material->shgrp, material, ob, true, is_tiled, interp);
BLI_ghash_insert(wpd->material_hash, POINTER_FROM_UINT(hash), material);
}
return material;
@ -926,7 +930,7 @@ static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *o
(ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_hair_pass : psl->prepass_hair_pass,
shader);
DRW_shgroup_stencil_mask(shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
workbench_material_shgroup_uniform(wpd, shgrp, material, ob, true, interp);
workbench_material_shgroup_uniform(wpd, shgrp, material, ob, true, false, interp);
}
}
}

View File

@ -172,8 +172,10 @@ WORKBENCH_MaterialData *workbench_forward_get_or_create_material_data(WORKBENCH_
GPUShader *shader = (wpd->shading.color_type == color_type) ?
wpd->transparent_accum_sh :
wpd->transparent_accum_uniform_sh;
const bool is_tiled = (ima && ima->source == IMA_SRC_TILED);
if (color_type == V3D_SHADING_TEXTURE_COLOR) {
shader = wpd->transparent_accum_textured_sh;
shader = is_tiled ? wpd->transparent_accum_textured_array_sh :
wpd->transparent_accum_textured_sh;
}
grp = DRW_shgroup_create(shader, psl->transparent_accum_pass);
@ -201,7 +203,7 @@ WORKBENCH_MaterialData *workbench_forward_get_or_create_material_data(WORKBENCH_
DRW_shgroup_uniform_float_copy(grp, "shadowFocus", wpd->shadow_focus);
}
workbench_material_shgroup_uniform(wpd, grp, material, ob, false, interp);
workbench_material_shgroup_uniform(wpd, grp, material, ob, false, is_tiled, interp);
material->shgrp = grp;
/* Depth */
@ -226,16 +228,17 @@ WORKBENCH_MaterialData *workbench_forward_get_or_create_material_data(WORKBENCH_
static GPUShader *ensure_forward_accum_shaders(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override,
eGPUShaderConfig sh_cfg)
{
WORKBENCH_FORWARD_Shaders *sh_data = &e_data.sh_data[sh_cfg];
int index = workbench_material_get_accum_shader_index(
wpd, is_uniform_color, is_hair, color_override);
wpd, is_uniform_color, is_hair, is_tiled, color_override);
if (sh_data->transparent_accum_sh_cache[index] == NULL) {
const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
char *defines = workbench_material_build_defines(
wpd, is_uniform_color, is_hair, color_override);
wpd, is_uniform_color, is_hair, is_tiled, color_override);
char *transparent_accum_vert = workbench_build_forward_vert(is_hair);
char *transparent_accum_frag = workbench_build_forward_transparent_accum_frag();
sh_data->transparent_accum_sh_cache[index] = GPU_shader_create_from_arrays({
@ -255,7 +258,7 @@ static GPUShader *ensure_forward_composite_shaders(WORKBENCH_PrivateData *wpd)
int index = OBJECT_OUTLINE_ENABLED(wpd) ? 1 : 0;
if (e_data.composite_sh_cache[index] == NULL) {
char *defines = workbench_material_build_defines(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
char *composite_frag = workbench_build_forward_composite_frag();
e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
MEM_freeN(composite_frag);
@ -268,17 +271,19 @@ void workbench_forward_choose_shaders(WORKBENCH_PrivateData *wpd, eGPUShaderConf
{
wpd->composite_sh = ensure_forward_composite_shaders(wpd);
wpd->transparent_accum_sh = ensure_forward_accum_shaders(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->transparent_accum_hair_sh = ensure_forward_accum_shaders(
wpd, false, true, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, false, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->transparent_accum_uniform_sh = ensure_forward_accum_shaders(
wpd, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, true, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->transparent_accum_uniform_hair_sh = ensure_forward_accum_shaders(
wpd, true, true, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd, true, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
wpd->transparent_accum_textured_sh = ensure_forward_accum_shaders(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
wpd->transparent_accum_textured_array_sh = ensure_forward_accum_shaders(
wpd, false, false, true, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
wpd->transparent_accum_vertex_sh = ensure_forward_accum_shaders(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_VERTEX, sh_cfg);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_VERTEX, sh_cfg);
}
void workbench_forward_outline_shaders_ensure(WORKBENCH_PrivateData *wpd, eGPUShaderConfig sh_cfg)
@ -288,11 +293,11 @@ void workbench_forward_outline_shaders_ensure(WORKBENCH_PrivateData *wpd, eGPUSh
if (sh_data->object_outline_sh == NULL) {
const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
char *defines = workbench_material_build_defines(
wpd, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
char *defines_texture = workbench_material_build_defines(
wpd, true, false, WORKBENCH_COLOR_OVERRIDE_OFF);
wpd, true, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
char *defines_hair = workbench_material_build_defines(
wpd, false, true, WORKBENCH_COLOR_OVERRIDE_OFF);
wpd, false, true, false, WORKBENCH_COLOR_OVERRIDE_OFF);
char *forward_vert = workbench_build_forward_vert(false);
char *forward_frag = workbench_build_forward_outline_frag();
char *forward_hair_vert = workbench_build_forward_vert(true);
@ -533,7 +538,7 @@ static void workbench_forward_cache_populate_particles(WORKBENCH_Data *vedata, O
DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
ob, psys, md, psl->transparent_accum_pass, shader);
DRW_shgroup_uniform_block(shgrp, "world_block", wpd->world_ubo);
workbench_material_shgroup_uniform(wpd, shgrp, material, ob, false, interp);
workbench_material_shgroup_uniform(wpd, shgrp, material, ob, false, false, interp);
DRW_shgroup_uniform_vec4(shgrp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
/* Hairs have lots of layers and can rapidly become the most prominent surface.
* So lower their alpha artificially. */

View File

@ -85,6 +85,7 @@ void workbench_material_update_data(WORKBENCH_PrivateData *wpd,
char *workbench_material_build_defines(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override)
{
char *str = NULL;
@ -102,6 +103,7 @@ char *workbench_material_build_defines(WORKBENCH_PrivateData *wpd,
use_textures = false;
use_vertex_colors = true;
is_hair = false;
is_tiled = false;
break;
case WORKBENCH_COLOR_OVERRIDE_OFF:
break;
@ -151,6 +153,9 @@ char *workbench_material_build_defines(WORKBENCH_PrivateData *wpd,
if (is_hair) {
BLI_dynstr_append(ds, "#define HAIR_SHADER\n");
}
if (use_textures && is_tiled) {
BLI_dynstr_append(ds, "#define TEXTURE_IMAGE_ARRAY\n");
}
str = BLI_dynstr_get_cstring(ds);
BLI_dynstr_free(ds);
@ -211,6 +216,7 @@ int workbench_material_get_composite_shader_index(WORKBENCH_PrivateData *wpd)
int workbench_material_get_prepass_shader_index(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override)
{
bool use_textures = (wpd->shading.color_type == V3D_SHADING_TEXTURE_COLOR) && !is_uniform_color;
@ -225,6 +231,7 @@ int workbench_material_get_prepass_shader_index(WORKBENCH_PrivateData *wpd,
case WORKBENCH_COLOR_OVERRIDE_VERTEX:
use_textures = false;
use_vertex_colors = true;
is_tiled = false;
break;
case WORKBENCH_COLOR_OVERRIDE_OFF:
break;
@ -239,6 +246,7 @@ int workbench_material_get_prepass_shader_index(WORKBENCH_PrivateData *wpd,
SET_FLAG_FROM_TEST(index, MATCAP_ENABLED(wpd), 1 << 4);
SET_FLAG_FROM_TEST(index, use_textures, 1 << 5);
SET_FLAG_FROM_TEST(index, use_vertex_colors, 1 << 6);
SET_FLAG_FROM_TEST(index, is_tiled && use_textures, 1 << 7);
BLI_assert(index < MAX_PREPASS_SHADERS);
return index;
}
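Since the new is_tiled flag occupies bit 7 of the variation index, the index can now take 2^8 = 256 values, which is why MAX_PREPASS_SHADERS and MAX_ACCUM_SHADERS are bumped from 1 << 7 to 1 << 8 further down in this commit.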
@ -246,6 +254,7 @@ int workbench_material_get_prepass_shader_index(WORKBENCH_PrivateData *wpd,
int workbench_material_get_accum_shader_index(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override)
{
bool use_textures = (wpd->shading.color_type == V3D_SHADING_TEXTURE_COLOR) && !is_uniform_color;
@ -262,6 +271,7 @@ int workbench_material_get_accum_shader_index(WORKBENCH_PrivateData *wpd,
use_textures = false;
use_vertex_colors = true;
is_hair = false;
is_tiled = false;
break;
case WORKBENCH_COLOR_OVERRIDE_OFF:
break;
@ -277,6 +287,7 @@ int workbench_material_get_accum_shader_index(WORKBENCH_PrivateData *wpd,
/* 1 bit SHADOWS (only facing factor) */
SET_FLAG_FROM_TEST(index, SHADOW_ENABLED(wpd), 1 << 5);
SET_FLAG_FROM_TEST(index, workbench_is_specular_highlight_enabled(wpd), 1 << 6);
SET_FLAG_FROM_TEST(index, is_tiled && use_textures, 1 << 7);
BLI_assert(index < MAX_ACCUM_SHADERS);
return index;
}
@ -352,6 +363,7 @@ void workbench_material_shgroup_uniform(WORKBENCH_PrivateData *wpd,
WORKBENCH_MaterialData *material,
Object *ob,
const bool deferred,
const bool is_tiled,
const int interp)
{
if (deferred && !workbench_is_matdata_pass_enabled(wpd)) {
@ -362,8 +374,18 @@ void workbench_material_shgroup_uniform(WORKBENCH_PrivateData *wpd,
const bool use_texture = (V3D_SHADING_TEXTURE_COLOR == workbench_material_determine_color_type(
wpd, material->ima, ob, false));
if (use_texture) {
GPUTexture *tex = GPU_texture_from_blender(material->ima, material->iuser, GL_TEXTURE_2D);
DRW_shgroup_uniform_texture(grp, "image", tex);
if (is_tiled) {
GPUTexture *array_tex = GPU_texture_from_blender(
material->ima, material->iuser, GL_TEXTURE_2D_ARRAY);
GPUTexture *data_tex = GPU_texture_from_blender(
material->ima, material->iuser, GL_TEXTURE_1D_ARRAY);
DRW_shgroup_uniform_texture(grp, "image_tile_array", array_tex);
DRW_shgroup_uniform_texture(grp, "image_tile_data", data_tex);
}
else {
GPUTexture *tex = GPU_texture_from_blender(material->ima, material->iuser, GL_TEXTURE_2D);
DRW_shgroup_uniform_texture(grp, "image", tex);
}
DRW_shgroup_uniform_bool_copy(
grp, "imagePremultiplied", (material->ima->alpha_mode == IMA_ALPHA_PREMUL));
DRW_shgroup_uniform_bool_copy(grp, "imageNearest", (interp == SHD_INTERP_CLOSEST));

View File

@ -37,8 +37,8 @@
#define WORKBENCH_ENGINE "BLENDER_WORKBENCH"
#define M_GOLDEN_RATION_CONJUGATE 0.618033988749895
#define MAX_COMPOSITE_SHADERS (1 << 7)
#define MAX_PREPASS_SHADERS (1 << 7)
#define MAX_ACCUM_SHADERS (1 << 7)
#define MAX_PREPASS_SHADERS (1 << 8)
#define MAX_ACCUM_SHADERS (1 << 8)
#define MAX_CAVITY_SHADERS (1 << 3)
#define TEXTURE_DRAWING_ENABLED(wpd) (wpd->shading.color_type == V3D_SHADING_TEXTURE_COLOR)
@ -207,6 +207,7 @@ typedef struct WORKBENCH_PrivateData {
struct GPUShader *prepass_uniform_sh;
struct GPUShader *prepass_uniform_hair_sh;
struct GPUShader *prepass_textured_sh;
struct GPUShader *prepass_textured_array_sh;
struct GPUShader *prepass_vertex_sh;
struct GPUShader *composite_sh;
struct GPUShader *background_sh;
@ -215,6 +216,7 @@ typedef struct WORKBENCH_PrivateData {
struct GPUShader *transparent_accum_uniform_sh;
struct GPUShader *transparent_accum_uniform_hair_sh;
struct GPUShader *transparent_accum_textured_sh;
struct GPUShader *transparent_accum_textured_array_sh;
struct GPUShader *transparent_accum_vertex_sh;
View3DShading shading;
StudioLight *studio_light;
@ -516,6 +518,7 @@ void workbench_material_get_image_and_mat(
char *workbench_material_build_defines(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override);
void workbench_material_update_data(WORKBENCH_PrivateData *wpd,
Object *ob,
@ -527,16 +530,19 @@ int workbench_material_get_composite_shader_index(WORKBENCH_PrivateData *wpd);
int workbench_material_get_prepass_shader_index(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override);
int workbench_material_get_accum_shader_index(WORKBENCH_PrivateData *wpd,
bool is_uniform_color,
bool is_hair,
bool is_tiled,
const WORKBENCH_ColorOverride color_override);
void workbench_material_shgroup_uniform(WORKBENCH_PrivateData *wpd,
DRWShadingGroup *grp,
WORKBENCH_MaterialData *material,
Object *ob,
const bool deferred,
const bool is_tiled,
const int interp);
void workbench_material_copy(WORKBENCH_MaterialData *dest_material,
const WORKBENCH_MaterialData *source_material);

View File

@ -1214,17 +1214,19 @@ static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp,
GPUTexture *tex = NULL;
if (input->ima) {
/* If there's no specified iuser but we need a different tile, create a temporary one. */
ImageUser local_iuser;
BKE_imageuser_default(&local_iuser);
local_iuser.tile = input->image_tile;
ImageUser *iuser = input->iuser ? input->iuser : &local_iuser;
iuser->tile = input->image_tile;
GPUTexture **tex_ref = BLI_memblock_alloc(DST.vmempool->images);
*tex_ref = tex = GPU_texture_from_blender(input->ima, iuser, GL_TEXTURE_2D);
int textarget;
if (input->type == GPU_TEX2D_ARRAY) {
textarget = GL_TEXTURE_2D_ARRAY;
}
else if (input->type == GPU_TEX1D_ARRAY) {
textarget = GL_TEXTURE_1D_ARRAY;
}
else {
textarget = GL_TEXTURE_2D;
}
*tex_ref = tex = GPU_texture_from_blender(input->ima, input->iuser, textarget);
GPU_texture_ref(tex);
}

View File

@ -215,8 +215,6 @@ static bool fluid_validatepaths(FluidJob *job, ReportList *reports)
temp_dir,
mds->cache_directory);
BLI_strncpy(temp_dir, mds->cache_directory, FILE_MAXDIR);
/* Ensure whole path exists and is writable. */
if (!BLI_dir_create_recursive(temp_dir)) {
BKE_reportf(reports,
@ -477,7 +475,6 @@ static void fluid_free_startjob(void *customdata, short *stop, short *do_update,
{
FluidJob *job = customdata;
FluidDomainSettings *mds = job->mmd->domain;
Scene *scene = job->scene;
job->stop = stop;
job->do_update = do_update;
@ -514,9 +511,6 @@ static void fluid_free_startjob(void *customdata, short *stop, short *do_update,
*do_update = true;
*stop = 0;
/* Reset scene frame to cache frame start */
CFRA = mds->cache_frame_start;
/* Update scene so that viewport shows freed up scene */
ED_update_for_newframe(job->bmain, job->depsgraph);
}

View File

@ -71,12 +71,13 @@ typedef enum eGPUType {
/* Values not in GPU_DATATYPE_STR */
GPU_TEX1D_ARRAY = 1001,
GPU_TEX2D = 1002,
GPU_TEX3D = 1003,
GPU_SHADOW2D = 1004,
GPU_TEXCUBE = 1005,
GPU_TEX2D_ARRAY = 1003,
GPU_TEX3D = 1004,
GPU_SHADOW2D = 1005,
GPU_TEXCUBE = 1006,
/* GLSL Struct types */
GPU_CLOSURE = 1006,
GPU_CLOSURE = 1007,
/* Opengl Attributes */
GPU_ATTR = 3001,
@ -142,7 +143,8 @@ typedef enum eGPUMaterialStatus {
GPUNodeLink *GPU_attribute(CustomDataType type, const char *name);
GPUNodeLink *GPU_constant(float *num);
GPUNodeLink *GPU_uniform(float *num);
GPUNodeLink *GPU_image(struct Image *ima, struct ImageUser *iuser, int tile);
GPUNodeLink *GPU_image(struct Image *ima, struct ImageUser *iuser);
GPUNodeLink *GPU_image_tilemap(struct Image *ima, struct ImageUser *iuser);
GPUNodeLink *GPU_color_band(GPUMaterial *mat, int size, float *pixels, float *layer);
GPUNodeLink *GPU_builtin(eGPUBuiltin builtin);

View File

@ -279,6 +279,9 @@ static void gpu_parse_material_library(GHash *hash, GPUMaterialLibrary *library)
if (!type && gpu_str_prefix(code, "sampler1DArray")) {
type = GPU_TEX1D_ARRAY;
}
if (!type && gpu_str_prefix(code, "sampler2DArray")) {
type = GPU_TEX2D_ARRAY;
}
if (!type && gpu_str_prefix(code, "sampler2D")) {
type = GPU_TEX2D;
}
@ -618,7 +621,7 @@ static void codegen_set_unique_ids(ListBase *nodes)
input->bindtex = false;
if (input->ima) {
/* input is texture from image */
codegen_set_texid(bindhash, input, &texid, input->ima, input->image_tile);
codegen_set_texid(bindhash, input, &texid, input->ima, input->type);
}
else if (input->coba) {
/* input is color band texture, check coba pointer */
@ -657,10 +660,18 @@ static int codegen_process_uniforms_functions(GPUMaterial *material, DynStr *ds,
if (input->source == GPU_SOURCE_TEX) {
/* create exactly one sampler for each texture */
if (codegen_input_has_texture(input) && input->bindtex) {
BLI_dynstr_appendf(ds,
"uniform %s samp%d;\n",
(input->coba) ? "sampler1DArray" : "sampler2D",
input->texid);
const char *type;
if (input->coba || input->type == GPU_TEX1D_ARRAY) {
type = "sampler1DArray";
}
else if (input->type == GPU_TEX2D_ARRAY) {
type = "sampler2DArray";
}
else {
BLI_assert(input->type == GPU_TEX2D);
type = "sampler2D";
}
BLI_dynstr_appendf(ds, "uniform %s samp%d;\n", type, input->texid);
}
}
else if (input->source == GPU_SOURCE_BUILTIN) {
@ -1544,10 +1555,10 @@ static void gpu_node_input_link(GPUNode *node, GPUNodeLink *link, const eGPUType
input->coba = link->coba;
break;
case GPU_NODE_LINK_IMAGE_BLENDER:
case GPU_NODE_LINK_IMAGE_TILEMAP:
input->source = GPU_SOURCE_TEX;
input->ima = link->ima;
input->iuser = link->iuser;
input->image_tile = link->image_tile;
break;
case GPU_NODE_LINK_ATTR:
input->source = GPU_SOURCE_ATTR;
@ -1792,13 +1803,12 @@ GPUNodeLink *GPU_uniform(float *num)
return link;
}
GPUNodeLink *GPU_image(Image *ima, ImageUser *iuser, int tile)
GPUNodeLink *GPU_image(Image *ima, ImageUser *iuser)
{
GPUNodeLink *link = GPU_node_link_create();
link->link_type = GPU_NODE_LINK_IMAGE_BLENDER;
link->ima = ima;
link->iuser = iuser;
link->image_tile = tile;
return link;
}

View File

@ -59,6 +59,7 @@ typedef enum {
GPU_NODE_LINK_COLORBAND,
GPU_NODE_LINK_CONSTANT,
GPU_NODE_LINK_IMAGE_BLENDER,
GPU_NODE_LINK_IMAGE_TILEMAP,
GPU_NODE_LINK_OUTPUT,
GPU_NODE_LINK_UNIFORM,
} GPUNodeLinkType;
@ -95,11 +96,10 @@ struct GPUNodeLink {
const char *attr_name;
CustomDataType attr_type;
};
/* GPU_NODE_LINK_IMAGE_BLENDER */
/* GPU_NODE_LINK_IMAGE_BLENDER | GPU_NODE_LINK_IMAGE_TILEMAP */
struct {
struct Image *ima;
struct ImageUser *iuser;
int image_tile;
};
};
};
@ -139,7 +139,6 @@ typedef struct GPUInput {
struct ImageUser *iuser; /* image user */
bool bindtex; /* input is responsible for binding the texture? */
int texid; /* number for multitexture, starting from zero */
int image_tile; /* image tile */
eGPUType textype; /* texture type (2D, 1D Array ...) */
};
/* GPU_SOURCE_ATTR */

View File

@ -31,6 +31,7 @@
#include <string.h>
#include "BLI_blenlib.h"
#include "BLI_boxpack_2d.h"
#include "BLI_linklist.h"
#include "BLI_math.h"
#include "BLI_threads.h"
@ -71,7 +72,7 @@ static bool is_power_of_2_resolution(int w, int h)
static bool is_over_resolution_limit(GLenum textarget, int w, int h)
{
int size = (textarget == GL_TEXTURE_2D) ? GPU_max_texture_size() : GPU_max_cube_map_size();
int size = (textarget == GL_TEXTURE_CUBE_MAP) ? GPU_max_cube_map_size() : GPU_max_texture_size();
int reslimit = (U.glreslimit != 0) ? min_ii(U.glreslimit, size) : size;
return (w > reslimit || h > reslimit);
@ -179,18 +180,294 @@ float GPU_get_anisotropic(void)
/* Set OpenGL state for an MTFace */
static GPUTexture **gpu_get_tile_gputexture(ImageTile *tile, GLenum textarget)
static GPUTexture **gpu_get_image_gputexture(Image *ima, GLenum textarget)
{
if (textarget == GL_TEXTURE_2D) {
return &tile->gputexture[TEXTARGET_TEXTURE_2D];
return &ima->gputexture[TEXTARGET_TEXTURE_2D];
}
else if (textarget == GL_TEXTURE_CUBE_MAP) {
return &tile->gputexture[TEXTARGET_TEXTURE_CUBE_MAP];
return &ima->gputexture[TEXTARGET_TEXTURE_CUBE_MAP];
}
else if (textarget == GL_TEXTURE_2D_ARRAY) {
return &ima->gputexture[TEXTARGET_TEXTURE_2D_ARRAY];
}
else if (textarget == GL_TEXTURE_1D_ARRAY) {
return &ima->gputexture[TEXTARGET_TEXTURE_TILE_MAPPING];
}
return NULL;
}
static uint gpu_texture_create_tile_mapping(Image *ima)
{
GPUTexture *tilearray = ima->gputexture[TEXTARGET_TEXTURE_2D_ARRAY];
if (tilearray == NULL) {
return 0;
}
float array_w = GPU_texture_width(tilearray);
float array_h = GPU_texture_height(tilearray);
ImageTile *last_tile = ima->tiles.last;
/* Tiles are sorted by number. */
int max_tile = last_tile->tile_number - 1001;
/* create image */
int bindcode;
glGenTextures(1, (GLuint *)&bindcode);
glBindTexture(GL_TEXTURE_1D_ARRAY, bindcode);
int width = max_tile + 1;
float *data = MEM_callocN(width * 8 * sizeof(float), __func__);
for (int i = 0; i < width; i++) {
data[4 * i] = -1.0f;
}
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
int i = tile->tile_number - 1001;
data[4 * i] = tile->runtime.tilearray_layer;
float *tile_info = &data[4 * width + 4 * i];
tile_info[0] = tile->runtime.tilearray_offset[0] / array_w;
tile_info[1] = tile->runtime.tilearray_offset[1] / array_h;
tile_info[2] = tile->runtime.tilearray_size[0] / array_w;
tile_info[3] = tile->runtime.tilearray_size[1] / array_h;
}
glTexImage2D(GL_TEXTURE_1D_ARRAY, 0, GL_RGBA32F, width, 2, 0, GL_RGBA, GL_FLOAT, data);
MEM_freeN(data);
glTexParameteri(GL_TEXTURE_1D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glBindTexture(GL_TEXTURE_1D_ARRAY, 0);
return bindcode;
}
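The mapping texture built above is two rows of RGBA32F texels: row 0 holds each tile's layer in the 2D array (-1 for missing tiles), row 1 its normalized offset and size. A small sketch of reading one tile back from the CPU-side buffer before upload (illustrative only; `data` and `width` mirror the locals of gpu_texture_create_tile_mapping):

#include <stdio.h>

/* Row 0 starts at data[0], row 1 at data[4 * width]; each texel is 4 floats. */
static void print_tile_mapping(const float *data, int width, int tile_number)
{
  const int i = tile_number - 1001; /* Column index, as in the loop above. */
  if (i < 0 || i >= width) {
    return;
  }
  const float layer = data[4 * i];              /* -1.0f if the tile does not exist. */
  const float *info = &data[4 * width + 4 * i]; /* offset.xy, scale.xy (normalized). */
  printf("tile %d: layer %.0f, offset (%.3f, %.3f), scale (%.3f, %.3f)\n",
         tile_number, layer, info[0], info[1], info[2], info[3]);
}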
typedef struct PackTile {
FixedSizeBoxPack boxpack;
ImageTile *tile;
float pack_score;
} PackTile;
static int compare_packtile(const void *a, const void *b)
{
const PackTile *tile_a = a;
const PackTile *tile_b = b;
return tile_a->pack_score < tile_b->pack_score;
}
static uint gpu_texture_create_tile_array(Image *ima, ImBuf *main_ibuf)
{
int arraywidth = 0, arrayheight = 0;
ListBase boxes = {NULL};
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
ImageUser iuser;
BKE_imageuser_default(&iuser);
iuser.tile = tile->tile_number;
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);
if (ibuf) {
PackTile *packtile = MEM_callocN(sizeof(PackTile), __func__);
packtile->tile = tile;
packtile->boxpack.w = ibuf->x;
packtile->boxpack.h = ibuf->y;
if (is_over_resolution_limit(
GL_TEXTURE_2D_ARRAY, packtile->boxpack.w, packtile->boxpack.h)) {
packtile->boxpack.w = smaller_power_of_2_limit(packtile->boxpack.w);
packtile->boxpack.h = smaller_power_of_2_limit(packtile->boxpack.h);
}
arraywidth = max_ii(arraywidth, packtile->boxpack.w);
arrayheight = max_ii(arrayheight, packtile->boxpack.h);
/* We sort the tiles by decreasing size, with an additional penalty term
* for high aspect ratios. This improves packing efficiency. */
float w = packtile->boxpack.w, h = packtile->boxpack.h;
packtile->pack_score = max_ff(w, h) / min_ff(w, h) * w * h;
BKE_image_release_ibuf(ima, ibuf, NULL);
BLI_addtail(&boxes, packtile);
}
}
BLI_assert(arraywidth > 0 && arrayheight > 0);
BLI_listbase_sort(&boxes, compare_packtile);
int arraylayers = 0;
/* Keep adding layers until all tiles are packed. */
while (boxes.first != NULL) {
ListBase packed = {NULL};
BLI_box_pack_2d_fixedarea(&boxes, arraywidth, arrayheight, &packed);
BLI_assert(packed.first != NULL);
LISTBASE_FOREACH (PackTile *, packtile, &packed) {
ImageTile *tile = packtile->tile;
int *tileoffset = tile->runtime.tilearray_offset;
int *tilesize = tile->runtime.tilearray_size;
tileoffset[0] = packtile->boxpack.x;
tileoffset[1] = packtile->boxpack.y;
tilesize[0] = packtile->boxpack.w;
tilesize[1] = packtile->boxpack.h;
tile->runtime.tilearray_layer = arraylayers;
}
BLI_freelistN(&packed);
arraylayers++;
}
/* create image */
int bindcode;
glGenTextures(1, (GLuint *)&bindcode);
glBindTexture(GL_TEXTURE_2D_ARRAY, bindcode);
GLenum data_type, internal_format;
if (main_ibuf->rect_float) {
data_type = GL_FLOAT;
internal_format = GL_RGBA16F;
}
else {
data_type = GL_UNSIGNED_BYTE;
internal_format = GL_RGBA8;
if (!IMB_colormanagement_space_is_data(main_ibuf->rect_colorspace) &&
!IMB_colormanagement_space_is_scene_linear(main_ibuf->rect_colorspace)) {
internal_format = GL_SRGB8_ALPHA8;
}
}
glTexImage3D(GL_TEXTURE_2D_ARRAY,
0,
internal_format,
arraywidth,
arrayheight,
arraylayers,
0,
GL_RGBA,
data_type,
NULL);
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
int tilelayer = tile->runtime.tilearray_layer;
int *tileoffset = tile->runtime.tilearray_offset;
int *tilesize = tile->runtime.tilearray_size;
if (tilesize[0] == 0 || tilesize[1] == 0) {
continue;
}
ImageUser iuser;
BKE_imageuser_default(&iuser);
iuser.tile = tile->tile_number;
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, NULL);
BLI_assert(ibuf != NULL);
bool needs_scale = (ibuf->x != tilesize[0] || ibuf->y != tilesize[1]);
ImBuf *scale_ibuf = NULL;
if (ibuf->rect_float) {
float *rect_float = ibuf->rect_float;
const bool store_premultiplied = ima->alpha_mode != IMA_ALPHA_STRAIGHT;
if (ibuf->channels != 4 || !store_premultiplied) {
rect_float = MEM_mallocN(sizeof(float) * 4 * ibuf->x * ibuf->y, __func__);
IMB_colormanagement_imbuf_to_float_texture(
rect_float, 0, 0, ibuf->x, ibuf->y, ibuf, store_premultiplied);
}
float *pixeldata = rect_float;
if (needs_scale) {
scale_ibuf = IMB_allocFromBuffer(NULL, rect_float, ibuf->x, ibuf->y, 4);
IMB_scaleImBuf(scale_ibuf, tilesize[0], tilesize[1]);
pixeldata = scale_ibuf->rect_float;
}
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
0,
tileoffset[0],
tileoffset[1],
tilelayer,
tilesize[0],
tilesize[1],
1,
GL_RGBA,
GL_FLOAT,
pixeldata);
if (rect_float != ibuf->rect_float) {
MEM_freeN(rect_float);
}
}
else {
unsigned int *rect = ibuf->rect;
if (!IMB_colormanagement_space_is_data(ibuf->rect_colorspace)) {
rect = MEM_mallocN(sizeof(uchar) * 4 * ibuf->x * ibuf->y, __func__);
IMB_colormanagement_imbuf_to_byte_texture((uchar *)rect,
0,
0,
ibuf->x,
ibuf->y,
ibuf,
internal_format == GL_SRGB8_ALPHA8,
ima->alpha_mode == IMA_ALPHA_PREMUL);
}
unsigned int *pixeldata = rect;
if (needs_scale) {
scale_ibuf = IMB_allocFromBuffer(rect, NULL, ibuf->x, ibuf->y, 4);
IMB_scaleImBuf(scale_ibuf, tilesize[0], tilesize[1]);
pixeldata = scale_ibuf->rect;
}
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
0,
tileoffset[0],
tileoffset[1],
tilelayer,
tilesize[0],
tilesize[1],
1,
GL_RGBA,
GL_UNSIGNED_BYTE,
pixeldata);
if (rect != ibuf->rect) {
MEM_freeN(rect);
}
}
if (scale_ibuf != NULL) {
IMB_freeImBuf(scale_ibuf);
}
BKE_image_release_ibuf(ima, ibuf, NULL);
}
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
if (GPU_get_mipmap()) {
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, gpu_get_mipmap_filter(0));
if (ima) {
ima->gpuflag |= IMA_GPU_MIPMAP_COMPLETE;
}
}
else {
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
if (GLEW_EXT_texture_filter_anisotropic) {
glTexParameterf(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAX_ANISOTROPY_EXT, GPU_get_anisotropic());
}
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
return bindcode;
}
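As a worked example of the pack score used above: a 256x256 tile scores (256 / 256) * 256 * 256 = 65536, while a 512x64 tile scores (512 / 64) * 512 * 64 = 262144, so the sort places the elongated 512x64 tile first and the packer reserves room for it before the squarer tiles fill in around it.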
static uint gpu_texture_create_from_ibuf(Image *ima, ImBuf *ibuf, int textarget)
{
uint bindcode = 0;
@ -305,48 +582,105 @@ static GPUTexture **gpu_get_movieclip_gputexture(MovieClip *clip,
return NULL;
}
static ImBuf *update_do_scale(uchar *rect,
float *rect_float,
int *x,
int *y,
int *w,
int *h,
int limit_w,
int limit_h,
int full_w,
int full_h)
{
/* Partial update with scaling. */
float xratio = limit_w / (float)full_w;
float yratio = limit_h / (float)full_h;
int part_w = *w, part_h = *h;
/* Find sub coordinates in scaled image. Take ceiling because we will be
* losing 1 pixel due to rounding errors in x,y. */
*x *= xratio;
*y *= yratio;
*w = (int)ceil(xratio * (*w));
*h = (int)ceil(yratio * (*h));
/* ...but take back if we are over the limit! */
if (*x + *w > limit_w) {
(*w)--;
}
if (*y + *h > limit_h) {
(*h)--;
}
/* Scale pixels. */
ImBuf *ibuf = IMB_allocFromBuffer((uint *)rect, rect_float, part_w, part_h, 4);
IMB_scaleImBuf(ibuf, *w, *h);
return ibuf;
}
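For a concrete instance of the scaling above: with a 4000 pixel wide image limited to 2048, xratio = 2048 / 4000 = 0.512, so a dirty region starting at x = 1000 with w = 500 maps to x = 512 and w = ceil(0.512 * 500) = 256 in the scaled texture; the trailing decrements only apply when the rounded-up size would run past the limit.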
static void gpu_texture_update_scaled_array(uchar *rect,
float *rect_float,
int full_w,
int full_h,
int x,
int y,
int layer,
const int *tile_offset,
const int *tile_size,
int w,
int h)
{
ImBuf *ibuf = update_do_scale(
rect, rect_float, &x, &y, &w, &h, tile_size[0], tile_size[1], full_w, full_h);
/* Shift to account for tile packing. */
x += tile_offset[0];
y += tile_offset[1];
if (ibuf->rect_float) {
glTexSubImage3D(
GL_TEXTURE_2D_ARRAY, 0, x, y, layer, w, h, 1, GL_RGBA, GL_FLOAT, ibuf->rect_float);
}
else {
glTexSubImage3D(
GL_TEXTURE_2D_ARRAY, 0, x, y, layer, w, h, 1, GL_RGBA, GL_UNSIGNED_BYTE, ibuf->rect);
}
IMB_freeImBuf(ibuf);
}
static void gpu_texture_update_scaled(
uchar *rect, float *rect_float, int full_w, int full_h, int x, int y, int w, int h)
{
/* Partial update with scaling. */
int limit_w = smaller_power_of_2_limit(full_w);
int limit_h = smaller_power_of_2_limit(full_h);
float xratio = limit_w / (float)full_w;
float yratio = limit_h / (float)full_h;
/* Find sub coordinates in scaled image. Take ceiling because we will be
* losing 1 pixel due to rounding errors in x,y. */
int sub_x = x * xratio;
int sub_y = y * yratio;
int sub_w = (int)ceil(xratio * w);
int sub_h = (int)ceil(yratio * h);
/* ...but take back if we are over the limit! */
if (sub_w + sub_x > limit_w) {
sub_w--;
}
if (sub_h + sub_y > limit_h) {
sub_h--;
}
/* Scale pixels. */
ImBuf *ibuf = IMB_allocFromBuffer((uint *)rect, rect_float, w, h, 4);
IMB_scaleImBuf(ibuf, sub_w, sub_h);
ImBuf *ibuf = update_do_scale(
rect, rect_float, &x, &y, &w, &h, limit_w, limit_h, full_w, full_h);
if (ibuf->rect_float) {
glTexSubImage2D(
GL_TEXTURE_2D, 0, sub_x, sub_y, sub_w, sub_h, GL_RGBA, GL_FLOAT, ibuf->rect_float);
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_FLOAT, ibuf->rect_float);
}
else {
glTexSubImage2D(
GL_TEXTURE_2D, 0, sub_x, sub_y, sub_w, sub_h, GL_RGBA, GL_UNSIGNED_BYTE, ibuf->rect);
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, ibuf->rect);
}
IMB_freeImBuf(ibuf);
}
static void gpu_texture_update_unscaled(
uchar *rect, float *rect_float, int x, int y, int w, int h, GLint tex_stride, GLint tex_offset)
static void gpu_texture_update_unscaled(uchar *rect,
float *rect_float,
int x,
int y,
int layer,
int w,
int h,
GLint tex_stride,
GLint tex_offset)
{
/* Partial update without scaling. Stride and offset are used to copy only a
* subset of a possibly larger buffer than what we are updating. */
@ -354,22 +688,61 @@ static void gpu_texture_update_unscaled(
glGetIntegerv(GL_UNPACK_ROW_LENGTH, &row_length);
glPixelStorei(GL_UNPACK_ROW_LENGTH, tex_stride);
if (rect_float == NULL) {
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, rect + tex_offset);
if (layer >= 0) {
if (rect_float == NULL) {
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
0,
x,
y,
layer,
w,
h,
1,
GL_RGBA,
GL_UNSIGNED_BYTE,
rect + tex_offset);
}
else {
glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
0,
x,
y,
layer,
w,
h,
1,
GL_RGBA,
GL_FLOAT,
rect_float + tex_offset);
}
}
else {
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_FLOAT, rect_float + tex_offset);
if (rect_float == NULL) {
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, rect + tex_offset);
}
else {
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_FLOAT, rect_float + tex_offset);
}
}
glPixelStorei(GL_UNPACK_ROW_LENGTH, row_length);
}
static void gpu_texture_update_from_ibuf(Image *ima, ImBuf *ibuf, int x, int y, int w, int h)
static void gpu_texture_update_from_ibuf(
GPUTexture *tex, Image *ima, ImBuf *ibuf, ImageTile *tile, int x, int y, int w, int h)
{
/* Partial update of texture for texture painting. This is often much
* quicker than fully updating the texture for high resolution images.
* Assumes the OpenGL texture is bound to 0. */
const bool scaled = is_over_resolution_limit(GL_TEXTURE_2D, ibuf->x, ibuf->y);
* quicker than fully updating the texture for high resolution images. */
GPU_texture_bind(tex, 0);
bool scaled;
if (tile != NULL) {
int *tilesize = tile->runtime.tilearray_size;
scaled = (ibuf->x != tilesize[0]) || (ibuf->y != tilesize[1]);
}
else {
scaled = is_over_resolution_limit(GL_TEXTURE_2D, ibuf->x, ibuf->y);
}
if (scaled) {
/* Extra padding to account for bleed from neighboring pixels. */
@ -429,11 +802,35 @@ static void gpu_texture_update_from_ibuf(Image *ima, ImBuf *ibuf, int x, int y,
if (scaled) {
/* Slower update where we first have to scale the input pixels. */
gpu_texture_update_scaled(rect, rect_float, ibuf->x, ibuf->y, x, y, w, h);
if (tile != NULL) {
int *tileoffset = tile->runtime.tilearray_offset;
int *tilesize = tile->runtime.tilearray_size;
int tilelayer = tile->runtime.tilearray_layer;
gpu_texture_update_scaled_array(
rect, rect_float, ibuf->x, ibuf->y, x, y, tilelayer, tileoffset, tilesize, w, h);
}
else {
gpu_texture_update_scaled(rect, rect_float, ibuf->x, ibuf->y, x, y, w, h);
}
}
else {
/* Fast update at same resolution. */
gpu_texture_update_unscaled(rect, rect_float, x, y, w, h, tex_stride, tex_offset);
if (tile != NULL) {
int *tileoffset = tile->runtime.tilearray_offset;
int tilelayer = tile->runtime.tilearray_layer;
gpu_texture_update_unscaled(rect,
rect_float,
x + tileoffset[0],
y + tileoffset[1],
tilelayer,
w,
h,
tex_stride,
tex_offset);
}
else {
gpu_texture_update_unscaled(rect, rect_float, x, y, -1, w, h, tex_stride, tex_offset);
}
}
/* Free buffers if needed. */
@ -443,6 +840,15 @@ static void gpu_texture_update_from_ibuf(Image *ima, ImBuf *ibuf, int x, int y,
if (rect_float && rect_float != ibuf->rect_float) {
MEM_freeN(rect_float);
}
if (GPU_get_mipmap()) {
glGenerateMipmap((tile != NULL) ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D);
}
else {
ima->gpuflag &= ~IMA_GPU_MIPMAP_COMPLETE;
}
GPU_texture_unbind(tex);
}
GPUTexture *GPU_texture_from_blender(Image *ima, ImageUser *iuser, int textarget)
@ -460,19 +866,8 @@ GPUTexture *GPU_texture_from_blender(Image *ima, ImageUser *iuser, int textarget
/* Tag as in active use for garbage collector. */
BKE_image_tag_time(ima);
ImageTile *tile = BKE_image_get_tile_from_iuser(ima, iuser);
if (tile == NULL) {
/* TODO(lukas): When a tile gets deleted, the materials using the image
* aren't rebuilt and therefore continue to use it.
* This workaround isn't ideal, the result should be a pink color
* (for a missing tile). With the current behavior, new tiles also won't
* be detected. */
tile = BKE_image_get_tile(ima, 0);
}
/* Test if we already have a texture. */
GPUTexture **tex = gpu_get_tile_gputexture(tile, textarget);
GPUTexture **tex = gpu_get_image_gputexture(ima, textarget);
if (*tex) {
return *tex;
}
@ -480,6 +875,7 @@ GPUTexture *GPU_texture_from_blender(Image *ima, ImageUser *iuser, int textarget
/* Check if we have a valid image. If not, we return a dummy
* texture with zero bindcode so we don't keep trying. */
uint bindcode = 0;
ImageTile *tile = BKE_image_get_tile(ima, 0);
if (tile->ok == 0) {
*tex = GPU_texture_from_bindcode(textarget, bindcode);
return *tex;
@ -492,7 +888,15 @@ GPUTexture *GPU_texture_from_blender(Image *ima, ImageUser *iuser, int textarget
return *tex;
}
bindcode = gpu_texture_create_from_ibuf(ima, ibuf, textarget);
if (textarget == GL_TEXTURE_2D_ARRAY) {
bindcode = gpu_texture_create_tile_array(ima, ibuf);
}
else if (textarget == GL_TEXTURE_1D_ARRAY) {
bindcode = gpu_texture_create_tile_mapping(ima);
}
else {
bindcode = gpu_texture_create_from_ibuf(ima, ibuf, textarget);
}
BKE_image_release_ibuf(ima, ibuf, NULL);
@ -856,13 +1260,15 @@ void GPU_paint_set_mipmap(Main *bmain, bool mipmap)
for (Image *ima = bmain->images.first; ima; ima = ima->id.next) {
if (BKE_image_has_opengl_texture(ima)) {
if (ima->gpuflag & IMA_GPU_MIPMAP_COMPLETE) {
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
GPUTexture *tex = tile->gputexture[TEXTARGET_TEXTURE_2D];
if (tex != NULL) {
GPU_texture_bind(tex, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, gpu_get_mipmap_filter(0));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
GPU_texture_unbind(tex);
for (int a = 0; a < TEXTARGET_COUNT; a++) {
if (ELEM(a, TEXTARGET_TEXTURE_2D, TEXTARGET_TEXTURE_2D_ARRAY)) {
GPUTexture *tex = ima->gputexture[a];
if (tex != NULL) {
GPU_texture_bind(tex, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, gpu_get_mipmap_filter(0));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
GPU_texture_unbind(tex);
}
}
}
}
@ -878,13 +1284,15 @@ void GPU_paint_set_mipmap(Main *bmain, bool mipmap)
else {
for (Image *ima = bmain->images.first; ima; ima = ima->id.next) {
if (BKE_image_has_opengl_texture(ima)) {
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
GPUTexture *tex = tile->gputexture[TEXTARGET_TEXTURE_2D];
if (tex != NULL) {
GPU_texture_bind(tex, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
GPU_texture_unbind(tex);
for (int a = 0; a < TEXTARGET_COUNT; a++) {
if (ELEM(a, TEXTARGET_TEXTURE_2D, TEXTARGET_TEXTURE_2D_ARRAY)) {
GPUTexture *tex = ima->gputexture[a];
if (tex != NULL) {
GPU_texture_bind(tex, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
GPU_texture_unbind(tex);
}
}
}
}
@ -899,26 +1307,22 @@ void GPU_paint_update_image(Image *ima, ImageUser *iuser, int x, int y, int w, i
{
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, NULL);
ImageTile *tile = BKE_image_get_tile_from_iuser(ima, iuser);
GPUTexture *tex = tile->gputexture[TEXTARGET_TEXTURE_2D];
if ((tex == NULL) || (ibuf == NULL) || (w == 0) || (h == 0)) {
if ((ibuf == NULL) || (w == 0) || (h == 0)) {
/* Full reload of texture. */
GPU_free_image(ima);
}
else {
/* Partial update of texture. */
GPU_texture_bind(tex, 0);
gpu_texture_update_from_ibuf(ima, ibuf, x, y, w, h);
GPUTexture *tex = ima->gputexture[TEXTARGET_TEXTURE_2D];
/* Check if we need to update the main gputexture. */
if (tex != NULL && tile == ima->tiles.first) {
gpu_texture_update_from_ibuf(tex, ima, ibuf, NULL, x, y, w, h);
}
if (GPU_get_mipmap()) {
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
ima->gpuflag &= ~IMA_GPU_MIPMAP_COMPLETE;
}
GPU_texture_unbind(tex);
/* Check if we need to update the array gputexture. */
tex = ima->gputexture[TEXTARGET_TEXTURE_2D_ARRAY];
if (tex != NULL) {
gpu_texture_update_from_ibuf(tex, ima, ibuf, tile, x, y, w, h);
}
BKE_image_release_ibuf(ima, ibuf, NULL);
@ -960,13 +1364,11 @@ void GPU_free_unused_buffers(Main *bmain)
static void gpu_free_image_immediate(Image *ima)
{
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
for (int i = 0; i < TEXTARGET_COUNT; i++) {
/* free glsl image binding */
if (tile->gputexture[i] != NULL) {
GPU_texture_free(tile->gputexture[i]);
tile->gputexture[i] = NULL;
}
for (int i = 0; i < TEXTARGET_COUNT; i++) {
/* free glsl image binding */
if (ima->gputexture[i] != NULL) {
GPU_texture_free(ima->gputexture[i]);
ima->gputexture[i] = NULL;
}
}
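
For reference, the partial-update path earlier in this file shifts a tile's dirty rectangle by the tile's packed position and routes it to the tile's array layer. Below is a minimal sketch of that remapping, using the runtime fields introduced in this commit; the helper and its upload callback are hypothetical, not Blender API.

/* Sketch only: redirect a dirty rect (x, y, w, h), given in tile-local pixels,
 * into the shared GL_TEXTURE_2D_ARRAY using the per-tile runtime data. */
static void update_tile_region(const int tileoffset[2],
                               const int tilesize[2],
                               int tilelayer,
                               int x, int y, int w, int h,
                               void (*upload)(int x, int y, int layer, int w, int h))
{
  /* Clamp the dirty rect to the space reserved for this tile in the array. */
  if (x + w > tilesize[0]) {
    w = tilesize[0] - x;
  }
  if (y + h > tilesize[1]) {
    h = tilesize[1] - y;
  }
  if (w <= 0 || h <= 0) {
    return;
  }
  /* Same shift as the x + tileoffset[0] / y + tileoffset[1] arguments passed
   * to gpu_texture_update_unscaled() above, plus the tile's array layer. */
  upload(x + tileoffset[0], y + tileoffset[1], tilelayer, w, h);
}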

View File

@ -1032,10 +1032,6 @@ GPUTexture *GPU_texture_create_buffer(eGPUTextureFormat tex_format, const GLuint
GPUTexture *GPU_texture_from_bindcode(int textarget, int bindcode)
{
/* see GPUInput::textarget: it can take two values - GL_TEXTURE_2D and GL_TEXTURE_CUBE_MAP
* these values are correct for glDisable, so textarget can be safely used in
* GPU_texture_bind/GPU_texture_unbind through tex->target_base */
/* (is any of this obsolete now that we don't glEnable/Disable textures?) */
GPUTexture *tex = MEM_callocN(sizeof(GPUTexture), "GPUTexture");
tex->bindcode = bindcode;
tex->number = -1;
@ -1052,12 +1048,8 @@ GPUTexture *GPU_texture_from_bindcode(int textarget, int bindcode)
else {
GLint w, h;
GLenum gettarget;
if (textarget == GL_TEXTURE_2D) {
gettarget = GL_TEXTURE_2D;
}
else {
GLenum gettarget = textarget;
if (textarget == GL_TEXTURE_CUBE_MAP) {
gettarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X;
}

View File

@ -354,67 +354,92 @@ void node_tex_image_empty(vec3 co, out vec4 color, out float alpha)
alpha = 0.0;
}
void node_tex_tile_map(vec3 co, out vec4 color, out vec3 map)
bool node_tex_tile_lookup(inout vec3 co, sampler2DArray ima, sampler1DArray map)
{
float tx = floor(co.x);
float ty = floor(co.y);
vec2 tile_pos = floor(co.xy);
if (tx < 0 || ty < 0 || tx >= 10)
map = vec3(0, 0, -1);
else
map = vec3(co.x - tx, co.y - ty, 1001 + 10 * ty + tx);
if (tile_pos.x < 0 || tile_pos.y < 0 || tile_pos.x >= 10)
return false;
color = vec4(1.0, 0.0, 1.0, 1.0);
float tile = 10 * tile_pos.y + tile_pos.x;
if (tile >= textureSize(map, 0).x)
return false;
/* Fetch tile information. */
float tile_layer = texelFetch(map, ivec2(tile, 0), 0).x;
if (tile_layer < 0)
return false;
vec4 tile_info = texelFetch(map, ivec2(tile, 1), 0);
co = vec3(((co.xy - tile_pos) * tile_info.zw) + tile_info.xy, tile_layer);
return true;
}
void node_tex_tile_linear(
vec3 map, float tile_id, sampler2D ima, vec4 in_color, out vec4 color, out float alpha)
vec3 co, sampler2DArray ima, sampler1DArray map, out vec4 color, out float alpha)
{
if (map.z == tile_id) {
vec3 co = map.xyy;
node_tex_image_linear(co, ima, color, alpha);
if (node_tex_tile_lookup(co, ima, map)) {
color = safe_color(texture(ima, co));
}
else {
color = in_color;
alpha = color.a;
color = vec4(1.0, 0.0, 1.0, 1.0);
}
alpha = color.a;
}
void node_tex_tile_nearest(
vec3 map, float tile_id, sampler2D ima, vec4 in_color, out vec4 color, out float alpha)
vec3 co, sampler2DArray ima, sampler1DArray map, out vec4 color, out float alpha)
{
if (map.z == tile_id) {
vec3 co = map.xyy;
node_tex_image_nearest(co, ima, color, alpha);
if (node_tex_tile_lookup(co, ima, map)) {
ivec3 pix = ivec3(fract(co.xy) * textureSize(ima, 0).xy, co.z);
color = safe_color(texelFetch(ima, pix, 0));
}
else {
color = in_color;
alpha = color.a;
color = vec4(1.0, 0.0, 1.0, 1.0);
}
alpha = color.a;
}
void node_tex_tile_cubic(
vec3 map, float tile_id, sampler2D ima, vec4 in_color, out vec4 color, out float alpha)
vec3 co, sampler2DArray ima, sampler1DArray map, out vec4 color, out float alpha)
{
if (map.z == tile_id) {
vec3 co = map.xyy;
node_tex_image_cubic(co, ima, color, alpha);
if (node_tex_tile_lookup(co, ima, map)) {
vec2 tex_size = vec2(textureSize(ima, 0).xy);
co.xy *= tex_size;
/* texel center */
vec2 tc = floor(co.xy - 0.5) + 0.5;
vec2 w0, w1, w2, w3;
cubic_bspline_coefs(co.xy - tc, w0, w1, w2, w3);
vec2 s0 = w0 + w1;
vec2 s1 = w2 + w3;
vec2 f0 = w1 / (w0 + w1);
vec2 f1 = w3 / (w2 + w3);
vec4 final_co;
final_co.xy = tc - 1.0 + f0;
final_co.zw = tc + 1.0 + f1;
final_co /= tex_size.xyxy;
color = safe_color(textureLod(ima, vec3(final_co.xy, co.z), 0.0)) * s0.x * s0.y;
color += safe_color(textureLod(ima, vec3(final_co.zy, co.z), 0.0)) * s1.x * s0.y;
color += safe_color(textureLod(ima, vec3(final_co.xw, co.z), 0.0)) * s0.x * s1.y;
color += safe_color(textureLod(ima, vec3(final_co.zw, co.z), 0.0)) * s1.x * s1.y;
}
else {
color = in_color;
alpha = color.a;
color = vec4(1.0, 0.0, 1.0, 1.0);
}
alpha = color.a;
}
void node_tex_tile_smart(
vec3 map, float tile_id, sampler2D ima, vec4 in_color, out vec4 color, out float alpha)
vec3 co, sampler2DArray ima, sampler1DArray map, out vec4 color, out float alpha)
{
if (map.z == tile_id) {
vec3 co = map.xyy;
node_tex_image_smart(co, ima, color, alpha);
}
else {
color = in_color;
alpha = color.a;
}
node_tex_tile_cubic(co, ima, map, color, alpha);
}
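
The lookup above assumes a fixed layout for the 1D-array mapping texture: layer 0 stores the tile's array layer in its .x channel, layer 1 stores the (offset.xy, scale.zw) used to remap tile-local UVs, and the entry index equals the UDIM tile number minus 1001 (i.e. 10 * tile_y + tile_x). A small self-contained C sketch of that layout, with hypothetical helper names grounded only in the texelFetch() calls above:

typedef struct TileMapEntry {
  float layer;     /* layer 0, .x: array layer, or -1 for a missing tile */
  float offset[2]; /* layer 1, .xy: offset inside the array layer */
  float scale[2];  /* layer 1, .zw: scale applied to tile-local UVs */
} TileMapEntry;

/* UDIM tile 1001 sits at grid position (0, 0); tiles advance 10 per row. */
static int tile_map_index(int tile_number)
{
  return tile_number - 1001; /* == 10 * tile_y + tile_x in the shader */
}

/* Write one entry into an RGBA32F buffer of `width` texels and 2 layers. */
static void tile_map_write(float *texels, int width, int index, const TileMapEntry *entry)
{
  float *row0 = texels + 4 * index;           /* read as texelFetch(map, ivec2(index, 0), 0) */
  float *row1 = texels + 4 * (width + index); /* read as texelFetch(map, ivec2(index, 1), 0) */

  row0[0] = entry->layer;
  row1[0] = entry->offset[0];
  row1[1] = entry->offset[1];
  row1[2] = entry->scale[0];
  row1[3] = entry->scale[1];
}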

View File

@ -91,11 +91,17 @@ typedef struct RenderSlot {
struct RenderResult *render;
} RenderSlot;
typedef struct ImageTile_Runtime {
int tilearray_layer;
int _pad;
int tilearray_offset[2];
int tilearray_size[2];
} ImageTile_Runtime;
typedef struct ImageTile {
struct ImageTile *next, *prev;
/** Not written in file 2 = TEXTARGET_COUNT. */
struct GPUTexture *gputexture[2];
struct ImageTile_Runtime runtime;
char ok;
char _pad[3];
@ -114,7 +120,9 @@ typedef struct ImageTile {
enum {
TEXTARGET_TEXTURE_2D = 0,
TEXTARGET_TEXTURE_CUBE_MAP = 1,
TEXTARGET_COUNT = 2,
TEXTARGET_TEXTURE_2D_ARRAY = 2,
TEXTARGET_TEXTURE_TILE_MAPPING = 3,
TEXTARGET_COUNT = 4,
};
typedef struct Image {
@ -125,6 +133,8 @@ typedef struct Image {
/** Not written in file. */
struct MovieCache *cache;
/** Not written in file 4 = TEXTARGET_COUNT. */
struct GPUTexture *gputexture[4];
/* sources from: */
ListBase anims;
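
The integer fields in ImageTile_Runtime above are what ultimately produce the normalized (offset, scale) pair the tiled shader reads. A minimal sketch of that conversion, assuming the array texture's full dimensions are known; the function is illustrative, not the actual Blender implementation:

/* Convert the per-tile runtime data into the normalized tile_info quadruple
 * used as: array_uv = tile_local_uv * tile_info.zw + tile_info.xy. */
static void tile_info_from_runtime(const int tilearray_offset[2],
                                   const int tilearray_size[2],
                                   int array_w, int array_h,
                                   float r_info[4])
{
  r_info[0] = (float)tilearray_offset[0] / (float)array_w; /* offset.x */
  r_info[1] = (float)tilearray_offset[1] / (float)array_h; /* offset.y */
  r_info[2] = (float)tilearray_size[0] / (float)array_w;   /* scale.x */
  r_info[3] = (float)tilearray_size[1] / (float)array_h;   /* scale.y */
}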

View File

@ -64,8 +64,8 @@ typedef struct MovieClipProxy {
typedef struct MovieClip_RuntimeGPUTexture {
void *next, *prev;
MovieClipUser user;
/** Not written in file 2 = TEXTARGET_COUNT. */
struct GPUTexture *gputexture[2];
/** Not written in file 4 = TEXTARGET_COUNT. */
struct GPUTexture *gputexture[4];
} MovieClip_RuntimeGPUTexture;
typedef struct MovieClip_Runtime {

View File

@ -81,7 +81,6 @@ static void rna_Fluid_resetCache(Main *UNUSED(bmain), Scene *scene, PointerRNA *
FLUID_DOMAIN_OUTDATED_NOISE |
FLUID_DOMAIN_OUTDATED_MESH |
FLUID_DOMAIN_OUTDATED_PARTICLES);
scene->r.cfra = settings->cache_frame_start;
}
DEG_id_tag_update(ptr->owner_id, ID_RECALC_GEOMETRY);
}

View File

@ -385,8 +385,7 @@ static void rna_Image_resolution_set(PointerRNA *ptr, const float *values)
static int rna_Image_bindcode_get(PointerRNA *ptr)
{
Image *ima = (Image *)ptr->data;
ImageTile *tile = BKE_image_get_tile(ima, 0);
GPUTexture *tex = tile->gputexture[TEXTARGET_TEXTURE_2D];
GPUTexture *tex = ima->gputexture[TEXTARGET_TEXTURE_2D];
return (tex) ? GPU_texture_opengl_bindcode(tex) : 0;
}

View File

@ -216,12 +216,11 @@ static void rna_Image_scale(Image *image, ReportList *reports, int width, int he
}
}
static int rna_Image_gl_load(Image *image, ReportList *reports, int frame, int tile_number)
static int rna_Image_gl_load(Image *image, ReportList *reports, int frame)
{
ImageUser iuser;
BKE_imageuser_default(&iuser);
iuser.framenr = frame;
iuser.tile = tile_number;
GPUTexture *tex = GPU_texture_from_blender(image, &iuser, GL_TEXTURE_2D);
@ -233,15 +232,14 @@ static int rna_Image_gl_load(Image *image, ReportList *reports, int frame, int t
return GL_NO_ERROR;
}
static int rna_Image_gl_touch(Image *image, ReportList *reports, int frame, int tile_number)
static int rna_Image_gl_touch(Image *image, ReportList *reports, int frame)
{
int error = GL_NO_ERROR;
BKE_image_tag_time(image);
ImageTile *tile = BKE_image_get_tile(image, tile_number);
if (tile->gputexture[TEXTARGET_TEXTURE_2D] == NULL) {
error = rna_Image_gl_load(image, reports, frame, tile_number);
if (image->gputexture[TEXTARGET_TEXTURE_2D] == NULL) {
error = rna_Image_gl_load(image, reports, frame);
}
return error;
@ -336,7 +334,6 @@ void RNA_api_image(StructRNA *srna)
RNA_def_function_flag(func, FUNC_USE_REPORTS);
RNA_def_int(
func, "frame", 0, 0, INT_MAX, "Frame", "Frame of image sequence or movie", 0, INT_MAX);
RNA_def_int(func, "tile_number", 0, 0, INT_MAX, "Tile", "Tile of a tiled image", 0, INT_MAX);
/* return value */
parm = RNA_def_int(
func, "error", 0, -INT_MAX, INT_MAX, "Error", "OpenGL error value", -INT_MAX, INT_MAX);
@ -351,7 +348,6 @@ void RNA_api_image(StructRNA *srna)
RNA_def_function_flag(func, FUNC_USE_REPORTS);
RNA_def_int(
func, "frame", 0, 0, INT_MAX, "Frame", "Frame of image sequence or movie", 0, INT_MAX);
RNA_def_int(func, "tile_number", 0, 0, INT_MAX, "Tile", "Tile of a tiled image", 0, INT_MAX);
/* return value */
parm = RNA_def_int(
func, "error", 0, -INT_MAX, INT_MAX, "Error", "OpenGL error value", -INT_MAX, INT_MAX);

View File

@ -88,7 +88,7 @@ static int node_shader_gpu_tex_environment(GPUMaterial *mat,
"node_tex_environment_equirectangular",
in[0].link,
GPU_constant(&clamp_size),
GPU_image(ima, iuser, 0),
GPU_image(ima, iuser),
&in[0].link);
}
else {
@ -103,7 +103,7 @@ static int node_shader_gpu_tex_environment(GPUMaterial *mat,
GPU_link(mat,
"node_tex_image_linear_no_mip",
in[0].link,
GPU_image(ima, iuser, 0),
GPU_image(ima, iuser),
&out[0].link,
&outalpha);
break;
@ -111,17 +111,13 @@ static int node_shader_gpu_tex_environment(GPUMaterial *mat,
GPU_link(mat,
"node_tex_image_nearest",
in[0].link,
GPU_image(ima, iuser, 0),
GPU_image(ima, iuser),
&out[0].link,
&outalpha);
break;
default:
GPU_link(mat,
"node_tex_image_cubic",
in[0].link,
GPU_image(ima, iuser, 0),
&out[0].link,
&outalpha);
GPU_link(
mat, "node_tex_image_cubic", in[0].link, GPU_image(ima, iuser), &out[0].link, &outalpha);
break;
}

View File

@ -130,21 +130,19 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
node_shader_gpu_tex_mapping(mat, node, in, out);
if (ima->source == IMA_SRC_TILED) {
GPUNodeLink *map;
GPU_link(mat, "node_tex_tile_map", in[0].link, &out[0].link, &map);
/* This is not exactly great, but it is needed if we want to support different sizes per
* tile on older hardware, which rules out better methods like texture arrays. */
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
float tile_number = tile->tile_number;
GPU_link(mat,
names_tiled[tex->interpolation],
map,
GPU_uniform(&tile_number),
GPU_image(ima, iuser, tile->tile_number),
out[0].link,
&out[0].link,
&out[1].link);
}
/* The tiled shader needs both the tile array itself and the mapping from tile number to array
* position. Which of the two to allocate is decided automatically from the shader argument
* type: the first GPU_image(ima, iuser) here resolves to the array and the second to the
* mapping, since the third shader argument has type sampler2DArray while the fourth is
* sampler1DArray. */
GPU_stack_link(mat,
node,
names_tiled[tex->interpolation],
in,
out,
GPU_image(ima, iuser),
GPU_image(ima, iuser));
}
else {
switch (tex->projection) {
@ -159,20 +157,20 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
GPU_link(mat, "set_rgb", *texco, &input_coords);
}
if (do_texco_extend) {
GPU_link(mat, "point_texco_clamp", *texco, GPU_image(ima, iuser, 0), texco);
GPU_link(mat, "point_texco_clamp", *texco, GPU_image(ima, iuser), texco);
}
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser, 0));
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser));
break;
case SHD_PROJ_BOX:
vnor = GPU_builtin(GPU_WORLD_NORMAL);
ob_mat = GPU_builtin(GPU_OBJECT_MATRIX);
blend = GPU_uniform(&tex->projection_blend);
gpu_image = GPU_image(ima, iuser, 0);
gpu_image = GPU_image(ima, iuser);
/* equivalent to normal_world_to_object */
GPU_link(mat, "normal_transform_transposed_m4v3", vnor, ob_mat, &norm);
GPU_link(mat, gpu_node_name, *texco, norm, GPU_image(ima, iuser, 0), &col1, &col2, &col3);
GPU_link(mat, gpu_node_name, *texco, norm, GPU_image(ima, iuser), &col1, &col2, &col3);
GPU_stack_link(
mat, node, "node_tex_image_box", in, out, norm, col1, col2, col3, gpu_image, blend);
break;
@ -186,9 +184,9 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
GPU_link(mat, "set_rgb", *texco, &input_coords);
}
if (do_texco_extend) {
GPU_link(mat, "point_texco_clamp", *texco, GPU_image(ima, iuser, 0), texco);
GPU_link(mat, "point_texco_clamp", *texco, GPU_image(ima, iuser), texco);
}
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser, 0));
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser));
break;
case SHD_PROJ_TUBE:
@ -200,9 +198,9 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
GPU_link(mat, "set_rgb", *texco, &input_coords);
}
if (do_texco_extend) {
GPU_link(mat, "point_texco_clamp", *texco, GPU_image(ima, iuser, 0), texco);
GPU_link(mat, "point_texco_clamp", *texco, GPU_image(ima, iuser), texco);
}
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser, 0));
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser));
break;
}
@ -210,7 +208,7 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
if (do_texco_clip) {
gpu_node_name = names_clip[tex->interpolation];
in[0].link = input_coords;
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser, 0), out[0].link);
GPU_stack_link(mat, node, gpu_node_name, in, out, GPU_image(ima, iuser), out[0].link);
}
}
}
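
The comment in the tiled branch above describes how each GPU_image(ima, iuser) link is resolved from the declared sampler type of the corresponding shader argument. A sketch of that decision, assuming a hypothetical sampler-type enum; only the TEXTARGET_* values come from DNA_image_types.h in this commit:

typedef enum GPUSamplerType {
  GPU_SAMPLER_2D,
  GPU_SAMPLER_2D_ARRAY,
  GPU_SAMPLER_1D_ARRAY,
} GPUSamplerType;

/* Hypothetical: pick the image texture slot from the sampler type declared in
 * the GLSL function signature (see names_tiled and node_tex_tile_* above). */
static int textarget_for_sampler(GPUSamplerType type)
{
  switch (type) {
    case GPU_SAMPLER_2D_ARRAY:
      return TEXTARGET_TEXTURE_2D_ARRAY; /* packed tile pixels */
    case GPU_SAMPLER_1D_ARRAY:
      return TEXTARGET_TEXTURE_TILE_MAPPING; /* tile -> layer/offset/scale table */
    default:
      return TEXTARGET_TEXTURE_2D;
  }
}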