Merge branch 'blender-v2.83-release'

This commit is contained in:
Jeroen Bakker 2020-04-16 08:58:25 +02:00
commit ec263547b5
4 changed files with 178 additions and 69 deletions

View File

@ -42,11 +42,11 @@
/* Using Hammersley distribution */
static float *create_disk_samples(int num_samples, int num_iterations)
{
BLI_assert(num_samples * num_iterations <= CAVITY_MAX_SAMPLES);
const int total_samples = num_samples * num_iterations;
const float num_samples_inv = 1.0f / num_samples;
/* vec4 to ensure memory alignment. */
float(*texels)[4] = MEM_callocN(sizeof(float[4]) * CAVITY_MAX_SAMPLES, __func__);
for (int i = 0; i < total_samples; i++) {
float it_add = (i / num_samples) * 0.499f;
float r = fmodf((i + 0.5f + it_add) * num_samples_inv, 1.0f);
@ -102,7 +102,7 @@ void workbench_cavity_data_update(WORKBENCH_PrivateData *wpd, WORKBENCH_UBO_Worl
if (CAVITY_ENABLED(wpd)) {
int cavity_sample_count_single_iteration = scene->display.matcap_ssao_samples;
int cavity_sample_count_total = workbench_cavity_total_sample_count(wpd, scene);
int max_iter_count = cavity_sample_count_total / cavity_sample_count_single_iteration;
const int max_iter_count = cavity_sample_count_total / cavity_sample_count_single_iteration;
int sample = wpd->taa_sample % max_iter_count;
wd->cavity_sample_start = cavity_sample_count_single_iteration * sample;
@ -128,6 +128,7 @@ void workbench_cavity_samples_ubo_ensure(WORKBENCH_PrivateData *wpd)
int cavity_sample_count_single_iteration = scene->display.matcap_ssao_samples;
int cavity_sample_count = workbench_cavity_total_sample_count(wpd, scene);
const int max_iter_count = max_ii(1, cavity_sample_count / cavity_sample_count_single_iteration);
if (wpd->vldata->cavity_sample_count != cavity_sample_count) {
DRW_UBO_FREE_SAFE(wpd->vldata->cavity_sample_ubo);
@ -135,8 +136,7 @@ void workbench_cavity_samples_ubo_ensure(WORKBENCH_PrivateData *wpd)
}
if (wpd->vldata->cavity_sample_ubo == NULL) {
float *samples = create_disk_samples(cavity_sample_count_single_iteration,
max_ii(1, wpd->taa_sample_len));
float *samples = create_disk_samples(cavity_sample_count_single_iteration, max_iter_count);
wpd->vldata->cavity_jitter_tx = create_jitter_texture(cavity_sample_count);
/* NOTE: Uniform buffer needs to always be filled to be valid. */
wpd->vldata->cavity_sample_ubo = DRW_uniformbuffer_create(

View File

@ -1648,9 +1648,12 @@ static void extract_lnor_hq_loop_mesh(
normal_float_to_short_v3(&lnor_data->x, mr->poly_normals[p]);
}
/* Flag for paint mode overlay. */
if (mpoly->flag & ME_HIDE || (mr->extract_type == MR_EXTRACT_MAPPED && (mr->v_origindex) &&
mr->v_origindex[mloop->v] == ORIGINDEX_NONE)) {
/* Flag for paint mode overlay.
* Only use MR_EXTRACT_MAPPED in edit mode where it is used to display the edge-normals. In paint
* mode it will use the unmapped data to draw the wireframe. */
if (mpoly->flag & ME_HIDE ||
(mr->edit_bmesh && mr->extract_type == MR_EXTRACT_MAPPED && (mr->v_origindex) &&
mr->v_origindex[mloop->v] == ORIGINDEX_NONE)) {
lnor_data->w = -1;
}
else if (mpoly->flag & ME_FACE_SEL) {
@ -1724,9 +1727,12 @@ static void extract_lnor_loop_mesh(
*lnor_data = GPU_normal_convert_i10_v3(mr->poly_normals[p]);
}
/* Flag for paint mode overlay. */
if (mpoly->flag & ME_HIDE || (mr->extract_type == MR_EXTRACT_MAPPED && (mr->v_origindex) &&
mr->v_origindex[mloop->v] == ORIGINDEX_NONE)) {
/* Flag for paint mode overlay.
* Only use MR_EXTRACT_MAPPED in edit mode where it is used to display the edge-normals. In paint
* mode it will use the unmapped data to draw the wireframe. */
if (mpoly->flag & ME_HIDE ||
(mr->edit_bmesh && mr->extract_type == MR_EXTRACT_MAPPED && (mr->v_origindex) &&
mr->v_origindex[mloop->v] == ORIGINDEX_NONE)) {
lnor_data->w = -1;
}
else if (mpoly->flag & ME_FACE_SEL) {

View File

@ -43,6 +43,14 @@
extern void GPU_matrix_bind(const GPUShaderInterface *);
extern bool GPU_matrix_dirty_get(void);
/* One streaming vertex buffer used by the immediate-mode API.
 * Two instances exist: one for strict draws (exact vertex count known up
 * front) and one for at-most draws; see `imm.draw_buffer_strict` /
 * `imm.draw_buffer` and the selection in immBegin(). */
typedef struct ImmediateDrawBuffer {
  GLuint vbo_id;        /* GL buffer object backing this stream. */
  GLubyte *buffer_data; /* Pointer returned by glMapBufferRange() while mapped. */
  uint buffer_offset;   /* Byte offset of the next free region inside the VBO. */
  uint buffer_size;     /* Current allocated size of the VBO, in bytes. */
  uint default_size;    /* Size the buffer is shrunk back to when it grew past it. */
} ImmediateDrawBuffer;
typedef struct {
/* TODO: organize this struct by frequency of change (run-time) */
@ -50,14 +58,14 @@ typedef struct {
GPUContext *context;
/* current draw call */
GLubyte *buffer_data;
uint buffer_offset;
uint buffer_bytes_mapped;
uint vertex_len;
bool strict_vertex_len;
uint vertex_len;
uint buffer_bytes_mapped;
ImmediateDrawBuffer *active_buffer;
GPUPrimType prim_type;
GPUVertFormat vertex_format;
ImmediateDrawBuffer draw_buffer;
ImmediateDrawBuffer draw_buffer_strict;
/* current vertex */
uint vertex_idx;
@ -65,7 +73,6 @@ typedef struct {
uint16_t
unassigned_attr_bits; /* which attributes of current vertex have not been given values? */
GLuint vbo_id;
GLuint vao_id;
GLuint bound_program;
@ -76,7 +83,6 @@ typedef struct {
/* size of internal buffer */
#define DEFAULT_INTERNAL_BUFFER_SIZE (4 * 1024 * 1024)
static uint imm_buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
static bool initialized = false;
static Immediate imm;
@ -88,9 +94,16 @@ void immInit(void)
#endif
memset(&imm, 0, sizeof(Immediate));
imm.vbo_id = GPU_buf_alloc();
glBindBuffer(GL_ARRAY_BUFFER, imm.vbo_id);
glBufferData(GL_ARRAY_BUFFER, imm_buffer_size, NULL, GL_DYNAMIC_DRAW);
imm.draw_buffer.vbo_id = GPU_buf_alloc();
imm.draw_buffer.buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
imm.draw_buffer.default_size = DEFAULT_INTERNAL_BUFFER_SIZE;
glBindBuffer(GL_ARRAY_BUFFER, imm.draw_buffer.vbo_id);
glBufferData(GL_ARRAY_BUFFER, imm.draw_buffer.buffer_size, NULL, GL_DYNAMIC_DRAW);
imm.draw_buffer_strict.vbo_id = GPU_buf_alloc();
imm.draw_buffer_strict.buffer_size = 0;
imm.draw_buffer_strict.default_size = 0;
glBindBuffer(GL_ARRAY_BUFFER, imm.draw_buffer_strict.vbo_id);
glBufferData(GL_ARRAY_BUFFER, imm.draw_buffer_strict.buffer_size, NULL, GL_DYNAMIC_DRAW);
imm.prim_type = GPU_PRIM_NONE;
imm.strict_vertex_len = true;
@ -124,7 +137,8 @@ void immDeactivate(void)
void immDestroy(void)
{
GPU_buf_free(imm.vbo_id);
GPU_buf_free(imm.draw_buffer.vbo_id);
GPU_buf_free(imm.draw_buffer_strict.vbo_id);
initialized = false;
}
@ -213,6 +227,7 @@ void immBegin(GPUPrimType prim_type, uint vertex_len)
assert(initialized);
assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we haven't already begun */
assert(vertex_count_makes_sense_for_primitive(vertex_len, prim_type));
assert(imm.active_buffer == NULL);
#endif
imm.prim_type = prim_type;
imm.vertex_len = vertex_len;
@ -221,54 +236,58 @@ void immBegin(GPUPrimType prim_type, uint vertex_len)
/* how many bytes do we need for this draw call? */
const uint bytes_needed = vertex_buffer_size(&imm.vertex_format, vertex_len);
ImmediateDrawBuffer *active_buffer = imm.strict_vertex_len ? &imm.draw_buffer_strict :
&imm.draw_buffer;
imm.active_buffer = active_buffer;
glBindBuffer(GL_ARRAY_BUFFER, imm.vbo_id);
glBindBuffer(GL_ARRAY_BUFFER, active_buffer->vbo_id);
/* does the current buffer have enough room? */
const uint available_bytes = imm_buffer_size - imm.buffer_offset;
const uint available_bytes = active_buffer->buffer_size - active_buffer->buffer_offset;
bool recreate_buffer = false;
if (bytes_needed > imm_buffer_size) {
if (bytes_needed > active_buffer->buffer_size) {
/* expand the internal buffer */
imm_buffer_size = bytes_needed;
active_buffer->buffer_size = bytes_needed;
recreate_buffer = true;
}
else if (bytes_needed < DEFAULT_INTERNAL_BUFFER_SIZE &&
imm_buffer_size > DEFAULT_INTERNAL_BUFFER_SIZE) {
else if (bytes_needed < active_buffer->default_size &&
active_buffer->buffer_size > active_buffer->default_size) {
/* shrink the internal buffer */
imm_buffer_size = DEFAULT_INTERNAL_BUFFER_SIZE;
active_buffer->buffer_size = active_buffer->default_size;
recreate_buffer = true;
}
/* ensure vertex data is aligned */
/* Might waste a little space, but it's safe. */
const uint pre_padding = padding(imm.buffer_offset, imm.vertex_format.stride);
const uint pre_padding = padding(active_buffer->buffer_offset, imm.vertex_format.stride);
if (!recreate_buffer && ((bytes_needed + pre_padding) <= available_bytes)) {
imm.buffer_offset += pre_padding;
active_buffer->buffer_offset += pre_padding;
}
else {
/* orphan this buffer & start with a fresh one */
/* this method works on all platforms, old & new */
glBufferData(GL_ARRAY_BUFFER, imm_buffer_size, NULL, GL_DYNAMIC_DRAW);
glBufferData(GL_ARRAY_BUFFER, active_buffer->buffer_size, NULL, GL_DYNAMIC_DRAW);
imm.buffer_offset = 0;
active_buffer->buffer_offset = 0;
}
/* printf("mapping %u to %u\n", imm.buffer_offset, imm.buffer_offset + bytes_needed - 1); */
imm.buffer_data = glMapBufferRange(GL_ARRAY_BUFFER,
imm.buffer_offset,
bytes_needed,
GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT |
(imm.strict_vertex_len ? 0 : GL_MAP_FLUSH_EXPLICIT_BIT));
active_buffer->buffer_data = glMapBufferRange(
GL_ARRAY_BUFFER,
active_buffer->buffer_offset,
bytes_needed,
GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT |
(imm.strict_vertex_len ? 0 : GL_MAP_FLUSH_EXPLICIT_BIT));
#if TRUST_NO_ONE
assert(imm.buffer_data != NULL);
assert(active_buffer->buffer_data != NULL);
#endif
imm.buffer_bytes_mapped = bytes_needed;
imm.vertex_data = imm.buffer_data;
imm.vertex_data = active_buffer->buffer_data;
}
void immBeginAtMost(GPUPrimType prim_type, uint vertex_len)
@ -338,7 +357,7 @@ static void immDrawSetup(void)
for (uint a_idx = 0; a_idx < imm.vertex_format.attr_len; a_idx++) {
const GPUVertAttr *a = &imm.vertex_format.attrs[a_idx];
const uint offset = imm.buffer_offset + a->offset;
const uint offset = imm.active_buffer->buffer_offset + a->offset;
const GLvoid *pointer = (const GLubyte *)0 + offset;
const uint loc = read_attr_location(&imm.attr_binding, a_idx);
@ -365,6 +384,7 @@ void immEnd(void)
{
#if TRUST_NO_ONE
assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
assert(imm.active_buffer);
#endif
uint buffer_bytes_used;
@ -421,12 +441,13 @@ void immEnd(void)
// glBindBuffer(GL_ARRAY_BUFFER, 0);
// glBindVertexArray(0);
/* prep for next immBegin */
imm.buffer_offset += buffer_bytes_used;
imm.active_buffer->buffer_offset += buffer_bytes_used;
}
/* prep for next immBegin */
imm.prim_type = GPU_PRIM_NONE;
imm.strict_vertex_len = true;
imm.active_buffer = NULL;
}
static void setAttrValueBit(uint attr_id)

View File

@ -65,6 +65,24 @@ typedef struct ViewportTempTexture {
GPUTexture *texture;
} ViewportTempTexture;
/* Struct storing a viewport specific GPUBatch.
 * The end-goal is to have a single batch shared across viewports and use a model matrix to place
 * the batch. Due to OCIO and the Image/UV editor we are not able to use a model matrix yet.
 *
 * NOTE: the previous declaration ended in `} GPUViewportBatch;`, which (besides declaring the
 * struct type) also defined an unused file-scope variable named `GPUViewportBatch` with external
 * linkage. Only the type is needed, so the stray object declaration is removed. */
struct GPUViewportBatch {
  /* Cached quad batch; lazily (re)created when the rectangles change. */
  GPUBatch *batch;
  struct {
    rctf rect_pos; /* Screen-space rectangle the batch was built for. */
    rctf rect_uv;  /* Texture-coordinate rectangle the batch was built for. */
  } last_used_parameters;
};
/* Vertex format (and the attribute ids it hands back) shared by all viewport
 * batches. Built once, on first use, in gpu_viewport_batch_format(). */
static struct {
  GPUVertFormat format;
  struct {
    /* Ids returned by GPU_vertformat_attr_add() for "pos" / "texCoord". */
    uint pos, tex_coord;
  } attr_id;
} g_viewport = {{0}};
struct GPUViewport {
int size[2];
int flag;
@ -98,6 +116,7 @@ struct GPUViewport {
/* TODO(fclem) the uvimage display use the viewport but do not set any view transform for the
* moment. The end goal would be to let the GPUViewport do the color management. */
bool do_color_management;
struct GPUViewportBatch batch;
};
enum {
@ -661,6 +680,76 @@ void GPU_viewport_stereo_composite(GPUViewport *viewport, Stereo3dFormat *stereo
GPU_framebuffer_restore();
}
/* -------------------------------------------------------------------- */
/** \name Viewport Batches
* \{ */
static GPUVertFormat *gpu_viewport_batch_format(void)
{
if (g_viewport.format.attr_len == 0) {
GPUVertFormat *format = &g_viewport.format;
g_viewport.attr_id.pos = GPU_vertformat_attr_add(
format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
g_viewport.attr_id.tex_coord = GPU_vertformat_attr_add(
format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
}
return &g_viewport.format;
}
static GPUBatch *gpu_viewport_batch_create(const rctf *rect_pos, const rctf *rect_uv)
{
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(gpu_viewport_batch_format());
const uint vbo_len = 4;
GPU_vertbuf_data_alloc(vbo, vbo_len);
GPUVertBufRaw pos_step, tex_coord_step;
GPU_vertbuf_attr_get_raw_data(vbo, g_viewport.attr_id.pos, &pos_step);
GPU_vertbuf_attr_get_raw_data(vbo, g_viewport.attr_id.tex_coord, &tex_coord_step);
copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmin, rect_pos->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmin, rect_uv->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmax, rect_pos->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmax, rect_uv->ymin);
copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmin, rect_pos->ymax);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmin, rect_uv->ymax);
copy_v2_fl2(GPU_vertbuf_raw_step(&pos_step), rect_pos->xmax, rect_pos->ymax);
copy_v2_fl2(GPU_vertbuf_raw_step(&tex_coord_step), rect_uv->xmax, rect_uv->ymax);
return GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
}
/* Return the viewport's cached quad batch, rebuilding it when either the
 * position or the UV rectangle differs from the ones it was created with. */
static GPUBatch *gpu_viewport_batch_get(GPUViewport *viewport,
                                        const rctf *rect_pos,
                                        const rctf *rect_uv)
{
  const float compare_limit = 0.0001f;
  struct GPUViewportBatch *cache = &viewport->batch;

  if (cache->batch) {
    /* Invalidate the cache when the requested rectangles moved. */
    const bool pos_unchanged = BLI_rctf_compare(
        &cache->last_used_parameters.rect_pos, rect_pos, compare_limit);
    const bool uv_unchanged = BLI_rctf_compare(
        &cache->last_used_parameters.rect_uv, rect_uv, compare_limit);
    if (!(pos_unchanged && uv_unchanged)) {
      GPU_batch_discard(cache->batch);
      cache->batch = NULL;
    }
  }

  if (cache->batch == NULL) {
    cache->batch = gpu_viewport_batch_create(rect_pos, rect_uv);
    cache->last_used_parameters.rect_pos = *rect_pos;
    cache->last_used_parameters.rect_uv = *rect_uv;
  }
  return cache->batch;
}
/* Release the viewport's cached batch. Safe to call when nothing is cached. */
static void gpu_viewport_batch_free(GPUViewport *viewport)
{
  if (viewport->batch.batch == NULL) {
    return;
  }
  GPU_batch_discard(viewport->batch.batch);
  viewport->batch.batch = NULL;
}
/** \} */
static void gpu_viewport_draw_colormanaged(GPUViewport *viewport,
const rctf *rect_pos,
@ -671,13 +760,17 @@ static void gpu_viewport_draw_colormanaged(GPUViewport *viewport,
GPUTexture *color = dtxl->color;
GPUTexture *color_overlay = dtxl->color_overlay;
GPUVertFormat *vert_format = immVertexFormat();
uint pos = GPU_vertformat_attr_add(vert_format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
uint texco = GPU_vertformat_attr_add(vert_format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
bool use_ocio = false;
if (viewport->do_color_management && display_colorspace) {
/* During the binding process the last used VertexFormat is tested and can assert as it is not
* valid. By calling the `immVertexFormat` the last used VertexFormat is reset and the assert
* does not happen. This solves a chicken and egg problem when using GPUBatches. GPUBatches
* contain the correct vertex format, but can only bind after the shader is bound.
*
* Image/UV editor still uses imm, after that has been changed we could move this fix to the
* OCIO. */
immVertexFormat();
use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(&viewport->view_settings,
&viewport->display_settings,
NULL,
@ -686,38 +779,26 @@ static void gpu_viewport_draw_colormanaged(GPUViewport *viewport,
true);
}
if (!use_ocio) {
immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_OVERLAYS_MERGE);
immUniform1i("display_transform", display_colorspace);
immUniform1i("image_texture", 0);
immUniform1i("overlays_texture", 1);
GPUBatch *batch = gpu_viewport_batch_get(viewport, rect_pos, rect_uv);
if (use_ocio) {
GPU_batch_program_set_imm_shader(batch);
}
else {
GPU_batch_program_set_builtin(batch, GPU_SHADER_2D_IMAGE_OVERLAYS_MERGE);
GPU_batch_uniform_1i(batch, "display_transform", display_colorspace);
GPU_batch_uniform_1i(batch, "image_texture", 0);
GPU_batch_uniform_1i(batch, "overlays_texture", 1);
}
GPU_texture_bind(color, 0);
GPU_texture_bind(color_overlay, 1);
immBegin(GPU_PRIM_TRI_STRIP, 4);
immAttr2f(texco, rect_uv->xmin, rect_uv->ymin);
immVertex2f(pos, rect_pos->xmin, rect_pos->ymin);
immAttr2f(texco, rect_uv->xmax, rect_uv->ymin);
immVertex2f(pos, rect_pos->xmax, rect_pos->ymin);
immAttr2f(texco, rect_uv->xmin, rect_uv->ymax);
immVertex2f(pos, rect_pos->xmin, rect_pos->ymax);
immAttr2f(texco, rect_uv->xmax, rect_uv->ymax);
immVertex2f(pos, rect_pos->xmax, rect_pos->ymax);
immEnd();
GPU_batch_draw(batch);
GPU_texture_unbind(color);
GPU_texture_unbind(color_overlay);
if (use_ocio) {
IMB_colormanagement_finish_glsl_draw();
}
else {
immUnbindProgram();
}
}
/**
@ -781,8 +862,8 @@ void GPU_viewport_draw_to_screen_ex(GPUViewport *viewport,
* Merge and draw the buffers of \a viewport into the currently active framebuffer, performing
* color transform to display space.
*
* \param rect: Coordinates to draw into. By swapping min and max values, drawing can be done with
* inversed axis coordinates (upside down or sideways).
* \param rect: Coordinates to draw into. By swapping min and max values, drawing can be done
* with inversed axis coordinates (upside down or sideways).
*/
void GPU_viewport_draw_to_screen(GPUViewport *viewport, int view, const rcti *rect)
{
@ -960,6 +1041,7 @@ void GPU_viewport_free(GPUViewport *viewport)
MEM_freeN(viewport->idatalist);
BKE_color_managed_view_settings_free(&viewport->view_settings);
gpu_viewport_batch_free(viewport);
MEM_freeN(viewport);
}