DRW: Refactor & Split draw_manager.c into multiple files.

Refactor includes:
- Removal of DRWInterface (it was useless).
- Split of DRWCallHeader into a new struct DRWCallState that will be reused in the future.
- Use of BLI_link_utils for APPEND/PREPEND (a minimal sketch follows this list).
- Creation of the new DRWManager struct type. This will enable us to create more than one manager in the future.
- Removal of some dead code.
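For reference, here is a minimal sketch of the two list macros this refactor standardizes on, simplified from what BLI_link_utils.h provides (the real macros also include type checking):

/* Prepend to a singly linked list addressed by its head pointer. */
#define BLI_LINKS_PREPEND(list, link)  { \
	(link)->next = (list); \
	(list) = (link); \
} ((void)0)

/* Append to a list struct that tracks both its first and last links. */
#define BLI_LINKS_APPEND(list, link)  { \
	(link)->next = NULL; \
	if ((list)->first) { \
		(list)->last->next = (link); \
	} \
	else { \
		(list)->first = (link); \
	} \
	(list)->last = (link); \
} ((void)0)

Both patterns appear throughout the new files below, e.g. BLI_LINKS_PREPEND(shgroup->uniforms, uni) and BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call).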
Clément Foucault, 2018-02-28 01:16:23 +01:00
commit 0df21e2504 (parent d5a55b6918)
10 changed files with 2662 additions and 2536 deletions

source/blender/draw/CMakeLists.txt

@@ -64,7 +64,12 @@ set(SRC
intern/draw_common.c
intern/draw_instance_data.c
intern/draw_manager.c
intern/draw_manager_data.c
intern/draw_manager_exec.c
intern/draw_manager_framebuffer.c
intern/draw_manager_shader.c
intern/draw_manager_text.c
intern/draw_manager_texture.c
intern/draw_manager_profiling.c
intern/draw_view.c
modes/edit_armature_mode.c
@@ -108,6 +113,7 @@ set(SRC
intern/draw_cache_impl.h
intern/draw_common.h
intern/draw_instance_data.h
intern/draw_manager.h
intern/draw_manager_text.h
intern/draw_manager_profiling.h
intern/draw_view.h

intern/DRW_render.h

@@ -288,7 +288,7 @@ typedef enum {
DRW_STATE_CULL_BACK = (1 << 6),
DRW_STATE_CULL_FRONT = (1 << 7),
DRW_STATE_WIRE = (1 << 8),
DRW_STATE_WIRE_LARGE = (1 << 9),
// DRW_STATE_WIRE_LARGE = (1 << 9), /* Removed from ogl in 3.0 */
DRW_STATE_POINT = (1 << 10),
DRW_STATE_STIPPLE_2 = (1 << 11),
DRW_STATE_STIPPLE_3 = (1 << 12),

intern/draw_manager.c (diff suppressed because it is too large)

intern/draw_manager.h (new file)

@@ -0,0 +1,346 @@
/*
* Copyright 2016, Blender Foundation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Blender Institute
*
*/
/** \file draw_manager.h
* \ingroup draw
*/
/* Private functions / structs of the draw manager */
#ifndef __DRAW_MANAGER_H__
#define __DRAW_MANAGER_H__
#include "DRW_engine.h"
#include "DRW_render.h"
#include "BLI_linklist.h"
#include "BLI_threads.h"
#include "GPU_batch.h"
#include "GPU_framebuffer.h"
#include "GPU_shader.h"
#include "GPU_uniformbuffer.h"
#include "GPU_viewport.h"
#include "draw_instance_data.h"
/* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
#define USE_GPU_SELECT
/* ------------ Profiling --------------- */
#define USE_PROFILE
#ifdef USE_PROFILE
# include "PIL_time.h"
# define PROFILE_TIMER_FALLOFF 0.1
# define PROFILE_START(time_start) \
double time_start = PIL_check_seconds_timer();
# define PROFILE_END_ACCUM(time_accum, time_start) { \
time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
} ((void)0)
/* exp average */
# define PROFILE_END_UPDATE(time_update, time_start) { \
double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
(_time_delta * PROFILE_TIMER_FALLOFF); \
} ((void)0)
#else /* USE_PROFILE */
# define PROFILE_START(time_start) ((void)0)
# define PROFILE_END_ACCUM(time_accum, time_start) ((void)0)
# define PROFILE_END_UPDATE(time_update, time_start) ((void)0)
#endif /* USE_PROFILE */
/* ------------ Data Structure --------------- */
/**
* Data structure containing all drawcalls organized by passes and materials.
* DRWPass > DRWShadingGroup > DRWCall > DRWCallState
* > DRWUniform
**/
typedef struct DRWCallHeader {
struct DRWCallHeader *next; /* in reality DRWCall or DRWCallGenerate. */
#ifdef USE_GPU_SELECT
int select_id;
#endif
unsigned char type;
} DRWCallHeader;
typedef struct DRWCallState {
unsigned char flag;
uint16_t matflag;
/* Culling: Using Bounding Sphere for now for faster culling.
* Not ideal for planes. */
struct {
float loc[3], rad; /* Bypassed if radius is < 0.0. */
} bsphere;
/* Matrices */
float model[4][4];
float modelinverse[4][4];
float modelview[4][4];
float modelviewinverse[4][4];
float modelviewprojection[4][4];
float normalview[3][3];
float normalworld[3][3]; /* Not view dependent */
float orcotexfac[2][3]; /* Not view dependent */
float eyevec[3];
} DRWCallState;
typedef struct DRWCall {
DRWCallHeader head;
DRWCallState state; /* For now integrated to the struct. */
Gwn_Batch *geometry;
} DRWCall;
typedef struct DRWCallGenerate {
DRWCallHeader head;
DRWCallState state; /* For now integrated to the struct. */
DRWCallGenerateFn *geometry_fn;
void *user_data;
} DRWCallGenerate;
struct DRWUniform {
DRWUniform *next; /* single-linked list */
const void *value;
int location;
char type; /* DRWUniformType */
char length; /* cannot be more than 16 */
char arraysize; /* cannot be more than 16 either */
};
struct DRWShadingGroup {
DRWShadingGroup *next;
GPUShader *shader; /* Shader to bind */
DRWUniform *uniforms; /* Uniforms pointers */
/* Watch this! Can be nasty for debugging. */
union {
struct { /* DRW_SHG_NORMAL */
DRWCallHeader *first, *last; /* Linked list of DRWCall or DRWCallGenerate depending on type */
} calls;
struct { /* DRW_SHG_***_BATCH */
struct Gwn_Batch *batch_geom; /* Result of call batching */
struct Gwn_VertBuf *batch_vbo;
unsigned int primitive_count;
};
struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */
struct Gwn_Batch *instance_geom;
struct Gwn_VertBuf *instance_vbo;
unsigned int instance_count;
float instance_orcofac[2][3]; /* TODO find a better place. */
};
};
DRWState state_extra; /* State changes for this batch only (or'd with the pass's state) */
DRWState state_extra_disable; /* State changes for this batch only (and'd with the pass's state) */
unsigned int stencil_mask; /* Stencil mask to use for stencil test / write operations */
int type;
/* Builtin matrices locations */
int model;
int modelinverse;
int modelview;
int modelviewinverse;
int modelviewprojection;
int normalview;
int normalworld;
int orcotexfac;
int eye;
uint16_t matflag; /* Matrices needed, same as DRWCallState.matflag */
#ifndef NDEBUG
char attribs_count;
#endif
#ifdef USE_GPU_SELECT
DRWInstanceData *inst_selectid;
DRWPass *pass_parent; /* backlink to pass we're in */
int override_selectid; /* Override for single object instances. */
#endif
};
#define MAX_PASS_NAME 32
struct DRWPass {
/* Linked list */
struct {
DRWShadingGroup *first;
DRWShadingGroup *last;
} shgroups;
DRWState state;
char name[MAX_PASS_NAME];
};
/* Used by DRWUniform.type */
typedef enum {
DRW_UNIFORM_BOOL,
DRW_UNIFORM_SHORT_TO_INT,
DRW_UNIFORM_SHORT_TO_FLOAT,
DRW_UNIFORM_INT,
DRW_UNIFORM_FLOAT,
DRW_UNIFORM_TEXTURE,
DRW_UNIFORM_BUFFER,
DRW_UNIFORM_BLOCK
} DRWUniformType;
/* Used by DRWCallHeader.type */
enum {
DRW_CALL_SINGLE, /* A single batch */
DRW_CALL_GENERATE, /* Uses a callback to draw with any number of batches. */
};
/* Used by DRWCallState.flag */
enum {
DRW_CALL_CULLED = (1 << 0),
DRW_CALL_NEGSCALE = (1 << 1),
};
/* Used by DRWCallState.matflag */
enum {
DRW_CALL_MODELINVERSE = (1 << 0),
DRW_CALL_MODELVIEW = (1 << 1),
DRW_CALL_MODELVIEWINVERSE = (1 << 2),
DRW_CALL_MODELVIEWPROJECTION = (1 << 3),
DRW_CALL_NORMALVIEW = (1 << 4),
DRW_CALL_NORMALWORLD = (1 << 5),
DRW_CALL_ORCOTEXFAC = (1 << 6),
DRW_CALL_EYEVEC = (1 << 7),
};
/* Used by DRWShadingGroup.type */
enum {
DRW_SHG_NORMAL,
DRW_SHG_POINT_BATCH,
DRW_SHG_LINE_BATCH,
DRW_SHG_TRIANGLE_BATCH,
DRW_SHG_INSTANCE,
DRW_SHG_INSTANCE_EXTERNAL,
};
/* ------------- DRAW MANAGER ------------ */
#define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */
typedef struct DRWManager {
/* TODO clean up this struct a bit */
/* Cache generation */
ViewportMemoryPool *vmempool;
DRWUniform *last_uniform;
DRWCall *last_call;
DRWCallGenerate *last_callgenerate;
DRWShadingGroup *last_shgroup;
DRWInstanceDataList *idatalist;
DRWInstanceData *common_instance_data[MAX_INSTANCE_DATA_SIZE];
/* Rendering state */
GPUShader *shader;
/* Managed by `DRW_state_set`, `DRW_state_reset` */
DRWState state;
unsigned int stencil_mask;
/* Per viewport */
GPUViewport *viewport;
struct GPUFrameBuffer *default_framebuffer;
float size[2];
float screenvecs[2][3];
float pixsize;
GLenum backface, frontface;
struct {
unsigned int is_select : 1;
unsigned int is_depth : 1;
unsigned int is_image_render : 1;
unsigned int is_scene_render : 1;
unsigned int draw_background : 1;
} options;
/* Current rendering context */
DRWContextState draw_ctx;
/* Convenience pointer to text_store owned by the viewport */
struct DRWTextStore **text_store_p;
ListBase enabled_engines; /* RenderEngineType */
bool buffer_finish_called; /* Avoid bad usage of DRW_render_instance_buffer_finish */
/* Profiling */
double cache_time;
/* View dependent uniforms. */
float original_mat[6][4][4]; /* Original rv3d matrices. */
int override_mat; /* Bitflag of which matrices are overridden. */
int num_clip_planes; /* Number of active clip planes. */
struct {
float mat[6][4][4];
float viewcamtexcofac[4];
float clip_planes_eq[MAX_CLIP_PLANES][4];
} view_data;
#ifdef USE_GPU_SELECT
unsigned int select_id;
#endif
/* ---------- Nothing after this point is cleared after use ----------- */
/* ogl_context serves as the offset for clearing only
* the top portion of the struct so DO NOT MOVE IT! */
void *ogl_context; /* Unique ghost context used by the draw manager. */
Gwn_Context *gwn_context;
ThreadMutex ogl_context_mutex; /* Mutex to lock the drw manager and avoid concurrent context usage. */
/** GPU Resource State: Memory storage between drawing. */
struct {
GPUTexture **bound_texs;
bool *bound_tex_slots;
int bind_tex_inc;
int bind_ubo_inc;
} RST;
} DRWManager;
extern DRWManager DST; /* TODO : get rid of this and allow multithreaded rendering */
/* --------------- FUNCTIONS ------------- */
void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags);
void drw_texture_get_format(
DRWTextureFormat format, bool is_framebuffer,
GPUTextureFormat *r_data_type, int *r_channels, bool *r_is_depth);
void *drw_viewport_engine_data_ensure(void *engine_type);
void drw_state_set(DRWState state);
#endif /* __DRAW_MANAGER_H__ */
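As a usage note, the PROFILE_* macros above expand to simple wall-clock bookkeeping; a hedged sketch of how a caller would use them (drw_example and the timed work are hypothetical, DST.cache_time is the real field declared above):

static void drw_example(void)
{
	PROFILE_START(stime); /* expands to: double stime = PIL_check_seconds_timer(); */
	/* ... expensive cache population ... */
	PROFILE_END_ACCUM(DST.cache_time, stime); /* adds elapsed milliseconds to the running total */
}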

intern/draw_manager_data.c (new file)

@@ -0,0 +1,867 @@
/*
* Copyright 2016, Blender Foundation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Blender Institute
*
*/
/** \file blender/draw/intern/draw_manager_data.c
* \ingroup draw
*/
#include "draw_manager.h"
#include "BKE_curve.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"
#include "DNA_curve_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"
#include "BLI_link_utils.h"
#include "BLI_mempool.h"
#include "intern/gpu_codegen.h"
struct Gwn_VertFormat *g_pos_format = NULL;
/* -------------------------------------------------------------------- */
/** \name Uniform Buffer Object (DRW_uniformbuffer)
* \{ */
GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
return GPU_uniformbuffer_create(size, data, NULL);
}
void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
GPU_uniformbuffer_update(ubo, data);
}
void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
GPU_uniformbuffer_free(ubo);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Uniforms (DRW_shgroup_uniform)
* \{ */
static void drw_interface_builtin_uniform(
DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
{
int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);
if (loc == -1)
return;
DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
uni->location = loc;
uni->type = DRW_UNIFORM_FLOAT;
uni->value = value;
uni->length = length;
uni->arraysize = arraysize;
BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}
static void drw_interface_uniform(DRWShadingGroup *shgroup, const char *name,
DRWUniformType type, const void *value, int length, int arraysize)
{
int location;
if (type == DRW_UNIFORM_BLOCK) {
location = GPU_shader_get_uniform_block(shgroup->shader, name);
}
else {
location = GPU_shader_get_uniform(shgroup->shader, name);
}
if (location == -1) {
if (G.debug & G_DEBUG)
fprintf(stderr, "Uniform '%s' not found!\n", name);
/* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
// BLI_assert(0);
return;
}
DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
BLI_assert(arraysize > 0 && arraysize <= 16);
BLI_assert(length >= 0 && length <= 16);
uni->location = location;
uni->type = type;
uni->value = value;
uni->length = length;
uni->arraysize = arraysize;
BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}
void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}
void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1);
}
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}
void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}
void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}
void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}
void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}
void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}
void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}
void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}
void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}
void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}
void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 9, 1);
}
void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value)
{
drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 16, 1);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Draw Call (DRW_calls)
* \{ */
static void drw_call_calc_orco(ID *ob_data, float (*r_orcofacs)[3])
{
float *texcoloc = NULL;
float *texcosize = NULL;
if (ob_data != NULL) {
switch (GS(ob_data->name)) {
case ID_ME:
BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
break;
case ID_CU:
{
Curve *cu = (Curve *)ob_data;
if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
BKE_curve_texspace_calc(cu);
}
texcoloc = cu->loc;
texcosize = cu->size;
break;
}
case ID_MB:
{
MetaBall *mb = (MetaBall *)ob_data;
texcoloc = mb->loc;
texcosize = mb->size;
break;
}
default:
break;
}
}
if ((texcoloc != NULL) && (texcosize != NULL)) {
mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
invert_v3(r_orcofacs[1]);
sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
negate_v3(r_orcofacs[0]);
mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
}
else {
copy_v3_fl(r_orcofacs[0], 0.0f);
copy_v3_fl(r_orcofacs[1], 1.0f);
}
}
static void drw_call_set_matrices(DRWCallState *state, float (*obmat)[4], ID *ob_data)
{
/* Matrices */
if (obmat != NULL) {
copy_m4_m4(state->model, obmat);
if (is_negative_m4(state->model)) {
state->matflag |= DRW_CALL_NEGSCALE;
}
}
else {
unit_m4(state->model);
}
/* Orco factors */
if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
drw_call_calc_orco(ob_data, state->orcotexfac);
state->matflag &= ~DRW_CALL_ORCOTEXFAC;
}
/* TODO Set culling bsphere IF needed by the DRWPass */
state->bsphere.rad = -1.0f;
}
void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
{
BLI_assert(geom != NULL);
BLI_assert(shgroup->type == DRW_SHG_NORMAL);
DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
call->head.type = DRW_CALL_SINGLE;
call->state.flag = 0;
call->state.matflag = shgroup->matflag;
#ifdef USE_GPU_SELECT
call->head.select_id = DST.select_id;
#endif
call->geometry = geom;
drw_call_set_matrices(&call->state, obmat, NULL);
BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call);
}
void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
{
BLI_assert(geom != NULL);
BLI_assert(shgroup->type == DRW_SHG_NORMAL);
DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
call->head.type = DRW_CALL_SINGLE;
call->state.flag = 0;
call->state.matflag = shgroup->matflag;
#ifdef USE_GPU_SELECT
call->head.select_id = DST.select_id;
#endif
call->geometry = geom;
drw_call_set_matrices(&call->state, ob->obmat, ob->data);
BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call);
}
void DRW_shgroup_call_generate_add(
DRWShadingGroup *shgroup,
DRWCallGenerateFn *geometry_fn, void *user_data,
float (*obmat)[4])
{
BLI_assert(geometry_fn != NULL);
BLI_assert(shgroup->type == DRW_SHG_NORMAL);
DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate);
call->head.type = DRW_CALL_GENERATE;
call->state.flag = 0;
call->state.matflag = shgroup->matflag;
#ifdef USE_GPU_SELECT
call->head.select_id = DST.select_id;
#endif
call->geometry_fn = geometry_fn;
call->user_data = user_data;
drw_call_set_matrices(&call->state, obmat, NULL);
BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call);
}
static void sculpt_draw_cb(
DRWShadingGroup *shgroup,
void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
void *user_data)
{
Object *ob = user_data;
PBVH *pbvh = ob->sculpt->pbvh;
if (pbvh) {
BKE_pbvh_draw_cb(
pbvh, NULL, NULL, false,
(void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
}
}
void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}
void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
{
#ifdef USE_GPU_SELECT
if (G.f & G_PICKSEL) {
if (shgroup->inst_selectid == NULL) {
shgroup->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128);
}
int *select_id = DRW_instance_data_next(shgroup->inst_selectid);
*select_id = DST.select_id;
}
#endif
BLI_assert(attr_len == shgroup->attribs_count);
UNUSED_VARS_NDEBUG(attr_len);
for (int i = 0; i < attr_len; ++i) {
if (shgroup->instance_count == shgroup->instance_vbo->vertex_ct) {
GWN_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
}
GWN_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
}
shgroup->instance_count += 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Shading Groups (DRW_shgroup)
* \{ */
static void drw_interface_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
shgroup->instance_geom = NULL;
shgroup->instance_vbo = NULL;
shgroup->instance_count = 0;
shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
shgroup->inst_selectid = NULL;
shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
shgroup->attribs_count = 0;
#endif
/* TODO : They should be grouped inside a UBO updated once per redraw. */
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW, DST.view_data.mat[DRW_MAT_VIEW], 16, 1);
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW_INV, DST.view_data.mat[DRW_MAT_VIEWINV], 16, 1);
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION, DST.view_data.mat[DRW_MAT_PERS], 16, 1);
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION_INV, DST.view_data.mat[DRW_MAT_PERSINV], 16, 1);
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION, DST.view_data.mat[DRW_MAT_WIN], 16, 1);
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION_INV, DST.view_data.mat[DRW_MAT_WININV], 16, 1);
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CLIPPLANES, DST.view_data.clip_planes_eq, 4, DST.num_clip_planes); /* TO REMOVE */
shgroup->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
shgroup->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);
shgroup->matflag = 0;
if (shgroup->modelinverse > -1)
shgroup->matflag |= DRW_CALL_MODELINVERSE;
if (shgroup->modelview > -1)
shgroup->matflag |= DRW_CALL_MODELVIEW;
if (shgroup->modelviewinverse > -1)
shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
if (shgroup->modelviewprojection > -1)
shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
if (shgroup->normalview > -1)
shgroup->matflag |= DRW_CALL_NORMALVIEW;
if (shgroup->normalworld > -1)
shgroup->matflag |= DRW_CALL_NORMALWORLD;
if (shgroup->orcotexfac > -1)
shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
if (shgroup->eye > -1)
shgroup->matflag |= DRW_CALL_EYEVEC;
}
static void drw_interface_instance_init(
DRWShadingGroup *shgroup, GPUShader *shader, Gwn_Batch *batch, Gwn_VertFormat *format)
{
BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
BLI_assert(batch != NULL);
drw_interface_init(shgroup, shader);
shgroup->instance_geom = batch;
#ifndef NDEBUG
shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0;
#endif
if (format != NULL) {
DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
&shgroup->instance_geom, &shgroup->instance_vbo);
}
}
static void drw_interface_batching_init(
DRWShadingGroup *shgroup, GPUShader *shader, Gwn_VertFormat *format)
{
drw_interface_init(shgroup, shader);
#ifndef NDEBUG
shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0;
#endif
BLI_assert(format != NULL);
Gwn_PrimType type;
switch (shgroup->type) {
case DRW_SHG_POINT_BATCH: type = GWN_PRIM_POINTS; break;
case DRW_SHG_LINE_BATCH: type = GWN_PRIM_LINES; break;
case DRW_SHG_TRIANGLE_BATCH: type = GWN_PRIM_TRIS; break;
default:
BLI_assert(0);
}
DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
&shgroup->batch_geom, &shgroup->batch_vbo);
}
static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
BLI_LINKS_APPEND(&pass->shgroups, shgroup);
shgroup->type = DRW_SHG_NORMAL;
shgroup->shader = shader;
shgroup->state_extra = 0;
shgroup->state_extra_disable = ~0x0;
shgroup->stencil_mask = 0;
shgroup->calls.first = NULL;
shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
shgroup->batch_geom = NULL;
shgroup->batch_vbo = NULL;
shgroup->instance_geom = NULL;
shgroup->instance_vbo = NULL;
#endif
#ifdef USE_GPU_SELECT
shgroup->pass_parent = pass;
#endif
return shgroup;
}
static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
{
if (!gpupass) {
/* Shader compilation error */
return NULL;
}
DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass);
return grp;
}
static DRWShadingGroup *drw_shgroup_material_inputs(
DRWShadingGroup *grp, struct GPUMaterial *material, GPUPass *gpupass)
{
/* TODO : Ideally we should not convert. But since the whole codegen
* is relying on GPUPass we keep it as is for now. */
/* Converting dynamic GPUInput to DRWUniform */
ListBase *inputs = &gpupass->inputs;
for (GPUInput *input = inputs->first; input; input = input->next) {
/* Textures */
if (input->ima) {
double time = 0.0; /* TODO make time variable */
GPUTexture *tex = GPU_texture_from_blender(
input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);
if (input->bindtex) {
DRW_shgroup_uniform_texture(grp, input->shadername, tex);
}
}
/* Color Ramps */
else if (input->tex) {
DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
}
/* Floats */
else {
switch (input->type) {
case GPU_FLOAT:
case GPU_VEC2:
case GPU_VEC3:
case GPU_VEC4:
/* Should already be in the material ubo. */
break;
case GPU_MAT3:
DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec);
break;
case GPU_MAT4:
DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec);
break;
default:
break;
}
}
}
GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
if (ubo != NULL) {
DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
}
return grp;
}
Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
{
Gwn_VertFormat *format = MEM_callocN(sizeof(Gwn_VertFormat), "Gwn_VertFormat");
for (int i = 0; i < arraysize; ++i) {
GWN_vertformat_attr_add(format, attribs[i].name,
(attribs[i].type == DRW_ATTRIB_INT) ? GWN_COMP_I32 : GWN_COMP_F32,
attribs[i].components,
(attribs[i].type == DRW_ATTRIB_INT) ? GWN_FETCH_INT : GWN_FETCH_FLOAT);
}
return format;
}
DRWShadingGroup *DRW_shgroup_material_create(
struct GPUMaterial *material, DRWPass *pass)
{
GPUPass *gpupass = GPU_material_get_pass(material);
DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
if (shgroup) {
drw_interface_init(shgroup, GPU_pass_shader(gpupass));
drw_shgroup_material_inputs(shgroup, material, gpupass);
}
return shgroup;
}
DRWShadingGroup *DRW_shgroup_material_instance_create(
struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob, Gwn_VertFormat *format)
{
GPUPass *gpupass = GPU_material_get_pass(material);
DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
if (shgroup) {
shgroup->type = DRW_SHG_INSTANCE;
shgroup->instance_geom = geom;
drw_call_calc_orco(ob->data, shgroup->instance_orcofac);
drw_interface_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format);
drw_shgroup_material_inputs(shgroup, material, gpupass);
}
return shgroup;
}
DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
BLI_assert((G.f & G_PICKSEL) == 0);
#endif
GPUPass *gpupass = GPU_material_get_pass(material);
DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
if (shgroup) {
/* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */
drw_interface_init(shgroup, GPU_pass_shader(gpupass));
shgroup->type = DRW_SHG_TRIANGLE_BATCH;
shgroup->instance_count = tri_count * 3;
drw_shgroup_material_inputs(shgroup, material, gpupass);
}
return shgroup;
}
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
drw_interface_init(shgroup, shader);
return shgroup;
}
DRWShadingGroup *DRW_shgroup_instance_create(
struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom, Gwn_VertFormat *format)
{
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
shgroup->type = DRW_SHG_INSTANCE;
shgroup->instance_geom = geom;
drw_call_calc_orco(NULL, shgroup->instance_orcofac);
drw_interface_instance_init(shgroup, shader, geom, format);
return shgroup;
}
DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
{
DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
shgroup->type = DRW_SHG_POINT_BATCH;
drw_interface_batching_init(shgroup, shader, g_pos_format);
return shgroup;
}
DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
{
DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
shgroup->type = DRW_SHG_LINE_BATCH;
drw_interface_batching_init(shgroup, shader, g_pos_format);
return shgroup;
}
/* Very special batch. Use this if you position
 * your vertices with the vertex shader
 * and don't need any VBO attribute. */
DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
BLI_assert((G.f & G_PICKSEL) == 0);
#endif
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
/* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */
drw_interface_init(shgroup, shader);
shgroup->type = DRW_SHG_TRIANGLE_BATCH;
shgroup->instance_count = tri_count * 3;
return shgroup;
}
/* Specify an external batch instead of adding each attrib one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch)
{
BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
BLI_assert(shgroup->instance_count == 0);
/* You cannot use external instancing batch without a dummy format. */
BLI_assert(shgroup->attribs_count != 0);
shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
drw_call_calc_orco(NULL, shgroup->instance_orcofac);
/* PERF : This destroys the vaos cache so better check if it's necessary. */
/* Note: This WILL break if batch->verts[0] is destroyed and reallocated
 * at the same address. Bindings/VAOs would remain obsolete. */
//if (shgroup->instancing_geom->inst != batch->verts[0])
GWN_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
#ifdef USE_GPU_SELECT
shgroup->override_selectid = DST.select_id;
#endif
}
/* Used for instancing with no attributes */
void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, unsigned int count)
{
BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
BLI_assert(shgroup->instance_count == 0);
BLI_assert(shgroup->attribs_count == 0);
#ifdef USE_GPU_SELECT
if (G.f & G_PICKSEL) {
shgroup->override_selectid = DST.select_id;
}
#endif
shgroup->instance_count = count;
}
unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
return shgroup->instance_count;
}
/**
* State is added to #Pass.state while drawing.
* Use to temporarily enable draw options.
*/
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
shgroup->state_extra |= state;
}
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
shgroup->state_extra_disable &= ~state;
}
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
{
BLI_assert(mask <= 255);
shgroup->stencil_mask = mask;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Passes (DRW_pass)
* \{ */
DRWPass *DRW_pass_create(const char *name, DRWState state)
{
DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
pass->state = state;
if (G.debug_value > 20) {
BLI_strncpy(pass->name, name, MAX_PASS_NAME);
}
pass->shgroups.first = NULL;
pass->shgroups.last = NULL;
return pass;
}
void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
pass->state = state;
}
void DRW_pass_free(DRWPass *pass)
{
pass->shgroups.first = NULL;
pass->shgroups.last = NULL;
}
void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
{
for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
callback(userData, shgroup);
}
}
typedef struct ZSortData {
float *axis;
float *origin;
} ZSortData;
static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
const ZSortData *zsortdata = (ZSortData *)thunk;
const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;
if (call_a == NULL) return -1;
if (call_b == NULL) return -1;
float tmp[3];
sub_v3_v3v3(tmp, zsortdata->origin, call_a->state.model[3]);
const float a_sq = dot_v3v3(zsortdata->axis, tmp);
sub_v3_v3v3(tmp, zsortdata->origin, call_b->state.model[3]);
const float b_sq = dot_v3v3(zsortdata->axis, tmp);
if (a_sq < b_sq) return 1;
else if (a_sq > b_sq) return -1;
else {
/* If there is a depth prepass put it before */
if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
return -1;
}
else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
return 1;
}
else return 0;
}
}
/* ------------------ Shading group sorting --------------------- */
#define SORT_IMPL_LINKTYPE DRWShadingGroup
#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK
#undef SORT_IMPL_LINKTYPE
/**
* Sort Shading groups by decreasing Z of their first draw call.
* This is useful for order dependent effects such as transparency.
**/
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
float (*viewinv)[4];
viewinv = DST.view_data.mat[DRW_MAT_VIEWINV];
ZSortData zsortdata = {viewinv[2], viewinv[3]};
if (pass->shgroups.first && pass->shgroups.first->next) {
pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);
/* Find the new last */
DRWShadingGroup *last = pass->shgroups.first;
while ((last = last->next)) {
/* Do nothing */
}
pass->shgroups.last = last;
}
}
/** \} */
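To show how the functions in this file fit together, here is a hedged sketch of a hypothetical engine populating a pass during cache generation (shader, color, geom and ob are assumed to come from the engine side; note that uniform values are stored by pointer, so they must outlive drawing):

/* Cache populate (illustration only). */
DRWPass *pass = DRW_pass_create("Example Pass", DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS);
DRWShadingGroup *grp = DRW_shgroup_create(shader, pass); /* shader: a GPUShader * owned elsewhere */
DRW_shgroup_uniform_vec4(grp, "color", color, 1);        /* color: a float[4] that outlives drawing */
DRW_shgroup_call_object_add(grp, geom, ob);              /* geom: the object's Gwn_Batch * */

/* Later, in the draw step (see draw_manager_exec.c below): */
DRW_draw_pass(pass);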

intern/draw_manager_exec.c (new file)

@@ -0,0 +1,747 @@
/*
* Copyright 2016, Blender Foundation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Blender Institute
*
*/
/** \file blender/draw/intern/draw_manager_exec.c
* \ingroup draw
*/
#include "draw_manager.h"
#include "BIF_glutil.h"
#include "BKE_global.h"
#include "GPU_draw.h"
#include "GPU_extensions.h"
#ifdef USE_GPU_SELECT
# include "ED_view3d.h"
# include "ED_armature.h"
# include "GPU_select.h"
#endif
#ifdef USE_GPU_SELECT
void DRW_select_load_id(unsigned int id)
{
BLI_assert(G.f & G_PICKSEL);
DST.select_id = id;
}
#endif
/* -------------------------------------------------------------------- */
/** \name Draw State (DRW_state)
* \{ */
void drw_state_set(DRWState state)
{
if (DST.state == state) {
return;
}
#define CHANGED_TO(f) \
((DST.state & (f)) ? \
((state & (f)) ? 0 : -1) : \
((state & (f)) ? 1 : 0))
#define CHANGED_ANY(f) \
((DST.state & (f)) != (state & (f)))
#define CHANGED_ANY_STORE_VAR(f, enabled) \
((DST.state & (f)) != (enabled = (state & (f))))
/* Depth Write */
{
int test;
if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
if (test == 1) {
glDepthMask(GL_TRUE);
}
else {
glDepthMask(GL_FALSE);
}
}
}
/* Color Write */
{
int test;
if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
if (test == 1) {
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
}
else {
glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
}
}
}
/* Cull */
{
DRWState test;
if (CHANGED_ANY_STORE_VAR(
DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
test))
{
if (test) {
glEnable(GL_CULL_FACE);
if ((state & DRW_STATE_CULL_BACK) != 0) {
glCullFace(GL_BACK);
}
else if ((state & DRW_STATE_CULL_FRONT) != 0) {
glCullFace(GL_FRONT);
}
else {
BLI_assert(0);
}
}
else {
glDisable(GL_CULL_FACE);
}
}
}
/* Depth Test */
{
DRWState test;
if (CHANGED_ANY_STORE_VAR(
DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS,
test))
{
if (test) {
glEnable(GL_DEPTH_TEST);
if (state & DRW_STATE_DEPTH_LESS) {
glDepthFunc(GL_LEQUAL);
}
else if (state & DRW_STATE_DEPTH_EQUAL) {
glDepthFunc(GL_EQUAL);
}
else if (state & DRW_STATE_DEPTH_GREATER) {
glDepthFunc(GL_GREATER);
}
else if (state & DRW_STATE_DEPTH_ALWAYS) {
glDepthFunc(GL_ALWAYS);
}
else {
BLI_assert(0);
}
}
else {
glDisable(GL_DEPTH_TEST);
}
}
}
/* Wire Width */
{
if (CHANGED_ANY(DRW_STATE_WIRE)) {
if ((state & DRW_STATE_WIRE) != 0) {
glLineWidth(1.0f);
}
else {
/* do nothing */
}
}
}
/* Points Size */
{
int test;
if ((test = CHANGED_TO(DRW_STATE_POINT))) {
if (test == 1) {
GPU_enable_program_point_size();
glPointSize(5.0f);
}
else {
GPU_disable_program_point_size();
}
}
}
/* Blending (all buffer) */
{
int test;
if (CHANGED_ANY_STORE_VAR(
DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION |
DRW_STATE_ADDITIVE_FULL,
test))
{
if (test) {
glEnable(GL_BLEND);
if ((state & DRW_STATE_BLEND) != 0) {
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */
GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
}
else if ((state & DRW_STATE_MULTIPLY) != 0) {
glBlendFunc(GL_DST_COLOR, GL_ZERO);
}
else if ((state & DRW_STATE_TRANSMISSION) != 0) {
glBlendFunc(GL_ONE, GL_SRC_ALPHA);
}
else if ((state & DRW_STATE_ADDITIVE) != 0) {
/* Do not let alpha accumulate but premult the source RGB by it. */
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */
GL_ZERO, GL_ONE); /* Alpha */
}
else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
/* Let alpha accumulate. */
glBlendFunc(GL_ONE, GL_ONE);
}
else {
BLI_assert(0);
}
}
else {
glDisable(GL_BLEND);
}
}
}
/* Clip Planes */
{
int test;
if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
if (test == 1) {
for (int i = 0; i < DST.num_clip_planes; ++i) {
glEnable(GL_CLIP_DISTANCE0 + i);
}
}
else {
for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
glDisable(GL_CLIP_DISTANCE0 + i);
}
}
}
}
/* Line Stipple */
{
int test;
if (CHANGED_ANY_STORE_VAR(
DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4,
test))
{
if (test) {
if ((state & DRW_STATE_STIPPLE_2) != 0) {
setlinestyle(2);
}
else if ((state & DRW_STATE_STIPPLE_3) != 0) {
setlinestyle(3);
}
else if ((state & DRW_STATE_STIPPLE_4) != 0) {
setlinestyle(4);
}
else {
BLI_assert(0);
}
}
else {
setlinestyle(0);
}
}
}
/* Stencil */
{
DRWState test;
if (CHANGED_ANY_STORE_VAR(
DRW_STATE_WRITE_STENCIL |
DRW_STATE_STENCIL_EQUAL,
test))
{
if (test) {
glEnable(GL_STENCIL_TEST);
/* Stencil Write */
if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
glStencilMask(0xFF);
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
}
/* Stencil Test */
else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) {
glStencilMask(0x00); /* disable write */
DST.stencil_mask = 0;
}
else {
BLI_assert(0);
}
}
else {
/* disable write & test */
DST.stencil_mask = 0;
glStencilMask(0x00);
glStencilFunc(GL_ALWAYS, 1, 0xFF);
glDisable(GL_STENCIL_TEST);
}
}
}
#undef CHANGED_TO
#undef CHANGED_ANY
#undef CHANGED_ANY_STORE_VAR
DST.state = state;
}
static void drw_stencil_set(unsigned int mask)
{
if (DST.stencil_mask != mask) {
/* Stencil Write */
if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
glStencilFunc(GL_ALWAYS, mask, 0xFF);
DST.stencil_mask = mask;
}
/* Stencil Test */
else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
glStencilFunc(GL_EQUAL, mask, 0xFF);
DST.stencil_mask = mask;
}
}
}
/* Reset state to not interfere with other UI drawcalls */
void DRW_state_reset_ex(DRWState state)
{
DST.state = ~state;
drw_state_set(state);
}
void DRW_state_reset(void)
{
/* Reset blending function */
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
DRW_state_reset_ex(DRW_STATE_DEFAULT);
}
/* NOTE : Make sure to reset after use! */
void DRW_state_invert_facing(void)
{
SWAP(GLenum, DST.backface, DST.frontface);
glFrontFace(DST.frontface);
}
/**
* This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
* and if the shaders have support for it (see usage of gl_ClipDistance).
* Be sure to call DRW_state_clip_planes_reset() after you finish drawing.
**/
void DRW_state_clip_planes_add(float plane_eq[4])
{
BLI_assert(DST.num_clip_planes < MAX_CLIP_PLANES);
copy_v4_v4(DST.view_data.clip_planes_eq[DST.num_clip_planes++], plane_eq);
}
void DRW_state_clip_planes_reset(void)
{
DST.num_clip_planes = 0;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Draw (DRW_draw)
* \{ */
static void draw_matrices_model_prepare(DRWCallState *st)
{
/* OPTI : We can optimize further by sharing this computation for each call using the same object. */
/* Order matters */
if (st->matflag & (DRW_CALL_MODELVIEW | DRW_CALL_MODELVIEWINVERSE |
DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC))
{
mul_m4_m4m4(st->modelview, DST.view_data.mat[DRW_MAT_VIEW], st->model);
}
if (st->matflag & DRW_CALL_MODELVIEWINVERSE) {
invert_m4_m4(st->modelviewinverse, st->modelview);
}
if (st->matflag & DRW_CALL_MODELVIEWPROJECTION) {
mul_m4_m4m4(st->modelviewprojection, DST.view_data.mat[DRW_MAT_PERS], st->model);
}
if (st->matflag & DRW_CALL_NORMALVIEW) {
copy_m3_m4(st->normalview, st->modelview);
invert_m3(st->normalview);
transpose_m3(st->normalview);
}
if (st->matflag & DRW_CALL_EYEVEC) {
/* Used by orthographic wires */
float tmp[3][3];
copy_v3_fl3(st->eyevec, 0.0f, 0.0f, 1.0f);
invert_m3_m3(tmp, st->normalview);
/* set eye vector, transformed to object coords */
mul_m3_v3(tmp, st->eyevec);
}
/* Non view dependent */
if (st->matflag & DRW_CALL_MODELINVERSE) {
invert_m4_m4(st->modelinverse, st->model);
st->matflag &= ~DRW_CALL_MODELINVERSE;
}
if (st->matflag & DRW_CALL_NORMALWORLD) {
copy_m3_m4(st->normalworld, st->model);
invert_m3(st->normalworld);
transpose_m3(st->normalworld);
st->matflag &= ~DRW_CALL_NORMALWORLD;
}
}
static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCallState *state)
{
/* step 1 : bind object dependent matrices */
if (state != NULL) {
/* OPTI/IDEA(clem): Do this preparation in another thread. */
draw_matrices_model_prepare(state);
GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)state->model);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)state->modelinverse);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)state->modelview);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)state->modelviewinverse);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)state->modelviewprojection);
GPU_shader_uniform_vector(shgroup->shader, shgroup->normalview, 9, 1, (float *)state->normalview);
GPU_shader_uniform_vector(shgroup->shader, shgroup->normalworld, 9, 1, (float *)state->normalworld);
GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)state->orcotexfac);
GPU_shader_uniform_vector(shgroup->shader, shgroup->eye, 3, 1, (float *)state->eyevec);
}
else {
BLI_assert((shgroup->normalview == -1) && (shgroup->normalworld == -1) && (shgroup->eye == -1));
/* For instancing and batching. */
float unitmat[4][4];
unit_m4(unitmat);
GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)unitmat);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)unitmat);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)DST.view_data.mat[DRW_MAT_VIEW]);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)DST.view_data.mat[DRW_MAT_VIEWINV]);
GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)DST.view_data.mat[DRW_MAT_PERS]);
GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
}
}
static void draw_geometry_execute_ex(
DRWShadingGroup *shgroup, Gwn_Batch *geom, unsigned int start, unsigned int count)
{
/* Special case: empty drawcall, placement is done via shader, don't bind anything. */
if (geom == NULL) {
BLI_assert(shgroup->type == DRW_SHG_TRIANGLE_BATCH); /* Add other type if needed. */
/* Shader is already bound. */
GWN_draw_primitive(GWN_PRIM_TRIS, count);
return;
}
/* step 2 : bind vertex array & draw */
GWN_batch_program_set_no_use(geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
/* XXX hacking gawain. we don't want to call glUseProgram! (huge performance loss) */
geom->program_in_use = true;
if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
GWN_batch_draw_range_ex(geom, start, count, true);
}
else {
GWN_batch_draw_range(geom, start, count);
}
geom->program_in_use = false; /* XXX hacking gawain */
}
static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom)
{
draw_geometry_execute_ex(shgroup, geom, 0, 0);
}
static void bind_texture(GPUTexture *tex)
{
int bind_num = GPU_texture_bound_number(tex);
if (bind_num == -1) {
for (int i = 0; i < GPU_max_textures(); ++i) {
DST.RST.bind_tex_inc = (DST.RST.bind_tex_inc + 1) % GPU_max_textures();
if (DST.RST.bound_tex_slots[DST.RST.bind_tex_inc] == false) {
if (DST.RST.bound_texs[DST.RST.bind_tex_inc] != NULL) {
GPU_texture_unbind(DST.RST.bound_texs[DST.RST.bind_tex_inc]);
}
GPU_texture_bind(tex, DST.RST.bind_tex_inc);
DST.RST.bound_texs[DST.RST.bind_tex_inc] = tex;
DST.RST.bound_tex_slots[DST.RST.bind_tex_inc] = true;
// printf("Binds Texture %d %p\n", DST.RST.bind_tex_inc, tex);
return;
}
}
printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
}
DST.RST.bound_tex_slots[bind_num] = true;
}
static void bind_ubo(GPUUniformBuffer *ubo)
{
if (DST.RST.bind_ubo_inc < GPU_max_ubo_binds()) {
GPU_uniformbuffer_bind(ubo, DST.RST.bind_ubo_inc);
DST.RST.bind_ubo_inc++;
}
else {
/* This does not depend on user input.
 * It is our responsibility to make sure there are enough slots. */
BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");
/* printf so user can report bad behaviour */
printf("Not enough ubo slots! This should not happen!\n");
}
}
static void release_texture_slots(void)
{
memset(DST.RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures());
}
static void release_ubo_slots(void)
{
DST.RST.bind_ubo_inc = 0;
}
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
BLI_assert(shgroup->shader);
GPUTexture *tex;
GPUUniformBuffer *ubo;
int val;
float fval;
if (DST.shader != shgroup->shader) {
if (DST.shader) GPU_shader_unbind();
GPU_shader_bind(shgroup->shader);
DST.shader = shgroup->shader;
}
release_texture_slots();
release_ubo_slots();
drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
drw_stencil_set(shgroup->stencil_mask);
/* Binding Uniform */
/* Don't check anything, Interface should already contain as few uniforms as possible */
for (DRWUniform *uni = shgroup->uniforms; uni; uni = uni->next) {
switch (uni->type) {
case DRW_UNIFORM_SHORT_TO_INT:
val = (int)*((short *)uni->value);
GPU_shader_uniform_vector_int(
shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val);
break;
case DRW_UNIFORM_SHORT_TO_FLOAT:
fval = (float)*((short *)uni->value);
GPU_shader_uniform_vector(
shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
break;
case DRW_UNIFORM_BOOL:
case DRW_UNIFORM_INT:
GPU_shader_uniform_vector_int(
shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value);
break;
case DRW_UNIFORM_FLOAT:
GPU_shader_uniform_vector(
shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value);
break;
case DRW_UNIFORM_TEXTURE:
tex = (GPUTexture *)uni->value;
BLI_assert(tex);
bind_texture(tex);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_BUFFER:
if (!DRW_state_is_fbo()) {
break;
}
tex = *((GPUTexture **)uni->value);
BLI_assert(tex);
bind_texture(tex);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_BLOCK:
ubo = (GPUUniformBuffer *)uni->value;
bind_ubo(ubo);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
break;
}
}
#ifdef USE_GPU_SELECT
/* use the first item because of selection we only ever add one */
# define GPU_SELECT_LOAD_IF_PICKSEL(_call) \
if ((G.f & G_PICKSEL) && (_call)) { \
GPU_select_load_id((_call)->head.select_id); \
} ((void)0)
# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
_start = 0; \
_count = _shgroup->instance_count; \
int *select_id = NULL; \
if (G.f & G_PICKSEL) { \
if (_shgroup->override_selectid == -1) { \
select_id = DRW_instance_data_get(_shgroup->inst_selectid); \
switch (_shgroup->type) { \
case DRW_SHG_TRIANGLE_BATCH: _count = 3; break; \
case DRW_SHG_LINE_BATCH: _count = 2; break; \
default: _count = 1; break; \
} \
} \
else { \
GPU_select_load_id(_shgroup->override_selectid); \
} \
} \
while (_start < _shgroup->instance_count) { \
if (select_id) { \
GPU_select_load_id(select_id[_start]); \
}
# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
_start += _count; \
}
#else
# define GPU_SELECT_LOAD_IF_PICKSEL(call)
# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
_start = 0; \
_count = _shgroup->instance_count;
#endif
/* Rendering Calls */
if (!ELEM(shgroup->type, DRW_SHG_NORMAL)) {
/* Replacing multiple calls with only one */
if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
if (shgroup->instance_geom != NULL) {
unsigned int count, start;
draw_geometry_prepare(shgroup, NULL);
/* This will only load override_selectid */
GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
{
draw_geometry_execute(shgroup, shgroup->instance_geom);
}
GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
}
}
else {
if (shgroup->instance_count > 0) {
unsigned int count, start;
draw_geometry_prepare(shgroup, NULL);
GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
{
draw_geometry_execute_ex(shgroup, shgroup->instance_geom, start, count);
}
GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
}
}
}
else { /* DRW_SHG_***_BATCH */
/* Some dynamic batch can have no geom (no call to aggregate) */
if (shgroup->instance_count > 0) {
unsigned int count, start;
draw_geometry_prepare(shgroup, NULL);
GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
{
draw_geometry_execute_ex(shgroup, shgroup->batch_geom, start, count);
}
GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
}
}
}
else {
bool prev_neg_scale = false;
for (DRWCall *call = (DRWCall *)shgroup->calls.first; call; call = (DRWCall *)call->head.next) {
if ((call->state.flag & DRW_CALL_CULLED) != 0)
continue;
/* Negative scale objects */
bool neg_scale = call->state.flag & DRW_CALL_NEGSCALE;
if (neg_scale != prev_neg_scale) {
glFrontFace((neg_scale) ? DST.backface : DST.frontface);
prev_neg_scale = neg_scale;
}
GPU_SELECT_LOAD_IF_PICKSEL(call);
if (call->head.type == DRW_CALL_SINGLE) {
draw_geometry_prepare(shgroup, &call->state);
draw_geometry_execute(shgroup, call->geometry);
}
else {
BLI_assert(call->head.type == DRW_CALL_GENERATE);
DRWCallGenerate *callgen = ((DRWCallGenerate *)call);
draw_geometry_prepare(shgroup, &callgen->state);
callgen->geometry_fn(shgroup, draw_geometry_execute, callgen->user_data);
}
}
/* Reset state */
glFrontFace(DST.frontface);
}
/* TODO: remove (currently causes an alpha issue with sculpt, need to investigate) */
DRW_state_reset();
}
static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
/* Start fresh */
DST.shader = NULL;
BLI_assert(DST.buffer_finish_called && "DRW_render_instance_buffer_finish was not called before drawing");
drw_state_set(pass->state);
DRW_stats_query_start(pass->name);
for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
draw_shgroup(shgroup, pass->state);
/* break if upper limit */
if (shgroup == end_group) {
break;
}
}
/* Clear Bound textures */
for (int i = 0; i < GPU_max_textures(); i++) {
if (DST.RST.bound_texs[i] != NULL) {
GPU_texture_unbind(DST.RST.bound_texs[i]);
DST.RST.bound_texs[i] = NULL;
}
}
if (DST.shader) {
GPU_shader_unbind();
DST.shader = NULL;
}
DRW_stats_query_end();
}
void DRW_draw_pass(DRWPass *pass)
{
drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
}
/* Draw only a subset of shgroups. Used in special situations such as grease pencil strokes */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
drw_draw_pass_ex(pass, start_group, end_group);
}
/** \} */
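The CHANGED_TO macro in drw_state_set() is the core trick of this file: it diffs the cached state against the requested one so only the GL calls that actually change something are issued. A functional restatement of the macro (state_changed_to is a hypothetical helper, not part of the file above):

static int state_changed_to(DRWState old_state, DRWState new_state, DRWState flag)
{
	const bool was_set = (old_state & flag) != 0;
	const bool is_set = (new_state & flag) != 0;
	/* -1: flag turned off, 1: flag turned on, 0: unchanged. */
	return (was_set == is_set) ? 0 : (is_set ? 1 : -1);
}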

intern/draw_manager_framebuffer.c (new file)

@@ -0,0 +1,189 @@
/*
* Copyright 2016, Blender Foundation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Blender Institute
*
*/
/** \file blender/draw/intern/draw_manager_framebuffer.c
* \ingroup draw
*/
#include "draw_manager.h"
GPUFrameBuffer *DRW_framebuffer_create(void)
{
return GPU_framebuffer_create();
}
void DRW_framebuffer_init(
GPUFrameBuffer **fb, void *engine_type, int width, int height,
DRWFboTexture textures[MAX_FBO_TEX], int textures_len)
{
BLI_assert(textures_len <= MAX_FBO_TEX);
BLI_assert(width > 0 && height > 0);
bool create_fb = false;
int color_attachment = -1;
if (!*fb) {
*fb = GPU_framebuffer_create();
create_fb = true;
}
for (int i = 0; i < textures_len; ++i) {
int channels;
bool is_depth;
bool create_tex = false;
GPUTextureFormat gpu_format;
DRWFboTexture fbotex = textures[i];
bool is_temp = (fbotex.flag & DRW_TEX_TEMP) != 0;
drw_texture_get_format(fbotex.format, true, &gpu_format, &channels, &is_depth);
if (!*fbotex.tex || is_temp) {
/* Temp textures need to be queried each frame, others not. */
if (is_temp) {
*fbotex.tex = GPU_viewport_texture_pool_query(
DST.viewport, engine_type, width, height, channels, gpu_format);
}
else {
*fbotex.tex = GPU_texture_create_2D_custom(
width, height, channels, gpu_format, NULL, NULL);
create_tex = true;
}
}
if (!is_depth) {
++color_attachment;
}
if (create_fb || create_tex) {
drw_texture_set_parameters(*fbotex.tex, fbotex.flag);
GPU_framebuffer_texture_attach(*fb, *fbotex.tex, color_attachment, 0);
}
}
if (create_fb && (textures_len > 0)) {
if (!GPU_framebuffer_check_valid(*fb, NULL)) {
printf("Error invalid framebuffer\n");
}
/* Detach temp textures */
for (int i = 0; i < textures_len; ++i) {
DRWFboTexture fbotex = textures[i];
if ((fbotex.flag & DRW_TEX_TEMP) != 0) {
GPU_framebuffer_texture_detach(*fbotex.tex);
}
}
if (DST.default_framebuffer != NULL) {
GPU_framebuffer_bind(DST.default_framebuffer);
}
}
}
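/* Usage sketch only, not part of this commit: the typical engine-side call
 * pattern for DRW_framebuffer_init. The e_data storage and the engine_type
 * argument are hypothetical; the DRWFboTexture field order {tex, format,
 * flag} is assumed from the accesses above. */
static struct {
	struct GPUFrameBuffer *fb;
	struct GPUTexture *depth_tx;
	struct GPUTexture *color_tx;
} e_data = {NULL};

static void example_framebuffer_ensure(void *engine_type, int width, int height)
{
	/* One persistent depth target, one pooled (DRW_TEX_TEMP) color target
	 * that is re-queried from the viewport pool each frame. */
	DRWFboTexture tex_desc[2] = {
		{&e_data.depth_tx, DRW_TEX_DEPTH_24, 0},
		{&e_data.color_tx, DRW_TEX_RGBA_8, DRW_TEX_TEMP | DRW_TEX_FILTER},
	};
	DRW_framebuffer_init(&e_data.fb, engine_type, width, height, tex_desc, 2);
}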
void DRW_framebuffer_free(GPUFrameBuffer *fb)
{
GPU_framebuffer_free(fb);
}
void DRW_framebuffer_bind(GPUFrameBuffer *fb)
{
GPU_framebuffer_bind(fb);
}
void DRW_framebuffer_clear(bool color, bool depth, bool stencil, float clear_col[4], float clear_depth)
{
if (color) {
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
}
if (depth) {
glDepthMask(GL_TRUE);
glClearDepth(clear_depth);
}
if (stencil) {
glStencilMask(0xFF);
}
glClear(((color) ? GL_COLOR_BUFFER_BIT : 0) |
((depth) ? GL_DEPTH_BUFFER_BIT : 0) |
((stencil) ? GL_STENCIL_BUFFER_BIT : 0));
}
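/* Usage sketch only (hypothetical wrapper): clear color and depth of the
 * currently bound framebuffer. */
static void example_clear_bound_framebuffer(void)
{
	float clear_col[4] = {0.0f, 0.0f, 0.0f, 1.0f};
	DRW_framebuffer_clear(true, true, false, clear_col, 1.0f);
}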
void DRW_framebuffer_read_data(int x, int y, int w, int h, int channels, int slot, float *data)
{
GLenum type;
switch (channels) {
case 1: type = GL_RED; break;
case 2: type = GL_RG; break;
case 3: type = GL_RGB; break;
case 4: type = GL_RGBA; break;
default:
BLI_assert(false && "wrong number of read channels");
return;
}
glReadBuffer(GL_COLOR_ATTACHMENT0 + slot);
glReadPixels(x, y, w, h, type, GL_FLOAT, data);
}
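/* Usage sketch only (hypothetical wrapper): read back a single RGBA pixel
 * from color attachment 0. */
static void example_read_pixel(int x, int y, float r_col[4])
{
	/* 1x1 region, 4 channels, slot 0 (GL_COLOR_ATTACHMENT0). */
	DRW_framebuffer_read_data(x, y, 1, 1, 4, 0, r_col);
}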
void DRW_framebuffer_read_depth(int x, int y, int w, int h, float *data)
{
GLenum type = GL_DEPTH_COMPONENT;
glReadBuffer(GL_COLOR_ATTACHMENT0); /* OK: the read buffer setting is ignored for depth reads. */
glReadPixels(x, y, w, h, type, GL_FLOAT, data);
}
void DRW_framebuffer_texture_attach(GPUFrameBuffer *fb, GPUTexture *tex, int slot, int mip)
{
GPU_framebuffer_texture_attach(fb, tex, slot, mip);
}
void DRW_framebuffer_texture_layer_attach(GPUFrameBuffer *fb, GPUTexture *tex, int slot, int layer, int mip)
{
GPU_framebuffer_texture_layer_attach(fb, tex, slot, layer, mip);
}
void DRW_framebuffer_cubeface_attach(GPUFrameBuffer *fb, GPUTexture *tex, int slot, int face, int mip)
{
GPU_framebuffer_texture_cubeface_attach(fb, tex, slot, face, mip);
}
void DRW_framebuffer_texture_detach(GPUTexture *tex)
{
GPU_framebuffer_texture_detach(tex);
}
void DRW_framebuffer_blit(GPUFrameBuffer *fb_read, GPUFrameBuffer *fb_write, bool depth, bool stencil)
{
GPU_framebuffer_blit(fb_read, 0, fb_write, 0, depth, stencil);
}
void DRW_framebuffer_recursive_downsample(
GPUFrameBuffer *fb, GPUTexture *tex, int num_iter,
void (*callback)(void *userData, int level), void *userData)
{
GPU_framebuffer_recursive_downsample(fb, tex, num_iter, callback, userData);
}
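/* Usage sketch only, inferred from the callback signature (names and the
 * iteration count are hypothetical): the callback runs once per mip level
 * while that level is the render target, e.g. to update uniforms. */
static void example_downsample_cb(void *userData, int level)
{
	(void)userData;
	(void)level;
}

static void example_downsample(GPUFrameBuffer *fb, GPUTexture *tex)
{
	DRW_framebuffer_recursive_downsample(fb, tex, 5, example_downsample_cb, NULL);
}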
void DRW_framebuffer_viewport_size(GPUFrameBuffer *UNUSED(fb_read), int x, int y, int w, int h)
{
glViewport(x, y, w, h);
}

View File

@ -32,7 +32,12 @@
#include "MEM_guardedalloc.h"
#include "draw_manager.h"
#include "GPU_glew.h"
#include "GPU_texture.h"
#include "UI_resources.h"
#include "WM_api.h"
#include "WM_types.h"
@ -198,14 +203,120 @@ void DRW_stats_reset(void)
}
}
/* Draw text in a fixed grid where column `u` is 5 widget-units wide. */
static void draw_stat_5row(rcti *rect, int u, int v, const char *txt, const int size)
{
BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
rect->ymax - (3 + v) * U.widget_unit, 0.0f,
txt, size);
}
/* Draw text in a fixed grid with 1 widget-unit wide columns. */
static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size)
{
BLF_draw_default_ascii(rect->xmin + (1 + u) * U.widget_unit,
rect->ymax - (3 + v) * U.widget_unit, 0.0f,
txt, size);
}
void DRW_stats_draw(rcti *rect)
{
char stat_string[64];
int lvl_index[MAX_NESTED_TIMER];
-int v = 0;
+int v = 0, u = 0;
BLI_snprintf(stat_string, sizeof(stat_string), "GPU Render Stats");
BLF_draw_default_ascii(rect->xmin + 1 * U.widget_unit, rect->ymax - v++ * U.widget_unit, 0.0f, stat_string, sizeof(stat_string));
double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
/* ------------------------------------------ */
/* ---------------- CPU stats --------------- */
/* ------------------------------------------ */
/* Label row */
char col_label[32];
sprintf(col_label, "Engine");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
sprintf(col_label, "Init");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
sprintf(col_label, "Background");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
sprintf(col_label, "Render");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
sprintf(col_label, "Total (w/o cache)");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
v++;
/* Engines rows */
char time_to_txt[16];
for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
u = 0;
DrawEngineType *engine = link->data;
ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
draw_stat_5row(rect, u++, v, engine->idname, sizeof(engine->idname));
init_tot_time += data->init_time;
sprintf(time_to_txt, "%.2fms", data->init_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
background_tot_time += data->background_time;
sprintf(time_to_txt, "%.2fms", data->background_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
render_tot_time += data->render_time;
sprintf(time_to_txt, "%.2fms", data->render_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
tot_time += data->init_time + data->background_time + data->render_time;
sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
v++;
}
/* Totals row */
u = 0;
sprintf(col_label, "Sub Total");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
sprintf(time_to_txt, "%.2fms", init_tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
sprintf(time_to_txt, "%.2fms", background_tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
sprintf(time_to_txt, "%.2fms", render_tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
sprintf(time_to_txt, "%.2fms", tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
v += 2;
u = 0;
sprintf(col_label, "Cache Time");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
sprintf(time_to_txt, "%.2fms", DST.cache_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
v += 2;
/* ------------------------------------------ */
/* ---------------- GPU stats --------------- */
/* ------------------------------------------ */
/* Memory Stats */
unsigned int tex_mem = GPU_texture_memory_usage_get();
unsigned int vbo_mem = GWN_vertbuf_get_memory_usage();
sprintf(stat_string, "GPU Memory");
draw_stat(rect, 0, v, stat_string, sizeof(stat_string));
sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
sprintf(stat_string, "Textures");
draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
sprintf(stat_string, "Meshes");
draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
v += 1;
/* GPU Timings */
BLI_snprintf(stat_string, sizeof(stat_string), "GPU Render Timings");
draw_stat(rect, 0, v++, stat_string, sizeof(stat_string));
for (int i = 0; i < DTP.timer_increment; ++i) {
double time_ms, time_percent;
@ -232,11 +343,11 @@ void DRW_stats_draw(rcti *rect)
time_percent = MIN2(time_percent, 100.0);
BLI_snprintf(stat_string, sizeof(stat_string), "%s", timer->name);
-BLF_draw_default_ascii(rect->xmin + (1 + timer->lvl) * U.widget_unit, rect->ymax - v * U.widget_unit, 0.0f, stat_string, sizeof(stat_string));
+draw_stat(rect, 0 + timer->lvl, v, stat_string, sizeof(stat_string));
BLI_snprintf(stat_string, sizeof(stat_string), "%.2fms", time_ms);
-BLF_draw_default_ascii(rect->xmin + (13 + timer->lvl) * U.widget_unit, rect->ymax - v * U.widget_unit, 0.0f, stat_string, sizeof(stat_string));
+draw_stat(rect, 12 + timer->lvl, v, stat_string, sizeof(stat_string));
BLI_snprintf(stat_string, sizeof(stat_string), "%.0f", time_percent);
-BLF_draw_default_ascii(rect->xmin + (17 + timer->lvl) * U.widget_unit, rect->ymax - v * U.widget_unit, 0.0f, stat_string, sizeof(stat_string));
+draw_stat(rect, 16 + timer->lvl, v, stat_string, sizeof(stat_string));
v++;
}
}
}

View File

@ -0,0 +1,90 @@
/*
* Copyright 2016, Blender Foundation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Blender Institute
*
*/
/** \file blender/draw/intern/draw_manager_shader.c
* \ingroup draw
*/
#include "draw_manager.h"
#include "BLI_string.h"
#include "BLI_string_utils.h"
#include "GPU_shader.h"
extern char datatoc_gpu_shader_2D_vert_glsl[];
extern char datatoc_gpu_shader_3D_vert_glsl[];
extern char datatoc_gpu_shader_fullscreen_vert_glsl[];
GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
return GPU_shader_create(vert, frag, geom, NULL, defines);
}
GPUShader *DRW_shader_create_with_lib(
const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
{
GPUShader *sh;
char *vert_with_lib = NULL;
char *frag_with_lib = NULL;
char *geom_with_lib = NULL;
vert_with_lib = BLI_string_joinN(lib, vert);
frag_with_lib = BLI_string_joinN(lib, frag);
if (geom) {
geom_with_lib = BLI_string_joinN(lib, geom);
}
sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines);
MEM_freeN(vert_with_lib);
MEM_freeN(frag_with_lib);
if (geom) {
MEM_freeN(geom_with_lib);
}
return sh;
}
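/* Usage sketch only: the datatoc_example_* symbols and the define are
 * hypothetical placeholders for sources generated by the datatoc build step. */
extern char datatoc_example_lib_glsl[];
extern char datatoc_example_vert_glsl[];
extern char datatoc_example_frag_glsl[];

static GPUShader *example_shader_create(void)
{
	/* The library source is prepended to both the vertex and fragment stages. */
	return DRW_shader_create_with_lib(
	        datatoc_example_vert_glsl, NULL, datatoc_example_frag_glsl,
	        datatoc_example_lib_glsl, "#define EXAMPLE_OPTION\n");
}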
GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines);
}
GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines);
}
GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
return GPU_shader_create(datatoc_gpu_shader_fullscreen_vert_glsl, frag, NULL, NULL, defines);
}
GPUShader *DRW_shader_create_3D_depth_only(void)
{
return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
}
void DRW_shader_free(GPUShader *shader)
{
GPU_shader_free(shader);
}

View File

@ -0,0 +1,213 @@
/*
* Copyright 2016, Blender Foundation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Blender Institute
*
*/
/** \file blender/draw/intern/draw_manager_texture.c
* \ingroup draw
*/
#include "draw_manager.h"
/* Map a DRWTextureFormat to its GPU format, channel count and depth flag. */
void drw_texture_get_format(
DRWTextureFormat format, bool is_framebuffer,
GPUTextureFormat *r_data_type, int *r_channels, bool *r_is_depth)
{
/* Some formats do not work with framebuffers. */
if (is_framebuffer) {
switch (format) {
/* Only add formats that are COMPATIBLE with FB.
* Generally they are multiples of 16 bits. */
case DRW_TEX_R_16:
case DRW_TEX_R_32:
case DRW_TEX_RG_8:
case DRW_TEX_RG_16:
case DRW_TEX_RG_16I:
case DRW_TEX_RG_32:
case DRW_TEX_RGBA_8:
case DRW_TEX_RGBA_16:
case DRW_TEX_RGBA_32:
case DRW_TEX_DEPTH_16:
case DRW_TEX_DEPTH_24:
case DRW_TEX_DEPTH_24_STENCIL_8:
case DRW_TEX_DEPTH_32:
case DRW_TEX_RGB_11_11_10:
break;
default:
BLI_assert(false && "Texture format unsupported as render target!");
*r_channels = 4;
*r_data_type = GPU_RGBA8;
*r_is_depth = false;
return;
}
}
switch (format) {
case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break;
case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break;
case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break;
case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break;
case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break;
case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break;
case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break;
case DRW_TEX_RG_16I: *r_data_type = GPU_RG16I; break;
case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break;
case DRW_TEX_R_8: *r_data_type = GPU_R8; break;
case DRW_TEX_R_16: *r_data_type = GPU_R16F; break;
case DRW_TEX_R_32: *r_data_type = GPU_R32F; break;
#if 0
case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break;
case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break;
#endif
case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break;
case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break;
case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break;
case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break;
default:
/* Texture format not supported: you must uncomment it in the #if 0 block above. */
BLI_assert(false);
break;
}
switch (format) {
case DRW_TEX_RGBA_8:
case DRW_TEX_RGBA_16:
case DRW_TEX_RGBA_32:
*r_channels = 4;
break;
case DRW_TEX_RGB_8:
case DRW_TEX_RGB_16:
case DRW_TEX_RGB_32:
case DRW_TEX_RGB_11_11_10:
*r_channels = 3;
break;
case DRW_TEX_RG_8:
case DRW_TEX_RG_16:
case DRW_TEX_RG_16I:
case DRW_TEX_RG_32:
*r_channels = 2;
break;
default:
*r_channels = 1;
break;
}
if (r_is_depth) {
*r_is_depth = ELEM(format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8, DRW_TEX_DEPTH_32);
}
}
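/* Worked example of the mapping above (hypothetical wrapper): DRW_TEX_RG_16
 * resolves to GPU_RG16F, two channels, and is not a depth format. */
static void example_query_format(void)
{
	GPUTextureFormat data_type;
	int channels;
	bool is_depth;
	drw_texture_get_format(DRW_TEX_RG_16, false, &data_type, &channels, &is_depth);
	BLI_assert(data_type == GPU_RG16F && channels == 2 && is_depth == false);
}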
void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
{
GPU_texture_bind(tex, 0);
if (flags & DRW_TEX_MIPMAP) {
GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
DRW_texture_generate_mipmaps(tex);
}
else {
GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
}
GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
GPU_texture_unbind(tex);
}
GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex;
GPUTextureFormat data_type;
int channels;
drw_texture_get_format(format, false, &data_type, &channels, NULL);
tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL);
drw_texture_set_parameters(tex, flags);
return tex;
}
GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex;
GPUTextureFormat data_type;
int channels;
drw_texture_get_format(format, false, &data_type, &channels, NULL);
tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL);
drw_texture_set_parameters(tex, flags);
return tex;
}
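/* Usage sketch only (size and flags are hypothetical): a filtered, mipmapped
 * RGBA8 texture with no initial pixel data. */
static GPUTexture *example_texture_create(void)
{
	return DRW_texture_create_2D(
	        256, 256, DRW_TEX_RGBA_8, DRW_TEX_FILTER | DRW_TEX_MIPMAP, NULL);
}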
GPUTexture *DRW_texture_create_2D_array(
int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex;
GPUTextureFormat data_type;
int channels;
drw_texture_get_format(format, false, &data_type, &channels, NULL);
tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL);
drw_texture_set_parameters(tex, flags);
return tex;
}
GPUTexture *DRW_texture_create_3D(
int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex;
GPUTextureFormat data_type;
int channels;
drw_texture_get_format(format, false, &data_type, &channels, NULL);
tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL);
drw_texture_set_parameters(tex, flags);
return tex;
}
GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex;
GPUTextureFormat data_type;
int channels;
drw_texture_get_format(format, false, &data_type, &channels, NULL);
tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL);
drw_texture_set_parameters(tex, flags);
return tex;
}
void DRW_texture_generate_mipmaps(GPUTexture *tex)
{
GPU_texture_bind(tex, 0);
GPU_texture_generate_mipmap(tex);
GPU_texture_unbind(tex);
}
void DRW_texture_update(GPUTexture *tex, const float *pixels)
{
GPU_texture_update(tex, pixels);
}
void DRW_texture_free(GPUTexture *tex)
{
GPU_texture_free(tex);
}