Cleanup: move public doc-strings into headers for 'draw'

Ref T92709
This commit is contained in:
Campbell Barton 2021-12-08 20:29:39 +11:00
parent a46ff1dd38
commit e89d42ddff
Notes: blender-bot 2023-02-13 17:15:14 +01:00
Referenced by issue #93854, Relocate doc-strings into public headers
Referenced by issue #92709, Code Style: documentation at declaration or definition
63 changed files with 723 additions and 359 deletions

View File

@ -75,9 +75,21 @@ typedef enum eDRWSelectStage {
typedef bool (*DRW_SelectPassFn)(eDRWSelectStage stage, void *user_data);
typedef bool (*DRW_ObjectFilterFn)(struct Object *ob, void *user_data);
/**
* Everything starts here.
* This function takes care of calling all cache and rendering functions
* for each relevant engine / mode engine.
*/
void DRW_draw_view(const struct bContext *C);
/**
* Draw render engine info.
*/
void DRW_draw_region_engine_info(int xoffset, int *yoffset, int line_height);
/**
* Used for both regular and off-screen drawing.
* DST needs to be reset before calling this function.
*/
void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
struct RenderEngineType *engine_type,
struct ARegion *region,
@ -88,6 +100,9 @@ void DRW_draw_render_loop(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
struct GPUViewport *viewport);
/**
* \param viewport: can be NULL, in which case we create one.
*/
void DRW_draw_render_loop_offscreen(struct Depsgraph *depsgraph,
struct RenderEngineType *engine_type,
struct ARegion *region,
@ -101,6 +116,9 @@ void DRW_draw_render_loop_2d_ex(struct Depsgraph *depsgraph,
struct ARegion *region,
struct GPUViewport *viewport,
const struct bContext *evil_C);
/**
* Object mode select-loop, see: #ED_view3d_draw_select_loop (legacy drawing).
*/
void DRW_draw_select_loop(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
@ -113,14 +131,23 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
void *select_pass_user_data,
DRW_ObjectFilterFn object_filter_fn,
void *object_filter_user_data);
/**
* Object mode select-loop, see: #ED_view3d_draw_depth_loop (legacy drawing).
*/
void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
struct GPUViewport *viewport);
/**
* Converted from #ED_view3d_draw_depth_gpencil (legacy drawing).
*/
void DRW_draw_depth_loop_gpencil(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
struct GPUViewport *viewport);
/**
* Clears the Depth Buffer and draws only the specified object.
*/
void DRW_draw_depth_object(struct Scene *scene,
struct ARegion *region,
struct View3D *v3d,
@ -131,11 +158,17 @@ void DRW_draw_select_id(struct Depsgraph *depsgraph,
struct View3D *v3d,
const struct rcti *rect);
/* grease pencil render */
/* Grease pencil render. */
/**
* Helper to check whether there are any grease-pencil objects to render.
*/
bool DRW_render_check_grease_pencil(struct Depsgraph *depsgraph);
void DRW_render_gpencil(struct RenderEngine *engine, struct Depsgraph *depsgraph);
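A minimal usage sketch for the two declarations above (not part of this commit; `engine` and `depsgraph` are assumed to come from the render pipeline):

if (DRW_render_check_grease_pencil(depsgraph)) {
  /* Only run the grease-pencil render pass when there is something to render. */
  DRW_render_gpencil(engine, depsgraph);
}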
/* This is here because GPUViewport needs it */
/**
* This is here because #GPUViewport needs it.
*/
struct DRWInstanceDataList *DRW_instance_data_list_create(void);
void DRW_instance_data_list_free(struct DRWInstanceDataList *idatalist);
void DRW_uniform_attrs_pool_free(struct GHash *table);
@ -165,11 +198,21 @@ void DRW_opengl_context_disable_ex(bool restore);
void DRW_opengl_render_context_enable(void *re_gl_context);
void DRW_opengl_render_context_disable(void *re_gl_context);
/**
* Needs to be called AFTER #DRW_opengl_render_context_enable().
*/
void DRW_gpu_render_context_enable(void *re_gpu_context);
/**
* Needs to be called BEFORE #DRW_opengl_render_context_disable().
*/
void DRW_gpu_render_context_disable(void *re_gpu_context);
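A hedged sketch of the ordering required by the two notes above (the `re_gl_context` / `re_gpu_context` handles are assumed to be provided by the render engine):

DRW_opengl_render_context_enable(re_gl_context);
DRW_gpu_render_context_enable(re_gpu_context);
/* ... engine drawing ... */
DRW_gpu_render_context_disable(re_gpu_context);
DRW_opengl_render_context_disable(re_gl_context);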
void DRW_deferred_shader_remove(struct GPUMaterial *mat);
/**
* Get DrawData from the given ID-block. In order for this to work, we assume that
* the DrawData pointer is stored in the struct in the same fashion as in #IdDdtTemplate.
*/
struct DrawDataList *DRW_drawdatalist_from_id(struct ID *id);
void DRW_drawdata_free(struct ID *id);
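For context, a sketch of the layout the comment above assumes: ID-blocks that carry draw-data begin with the ID, the animation-data pointer, then the #DrawDataList (shown for illustration only, mirroring #IdDdtTemplate):

typedef struct IdDdtTemplate {
  ID id;
  struct AnimData *adt;
  DrawDataList drawdata;
} IdDdtTemplate;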
@ -179,7 +222,10 @@ void DRW_viewport_data_free(struct DRWData *drw_data);
bool DRW_opengl_context_release(void);
void DRW_opengl_context_activate(bool drw_state);
/* We may want to move this into a more general location. */
/**
* We may want to move this into a more general location.
* \note This doesn't require the draw context to be in use.
*/
void DRW_draw_cursor_2d_ex(const struct ARegion *region, const float cursor[2]);
#ifdef __cplusplus

View File

@ -81,6 +81,7 @@ typedef struct SELECTID_Context {
} SELECTID_Context;
/* draw_select_buffer.c */
bool DRW_select_buffer_elem_get(const uint sel_id,
uint *r_elem,
uint *r_base_index,
@ -88,22 +89,41 @@ bool DRW_select_buffer_elem_get(const uint sel_id,
uint DRW_select_buffer_context_offset_for_object_elem(struct Depsgraph *depsgraph,
struct Object *object,
char elem_type);
/**
* Main function to read a block of pixels from the select frame buffer.
*/
uint *DRW_select_buffer_read(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
const rcti *rect,
uint *r_buf_len);
/**
* \param rect: The rectangle to sample indices from (min/max inclusive).
* \returns a #BLI_bitmap the length of \a r_bitmap_len or NULL on failure.
*/
uint *DRW_select_buffer_bitmap_from_rect(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
const struct rcti *rect,
uint *r_bitmap_len);
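A minimal usage sketch for the rectangle variant above (not part of this commit; it assumes the returned buffer is a standard #BLI_bitmap freed with #MEM_freeN, and `elem_index` is a hypothetical element index):

uint bitmap_len = 0;
uint *bitmap = DRW_select_buffer_bitmap_from_rect(depsgraph, region, v3d, &rect, &bitmap_len);
if (bitmap != NULL) {
  if (elem_index < bitmap_len && BLI_BITMAP_TEST(bitmap, elem_index)) {
    /* The element lies inside the selection rectangle. */
  }
  MEM_freeN(bitmap);
}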
/**
* \param center: Circle center.
* \param radius: Circle radius.
* \param r_bitmap_len: Number of indices in the selection id buffer.
* \returns a #BLI_bitmap the length of \a r_bitmap_len or NULL on failure.
*/
uint *DRW_select_buffer_bitmap_from_circle(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
const int center[2],
const int radius,
uint *r_bitmap_len);
/**
* \param poly: The polygon coordinates.
* \param poly_len: Length of the polygon.
* \param rect: Polygon boundaries.
* \returns a #BLI_bitmap.
*/
uint *DRW_select_buffer_bitmap_from_poly(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
@ -111,10 +131,18 @@ uint *DRW_select_buffer_bitmap_from_poly(struct Depsgraph *depsgraph,
const int poly_len,
const struct rcti *rect,
uint *r_bitmap_len);
/**
* Samples a single pixel.
*/
uint DRW_select_buffer_sample_point(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
const int center[2]);
/**
* Find the selection id closest to \a center.
* \param dist: Used to initialize the distance;
* when found, this value is set to the distance of the selection that's returned.
*/
uint DRW_select_buffer_find_nearest_to_point(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,

View File

@ -435,9 +435,6 @@ void EEVEE_cryptomatte_output_accumulate(EEVEE_ViewLayerData *UNUSED(sldata), EE
/** \name Update Render Passes
* \{ */
/* Register the render passes needed for cryptomatte
* normally this is done in `EEVEE_render_update_passes`, but it has been placed here to keep
* related code side-by-side for clarity. */
void EEVEE_cryptomatte_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer)
{
char cryptomatte_pass_name[MAX_NAME];

View File

@ -411,9 +411,6 @@ static void downsample_radiance_cb(void *vedata, int level)
DRW_draw_pass(psl->color_downsample_ps);
}
/**
* Simple down-sampling algorithm. Reconstruct mip chain up to mip level.
*/
void EEVEE_effects_downsample_radiance_buffer(EEVEE_Data *vedata, GPUTexture *texture_src)
{
EEVEE_PassList *psl = vedata->psl;
@ -430,9 +427,6 @@ void EEVEE_effects_downsample_radiance_buffer(EEVEE_Data *vedata, GPUTexture *te
DRW_stats_group_end();
}
/**
* Simple down-sampling algorithm for cube-map. Reconstruct mip chain up to mip level.
*/
void EEVEE_downsample_cube_buffer(EEVEE_Data *vedata, GPUTexture *texture_src, int level)
{
EEVEE_FramebufferList *fbl = vedata->fbl;

View File

@ -806,7 +806,6 @@ wmJob *EEVEE_lightbake_job_create(struct wmWindowManager *wm,
return wm_job;
}
/* MUST run on the main thread. */
void *EEVEE_lightbake_job_data_alloc(struct Main *bmain,
struct ViewLayer *view_layer,
struct Scene *scene,
@ -1484,8 +1483,6 @@ void EEVEE_lightbake_job(void *custom_data, short *stop, short *do_update, float
EEVEE_volumes_free_smoke_textures();
}
/* This is to update the world irradiance and reflection contribution from
* within the viewport drawing (does not have the overhead of a full light cache rebuild.) */
void EEVEE_lightbake_update_world_quick(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
const Scene *scene)

View File

@ -33,7 +33,9 @@ struct Scene;
struct SceneEEVEE;
struct ViewLayer;
/* Light Bake */
/**
* Light Bake.
*/
struct wmJob *EEVEE_lightbake_job_create(struct wmWindowManager *wm,
struct wmWindow *win,
struct Main *bmain,
@ -41,6 +43,9 @@ struct wmJob *EEVEE_lightbake_job_create(struct wmWindowManager *wm,
struct Scene *scene,
int delay,
int frame);
/**
* MUST run on the main thread.
*/
void *EEVEE_lightbake_job_data_alloc(struct Main *bmain,
struct ViewLayer *view_layer,
struct Scene *scene,
@ -50,11 +55,17 @@ void EEVEE_lightbake_job_data_free(void *custom_data);
void EEVEE_lightbake_update(void *custom_data);
void EEVEE_lightbake_job(void *custom_data, short *stop, short *do_update, float *progress);
/**
* This is to update the world irradiance and reflection contribution from
* within the viewport drawing (does not have the overhead of a full light cache rebuild).
*/
void EEVEE_lightbake_update_world_quick(struct EEVEE_ViewLayerData *sldata,
struct EEVEE_Data *vedata,
const Scene *scene);
/* Light Cache */
/**
* Light Cache.
*/
struct LightCache *EEVEE_lightcache_create(const int grid_len,
const int cube_len,
const int cube_size,

View File

@ -202,7 +202,6 @@ void EEVEE_lightprobes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
}
}
/* Only init the passes useful for rendering the light cache. */
void EEVEE_lightbake_cache_init(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
GPUTexture *rt_color,
@ -871,7 +870,6 @@ static void lightbake_render_scene_face(int face, EEVEE_BakeRenderData *user_dat
DRW_draw_pass(psl->transparent_pass);
}
/* Render the scene to the probe_rt texture. */
void EEVEE_lightbake_render_scene(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUFrameBuffer *face_fb[6],
@ -975,7 +973,6 @@ static void eevee_lightbake_render_scene_to_planars(EEVEE_ViewLayerData *sldata,
/** \name Filtering
* \{ */
/* Glossy filter rt_color to light_cache->cube_tx.tex at index probe_idx */
void EEVEE_lightbake_filter_glossy(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUTexture *rt_color,
@ -1064,7 +1061,6 @@ void EEVEE_lightbake_filter_glossy(EEVEE_ViewLayerData *sldata,
}
}
/* Diffuse filter rt_color to light_cache->grid_tx.tex at index grid_offset */
void EEVEE_lightbake_filter_diffuse(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUTexture *rt_color,
@ -1117,7 +1113,6 @@ void EEVEE_lightbake_filter_diffuse(EEVEE_ViewLayerData *sldata,
GPU_framebuffer_viewport_reset(fb);
}
/* Filter rt_depth to light_cache->grid_tx.tex at index grid_offset */
void EEVEE_lightbake_filter_visibility(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUTexture *UNUSED(rt_depth),

View File

@ -28,7 +28,6 @@
#include "eevee_private.h"
/* Reconstruct local obmat from EEVEE_light. (normalized) */
void eevee_light_matrix_get(const EEVEE_Light *evli, float r_mat[4][4])
{
copy_v3_v3(r_mat[0], evli->rightvec);

View File

@ -76,9 +76,6 @@ struct GPUTexture *EEVEE_materials_get_util_tex(void)
return e_data.util_tex;
}
/**
* ssr_id can be null to disable ssr contribution.
*/
void EEVEE_material_bind_resources(DRWShadingGroup *shgrp,
GPUMaterial *gpumat,
EEVEE_ViewLayerData *sldata,

View File

@ -1073,6 +1073,7 @@ typedef struct EEVEE_PrivateData {
} EEVEE_PrivateData; /* Transient data */
/* eevee_data.c */
void EEVEE_motion_blur_data_init(EEVEE_MotionBlurData *mb);
void EEVEE_motion_blur_data_free(EEVEE_MotionBlurData *mb);
void EEVEE_view_layer_data_free(void *storage);
@ -1097,6 +1098,7 @@ EEVEE_WorldEngineData *EEVEE_world_data_ensure(World *wo);
void eevee_id_update(void *vedata, ID *id);
/* eevee_materials.c */
struct GPUTexture *EEVEE_materials_get_util_tex(void); /* XXX */
void EEVEE_materials_init(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
@ -1121,6 +1123,9 @@ void EEVEE_update_noise(EEVEE_PassList *psl, EEVEE_FramebufferList *fbl, const d
void EEVEE_material_renderpasses_init(EEVEE_Data *vedata);
void EEVEE_material_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, uint tot_samples);
void EEVEE_material_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/**
* \param ssr_id: Can be null to disable SSR contribution.
*/
void EEVEE_material_bind_resources(DRWShadingGroup *shgrp,
struct GPUMaterial *gpumat,
EEVEE_ViewLayerData *sldata,
@ -1130,20 +1135,34 @@ void EEVEE_material_bind_resources(DRWShadingGroup *shgrp,
bool use_ssrefraction,
bool use_alpha_blend);
/* eevee_lights.c */
/**
* Reconstruct the local `obmat` from an #EEVEE_Light (normalized).
*/
void eevee_light_matrix_get(const EEVEE_Light *evli, float r_mat[4][4]);
void EEVEE_lights_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_lights_cache_add(EEVEE_ViewLayerData *sldata, struct Object *ob);
void EEVEE_lights_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/* eevee_shadows.c */
void eevee_contact_shadow_setup(const Light *la, EEVEE_Shadow *evsh);
void EEVEE_shadows_init(EEVEE_ViewLayerData *sldata);
void EEVEE_shadows_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/**
* Make the given object update shadow-casting lights inside its influence bounding box.
*/
void EEVEE_shadows_caster_register(EEVEE_ViewLayerData *sldata, struct Object *ob);
void EEVEE_shadows_update(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_shadows_cube_add(EEVEE_LightsInfo *linfo, EEVEE_Light *evli, struct Object *ob);
/**
* Return true if sample has changed and light needs to be updated.
*/
bool EEVEE_shadows_cube_setup(EEVEE_LightsInfo *linfo, const EEVEE_Light *evli, int sample_ofs);
void EEVEE_shadows_cascade_add(EEVEE_LightsInfo *linfo, EEVEE_Light *evli, struct Object *ob);
/**
* This refreshes the lights' shadow buffers.
*/
void EEVEE_shadows_draw(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, struct DRWView *view);
void EEVEE_shadows_draw_cubemap(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, int cube_index);
void EEVEE_shadows_draw_cascades(EEVEE_ViewLayerData *sldata,
@ -1154,6 +1173,13 @@ void EEVEE_shadow_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, u
void EEVEE_shadow_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/* eevee_sampling.c */
/**
* Special ball distribution:
* Points are distributed in a way that when they are orthogonally
* projected into any plane, the resulting distribution is (close to)
* a uniform disc distribution.
*/
void EEVEE_sample_ball(int sample_ofs, float radius, float rsample[3]);
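An illustrative (hypothetical) use of the distribution described above, jittering a sample position inside a light's radius; `light_radius` and `sample_position` are assumptions:

float jitter[3];
EEVEE_sample_ball(sample_ofs, light_radius, jitter);
add_v3_v3(sample_position, jitter); /* BLI_math vector helper. */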
void EEVEE_sample_rectangle(int sample_ofs,
const float x_axis[3],
@ -1170,6 +1196,7 @@ void EEVEE_sample_ellipse(int sample_ofs,
void EEVEE_random_rotation_m4(int sample_ofs, float scale, float r_mat[4][4]);
/* eevee_shaders.c */
void EEVEE_shaders_material_shaders_init(void);
struct DRWShaderLibrary *EEVEE_shader_lib_get(void);
struct GPUShader *EEVEE_shaders_bloom_blit_get(bool high_quality);
@ -1236,18 +1263,28 @@ struct GPUShader *EEVEE_shaders_probe_planar_display_sh_get(void);
struct GPUShader *EEVEE_shaders_update_noise_sh_get(void);
struct GPUShader *EEVEE_shaders_velocity_resolve_sh_get(void);
struct GPUShader *EEVEE_shaders_taa_resolve_sh_get(EEVEE_EffectsFlag enabled_effects);
/**
* Configure a default node-tree with the given material.
*/
struct bNodeTree *EEVEE_shader_default_surface_nodetree(Material *ma);
/**
* Configure a default node-tree with the given world.
*/
struct bNodeTree *EEVEE_shader_default_world_nodetree(World *wo);
Material *EEVEE_material_default_diffuse_get(void);
Material *EEVEE_material_default_glossy_get(void);
Material *EEVEE_material_default_error_get(void);
World *EEVEE_world_default_get(void);
/**
* \note Compilation is not deferred.
*/
struct GPUMaterial *EEVEE_material_default_get(struct Scene *scene, Material *ma, int options);
struct GPUMaterial *EEVEE_material_get(
EEVEE_Data *vedata, struct Scene *scene, Material *ma, World *wo, int options);
void EEVEE_shaders_free(void);
/* eevee_lightprobes.c */
bool EEVEE_lightprobes_obj_visibility_cb(bool vis_in, void *user_data);
void EEVEE_lightprobes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
@ -1257,6 +1294,9 @@ void EEVEE_lightprobes_refresh(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_lightprobes_refresh_planar(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_lightprobes_free(void);
/**
* Only initialize the passes useful for rendering the light cache.
*/
void EEVEE_lightbake_cache_init(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
GPUTexture *rt_color,
@ -1264,12 +1304,18 @@ void EEVEE_lightbake_cache_init(EEVEE_ViewLayerData *sldata,
void EEVEE_lightbake_render_world(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUFrameBuffer *face_fb[6]);
/**
* Render the scene to the `probe_rt` texture.
*/
void EEVEE_lightbake_render_scene(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUFrameBuffer *face_fb[6],
const float pos[3],
float near_clip,
float far_clip);
/**
* Glossy filter `rt_color` to `light_cache->cube_tx.tex` at index `probe_idx`.
*/
void EEVEE_lightbake_filter_glossy(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUTexture *rt_color,
@ -1279,12 +1325,18 @@ void EEVEE_lightbake_filter_glossy(EEVEE_ViewLayerData *sldata,
int maxlevel,
float filter_quality,
float firefly_fac);
/**
* Diffuse filter `rt_color` to `light_cache->grid_tx.tex` at index `grid_offset`.
*/
void EEVEE_lightbake_filter_diffuse(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUTexture *rt_color,
struct GPUFrameBuffer *fb,
int grid_offset,
float intensity);
/**
* Filter `rt_depth` to `light_cache->grid_tx.tex` at index `grid_offset`.
*/
void EEVEE_lightbake_filter_visibility(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
struct GPUTexture *rt_depth,
@ -1303,6 +1355,7 @@ void EEVEE_lightprobes_planar_data_from_object(Object *ob,
EEVEE_LightProbeVisTest *vis_test);
/* eevee_depth_of_field.c */
int EEVEE_depth_of_field_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, Object *camera);
void EEVEE_depth_of_field_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_depth_of_field_draw(EEVEE_Data *vedata);
@ -1314,6 +1367,7 @@ int EEVEE_depth_of_field_sample_count_get(EEVEE_EffectsInfo *effects,
int *r_ring_count);
/* eevee_bloom.c */
int EEVEE_bloom_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_bloom_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_bloom_draw(EEVEE_Data *vedata);
@ -1321,6 +1375,7 @@ void EEVEE_bloom_output_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, ui
void EEVEE_bloom_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/* eevee_cryptomatte.c */
void EEVEE_cryptomatte_renderpasses_init(EEVEE_Data *vedata);
void EEVEE_cryptomatte_output_init(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
@ -1334,6 +1389,11 @@ void EEVEE_cryptomatte_object_hair_cache_populate(EEVEE_Data *vedata,
EEVEE_ViewLayerData *sldata,
Object *ob);
void EEVEE_cryptomatte_output_accumulate(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/**
* Register the render passes needed for cryptomatte.
* Normally this is done in `EEVEE_render_update_passes`, but it has been placed here to keep
* related code side-by-side for clarity.
*/
void EEVEE_cryptomatte_update_passes(struct RenderEngine *engine,
struct Scene *scene,
struct ViewLayer *view_layer);
@ -1409,6 +1469,17 @@ void EEVEE_renderpasses_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *ve
void EEVEE_renderpasses_output_accumulate(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
bool post_effect);
/**
* Post-process data to construct a specific render-pass
*
* This method will create a shading group to perform the post-processing for the given
* `renderpass_type`. The post-processing will be done and the result will be stored in the
* `vedata->txl->renderpass` texture.
*
* Only invoke this function for passes that need post-processing.
*
* After invoking this function the active frame-buffer is set to `vedata->fbl->renderpass_fb`.
*/
void EEVEE_renderpasses_postprocess(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
eViewLayerEEVEEPassType renderpass_type,
@ -1416,6 +1487,10 @@ void EEVEE_renderpasses_postprocess(EEVEE_ViewLayerData *sldata,
void EEVEE_renderpasses_draw(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_renderpasses_draw_debug(EEVEE_Data *vedata);
bool EEVEE_renderpasses_only_first_sample_pass_active(EEVEE_Data *vedata);
/**
* Calculate the hash for an AOV. The least significant bit is used to store the AOV
* type; the rest of the bits are used for the name hash.
*/
int EEVEE_renderpasses_aov_hash(const ViewLayerAOV *aov);
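Illustrative sketch of the bit layout described above (only the shift matches the implementation; which AOV type sets bit 0 is an assumption here):

int hash = BLI_hash_string(aov->name) << 1; /* Name hash in the upper bits. */
hash |= (aov->type == AOV_TYPE_COLOR) ? 1 : 0; /* Assumption: AOV type stored in bit 0. */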
/* eevee_temporal_sampling.c */
@ -1427,11 +1502,16 @@ void EEVEE_temporal_sampling_offset_calc(const double ht_point[2],
const float filter_size,
float r_offset[2]);
void EEVEE_temporal_sampling_matrices_calc(EEVEE_EffectsInfo *effects, const double ht_point[2]);
/**
* Update the matrices based on the current sample.
* \note `DRW_MAT_PERS` and `DRW_MAT_VIEW` need to read the original matrices.
*/
void EEVEE_temporal_sampling_update_matrices(EEVEE_Data *vedata);
void EEVEE_temporal_sampling_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_temporal_sampling_draw(EEVEE_Data *vedata);
/* eevee_volumes.c */
void EEVEE_volumes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_volumes_set_jitter(EEVEE_ViewLayerData *sldata, uint current_sample);
void EEVEE_volumes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
@ -1449,18 +1529,29 @@ void EEVEE_volumes_free_smoke_textures(void);
void EEVEE_volumes_free(void);
/* eevee_effects.c */
void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
EEVEE_Data *vedata,
Object *camera,
const bool minimal);
void EEVEE_effects_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
void EEVEE_effects_draw_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/**
* Simple down-sampling algorithm. Reconstruct mip chain up to mip level.
*/
void EEVEE_effects_downsample_radiance_buffer(EEVEE_Data *vedata, struct GPUTexture *texture_src);
void EEVEE_create_minmax_buffer(EEVEE_Data *vedata, struct GPUTexture *depth_src, int layer);
/**
* Simple down-sampling algorithm for cube-map. Reconstruct mip chain up to mip level.
*/
void EEVEE_downsample_cube_buffer(EEVEE_Data *vedata, struct GPUTexture *texture_src, int level);
void EEVEE_draw_effects(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/* eevee_render.c */
/**
* Return true if initialized properly.
*/
bool EEVEE_render_init(EEVEE_Data *vedata,
struct RenderEngine *engine,
struct Depsgraph *depsgraph);
@ -1471,6 +1562,9 @@ void EEVEE_render_modules_init(EEVEE_Data *vedata,
struct RenderEngine *engine,
struct Depsgraph *depsgraph);
void EEVEE_render_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata);
/**
* Used by the light cache. In this case `engine` is NULL.
*/
void EEVEE_render_cache(void *vedata,
struct Object *ob,
struct RenderEngine *engine,

View File

@ -46,7 +46,6 @@
#include "eevee_private.h"
/* Return true if init properly. */
bool EEVEE_render_init(EEVEE_Data *ved, RenderEngine *engine, struct Depsgraph *depsgraph)
{
EEVEE_Data *vedata = (EEVEE_Data *)ved;
@ -194,7 +193,6 @@ void EEVEE_render_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
EEVEE_cryptomatte_cache_init(sldata, vedata);
}
/* Used by light cache. in this case engine is NULL. */
void EEVEE_render_cache(void *vedata,
struct Object *ob,
struct RenderEngine *engine,

View File

@ -75,8 +75,6 @@ bool EEVEE_renderpasses_only_first_sample_pass_active(EEVEE_Data *vedata)
return (g_data->render_passes & ~EEVEE_RENDERPASSES_POST_PROCESS_ON_FIRST_SAMPLE) == 0;
}
/* Calculate the hash for an AOV. The least significant bit is used to store the AOV
* type the rest of the bits are used for the name hash. */
int EEVEE_renderpasses_aov_hash(const ViewLayerAOV *aov)
{
int hash = BLI_hash_string(aov->name) << 1;
@ -257,15 +255,6 @@ void EEVEE_renderpasses_cache_finish(EEVEE_ViewLayerData *sldata, EEVEE_Data *ve
}
}
/* Post-process data to construct a specific render-pass
*
* This method will create a shading group to perform the post-processing for the given
* `renderpass_type`. The post-processing will be done and the result will be stored in the
* `vedata->txl->renderpass` texture.
*
* Only invoke this function for passes that need post-processing.
*
* After invoking this function the active frame-buffer is set to `vedata->fbl->renderpass_fb`. */
void EEVEE_renderpasses_postprocess(EEVEE_ViewLayerData *UNUSED(sldata),
EEVEE_Data *vedata,
eViewLayerEEVEEPassType renderpass_type,

View File

@ -24,12 +24,6 @@
#include "BLI_rand.h"
/**
* Special ball distribution:
* Point are distributed in a way that when they are orthogonally
* projected into any plane, the resulting distribution is (close to)
* a uniform disc distribution.
*/
void EEVEE_sample_ball(int sample_ofs, float radius, float rsample[3])
{
double ht_point[3];

View File

@ -1274,7 +1274,6 @@ Material *EEVEE_material_default_error_get(void)
return e_data.error_mat;
}
/* Configure a default nodetree with the given material. */
struct bNodeTree *EEVEE_shader_default_surface_nodetree(Material *ma)
{
/* WARNING: This function is not threadsafe. Which is not a problem for the moment. */
@ -1302,7 +1301,6 @@ struct bNodeTree *EEVEE_shader_default_surface_nodetree(Material *ma)
return e_data.surface.ntree;
}
/* Configure a default nodetree with the given world. */
struct bNodeTree *EEVEE_shader_default_world_nodetree(World *wo)
{
/* WARNING: This function is not threadsafe. Which is not a problem for the moment. */
@ -1493,7 +1491,6 @@ static struct GPUMaterial *eevee_material_get_ex(
return mat;
}
/* NOTE: Compilation is not deferred. */
struct GPUMaterial *EEVEE_material_default_get(struct Scene *scene, Material *ma, int options)
{
Material *def_ma = (ma && (options & VAR_MAT_VOLUME)) ? BKE_material_default_volume() :

View File

@ -123,7 +123,6 @@ void EEVEE_shadows_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
}
}
/* Make that object update shadow casting lights inside its influence bounding box. */
void EEVEE_shadows_caster_register(EEVEE_ViewLayerData *sldata, Object *ob)
{
EEVEE_LightsInfo *linfo = sldata->lights;
@ -300,7 +299,6 @@ void EEVEE_shadows_update(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
}
}
/* this refresh lights shadow buffers */
void EEVEE_shadows_draw(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata, DRWView *view)
{
EEVEE_LightsInfo *linfo = sldata->lights;

View File

@ -95,7 +95,6 @@ start:
add_v3_v3(ws_sample_pos, jitter);
}
/* Return true if sample has changed and light needs to be updated. */
bool EEVEE_shadows_cube_setup(EEVEE_LightsInfo *linfo, const EEVEE_Light *evli, int sample_ofs)
{
EEVEE_Shadow *shdw_data = linfo->shadow_data + (int)evli->shadow_id;

View File

@ -196,8 +196,6 @@ void EEVEE_temporal_sampling_matrices_calc(EEVEE_EffectsInfo *effects, const dou
DRW_view_update_sub(effects->taa_view, viewmat, winmat);
}
/* Update the matrices based on the current sample.
* NOTE: `DRW_MAT_PERS` and `DRW_MAT_VIEW` needs to read the original matrices. */
void EEVEE_temporal_sampling_update_matrices(EEVEE_Data *vedata)
{
EEVEE_StorageList *stl = ((EEVEE_Data *)vedata)->stl;

View File

@ -176,11 +176,6 @@ static MaterialGPencilStyle *gpencil_viewport_material_overrides(
return gp_style;
}
/**
* Creates a linked list of material pool containing all materials assigned for a given object.
* We merge the material pools together if object does not contain a huge amount of materials.
* Also return an offset to the first material of the object in the ubo.
*/
GPENCIL_MaterialPool *gpencil_material_pool_create(GPENCIL_PrivateData *pd, Object *ob, int *ofs)
{
GPENCIL_MaterialPool *matpool = pd->last_material_pool;
@ -429,9 +424,6 @@ void gpencil_light_pool_populate(GPENCIL_LightPool *lightpool, Object *ob)
}
}
/**
* Creates a single pool containing all lights assigned (light linked) for a given object.
*/
GPENCIL_LightPool *gpencil_light_pool_create(GPENCIL_PrivateData *pd, Object *UNUSED(ob))
{
GPENCIL_LightPool *lightpool = pd->last_light_pool;

View File

@ -392,6 +392,11 @@ GPENCIL_tLayer *gpencil_layer_cache_add(GPENCIL_PrivateData *pd,
GPENCIL_tObject *tgp_ob);
GPENCIL_tLayer *gpencil_layer_cache_get(GPENCIL_tObject *tgp_ob, int number);
/**
* Creates a linked list of material pools containing all materials assigned to a given object.
* We merge the material pools together if the object does not contain a huge amount of materials.
* Also returns an offset to the first material of the object in the UBO.
*/
GPENCIL_MaterialPool *gpencil_material_pool_create(GPENCIL_PrivateData *pd, Object *ob, int *ofs);
void gpencil_material_resources_get(GPENCIL_MaterialPool *first_pool,
int mat_id,
@ -402,6 +407,9 @@ void gpencil_material_resources_get(GPENCIL_MaterialPool *first_pool,
void gpencil_light_ambient_add(GPENCIL_LightPool *lightpool, const float color[3]);
void gpencil_light_pool_populate(GPENCIL_LightPool *lightpool, Object *ob);
GPENCIL_LightPool *gpencil_light_pool_add(GPENCIL_PrivateData *pd);
/**
* Creates a single pool containing all lights assigned (light linked) for a given object.
*/
GPENCIL_LightPool *gpencil_light_pool_create(GPENCIL_PrivateData *pd, Object *ob);
/* effects */
@ -436,6 +444,10 @@ void GPENCIL_cache_finish(void *vedata);
void GPENCIL_draw_scene(void *vedata);
/* render */
/**
* Initialize render data.
*/
void GPENCIL_render_init(struct GPENCIL_Data *ved,
struct RenderEngine *engine,
struct RenderLayer *render_layer,

View File

@ -33,7 +33,6 @@
#include "gpencil_engine.h"
/* init render data */
void GPENCIL_render_init(GPENCIL_Data *vedata,
RenderEngine *engine,
struct RenderLayer *render_layer,

View File

@ -104,9 +104,6 @@ typedef struct ArmatureDrawContext {
const ThemeWireColor *bcolor; /* pchan color */
} ArmatureDrawContext;
/**
* Return true if armature should be handled by the pose mode engine.
*/
bool OVERLAY_armature_is_pose_mode(Object *ob, const DRWContextState *draw_ctx)
{
Object *active_ob = draw_ctx->obact;

View File

@ -462,8 +462,6 @@ void OVERLAY_image_cache_finish(OVERLAY_Data *vedata)
DRW_pass_sort_shgroup_z(psl->image_empties_back_ps);
}
/* This function draws images that needs the view transform applied.
* It draws these images directly into the scene color buffer. */
void OVERLAY_image_scene_background_draw(OVERLAY_Data *vedata)
{
OVERLAY_PassList *psl = vedata->psl;

View File

@ -510,6 +510,9 @@ void OVERLAY_xray_fade_draw(OVERLAY_Data *vedata);
void OVERLAY_xray_depth_copy(OVERLAY_Data *vedata);
void OVERLAY_xray_depth_infront_copy(OVERLAY_Data *vedata);
/**
* Return true if armature should be handled by the pose mode engine.
*/
bool OVERLAY_armature_is_pose_mode(Object *ob, const struct DRWContextState *draw_ctx);
void OVERLAY_armature_cache_init(OVERLAY_Data *vedata);
void OVERLAY_armature_cache_populate(OVERLAY_Data *vedata, Object *ob);
@ -631,6 +634,10 @@ void OVERLAY_image_empty_cache_populate(OVERLAY_Data *vedata, Object *ob);
void OVERLAY_image_cache_finish(OVERLAY_Data *vedata);
void OVERLAY_image_draw(OVERLAY_Data *vedata);
void OVERLAY_image_background_draw(OVERLAY_Data *vedata);
/**
* This function draws images that need the view transform applied.
* It draws these images directly into the scene color buffer.
*/
void OVERLAY_image_scene_background_draw(OVERLAY_Data *vedata);
void OVERLAY_image_in_front_draw(OVERLAY_Data *vedata);

View File

@ -371,7 +371,6 @@ void workbench_antialiasing_cache_init(WORKBENCH_Data *vedata)
}
}
/* Return true if render is not cached. */
bool workbench_antialiasing_setup(WORKBENCH_Data *vedata)
{
WORKBENCH_PrivateData *wpd = vedata->stl->wpd;

View File

@ -473,8 +473,6 @@ void workbench_cache_finish(void *ved)
}
}
/* Used by viewport rendering & final rendering.
* Do one render loop iteration (i.e: One TAA sample). */
void workbench_draw_sample(void *ved)
{
WORKBENCH_Data *vedata = ved;

View File

@ -244,7 +244,6 @@ DRWShadingGroup *workbench_material_setup_ex(WORKBENCH_PrivateData *wpd,
}
}
/* If ima is null, search appropriate image node but will fallback to purple texture otherwise. */
DRWShadingGroup *workbench_image_setup_ex(WORKBENCH_PrivateData *wpd,
Object *ob,
int mat_nr,

View File

@ -414,6 +414,10 @@ void workbench_opaque_cache_init(WORKBENCH_Data *data);
/* workbench_transparent.c */
void workbench_transparent_engine_init(WORKBENCH_Data *data);
void workbench_transparent_cache_init(WORKBENCH_Data *data);
/**
* Redraw the transparent passes but with depth test
* to output correct outline IDs and depth.
*/
void workbench_transparent_draw_depth_pass(WORKBENCH_Data *data);
/* workbench_shadow.c */
@ -463,6 +467,9 @@ int workbench_antialiasing_sample_count_get(WORKBENCH_PrivateData *wpd);
void workbench_antialiasing_engine_init(WORKBENCH_Data *vedata);
void workbench_antialiasing_cache_init(WORKBENCH_Data *vedata);
void workbench_antialiasing_view_updated(WORKBENCH_Data *vedata);
/**
* Return true if render is not cached.
*/
bool workbench_antialiasing_setup(WORKBENCH_Data *vedata);
void workbench_antialiasing_draw_pass(WORKBENCH_Data *vedata);
@ -491,6 +498,9 @@ DRWShadingGroup *workbench_material_setup_ex(WORKBENCH_PrivateData *wpd,
eV3DShadingColorType color_type,
eWORKBENCH_DataType datatype,
bool *r_transp);
/**
* If `ima` is NULL, search for an appropriate image node, falling back to the purple texture otherwise.
*/
DRWShadingGroup *workbench_image_setup_ex(WORKBENCH_PrivateData *wpd,
Object *ob,
int mat_nr,
@ -535,6 +545,10 @@ void workbench_engine_init(void *ved);
void workbench_cache_init(void *ved);
void workbench_cache_populate(void *ved, Object *ob);
void workbench_cache_finish(void *ved);
/**
* Used by viewport rendering & final rendering.
* Do one render loop iteration (i.e. one TAA sample).
*/
void workbench_draw_sample(void *ved);
void workbench_draw_finish(void *ved);

View File

@ -149,8 +149,6 @@ void workbench_transparent_cache_init(WORKBENCH_Data *vedata)
}
}
/* Redraw the transparent passes but with depth test
* to output correct outline IDs and depth. */
void workbench_transparent_draw_depth_pass(WORKBENCH_Data *data)
{
WORKBENCH_PrivateData *wpd = data->stl->wpd;

View File

@ -147,9 +147,11 @@ typedef enum {
DRW_TEX_MIPMAP = (1 << 3),
} DRWTextureFlag;
/* Textures from DRW_texture_pool_query_* have the options
* DRW_TEX_FILTER for color float textures, and no options
* for depth textures and integer textures. */
/**
* Textures from `DRW_texture_pool_query_*` have the options
* #DRW_TEX_FILTER for color float textures, and no options
* for depth textures and integer textures.
*/
struct GPUTexture *DRW_texture_pool_query_2d(int w,
int h,
eGPUTextureFormat format,
@ -288,11 +290,17 @@ void DRW_shader_free(struct GPUShader *shader);
DRWShaderLibrary *DRW_shader_library_create(void);
/* Warning: Each library must be added after all its dependencies. */
/**
* \warning Each library must be added after all its dependencies.
*/
void DRW_shader_library_add_file(DRWShaderLibrary *lib, char *lib_code, const char *lib_name);
#define DRW_SHADER_LIB_ADD(lib, lib_name) \
DRW_shader_library_add_file(lib, datatoc_##lib_name##_glsl, STRINGIFY(lib_name) ".glsl")
/**
* \return an allocN'ed string containing the shader code with its dependencies prepended.
* Caller must free the string with #MEM_freeN after use.
*/
char *DRW_shader_library_create_shader_string(const DRWShaderLibrary *lib,
const char *shader_code);
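A brief sketch of the intended flow (not part of this commit; the `datatoc_common_math_lib_glsl` source string and `frag_src` are assumptions, and per the note above the returned string is freed with #MEM_freeN):

DRWShaderLibrary *lib = DRW_shader_library_create();
DRW_SHADER_LIB_ADD(lib, common_math_lib); /* Dependencies must be added before dependents. */
char *code = DRW_shader_library_create_shader_string(lib, frag_src);
/* ... build the GPUShader from `code` ... */
MEM_freeN(code);
DRW_shader_library_free(lib);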
@ -306,11 +314,14 @@ void DRW_shader_library_free(DRWShaderLibrary *lib);
} while (0)
/* Batches */
/* DRWState is a bitmask that stores the current render state and the desired render state. Based
/**
* DRWState is a bit-mask that stores the current render state and the desired render state. Based
* on the differences the minimum state changes can be invoked to setup the desired render state.
*
* The Write Stencil, Stencil test, Depth test and Blend state options are mutually exclusive,
* therefore they aren't ordered as a bit mask. */
* therefore they aren't ordered as a bit mask.
*/
typedef enum {
/** Write mask */
DRW_STATE_WRITE_DEPTH = (1 << 0),
@ -408,7 +419,9 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial *material);
/* return final visibility */
/**
* Return final visibility.
*/
typedef bool(DRWCallVisibilityFn)(bool vis_in, void *user_data);
void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
@ -418,11 +431,15 @@ void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
bool bypass_culling,
void *user_data);
/* If ob is NULL, unit modelmatrix is assumed and culling is bypassed. */
/**
* If ob is NULL, unit modelmatrix is assumed and culling is bypassed.
*/
#define DRW_shgroup_call(shgroup, geom, ob) \
DRW_shgroup_call_ex(shgroup, ob, NULL, geom, false, NULL)
/* Same as DRW_shgroup_call but override the obmat. Not culled. */
/**
* Same as #DRW_shgroup_call but override the `obmat`. Not culled.
*/
#define DRW_shgroup_call_obmat(shgroup, geom, obmat) \
DRW_shgroup_call_ex(shgroup, NULL, obmat, geom, false, NULL)
@ -431,12 +448,17 @@ void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
#define DRW_shgroup_call_with_callback(shgroup, geom, ob, user_data) \
DRW_shgroup_call_ex(shgroup, ob, NULL, geom, false, user_data)
/* Same as DRW_shgroup_call but bypass culling even if ob is not NULL. */
/**
* Same as #DRW_shgroup_call but bypass culling even if ob is not NULL.
*/
#define DRW_shgroup_call_no_cull(shgroup, geom, ob) \
DRW_shgroup_call_ex(shgroup, ob, NULL, geom, true, NULL)
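For reference, a small sketch of a typical cache-populate call using the macros above (`shgrp` is a previously created shading group; a NULL object means unit model matrix and no culling, as noted):

struct GPUBatch *geom = DRW_cache_fullscreen_quad_get();
DRW_shgroup_call(shgrp, geom, NULL);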
void DRW_shgroup_call_range(
DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint v_sta, uint v_ct);
/**
* A count of 0 instances will use the default number of instances in the batch.
*/
void DRW_shgroup_call_instance_range(
DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint i_sta, uint i_ct);
@ -447,12 +469,17 @@ void DRW_shgroup_call_compute(DRWShadingGroup *shgroup,
void DRW_shgroup_call_procedural_points(DRWShadingGroup *sh, Object *ob, uint point_count);
void DRW_shgroup_call_procedural_lines(DRWShadingGroup *sh, Object *ob, uint line_count);
void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *sh, Object *ob, uint tri_count);
/* Warning: Only use with Shaders that have IN_PLACE_INSTANCES defined. */
/**
* \warning Only use with Shaders that have `IN_PLACE_INSTANCES` defined.
* TODO: Should be removed.
*/
void DRW_shgroup_call_instances(DRWShadingGroup *shgroup,
Object *ob,
struct GPUBatch *geom,
uint count);
/* Warning: Only use with Shaders that have INSTANCED_ATTR defined. */
/**
* \warning Only use with Shaders that have INSTANCED_ATTR defined.
*/
void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup,
Object *ob,
struct GPUBatch *geom,
@ -477,13 +504,20 @@ void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint
DRW_buffer_add_entry_array(buffer, array, (sizeof(array) / sizeof(*array))); \
} while (0)
/* Can only be called during iter phase. */
/**
* Can only be called during iteration phase.
*/
uint32_t DRW_object_resource_id_get(Object *ob);
/**
* State is added to #Pass.state while drawing.
* Use to temporarily enable draw options.
*/
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state);
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state);
/* Reminders:
/**
* Reminders:
* - (compare_mask & reference) is what is tested against (compare_mask & stencil_value)
* stencil_value being the value stored in the stencil buffer.
* - (write_mask & reference) is what gets written if the test condition is fulfilled.
@ -492,10 +526,14 @@ void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup,
uint write_mask,
uint reference,
uint compare_mask);
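A hedged example of the reminders above (values are illustrative; the comparison itself comes from the pass state, e.g. #DRW_STATE_STENCIL_EQUAL): write bit 0x1 where the test passes, and pass only where bit 0x2 is already set in the stencil buffer:

DRW_shgroup_stencil_set(shgrp,
                        0x1,  /* write_mask: only this bit of `reference` gets written. */
                        0x3,  /* reference: provides both the written bit and the compared bit. */
                        0x2); /* compare_mask: only this bit is tested against the stored value. */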
/* TODO: remove this function. Obsolete version. mask is actually reference value. */
/**
* TODO: remove this function. Obsolete version; `mask` is actually the reference value.
*/
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask);
/* Issue a clear command. */
/**
* Issue a clear command.
*/
void DRW_shgroup_clear_framebuffer(DRWShadingGroup *shgroup,
eGPUFrameBufferBits channels,
uchar r,
@ -541,7 +579,6 @@ void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup,
const char *name,
const float *value,
int arraysize);
/* Boolean are expected to be 4bytes longs for opengl! */
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
const char *name,
const int *value,
@ -564,10 +601,14 @@ void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup,
int arraysize);
void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3]);
void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4]);
/* Only to be used when image load store is supported (GPU_shader_image_load_store_support()). */
/**
* Only to be used when image load store is supported (#GPU_shader_image_load_store_support()).
*/
void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex);
void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex);
/* Store value instead of referencing it. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value);
void DRW_shgroup_uniform_ivec2_copy(DRWShadingGroup *shgroup, const char *name, const int *value);
void DRW_shgroup_uniform_ivec3_copy(DRWShadingGroup *shgroup, const char *name, const int *value);
@ -587,14 +628,29 @@ void DRW_shgroup_vertex_buffer(DRWShadingGroup *shgroup,
bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup);
/* Passes */
/* Passes. */
DRWPass *DRW_pass_create(const char *name, DRWState state);
/**
* Create an instance of the original pass that will execute the same drawcalls but with its own
* #DRWState.
*/
DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState state);
/**
* Link two passes so that they are both rendered if the first one is being drawn.
*/
void DRW_pass_link(DRWPass *first, DRWPass *second);
void DRW_pass_foreach_shgroup(DRWPass *pass,
void (*callback)(void *userData, DRWShadingGroup *shgroup),
void *userData);
/**
* Sort Shading groups by decreasing Z of their first draw call.
* This is useful for order-dependent effects such as alpha blending.
*/
void DRW_pass_sort_shgroup_z(DRWPass *pass);
/**
* Reverse Shading group submission order.
*/
void DRW_pass_sort_shgroup_reverse(DRWPass *pass);
bool DRW_pass_is_empty(DRWPass *pass);
@ -603,56 +659,113 @@ bool DRW_pass_is_empty(DRWPass *pass);
#define DRW_PASS_INSTANCE_CREATE(pass, original, state) \
(pass = DRW_pass_create_instance(#pass, (original), state))
/* Views */
/* Views. */
/**
* Create a view with culling.
*/
DRWView *DRW_view_create(const float viewmat[4][4],
const float winmat[4][4],
const float (*culling_viewmat)[4],
const float (*culling_winmat)[4],
DRWCallVisibilityFn *visibility_fn);
/**
* Create a view with culling done by another view.
*/
DRWView *DRW_view_create_sub(const DRWView *parent_view,
const float viewmat[4][4],
const float winmat[4][4]);
/**
* Update matrices of a view created with #DRW_view_create.
*/
void DRW_view_update(DRWView *view,
const float viewmat[4][4],
const float winmat[4][4],
const float (*culling_viewmat)[4],
const float (*culling_winmat)[4]);
/**
* Update matrices of a view created with #DRW_view_create_sub.
*/
void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4]);
/**
* \return default view if it is a viewport render.
*/
const DRWView *DRW_view_default_get(void);
/**
* MUST only be called once per render and only in render mode. Sets default view.
*/
void DRW_view_default_set(DRWView *view);
/**
* \warning Only use in render AND only if you are going to set view_default again.
*/
void DRW_view_reset(void);
/**
* Set active view for rendering.
*/
void DRW_view_set_active(DRWView *view);
const DRWView *DRW_view_get_active(void);
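Putting the view API above together, a minimal sketch (the pass name is hypothetical): create a sub-view that reuses the default view's culling, make it active, draw, then restore:

const DRWView *default_view = DRW_view_default_get();
DRWView *sub_view = DRW_view_create_sub(default_view, viewmat, winmat);
DRW_view_set_active(sub_view);
DRW_draw_pass(psl->my_pass);
DRW_view_set_active(NULL); /* Assumption: NULL restores the default view. */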
/**
* This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
* and if the shaders have support for it (see usage of gl_ClipDistance).
* \note planes must be in world space.
*/
void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len);
void DRW_view_camtexco_set(DRWView *view, float texco[4]);
/* For all getters, if view is NULL, default view is assumed. */
void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse);
void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse);
void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse);
/**
* \return world space frustum corners.
*/
void DRW_view_frustum_corners_get(const DRWView *view, BoundBox *corners);
/**
* \return world space frustum sides as planes.
* See #draw_frustum_culling_planes_calc() for the plane order.
*/
void DRW_view_frustum_planes_get(const DRWView *view, float planes[6][4]);
/* These are in view-space, so negative if in perspective.
* Extract near and far clip distance from the projection matrix. */
/**
* These are in view-space, so negative if in perspective.
* Extract near and far clip distance from the projection matrix.
*/
float DRW_view_near_distance_get(const DRWView *view);
float DRW_view_far_distance_get(const DRWView *view);
bool DRW_view_is_persp_get(const DRWView *view);
/* Culling, return true if object is inside view frustum. */
/**
* \return True if the given BoundSphere intersects the current view frustum.
* bsphere must be in world space.
*/
bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere);
/**
* \return True if the given BoundBox intersects the current view frustum.
* bbox must be in world space.
*/
bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox);
/**
* \return True if the view frustum is inside or intersects the given plane.
* plane must be in world space.
*/
bool DRW_culling_plane_test(const DRWView *view, const float plane[4]);
/**
* Return True if the given box intersects the current view frustum.
* This function will have to be replaced when world-space bounding boxes per object are implemented.
*/
bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3]);
void DRW_culling_frustum_corners_get(const DRWView *view, BoundBox *corners);
void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4]);
/* Viewport */
/* Viewport. */
const float *DRW_viewport_size_get(void);
const float *DRW_viewport_invert_size_get(void);
@ -672,18 +785,35 @@ void DRW_render_object_iter(void *vedata,
struct Object *ob,
struct RenderEngine *engine,
struct Depsgraph *depsgraph));
/**
* Must run after all instance data has been added.
*/
void DRW_render_instance_buffer_finish(void);
/**
* \warning Changing frame might free the #ViewLayerEngineData.
*/
void DRW_render_set_time(struct RenderEngine *engine,
struct Depsgraph *depsgraph,
int frame,
float subframe);
/**
* \warning only use for custom pipeline. 99% of the time, you don't want to use this.
*/
void DRW_render_viewport_size_set(const int size[2]);
/**
* Assumes a valid GL context is bound (and that the gl_context_mutex has been acquired).
* This function only sets up DST and executes the given function.
* \warning Similar to #DRW_render_to_image, you cannot use the default lists (dfbl & dtxl).
*/
void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
struct Depsgraph *depsgraph,
void (*callback)(void *vedata, void *user_data),
void *user_data);
/**
* Used when the render engine wants to redo another cache populate inside the same render frame.
*/
void DRW_cache_restart(void);
/* ViewLayers */
@ -701,11 +831,26 @@ DrawData *DRW_drawdata_ensure(ID *id,
size_t size,
DrawDataInitCb init_cb,
DrawDataFreeCb free_cb);
/**
* Return NULL if not a dupli, otherwise a pointer of pointer to the engine data.
*/
void **DRW_duplidata_get(void *vedata);
/* Settings */
/* Settings. */
bool DRW_object_is_renderable(const struct Object *ob);
/**
* Does `ob` need to be rendered in edit mode?
*
* When using duplicate linked meshes, objects that are not in edit-mode will be drawn
* as if they were in edit mode when another object with the same mesh is in edit mode.
* This will not be the case when one of the objects is influenced by modifiers.
*/
bool DRW_object_is_in_edit_mode(const struct Object *ob);
/**
* Return whether this object is visible depending on whether
* we are rendering or drawing in the viewport.
*/
int DRW_object_visibility_in_active_context(const struct Object *ob);
bool DRW_object_is_flat_normal(const struct Object *ob);
bool DRW_object_use_hide_faces(const struct Object *ob);
@ -717,31 +862,76 @@ struct Object *DRW_object_get_dupli_parent(const struct Object *ob);
struct DupliObject *DRW_object_get_dupli(const struct Object *ob);
/* Draw commands */
void DRW_draw_pass(DRWPass *pass);
/**
* Draw only a subset of shgroups. Used in special situations such as grease pencil strokes.
*/
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group);
void DRW_draw_callbacks_pre_scene(void);
void DRW_draw_callbacks_post_scene(void);
/**
* Reset state to not interfere with other UI draw-call.
*/
void DRW_state_reset_ex(DRWState state);
void DRW_state_reset(void);
/**
* Use with care, intended so selection code can override the passes' depth settings,
* which is important for selection to work properly.
*
* Should be set in the main draw loop, cleared afterwards.
*/
void DRW_state_lock(DRWState state);
/* Selection */
/* Selection. */
void DRW_select_load_id(uint id);
/* Draw State */
/* Draw State. */
/**
* When false, drawing doesn't output to a pixel buffer
* e.g. occlusion queries, or when we have already set up a context to draw in.
*/
bool DRW_state_is_fbo(void);
/**
* For when engines need to know if this is drawing for selection or not.
*/
bool DRW_state_is_select(void);
bool DRW_state_is_material_select(void);
bool DRW_state_is_depth(void);
/**
* Whether we are rendering for an image
*/
bool DRW_state_is_image_render(void);
/**
* Whether we are rendering only the render engine,
* or if we should also render the mode engines.
*/
bool DRW_state_is_scene_render(void);
/**
* Whether we are doing a simple OpenGL render.
*/
bool DRW_state_is_opengl_render(void);
bool DRW_state_is_playback(void);
/**
* Is the user navigating the region.
*/
bool DRW_state_is_navigating(void);
/**
* Should text draw in this mode?
*/
bool DRW_state_show_text(void);
/**
* Should we draw support elements
* (object centers, selection outline, probe data, ...)?
*/
bool DRW_state_draw_support(void);
/**
* Whether we should render the background
*/
bool DRW_state_draw_background(void);
/* Avoid too many lookups while drawing */

View File

@ -354,7 +354,6 @@ static GPUVertBuf *sphere_wire_vbo(const float rad, int flag)
}
/* Quads */
/* Use this one for rendering fullscreen passes. For 3D objects use DRW_cache_quad_get(). */
GPUBatch *DRW_cache_fullscreen_quad_get(void)
{
if (!SHC.drw_fullscreen_quad) {
@ -388,7 +387,6 @@ GPUBatch *DRW_cache_fullscreen_quad_get(void)
return SHC.drw_fullscreen_quad;
}
/* Just a regular quad with 4 vertices. */
GPUBatch *DRW_cache_quad_get(void)
{
if (!SHC.drw_quad) {
@ -409,7 +407,6 @@ GPUBatch *DRW_cache_quad_get(void)
return SHC.drw_quad;
}
/* Just a regular quad with 4 vertices - wires. */
GPUBatch *DRW_cache_quad_wires_get(void)
{
if (!SHC.drw_quad_wires) {
@ -430,7 +427,6 @@ GPUBatch *DRW_cache_quad_wires_get(void)
return SHC.drw_quad_wires;
}
/* Grid */
GPUBatch *DRW_cache_grid_get(void)
{
if (!SHC.drw_grid) {
@ -911,7 +907,6 @@ GPUBatch *DRW_cache_object_surface_get(Object *ob)
}
}
/* Returns the vertbuf used by shaded surface batch. */
GPUVertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob)
{
Mesh *me = BKE_object_get_evaluated_mesh(ob);
@ -1260,7 +1255,6 @@ GPUBatch *DRW_cache_empty_capsule_cap_get(void)
#undef NSEGMENTS
}
/* Force Field */
GPUBatch *DRW_cache_field_wind_get(void)
{
#define CIRCLE_RESOL 32
@ -1336,7 +1330,6 @@ GPUBatch *DRW_cache_field_vortex_get(void)
#undef SPIRAL_RESOL
}
/* Screen-aligned circle. */
GPUBatch *DRW_cache_field_curve_get(void)
{
#define CIRCLE_RESOL 32
@ -1425,7 +1418,6 @@ GPUBatch *DRW_cache_field_cone_limit_get(void)
#undef CIRCLE_RESOL
}
/* Screen-aligned dashed circle */
GPUBatch *DRW_cache_field_sphere_limit_get(void)
{
#define CIRCLE_RESOL 32
@ -2872,7 +2864,6 @@ GPUBatch *DRW_cache_mesh_surface_edges_get(Object *ob)
return DRW_mesh_batch_cache_get_surface_edges(ob->data);
}
/* Return list of batches with length equal to max(1, totcol). */
GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len)
@ -2881,7 +2872,6 @@ GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
return DRW_mesh_batch_cache_get_surface_shaded(ob->data, gpumat_array, gpumat_array_len);
}
/* Return list of batches with length equal to max(1, totcol). */
GPUBatch **DRW_cache_mesh_surface_texpaint_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
@ -3078,7 +3068,6 @@ GPUBatch *DRW_cache_surf_loose_edges_get(Object *ob)
return NULL;
}
/* Return list of batches */
GPUBatch **DRW_cache_surf_surface_shaded_get(Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len)
@ -3275,7 +3264,6 @@ GPUBatch *DRW_cache_particles_get_prim(int type)
return NULL;
}
/* 3D cursor */
GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
{
GPUBatch **drw_cursor = crosshair_lines ? &SHC.drw_cursor : &SHC.drw_cursor_only_circle;
@ -3450,9 +3438,10 @@ void drw_batch_cache_generate_requested(Object *ob)
}
}
/* Note: Logic here is duplicated from #drw_batch_cache_generate_requested. */
void drw_batch_cache_generate_requested_evaluated_mesh(Object *ob)
{
/* NOTE: Logic here is duplicated from #drw_batch_cache_generate_requested. */
const DRWContextState *draw_ctx = DRW_context_state_get();
const Scene *scene = draw_ctx->scene;
const enum eContextObjectMode mode = CTX_data_mode_enum_ex(

View File

@ -36,7 +36,9 @@ struct Volume;
struct VolumeGrid;
struct bGPDstroke;
/* Shape resolution level of detail */
/**
* Shape resolution level of detail.
*/
typedef enum eDRWLevelOfDetail {
DRW_LOD_LOW = 0,
DRW_LOD_MEDIUM = 1,
@ -52,9 +54,15 @@ struct GPUBatch *DRW_cache_cursor_get(bool crosshair_lines);
/* Common Shapes */
struct GPUBatch *DRW_cache_groundline_get(void);
/* Grid */
struct GPUBatch *DRW_cache_grid_get(void);
/**
* Use this one for rendering full-screen passes. For 3D objects use #DRW_cache_quad_get().
*/
struct GPUBatch *DRW_cache_fullscreen_quad_get(void);
/* Just a regular quad with 4 vertices. */
struct GPUBatch *DRW_cache_quad_get(void);
/* Just a regular quad with 4 vertices - wires. */
struct GPUBatch *DRW_cache_quad_wires_get(void);
struct GPUBatch *DRW_cache_cube_get(void);
struct GPUBatch *DRW_cache_normal_arrow_get(void);
@ -62,9 +70,11 @@ struct GPUBatch *DRW_cache_normal_arrow_get(void);
struct GPUBatch *DRW_cache_sphere_get(const eDRWLevelOfDetail level_of_detail);
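For example, requesting a shared sphere batch at medium detail and submitting it (a sketch; `shgrp` and `ob` are assumed to exist, and the batch is owned by the cache):

struct GPUBatch *sphere = DRW_cache_sphere_get(DRW_LOD_MEDIUM);
DRW_shgroup_call(shgrp, sphere, ob);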
/* Dummy VBOs */
struct GPUBatch *DRW_gpencil_dummy_buffer_get(void);
/* Common Object */
struct GPUBatch *DRW_cache_object_all_edges_get(struct Object *ob);
struct GPUBatch *DRW_cache_object_edge_detection_get(struct Object *ob, bool *r_is_manifold);
struct GPUBatch *DRW_cache_object_surface_get(struct Object *ob);
@ -75,6 +85,9 @@ struct GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
struct GPUBatch *DRW_cache_object_face_wireframe_get(struct Object *ob);
int DRW_cache_object_material_count_get(struct Object *ob);
/**
* Returns the vertbuf used by shaded surface batch.
*/
struct GPUVertBuf *DRW_cache_object_pos_vertbuf_get(struct Object *ob);
/* Empties */
@ -89,15 +102,23 @@ struct GPUBatch *DRW_cache_empty_capsule_cap_get(void);
struct GPUBatch *DRW_cache_empty_capsule_body_get(void);
/* Force Field */
struct GPUBatch *DRW_cache_field_wind_get(void);
struct GPUBatch *DRW_cache_field_force_get(void);
struct GPUBatch *DRW_cache_field_vortex_get(void);
/* Screen-aligned circle. */
struct GPUBatch *DRW_cache_field_curve_get(void);
struct GPUBatch *DRW_cache_field_tube_limit_get(void);
struct GPUBatch *DRW_cache_field_cone_limit_get(void);
/* Screen-aligned dashed circle */
struct GPUBatch *DRW_cache_field_sphere_limit_get(void);
/* Lights */
struct GPUBatch *DRW_cache_light_point_lines_get(void);
struct GPUBatch *DRW_cache_light_sun_lines_get(void);
struct GPUBatch *DRW_cache_light_spot_lines_get(void);
@ -106,6 +127,7 @@ struct GPUBatch *DRW_cache_light_area_square_lines_get(void);
struct GPUBatch *DRW_cache_light_spot_volume_get(void);
/* Camera */
struct GPUBatch *DRW_cache_camera_frame_get(void);
struct GPUBatch *DRW_cache_camera_volume_get(void);
struct GPUBatch *DRW_cache_camera_volume_wire_get(void);
@ -114,14 +136,17 @@ struct GPUBatch *DRW_cache_camera_tria_get(void);
struct GPUBatch *DRW_cache_camera_distances_get(void);
/* Speaker */
struct GPUBatch *DRW_cache_speaker_get(void);
/* Probe */
struct GPUBatch *DRW_cache_lightprobe_cube_get(void);
struct GPUBatch *DRW_cache_lightprobe_grid_get(void);
struct GPUBatch *DRW_cache_lightprobe_planar_get(void);
/* Bones */
struct GPUBatch *DRW_cache_bone_octahedral_get(void);
struct GPUBatch *DRW_cache_bone_octahedral_wire_get(void);
struct GPUBatch *DRW_cache_bone_box_get(void);
@ -136,15 +161,22 @@ struct GPUBatch *DRW_cache_bone_dof_sphere_get(void);
struct GPUBatch *DRW_cache_bone_dof_lines_get(void);
/* Meshes */
struct GPUBatch *DRW_cache_mesh_all_verts_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_all_edges_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_loose_edges_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_edge_detection_get(struct Object *ob, bool *r_is_manifold);
struct GPUBatch *DRW_cache_mesh_surface_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_edges_get(struct Object *ob);
/**
* Return list of batches with length equal to `max(1, totcol)`.
*/
struct GPUBatch **DRW_cache_mesh_surface_shaded_get(struct Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
/**
* Return list of batches with length equal to `max(1, totcol)`.
*/
struct GPUBatch **DRW_cache_mesh_surface_texpaint_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_texpaint_single_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_vertpaint_get(struct Object *ob);
@ -154,19 +186,27 @@ struct GPUBatch *DRW_cache_mesh_surface_mesh_analysis_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_face_wireframe_get(struct Object *ob);
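A minimal usage sketch of the per-material surface batches above, assuming an engine's cache-populate step with hypothetical `gpumat_array` / `shgrp_array` arrays and the common `DRW_shgroup_call()` submission helper (declared elsewhere in the draw manager):

static void mesh_shaded_surfaces_example(struct Object *ob,
                                         struct GPUMaterial **gpumat_array,
                                         struct DRWShadingGroup **shgrp_array)
{
  const int materials_len = DRW_cache_object_material_count_get(ob);
  struct GPUBatch **surfaces = DRW_cache_mesh_surface_shaded_get(ob, gpumat_array, materials_len);

  for (int i = 0; i < materials_len; i++) {
    /* A NULL check keeps the sketch safe for material slots without geometry. */
    if (surfaces[i] != NULL) {
      DRW_shgroup_call(shgrp_array[i], surfaces[i], ob);
    }
  }
}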
/* Curve */
struct GPUBatch *DRW_cache_curve_edge_wire_get(struct Object *ob);
/* edit-mode */
struct GPUBatch *DRW_cache_curve_edge_normal_get(struct Object *ob);
struct GPUBatch *DRW_cache_curve_edge_overlay_get(struct Object *ob);
struct GPUBatch *DRW_cache_curve_vert_overlay_get(struct Object *ob);
/* Font */
struct GPUBatch *DRW_cache_text_edge_wire_get(struct Object *ob);
/* Surface */
struct GPUBatch *DRW_cache_surf_surface_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_edge_wire_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_loose_edges_get(struct Object *ob);
/* Return list of batches */
struct GPUBatch **DRW_cache_surf_surface_shaded_get(struct Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
@ -174,11 +214,13 @@ struct GPUBatch *DRW_cache_surf_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_edge_detection_get(struct Object *ob, bool *r_is_manifold);
/* Lattice */
struct GPUBatch *DRW_cache_lattice_verts_get(struct Object *ob);
struct GPUBatch *DRW_cache_lattice_wire_get(struct Object *ob, bool use_weight);
struct GPUBatch *DRW_cache_lattice_vert_overlay_get(struct Object *ob);
/* Particles */
struct GPUBatch *DRW_cache_particles_get_hair(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md);
@ -196,6 +238,7 @@ struct GPUBatch *DRW_cache_particles_get_edit_tip_points(struct Object *object,
struct GPUBatch *DRW_cache_particles_get_prim(int type);
/* Metaball */
struct GPUBatch *DRW_cache_mball_surface_get(struct Object *ob);
struct GPUBatch **DRW_cache_mball_surface_shaded_get(struct Object *ob,
struct GPUMaterial **gpumat_array,
@ -204,6 +247,7 @@ struct GPUBatch *DRW_cache_mball_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_mball_edge_detection_get(struct Object *ob, bool *r_is_manifold);
/* Hair */
struct GPUBatch *DRW_cache_hair_surface_get(struct Object *ob);
struct GPUBatch **DRW_cache_hair_surface_shaded_get(struct Object *ob,
struct GPUMaterial **gpumat_array,
@ -212,10 +256,12 @@ struct GPUBatch *DRW_cache_hair_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_hair_edge_detection_get(struct Object *ob, bool *r_is_manifold);
/* PointCloud */
struct GPUBatch *DRW_cache_pointcloud_get_dots(struct Object *obj);
struct GPUBatch *DRW_cache_pointcloud_surface_get(struct Object *obj);
/* Volume */
typedef struct DRWVolumeGrid {
struct DRWVolumeGrid *next, *prev;
@ -240,6 +286,7 @@ struct GPUBatch *DRW_cache_volume_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_volume_selection_surface_get(struct Object *ob);
/* GPencil */
struct GPUBatch *DRW_cache_gpencil_strokes_get(struct Object *ob, int cfra);
struct GPUBatch *DRW_cache_gpencil_fills_get(struct Object *ob, int cfra);
struct GPUBatch *DRW_cache_gpencil_edit_lines_get(struct Object *ob, int cfra);
@ -252,6 +299,9 @@ struct GPUBatch *DRW_cache_gpencil_sbuffer_fill_get(struct Object *ob);
struct GPUBatch *DRW_cache_gpencil_face_wireframe_get(struct Object *ob);
struct bGPDstroke *DRW_cache_gpencil_sbuffer_stroke_data_get(struct Object *ob);
/**
 * Sbuffer batches are temporary. We need to clear them after drawing.
*/
void DRW_cache_gpencil_sbuffer_clear(struct Object *ob);
#ifdef __cplusplus

View File

@ -335,9 +335,6 @@ static int *mesh_render_data_mat_tri_len_build(MeshRenderData *mr)
/** \name Mesh/BMesh Interface (indirect, partially cached access to complex data).
* \{ */
/**
* Part of the creation of the #MeshRenderData that happens in a thread.
*/
void mesh_render_data_update_looptris(MeshRenderData *mr,
const eMRIterType iter_type,
const eMRDataType data_flag)
@ -440,10 +437,6 @@ void mesh_render_data_update_normals(MeshRenderData *mr, const eMRDataType data_
}
}
/**
* \param is_mode_active: When true, use the modifiers from the edit-data,
* otherwise don't use modifiers as they are not from this object.
*/
MeshRenderData *mesh_render_data_create(Mesh *me,
const bool is_editmode,
const bool is_paint_mode,

View File

@ -47,7 +47,10 @@ struct bGPdata;
extern "C" {
#endif
/* Expose via BKE callbacks */
/* -------------------------------------------------------------------- */
/** \name Expose via BKE callbacks
* \{ */
void DRW_mball_batch_cache_dirty_tag(struct MetaBall *mb, int mode);
void DRW_mball_batch_cache_validate(struct MetaBall *mb);
void DRW_mball_batch_cache_free(struct MetaBall *mb);
@ -82,15 +85,34 @@ void DRW_volume_batch_cache_dirty_tag(struct Volume *volume, int mode);
void DRW_volume_batch_cache_validate(struct Volume *volume);
void DRW_volume_batch_cache_free(struct Volume *volume);
/* Garbage collection */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Garbage Collection
* \{ */
void DRW_batch_cache_free_old(struct Object *ob, int ctime);
/**
 * Thread safety needs to be assured by the caller. Don't call this during drawing.
 * \note For now this only frees the shading batches / VBOs if any CD layers are no longer needed.
*/
void DRW_mesh_batch_cache_free_old(struct Mesh *me, int ctime);
/* Generic */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Generic
* \{ */
void DRW_vertbuf_create_wiredata(struct GPUVertBuf *vbo, const int vert_len);
/* Curve */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Curve
* \{ */
void DRW_curve_batch_cache_create_requested(struct Object *ob, const struct Scene *scene);
int DRW_curve_material_count_get(struct Curve *cu);
@ -107,7 +129,12 @@ struct GPUBatch **DRW_curve_batch_cache_get_surface_shaded(struct Curve *cu,
uint gpumat_array_len);
struct GPUBatch *DRW_curve_batch_cache_get_wireframes_face(struct Curve *cu);
/* Metaball */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Metaball
* \{ */
int DRW_metaball_material_count_get(struct MetaBall *mb);
struct GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(struct Object *ob);
@ -119,7 +146,12 @@ struct GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(struct Object *ob)
struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob,
bool *r_is_manifold);
/* DispList */
/** \} */
/* -------------------------------------------------------------------- */
/** \name DispList
* \{ */
void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb,
struct GPUVertBuf *vbo,
const struct Scene *scene);
@ -138,17 +170,32 @@ void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb,
struct GPUIndexBuf *ibo,
bool *r_is_manifold);
/* Lattice */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Lattice
* \{ */
struct GPUBatch *DRW_lattice_batch_cache_get_all_edges(struct Lattice *lt,
bool use_weight,
const int actdef);
struct GPUBatch *DRW_lattice_batch_cache_get_all_verts(struct Lattice *lt);
struct GPUBatch *DRW_lattice_batch_cache_get_edit_verts(struct Lattice *lt);
/* Hair */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Hair
* \{ */
int DRW_hair_material_count_get(struct Hair *hair);
/* PointCloud */
/** \} */
/* -------------------------------------------------------------------- */
/** \name PointCloud
* \{ */
int DRW_pointcloud_material_count_get(struct PointCloud *pointcloud);
struct GPUBatch *DRW_pointcloud_batch_cache_get_dots(struct Object *ob);
@ -157,13 +204,26 @@ struct GPUBatch **DRW_cache_pointcloud_surface_shaded_get(struct Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
/* Volume */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Volume
* \{ */
int DRW_volume_material_count_get(struct Volume *volume);
struct GPUBatch *DRW_volume_batch_cache_get_wireframes_face(struct Volume *volume);
struct GPUBatch *DRW_volume_batch_cache_get_selection_surface(struct Volume *volume);
/* Mesh */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Mesh
* \{ */
/**
* Can be called for any surface type. Mesh *me is the final mesh.
*/
void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
struct Object *ob,
struct Mesh *me,
@ -186,7 +246,13 @@ struct GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_weights(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(struct Mesh *me);
/* edit-mesh drawing */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Edit-Mesh Drawing
* \{ */
struct GPUBatch *DRW_mesh_batch_cache_get_edit_triangles(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edit_vertices(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edit_edges(struct Mesh *me);
@ -194,14 +260,39 @@ struct GPUBatch *DRW_mesh_batch_cache_get_edit_vnors(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edit_lnors(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edit_facedots(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edit_skin_roots(struct Mesh *me);
/* edit-mesh selection */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Edit-mesh Selection
* \{ */
struct GPUBatch *DRW_mesh_batch_cache_get_triangles_with_select_id(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_facedots_with_select_id(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edges_with_select_id(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_verts_with_select_id(struct Mesh *me);
/* Object mode Wireframe overlays */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Object Mode Wireframe Overlays
* \{ */
struct GPUBatch *DRW_mesh_batch_cache_get_wireframes_face(struct Mesh *me);
/* edit-mesh UV editor */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Edit-mesh UV Editor
* \{ */
/**
* Creates the #GPUBatch for drawing the UV Stretching Area Overlay.
 * Optionally retrieves the total area or total UV area of the mesh.
 *
 * The `cache->tot_area` and `cache->tot_uv_area` calculations are
 * only valid after calling `DRW_mesh_batch_cache_create_requested`.
*/
struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(struct Mesh *me,
float **tot_area,
float **tot_uv_area);
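As a sketch of the note above (names local to this example), the returned pointers can be kept and read once the batch cache request has been fulfilled:

static void uv_stretch_ratio_example(struct Mesh *me, float *r_ratio)
{
  float *tot_area = NULL, *tot_uv_area = NULL;
  (void)DRW_mesh_batch_cache_get_edituv_faces_stretch_area(me, &tot_area, &tot_uv_area);

  /* Only meaningful after DRW_mesh_batch_cache_create_requested() has run. */
  *r_ratio = (tot_area && tot_uv_area && *tot_area > 0.0f) ? (*tot_uv_area / *tot_area) : 1.0f;
}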
@ -210,11 +301,22 @@ struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(struct Mesh *me);
/* For Image UV editor. */
/** \} */
/* -------------------------------------------------------------------- */
/** \name For Image UV Editor
* \{ */
struct GPUBatch *DRW_mesh_batch_cache_get_uv_edges(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(struct Mesh *me);
/* For direct data access. */
/** \} */
/* -------------------------------------------------------------------- */
/** \name For Direct Data Access
* \{ */
struct GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(struct Mesh *me);
struct GPUVertBuf *DRW_curve_batch_cache_pos_vertbuf_get(struct Curve *cu);
struct GPUVertBuf *DRW_mball_batch_cache_pos_vertbuf_get(struct Object *ob);
@ -250,7 +352,12 @@ enum {
/* Beware to not go over 1 << 7 (it's a byte flag). */
};
/* Particles */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Particles
* \{ */
struct GPUBatch *DRW_particles_batch_cache_get_hair(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md);
@ -266,6 +373,9 @@ struct GPUBatch *DRW_particles_batch_cache_get_edit_inner_points(struct Object *
struct GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(struct Object *object,
struct ParticleSystem *psys,
struct PTCacheEdit *edit);
/** \} */
#ifdef __cplusplus
}
#endif

View File

@ -671,7 +671,6 @@ GPUBatch *DRW_cache_gpencil_sbuffer_fill_get(Object *ob)
return gpd->runtime.sbuffer_fill_batch;
}
/* Sbuffer batches are temporary. We need to clear it after drawing */
void DRW_cache_gpencil_sbuffer_clear(Object *ob)
{
bGPdata *gpd = (bGPdata *)ob->data;

View File

@ -333,7 +333,6 @@ static void hair_batch_cache_ensure_procedural_indices(Hair *hair,
prim_type, vbo, GPU_indexbuf_build(&elb), GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}
/* Ensure all textures and buffers needed for GPU accelerated drawing. */
bool hair_ensure_procedural_data(Object *object,
ParticleHairCache **r_hair_cache,
GPUMaterial *gpu_material,

View File

@ -1372,11 +1372,6 @@ static void edituv_request_active_uv(MeshBatchCache *cache, Mesh *me)
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
/* Creates the GPUBatch for drawing the UV Stretching Area Overlay.
* Optional retrieves the total area or total uv area of the mesh.
*
* The `cache->tot_area` and cache->tot_uv_area` update are calculation are
* only valid after calling `DRW_mesh_batch_cache_create_requested`. */
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Mesh *me,
float **tot_area,
float **tot_uv_area)
@ -1456,9 +1451,6 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Mesh *me)
/** \name Grouped batch generation
* \{ */
/* Thread safety need to be assured by caller. Don't call this during drawing.
* NOTE: For now this only free the shading batches / vbo if any cd layers is
* not needed anymore. */
void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
{
MeshBatchCache *cache = me->runtime.batch_cache;
@ -1526,7 +1518,6 @@ static void drw_mesh_batch_cache_check_available(struct TaskGraph *task_graph, M
}
#endif
/* Can be called for any surface type. Mesh *me is the final mesh. */
void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
Object *ob,
Mesh *me,

View File

@ -1674,7 +1674,6 @@ GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(Object *object,
return cache->edit_tip_points;
}
/* Ensure all textures and buffers needed for GPU accelerated drawing. */
bool particles_ensure_procedural_data(Object *object,
ParticleSystem *psys,
ModifierData *md,

View File

@ -175,7 +175,6 @@ void DRW_viewport_colormanagement_set(GPUViewport *viewport)
blender::draw::color_management::viewport_color_management_set(*viewport);
}
/* Draw texture to framebuffer without any color transforms */
void DRW_transform_none(GPUTexture *tex)
{
drw_state_set(DRW_STATE_WRITE_COLOR);

View File

@ -28,6 +28,9 @@ extern "C" {
struct GPUViewport;
/**
* Draw texture to frame-buffer without any color transforms.
*/
void DRW_transform_none(struct GPUTexture *tex);
void DRW_viewport_colormanagement_set(struct GPUViewport *viewport);

View File

@ -41,7 +41,9 @@
#define UI_COLOR_RGBA_FROM_U8(r, g, b, a, v4) \
ARRAY_SET_ITEMS(v4, (float)r / 255.0f, (float)g / 255.0f, (float)b / 255.0f, (float)a / 255.0f)
/* Colors & Constant */
/**
 * Colors & Constants.
*/
struct DRW_Global G_draw = {{{0}}};
static bool weight_ramp_custom = false;
@ -287,10 +289,6 @@ DRWView *DRW_view_create_with_zoffset(const DRWView *parent_view,
/* ******************************************** COLOR UTILS ************************************ */
/* TODO: FINISH. */
/**
* Get the wire color theme_id of an object based on its state
* \a r_color is a way to get a pointer to the static color var associated
*/
int DRW_object_wire_theme_get(Object *ob, ViewLayer *view_layer, float **r_color)
{
const DRWContextState *draw_ctx = DRW_context_state_get();

View File

@ -161,6 +161,10 @@ struct DRWView *DRW_view_create_with_zoffset(const struct DRWView *parent_view,
const struct RegionView3D *rv3d,
float offset);
/**
 * Get the wire color theme_id of an object based on its state.
 * \a r_color is a way to get a pointer to the associated static color variable.
*/
int DRW_object_wire_theme_get(struct Object *ob, struct ViewLayer *view_layer, float **r_color);
float *DRW_color_background_blend_get(int theme_id);
@ -169,13 +173,18 @@ bool DRW_object_axis_orthogonal_to_view(struct Object *ob, int axis);
/* draw_hair.c */
/* This creates a shading group with display hairs.
* The draw call is already added by this function, just add additional uniforms. */
/**
* This creates a shading group with display hairs.
* The draw call is already added by this function, just add additional uniforms.
*/
struct DRWShadingGroup *DRW_shgroup_hair_create_sub(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md,
struct DRWShadingGroup *shgrp,
struct GPUMaterial *gpu_material);
/**
* \note Only valid after #DRW_hair_update().
*/
struct GPUVertBuf *DRW_hair_pos_buffer_get(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md);
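A small sketch of how these two calls pair up in an engine, assuming the caller already owns a shading group and the hair `GPUMaterial` (both passed in here as hypothetical parameters):

static void hair_shading_example(struct Object *ob,
                                 struct ParticleSystem *psys,
                                 struct ModifierData *md,
                                 struct DRWShadingGroup *shgrp,
                                 struct GPUMaterial *gpu_material)
{
  /* The draw call is added internally; only extra uniforms need to be bound on the result. */
  struct DRWShadingGroup *hair_grp = DRW_shgroup_hair_create_sub(ob, psys, md, shgrp, gpu_material);
  UNUSED_VARS(hair_grp);

  /* Only valid after DRW_hair_update() has run. */
  struct GPUVertBuf *pos_buf = DRW_hair_pos_buffer_get(ob, psys, md);
  UNUSED_VARS(pos_buf);
}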
@ -201,6 +210,7 @@ void DRW_smoke_free(struct FluidModifierData *fmd);
void DRW_smoke_free_velocity(struct FluidModifierData *fmd);
/* draw_common.c */
struct DRW_Global {
/** If needed, contains all global/Theme colors
* Add needed theme colors / values to DRW_globals_update() and update UBO

View File

@ -68,7 +68,6 @@ void DRW_debug_polygon_v3(const float (*v)[3], const int vert_len, const float c
}
}
/* NOTE: g_modelmat is still applied on top. */
void DRW_debug_m4(const float m[4][4])
{
float v0[3] = {0.0f, 0.0f, 0.0f};

View File

@ -29,6 +29,9 @@ void DRW_debug_modelmat(const float modelmat[4][4]);
void DRW_debug_line_v3v3(const float v1[3], const float v2[3], const float color[4]);
void DRW_debug_polygon_v3(const float (*v)[3], const int vert_len, const float color[4]);
/**
* \note g_modelmat is still applied on top.
*/
void DRW_debug_m4(const float m[4][4]);
void DRW_debug_m4_as_bbox(const float m[4][4], const float color[4], const bool invert);
void DRW_debug_bbox(const BoundBox *bbox, const float color[4]);
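A quick sketch of calling the debug helpers from engine code during development; `ob` stands for any evaluated object and is an assumption of this example:

static void debug_draw_example(const struct Object *ob)
{
  const float red[4] = {1.0f, 0.0f, 0.0f, 1.0f};
  const float origin[3] = {0.0f, 0.0f, 0.0f};

  DRW_debug_line_v3v3(origin, ob->obmat[3], red); /* World origin to the object's location. */
  DRW_debug_m4(ob->obmat);                        /* Axes of the object's matrix. */
}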

View File

@ -589,9 +589,10 @@ void DRW_fluid_ensure_range_field(FluidModifierData *fmd)
#endif /* WITH_FLUID */
}
/* TODO: Unify with the other #GPU_free_smoke. */
void DRW_smoke_free_velocity(FluidModifierData *fmd)
{
/* TODO: Unify with the other #GPU_free_smoke. */
if (fmd->type & MOD_FLUID_TYPE_DOMAIN && fmd->domain) {
if (fmd->domain->tex_velocity_x) {
GPU_texture_free(fmd->domain->tex_velocity_x);

View File

@ -203,7 +203,6 @@ static ParticleHairCache *drw_hair_particle_cache_get(Object *object,
return cache;
}
/* NOTE: Only valid after DRW_hair_update(). */
GPUVertBuf *DRW_hair_pos_buffer_get(Object *object, ParticleSystem *psys, ModifierData *md)
{
const DRWContextState *draw_ctx = DRW_context_state_get();

View File

@ -93,6 +93,9 @@ typedef struct ParticleHairCache {
void particle_batch_cache_clear_hair(struct ParticleHairCache *hair_cache);
/**
* Ensure all textures and buffers needed for GPU accelerated drawing.
*/
bool particles_ensure_procedural_data(struct Object *object,
struct ParticleSystem *psys,
struct ModifierData *md,
@ -101,6 +104,9 @@ bool particles_ensure_procedural_data(struct Object *object,
int subdiv,
int thickness_res);
/**
* Ensure all textures and buffers needed for GPU accelerated drawing.
*/
bool hair_ensure_procedural_data(struct Object *object,
struct ParticleHairCache **r_hair_cache,
struct GPUMaterial *gpu_material,

View File

@ -112,14 +112,6 @@ static void instancing_batch_references_remove(GPUBatch *batch)
/** \name Instance Buffer Management
* \{ */
/**
* This manager allows to distribute existing batches for instancing
* attributes. This reduce the number of batches creation.
* Querying a batch is done with a vertex format. This format should
* be static so that its pointer never changes (because we are using
* this pointer as identifier [we don't want to check the full format
* that would be too slow]).
*/
GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
GPUVertFormat *format,
int *vert_len)
@ -143,8 +135,6 @@ GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
return handle->buf;
}
/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run.
* Initialization is delayed because instancer or geom could still not be initialized. */
GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUBatch *instancer,
@ -185,7 +175,6 @@ GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
return batch;
}
/* NOTE: Use only with buf allocated via DRW_temp_buffer_request. */
GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUPrimType prim_type)
@ -301,9 +290,6 @@ static void DRW_instance_data_free(DRWInstanceData *idata)
BLI_mempool_destroy(idata->mempool);
}
/**
* Return a pointer to the next instance data space.
*/
void *DRW_instance_data_next(DRWInstanceData *idata)
{
return BLI_mempool_alloc(idata->mempool);
@ -453,7 +439,6 @@ static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
buffer->chunk_bytes = item_size * chunk_size;
}
/** Allocate a chunked UBO with the specified item and chunk size. */
DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size, unsigned int chunk_size)
{
DRWSparseUniformBuf *buf = MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__);
@ -461,7 +446,6 @@ DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size, unsig
return buf;
}
/** Flush data from ordinary memory to UBOs. */
void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer)
{
for (int i = 0; i < buffer->num_chunks; i++) {
@ -474,7 +458,6 @@ void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer)
}
}
/** Clean all buffers and free unused ones. */
void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
{
int max_used_chunk = 0;
@ -517,14 +500,12 @@ void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
BLI_bitmap_set_all(buffer->chunk_used, false, buffer->num_chunks);
}
/** Frees the buffer. */
void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer)
{
DRW_sparse_uniform_buffer_clear(buffer, true);
MEM_freeN(buffer);
}
/** Checks if the buffer contains any allocated chunks. */
bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer)
{
return buffer->num_chunks == 0;
@ -538,7 +519,6 @@ static GPUUniformBuf *drw_sparse_uniform_buffer_get_ubo(DRWSparseUniformBuf *buf
return NULL;
}
/** Bind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
{
GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
@ -547,7 +527,6 @@ void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int
}
}
/** Unbind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
{
GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
@ -556,7 +535,6 @@ void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
}
}
/** Returns a pointer to the given item of the given chunk, allocating memory if necessary. */
void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk, int item)
{
if (chunk >= buffer->num_chunks) {

View File

@ -38,21 +38,41 @@ typedef struct DRWInstanceData DRWInstanceData;
typedef struct DRWInstanceDataList DRWInstanceDataList;
typedef struct DRWSparseUniformBuf DRWSparseUniformBuf;
/**
* Return a pointer to the next instance data space.
*/
void *DRW_instance_data_next(DRWInstanceData *idata);
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size);
/**
 * This manager allows distributing existing batches for instancing
 * attributes. This reduces the number of batch creations.
 * Querying a batch is done with a vertex format. This format should
 * be static so that its pointer never changes (because we use
 * this pointer as the identifier; checking the full format
 * would be too slow).
*/
GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
GPUVertFormat *format,
int *vert_len);
/**
 * \note Does not return a valid drawable batch until #DRW_instance_buffer_finish has run.
 * Initialization is delayed because the instancer or geometry may not be initialized yet.
*/
GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUBatch *instancer,
GPUBatch *geom);
/**
 * \note Use only with a `buf` allocated via #DRW_temp_buffer_request.
*/
GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUPrimType type);
/* Upload all instance data to the GPU as soon as possible. */
/**
* Upload all instance data to the GPU as soon as possible.
*/
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
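A minimal sketch of the request workflow, assuming a hypothetical `line_count` input and leaving the buffer filling out; the static format is required because its pointer is the cache key, as described above:

static void temp_lines_example(DRWInstanceDataList *idatalist, int line_count)
{
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  int vert_len = line_count * 2;
  GPUVertBuf *vbo = DRW_temp_buffer_request(idatalist, &format, &vert_len);
  /* ... fill `vbo`, e.g. with GPU_vertbuf_attr_set() ... */

  GPUBatch *batch = DRW_temp_batch_request(idatalist, vbo, GPU_PRIM_LINES);
  UNUSED_VARS(batch);
  /* `batch` only becomes drawable once DRW_instance_buffer_finish() has run
   * (issued by the draw manager after all requests were made). */
}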
void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist);
@ -60,17 +80,43 @@ void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist);
/* Sparse chunked UBO manager. */
/**
* Allocate a chunked UBO with the specified item and chunk size.
*/
DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size,
unsigned int chunk_size);
/**
* Flush data from ordinary memory to UBOs.
*/
void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer);
/**
* Clean all buffers and free unused ones.
*/
void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all);
/**
* Frees the buffer.
*/
void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer);
/**
* Checks if the buffer contains any allocated chunks.
*/
bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer);
/**
* Bind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty.
*/
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location);
/**
* Unbind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty.
*/
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk);
/**
* Returns a pointer to the given item of the given chunk, allocating memory if necessary.
*/
void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk, int item);
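A compact sketch of the life-cycle of this API, assuming 4x4 float matrices as the item type and using BLI's `unit_m4()` to fill them; chunk and binding numbers are arbitrary for the example:

static void sparse_ubo_example(void)
{
  /* One chunk holds 64 matrices. */
  DRWSparseUniformBuf *buf = DRW_sparse_uniform_buffer_new(sizeof(float[4][4]), 64);

  for (int i = 0; i < 10; i++) {
    float(*mat)[4] = DRW_sparse_uniform_buffer_ensure_item(buf, 0, i); /* Chunk 0, item i. */
    unit_m4(mat);
  }

  DRW_sparse_uniform_buffer_flush(buf);      /* Upload the CPU data to the UBOs. */
  DRW_sparse_uniform_buffer_bind(buf, 0, 1); /* Bind chunk 0 at binding location 1. */
  /* ... draw ... */
  DRW_sparse_uniform_buffer_unbind(buf, 0);

  DRW_sparse_uniform_buffer_clear(buf, false); /* Keep the allocated chunks for reuse. */
  DRW_sparse_uniform_buffer_free(buf);
}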
/* Uniform attribute UBO management. */
struct GHash *DRW_uniform_attrs_pool_new(void);
void DRW_uniform_attrs_pool_flush_all(struct GHash *table);
void DRW_uniform_attrs_pool_clear_all(struct GHash *table);

View File

@ -204,11 +204,6 @@ bool DRW_object_is_renderable(const Object *ob)
return true;
}
/* Does `ob` needs to be rendered in edit mode.
*
* When using duplicate linked meshes, objects that are not in edit-mode will be drawn as
* it is in edit mode, when another object with the same mesh is in edit mode.
* This will not be the case when one of the objects are influenced by modifiers. */
bool DRW_object_is_in_edit_mode(const Object *ob)
{
if (BKE_object_is_in_editmode(ob)) {
@ -236,10 +231,6 @@ bool DRW_object_is_in_edit_mode(const Object *ob)
return false;
}
/**
* Return whether this object is visible depending if
* we are rendering or drawing in the viewport.
*/
int DRW_object_visibility_in_active_context(const Object *ob)
{
const eEvaluationMode mode = DRW_state_is_scene_render() ? DAG_EVAL_RENDER : DAG_EVAL_VIEWPORT;
@ -322,7 +313,6 @@ struct DupliObject *DRW_object_get_dupli(const Object *UNUSED(ob))
/** \name Viewport (DRW_viewport)
* \{ */
/* WARNING: only use for custom pipeline. 99% of the time, you don't want to use this. */
void DRW_render_viewport_size_set(const int size[2])
{
DST.size[0] = size[0];
@ -779,7 +769,6 @@ static void drw_duplidata_free(void)
}
}
/* Return NULL if not a dupli or a pointer of pointer to the engine data */
void **DRW_duplidata_get(void *vedata)
{
if (DST.dupli_source == NULL) {
@ -875,9 +864,6 @@ static bool id_can_have_drawdata(const ID *id)
return id_type_can_have_drawdata(GS(id->name));
}
/* Get DrawData from the given ID-block. In order for this to work, we assume that
* the DrawData pointer is stored in the struct in the same fashion as in IdDdtTemplate.
*/
DrawDataList *DRW_drawdatalist_from_id(ID *id)
{
/* only some ID-blocks have this info for now, so we cast the
@ -1149,7 +1135,6 @@ static void drw_engines_draw_text(void)
}
}
/* Draw render engine info. */
void DRW_draw_region_engine_info(int xoffset, int *yoffset, int line_height)
{
DRW_ENABLED_ENGINE_ITER (DST.view_data_active, engine, data) {
@ -1605,9 +1590,6 @@ struct DRWTextStore *DRW_text_cache_ensure(void)
/** \name Main Draw Loops (DRW_draw)
* \{ */
/* Everything starts here.
* This function takes care of calling all cache and rendering functions
* for each relevant engine / mode engine. */
void DRW_draw_view(const bContext *C)
{
View3D *v3d = CTX_wm_view3d(C);
@ -1635,10 +1617,6 @@ void DRW_draw_view(const bContext *C)
}
}
/**
* Used for both regular and off-screen drawing.
* Need to reset DST before calling this function
*/
void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
RenderEngineType *engine_type,
ARegion *region,
@ -1785,9 +1763,6 @@ void DRW_draw_render_loop(struct Depsgraph *depsgraph,
DRW_draw_render_loop_ex(depsgraph, engine_type, region, v3d, viewport, NULL);
}
/**
* \param viewport: can be NULL, in this case we create one.
*/
void DRW_draw_render_loop_offscreen(struct Depsgraph *depsgraph,
RenderEngineType *engine_type,
ARegion *region,
@ -1849,7 +1824,6 @@ void DRW_draw_render_loop_offscreen(struct Depsgraph *depsgraph,
}
}
/* Helper to check if exit object type to render. */
bool DRW_render_check_grease_pencil(Depsgraph *depsgraph)
{
if (!drw_gpencil_engine_needed(depsgraph, NULL)) {
@ -2078,9 +2052,6 @@ void DRW_render_object_iter(
drw_task_graph_deinit();
}
/* Assume a valid gl context is bound (and that the gl_context_mutex has been acquired).
* This function only setup DST and execute the given function.
* Warning: similar to DRW_render_to_image you cannot use default lists (dfbl & dtxl). */
void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
struct Depsgraph *depsgraph,
void (*callback)(void *vedata, void *user_data),
@ -2126,8 +2097,6 @@ void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
drw_manager_exit(&DST);
}
/* Used when the render engine want to redo another cache populate inside the same render frame.
*/
void DRW_cache_restart(void)
{
drw_manager_init(&DST, DST.viewport, (int[2]){UNPACK2(DST.size)});
@ -2318,7 +2287,6 @@ static void draw_select_framebuffer_depth_only_setup(const int size[2])
}
}
/* Must run after all instance datas have been added. */
void DRW_render_instance_buffer_finish(void)
{
BLI_assert_msg(!DST.buffer_finish_called, "DRW_render_instance_buffer_finish called twice!");
@ -2327,7 +2295,6 @@ void DRW_render_instance_buffer_finish(void)
drw_resource_buffer_finish(DST.vmempool);
}
/* WARNING: Changing frame might free the ViewLayerEngineData */
void DRW_render_set_time(RenderEngine *engine, Depsgraph *depsgraph, int frame, float subframe)
{
RE_engine_frame_set(engine, frame, subframe);
@ -2335,9 +2302,6 @@ void DRW_render_set_time(RenderEngine *engine, Depsgraph *depsgraph, int frame,
DST.draw_ctx.view_layer = DEG_get_evaluated_view_layer(depsgraph);
}
/**
* object mode select-loop, see: ED_view3d_draw_select_loop (legacy drawing).
*/
void DRW_draw_select_loop(struct Depsgraph *depsgraph,
ARegion *region,
View3D *v3d,
@ -2665,9 +2629,6 @@ static void drw_draw_depth_loop_impl(struct Depsgraph *depsgraph,
}
}
/**
* object mode select-loop, see: ED_view3d_draw_depth_loop (legacy drawing).
*/
void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
ARegion *region,
View3D *v3d,
@ -2695,9 +2656,6 @@ void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
drw_draw_depth_loop_impl(depsgraph, region, v3d, viewport, false);
}
/**
* Converted from ED_view3d_draw_depth_gpencil (legacy drawing).
*/
void DRW_draw_depth_loop_gpencil(struct Depsgraph *depsgraph,
ARegion *region,
View3D *v3d,
@ -2790,9 +2748,6 @@ void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d, cons
drw_manager_exit(&DST);
}
/**
* Clears the Depth Buffer and draws only the specified object.
*/
void DRW_draw_depth_object(
Scene *scene, ARegion *region, View3D *v3d, GPUViewport *viewport, Object *object)
{
@ -2873,19 +2828,12 @@ void DRW_draw_depth_object(
/** \name Draw Manager State (DRW_state)
* \{ */
/**
* When false, drawing doesn't output to a pixel buffer
* eg: Occlusion queries, or when we have setup a context to draw in already.
*/
bool DRW_state_is_fbo(void)
{
return ((DST.default_framebuffer != NULL) || DST.options.is_image_render) &&
!DRW_state_is_depth() && !DRW_state_is_select();
}
/**
* For when engines need to know if this is drawing for selection or not.
*/
bool DRW_state_is_select(void)
{
return DST.options.is_select;
@ -2901,27 +2849,17 @@ bool DRW_state_is_depth(void)
return DST.options.is_depth;
}
/**
* Whether we are rendering for an image
*/
bool DRW_state_is_image_render(void)
{
return DST.options.is_image_render;
}
/**
* Whether we are rendering only the render engine,
* or if we should also render the mode engines.
*/
bool DRW_state_is_scene_render(void)
{
BLI_assert(DST.options.is_scene_render ? DST.options.is_image_render : true);
return DST.options.is_scene_render;
}
/**
* Whether we are rendering simple opengl render
*/
bool DRW_state_is_opengl_render(void)
{
return DST.options.is_image_render && !DST.options.is_scene_render;
@ -2936,28 +2874,18 @@ bool DRW_state_is_playback(void)
return false;
}
/**
* Is the user navigating the region.
*/
bool DRW_state_is_navigating(void)
{
const RegionView3D *rv3d = DST.draw_ctx.rv3d;
return (rv3d) && (rv3d->rflag & (RV3D_NAVIGATING | RV3D_PAINTING));
}
/**
* Should text draw in this mode?
*/
bool DRW_state_show_text(void)
{
return (DST.options.is_select) == 0 && (DST.options.is_depth) == 0 &&
(DST.options.is_scene_render) == 0 && (DST.options.draw_text) == 0;
}
/**
* Should draw support elements
* Objects center, selection outline, probe data, ...
*/
bool DRW_state_draw_support(void)
{
View3D *v3d = DST.draw_ctx.v3d;
@ -2965,9 +2893,6 @@ bool DRW_state_draw_support(void)
((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0);
}
/**
* Whether we should render the background
*/
bool DRW_state_draw_background(void)
{
return DST.options.draw_background;
@ -3219,6 +3144,8 @@ void DRW_opengl_context_disable_ex(bool restore)
void DRW_opengl_context_enable(void)
{
  /* TODO: should be replaced by a more elegant alternative. */
if (G.background && DST.gl_context == NULL) {
WM_init_opengl();
}
@ -3247,7 +3174,6 @@ void DRW_opengl_render_context_disable(void *re_gl_context)
BLI_ticket_mutex_unlock(DST.gl_context_mutex);
}
/* Needs to be called AFTER DRW_opengl_render_context_enable() */
void DRW_gpu_render_context_enable(void *re_gpu_context)
{
/* If thread is main you should use DRW_opengl_context_enable(). */
@ -3256,7 +3182,6 @@ void DRW_gpu_render_context_enable(void *re_gpu_context)
GPU_context_active_set(re_gpu_context);
}
/* Needs to be called BEFORE DRW_opengl_render_context_disable() */
void DRW_gpu_render_context_disable(void *UNUSED(re_gpu_context))
{
GPU_flush();

View File

@ -304,6 +304,7 @@ void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
const int *value,
int arraysize)
{
  /* Booleans are expected to be 4 bytes long for OpenGL! */
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}
@ -381,7 +382,6 @@ void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
}
/* Stores the int instead of a pointer. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
{
drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
@ -833,7 +833,6 @@ void DRW_shgroup_call_range(
drw_command_draw_range(shgroup, geom, handle, v_sta, v_ct);
}
/* A count of 0 instance will use the default number of instance in the batch. */
void DRW_shgroup_call_instance_range(
DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint i_sta, uint i_ct)
{
@ -888,7 +887,6 @@ void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, Object *ob,
drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tri_count * 3);
}
/* Should be removed */
void DRW_shgroup_call_instances(DRWShadingGroup *shgroup,
Object *ob,
struct GPUBatch *geom,
@ -1441,10 +1439,6 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
return shgroup;
}
/**
* State is added to #Pass.state while drawing.
* Use to temporarily enable draw options.
*/
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
drw_command_set_mutable_state(shgroup, state, 0x0);
@ -1463,7 +1457,6 @@ void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup,
drw_command_set_stencil_mask(shgroup, write_mask, reference, compare_mask);
}
/* TODO: remove this function. */
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
{
drw_command_set_stencil_mask(shgroup, 0xFF, mask, 0xFF);
@ -1758,7 +1751,6 @@ static void draw_view_matrix_state_update(DRWViewUboStorage *storage,
storage->viewvecs[1][2] = view_vecs[3][2] - view_vecs[0][2];
}
/* Create a view with culling. */
DRWView *DRW_view_create(const float viewmat[4][4],
const float winmat[4][4],
const float (*culling_viewmat)[4],
@ -1785,7 +1777,6 @@ DRWView *DRW_view_create(const float viewmat[4][4],
return view;
}
/* Create a view with culling done by another view. */
DRWView *DRW_view_create_sub(const DRWView *parent_view,
const float viewmat[4][4],
const float winmat[4][4])
@ -1807,13 +1798,10 @@ DRWView *DRW_view_create_sub(const DRWView *parent_view,
return view;
}
/**
* DRWView Update:
/* DRWView Update:
* This is meant to be done on existing views when rendering in a loop and there is no
* need to allocate more DRWViews.
*/
* need to allocate more DRWViews. */
/* Update matrices of a view created with DRW_view_create_sub. */
void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4])
{
BLI_assert(view->parent != NULL);
@ -1824,7 +1812,6 @@ void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float w
draw_view_matrix_state_update(&view->storage, viewmat, winmat);
}
/* Update matrices of a view created with DRW_view_create. */
void DRW_view_update(DRWView *view,
const float viewmat[4][4],
const float winmat[4][4],
@ -1893,13 +1880,11 @@ void DRW_view_update(DRWView *view,
#endif
}
/* Return default view if it is a viewport render. */
const DRWView *DRW_view_default_get(void)
{
return DST.view_default;
}
/* WARNING: Only use in render AND only if you are going to set view_default again. */
void DRW_view_reset(void)
{
DST.view_default = NULL;
@ -1907,18 +1892,12 @@ void DRW_view_reset(void)
DST.view_previous = NULL;
}
/* MUST only be called once per render and only in render mode. Sets default view. */
void DRW_view_default_set(DRWView *view)
{
BLI_assert(DST.view_default == NULL);
DST.view_default = view;
}
/**
* This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
* and if the shaders have support for it (see usage of gl_ClipDistance).
* NOTE: planes must be in world space.
*/
void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len)
{
BLI_assert(plane_len <= MAX_CLIP_PLANES);
@ -1933,14 +1912,11 @@ void DRW_view_camtexco_set(DRWView *view, float texco[4])
copy_v4_v4(view->storage.viewcamtexcofac, texco);
}
/* Return world space frustum corners. */
void DRW_view_frustum_corners_get(const DRWView *view, BoundBox *corners)
{
memcpy(corners, &view->frustum_corners, sizeof(view->frustum_corners));
}
/* Return world space frustum sides as planes.
* See draw_frustum_culling_planes_calc() for the plane order. */
void DRW_view_frustum_planes_get(const DRWView *view, float planes[6][4])
{
memcpy(planes, &view->frustum_planes, sizeof(view->frustum_planes));
@ -2022,8 +1998,6 @@ DRWPass *DRW_pass_create(const char *name, DRWState state)
return pass;
}
/* Create an instance of the original pass that will execute the same drawcalls but with its own
* DRWState. */
DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState state)
{
DRWPass *pass = DRW_pass_create(name, state);
@ -2032,7 +2006,6 @@ DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState
return pass;
}
/* Link two passes so that they are both rendered if the first one is being drawn. */
void DRW_pass_link(DRWPass *first, DRWPass *second)
{
BLI_assert(first != second);
@ -2093,10 +2066,6 @@ static int pass_shgroup_dist_sort(const void *a, const void *b)
#undef SORT_IMPL_LINKTYPE
/**
* Sort Shading groups by decreasing Z of their first draw call.
* This is useful for order dependent effect such as alpha-blending.
*/
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
const float(*viewinv)[4] = DST.view_active->storage.viewinv;
@ -2147,9 +2116,6 @@ void DRW_pass_sort_shgroup_z(DRWPass *pass)
pass->shgroups.last = last;
}
/**
* Reverse Shading group submission order.
*/
void DRW_pass_sort_shgroup_reverse(DRWPass *pass)
{
pass->shgroups.last = pass->shgroups.first;

View File

@ -273,7 +273,6 @@ static void drw_stencil_state_set(uint write_mask, uint reference, uint compare_
GPU_stencil_compare_mask_set(compare_mask);
}
/* Reset state to not interfere with other UI draw-call. */
void DRW_state_reset_ex(DRWState state)
{
DST.state = ~state;
@ -292,12 +291,6 @@ static void drw_state_validate(void)
}
}
/**
* Use with care, intended so selection code can override passes depth settings,
* which is important for selection to work properly.
*
* Should be set in main draw loop, cleared afterwards
*/
void DRW_state_lock(DRWState state)
{
DST.state_lock = state;
@ -361,7 +354,6 @@ static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
return (culling->mask & view->culling_mask) != 0;
}
/* Set active view for rendering. */
void DRW_view_set_active(DRWView *view)
{
DST.view_active = (view) ? view : DST.view_default;
@ -435,32 +427,24 @@ static bool draw_culling_plane_test(const BoundBox *corners, const float plane[4
return false;
}
/* Return True if the given BoundSphere intersect the current view frustum.
* bsphere must be in world space. */
bool DRW_culling_sphere_test(const DRWView *view, const BoundSphere *bsphere)
{
view = view ? view : DST.view_default;
return draw_culling_sphere_test(&view->frustum_bsphere, view->frustum_planes, bsphere);
}
/* Return True if the given BoundBox intersect the current view frustum.
* bbox must be in world space. */
bool DRW_culling_box_test(const DRWView *view, const BoundBox *bbox)
{
view = view ? view : DST.view_default;
return draw_culling_box_test(view->frustum_planes, bbox);
}
/* Return True if the view frustum is inside or intersect the given plane.
* plane must be in world space. */
bool DRW_culling_plane_test(const DRWView *view, const float plane[4])
{
view = view ? view : DST.view_default;
return draw_culling_plane_test(&view->frustum_corners, plane);
}
/* Return True if the given box intersect the current view frustum.
* This function will have to be replaced when world space bb per objects is implemented. */
bool DRW_culling_min_max_test(const DRWView *view, float obmat[4][4], float min[3], float max[3])
{
view = view ? view : DST.view_default;
@ -1169,7 +1153,6 @@ void DRW_draw_pass(DRWPass *pass)
}
}
/* Draw only a subset of shgroups. Used in special situations as grease pencil strokes */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
drw_draw_pass_ex(pass, start_group, end_group);

View File

@ -129,8 +129,6 @@ static void drw_stats_timer_start_ex(const char *name, const bool is_query)
}
}
/* Use this to group the queries. It does NOT keep track
* of the time, it only sum what the queries inside it. */
void DRW_stats_group_start(const char *name)
{
drw_stats_timer_start_ex(name, false);
@ -147,7 +145,6 @@ void DRW_stats_group_end(void)
}
}
/* NOTE: Only call this when no sub timer will be called. */
void DRW_stats_query_start(const char *name)
{
GPU_debug_group_begin(name);

View File

@ -28,9 +28,16 @@ void DRW_stats_free(void);
void DRW_stats_begin(void);
void DRW_stats_reset(void);
/**
 * Use this to group the queries. It does NOT keep track
 * of the time, it only sums the queries inside it.
*/
void DRW_stats_group_start(const char *name);
void DRW_stats_group_end(void);
/**
* \note Only call this when no sub timer will be called.
*/
void DRW_stats_query_start(const char *name);
void DRW_stats_query_end(void);
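A short sketch of how a group wraps its queries inside an engine's draw step; the group and pass names are placeholders:

static void stats_example(void)
{
  DRW_stats_group_start("My Engine"); /* The group only sums the queries nested in it. */

  DRW_stats_query_start("Opaque Pass");
  /* ... DRW_draw_pass(...) ... */
  DRW_stats_query_end();

  DRW_stats_query_start("Transparent Pass");
  /* ... DRW_draw_pass(...) ... */
  DRW_stats_query_end();

  DRW_stats_group_end();
}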

View File

@ -650,8 +650,6 @@ void DRW_shader_library_add_file(DRWShaderLibrary *lib, char *lib_code, const ch
}
}
/* Return an allocN'ed string containing the shader code with its dependencies prepended.
* Caller must free the string with MEM_freeN after use. */
char *DRW_shader_library_create_shader_string(const DRWShaderLibrary *lib, const char *shader_code)
{
uint32_t deps = drw_shader_dependencies_get(lib, shader_code);

View File

@ -223,7 +223,6 @@ void DRW_text_cache_draw(DRWTextStore *dt, ARegion *region, struct View3D *v3d)
}
}
/* Copied from drawobject.c */
void DRW_text_edit_mesh_measure_stats(ARegion *region,
View3D *v3d,
Object *ob,

View File

@ -60,6 +60,7 @@ enum {
};
/* draw_manager.c */
struct DRWTextStore *DRW_text_cache_ensure(void);
#ifdef __cplusplus

View File

@ -47,7 +47,6 @@
/** \name Buffer of select ID's
* \{ */
/* Main function to read a block of pixels from the select frame buffer. */
uint *DRW_select_buffer_read(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
@ -122,10 +121,6 @@ uint *DRW_select_buffer_read(struct Depsgraph *depsgraph,
*
* \{ */
/**
* \param rect: The rectangle to sample indices from (min/max inclusive).
* \returns a #BLI_bitmap the length of \a bitmap_len or NULL on failure.
*/
uint *DRW_select_buffer_bitmap_from_rect(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
@ -165,12 +160,6 @@ uint *DRW_select_buffer_bitmap_from_rect(struct Depsgraph *depsgraph,
return bitmap_buf;
}
/**
* \param center: Circle center.
* \param radius: Circle radius.
* \param r_bitmap_len: Number of indices in the selection id buffer.
* \returns a #BLI_bitmap the length of \a r_bitmap_len or NULL on failure.
*/
uint *DRW_select_buffer_bitmap_from_circle(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
@ -235,12 +224,6 @@ static void drw_select_mask_px_cb(int x, int x_end, int y, void *user_data)
} while (++x != x_end);
}
/**
* \param poly: The polygon coordinates.
* \param poly_len: Length of the polygon.
* \param rect: Polygon boundaries.
* \returns a #BLI_bitmap.
*/
uint *DRW_select_buffer_bitmap_from_poly(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
@ -309,9 +292,6 @@ uint *DRW_select_buffer_bitmap_from_poly(struct Depsgraph *depsgraph,
*
* \{ */
/**
* Samples a single pixel.
*/
uint DRW_select_buffer_sample_point(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,
@ -357,11 +337,6 @@ static bool select_buffer_test_fn(const void *__restrict value, void *__restrict
return false;
}
/**
* Find the selection id closest to \a center.
* \param dist: Use to initialize the distance,
* when found, this value is set to the distance of the selection that's returned.
*/
uint DRW_select_buffer_find_nearest_to_point(struct Depsgraph *depsgraph,
struct ARegion *region,
struct View3D *v3d,

View File

@ -53,10 +53,6 @@ void DRW_texture_pool_free(DRWTexturePool *pool)
delete pool;
}
/**
* Try to find a texture corresponding to params into the texture pool.
* If no texture was found, create one and add it to the pool.
*/
GPUTexture *DRW_texture_pool_query(
DRWTexturePool *pool, int width, int height, eGPUTextureFormat format, void *user)
{
@ -113,7 +109,6 @@ GPUTexture *DRW_texture_pool_query(
return handle.texture;
}
/* Resets the user bits for each texture in the pool and delete unused ones. */
void DRW_texture_pool_reset(DRWTexturePool *pool)
{
pool->last_user_id = -1;

View File

@ -38,8 +38,15 @@ extern "C" {
DRWTexturePool *DRW_texture_pool_create(void);
void DRW_texture_pool_free(DRWTexturePool *pool);
/**
* Try to find a texture corresponding to params into the texture pool.
* If no texture was found, create one and add it to the pool.
*/
GPUTexture *DRW_texture_pool_query(
DRWTexturePool *pool, int width, int height, eGPUTextureFormat format, void *user);
/**
* Resets the user bits for each texture in the pool and delete unused ones.
*/
void DRW_texture_pool_reset(DRWTexturePool *pool);
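A minimal sketch of the per-frame pattern, assuming the engine instance pointer is used as the `user` key and `GPU_RGBA16F` as an example format:

static void texture_pool_example(DRWTexturePool *pool, void *engine_instance)
{
  /* Re-queried every frame; a matching unused texture is reused instead of reallocated. */
  GPUTexture *color_tx = DRW_texture_pool_query(pool, 1920, 1080, GPU_RGBA16F, engine_instance);
  UNUSED_VARS(color_tx);
  /* ... render into `color_tx` ... */

  DRW_texture_pool_reset(pool); /* End of frame: textures nobody queried are deleted. */
}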
#ifdef __cplusplus

View File

@ -236,9 +236,6 @@ static bool is_cursor_visible_2d(const DRWContextState *draw_ctx)
/** \name Generic Cursor
* \{ */
/**
* \note This doesn't require the draw context to be in use.
*/
void DRW_draw_cursor_2d_ex(const ARegion *region, const float cursor[2])
{
int co[2];

View File

@ -50,11 +50,6 @@ struct DRWViewData {
Vector<ViewportEngineData *> enabled_engines;
};
/**
* Creates a view data with all possible engines type for this view.
*
* `engine_types` contains `DRWRegisteredDrawEngine`.
* */
DRWViewData *DRW_view_data_create(ListBase *engine_types)
{
DRWViewData *view_data = new DRWViewData();

View File

@ -106,6 +106,11 @@ typedef struct DefaultTextureList {
typedef struct DRWViewData DRWViewData;
/**
 * Creates a view data with all possible engine types for this view.
*
* `engine_types` contains #DRWRegisteredDrawEngine.
*/
DRWViewData *DRW_view_data_create(ListBase *engine_types);
void DRW_view_data_free(DRWViewData *view_data);

View File

@ -241,6 +241,11 @@ typedef struct MeshExtract {
/** \} */
/* draw_cache_extract_mesh_render_data.c */
/**
* \param is_mode_active: When true, use the modifiers from the edit-data,
* otherwise don't use modifiers as they are not from this object.
*/
MeshRenderData *mesh_render_data_create(Mesh *me,
const bool is_editmode,
const bool is_paint_mode,
@ -258,6 +263,9 @@ void mesh_render_data_update_loose_geom(MeshRenderData *mr,
void mesh_render_data_update_polys_sorted(MeshRenderData *mr,
MeshBufferCache *cache,
const eMRDataType data_flag);
/**
* Part of the creation of the #MeshRenderData that happens in a thread.
*/
void mesh_render_data_update_looptris(MeshRenderData *mr,
const eMRIterType iter_type,
const eMRDataType data_flag);