Cleanup: spelling in comments
parent 77061a5621
commit 4d66cbd140
@@ -338,9 +338,9 @@ bool BlenderGPUDisplay::do_update_begin(const GPUDisplayParams &params,
    * NOTE: Allocate the PBO for the the size which will fit the final render resolution (as in,
    * at a resolution divider 1. This was we don't need to recreate graphics interoperability
    * objects which are costly and which are tied to the specific underlying buffer size.
-   * The downside of this approach is that when graphics interopeability is not used we are sending
-   * too much data to GPU when resolution divider is not 1. */
-  /* TODO(sergey): Investigate whether keeping the PBO exact size of the texute makes non-interop
+   * The downside of this approach is that when graphics interoperability is not used we are
+   * sending too much data to GPU when resolution divider is not 1. */
+  /* TODO(sergey): Investigate whether keeping the PBO exact size of the texture makes non-interop
    * mode faster. */
   const int buffer_width = params.full_size.x;
   const int buffer_height = params.full_size.y;
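An aside on the allocation strategy this hunk documents: sizing the PBO once for the divider-1 resolution means the GL buffer (and the interop objects tied to it) never has to be recreated during navigation. A minimal sketch of that idea, assuming a GL header/loader is available and half-float RGBA pixels; the function name is illustrative, not the Cycles API:

    GLuint create_full_size_pbo(int full_width, int full_height)
    {
      /* Allocate once for the resolution-divider-1 size; updates at higher
       * dividers simply fill a sub-range of this buffer. */
      GLuint pbo = 0;
      glGenBuffers(1, &pbo);
      glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
      const size_t size = size_t(full_width) * full_height * 4 * sizeof(uint16_t);
      glBufferData(GL_PIXEL_UNPACK_BUFFER, size, nullptr, GL_DYNAMIC_DRAW);
      glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
      return pbo;
    }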
@@ -134,7 +134,7 @@ class BlenderGPUDisplay : public GPUDisplay {
   /* Make sure texture is allocated and its initial configuration is performed. */
   bool gl_texture_resources_ensure();

-  /* Ensure all runtime GPU resources needefd for drawing are allocated.
+  /* Ensure all runtime GPU resources needed for drawing are allocated.
    * Returns true if all resources needed for drawing are available. */
   bool gl_draw_resources_ensure();

@@ -146,7 +146,7 @@ class BlenderGPUDisplay : public GPUDisplay {
    * NOTE: The texture needs to be bound. */
   void texture_update_if_needed();

-  /* Update vetrex buffer with new coordinates of vertex positions and texture coordinates.
+  /* Update vertex buffer with new coordinates of vertex positions and texture coordinates.
    * This buffer is used to render texture in the viewport.
    *
    * NOTE: The buffer needs to be bound. */
@@ -200,7 +200,7 @@ class BlenderGPUDisplay : public GPUDisplay {
   bool gl_draw_resource_creation_attempted_ = false;
   bool gl_draw_resources_created_ = false;

-  /* Vertex buffer which hold vertrices of a triangle fan which is textures with the texture
+  /* Vertex buffer which hold vertices of a triangle fan which is textures with the texture
    * holding the render result. */
   uint vertex_buffer_ = 0;

@@ -44,7 +44,7 @@ CPUKernels::CPUKernels()
       /* Shader evaluation. */
       REGISTER_KERNEL(shader_eval_displace),
       REGISTER_KERNEL(shader_eval_background),
-      /* Adaptive campling. */
+      /* Adaptive sampling. */
       REGISTER_KERNEL(adaptive_sampling_convergence_check),
       REGISTER_KERNEL(adaptive_sampling_filter_x),
       REGISTER_KERNEL(adaptive_sampling_filter_y),
@@ -28,7 +28,7 @@ void CUDADeviceKernels::load(CUDADevice *device)
   for (int i = 0; i < (int)DEVICE_KERNEL_NUM; i++) {
     CUDADeviceKernel &kernel = kernels_[i];

-    /* No megakernel used for GPU. */
+    /* No mega-kernel used for GPU. */
     if (i == DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL) {
       continue;
     }
@@ -213,7 +213,7 @@ class Device {
    * The interoperability comes here by the meaning that the device is capable of computing result
    * directly into an OpenGL (or other graphics library) buffer. */

-  /* Check display si to be updated using graphics interoperability.
+  /* Check display is to be updated using graphics interoperability.
    * The interoperability can not be used is it is not supported by the device. But the device
    * might also force disable the interoperability if it detects that it will be slower than
    * copying pixels from the render buffer. */
@@ -68,7 +68,7 @@ class DenoiseParams : public Node {
   /* Viewport start sample. */
   int start_sample = 0;

-  /* Auxiliry passes. */
+  /* Auxiliary passes. */
   bool use_pass_albedo = true;
   bool use_pass_normal = true;

@@ -571,7 +571,7 @@ class OptiXDevice::DenoiseContext {
     int pass_stride = -1;
   } guiding_params;

-  /* Number of input passes. Including the color and extra auxillary passes. */
+  /* Number of input passes. Including the color and extra auxiliary passes. */
   int num_input_passes = 0;
   bool use_pass_albedo = false;
   bool use_pass_normal = false;
@@ -956,7 +956,7 @@ bool OptiXDevice::denoise_run(DenoiseContext &context, const DenoisePass &pass)
     /* Denoise in-place of the noisy input in the render buffers. */
     output_layer = color_layer;

-  /* Finally run denonising. */
+  /* Finally run denoising. */
   OptixDenoiserParams params = {}; /* All parameters are disabled/zero. */
   OptixDenoiserLayer image_layers = {};
   image_layers.input = color_layer;
@@ -146,7 +146,7 @@ class OptiXDevice : public CUDADevice {
   /* Read guiding passes from the render buffers, preprocess them in a way which is expected by
    * OptiX and store in the guiding passes memory within the given context.
    *
-   * Pre=-processing of the guiding passes is to only hapopen once per context lifetime. DO not
+   * Pre=-processing of the guiding passes is to only happen once per context lifetime. DO not
    * preprocess them for every pass which is being denoised. */
   bool denoise_filter_guiding_preprocess(DenoiseContext &context);

@@ -33,7 +33,7 @@ class Progress;

 /* Implementation of a specific denoising algorithm.
  *
- * This class takes care of breaking down denosiing algorithm into a series of device calls or to
+ * This class takes care of breaking down denoising algorithm into a series of device calls or to
  * calls of an external API to denoise given input.
  *
  * TODO(sergey): Are we better with device or a queue here? */
@@ -53,7 +53,7 @@ class Denoiser {
   const DenoiseParams &get_params() const;

   /* Create devices and load kernels needed for denoising.
-   * The progress is used to communicate state when kenrels actually needs to be loaded.
+   * The progress is used to communicate state when kernels actually needs to be loaded.
    *
    * NOTE: The `progress` is an optional argument, can be nullptr. */
   virtual bool load_kernels(Progress *progress);
@@ -64,7 +64,7 @@ class Denoiser {
    * a lower resolution render into a bigger allocated buffer, which is used in viewport during
    * navigation and non-unit pixel size. Use that instead of render_buffers->params.
    *
-   * The buffer might be copming from a "foreign" device from what this denoise is created for.
+   * The buffer might be coming from a "foreign" device from what this denoise is created for.
    * This means that in general case the denoiser will make sure the input data is available on
    * the denoiser device, perform denoising, and put data back to the device where the buffer
    * came from.
@@ -95,8 +95,8 @@ class Denoiser {
    * using OptiX denoiser and rendering on CPU.
    *
    * - No threading safety is ensured in this call. This means, that it is up to caller to ensure
-   * that there is no threadingconflict between denoising task lazily initializing the device and
-   * access to this device happen. */
+   * that there is no threading-conflict between denoising task lazily initializing the device
+   * and access to this device happen. */
   Device *get_denoiser_device() const;

   function<bool(void)> is_cancelled_cb;
@@ -77,7 +77,7 @@ bool DeviceDenoiser::denoise_buffer(const BufferParams &buffer_params,
   local_render_buffers.reset(buffer_params);

   /* NOTE: The local buffer is allocated for an exact size of the effective render size, while
-   * the input render buffer is allcoated for the lowest resolution divider possible. So it is
+   * the input render buffer is allocated for the lowest resolution divider possible. So it is
    * important to only copy actually needed part of the input buffer. */
   memcpy(local_render_buffers.buffer.data(),
          render_buffers->buffer.data(),
@@ -93,7 +93,7 @@ class OIDNPass {
    * Is required for albedo and normal passes. The color pass OIDN will perform auto-exposure, so
    * scaling is not needed for the color pass unless adaptive sampling is used.
    *
-   * NOTE: Do not scale the outout pass, as that requires to be a pointer in the original buffer.
+   * NOTE: Do not scale the output pass, as that requires to be a pointer in the original buffer.
    * All the scaling on the output needed for integration with adaptive sampling will happen
    * outside of generic pass handling. */
   bool need_scale = false;
@@ -479,7 +479,7 @@ class OIDNDenoiseContext {
   }

   if (num_samples_ == 1) {
-    /* If the avoid scaling if there is only one sample, to save up time (so we dont divide
+    /* If the avoid scaling if there is only one sample, to save up time (so we don't divide
      * buffer by 1). */
     return false;
   }
@@ -177,7 +177,7 @@ void PathTrace::render(const RenderWork &render_work)

 void PathTrace::render_pipeline(RenderWork render_work)
 {
-  /* NOTE: Only check for "instant" cancel here. Ther user-requested cancel via progress is
+  /* NOTE: Only check for "instant" cancel here. The user-requested cancel via progress is
    * checked in Session and the work in the event of cancel is to be finished here. */

   render_scheduler_.set_need_schedule_cryptomatte(device_scene_->data.film.cryptomatte_passes !=
@@ -680,7 +680,7 @@ void PathTrace::write_tile_buffer(const RenderWork &render_work)
    *
    * Tiles are written to a file during rendering, and written to the software at the end
    * of rendering (wither when all tiles are finished, or when rendering was requested to be
-   * cancelled).
+   * canceled).
    *
    * Important thing is: tile should be written to the software via callback only once. */
   if (!has_multiple_tiles) {
@@ -913,7 +913,7 @@ void PathTrace::process_full_buffer_from_disk(string_view filename)
    * ensure proper denoiser is used. */
   set_denoiser_params(denoise_params);

-  /* Number of samples doesn't matter too much, since the sampels count pass will be used. */
+  /* Number of samples doesn't matter too much, since the samples count pass will be used. */
   denoiser_->denoise_buffer(full_frame_buffers.params, &full_frame_buffers, 0, false);

   render_state_.has_denoised_result = true;
@@ -83,7 +83,7 @@ class PathTrace {
   void set_progress(Progress *progress);

   /* NOTE: This is a blocking call. Meaning, it will not return until given number of samples are
-   * rendered (or until rendering is requested to be cancelled). */
+   * rendered (or until rendering is requested to be canceled). */
   void render(const RenderWork &render_work);

   /* TODO(sergey): Decide whether denoiser is really a part of path tracer. Currently it is
@@ -110,7 +110,7 @@ class PathTrace {
   /* Cancel rendering process as soon as possible, without waiting for full tile to be sampled.
    * Used in cases like reset of render session.
    *
-   * This is a blockign call, which returns as soon as there is no running `render_samples()` call.
+   * This is a blocking call, which returns as soon as there is no running `render_samples()` call.
    */
   void cancel();

@@ -120,11 +120,11 @@ class PathTrace {
    * the data will be copied to the device of the given render buffers. */
   void copy_to_render_buffers(RenderBuffers *render_buffers);

-  /* Copy happens via CPU side buffer: data will be copied from the device of the given rendetr
+  /* Copy happens via CPU side buffer: data will be copied from the device of the given render
    * buffers and will be copied to all devices of the path trace. */
   void copy_from_render_buffers(RenderBuffers *render_buffers);

-  /* Copy render buffers of the big tile from the device to hsot.
+  /* Copy render buffers of the big tile from the device to host.
    * Return true if all copies are successful. */
   bool copy_render_tile_from_device();

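The "copy via CPU side buffer" pattern the first comment in this hunk describes can be pictured as a two-hop copy. A hedged sketch in the style of the `RenderBuffers` API visible elsewhere in this diff; the body is an illustration, not the actual implementation:

    /* Hop 1: device of `src` -> host memory; hop 2: host -> device of each work. */
    void copy_via_host_sketch(RenderBuffers *src, const std::vector<RenderBuffers *> &dst_all)
    {
      src->copy_from_device();
      for (RenderBuffers *dst : dst_all) {
        memcpy(dst->buffer.data(), src->buffer.data(), src->buffer.memory_size());
        dst->copy_to_device();
      }
    }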
@@ -172,10 +172,10 @@ class PathTrace {
    * Is called during path tracing to communicate work-in-progress state of the final buffer. */
   function<void(void)> tile_buffer_update_cb;

-  /* Callback which communicates final rendered buffer. Is called after pathtracing is done. */
+  /* Callback which communicates final rendered buffer. Is called after path-tracing is done. */
   function<void(void)> tile_buffer_write_cb;

-  /* Callback which initializes rendered buffer. Is called before pathtracing starts.
+  /* Callback which initializes rendered buffer. Is called before path-tracing starts.
    *
    * This is used for baking. */
   function<bool(void)> tile_buffer_read_cb;
@@ -189,7 +189,7 @@ class PathTrace {

  protected:
   /* Actual implementation of the rendering pipeline.
-   * Calls steps in order, checking for the cancel to be requested inbetween.
+   * Calls steps in order, checking for the cancel to be requested in between.
    *
    * Is separate from `render()` to simplify dealing with the early outputs and keeping
    * `render_cancel_` in the consistent state. */
@@ -283,7 +283,7 @@ class PathTrace {
    * affects both resolution and stride as visible by the integrator kernels. */
   int resolution_divider = 0;

-  /* Paramaters of the big tile with the current resolution divider applied. */
+  /* Parameters of the big tile with the current resolution divider applied. */
   BufferParams effective_big_tile_params;

   /* Denosier was run and there are denoised versions of the passes in the render buffers. */
@@ -39,8 +39,8 @@ class PathTraceWork {

   /* Create path trace work which fits best the device.
    *
-   * The cancel request flag is used for a cheap check whether cancel is to berformed as soon as
-   * possible. This could be, for rexample, request to cancel rendering on camera navigation in
+   * The cancel request flag is used for a cheap check whether cancel is to be performed as soon as
+   * possible. This could be, for example, request to cancel rendering on camera navigation in
    * viewport. */
   static unique_ptr<PathTraceWork> create(Device *device,
                                           Film *film,
@@ -107,7 +107,7 @@ class PathTraceWork {
   /* Special version of the `copy_from_render_buffers()` which only copies denosied passes from the
    * given render buffers, leaving rest of the passes.
    *
-   * Same notes about device copying aplies to this call as well. */
+   * Same notes about device copying applies to this call as well. */
   void copy_from_denoised_render_buffers(const RenderBuffers *render_buffers);

   /* Copy render buffers to/from device using an appropriate device queue when needed so that
@@ -119,7 +119,7 @@ class PathTraceWork {
    * things are executed in order with the `render_samples()`. */
   virtual bool zero_render_buffers() = 0;

-  /* Access pixels rendered by this work and copy them to the coresponding location in the
+  /* Access pixels rendered by this work and copy them to the corresponding location in the
    * destination.
    *
    * NOTE: Does not perform copy of buffers from the device. Use `copy_render_tile_from_device()`
@@ -182,7 +182,7 @@ class PathTraceWork {
   unique_ptr<RenderBuffers> buffers_;

   /* Effective parameters of the full, big tile, and current work render buffer.
-   * The latter might be different from buffers_->params when there is a resolution divider
+   * The latter might be different from `buffers_->params` when there is a resolution divider
    * involved. */
   BufferParams effective_full_params_;
   BufferParams effective_big_tile_params_;
@@ -498,7 +498,7 @@ void PathTraceWorkGPU::compact_states(const int num_active_paths)
 bool PathTraceWorkGPU::enqueue_work_tiles(bool &finished)
 {
   /* If there are existing paths wait them to go to intersect closest kernel, which will align the
-   * wavefront of the existing and newely added paths. */
+   * wavefront of the existing and newly added paths. */
   /* TODO: Check whether counting new intersection kernels here will have positive affect on the
    * performance. */
   const DeviceKernel kernel = get_most_queued_kernel();
@@ -508,7 +508,7 @@ bool PathTraceWorkGPU::enqueue_work_tiles(bool &finished)

   int num_active_paths = get_num_active_paths();

-  /* Don't schedule more work if cancelling. */
+  /* Don't schedule more work if canceling. */
   if (is_cancel_requested()) {
     if (num_active_paths == 0) {
       finished = true;
@@ -729,7 +729,7 @@ void PathTraceWorkGPU::copy_to_gpu_display_naive(GPUDisplay *gpu_display,
       gpu_display_rgba_half_.data_height != final_height) {
     gpu_display_rgba_half_.alloc(final_width, final_height);
     /* TODO(sergey): There should be a way to make sure device-side memory is allocated without
-     * transfering zeroes to the device. */
+     * transferring zeroes to the device. */
     queue_->zero_to_device(gpu_display_rgba_half_);
   }

@@ -233,7 +233,7 @@ void RenderScheduler::render_work_reschedule_on_cancel(RenderWork &render_work)

   const bool has_rendered_samples = get_num_rendered_samples() != 0;

-  /* Reset all fields of the previous work, canelling things like adaptive sampling filtering and
+  /* Reset all fields of the previous work, canceling things like adaptive sampling filtering and
    * denoising.
    * However, need to preserve write requests, since those will not be possible to recover and
    * writes are only to happen once. */
@@ -246,7 +246,7 @@ void RenderScheduler::render_work_reschedule_on_cancel(RenderWork &render_work)
   render_work.full.write = full_write;

   /* Do not write tile if it has zero samples it it, treat it similarly to all other tiles which
-   * got cancelled. */
+   * got canceled. */
   if (!state_.tile_result_was_written && has_rendered_samples) {
     render_work.tile.write = true;
   }
@@ -817,7 +817,7 @@ int RenderScheduler::get_num_samples_to_path_trace() const

   int num_samples_to_render = min(num_samples_pot, max_num_samples_to_render);

-  /* When enough statistics is available and doing an offlien rendering prefer to keep device
+  /* When enough statistics is available and doing an offline rendering prefer to keep device
    * occupied. */
   if (state_.occupancy_num_samples && (background_ || headless_)) {
     /* Keep occupancy at about 0.5 (this is more of an empirical figure which seems to match scenes
@@ -874,7 +874,7 @@ int RenderScheduler::get_num_samples_during_navigation(int resolution_divider) c

   /* Always render 4 samples, even if scene is configured for less.
    * The idea here is to have enough information on the screen. Resolution divider of 2 allows us
-   * to have 4 time extra samples, so verall worst case timing is the same as the final resolution
+   * to have 4 time extra samples, so overall worst case timing is the same as the final resolution
    * at one sample. */
   return 4;
 }

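For intuition on the fixed 4-sample figure: a resolution divider of d reduces the pixel count by a factor of d * d, so at divider 2 there are 4x fewer pixels and 4 samples cost roughly the same as a single sample at the final resolution, which is the worst-case equivalence the comment refers to.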
@@ -83,7 +83,7 @@ class RenderWork {
   } display;

   /* Re-balance multi-device scheduling after rendering this work.
-   * Note that the scheduler does not know anything abouce devices, so if there is only a single
+   * Note that the scheduler does not know anything about devices, so if there is only a single
    * device used, then it is up for the PathTracer to ignore the balancing. */
   bool rebalance = false;

@@ -203,7 +203,7 @@ class RenderScheduler {
    * extra work needs to be scheduled to denoise and write final result. */
   bool done() const;

-  /* Update scheduling state for a newely scheduled work.
+  /* Update scheduling state for a newly scheduled work.
    * Takes care of things like checking whether work was ever denoised, tile was written and states
    * like that. */
   void update_state_for_render_work(const RenderWork &render_work);
@@ -235,7 +235,7 @@ class RenderScheduler {
   double guess_display_update_interval_in_seconds_for_num_samples_no_limit(
       int num_rendered_samples) const;

-  /* Calculate number of samples which can be rendered within current desred update interval which
+  /* Calculate number of samples which can be rendered within current desired update interval which
    * is calculated by `guess_update_interval_in_seconds()`. */
   int calculate_num_samples_per_update() const;

@@ -250,11 +250,11 @@ class RenderScheduler {
   /* Whether adaptive sampling convergence check and filter is to happen. */
   bool work_need_adaptive_filter() const;

-  /* Calculate thretshold for adaptive sampling. */
+  /* Calculate threshold for adaptive sampling. */
   float work_adaptive_threshold() const;

   /* Check whether current work needs denoising.
-   * Denoising is not needed if the denoiser is not configured, or when denosiing is happening too
+   * Denoising is not needed if the denoiser is not configured, or when denoising is happening too
    * often.
    *
    * The delayed will be true when the denoiser is configured for use, but it was delayed for a
@@ -71,7 +71,7 @@ bool ShaderEval::eval(const ShaderEvalType type,
   success = (device->info.type == DEVICE_CPU) ? eval_cpu(device, type, input, output) :
                                                 eval_gpu(device, type, input, output);

-  /* Copy data back from device if not cancelled. */
+  /* Copy data back from device if not canceled. */
   if (success) {
     output.copy_from_device(0, 1, output.size());
     read_output(output);
@@ -32,7 +32,7 @@ struct WorkBalanceInfo {
   double weight = 1.0;
 };

-/* Balance work for an initial render interation, before any statistics is known. */
+/* Balance work for an initial render integration, before any statistics is known. */
 void work_balance_do_initial(vector<WorkBalanceInfo> &work_balance_infos);

 /* Rebalance work after statistics has been accumulated.
@@ -81,7 +81,7 @@ void WorkTileScheduler::reset_scheduler_state()
 bool WorkTileScheduler::get_work(KernelWorkTile *work_tile_, const int max_work_size)
 {
   /* Note that the `max_work_size` can be higher than the `max_num_path_states_`: this is because
-   * the path trace work can decice to use smaller tile sizes and greedily schedule multiple tiles,
+   * the path trace work can decide to use smaller tile sizes and greedily schedule multiple tiles,
    * improving overall device occupancy.
    * So the `max_num_path_states_` is a "scheduling unit", and the `max_work_size` is a "scheduling
    * limit". */
@@ -64,7 +64,7 @@ class WorkTileScheduler {
   /* dimensions of the currently rendering image in pixels. */
   int2 image_size_px_ = make_int2(0, 0);

-  /* Offset and stride of the buffer within which scheduing is happenning.
+  /* Offset and stride of the buffer within which scheduling is happening.
    * Will be passed over to the KernelWorkTile. */
   int offset_, stride_;

@@ -87,7 +87,7 @@ class WorkTileScheduler {
    * in the `get_work()`? */
   int total_tiles_num_ = 0;

-  /* In the case when the number of sam[les in the `tile_size_` is lower than samples_num_ denotes
+  /* In the case when the number of samples in the `tile_size_` is lower than samples_num_ denotes
    * how many tiles are to be "stacked" to cover the entire requested range of samples. */
   int num_tiles_per_sample_range_ = 0;

@@ -76,7 +76,7 @@ ccl_device void bssrdf_setup_radius(Bssrdf *bssrdf, const ClosureType type, cons
     const float inv_eta = 1.0f / eta;
     const float F_dr = inv_eta * (-1.440f * inv_eta + 0.710f) + 0.668f + 0.0636f * eta;
     const float fourthirdA = (4.0f / 3.0f) * (1.0f + F_dr) /
-                             (1.0f - F_dr); /* From Jensen's Fdr ratio formula. */
+                             (1.0f - F_dr); /* From Jensen's `Fdr` ratio formula. */

     const float3 alpha_prime = make_float3(
         bssrdf_dipole_compute_alpha_prime(bssrdf->albedo.x, fourthirdA),
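Written out, the `F_dr` fit and the ratio in this hunk (the diffuse Fresnel reflectance approximation the comment attributes to Jensen) are:

    F_dr = -1.440 / eta^2 + 0.710 / eta + 0.668 + 0.0636 * eta
    fourthirdA = (4 / 3) * (1 + F_dr) / (1 - F_dr)

which matches the constants in the code above; only the grouping differs, since the code factors out `1 / eta`.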
@@ -80,7 +80,7 @@ typedef unsigned long long uint64_t;
 #define ccl_gpu_global_id_x() (ccl_gpu_block_idx_x * ccl_gpu_block_dim_x + ccl_gpu_thread_idx_x)
 #define ccl_gpu_global_size_x() (ccl_gpu_grid_dim_x * ccl_gpu_block_dim_x)

-/* GPU warp synchronizaton */
+/* GPU warp synchronization. */

 #define ccl_gpu_syncthreads() __syncthreads()
 #define ccl_gpu_ballot(predicate) __ballot_sync(0xFFFFFFFF, predicate)
@@ -81,7 +81,7 @@ typedef unsigned long long uint64_t;
 #define ccl_gpu_global_id_x() (ccl_gpu_block_idx_x * ccl_gpu_block_dim_x + ccl_gpu_thread_idx_x)
 #define ccl_gpu_global_size_x() (ccl_gpu_grid_dim_x * ccl_gpu_block_dim_x)

-/* GPU warp synchronizaton */
+/* GPU warp synchronization. */

 #define ccl_gpu_syncthreads() __syncthreads()
 #define ccl_gpu_ballot(predicate) __ballot_sync(0xFFFFFFFF, predicate)
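As a usage illustration of the warp-synchronization macros in the two hunks above (a sketch, not code from these files): `ccl_gpu_ballot()` yields a bitmask with one bit per lane, which can be reduced with the standard CUDA `__popc()` population-count intrinsic:

    /* Count how many threads in each warp pass a predicate; the first lane of
     * every warp accumulates. Assumes the launch covers exactly `n` elements. */
    __global__ void count_over_threshold(const float *values, float threshold, int *out)
    {
      const int i = ccl_gpu_global_id_x();
      const unsigned int mask = ccl_gpu_ballot(values[i] > threshold);
      if ((ccl_gpu_thread_idx_x & 31) == 0) {
        atomicAdd(out, __popc(mask));
      }
    }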
@@ -30,7 +30,7 @@ CCL_NAMESPACE_BEGIN

 #ifdef __VOLUME__

-/* Events for probalistic scattering */
+/* Events for probabilistic scattering. */

 typedef enum VolumeIntegrateEvent {
   VOLUME_PATH_SCATTERED = 0,
@@ -228,8 +228,8 @@ ccl_device void volume_shadow_heterogeneous(INTEGRATOR_STATE_ARGS,
     /* compute attenuation over segment */
     sd->P = new_P;
     if (shadow_volume_shader_sample(INTEGRATOR_STATE_PASS, sd, &sigma_t)) {
-      /* Compute expf() only for every Nth step, to save some calculations
-       * because exp(a)*exp(b) = exp(a+b), also do a quick VOLUME_THROUGHPUT_EPSILON
+      /* Compute `expf()` only for every Nth step, to save some calculations
+       * because `exp(a)*exp(b) = exp(a+b)`, also do a quick #VOLUME_THROUGHPUT_EPSILON
        * check then. */
       sum += (-sigma_t * dt);
       if ((i & 0x07) == 0) { /* ToDo: Other interval? */
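The optimization this comment describes, as a standalone hedged sketch (illustrative, not the kernel's actual code): accumulate the optical depth and flush it through `expf()` only every 8th step, since `exp(a) * exp(b) == exp(a + b)`:

    #include <cmath>

    /* Transmittance over `num_steps` segments of length `dt` with extinction
     * `sigma_t[i]`, evaluating expf() once per 8 steps instead of per step. */
    float transmittance_sketch(const float *sigma_t, float dt, int num_steps)
    {
      float tp = 1.0f;
      float sum = 0.0f;
      for (int i = 0; i < num_steps; i++) {
        sum += -sigma_t[i] * dt;
        if ((i & 0x07) == 0) {
          tp *= expf(sum); /* Flush the accumulated exponent. */
          sum = 0.0f;
        }
      }
      return tp * expf(sum); /* Remainder of steps since the last flush. */
    }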
@@ -648,7 +648,7 @@ ccl_device_forceinline void volume_integrate_heterogeneous(
     }
   }

-  /* Write accumulated emisison. */
+  /* Write accumulated emission. */
   if (!is_zero(accum_emission)) {
     kernel_accum_emission(
         INTEGRATOR_STATE_PASS, result.indirect_throughput, accum_emission, render_buffer);
@@ -44,7 +44,7 @@
  * INTEGRATOR_STATE_PASS_NULL: use to pass empty state to other functions.
  *
  * NOTE: if we end up with a device that passes no arguments, the leading comma will be a problem.
- * Can solve it with more macros if we encouter it, but rather ugly so postpone for now.
+ * Can solve it with more macros if we encounter it, but rather ugly so postpone for now.
  */

 #include "kernel/kernel_types.h"
@@ -129,7 +129,7 @@ typedef struct IntegratorStateGPU {
  *
  * Note that there is a special access function for the shadow catcher state. This access is to
  * happen from a kernel which operates on a "main" path. Attempt to use shadow catcher accessors
- * from a kernel which operates on a shadow catcher state will cause bad memory acces. */
+ * from a kernel which operates on a shadow catcher state will cause bad memory access. */

 #ifdef __KERNEL_CPU__

@@ -60,7 +60,7 @@ ccl_device bool kernel_adaptive_sampling_convergence_check(const KernelGlobals *

   const float4 A = kernel_read_pass_float4(buffer + kernel_data.film.pass_adaptive_aux_buffer);
   if (!reset && A.w != 0.0f) {
-    /* If the pixel was considered converged, its state will not change in this kernmel. Early
+    /* If the pixel was considered converged, its state will not change in this kernel. Early
      * output before doing any math.
      *
      * TODO(sergey): On a GPU it might be better to keep thread alive for better coherency? */
@@ -393,7 +393,7 @@ film_calculate_shadow_catcher(const KernelFilmConvert *ccl_restrict kfilm_conver
   ccl_global const float *in_catcher = buffer + kfilm_convert->pass_shadow_catcher;

   /* NOTE: It is possible that the Shadow Catcher pass is requested as an output without actual
-   * shadow catcher objects in the scene. In this case there will be no auxillary passes required
+   * shadow catcher objects in the scene. In this case there will be no auxiliary passes required
    * for the devision (to save up memory). So delay the asserts to this point so that the number of
    * samples check handles such configuration. */
   kernel_assert(kfilm_convert->pass_offset != PASS_UNUSED);
@@ -404,14 +404,14 @@ film_calculate_shadow_catcher(const KernelFilmConvert *ccl_restrict kfilm_conver
   ccl_global const float *in_matte = buffer + kfilm_convert->pass_shadow_catcher_matte;

   /* No scaling needed. The integration works in way that number of samples in the combined and
-   * shadow catcher passes are the same, and exposure is cancelled during the division. */
+   * shadow catcher passes are the same, and exposure is canceled during the division. */
   const float3 color_catcher = make_float3(in_catcher[0], in_catcher[1], in_catcher[2]);
   const float3 color_combined = make_float3(in_combined[0], in_combined[1], in_combined[2]);
   const float3 color_matte = make_float3(in_matte[0], in_matte[1], in_matte[2]);

   /* Need to ignore contribution of the matte object when doing division (otherwise there will be
    * artifacts caused by anti-aliasing). Since combined pass is used for adaptive sampling and need
-   * to contain matte objects, we subtrack matte objects contribution here. This is the same as if
+   * to contain matte objects, we subtract matte objects contribution here. This is the same as if
    * the matte objects were not accumulated to the combined pass. */
   const float3 combined_no_matte = color_combined - color_matte;

@@ -422,8 +422,8 @@ film_calculate_shadow_catcher(const KernelFilmConvert *ccl_restrict kfilm_conver
   const float alpha = film_transparency_to_alpha(transparency);

   /* Alpha-over on white using transparency of the combined pass. This allows to eliminate
-   * artifacts which are happenning on an edge of a shadow catcher when using transparent film.
-   * Note that we treat shadow catcher as straight alpha here because alpha got cancelled out
+   * artifacts which are happening on an edge of a shadow catcher when using transparent film.
+   * Note that we treat shadow catcher as straight alpha here because alpha got canceled out
    * during the division. */
   const float3 pixel = (1.0f - alpha) * one_float3() + alpha * shadow_catcher;

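Putting the two shadow-catcher hunks above together, the division and compositing steps are roughly (a reconstruction from the visible lines, not a quote of the kernel):

    catcher = color_catcher / (color_combined - color_matte)   /* per channel */
    pixel   = (1 - alpha) * (1, 1, 1) + alpha * catcher        /* alpha-over on white */

where `alpha` comes from the combined pass transparency, and subtracting the matte contribution avoids the anti-aliasing artifacts on catcher edges that the comments mention.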
@@ -111,7 +111,7 @@ ccl_device_forceinline void path_rng_2D(
 }

 /**
- * 1D hash recomended from "Hash Functions for GPU Rendering" JCGT Vol. 9, No. 3, 2020
+ * 1D hash recommended from "Hash Functions for GPU Rendering" JCGT Vol. 9, No. 3, 2020
  * See https://www.shadertoy.com/view/4tXyWN and https://www.shadertoy.com/view/XlGcRh
  * http://www.jcgt.org/published/0009/03/02/paper.pdf
  */
@@ -124,7 +124,7 @@ ccl_device_inline uint hash_iqint1(uint n)
 }

 /**
- * 2D hash recomended from "Hash Functions for GPU Rendering" JCGT Vol. 9, No. 3, 2020
+ * 2D hash recommended from "Hash Functions for GPU Rendering" JCGT Vol. 9, No. 3, 2020
  * See https://www.shadertoy.com/view/4tXyWN and https://www.shadertoy.com/view/XlGcRh
  * http://www.jcgt.org/published/0009/03/02/paper.pdf
  */
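For reference, the 1D hash these comments point at (from the JCGT paper and the linked shadertoys) is the classic integer-noise construction; a sketch of `hash_iqint1` in that widely published form, worth double-checking against the actual kernel source:

    ccl_device_inline uint hash_iqint1(uint n)
    {
      n = (n << 13U) ^ n;                                /* Scramble the bits. */
      n = n * (n * n * 15731U + 789221U) + 1376312589U;  /* Integer-noise polynomial. */
      return n;
    }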
@@ -380,7 +380,7 @@ typedef enum PassType {
  * pass contains number of samples which contributed to the color components of the pass.
  *
  * PASS_SHADOW_CATCHER_SAMPLE_COUNT contains number of samples for which the path split
- * happenned.
+ * happened.
  *
  * PASS_SHADOW_CATCHER_MATTE contains pass which contains non-catcher objects. This pass is to be
  * alpha-overed onto the backdrop (after multiplication). */
@@ -255,7 +255,7 @@ ccl_device float3 svm_bevel(INTEGRATOR_STATE_CONST_ARGS,

     /* Multiple importance sample between 3 axes, power heuristic
      * found to be slightly better than balance heuristic. pdf_N
-     * in the MIS weight and denominator cancelled out. */
+     * in the MIS weight and denominator canceled out. */
     float w = pdf_N / (sqr(pdf_N) + sqr(pdf_T) + sqr(pdf_B));
     if (isect.num_hits > LOCAL_MAX_HITS) {
       w *= isect.num_hits / (float)LOCAL_MAX_HITS;
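The weight in this hunk is the power heuristic over the three axis pdfs with one factor of `pdf_N` already canceled against the estimator's `1/pdf_N`, i.e.:

    w = (pdf_N^2 / (pdf_N^2 + pdf_T^2 + pdf_B^2)) * (1 / pdf_N)
      = pdf_N / (pdf_N^2 + pdf_T^2 + pdf_B^2)

which is exactly the expression on the `float w = ...` line above.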
@@ -300,7 +300,7 @@ void render_buffers_host_copy_denoised(RenderBuffers *dst,
   DCHECK_EQ(dst_params.width, src_params.width);
   /* TODO(sergey): More sanity checks to avoid buffer overrun. */

-  /* Create a map of pass ofsets to be copied.
+  /* Create a map of pass offsets to be copied.
    * Assume offsets are different to allow copying passes between buffers with different set of
    * passes. */

@@ -116,7 +116,7 @@ class BufferParams : public Node {
    *
    * When the scene passes are given, the buffer passes will be created from them and stored in
    * this params, and then params are updated for those passes.
-   * The `update_passes()` without parameters updates offsets and stries which are stored outside
+   * The `update_passes()` without parameters updates offsets and strides which are stored outside
    * of the passes. */
   void update_passes();
   void update_passes(const vector<Pass *> &scene_passes);
@@ -140,7 +140,7 @@ class BufferParams : public Node {
  protected:
   void reset_pass_offset();

-  /* Multipled by 2 to be able to store noisy and denoised pass types. */
+  /* Multiplied by 2 to be able to store noisy and denoised pass types. */
   static constexpr int kNumPassOffsets = PASS_NUM * 2;

   /* Indexed by an index derived from pass type and mode, indicates offset of the corresponding
@@ -171,7 +171,7 @@ class RenderBuffers {

 /* Copy denoised passes form source to destination.
  *
- * Buffer parameters are provided explicitly, allowing to copy pixelks between render buffers which
+ * Buffer parameters are provided explicitly, allowing to copy pixels between render buffers which
  * content corresponds to a render result at a non-unit resolution divider.
  *
  * `src_offset` allows to offset source pixel index which is used when a fraction of the source
@@ -117,7 +117,7 @@ class GPUDisplay {
    *
    * NOTE: The GPUDisplay should be marked for an update being in process with `update_begin()`.
    *
-   * NOTE: Texture buffer can not be mapped while graphics interopeability is active. This means
+   * NOTE: Texture buffer can not be mapped while graphics interoperability is active. This means
    * that `map_texture_buffer()` is not allowed between `graphics_interop_begin()` and
    * `graphics_interop_end()` calls.
    */
@@ -125,7 +125,7 @@ class GPUDisplay {
   /* Map pixels memory form texture to a buffer available for write from CPU. Width and height will
    * define a requested size of the texture to write to.
    * Upon success a non-null pointer is returned and the texture buffer is to be unmapped.
-   * If an error happens during mapping, or if mapoping is not supported by this GPU display a
+   * If an error happens during mapping, or if mapping is not supported by this GPU display a
    * null pointer is returned and the buffer is NOT to be unmapped.
    *
    * NOTE: Usually the implementation will rely on a GPU context of some sort, and the GPU context
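A hedged usage sketch of the mapping contract this hunk describes (the helper below and its exact signatures are assumptions based on this header, not quoted code): only a successful map may be written to and unmapped:

    void upload_pixels_sketch(GPUDisplay *display, const half4 *pixels, int width, int height)
    {
      half4 *mapped = display->map_texture_buffer();
      if (!mapped) {
        return; /* Mapping failed or unsupported: the buffer must NOT be unmapped. */
      }
      memcpy(mapped, pixels, sizeof(half4) * size_t(width) * height);
      display->unmap_texture_buffer();
    }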
@@ -149,7 +149,7 @@ class GPUDisplay {
    * device API. */
   DeviceGraphicsInteropDestination graphics_interop_get();

-  /* (De)activate GPU display for graphics interoperability outside of regular display udpate
+  /* (De)activate GPU display for graphics interoperability outside of regular display update
    * routines. */
   virtual void graphics_interop_activate();
   virtual void graphics_interop_deactivate();
@@ -206,8 +206,8 @@ class GPUDisplay {
   GPUDisplayParams params_;

   /* Mark texture as its content has been updated.
-   * Used from places which knows that the texture content has been brough up-to-date, so that the
-   * drawing knows whether it can be performed, and whether drawing happenned with an up-to-date
+   * Used from places which knows that the texture content has been brought up-to-date, so that the
+   * drawing knows whether it can be performed, and whether drawing happened with an up-to-date
    * texture state. */
   void mark_texture_updated();

@@ -116,7 +116,7 @@ Session::~Session()
   }
 #endif

-  /* Make sure path tracer is destroyed before the deviec. This is needed because destruction might
+  /* Make sure path tracer is destroyed before the device. This is needed because destruction might
    * need to access device for device memory free. */
   /* TODO(sergey): Convert device to be unique_ptr, and rely on C++ to destruct objects in the
    * pre-defined order. */
@@ -612,7 +612,7 @@ void Session::collect_statistics(RenderStats *render_stats)
 }

 /* --------------------------------------------------------------------
- * Tile and tile pixels aceess.
+ * Tile and tile pixels access.
  */

 bool Session::has_multiple_render_tiles() const
@@ -650,7 +650,7 @@ bool Session::copy_render_tile_from_device()
 bool Session::get_render_tile_pixels(const string &pass_name, int num_components, float *pixels)
 {
   /* NOTE: The code relies on a fact that session is fully update and no scene/buffer modification
-   * is happenning while this function runs. */
+   * is happening while this function runs. */

   const BufferParams &buffer_params = path_trace_->get_render_tile_params();

@@ -689,7 +689,7 @@ bool Session::set_render_tile_pixels(const string &pass_name,
                                      const float *pixels)
 {
   /* NOTE: The code relies on a fact that session is fully update and no scene/buffer modification
-   * is happenning while this function runs. */
+   * is happening while this function runs. */

   const BufferPass *pass = buffer_params_.find_pass(pass_name);
   if (!pass) {
@@ -120,7 +120,7 @@ class Session {

   /* Callback is invoked by tile manager whenever on-dist tiles storage file is closed after
    * writing. Allows an engine integration to keep track of those files without worry about
-   * transfering the information when it needs to re-create session during rendering. */
+   * transferring the information when it needs to re-create session during rendering. */
   function<void(string_view)> full_buffer_written_cb;

   explicit Session(const SessionParams &params, const SceneParams &scene_params);
@@ -128,7 +128,7 @@ class Session {

   void start();

-  /* When quick cancel is requested path tracing is cancelles as soon as possible, without waiting
+  /* When quick cancel is requested path tracing is canceled as soon as possible, without waiting
    * for the buffer to be uniformly sampled. */
   void cancel(bool quick = false);

@@ -154,7 +154,7 @@ class Session {
   void collect_statistics(RenderStats *stats);

   /* --------------------------------------------------------------------
-   * Tile and tile pixels aceess.
+   * Tile and tile pixels access.
    */

   bool has_multiple_render_tiles() const;
@@ -282,7 +282,7 @@ static bool buffer_params_from_image_spec_atttributes(BufferParams *buffer_param

 /* Configure image specification for the given buffer parameters and passes.
  *
- * Image channels will ber strictly ordered to match content of corresponding buffer, and the
+ * Image channels will be strictly ordered to match content of corresponding buffer, and the
  * metadata will be set so that the render buffers and passes can be reconstructed from it.
  *
  * If the tile size different from (0, 0) the image specification will be configured to use the
@@ -358,7 +358,7 @@ void TileManager::update(const BufferParams &params, const Scene *scene)

   buffer_params_ = params;

-  /* TODO(sergey): Proper Error handling, so that if configuration has failed we dont' attempt to
+  /* TODO(sergey): Proper Error handling, so that if configuration has failed we don't attempt to
    * write to a partially configured file. */
   configure_image_spec_from_buffer(&write_state_.image_spec, buffer_params_, tile_size_);

@@ -94,7 +94,7 @@ class TileManager {
    * The file will be considered final, all handles to it will be closed. */
   void finish_write_tiles();

-  /* Check whether any tile ahs been written to disk. */
+  /* Check whether any tile has been written to disk. */
   inline bool has_written_tiles() const
   {
     return write_state_.num_tiles_written != 0;
@@ -154,8 +154,8 @@ void InstancesComponent::ensure_geometry_instances()

 /**
  * With write access to the instances component, the data in the instanced geometry sets can be
- * changed. This is a function on the component rather than each reference to ensure const
- * correct-ness for that reason.
+ * changed. This is a function on the component rather than each reference to ensure `const`
+ * correctness for that reason.
  */
 GeometrySet &InstancesComponent::geometry_set_from_reference(const int reference_index)
 {
@@ -600,7 +600,7 @@ static bool BKE_gpencil_stroke_extra_points(bGPDstroke *gps,
  * \param dist: Length of the added section.
  * \param overshoot_fac: Relative length of the curve which is used to determine the extension.
  * \param mode: Affect to Start, End or Both extremes (0->Both, 1->Start, 2->End)
- * \param follow_curvature: True for appproximating curvature of given overshoot.
+ * \param follow_curvature: True for approximating curvature of given overshoot.
  * \param extra_point_count: When follow_curvature is true, use this amount of extra points
  */
 bool BKE_gpencil_stroke_stretch(bGPDstroke *gps,
@@ -1118,7 +1118,7 @@ static Mesh *mesh_new_from_mball_object(Object *object)
    * balls and all evaluated child meta balls (since polygonization is only stored in the mother
    * ball).
    *
-   * We create empty mesh so scripters don't run into None objects. */
+   * Create empty mesh so script-authors don't run into None objects. */
   if (!DEG_is_evaluated_object(object) || object->runtime.curve_cache == nullptr ||
       BLI_listbase_is_empty(&object->runtime.curve_cache->disp)) {
     return (Mesh *)BKE_id_new_nomain(ID_ME, ((ID *)object->data)->name + 2);
@@ -1271,7 +1271,7 @@ void file_params_rename_end(wmWindowManager *wm,
   /* Ensure smooth-scroll timer is active, even if not needed, because that way rename state is
    * handled properly. */
   file_params_invoke_rename_postscroll(wm, win, sfile);
-  /* Also always activate the rename file, even if renaming was cancelled. */
+  /* Also always activate the rename file, even if renaming was canceled. */
   file_params_renamefile_activate(sfile, params);
 }

@@ -1757,7 +1757,7 @@ static void draw_seq_strip_thumbnail(View2D *v2d,
   }
   /* Store recently rendered frames, so they can be reused when zooming. */
   else if (!sequencer_thumbnail_v2d_is_navigating(C)) {
-    /* Clear images in frame range occupied bynew thumbnail. */
+    /* Clear images in frame range occupied by new thumbnail. */
     last_displayed_thumbnails_list_cleanup(
         last_displayed_thumbnails, thumb_x_start, thumb_x_end);
     /* Insert new thumbnail frame to list. */
@@ -140,8 +140,8 @@ static void applyLength(LengthGpencilModifierData *lmd, bGPdata *gpd, bGPDstroke
   /* HACK: The second #overshoot_fac needs to be adjusted because it is not
    * done in the same stretch call, because it can have a different length.
    * The adjustment needs to be stable when
-   * ceil(overshoot_fac*(gps->totpoints - 2)) is used in stretch and never
-   * produce a result highter than totpoints - 2. */
+   * `ceil(overshoot_fac*(gps->totpoints - 2))` is used in stretch and never
+   * produce a result higher than `totpoints - 2`. */
   const float second_overshoot_fac = lmd->overshoot_fac * (totpoints - 2) /
                                      ((float)gps->totpoints - 2) *
                                      (1.0f - 0.1f / (totpoints - 1.0f));
@@ -155,7 +155,7 @@ class SampleCurveFunction : public fn::MultiFunction {
       spline_indices[i] = std::max(index, 0);
     }

-    /* Storing lookups in an array is unecessary but will simplify custom attribute transfer. */
+    /* Storing lookups in an array is unnecessary but will simplify custom attribute transfer. */
     Array<Spline::LookupResult> lookups(mask.min_array_size());
     for (const int i : mask) {
       const float length_in_spline = lengths[i] - spline_lengths_[spline_indices[i]];
@@ -428,7 +428,7 @@ ID *WM_drag_get_local_ID_or_import_from_asset(const wmDrag *drag, int idcode)
 }

 /**
- * \brief Free asset ID imported for cancelled drop.
+ * \brief Free asset ID imported for canceled drop.
  *
  * If the asset was imported (linked/appended) using #WM_drag_get_local_ID_or_import_from_asset()`
  * (typically via a #wmDropBox.copy() callback), we want the ID to be removed again if the drop