Cleanup: spelling

Campbell Barton 2020-11-20 11:39:03 +11:00
parent 5f1bb8da96
commit 25266caa45
11 changed files with 20 additions and 18 deletions

View File

@@ -582,7 +582,7 @@ ccl_device_forceinline void triangle_light_sample(KernelGlobals *kg,
ccl_device int light_distribution_sample(KernelGlobals *kg, float *randu)
{
/* This is basically std::upper_bound as used by pbrt, to find a point light or
/* This is basically std::upper_bound as used by PBRT, to find a point light or
* triangle to emit from, proportional to area. A good improvement would be to
* also sample proportional to power, though it's not so well defined with
* arbitrary shaders. */

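For reference, a minimal sketch of the std::upper_bound pattern the comment names: binary-search a monotonically increasing CDF for the first entry strictly greater than the random sample. The names `cdf` and `num_entries` are illustrative stand-ins, not the kernel's actual light-distribution data.

/* Sketch only: returns the index of the first CDF entry > randu. */
static int cdf_sample_upper_bound(const float *cdf, int num_entries, float randu)
{
  int first = 0;
  int count = num_entries;
  while (count > 0) {
    int step = count >> 1;
    int middle = first + step;
    if (cdf[middle] <= randu) {
      first = middle + 1;
      count -= step + 1;
    }
    else {
      count = step;
    }
  }
  return first;
}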
View File

@@ -31,7 +31,7 @@ ccl_device float3 background_map_sample(KernelGlobals *kg, float randu, float ra
int res_y = kernel_data.background.map_res_y;
int cdf_width = res_x + 1;
/* this is basically std::lower_bound as used by pbrt */
/* This is basically std::lower_bound as used by PBRT. */
int first = 0;
int count = res_y;
@@ -58,7 +58,7 @@ ccl_device float3 background_map_sample(KernelGlobals *kg, float randu, float ra
float dv = inverse_lerp(cdf_v.y, cdf_next_v.y, randv);
float v = (index_v + dv) / res_y;
/* this is basically std::lower_bound as used by pbrt */
/* This is basically std::lower_bound as used by PBRT. */
first = 0;
count = res_x;
while (count > 0) {

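The lower_bound variant used here differs from the upper_bound sketch above only in the comparison: it keeps searching while the entry is strictly less than the sample, so it returns the first entry not less than it. Again a sketch with illustrative names:

/* Sketch only: returns the index of the first CDF entry >= randv.
 * Identical to the upper_bound search except for `<` in place of `<=`. */
static int cdf_sample_lower_bound(const float *cdf, int num_entries, float randv)
{
  int first = 0;
  int count = num_entries;
  while (count > 0) {
    int step = count >> 1;
    int middle = first + step;
    if (cdf[middle] < randv) {
      first = middle + 1;
      count -= step + 1;
    }
    else {
      count = step;
    }
  }
  return first;
}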
View File

@@ -171,7 +171,7 @@ void Object::compute_bounds(bool motion_blur)
bounds = BoundBox::empty;
/* todo: this is really terrible. according to pbrt there is a better
/* TODO: this is really terrible. According to PBRT there is a better
* way to find this iteratively, but I have not found an implementation yet,
* nor tried to implement it myself. */
for (float t = 0.0f; t < 1.0f; t += (1.0f / 128.0f)) {

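A sketch of the brute-force accumulation the TODO describes: sample the motion at fixed time steps and union the bounds of each sample. `bounds_at_time()` is a hypothetical helper, not the actual Cycles API.

/* Sketch only: grow the object's bounds across 128 sampled motion times. */
bounds = BoundBox::empty;
for (float t = 0.0f; t < 1.0f; t += (1.0f / 128.0f)) {
  bounds.grow(bounds_at_time(t)); /* hypothetical helper */
}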
View File

@@ -239,7 +239,7 @@ void MEM_use_lockfree_allocator(void);
*
* Use for debug purposes. This allocator wraps a lock section around every allocator call, which
* makes it slow. What is gained is the ability to keep a list of allocated blocks (in
* addition to the trackign of number of allocations and amount of allocated bytes).
* addition to tracking the number of allocations and the amount of allocated bytes).
*
* NOTE: The switch between allocator types can only happen before any allocation has happened. */
void MEM_use_guarded_allocator(void);
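A small usage sketch of the note above: the allocator type has to be chosen before anything allocates, typically at the very top of main(). Illustrative only.

/* Sketch only: switch to the guarded allocator before the first
 * MEM_* allocation happens. */
int main(void)
{
  MEM_use_guarded_allocator();
  /* ... all allocations happen after this point ... */
  return 0;
}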
@@ -249,7 +249,7 @@ void MEM_use_guarded_allocator(void);
#endif /* __cplusplus */
#ifdef __cplusplus
/* alloc funcs for C++ only */
/* Allocation functions (for C++ only). */
# define MEM_CXX_CLASS_ALLOC_FUNCS(_id) \
public: \
void *operator new(size_t num_bytes) \

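For context, a hedged usage sketch of the macro above: placing it in a class body gives the class new/delete operators that route through the MEM_* allocator, so instances are tracked like any other guarded allocation. The class name is made up.

/* Sketch only: ExampleData is an illustrative class name. */
class ExampleData {
 public:
  int value;

  MEM_CXX_CLASS_ALLOC_FUNCS("ExampleData")
};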
View File

@@ -99,7 +99,7 @@ void aligned_free(void *ptr)
/* Perform assert checks on allocator type change.
*
* Helps catch issues (in debug builds) caused by an unintended allocator type change when there
* are allocation happenned. */
* are allocations already made. */
static void assert_for_allocator_change(void)
{
/* NOTE: Assume that there is no "sticky" internal state which would make switching allocator

View File

@@ -777,7 +777,7 @@ static void poly_edge_loop_islands_calc(const MEdge *medge,
if (UNLIKELY(gid_bit > 31)) {
/* All bits used in contiguous smooth groups, we can't do much!
* Note: this is *very* unlikely - theoretically, four groups are enough,
* I don't think we can reach this goal with such a simple algo,
* I don't think we can reach this goal with such a simple algorithm,
* but I don't think we'll ever need all 32 groups either!
*/
printf(

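The 32-group ceiling mentioned in the note comes from packing one smooth group per bit of a 32-bit integer; a minimal illustration with invented names:

/* Sketch only: one bit per contiguous smooth group, so bit 31 is the last
 * usable one and gid_bit > 31 means the id space is exhausted. */
unsigned int poly_group_id = 1u << gid_bit;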
View File

@@ -5537,7 +5537,7 @@ void vcloud_estimate_transform_v3(const int list_size,
/* build 'projection' matrix */
for (a = 0; a < list_size; a++) {
sub_v3_v3v3(va, rpos[a], accu_rcom);
/* mul_v3_fl(va, bp->mass); mass needs renormalzation here ?? */
/* mul_v3_fl(va, bp->mass); mass needs re-normalization here ?? */
sub_v3_v3v3(vb, pos[a], accu_com);
/* mul_v3_fl(va, rp->mass); */
m[0][0] += va[0] * vb[0];
@@ -5571,11 +5571,11 @@ void vcloud_estimate_transform_v3(const int list_size,
stunt[0] = q[0][0];
stunt[1] = q[1][1];
stunt[2] = q[2][2];
/* renormalizing for numeric stability */
/* Re-normalizing for numeric stability. */
mul_m3_fl(q, 1.0f / len_v3(stunt));
/* this is pretty much Polardecompose 'inline' the algo based on Higham's thesis */
/* without the far case ... but seems to work here pretty neat */
/* This is pretty much an inlined polar decomposition, based on the algorithm from Higham's thesis,
* without the far case ... but it seems to work well enough here. */
odet = 0.0f;
ndet = determinant_m3_array(q);
while ((odet - ndet) * (odet - ndet) > eps && i < imax) {

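For context, a sketch of the polar-decomposition iteration from Higham's thesis that the comment refers to: repeatedly average the matrix with its inverse transpose until the determinant stabilizes. The BLI m3 helpers are real, but this loop body is a simplified sketch, not the exact code.

/* Sketch only: Q converges to the nearest rotation via
 * Q <- 0.5f * (Q + Q^-T). */
float q_inv_t[3][3];
while ((odet - ndet) * (odet - ndet) > eps && i < imax) {
  invert_m3_m3(q_inv_t, q);   /* q_inv_t = Q^-1 */
  transpose_m3(q_inv_t);      /* q_inv_t = Q^-T */
  add_m3_m3m3(q, q, q_inv_t); /* Q = Q + Q^-T */
  mul_m3_fl(q, 0.5f);         /* Q = 0.5 * (Q + Q^-T) */
  odet = ndet;
  ndet = determinant_m3_array(q);
  i++;
}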
View File

@@ -397,7 +397,7 @@ static void cuboid_do(CastModifierData *cmd,
facm = 1.0f - fac;
}
/* The algo used to project the vertices to their
/* The algorithm used to project the vertices to their
* bounding box (bb) is pretty simple:
* for each vertex v:
* 1) find which octant v is in;

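A minimal sketch of step 1 with invented names: classify a vertex into one of the eight octants by comparing each coordinate against the bounding-box center, packing the three sign tests into a 3-bit index.

/* Sketch only: 3-bit octant index, one bit per axis. */
int octant = (co[0] > center[0] ? 1 : 0) |
             (co[1] > center[1] ? 2 : 0) |
             (co[2] > center[2] ? 4 : 0);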
View File

@@ -40,7 +40,7 @@ static void node_composit_init_color_spill(bNodeTree *UNUSED(ntree), bNode *node
NodeColorspill *ncs = MEM_callocN(sizeof(NodeColorspill), "node colorspill");
node->storage = ncs;
node->custom1 = 2; /* green channel */
node->custom2 = 0; /* simple limit algo*/
node->custom2 = 0; /* simple limit algorithm */
ncs->limchan = 0; /* limit by red */
ncs->limscale = 1.0f; /* limit scaling factor */
ncs->unspill = 0; /* do not use unspill */

View File

@@ -278,7 +278,7 @@ static void seq_split_set_left_offset(Sequence *seq, int timeline_frame)
* \param seq: Sequence to be split
* \param timeline_frame: frame at which seq is split.
* \param method: affects the type of offset applied to resize the Sequence
* \return poitner to created Sequence. This is always Sequence on right side.
* \return The newly created sequence strip; this is always the Sequence on the right side.
*/
Sequence *SEQ_edit_strip_split(Main *bmain,
Scene *scene,

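A hedged usage sketch of the function documented above. The hunk truncates the parameter list after `scene`, so the remaining arguments here are assumptions pieced together from the \param docs, and SEQ_SPLIT_SOFT is a guessed method constant.

/* Sketch only: every argument after `scene` is an assumption. */
Sequence *right_side = SEQ_edit_strip_split(bmain, scene, seqbase, seq, timeline_frame, SEQ_SPLIT_SOFT);
/* The returned strip is always the one on the right side of the split. */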
View File

@@ -140,8 +140,10 @@ bool BKE_sequence_base_isolated_sel_check(ListBase *seqbase)
return true;
}
/* use to impose limits when dragging/extending - so impossible situations don't happen
* Cant use the SEQ_LEFTSEL and SEQ_LEFTSEL directly because the strip may be in a metastrip */
/**
* Use to impose limits when dragging/extending - so impossible situations don't happen.
* Can't use the #SEQ_LEFTSEL and #SEQ_RIGHTSEL directly because the strip may be in a meta-strip.
*/
void BKE_sequence_tx_handle_xlimits(Sequence *seq, int leftflag, int rightflag)
{
if (leftflag) {
@@ -214,7 +216,7 @@ bool BKE_sequence_tx_test(Sequence *seq)
/**
* Return \a true if given \a seq needs a complete cleanup of its cache when it is transformed.
*
* Some (effect) strip types need a complete recache of themselves when they are transformed,
* Some (effect) strip types need a complete re-cache of themselves when they are transformed,
* because they do not 'contain' anything and do not have any explicit relations to other strips.
*/
bool BKE_sequence_tx_fullupdate_test(Sequence *seq)