Merge branch 'master' into refactor-mesh-position-generic

This commit is contained in:
Hans Goudey 2022-09-15 11:46:47 -05:00
commit f60bd397af
54 changed files with 767 additions and 368 deletions

View File

@ -418,6 +418,13 @@ function(blender_add_test_lib
library_deps
)
# Not currently supported for Python module due to different required
# Python link flags.
if(WITH_PYTHON_MODULE)
add_custom_target(${name})
return()
endif()
add_cc_flags_custom_test(${name} PARENT_SCOPE)
# Otherwise external projects will produce warnings that we cannot fix.
@ -464,6 +471,13 @@ function(blender_add_test_executable
library_deps
)
# Not currently supported for Python module due to different required
# Python link flags.
if(WITH_PYTHON_MODULE)
add_custom_target(${name})
return()
endif()
add_cc_flags_custom_test(${name} PARENT_SCOPE)
# Otherwise external projects will produce warnings that we cannot fix.

View File

@ -504,12 +504,16 @@ if(WITH_JACK)
endif()
if(WITH_PYTHON)
set(PYTHON_VERSION 3.10) # CACHE STRING)
# Cache version for make_bpy_wheel.py to detect.
unset(PYTHON_VERSION CACHE)
set(PYTHON_VERSION "3.10" CACHE STRING "Python version")
string(REPLACE "." "" _PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
set(PYTHON_LIBRARY ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/libs/python${_PYTHON_VERSION_NO_DOTS}.lib)
set(PYTHON_LIBRARY_DEBUG ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/libs/python${_PYTHON_VERSION_NO_DOTS}_d.lib)
set(PYTHON_EXECUTABLE ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/bin/python$<$<CONFIG:Debug>:_d>.exe)
set(PYTHON_INCLUDE_DIR ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/include)
set(PYTHON_NUMPY_INCLUDE_DIRS ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/lib/site-packages/numpy/core/include)
set(NUMPY_FOUND ON)

View File

@ -3,20 +3,22 @@
# First generate the manifest for tests since it will not need the dependency on the CRT.
configure_file(${CMAKE_SOURCE_DIR}/release/windows/manifest/blender.exe.manifest.in ${CMAKE_CURRENT_BINARY_DIR}/tests.exe.manifest @ONLY)
# Always detect system libraries, since they are also used by oneAPI.
# But don't always install them, only for WITH_WINDOWS_BUNDLE_CRT=ON.
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
# This sometimes can change when updates are installed and the compiler version
# changes, so test if it exists and if not, give InstallRequiredSystemLibraries
# another chance to figure out the path.
if(MSVC_REDIST_DIR AND NOT EXISTS "${MSVC_REDIST_DIR}")
unset(MSVC_REDIST_DIR CACHE)
endif()
include(InstallRequiredSystemLibraries)
if(WITH_WINDOWS_BUNDLE_CRT)
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
# This sometimes can change when updates are installed and the compiler version
# changes, so test if it exists and if not, give InstallRequiredSystemLibraries
# another chance to figure out the path.
if(MSVC_REDIST_DIR AND NOT EXISTS "${MSVC_REDIST_DIR}")
unset(MSVC_REDIST_DIR CACHE)
endif()
include(InstallRequiredSystemLibraries)
# ucrtbase(d).dll cannot be in the manifest, due to the way windows 10 handles
# redirects for this dll, for details see T88813.
foreach(lib ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS})

View File

@ -0,0 +1,167 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
import argparse
import make_utils
import os
import re
import platform
import string
import setuptools
import sys
from typing import (
Generator,
Tuple,
List,
Optional,
Sequence,
)
# ------------------------------------------------------------------------------
# Generic Functions
def find_dominating_file(
        path: str,
        search: Sequence[str],
) -> str:
    """Walk upward from ``path`` looking for any of the names in ``search``.

    :arg path: Directory to start searching from.
    :arg search: File/directory names to look for at each level.
    :return: The full path of the first match found, or an empty string
        when the file-system root is reached without a match.
    """
    current = path
    while True:
        for candidate in search:
            candidate_path = os.path.join(current, candidate)
            if os.path.exists(candidate_path):
                return candidate_path
        parent = os.path.normpath(os.path.join(current, ".."))
        if parent == current:
            # Reached the root, nothing found.
            return ""
        current = parent
# ------------------------------------------------------------------------------
# CMake Cache Access
def cmake_cache_var_iter(filepath_cmake_cache: str) -> Generator[Tuple[str, str, str], None, None]:
    """Yield ``(variable, type, value)`` triples parsed from a CMake cache file.

    :arg filepath_cmake_cache: Path to a ``CMakeCache.txt`` file.
    :return: Generator of triples; the type element is ``""`` when the cache
        entry has no explicit ``:TYPE`` suffix.
    """
    # NOTE: this previously re-imported `re` locally, shadowing the
    # module-level import for no reason — removed.
    #
    # NOTE(review): both name groups in this pattern are optional, so a
    # malformed line containing `=` can still yield an entry with an empty
    # variable name — kept as-is to preserve the original behavior.
    re_cache = re.compile(r"([A-Za-z0-9_\-]+)?:?([A-Za-z0-9_\-]+)?=(.*)$")
    with open(filepath_cmake_cache, "r", encoding="utf-8") as cache_file:
        for line in cache_file:
            match = re_cache.match(line.strip())
            if match is not None:
                var, type_, val = match.groups()
                yield (var, type_ or "", val)
def cmake_cache_var(filepath_cmake_cache: str, var: str) -> Optional[str]:
    """Look up a single variable in a CMake cache file.

    :arg filepath_cmake_cache: Path to a ``CMakeCache.txt`` file.
    :arg var: Name of the cache variable to find.
    :return: The variable's value, or None when it is not present.
    """
    for name, _type, value in cmake_cache_var_iter(filepath_cmake_cache):
        if name == var:
            return value
    return None
def cmake_cache_var_or_exit(filepath_cmake_cache: str, var: str) -> str:
    """Look up a required variable in a CMake cache file, exiting on failure.

    :arg filepath_cmake_cache: Path to a ``CMakeCache.txt`` file.
    :arg var: Name of the cache variable to find.
    :return: The variable's value; the process exits with status 1 when the
        variable is missing.
    """
    value = cmake_cache_var(filepath_cmake_cache, var)
    if value is not None:
        return value
    print("Unable to find %r exiting!" % var)
    sys.exit(1)
# ------------------------------------------------------------------------------
# Main Function
def main() -> None:
    """Build a platform-specific ``bpy`` wheel from a Blender install directory.

    Reads the Python version from the build's CMake cache, packages the
    installed ``bpy`` directory with setuptools' ``bdist_wheel``, then moves
    the renamed wheel into the output directory.
    """
    # Parse arguments.
    parser = argparse.ArgumentParser(description="Make Python wheel package")
    parser.add_argument("install_dir")
    parser.add_argument("--build-dir", default=None)
    parser.add_argument("--output-dir", default=None)
    args = parser.parse_args()

    # Build and output directories default to the install directory.
    install_dir = os.path.abspath(args.install_dir)
    build_dir = os.path.abspath(args.build_dir) if args.build_dir else install_dir
    output_dir = os.path.abspath(args.output_dir) if args.output_dir else install_dir

    # Locate the CMake cache by walking up from the build directory.
    filepath_cmake_cache = find_dominating_file(build_dir, ("CMakeCache.txt",))
    if not filepath_cmake_cache:
        # Should never fail.
        print("Unable to find CMakeCache.txt in or above %r" % (build_dir))
        sys.exit(1)

    # Get the major and minor Python version.
    python_version = cmake_cache_var_or_exit(filepath_cmake_cache, "PYTHON_VERSION")
    # Strip non-digit characters from each component, then pad with zeros so
    # indexing [0] and [1] below is always safe.
    python_version_number = (
        tuple(int("".join(c for c in digit if c in string.digits)) for digit in python_version.split(".")) +
        # Support version without a minor version "3" (add zero).
        tuple((0, 0, 0))
    )
    # NOTE(review): unused variable — kept as-is.
    python_version_str = "%d.%d" % python_version_number[:2]

    # Get Blender version.
    blender_version_str = str(make_utils.parse_blender_version())

    # Set platform tag following conventions.
    if sys.platform == "darwin":
        target = cmake_cache_var_or_exit(filepath_cmake_cache, "CMAKE_OSX_DEPLOYMENT_TARGET").split(".")
        machine = cmake_cache_var_or_exit(filepath_cmake_cache, "CMAKE_OSX_ARCHITECTURES")
        platform_tag = "macosx_%d_%d_%s" % (int(target[0]), int(target[1]), machine)
    elif sys.platform == "win32":
        platform_tag = "win_%s" % (platform.machine().lower())
    elif sys.platform == "linux":
        # Tag with the glibc version of the machine building the wheel.
        glibc = os.confstr("CS_GNU_LIBC_VERSION").split()[1].split(".")
        platform_tag = "manylinux_%s_%s_%s" % (glibc[0], glibc[1], platform.machine().lower())
    else:
        print("Unsupported platform %s" % (sys.platform))
        sys.exit(1)

    # setuptools resolves `package_data` paths relative to the current
    # directory, so run from the install directory.
    os.chdir(install_dir)

    # Include all files recursively.
    def package_files(root_dir: str) -> List[str]:
        paths = []
        for path, dirs, files in os.walk(root_dir):
            paths += [os.path.join("..", path, f) for f in files]
        return paths

    # Ensure this wheel is marked platform specific.
    class BinaryDistribution(setuptools.dist.Distribution):
        # NOTE(review): parameter is conventionally named `self`.
        def has_ext_modules(foo):
            return True

    # Build wheel.
    sys.argv = [sys.argv[0], "bdist_wheel"]

    setuptools.setup(
        name="bpy",
        version=blender_version_str,
        install_requires=["cython", "numpy", "requests", "zstandard"],
        python_requires="==%d.%d.*" % (python_version_number[0], python_version_number[1]),
        packages=["bpy"],
        package_data={"": package_files("bpy")},
        distclass=BinaryDistribution,
        options={"bdist_wheel": {"plat_name": platform_tag}},
        description="Blender as a Python module",
        license="GPL-3.0",
        author="Blender Foundation",
        author_email="bf-committers@blender.org",
        url="https://www.blender.org"
    )

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Move wheel to output directory.
    dist_dir = os.path.join(install_dir, "dist")
    for f in os.listdir(dist_dir):
        if f.endswith(".whl"):
            # No apparent way to override this ABI version with setuptools, so rename.
            sys_py = "cp%d%d" % (sys.version_info.major, sys.version_info.minor)
            blender_py = "cp%d%d" % (python_version_number[0], python_version_number[1])
            renamed_f = f.replace(sys_py, blender_py)
            os.rename(os.path.join(dist_dir, f), os.path.join(output_dir, renamed_f))


if __name__ == "__main__":
    main()

View File

@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-2.0-or-later
import argparse
import dataclasses
import make_utils
import os
import re
import subprocess
@ -50,7 +50,7 @@ def main() -> None:
print(f"Output dir: {curdir}")
version = parse_blender_version(blender_srcdir)
version = make_utils.parse_blender_version()
tarball = tarball_path(curdir, version, cli_args)
manifest = manifest_path(tarball)
packages_dir = packages_path(curdir, cli_args)
@ -62,53 +62,7 @@ def main() -> None:
print("Done!")
@dataclasses.dataclass
class BlenderVersion:
version: int # 293 for 2.93.1
patch: int # 1 for 2.93.1
cycle: str # 'alpha', 'beta', 'release', maybe others.
@property
def is_release(self) -> bool:
return self.cycle == "release"
def __str__(self) -> str:
"""Convert to version string.
>>> str(BlenderVersion(293, 1, "alpha"))
'2.93.1-alpha'
>>> str(BlenderVersion(327, 0, "release"))
'3.27.0'
"""
version_major = self.version // 100
version_minor = self.version % 100
as_string = f"{version_major}.{version_minor}.{self.patch}"
if self.is_release:
return as_string
return f"{as_string}-{self.cycle}"
def parse_blender_version(blender_srcdir: Path) -> BlenderVersion:
version_path = blender_srcdir / "source/blender/blenkernel/BKE_blender_version.h"
version_info = {}
line_re = re.compile(r"^#define (BLENDER_VERSION[A-Z_]*)\s+([0-9a-z]+)$")
with version_path.open(encoding="utf-8") as version_file:
for line in version_file:
match = line_re.match(line.strip())
if not match:
continue
version_info[match.group(1)] = match.group(2)
return BlenderVersion(
int(version_info["BLENDER_VERSION"]),
int(version_info["BLENDER_VERSION_PATCH"]),
version_info["BLENDER_VERSION_CYCLE"],
)
def tarball_path(output_dir: Path, version: BlenderVersion, cli_args: Any) -> Path:
def tarball_path(output_dir: Path, version: make_utils.BlenderVersion, cli_args: Any) -> Path:
extra = ""
if cli_args.include_packages:
extra = "-with-libraries"
@ -148,7 +102,7 @@ def packages_path(current_directory: Path, cli_args: Any) -> Optional[Path]:
def create_manifest(
version: BlenderVersion,
version: make_utils.BlenderVersion,
outpath: Path,
blender_srcdir: Path,
packages_dir: Optional[Path],
@ -170,9 +124,9 @@ def main_files_to_manifest(blender_srcdir: Path, outfile: TextIO) -> None:
def submodules_to_manifest(
blender_srcdir: Path, version: BlenderVersion, outfile: TextIO
blender_srcdir: Path, version: make_utils.BlenderVersion, outfile: TextIO
) -> None:
skip_addon_contrib = version.is_release
skip_addon_contrib = version.is_release()
assert not blender_srcdir.is_absolute()
for line in git_command("-C", blender_srcdir, "submodule"):
@ -200,7 +154,7 @@ def packages_to_manifest(outfile: TextIO, packages_dir: Path) -> None:
def create_tarball(
version: BlenderVersion, tarball: Path, manifest: Path, blender_srcdir: Path, packages_dir: Optional[Path]
version: make_utils.BlenderVersion, tarball: Path, manifest: Path, blender_srcdir: Path, packages_dir: Optional[Path]
) -> None:
print(f'Creating archive: "{tarball}" ...', end="", flush=True)
command = ["tar"]

View File

@ -9,6 +9,7 @@ import re
import shutil
import subprocess
import sys
from pathlib import Path
def call(cmd, exit_on_error=True, silent=False):
@ -101,3 +102,51 @@ def command_missing(command):
return shutil.which(command) is None
else:
return False
class BlenderVersion:
    """Blender version: packed major/minor number, patch number, release cycle."""

    def __init__(self, version, patch, cycle):
        # 293 for 2.93.1
        self.version = version
        # 1 for 2.93.1
        self.patch = patch
        # 'alpha', 'beta', 'release', maybe others.
        self.cycle = cycle

    def is_release(self) -> bool:
        """True when this version marks a final release."""
        return self.cycle == "release"

    def __str__(self) -> str:
        """Convert to version string.
        >>> str(BlenderVersion(293, 1, "alpha"))
        '2.93.1-alpha'
        >>> str(BlenderVersion(327, 0, "release"))
        '3.27.0'
        """
        base = f"{self.version // 100}.{self.version % 100}.{self.patch}"
        return base if self.is_release() else f"{base}-{self.cycle}"
def parse_blender_version() -> BlenderVersion:
    """Read the Blender version from ``BKE_blender_version.h`` in the source tree.

    The source tree root is located relative to this script's own path.

    :return: A :class:`BlenderVersion` built from the ``BLENDER_VERSION``,
        ``BLENDER_VERSION_PATCH`` and ``BLENDER_VERSION_CYCLE`` defines.
    """
    blender_srcdir = Path(__file__).absolute().parent.parent.parent
    version_path = blender_srcdir / "source/blender/blenkernel/BKE_blender_version.h"
    line_re = re.compile(r"^#define (BLENDER_VERSION[A-Z_]*)\s+([0-9a-z]+)$")

    version_info = {}
    with version_path.open(encoding="utf-8") as version_file:
        for line in version_file:
            match = line_re.match(line.strip())
            if match:
                version_info[match.group(1)] = match.group(2)

    return BlenderVersion(
        int(version_info["BLENDER_VERSION"]),
        int(version_info["BLENDER_VERSION_PATCH"]),
        version_info["BLENDER_VERSION_CYCLE"],
    )

View File

@ -1,7 +1,9 @@
:tocdepth: 2
Blender API Change Log
**********************
Change Log
**********
Changes in Blender's Python API between releases.
.. note, this document is auto generated by sphinx_changelog_gen.py

View File

@ -1,6 +1,6 @@
*******************
Reference API Usage
API Reference Usage
*******************
Blender has many interlinking data types which have an auto-generated reference API which often has the information

View File

@ -1,8 +1,8 @@
.. _info_overview:
*******************
Python API Overview
*******************
************
API Overview
************
The purpose of this document is to explain how Python and Blender fit together,
covering some of the functionality that may not be obvious from reading the API references

View File

@ -349,8 +349,10 @@ def api_changelog(args):
fw(""
":tocdepth: 2\n"
"\n"
"Blender API Change Log\n"
"**********************\n"
"Change Log\n"
"**********\n"
"\n"
"Changes in Blender's Python API between releases.\n"
"\n"
".. note, this document is auto generated by sphinx_changelog_gen.py\n"
"\n"

View File

@ -387,24 +387,25 @@ EXAMPLE_SET_USED = set()
# RST files directory.
RST_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, "rst"))
# extra info, not api reference docs
# stored in ./rst/info_*
# Extra info, not api reference docs stored in `./rst/info_*`.
# Pairs of (file, description), the title makes from the RST files are displayed before the description.
INFO_DOCS = (
("info_quickstart.rst",
"Quickstart: New to Blender or scripting and want to get your feet wet?"),
"New to Blender or scripting and want to get your feet wet?"),
("info_overview.rst",
"API Overview: A more complete explanation of Python integration"),
"A more complete explanation of Python integration."),
("info_api_reference.rst",
"API Reference Usage: examples of how to use the API reference docs"),
"Examples of how to use the API reference docs."),
("info_best_practice.rst",
"Best Practice: Conventions to follow for writing good scripts"),
"Conventions to follow for writing good scripts."),
("info_tips_and_tricks.rst",
"Tips and Tricks: Hints to help you while writing scripts for Blender"),
"Hints to help you while writing scripts for Blender."),
("info_gotcha.rst",
"Gotcha's: Some of the problems you may encounter when writing scripts"),
"Some of the problems you may encounter when writing scripts."),
("info_advanced.rst",
"Advanced use (topics which may not be required for typical usage)"),
("change_log.rst", "Change Log: List of changes since last Blender release"),
"Topics which may not be required for typical usage."),
("change_log.rst",
"List of changes since last Blender release"),
)
# Referenced indirectly.
INFO_DOCS_OTHER = (
@ -412,6 +413,10 @@ INFO_DOCS_OTHER = (
"info_advanced_blender_as_bpy.rst",
)
# Hide the actual TOC, use a separate list that links to the items.
# This is done so a short description can be included with each link.
USE_INFO_DOCS_FANCY_INDEX = True
# only support for properties atm.
RNA_BLACKLIST = {
# XXX messes up PDF!, really a bug but for now just workaround.
@ -1911,7 +1916,7 @@ except ModuleNotFoundError:
# fw(" 'collapse_navigation': True,\n")
fw(" 'sticky_navigation': False,\n")
fw(" 'navigation_depth': 1,\n")
# fw(" 'includehidden': True,\n")
fw(" 'includehidden': False,\n")
# fw(" 'titles_only': False\n")
fw(" }\n\n")
@ -1983,12 +1988,21 @@ def write_rst_index(basepath):
if not EXCLUDE_INFO_DOCS:
fw(".. toctree::\n")
if USE_INFO_DOCS_FANCY_INDEX:
fw(" :hidden:\n")
fw(" :maxdepth: 1\n")
fw(" :caption: Documentation\n\n")
for info, info_desc in INFO_DOCS:
fw(" %s <%s>\n" % (info_desc, info))
fw(" %s\n" % info)
fw("\n")
if USE_INFO_DOCS_FANCY_INDEX:
# Show a fake TOC, allowing for an extra description to be shown as well as the title.
fw(title_string("Documentation", "="))
for info, info_desc in INFO_DOCS:
fw("- :doc:`%s`: %s\n" % (info.removesuffix(".rst"), info_desc))
fw("\n")
fw(".. toctree::\n")
fw(" :maxdepth: 1\n")
fw(" :caption: Application Modules\n\n")

View File

@ -30,8 +30,8 @@ ccl_device_forceinline void film_write_denoising_features_surface(KernelGlobals
if (kernel_data.film.pass_denoising_depth != PASS_UNUSED) {
const Spectrum denoising_feature_throughput = INTEGRATOR_STATE(
state, path, denoising_feature_throughput);
const float denoising_depth = ensure_finite(average(denoising_feature_throughput) *
sd->ray_length);
const float depth = sd->ray_length - INTEGRATOR_STATE(state, ray, tmin);
const float denoising_depth = ensure_finite(average(denoising_feature_throughput) * depth);
film_write_pass_float(buffer + kernel_data.film.pass_denoising_depth, denoising_depth);
}

View File

@ -402,7 +402,7 @@ class IMAGE_MT_uvs(Menu):
layout.menu("IMAGE_MT_uvs_mirror")
layout.menu("IMAGE_MT_uvs_snap")
layout.prop_menu_enum(uv, "pixel_snap_mode")
layout.prop_menu_enum(uv, "pixel_round_mode")
layout.prop(uv, "lock_bounds")
layout.separator()

View File

@ -15,9 +15,9 @@
namespace blender::bke::curves {
/* --------------------------------------------------------------------
* Utility structs.
*/
/* -------------------------------------------------------------------- */
/** \name Utility Structs
* \{ */
/**
* Reference to a piecewise segment on a spline curve.
@ -37,7 +37,7 @@ struct CurveSegment {
/**
* Reference to a point on a piecewise curve (spline).
*
* Tracks indices of the neighbouring control/evaluated point pair associated with the segment
* Tracks indices of the neighboring control/evaluated point pair associated with the segment
* in which the point resides. Referenced point within the segment is defined by a
* normalized parameter in the range [0, 1].
*/
@ -75,7 +75,7 @@ class IndexRangeCyclic {
int64_t start_ = 0;
int64_t end_ = 0;
/* Index for the start and end of the entire iterable range which contains the iterated range
* (e.g. the point range for an indiviudal spline/curve within the entire Curves point domain).
* (e.g. the point range for an individual spline/curve within the entire Curves point domain).
*/
int64_t range_start_ = 0;
int64_t range_end_ = 0;
@ -302,9 +302,9 @@ class IndexRangeCyclic {
/** \} */
/* --------------------------------------------------------------------
* Utility functions.
*/
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
/**
* Copy the provided point attribute values between all curves in the #curve_ranges index

View File

@ -25,6 +25,9 @@ extern const char *POINTCLOUD_ATTR_RADIUS;
void *BKE_pointcloud_add(struct Main *bmain, const char *name);
void *BKE_pointcloud_add_default(struct Main *bmain, const char *name);
struct PointCloud *BKE_pointcloud_new_nomain(int totpoint);
void BKE_pointcloud_nomain_to_pointcloud(struct PointCloud *pointcloud_src,
struct PointCloud *pointcloud_dst,
bool take_ownership);
struct BoundBox *BKE_pointcloud_boundbox_get(struct Object *ob);
bool BKE_pointcloud_minmax(const struct PointCloud *pointcloud, float r_min[3], float r_max[3]);

View File

@ -252,6 +252,37 @@ PointCloud *BKE_pointcloud_new_nomain(const int totpoint)
return pointcloud;
}
void BKE_pointcloud_nomain_to_pointcloud(PointCloud *pointcloud_src,
PointCloud *pointcloud_dst,
bool take_ownership)
{
BLI_assert(pointcloud_src->id.tag & LIB_TAG_NO_MAIN);
eCDAllocType alloctype = CD_DUPLICATE;
if (take_ownership) {
bool has_any_referenced_layers = CustomData_has_referenced(&pointcloud_src->pdata);
if (!has_any_referenced_layers) {
alloctype = CD_ASSIGN;
}
}
CustomData_free(&pointcloud_dst->pdata, pointcloud_dst->totpoint);
const int totpoint = pointcloud_dst->totpoint = pointcloud_src->totpoint;
CustomData_copy(
&pointcloud_src->pdata, &pointcloud_dst->pdata, CD_MASK_ALL, alloctype, totpoint);
if (take_ownership) {
if (alloctype == CD_ASSIGN) {
/* Free the CustomData but keep the layers. */
CustomData_free_typemask(&pointcloud_src->pdata, pointcloud_src->totpoint, 0);
}
BKE_id_free(nullptr, pointcloud_src);
}
}
static std::optional<blender::bounds::MinMaxResult<float3>> point_cloud_bounds(
const PointCloud &pointcloud)
{

View File

@ -10,14 +10,14 @@
namespace blender::array_utils {
/**
* Fill the destination span by copying masked values from the src array. Threaded based on
* grainsize.
* Fill the destination span by copying masked values from the `src` array. Threaded based on
* grain-size.
*/
void copy(const GVArray &src, IndexMask selection, GMutableSpan dst, int64_t grain_size = 4096);
/**
* Fill the destination span by copying values from the src array. Threaded based on
* grainsize.
* Fill the destination span by copying values from the `src` array. Threaded based on
* grain-size.
*/
template<typename T>
inline void copy(const Span<T> src,

View File

@ -589,7 +589,7 @@ extern "C" {
/** Performs `offsetof(typeof(data), member) + sizeof((data)->member)` for non-gcc compilers. */
#define OFFSETOF_STRUCT_AFTER(_struct, _member) \
((((const char *)&((_struct)->_member)) - ((const char *)(_struct))) + \
((size_t)(((const char *)&((_struct)->_member)) - ((const char *)(_struct))) + \
sizeof((_struct)->_member))
/**

View File

@ -11,11 +11,13 @@
namespace blender::realtime_compositor {
/* -------------------------------------------------------------------------------------------------
* Conversion Operation
/* -------------------------------------------------------------------- */
/** \name Conversion Operation
*
* A simple operation that converts a result from a certain type to another. See the derived
* classes for more details. */
* classes for more details.
* \{ */
class ConversionOperation : public SimpleOperation {
public:
using SimpleOperation::SimpleOperation;
@ -37,13 +39,18 @@ class ConversionOperation : public SimpleOperation {
/* Get the shader the will be used for conversion. */
virtual GPUShader *get_conversion_shader() const = 0;
};
/* -------------------------------------------------------------------------------------------------
* Convert Float To Vector Operation
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Float to Vector Operation
*
* Takes a float result and outputs a vector result. All three components of the output are filled
* with the input float. */
* with the input float.
* \{ */
class ConvertFloatToVectorOperation : public ConversionOperation {
public:
ConvertFloatToVectorOperation(Context &context);
@ -53,11 +60,15 @@ class ConvertFloatToVectorOperation : public ConversionOperation {
GPUShader *get_conversion_shader() const override;
};
/* -------------------------------------------------------------------------------------------------
* Convert Float To Color Operation
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Float to Color Operation
*
* Takes a float result and outputs a color result. All three color channels of the output are
* filled with the input float and the alpha channel is set to 1. */
* filled with the input float and the alpha channel is set to 1.
* \{ */
class ConvertFloatToColorOperation : public ConversionOperation {
public:
ConvertFloatToColorOperation(Context &context);
@ -67,11 +78,15 @@ class ConvertFloatToColorOperation : public ConversionOperation {
GPUShader *get_conversion_shader() const override;
};
/* -------------------------------------------------------------------------------------------------
* Convert Color To Float Operation
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Color to Float Operation
*
* Takes a color result and outputs a float result. The output is the average of the three color
* channels, the alpha channel is ignored. */
* channels, the alpha channel is ignored.
* \{ */
class ConvertColorToFloatOperation : public ConversionOperation {
public:
ConvertColorToFloatOperation(Context &context);
@ -81,11 +96,15 @@ class ConvertColorToFloatOperation : public ConversionOperation {
GPUShader *get_conversion_shader() const override;
};
/* -------------------------------------------------------------------------------------------------
* Convert Color To Vector Operation
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Color to Vector Operation
*
* Takes a color result and outputs a vector result. The output is a copy of the three color
* channels to the three vector components. */
* channels to the three vector components.
* \{ */
class ConvertColorToVectorOperation : public ConversionOperation {
public:
ConvertColorToVectorOperation(Context &context);
@ -95,11 +114,18 @@ class ConvertColorToVectorOperation : public ConversionOperation {
GPUShader *get_conversion_shader() const override;
};
/* -------------------------------------------------------------------------------------------------
* Convert Vector To Float Operation
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Vector to Float Operation
*
* Takes a vector result and outputs a float result. The output is the average of the three
* components. */
* components.
* \{ */
class ConvertVectorToFloatOperation : public ConversionOperation {
public:
ConvertVectorToFloatOperation(Context &context);
@ -109,11 +135,15 @@ class ConvertVectorToFloatOperation : public ConversionOperation {
GPUShader *get_conversion_shader() const override;
};
/* -------------------------------------------------------------------------------------------------
* Convert Vector To Color Operation
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Vector to Color Operation
*
* Takes a vector result and outputs a color result. The output is a copy of the three vector
* components to the three color channels with the alpha channel set to 1. */
* components to the three color channels with the alpha channel set to 1.
* \{ */
class ConvertVectorToColorOperation : public ConversionOperation {
public:
ConvertVectorToColorOperation(Context &context);
@ -123,4 +153,6 @@ class ConvertVectorToColorOperation : public ConversionOperation {
GPUShader *get_conversion_shader() const override;
};
/** \} */
} // namespace blender::realtime_compositor

View File

@ -17,7 +17,7 @@ namespace blender::realtime_compositor {
using namespace nodes::derived_node_tree_types;
/**
Get the origin socket of the given node input. If the input is not linked, the socket itself is
* Get the origin socket of the given node input. If the input is not linked, the socket itself is
* returned. If the input is linked, the socket that is linked to it is returned, which could
* either be an input or an output. An input socket is returned when the given input is connected
* to an unlinked input of a group input node.

View File

@ -12,9 +12,9 @@
namespace blender::realtime_compositor {
/* -------------------------------------------------------------------------------------------------
* Conversion Operation.
*/
/* -------------------------------------------------------------------- */
/** \name Conversion Operation
* \{ */
void ConversionOperation::execute()
{
@ -79,9 +79,11 @@ SimpleOperation *ConversionOperation::construct_if_needed(Context &context,
return nullptr;
}
/* -------------------------------------------------------------------------------------------------
* Convert Float To Vector Operation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Float to Vector Operation
* \{ */
ConvertFloatToVectorOperation::ConvertFloatToVectorOperation(Context &context)
: ConversionOperation(context)
@ -102,9 +104,11 @@ GPUShader *ConvertFloatToVectorOperation::get_conversion_shader() const
return shader_manager().get("compositor_convert_float_to_vector");
}
/* -------------------------------------------------------------------------------------------------
* Convert Float To Color Operation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Float to Color Operation
* \{ */
ConvertFloatToColorOperation::ConvertFloatToColorOperation(Context &context)
: ConversionOperation(context)
@ -127,9 +131,11 @@ GPUShader *ConvertFloatToColorOperation::get_conversion_shader() const
return shader_manager().get("compositor_convert_float_to_color");
}
/* -------------------------------------------------------------------------------------------------
* Convert Color To Float Operation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Color to Float Operation
* \{ */
ConvertColorToFloatOperation::ConvertColorToFloatOperation(Context &context)
: ConversionOperation(context)
@ -151,9 +157,11 @@ GPUShader *ConvertColorToFloatOperation::get_conversion_shader() const
return shader_manager().get("compositor_convert_color_to_float");
}
/* -------------------------------------------------------------------------------------------------
* Convert Color To Vector Operation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Color to Vector Operation
* \{ */
ConvertColorToVectorOperation::ConvertColorToVectorOperation(Context &context)
: ConversionOperation(context)
@ -175,9 +183,11 @@ GPUShader *ConvertColorToVectorOperation::get_conversion_shader() const
return shader_manager().get("compositor_convert_color_to_vector");
}
/* -------------------------------------------------------------------------------------------------
* Convert Vector To Float Operation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Vector to Float Operation
* \{ */
ConvertVectorToFloatOperation::ConvertVectorToFloatOperation(Context &context)
: ConversionOperation(context)
@ -199,9 +209,11 @@ GPUShader *ConvertVectorToFloatOperation::get_conversion_shader() const
return shader_manager().get("compositor_convert_vector_to_float");
}
/* -------------------------------------------------------------------------------------------------
* Convert Vector To Color Operation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert Vector to Color Operation
* \{ */
ConvertVectorToColorOperation::ConvertVectorToColorOperation(Context &context)
: ConversionOperation(context)
@ -222,4 +234,6 @@ GPUShader *ConvertVectorToColorOperation::get_conversion_shader() const
return shader_manager().get("compositor_convert_vector_to_color");
}
/** \} */
} // namespace blender::realtime_compositor

View File

@ -13,9 +13,9 @@
namespace blender::realtime_compositor {
/* --------------------------------------------------------------------
* Texture Pool Key.
*/
/* -------------------------------------------------------------------- */
/** \name Texture Pool Key
* \{ */
TexturePoolKey::TexturePoolKey(int2 size, eGPUTextureFormat format) : size(size), format(format)
{
@ -37,9 +37,11 @@ bool operator==(const TexturePoolKey &a, const TexturePoolKey &b)
return a.size == b.size && a.format == b.format;
}
/* --------------------------------------------------------------------
* Texture Pool.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Texture Pool
* \{ */
GPUTexture *TexturePool::acquire(int2 size, eGPUTextureFormat format)
{
@ -81,4 +83,6 @@ void TexturePool::reset()
textures_.clear();
}
/** \} */
} // namespace blender::realtime_compositor

View File

@ -377,9 +377,9 @@ void Instance::update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view
}
}
/* NOTE: Name channels lowercase rgba so that compression rules check in OpenEXR DWA code uses
* loseless compression. Reportedly this naming is the only one which works good from the
* interoperability point of view. Using xyzw naming is not portable. */
/* NOTE: Name channels lowercase `rgba` so that compression rules check in OpenEXR DWA code uses
* lossless compression. Reportedly this naming is the only one which works good from the
* interoperability point of view. Using `xyzw` naming is not portable. */
auto register_cryptomatte_passes = [&](eViewLayerCryptomatteFlags cryptomatte_layer,
eViewLayerEEVEEPassType eevee_pass) {
if (view_layer->cryptomatte_flag & cryptomatte_layer) {

View File

@ -33,6 +33,8 @@
#include "BKE_report.h"
#include "BKE_vfont.h"
#include "BLT_translation.h"
#include "DEG_depsgraph.h"
#include "DEG_depsgraph_query.h"
@ -1964,6 +1966,8 @@ static int set_case_exec(bContext *C, wmOperator *op)
void FONT_OT_case_set(wmOperatorType *ot)
{
PropertyRNA *prop;
/* identifiers */
ot->name = "Set Case";
ot->description = "Set font case";
@ -1977,7 +1981,8 @@ void FONT_OT_case_set(wmOperatorType *ot)
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
/* properties */
RNA_def_enum(ot->srna, "case", case_items, CASE_LOWER, "Case", "Lower or upper case");
prop = RNA_def_enum(ot->srna, "case", case_items, CASE_LOWER, "Case", "Lower or upper case");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_ID_TEXT);
}
/** \} */

View File

@ -3986,6 +3986,7 @@ static int gpencil_strokes_reproject_exec(bContext *C, wmOperator *op)
void GPENCIL_OT_reproject(wmOperatorType *ot)
{
PropertyRNA *prop;
static const EnumPropertyItem reproject_type[] = {
{GP_REPROJECT_FRONT, "FRONT", 0, "Front", "Reproject the strokes using the X-Z plane"},
{GP_REPROJECT_SIDE, "SIDE", 0, "Side", "Reproject the strokes using the Y-Z plane"},
@ -4031,12 +4032,13 @@ void GPENCIL_OT_reproject(wmOperatorType *ot)
ot->prop = RNA_def_enum(
ot->srna, "type", reproject_type, GP_REPROJECT_VIEW, "Projection Type", "");
RNA_def_boolean(
prop = RNA_def_boolean(
ot->srna,
"keep_original",
0,
"Keep Original",
"Keep original strokes and create a copy before reprojecting instead of reproject them");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_ID_MOVIECLIP);
}
static int gpencil_recalc_geometry_exec(bContext *C, wmOperator *UNUSED(op))

View File

@ -71,6 +71,9 @@ typedef struct TraceJob {
int32_t thickness;
int32_t turnpolicy;
int32_t mode;
/** Frame to render to be used by python API. Not exposed in UI.
* This feature is only used in Studios to run custom video trace for selected frames. */
int32_t frame_num;
bool success;
bool was_canceled;
@ -212,7 +215,10 @@ static void trace_start_job(void *customdata, short *stop, short *do_update, flo
(trace_job->mode == GPENCIL_TRACE_MODE_SINGLE)) {
void *lock;
ImageUser *iuser = trace_job->ob_active->iuser;
iuser->framenr = init_frame;
iuser->framenr = ((trace_job->frame_num == 0) || (trace_job->frame_num > iuser->frames)) ?
init_frame :
trace_job->frame_num;
ImBuf *ibuf = BKE_image_acquire_ibuf(trace_job->image, iuser, &lock);
if (ibuf) {
/* Create frame. */
@ -325,13 +331,14 @@ static int gpencil_trace_image_exec(bContext *C, wmOperator *op)
job->thickness = RNA_int_get(op->ptr, "thickness");
job->turnpolicy = RNA_enum_get(op->ptr, "turnpolicy");
job->mode = RNA_enum_get(op->ptr, "mode");
job->frame_num = RNA_int_get(op->ptr, "frame_number");
trace_initialize_job_data(job);
/* Back to active base. */
ED_object_base_activate(job->C, job->base_active);
if (job->image->source == IMA_SRC_FILE) {
if ((job->image->source == IMA_SRC_FILE) || (job->frame_num > 0)) {
short stop = 0, do_update = true;
float progress;
trace_start_job(job, &stop, &do_update, &progress);
@ -365,6 +372,8 @@ static int gpencil_trace_image_invoke(bContext *C, wmOperator *op, const wmEvent
void GPENCIL_OT_trace_image(wmOperatorType *ot)
{
PropertyRNA *prop;
static const EnumPropertyItem turnpolicy_type[] = {
{POTRACE_TURNPOLICY_BLACK,
"BLACK",
@ -476,4 +485,15 @@ void GPENCIL_OT_trace_image(wmOperatorType *ot)
true,
"Start At Current Frame",
"Trace Image starting in current image frame");
prop = RNA_def_int(
ot->srna,
"frame_number",
0,
0,
9999,
"Trace Frame",
"Used to trace only one frame of the image sequence, set to zero to trace all",
0,
9999);
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
}

View File

@ -2100,12 +2100,22 @@ void UI_view2d_text_cache_draw(ARegion *region)
col_pack_prev = v2s->col.pack;
}
BLF_enable(font_id, BLF_CLIPPING);
BLF_clipping(
font_id, v2s->rect.xmin - 4, v2s->rect.ymin - 4, v2s->rect.xmax + 4, v2s->rect.ymax + 4);
BLF_draw_default(
v2s->rect.xmin + xofs, v2s->rect.ymin + yofs, 0.0f, v2s->str, BLF_DRAW_STR_DUMMY_MAX);
BLF_disable(font_id, BLF_CLIPPING);
/* Don't use clipping if `v2s->rect` is not set. */
if (BLI_rcti_size_x(&v2s->rect) == 0 && BLI_rcti_size_y(&v2s->rect) == 0) {
BLF_draw_default((float)(v2s->mval[0] + xofs),
(float)(v2s->mval[1] + yofs),
0.0,
v2s->str,
BLF_DRAW_STR_DUMMY_MAX);
}
else {
BLF_enable(font_id, BLF_CLIPPING);
BLF_clipping(
font_id, v2s->rect.xmin - 4, v2s->rect.ymin - 4, v2s->rect.xmax + 4, v2s->rect.ymax + 4);
BLF_draw_default(
v2s->rect.xmin + xofs, v2s->rect.ymin + yofs, 0.0f, v2s->str, BLF_DRAW_STR_DUMMY_MAX);
BLF_disable(font_id, BLF_CLIPPING);
}
}
g_v2d_strings = nullptr;

View File

@ -4893,12 +4893,13 @@ void MESH_OT_knife_tool(wmOperatorType *ot)
KNF_MEASUREMENT_NONE,
"Measurements",
"Visible distance and angle measurements");
RNA_def_enum(ot->srna,
prop = RNA_def_enum(ot->srna,
"angle_snapping",
angle_snapping_items,
KNF_CONSTRAIN_ANGLE_MODE_NONE,
"Angle Snapping",
"Angle snapping mode");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_ID_MESH);
prop = RNA_def_float(ot->srna,
"angle_snapping_increment",

View File

@ -3510,11 +3510,12 @@ void OBJECT_OT_convert(wmOperatorType *ot)
/* properties */
ot->prop = RNA_def_enum(
ot->srna, "target", convert_target_items, OB_MESH, "Target", "Type of object to convert to");
RNA_def_boolean(ot->srna,
"keep_original",
false,
"Keep Original",
"Keep original objects instead of replacing them");
prop = RNA_def_boolean(ot->srna,
"keep_original",
false,
"Keep Original",
"Keep original objects instead of replacing them");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_ID_OBJECT);
RNA_def_boolean(
ot->srna,

View File

@ -497,7 +497,7 @@ void SCULPT_vertex_face_set_set(SculptSession *ss, PBVHVertRef vertex, int face_
for (int j = 0; j < ss->pmap[vertex.i].count; j++) {
const int poly_index = vert_map->indices[j];
if (ss->hide_poly && ss->hide_poly[poly_index]) {
/* Skip hidden faces conntected to the vertex. */
/* Skip hidden faces connected to the vertex. */
continue;
}
ss->face_sets[poly_index] = face_set;

View File

@ -1535,6 +1535,7 @@ void CLIP_OT_average_tracks(wmOperatorType *ot)
PropertyRNA *prop;
prop = RNA_def_boolean(ot->srna, "keep_original", 1, "Keep Original", "Keep original tracks");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_ID_MOVIECLIP);
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
}

View File

@ -22,6 +22,7 @@
#include "BLI_range.h"
#include "BLI_utildefines.h"
#include "BKE_action.h"
#include "BKE_context.h"
#include "BKE_fcurve.h"
#include "BKE_nla.h"
@ -856,8 +857,9 @@ void draw_nla_main_data(bAnimContext *ac, SpaceNla *snla, ARegion *region)
immVertexFormat(), "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
immBindBuiltinProgram(GPU_SHADER_3D_UNIFORM_COLOR);
/* just draw a semi-shaded rect spanning the width of the viewable area if there's data,
* and a second darker rect within which we draw keyframe indicator dots if there's data
/* just draw a semi-shaded rect spanning the width of the viewable area, based on if
* there's data and the action's extrapolation mode. Draw a second darker rect within
* which we draw keyframe indicator dots if there's data.
*/
GPU_blend(GPU_BLEND_ALPHA);
@ -869,8 +871,26 @@ void draw_nla_main_data(bAnimContext *ac, SpaceNla *snla, ARegion *region)
/* draw slightly shifted up for greater separation from standard channels,
* but also slightly shorter for some more contrast when viewing the strips
*/
immRectf(
pos, v2d->cur.xmin, ymin + NLACHANNEL_SKIP, v2d->cur.xmax, ymax - NLACHANNEL_SKIP);
switch (adt->act_extendmode) {
case NLASTRIP_EXTEND_HOLD: {
immRectf(pos,
v2d->cur.xmin,
ymin + NLACHANNEL_SKIP,
v2d->cur.xmax,
ymax - NLACHANNEL_SKIP);
break;
}
case NLASTRIP_EXTEND_HOLD_FORWARD: {
float r_start;
float r_end;
BKE_action_get_frame_range(ale->data, &r_start, &r_end);
immRectf(pos, r_end, ymin + NLACHANNEL_SKIP, v2d->cur.xmax, ymax - NLACHANNEL_SKIP);
break;
}
case NLASTRIP_EXTEND_NOTHING:
break;
}
immUnbindProgram();

View File

@ -399,7 +399,7 @@ static void createTransUVs(bContext *C, TransInfo *t)
static void flushTransUVs(TransInfo *t)
{
SpaceImage *sima = t->area->spacedata.first;
const bool use_pixel_snap = ((sima->pixel_snap_mode != SI_PIXEL_SNAP_DISABLED) &&
const bool use_pixel_round = ((sima->pixel_round_mode != SI_PIXEL_ROUND_DISABLED) &&
(t->state != TRANS_CANCEL));
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
@ -410,7 +410,7 @@ static void flushTransUVs(TransInfo *t)
aspect_inv[0] = 1.0f / t->aspect[0];
aspect_inv[1] = 1.0f / t->aspect[1];
if (use_pixel_snap) {
if (use_pixel_round) {
int size_i[2];
ED_space_image_get_size(sima, &size_i[0], &size_i[1]);
size[0] = size_i[0];
@ -422,16 +422,16 @@ static void flushTransUVs(TransInfo *t)
td->loc2d[0] = td->loc[0] * aspect_inv[0];
td->loc2d[1] = td->loc[1] * aspect_inv[1];
if (use_pixel_snap) {
if (use_pixel_round) {
td->loc2d[0] *= size[0];
td->loc2d[1] *= size[1];
switch (sima->pixel_snap_mode) {
case SI_PIXEL_SNAP_CENTER:
switch (sima->pixel_round_mode) {
case SI_PIXEL_ROUND_CENTER:
td->loc2d[0] = roundf(td->loc2d[0] - 0.5f) + 0.5f;
td->loc2d[1] = roundf(td->loc2d[1] - 0.5f) + 0.5f;
break;
case SI_PIXEL_SNAP_CORNER:
case SI_PIXEL_ROUND_CORNER:
td->loc2d[0] = roundf(td->loc2d[0]);
td->loc2d[1] = roundf(td->loc2d[1]);
break;

View File

@ -67,7 +67,7 @@ inline void execute_lazy_function_eagerly_impl(
(
[&]() {
constexpr size_t I = InIndices;
using T = Inputs;
typedef Inputs T;
const CPPType &type = CPPType::get<T>();
input_pointers[I] = {type, &std::get<I>(inputs)};
}(),
@ -75,7 +75,7 @@ inline void execute_lazy_function_eagerly_impl(
(
[&]() {
constexpr size_t I = OutIndices;
using T = Outputs;
typedef Outputs T;
const CPPType &type = CPPType::get<T>();
output_pointers[I] = {type, std::get<I>(outputs)};
}(),

View File

@ -16,9 +16,9 @@
namespace blender::fn {
/* --------------------------------------------------------------------
* Field Evaluation.
*/
/* -------------------------------------------------------------------- */
/** \name Field Evaluation
* \{ */
struct FieldTreeInfo {
/**
@ -571,16 +571,20 @@ bool IndexFieldInput::is_equal_to(const fn::FieldNode &other) const
return dynamic_cast<const IndexFieldInput *>(&other) != nullptr;
}
/* --------------------------------------------------------------------
* FieldNode.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Fieldnode
* \{ */
/* Avoid generating the destructor in every translation unit. */
FieldNode::~FieldNode() = default;
/* --------------------------------------------------------------------
* FieldOperation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Fieldoperation
* \{ */
FieldOperation::FieldOperation(std::shared_ptr<const MultiFunction> function,
Vector<GField> inputs)
@ -653,9 +657,11 @@ FieldOperation::FieldOperation(const MultiFunction &function, Vector<GField> inp
field_inputs_ = combine_field_inputs(inputs_);
}
/* --------------------------------------------------------------------
* FieldInput.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Fieldinput
* \{ */
FieldInput::FieldInput(const CPPType &type, std::string debug_name)
: FieldNode(FieldNodeType::Input), type_(&type), debug_name_(std::move(debug_name))
@ -669,9 +675,11 @@ FieldInput::FieldInput(const CPPType &type, std::string debug_name)
/* Avoid generating the destructor in every translation unit. */
FieldInput::~FieldInput() = default;
/* --------------------------------------------------------------------
* FieldConstant.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Fieldconstant
* \{ */
FieldConstant::FieldConstant(const CPPType &type, const void *value)
: FieldNode(FieldNodeType::Constant), type_(type)
@ -703,9 +711,11 @@ GPointer FieldConstant::value() const
return {type_, value_};
}
/* --------------------------------------------------------------------
* FieldEvaluator.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Fieldevaluator
* \{ */
static IndexMask index_mask_from_selection(const IndexMask full_mask,
const VArray<bool> &selection,
@ -800,4 +810,6 @@ IndexMask FieldEvaluator::get_evaluated_selection_as_mask()
return selection_mask_;
}
/** \} */
} // namespace blender::fn

View File

@ -41,9 +41,9 @@ typedef enum CurveTypeMask {
* Create a cyclical iterator for all control points within the interval [start_point, end_point]
* including any control point at the start or end point.
*
* \param start_point Point on the curve that define the starting point of the interval.
* \param end_point Point on the curve that define the end point of the interval (included).
* \param points IndexRange for the curve points.
* \param start_point: Point on the curve that define the starting point of the interval.
* \param end_point: Point on the curve that define the end point of the interval (included).
* \param points: #IndexRange for the curve points.
*/
static bke::curves::IndexRangeCyclic get_range_between_endpoints(
const bke::curves::CurvePoint start_point,
@ -81,8 +81,8 @@ static bke::curves::IndexRangeCyclic get_range_between_endpoints(
* constant for all curve segments and evaluated curve points are uniformly spaced between the
* segment endpoints in relation to the curve parameter.
*
* \param lengths: Accumulated lenght for the evaluated curve.
* \param sample_length: Distance along the curve to determine the CurvePoint for.
* \param lengths: Accumulated length for the evaluated curve.
* \param sample_length: Distance along the curve to determine the #CurvePoint for.
* \param cyclic: If curve is cyclic.
* \param resolution: Curve resolution (number of evaluated points per segment).
* \param num_curve_points: Total number of control points in the curve.
@ -185,7 +185,7 @@ Array<bke::curves::CurvePoint, 12> lookup_curve_points(const bke::CurvesGeometry
const VArray<int> resolution = curves.resolution();
const VArray<int8_t> curve_types = curves.curve_types();
/* Compute curve lenghts! */
/* Compute curve lengths! */
curves.ensure_evaluated_lengths();
curves.ensure_evaluated_offsets();
@ -294,7 +294,7 @@ static void determine_copyable_curve_types(const bke::CurvesGeometry &src_curves
}
/**
* Determine if a curve is treated as an evaluated curve. Curves which inheretly do not support
* Determine if a curve is treated as an evaluated curve. Curves which inherently do not support
* trimming are discretized (e.g. NURBS).
*/
static bool copy_as_evaluated_curve(const int8_t src_type, const int8_t dst_type)
@ -359,9 +359,11 @@ static void compute_trim_result_offsets(const bke::CurvesGeometry &src_curves,
bke::curves::accumulate_counts_to_offsets(dst_curve_offsets);
}
/* --------------------------------------------------------------------
* Utility functions.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
static void fill_bezier_data(bke::CurvesGeometry &dst_curves, const IndexMask selection)
{
@ -415,9 +417,11 @@ static int64_t copy_point_data_between_endpoints(const Span<T> src_data,
return dst_index;
}
/* --------------------------------------------------------------------
* Sampling utilities.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sampling Utilities
* \{ */
template<typename T>
static T interpolate_catmull_rom(const Span<T> src_data,
@ -459,9 +463,11 @@ static bke::curves::bezier::Insertion knot_insert_bezier(
insertion_point.parameter);
}
/* --------------------------------------------------------------------
* Sample single point.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sample Single Point
* \{ */
template<typename T>
static void sample_linear(const Span<T> src_data,
@ -533,9 +539,11 @@ static void sample_bezier(const Span<float3> src_positions,
}
}
/* --------------------------------------------------------------------
* Sample curve interval (trim).
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sample Curve Interval (Trim)
* \{ */
/**
* Sample source curve data in the interval defined by the points [start_point, end_point].
@ -758,9 +766,11 @@ static void sample_interval_bezier(const Span<float3> src_positions,
BLI_assert(dst_index == dst_range.one_after_last());
}
/* --------------------------------------------------------------------
* Convert to point curves.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Convert to Point Curves
* \{ */
static void convert_point_polygonal_curves(
const bke::CurvesGeometry &src_curves,
@ -924,9 +934,11 @@ static void convert_point_evaluated_curves(
fill_nurbs_data(dst_curves, selection);
}
/* --------------------------------------------------------------------
* Trim curves.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Trim Curves
* \{ */
static void trim_attribute_linear(const bke::CurvesGeometry &src_curves,
bke::CurvesGeometry &dst_curves,
@ -1282,4 +1294,6 @@ bke::CurvesGeometry trim_curves(const bke::CurvesGeometry &src_curves,
return dst_curves;
}
/** \} */
} // namespace blender::geometry

View File

@ -397,7 +397,7 @@ static void edge_types_panel_draw(const bContext *UNUSED(C), Panel *panel)
sub = uiLayoutRow(entry, false);
uiItemR(sub, ptr, "use_light_contour", 0, IFACE_("Light Contour"), ICON_NONE);
uiItemR(entry, ptr, "use_shadow", 0, IFACE_("Cast Shadow"), ICON_NONE);
uiItemR(entry, ptr, "use_shadow", 0, CTX_IFACE_(BLT_I18NCONTEXT_ID_GPENCIL, "Cast Shadow"), ICON_NONE);
uiItemL(layout, IFACE_("Options"), ICON_NONE);
@ -442,8 +442,8 @@ static void options_light_reference_draw(const bContext *UNUSED(C), Panel *panel
uiItemR(remaining, ptr, "shadow_camera_size", 0, NULL, ICON_NONE);
uiLayout *col = uiLayoutColumn(remaining, true);
uiItemR(col, ptr, "shadow_camera_near", 0, "Near", ICON_NONE);
uiItemR(col, ptr, "shadow_camera_far", 0, "Far", ICON_NONE);
uiItemR(col, ptr, "shadow_camera_near", 0, IFACE_("Near"), ICON_NONE);
uiItemR(col, ptr, "shadow_camera_far", 0, IFACE_("Far"), ICON_NONE);
}
static void options_panel_draw(const bContext *UNUSED(C), Panel *panel)

View File

@ -201,8 +201,9 @@ id<MTLRenderCommandEncoder> MTLContext::ensure_begin_render_pass()
/* Ensure command buffer workload submissions are optimal --
* Though do not split a batch mid-IMM recording. */
/* TODO(Metal): Add IMM Check once MTLImmediate has been implemented. */
if (this->main_command_buffer.do_break_submission()/*&&
!((MTLImmediate *)(this->imm))->imm_is_recording()*/) {
if (this->main_command_buffer.do_break_submission()
// && !((MTLImmediate *)(this->imm))->imm_is_recording()
) {
this->flush();
}

View File

@ -35,8 +35,8 @@ typedef enum eViewLayerEEVEEPassType {
EEVEE_RENDER_PASS_BLOOM = (1 << 14),
EEVEE_RENDER_PASS_AOV = (1 << 15),
/*
* TODO(jbakker): Clean up confliting bits after EEVEE has been removed.
* EEVEE_RENDER_PASS_CRYPTOMATTE is for EEVEE, EEVEE_RENDER_PASS_CRYTPOMATTE_* are for
* TODO(@jbakker): Clean up conflicting bits after EEVEE has been removed.
* #EEVEE_RENDER_PASS_CRYPTOMATTE is for EEVEE, `EEVEE_RENDER_PASS_CRYTPOMATTE_*` are for
* EEVEE-Next.
*/
EEVEE_RENDER_PASS_CRYPTOMATTE = (1 << 16),

View File

@ -1856,9 +1856,13 @@ enum {
};
typedef struct CorrectiveSmoothDeltaCache {
/* delta's between the original positions and the smoothed positions */
/**
* Delta's between the original positions and the smoothed positions,
* calculated loop-tangent and which is accumulated into the vertex it uses.
* (run-time only).
*/
float (*deltas)[3];
unsigned int totverts;
unsigned int deltas_num;
/* Value of settings when creating the cache.
* These are used to check if the cache should be recomputed. */

View File

@ -1218,7 +1218,7 @@ typedef struct SpaceImage {
char pin;
char pixel_snap_mode;
char pixel_round_mode;
char lock;
/** UV draw type. */
@ -1260,12 +1260,12 @@ typedef enum eSpaceImage_UVDT_Stretch {
SI_UVDT_STRETCH_AREA = 1,
} eSpaceImage_UVDT_Stretch;
/** #SpaceImage.pixel_snap_mode */
typedef enum eSpaceImage_PixelSnapMode {
SI_PIXEL_SNAP_DISABLED = 0,
SI_PIXEL_SNAP_CENTER = 1,
SI_PIXEL_SNAP_CORNER = 2,
} eSpaceImage_Snap_Mode;
/** #SpaceImage.pixel_round_mode */
typedef enum eSpaceImage_PixelRoundMode {
SI_PIXEL_ROUND_DISABLED = 0,
SI_PIXEL_ROUND_CENTER = 1,
SI_PIXEL_ROUND_CORNER = 2,
} eSpaceImage_Round_Mode;
/** #SpaceImage.mode */
typedef enum eSpaceImage_Mode {

View File

@ -109,6 +109,7 @@ DNA_STRUCT_RENAME_ELEM(RenderData, bake_filter, bake_margin)
DNA_STRUCT_RENAME_ELEM(RigidBodyWorld, steps_per_second, substeps_per_frame)
DNA_STRUCT_RENAME_ELEM(SDefBind, numverts, verts_num)
DNA_STRUCT_RENAME_ELEM(SDefVert, numbinds, binds_num)
DNA_STRUCT_RENAME_ELEM(SpaceImage, pixel_snap_mode, pixel_round_mode)
DNA_STRUCT_RENAME_ELEM(SpaceSeq, overlay_type, overlay_frame_type)
DNA_STRUCT_RENAME_ELEM(SurfaceDeformModifierData, num_mesh_verts, mesh_verts_num)
DNA_STRUCT_RENAME_ELEM(SurfaceDeformModifierData, numpoly, target_polys_num)

View File

@ -21,6 +21,8 @@
#include "BKE_attribute.h"
#include "BKE_customdata.h"
#include "BLT_translation.h"
#include "WM_types.h"
const EnumPropertyItem rna_enum_attribute_type_items[] = {
@ -194,7 +196,7 @@ const EnumPropertyItem *rna_enum_attribute_domain_itemf(ID *id,
int totitem = 0, a;
static EnumPropertyItem mesh_vertex_domain_item = {
ATTR_DOMAIN_POINT, "POINT", 0, "Vertex", "Attribute per point/vertex"};
ATTR_DOMAIN_POINT, "POINT", 0, N_("Vertex"), N_("Attribute per point/vertex")};
for (a = 0; rna_enum_attribute_domain_items[a].identifier; a++) {
domain_item = &rna_enum_attribute_domain_items[a];

View File

@ -2247,6 +2247,7 @@ static void rna_def_editor(BlenderRNA *brna)
prop = RNA_def_property(srna, "proxy_storage", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_items(prop, editing_storage_items);
RNA_def_property_ui_text(prop, "Proxy Storage", "How to store proxies for this project");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_ID_SEQUENCE);
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_SEQUENCER, "rna_SequenceEditor_update_cache");
prop = RNA_def_property(srna, "proxy_dir", PROP_STRING, PROP_DIRPATH);

View File

@ -3531,10 +3531,10 @@ static void rna_def_space_image_uv(BlenderRNA *brna)
{0, NULL, 0, NULL, NULL},
};
static const EnumPropertyItem pixel_snap_mode_items[] = {
{SI_PIXEL_SNAP_DISABLED, "DISABLED", 0, "Disabled", "Don't snap to pixels"},
{SI_PIXEL_SNAP_CORNER, "CORNER", 0, "Corner", "Snap to pixel corners"},
{SI_PIXEL_SNAP_CENTER, "CENTER", 0, "Center", "Snap to pixel centers"},
static const EnumPropertyItem pixel_round_mode_items[] = {
{SI_PIXEL_ROUND_DISABLED, "DISABLED", 0, "Disabled", "Don't round to pixels"},
{SI_PIXEL_ROUND_CORNER, "CORNER", 0, "Corner", "Round to pixel corners"},
{SI_PIXEL_ROUND_CENTER, "CENTER", 0, "Center", "Round to pixel centers"},
{0, NULL, 0, NULL, NULL},
};
@ -3624,11 +3624,9 @@ static void rna_def_space_image_uv(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "UV Opacity", "Opacity of UV overlays");
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_IMAGE, NULL);
/* TODO: move edge and face drawing options here from `G.f`. */
prop = RNA_def_property(srna, "pixel_snap_mode", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_items(prop, pixel_snap_mode_items);
RNA_def_property_ui_text(prop, "Snap to Pixels", "Snap UVs to pixels while editing");
prop = RNA_def_property(srna, "pixel_round_mode", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_items(prop, pixel_round_mode_items);
RNA_def_property_ui_text(prop, "Round to Pixels", "Round UVs to pixels while editing");
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_IMAGE, NULL);
prop = RNA_def_property(srna, "lock_bounds", PROP_BOOLEAN, PROP_NONE);
@ -3998,6 +3996,7 @@ static void rna_def_space_view3d_shading(BlenderRNA *brna)
prop = RNA_def_property(srna, "cavity_type", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_items(prop, cavity_type_items);
RNA_def_property_ui_text(prop, "Cavity Type", "Way to display the cavity shading");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_EDITOR_VIEW3D);
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_VIEW3D | NS_VIEW3D_SHADING, NULL);
prop = RNA_def_property(srna, "curvature_ridge_factor", PROP_FLOAT, PROP_FACTOR);

View File

@ -2357,6 +2357,7 @@ static void rna_def_window(BlenderRNA *brna)
"rna_Window_screen_set",
NULL,
"rna_Window_screen_assign_poll");
RNA_def_property_translation_context(prop, BLT_I18NCONTEXT_ID_SCREEN);
RNA_def_property_flag(prop, PROP_NEVER_NULL | PROP_EDITABLE | PROP_CONTEXT_UPDATE);
RNA_def_property_update(prop, 0, "rna_workspace_screen_update");

View File

@ -49,10 +49,10 @@
#include "PIL_time.h"
#ifdef DEBUG_TIME
# include "PIL_time_utildefines.h"
#endif
/* minor optimization, calculate this inline */
#define USE_TANGENT_CALC_INLINE
#include "BLI_strict_flags.h"
static void initData(ModifierData *md)
{
@ -65,8 +65,6 @@ static void initData(ModifierData *md)
csmd->delta_cache.deltas = NULL;
}
#include "BLI_strict_flags.h"
static void copyData(const ModifierData *md, ModifierData *target, const int flag)
{
const CorrectiveSmoothModifierData *csmd = (const CorrectiveSmoothModifierData *)md;
@ -79,7 +77,7 @@ static void copyData(const ModifierData *md, ModifierData *target, const int fla
}
tcsmd->delta_cache.deltas = NULL;
tcsmd->delta_cache.totverts = 0;
tcsmd->delta_cache.deltas_num = 0;
}
static void freeBind(CorrectiveSmoothModifierData *csmd)
@ -132,25 +130,24 @@ static void mesh_get_boundaries(Mesh *mesh, float *smooth_weights)
const MEdge *medge = BKE_mesh_edges(mesh);
const MPoly *mpoly = BKE_mesh_polys(mesh);
const MLoop *mloop = BKE_mesh_loops(mesh);
uint mpoly_num, medge_num, i;
ushort *boundaries;
mpoly_num = (uint)mesh->totpoly;
medge_num = (uint)mesh->totedge;
const uint mpoly_num = (uint)mesh->totpoly;
const uint medge_num = (uint)mesh->totedge;
boundaries = MEM_calloc_arrayN(medge_num, sizeof(*boundaries), __func__);
/* Flag boundary edges so only boundaries are set to 1. */
uint8_t *boundaries = MEM_calloc_arrayN(medge_num, sizeof(*boundaries), __func__);
/* count the number of adjacent faces */
for (i = 0; i < mpoly_num; i++) {
for (uint i = 0; i < mpoly_num; i++) {
const MPoly *p = &mpoly[i];
const int totloop = p->totloop;
int j;
for (j = 0; j < totloop; j++) {
boundaries[mloop[p->loopstart + j].e]++;
uint8_t *e_value = &boundaries[mloop[p->loopstart + j].e];
*e_value |= (uint8_t)((*e_value) + 1);
}
}
for (i = 0; i < medge_num; i++) {
for (uint i = 0; i < medge_num; i++) {
if (boundaries[i] == 1) {
smooth_weights[medge[i].v1] = 0.0f;
smooth_weights[medge[i].v2] = 0.0f;
@ -391,69 +388,61 @@ static void smooth_verts(CorrectiveSmoothModifierData *csmd,
}
/**
* finalize after accumulation.
* Calculate an orthogonal 3x3 matrix from 2 edge vectors.
* \return false if this loop should be ignored (have zero influence).
*/
static void calc_tangent_ortho(float ts[3][3])
static bool calc_tangent_loop(const float v_dir_prev[3],
const float v_dir_next[3],
float r_tspace[3][3])
{
float v_tan_a[3], v_tan_b[3];
float t_vec_a[3], t_vec_b[3];
if (UNLIKELY(compare_v3v3(v_dir_prev, v_dir_next, FLT_EPSILON * 10.0f))) {
/* As there are no weights, the value doesn't matter just initialize it. */
unit_m3(r_tspace);
return false;
}
normalize_v3(ts[2]);
copy_v3_v3(r_tspace[0], v_dir_prev);
copy_v3_v3(r_tspace[1], v_dir_next);
copy_v3_v3(v_tan_a, ts[0]);
copy_v3_v3(v_tan_b, ts[1]);
cross_v3_v3v3(r_tspace[2], v_dir_prev, v_dir_next);
normalize_v3(r_tspace[2]);
cross_v3_v3v3(ts[1], ts[2], v_tan_a);
mul_v3_fl(ts[1], dot_v3v3(ts[1], v_tan_b) < 0.0f ? -1.0f : 1.0f);
/* Make orthogonal using `r_tspace[2]` as a basis.
*
* NOTE: while it seems more logical to use `v_dir_prev` & `v_dir_next` as separate X/Y axis
* (instead of combining them as is done here). It's not necessary as the directions of the
* axis aren't important as long as the difference between tangent matrices is equivalent.
* Some computations can be skipped by combining the the two directions,
* using the cross product for the 3rd axes. */
add_v3_v3(r_tspace[0], r_tspace[1]);
normalize_v3(r_tspace[0]);
cross_v3_v3v3(r_tspace[1], r_tspace[2], r_tspace[0]);
/* Orthogonalize tangent. */
mul_v3_v3fl(t_vec_a, ts[2], dot_v3v3(ts[2], v_tan_a));
sub_v3_v3v3(ts[0], v_tan_a, t_vec_a);
/* Orthogonalize bi-tangent. */
mul_v3_v3fl(t_vec_a, ts[2], dot_v3v3(ts[2], ts[1]));
mul_v3_v3fl(t_vec_b, ts[0], dot_v3v3(ts[0], ts[1]) / dot_v3v3(v_tan_a, v_tan_a));
sub_v3_v3(ts[1], t_vec_a);
sub_v3_v3(ts[1], t_vec_b);
normalize_v3(ts[0]);
normalize_v3(ts[1]);
return true;
}
/**
* accumulate edge-vectors from all polys.
* \param r_tangent_spaces: Loop aligned array of tangents.
* \param r_tangent_weights: Loop aligned array of weights (may be NULL).
* \param r_tangent_weights_per_vertex: Vertex aligned array, accumulating weights for each loop
* (may be NULL).
*/
static void calc_tangent_loop_accum(const float v_dir_prev[3],
const float v_dir_next[3],
float r_tspace[3][3])
{
add_v3_v3v3(r_tspace[1], v_dir_prev, v_dir_next);
if (compare_v3v3(v_dir_prev, v_dir_next, FLT_EPSILON * 10.0f) == false) {
const float weight = fabsf(acosf(dot_v3v3(v_dir_next, v_dir_prev)));
float nor[3];
cross_v3_v3v3(nor, v_dir_prev, v_dir_next);
normalize_v3(nor);
cross_v3_v3v3(r_tspace[0], r_tspace[1], nor);
mul_v3_fl(nor, weight);
/* accumulate weighted normals */
add_v3_v3(r_tspace[2], nor);
}
}
static void calc_tangent_spaces(Mesh *mesh, float (*vertexCos)[3], float (*r_tangent_spaces)[3][3])
static void calc_tangent_spaces(const Mesh *mesh,
const float (*vertexCos)[3],
float (*r_tangent_spaces)[3][3],
float *r_tangent_weights,
float *r_tangent_weights_per_vertex)
{
const uint mpoly_num = (uint)mesh->totpoly;
#ifndef USE_TANGENT_CALC_INLINE
const uint mvert_num = (uint)dm->getNumVerts(dm);
#endif
const uint mvert_num = (uint)mesh->totvert;
const MPoly *mpoly = BKE_mesh_polys(mesh);
const MLoop *mloop = BKE_mesh_loops(mesh);
uint i;
if (r_tangent_weights_per_vertex != NULL) {
copy_vn_fl(r_tangent_weights_per_vertex, (int)mvert_num, 0.0f);
}
for (i = 0; i < mpoly_num; i++) {
const MPoly *mp = &mpoly[i];
const MLoop *l_next = &mloop[mp->loopstart];
@ -469,7 +458,8 @@ static void calc_tangent_spaces(Mesh *mesh, float (*vertexCos)[3], float (*r_tan
normalize_v3(v_dir_prev);
for (; l_next != l_term; l_prev = l_curr, l_curr = l_next, l_next++) {
float(*ts)[3] = r_tangent_spaces[l_curr->v];
uint l_index = (uint)(l_curr - mloop);
float(*ts)[3] = r_tangent_spaces[l_index];
/* re-use the previous value */
#if 0
@ -479,19 +469,22 @@ static void calc_tangent_spaces(Mesh *mesh, float (*vertexCos)[3], float (*r_tan
sub_v3_v3v3(v_dir_next, vertexCos[l_curr->v], vertexCos[l_next->v]);
normalize_v3(v_dir_next);
calc_tangent_loop_accum(v_dir_prev, v_dir_next, ts);
if (calc_tangent_loop(v_dir_prev, v_dir_next, ts)) {
if (r_tangent_weights != NULL) {
const float weight = fabsf(acosf(dot_v3v3(v_dir_next, v_dir_prev)));
r_tangent_weights[l_index] = weight;
r_tangent_weights_per_vertex[l_curr->v] += weight;
}
}
else {
if (r_tangent_weights != NULL) {
r_tangent_weights[l_index] = 0;
}
}
copy_v3_v3(v_dir_prev, v_dir_next);
}
}
/* do inline */
#ifndef USE_TANGENT_CALC_INLINE
for (i = 0; i < mvert_num; i++) {
float(*ts)[3] = r_tangent_spaces[i];
calc_tangent_ortho(ts);
}
#endif
}
static void store_cache_settings(CorrectiveSmoothModifierData *csmd)
@ -522,38 +515,42 @@ static void calc_deltas(CorrectiveSmoothModifierData *csmd,
const float (*rest_coords)[3],
uint verts_num)
{
const MLoop *mloop = BKE_mesh_loops(mesh);
const uint loops_num = (uint)mesh->totloop;
float(*smooth_vertex_coords)[3] = MEM_dupallocN(rest_coords);
float(*tangent_spaces)[3][3];
uint i;
tangent_spaces = MEM_calloc_arrayN(verts_num, sizeof(float[3][3]), __func__);
uint l_index;
if (csmd->delta_cache.totverts != verts_num) {
tangent_spaces = MEM_malloc_arrayN(loops_num, sizeof(float[3][3]), __func__);
if (csmd->delta_cache.deltas_num != loops_num) {
MEM_SAFE_FREE(csmd->delta_cache.deltas);
}
/* allocate deltas if they have not yet been allocated, otherwise we will just write over them */
if (!csmd->delta_cache.deltas) {
csmd->delta_cache.totverts = verts_num;
csmd->delta_cache.deltas = MEM_malloc_arrayN(verts_num, sizeof(float[3]), __func__);
csmd->delta_cache.deltas_num = loops_num;
csmd->delta_cache.deltas = MEM_malloc_arrayN(loops_num, sizeof(float[3]), __func__);
}
smooth_verts(csmd, mesh, dvert, defgrp_index, smooth_vertex_coords, verts_num);
calc_tangent_spaces(mesh, smooth_vertex_coords, tangent_spaces);
calc_tangent_spaces(mesh, smooth_vertex_coords, tangent_spaces, NULL, NULL);
for (i = 0; i < verts_num; i++) {
float imat[3][3], delta[3];
copy_vn_fl(&csmd->delta_cache.deltas[0][0], (int)loops_num * 3, 0.0f);
#ifdef USE_TANGENT_CALC_INLINE
calc_tangent_ortho(tangent_spaces[i]);
#endif
for (l_index = 0; l_index < loops_num; l_index++) {
const int v_index = (int)mloop[l_index].v;
float delta[3];
sub_v3_v3v3(delta, rest_coords[v_index], smooth_vertex_coords[v_index]);
sub_v3_v3v3(delta, rest_coords[i], smooth_vertex_coords[i]);
if (UNLIKELY(!invert_m3_m3(imat, tangent_spaces[i]))) {
transpose_m3_m3(imat, tangent_spaces[i]);
float imat[3][3];
if (UNLIKELY(!invert_m3_m3(imat, tangent_spaces[l_index]))) {
transpose_m3_m3(imat, tangent_spaces[l_index]);
}
mul_v3_m3v3(csmd->delta_cache.deltas[i], imat, delta);
mul_v3_m3v3(csmd->delta_cache.deltas[l_index], imat, delta);
}
MEM_freeN(tangent_spaces);
@ -576,6 +573,9 @@ static void correctivesmooth_modifier_do(ModifierData *md,
((csmd->rest_source == MOD_CORRECTIVESMOOTH_RESTSOURCE_ORCO) &&
(((ID *)ob->data)->recalc & ID_RECALC_ALL));
const MLoop *mloop = BKE_mesh_loops(mesh);
const uint loops_num = (uint)mesh->totloop;
bool use_only_smooth = (csmd->flag & MOD_CORRECTIVESMOOTH_ONLY_SMOOTH) != 0;
const MDeformVert *dvert = NULL;
int defgrp_index;
@ -638,7 +638,7 @@ static void correctivesmooth_modifier_do(ModifierData *md,
}
/* check to see if our deltas are still valid */
if (!csmd->delta_cache.deltas || (csmd->delta_cache.totverts != verts_num) ||
if (!csmd->delta_cache.deltas || (csmd->delta_cache.deltas_num != loops_num) ||
force_delta_cache_update) {
const float(*rest_coords)[3];
bool is_rest_coords_alloc = false;
@ -686,27 +686,38 @@ static void correctivesmooth_modifier_do(ModifierData *md,
smooth_verts(csmd, mesh, dvert, defgrp_index, vertexCos, verts_num);
{
uint i;
uint l_index;
float(*tangent_spaces)[3][3];
float *tangent_weights;
float *tangent_weights_per_vertex;
const float scale = csmd->scale;
/* calloc, since values are accumulated */
tangent_spaces = MEM_calloc_arrayN(verts_num, sizeof(float[3][3]), __func__);
calc_tangent_spaces(mesh, vertexCos, tangent_spaces);
tangent_spaces = MEM_malloc_arrayN(loops_num, sizeof(float[3][3]), __func__);
tangent_weights = MEM_malloc_arrayN(loops_num, sizeof(float), __func__);
tangent_weights_per_vertex = MEM_malloc_arrayN(verts_num, sizeof(float), __func__);
calc_tangent_spaces(
mesh, vertexCos, tangent_spaces, tangent_weights, tangent_weights_per_vertex);
for (l_index = 0; l_index < loops_num; l_index++) {
const uint v_index = mloop[l_index].v;
const float weight = tangent_weights[l_index] / tangent_weights_per_vertex[v_index];
if (UNLIKELY(!(weight > 0.0f))) {
/* Catches zero & divide by zero. */
continue;
}
for (i = 0; i < verts_num; i++) {
float delta[3];
#ifdef USE_TANGENT_CALC_INLINE
calc_tangent_ortho(tangent_spaces[i]);
#endif
mul_v3_m3v3(delta, tangent_spaces[i], csmd->delta_cache.deltas[i]);
madd_v3_v3fl(vertexCos[i], delta, scale);
mul_v3_m3v3(delta, tangent_spaces[l_index], csmd->delta_cache.deltas[l_index]);
mul_v3_fl(delta, weight);
madd_v3_v3fl(vertexCos[v_index], delta, scale);
}
MEM_freeN(tangent_spaces);
MEM_freeN(tangent_weights);
MEM_freeN(tangent_weights_per_vertex);
}
#ifdef DEBUG_TIME
@ -718,7 +729,7 @@ static void correctivesmooth_modifier_do(ModifierData *md,
/* when the modifier fails to execute */
error:
MEM_SAFE_FREE(csmd->delta_cache.deltas);
csmd->delta_cache.totverts = 0;
csmd->delta_cache.deltas_num = 0;
}
static void deformVerts(ModifierData *md,
@ -827,7 +838,7 @@ static void blendRead(BlendDataReader *reader, ModifierData *md)
/* runtime only */
csmd->delta_cache.deltas = NULL;
csmd->delta_cache.totverts = 0;
csmd->delta_cache.deltas_num = 0;
}
ModifierTypeInfo modifierType_CorrectiveSmooth = {

View File

@ -592,7 +592,7 @@ static void spray_panel_draw_header(const bContext *UNUSED(C), Panel *panel)
row = uiLayoutRow(layout, false);
uiLayoutSetActive(row, use_foam);
uiItemR(row, ptr, "use_spray", 0, IFACE_("Spray"), ICON_NONE);
uiItemR(row, ptr, "use_spray", 0, CTX_IFACE_(BLT_I18NCONTEXT_ID_MESH, "Spray"), ICON_NONE);
}
static void spray_panel_draw(const bContext *UNUSED(C), Panel *panel)

View File

@ -41,6 +41,8 @@
#include "MOD_modifiertypes.h"
#include "MOD_ui_common.h"
#include "BLI_strict_flags.h"
static void initData(ModifierData *md)
{
ScrewModifierData *ltmd = (ScrewModifierData *)md;
@ -50,8 +52,6 @@ static void initData(ModifierData *md)
MEMCPY_STRUCT_AFTER(ltmd, DNA_struct_default_get(ScrewModifierData), modifier);
}
#include "BLI_strict_flags.h"
/** Used for gathering edge connectivity. */
typedef struct ScrewVertConnect {
/** Distance from the center axis. */

View File

@ -787,7 +787,7 @@ bool SIM_hair_volume_solve_divergence(HairGrid *grid,
vert->density, target_density, target_strength);
/* B vector contains the finite difference approximation of the velocity divergence.
* NOTE: according to the discretized Navier-Stokes equation the rhs vector
* NOTE: according to the discretized Navier-Stokes equation the RHS vector
* and resulting pressure gradient should be multiplied by the (inverse) density;
* however, this is already included in the weighting of hair velocities on the grid!
*/

View File

@ -344,10 +344,10 @@ if(UNIX AND NOT APPLE)
elseif(WIN32)
if(WITH_PYTHON_MODULE)
set(TARGETDIR_BPY $<TARGET_FILE_DIR:blender>)
set(TARGETDIR_VER $<TARGET_FILE_DIR:blender>/${BLENDER_VERSION})
set(TARGETDIR_BPY ${CMAKE_INSTALL_PREFIX_WITH_CONFIG}/bpy)
set(TARGETDIR_VER ${CMAKE_INSTALL_PREFIX_WITH_CONFIG}/bpy/${BLENDER_VERSION})
# Important the DLL's are next to `__init__.pyd` otherwise it won't load.
set(TARGETDIR_LIB $<TARGET_FILE_DIR:blender>)
set(TARGETDIR_LIB ${CMAKE_INSTALL_PREFIX_WITH_CONFIG}/bpy)
else()
set(TARGETDIR_VER ${BLENDER_VERSION})
set(TARGETDIR_TEXT .)
@ -410,6 +410,7 @@ if(WITH_PYTHON)
DESTINATION ${TARGETDIR_VER}
PATTERN ".git" EXCLUDE
PATTERN ".gitignore" EXCLUDE
PATTERN ".github" EXCLUDE
PATTERN ".arcconfig" EXCLUDE
PATTERN "__pycache__" EXCLUDE
PATTERN "${ADDON_EXCLUDE_CONDITIONAL}" EXCLUDE

View File

@ -10,29 +10,25 @@ set(TEST_INSTALL_DIR ${CMAKE_INSTALL_PREFIX_WITH_CONFIG})
# Path to Blender and Python executables for all platforms.
if(MSVC)
set(TEST_BLENDER_EXE ${TEST_INSTALL_DIR}/blender.exe)
set(_default_test_python_exe "${TEST_INSTALL_DIR}/${BLENDER_VERSION_MAJOR}.${BLENDER_VERSION_MINOR}/python/bin/python$<$<CONFIG:Debug>:_d>")
elseif(APPLE)
set(TEST_BLENDER_EXE ${TEST_INSTALL_DIR}/Blender.app/Contents/MacOS/Blender)
set(_default_test_python_exe ${PYTHON_EXECUTABLE})
else()
if(WITH_INSTALL_PORTABLE)
set(TEST_BLENDER_EXE ${TEST_INSTALL_DIR}/blender)
else()
set(TEST_BLENDER_EXE ${TEST_INSTALL_DIR}/bin/blender)
endif()
set(_default_test_python_exe ${PYTHON_EXECUTABLE})
endif()
# The installation directory's Python is the best one to use. However, it can only be there after the install step,
# which means that Python will never be there on a fresh system. To suit different needs, the user can pass
# -DTEST_PYTHON_EXE=/path/to/python to CMake.
if(NOT TEST_PYTHON_EXE)
set(TEST_PYTHON_EXE ${_default_test_python_exe})
set(TEST_PYTHON_EXE ${PYTHON_EXECUTABLE})
message(STATUS "Tests: Using Python executable: ${TEST_PYTHON_EXE}")
elseif(NOT EXISTS ${TEST_PYTHON_EXE})
message(FATAL_ERROR "Tests: TEST_PYTHON_EXE ${TEST_PYTHON_EXE} does not exist")
endif()
unset(_default_test_python_exe)
# For testing with Valgrind

View File

@ -12,4 +12,4 @@ function(add_blender_as_python_module_test testname testscript)
)
endfunction()
add_blender_as_python_module_test(import_bpy ${CMAKE_CURRENT_LIST_DIR}/import_bpy.py)
add_blender_as_python_module_test(import_bpy ${CMAKE_CURRENT_LIST_DIR}/import_bpy.py ${CMAKE_INSTALL_PREFIX_WITH_CONFIG})

View File

@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Add directory with module to the path.
import sys
sys.path.append(sys.argv[1])
# Just import bpy and see if there are any dynamic loader errors.
import bpy