Merge branch 'master' into refactor-mesh-position-generic

Hans Goudey 2022-12-20 11:29:37 -06:00
commit d5100cff52
52 changed files with 887 additions and 385 deletions

View File

@ -37,6 +37,11 @@ int BKE_object_defgroup_active_index_get(const struct Object *ob);
*/
void BKE_object_defgroup_active_index_set(struct Object *ob, int new_index);
/**
* Return the ID's vertex group names.
* Supports Mesh (ME), Lattice (LT), and GreasePencil (GD) IDs.
* \return ListBase of bDeformGroup pointers.
*/
const struct ListBase *BKE_id_defgroup_list_get(const struct ID *id);
struct ListBase *BKE_id_defgroup_list_get_mutable(struct ID *id);
int BKE_id_defgroup_name_index(const struct ID *id, const char *name);

View File

@ -5,6 +5,7 @@
#include <memory>
#include <mutex>
#include "BLI_cache_mutex.hh"
#include "BLI_multi_value_map.hh"
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"
@ -119,9 +120,8 @@ class bNodeTreeRuntime : NonCopyable, NonMovable {
* Protects access to all topology cache variables below. This is necessary so that the cache can
* be updated on a const #bNodeTree.
*/
std::mutex topology_cache_mutex;
bool topology_cache_is_dirty = true;
bool topology_cache_exists = false;
CacheMutex topology_cache_mutex;
std::atomic<bool> topology_cache_exists = false;
/**
* Under some circumstances, it can be useful to use the cached data while editing the
* #bNodeTree. By default, this is protected against using an assert.
@ -298,7 +298,7 @@ inline bool topology_cache_is_available(const bNodeTree &tree)
if (tree.runtime->allow_use_dirty_topology_cache.load() > 0) {
return true;
}
if (tree.runtime->topology_cache_is_dirty) {
if (tree.runtime->topology_cache_mutex.is_dirty()) {
return false;
}
return true;

View File

@ -2010,7 +2010,7 @@ bNode *nodeFindNodebyName(bNodeTree *ntree, const char *name)
void nodeFindNode(bNodeTree *ntree, bNodeSocket *sock, bNode **r_node, int *r_sockindex)
{
*r_node = nullptr;
if (!ntree->runtime->topology_cache_is_dirty) {
if (ntree->runtime->topology_cache_mutex.is_cached()) {
bNode *node = &sock->owner_node();
*r_node = node;
if (r_sockindex) {

View File

@ -22,26 +22,6 @@ void preprocess_geometry_node_tree_for_evaluation(bNodeTree &tree_cow)
blender::nodes::ensure_geometry_nodes_lazy_function_graph(tree_cow);
}
static void double_checked_lock(std::mutex &mutex, bool &data_is_dirty, FunctionRef<void()> fn)
{
if (!data_is_dirty) {
return;
}
std::lock_guard lock{mutex};
if (!data_is_dirty) {
return;
}
fn();
data_is_dirty = false;
}
static void double_checked_lock_with_task_isolation(std::mutex &mutex,
bool &data_is_dirty,
FunctionRef<void()> fn)
{
double_checked_lock(mutex, data_is_dirty, [&]() { threading::isolate_task(fn); });
}
static void update_interface_sockets(const bNodeTree &ntree)
{
bNodeTreeRuntime &tree_runtime = *ntree.runtime;
@ -434,37 +414,34 @@ static void update_group_output_node(const bNodeTree &ntree)
static void ensure_topology_cache(const bNodeTree &ntree)
{
bNodeTreeRuntime &tree_runtime = *ntree.runtime;
double_checked_lock_with_task_isolation(
tree_runtime.topology_cache_mutex, tree_runtime.topology_cache_is_dirty, [&]() {
update_interface_sockets(ntree);
update_node_vector(ntree);
update_link_vector(ntree);
update_socket_vectors_and_owner_node(ntree);
update_internal_link_inputs(ntree);
update_directly_linked_links_and_sockets(ntree);
threading::parallel_invoke(
tree_runtime.nodes_by_id.size() > 32,
[&]() { update_logical_origins(ntree); },
[&]() { update_nodes_by_type(ntree); },
[&]() { update_sockets_by_identifier(ntree); },
[&]() {
update_toposort(ntree,
ToposortDirection::LeftToRight,
tree_runtime.toposort_left_to_right,
tree_runtime.has_available_link_cycle);
},
[&]() {
bool dummy;
update_toposort(ntree,
ToposortDirection::RightToLeft,
tree_runtime.toposort_right_to_left,
dummy);
},
[&]() { update_root_frames(ntree); },
[&]() { update_direct_frames_childrens(ntree); });
update_group_output_node(ntree);
tree_runtime.topology_cache_exists = true;
});
tree_runtime.topology_cache_mutex.ensure([&]() {
update_interface_sockets(ntree);
update_node_vector(ntree);
update_link_vector(ntree);
update_socket_vectors_and_owner_node(ntree);
update_internal_link_inputs(ntree);
update_directly_linked_links_and_sockets(ntree);
threading::parallel_invoke(
tree_runtime.nodes_by_id.size() > 32,
[&]() { update_logical_origins(ntree); },
[&]() { update_nodes_by_type(ntree); },
[&]() { update_sockets_by_identifier(ntree); },
[&]() {
update_toposort(ntree,
ToposortDirection::LeftToRight,
tree_runtime.toposort_left_to_right,
tree_runtime.has_available_link_cycle);
},
[&]() {
bool dummy;
update_toposort(
ntree, ToposortDirection::RightToLeft, tree_runtime.toposort_right_to_left, dummy);
},
[&]() { update_root_frames(ntree); },
[&]() { update_direct_frames_childrens(ntree); });
update_group_output_node(ntree);
tree_runtime.topology_cache_exists = true;
});
}
} // namespace blender::bke::node_tree_runtime

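For context: `CacheMutex` (from `BLI_cache_mutex.hh`) packages the double-checked locking that the removed `double_checked_lock_with_task_isolation` helper spelled out by hand, which is why the separate `topology_cache_is_dirty` flag could be dropped. A minimal sketch of the idea, assuming only the interface used in this diff (`ensure()`, `is_dirty()`, `is_cached()`, `tag_dirty()`); the real header may differ:

#include <atomic>
#include <functional>
#include <mutex>

/* Sketch of a CacheMutex-like wrapper; not the actual BLI implementation. */
class CacheMutexSketch {
  std::mutex mutex_;
  std::atomic<bool> cache_valid_ = false;

 public:
  void ensure(const std::function<void()> &compute_cache)
  {
    if (cache_valid_.load(std::memory_order_acquire)) {
      return;
    }
    std::lock_guard lock{mutex_};
    /* Double-checked: another thread may have computed the cache while we waited. */
    if (cache_valid_.load(std::memory_order_relaxed)) {
      return;
    }
    /* The real CacheMutex::ensure() presumably also isolates the task, as the removed
     * double_checked_lock_with_task_isolation() did, to avoid deadlocks when
     * compute_cache itself spawns parallel work. */
    compute_cache();
    cache_valid_.store(true, std::memory_order_release);
  }

  void tag_dirty()
  {
    cache_valid_.store(false);
  }
  bool is_dirty() const
  {
    return !cache_valid_.load();
  }
  bool is_cached() const
  {
    return cache_valid_.load();
  }
};

With this, `ensure_topology_cache()` reduces to a single `ensure()` call, and `nodeFindNode()` can ask `is_cached()` instead of reading a raw dirty flag.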
View File

@ -5,6 +5,7 @@
#include "NOD_node_declaration.hh"
#include "BLI_resource_scope.hh"
#include "BLI_set.hh"
#include "BLI_stack.hh"
@ -91,9 +92,10 @@ static OutputFieldDependency get_interface_output_field_dependency(const bNode &
return socket_decl.output_field_dependency();
}
static FieldInferencingInterface get_dummy_field_inferencing_interface(const bNode &node)
static const FieldInferencingInterface &get_dummy_field_inferencing_interface(const bNode &node,
ResourceScope &scope)
{
FieldInferencingInterface inferencing_interface;
auto &inferencing_interface = scope.construct<FieldInferencingInterface>();
inferencing_interface.inputs.append_n_times(InputSocketFieldType::None,
node.input_sockets().size());
inferencing_interface.outputs.append_n_times(OutputFieldDependency::ForDataSource(),
@ -106,17 +108,19 @@ static FieldInferencingInterface get_dummy_field_inferencing_interface(const bNo
* In the future, this information can be stored in the node declaration. This would allow this
* function to return a reference, making it more efficient.
*/
static FieldInferencingInterface get_node_field_inferencing_interface(const bNode &node)
static const FieldInferencingInterface &get_node_field_inferencing_interface(const bNode &node,
ResourceScope &scope)
{
/* Node groups already reference all required information, so just return that. */
if (node.is_group()) {
bNodeTree *group = (bNodeTree *)node.id;
if (group == nullptr) {
return FieldInferencingInterface();
static const FieldInferencingInterface empty_interface;
return empty_interface;
}
if (!ntreeIsRegistered(group)) {
/* This can happen when there is a linked node group that was not found (see T92799). */
return get_dummy_field_inferencing_interface(node);
return get_dummy_field_inferencing_interface(node, scope);
}
if (!group->runtime->field_inferencing_interface) {
/* This shouldn't happen because referenced node groups should always be updated first. */
@ -125,7 +129,7 @@ static FieldInferencingInterface get_node_field_inferencing_interface(const bNod
return *group->runtime->field_inferencing_interface;
}
FieldInferencingInterface inferencing_interface;
auto &inferencing_interface = scope.construct<FieldInferencingInterface>();
for (const bNodeSocket *input_socket : node.input_sockets()) {
inferencing_interface.inputs.append(get_interface_input_field_type(node, *input_socket));
}
@ -185,7 +189,9 @@ static Vector<const bNodeSocket *> gather_input_socket_dependencies(
* to figure out if it is always a field or if it depends on any group inputs.
*/
static OutputFieldDependency find_group_output_dependencies(
const bNodeSocket &group_output_socket, const Span<SocketFieldState> field_state_by_socket_id)
const bNodeSocket &group_output_socket,
const Span<const FieldInferencingInterface *> interface_by_node,
const Span<SocketFieldState> field_state_by_socket_id)
{
if (!is_field_socket_type(group_output_socket)) {
return OutputFieldDependency::ForDataSource();
@ -227,8 +233,8 @@ static OutputFieldDependency find_group_output_dependencies(
}
}
else if (!origin_state.is_single) {
const FieldInferencingInterface inferencing_interface =
get_node_field_inferencing_interface(origin_node);
const FieldInferencingInterface &inferencing_interface =
*interface_by_node[origin_node.index()];
const OutputFieldDependency &field_dependency =
inferencing_interface.outputs[origin_socket->index()];
@ -251,13 +257,14 @@ static OutputFieldDependency find_group_output_dependencies(
}
static void propagate_data_requirements_from_right_to_left(
const bNodeTree &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
const bNodeTree &tree,
const Span<const FieldInferencingInterface *> interface_by_node,
const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
const Span<const bNode *> toposort_result = tree.toposort_right_to_left();
for (const bNode *node : toposort_result) {
const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
*node);
const FieldInferencingInterface &inferencing_interface = *interface_by_node[node->index()];
for (const bNodeSocket *output_socket : node->output_sockets()) {
SocketFieldState &state = field_state_by_socket_id[output_socket->index_in_tree()];
@ -369,7 +376,9 @@ static void determine_group_input_states(
}
static void propagate_field_status_from_left_to_right(
const bNodeTree &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
const bNodeTree &tree,
const Span<const FieldInferencingInterface *> interface_by_node,
const MutableSpan<SocketFieldState> field_state_by_socket_id)
{
const Span<const bNode *> toposort_result = tree.toposort_left_to_right();
@ -378,8 +387,7 @@ static void propagate_field_status_from_left_to_right(
continue;
}
const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
*node);
const FieldInferencingInterface &inferencing_interface = *interface_by_node[node->index()];
/* Update field state of input sockets, also taking into account linked origin sockets. */
for (const bNodeSocket *input_socket : node->input_sockets()) {
@ -440,9 +448,11 @@ static void propagate_field_status_from_left_to_right(
}
}
static void determine_group_output_states(const bNodeTree &tree,
FieldInferencingInterface &new_inferencing_interface,
const Span<SocketFieldState> field_state_by_socket_id)
static void determine_group_output_states(
const bNodeTree &tree,
FieldInferencingInterface &new_inferencing_interface,
const Span<const FieldInferencingInterface *> interface_by_node,
const Span<SocketFieldState> field_state_by_socket_id)
{
const bNode *group_output_node = tree.group_output_node();
if (!group_output_node) {
@ -451,7 +461,7 @@ static void determine_group_output_states(const bNodeTree &tree,
for (const bNodeSocket *group_output_socket : group_output_node->input_sockets().drop_back(1)) {
OutputFieldDependency field_dependency = find_group_output_dependencies(
*group_output_socket, field_state_by_socket_id);
*group_output_socket, interface_by_node, field_state_by_socket_id);
new_inferencing_interface.outputs[group_output_socket->index()] = std::move(field_dependency);
}
}
@ -486,10 +496,25 @@ static void update_socket_shapes(const bNodeTree &tree,
}
}
static void prepare_inferencing_interfaces(
const Span<const bNode *> nodes,
MutableSpan<const FieldInferencingInterface *> interface_by_node,
ResourceScope &scope)
{
for (const int i : nodes.index_range()) {
interface_by_node[i] = &get_node_field_inferencing_interface(*nodes[i], scope);
}
}
bool update_field_inferencing(const bNodeTree &tree)
{
tree.ensure_topology_cache();
const Span<const bNode *> nodes = tree.all_nodes();
ResourceScope scope;
Array<const FieldInferencingInterface *> interface_by_node(nodes.size());
prepare_inferencing_interfaces(nodes, interface_by_node, scope);
/* Create new inferencing interface for this node group. */
std::unique_ptr<FieldInferencingInterface> new_inferencing_interface =
std::make_unique<FieldInferencingInterface>();
@ -501,10 +526,12 @@ bool update_field_inferencing(const bNodeTree &tree)
/* Keep track of the state of all sockets. The index into this array is #SocketRef::id(). */
Array<SocketFieldState> field_state_by_socket_id(tree.all_sockets().size());
propagate_data_requirements_from_right_to_left(tree, field_state_by_socket_id);
propagate_data_requirements_from_right_to_left(
tree, interface_by_node, field_state_by_socket_id);
determine_group_input_states(tree, *new_inferencing_interface, field_state_by_socket_id);
propagate_field_status_from_left_to_right(tree, field_state_by_socket_id);
determine_group_output_states(tree, *new_inferencing_interface, field_state_by_socket_id);
propagate_field_status_from_left_to_right(tree, interface_by_node, field_state_by_socket_id);
determine_group_output_states(
tree, *new_inferencing_interface, interface_by_node, field_state_by_socket_id);
update_socket_shapes(tree, field_state_by_socket_id);
/* Update the previous group interface. */

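The thrust of the hunks above: `get_node_field_inferencing_interface()` used to return a `FieldInferencingInterface` by value, and each propagation pass recomputed it per node. Now the interfaces are resolved once into `interface_by_node` (indexed by `node.index()`), with temporaries kept alive by a `ResourceScope` so the returned references stay valid for the whole update. A hedged sketch of that ownership pattern, using simplified stand-in types rather than the actual `BLI_resource_scope.hh` API:

#include <memory>
#include <vector>

/* Sketch: a ResourceScope-like owner that keeps constructed values alive until the
 * scope dies, so callers can safely hand out references to them. */
class ResourceScopeSketch {
  std::vector<std::shared_ptr<void>> owned_; /* shared_ptr<void> type-erases the deleter. */

 public:
  template<typename T, typename... Args> T &construct(Args &&...args)
  {
    std::shared_ptr<T> value = std::make_shared<T>(std::forward<Args>(args)...);
    T &ref = *value;
    owned_.push_back(std::move(value));
    return ref;
  }
};

struct InterfaceSketch {
  std::vector<int> inputs;
  std::vector<int> outputs;
};

/* Resolve every node's interface once; the right-to-left and left-to-right passes
 * then index interface_by_node[node_index] instead of copying per traversal. */
std::vector<const InterfaceSketch *> prepare_interfaces(const int nodes_num,
                                                        ResourceScopeSketch &scope)
{
  std::vector<const InterfaceSketch *> interface_by_node(nodes_num);
  for (int i = 0; i < nodes_num; i++) {
    interface_by_node[i] = &scope.construct<InterfaceSketch>();
  }
  return interface_by_node;
}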
View File

@ -51,7 +51,7 @@ enum eNodeTreeChangedFlag {
static void add_tree_tag(bNodeTree *ntree, const eNodeTreeChangedFlag flag)
{
ntree->runtime->changed_flag |= flag;
ntree->runtime->topology_cache_is_dirty = true;
ntree->runtime->topology_cache_mutex.tag_dirty();
}
static void add_node_tag(bNodeTree *ntree, bNode *node, const eNodeTreeChangedFlag flag)

View File

@ -102,8 +102,9 @@ class Evaluator {
private:
/* A reference to the compositor context. */
Context &context_;
/* A reference to the compositor node tree. */
bNodeTree &node_tree_;
/* A derived node tree representing the compositor node tree. This is constructed when the node
* tree is compiled and reset when the evaluator is reset, so it gets reconstructed every time
* the node tree changes. */
std::unique_ptr<DerivedNodeTree> derived_node_tree_;
/* The compiled operations stream. This contains ordered pointers to the operations that were
* compiled. This is initialized when the node tree is compiled and freed when the evaluator
@ -116,8 +117,8 @@ class Evaluator {
bool is_compiled_ = false;
public:
/* Construct an evaluator from a compositor node tree and a context. */
Evaluator(Context &context, bNodeTree &node_tree);
/* Construct an evaluator from a context. */
Evaluator(Context &context);
/* Evaluate the compositor node tree. If the node tree is already compiled into an operations
* stream, that stream will be evaluated directly. Otherwise, the node tree will be compiled and

View File

@ -21,8 +21,7 @@ namespace blender::realtime_compositor {
using namespace nodes::derived_node_tree_types;
Evaluator::Evaluator(Context &context, bNodeTree &node_tree)
: context_(context), node_tree_(node_tree)
Evaluator::Evaluator(Context &context) : context_(context)
{
}
@ -67,7 +66,7 @@ bool Evaluator::validate_node_tree()
void Evaluator::compile_and_evaluate()
{
derived_node_tree_ = std::make_unique<DerivedNodeTree>(node_tree_);
derived_node_tree_ = std::make_unique<DerivedNodeTree>(*context_.get_scene()->nodetree);
if (!validate_node_tree()) {
return;

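Net effect of the two evaluator hunks: the compositor `Evaluator` no longer captures a `bNodeTree &` at construction; it re-reads `context_.get_scene()->nodetree` each time it compiles, so a replaced scene node tree is picked up on the next compile rather than dangling. A rough sketch of the lifetime pattern, with hypothetical simplified types:

#include <memory>

struct NodeTreeSketch { /* Stand-in for bNodeTree. */ };
struct SceneSketch {
  NodeTreeSketch *nodetree = nullptr;
};
struct ContextSketch {
  SceneSketch *scene = nullptr;
  SceneSketch *get_scene() { return scene; }
};
struct DerivedTreeSketch {
  explicit DerivedTreeSketch(NodeTreeSketch & /*tree*/) {}
};

class EvaluatorSketch {
  ContextSketch &context_;
  std::unique_ptr<DerivedTreeSketch> derived_tree_;

 public:
  explicit EvaluatorSketch(ContextSketch &context) : context_(context) {}

  void compile_and_evaluate()
  {
    /* Derive from the *current* scene tree; a reference cached at construction
     * could go stale when the scene's node tree is swapped out. */
    derived_tree_ = std::make_unique<DerivedTreeSketch>(*context_.get_scene()->nodetree);
  }

  /* Resetting drops the derived tree, forcing reconstruction on the next compile. */
  void reset() { derived_tree_.reset(); }
};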
View File

@ -90,7 +90,7 @@ class Engine {
public:
Engine(char *info_message)
: context_(texture_pool_, info_message),
evaluator_(context_, node_tree()),
evaluator_(context_),
last_viewport_size_(context_.get_output_size())
{
}
@ -124,12 +124,6 @@ class Engine {
evaluator_.reset();
}
}
/* Get a reference to the compositor node tree. */
static bNodeTree &node_tree()
{
return *DRW_context_state_get()->scene->nodetree;
}
};
} // namespace blender::draw::compositor

View File

@ -1,12 +1,7 @@
#ifdef GPU_ARB_texture_cube_map_array
# define textureLod_cubemapArray(tex, co, lod) textureLod(tex, co, lod)
#else
/* Fallback implementation for hardware not supporting cubemap arrays. */
# define samplerCubeArray sampler2DArray
/* Fallback implementation for hardware not supporting cubemap arrays.
 * The `samplerCubeArray` fallback is declared as a `sampler2DArray` in `glsl_shader_defines.glsl`. */
#ifndef GPU_ARB_texture_cube_map_array
float cubemap_face_index(vec3 P)
{

View File

@ -59,7 +59,9 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_surface_vert_common)
.additional_info("eevee_legacy_material_empty_base")
.additional_info("draw_resource_id_varying")
.additional_info("eevee_legacy_common_utiltex_lib")
.additional_info("eevee_legacy_closure_eval_surface_lib");
.additional_info("eevee_legacy_closure_eval_surface_lib")
/* Planar reflections assign to gl_ClipDistance via surface_vert.glsl. */
.define("USE_CLIP_PLANES");
GPU_SHADER_CREATE_INFO(eevee_legacy_material_surface_vert)
.additional_info("eevee_legacy_material_surface_vert_common")

View File

@ -70,7 +70,7 @@ int g_curves_attr_id = 0;
int curves_attribute_element_id()
{
int id = hairStrandID;
if (drw_curves.is_point_attribute[g_curves_attr_id][0] != 0) {
if (drw_curves.is_point_attribute[g_curves_attr_id][0] != 0u) {
id = hair_get_base_id();
}

View File

@ -77,7 +77,7 @@ int g_curves_attr_id = 0;
int curves_attribute_element_id()
{
int id = hairStrandID;
if (drw_curves.is_point_attribute[g_curves_attr_id][0] != 0) {
if (drw_curves.is_point_attribute[g_curves_attr_id][0] != 0u) {
id = hair_get_base_id();
}

View File

@ -133,7 +133,7 @@ int g_curves_attr_id = 0;
int curves_attribute_element_id()
{
int id = interp.curves_strand_id;
if (drw_curves.is_point_attribute[g_curves_attr_id][0] != 0) {
if (drw_curves.is_point_attribute[g_curves_attr_id][0] != 0u) {
# ifdef COMMON_HAIR_LIB
id = hair_get_base_id();
# endif

View File

@ -397,8 +397,8 @@ GPU_SHADER_CREATE_INFO(overlay_edit_curve_handle_no_geom)
/* NOTE: Color already in Linear space. Which is what we want. */
.define("srgbTarget", "false")
.vertex_in(0, Type::VEC3, "pos")
.vertex_in(1, Type::UINT, "data")
.vertex_out(overlay_edit_curve_handle_iface)
.vertex_in(1, Type::UCHAR, "data")
.vertex_out(overlay_edit_smooth_color_iface)
.push_constant(Type::BOOL, "showCurveHandles")
.push_constant(Type::INT, "curveHandleDisplay")
.fragment_out(0, Type::VEC4, "fragColor")

View File

@ -1,16 +1,166 @@
/* TODO(Metal): Implement correct SSBO implementation for geom shader workaround.
* Currently included as placeholder to unblock failing compilation in Metal. */
#pragma BLENDER_REQUIRE(common_view_clipping_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma USE_SSBO_VERTEX_FETCH(TriangleStrip, 10)
#define DISCARD_VERTEX \
gl_Position = vec4(0.0); \
finalColor = vec4(0.0); \
return;
void output_line(vec2 offset, vec4 color, vec3 out_world_pos, vec4 out_ndc_pos)
{
finalColor = color;
gl_Position = out_ndc_pos;
gl_Position.xy += offset * out_ndc_pos.w;
view_clipping_distances(out_world_pos);
}
void main()
{
GPU_INTEL_VERTEX_SHADER_WORKAROUND
vec3 world_pos = point_object_to_world(pos);
gl_Position = point_world_to_ndc(world_pos);
vert.flag = data;
/* Perform vertex shader for each input primitive. */
vec3 in_pos[2];
vec3 world_pos[2];
vec4 ndc_pos[2];
uint vert_flag[2];
view_clipping_distances(world_pos);
/* Input prim is LineList. */
/* Index of the input line primitive. */
int input_line_id = gl_VertexID / 10;
/* Index of the output vertex pair, grouped as emitted by the original "output_line" function
 * in overlay_edit_curve_handle_geom.glsl. */
int output_prim_id = (gl_VertexID / 2) % 5;
/* ID of vertex within line primitive (0 or 1) for current vertex. */
int output_prim_vert_id = gl_VertexID % 2;
for (int i = 0; i < 2; i++) {
in_pos[i] = vertex_fetch_attribute((input_line_id * 2) + i, pos, vec3).xyz;
vert_flag[i] = (uint)vertex_fetch_attribute((input_line_id * 2) + i, data, uchar);
world_pos[i] = point_object_to_world(in_pos[i]);
ndc_pos[i] = point_world_to_ndc(world_pos[i]);
}
/* Perform Geometry shader equivalent calculation. */
uint is_active_nurb = (vert_flag[1] & ACTIVE_NURB);
uint color_id = (vert_flag[1] >> COLOR_SHIFT);
/* Don't output any edges if we don't show handles */
if (!showCurveHandles && (color_id < 5)) {
return;
}
bool edge_selected = (((vert_flag[1] | vert_flag[0]) & VERT_SELECTED) != 0u);
bool handle_selected = (showCurveHandles &&
(((vert_flag[1] | vert_flag[0]) & VERT_SELECTED_BEZT_HANDLE) != 0u));
bool is_gpencil = ((vert_flag[1] & VERT_GPENCIL_BEZT_HANDLE) != 0u);
/* If handle type is only selected and the edge is not selected, don't show. */
if ((curveHandleDisplay != CURVE_HANDLE_ALL) && (!handle_selected)) {
/* Nurbs must show the handles always. */
bool is_u_segment = (((vert_flag[1] ^ vert_flag[0]) & EVEN_U_BIT) != 0u);
if ((!is_u_segment) && (color_id <= 4)) {
return;
}
if (is_gpencil) {
return;
}
}
vec4 inner_color;
if (color_id == 0) {
inner_color = (edge_selected) ? colorHandleSelFree : colorHandleFree;
}
else if (color_id == 1) {
inner_color = (edge_selected) ? colorHandleSelAuto : colorHandleAuto;
}
else if (color_id == 2) {
inner_color = (edge_selected) ? colorHandleSelVect : colorHandleVect;
}
else if (color_id == 3) {
inner_color = (edge_selected) ? colorHandleSelAlign : colorHandleAlign;
}
else if (color_id == 4) {
inner_color = (edge_selected) ? colorHandleSelAutoclamp : colorHandleAutoclamp;
}
else {
bool is_selected = (((vert_flag[1] & vert_flag[0]) & VERT_SELECTED) != 0u);
bool is_u_segment = (((vert_flag[1] ^ vert_flag[0]) & EVEN_U_BIT) != 0u);
if (is_u_segment) {
inner_color = (is_selected) ? colorNurbSelUline : colorNurbUline;
}
else {
inner_color = (is_selected) ? colorNurbSelVline : colorNurbVline;
}
}
vec4 outer_color = (is_active_nurb != 0u) ?
mix(colorActiveSpline,
inner_color,
0.25) /* Minimize active color bleeding on inner_color. */
:
vec4(inner_color.rgb, 0.0);
vec2 v1_2 = (ndc_pos[1].xy / ndc_pos[1].w - ndc_pos[0].xy / ndc_pos[0].w);
vec2 offset = sizeEdge * 4.0 * sizeViewportInv; /* 4.0 is eyeballed */
if (abs(v1_2.x * sizeViewport.x) < abs(v1_2.y * sizeViewport.y)) {
offset.y = 0.0;
}
else {
offset.x = 0.0;
}
/* Output geometry based on output line ID. */
switch (output_prim_id) {
case 0: {
/* draw the transparent border (AA). */
if (is_active_nurb != 0u) {
offset *= 0.75; /* Don't make the active "halo" appear very thick. */
output_line(offset * 2.0,
vec4(colorActiveSpline.rgb, 0.0),
world_pos[output_prim_vert_id],
ndc_pos[output_prim_vert_id]);
}
else {
DISCARD_VERTEX
}
break;
}
case 1: {
/* draw the outline. */
output_line(
offset, outer_color, world_pos[output_prim_vert_id], ndc_pos[output_prim_vert_id]);
break;
}
case 2: {
/* draw the core of the line. */
output_line(
vec2(0.0), inner_color, world_pos[output_prim_vert_id], ndc_pos[output_prim_vert_id]);
break;
}
case 3: {
/* draw the outline. */
output_line(
-offset, outer_color, world_pos[output_prim_vert_id], ndc_pos[output_prim_vert_id]);
break;
}
case 4: {
/* draw the transparent border (AA). */
if (is_active_nurb != 0u) {
output_line(offset * -2.0,
vec4(colorActiveSpline.rgb, 0.0),
world_pos[output_prim_vert_id],
ndc_pos[output_prim_vert_id]);
}
break;
}
default: {
DISCARD_VERTEX
break;
}
}
}

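To check the expansion math in the shader above: each input line yields 10 strip vertices, i.e. 5 output vertex pairs (AA border, outline, core, outline, AA border). A small C++ mirror of the `gl_VertexID` decomposition, with arbitrary sample IDs:

#include <cstdio>
#include <initializer_list>

int main()
{
  /* Mirrors the GLSL: 10 output vertices per input line, in 5 pairs. */
  for (int vertex_id : {0, 1, 9, 10, 23}) {
    const int input_line_id = vertex_id / 10;       /* Which input line primitive. */
    const int output_prim_id = (vertex_id / 2) % 5; /* Which of the 5 emitted segments. */
    const int output_prim_vert_id = vertex_id % 2;  /* First or second vertex of the pair. */
    std::printf("gl_VertexID=%d -> line=%d, segment=%d, vert=%d\n",
                vertex_id, input_line_id, output_prim_id, output_prim_vert_id);
  }
  return 0;
}

For example, `gl_VertexID` 23 maps to input line 2, output segment 1 (the first outline), second vertex of the pair.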
View File

@ -17,7 +17,7 @@ vec4 EDIT_MESH_edge_color_inner(uint edge_flag)
color = ((edge_flag & EDGE_SELECTED) != 0u) ? color_select : color;
color = ((edge_flag & EDGE_ACTIVE) != 0u) ? colorEditMeshActive : color;
color.a = (selectEdges || (edge_flag & (EDGE_SELECTED | EDGE_ACTIVE)) != 0) ? 1.0 : 0.7;
color.a = (selectEdges || (edge_flag & (EDGE_SELECTED | EDGE_ACTIVE)) != 0u) ? 1.0 : 0.7;
return color;
}
@ -35,7 +35,7 @@ vec4 EDIT_MESH_edge_vertex_color(uint vertex_flag)
vec4 EDIT_MESH_vertex_color(uint vertex_flag, float vertex_crease)
{
if ((vertex_flag & VERT_ACTIVE) != 0) {
if ((vertex_flag & VERT_ACTIVE) != 0u) {
return vec4(colorEditMeshActive.xyz, 1.0);
}
else if ((vertex_flag & VERT_SELECTED) != 0u) {
@ -57,7 +57,7 @@ vec4 EDIT_MESH_face_color(uint face_flag)
color = ((face_flag & FACE_FREESTYLE) != 0u) ? colorFaceFreestyle : color;
color = ((face_flag & FACE_SELECTED) != 0u) ? colorFaceSelect : color;
color = ((face_flag & FACE_ACTIVE) != 0u) ? color_active : color;
color.a *= ((face_flag & (FACE_FREESTYLE | FACE_SELECTED | FACE_ACTIVE)) == 0 || selectFaces) ?
color.a *= ((face_flag & (FACE_FREESTYLE | FACE_SELECTED | FACE_ACTIVE)) == 0u || selectFaces) ?
1.0 :
0.5;
return color;

View File

@ -38,10 +38,10 @@ void main()
finalColor = EDIT_MESH_vertex_color(m_data.y, vertexCrease);
gl_PointSize = sizeVertex * ((vertexCrease > 0.0) ? 3.0 : 2.0);
/* Make selected and active vertex always on top. */
if ((data.x & VERT_SELECTED) != 0) {
if ((data.x & VERT_SELECTED) != 0u) {
gl_Position.z -= 5e-7 * abs(gl_Position.w);
}
if ((data.x & VERT_ACTIVE) != 0) {
if ((data.x & VERT_ACTIVE) != 0u) {
gl_Position.z -= 5e-7 * abs(gl_Position.w);
}

View File

@ -11,9 +11,9 @@ void main()
half_pixel_offset;
#ifdef USE_EDGE_SELECT
bool is_select = (flag & int(EDGE_UV_SELECT)) != 0u;
bool is_select = (flag & int(EDGE_UV_SELECT)) != 0;
#else
bool is_select = (flag & int(VERT_UV_SELECT)) != 0u;
bool is_select = (flag & int(VERT_UV_SELECT)) != 0;
#endif
geom_in.selectionFac = is_select ? 1.0 : 0.0;
/* Move selected edges to the top

View File

@ -5,8 +5,8 @@ void main()
vec3 world_pos = point_object_to_world(vec3(au, 0.0));
gl_Position = point_world_to_ndc(world_pos);
bool is_selected = (flag & FACE_UV_SELECT) != 0;
bool is_active = (flag & FACE_UV_ACTIVE) != 0;
bool is_selected = (flag & FACE_UV_SELECT) != 0u;
bool is_active = (flag & FACE_UV_ACTIVE) != 0u;
finalColor = (is_selected) ? colorFaceSelect : colorFace;
finalColor = (is_active) ? colorEditMeshActive : finalColor;

View File

@ -5,8 +5,8 @@ const vec4 pinned_col = vec4(1.0, 0.0, 0.0, 1.0);
void main()
{
bool is_selected = (flag & (VERT_UV_SELECT | FACE_UV_SELECT)) != 0;
bool is_pinned = (flag & VERT_UV_PINNED) != 0;
bool is_selected = (flag & (VERT_UV_SELECT | FACE_UV_SELECT)) != 0u;
bool is_pinned = (flag & VERT_UV_PINNED) != 0u;
vec4 deselect_col = (is_pinned) ? pinned_col : vec4(color.rgb, 1.0);
fillColor = (is_selected) ? colorVertexSelect : deselect_col;
outlineColor = (is_pinned) ? pinned_col : vec4(fillColor.rgb, 0.0);

View File

@ -24,7 +24,7 @@ void main()
}
if (showKeyFrames) {
if ((flag & MOTIONPATH_VERT_KEY) != 0) {
if ((flag & MOTIONPATH_VERT_KEY) != 0u) {
gl_PointSize = float(pointSize + 5);
finalColor = colorVertexSelect;
/* Bias more to get these on top of regular points */

View File

@ -272,6 +272,20 @@ void draw_channel_strips(bAnimContext *ac, SpaceAction *saction, ARegion *region
}
break;
}
case ANIMTYPE_GPLAYER: {
if (show_group_colors) {
uchar gpl_col[4];
bGPDlayer *gpl = (bGPDlayer *)ale->data;
rgb_float_to_uchar(gpl_col, gpl->color);
gpl_col[3] = col1[3];
immUniformColor4ubv(sel ? col1 : gpl_col);
}
else {
immUniformColor4ubv(sel ? col1 : col2);
}
break;
}
default: {
immUniformColor4ubv(sel ? col1 : col2);
}

View File

@ -588,7 +588,7 @@ static void lineart_shadow_edge_cut(LineartData *ld,
new_seg_2->ratio = end;
}
double r_fb_co_1[4], r_fb_co_2[4], r_gloc_1[3], r_gloc_2[3];
double r_fb_co_1[4] = {0}, r_fb_co_2[4] = {0}, r_gloc_1[3] = {0}, r_gloc_2[3] = {0};
double r_new_in_the_middle[4], r_new_in_the_middle_global[3], r_new_at;
double *s1_fb_co_1, *s1_fb_co_2, *s1_gloc_1, *s1_gloc_2;

View File

@ -334,6 +334,8 @@ void gpu_shader_create_info_init()
overlay_edit_mesh_edge_flat = overlay_edit_mesh_edge_flat_no_geom;
overlay_edit_mesh_edge_clipped = overlay_edit_mesh_edge_clipped_no_geom;
overlay_edit_mesh_edge_flat_clipped = overlay_edit_mesh_edge_flat_clipped_no_geom;
overlay_edit_curve_handle = overlay_edit_curve_handle_no_geom;
overlay_edit_curve_handle_clipped = overlay_edit_curve_handle_clipped_no_geom;
/* Overlay Armature Shape outline. */
overlay_armature_shape_outline = overlay_armature_shape_outline_no_geom;

View File

@ -43,6 +43,19 @@ typedef enum eGPUTextureType {
ENUM_OPERATORS(eGPUTextureType, GPU_TEXTURE_CUBE_ARRAY)
/* Format types for samplers within the shader.
* This covers the sampler format type permutations within GLSL/MSL. */
typedef enum eGPUSamplerFormat {
GPU_SAMPLER_TYPE_FLOAT = 0,
GPU_SAMPLER_TYPE_INT = 1,
GPU_SAMPLER_TYPE_UINT = 2,
/* Special case for depth, as these require differing dummy formats. */
GPU_SAMPLER_TYPE_DEPTH = 3,
GPU_SAMPLER_TYPE_MAX = 4
} eGPUSamplerFormat;
ENUM_OPERATORS(eGPUSamplerFormat, GPU_SAMPLER_TYPE_UINT)
#ifdef DEBUG
# define DEBUG_NAME_LEN 64
#else

View File

@ -154,11 +154,13 @@ int MTLBatch::prepare_vertex_binding(MTLVertBuf *verts,
continue;
}
/* Fetch metal attribute information. */
const MTLShaderInputAttribute &mtl_attr = interface->get_attribute(input->location);
/* Fetch Metal attribute information (ShaderInput->binding is used to fetch the
 * corresponding slot). */
const MTLShaderInputAttribute &mtl_attr = interface->get_attribute(input->binding);
BLI_assert(mtl_attr.location >= 0);
/* Verify that the attribute location from the shader interface
* matches the attribute location returned. */
* matches the attribute location returned in the input table. These should always be the
* same. */
BLI_assert(mtl_attr.location == input->location);
/* Check if attribute is already present in the given slot. */
@ -247,12 +249,16 @@ int MTLBatch::prepare_vertex_binding(MTLVertBuf *verts,
buffer_index;
/* Update total attribute count. */
desc.vertex_descriptor.num_attributes = max_ii(
mtl_attr.location + i + 1, desc.vertex_descriptor.num_attributes);
desc.vertex_descriptor.total_attributes++;
desc.vertex_descriptor.max_attribute_value = max_ii(
mtl_attr.location + i, desc.vertex_descriptor.max_attribute_value);
MTL_LOG_INFO("-- Sub-Attrib Location: %d, offset: %d, buffer index: %d\n",
mtl_attr.location + i,
attribute_offset + i * 16,
buffer_index);
/* Update attribute used-slot mask for array elements. */
attr_mask &= ~(1 << (mtl_attr.location + i));
}
MTL_LOG_INFO(
"Float4x4 attribute type added for '%s' at attribute locations: %d to %d\n",
@ -262,7 +268,8 @@ int MTLBatch::prepare_vertex_binding(MTLVertBuf *verts,
}
/* Ensure we are not exceeding the attribute limit. */
BLI_assert(desc.vertex_descriptor.num_attributes <= MTL_MAX_VERTEX_INPUT_ATTRIBUTES);
BLI_assert(desc.vertex_descriptor.max_attribute_value <
MTL_MAX_VERTEX_INPUT_ATTRIBUTES);
}
}
else {
@ -330,11 +337,11 @@ int MTLBatch::prepare_vertex_binding(MTLVertBuf *verts,
}
desc.vertex_descriptor.attributes[mtl_attr.location].offset = attribute_offset;
desc.vertex_descriptor.attributes[mtl_attr.location].buffer_index = buffer_index;
desc.vertex_descriptor.num_attributes = ((mtl_attr.location + 1) >
desc.vertex_descriptor.num_attributes) ?
(mtl_attr.location + 1) :
desc.vertex_descriptor.num_attributes;
desc.vertex_descriptor.max_attribute_value =
((mtl_attr.location) > desc.vertex_descriptor.max_attribute_value) ?
(mtl_attr.location) :
desc.vertex_descriptor.max_attribute_value;
desc.vertex_descriptor.total_attributes++;
/* SSBO Vertex Fetch attribute bind. */
if (active_shader_->get_uses_ssbo_vertex_fetch()) {
BLI_assert_msg(desc.vertex_descriptor.attributes[mtl_attr.location].format ==
@ -356,9 +363,9 @@ int MTLBatch::prepare_vertex_binding(MTLVertBuf *verts,
desc.vertex_descriptor.num_ssbo_attributes++;
}
/* NOTE: We are setting num_attributes to be up to the maximum found index, because of
* this, it is possible that we may skip over certain attributes if they were not in the
* source GPUVertFormat. */
/* NOTE: We are setting max_attribute_value to be up to the maximum found index, because
* of this, it is possible that we may skip over certain attributes if they were not in
* the source GPUVertFormat. */
MTL_LOG_INFO(
" -- Batch Attribute(%d): ORIG Shader Format: %d, ORIG Vert format: %d, Vert "
"components: %d, Fetch Mode %d --> FINAL FORMAT: %d\n",
@ -472,6 +479,9 @@ id<MTLRenderCommandEncoder> MTLBatch::bind(uint v_first, uint v_count, uint i_fi
this->elem ? @"(indexed)" : @"",
active_shader_->get_interface()->get_name()]];
}
/*** Bind Vertex Buffers and Index Buffers. ***/
/* SSBO Vertex Fetch Buffer bindings. */
if (uses_ssbo_fetch) {
@ -536,7 +546,7 @@ id<MTLRenderCommandEncoder> MTLBatch::bind(uint v_first, uint v_count, uint i_fi
* This should happen after all other final rendering setup is complete. */
MTLPrimitiveType mtl_prim_type = gpu_prim_type_to_metal(this->prim_type);
if (!ctx->ensure_render_pipeline_state(mtl_prim_type)) {
printf("FAILED TO ENSURE RENDER PIPELINE STATE");
MTL_LOG_ERROR("Failed to prepare and apply render pipeline state.\n");
BLI_assert(false);
if (G.debug & G_DEBUG_GPU) {
@ -705,18 +715,22 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(
/* DEBUG: verify if our attribute bindings have been fully provided as expected. */
#if MTL_DEBUG_SHADER_ATTRIBUTES == 1
if (attr_mask != 0) {
for (uint16_t mask = 1, a = 0; a < 16; a++, mask <<= 1) {
if (attr_mask & mask) {
/* Fallback for setting default attributes, for missed slots. Attributes flagged with
* 'MTLVertexFormatInvalid' in the vertex descriptor are bound to a NULL buffer during PSO
* creation. */
MTL_LOG_WARNING("MTLBatch: Missing expected attribute '%s' at index '%d' for shader: %s\n",
this->active_shader->interface->attributes[a].name,
a,
interface->name);
/* Ensure any assigned attribute has not been given an invalid format. This should not
* occur and may be the result of an unsupported attribute type conversion. */
BLI_assert(desc.attributes[a].format == MTLVertexFormatInvalid);
/* Attributes are not necessarily contiguous. */
for (int i = 0; i < active_shader_->get_interface()->get_total_attributes(); i++) {
const MTLShaderInputAttribute &attr = active_shader_->get_interface()->get_attribute(i);
if (attr_mask & (1 << attr.location)) {
MTL_LOG_WARNING(
"Warning: Missing expected attribute '%s' with location: %u in shader %s (attr "
"number: %u)\n",
active_shader_->get_interface()->get_name_at_offset(attr.name_offset),
attr.location,
active_shader_->name_get(),
i);
/* If an attribute is not included, then its format in the vertex descriptor should be
 * invalid due to nil assignment. */
BLI_assert(desc.vertex_descriptor.attributes[attr.location].format ==
MTLVertexFormatInvalid);
}
}
}

View File

@ -460,6 +460,9 @@ struct MTLContextGlobalShaderPipelineState {
/* Render parameters. */
float point_size = 1.0f;
float line_width = 1.0f;
/* Clipping plane enablement. */
bool clip_distance_enabled[6] = {false};
};
/* Command Buffer Manager - Owned by MTLContext.
@ -655,9 +658,9 @@ class MTLContext : public Context {
/** Dummy Resources */
/* Maximum of 32 texture types. Though most combinations invalid. */
gpu::MTLTexture *dummy_textures_[GPU_TEXTURE_BUFFER] = {nullptr};
GPUVertFormat dummy_vertformat_;
GPUVertBuf *dummy_verts_ = nullptr;
gpu::MTLTexture *dummy_textures_[GPU_SAMPLER_TYPE_MAX][GPU_TEXTURE_BUFFER] = {nullptr};
GPUVertFormat dummy_vertformat_[GPU_SAMPLER_TYPE_MAX];
GPUVertBuf *dummy_verts_[GPU_SAMPLER_TYPE_MAX] = {nullptr};
public:
/* GPUContext interface. */
@ -743,7 +746,7 @@ class MTLContext : public Context {
id<MTLBuffer> get_null_buffer();
id<MTLBuffer> get_null_attribute_buffer();
gpu::MTLTexture *get_dummy_texture(eGPUTextureType type);
gpu::MTLTexture *get_dummy_texture(eGPUTextureType type, eGPUSamplerFormat sampler_format);
void free_dummy_resources();
/* State assignment. */

View File

@ -13,6 +13,7 @@
#include "mtl_shader_interface.hh"
#include "mtl_state.hh"
#include "mtl_uniform_buffer.hh"
#include "mtl_vertex_buffer.hh"
#include "DNA_userdef_types.h"
@ -512,53 +513,98 @@ id<MTLBuffer> MTLContext::get_null_attribute_buffer()
return null_attribute_buffer_;
}
gpu::MTLTexture *MTLContext::get_dummy_texture(eGPUTextureType type)
gpu::MTLTexture *MTLContext::get_dummy_texture(eGPUTextureType type,
eGPUSamplerFormat sampler_format)
{
/* Decrement 1 from the texture type as values start at 1 and go to 32 (inclusive). Remap to 0..31. */
gpu::MTLTexture *dummy_tex = dummy_textures_[type - 1];
gpu::MTLTexture *dummy_tex = dummy_textures_[sampler_format][type - 1];
if (dummy_tex != nullptr) {
return dummy_tex;
}
else {
/* Determine format for dummy texture. */
eGPUTextureFormat format = GPU_RGBA8;
switch (sampler_format) {
case GPU_SAMPLER_TYPE_FLOAT:
format = GPU_RGBA8;
break;
case GPU_SAMPLER_TYPE_INT:
format = GPU_RGBA8I;
break;
case GPU_SAMPLER_TYPE_UINT:
format = GPU_RGBA8UI;
break;
case GPU_SAMPLER_TYPE_DEPTH:
format = GPU_DEPTH32F_STENCIL8;
break;
default:
BLI_assert_unreachable();
}
/* Create dummy texture based on desired type. */
GPUTexture *tex = nullptr;
switch (type) {
case GPU_TEXTURE_1D:
tex = GPU_texture_create_1d("Dummy 1D", 128, 1, GPU_RGBA8, nullptr);
tex = GPU_texture_create_1d("Dummy 1D", 128, 1, format, nullptr);
break;
case GPU_TEXTURE_1D_ARRAY:
tex = GPU_texture_create_1d_array("Dummy 1DArray", 128, 1, 1, GPU_RGBA8, nullptr);
tex = GPU_texture_create_1d_array("Dummy 1DArray", 128, 1, 1, format, nullptr);
break;
case GPU_TEXTURE_2D:
tex = GPU_texture_create_2d("Dummy 2D", 128, 128, 1, GPU_RGBA8, nullptr);
tex = GPU_texture_create_2d("Dummy 2D", 128, 128, 1, format, nullptr);
break;
case GPU_TEXTURE_2D_ARRAY:
tex = GPU_texture_create_2d_array("Dummy 2DArray", 128, 128, 1, 1, GPU_RGBA8, nullptr);
tex = GPU_texture_create_2d_array("Dummy 2DArray", 128, 128, 1, 1, format, nullptr);
break;
case GPU_TEXTURE_3D:
tex = GPU_texture_create_3d(
"Dummy 3D", 128, 128, 1, 1, GPU_RGBA8, GPU_DATA_UBYTE, nullptr);
tex = GPU_texture_create_3d("Dummy 3D", 128, 128, 1, 1, format, GPU_DATA_UBYTE, nullptr);
break;
case GPU_TEXTURE_CUBE:
tex = GPU_texture_create_cube("Dummy Cube", 128, 1, GPU_RGBA8, nullptr);
tex = GPU_texture_create_cube("Dummy Cube", 128, 1, format, nullptr);
break;
case GPU_TEXTURE_CUBE_ARRAY:
tex = GPU_texture_create_cube_array("Dummy CubeArray", 128, 1, 1, GPU_RGBA8, nullptr);
tex = GPU_texture_create_cube_array("Dummy CubeArray", 128, 1, 1, format, nullptr);
break;
case GPU_TEXTURE_BUFFER:
if (!dummy_verts_) {
GPU_vertformat_clear(&dummy_vertformat_);
GPU_vertformat_attr_add(&dummy_vertformat_, "dummy", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
dummy_verts_ = GPU_vertbuf_create_with_format_ex(&dummy_vertformat_, GPU_USAGE_STATIC);
GPU_vertbuf_data_alloc(dummy_verts_, 64);
if (!dummy_verts_[sampler_format]) {
GPU_vertformat_clear(&dummy_vertformat_[sampler_format]);
GPUVertCompType comp_type = GPU_COMP_F32;
GPUVertFetchMode fetch_mode = GPU_FETCH_FLOAT;
switch (sampler_format) {
case GPU_SAMPLER_TYPE_FLOAT:
case GPU_SAMPLER_TYPE_DEPTH:
comp_type = GPU_COMP_F32;
fetch_mode = GPU_FETCH_FLOAT;
break;
case GPU_SAMPLER_TYPE_INT:
comp_type = GPU_COMP_I32;
fetch_mode = GPU_FETCH_INT;
break;
case GPU_SAMPLER_TYPE_UINT:
comp_type = GPU_COMP_U32;
fetch_mode = GPU_FETCH_INT;
break;
default:
BLI_assert_unreachable();
}
GPU_vertformat_attr_add(
&dummy_vertformat_[sampler_format], "dummy", comp_type, 4, fetch_mode);
dummy_verts_[sampler_format] = GPU_vertbuf_create_with_format_ex(
&dummy_vertformat_[sampler_format],
GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
GPU_vertbuf_data_alloc(dummy_verts_[sampler_format], 64);
}
tex = GPU_texture_create_from_vertbuf("Dummy TextureBuffer", dummy_verts_);
tex = GPU_texture_create_from_vertbuf("Dummy TextureBuffer", dummy_verts_[sampler_format]);
break;
default:
BLI_assert_msg(false, "Unrecognised texture type");
return nullptr;
}
gpu::MTLTexture *metal_tex = static_cast<gpu::MTLTexture *>(reinterpret_cast<Texture *>(tex));
dummy_textures_[type - 1] = metal_tex;
dummy_textures_[sampler_format][type - 1] = metal_tex;
return metal_tex;
}
return nullptr;
@ -566,15 +612,17 @@ gpu::MTLTexture *MTLContext::get_dummy_texture(eGPUTextureType type)
void MTLContext::free_dummy_resources()
{
for (int tex = 0; tex < GPU_TEXTURE_BUFFER; tex++) {
if (dummy_textures_[tex]) {
GPU_texture_free(
reinterpret_cast<GPUTexture *>(static_cast<Texture *>(dummy_textures_[tex])));
dummy_textures_[tex] = nullptr;
for (int format = 0; format < GPU_SAMPLER_TYPE_MAX; format++) {
for (int tex = 0; tex < GPU_TEXTURE_BUFFER; tex++) {
if (dummy_textures_[format][tex]) {
GPU_texture_free(
reinterpret_cast<GPUTexture *>(static_cast<Texture *>(dummy_textures_[format][tex])));
dummy_textures_[format][tex] = nullptr;
}
}
if (dummy_verts_[format]) {
GPU_vertbuf_discard(dummy_verts_[format]);
}
}
if (dummy_verts_) {
GPU_vertbuf_discard(dummy_verts_);
}
}
@ -809,31 +857,31 @@ bool MTLContext::ensure_render_pipeline_state(MTLPrimitiveType mtl_prim_type)
}
/* Transform feedback buffer binding. */
/* TODO(Metal): Include this code once MTLVertBuf is merged. We bind the vertex buffer to which
* transform feedback data will be written. */
// GPUVertBuf *tf_vbo =
// this->pipeline_state.active_shader->get_transform_feedback_active_buffer();
// if (tf_vbo != nullptr && pipeline_state_instance->transform_feedback_buffer_index >= 0) {
GPUVertBuf *tf_vbo =
this->pipeline_state.active_shader->get_transform_feedback_active_buffer();
if (tf_vbo != nullptr && pipeline_state_instance->transform_feedback_buffer_index >= 0) {
// /* Ensure primitive type is either GPU_LINES, GPU_TRIANGLES or GPU_POINT */
// BLI_assert(mtl_prim_type == MTLPrimitiveTypeLine ||
// mtl_prim_type == MTLPrimitiveTypeTriangle ||
// mtl_prim_type == MTLPrimitiveTypePoint);
/* Ensure primitive type is either GPU_LINES, GPU_TRIANGLES or GPU_POINT */
BLI_assert(mtl_prim_type == MTLPrimitiveTypeLine ||
mtl_prim_type == MTLPrimitiveTypeTriangle ||
mtl_prim_type == MTLPrimitiveTypePoint);
// /* Fetch active transform feedback buffer from vertbuf */
// MTLVertBuf *tf_vbo_mtl = static_cast<MTLVertBuf *>(reinterpret_cast<VertBuf *>(tf_vbo));
// int tf_buffer_offset = 0;
// id<MTLBuffer> tf_buffer_mtl = tf_vbo_mtl->get_metal_buffer(&tf_buffer_offset);
/* Fetch active transform feedback buffer from vertbuf */
MTLVertBuf *tf_vbo_mtl = static_cast<MTLVertBuf *>(reinterpret_cast<VertBuf *>(tf_vbo));
/* Ensure TF buffer is ready. */
tf_vbo_mtl->bind();
id<MTLBuffer> tf_buffer_mtl = tf_vbo_mtl->get_metal_buffer();
BLI_assert(tf_buffer_mtl != nil);
// if (tf_buffer_mtl != nil && tf_buffer_offset >= 0) {
// [rec setVertexBuffer:tf_buffer_mtl
// offset:tf_buffer_offset
// atIndex:pipeline_state_instance->transform_feedback_buffer_index];
// printf("Successfully bound VBO: %p for transform feedback (MTL Buffer: %p)\n",
// tf_vbo_mtl,
// tf_buffer_mtl);
// }
// }
if (tf_buffer_mtl != nil) {
[rec setVertexBuffer:tf_buffer_mtl
offset:0
atIndex:pipeline_state_instance->transform_feedback_buffer_index];
MTL_LOG_INFO("Successfully bound VBO: %p for transform feedback (MTL Buffer: %p)\n",
tf_vbo_mtl,
tf_buffer_mtl);
}
}
/* Matrix Bindings. */
/* This is now called upon shader bind. We may need to re-evaluate this though,
@ -1221,7 +1269,9 @@ void MTLContext::ensure_texture_bindings(
if (bind_dummy_texture) {
if (bool(shader_texture_info.stage_mask & ShaderStage::VERTEX)) {
rps.bind_vertex_texture(
get_dummy_texture(shader_texture_info.type)->get_metal_handle(), slot);
get_dummy_texture(shader_texture_info.type, shader_texture_info.sampler_format)
->get_metal_handle(),
slot);
/* Bind default sampler state. */
MTLSamplerBinding default_binding = {true, DEFAULT_SAMPLER_STATE};
@ -1229,7 +1279,9 @@ void MTLContext::ensure_texture_bindings(
}
if (bool(shader_texture_info.stage_mask & ShaderStage::FRAGMENT)) {
rps.bind_fragment_texture(
get_dummy_texture(shader_texture_info.type)->get_metal_handle(), slot);
get_dummy_texture(shader_texture_info.type, shader_texture_info.sampler_format)
->get_metal_handle(),
slot);
/* Bind default sampler state. */
MTLSamplerBinding default_binding = {true, DEFAULT_SAMPLER_STATE};

View File

@ -185,8 +185,9 @@ void MTLDrawList::submit()
can_use_MDI = can_use_MDI && (is_finishing_a_buffer || command_len_ > 2);
/* Bind Batch to setup render pipeline state. */
BLI_assert(batch_ != nullptr);
id<MTLRenderCommandEncoder> rec = batch_->bind(0, 0, 0, 0);
if (!rec) {
if (rec == nil) {
BLI_assert_msg(false, "A RenderCommandEncoder should always be available!\n");
return;
}

View File

@ -95,7 +95,7 @@ void MTLFrameBuffer::bind(bool enabled_srgb)
/* Verify Context is valid. */
if (context_ != static_cast<MTLContext *>(unwrap(GPU_context_active_get()))) {
BLI_assert(false && "Trying to use the same frame-buffer in multiple context's.");
BLI_assert_msg(false, "Trying to use the same frame-buffer in multiple context's.");
return;
}
@ -986,7 +986,7 @@ bool MTLFrameBuffer::add_depth_attachment(gpu::MTLTexture *texture, int miplevel
if (layer == -1) {
mtl_depth_attachment_.slice = 0;
mtl_depth_attachment_.depth_plane = 0;
mtl_depth_attachment_.render_target_array_length = 1;
mtl_depth_attachment_.render_target_array_length = 6;
use_multilayered_rendering_ = true;
}
break;
@ -1007,7 +1007,7 @@ bool MTLFrameBuffer::add_depth_attachment(gpu::MTLTexture *texture, int miplevel
mtl_depth_attachment_.depth_plane = 0;
break;
default:
BLI_assert(false && "Unrecognized texture type");
BLI_assert_msg(false, "Unrecognized texture type");
break;
}
@ -1108,7 +1108,7 @@ bool MTLFrameBuffer::add_stencil_attachment(gpu::MTLTexture *texture, int miplev
if (layer == -1) {
mtl_stencil_attachment_.slice = 0;
mtl_stencil_attachment_.depth_plane = 0;
mtl_stencil_attachment_.render_target_array_length = 1;
mtl_stencil_attachment_.render_target_array_length = 6;
use_multilayered_rendering_ = true;
}
break;
@ -1129,7 +1129,7 @@ bool MTLFrameBuffer::add_stencil_attachment(gpu::MTLTexture *texture, int miplev
mtl_stencil_attachment_.depth_plane = 0;
break;
default:
BLI_assert(false && "Unrecognized texture type");
BLI_assert_msg(false, "Unrecognized texture type");
break;
}

View File

@ -39,8 +39,16 @@ uchar *MTLImmediate::begin()
metal_primitive_mode_ = mtl_prim_type_to_topology_class(metal_primitive_type_);
has_begun_ = true;
/* If the prim type is a line loop, add an extra vertex at the end for the closing line,
 * as Metal does not support this primitive type. We treat this as a line strip with one
 * extra vertex. */
int vertex_alloc_length = vertex_len;
if (prim_type == GPU_PRIM_LINE_LOOP) {
vertex_alloc_length++;
}
/* Allocate a range of data and return host-accessible pointer. */
const size_t bytes_needed = vertex_buffer_size(&vertex_format, vertex_len);
const size_t bytes_needed = vertex_buffer_size(&vertex_format, vertex_alloc_length);
current_allocation_ = context_->get_scratchbuffer_manager()
.scratch_buffer_allocate_range_aligned(bytes_needed, 256);
[current_allocation_.metal_buffer retain];
@ -101,11 +109,11 @@ void MTLImmediate::end()
/* Reset vertex descriptor to default state. */
desc.reset_vertex_descriptor();
desc.vertex_descriptor.num_attributes = interface->get_total_attributes();
desc.vertex_descriptor.total_attributes = interface->get_total_attributes();
desc.vertex_descriptor.max_attribute_value = interface->get_total_attributes() - 1;
desc.vertex_descriptor.num_vert_buffers = 1;
for (int i = 0; i < desc.vertex_descriptor.num_attributes; i++) {
for (int i = 0; i < desc.vertex_descriptor.total_attributes; i++) {
desc.vertex_descriptor.attributes[i].format = MTLVertexFormatInvalid;
}
desc.vertex_descriptor.uses_ssbo_vertex_fetch =
@ -266,71 +274,88 @@ void MTLImmediate::end()
* For immediate mode, generating these is currently very cheap, as we use
* fast scratch buffer allocations. Though we may benefit from caching of
* frequently used buffer sizes. */
bool rendered = false;
if (mtl_needs_topology_emulation(this->prim_type)) {
/* Debug safety check for SSBO FETCH MODE. */
if (active_mtl_shader->get_uses_ssbo_vertex_fetch()) {
BLI_assert(false && "Topology emulation not supported with SSBO Vertex Fetch mode");
}
/* Emulate Tri-fan. */
if (this->prim_type == GPU_PRIM_TRI_FAN) {
/* Prepare Triangle-Fan emulation index buffer on CPU based on number of input
* vertices. */
uint32_t base_vert_count = this->vertex_idx;
uint32_t num_triangles = max_ii(base_vert_count - 2, 0);
uint32_t fan_index_count = num_triangles * 3;
BLI_assert(num_triangles > 0);
switch (this->prim_type) {
case GPU_PRIM_TRI_FAN: {
/* Debug safety check for SSBO FETCH MODE. */
if (active_mtl_shader->get_uses_ssbo_vertex_fetch()) {
BLI_assert(
false &&
"Topology emulation for TriangleFan not supported with SSBO Vertex Fetch mode");
}
uint32_t alloc_size = sizeof(uint32_t) * fan_index_count;
uint32_t *index_buffer = nullptr;
/* Prepare Triangle-Fan emulation index buffer on CPU based on number of input
* vertices. */
uint32_t base_vert_count = this->vertex_idx;
uint32_t num_triangles = max_ii(base_vert_count - 2, 0);
uint32_t fan_index_count = num_triangles * 3;
BLI_assert(num_triangles > 0);
MTLTemporaryBuffer allocation =
context_->get_scratchbuffer_manager().scratch_buffer_allocate_range_aligned(
alloc_size, 128);
index_buffer = (uint32_t *)allocation.data;
uint32_t alloc_size = sizeof(uint32_t) * fan_index_count;
uint32_t *index_buffer = nullptr;
int a = 0;
for (int i = 0; i < num_triangles; i++) {
index_buffer[a++] = 0;
index_buffer[a++] = i + 1;
index_buffer[a++] = i + 2;
}
MTLTemporaryBuffer allocation =
context_->get_scratchbuffer_manager().scratch_buffer_allocate_range_aligned(
alloc_size, 128);
index_buffer = (uint32_t *)allocation.data;
@autoreleasepool {
int a = 0;
for (int i = 0; i < num_triangles; i++) {
index_buffer[a++] = 0;
index_buffer[a++] = i + 1;
index_buffer[a++] = i + 2;
}
id<MTLBuffer> index_buffer_mtl = nil;
uint32_t index_buffer_offset = 0;
@autoreleasepool {
/* Region of scratch buffer used for topology emulation element data.
* NOTE(Metal): We do not need to manually flush as the entire scratch
* buffer for current command buffer is flushed upon submission. */
index_buffer_mtl = allocation.metal_buffer;
index_buffer_offset = allocation.buffer_offset;
id<MTLBuffer> index_buffer_mtl = nil;
uint32_t index_buffer_offset = 0;
/* Set depth stencil state (requires knowledge of primitive type). */
context_->ensure_depth_stencil_state(MTLPrimitiveTypeTriangle);
/* Region of scratch buffer used for topology emulation element data.
* NOTE(Metal): We do not need to manually flush as the entire scratch
* buffer for current command buffer is flushed upon submission. */
index_buffer_mtl = allocation.metal_buffer;
index_buffer_offset = allocation.buffer_offset;
/* Bind Vertex Buffer. */
rps.bind_vertex_buffer(
current_allocation_.metal_buffer, current_allocation_.buffer_offset, 0);
/* Set depth stencil state (requires knowledge of primitive type). */
context_->ensure_depth_stencil_state(MTLPrimitiveTypeTriangle);
/* Draw. */
[rec drawIndexedPrimitives:MTLPrimitiveTypeTriangle
indexCount:fan_index_count
indexType:MTLIndexTypeUInt32
indexBuffer:index_buffer_mtl
indexBufferOffset:index_buffer_offset];
}
}
else {
/* TODO(Metal): Topology emulation for line loop.
* NOTE(Metal): This is currently not used anywhere and modified at the high
* level for efficiency in such cases. */
BLI_assert_msg(false, "LineLoop requires emulation support in immediate mode.");
/* Bind Vertex Buffer. */
rps.bind_vertex_buffer(
current_allocation_.metal_buffer, current_allocation_.buffer_offset, 0);
/* Draw. */
[rec drawIndexedPrimitives:MTLPrimitiveTypeTriangle
indexCount:fan_index_count
indexType:MTLIndexTypeUInt32
indexBuffer:index_buffer_mtl
indexBufferOffset:index_buffer_offset];
context_->main_command_buffer.register_draw_counters(fan_index_count);
}
rendered = true;
} break;
case GPU_PRIM_LINE_LOOP: {
/* Patch the final vertex of the line loop to close it. Rendered using LineStrip.
 * NOTE: vertex_len represents the original length; however, the allocated Metal
 * buffer contains space for one extra vertex when LineLoop is used. */
uchar *buffer_data = reinterpret_cast<uchar *>(current_allocation_.data);
memcpy(buffer_data + (vertex_len)*vertex_format.stride,
buffer_data,
vertex_format.stride);
this->vertex_idx++;
} break;
default: {
BLI_assert_unreachable();
} break;
}
}
else {
/* If not yet rendered, run through main render path. LineLoop primitive topology emulation
* will simply amend original data passed into default rendering path. */
if (!rendered) {
MTLPrimitiveType primitive_type = metal_primitive_type_;
int vertex_count = this->vertex_idx;

View File

@ -39,10 +39,10 @@ static inline MTLPrimitiveType gpu_prim_type_to_metal(GPUPrimType prim_type)
return MTLPrimitiveTypePoint;
case GPU_PRIM_LINES:
case GPU_PRIM_LINES_ADJ:
case GPU_PRIM_LINE_LOOP:
return MTLPrimitiveTypeLine;
case GPU_PRIM_LINE_STRIP:
case GPU_PRIM_LINE_STRIP_ADJ:
case GPU_PRIM_LINE_LOOP:
return MTLPrimitiveTypeLineStrip;
case GPU_PRIM_TRIS:
case GPU_PRIM_TRI_FAN:

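Taken together with the `GPU_PRIM_LINE_LOOP` to `MTLPrimitiveTypeLineStrip` remap above, the loop is closed on the CPU: `begin()` allocates one extra vertex slot and `end()` copies the first vertex to the tail before drawing as a strip. A hedged sketch of that patch step:

#include <cstddef>
#include <cstring>

/* Close a "line loop" for an API that only has line strips: append a copy of the
 * first vertex and draw vertex_len + 1 vertices as a strip. Assumes the buffer was
 * allocated with room for one extra vertex, as in MTLImmediate::begin() above. */
int close_line_loop(unsigned char *vertex_data, const int vertex_len, const int stride)
{
  std::memcpy(vertex_data + std::size_t(vertex_len) * std::size_t(stride), vertex_data, stride);
  return vertex_len + 1; /* Vertex count to use for the line-strip draw. */
}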
View File

@ -84,7 +84,8 @@ struct MTLVertexDescriptor {
MTLVertexAttributeDescriptorPSO attributes[GPU_VERT_ATTR_MAX_LEN];
MTLVertexBufferLayoutDescriptorPSO
buffer_layouts[GPU_BATCH_VBO_MAX_LEN + GPU_BATCH_INST_VBO_MAX_LEN];
int num_attributes;
int max_attribute_value;
int total_attributes;
int num_vert_buffers;
MTLPrimitiveTopologyClass prim_topology_class;
@ -97,7 +98,8 @@ struct MTLVertexDescriptor {
bool operator==(const MTLVertexDescriptor &other) const
{
if ((this->num_attributes != other.num_attributes) ||
if ((this->max_attribute_value != other.max_attribute_value) ||
(this->total_attributes != other.total_attributes) ||
(this->num_vert_buffers != other.num_vert_buffers)) {
return false;
}
@ -105,7 +107,7 @@ struct MTLVertexDescriptor {
return false;
};
for (const int a : IndexRange(this->num_attributes)) {
for (const int a : IndexRange(this->max_attribute_value + 1)) {
if (!(this->attributes[a] == other.attributes[a])) {
return false;
}
@ -125,8 +127,8 @@ struct MTLVertexDescriptor {
uint64_t hash() const
{
uint64_t hash = (uint64_t)(this->num_attributes ^ this->num_vert_buffers);
for (const int a : IndexRange(this->num_attributes)) {
uint64_t hash = (uint64_t)(this->max_attribute_value ^ this->num_vert_buffers);
for (const int a : IndexRange(this->max_attribute_value + 1)) {
hash ^= this->attributes[a].hash() << a;
}
@ -172,6 +174,9 @@ struct MTLRenderPipelineStateDescriptor {
/* Global color write mask as this cannot be specified per attachment. */
MTLColorWriteMask color_write_mask;
/* Clip distance enablement. */
uchar clipping_plane_enable_mask = 0;
/* Point size required by point primitives. */
float point_size = 0.0f;
@ -182,6 +187,10 @@ struct MTLRenderPipelineStateDescriptor {
return false;
}
if (clipping_plane_enable_mask != other.clipping_plane_enable_mask) {
return false;
}
if ((num_color_attachments != other.num_color_attachments) ||
(depth_attachment_format != other.depth_attachment_format) ||
(stencil_attachment_format != other.stencil_attachment_format) ||
@ -241,13 +250,17 @@ struct MTLRenderPipelineStateDescriptor {
hash |= uint64_t((this->blending_enabled && (this->num_color_attachments > 0)) ? 1 : 0) << 62;
hash ^= uint64_t(this->point_size);
/* Clipping plane enablement. */
hash ^= uint64_t(clipping_plane_enable_mask) << 20;
return hash;
}
/* Reset the Vertex Descriptor to default. */
void reset_vertex_descriptor()
{
vertex_descriptor.num_attributes = 0;
vertex_descriptor.total_attributes = 0;
vertex_descriptor.max_attribute_value = 0;
vertex_descriptor.num_vert_buffers = 0;
for (int i = 0; i < GPU_VERT_ATTR_MAX_LEN; i++) {
vertex_descriptor.attributes[i].format = MTLVertexFormatInvalid;

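The rename above is not cosmetic: `num_attributes` conflated two quantities. With non-contiguous shader attribute locations, iteration must span `max_attribute_value + 1` slots and skip the `MTLVertexFormatInvalid` gaps, while `total_attributes` still counts what is actually bound. A small sketch of the distinction, with a hypothetical simplified descriptor:

#include <cassert>

enum class FormatSketch { Invalid = 0, Float4 };

struct VertexDescriptorSketch {
  FormatSketch attributes[16] = {}; /* Value-initialized to Invalid. */
  int total_attributes = 0;         /* How many attributes are actually bound. */
  int max_attribute_value = 0;      /* Highest bound location; the iteration bound. */

  void bind(const int location)
  {
    attributes[location] = FormatSketch::Float4;
    total_attributes++;
    max_attribute_value = (location > max_attribute_value) ? location : max_attribute_value;
  }
};

int main()
{
  VertexDescriptorSketch desc;
  desc.bind(0);
  desc.bind(3); /* Locations 1 and 2 stay unbound. */
  assert(desc.total_attributes == 2 && desc.max_attribute_value == 3);

  /* Iterating only total_attributes slots would stop at slot 1 and miss location 3;
   * iterating to max_attribute_value + 1 visits every slot, skipping the gaps,
   * as bake_current_pipeline_state() now does. */
  int visited = 0;
  for (int i = 0; i < desc.max_attribute_value + 1; i++) {
    if (desc.attributes[i] == FormatSketch::Invalid) {
      continue;
    }
    visited++;
  }
  assert(visited == desc.total_attributes);
  return 0;
}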
View File

@ -29,6 +29,7 @@
#include "mtl_shader_generator.hh"
#include "mtl_shader_interface.hh"
#include "mtl_texture.hh"
#include "mtl_vertex_buffer.hh"
extern char datatoc_mtl_shader_common_msl[];
@ -347,9 +348,8 @@ bool MTLShader::transform_feedback_enable(GPUVertBuf *buf)
BLI_assert(buf);
transform_feedback_active_ = true;
transform_feedback_vertbuf_ = buf;
/* TODO(Metal): Enable this assertion once #MTLVertBuf lands. */
// BLI_assert(static_cast<MTLVertBuf *>(unwrap(transform_feedback_vertbuf_))->get_usage_type() ==
// GPU_USAGE_DEVICE_ONLY);
BLI_assert(static_cast<MTLVertBuf *>(unwrap(transform_feedback_vertbuf_))->get_usage_type() ==
GPU_USAGE_DEVICE_ONLY);
return true;
}
@ -568,6 +568,7 @@ void MTLShader::shader_source_from_msl(NSString *input_vertex_source,
void MTLShader::set_interface(MTLShaderInterface *interface)
{
/* Assign gpu::Shader super-class interface. */
BLI_assert(Shader::interface == nullptr);
Shader::interface = interface;
}
@ -653,6 +654,14 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
pipeline_descriptor.src_rgb_blend_factor = ctx->pipeline_state.src_rgb_blend_factor;
pipeline_descriptor.point_size = ctx->pipeline_state.point_size;
/* Resolve clipping plane enablement. */
pipeline_descriptor.clipping_plane_enable_mask = 0;
for (const int plane : IndexRange(6)) {
pipeline_descriptor.clipping_plane_enable_mask =
pipeline_descriptor.clipping_plane_enable_mask |
((ctx->pipeline_state.clip_distance_enabled[plane]) ? (1 << plane) : 0);
}
/* Primitive Type -- Primitive topology class needs to be specified for layered rendering. */
bool requires_specific_topology_class = uses_mtl_array_index_ ||
prim_type == MTLPrimitiveTopologyClassPoint;
@ -709,7 +718,7 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
MTL_uniform_buffer_base_index = MTL_SSBO_VERTEX_FETCH_IBO_INDEX + 1;
}
else {
for (const uint i : IndexRange(current_state.vertex_descriptor.num_attributes)) {
for (const uint i : IndexRange(current_state.vertex_descriptor.max_attribute_value + 1)) {
/* Metal back-end attribute descriptor state. */
MTLVertexAttributeDescriptorPSO &attribute_desc =
@ -727,8 +736,9 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
* https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
*/
if (attribute_desc.format == MTLVertexFormatInvalid) {
/* If attributes are non-contiguous, we can skip over gaps. */
MTL_LOG_WARNING(
"MTLShader: baking pipeline state for '%s'- expected input attribute at "
"MTLShader: baking pipeline state for '%s'- skipping input attribute at "
"index '%d' but none was specified in the current vertex state\n",
mtl_interface->get_name(),
i);
@@ -777,7 +787,8 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
}
/* Mark empty attribute conversion. */
for (int i = current_state.vertex_descriptor.num_attributes; i < GPU_VERT_ATTR_MAX_LEN;
for (int i = current_state.vertex_descriptor.max_attribute_value + 1;
i < GPU_VERT_ATTR_MAX_LEN;
i++) {
int MTL_attribute_conversion_mode = 0;
[values setConstantValue:&MTL_attribute_conversion_mode
@@ -790,13 +801,15 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
* #GPUVertFormat; however, if attributes have not been set, we can sort them out here. */
for (const uint i : IndexRange(mtl_interface->get_total_attributes())) {
const MTLShaderInputAttribute &attribute = mtl_interface->get_attribute(i);
MTLVertexAttributeDescriptor *current_attribute = desc.vertexDescriptor.attributes[i];
MTLVertexAttributeDescriptor *current_attribute =
desc.vertexDescriptor.attributes[attribute.location];
if (current_attribute.format == MTLVertexFormatInvalid) {
#if MTL_DEBUG_SHADER_ATTRIBUTES == 1
MTL_LOG_INFO("-> Filling in unbound attribute '%s' for shader PSO '%s' \n",
attribute.name,
mtl_interface->name);
printf("-> Filling in unbound attribute '%s' for shader PSO '%s' with location: %u\n",
mtl_interface->get_name_at_offset(attribute.name_offset),
mtl_interface->get_name(),
attribute.location);
#endif
current_attribute.format = attribute.format;
current_attribute.offset = 0;
@@ -828,28 +841,53 @@ MTLRenderPipelineStateInstance *MTLShader::bake_current_pipeline_state(
}
}
/* Primitive Topology */
/* Primitive Topology. */
desc.inputPrimitiveTopology = pipeline_descriptor.vertex_descriptor.prim_topology_class;
}
/* Update constant value for 'MTL_uniform_buffer_base_index' */
/* Update constant value for 'MTL_uniform_buffer_base_index'. */
[values setConstantValue:&MTL_uniform_buffer_base_index
type:MTLDataTypeInt
withName:@"MTL_uniform_buffer_base_index"];
/* Transform feedback constant */
/* Transform feedback constant.
* Ensure buffer is placed after existing buffers, including default buffers. */
int MTL_transform_feedback_buffer_index = (this->transform_feedback_type_ !=
GPU_SHADER_TFB_NONE) ?
MTL_uniform_buffer_base_index +
mtl_interface->get_total_uniform_blocks() :
mtl_interface->get_max_ubo_index() + 2 :
-1;
if (this->transform_feedback_type_ != GPU_SHADER_TFB_NONE) {
[values setConstantValue:&MTL_transform_feedback_buffer_index
type:MTLDataTypeInt
withName:@"MTL_transform_feedback_buffer_index"];
}
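/* Illustrative sketch, not from the patch: switching from
 * `get_total_uniform_blocks()` to `get_max_ubo_index() + 2` matters when UBO
 * binding indices are sparse, since the block count can be smaller than the
 * highest occupied slot. The values below and the "+1 past the highest slot,
 * +1 for a default buffer" reading are assumptions for illustration only. */
#include <algorithm>
#include <cassert>

static int transform_feedback_slot_sketch()
{
  const int base_index = 2;         /* Hypothetical uniform-buffer base index. */
  const int ubo_indices[] = {0, 2}; /* Sparse slots: block count 2, max index 2. */
  const int block_count = 2;
  const int max_ubo_index = *std::max_element(ubo_indices, ubo_indices + 2);
  /* Count-based placement would collide with the UBO at relative index 2. */
  assert(base_index + block_count == base_index + ubo_indices[1]);
  /* Max-based placement lands past every existing buffer, with one extra
   * slot reserved (assumed to be a default/push-constant buffer). */
  return base_index + max_ubo_index + 2; /* == 6, safely beyond all UBOs. */
}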
/* gl_PointSize constant */
/* Clipping planes. */
int MTL_clip_distances_enabled = (pipeline_descriptor.clipping_plane_enable_mask > 0) ? 1 : 0;
/* This master flag guards all clip_planes usage; the per-plane specialization
 * constants below are only defined if planes are required. */
[values setConstantValue:&MTL_clip_distances_enabled
type:MTLDataTypeInt
withName:@"MTL_clip_distances_enabled"];
if (MTL_clip_distances_enabled > 0) {
/* Assign individual enablement flags. Only define a flag function constant
* if it is used. */
for (const int plane : IndexRange(6)) {
int plane_enabled = ctx->pipeline_state.clip_distance_enabled[plane] ? 1 : 0;
if (plane_enabled) {
[values
setConstantValue:&plane_enabled
type:MTLDataTypeInt
withName:[NSString stringWithFormat:@"MTL_clip_distance_enabled%d", plane]];
}
}
}
/* gl_PointSize constant. */
bool null_pointsize = true;
float MTL_pointsize = pipeline_descriptor.point_size;
if (pipeline_descriptor.vertex_descriptor.prim_topology_class ==

View File

@@ -228,6 +228,7 @@ struct MSLTextureSampler {
uint location;
eGPUTextureType get_texture_binding_type() const;
eGPUSamplerFormat get_sampler_format() const;
void resolve_binding_indices();

View File

@@ -336,7 +336,8 @@ static bool extract_ssbo_pragma_info(const MTLShader *shader,
/* SSBO Vertex-fetch parameter extraction. */
static std::regex use_ssbo_fetch_mode_find(
"#pragma "
"USE_SSBO_VERTEX_FETCH\\(\\s*(TriangleList|LineList|\\w+)\\s*,\\s*([0-9]+)\\s*\\)");
"USE_SSBO_VERTEX_FETCH\\(\\s*(TriangleList|LineList|TriangleStrip|\\w+)\\s*,\\s*([0-9]+)\\s*"
"\\)");
/* Perform regex search if pragma string found. */
std::smatch vertex_shader_ssbo_flags;
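/* Illustrative sketch, not from the patch: how the pragma regex above behaves
 * on a sample source line. Self-contained and runnable: */
#include <cassert>
#include <regex>
#include <string>

static void ssbo_pragma_regex_sketch()
{
  const std::regex pragma_regex(
      "#pragma "
      "USE_SSBO_VERTEX_FETCH\\(\\s*(TriangleList|LineList|TriangleStrip|\\w+)\\s*,\\s*([0-9]+)"
      "\\s*\\)");
  const std::string source = "#pragma USE_SSBO_VERTEX_FETCH(TriangleStrip, 6)";
  std::smatch flags;
  assert(std::regex_search(source, flags, pragma_regex));
  assert(flags[1] == "TriangleStrip"); /* Output primitive type. */
  assert(flags[2] == "6");             /* Output vertices per input primitive. */
}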
@@ -352,6 +353,7 @@ static bool extract_ssbo_pragma_info(const MTLShader *shader,
* Supported primitive types (others can be added if needed, though List types are preferred for efficiency):
* - TriangleList
* - LineList
* - TriangleStrip (To be used with caution).
*
* Output vertex count is determined by calculating the number of input primitives, and
* multiplying that by the number of output vertices specified. */
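/* Illustrative worked example, not from the patch (the exact formula is an
 * assumption based on the comment above): a LineList input of 8 vertices is
 * 8 / 2 = 4 primitives; with 6 output vertices per primitive, the SSBO
 * output vertex count is 4 * 6 = 24. */
static int ssbo_output_vertex_count_sketch(int input_vertex_len,
                                           int verts_per_input_prim,
                                           int output_verts_per_prim)
{
  const int input_prim_count = input_vertex_len / verts_per_input_prim;
  return input_prim_count * output_verts_per_prim;
}
/* ssbo_output_vertex_count_sketch(8, 2, 6) == 24. */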
@@ -365,6 +367,9 @@ static bool extract_ssbo_pragma_info(const MTLShader *shader,
else if (str_output_primitive_type == "LineList") {
out_prim_tye = MTLPrimitiveTypeLine;
}
else if (str_output_primitive_type == "TriangleStrip") {
out_prim_tye = MTLPrimitiveTypeTriangleStrip;
}
else {
MTL_LOG_ERROR("Unsupported output primitive type for SSBO VERTEX FETCH MODE. Shader: %s",
shader->name_get());
@@ -555,8 +560,6 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
BLI_assert(shd_builder_->glsl_fragment_source_.size() > 0);
}
/** Determine use of Transform Feedback. **/
msl_iface.uses_transform_feedback = false;
if (transform_feedback_type_ != GPU_SHADER_TFB_NONE) {
/* Ensure #TransformFeedback is configured correctly. */
BLI_assert(tf_output_name_list_.size() > 0);
@@ -1270,8 +1273,10 @@ void MSLGeneratorInterface::prepare_from_createinfo(const shader::ShaderCreateIn
access = MSLTextureSamplerAccess::TEXTURE_ACCESS_READ;
}
BLI_assert(used_slot >= 0 && used_slot < MTL_MAX_TEXTURE_SLOTS);
/* Writeable image targets are only assigned to the fragment shader. */
MSLTextureSampler msl_tex(
ShaderStage::BOTH, res.image.type, res.image.name, access, used_slot);
ShaderStage::FRAGMENT, res.image.type, res.image.name, access, used_slot);
texture_samplers.append(msl_tex);
} break;
@@ -1344,6 +1349,10 @@ void MSLGeneratorInterface::prepare_from_createinfo(const shader::ShaderCreateIn
fragment_outputs.append(mtl_frag_out);
}
/* Transform feedback. */
uses_transform_feedback = (create_info_->tf_type_ != GPU_SHADER_TFB_NONE) &&
(create_info_->tf_names_.size() > 0);
}
bool MSLGeneratorInterface::use_argument_buffer_for_samplers() const
@@ -1514,7 +1523,7 @@ std::string MSLGeneratorInterface::generate_msl_fragment_entry_stub()
if (this->uses_barycentrics) {
/* Main barycentrics. */
out << "fragment_shader_instance.gpu_BaryCoord = mtl_barycentric_coord.xyz;";
out << "fragment_shader_instance.gpu_BaryCoord = mtl_barycentric_coord.xyz;" << std::endl;
/* barycentricDist represents the world-space distance from the current world-space position
* to the opposite edge of the triangle. */
@@ -1867,11 +1876,14 @@ std::string MSLGeneratorInterface::generate_msl_vertex_out_struct(ShaderStage sh
out << "#if defined(USE_CLIP_PLANES) || defined(USE_WORLD_CLIP_PLANES)" << std::endl;
if (this->clip_distances.size() > 1) {
/* Output array of clip distances if specified. */
out << "\tfloat clipdistance [[clip_distance]] [" << this->clip_distances.size() << "];"
<< std::endl;
out << "\tfloat clipdistance [[clip_distance, "
"function_constant(MTL_clip_distances_enabled)]] ["
<< this->clip_distances.size() << "];" << std::endl;
}
else if (this->clip_distances.size() > 0) {
out << "\tfloat clipdistance [[clip_distance]];" << std::endl;
out << "\tfloat clipdistance [[clip_distance, "
"function_constant(MTL_clip_distances_enabled)]];"
<< std::endl;
}
out << "#endif" << std::endl;
}
@@ -2148,18 +2160,24 @@ std::string MSLGeneratorInterface::generate_msl_vertex_output_population()
<< std::endl;
}
/* Output clip-distances. */
out << "#if defined(USE_CLIP_PLANES) || defined(USE_WORLD_CLIP_PLANES)" << std::endl;
/* Output clip-distances.
* Clip distances are only written when the shader declares clipping planes and clipping
* is enabled in the pipeline state. Enablement is controlled on a per-plane basis
* via function constants in the shader pipeline state object (PSO). */
out << "#if defined(USE_CLIP_PLANES) || defined(USE_WORLD_CLIP_PLANES)" << std::endl
<< "if(MTL_clip_distances_enabled) {" << std::endl;
if (this->clip_distances.size() > 1) {
for (int cd = 0; cd < this->clip_distances.size(); cd++) {
out << "\toutput.clipdistance[" << cd << "] = vertex_shader_instance.gl_ClipDistance_" << cd
<< ";" << std::endl;
/* When clipping is disabled, default to a value >= 0.0 so the primitive is not clipped. */
out << "\toutput.clipdistance[" << cd
<< "] = (is_function_constant_defined(MTL_clip_distance_enabled" << cd
<< "))?vertex_shader_instance.gl_ClipDistance_" << cd << ":1.0;" << std::endl;
}
}
else if (this->clip_distances.size() > 0) {
out << "\toutput.clipdistance = vertex_shader_instance.gl_ClipDistance_0;" << std::endl;
}
out << "#endif" << std::endl;
out << "}" << std::endl << "#endif" << std::endl;
/* Populate output vertex variables. */
int output_id = 0;
@@ -2381,8 +2399,8 @@ std::string MSLGeneratorInterface::generate_msl_texture_vars(ShaderStage shader_
out << "\t"
<< ((shader_stage == ShaderStage::VERTEX) ? "vertex_shader_instance." :
"fragment_shader_instance.")
<< this->texture_samplers[i].name << ".samp = &samplers.sampler_args[" << i << "];"
<< std::endl;
<< this->texture_samplers[i].name << ".samp = &samplers.sampler_args["
<< this->texture_samplers[i].location << "];" << std::endl;
}
else {
out << "\t"
@@ -2613,6 +2631,7 @@ MTLShaderInterface *MSLGeneratorInterface::bake_shader_interface(const char *nam
name_buffer_offset),
texture_sampler.location,
texture_sampler.get_texture_binding_type(),
texture_sampler.get_sampler_format(),
texture_sampler.stage);
}
@@ -3011,6 +3030,51 @@ eGPUTextureType MSLTextureSampler::get_texture_binding_type() const
};
}
eGPUSamplerFormat MSLTextureSampler::get_sampler_format() const
{
switch (this->type) {
case ImageType::FLOAT_BUFFER:
case ImageType::FLOAT_1D:
case ImageType::FLOAT_1D_ARRAY:
case ImageType::FLOAT_2D:
case ImageType::FLOAT_2D_ARRAY:
case ImageType::FLOAT_3D:
case ImageType::FLOAT_CUBE:
case ImageType::FLOAT_CUBE_ARRAY:
return GPU_SAMPLER_TYPE_FLOAT;
case ImageType::INT_BUFFER:
case ImageType::INT_1D:
case ImageType::INT_1D_ARRAY:
case ImageType::INT_2D:
case ImageType::INT_2D_ARRAY:
case ImageType::INT_3D:
case ImageType::INT_CUBE:
case ImageType::INT_CUBE_ARRAY:
return GPU_SAMPLER_TYPE_INT;
case ImageType::UINT_BUFFER:
case ImageType::UINT_1D:
case ImageType::UINT_1D_ARRAY:
case ImageType::UINT_2D:
case ImageType::UINT_2D_ARRAY:
case ImageType::UINT_3D:
case ImageType::UINT_CUBE:
case ImageType::UINT_CUBE_ARRAY:
return GPU_SAMPLER_TYPE_UINT;
case ImageType::SHADOW_2D:
case ImageType::SHADOW_2D_ARRAY:
case ImageType::SHADOW_CUBE:
case ImageType::SHADOW_CUBE_ARRAY:
case ImageType::DEPTH_2D:
case ImageType::DEPTH_2D_ARRAY:
case ImageType::DEPTH_CUBE:
case ImageType::DEPTH_CUBE_ARRAY:
return GPU_SAMPLER_TYPE_DEPTH;
default:
BLI_assert_unreachable();
}
return GPU_SAMPLER_TYPE_FLOAT;
}
/** \} */
} // namespace blender::gpu

View File

@@ -130,6 +130,7 @@ struct MTLShaderTexture {
/* Texture resource bind slot in shader `[[texture(n)]]`. */
int slot_index;
eGPUTextureType type;
eGPUSamplerFormat sampler_format;
ShaderStage stage_mask;
};
@@ -168,6 +169,7 @@ class MTLShaderInterface : public ShaderInterface {
/* Uniform Blocks. */
uint32_t total_uniform_blocks_;
uint32_t max_uniformbuf_index_;
MTLShaderUniformBlock ubos_[MTL_MAX_UNIFORM_BUFFER_BINDINGS];
MTLShaderUniformBlock push_constant_block_;
@@ -209,6 +211,7 @@ class MTLShaderInterface : public ShaderInterface {
void add_texture(uint32_t name_offset,
uint32_t texture_slot,
eGPUTextureType tex_binding_type,
eGPUSamplerFormat sampler_format,
ShaderStage stage_mask = ShaderStage::FRAGMENT);
void add_push_constant_block(uint32_t name_offset);
@@ -228,6 +231,7 @@ class MTLShaderInterface : public ShaderInterface {
/* Fetch Uniform Blocks. */
const MTLShaderUniformBlock &get_uniform_block(uint index) const;
uint32_t get_total_uniform_blocks() const;
uint32_t get_max_ubo_index() const;
bool has_uniform_block(uint32_t block_index) const;
uint32_t get_uniform_block_size(uint32_t block_index) const;

View File

@@ -55,6 +55,7 @@ void MTLShaderInterface::init()
{
total_attributes_ = 0;
total_uniform_blocks_ = 0;
max_uniformbuf_index_ = 0;
total_uniforms_ = 0;
total_textures_ = 0;
max_texture_index_ = -1;
@@ -121,6 +122,7 @@ uint32_t MTLShaderInterface::add_uniform_block(uint32_t name_offset,
uni_block.size = size;
uni_block.current_offset = 0;
uni_block.stage_mask = ShaderStage::BOTH;
max_uniformbuf_index_ = max_ii(max_uniformbuf_index_, buffer_index);
return (total_uniform_blocks_++);
}
@@ -187,9 +189,11 @@ void MTLShaderInterface::add_uniform(uint32_t name_offset, eMTLDataType type, in
void MTLShaderInterface::add_texture(uint32_t name_offset,
uint32_t texture_slot,
eGPUTextureType tex_binding_type,
eGPUSamplerFormat sampler_format,
ShaderStage stage_mask)
{
BLI_assert(texture_slot >= 0 && texture_slot < GPU_max_textures());
BLI_assert(sampler_format < GPU_SAMPLER_TYPE_MAX);
if (texture_slot >= 0 && texture_slot < GPU_max_textures()) {
MTLShaderTexture &tex = textures_[texture_slot];
@@ -197,6 +201,7 @@ void MTLShaderInterface::add_texture(uint32_t name_offset,
tex.name_offset = name_offset;
tex.slot_index = texture_slot;
tex.type = tex_binding_type;
tex.sampler_format = sampler_format;
tex.stage_mask = stage_mask;
tex.used = true;
total_textures_++;
@@ -281,7 +286,11 @@ void MTLShaderInterface::prepare_common_shader_inputs()
MTLShaderInputAttribute &shd_attr = attributes_[attr_index];
current_input->name_offset = shd_attr.name_offset;
current_input->name_hash = BLI_hash_string(this->get_name_at_offset(shd_attr.name_offset));
current_input->location = attr_index;
/* For Metal, we flatten the vertex attribute indices within the shader in order to minimise
* complexity. ShaderInput "Location" contains the original attribute location, as can be
* fetched using `GPU_shader_get_attribute_info`. ShaderInput binding contains the array index
* into the MTLShaderInterface `attributes_` array. */
current_input->location = shd_attr.location;
current_input->binding = attr_index;
current_input++;
}
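/* Illustrative worked example, not from the patch (attribute locations are
 * invented): two attributes declared at shader locations 3 and 7 are stored
 * contiguously in `attributes_`, so the flattened inputs keep the original
 * location while the binding holds the array index, as described above. */
#include <cassert>

struct SketchShaderInput {
  int location; /* Original attribute location from the shader. */
  int binding;  /* Array index into the interface's attributes_ array. */
};

static void attribute_flattening_sketch()
{
  const int shader_locations[2] = {3, 7};
  SketchShaderInput inputs[2];
  for (int attr_index = 0; attr_index < 2; attr_index++) {
    inputs[attr_index].location = shader_locations[attr_index];
    inputs[attr_index].binding = attr_index;
  }
  assert(inputs[0].location == 3 && inputs[0].binding == 0);
  assert(inputs[1].location == 7 && inputs[1].binding == 1);
}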
@@ -419,6 +428,11 @@ uint32_t MTLShaderInterface::get_total_uniform_blocks() const
return total_uniform_blocks_;
}
uint32_t MTLShaderInterface::get_max_ubo_index() const
{
return max_uniformbuf_index_;
}
bool MTLShaderInterface::has_uniform_block(uint32_t block_index) const
{
return (block_index < total_uniform_blocks_);

View File

@@ -6,6 +6,7 @@
#pragma once
#include "BLI_assert.h"
#include "GPU_material.h"
enum eMTLDataType {
MTL_DATATYPE_CHAR,
@@ -249,3 +250,25 @@ inline uint mtl_get_data_type_alignment(eMTLDataType type)
return 0;
};
}
inline eMTLDataType gpu_type_to_mtl_type(eGPUType type)
{
switch (type) {
case GPU_FLOAT:
return MTL_DATATYPE_FLOAT;
case GPU_VEC2:
return MTL_DATATYPE_FLOAT2;
case GPU_VEC3:
return MTL_DATATYPE_FLOAT3;
case GPU_VEC4:
return MTL_DATATYPE_FLOAT4;
case GPU_MAT3:
return MTL_DATATYPE_FLOAT3x3;
case GPU_MAT4:
return MTL_DATATYPE_FLOAT4x4;
default:
BLI_assert(false && "Other types unsupported");
return MTL_DATATYPE_FLOAT;
}
return MTL_DATATYPE_FLOAT;
}

View File

@@ -80,6 +80,8 @@ class MTLStateManager : public StateManager {
void mtl_depth_range(float near, float far);
void mtl_stencil_mask(uint mask);
void mtl_stencil_set_func(eGPUStencilTest stencil_func, int ref, uint mask);
void mtl_clip_plane_enable(uint i);
void mtl_clip_plane_disable(uint i);
MEM_CXX_CLASS_ALLOC_FUNCS("MTLStateManager")
};

View File

@@ -35,6 +35,12 @@ MTLStateManager::MTLStateManager(MTLContext *ctx) : StateManager()
/* Force update using default state. */
current_ = ~state;
current_mutable_ = ~mutable_state;
/* The negation above forces the initial clip distances mask to 0b111 (7),
 * which exceeds the maximum clip plane count of 6, so clamp it to ensure
 * all clipping planes get disabled. */
current_.clip_distances = 6;
set_state(state);
set_mutable_state(mutable_state);
}
@@ -52,6 +58,7 @@ void MTLStateManager::force_state()
{
/* Little exception for clip distances since they need to keep the old count correct. */
uint32_t clip_distances = current_.clip_distances;
BLI_assert(clip_distances <= 6);
current_ = ~this->state;
current_.clip_distances = clip_distances;
current_mutable_ = ~this->mutable_state;
@@ -329,11 +336,32 @@ void MTLStateManager::set_stencil_mask(const eGPUStencilTest test, const GPUStat
}
}
void MTLStateManager::mtl_clip_plane_enable(uint i)
{
BLI_assert(context_);
MTLContextGlobalShaderPipelineState &pipeline_state = context_->pipeline_state;
pipeline_state.clip_distance_enabled[i] = true;
pipeline_state.dirty_flags |= MTL_PIPELINE_STATE_PSO_FLAG;
}
void MTLStateManager::mtl_clip_plane_disable(uint i)
{
BLI_assert(context_);
MTLContextGlobalShaderPipelineState &pipeline_state = context_->pipeline_state;
pipeline_state.clip_distance_enabled[i] = false;
pipeline_state.dirty_flags |= MTL_PIPELINE_STATE_PSO_FLAG;
}
void MTLStateManager::set_clip_distances(const int new_dist_len, const int old_dist_len)
{
/* TODO(Metal): Support Clip distances in METAL. Clip distance
* assignment via shader is supported, but global clip-states require
* support. */
BLI_assert(new_dist_len <= 6);
BLI_assert(old_dist_len <= 6);
for (uint i = 0; i < new_dist_len; i++) {
mtl_clip_plane_enable(i);
}
for (uint i = new_dist_len; i < old_dist_len; i++) {
mtl_clip_plane_disable(i);
}
}
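/* Illustrative sketch, not from the patch: shrinking from 4 enabled planes to
 * 2 with the two loops above re-enables planes 0..1, disables planes 2..3,
 * and leaves planes 4..5 untouched. */
static void clip_distance_transition_sketch(bool (&plane_enabled)[6])
{
  const int new_dist_len = 2;
  const int old_dist_len = 4;
  for (int i = 0; i < new_dist_len; i++) {
    plane_enabled[i] = true; /* Mirrors mtl_clip_plane_enable(i). */
  }
  for (int i = new_dist_len; i < old_dist_len; i++) {
    plane_enabled[i] = false; /* Mirrors mtl_clip_plane_disable(i). */
  }
}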
void MTLStateManager::set_logic_op(const bool enable)

View File

@@ -1621,6 +1621,7 @@ bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
}
/* Verify Texture and vertex buffer alignment. */
const GPUVertFormat *format = GPU_vertbuf_get_format(vbo);
int bytes_per_pixel = get_mtl_format_bytesize(mtl_format);
int bytes_per_row = bytes_per_pixel * w_;
@@ -1628,12 +1629,40 @@ bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
uint32_t align_requirement = static_cast<uint32_t>(
[mtl_ctx->device minimumLinearTextureAlignmentForPixelFormat:mtl_format]);
/* Verify per-vertex size aligns with texture size. */
const GPUVertFormat *format = GPU_vertbuf_get_format(vbo);
BLI_assert(bytes_per_pixel == format->stride &&
"Pixel format stride MUST match the texture format stride -- These being different "
"is likely caused by Metal's VBO padding to a minimum of 4-bytes per-vertex");
UNUSED_VARS_NDEBUG(format);
/* If stride is larger than bytes per pixel, but format has multiple attributes,
* split attributes across several pixels. */
if (format->stride > bytes_per_pixel && format->attr_len > 1) {
/* We need to increase the number of pixels available to store additional attributes.
* First ensure that the total stride of the vertex format fits uniformly into
* multiple pixels. If these sizes are different, then attributes are of differing
* sizes and this operation is unsupported. */
if (bytes_per_pixel * format->attr_len != format->stride) {
BLI_assert_msg(false,
"Cannot split attributes across multiple pixels as attribute format sizes do "
"not match.");
return false;
}
/* Provide a single pixel per attribute. */
/* Increase bytes per row to ensure there are enough bytes for all vertex attribute data. */
bytes_per_row *= format->attr_len;
BLI_assert(bytes_per_row == format->stride * w_);
/* Multiply width of image to provide one attribute per pixel. */
w_ *= format->attr_len;
BLI_assert(bytes_per_row == bytes_per_pixel * w_);
BLI_assert_msg(w_ == mtl_vbo->vertex_len * format->attr_len,
"Image should contain one pixel for each attribute in every vertex.");
}
else {
/* Verify per-vertex size aligns with texture size. */
BLI_assert(bytes_per_pixel == format->stride &&
"Pixel format stride MUST match the texture format stride -- These being different "
"is likely caused by Metal's VBO padding to a minimum of 4-bytes per-vertex."
" If multiple attributes are used. Each attribute is to be packed into its own "
"individual pixel when stride length is exceeded. ");
}
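/* Illustrative worked example, not from the patch (format and sizes are
 * invented): a vertex format with two float4 attributes has a 32-byte stride
 * while an RGBA32F texel holds 16 bytes. Since 16 * 2 == 32, the split above
 * is valid: the width doubles and each vertex occupies two consecutive
 * texels, one per attribute. */
#include <cassert>

static void attribute_split_sketch()
{
  const int bytes_per_pixel = 16; /* RGBA32F texel size. */
  const int attr_len = 2;         /* Two float4 attributes per vertex. */
  const int stride = 32;          /* Per-vertex byte size. */
  const int vertex_len = 100;
  assert(bytes_per_pixel * attr_len == stride); /* Uniform attribute sizes. */
  int width = vertex_len;
  int bytes_per_row = bytes_per_pixel * width;
  bytes_per_row *= attr_len; /* Room for all attribute data per row. */
  width *= attr_len;         /* One texel per attribute. */
  assert(bytes_per_row == stride * vertex_len);
  assert(bytes_per_row == bytes_per_pixel * width);
  assert(width == vertex_len * attr_len);
}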
/* Create texture descriptor. */
BLI_assert(type_ == GPU_TEXTURE_BUFFER);
@@ -1668,9 +1697,6 @@ bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
/* Track Status. */
vert_buffer_ = mtl_vbo;
vert_buffer_mtl_ = source_buffer;
/* Cleanup. */
[texture_descriptor_ release];
texture_descriptor_ = nullptr;
return true;
}
@@ -1882,8 +1908,6 @@ void gpu::MTLTexture::ensure_baked()
/* Standard texture allocation. */
texture_ = [ctx->device newTextureWithDescriptor:texture_descriptor_];
[texture_descriptor_ release];
texture_descriptor_ = nullptr;
texture_.label = [NSString stringWithUTF8String:this->get_name()];
BLI_assert(texture_);
is_baked_ = true;

View File

@@ -42,6 +42,7 @@ class MTLVertBuf : public VertBuf {
* Access limited to friend classes. */
id<MTLBuffer> get_metal_buffer()
{
BLI_assert(vbo_ != nullptr);
vbo_->debug_ensure_used();
return vbo_->get_metal_buffer();
}

View File

@@ -16,11 +16,11 @@ void main(void)
const float end_gradient_threshold = 0.65;
#ifdef USE_INSTANCE
# define colStart (colid_doarrow[0] < 3 ? start_color : node_link_data.colors[colid_doarrow[0]])
# define colEnd (colid_doarrow[1] < 3 ? end_color : node_link_data.colors[colid_doarrow[1]])
# define colStart (colid_doarrow[0] < 3u ? start_color : node_link_data.colors[colid_doarrow[0]])
# define colEnd (colid_doarrow[1] < 3u ? end_color : node_link_data.colors[colid_doarrow[1]])
# define colShadow node_link_data.colors[colid_doarrow[2]]
# define doArrow (colid_doarrow[3] != 0)
# define doMuted (domuted[0] != 0)
# define doArrow (colid_doarrow[3] != 0u)
# define doMuted (domuted[0] != 0u)
#else
vec2 P0 = node_link_data.bezierPts[0].xy;
vec2 P1 = node_link_data.bezierPts[1].xy;

View File

@@ -1,13 +1,13 @@
/* Values in GPU_shader.h. */
#define GPU_KEYFRAME_SHAPE_DIAMOND (1 << 0)
#define GPU_KEYFRAME_SHAPE_CIRCLE (1 << 1)
#define GPU_KEYFRAME_SHAPE_CLIPPED_VERTICAL (1 << 2)
#define GPU_KEYFRAME_SHAPE_CLIPPED_HORIZONTAL (1 << 3)
#define GPU_KEYFRAME_SHAPE_INNER_DOT (1 << 4)
#define GPU_KEYFRAME_SHAPE_ARROW_END_MAX (1 << 8)
#define GPU_KEYFRAME_SHAPE_ARROW_END_MIN (1 << 9)
#define GPU_KEYFRAME_SHAPE_ARROW_END_MIXED (1 << 10)
#define GPU_KEYFRAME_SHAPE_DIAMOND (1u << 0)
#define GPU_KEYFRAME_SHAPE_CIRCLE (1u << 1)
#define GPU_KEYFRAME_SHAPE_CLIPPED_VERTICAL (1u << 2)
#define GPU_KEYFRAME_SHAPE_CLIPPED_HORIZONTAL (1u << 3)
#define GPU_KEYFRAME_SHAPE_INNER_DOT (1u << 4)
#define GPU_KEYFRAME_SHAPE_ARROW_END_MAX (1u << 8)
#define GPU_KEYFRAME_SHAPE_ARROW_END_MIN (1u << 9)
#define GPU_KEYFRAME_SHAPE_ARROW_END_MIXED (1u << 10)
#define GPU_KEYFRAME_SHAPE_SQUARE \
(GPU_KEYFRAME_SHAPE_CLIPPED_VERTICAL | GPU_KEYFRAME_SHAPE_CLIPPED_HORIZONTAL)
@@ -18,7 +18,7 @@ const float minmax_scale = sqrt(1.0 / (1.0 + 1.0 / minmax_bias));
bool test(uint bit)
{
return (finalFlags & bit) != 0;
return (finalFlags & bit) != 0u;
}
void main()

View File

@@ -42,6 +42,22 @@ constant int MTL_AttributeConvert15 [[function_constant(17)]];
* Unused if function constant not set. */
constant int MTL_transform_feedback_buffer_index [[function_constant(18)]];
/** Clip distance enablement. */
/* General toggle to control whether any clipping distances are written at all.
 * This is an optimization to avoid having the clipping distance shader output
 * parameter if it is not needed. */
constant int MTL_clip_distances_enabled [[function_constant(19)]];
/* If clipping planes are enabled at all, then we require an enablement
* flag per clipping plane. If enabled, then gl_ClipDistances[N] will
* control clipping for a given plane, otherwise the value is ignored. */
constant int MTL_clip_distance_enabled0 [[function_constant(20)]];
constant int MTL_clip_distance_enabled1 [[function_constant(21)]];
constant int MTL_clip_distance_enabled2 [[function_constant(22)]];
constant int MTL_clip_distance_enabled3 [[function_constant(23)]];
constant int MTL_clip_distance_enabled4 [[function_constant(24)]];
constant int MTL_clip_distance_enabled5 [[function_constant(25)]];
/** Internal attribute conversion functionality. */
/* Following descriptions in mtl_shader.hh, Metal only supports some implicit
* attribute type conversions. These conversions occur when there is a difference

View File

@@ -153,6 +153,9 @@ struct SStruct {
/* Texture-write functions. */
#define imageStore(_tex, _coord, _value) _texture_write_internal(_tex, _coord, _value)
/* Cube-map support is always available when using Metal. */
#define textureLod_cubemapArray(tex, co, lod) textureLod(tex, co, lod)
/* Singular return values from texture functions of type DEPTH are often indexed with either .r or
* .x. This is a lightweight wrapper type for handling this syntax. */
union _msl_return_float {

View File

@@ -1,4 +1,11 @@
/* Cubemap support and fallback implementation declarations. */
#ifdef GPU_ARB_texture_cube_map_array
# define textureLod_cubemapArray(tex, co, lod) textureLod(tex, co, lod)
#else
# define samplerCubeArray sampler2DArray
#endif
/* Texture format tokens -- Type explicitness required by other Graphics APIs. */
#define depth2D sampler2D
#define depth2DArray sampler2DArray

View File

@@ -351,13 +351,23 @@ GPUTexture *IMB_create_gpu_texture(const char *name,
bool freebuf = false;
/* Create Texture. */
tex = GPU_texture_create_2d_ex(
name, UNPACK2(size), 9999, tex_format, GPU_TEXTURE_USAGE_SHADER_READ, NULL);
tex = GPU_texture_create_2d_ex(name,
UNPACK2(size),
9999,
tex_format,
GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
NULL);
if (tex == NULL) {
size[0] = max_ii(1, size[0] / 2);
size[1] = max_ii(1, size[1] / 2);
tex = GPU_texture_create_2d_ex(
name, UNPACK2(size), 9999, tex_format, GPU_TEXTURE_USAGE_SHADER_READ, NULL);
tex = GPU_texture_create_2d_ex(name,
UNPACK2(size),
9999,
tex_format,
GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
NULL);
do_rescale = true;
}
BLI_assert(tex != NULL);

View File

@@ -92,7 +92,7 @@ typedef enum eCustomDataType {
CD_MVERT = 0, /* DEPRECATED */
CD_MSTICKY = 1, /* DEPRECATED */
#endif
CD_MDEFORMVERT = 2,
CD_MDEFORMVERT = 2, /* Array of `MDeformVert`. */
CD_MEDGE = 3,
CD_MFACE = 4,
CD_MTFACE = 5,

View File

@@ -13,68 +13,48 @@ from modules.mesh_test import RunTest, ModifierSpec, SpecMeshTest
seed(0)
def get_generate_modifiers_list(test_object_name, randomize=False):
"""
Construct a list of 'Generate' modifiers with default parameters.
:arg test_object_name: Name of test object. Some modifiers like boolean need an extra parameter beside
the default one. E.g. boolean needs object, mask needs vertex group etc...
The extra parameter name will be <test_object_name>_<modifier_type>
:type test_object_name: str
:arg randomize: If True shuffle the list of modifiers.
:type randomize: bool
:return: list of 'Generate' modifiers with default parameters.
"""
boolean_test_object = bpy.data.objects[test_object_name + "_boolean"]
def cube_mask_first_modifier_list():
generate_modifiers = [
ModifierSpec('array', 'ARRAY', {}),
ModifierSpec('bevel', 'BEVEL', {'width': 0.1, 'limit_method': 'NONE'}),
ModifierSpec('boolean', 'BOOLEAN', {'object': boolean_test_object, 'solver': 'FAST'}),
ModifierSpec('build', 'BUILD', {'frame_start': 1, 'frame_duration': 1}, 2),
ModifierSpec('decimate', 'DECIMATE', {}),
ModifierSpec('edge split', 'EDGE_SPLIT', {}),
# mask can effectively delete the mesh since the vertex group need to be updated after each
# applied modifier. Needs to be tested separately.
# ModifierSpec('mask', 'MASK', {'vertex_group': mask_vertex_group}, False),
ModifierSpec('mirror', 'MIRROR', {}),
ModifierSpec('multires', 'MULTIRES', {}),
# remesh can also generate an empty mesh. Skip.
# ModifierSpec('remesh', 'REMESH', {}),
# ModifierSpec('screw', 'SCREW', {}), # screw can make the test very slow. Skipping for now.
ModifierSpec('mask', 'MASK', {'vertex_group': "testCubeMaskFirst_mask"}),
ModifierSpec('solidify', 'SOLIDIFY', {}),
# Opensubdiv results might differ slightly when compiled with different optimization flags.
#ModifierSpec('subsurf', 'SUBSURF', {}),
ModifierSpec('triangulate', 'TRIANGULATE', {}),
ModifierSpec('wireframe', 'WIREFRAME', {})
ModifierSpec('bevel', 'BEVEL', {'width': 0.1, 'limit_method': 'NONE'}),
ModifierSpec('boolean', 'BOOLEAN', {'object': bpy.data.objects["testCubeMaskFirst_boolean"], 'solver': 'FAST'}),
ModifierSpec('edge split', 'EDGE_SPLIT', {}),
ModifierSpec('build', 'BUILD', {'frame_start': 1, 'frame_duration': 1}, 2),
ModifierSpec('multires', 'MULTIRES', {}),
ModifierSpec('decimate', 'DECIMATE', {}),
ModifierSpec('array', 'ARRAY', {}),
ModifierSpec('wireframe', 'WIREFRAME', {}),
ModifierSpec('mirror', 'MIRROR', {}),
]
if randomize:
shuffle(generate_modifiers)
return generate_modifiers
def cube_random_modifier_list():
generate_modifiers = [
ModifierSpec('edge split', 'EDGE_SPLIT', {}),
ModifierSpec('decimate', 'DECIMATE', {}),
ModifierSpec('wireframe', 'WIREFRAME', {}),
ModifierSpec('mirror', 'MIRROR', {}),
ModifierSpec('array', 'ARRAY', {}),
ModifierSpec('bevel', 'BEVEL', {'width': 0.1, 'limit_method': 'NONE'}),
ModifierSpec('multires', 'MULTIRES', {}),
ModifierSpec('boolean', 'BOOLEAN', {'object': bpy.data.objects["testCubeRandom_boolean"], 'solver': 'FAST'}),
ModifierSpec('solidify', 'SOLIDIFY', {}),
ModifierSpec('build', 'BUILD', {'frame_start': 1, 'frame_duration': 1}, 2),
ModifierSpec('triangulate', 'TRIANGULATE', {}),
]
return generate_modifiers
def main():
mask_first_list = get_generate_modifiers_list("testCubeMaskFirst", randomize=True)
mask_vertex_group = "testCubeMaskFirst" + "_mask"
mask_first_list.insert(0, ModifierSpec('mask', 'MASK', {'vertex_group': mask_vertex_group}))
tests = [
###############################
# List of 'Generate' modifiers on a cube
###############################
# 0
# SpecMeshTest("testCube", "expectedCube", get_generate_modifiers_list("testCube")),
SpecMeshTest("CubeRandom", "testCubeRandom", "expectedCubeRandom",
get_generate_modifiers_list("testCubeRandom", randomize=True)),
SpecMeshTest("CubeMaskFirst", "testCubeMaskFirst", "expectedCubeMaskFirst", mask_first_list),
SpecMeshTest("CubeRandom", "testCubeRandom", "expectedCubeRandom", cube_random_modifier_list()),
SpecMeshTest("CubeMaskFirst", "testCubeMaskFirst", "expectedCubeMaskFirst", cube_mask_first_modifier_list()),
SpecMeshTest("CollapseDecimate", "testCollapseDecimate", "expectedCollapseDecimate",
[ModifierSpec('decimate', 'DECIMATE',