Metal: Fix memory leaks.
Fix a number of small memory leaks in the Metal backend. Unreleased blit shader objects and temporary textures are now addressed. The static memory manager has been modified to defer creation until first use. Added a reference-count tracker to the memory manager shared across contexts, so that cached memory allocations are released once all contexts have been destroyed and re-initialized. Authored by Apple: Michael Parkin-White. Ref T96261. Reviewed By: fclem. Differential Revision: https://developer.blender.org/D16415
This commit is contained in:
parent
9ec20f2ede
commit
359d98423e
Notes:
blender-bot
2023-02-14 09:43:37 +01:00
Referenced by issue #96261, Metal Viewport
|
@ -144,15 +144,15 @@ void MTLBackend::render_step()
|
|||
* is also thread-safe. */
|
||||
|
||||
/* Flush any MTLSafeFreeLists which have previously been released by any MTLContext. */
|
||||
MTLContext::get_global_memory_manager().update_memory_pools();
|
||||
MTLContext::get_global_memory_manager()->update_memory_pools();
|
||||
|
||||
/* End existing MTLSafeFreeList and begin new list --
|
||||
* Buffers won't `free` until all associated in-flight command buffers have completed.
|
||||
* Decrement final reference count for ensuring the previous list is certainly
|
||||
* released. */
|
||||
MTLSafeFreeList *cmd_free_buffer_list =
|
||||
MTLContext::get_global_memory_manager().get_current_safe_list();
|
||||
MTLContext::get_global_memory_manager().begin_new_safe_list();
|
||||
MTLContext::get_global_memory_manager()->get_current_safe_list();
|
||||
MTLContext::get_global_memory_manager()->begin_new_safe_list();
|
||||
cmd_free_buffer_list->decrement_reference();
|
||||
}
|
||||
|
||||
|
|
|
@ -943,7 +943,7 @@ id<MTLBuffer> MTLBatch::get_emulated_toplogy_buffer(GPUPrimType &in_out_prim_typ
|
|||
/* Allocate buffer. */
|
||||
uint32_t buffer_bytes = output_IB_elems * 4;
|
||||
BLI_assert(buffer_bytes > 0);
|
||||
this->emulated_topology_buffer_ = MTLContext::get_global_memory_manager().allocate(
|
||||
this->emulated_topology_buffer_ = MTLContext::get_global_memory_manager()->allocate(
|
||||
buffer_bytes, true);
|
||||
|
||||
/* Populate. */
|
||||
|
|
|
@ -115,7 +115,7 @@ bool MTLCommandBufferManager::submit(bool wait)
|
|||
* This ensures that in-use resources are not prematurely de-referenced and returned to the
|
||||
* available buffer pool while they are in-use by the GPU. */
|
||||
MTLSafeFreeList *cmd_free_buffer_list =
|
||||
MTLContext::get_global_memory_manager().get_current_safe_list();
|
||||
MTLContext::get_global_memory_manager()->get_current_safe_list();
|
||||
BLI_assert(cmd_free_buffer_list);
|
||||
cmd_free_buffer_list->increment_reference();
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <Cocoa/Cocoa.h>
|
||||
#include <Metal/Metal.h>
|
||||
#include <QuartzCore/QuartzCore.h>
|
||||
#include <mutex>
|
||||
|
||||
@class CAMetalLayer;
|
||||
@class MTLCommandQueue;
|
||||
|
@ -310,6 +311,12 @@ struct MTLContextTextureUtils {
|
|||
GPU_shader_free(fullscreen_blit_shader);
|
||||
}
|
||||
|
||||
/* Free depth 2D Update shaders */
|
||||
for (auto item : depth_2d_update_shaders.items()) {
|
||||
GPU_shader_free(item.value);
|
||||
}
|
||||
depth_2d_update_shaders.clear();
|
||||
|
||||
/* Free Read shader maps */
|
||||
free_cached_pso_map(texture_1d_read_compute_psos);
|
||||
free_cached_pso_map(texture_1d_read_compute_psos);
|
||||
|
@ -599,7 +606,9 @@ class MTLContext : public Context {
|
|||
|
||||
/* Memory Management. */
|
||||
MTLScratchBufferManager memory_manager;
|
||||
static MTLBufferPool global_memory_manager;
|
||||
static std::mutex global_memory_manager_reflock;
|
||||
static int global_memory_manager_refcount;
|
||||
static MTLBufferPool *global_memory_manager;
|
||||
|
||||
/* CommandBuffer managers. */
|
||||
MTLCommandBufferManager main_command_buffer;
|
||||
|
@ -780,8 +789,34 @@ class MTLContext : public Context {
|
|||
return this->memory_manager;
|
||||
}
|
||||
|
||||
static MTLBufferPool &get_global_memory_manager()
|
||||
static void global_memory_manager_acquire_ref()
|
||||
{
|
||||
MTLContext::global_memory_manager_reflock.lock();
|
||||
if (MTLContext::global_memory_manager == nullptr) {
|
||||
BLI_assert(MTLContext::global_memory_manager_refcount == 0);
|
||||
MTLContext::global_memory_manager = new MTLBufferPool();
|
||||
}
|
||||
MTLContext::global_memory_manager_refcount++;
|
||||
MTLContext::global_memory_manager_reflock.unlock();
|
||||
}
|
||||
|
||||
static void global_memory_manager_release_ref()
|
||||
{
|
||||
MTLContext::global_memory_manager_reflock.lock();
|
||||
MTLContext::global_memory_manager_refcount--;
|
||||
BLI_assert(MTLContext::global_memory_manager_refcount >= 0);
|
||||
BLI_assert(MTLContext::global_memory_manager != nullptr);
|
||||
|
||||
if (MTLContext::global_memory_manager_refcount <= 0) {
|
||||
delete MTLContext::global_memory_manager;
|
||||
MTLContext::global_memory_manager = nullptr;
|
||||
}
|
||||
MTLContext::global_memory_manager_reflock.unlock();
|
||||
}
|
||||
|
||||
static MTLBufferPool *get_global_memory_manager()
|
||||
{
|
||||
BLI_assert(MTLContext::global_memory_manager != nullptr);
|
||||
return MTLContext::global_memory_manager;
|
||||
}
|
||||
|
||||
|
|
|
@ -35,7 +35,9 @@ using namespace blender::gpu;
|
|||
namespace blender::gpu {
|
||||
|
||||
/* Global memory manager. */
|
||||
MTLBufferPool MTLContext::global_memory_manager;
|
||||
std::mutex MTLContext::global_memory_manager_reflock;
|
||||
int MTLContext::global_memory_manager_refcount = 0;
|
||||
MTLBufferPool *MTLContext::global_memory_manager = nullptr;
|
||||
|
||||
/* Swap-chain and latency management. */
|
||||
std::atomic<int> MTLContext::max_drawables_in_flight = 0;
|
||||
|
@ -207,7 +209,8 @@ MTLContext::MTLContext(void *ghost_window, void *ghost_context)
|
|||
this->imm = new MTLImmediate(this);
|
||||
|
||||
/* Ensure global memory manager is initialized. */
|
||||
MTLContext::global_memory_manager.init(this->device);
|
||||
MTLContext::global_memory_manager_acquire_ref();
|
||||
MTLContext::get_global_memory_manager()->init(this->device);
|
||||
|
||||
/* Initialize texture read/update structures. */
|
||||
this->get_texture_utils().init();
|
||||
|
@ -239,6 +242,16 @@ MTLContext::~MTLContext()
|
|||
}
|
||||
}
|
||||
|
||||
/* Release context textures. */
|
||||
if (default_fbo_gputexture_) {
|
||||
GPU_texture_free(wrap(static_cast<Texture *>(default_fbo_gputexture_)));
|
||||
default_fbo_gputexture_ = nullptr;
|
||||
}
|
||||
if (default_fbo_mtltexture_) {
|
||||
[default_fbo_mtltexture_ release];
|
||||
default_fbo_mtltexture_ = nil;
|
||||
}
|
||||
|
||||
/* Release Memory Manager */
|
||||
this->get_scratchbuffer_manager().free();
|
||||
|
||||
|
@ -283,6 +296,9 @@ MTLContext::~MTLContext()
|
|||
[null_attribute_buffer_ release];
|
||||
}
|
||||
|
||||
/* Release memory manager reference. */
|
||||
MTLContext::global_memory_manager_release_ref();
|
||||
|
||||
/* Free Metal objects. */
|
||||
if (this->queue) {
|
||||
[this->queue release];
|
||||
|
@ -1291,8 +1307,8 @@ void MTLContext::ensure_texture_bindings(
|
|||
size;
|
||||
|
||||
/* Allocate buffer to store encoded sampler arguments. */
|
||||
encoder_buffer = MTLContext::get_global_memory_manager().allocate(aligned_alloc_size,
|
||||
true);
|
||||
encoder_buffer = MTLContext::get_global_memory_manager()->allocate(aligned_alloc_size,
|
||||
true);
|
||||
BLI_assert(encoder_buffer);
|
||||
BLI_assert(encoder_buffer->get_metal_buffer());
|
||||
[argument_encoder setArgumentBuffer:encoder_buffer->get_metal_buffer() offset:0];
|
||||
|
@ -1687,7 +1703,7 @@ void present(MTLRenderPassDescriptor *blit_descriptor,
|
|||
|
||||
/* Ensure freed buffers have usage tracked against active CommandBuffer submissions. */
|
||||
MTLSafeFreeList *cmd_free_buffer_list =
|
||||
MTLContext::get_global_memory_manager().get_current_safe_list();
|
||||
MTLContext::get_global_memory_manager()->get_current_safe_list();
|
||||
BLI_assert(cmd_free_buffer_list);
|
||||
|
||||
id<MTLCommandBuffer> cmd_buffer_ref = cmdbuf;
|
||||
|
|
|
@ -98,7 +98,7 @@ void MTLIndexBuf::upload_data()
|
|||
MTL_LOG_WARNING("[Metal] Warning! Trying to allocate index buffer with size=0 bytes\n");
|
||||
}
|
||||
else {
|
||||
ibo_ = MTLContext::get_global_memory_manager().allocate_with_data(alloc_size_, true, data_);
|
||||
ibo_ = MTLContext::get_global_memory_manager()->allocate_with_data(alloc_size_, true, data_);
|
||||
BLI_assert(ibo_);
|
||||
ibo_->set_label(@"Index Buffer");
|
||||
}
|
||||
|
@ -340,7 +340,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
|
|||
BLI_assert(max_possible_verts > 0);
|
||||
|
||||
/* Allocate new buffer. */
|
||||
optimized_ibo_ = MTLContext::get_global_memory_manager().allocate(
|
||||
optimized_ibo_ = MTLContext::get_global_memory_manager()->allocate(
|
||||
max_possible_verts *
|
||||
((index_type_ == GPU_INDEX_U16) ? sizeof(uint16_t) : sizeof(uint32_t)),
|
||||
true);
|
||||
|
@ -350,7 +350,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
|
|||
Span<uint16_t> orig_data(static_cast<const uint16_t *>(ibo_->get_host_ptr()),
|
||||
this->index_len_);
|
||||
MutableSpan<uint16_t> output_data(
|
||||
static_cast<uint16_t *>(optimized_ibo_->get_host_ptr()), this->index_len_);
|
||||
static_cast<uint16_t *>(optimized_ibo_->get_host_ptr()), max_possible_verts);
|
||||
emulated_v_count = populate_emulated_tri_fan_buf<uint16_t>(
|
||||
orig_data, output_data, this->index_len_);
|
||||
}
|
||||
|
@ -358,7 +358,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
|
|||
Span<uint32_t> orig_data(static_cast<const uint32_t *>(ibo_->get_host_ptr()),
|
||||
this->index_len_);
|
||||
MutableSpan<uint32_t> output_data(
|
||||
static_cast<uint32_t *>(optimized_ibo_->get_host_ptr()), this->index_len_);
|
||||
static_cast<uint32_t *>(optimized_ibo_->get_host_ptr()), max_possible_verts);
|
||||
emulated_v_count = populate_emulated_tri_fan_buf<uint32_t>(
|
||||
orig_data, output_data, this->index_len_);
|
||||
}
|
||||
|
@ -379,7 +379,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
|
|||
BLI_assert(max_possible_verts > 0);
|
||||
|
||||
/* Allocate new buffer. */
|
||||
optimized_ibo_ = MTLContext::get_global_memory_manager().allocate(
|
||||
optimized_ibo_ = MTLContext::get_global_memory_manager()->allocate(
|
||||
max_possible_verts *
|
||||
((index_type_ == GPU_INDEX_U16) ? sizeof(uint16_t) : sizeof(uint32_t)),
|
||||
true);
|
||||
|
@ -389,7 +389,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
|
|||
Span<uint16_t> orig_data(static_cast<const uint16_t *>(ibo_->get_host_ptr()),
|
||||
this->index_len_);
|
||||
MutableSpan<uint16_t> output_data(
|
||||
static_cast<uint16_t *>(optimized_ibo_->get_host_ptr()), this->index_len_);
|
||||
static_cast<uint16_t *>(optimized_ibo_->get_host_ptr()), max_possible_verts);
|
||||
emulated_v_count = populate_optimized_tri_strip_buf<uint16_t>(
|
||||
orig_data, output_data, this->index_len_);
|
||||
}
|
||||
|
@ -397,7 +397,7 @@ id<MTLBuffer> MTLIndexBuf::get_index_buffer(GPUPrimType &in_out_primitive_type,
|
|||
Span<uint32_t> orig_data(static_cast<const uint32_t *>(ibo_->get_host_ptr()),
|
||||
this->index_len_);
|
||||
MutableSpan<uint32_t> output_data(
|
||||
static_cast<uint32_t *>(optimized_ibo_->get_host_ptr()), this->index_len_);
|
||||
static_cast<uint32_t *>(optimized_ibo_->get_host_ptr()), max_possible_verts);
|
||||
emulated_v_count = populate_optimized_tri_strip_buf<uint32_t>(
|
||||
orig_data, output_data, this->index_len_);
|
||||
}
|
||||
|
|
|
@ -162,7 +162,7 @@ gpu::MTLBuffer *MTLBufferPool::allocate_aligned(uint64_t size,
|
|||
new_buffer->flag_in_use(true);
|
||||
|
||||
#if MTL_DEBUG_MEMORY_STATISTICS == 1
|
||||
this->per_frame_allocation_count++;
|
||||
per_frame_allocation_count_++;
|
||||
#endif
|
||||
|
||||
return new_buffer;
|
||||
|
@ -266,7 +266,7 @@ void MTLBufferPool::update_memory_pools()
|
|||
printf("--- Allocation Stats ---\n");
|
||||
printf(" Num buffers processed in pool (this frame): %u\n", num_buffers_added);
|
||||
|
||||
uint framealloc = (uint)this->per_frame_allocation_count;
|
||||
uint framealloc = (uint)per_frame_allocation_count_;
|
||||
printf(" Allocations in frame: %u\n", framealloc);
|
||||
printf(" Total Buffers allocated: %u\n", (uint)allocations_.size());
|
||||
printf(" Total Memory allocated: %u MB\n", (uint)total_allocation_bytes_ / (1024 * 1024));
|
||||
|
@ -297,7 +297,7 @@ void MTLBufferPool::update_memory_pools()
|
|||
++value_iterator;
|
||||
}
|
||||
|
||||
this->per_frame_allocation_count = 0;
|
||||
per_frame_allocation_count_ = 0;
|
||||
#endif
|
||||
|
||||
/* Clear safe pools list */
|
||||
|
@ -432,7 +432,7 @@ void MTLSafeFreeList::decrement_reference()
|
|||
int ref_count = --reference_count_;
|
||||
|
||||
if (ref_count == 0) {
|
||||
MTLContext::get_global_memory_manager().push_completed_safe_list(this);
|
||||
MTLContext::get_global_memory_manager()->push_completed_safe_list(this);
|
||||
}
|
||||
lock_.unlock();
|
||||
}
|
||||
|
@ -462,7 +462,6 @@ MTLBuffer::MTLBuffer(id<MTLDevice> mtl_device,
|
|||
|
||||
metal_buffer_ = [device_ newBufferWithLength:aligned_alloc_size options:options];
|
||||
BLI_assert(metal_buffer_);
|
||||
[metal_buffer_ retain];
|
||||
|
||||
size_ = aligned_alloc_size;
|
||||
this->set_usage_size(size_);
|
||||
|
@ -504,7 +503,7 @@ gpu::MTLBuffer::~MTLBuffer()
|
|||
void gpu::MTLBuffer::free()
|
||||
{
|
||||
if (!is_external_) {
|
||||
MTLContext::get_global_memory_manager().free_buffer(this);
|
||||
MTLContext::get_global_memory_manager()->free_buffer(this);
|
||||
}
|
||||
else {
|
||||
if (metal_buffer_ != nil) {
|
||||
|
|
|
@ -30,8 +30,8 @@ void MTLQueryPool::allocate()
|
|||
{
|
||||
/* Allocate Metal buffer for visibility results. */
|
||||
size_t buffer_size_in_bytes = VISIBILITY_COUNT_PER_BUFFER * VISIBILITY_RESULT_SIZE_IN_BYTES;
|
||||
gpu::MTLBuffer *buffer = MTLContext::get_global_memory_manager().allocate(buffer_size_in_bytes,
|
||||
true);
|
||||
gpu::MTLBuffer *buffer = MTLContext::get_global_memory_manager()->allocate(buffer_size_in_bytes,
|
||||
true);
|
||||
BLI_assert(buffer);
|
||||
buffer_.append(buffer);
|
||||
}
|
||||
|
|
|
@ -166,13 +166,13 @@ class MTLTexture : public Texture {
|
|||
/* 'baking' refers to the generation of GPU-backed resources. This flag ensures GPU resources are
|
||||
* ready. Baking is generally deferred until as late as possible, to ensure all associated
|
||||
* resource state has been specified up-front. */
|
||||
bool is_baked_;
|
||||
MTLTextureDescriptor *texture_descriptor_;
|
||||
id<MTLTexture> texture_;
|
||||
bool is_baked_ = false;
|
||||
MTLTextureDescriptor *texture_descriptor_ = nullptr;
|
||||
id<MTLTexture> texture_ = nil;
|
||||
MTLTextureUsage usage_;
|
||||
|
||||
/* Texture Storage. */
|
||||
id<MTLBuffer> texture_buffer_;
|
||||
id<MTLBuffer> texture_buffer_ = nil;
|
||||
uint aligned_w_ = 0;
|
||||
|
||||
/* Blit Frame-buffer. */
|
||||
|
|
|
@ -1272,17 +1272,14 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
BLI_assert(total_bytes <= debug_data_size);
|
||||
|
||||
/* Fetch allocation from scratch buffer. */
|
||||
id<MTLBuffer> destination_buffer = nil;
|
||||
uint destination_offset = 0;
|
||||
void *destination_buffer_host_ptr = nullptr;
|
||||
gpu::MTLBuffer *dest_buf = MTLContext::get_global_memory_manager()->allocate_aligned(
|
||||
total_bytes, 256, true);
|
||||
BLI_assert(dest_buf != nullptr);
|
||||
|
||||
/* TODO(Metal): Optimize buffer allocation. */
|
||||
MTLResourceOptions bufferOptions = MTLResourceStorageModeManaged;
|
||||
destination_buffer = [ctx->device newBufferWithLength:max_ii(total_bytes, 256)
|
||||
options:bufferOptions];
|
||||
destination_offset = 0;
|
||||
destination_buffer_host_ptr = (void *)((uint8_t *)([destination_buffer contents]) +
|
||||
destination_offset);
|
||||
id<MTLBuffer> destination_buffer = dest_buf->get_metal_buffer();
|
||||
BLI_assert(destination_buffer != nil);
|
||||
void *destination_buffer_host_ptr = dest_buf->get_host_ptr();
|
||||
BLI_assert(destination_buffer_host_ptr != nullptr);
|
||||
|
||||
/* Prepare specialization struct (For non-trivial texture read routine). */
|
||||
int depth_format_mode = 0;
|
||||
|
@ -1339,10 +1336,9 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
sourceOrigin:MTLOriginMake(x_off, y_off, 0)
|
||||
sourceSize:MTLSizeMake(width, height, 1)
|
||||
toBuffer:destination_buffer
|
||||
destinationOffset:destination_offset
|
||||
destinationOffset:0
|
||||
destinationBytesPerRow:bytes_per_row
|
||||
destinationBytesPerImage:bytes_per_image];
|
||||
[enc synchronizeResource:destination_buffer];
|
||||
copy_successful = true;
|
||||
}
|
||||
else {
|
||||
|
@ -1359,17 +1355,10 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
};
|
||||
[compute_encoder setComputePipelineState:pso];
|
||||
[compute_encoder setBytes:¶ms length:sizeof(params) atIndex:0];
|
||||
[compute_encoder setBuffer:destination_buffer offset:destination_offset atIndex:1];
|
||||
[compute_encoder setBuffer:destination_buffer offset:0 atIndex:1];
|
||||
[compute_encoder setTexture:read_texture atIndex:0];
|
||||
[compute_encoder dispatchThreads:MTLSizeMake(width, height, 1) /* Width, Height, Layer */
|
||||
threadsPerThreadgroup:MTLSizeMake(8, 8, 1)];
|
||||
|
||||
/* Use Blit encoder to synchronize results back to CPU. */
|
||||
id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
|
||||
if (G.debug & G_DEBUG_GPU) {
|
||||
[enc insertDebugSignpost:@"GPUTextureRead-syncResource"];
|
||||
}
|
||||
[enc synchronizeResource:destination_buffer];
|
||||
copy_successful = true;
|
||||
}
|
||||
} break;
|
||||
|
@ -1392,11 +1381,9 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
sourceOrigin:MTLOriginMake(x_off, y_off, 0)
|
||||
sourceSize:MTLSizeMake(width, height, 1)
|
||||
toBuffer:destination_buffer
|
||||
destinationOffset:destination_offset + texture_array_relative_offset
|
||||
destinationOffset:texture_array_relative_offset
|
||||
destinationBytesPerRow:bytes_per_row
|
||||
destinationBytesPerImage:bytes_per_image];
|
||||
[enc synchronizeResource:destination_buffer];
|
||||
|
||||
texture_array_relative_offset += bytes_per_image;
|
||||
}
|
||||
copy_successful = true;
|
||||
|
@ -1415,18 +1402,11 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
};
|
||||
[compute_encoder setComputePipelineState:pso];
|
||||
[compute_encoder setBytes:¶ms length:sizeof(params) atIndex:0];
|
||||
[compute_encoder setBuffer:destination_buffer offset:destination_offset atIndex:1];
|
||||
[compute_encoder setBuffer:destination_buffer offset:0 atIndex:1];
|
||||
[compute_encoder setTexture:read_texture atIndex:0];
|
||||
[compute_encoder
|
||||
dispatchThreads:MTLSizeMake(width, height, depth) /* Width, Height, Layer */
|
||||
threadsPerThreadgroup:MTLSizeMake(8, 8, 1)];
|
||||
|
||||
/* Use Blit encoder to synchronize results back to CPU. */
|
||||
id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
|
||||
if (G.debug & G_DEBUG_GPU) {
|
||||
[enc insertDebugSignpost:@"GPUTextureRead-syncResource"];
|
||||
}
|
||||
[enc synchronizeResource:destination_buffer];
|
||||
copy_successful = true;
|
||||
}
|
||||
} break;
|
||||
|
@ -1448,10 +1428,9 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
sourceOrigin:MTLOriginMake(x_off, y_off, 0)
|
||||
sourceSize:MTLSizeMake(width, height, 1)
|
||||
toBuffer:destination_buffer
|
||||
destinationOffset:destination_offset + texture_array_relative_offset
|
||||
destinationOffset:texture_array_relative_offset
|
||||
destinationBytesPerRow:bytes_per_row
|
||||
destinationBytesPerImage:bytes_per_image];
|
||||
[enc synchronizeResource:destination_buffer];
|
||||
|
||||
texture_array_relative_offset += bytes_per_image;
|
||||
}
|
||||
|
@ -1472,6 +1451,16 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
}
|
||||
|
||||
if (copy_successful) {
|
||||
|
||||
/* Use Blit encoder to synchronize results back to CPU. */
|
||||
if (dest_buf->get_resource_options() == MTLResourceStorageModeManaged) {
|
||||
id<MTLBlitCommandEncoder> enc = ctx->main_command_buffer.ensure_begin_blit_encoder();
|
||||
if (G.debug & G_DEBUG_GPU) {
|
||||
[enc insertDebugSignpost:@"GPUTextureRead-syncResource"];
|
||||
}
|
||||
[enc synchronizeResource:destination_buffer];
|
||||
}
|
||||
|
||||
/* Ensure GPU copy commands have completed. */
|
||||
GPU_finish();
|
||||
|
||||
|
@ -1491,6 +1480,9 @@ void gpu::MTLTexture::read_internal(int mip,
|
|||
image_components,
|
||||
num_output_components);
|
||||
}
|
||||
|
||||
/* Release destination buffer. */
|
||||
dest_buf->free();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1658,12 +1650,13 @@ void gpu::MTLTexture::ensure_baked()
|
|||
{
|
||||
|
||||
/* If properties have changed, re-bake. */
|
||||
id<MTLTexture> previous_texture = nil;
|
||||
bool copy_previous_contents = false;
|
||||
|
||||
if (is_baked_ && is_dirty_) {
|
||||
copy_previous_contents = true;
|
||||
id<MTLTexture> previous_texture = texture_;
|
||||
previous_texture = texture_;
|
||||
[previous_texture retain];
|
||||
|
||||
this->reset();
|
||||
}
|
||||
|
||||
|
@ -1812,10 +1805,8 @@ void gpu::MTLTexture::ensure_baked()
|
|||
|
||||
/* Re-apply previous contents. */
|
||||
if (copy_previous_contents) {
|
||||
id<MTLTexture> previous_texture;
|
||||
/* TODO(Metal): May need to copy previous contents of texture into new texture. */
|
||||
/*[previous_texture release]; */
|
||||
UNUSED_VARS(previous_texture);
|
||||
[previous_texture release];
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ void MTLUniformBuf::update(const void *data)
|
|||
UNUSED_VARS_NDEBUG(ctx);
|
||||
|
||||
if (data != nullptr) {
|
||||
metal_buffer_ = MTLContext::get_global_memory_manager().allocate_with_data(
|
||||
metal_buffer_ = MTLContext::get_global_memory_manager()->allocate_with_data(
|
||||
size_in_bytes_, true, data);
|
||||
has_data_ = true;
|
||||
|
||||
|
|
|
@ -72,7 +72,7 @@ void MTLVertBuf::duplicate_data(VertBuf *dst_)
|
|||
|
||||
/* Allocate VBO for destination vertbuf. */
|
||||
uint length = src->vbo_->get_size();
|
||||
dst->vbo_ = MTLContext::get_global_memory_manager().allocate(
|
||||
dst->vbo_ = MTLContext::get_global_memory_manager()->allocate(
|
||||
length, (dst->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
|
||||
dst->alloc_size_ = length;
|
||||
|
||||
|
@ -162,7 +162,7 @@ void MTLVertBuf::bind()
|
|||
|
||||
/* Create MTLBuffer of requested size. */
|
||||
if (vbo_ == nullptr) {
|
||||
vbo_ = MTLContext::get_global_memory_manager().allocate(
|
||||
vbo_ = MTLContext::get_global_memory_manager()->allocate(
|
||||
required_size, (this->get_usage_type() != GPU_USAGE_DEVICE_ONLY));
|
||||
vbo_->set_label(@"Vertex Buffer");
|
||||
BLI_assert(vbo_ != nullptr);
|
||||
|
|
Loading…
Reference in New Issue