Fix T103433: Ensure Metal memory allocator is safe for multi-threaded allocation. Resolves crash when baking indirect lighting.
Also applies correct texture usage flag to light bake texture. Authored by Apple: Michael Parkin-White Ref T96261 Reviewed By: fclem, jbakker Maniphest Tasks: T96261, T103433 Differential Revision: https://developer.blender.org/D17018
This commit is contained in:
parent
0ba5954bb2
commit
1c672f3d1d
Notes:
blender-bot
2023-02-14 01:52:41 +01:00
Referenced by issue #103433, MacOS Metal GPU backend: Crash when baking indirect lighting Referenced by issue #96261, Metal Viewport
|
@ -716,7 +716,8 @@ static void eevee_lightbake_create_resources(EEVEE_LightBake *lbake)
|
|||
lbake->irr_size[1],
|
||||
lbake->irr_size[2],
|
||||
IRRADIANCE_FORMAT,
|
||||
GPU_TEXTURE_USAGE_SHADER_READ,
|
||||
GPU_TEXTURE_USAGE_SHADER_READ |
|
||||
GPU_TEXTURE_USAGE_ATTACHMENT,
|
||||
DRW_TEX_FILTER,
|
||||
NULL);
|
||||
|
||||
|
|
|
@ -339,10 +339,10 @@ class MTLSafeFreeList {
|
|||
class MTLBufferPool {
|
||||
|
||||
private:
|
||||
/* Memory statistics. */
|
||||
int64_t total_allocation_bytes_ = 0;
|
||||
|
||||
#if MTL_DEBUG_MEMORY_STATISTICS == 1
|
||||
/* Memory statistics. */
|
||||
std::atomic<int64_t> total_allocation_bytes_;
|
||||
|
||||
/* Debug statistics. */
|
||||
std::atomic<int> per_frame_allocation_count_;
|
||||
std::atomic<int64_t> allocations_in_pool_;
|
||||
|
@ -368,10 +368,14 @@ class MTLBufferPool {
|
|||
* - A size-ordered list (MultiSet) of allocated buffers is kept per MTLResourceOptions
|
||||
* permutation. This allows efficient lookup for buffers of a given requested size.
|
||||
* - MTLBufferHandle wraps a gpu::MTLBuffer pointer to achieve easy size-based sorting
|
||||
* via CompareMTLBuffer. */
|
||||
* via CompareMTLBuffer.
|
||||
*
|
||||
* NOTE: buffer_pool_lock_ guards against concurrent access to the memory allocator. This
|
||||
* can occur during light baking or rendering operations. */
|
||||
using MTLBufferPoolOrderedList = std::multiset<MTLBufferHandle, CompareMTLBuffer>;
|
||||
using MTLBufferResourceOptions = uint64_t;
|
||||
|
||||
std::mutex buffer_pool_lock_;
|
||||
blender::Map<MTLBufferResourceOptions, MTLBufferPoolOrderedList *> buffer_pools_;
|
||||
blender::Vector<gpu::MTLBuffer *> allocations_;
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@ void MTLBufferPool::init(id<MTLDevice> mtl_device)
|
|||
|
||||
#if MTL_DEBUG_MEMORY_STATISTICS == 1
|
||||
/* Debug statistics. */
|
||||
total_allocation_bytes_ = 0;
|
||||
per_frame_allocation_count_ = 0;
|
||||
allocations_in_pool_ = 0;
|
||||
buffers_in_pool_ = 0;
|
||||
|
@ -43,7 +44,7 @@ MTLBufferPool::~MTLBufferPool()
|
|||
|
||||
void MTLBufferPool::free()
|
||||
{
|
||||
|
||||
buffer_pool_lock_.lock();
|
||||
for (auto buffer : allocations_) {
|
||||
BLI_assert(buffer);
|
||||
delete buffer;
|
||||
|
@ -55,6 +56,7 @@ void MTLBufferPool::free()
|
|||
delete buffer_pool;
|
||||
}
|
||||
buffer_pools_.clear();
|
||||
buffer_pool_lock_.unlock();
|
||||
}
|
||||
|
||||
gpu::MTLBuffer *MTLBufferPool::allocate(uint64_t size, bool cpu_visible)
|
||||
|
@ -96,6 +98,8 @@ gpu::MTLBuffer *MTLBufferPool::allocate_aligned(uint64_t size,
|
|||
|
||||
/* Check if we have a suitable buffer */
|
||||
gpu::MTLBuffer *new_buffer = nullptr;
|
||||
buffer_pool_lock_.lock();
|
||||
|
||||
std::multiset<MTLBufferHandle, CompareMTLBuffer> **pool_search = buffer_pools_.lookup_ptr(
|
||||
(uint64_t)options);
|
||||
|
||||
|
@ -142,7 +146,9 @@ gpu::MTLBuffer *MTLBufferPool::allocate_aligned(uint64_t size,
|
|||
|
||||
/* Track allocation in context. */
|
||||
allocations_.append(new_buffer);
|
||||
#if MTL_DEBUG_MEMORY_STATISTICS == 1
|
||||
total_allocation_bytes_ += aligned_alloc_size;
|
||||
#endif
|
||||
}
|
||||
else {
|
||||
/* Re-use suitable buffer. */
|
||||
|
@ -165,6 +171,9 @@ gpu::MTLBuffer *MTLBufferPool::allocate_aligned(uint64_t size,
|
|||
per_frame_allocation_count_++;
|
||||
#endif
|
||||
|
||||
/* Release lock. */
|
||||
buffer_pool_lock_.unlock();
|
||||
|
||||
return new_buffer;
|
||||
}
|
||||
|
||||
|
@ -209,8 +218,11 @@ void MTLBufferPool::update_memory_pools()
|
|||
{
|
||||
/* Ensure thread-safe access to `completed_safelist_queue_`, which contains
|
||||
* the list of MTLSafeFreeList's whose buffers are ready to be
|
||||
* re-inserted into the Memory Manager pools. */
|
||||
* re-inserted into the Memory Manager pools.
|
||||
* we also need to lock access to general buffer pools, to ensure allocations
|
||||
* are not simultaneously happening on background threads. */
|
||||
safelist_lock_.lock();
|
||||
buffer_pool_lock_.lock();
|
||||
|
||||
#if MTL_DEBUG_MEMORY_STATISTICS == 1
|
||||
int num_buffers_added = 0;
|
||||
|
@ -302,6 +314,7 @@ void MTLBufferPool::update_memory_pools()
|
|||
|
||||
/* Clear safe pools list */
|
||||
completed_safelist_queue_.clear();
|
||||
buffer_pool_lock_.unlock();
|
||||
safelist_lock_.unlock();
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue