Metal: Fix issue with premature safe buffer list flush and optimize memory manager overhead.
Resolve an issue where released buffers were returned to the reusable memory pool before the GPU work associated with these buffers had been encoded. Normally, release of memory pools depends on successful completion of GPU work, signaled via command buffer callbacks. However, if the pool refresh operation occurs between encoding of work and its submission, the buffer ref-count is prematurely decremented. This patch also ensures safe buffer free lists are only flushed once a set number of buffers have been used. This reduces the overhead of small and frequent flushes, without raising the memory ceiling significantly. Authored by Apple: Michael Parkin-White Ref T96261 Reviewed By: fclem Maniphest Tasks: T96261 Differential Revision: https://developer.blender.org/D17118
This commit is contained in:
parent
d4d4efd3d3
commit
aca9c131fc
Notes:
blender-bot
2023-02-13 16:03:55 +01:00
Referenced by issue #96261, Metal Viewport
|
@ -152,8 +152,10 @@ void MTLBackend::render_step()
|
|||
* released. */
|
||||
MTLSafeFreeList *cmd_free_buffer_list =
|
||||
MTLContext::get_global_memory_manager()->get_current_safe_list();
|
||||
MTLContext::get_global_memory_manager()->begin_new_safe_list();
|
||||
cmd_free_buffer_list->decrement_reference();
|
||||
if (cmd_free_buffer_list->should_flush()) {
|
||||
MTLContext::get_global_memory_manager()->begin_new_safe_list();
|
||||
cmd_free_buffer_list->decrement_reference();
|
||||
}
|
||||
}
|
||||
|
||||
bool MTLBackend::is_inside_render_boundary()
|
||||
|
|
|
@ -285,6 +285,7 @@ class MTLSafeFreeList {
|
|||
private:
|
||||
std::atomic<int> reference_count_;
|
||||
std::atomic<bool> in_free_queue_;
|
||||
std::atomic<bool> referenced_by_workload_;
|
||||
std::recursive_mutex lock_;
|
||||
|
||||
/* Linked list of next MTLSafeFreeList chunk if current chunk is full. */
|
||||
|
@ -292,8 +293,12 @@ class MTLSafeFreeList {
|
|||
std::atomic<MTLSafeFreeList *> next_;
|
||||
|
||||
/* Lockless list. MAX_NUM_BUFFERS_ within a chunk based on considerations
|
||||
* for performance and memory. */
|
||||
* for performance and memory.
|
||||
* MIN_BUFFER_FLUSH_COUNT refers to the minimum count of buffers in the MTLSafeFreeList
|
||||
* before buffers are returned to global memory pool. This is set at a point to reduce
|
||||
* overhead of small pool flushes, while ensuring floating memory overhead is not excessive. */
|
||||
static const int MAX_NUM_BUFFERS_ = 1024;
|
||||
static const int MIN_BUFFER_FLUSH_COUNT = 120;
|
||||
std::atomic<int> current_list_index_;
|
||||
gpu::MTLBuffer *safe_free_pool_[MAX_NUM_BUFFERS_];
|
||||
|
||||
|
@ -304,11 +309,13 @@ class MTLSafeFreeList {
|
|||
* Performs a lockless list insert. */
|
||||
void insert_buffer(gpu::MTLBuffer *buffer);
|
||||
|
||||
/* Whether we need to start a new safe free list, or can carry on using the existing one. */
|
||||
bool should_flush();
|
||||
|
||||
/* Increments command buffer reference count. */
|
||||
void increment_reference();
|
||||
|
||||
/* Decrement and return of buffers to pool occur on MTLCommandBuffer completion callback thread.
|
||||
*/
|
||||
/* Decrement and return of buffers to pool occur on MTLCommandBuffer completion callback. */
|
||||
void decrement_reference();
|
||||
|
||||
void flag_in_queue()
|
||||
|
|
|
@ -433,6 +433,7 @@ void MTLSafeFreeList::increment_reference()
|
|||
lock_.lock();
|
||||
BLI_assert(in_free_queue_ == false);
|
||||
reference_count_++;
|
||||
referenced_by_workload_ = true;
|
||||
lock_.unlock();
|
||||
}
|
||||
|
||||
|
@ -450,6 +451,17 @@ void MTLSafeFreeList::decrement_reference()
|
|||
lock_.unlock();
|
||||
}
|
||||
|
||||
bool MTLSafeFreeList::should_flush()
|
||||
{
|
||||
/* We should only consider refreshing a list if it has been referenced by active workloads, and
|
||||
* contains a sufficient buffer count to avoid overheads associated with flushing the list. If
|
||||
* the reference count is only equal to 1, buffers may have been added, but no command
|
||||
* submissions will have been issued, hence buffers could be returned to the pool prematurely if
|
||||
* associated workload submission occurs later. */
|
||||
return ((reference_count_ > 1 || referenced_by_workload_) &&
|
||||
current_list_index_ > MIN_BUFFER_FLUSH_COUNT);
|
||||
}
|
||||
|
||||
/** \} */
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
|
|
Loading…
Reference in New Issue