Cycles: use GPU module for viewport display

To make GPU backends other than OpenGL work. Adds required pixel buffer and
fence objects to GPU module.

Authored by Apple: Michael Parkin-White

Ref T96261
Ref T92212

Reviewed By: fclem, brecht

Differential Revision: https://developer.blender.org/D16042
This commit is contained in:
Jason Fielder 2022-12-01 15:33:54 +01:00 committed by Brecht Van Lommel
parent b5ebc9bb24
commit b132e3b3ce
Notes: blender-bot 2024-02-13 12:59:20 +01:00
Referenced by issue #103556, Regression: Cycles viewport pixels change if the UI is updated in a certain way.
Referenced by issue #103101, Cycles Crash on random frame in Blender 3.4
Referenced by issue #102944, Cycles Viewport broken on 3.5.0 Alpha Master since 2022-12-01 build (MacBook Pro 15.4 Intel)
Referenced by issue #96261, Metal Viewport
Referenced by issue #92212, Cycles Metal device
Referenced by issue #114209, Dark bar in result when rendering large image with multiple samples (can crash, too)
26 changed files with 947 additions and 389 deletions

File diff suppressed because it is too large Load Diff

View File

@ -15,6 +15,10 @@
#include "util/unique_ptr.h"
#include "util/vector.h"
typedef struct GPUContext GPUContext;
typedef struct GPUFence GPUFence;
typedef struct GPUShader GPUShader;
CCL_NAMESPACE_BEGIN
/* Base class of shader used for display driver rendering. */
@ -29,7 +33,7 @@ class BlenderDisplayShader {
BlenderDisplayShader() = default;
virtual ~BlenderDisplayShader() = default;
virtual void bind(int width, int height) = 0;
virtual GPUShader *bind(int width, int height) = 0;
virtual void unbind() = 0;
/* Get attribute location for position and texture coordinate respectively.
@ -40,7 +44,7 @@ class BlenderDisplayShader {
protected:
/* Get program of this display shader.
* NOTE: The shader needs to be bound to have access to this. */
virtual uint get_shader_program() = 0;
virtual GPUShader *get_shader_program() = 0;
/* Cached values of various OpenGL resources. */
int position_attribute_location_ = -1;
@ -51,16 +55,16 @@ class BlenderDisplayShader {
* display space shader. */
class BlenderFallbackDisplayShader : public BlenderDisplayShader {
public:
virtual void bind(int width, int height) override;
virtual GPUShader *bind(int width, int height) override;
virtual void unbind() override;
protected:
virtual uint get_shader_program() override;
virtual GPUShader *get_shader_program() override;
void create_shader_if_needed();
void destroy_shader();
uint shader_program_ = 0;
GPUShader *shader_program_ = 0;
int image_texture_location_ = -1;
int fullscreen_location_ = -1;
@ -73,17 +77,17 @@ class BlenderDisplaySpaceShader : public BlenderDisplayShader {
public:
BlenderDisplaySpaceShader(BL::RenderEngine &b_engine, BL::Scene &b_scene);
virtual void bind(int width, int height) override;
virtual GPUShader *bind(int width, int height) override;
virtual void unbind() override;
protected:
virtual uint get_shader_program() override;
virtual GPUShader *get_shader_program() override;
BL::RenderEngine b_engine_;
BL::Scene &b_scene_;
/* Cached values of various OpenGL resources. */
uint shader_program_ = 0;
GPUShader *shader_program_ = nullptr;
};
/* Display driver implementation which is specific for Blender viewport integration. */
@ -122,6 +126,9 @@ class BlenderDisplayDriver : public DisplayDriver {
void gpu_context_lock();
void gpu_context_unlock();
/* Create GPU resources used by the display driver. */
bool gpu_resources_create();
/* Destroy all GPU resources which are being used by this object. */
void gpu_resources_destroy();
@ -137,8 +144,8 @@ class BlenderDisplayDriver : public DisplayDriver {
struct Tiles;
unique_ptr<Tiles> tiles_;
void *gl_render_sync_ = nullptr;
void *gl_upload_sync_ = nullptr;
GPUFence *gpu_render_sync_ = nullptr;
GPUFence *gpu_upload_sync_ = nullptr;
float2 zoom_ = make_float2(1.0f, 1.0f);
};

View File

@ -97,6 +97,7 @@ void GPU_shader_free(GPUShader *shader);
void GPU_shader_bind(GPUShader *shader);
void GPU_shader_unbind(void);
GPUShader *GPU_shader_get_bound(void);
const char *GPU_shader_get_name(GPUShader *shader);

View File

@ -8,6 +8,9 @@
#include "BLI_utildefines.h"
/** Opaque type hiding blender::gpu::Fence. */
typedef struct GPUFence GPUFence;
typedef enum eGPUWriteMask {
GPU_WRITE_NONE = 0,
GPU_WRITE_RED = (1 << 0),
@ -196,6 +199,11 @@ bool GPU_bgl_get(void);
void GPU_memory_barrier(eGPUBarrier barrier);
GPUFence *GPU_fence_create(void);
void GPU_fence_free(GPUFence *fence);
void GPU_fence_signal(GPUFence *fence);
void GPU_fence_wait(GPUFence *fence);
#ifdef __cplusplus
}
#endif

View File

@ -16,6 +16,9 @@ struct GPUVertBuf;
/** Opaque type hiding blender::gpu::Texture. */
typedef struct GPUTexture GPUTexture;
/** Opaque type hiding blender::gpu::PixelBuffer. */
typedef struct GPUPixelBuffer GPUPixelBuffer;
/**
* GPU Samplers state
* - Specify the sampler state to bind a texture with.
@ -284,6 +287,17 @@ void GPU_texture_update_sub(GPUTexture *tex,
int width,
int height,
int depth);
/* Update from API Buffer. */
void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *tex,
eGPUDataFormat data_format,
GPUPixelBuffer *pix_buf,
int offset_x,
int offset_y,
int offset_z,
int width,
int height,
int depth);
/**
* Makes data interpretation aware of the source layout.
* Skipping pixels correctly when changing rows when doing partial update.
@ -366,6 +380,15 @@ void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *size);
size_t GPU_texture_component_len(eGPUTextureFormat format);
size_t GPU_texture_dataformat_size(eGPUDataFormat data_format);
/* GPU Pixel Buffer. */
GPUPixelBuffer *GPU_pixel_buffer_create(uint size);
void GPU_pixel_buffer_free(GPUPixelBuffer *pix_buf);
void *GPU_pixel_buffer_map(GPUPixelBuffer *pix_buf);
void GPU_pixel_buffer_unmap(GPUPixelBuffer *pix_buf);
uint GPU_pixel_buffer_size(GPUPixelBuffer *pix_buf);
int64_t GPU_pixel_buffer_get_native_handle(GPUPixelBuffer *pix_buf);
#ifdef __cplusplus
}
#endif

View File

@ -18,8 +18,10 @@ class Context;
class Batch;
class DrawList;
class Fence;
class FrameBuffer;
class IndexBuf;
class PixelBuffer;
class QueryPool;
class Shader;
class Texture;
@ -42,8 +44,10 @@ class GPUBackend {
virtual Batch *batch_alloc() = 0;
virtual DrawList *drawlist_alloc(int list_length) = 0;
virtual Fence *fence_alloc() = 0;
virtual FrameBuffer *framebuffer_alloc(const char *name) = 0;
virtual IndexBuf *indexbuf_alloc() = 0;
virtual PixelBuffer *pixelbuf_alloc(uint size) = 0;
virtual QueryPool *querypool_alloc() = 0;
virtual Shader *shader_alloc(const char *name) = 0;
virtual Texture *texture_alloc(const char *name) = 0;

View File

@ -527,6 +527,15 @@ void GPU_shader_unbind()
#endif
}
GPUShader *GPU_shader_get_bound()
{
  /* The bound shader is tracked on the active GPU context; without an active
   * context there can be no bound shader. */
  Context *ctx = Context::get();
  return ctx ? wrap(ctx->shader) : nullptr;
}
/** \} */
/* -------------------------------------------------------------------- */

View File

@ -16,6 +16,7 @@
#include "GPU_state.h"
#include "gpu_backend.hh"
#include "gpu_context_private.hh"
#include "gpu_state_private.hh"
@ -373,6 +374,27 @@ void GPU_memory_barrier(eGPUBarrier barrier)
Context::get()->state_manager->issue_barrier(barrier);
}
/* Allocate a backend-specific fence and hand it out as the opaque C type. */
GPUFence *GPU_fence_create()
{
  return wrap(GPUBackend::get()->fence_alloc());
}

void GPU_fence_free(GPUFence *fence)
{
  Fence *impl = unwrap(fence);
  delete impl;
}

/* Insert a signal for this fence into the GPU command stream. */
void GPU_fence_signal(GPUFence *fence)
{
  Fence *impl = unwrap(fence);
  impl->signal();
}

/* Wait until the most recent signal for this fence has been reached. */
void GPU_fence_wait(GPUFence *fence)
{
  Fence *impl = unwrap(fence);
  impl->wait();
}
/** \} */
/* -------------------------------------------------------------------- */

View File

@ -160,5 +160,34 @@ class StateManager {
virtual void texture_unpack_row_length_set(uint len) = 0;
};
/**
* GPUFence.
*/
class Fence {
protected:
bool signalled_ = false;
public:
Fence(){};
virtual ~Fence(){};
virtual void signal() = 0;
virtual void wait() = 0;
};
/* Syntactic sugar. */
static inline GPUFence *wrap(Fence *pixbuf)
{
return reinterpret_cast<GPUFence *>(pixbuf);
}
static inline Fence *unwrap(GPUFence *pixbuf)
{
return reinterpret_cast<Fence *>(pixbuf);
}
static inline const Fence *unwrap(const GPUFence *pixbuf)
{
return reinterpret_cast<const Fence *>(pixbuf);
}
} // namespace gpu
} // namespace blender

View File

@ -455,6 +455,21 @@ void GPU_texture_update_sub(GPUTexture *tex,
reinterpret_cast<Texture *>(tex)->update_sub(0, offset, extent, data_format, pixels);
}
void GPU_texture_update_sub_from_pixel_buffer(GPUTexture *tex,
                                              eGPUDataFormat data_format,
                                              GPUPixelBuffer *pix_buf,
                                              int offset_x,
                                              int offset_y,
                                              int offset_z,
                                              int width,
                                              int height,
                                              int depth)
{
  /* Forward to the backend texture implementation, using the pixel buffer as
   * the source of the texel data. */
  int update_offset[3] = {offset_x, offset_y, offset_z};
  int update_extent[3] = {width, height, depth};
  reinterpret_cast<Texture *>(tex)->update_sub(update_offset, update_extent, data_format, pix_buf);
}
void *GPU_texture_read(GPUTexture *tex_, eGPUDataFormat data_format, int miplvl)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
@ -824,6 +839,53 @@ void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *r_size)
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPU Pixel Buffer
*
* Pixel buffer utility functions.
* \{ */
GPUPixelBuffer *GPU_pixel_buffer_create(uint size)
{
  /* Round the requested size up to a multiple of 256 bytes, the alignment
   * required for copying data between buffers and textures. As specified in:
   * https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
   *
   * The rounding also guarantees a non-zero minimum size on every platform,
   * which covers small-sized textures and avoids zero-sized buffers. */
  const uint aligned_size = ceil_to_multiple_ul(size, 256);
  return wrap(GPUBackend::get()->pixelbuf_alloc(aligned_size));
}
void GPU_pixel_buffer_free(GPUPixelBuffer *pix_buf)
{
  delete unwrap(pix_buf);
}

/* Map the buffer for CPU access. Pair with #GPU_pixel_buffer_unmap. */
void *GPU_pixel_buffer_map(GPUPixelBuffer *pix_buf)
{
  return unwrap(pix_buf)->map();
}

void GPU_pixel_buffer_unmap(GPUPixelBuffer *pix_buf)
{
  unwrap(pix_buf)->unmap();
}

/* Size in bytes of the buffer's storage. */
uint GPU_pixel_buffer_size(GPUPixelBuffer *pix_buf)
{
  return unwrap(pix_buf)->get_size();
}

/* Backend-specific handle, e.g. the GL buffer name or the Metal buffer pointer. */
int64_t GPU_pixel_buffer_get_native_handle(GPUPixelBuffer *pix_buf)
{
  return unwrap(pix_buf)->get_native_handle();
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPU Sampler Objects
*

View File

@ -129,6 +129,10 @@ class Texture {
virtual void update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data) = 0;
virtual void update_sub(int offset[3],
int extent[3],
eGPUDataFormat format,
GPUPixelBuffer *pixbuf) = 0;
/* TODO(fclem): Legacy. Should be removed at some point. */
virtual uint gl_bindcode_get() const = 0;
@ -264,6 +268,35 @@ static inline const Texture *unwrap(const GPUTexture *vert)
return reinterpret_cast<const Texture *>(vert);
}
/* GPU pixel Buffer. */

/**
 * CPU-mappable staging storage that textures can be updated from
 * (see the #Texture::update_sub overload taking a #GPUPixelBuffer).
 */
class PixelBuffer {
 protected:
  /* Size of the buffer in bytes, as passed at creation time. */
  uint size_ = 0;

 public:
  explicit PixelBuffer(uint size) : size_(size) {}
  virtual ~PixelBuffer() = default;

  virtual void *map() = 0;
  virtual void unmap() = 0;
  /* Backend-specific handle (e.g. GL buffer name or Metal buffer pointer). */
  virtual int64_t get_native_handle() = 0;
  virtual uint get_size() = 0;
};

/* Syntactic sugar. */
static inline GPUPixelBuffer *wrap(PixelBuffer *pixbuf)
{
  return reinterpret_cast<GPUPixelBuffer *>(pixbuf);
}
static inline PixelBuffer *unwrap(GPUPixelBuffer *pixbuf)
{
  return reinterpret_cast<PixelBuffer *>(pixbuf);
}
static inline const PixelBuffer *unwrap(const GPUPixelBuffer *pixbuf)
{
  return reinterpret_cast<const PixelBuffer *>(pixbuf);
}
#undef DEBUG_NAME_LEN
inline size_t to_bytesize(eGPUTextureFormat format)
@ -405,6 +438,8 @@ inline size_t to_bytesize(eGPUDataFormat data_format)
switch (data_format) {
case GPU_DATA_UBYTE:
return 1;
case GPU_DATA_HALF_FLOAT:
return 2;
case GPU_DATA_FLOAT:
case GPU_DATA_INT:
case GPU_DATA_UINT:

View File

@ -66,8 +66,10 @@ class MTLBackend : public GPUBackend {
Context *context_alloc(void *ghost_window, void *ghost_context) override;
Batch *batch_alloc() override;
DrawList *drawlist_alloc(int list_length) override;
Fence *fence_alloc() override;
FrameBuffer *framebuffer_alloc(const char *name) override;
IndexBuf *indexbuf_alloc() override;
PixelBuffer *pixelbuf_alloc(uint size) override;
QueryPool *querypool_alloc() override;
Shader *shader_alloc(const char *name) override;
Texture *texture_alloc(const char *name) override;

View File

@ -55,6 +55,11 @@ DrawList *MTLBackend::drawlist_alloc(int list_length)
return new MTLDrawList(list_length);
};
/* Allocate a Metal fence (MTLEvent-based synchronization primitive). */
Fence *MTLBackend::fence_alloc()
{
  return new MTLFence();
};
FrameBuffer *MTLBackend::framebuffer_alloc(const char *name)
{
MTLContext *mtl_context = static_cast<MTLContext *>(
@ -67,6 +72,11 @@ IndexBuf *MTLBackend::indexbuf_alloc()
return new MTLIndexBuf();
};
/* Allocate a pixel buffer backed by an MTLBuffer of the given byte size. */
PixelBuffer *MTLBackend::pixelbuf_alloc(uint size)
{
  return new MTLPixelBuffer(size);
};
QueryPool *MTLBackend::querypool_alloc()
{
return new MTLQueryPool();

View File

@ -538,6 +538,24 @@ bool MTLCommandBufferManager::insert_memory_barrier(eGPUBarrier barrier_bits,
return false;
}
/* Encode a signal of `event` with `signal_value` into the active command buffer. */
void MTLCommandBufferManager::encode_signal_event(id<MTLEvent> event, uint64_t signal_value)
{
  /* Ensure active command buffer. */
  id<MTLCommandBuffer> cmd_buf = this->ensure_begin();
  BLI_assert(cmd_buf);
  /* End any open encoder first: the event is encoded at command-buffer level. */
  this->end_active_command_encoder();
  [cmd_buf encodeSignalEvent:event value:signal_value];
}

/* Encode a wait until `event` reaches `signal_value` into the active command buffer. */
void MTLCommandBufferManager::encode_wait_for_event(id<MTLEvent> event, uint64_t signal_value)
{
  /* Ensure active command buffer. */
  id<MTLCommandBuffer> cmd_buf = this->ensure_begin();
  BLI_assert(cmd_buf);
  /* End any open encoder first: the wait is encoded at command-buffer level. */
  this->end_active_command_encoder();
  [cmd_buf encodeWaitForEvent:event value:signal_value];
}
/** \} */
/* -------------------------------------------------------------------- */

View File

@ -555,6 +555,8 @@ class MTLCommandBufferManager {
bool insert_memory_barrier(eGPUBarrier barrier_bits,
eGPUStageBarrierBits before_stages,
eGPUStageBarrierBits after_stages);
void encode_signal_event(id<MTLEvent> event, uint64_t value);
void encode_wait_for_event(id<MTLEvent> event, uint64_t value);
/* TODO(Metal): Support fences in command buffer class. */
/* Debug. */

View File

@ -84,4 +84,24 @@ class MTLStateManager : public StateManager {
MEM_CXX_CLASS_ALLOC_FUNCS("MTLStateManager")
};
/* Fence synchronization primitive. */
class MTLFence : public Fence {
 private:
  /* Using an event in this instance, as this is global for the command stream, rather than being
   * inserted at the encoder level. This has the behaviour to match the GL functionality. */
  id<MTLEvent> mtl_event_ = nil;

  /* Events can be re-used multiple times. We can track a counter flagging the latest value
   * signalled. */
  uint64_t last_signalled_value_ = 0;

 public:
  MTLFence() = default;
  ~MTLFence() override;

  void signal() override;
  void wait() override;

  MEM_CXX_CLASS_ALLOC_FUNCS("MTLFence")
};
} // namespace blender::gpu

View File

@ -560,6 +560,44 @@ void MTLStateManager::issue_barrier(eGPUBarrier barrier_bits)
ctx->main_command_buffer.insert_memory_barrier(barrier_bits, before_stages, after_stages);
}
MTLFence::~MTLFence()
{
  /* Release the lazily-created MTLEvent, if signal() was ever called. */
  if (mtl_event_) {
    [mtl_event_ release];
    mtl_event_ = nil;
  }
}
void MTLFence::signal()
{
  /* Fetch the context once; it is needed both for lazy event creation and for
   * encoding the signal (the original fetched and asserted it twice). */
  MTLContext *ctx = MTLContext::get();
  BLI_assert(ctx);

  /* Lazily create the MTLEvent on first signal. */
  if (mtl_event_ == nil) {
    mtl_event_ = [ctx->device newEvent];
  }

  /* Encode a signal with a new, monotonically increasing value so the event
   * can be re-used across multiple signal/wait cycles. */
  ctx->main_command_buffer.encode_signal_event(mtl_event_, ++last_signalled_value_);

  signalled_ = true;
}
void MTLFence::wait()
{
  /* Do not attempt to wait if event has not yet been signalled for the first time. */
  if (mtl_event_ == nil) {
    return;
  }

  if (signalled_) {
    MTLContext *ctx = MTLContext::get();
    BLI_assert(ctx);

    /* Encode a wait for the most recently signalled event value, then clear the
     * flag so repeated waits on the same signal are not re-encoded. */
    ctx->main_command_buffer.encode_wait_for_event(mtl_event_, last_signalled_value_);
    signalled_ = false;
  }
}
/** \} */
/* -------------------------------------------------------------------- */

View File

@ -235,6 +235,10 @@ class MTLTexture : public Texture {
void update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat type, const void *data) override;
void update_sub(int offset[3],
int extent[3],
eGPUDataFormat format,
GPUPixelBuffer *pixbuf) override;
void generate_mipmap() override;
void copy_to(Texture *dst) override;
@ -424,6 +428,24 @@ class MTLTexture : public Texture {
MEM_CXX_CLASS_ALLOC_FUNCS("MTLTexture")
};
/* Pixel buffer backed by an MTLBuffer, used as a staging source for texture updates. */
class MTLPixelBuffer : public PixelBuffer {
 private:
  /* Backing Metal buffer; nil if allocation failed. */
  id<MTLBuffer> buffer_ = nil;

 public:
  MTLPixelBuffer(uint size);
  ~MTLPixelBuffer();

  void *map() override;
  void unmap() override;
  int64_t get_native_handle() override;
  uint get_size() override;

  /* Direct (non-owning) access to the underlying Metal buffer. */
  id<MTLBuffer> get_metal_buffer();

  MEM_CXX_CLASS_ALLOC_FUNCS("MTLPixelBuffer")
};
/* Utility */
MTLPixelFormat gpu_texture_format_to_metal(eGPUTextureFormat tex_format);
int get_mtl_format_bytesize(MTLPixelFormat tex_format);

View File

@ -885,6 +885,61 @@ void gpu::MTLTexture::update_sub(
}
}
void MTLTexture::update_sub(int offset[3],
                            int extent[3],
                            eGPUDataFormat format,
                            GPUPixelBuffer *pixbuf)
{
  /* Update texture from pixel buffer. */
  BLI_assert(validate_data_format(format_, format));
  BLI_assert(pixbuf != nullptr);

  /* Fetch pixel buffer metal buffer. */
  MTLPixelBuffer *mtl_pix_buf = static_cast<MTLPixelBuffer *>(unwrap(pixbuf));
  id<MTLBuffer> buffer = mtl_pix_buf->get_metal_buffer();
  BLI_assert(buffer != nil);
  if (buffer == nil) {
    return;
  }

  /* Ensure texture is ready. */
  this->ensure_baked();
  BLI_assert(texture_ != nil);

  /* Calculate dimensions.
   * NOTE: `to_bytesize` returns bytes, so this is bytes (not bits) per pixel;
   * the variable was previously misnamed `bits_per_pixel`. */
  int num_image_channels = to_component_len(format_);
  uint bytes_per_pixel = num_image_channels * to_bytesize(format);
  uint bytes_per_row = bytes_per_pixel * extent[0];
  uint bytes_per_image = bytes_per_row * extent[1];

  /* Currently only required for 2D textures. */
  if (type_ == GPU_TEXTURE_2D) {
    /* Create blit command encoder to copy data. */
    MTLContext *ctx = MTLContext::get();
    BLI_assert(ctx);

    id<MTLBlitCommandEncoder> blit_encoder = ctx->main_command_buffer.ensure_begin_blit_encoder();
    [blit_encoder copyFromBuffer:buffer
                    sourceOffset:0
               sourceBytesPerRow:bytes_per_row
             sourceBytesPerImage:bytes_per_image
                      sourceSize:MTLSizeMake(extent[0], extent[1], 1)
                       toTexture:texture_
                destinationSlice:0
                destinationLevel:0
               destinationOrigin:MTLOriginMake(offset[0], offset[1], 0)];

    /* Managed textures need an explicit GPU->CPU synchronization after the blit. */
    if (texture_.storageMode == MTLStorageModeManaged) {
      [blit_encoder synchronizeResource:texture_];
    }
  }
  else {
    BLI_assert(false);
  }
}
void gpu::MTLTexture::ensure_mipmaps(int miplvl)
{
@ -1797,4 +1852,74 @@ void gpu::MTLTexture::reset()
/** \} */
/* -------------------------------------------------------------------- */
/** \name Pixel Buffer
* \{ */
MTLPixelBuffer::MTLPixelBuffer(uint size) : PixelBuffer(size)
{
  MTLContext *ctx = MTLContext::get();
  BLI_assert(ctx);

  /* Ensure buffer satisfies the alignment of 256 bytes for copying
   * data between buffers and textures. As specified in:
   * https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf */
  BLI_assert(size >= 256);

  /* Shared storage on unified-memory devices needs no explicit flush;
   * otherwise managed storage is used and flushed in unmap(). */
  MTLResourceOptions resource_options = ([ctx->device hasUnifiedMemory]) ?
                                            MTLResourceStorageModeShared :
                                            MTLResourceStorageModeManaged;

  buffer_ = [ctx->device newBufferWithLength:size options:resource_options];
  BLI_assert(buffer_ != nil);
}
MTLPixelBuffer::~MTLPixelBuffer()
{
  if (buffer_) {
    [buffer_ release];
    buffer_ = nil;
  }
}

/* Return a CPU pointer to the buffer contents (nullptr if allocation failed). */
void *MTLPixelBuffer::map()
{
  if (buffer_ == nil) {
    return nullptr;
  }

  return [buffer_ contents];
}
void MTLPixelBuffer::unmap()
{
  if (buffer_ == nil) {
    return;
  }

  /* Ensure changes are synchronized. Compare the storage mode directly instead of
   * bit-testing `resourceOptions`: the storage mode is a multi-bit field within the
   * options, so a raw AND against `MTLResourceStorageModeManaged` can match other
   * modes whose encoding shares that bit. */
  if (buffer_.storageMode == MTLStorageModeManaged) {
    [buffer_ didModifyRange:NSMakeRange(0, size_)];
  }
}
int64_t MTLPixelBuffer::get_native_handle()
{
  if (buffer_ == nil) {
    return 0;
  }

  /* The "native handle" for Metal is the MTLBuffer pointer itself. */
  return reinterpret_cast<int64_t>(buffer_);
}

uint MTLPixelBuffer::get_size()
{
  return size_;
}

id<MTLBuffer> MTLPixelBuffer::get_metal_buffer()
{
  return buffer_;
}
/** \} */
} // namespace blender::gpu

View File

@ -76,6 +76,11 @@ class GLBackend : public GPUBackend {
return new GLDrawList(list_length);
};
/* Allocate an OpenGL fence (glFenceSync-based synchronization primitive). */
Fence *fence_alloc() override
{
  return new GLFence();
};
FrameBuffer *framebuffer_alloc(const char *name) override
{
return new GLFrameBuffer(name);
@ -86,6 +91,11 @@ class GLBackend : public GPUBackend {
return new GLIndexBuf();
};
/* Allocate a pixel buffer backed by a GL buffer object of the given byte size. */
PixelBuffer *pixelbuf_alloc(uint size) override
{
  return new GLPixelBuffer(size);
};
QueryPool *querypool_alloc() override
{
return new GLQueryPool();

View File

@ -641,6 +641,34 @@ void GLStateManager::issue_barrier(eGPUBarrier barrier_bits)
glMemoryBarrier(to_gl(barrier_bits));
}
GLFence::~GLFence()
{
  /* Delete any outstanding sync object. */
  if (gl_sync_ != 0) {
    glDeleteSync(gl_sync_);
    gl_sync_ = 0;
  }
}

void GLFence::signal()
{
  /* If fence is already signalled, create a newly signalled fence primitive. */
  if (gl_sync_) {
    glDeleteSync(gl_sync_);
  }

  /* The sync becomes signalled once all previously submitted GL commands complete. */
  gl_sync_ = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
  signalled_ = true;
}

void GLFence::wait()
{
  /* Do not wait if fence does not yet exist. */
  if (gl_sync_ == 0) {
    return;
  }

  /* Server-side wait: blocks the GL command stream, not the CPU. */
  glWaitSync(gl_sync_, 0, GL_TIMEOUT_IGNORED);
  signalled_ = false;
}
/** \} */
} // namespace blender::gpu

View File

@ -103,6 +103,21 @@ class GLStateManager : public StateManager {
MEM_CXX_CLASS_ALLOC_FUNCS("GLStateManager")
};
/* Fence synchronization primitive. */
class GLFence : public Fence {
private:
GLsync gl_sync_ = 0;
public:
GLFence() : Fence(){};
~GLFence();
void signal() override;
void wait() override;
MEM_CXX_CLASS_ALLOC_FUNCS("GLFence")
};
static inline GLbitfield to_gl(eGPUBarrier barrier_bits)
{
GLbitfield barrier = 0;

View File

@ -303,6 +303,42 @@ void GLTexture::update_sub(
has_pixels_ = true;
}
void GLTexture::update_sub(int offset[3],
int extent[3],
eGPUDataFormat format,
GPUPixelBuffer *pixbuf)
{
/* Update texture from pixel buffer. */
BLI_assert(validate_data_format(format_, format));
BLI_assert(pixbuf != nullptr);
const int dimensions = this->dimensions_count();
GLenum gl_format = to_gl_data_format(format_);
GLenum gl_type = to_gl(format);
/* Temporarily Bind texture. */
GLContext::state_manager_active_get()->texture_bind_temp(this);
/* Bind pixel buffer for source data. */
int pix_buf_handle = (int)GPU_pixel_buffer_get_native_handle(pixbuf);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pix_buf_handle);
switch (dimensions) {
default:
case 1:
glTexSubImage1D(target_, 0, offset[0], extent[0], gl_format, gl_type, 0);
break;
case 2:
glTexSubImage2D(target_, 0, UNPACK2(offset), UNPACK2(extent), gl_format, gl_type, 0);
break;
case 3:
glTexSubImage3D(target_, 0, UNPACK3(offset), UNPACK3(extent), gl_format, gl_type, 0);
break;
}
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
/**
* This will create the mipmap images and populate them with filtered data from base level.
*
@ -739,4 +775,63 @@ uint GLTexture::gl_bindcode_get() const
return tex_id_;
}
/* -------------------------------------------------------------------- */
/** \name Pixel Buffer
* \{ */
GLPixelBuffer::GLPixelBuffer(uint size) : PixelBuffer(size)
{
  glGenBuffers(1, &gl_id_);
  BLI_assert(gl_id_);

  if (!gl_id_) {
    return;
  }

  /* Ensure size is non-zero for pixel buffer backing storage creation.
   * NOTE: only the allocation is padded; `size_` keeps the requested size. */
  size = max_ii(size, 32);

  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, gl_id_);
  /* Pass nullptr as the data pointer so storage is allocated but left uninitialized. */
  glBufferData(GL_PIXEL_UNPACK_BUFFER, size, nullptr, GL_DYNAMIC_DRAW);
  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}
GLPixelBuffer::~GLPixelBuffer()
{
  if (!gl_id_) {
    return;
  }

  glDeleteBuffers(1, &gl_id_);
}

/* Map the buffer for CPU writes via the GL_PIXEL_UNPACK_BUFFER binding. */
void *GLPixelBuffer::map()
{
  if (!gl_id_) {
    BLI_assert(false);
    return nullptr;
  }

  /* NOTE: the buffer stays bound to GL_PIXEL_UNPACK_BUFFER until unmap(). */
  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, gl_id_);
  void *ptr = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
  BLI_assert(ptr);
  return ptr;
}

void GLPixelBuffer::unmap()
{
  glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
  glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}

int64_t GLPixelBuffer::get_native_handle()
{
  /* The GL buffer name acts as the native handle. */
  return (int64_t)gl_id_;
}

uint GLPixelBuffer::get_size()
{
  return size_;
}
/** \} */
} // namespace blender::gpu

View File

@ -46,6 +46,10 @@ class GLTexture : public Texture {
void update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat type, const void *data) override;
void update_sub(int offset[3],
int extent[3],
eGPUDataFormat format,
GPUPixelBuffer *pixbuf) override;
/**
* This will create the mipmap images and populate them with filtered data from base level.
@ -87,6 +91,22 @@ class GLTexture : public Texture {
MEM_CXX_CLASS_ALLOC_FUNCS("GLTexture")
};
/* Pixel buffer backed by a GL buffer object bound to GL_PIXEL_UNPACK_BUFFER. */
class GLPixelBuffer : public PixelBuffer {
 private:
  /* GL buffer name; 0 if creation failed. */
  GLuint gl_id_ = 0;

 public:
  GLPixelBuffer(uint size);
  ~GLPixelBuffer();

  void *map() override;
  void unmap() override;
  int64_t get_native_handle() override;
  uint get_size() override;

  MEM_CXX_CLASS_ALLOC_FUNCS("GLPixelBuffer")
};
inline GLenum to_gl_internal_format(eGPUTextureFormat format)
{
/* You can add any of the available type to this list
@ -282,6 +302,8 @@ inline GLenum to_gl(eGPUDataFormat format)
return GL_UNSIGNED_INT_2_10_10_10_REV;
case GPU_DATA_10_11_11_REV:
return GL_UNSIGNED_INT_10F_11F_11F_REV;
case GPU_DATA_HALF_FLOAT:
return GL_HALF_FLOAT;
default:
BLI_assert_msg(0, "Unhandled data format");
return GL_FLOAT;

View File

@ -18,6 +18,7 @@
struct BakeTargets;
struct BakePixel;
struct Depsgraph;
struct GPUContext;
struct Main;
struct Object;
struct Render;
@ -158,9 +159,12 @@ typedef struct RenderEngine {
void *update_render_passes_data;
/* GPU context. */
void *gpu_context;
void *wm_gpu_context; /* WindowManager GPU context -> GHOSTContext. */
ThreadMutex gpu_context_mutex;
bool use_drw_render_context;
struct GPUContext *gpu_context;
/* Whether to restore DRWState after RenderEngine display pass. */
bool gpu_restore_context;
} RenderEngine;
RenderEngine *RE_engine_create(RenderEngineType *type);

View File

@ -34,6 +34,8 @@
#include "DEG_depsgraph_debug.h"
#include "DEG_depsgraph_query.h"
#include "GPU_context.h"
#include "RNA_access.h"
#ifdef WITH_PYTHON
@ -1285,45 +1287,69 @@ bool RE_engine_gpu_context_create(RenderEngine *engine)
BLI_assert(BLI_thread_is_main());
const bool drw_state = DRW_opengl_context_release();
engine->gpu_context = WM_opengl_context_create();
engine->wm_gpu_context = WM_opengl_context_create();
/* On Windows an old context is restored after creation, and subsequent release of context
* generates a Win32 error. Harmless for users, but annoying to have possible misleading
* error prints in the console. */
#ifndef _WIN32
if (engine->gpu_context) {
WM_opengl_context_release(engine->gpu_context);
if (engine->wm_gpu_context) {
/* Activate new OpenGL Context for GPUContext creation. */
WM_opengl_context_activate(engine->wm_gpu_context);
/* Requires GPUContext for usage of GPU Module for displaying results. */
engine->gpu_context = GPU_context_create(nullptr, engine->wm_gpu_context);
GPU_context_active_set(nullptr);
/* Deactivate newly created OpenGL Context, as it is not needed until
* `RE_engine_gpu_context_enable` is called. */
WM_opengl_context_release(engine->wm_gpu_context);
}
else {
engine->gpu_context = nullptr;
}
#endif
DRW_opengl_context_activate(drw_state);
return engine->gpu_context != nullptr;
return engine->wm_gpu_context != nullptr;
}
void RE_engine_gpu_context_destroy(RenderEngine *engine)
{
if (!engine->gpu_context) {
if (!engine->wm_gpu_context) {
return;
}
const bool drw_state = DRW_opengl_context_release();
WM_opengl_context_activate(engine->gpu_context);
WM_opengl_context_dispose(engine->gpu_context);
WM_opengl_context_activate(engine->wm_gpu_context);
if (engine->gpu_context) {
GPUContext *restore_context = GPU_context_active_get();
GPU_context_active_set(engine->gpu_context);
GPU_context_discard(engine->gpu_context);
if (restore_context != engine->gpu_context) {
GPU_context_active_set(restore_context);
}
engine->gpu_context = nullptr;
}
WM_opengl_context_dispose(engine->wm_gpu_context);
DRW_opengl_context_activate(drw_state);
}
bool RE_engine_gpu_context_enable(RenderEngine *engine)
{
engine->gpu_restore_context = false;
if (engine->use_drw_render_context) {
DRW_render_context_enable(engine->re);
return true;
}
if (engine->gpu_context) {
if (engine->wm_gpu_context) {
BLI_mutex_lock(&engine->gpu_context_mutex);
WM_opengl_context_activate(engine->gpu_context);
/* If a previous OpenGL/GPUContext was active (DST.gpu_context), we should later restore this
* when disabling the RenderEngine context. */
engine->gpu_restore_context = DRW_opengl_context_release();
/* Activate RenderEngine OpenGL and GPU Context. */
WM_opengl_context_activate(engine->wm_gpu_context);
if (engine->gpu_context) {
GPU_context_active_set(engine->gpu_context);
GPU_render_begin();
}
return true;
}
return false;
@ -1335,8 +1361,14 @@ void RE_engine_gpu_context_disable(RenderEngine *engine)
DRW_render_context_disable(engine->re);
}
else {
if (engine->gpu_context) {
WM_opengl_context_release(engine->gpu_context);
if (engine->wm_gpu_context) {
if (engine->gpu_context) {
GPU_render_end();
GPU_context_active_set(nullptr);
}
WM_opengl_context_release(engine->wm_gpu_context);
/* Restore DRW state context if previously active. */
DRW_opengl_context_activate(engine->gpu_restore_context);
BLI_mutex_unlock(&engine->gpu_context_mutex);
}
}
@ -1348,7 +1380,7 @@ void RE_engine_gpu_context_lock(RenderEngine *engine)
/* Locking already handled by the draw manager. */
}
else {
if (engine->gpu_context) {
if (engine->wm_gpu_context) {
BLI_mutex_lock(&engine->gpu_context_mutex);
}
}
@ -1360,7 +1392,7 @@ void RE_engine_gpu_context_unlock(RenderEngine *engine)
/* Locking already handled by the draw manager. */
}
else {
if (engine->gpu_context) {
if (engine->wm_gpu_context) {
BLI_mutex_unlock(&engine->gpu_context_mutex);
}
}