Merge branch 'master' into sculpt-dev

Pablo Dobarro 2021-02-17 18:02:01 +01:00
commit 13952ecec1
50 changed files with 3138 additions and 256 deletions

View File

@ -192,7 +192,7 @@ static GPUTexture *blf_batch_cache_texture_load(void)
int remain_row = tex_width - offset_x;
int width = remain > remain_row ? remain_row : remain;
GPU_texture_update_sub(gc->texture,
GPU_DATA_UNSIGNED_BYTE,
GPU_DATA_UBYTE,
&gc->bitmap_result[bitmap_len_landed],
offset_x,
offset_y,

View File

@ -647,7 +647,7 @@ static void gpu_texture_update_scaled(GPUTexture *tex,
}
void *data = (ibuf->rect_float) ? (void *)(ibuf->rect_float) : (void *)(ibuf->rect);
eGPUDataFormat data_format = (ibuf->rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UNSIGNED_BYTE;
eGPUDataFormat data_format = (ibuf->rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
GPU_texture_update_sub(tex, data_format, data, x, y, layer, w, h, 1);
@ -673,7 +673,7 @@ static void gpu_texture_update_unscaled(GPUTexture *tex,
}
void *data = (rect_float) ? (void *)(rect_float + tex_offset) : (void *)(rect + tex_offset);
eGPUDataFormat data_format = (rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UNSIGNED_BYTE;
eGPUDataFormat data_format = (rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
/* Partial update without scaling. Stride and offset are used to copy only a
* subset of a possible larger buffer than what we are updating. */

View File

@ -59,7 +59,7 @@ ExecutionSystem::ExecutionSystem(RenderData *rd,
this->m_context.setQuality((CompositorQuality)editingtree->edit_quality);
}
this->m_context.setRendering(rendering);
this->m_context.setHasActiveOpenCLDevices(WorkScheduler::hasGPUDevices() &&
this->m_context.setHasActiveOpenCLDevices(WorkScheduler::has_gpu_devices() &&
(editingtree->flag & NTREE_COM_OPENCL));
this->m_context.setRenderData(rd);

View File

@ -44,38 +44,42 @@
# error COM_CURRENT_THREADING_MODEL No threading model selected
#endif
/** \brief list of all CPUDevices. for every hardware thread an instance of CPUDevice is created */
static vector<CPUDevice *> g_cpudevices;
static ThreadLocal(CPUDevice *) g_thread_device;
static struct {
/** \brief List of all CPUDevices; for every hardware thread an instance of CPUDevice is created. */
vector<CPUDevice *> cpu_devices;
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
/** \brief list of all thread for every CPUDevice in cpudevices a thread exists. */
static ListBase g_cputhreads;
static bool g_cpuInitialized = false;
/** \brief all scheduled work for the cpu */
static ThreadQueue *g_cpuqueue;
static ThreadQueue *g_gpuqueue;
/** \brief List of all threads; for every CPUDevice in cpu_devices a thread exists. */
ListBase cpu_threads;
bool cpu_initialized = false;
/** \brief All scheduled work for the CPU. */
ThreadQueue *cpu_queue;
ThreadQueue *gpu_queue;
# ifdef COM_OPENCL_ENABLED
static cl_context g_context;
static cl_program g_program;
/** \brief list of all OpenCLDevices. for every OpenCL GPU device an instance of OpenCLDevice is
* created. */
static vector<OpenCLDevice *> g_gpudevices;
/** \brief list of all thread for every GPUDevice in cpudevices a thread exists. */
static ListBase g_gputhreads;
/** \brief all scheduled work for the GPU. */
static bool g_openclActive = false;
static bool g_openclInitialized = false;
cl_context opencl_context;
cl_program opencl_program;
/** \brief List of all OpenCLDevices; for every OpenCL GPU device an instance of OpenCLDevice
* is created. */
vector<OpenCLDevice *> gpu_devices;
/** \brief List of all threads; for every GPUDevice in gpu_devices a thread exists. */
ListBase gpu_threads;
/** \brief Whether OpenCL devices are active and receive scheduled GPU work. */
bool opencl_active = false;
bool opencl_initialized = false;
# endif
#endif
} g_work_scheduler;
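The hunk above replaces a set of loose `g_`-prefixed file statics with a single file-static struct instance, so every access reads `g_work_scheduler.<field>` and ownership of the scheduler state is explicit. A minimal sketch of the pattern, using a field subset taken from the commit (the explicit initializers are an assumption; the commit relies on zero-initialization of statics):

    /* Hedged sketch of the grouping pattern used above. */
    static struct {
      ThreadQueue *cpu_queue; /* All scheduled CPU work. */
      bool cpu_initialized;   /* Set once the CPU devices exist. */
    } g_state_example = {NULL, false};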
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
void *WorkScheduler::thread_execute_cpu(void *data)
{
CPUDevice *device = (CPUDevice *)data;
WorkPackage *work;
BLI_thread_local_set(g_thread_device, device);
while ((work = (WorkPackage *)BLI_thread_queue_pop(g_cpuqueue))) {
while ((work = (WorkPackage *)BLI_thread_queue_pop(g_work_scheduler.cpu_queue))) {
device->execute(work);
delete work;
}
@ -88,7 +92,7 @@ void *WorkScheduler::thread_execute_gpu(void *data)
Device *device = (Device *)data;
WorkPackage *work;
while ((work = (WorkPackage *)BLI_thread_queue_pop(g_gpuqueue))) {
while ((work = (WorkPackage *)BLI_thread_queue_pop(g_work_scheduler.gpu_queue))) {
device->execute(work);
delete work;
}
@ -106,14 +110,14 @@ void WorkScheduler::schedule(ExecutionGroup *group, int chunkNumber)
delete package;
#elif COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
# ifdef COM_OPENCL_ENABLED
if (group->isOpenCL() && g_openclActive) {
BLI_thread_queue_push(g_gpuqueue, package);
if (group->isOpenCL() && g_work_scheduler.opencl_active) {
BLI_thread_queue_push(g_work_scheduler.gpu_queue, package);
}
else {
BLI_thread_queue_push(g_cpuqueue, package);
BLI_thread_queue_push(g_work_scheduler.cpu_queue, package);
}
# else
BLI_thread_queue_push(g_cpuqueue, package);
BLI_thread_queue_push(g_work_scheduler.cpu_queue, package);
# endif
#endif
}
@ -122,24 +126,26 @@ void WorkScheduler::start(CompositorContext &context)
{
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
unsigned int index;
g_cpuqueue = BLI_thread_queue_init();
BLI_threadpool_init(&g_cputhreads, thread_execute_cpu, g_cpudevices.size());
for (index = 0; index < g_cpudevices.size(); index++) {
Device *device = g_cpudevices[index];
BLI_threadpool_insert(&g_cputhreads, device);
g_work_scheduler.cpu_queue = BLI_thread_queue_init();
BLI_threadpool_init(
&g_work_scheduler.cpu_threads, thread_execute_cpu, g_work_scheduler.cpu_devices.size());
for (index = 0; index < g_work_scheduler.cpu_devices.size(); index++) {
Device *device = g_work_scheduler.cpu_devices[index];
BLI_threadpool_insert(&g_work_scheduler.cpu_threads, device);
}
# ifdef COM_OPENCL_ENABLED
if (context.getHasActiveOpenCLDevices()) {
g_gpuqueue = BLI_thread_queue_init();
BLI_threadpool_init(&g_gputhreads, thread_execute_gpu, g_gpudevices.size());
for (index = 0; index < g_gpudevices.size(); index++) {
Device *device = g_gpudevices[index];
BLI_threadpool_insert(&g_gputhreads, device);
g_work_scheduler.gpu_queue = BLI_thread_queue_init();
BLI_threadpool_init(
&g_work_scheduler.gpu_threads, thread_execute_gpu, g_work_scheduler.gpu_devices.size());
for (index = 0; index < g_work_scheduler.gpu_devices.size(); index++) {
Device *device = g_work_scheduler.gpu_devices[index];
BLI_threadpool_insert(&g_work_scheduler.gpu_threads, device);
}
g_openclActive = true;
g_work_scheduler.opencl_active = true;
}
else {
g_openclActive = false;
g_work_scheduler.opencl_active = false;
}
# endif
#endif
@ -148,12 +154,12 @@ void WorkScheduler::finish()
{
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
# ifdef COM_OPENCL_ENABLED
if (g_openclActive) {
BLI_thread_queue_wait_finish(g_gpuqueue);
BLI_thread_queue_wait_finish(g_cpuqueue);
if (g_work_scheduler.opencl_active) {
BLI_thread_queue_wait_finish(g_work_scheduler.gpu_queue);
BLI_thread_queue_wait_finish(g_work_scheduler.cpu_queue);
}
else {
BLI_thread_queue_wait_finish(g_cpuqueue);
BLI_thread_queue_wait_finish(g_work_scheduler.cpu_queue);
}
# else
BLI_thread_queue_wait_finish(g_work_scheduler.cpu_queue);
@ -163,26 +169,26 @@ void WorkScheduler::finish()
void WorkScheduler::stop()
{
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
BLI_thread_queue_nowait(g_cpuqueue);
BLI_threadpool_end(&g_cputhreads);
BLI_thread_queue_free(g_cpuqueue);
g_cpuqueue = nullptr;
BLI_thread_queue_nowait(g_work_scheduler.cpu_queue);
BLI_threadpool_end(&g_work_scheduler.cpu_threads);
BLI_thread_queue_free(g_work_scheduler.cpu_queue);
g_work_scheduler.cpu_queue = nullptr;
# ifdef COM_OPENCL_ENABLED
if (g_openclActive) {
BLI_thread_queue_nowait(g_gpuqueue);
BLI_threadpool_end(&g_gputhreads);
BLI_thread_queue_free(g_gpuqueue);
g_gpuqueue = nullptr;
if (g_work_scheduler.opencl_active) {
BLI_thread_queue_nowait(g_work_scheduler.gpu_queue);
BLI_threadpool_end(&g_work_scheduler.gpu_threads);
BLI_thread_queue_free(g_work_scheduler.gpu_queue);
g_work_scheduler.gpu_queue = nullptr;
}
# endif
#endif
}
bool WorkScheduler::hasGPUDevices()
bool WorkScheduler::has_gpu_devices()
{
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
# ifdef COM_OPENCL_ENABLED
return !g_gpudevices.empty();
return !g_work_scheduler.gpu_devices.empty();
# else
return false;
# endif
@ -205,37 +211,37 @@ void WorkScheduler::initialize(bool use_opencl, int num_cpu_threads)
{
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
/* deinitialize if number of threads doesn't match */
if (g_cpudevices.size() != num_cpu_threads) {
if (g_work_scheduler.cpu_devices.size() != num_cpu_threads) {
Device *device;
while (!g_cpudevices.empty()) {
device = g_cpudevices.back();
g_cpudevices.pop_back();
while (!g_work_scheduler.cpu_devices.empty()) {
device = g_work_scheduler.cpu_devices.back();
g_work_scheduler.cpu_devices.pop_back();
device->deinitialize();
delete device;
}
if (g_cpuInitialized) {
if (g_work_scheduler.cpu_initialized) {
BLI_thread_local_delete(g_thread_device);
}
g_cpuInitialized = false;
g_work_scheduler.cpu_initialized = false;
}
/* initialize CPU threads */
if (!g_cpuInitialized) {
if (!g_work_scheduler.cpu_initialized) {
for (int index = 0; index < num_cpu_threads; index++) {
CPUDevice *device = new CPUDevice(index);
device->initialize();
g_cpudevices.push_back(device);
g_work_scheduler.cpu_devices.push_back(device);
}
BLI_thread_local_create(g_thread_device);
g_cpuInitialized = true;
g_work_scheduler.cpu_initialized = true;
}
# ifdef COM_OPENCL_ENABLED
/* Initialize OpenCL GPUs. */
if (use_opencl && !g_openclInitialized) {
g_context = nullptr;
g_program = nullptr;
if (use_opencl && !g_work_scheduler.opencl_initialized) {
g_work_scheduler.opencl_context = nullptr;
g_work_scheduler.opencl_program = nullptr;
/* This will check for errors and skip if already initialized. */
if (clewInit() != CLEW_SUCCESS) {
@ -270,26 +276,40 @@ void WorkScheduler::initialize(bool use_opencl, int num_cpu_threads)
sizeof(cl_device_id) * numberOfDevices, __func__);
clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numberOfDevices, cldevices, nullptr);
g_context = clCreateContext(
g_work_scheduler.opencl_context = clCreateContext(
nullptr, numberOfDevices, cldevices, clContextError, nullptr, &error);
if (error != CL_SUCCESS) {
printf("CLERROR[%d]: %s\n", error, clewErrorString(error));
}
const char *cl_str[2] = {datatoc_COM_OpenCLKernels_cl, nullptr};
g_program = clCreateProgramWithSource(g_context, 1, cl_str, nullptr, &error);
error = clBuildProgram(g_program, numberOfDevices, cldevices, nullptr, nullptr, nullptr);
g_work_scheduler.opencl_program = clCreateProgramWithSource(
g_work_scheduler.opencl_context, 1, cl_str, nullptr, &error);
error = clBuildProgram(g_work_scheduler.opencl_program,
numberOfDevices,
cldevices,
nullptr,
nullptr,
nullptr);
if (error != CL_SUCCESS) {
cl_int error2;
size_t ret_val_size = 0;
printf("CLERROR[%d]: %s\n", error, clewErrorString(error));
error2 = clGetProgramBuildInfo(
g_program, cldevices[0], CL_PROGRAM_BUILD_LOG, 0, nullptr, &ret_val_size);
error2 = clGetProgramBuildInfo(g_work_scheduler.opencl_program,
cldevices[0],
CL_PROGRAM_BUILD_LOG,
0,
nullptr,
&ret_val_size);
if (error2 != CL_SUCCESS) {
printf("CLERROR[%d]: %s\n", error, clewErrorString(error));
}
char *build_log = (char *)MEM_mallocN(sizeof(char) * ret_val_size + 1, __func__);
error2 = clGetProgramBuildInfo(
g_program, cldevices[0], CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, nullptr);
error2 = clGetProgramBuildInfo(g_work_scheduler.opencl_program,
cldevices[0],
CL_PROGRAM_BUILD_LOG,
ret_val_size,
build_log,
nullptr);
if (error2 != CL_SUCCESS) {
printf("CLERROR[%d]: %s\n", error, clewErrorString(error));
}
@ -307,9 +327,12 @@ void WorkScheduler::initialize(bool use_opencl, int num_cpu_threads)
if (error2 != CL_SUCCESS) {
printf("CLERROR[%d]: %s\n", error2, clewErrorString(error2));
}
OpenCLDevice *clDevice = new OpenCLDevice(g_context, device, g_program, vendorID);
OpenCLDevice *clDevice = new OpenCLDevice(g_work_scheduler.opencl_context,
device,
g_work_scheduler.opencl_program,
vendorID);
clDevice->initialize();
g_gpudevices.push_back(clDevice);
g_work_scheduler.gpu_devices.push_back(clDevice);
}
}
MEM_freeN(cldevices);
@ -317,7 +340,7 @@ void WorkScheduler::initialize(bool use_opencl, int num_cpu_threads)
MEM_freeN(platforms);
}
g_openclInitialized = true;
g_work_scheduler.opencl_initialized = true;
}
# endif
#endif
@ -327,38 +350,38 @@ void WorkScheduler::deinitialize()
{
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
/* deinitialize CPU threads */
if (g_cpuInitialized) {
if (g_work_scheduler.cpu_initialized) {
Device *device;
while (!g_cpudevices.empty()) {
device = g_cpudevices.back();
g_cpudevices.pop_back();
while (!g_work_scheduler.cpu_devices.empty()) {
device = g_work_scheduler.cpu_devices.back();
g_work_scheduler.cpu_devices.pop_back();
device->deinitialize();
delete device;
}
BLI_thread_local_delete(g_thread_device);
g_cpuInitialized = false;
g_work_scheduler.cpu_initialized = false;
}
# ifdef COM_OPENCL_ENABLED
/* Deinitialize OpenCL GPUs. */
if (g_openclInitialized) {
if (g_work_scheduler.opencl_initialized) {
Device *device;
while (!g_gpudevices.empty()) {
device = g_gpudevices.back();
g_gpudevices.pop_back();
while (!g_work_scheduler.gpu_devices.empty()) {
device = g_work_scheduler.gpu_devices.back();
g_work_scheduler.gpu_devices.pop_back();
device->deinitialize();
delete device;
}
if (g_program) {
clReleaseProgram(g_program);
g_program = nullptr;
if (g_work_scheduler.opencl_program) {
clReleaseProgram(g_work_scheduler.opencl_program);
g_work_scheduler.opencl_program = nullptr;
}
if (g_context) {
clReleaseContext(g_context);
g_context = nullptr;
if (g_work_scheduler.opencl_context) {
clReleaseContext(g_work_scheduler.opencl_context);
g_work_scheduler.opencl_context = nullptr;
}
g_openclInitialized = false;
g_work_scheduler.opencl_initialized = false;
}
# endif
#endif

View File

@ -29,14 +29,9 @@
/** \brief the workscheduler
* \ingroup execution
*/
class WorkScheduler {
struct WorkScheduler {
#if COM_CURRENT_THREADING_MODEL == COM_TM_QUEUE
/**
* \brief are we being stopped.
*/
static bool isStopping();
/**
* \brief main thread loop for cpudevices
* inside this loop new work is queried and being executed
@ -107,7 +102,7 @@ class WorkScheduler {
* A node can generate a different operation tree when OpenCLDevices exists.
* \see CompositorContext.getHasActiveOpenCLDevices
*/
static bool hasGPUDevices();
static bool has_gpu_devices();
static int current_thread_id();

View File

@ -403,7 +403,7 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
if (lcache->grid_tx.tex == NULL) {
lcache->grid_tx.tex = GPU_texture_create_2d_array(
"lightcache_irradiance", UNPACK3(lcache->grid_tx.tex_size), 1, IRRADIANCE_FORMAT, NULL);
GPU_texture_update(lcache->grid_tx.tex, GPU_DATA_UNSIGNED_BYTE, lcache->grid_tx.data);
GPU_texture_update(lcache->grid_tx.tex, GPU_DATA_UBYTE, lcache->grid_tx.data);
if (lcache->grid_tx.tex == NULL) {
lcache->flag |= LIGHTCACHE_NOT_USABLE;
@ -470,7 +470,7 @@ bool EEVEE_lightcache_load(LightCache *lcache)
static void eevee_lightbake_readback_irradiance(LightCache *lcache)
{
MEM_SAFE_FREE(lcache->grid_tx.data);
lcache->grid_tx.data = GPU_texture_read(lcache->grid_tx.tex, GPU_DATA_UNSIGNED_BYTE, 0);
lcache->grid_tx.data = GPU_texture_read(lcache->grid_tx.tex, GPU_DATA_UBYTE, 0);
lcache->grid_tx.data_type = LIGHTCACHETEX_BYTE;
lcache->grid_tx.components = 4;
}

View File

@ -58,11 +58,11 @@ void GPENCIL_antialiasing_init(struct GPENCIL_Data *vedata)
if (txl->smaa_search_tx == NULL) {
txl->smaa_search_tx = GPU_texture_create_2d(
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, NULL);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UNSIGNED_BYTE, searchTexBytes);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UBYTE, searchTexBytes);
txl->smaa_area_tx = GPU_texture_create_2d(
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, NULL);
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UNSIGNED_BYTE, areaTexBytes);
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UBYTE, areaTexBytes);
GPU_texture_filter_mode(txl->smaa_search_tx, true);
GPU_texture_filter_mode(txl->smaa_area_tx, true);

View File

@ -244,11 +244,11 @@ void workbench_antialiasing_engine_init(WORKBENCH_Data *vedata)
if (txl->smaa_search_tx == NULL) {
txl->smaa_search_tx = GPU_texture_create_2d(
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, NULL);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UNSIGNED_BYTE, searchTexBytes);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UBYTE, searchTexBytes);
txl->smaa_area_tx = GPU_texture_create_2d(
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, NULL);
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UNSIGNED_BYTE, areaTexBytes);
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UBYTE, areaTexBytes);
GPU_texture_filter_mode(txl->smaa_search_tx, true);
GPU_texture_filter_mode(txl->smaa_area_tx, true);

View File

@ -91,7 +91,7 @@ uint *DRW_select_buffer_read(struct Depsgraph *depsgraph,
BLI_rcti_size_y(&rect_clamp),
1,
0,
GPU_DATA_UNSIGNED_INT,
GPU_DATA_UINT,
r_buf);
if (!BLI_rcti_compare(rect, &rect_clamp)) {

View File

@ -723,7 +723,7 @@ static bool gpencil_render_offscreen(tGPDfill *tgpf)
GPU_offscreen_read_pixels(offscreen, GPU_DATA_FLOAT, ibuf->rect_float);
}
else if (ibuf->rect) {
GPU_offscreen_read_pixels(offscreen, GPU_DATA_UNSIGNED_BYTE, ibuf->rect);
GPU_offscreen_read_pixels(offscreen, GPU_DATA_UBYTE, ibuf->rect);
}
if (ibuf->rect_float && ibuf->rect) {
IMB_rect_from_float(ibuf);

View File

@ -889,15 +889,15 @@ void UI_icons_reload_internal_textures(void)
icongltex.invh = 1.0f / b32buf->y;
icongltex.tex[0] = GPU_texture_create_2d("icons", b32buf->x, b32buf->y, 2, GPU_RGBA8, NULL);
GPU_texture_update_mipmap(icongltex.tex[0], 0, GPU_DATA_UNSIGNED_BYTE, b32buf->rect);
GPU_texture_update_mipmap(icongltex.tex[0], 1, GPU_DATA_UNSIGNED_BYTE, b16buf->rect);
GPU_texture_update_mipmap(icongltex.tex[0], 0, GPU_DATA_UBYTE, b32buf->rect);
GPU_texture_update_mipmap(icongltex.tex[0], 1, GPU_DATA_UBYTE, b16buf->rect);
}
if (need_icons_with_border && icongltex.tex[1] == NULL) {
icongltex.tex[1] = GPU_texture_create_2d(
"icons_border", b32buf_border->x, b32buf_border->y, 2, GPU_RGBA8, NULL);
GPU_texture_update_mipmap(icongltex.tex[1], 0, GPU_DATA_UNSIGNED_BYTE, b32buf_border->rect);
GPU_texture_update_mipmap(icongltex.tex[1], 1, GPU_DATA_UNSIGNED_BYTE, b16buf_border->rect);
GPU_texture_update_mipmap(icongltex.tex[1], 0, GPU_DATA_UBYTE, b32buf_border->rect);
GPU_texture_update_mipmap(icongltex.tex[1], 1, GPU_DATA_UBYTE, b16buf_border->rect);
}
}

View File

@ -351,7 +351,7 @@ static void screen_opengl_render_doit(const bContext *C, OGLRender *oglrender, R
G.f &= ~G_FLAG_RENDER_VIEWPORT;
gp_rect = MEM_mallocN(sizeof(uchar[4]) * sizex * sizey, "offscreen rect");
GPU_offscreen_read_pixels(oglrender->ofs, GPU_DATA_UNSIGNED_BYTE, gp_rect);
GPU_offscreen_read_pixels(oglrender->ofs, GPU_DATA_UBYTE, gp_rect);
for (i = 0; i < sizex * sizey * 4; i += 4) {
blend_color_mix_byte(&render_rect[i], &render_rect[i], &gp_rect[i]);

View File

@ -131,7 +131,7 @@ void immDrawPixelsTexScaled_clipping(IMMDrawPixelsTexState *state,
}
const bool use_float_data = ELEM(gpu_format, GPU_RGBA16F, GPU_RGB16F, GPU_R16F);
eGPUDataFormat gpu_data = (use_float_data) ? GPU_DATA_FLOAT : GPU_DATA_UNSIGNED_BYTE;
eGPUDataFormat gpu_data = (use_float_data) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
size_t stride = components * ((use_float_data) ? sizeof(float) : sizeof(uchar));
GPUTexture *tex = GPU_texture_create_2d("immDrawPixels", tex_w, tex_h, 1, gpu_format, NULL);

View File

@ -610,7 +610,7 @@ void ED_screen_preview_render(const bScreen *screen, int size_x, int size_y, uin
screen_preview_draw(screen, size_x, size_y);
GPU_offscreen_read_pixels(offscreen, GPU_DATA_UNSIGNED_BYTE, r_rect);
GPU_offscreen_read_pixels(offscreen, GPU_DATA_UBYTE, r_rect);
GPU_offscreen_unbind(offscreen, true);
GPU_offscreen_free(offscreen);

View File

@ -350,7 +350,7 @@ static int load_tex(Brush *br, ViewContext *vc, float zoom, bool col, bool prima
eGPUTextureFormat format = col ? GPU_RGBA8 : GPU_R8;
target->overlay_texture = GPU_texture_create_2d(
"paint_cursor_overlay", size, size, 1, format, NULL);
GPU_texture_update(target->overlay_texture, GPU_DATA_UNSIGNED_BYTE, buffer);
GPU_texture_update(target->overlay_texture, GPU_DATA_UBYTE, buffer);
if (!col) {
GPU_texture_swizzle_set(target->overlay_texture, "rrrr");
@ -358,7 +358,7 @@ static int load_tex(Brush *br, ViewContext *vc, float zoom, bool col, bool prima
}
if (init) {
GPU_texture_update(target->overlay_texture, GPU_DATA_UNSIGNED_BYTE, buffer);
GPU_texture_update(target->overlay_texture, GPU_DATA_UBYTE, buffer);
}
if (buffer) {
@ -469,13 +469,13 @@ static int load_tex_cursor(Brush *br, ViewContext *vc, float zoom)
if (!cursor_snap.overlay_texture) {
cursor_snap.overlay_texture = GPU_texture_create_2d(
"cursor_snap_overaly", size, size, 1, GPU_R8, NULL);
GPU_texture_update(cursor_snap.overlay_texture, GPU_DATA_UNSIGNED_BYTE, buffer);
GPU_texture_update(cursor_snap.overlay_texture, GPU_DATA_UBYTE, buffer);
GPU_texture_swizzle_set(cursor_snap.overlay_texture, "rrrr");
}
if (init) {
GPU_texture_update(cursor_snap.overlay_texture, GPU_DATA_UNSIGNED_BYTE, buffer);
GPU_texture_update(cursor_snap.overlay_texture, GPU_DATA_UBYTE, buffer);
}
if (buffer) {

View File

@ -1207,7 +1207,7 @@ static void draw_plane_marker_image(Scene *scene,
GPUTexture *texture = GPU_texture_create_2d(
"plane_marker_image", ibuf->x, ibuf->y, 1, GPU_RGBA8, NULL);
GPU_texture_update(texture, GPU_DATA_UNSIGNED_BYTE, display_buffer);
GPU_texture_update(texture, GPU_DATA_UBYTE, display_buffer);
GPU_texture_filter_mode(texture, false);
GPU_matrix_push();

View File

@ -1533,7 +1533,7 @@ static void *sequencer_OCIO_transform_ibuf(const bContext *C,
/* Default */
*r_format = GPU_RGBA8;
*r_data = GPU_DATA_UNSIGNED_BYTE;
*r_data = GPU_DATA_UBYTE;
/* Fallback to CPU based color space conversion. */
if (force_fallback) {
@ -1580,7 +1580,7 @@ static void *sequencer_OCIO_transform_ibuf(const bContext *C,
if ((ibuf->rect || ibuf->rect_float) && !*r_glsl_used) {
display_buffer = IMB_display_buffer_acquire_ctx(C, ibuf, &cache_handle);
*r_format = GPU_RGBA8;
*r_data = GPU_DATA_UNSIGNED_BYTE;
*r_data = GPU_DATA_UBYTE;
}
if (cache_handle) {
IMB_display_buffer_release(cache_handle);
@ -1682,7 +1682,7 @@ static void sequencer_draw_display_buffer(const bContext *C,
display_buffer = (uchar *)ibuf->rect;
format = GPU_RGBA8;
data = GPU_DATA_UNSIGNED_BYTE;
data = GPU_DATA_UBYTE;
}
else {
display_buffer = sequencer_OCIO_transform_ibuf(C, ibuf, &glsl_used, &format, &data);

View File

@ -2002,7 +2002,7 @@ ImBuf *ED_view3d_draw_offscreen_imbuf(Depsgraph *depsgraph,
GPU_offscreen_read_pixels(ofs, GPU_DATA_FLOAT, ibuf->rect_float);
}
else if (ibuf->rect) {
GPU_offscreen_read_pixels(ofs, GPU_DATA_UNSIGNED_BYTE, ibuf->rect);
GPU_offscreen_read_pixels(ofs, GPU_DATA_UBYTE, ibuf->rect);
}
/* unbind */

View File

@ -209,6 +209,10 @@ void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *fb,
void (*callback)(void *userData, int level),
void *userData);
void GPU_framebuffer_push(GPUFrameBuffer *fb);
GPUFrameBuffer *GPU_framebuffer_pop(void);
uint GPU_framebuffer_stack_level_get(void);
/* GPU OffScreen
* - wrapper around frame-buffer and texture for simple off-screen drawing
*/

View File

@ -171,9 +171,9 @@ typedef enum eGPUTextureFormat {
typedef enum eGPUDataFormat {
GPU_DATA_FLOAT,
GPU_DATA_INT,
GPU_DATA_UNSIGNED_INT,
GPU_DATA_UNSIGNED_BYTE,
GPU_DATA_UNSIGNED_INT_24_8,
GPU_DATA_UINT,
GPU_DATA_UBYTE,
GPU_DATA_UINT_24_8,
GPU_DATA_10_11_11_REV,
} eGPUDataFormat;
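This commit renames the unsigned data-format enumerators to shorter forms (`GPU_DATA_UNSIGNED_INT` becomes `GPU_DATA_UINT`, and so on) and updates call sites across the tree mechanically. For an out-of-tree branch, a hedged stop-gap (not part of this commit) would be temporary compatibility aliases until the rebase is complete:

    /* Hypothetical migration aliases; remove once all call sites are updated. */
    #define GPU_DATA_UNSIGNED_INT GPU_DATA_UINT
    #define GPU_DATA_UNSIGNED_BYTE GPU_DATA_UBYTE
    #define GPU_DATA_UNSIGNED_INT_24_8 GPU_DATA_UINT_24_8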
@ -272,6 +272,10 @@ int GPU_texture_opengl_bindcode(const GPUTexture *tex);
void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *size);
/* utilities */
size_t GPU_texture_component_len(eGPUTextureFormat format);
size_t GPU_texture_dataformat_size(eGPUDataFormat data_format);
#ifdef __cplusplus
}
#endif

View File

@ -476,10 +476,9 @@ void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *gpu_fb,
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUOffScreen
/** \name Framebuffer Stack
*
* Container that holds a frame-buffer and its textures.
* Might be bound to multiple contexts.
* Keeps track of frame-buffer binding operations to restore previously bound frame-buffers.
* \{ */
#define FRAMEBUFFER_STACK_DEPTH 16
@ -489,22 +488,36 @@ static struct {
uint top;
} FrameBufferStack = {{nullptr}};
static void gpuPushFrameBuffer(GPUFrameBuffer *fb)
void GPU_framebuffer_push(GPUFrameBuffer *fb)
{
BLI_assert(FrameBufferStack.top < FRAMEBUFFER_STACK_DEPTH);
FrameBufferStack.framebuffers[FrameBufferStack.top] = fb;
FrameBufferStack.top++;
}
static GPUFrameBuffer *gpuPopFrameBuffer()
GPUFrameBuffer *GPU_framebuffer_pop(void)
{
BLI_assert(FrameBufferStack.top > 0);
FrameBufferStack.top--;
return FrameBufferStack.framebuffers[FrameBufferStack.top];
}
uint GPU_framebuffer_stack_level_get(void)
{
return FrameBufferStack.top;
}
#undef FRAMEBUFFER_STACK_DEPTH
/** \} */
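With the stack API now public, callers outside this file can save and restore the active frame-buffer themselves. A hedged usage sketch mirroring what `GPU_offscreen_bind`/`GPU_offscreen_unbind` do below, assuming a frame-buffer was bound beforehand (`my_fb` is a hypothetical frame-buffer):

    GPU_framebuffer_push(GPU_framebuffer_active_get()); /* Save the current binding. */
    GPU_framebuffer_bind(my_fb);                        /* Draw into my_fb... */
    GPU_framebuffer_bind(GPU_framebuffer_pop());        /* Restore the previous binding. */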
/* -------------------------------------------------------------------- */
/** \name GPUOffScreen
*
* Container that holds a frame-buffer and its textures.
* Might be bound to multiple contexts.
* \{ */
#define MAX_CTX_FB_LEN 3
struct GPUOffScreen {
@ -614,7 +627,7 @@ void GPU_offscreen_bind(GPUOffScreen *ofs, bool save)
{
if (save) {
GPUFrameBuffer *fb = GPU_framebuffer_active_get();
gpuPushFrameBuffer(fb);
GPU_framebuffer_push(fb);
}
unwrap(gpu_offscreen_fb_get(ofs))->bind(false);
}
@ -623,7 +636,7 @@ void GPU_offscreen_unbind(GPUOffScreen *UNUSED(ofs), bool restore)
{
GPUFrameBuffer *fb = nullptr;
if (restore) {
fb = gpuPopFrameBuffer();
fb = GPU_framebuffer_pop();
}
if (fb) {
@ -643,7 +656,7 @@ void GPU_offscreen_draw_to_screen(GPUOffScreen *ofs, int x, int y)
void GPU_offscreen_read_pixels(GPUOffScreen *ofs, eGPUDataFormat format, void *pixels)
{
BLI_assert(ELEM(format, GPU_DATA_UNSIGNED_BYTE, GPU_DATA_FLOAT));
BLI_assert(ELEM(format, GPU_DATA_UBYTE, GPU_DATA_FLOAT));
const int w = GPU_texture_width(ofs->color);
const int h = GPU_texture_height(ofs->color);

View File

@ -483,7 +483,7 @@ bool gpu_select_pick_load_id(uint id, bool end)
const uint rect_len = ps->src.rect_len;
GPUFrameBuffer *fb = GPU_framebuffer_active_get();
GPU_framebuffer_read_depth(
fb, UNPACK4(ps->gl.clip_readpixels), GPU_DATA_UNSIGNED_INT, ps->gl.rect_depth_test->buf);
fb, UNPACK4(ps->gl.clip_readpixels), GPU_DATA_UINT, ps->gl.rect_depth_test->buf);
/* perform initial check since most cases the array remains unchanged */
bool do_pass = false;

View File

@ -608,3 +608,20 @@ void GPU_samplers_update(void)
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPU Texture Utilities
*
* \{ */
size_t GPU_texture_component_len(eGPUTextureFormat tex_format)
{
return to_component_len(tex_format);
}
size_t GPU_texture_dataformat_size(eGPUDataFormat data_format)
{
return to_bytesize(data_format);
}
/** \} */
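A hedged sanity sketch of the new helper; the values follow `to_bytesize(eGPUDataFormat)` as defined in this commit:

    assert(GPU_texture_dataformat_size(GPU_DATA_UBYTE) == 1);
    assert(GPU_texture_dataformat_size(GPU_DATA_FLOAT) == 4);
    assert(GPU_texture_dataformat_size(GPU_DATA_UINT_24_8) == 4);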

View File

@ -384,16 +384,16 @@ inline int to_component_len(eGPUTextureFormat format)
}
}
inline size_t to_bytesize(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
inline size_t to_bytesize(eGPUDataFormat data_format)
{
switch (data_format) {
case GPU_DATA_UNSIGNED_BYTE:
return 1 * to_component_len(tex_format);
case GPU_DATA_UBYTE:
return 1;
case GPU_DATA_FLOAT:
case GPU_DATA_INT:
case GPU_DATA_UNSIGNED_INT:
return 4 * to_component_len(tex_format);
case GPU_DATA_UNSIGNED_INT_24_8:
case GPU_DATA_UINT:
return 4;
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
return 4;
default:
@ -402,6 +402,11 @@ inline size_t to_bytesize(eGPUTextureFormat tex_format, eGPUDataFormat data_form
}
}
inline size_t to_bytesize(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
{
return to_component_len(tex_format) * to_bytesize(data_format);
}
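Worked example under the split: `to_component_len(GPU_RGBA8)` is 4 and `to_bytesize(GPU_DATA_UBYTE)` is 1, so `to_bytesize(GPU_RGBA8, GPU_DATA_UBYTE)` yields 4 bytes per pixel, while the same texture format with `GPU_DATA_FLOAT` yields 4 * 4 = 16 bytes.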
/* Definitely not complete, edit according to the gl specification. */
inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
{
@ -412,12 +417,12 @@ inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat da
return data_format == GPU_DATA_FLOAT;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return data_format == GPU_DATA_UNSIGNED_INT_24_8;
return data_format == GPU_DATA_UINT_24_8;
case GPU_R8UI:
case GPU_R16UI:
case GPU_RG16UI:
case GPU_R32UI:
return data_format == GPU_DATA_UNSIGNED_INT;
return data_format == GPU_DATA_UINT;
case GPU_RG16I:
case GPU_R16I:
return data_format == GPU_DATA_INT;
@ -426,7 +431,7 @@ inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat da
case GPU_RGBA8:
case GPU_RGBA8UI:
case GPU_SRGB8_A8:
return ELEM(data_format, GPU_DATA_UNSIGNED_BYTE, GPU_DATA_FLOAT);
return ELEM(data_format, GPU_DATA_UBYTE, GPU_DATA_FLOAT);
case GPU_R11F_G11F_B10F:
return ELEM(data_format, GPU_DATA_10_11_11_REV, GPU_DATA_FLOAT);
default:
@ -444,12 +449,12 @@ inline eGPUDataFormat to_data_format(eGPUTextureFormat tex_format)
return GPU_DATA_FLOAT;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return GPU_DATA_UNSIGNED_INT_24_8;
return GPU_DATA_UINT_24_8;
case GPU_R8UI:
case GPU_R16UI:
case GPU_RG16UI:
case GPU_R32UI:
return GPU_DATA_UNSIGNED_INT;
return GPU_DATA_UINT;
case GPU_RG16I:
case GPU_R16I:
return GPU_DATA_INT;
@ -458,7 +463,7 @@ inline eGPUDataFormat to_data_format(eGPUTextureFormat tex_format)
case GPU_RGBA8:
case GPU_RGBA8UI:
case GPU_SRGB8_A8:
return GPU_DATA_UNSIGNED_BYTE;
return GPU_DATA_UBYTE;
case GPU_R11F_G11F_B10F:
return GPU_DATA_10_11_11_REV;
default:

View File

@ -365,7 +365,7 @@ void GLFrameBuffer::clear_attachment(GPUAttachmentType type,
context_->state_manager->apply_state();
if (type == GPU_FB_DEPTH_STENCIL_ATTACHMENT) {
BLI_assert(data_format == GPU_DATA_UNSIGNED_INT_24_8);
BLI_assert(data_format == GPU_DATA_UINT_24_8);
float depth = ((*(uint32_t *)clear_value) & 0x00FFFFFFu) / (float)0x00FFFFFFu;
int stencil = ((*(uint32_t *)clear_value) >> 24);
glClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
@ -374,7 +374,7 @@ void GLFrameBuffer::clear_attachment(GPUAttachmentType type,
if (data_format == GPU_DATA_FLOAT) {
glClearBufferfv(GL_DEPTH, 0, (GLfloat *)clear_value);
}
else if (data_format == GPU_DATA_UNSIGNED_INT) {
else if (data_format == GPU_DATA_UINT) {
float depth = *(uint32_t *)clear_value / (float)0xFFFFFFFFu;
glClearBufferfv(GL_DEPTH, 0, &depth);
}
@ -388,7 +388,7 @@ void GLFrameBuffer::clear_attachment(GPUAttachmentType type,
case GPU_DATA_FLOAT:
glClearBufferfv(GL_COLOR, slot, (GLfloat *)clear_value);
break;
case GPU_DATA_UNSIGNED_INT:
case GPU_DATA_UINT:
glClearBufferuiv(GL_COLOR, slot, (GLuint *)clear_value);
break;
case GPU_DATA_INT:

View File

@ -274,11 +274,11 @@ inline GLenum to_gl(eGPUDataFormat format)
return GL_FLOAT;
case GPU_DATA_INT:
return GL_INT;
case GPU_DATA_UNSIGNED_INT:
case GPU_DATA_UINT:
return GL_UNSIGNED_INT;
case GPU_DATA_UNSIGNED_BYTE:
case GPU_DATA_UBYTE:
return GL_UNSIGNED_BYTE;
case GPU_DATA_UNSIGNED_INT_24_8:
case GPU_DATA_UINT_24_8:
return GL_UNSIGNED_INT_24_8;
case GPU_DATA_10_11_11_REV:
return GL_UNSIGNED_INT_10F_11F_11F_REV;

View File

@ -49,7 +49,7 @@ static void imb_gpu_get_format(const ImBuf *ibuf,
!IMB_colormanagement_space_is_scene_linear(ibuf->rect_colorspace));
high_bitdepth = (!(ibuf->flags & IB_halffloat) && high_bitdepth);
*r_data_format = (float_rect) ? GPU_DATA_FLOAT : GPU_DATA_UNSIGNED_BYTE;
*r_data_format = (float_rect) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
if (float_rect) {
*r_texture_format = high_bitdepth ? GPU_RGBA32F : GPU_RGBA16F;

View File

@ -282,6 +282,17 @@ int PyC_ParseStringEnum(PyObject *o, void *p)
return 0;
}
const char *PyC_StringEnum_FindIDFromValue(const struct PyC_StringEnumItems *items,
const int value)
{
for (int i = 0; items[i].id; i++) {
if (items[i].value == value) {
return items[i].id;
}
}
return NULL;
}
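This is the reverse lookup of `PyC_ParseStringEnum`; the Buffer repr added later in this commit uses it to print the format name. A hedged usage sketch:

    const char *id = PyC_StringEnum_FindIDFromValue(bpygpu_dataformat_items, GPU_DATA_FLOAT);
    /* id is "FLOAT"; a value missing from the table yields NULL. */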
/* Silly function, we don't use 'args', just check it's compatible with __deepcopy__. */
int PyC_CheckArgs_DeepCopy(PyObject *args)
{

View File

@ -140,6 +140,8 @@ struct PyC_StringEnum {
};
int PyC_ParseStringEnum(PyObject *o, void *p);
const char *PyC_StringEnum_FindIDFromValue(const struct PyC_StringEnumItems *items,
const int value);
int PyC_CheckArgs_DeepCopy(PyObject *args);

View File

@ -33,25 +33,37 @@ set(INC_SYS
)
set(SRC
gpu_py.c
gpu_py_api.c
gpu_py_batch.c
gpu_py_buffer.c
gpu_py_element.c
gpu_py_framebuffer.c
gpu_py_matrix.c
gpu_py_offscreen.c
gpu_py_select.c
gpu_py_shader.c
gpu_py_state.c
gpu_py_texture.c
gpu_py_types.c
gpu_py_uniformbuffer.c
gpu_py_vertex_buffer.c
gpu_py_vertex_format.c
gpu_py.h
gpu_py_api.h
gpu_py_batch.h
gpu_py_buffer.h
gpu_py_element.h
gpu_py_framebuffer.h
gpu_py_matrix.h
gpu_py_offscreen.h
gpu_py_select.h
gpu_py_shader.h
gpu_py_state.h
gpu_py_texture.h
gpu_py_types.h
gpu_py_uniformbuffer.h
gpu_py_vertex_buffer.h
gpu_py_vertex_format.h
)

View File

@ -0,0 +1,45 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "GPU_texture.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPU Module
* \{ */
struct PyC_StringEnumItems bpygpu_dataformat_items[] = {
{GPU_DATA_FLOAT, "FLOAT"},
{GPU_DATA_INT, "INT"},
{GPU_DATA_UINT, "UINT"},
{GPU_DATA_UBYTE, "UBYTE"},
{GPU_DATA_UINT_24_8, "UINT_24_8"},
{GPU_DATA_10_11_11_REV, "10_11_11_REV"},
{0, NULL},
};
/** \} */
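This table drives both directions: `PyC_ParseStringEnum` maps a Python string such as "UBYTE" to `GPU_DATA_UBYTE` when parsing arguments, and `PyC_StringEnum_FindIDFromValue` maps the value back for reprs. A hedged parsing sketch, mirroring the Buffer constructor later in this commit:

    struct PyC_StringEnum fmt = {bpygpu_dataformat_items, GPU_DATA_FLOAT};
    if (!PyArg_ParseTuple(args, "O&", PyC_ParseStringEnum, &fmt)) {
      return NULL;
    }
    /* fmt.value_found now holds the matching eGPUDataFormat. */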

View File

@ -0,0 +1,23 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
extern struct PyC_StringEnumItems bpygpu_dataformat_items[];

View File

@ -35,6 +35,7 @@
#include "gpu_py_matrix.h"
#include "gpu_py_select.h"
#include "gpu_py_state.h"
#include "gpu_py_types.h"
#include "gpu_py_api.h" /* own include */
@ -134,6 +135,9 @@ PyObject *BPyInit_gpu(void)
PyModule_AddObject(mod, "shader", (submodule = bpygpu_shader_init()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
PyModule_AddObject(mod, "state", (submodule = bpygpu_state_init()));
PyDict_SetItem(sys_modules, PyModule_GetNameObject(submodule), submodule);
return mod;
}

View File

@ -20,6 +20,10 @@
#pragma once
/* Each type object could have a method for free GPU resources.
* However, it is currently of little use. */
// #define BPYGPU_USE_GPUOBJ_FREE_METHOD
int bpygpu_ParsePrimType(PyObject *o, void *p);
PyObject *BPyInit_gpu(void);

View File

@ -0,0 +1,669 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the gpu.state API.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "BLI_utildefines.h"
#include "MEM_guardedalloc.h"
#include "GPU_texture.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h"
#include "gpu_py_buffer.h"
// #define PYGPU_BUFFER_PROTOCOL
/* -------------------------------------------------------------------- */
/** \name Utility Functions
* \{ */
static bool pygpu_buffer_dimensions_compare(int ndim,
const Py_ssize_t *shape_a,
const Py_ssize_t *shape_b)
{
/* Returns true when the dimensions match. */
return memcmp(shape_a, shape_b, ndim * sizeof(Py_ssize_t)) == 0;
}
static const char *pygpu_buffer_formatstr(eGPUDataFormat data_format)
{
switch (data_format) {
case GPU_DATA_FLOAT:
return "f";
case GPU_DATA_INT:
return "i";
case GPU_DATA_UINT:
return "I";
case GPU_DATA_UBYTE:
return "B";
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
return "I";
default:
break;
}
return NULL;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name BPyGPUBuffer API
* \{ */
static BPyGPUBuffer *pygpu_buffer_make_from_data(PyObject *parent,
const eGPUDataFormat format,
const int shape_len,
const Py_ssize_t *shape,
void *buf)
{
BPyGPUBuffer *buffer = (BPyGPUBuffer *)_PyObject_GC_New(&BPyGPU_BufferType);
buffer->parent = NULL;
buffer->format = format;
buffer->shape_len = shape_len;
buffer->shape = MEM_mallocN(shape_len * sizeof(*buffer->shape), "BPyGPUBuffer shape");
memcpy(buffer->shape, shape, shape_len * sizeof(*buffer->shape));
buffer->buf.as_void = buf;
if (parent) {
Py_INCREF(parent);
buffer->parent = parent;
PyObject_GC_Track(buffer);
}
return buffer;
}
static PyObject *pygpu_buffer__sq_item(BPyGPUBuffer *self, int i)
{
if (i >= self->shape[0] || i < 0) {
PyErr_SetString(PyExc_IndexError, "array index out of range");
return NULL;
}
const char *formatstr = pygpu_buffer_formatstr(self->format);
if (self->shape_len == 1) {
switch (self->format) {
case GPU_DATA_FLOAT:
return Py_BuildValue(formatstr, self->buf.as_float[i]);
case GPU_DATA_INT:
return Py_BuildValue(formatstr, self->buf.as_int[i]);
case GPU_DATA_UBYTE:
return Py_BuildValue(formatstr, self->buf.as_byte[i]);
case GPU_DATA_UINT:
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
return Py_BuildValue(formatstr, self->buf.as_uint[i]);
}
}
else {
int offset = i * GPU_texture_dataformat_size(self->format);
for (int j = 1; j < self->shape_len; j++) {
offset *= self->shape[j];
}
return (PyObject *)pygpu_buffer_make_from_data((PyObject *)self,
self->format,
self->shape_len - 1,
self->shape + 1,
self->buf.as_byte + offset);
}
return NULL;
}
static PyObject *pygpu_buffer_to_list(BPyGPUBuffer *self)
{
int i, len = self->shape[0];
PyObject *list = PyList_New(len);
for (i = 0; i < len; i++) {
PyList_SET_ITEM(list, i, pygpu_buffer__sq_item(self, i));
}
return list;
}
static PyObject *pygpu_buffer_to_list_recursive(BPyGPUBuffer *self)
{
PyObject *list;
if (self->shape_len > 1) {
int i, len = self->shape[0];
list = PyList_New(len);
for (i = 0; i < len; i++) {
/* "BPyGPUBuffer *sub_tmp" is a temporary object created just to be read for nested lists.
* That is why it is decremented/freed soon after.
* TODO: For efficiency, avoid creating #BPyGPUBuffer when creating nested lists. */
BPyGPUBuffer *sub_tmp = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i);
PyList_SET_ITEM(list, i, pygpu_buffer_to_list_recursive(sub_tmp));
Py_DECREF(sub_tmp);
}
}
else {
list = pygpu_buffer_to_list(self);
}
return list;
}
static PyObject *pygpu_buffer_dimensions(BPyGPUBuffer *self, void *UNUSED(arg))
{
PyObject *list = PyList_New(self->shape_len);
int i;
for (i = 0; i < self->shape_len; i++) {
PyList_SET_ITEM(list, i, PyLong_FromLong(self->shape[i]));
}
return list;
}
static int pygpu_buffer__tp_traverse(BPyGPUBuffer *self, visitproc visit, void *arg)
{
Py_VISIT(self->parent);
return 0;
}
static int pygpu_buffer__tp_clear(BPyGPUBuffer *self)
{
Py_CLEAR(self->parent);
return 0;
}
static void pygpu_buffer__tp_dealloc(BPyGPUBuffer *self)
{
if (self->parent) {
PyObject_GC_UnTrack(self);
pygpu_buffer__tp_clear(self);
Py_XDECREF(self->parent);
}
else {
MEM_freeN(self->buf.as_void);
}
MEM_freeN(self->shape);
PyObject_GC_Del(self);
}
static PyObject *pygpu_buffer__tp_repr(BPyGPUBuffer *self)
{
PyObject *repr;
PyObject *list = pygpu_buffer_to_list_recursive(self);
const char *typestr = PyC_StringEnum_FindIDFromValue(bpygpu_dataformat_items, self->format);
repr = PyUnicode_FromFormat("Buffer(%s, %R)", typestr, list);
Py_DECREF(list);
return repr;
}
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v);
static int pygpu_buffer_ass_slice(BPyGPUBuffer *self,
Py_ssize_t begin,
Py_ssize_t end,
PyObject *seq)
{
PyObject *item;
int count, err = 0;
if (begin < 0) {
begin = 0;
}
if (end > self->shape[0]) {
end = self->shape[0];
}
if (begin > end) {
begin = end;
}
if (!PySequence_Check(seq)) {
PyErr_Format(PyExc_TypeError,
"buffer[:] = value, invalid assignment. "
"Expected a sequence, not an %.200s type",
Py_TYPE(seq)->tp_name);
return -1;
}
/* re-use count var */
if ((count = PySequence_Size(seq)) != (end - begin)) {
PyErr_Format(PyExc_TypeError,
"buffer[:] = value, size mismatch in assignment. "
"Expected: %d (given: %d)",
count,
end - begin);
return -1;
}
for (count = begin; count < end; count++) {
item = PySequence_GetItem(seq, count - begin);
if (item) {
err = pygpu_buffer__sq_ass_item(self, count, item);
Py_DECREF(item);
}
else {
err = -1;
}
if (err) {
break;
}
}
return err;
}
#define MAX_DIMENSIONS 64
static PyObject *pygpu_buffer__tp_new(PyTypeObject *UNUSED(type), PyObject *args, PyObject *kwds)
{
PyObject *length_ob, *init = NULL;
BPyGPUBuffer *buffer = NULL;
Py_ssize_t shape[MAX_DIMENSIONS];
Py_ssize_t i, shape_len = 0;
if (kwds && PyDict_Size(kwds)) {
PyErr_SetString(PyExc_TypeError, "Buffer(): takes no keyword args");
return NULL;
}
const struct PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items, GPU_DATA_FLOAT};
if (!PyArg_ParseTuple(
args, "O&O|O: Buffer", PyC_ParseStringEnum, &pygpu_dataformat, &length_ob, &init)) {
return NULL;
}
if (PyLong_Check(length_ob)) {
shape_len = 1;
if (((shape[0] = PyLong_AsLong(length_ob)) < 1)) {
PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
return NULL;
}
}
else if (PySequence_Check(length_ob)) {
shape_len = PySequence_Size(length_ob);
if (shape_len > MAX_DIMENSIONS) {
PyErr_SetString(PyExc_AttributeError,
"too many dimensions, max is " STRINGIFY(MAX_DIMENSIONS));
return NULL;
}
if (shape_len < 1) {
PyErr_SetString(PyExc_AttributeError, "sequence must have at least one dimension");
return NULL;
}
for (i = 0; i < shape_len; i++) {
PyObject *ob = PySequence_GetItem(length_ob, i);
if (!PyLong_Check(ob)) {
PyErr_Format(PyExc_TypeError,
"invalid dimension %i, expected an int, not a %.200s",
i,
Py_TYPE(ob)->tp_name);
Py_DECREF(ob);
return NULL;
}
else {
shape[i] = PyLong_AsLong(ob);
}
Py_DECREF(ob);
if (shape[i] < 1) {
PyErr_SetString(PyExc_AttributeError, "dimension must be greater than or equal to 1");
return NULL;
}
}
}
else {
PyErr_Format(PyExc_TypeError,
"invalid second argument argument expected a sequence "
"or an int, not a %.200s",
Py_TYPE(length_ob)->tp_name);
return NULL;
}
if (init && PyObject_CheckBuffer(init)) {
Py_buffer pybuffer;
if (PyObject_GetBuffer(init, &pybuffer, PyBUF_ND | PyBUF_FORMAT) == -1) {
/* PyObject_GetBuffer raises a PyExc_BufferError. */
return NULL;
}
if (shape_len != pybuffer.ndim ||
!pygpu_buffer_dimensions_compare(shape_len, shape, pybuffer.shape)) {
PyErr_Format(PyExc_TypeError, "array size does not match");
}
else {
buffer = pygpu_buffer_make_from_data(
init, pygpu_dataformat.value_found, pybuffer.ndim, shape, pybuffer.buf);
}
PyBuffer_Release(&pybuffer);
}
else {
buffer = BPyGPU_Buffer_CreatePyObject(pygpu_dataformat.value_found, shape_len, shape, NULL);
if (init && pygpu_buffer_ass_slice(buffer, 0, shape[0], init)) {
Py_DECREF(buffer);
return NULL;
}
}
return (PyObject *)buffer;
}
/* BPyGPUBuffer sequence methods */
static int pygpu_buffer__sq_length(BPyGPUBuffer *self)
{
return self->shape[0];
}
static PyObject *pygpu_buffer_slice(BPyGPUBuffer *self, Py_ssize_t begin, Py_ssize_t end)
{
PyObject *list;
Py_ssize_t count;
if (begin < 0) {
begin = 0;
}
if (end > self->shape[0]) {
end = self->shape[0];
}
if (begin > end) {
begin = end;
}
list = PyList_New(end - begin);
for (count = begin; count < end; count++) {
PyList_SET_ITEM(list, count - begin, pygpu_buffer__sq_item(self, count));
}
return list;
}
static int pygpu_buffer__sq_ass_item(BPyGPUBuffer *self, int i, PyObject *v)
{
if (i >= self->shape[0] || i < 0) {
PyErr_SetString(PyExc_IndexError, "array assignment index out of range");
return -1;
}
if (self->shape_len != 1) {
BPyGPUBuffer *row = (BPyGPUBuffer *)pygpu_buffer__sq_item(self, i);
if (row) {
const int ret = pygpu_buffer_ass_slice(row, 0, self->shape[1], v);
Py_DECREF(row);
return ret;
}
return -1;
}
switch (self->format) {
case GPU_DATA_FLOAT:
return PyArg_Parse(v, "f:Expected floats", &self->buf.as_float[i]) ? 0 : -1;
case GPU_DATA_INT:
return PyArg_Parse(v, "i:Expected ints", &self->buf.as_int[i]) ? 0 : -1;
case GPU_DATA_UBYTE:
return PyArg_Parse(v, "b:Expected ints", &self->buf.as_byte[i]) ? 0 : -1;
case GPU_DATA_UINT:
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
return PyArg_Parse(v, "b:Expected ints", &self->buf.as_uint[i]) ? 0 : -1;
default:
return 0; /* should never happen */
}
}
static PyObject *pygpu_buffer__mp_subscript(BPyGPUBuffer *self, PyObject *item)
{
if (PyIndex_Check(item)) {
Py_ssize_t i;
i = PyNumber_AsSsize_t(item, PyExc_IndexError);
if (i == -1 && PyErr_Occurred()) {
return NULL;
}
if (i < 0) {
i += self->shape[0];
}
return pygpu_buffer__sq_item(self, i);
}
if (PySlice_Check(item)) {
Py_ssize_t start, stop, step, slicelength;
if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
return NULL;
}
if (slicelength <= 0) {
return PyTuple_New(0);
}
if (step == 1) {
return pygpu_buffer_slice(self, start, stop);
}
PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
return NULL;
}
PyErr_Format(
PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
return NULL;
}
static int pygpu_buffer__mp_ass_subscript(BPyGPUBuffer *self, PyObject *item, PyObject *value)
{
if (PyIndex_Check(item)) {
Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
if (i == -1 && PyErr_Occurred()) {
return -1;
}
if (i < 0) {
i += self->shape[0];
}
return pygpu_buffer__sq_ass_item(self, i, value);
}
if (PySlice_Check(item)) {
Py_ssize_t start, stop, step, slicelength;
if (PySlice_GetIndicesEx(item, self->shape[0], &start, &stop, &step, &slicelength) < 0) {
return -1;
}
if (step == 1) {
return pygpu_buffer_ass_slice(self, start, stop, value);
}
PyErr_SetString(PyExc_IndexError, "slice steps not supported with vectors");
return -1;
}
PyErr_Format(
PyExc_TypeError, "buffer indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
return -1;
}
static PyMethodDef pygpu_buffer__tp_methods[] = {
{"to_list",
(PyCFunction)pygpu_buffer_to_list_recursive,
METH_NOARGS,
"return the buffer as a list"},
{NULL, NULL, 0, NULL},
};
static PyGetSetDef pygpu_buffer_getseters[] = {
{"dimensions", (getter)pygpu_buffer_dimensions, NULL, NULL, NULL},
{NULL, NULL, NULL, NULL, NULL},
};
static PySequenceMethods pygpu_buffer__tp_as_sequence = {
(lenfunc)pygpu_buffer__sq_length, /*sq_length */
(binaryfunc)NULL, /*sq_concat */
(ssizeargfunc)NULL, /*sq_repeat */
(ssizeargfunc)pygpu_buffer__sq_item, /*sq_item */
(ssizessizeargfunc)NULL, /*sq_slice, deprecated, handled in pygpu_buffer__sq_item */
(ssizeobjargproc)pygpu_buffer__sq_ass_item, /*sq_ass_item */
(ssizessizeobjargproc)NULL, /* sq_ass_slice, deprecated handled in pygpu_buffer__sq_ass_item */
(objobjproc)NULL, /* sq_contains */
(binaryfunc)NULL, /* sq_inplace_concat */
(ssizeargfunc)NULL, /* sq_inplace_repeat */
};
static PyMappingMethods pygpu_buffer__tp_as_mapping = {
(lenfunc)pygpu_buffer__sq_length,
(binaryfunc)pygpu_buffer__mp_subscript,
(objobjargproc)pygpu_buffer__mp_ass_subscript,
};
#ifdef PYGPU_BUFFER_PROTOCOL
static void pygpu_buffer_strides_calc(const eGPUDataFormat format,
const int shape_len,
const Py_ssize_t *shape,
Py_ssize_t *r_strides)
{
/* C-contiguous strides: the innermost dimension is tightly packed. */
r_strides[shape_len - 1] = GPU_texture_dataformat_size(format);
for (int i = shape_len - 2; i >= 0; i--) {
r_strides[i] = r_strides[i + 1] * shape[i + 1];
}
}
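Worked example, assuming the C-contiguous layout the item accessors in this file use: a `GPU_DATA_FLOAT` buffer of shape (2, 3) gets strides {3 * 4, 4} = {12, 4} bytes.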
/* Buffer protocol implementation. */
static int pygpu_buffer__bf_getbuffer(BPyGPUBuffer *self, Py_buffer *view, int flags)
{
if (view == NULL) {
PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer");
return -1;
}
view->obj = (PyObject *)self;
view->buf = (void *)self->buf.as_void;
view->len = bpygpu_Buffer_size(self);
view->readonly = 0;
view->itemsize = GPU_texture_dataformat_size(self->format);
view->format = pygpu_buffer_formatstr(self->format);
view->ndim = self->shape_len;
view->shape = self->shape;
view->strides = MEM_mallocN(view->ndim * sizeof(*view->strides), "BPyGPUBuffer strides");
pygpu_buffer_strides_calc(self->format, view->ndim, view->shape, view->strides);
view->suboffsets = NULL;
view->internal = NULL;
Py_INCREF(self);
return 0;
}
static void pygpu_buffer__bf_releasebuffer(PyObject *UNUSED(exporter), Py_buffer *view)
{
MEM_SAFE_FREE(view->strides);
}
static PyBufferProcs pygpu_buffer__tp_as_buffer = {
(getbufferproc)pygpu_buffer__bf_getbuffer,
(releasebufferproc)pygpu_buffer__bf_releasebuffer,
};
#endif
PyDoc_STRVAR(pygpu_buffer__tp_doc,
".. class:: Buffer(format, dimensions, data)\n"
"\n"
" For Python access to GPU functions requiring a pointer.\n"
"\n"
" :arg format: One of these primitive types: {\n"
" `FLOAT`,\n"
" `INT`,\n"
" `UINT`,\n"
" `UBYTE`,\n"
" `UINT_24_8`,\n"
" `10_11_11_REV`,\n"
" :type type: `str`\n"
" :arg dimensions: Array describing the dimensions.\n"
" :type dimensions: `int`\n"
" :arg data: Optional data array.\n"
" :type data: `array`\n");
PyTypeObject BPyGPU_BufferType = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "Buffer",
.tp_basicsize = sizeof(BPyGPUBuffer),
.tp_dealloc = (destructor)pygpu_buffer__tp_dealloc,
.tp_repr = (reprfunc)pygpu_buffer__tp_repr,
.tp_as_sequence = &pygpu_buffer__tp_as_sequence,
.tp_as_mapping = &pygpu_buffer__tp_as_mapping,
#ifdef PYGPU_BUFFER_PROTOCOL
.tp_as_buffer = &pygpu_buffer__tp_as_buffer,
#endif
.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
.tp_doc = pygpu_buffer__tp_doc,
.tp_traverse = (traverseproc)pygpu_buffer__tp_traverse,
.tp_clear = (inquiry)pygpu_buffer__tp_clear,
.tp_methods = pygpu_buffer__tp_methods,
.tp_getset = pygpu_buffer_getseters,
.tp_new = pygpu_buffer__tp_new,
};
static size_t pygpu_buffer_calc_size(const int format,
const int shape_len,
const Py_ssize_t *shape)
{
size_t r_size = GPU_texture_dataformat_size(format);
for (int i = 0; i < shape_len; i++) {
r_size *= shape[i];
}
return r_size;
}
size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer)
{
return pygpu_buffer_calc_size(buffer->format, buffer->shape_len, buffer->shape);
}
/**
* Create a buffer object
*
* \param shape: An array of shape_len integers representing the size of each dimension.
* \param buffer: When not NULL holds a contiguous buffer
* with the correct format from which the buffer will be initialized.
*/
BPyGPUBuffer *BPyGPU_Buffer_CreatePyObject(const int format,
const int shape_len,
const Py_ssize_t *shape,
void *buffer)
{
if (buffer == NULL) {
size_t size = pygpu_buffer_calc_size(format, shape_len, shape);
buffer = MEM_callocN(size, "BPyGPUBuffer buffer");
}
return pygpu_buffer_make_from_data(NULL, format, shape_len, shape, buffer);
}
/** \} */
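A hedged C-side sketch of the constructor (names from this file): passing NULL for the data makes the constructor allocate zeroed storage that the object owns and frees on dealloc.

    const Py_ssize_t shape[2] = {2, 4};
    BPyGPUBuffer *buf = BPyGPU_Buffer_CreatePyObject(GPU_DATA_FLOAT, 2, shape, NULL);
    /* buf->buf.as_float now points at 2 * 4 zero-initialized floats. */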

View File

@ -0,0 +1,53 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
extern PyTypeObject BPyGPU_BufferType;
#define BPyGPU_Buffer_Check(v) (Py_TYPE(v) == &BPyGPU_BufferType)
/**
* Buffer Object
*
* For Python access to GPU functions requiring a pointer.
*/
typedef struct BPyGPUBuffer {
PyObject_VAR_HEAD PyObject *parent;
int format;
int shape_len;
Py_ssize_t *shape;
union {
char *as_byte;
int *as_int;
uint *as_uint;
float *as_float;
void *as_void;
} buf;
} BPyGPUBuffer;
size_t bpygpu_Buffer_size(BPyGPUBuffer *buffer);
BPyGPUBuffer *BPyGPU_Buffer_CreatePyObject(const int format,
const int shape_len,
const Py_ssize_t *shape,
void *buffer);

View File

@ -0,0 +1,546 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the framebuffer functionalities of the 'gpu' module
* used for off-screen OpenGL rendering.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "GPU_context.h"
#include "GPU_framebuffer.h"
#include "GPU_init_exit.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "../mathutils/mathutils.h"
#include "gpu_py_api.h"
#include "gpu_py_texture.h"
#include "gpu_py_framebuffer.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUFrameBuffer Common Utilities
* \{ */
static int pygpu_framebuffer_valid_check(BPyGPUFrameBuffer *bpygpu_fb)
{
if (UNLIKELY(bpygpu_fb->fb == NULL)) {
PyErr_SetString(PyExc_ReferenceError,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
"GPU framebuffer was freed, no further access is valid"
#else
"GPU framebuffer: internal error"
#endif
);
return -1;
}
return 0;
}
#define PYGPU_FRAMEBUFFER_CHECK_OBJ(bpygpu) \
{ \
if (UNLIKELY(pygpu_framebuffer_valid_check(bpygpu) == -1)) { \
return NULL; \
} \
} \
((void)0)
static void pygpu_framebuffer_free_if_possible(GPUFrameBuffer *fb)
{
if (!fb) {
return;
}
if (GPU_is_init()) {
GPU_framebuffer_free(fb);
}
else {
printf("PyFramebuffer freed after the context has been destroyed.\n");
}
}
/* Keep less than or equal to #FRAMEBUFFER_STACK_DEPTH */
#define GPU_PY_FRAMEBUFFER_STACK_LEN 16
static bool pygpu_framebuffer_stack_push_and_bind_or_error(GPUFrameBuffer *fb)
{
if (GPU_framebuffer_stack_level_get() >= GPU_PY_FRAMEBUFFER_STACK_LEN) {
PyErr_SetString(
PyExc_RuntimeError,
"Maximum framebuffer stack depth " STRINGIFY(GPU_PY_FRAMEBUFFER_STACK_LEN) " reached");
return false;
}
GPU_framebuffer_push(GPU_framebuffer_active_get());
GPU_framebuffer_bind(fb);
return true;
}
static bool pygpu_framebuffer_stack_pop_and_restore_or_error(GPUFrameBuffer *fb)
{
if (GPU_framebuffer_stack_level_get() == 0) {
PyErr_SetString(PyExc_RuntimeError, "Minimum framebuffer stack depth reached");
return false;
}
if (fb && !GPU_framebuffer_bound(fb)) {
PyErr_SetString(PyExc_RuntimeError, "Framebuffer is not bound");
return false;
}
GPUFrameBuffer *fb_prev = GPU_framebuffer_pop();
GPU_framebuffer_bind(fb_prev);
return true;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Stack (Context Manager)
*
* Safer alternative to ensure balanced push/pop calls.
*
* \{ */
typedef struct {
PyObject_HEAD /* required python macro */
BPyGPUFrameBuffer *py_fb;
int level;
} PyFrameBufferStackContext;
static void pygpu_framebuffer_stack_context__tp_dealloc(PyFrameBufferStackContext *self)
{
Py_DECREF(self->py_fb);
PyObject_DEL(self);
}
static PyObject *pygpu_framebuffer_stack_context_enter(PyFrameBufferStackContext *self)
{
PYGPU_FRAMEBUFFER_CHECK_OBJ(self->py_fb);
/* sanity - should never happen */
if (self->level != -1) {
PyErr_SetString(PyExc_RuntimeError, "Already in use");
return NULL;
}
if (!pygpu_framebuffer_stack_push_and_bind_or_error(self->py_fb->fb)) {
return NULL;
}
self->level = GPU_framebuffer_stack_level_get();
Py_RETURN_NONE;
}
static PyObject *pygpu_framebuffer_stack_context_exit(PyFrameBufferStackContext *self,
PyObject *UNUSED(args))
{
PYGPU_FRAMEBUFFER_CHECK_OBJ(self->py_fb);
/* sanity - should never happen */
  if (self->level == -1) {
    PyErr_SetString(PyExc_RuntimeError, "Not yet in use");
    return NULL;
}
const int level = GPU_framebuffer_stack_level_get();
if (level != self->level) {
fprintf(stderr, "Level of bind mismatch, expected %d, got %d\n", self->level, level);
}
if (!pygpu_framebuffer_stack_pop_and_restore_or_error(self->py_fb->fb)) {
return NULL;
}
Py_RETURN_NONE;
}
static PyMethodDef pygpu_framebuffer_stack_context__tp_methods[] = {
{"__enter__", (PyCFunction)pygpu_framebuffer_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)pygpu_framebuffer_stack_context_exit, METH_VARARGS},
{NULL},
};
static PyTypeObject FramebufferStackContext_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBufferStackContext",
.tp_basicsize = sizeof(PyFrameBufferStackContext),
.tp_dealloc = (destructor)pygpu_framebuffer_stack_context__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_methods = pygpu_framebuffer_stack_context__tp_methods,
};
PyDoc_STRVAR(pygpu_framebuffer_bind_doc,
".. function:: bind()\n"
"\n"
" Context manager to ensure balanced bind calls, even in the case of an error.\n");
static PyObject *pygpu_framebuffer_bind(BPyGPUFrameBuffer *self)
{
PyFrameBufferStackContext *ret = PyObject_New(PyFrameBufferStackContext,
&FramebufferStackContext_Type);
ret->py_fb = self;
ret->level = -1;
Py_INCREF(self);
return (PyObject *)ret;
}
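From Python, the returned context manager keeps the push/pop pairs balanced even when the body raises. A minimal sketch (assumes `fb` is a gpu.types.GPUFrameBuffer created as shown further below):

import gpu

with fb.bind():
    # 'fb' is the active framebuffer inside this block; the previously
    # bound framebuffer is restored on exit, even if an exception is raised.
    pass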
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUFramebuffer Type
* \{ */
/* Fill in the GPUAttachment according to the PyObject parameter.
 * PyObject *o can be NULL, Py_None, a BPyGPUTexture or a dictionary containing the keyword
 * "texture" and the optional keywords "layer" and "mip".
 * Returns false on error. In this case, a Python exception is raised and the GPUAttachment is
 * left untouched. */
static bool pygpu_framebuffer_new_parse_arg(PyObject *o, GPUAttachment *r_attach)
{
GPUAttachment tmp_attach = GPU_ATTACHMENT_NONE;
if (!o || o == Py_None) {
/* Pass. */;
}
else if (BPyGPUTexture_Check(o)) {
if (!bpygpu_ParseTexture(o, &tmp_attach.tex)) {
return false;
}
}
else {
const char *c_texture = "texture";
const char *c_layer = "layer";
const char *c_mip = "mip";
PyObject *key, *value;
Py_ssize_t pos = 0;
while (PyDict_Next(o, &pos, &key, &value)) {
if (!PyUnicode_Check(key)) {
PyErr_SetString(PyExc_TypeError, "keywords must be strings");
return false;
}
if (c_texture && _PyUnicode_EqualToASCIIString(key, c_texture)) {
/* Compare only once. */
c_texture = NULL;
if (!bpygpu_ParseTexture(value, &tmp_attach.tex)) {
return false;
}
}
else if (c_layer && _PyUnicode_EqualToASCIIString(key, c_layer)) {
/* Compare only once. */
c_layer = NULL;
tmp_attach.layer = PyLong_AsLong(value);
if (tmp_attach.layer == -1 && PyErr_Occurred()) {
return false;
}
}
else if (c_mip && _PyUnicode_EqualToASCIIString(key, c_mip)) {
/* Compare only once. */
c_mip = NULL;
tmp_attach.mip = PyLong_AsLong(value);
if (tmp_attach.mip == -1 && PyErr_Occurred()) {
return false;
}
}
else {
PyErr_Format(
PyExc_TypeError, "'%U' is an invalid keyword argument for this attribute", key);
return false;
}
}
}
*r_attach = tmp_attach;
return true;
}
static PyObject *pygpu_framebuffer__tp_new(PyTypeObject *UNUSED(self),
PyObject *args,
PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
if (!GPU_context_active_get()) {
PyErr_SetString(PyExc_RuntimeError, "No active GPU context found");
return NULL;
}
PyObject *depth_attachment = NULL;
PyObject *color_attachements = NULL;
static const char *_keywords[] = {"depth_slot", "color_slots", NULL};
static _PyArg_Parser _parser = {"|$OO:GPUFrameBuffer.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser, &depth_attachment, &color_attachements)) {
return NULL;
}
/* Keep in sync with #GPU_FB_MAX_COLOR_ATTACHMENT.
* TODO: share the define. */
#define BPYGPU_FB_MAX_COLOR_ATTACHMENT 6
GPUAttachment config[BPYGPU_FB_MAX_COLOR_ATTACHMENT + 1];
if (!pygpu_framebuffer_new_parse_arg(depth_attachment, &config[0])) {
return NULL;
}
else if (config[0].tex && !GPU_texture_depth(config[0].tex)) {
PyErr_SetString(PyExc_ValueError, "Depth texture with incompatible format");
return NULL;
}
int color_attachements_len = 0;
if (color_attachements && color_attachements != Py_None) {
if (PySequence_Check(color_attachements)) {
color_attachements_len = PySequence_Size(color_attachements);
if (color_attachements_len > BPYGPU_FB_MAX_COLOR_ATTACHMENT) {
PyErr_SetString(
PyExc_AttributeError,
"too many attachements, max is " STRINGIFY(BPYGPU_FB_MAX_COLOR_ATTACHMENT));
return NULL;
}
      for (int i = 1; i <= color_attachements_len; i++) {
        PyObject *o = PySequence_GetItem(color_attachements, i - 1);
bool ok = pygpu_framebuffer_new_parse_arg(o, &config[i]);
Py_DECREF(o);
if (!ok) {
return NULL;
}
}
}
else {
if (!pygpu_framebuffer_new_parse_arg(color_attachements, &config[1])) {
return NULL;
}
color_attachements_len = 1;
}
}
GPUFrameBuffer *fb_python = GPU_framebuffer_create("fb_python");
GPU_framebuffer_config_array(fb_python, config, color_attachements_len + 1);
return BPyGPUFrameBuffer_CreatePyObject(fb_python);
}
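A minimal construction sketch from Python matching the slot parsing above (sizes and formats are illustrative):

import gpu

depth_tex = gpu.types.GPUTexture((512, 512), format='DEPTH_COMPONENT32F')
color_tex = gpu.types.GPUTexture((512, 512), format='RGBA8')

# A slot is either a GPUTexture, None, or a dict with 'texture' plus the
# optional 'layer' and 'mip' keywords.
fb = gpu.types.GPUFrameBuffer(
    depth_slot=depth_tex,
    color_slots={"texture": color_tex, "mip": 0},
)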
PyDoc_STRVAR(pygpu_framebuffer_is_bound_doc,
"Checks if this is the active framebuffer in the context.");
static PyObject *pygpu_framebuffer_is_bound(BPyGPUFrameBuffer *self, void *UNUSED(type))
{
PYGPU_FRAMEBUFFER_CHECK_OBJ(self);
return PyBool_FromLong(GPU_framebuffer_bound(self->fb));
}
PyDoc_STRVAR(pygpu_framebuffer_clear_doc,
".. method:: clear(color=None, depth=None, stencil=None)\n"
"\n"
" Fill color, depth and stencil textures with specific value.\n"
" Common values: color=(0.0, 0.0, 0.0, 1.0), depth=1.0, stencil=0.\n"
"\n"
" :arg color: float sequence each representing ``(r, g, b, a)``.\n"
" :type color: sequence of 3 or 4 floats\n"
" :arg depth: depth value.\n"
" :type depth: `float`\n"
" :arg stencil: stencil value.\n"
" :type stencil: `int`\n");
static PyObject *pygpu_framebuffer_clear(BPyGPUFrameBuffer *self, PyObject *args, PyObject *kwds)
{
PYGPU_FRAMEBUFFER_CHECK_OBJ(self);
  if (!GPU_framebuffer_bound(self->fb)) {
    PyErr_SetString(PyExc_RuntimeError, "Framebuffer must be bound before clearing");
    return NULL;
  }
PyObject *py_col = NULL;
PyObject *py_depth = NULL;
PyObject *py_stencil = NULL;
static const char *_keywords[] = {"color", "depth", "stencil", NULL};
static _PyArg_Parser _parser = {"|$OOO:clear", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &py_col, &py_depth, &py_stencil)) {
return NULL;
}
eGPUFrameBufferBits buffers = 0;
float col[4] = {0.0f, 0.0f, 0.0f, 1.0f};
float depth = 1.0f;
uint stencil = 0;
if (py_col && py_col != Py_None) {
if (mathutils_array_parse(col, 3, 4, py_col, "GPUFrameBuffer.clear(), invalid 'color' arg") ==
-1) {
return NULL;
}
buffers |= GPU_COLOR_BIT;
}
if (py_depth && py_depth != Py_None) {
depth = PyFloat_AsDouble(py_depth);
if (PyErr_Occurred()) {
return NULL;
}
buffers |= GPU_DEPTH_BIT;
}
if (py_stencil && py_stencil != Py_None) {
if ((stencil = PyC_Long_AsU32(py_stencil)) == (uint)-1) {
return NULL;
}
buffers |= GPU_STENCIL_BIT;
}
GPU_framebuffer_clear(self->fb, buffers, col, depth, stencil);
Py_RETURN_NONE;
}
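Usage sketch: only the buffers actually passed are cleared, so a color-only clear leaves depth and stencil untouched (continuing the `fb` example above):

with fb.bind():
    fb.clear(color=(0.0, 0.0, 0.0, 1.0), depth=1.0)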
PyDoc_STRVAR(pygpu_framebuffer_viewport_set_doc,
".. function:: viewport_set(x, y, xsize, ysize)\n"
"\n"
" Set the viewport for this framebuffer object.\n"
" Note: The viewport state is not saved upon framebuffer rebind.\n"
"\n"
" :param x, y: lower left corner of the viewport_set rectangle, in pixels.\n"
" :param xsize, ysize: width and height of the viewport_set.\n"
" :type x, y, xsize, ysize: `int`\n");
static PyObject *pygpu_framebuffer_viewport_set(BPyGPUFrameBuffer *self,
PyObject *args,
void *UNUSED(type))
{
  PYGPU_FRAMEBUFFER_CHECK_OBJ(self);
  int x, y, xsize, ysize;
if (!PyArg_ParseTuple(args, "iiii:viewport_set", &x, &y, &xsize, &ysize)) {
return NULL;
}
GPU_framebuffer_viewport_set(self->fb, x, y, xsize, ysize);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_framebuffer_viewport_get_doc,
".. function:: viewport_get()\n"
"\n"
" Returns position and dimension to current viewport.\n");
static PyObject *pygpu_framebuffer_viewport_get(BPyGPUFrameBuffer *self, void *UNUSED(type))
{
PYGPU_FRAMEBUFFER_CHECK_OBJ(self);
int viewport[4];
GPU_framebuffer_viewport_get(self->fb, viewport);
PyObject *ret = PyTuple_New(4);
PyTuple_SET_ITEMS(ret,
PyLong_FromLong(viewport[0]),
PyLong_FromLong(viewport[1]),
PyLong_FromLong(viewport[2]),
PyLong_FromLong(viewport[3]));
return ret;
}
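Usage sketch for the viewport pair, again on a bound framebuffer:

with fb.bind():
    fb.viewport_set(0, 0, 256, 256)        # draw into a 256x256 region
    x, y, xsize, ysize = fb.viewport_get()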
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
PyDoc_STRVAR(pygpu_framebuffer_free_doc,
".. method:: free()\n"
"\n"
" Free the framebuffer object.\n"
" The framebuffer will no longer be accessible.\n");
static PyObject *pygpu_framebuffer_free(BPyGPUFrameBuffer *self)
{
PYGPU_FRAMEBUFFER_CHECK_OBJ(self);
pygpu_framebuffer_free_if_possible(self->fb);
self->fb = NULL;
Py_RETURN_NONE;
}
#endif
static void BPyGPUFrameBuffer__tp_dealloc(BPyGPUFrameBuffer *self)
{
pygpu_framebuffer_free_if_possible(self->fb);
Py_TYPE(self)->tp_free((PyObject *)self);
}
static PyGetSetDef pygpu_framebuffer__tp_getseters[] = {
{"is_bound",
(getter)pygpu_framebuffer_is_bound,
(setter)NULL,
pygpu_framebuffer_is_bound_doc,
NULL},
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static struct PyMethodDef pygpu_framebuffer__tp_methods[] = {
{"bind", (PyCFunction)pygpu_framebuffer_bind, METH_NOARGS, pygpu_framebuffer_bind_doc},
{"clear",
(PyCFunction)pygpu_framebuffer_clear,
METH_VARARGS | METH_KEYWORDS,
pygpu_framebuffer_clear_doc},
{"viewport_set",
(PyCFunction)pygpu_framebuffer_viewport_set,
METH_NOARGS,
pygpu_framebuffer_viewport_set_doc},
{"viewport_get",
(PyCFunction)pygpu_framebuffer_viewport_get,
METH_VARARGS,
pygpu_framebuffer_viewport_get_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
{"free", (PyCFunction)pygpu_framebuffer_free, METH_NOARGS, pygpu_framebuffer_free_doc},
#endif
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(pygpu_framebuffer__tp_doc,
".. class:: GPUFrameBuffer(depth_slot=None, color_slots=None)\n"
"\n"
" This object gives access to framebuffer functionallities.\n"
" When a 'layer' is specified in a argument, a single layer of a 3D or array "
"texture is attached to the frame-buffer.\n"
" For cube map textures, layer is translated into a cube map face.\n"
"\n"
" :arg depth_slot: GPUTexture to attach or a `dict` containing keywords: "
"'texture', 'layer' and 'mip'.\n"
" :type depth_slot: :class:`gpu.types.GPUTexture`, `dict` or `Nonetype`\n"
" :arg color_slots: Tuple where each item can be a GPUTexture or a `dict` "
"containing keywords: 'texture', 'layer' and 'mip'.\n"
" :type color_slots: `tuple` or `Nonetype`\n");
PyTypeObject BPyGPUFrameBuffer_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBuffer",
.tp_basicsize = sizeof(BPyGPUFrameBuffer),
.tp_dealloc = (destructor)BPyGPUFrameBuffer__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = pygpu_framebuffer__tp_doc,
.tp_methods = pygpu_framebuffer__tp_methods,
.tp_getset = pygpu_framebuffer__tp_getseters,
.tp_new = pygpu_framebuffer__tp_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
PyObject *BPyGPUFrameBuffer_CreatePyObject(GPUFrameBuffer *fb)
{
BPyGPUFrameBuffer *self;
self = PyObject_New(BPyGPUFrameBuffer, &BPyGPUFrameBuffer_Type);
self->fb = fb;
return (PyObject *)self;
}
/** \} */
#undef PYGPU_FRAMEBUFFER_CHECK_OBJ

View File

@ -0,0 +1,33 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
#include "BLI_compiler_attrs.h"
extern PyTypeObject BPyGPUFrameBuffer_Type;
#define BPyGPUFrameBuffer_Check(v) (Py_TYPE(v) == &BPyGPUFrameBuffer_Type)
typedef struct BPyGPUFrameBuffer {
PyObject_HEAD struct GPUFrameBuffer *fb;
} BPyGPUFrameBuffer;
PyObject *BPyGPUFrameBuffer_CreatePyObject(struct GPUFrameBuffer *fb) ATTR_NONNULL(1);

View File

@ -30,6 +30,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_string.h"
#include "BLI_utildefines.h"
#include "BKE_global.h"
@ -54,14 +55,23 @@
#include "gpu_py_api.h"
#include "gpu_py_offscreen.h" /* own include */
/* Define the free method to avoid breakage. */
#define BPYGPU_USE_GPUOBJ_FREE_METHOD
/* -------------------------------------------------------------------- */
/** \name GPUOffScreen Common Utilities
* \{ */
static int pygpu_offscreen_valid_check(BPyGPUOffScreen *pygpu_ofs)
static int pygpu_offscreen_valid_check(BPyGPUOffScreen *py_ofs)
{
if (UNLIKELY(pygpu_ofs->ofs == NULL)) {
PyErr_SetString(PyExc_ReferenceError, "GPU offscreen was freed, no further access is valid");
if (UNLIKELY(py_ofs->ofs == NULL)) {
PyErr_SetString(PyExc_ReferenceError,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
"GPU offscreen was freed, no further access is valid"
#else
"GPU offscreen: internal error"
#endif
);
return -1;
}
return 0;
@ -77,11 +87,130 @@ static int pygpu_offscreen_valid_check(BPyGPUOffScreen *pygpu_ofs)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Stack (Context Manager)
*
* Safer alternative to ensure balanced push/pop calls.
*
* \{ */
typedef struct {
PyObject_HEAD /* required python macro */
BPyGPUOffScreen *py_offs;
int level;
bool is_explicitly_bound; /* Bound by "bind" method. */
} OffScreenStackContext;
static void pygpu_offscreen_stack_context__tp_dealloc(OffScreenStackContext *self)
{
Py_DECREF(self->py_offs);
PyObject_DEL(self);
}
static PyObject *pygpu_offscreen_stack_context_enter(OffScreenStackContext *self)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self->py_offs);
if (!self->is_explicitly_bound) {
if (self->level != -1) {
PyErr_SetString(PyExc_RuntimeError, "Already in use");
return NULL;
}
GPU_offscreen_bind(self->py_offs->ofs, true);
self->level = GPU_framebuffer_stack_level_get();
}
Py_RETURN_NONE;
}
static PyObject *pygpu_offscreen_stack_context_exit(OffScreenStackContext *self,
PyObject *UNUSED(args))
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self->py_offs);
if (self->level == -1) {
    PyErr_SetString(PyExc_RuntimeError, "Not yet in use");
return NULL;
}
const int level = GPU_framebuffer_stack_level_get();
if (level != self->level) {
    fprintf(stderr, "Level of bind mismatch, expected %d, got %d\n", self->level, level);
}
GPU_offscreen_unbind(self->py_offs->ofs, true);
Py_RETURN_NONE;
}
static PyMethodDef pygpu_offscreen_stack_context__tp_methods[] = {
{"__enter__", (PyCFunction)pygpu_offscreen_stack_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)pygpu_offscreen_stack_context_exit, METH_VARARGS},
{NULL},
};
static PyTypeObject PyGPUOffscreenStackContext_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUFrameBufferStackContext",
.tp_basicsize = sizeof(OffScreenStackContext),
.tp_dealloc = (destructor)pygpu_offscreen_stack_context__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_methods = pygpu_offscreen_stack_context__tp_methods,
};
PyDoc_STRVAR(pygpu_offscreen_bind_doc,
".. function:: bind()\n"
"\n"
" Context manager to ensure balanced bind calls, even in the case of an error.\n");
static PyObject *pygpu_offscreen_bind(BPyGPUOffScreen *self)
{
OffScreenStackContext *ret = PyObject_New(OffScreenStackContext,
&PyGPUOffscreenStackContext_Type);
ret->py_offs = self;
ret->level = -1;
ret->is_explicitly_bound = false;
Py_INCREF(self);
pygpu_offscreen_stack_context_enter(ret);
ret->is_explicitly_bound = true;
return (PyObject *)ret;
}
PyDoc_STRVAR(pygpu_offscreen_unbind_doc,
".. method:: unbind(restore=True)\n"
"\n"
" Unbind the offscreen object.\n"
"\n"
" :arg restore: Restore the OpenGL state, can only be used when the state has been "
"saved before.\n"
" :type restore: `bool`\n");
static PyObject *pygpu_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
bool restore = true;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
static const char *_keywords[] = {"restore", NULL};
static _PyArg_Parser _parser = {"|O&:unbind", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &restore)) {
return NULL;
}
GPU_offscreen_unbind(self->ofs, restore);
GPU_apply_state();
Py_RETURN_NONE;
}
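With this rework, bind() binds immediately and also returns a context manager, so both styles below stay balanced (sizes are illustrative):

import gpu

offscreen = gpu.types.GPUOffScreen(512, 512)

with offscreen.bind():
    pass  # drawing here renders into the offscreen color texture

offscreen.bind()  # explicit style: pair with unbind()
try:
    pass          # drawing
finally:
    offscreen.unbind(restore=True)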
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUOffscreen Type
* \{ */
static PyObject *pygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds)
static PyObject *pygpu_offscreen__tp_new(PyTypeObject *UNUSED(self),
PyObject *args,
PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
@ -90,7 +219,7 @@ static PyObject *pygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args,
char err_out[256];
static const char *_keywords[] = {"width", "height", NULL};
static _PyArg_Parser _parser = {"ii|i:GPUOffScreen.__new__", _keywords, 0};
static _PyArg_Parser _parser = {"ii:GPUOffScreen.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &width, &height)) {
return NULL;
}
@ -99,7 +228,7 @@ static PyObject *pygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args,
ofs = GPU_offscreen_create(width, height, true, false, err_out);
}
else {
strncpy(err_out, "No active GPU context found", 256);
STRNCPY(err_out, "No active GPU context found");
}
if (ofs == NULL) {
@ -135,61 +264,6 @@ static PyObject *pygpu_offscreen_color_texture_get(BPyGPUOffScreen *self, void *
return PyLong_FromLong(GPU_texture_opengl_bindcode(texture));
}
PyDoc_STRVAR(
pygpu_offscreen_bind_doc,
".. method:: bind(save=True)\n"
"\n"
" Bind the offscreen object.\n"
" To make sure that the offscreen gets unbind whether an exception occurs or not,\n"
" pack it into a `with` statement.\n"
"\n"
" :arg save: Save the current OpenGL state, so that it can be restored when unbinding.\n"
" :type save: `bool`\n");
static PyObject *pygpu_offscreen_bind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
bool save = true;
static const char *_keywords[] = {"save", NULL};
static _PyArg_Parser _parser = {"|O&:bind", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &save)) {
return NULL;
}
GPU_offscreen_bind(self->ofs, save);
GPU_apply_state();
self->is_saved = save;
Py_INCREF(self);
return (PyObject *)self;
}
PyDoc_STRVAR(pygpu_offscreen_unbind_doc,
".. method:: unbind(restore=True)\n"
"\n"
" Unbind the offscreen object.\n"
"\n"
" :arg restore: Restore the OpenGL state, can only be used when the state has been "
"saved before.\n"
" :type restore: `bool`\n");
static PyObject *pygpu_offscreen_unbind(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
bool restore = true;
BPY_GPU_OFFSCREEN_CHECK_OBJ(self);
static const char *_keywords[] = {"restore", NULL};
static _PyArg_Parser _parser = {"|O&:unbind", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, PyC_ParseBool, &restore)) {
return NULL;
}
GPU_offscreen_unbind(self->ofs, restore);
GPU_apply_state();
Py_RETURN_NONE;
}
PyDoc_STRVAR(
pygpu_offscreen_draw_view3d_doc,
".. method:: draw_view3d(scene, view_layer, view3d, region, view_matrix, projection_matrix)\n"
@ -210,8 +284,8 @@ PyDoc_STRVAR(
" :type projection_matrix: :class:`mathutils.Matrix`\n");
static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *args, PyObject *kwds)
{
MatrixObject *pygpu_mat_view, *pygpu_mat_projection;
PyObject *pygpu_scene, *pygpu_view_layer, *pygpu_region, *pygpu_view3d;
MatrixObject *py_mat_view, *py_mat_projection;
PyObject *py_scene, *py_view_layer, *py_region, *py_view3d;
struct Depsgraph *depsgraph;
struct Scene *scene;
@ -228,18 +302,18 @@ static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *ar
if (!_PyArg_ParseTupleAndKeywordsFast(args,
kwds,
&_parser,
&pygpu_scene,
&pygpu_view_layer,
&pygpu_view3d,
&pygpu_region,
&py_scene,
&py_view_layer,
&py_view3d,
&py_region,
Matrix_Parse4x4,
&pygpu_mat_view,
&py_mat_view,
Matrix_Parse4x4,
&pygpu_mat_projection) ||
(!(scene = PyC_RNA_AsPointer(pygpu_scene, "Scene")) ||
!(view_layer = PyC_RNA_AsPointer(pygpu_view_layer, "ViewLayer")) ||
!(v3d = PyC_RNA_AsPointer(pygpu_view3d, "SpaceView3D")) ||
!(region = PyC_RNA_AsPointer(pygpu_region, "Region")))) {
&py_mat_projection) ||
(!(scene = PyC_RNA_AsPointer(py_scene, "Scene")) ||
!(view_layer = PyC_RNA_AsPointer(py_view_layer, "ViewLayer")) ||
!(v3d = PyC_RNA_AsPointer(py_view3d, "SpaceView3D")) ||
!(region = PyC_RNA_AsPointer(py_region, "Region")))) {
return NULL;
}
@ -262,8 +336,8 @@ static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *ar
region,
GPU_offscreen_width(self->ofs),
GPU_offscreen_height(self->ofs),
(float(*)[4])pygpu_mat_view->matrix,
(float(*)[4])pygpu_mat_projection->matrix,
(float(*)[4])py_mat_view->matrix,
(float(*)[4])py_mat_projection->matrix,
true,
true,
"",
@ -281,6 +355,7 @@ static PyObject *pygpu_offscreen_draw_view3d(BPyGPUOffScreen *self, PyObject *ar
Py_RETURN_NONE;
}
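A usage sketch run from a 3D Viewport context; deriving the two matrices from the scene camera is just one common choice (everything besides draw_view3d is the regular bpy/mathutils API):

import bpy

scene = bpy.context.scene
depsgraph = bpy.context.evaluated_depsgraph_get()
view_matrix = scene.camera.matrix_world.inverted()
projection_matrix = scene.camera.data.calc_matrix_camera(depsgraph, x=512, y=512)

offscreen.draw_view3d(scene,
                      bpy.context.view_layer,
                      bpy.context.space_data,  # SpaceView3D
                      bpy.context.region,
                      view_matrix,
                      projection_matrix)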
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
PyDoc_STRVAR(pygpu_offscreen_free_doc,
".. method:: free()\n"
"\n"
@ -294,17 +369,7 @@ static PyObject *pygpu_offscreen_free(BPyGPUOffScreen *self)
self->ofs = NULL;
Py_RETURN_NONE;
}
static PyObject *pygpu_offscreen_bind_context_enter(BPyGPUOffScreen *UNUSED(self))
{
Py_RETURN_NONE;
}
static PyObject *pygpu_offscreen_bind_context_exit(BPyGPUOffScreen *self, PyObject *UNUSED(args))
{
GPU_offscreen_unbind(self->ofs, self->is_saved);
Py_RETURN_NONE;
}
#endif
static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self)
{
@ -314,7 +379,7 @@ static void BPyGPUOffScreen__tp_dealloc(BPyGPUOffScreen *self)
Py_TYPE(self)->tp_free((PyObject *)self);
}
static PyGetSetDef pygpu_offscreen_getseters[] = {
static PyGetSetDef pygpu_offscreen__tp_getseters[] = {
{"color_texture",
(getter)pygpu_offscreen_color_texture_get,
(setter)NULL,
@ -325,11 +390,8 @@ static PyGetSetDef pygpu_offscreen_getseters[] = {
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static struct PyMethodDef pygpu_offscreen_methods[] = {
{"bind",
(PyCFunction)pygpu_offscreen_bind,
METH_VARARGS | METH_KEYWORDS,
pygpu_offscreen_bind_doc},
static struct PyMethodDef pygpu_offscreen__tp_methods[] = {
{"bind", (PyCFunction)pygpu_offscreen_bind, METH_NOARGS, pygpu_offscreen_bind_doc},
{"unbind",
(PyCFunction)pygpu_offscreen_unbind,
METH_VARARGS | METH_KEYWORDS,
@ -338,13 +400,13 @@ static struct PyMethodDef pygpu_offscreen_methods[] = {
(PyCFunction)pygpu_offscreen_draw_view3d,
METH_VARARGS | METH_KEYWORDS,
pygpu_offscreen_draw_view3d_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
{"free", (PyCFunction)pygpu_offscreen_free, METH_NOARGS, pygpu_offscreen_free_doc},
{"__enter__", (PyCFunction)pygpu_offscreen_bind_context_enter, METH_NOARGS},
{"__exit__", (PyCFunction)pygpu_offscreen_bind_context_exit, METH_VARARGS},
#endif
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(pygpu_offscreen_doc,
PyDoc_STRVAR(pygpu_offscreen__tp_doc,
".. class:: GPUOffScreen(width, height)\n"
"\n"
" This object gives access to off screen buffers.\n"
@ -358,10 +420,10 @@ PyTypeObject BPyGPUOffScreen_Type = {
.tp_basicsize = sizeof(BPyGPUOffScreen),
.tp_dealloc = (destructor)BPyGPUOffScreen__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = pygpu_offscreen_doc,
.tp_methods = pygpu_offscreen_methods,
.tp_getset = pygpu_offscreen_getseters,
.tp_new = pygpu_offscreen_new,
.tp_doc = pygpu_offscreen__tp_doc,
.tp_methods = pygpu_offscreen__tp_methods,
.tp_getset = pygpu_offscreen__tp_getseters,
.tp_new = pygpu_offscreen__tp_new,
};
/** \} */

View File

@ -28,7 +28,6 @@ extern PyTypeObject BPyGPUOffScreen_Type;
typedef struct BPyGPUOffScreen {
PyObject_HEAD struct GPUOffScreen *ofs;
bool is_saved;
} BPyGPUOffScreen;
PyObject *BPyGPUOffScreen_CreatePyObject(struct GPUOffScreen *ofs) ATTR_NONNULL(1);

View File

@ -26,12 +26,16 @@
#include "BLI_utildefines.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "GPU_uniform_buffer.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "../mathutils/mathutils.h"
#include "gpu_py_api.h"
#include "gpu_py_texture.h"
#include "gpu_py_uniformbuffer.h"
#include "gpu_py_vertex_format.h"
#include "gpu_py_shader.h" /* own include */
@ -464,6 +468,64 @@ static PyObject *pygpu_shader_uniform_int(BPyGPUShader *self, PyObject *args)
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_shader_uniform_sampler_doc,
".. method:: uniform_sampler(name, texture)\n"
"\n"
" Specify the value of a texture uniform variable for the current GPUShader.\n"
"\n"
" :param name: name of the uniform variable whose texture is to be specified.\n"
" :type name: str\n"
" :param texture: Texture to attach.\n"
" :type texture: :class:`gpu.types.GPUTexture`\n");
static PyObject *pygpu_shader_uniform_sampler(BPyGPUShader *self, PyObject *args)
{
const char *name;
BPyGPUTexture *py_texture;
if (!PyArg_ParseTuple(
args, "sO!:GPUShader.uniform_sampler", &name, &BPyGPUTexture_Type, &py_texture)) {
return NULL;
}
int slot = GPU_shader_get_texture_binding(self->shader, name);
GPU_texture_bind(py_texture->tex, slot);
GPU_shader_uniform_1i(self->shader, name, slot);
Py_RETURN_NONE;
}
PyDoc_STRVAR(
pygpu_shader_uniform_block_doc,
".. method:: uniform_block(name, ubo)\n"
"\n"
" Specify the value of an uniform buffer object variable for the current GPUShader.\n"
"\n"
" :param name: name of the uniform variable whose UBO is to be specified.\n"
" :type name: str\n"
" :param ubo: Uniform Buffer to attach.\n"
" :type texture: :class:`gpu.types.GPUUniformBuf`\n");
static PyObject *pygpu_shader_uniform_block(BPyGPUShader *self, PyObject *args)
{
const char *name;
BPyGPUUniformBuf *py_ubo;
if (!PyArg_ParseTuple(
args, "sO!:GPUShader.uniform_block", &name, &BPyGPUUniformBuf_Type, &py_ubo)) {
return NULL;
}
int slot = GPU_shader_get_uniform_block(self->shader, name);
if (slot == -1) {
    PyErr_SetString(
        PyExc_BufferError,
        "GPUShader.uniform_block: uniform block not found, make sure the name is correct");
return NULL;
}
GPU_uniformbuf_bind(py_ubo->ubo, slot);
GPU_shader_uniform_1i(self->shader, name, slot);
Py_RETURN_NONE;
}
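A binding sketch for the two new methods; `shader`, the sampler name "image" and the block name "globals" are assumed to come from your own GLSL source:

shader.bind()
shader.uniform_sampler("image", texture)  # texture: gpu.types.GPUTexture
shader.uniform_block("globals", ubo)      # ubo: gpu.types.GPUUniformBuf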
PyDoc_STRVAR(
pygpu_shader_attr_from_name_doc,
".. method:: attr_from_name(name)\n"
@ -535,6 +597,14 @@ static struct PyMethodDef pygpu_shader__tp_methods[] = {
(PyCFunction)pygpu_shader_uniform_int,
METH_VARARGS,
pygpu_shader_uniform_int_doc},
{"uniform_sampler",
(PyCFunction)pygpu_shader_uniform_sampler,
METH_VARARGS,
pygpu_shader_uniform_sampler_doc},
{"uniform_block",
(PyCFunction)pygpu_shader_uniform_block,
METH_VARARGS,
pygpu_shader_uniform_block_doc},
{"attr_from_name",
(PyCFunction)pygpu_shader_attr_from_name,
METH_O,

View File

@ -0,0 +1,423 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the gpu.state API.
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "GPU_state.h"
#include "../generic/py_capi_utils.h"
#include "../generic/python_utildefines.h"
#include "gpu_py_state.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name Helper Functions
* \{ */
static const struct PyC_StringEnumItems pygpu_state_blend_items[] = {
{GPU_BLEND_NONE, "NONE"},
{GPU_BLEND_ALPHA, "ALPHA"},
{GPU_BLEND_ALPHA_PREMULT, "ALPHA_PREMULT"},
{GPU_BLEND_ADDITIVE, "ADDITIVE"},
{GPU_BLEND_ADDITIVE_PREMULT, "ADDITIVE_PREMULT"},
{GPU_BLEND_MULTIPLY, "MULTIPLY"},
{GPU_BLEND_SUBTRACT, "SUBTRACT"},
{GPU_BLEND_INVERT, "INVERT"},
/**
* These are quite special cases used inside the draw manager.
* {GPU_BLEND_OIT, "OIT"},
* {GPU_BLEND_BACKGROUND, "BACKGROUND"},
* {GPU_BLEND_CUSTOM, "CUSTOM"},
*/
{0, NULL},
};
static const struct PyC_StringEnumItems pygpu_state_depthtest_items[] = {
{GPU_DEPTH_NONE, "NONE"},
{GPU_DEPTH_ALWAYS, "ALWAYS"},
{GPU_DEPTH_LESS, "LESS"},
{GPU_DEPTH_LESS_EQUAL, "LESS_EQUAL"},
{GPU_DEPTH_EQUAL, "EQUAL"},
{GPU_DEPTH_GREATER, "GREATER"},
{GPU_DEPTH_GREATER_EQUAL, "GREATER_EQUAL"},
{0, NULL},
};
static const struct PyC_StringEnumItems pygpu_state_faceculling_items[] = {
{GPU_CULL_NONE, "NONE"},
{GPU_CULL_FRONT, "FRONT"},
{GPU_CULL_BACK, "BACK"},
{0, NULL},
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Manage Stack
* \{ */
PyDoc_STRVAR(pygpu_state_blend_set_doc,
".. function:: blend_set(mode)\n"
"\n"
" Defines the fixed pipeline blending equation.\n"
"\n"
" :param mode: One of these modes: {\n"
" `NONE`,\n"
" `ALPHA`,\n"
" `ALPHA_PREMULT`,\n"
" `ADDITIVE`,\n"
" `ADDITIVE_PREMULT`,\n"
" `MULTIPLY`,\n"
" `SUBTRACT`,\n"
" `INVERT`,\n"
//" `OIT`,\n"
//" `BACKGROUND`,\n"
//" `CUSTOM`,\n"
" :type mode: `str`\n");
static PyObject *pygpu_state_blend_set(PyObject *UNUSED(self), PyObject *value)
{
struct PyC_StringEnum pygpu_blend = {pygpu_state_blend_items};
if (!PyC_ParseStringEnum(value, &pygpu_blend)) {
return NULL;
}
GPU_blend(pygpu_blend.value_found);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_blend_get_doc,
".. function:: blend_get()\n"
"\n"
" Current blending equation.\n"
"\n");
static PyObject *pygpu_state_blend_get(PyObject *UNUSED(self))
{
eGPUBlend blend = GPU_blend_get();
return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_state_blend_items, blend));
}
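Usage sketch: saving and restoring the previous mode keeps draw handlers from leaking blend state into the rest of the UI:

import gpu

prev = gpu.state.blend_get()   # e.g. 'NONE'
gpu.state.blend_set('ALPHA')   # straight alpha blending
# ... draw calls ...
gpu.state.blend_set(prev)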
PyDoc_STRVAR(pygpu_state_depth_test_set_doc,
".. function:: depth_test_set(mode)\n"
"\n"
" Defines the depth_test equation.\n"
"\n"
" :param mode: One of these modes: {\n"
" `NONE`,\n"
" `ALWAYS`,\n"
" `LESS`,\n"
" `LESS_EQUAL`,\n"
" `EQUAL`,\n"
" `GREATER`,\n"
" `GREATER_EQUAL`,\n"
" :type mode: `str`\n");
static PyObject *pygpu_state_depth_test_set(PyObject *UNUSED(self), PyObject *value)
{
struct PyC_StringEnum pygpu_depth_test = {pygpu_state_depthtest_items};
if (!PyC_ParseStringEnum(value, &pygpu_depth_test)) {
return NULL;
}
GPU_depth_test(pygpu_depth_test.value_found);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_depth_test_get_doc,
".. function:: blend_depth_test_get()\n"
"\n"
" Current depth_test equation.\n"
"\n");
static PyObject *pygpu_state_depth_test_get(PyObject *UNUSED(self))
{
eGPUDepthTest test = GPU_depth_test_get();
return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_state_depthtest_items, test));
}
PyDoc_STRVAR(pygpu_state_depth_mask_set_doc,
".. function:: depth_mask_set(value)\n"
"\n"
" Write to depth component.\n"
"\n"
" :param value: True for writing to the depth component.\n"
" :type near: `bool`\n");
static PyObject *pygpu_state_depth_mask_set(PyObject *UNUSED(self), PyObject *value)
{
bool write_to_depth;
if (!PyC_ParseBool(value, &write_to_depth)) {
return NULL;
}
GPU_depth_mask(write_to_depth);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_depth_mask_get_doc,
".. function:: depth_mask_set_get()\n"
"\n"
" Writing status in the depth component.\n");
static PyObject *pygpu_state_depth_mask_get(PyObject *UNUSED(self))
{
return PyBool_FromLong(GPU_depth_mask_get());
}
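Usage sketch for the depth pair: the test mode controls comparisons while the mask controls writes to the depth buffer:

gpu.state.depth_test_set('LESS_EQUAL')
gpu.state.depth_mask_set(True)   # enable depth writes
# ... draw calls ...
gpu.state.depth_mask_get()       # -> True
gpu.state.depth_test_set('NONE')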
PyDoc_STRVAR(pygpu_state_viewport_set_doc,
".. function:: viewport_set(x, y, xsize, ysize)\n"
"\n"
" Specifies the viewport of the active framebuffer.\n"
" Note: The viewport state is not saved upon framebuffer rebind.\n"
"\n"
" :param x, y: lower left corner of the viewport_set rectangle, in pixels.\n"
" :param width, height: width and height of the viewport_set.\n"
" :type x, y, xsize, ysize: `int`\n");
static PyObject *pygpu_state_viewport_set(PyObject *UNUSED(self), PyObject *args)
{
int x, y, xsize, ysize;
if (!PyArg_ParseTuple(args, "iiii:viewport_set", &x, &y, &xsize, &ysize)) {
return NULL;
}
GPU_viewport(x, y, xsize, ysize);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_viewport_get_doc,
".. function:: viewport_get()\n"
"\n"
" Viewport of the active framebuffer.\n");
static PyObject *pygpu_state_viewport_get(PyObject *UNUSED(self), PyObject *UNUSED(args))
{
int viewport[4];
GPU_viewport_size_get_i(viewport);
PyObject *ret = PyTuple_New(4);
PyTuple_SET_ITEMS(ret,
PyLong_FromLong(viewport[0]),
PyLong_FromLong(viewport[1]),
PyLong_FromLong(viewport[2]),
PyLong_FromLong(viewport[3]));
return ret;
}
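Usage sketch: unlike the GPUFrameBuffer method, these act on whatever framebuffer is currently active:

gpu.state.viewport_set(0, 0, 128, 128)
x, y, xsize, ysize = gpu.state.viewport_get()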
PyDoc_STRVAR(pygpu_state_line_width_set_doc,
".. function:: line_width_set(width)\n"
"\n"
" Specify the width of rasterized lines.\n"
"\n"
" :param size: New width.\n"
" :type mode: `float`\n");
static PyObject *pygpu_state_line_width_set(PyObject *UNUSED(self), PyObject *value)
{
float width = (float)PyFloat_AsDouble(value);
if (PyErr_Occurred()) {
return NULL;
}
GPU_line_width(width);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_line_width_get_doc,
".. function:: line_width_get()\n"
"\n"
" Current width of rasterized lines.\n");
static PyObject *pygpu_state_line_width_get(PyObject *UNUSED(self))
{
float width = GPU_line_width_get();
return PyFloat_FromDouble((double)width);
}
PyDoc_STRVAR(pygpu_state_point_size_set_doc,
".. function:: point_size_set(size)\n"
"\n"
" Specify the diameter of rasterized points.\n"
"\n"
" :param size: New diameter.\n"
" :type mode: `float`\n");
static PyObject *pygpu_state_point_size_set(PyObject *UNUSED(self), PyObject *value)
{
float size = (float)PyFloat_AsDouble(value);
if (PyErr_Occurred()) {
return NULL;
}
GPU_point_size(size);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_color_mask_set_doc,
".. function:: color_mask_set(r, g, b, a)\n"
"\n"
" Enable or disable writing of frame buffer color components.\n"
"\n"
" :param r, g, b, a: components red, green, blue, and alpha.\n"
" :type r, g, b, a: `bool`\n");
static PyObject *pygpu_state_color_mask_set(PyObject *UNUSED(self), PyObject *args)
{
int r, g, b, a;
if (!PyArg_ParseTuple(args, "pppp:color_mask_set", &r, &g, &b, &a)) {
return NULL;
}
GPU_color_mask((bool)r, (bool)g, (bool)b, (bool)a);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_face_culling_set_doc,
".. function:: face_culling_set(culling)\n"
"\n"
" Specify whether none, front-facing or back-facing facets can be culled.\n"
"\n"
" :param mode: One of these modes: {\n"
" `NONE`,\n"
" `FRONT`,\n"
" `BACK`,\n"
" :type mode: `str`\n");
static PyObject *pygpu_state_face_culling_set(PyObject *UNUSED(self), PyObject *value)
{
struct PyC_StringEnum pygpu_faceculling = {pygpu_state_faceculling_items};
if (!PyC_ParseStringEnum(value, &pygpu_faceculling)) {
return NULL;
}
GPU_face_culling(pygpu_faceculling.value_found);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_front_facing_set_doc,
".. function:: front_facing_set(invert)\n"
"\n"
" Specifies the orientation of front-facing polygons.\n"
"\n"
" :param invert: True for clockwise polygons as front-facing.\n"
" :type mode: `bool`\n");
static PyObject *pygpu_state_front_facing_set(PyObject *UNUSED(self), PyObject *value)
{
bool invert;
if (!PyC_ParseBool(value, &invert)) {
return NULL;
}
GPU_front_facing(invert);
Py_RETURN_NONE;
}
PyDoc_STRVAR(pygpu_state_program_point_size_set_doc,
".. function:: use_program_point_size(enable)\n"
"\n"
" If enabled, the derived point size is taken from the (potentially clipped) "
"shader builtin gl_PointSize.\n"
"\n"
" :param enable: True for shader builtin gl_PointSize.\n"
" :type enable: `bool`\n");
static PyObject *pygpu_state_program_point_size_set(PyObject *UNUSED(self), PyObject *value)
{
bool enable;
if (!PyC_ParseBool(value, &enable)) {
return NULL;
}
GPU_program_point_size(enable);
Py_RETURN_NONE;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Module
* \{ */
static struct PyMethodDef pygpu_state__tp_methods[] = {
/* Manage Stack */
{"blend_set", (PyCFunction)pygpu_state_blend_set, METH_O, pygpu_state_blend_set_doc},
{"blend_get", (PyCFunction)pygpu_state_blend_get, METH_NOARGS, pygpu_state_blend_get_doc},
{"depth_test_set",
(PyCFunction)pygpu_state_depth_test_set,
METH_O,
pygpu_state_depth_test_set_doc},
{"depth_test_get",
(PyCFunction)pygpu_state_depth_test_get,
METH_NOARGS,
pygpu_state_depth_test_get_doc},
{"depth_mask_set",
(PyCFunction)pygpu_state_depth_mask_set,
METH_O,
pygpu_state_depth_mask_set_doc},
{"depth_mask_get",
(PyCFunction)pygpu_state_depth_mask_get,
METH_NOARGS,
pygpu_state_depth_mask_get_doc},
{"viewport_set",
(PyCFunction)pygpu_state_viewport_set,
METH_VARARGS,
pygpu_state_viewport_set_doc},
{"viewport_get",
(PyCFunction)pygpu_state_viewport_get,
METH_NOARGS,
pygpu_state_viewport_get_doc},
{"line_width_set",
(PyCFunction)pygpu_state_line_width_set,
METH_O,
pygpu_state_line_width_set_doc},
{"line_width_get",
(PyCFunction)pygpu_state_line_width_get,
METH_NOARGS,
pygpu_state_line_width_get_doc},
{"point_size_set",
(PyCFunction)pygpu_state_point_size_set,
METH_O,
pygpu_state_point_size_set_doc},
{"color_mask_set",
(PyCFunction)pygpu_state_color_mask_set,
METH_VARARGS,
pygpu_state_color_mask_set_doc},
{"face_culling_set",
(PyCFunction)pygpu_state_face_culling_set,
METH_O,
pygpu_state_face_culling_set_doc},
{"front_facing_set",
(PyCFunction)pygpu_state_front_facing_set,
METH_O,
pygpu_state_front_facing_set_doc},
{"program_point_size_set",
(PyCFunction)pygpu_state_program_point_size_set,
METH_O,
pygpu_state_program_point_size_set_doc},
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(pygpu_state__tp_doc, "This module provides access to the GPU state.");
static PyModuleDef pygpu_state_module_def = {
PyModuleDef_HEAD_INIT,
.m_name = "gpu.state",
.m_doc = pygpu_state__tp_doc,
.m_methods = pygpu_state__tp_methods,
};
PyObject *bpygpu_state_init(void)
{
PyObject *submodule;
submodule = PyModule_Create(&pygpu_state_module_def);
return submodule;
}
/** \} */

View File

@ -0,0 +1,23 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
PyObject *bpygpu_state_init(void);

View File

@ -0,0 +1,559 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the texture functionalities of the 'gpu' module
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "BLI_string.h"
#include "GPU_context.h"
#include "GPU_texture.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h"
#include "gpu_py_api.h"
#include "gpu_py_buffer.h"
#include "gpu_py_texture.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUTexture Common Utilities
* \{ */
static const struct PyC_StringEnumItems pygpu_textureformat_items[] = {
{GPU_RGBA8UI, "RGBA8UI"},
{GPU_RGBA8I, "RGBA8I"},
{GPU_RGBA8, "RGBA8"},
{GPU_RGBA32UI, "RGBA32UI"},
{GPU_RGBA32I, "RGBA32I"},
{GPU_RGBA32F, "RGBA32F"},
{GPU_RGBA16UI, "RGBA16UI"},
{GPU_RGBA16I, "RGBA16I"},
{GPU_RGBA16F, "RGBA16F"},
{GPU_RGBA16, "RGBA16"},
{GPU_RG8UI, "RG8UI"},
{GPU_RG8I, "RG8I"},
{GPU_RG8, "RG8"},
{GPU_RG32UI, "RG32UI"},
{GPU_RG32I, "RG32I"},
{GPU_RG32F, "RG32F"},
{GPU_RG16UI, "RG16UI"},
{GPU_RG16I, "RG16I"},
{GPU_RG16F, "RG16F"},
{GPU_RG16, "RG16"},
{GPU_R8UI, "R8UI"},
{GPU_R8I, "R8I"},
{GPU_R8, "R8"},
{GPU_R32UI, "R32UI"},
{GPU_R32I, "R32I"},
{GPU_R32F, "R32F"},
{GPU_R16UI, "R16UI"},
{GPU_R16I, "R16I"},
{GPU_R16F, "R16F"},
{GPU_R16, "R16"},
{GPU_R11F_G11F_B10F, "R11F_G11F_B10F"},
{GPU_DEPTH32F_STENCIL8, "DEPTH32F_STENCIL8"},
{GPU_DEPTH24_STENCIL8, "DEPTH24_STENCIL8"},
{GPU_SRGB8_A8, "SRGB8_A8"},
{GPU_RGB16F, "RGB16F"},
{GPU_SRGB8_A8_DXT1, "SRGB8_A8_DXT1"},
{GPU_SRGB8_A8_DXT3, "SRGB8_A8_DXT3"},
{GPU_SRGB8_A8_DXT5, "SRGB8_A8_DXT5"},
{GPU_RGBA8_DXT1, "RGBA8_DXT1"},
{GPU_RGBA8_DXT3, "RGBA8_DXT3"},
{GPU_RGBA8_DXT5, "RGBA8_DXT5"},
{GPU_DEPTH_COMPONENT32F, "DEPTH_COMPONENT32F"},
{GPU_DEPTH_COMPONENT24, "DEPTH_COMPONENT24"},
{GPU_DEPTH_COMPONENT16, "DEPTH_COMPONENT16"},
{0, NULL},
};
static int pygpu_texture_valid_check(BPyGPUTexture *bpygpu_tex)
{
if (UNLIKELY(bpygpu_tex->tex == NULL)) {
PyErr_SetString(PyExc_ReferenceError,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
"GPU texture was freed, no further access is valid"
#else
"GPU texture: internal error"
#endif
);
return -1;
}
return 0;
}
#define BPYGPU_TEXTURE_CHECK_OBJ(bpygpu) \
{ \
if (UNLIKELY(pygpu_texture_valid_check(bpygpu) == -1)) { \
return NULL; \
} \
} \
((void)0)
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUTexture Type
* \{ */
static PyObject *pygpu_texture__tp_new(PyTypeObject *UNUSED(self), PyObject *args, PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
PyObject *py_size;
int size[3] = {1, 1, 1};
int layers = 0;
int is_cubemap = false;
struct PyC_StringEnum pygpu_textureformat = {pygpu_textureformat_items, GPU_RGBA8};
BPyGPUBuffer *pybuffer_obj = NULL;
char err_out[256] = "unknown error. See console";
static const char *_keywords[] = {"size", "layers", "is_cubemap", "format", "data", NULL};
static _PyArg_Parser _parser = {"O|$ipO&O!:GPUTexture.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args,
kwds,
&_parser,
&py_size,
&layers,
&is_cubemap,
PyC_ParseStringEnum,
&pygpu_textureformat,
&BPyGPU_BufferType,
&pybuffer_obj)) {
return NULL;
}
int len = 1;
if (PySequence_Check(py_size)) {
len = PySequence_Size(py_size);
if (PyC_AsArray(size, py_size, len, &PyLong_Type, false, "GPUTexture.__new__") == -1) {
return NULL;
}
}
else if (PyLong_Check(py_size)) {
size[0] = PyLong_AsLong(py_size);
}
else {
PyErr_SetString(PyExc_ValueError, "GPUTexture.__new__: Expected an int or tuple as first arg");
return NULL;
}
void *data = NULL;
if (pybuffer_obj) {
if (pybuffer_obj->format != GPU_DATA_FLOAT) {
PyErr_SetString(PyExc_ValueError,
"GPUTexture.__new__: Only Buffer of format `FLOAT` is currently supported");
return NULL;
}
int component_len = GPU_texture_component_len(pygpu_textureformat.value_found);
int component_size_expected = sizeof(float);
size_t data_space_expected = (size_t)size[0] * size[1] * size[2] * max_ii(1, layers) *
component_len * component_size_expected;
if (is_cubemap) {
data_space_expected *= 6 * size[0];
}
if (bpygpu_Buffer_size(pybuffer_obj) < data_space_expected) {
PyErr_SetString(PyExc_ValueError, "GPUTexture.__new__: Buffer size smaller than requested");
return NULL;
}
data = pybuffer_obj->buf.as_void;
}
GPUTexture *tex = NULL;
if (is_cubemap && len != 1) {
STRNCPY(err_out,
"In cubemaps the same dimension represents height, width and depth. No tuple needed");
}
else if (size[0] < 1 || size[1] < 1 || size[2] < 1) {
STRNCPY(err_out, "Values less than 1 are not allowed in dimensions");
}
else if (layers && len == 3) {
STRNCPY(err_out, "3D textures have no layers");
}
else if (!GPU_context_active_get()) {
STRNCPY(err_out, "No active GPU context found");
}
else {
const char *name = "python_texture";
if (is_cubemap) {
if (layers) {
tex = GPU_texture_create_cube_array(
name, size[0], layers, 1, pygpu_textureformat.value_found, data);
}
else {
tex = GPU_texture_create_cube(name, size[0], 1, pygpu_textureformat.value_found, data);
}
}
else if (layers) {
if (len == 2) {
tex = GPU_texture_create_2d_array(
name, size[0], size[1], layers, 1, pygpu_textureformat.value_found, data);
}
else {
tex = GPU_texture_create_1d_array(
name, size[0], layers, 1, pygpu_textureformat.value_found, data);
}
}
else if (len == 3) {
      tex = GPU_texture_create_3d(name,
                                  size[0],
                                  size[1],
                                  size[2],
                                  1,
                                  pygpu_textureformat.value_found,
                                  GPU_DATA_FLOAT,
                                  data);
}
else if (len == 2) {
tex = GPU_texture_create_2d(
name, size[0], size[1], 1, pygpu_textureformat.value_found, data);
}
else {
tex = GPU_texture_create_1d(name, size[0], 1, pygpu_textureformat.value_found, data);
}
}
if (tex == NULL) {
PyErr_Format(PyExc_RuntimeError, "gpu.texture.new(...) failed with '%s'", err_out);
return NULL;
}
return BPyGPUTexture_CreatePyObject(tex);
}
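A construction sketch mirroring the checks above: only FLOAT Buffers are accepted for 'data', and the buffer must cover the full texel count (Buffer is assumed to be exposed as gpu.types.Buffer):

import gpu

buf = gpu.types.Buffer('FLOAT', (128, 128, 4))  # 128x128 RGBA float texels
tex = gpu.types.GPUTexture((128, 128), format='RGBA32F', data=buf)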
PyDoc_STRVAR(pygpu_texture_width_doc, "Width of the texture.\n\n:type: `int`");
static PyObject *pygpu_texture_width_get(BPyGPUTexture *self, void *UNUSED(type))
{
BPYGPU_TEXTURE_CHECK_OBJ(self);
return PyLong_FromLong(GPU_texture_width(self->tex));
}
PyDoc_STRVAR(pygpu_texture_height_doc, "Height of the texture.\n\n:type: `int`");
static PyObject *pygpu_texture_height_get(BPyGPUTexture *self, void *UNUSED(type))
{
BPYGPU_TEXTURE_CHECK_OBJ(self);
return PyLong_FromLong(GPU_texture_height(self->tex));
}
PyDoc_STRVAR(pygpu_texture_format_doc, "Format of the texture.\n\n:type: `str`");
static PyObject *pygpu_texture_format_get(BPyGPUTexture *self, void *UNUSED(type))
{
BPYGPU_TEXTURE_CHECK_OBJ(self);
eGPUTextureFormat format = GPU_texture_format(self->tex);
return PyUnicode_FromString(PyC_StringEnum_FindIDFromValue(pygpu_textureformat_items, format));
}
PyDoc_STRVAR(pygpu_texture_clear_doc,
".. method:: clear(format='FLOAT', value=(0.0, 0.0, 0.0, 1.0))\n"
"\n"
" Fill texture with specific value.\n"
"\n"
" :param format: One of these primitive types: {\n"
" `FLOAT`,\n"
" `INT`,\n"
" `UINT`,\n"
" `UBYTE`,\n"
" `UINT_24_8`,\n"
" `10_11_11_REV`,\n"
" :type type: `str`\n"
" :arg value: sequence each representing the value to fill.\n"
" :type value: sequence of 1, 2, 3 or 4 values\n");
static PyObject *pygpu_texture_clear(BPyGPUTexture *self, PyObject *args, PyObject *kwds)
{
BPYGPU_TEXTURE_CHECK_OBJ(self);
struct PyC_StringEnum pygpu_dataformat = {bpygpu_dataformat_items};
union {
int i[4];
float f[4];
char c[4];
} values;
PyObject *py_values;
static const char *_keywords[] = {"format", "value", NULL};
static _PyArg_Parser _parser = {"$O&O:clear", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(
args, kwds, &_parser, PyC_ParseStringEnum, &pygpu_dataformat, &py_values)) {
return NULL;
}
int shape = PySequence_Size(py_values);
if (shape == -1) {
return NULL;
}
if (shape > 4) {
PyErr_SetString(PyExc_AttributeError, "too many dimensions, max is 4");
return NULL;
}
if (shape != 1 &&
ELEM(pygpu_dataformat.value_found, GPU_DATA_UINT_24_8, GPU_DATA_10_11_11_REV)) {
PyErr_SetString(PyExc_AttributeError,
"`UINT_24_8` and `10_11_11_REV` only support single values");
return NULL;
}
memset(&values, 0, sizeof(values));
if (PyC_AsArray(&values,
py_values,
shape,
pygpu_dataformat.value_found == GPU_DATA_FLOAT ? &PyFloat_Type : &PyLong_Type,
false,
"clear") == -1) {
return NULL;
}
if (pygpu_dataformat.value_found == GPU_DATA_UBYTE) {
/* Convert to byte. */
values.c[0] = values.i[0];
values.c[1] = values.i[1];
values.c[2] = values.i[2];
values.c[3] = values.i[3];
}
GPU_texture_clear(self->tex, pygpu_dataformat.value_found, &values);
Py_RETURN_NONE;
}
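Usage sketch; the value length must respect the rules above (a single value for the packed formats, up to 4 otherwise):

tex.clear(format='FLOAT', value=(1.0, 0.0, 0.0, 1.0))  # opaque red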
PyDoc_STRVAR(pygpu_texture_read_doc,
".. method:: read()\n"
"\n"
" Creates a buffer with the value of all pixels.\n"
"\n");
static PyObject *pygpu_texture_read(BPyGPUTexture *self)
{
BPYGPU_TEXTURE_CHECK_OBJ(self);
/* #GPU_texture_read is restricted in combining 'data_format' with 'tex_format'.
* So choose data_format here. */
eGPUDataFormat best_data_format;
switch (GPU_texture_format(self->tex)) {
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
case GPU_DEPTH_COMPONENT32F:
best_data_format = GPU_DATA_FLOAT;
break;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
best_data_format = GPU_DATA_UINT_24_8;
break;
case GPU_R8UI:
case GPU_R16UI:
case GPU_RG16UI:
case GPU_R32UI:
best_data_format = GPU_DATA_UINT;
break;
case GPU_RG16I:
case GPU_R16I:
best_data_format = GPU_DATA_INT;
break;
case GPU_R8:
case GPU_RG8:
case GPU_RGBA8:
case GPU_RGBA8UI:
case GPU_SRGB8_A8:
best_data_format = GPU_DATA_UBYTE;
break;
case GPU_R11F_G11F_B10F:
best_data_format = GPU_DATA_10_11_11_REV;
break;
default:
best_data_format = GPU_DATA_FLOAT;
break;
}
void *buf = GPU_texture_read(self->tex, best_data_format, 0);
return (PyObject *)BPyGPU_Buffer_CreatePyObject(
best_data_format,
2,
(Py_ssize_t[2]){GPU_texture_height(self->tex), GPU_texture_width(self->tex)},
buf);
}
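Usage sketch; the returned Buffer is two-dimensional, shaped (height, width), in the data format chosen above:

data = tex.read()
print(data.dimensions)  # e.g. [128, 128]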
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
PyDoc_STRVAR(pygpu_texture_free_doc,
".. method:: free()\n"
"\n"
" Free the texture object.\n"
" The texture object will no longer be accessible.\n");
static PyObject *pygpu_texture_free(BPyGPUTexture *self)
{
BPYGPU_TEXTURE_CHECK_OBJ(self);
GPU_texture_free(self->tex);
self->tex = NULL;
Py_RETURN_NONE;
}
#endif
static void BPyGPUTexture__tp_dealloc(BPyGPUTexture *self)
{
if (self->tex) {
GPU_texture_free(self->tex);
}
Py_TYPE(self)->tp_free((PyObject *)self);
}
static PyGetSetDef pygpu_texture__tp_getseters[] = {
{"width", (getter)pygpu_texture_width_get, (setter)NULL, pygpu_texture_width_doc, NULL},
{"height", (getter)pygpu_texture_height_get, (setter)NULL, pygpu_texture_height_doc, NULL},
{"format", (getter)pygpu_texture_format_get, (setter)NULL, pygpu_texture_format_doc, NULL},
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static struct PyMethodDef pygpu_texture__tp_methods[] = {
{"clear",
(PyCFunction)pygpu_texture_clear,
METH_VARARGS | METH_KEYWORDS,
pygpu_texture_clear_doc},
{"read", (PyCFunction)pygpu_texture_read, METH_NOARGS, pygpu_texture_read_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
{"free", (PyCFunction)pygpu_texture_free, METH_NOARGS, pygpu_texture_free_doc},
#endif
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(
pygpu_texture__tp_doc,
".. class:: GPUTexture(size, layers=0, is_cubemap=False, format='RGBA8', data=None)\n"
"\n"
" This object gives access to off GPU textures.\n"
"\n"
" :arg size: Dimensions of the texture 1D, 2D, 3D or cubemap.\n"
" :type size: `tuple` or `int`\n"
" :arg layers: Number of layers in texture array or number of cubemaps in cubemap array\n"
" :type layers: `int`\n"
" :arg is_cubemap: Indicates the creation of a cubemap texture.\n"
" :type is_cubemap: `int`\n"
" :arg format: One of these primitive types: {\n"
" `RGBA8UI`,\n"
" `RGBA8I`,\n"
" `RGBA8`,\n"
" `RGBA32UI`,\n"
" `RGBA32I`,\n"
" `RGBA32F`,\n"
" `RGBA16UI`,\n"
" `RGBA16I`,\n"
" `RGBA16F`,\n"
" `RGBA16`,\n"
" `RG8UI`,\n"
" `RG8I`,\n"
" `RG8`,\n"
" `RG32UI`,\n"
" `RG32I`,\n"
" `RG32F`,\n"
" `RG16UI`,\n"
" `RG16I`,\n"
" `RG16F`,\n"
" `RG16`,\n"
" `R8UI`,\n"
" `R8I`,\n"
" `R8`,\n"
" `R32UI`,\n"
" `R32I`,\n"
" `R32F`,\n"
" `R16UI`,\n"
" `R16I`,\n"
" `R16F`,\n"
" `R16`,\n"
" `R11F_G11F_B10F`,\n"
" `DEPTH32F_STENCIL8`,\n"
" `DEPTH24_STENCIL8`,\n"
" `SRGB8_A8`,\n"
" `RGB16F`,\n"
" `SRGB8_A8_DXT1`,\n"
" `SRGB8_A8_DXT3`,\n"
" `SRGB8_A8_DXT5`,\n"
" `RGBA8_DXT1`,\n"
" `RGBA8_DXT3`,\n"
" `RGBA8_DXT5`,\n"
" `DEPTH_COMPONENT32F`,\n"
" `DEPTH_COMPONENT24`,\n"
" `DEPTH_COMPONENT16`,\n"
" :type format: `str`\n"
" :arg data: Buffer object to fill the texture.\n"
" :type data: `Buffer`\n");
PyTypeObject BPyGPUTexture_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUTexture",
.tp_basicsize = sizeof(BPyGPUTexture),
.tp_dealloc = (destructor)BPyGPUTexture__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = pygpu_texture__tp_doc,
.tp_methods = pygpu_texture__tp_methods,
.tp_getset = pygpu_texture__tp_getseters,
.tp_new = pygpu_texture__tp_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Local API
* \{ */
int bpygpu_ParseTexture(PyObject *o, void *p)
{
if (o == Py_None) {
*(GPUTexture **)p = NULL;
return 1;
}
if (!BPyGPUTexture_Check(o)) {
PyErr_Format(
PyExc_ValueError, "expected a texture or None object, got %s", Py_TYPE(o)->tp_name);
return 0;
}
if (UNLIKELY(pygpu_texture_valid_check((BPyGPUTexture *)o) == -1)) {
return 0;
}
*(GPUTexture **)p = ((BPyGPUTexture *)o)->tex;
return 1;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
PyObject *BPyGPUTexture_CreatePyObject(GPUTexture *tex)
{
BPyGPUTexture *self;
self = PyObject_New(BPyGPUTexture, &BPyGPUTexture_Type);
self->tex = tex;
return (PyObject *)self;
}
/** \} */
#undef BPYGPU_TEXTURE_CHECK_OBJ

View File

@ -0,0 +1,34 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
#include "BLI_compiler_attrs.h"
extern PyTypeObject BPyGPUTexture_Type;
#define BPyGPUTexture_Check(v) (Py_TYPE(v) == &BPyGPUTexture_Type)
typedef struct BPyGPUTexture {
PyObject_HEAD struct GPUTexture *tex;
} BPyGPUTexture;
int bpygpu_ParseTexture(PyObject *o, void *p);
PyObject *BPyGPUTexture_CreatePyObject(struct GPUTexture *tex) ATTR_NONNULL(1);

View File

@ -43,6 +43,9 @@ PyObject *bpygpu_types_init(void)
submodule = PyModule_Create(&pygpu_types_module_def);
if (PyType_Ready(&BPyGPU_BufferType) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUVertFormat_Type) < 0) {
return NULL;
}
@ -61,13 +64,26 @@ PyObject *bpygpu_types_init(void)
if (PyType_Ready(&BPyGPUShader_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUTexture_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUFrameBuffer_Type) < 0) {
return NULL;
}
if (PyType_Ready(&BPyGPUUniformBuf_Type) < 0) {
return NULL;
}
PyModule_AddType(submodule, &BPyGPU_BufferType);
PyModule_AddType(submodule, &BPyGPUVertFormat_Type);
PyModule_AddType(submodule, &BPyGPUVertBuf_Type);
PyModule_AddType(submodule, &BPyGPUIndexBuf_Type);
PyModule_AddType(submodule, &BPyGPUBatch_Type);
PyModule_AddType(submodule, &BPyGPUOffScreen_Type);
PyModule_AddType(submodule, &BPyGPUShader_Type);
PyModule_AddType(submodule, &BPyGPUTexture_Type);
PyModule_AddType(submodule, &BPyGPUFrameBuffer_Type);
PyModule_AddType(submodule, &BPyGPUUniformBuf_Type);
return submodule;
}

View File

@ -20,10 +20,15 @@
#pragma once
#include "gpu_py_buffer.h"
#include "gpu_py_batch.h"
#include "gpu_py_element.h"
#include "gpu_py_framebuffer.h"
#include "gpu_py_offscreen.h"
#include "gpu_py_shader.h"
#include "gpu_py_texture.h"
#include "gpu_py_uniformbuffer.h"
#include "gpu_py_vertex_buffer.h"
#include "gpu_py_vertex_format.h"

View File

@ -0,0 +1,195 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*
* This file defines the uniform buffer functionality of the 'gpu' module
*
* - Use ``bpygpu_`` for local API.
* - Use ``BPyGPU`` for public API.
*/
#include <Python.h>
#include "BLI_string.h"
#include "GPU_context.h"
#include "GPU_texture.h"
#include "GPU_uniform_buffer.h"
#include "../generic/py_capi_utils.h"
#include "gpu_py.h"
#include "gpu_py_api.h"
#include "gpu_py_buffer.h"
#include "gpu_py_uniformbuffer.h" /* own include */
/* -------------------------------------------------------------------- */
/** \name GPUUniformBuf Common Utilities
* \{ */
static int pygpu_uniformbuffer_valid_check(BPyGPUUniformBuf *bpygpu_ub)
{
if (UNLIKELY(bpygpu_ub->ubo == NULL)) {
PyErr_SetString(PyExc_ReferenceError,
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
"GPU uniform buffer was freed, no further access is valid");
#else
"GPU uniform buffer: internal error");
#endif
return -1;
}
return 0;
}
#define BPYGPU_UNIFORMBUF_CHECK_OBJ(bpygpu) \
{ \
if (UNLIKELY(pygpu_uniformbuffer_valid_check(bpygpu) == -1)) { \
return NULL; \
} \
} \
((void)0)
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUUniformBuf Type
* \{ */
static PyObject *pygpu_uniformbuffer__tp_new(PyTypeObject *UNUSED(self),
PyObject *args,
PyObject *kwds)
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
GPUUniformBuf *ubo = NULL;
BPyGPUBuffer *pybuffer_obj;
char err_out[256] = "unknown error. See console";
static const char *_keywords[] = {"data", NULL};
static _PyArg_Parser _parser = {"O!:GPUUniformBuf.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &BPyGPU_BufferType, &pybuffer_obj)) {
return NULL;
}
if (GPU_context_active_get()) {
ubo = GPU_uniformbuf_create_ex(
bpygpu_Buffer_size(pybuffer_obj), pybuffer_obj->buf.as_void, "python_uniformbuffer");
}
else {
STRNCPY(err_out, "No active GPU context found");
}
if (ubo == NULL) {
PyErr_Format(PyExc_RuntimeError, "GPUUniformBuf.__new__(...) failed with '%s'", err_out);
return NULL;
}
return BPyGPUUniformBuf_CreatePyObject(ubo);
}
PyDoc_STRVAR(pygpu_uniformbuffer_update_doc,
".. method::update(data)\n"
"\n"
" Update the data of the uniform buffer object.\n");
static PyObject *pygpu_uniformbuffer_update(BPyGPUUniformBuf *self, PyObject *obj)
{
BPYGPU_UNIFORMBUF_CHECK_OBJ(self);
if (!BPyGPU_Buffer_Check(obj)) {
/* Set an exception before returning NULL, otherwise the interpreter
* raises a SystemError about a NULL result without an error set. */
PyErr_SetString(PyExc_TypeError, "expected a gpu.types.Buffer object");
return NULL;
}
GPU_uniformbuf_update(self->ubo, ((BPyGPUBuffer *)obj)->buf.as_void);
Py_RETURN_NONE;
}
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
PyDoc_STRVAR(pygpu_uniformbuffer_free_doc,
".. method::free()\n"
"\n"
" Free the uniform buffer object.\n"
" The uniform buffer object will no longer be accessible.\n");
static PyObject *pygpu_uniformbuffer_free(BPyGPUUniformBuf *self)
{
BPYGPU_UNIFORMBUF_CHECK_OBJ(self);
GPU_uniformbuf_free(self->ubo);
self->ubo = NULL;
Py_RETURN_NONE;
}
#endif
static void BPyGPUUniformBuf__tp_dealloc(BPyGPUUniformBuf *self)
{
if (self->ubo) {
GPU_uniformbuf_free(self->ubo);
}
Py_TYPE(self)->tp_free((PyObject *)self);
}
static PyGetSetDef pygpu_uniformbuffer__tp_getseters[] = {
{NULL, NULL, NULL, NULL, NULL} /* Sentinel */
};
static struct PyMethodDef pygpu_uniformbuffer__tp_methods[] = {
{"update", (PyCFunction)pygpu_uniformbuffer_update, METH_O, pygpu_uniformbuffer_update_doc},
#ifdef BPYGPU_USE_GPUOBJ_FREE_METHOD
{"free", (PyCFunction)pygpu_uniformbuffer_free, METH_NOARGS, pygpu_uniformbuffer_free_doc},
#endif
{NULL, NULL, 0, NULL},
};
PyDoc_STRVAR(pygpu_uniformbuffer__tp_doc,
".. class:: GPUUniformBuf(data)\n"
"\n"
" This object gives access to off uniform buffers.\n"
"\n"
" :arg data: Buffer object.\n"
" :type data: `Buffer`\n");
PyTypeObject BPyGPUUniformBuf_Type = {
PyVarObject_HEAD_INIT(NULL, 0).tp_name = "GPUUniformBuf",
.tp_basicsize = sizeof(BPyGPUUniformBuf),
.tp_dealloc = (destructor)BPyGPUUniformBuf__tp_dealloc,
.tp_flags = Py_TPFLAGS_DEFAULT,
.tp_doc = pygpu_uniformbuffer__tp_doc,
.tp_methods = pygpu_uniformbuffer__tp_methods,
.tp_getset = pygpu_uniformbuffer__tp_getseters,
.tp_new = pygpu_uniformbuffer__tp_new,
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public API
* \{ */
PyObject *BPyGPUUniformBuf_CreatePyObject(GPUUniformBuf *ubo)
{
BPyGPUUniformBuf *self;
self = PyObject_New(BPyGPUUniformBuf, &BPyGPUUniformBuf_Type);
self->ubo = ubo;
return (PyObject *)self;
}
/** \} */
#undef BPYGPU_UNIFORMBUF_CHECK_OBJ
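Together, __new__, update() and the dealloc hook give scripts a full create/update/release cycle. A minimal sketch, assuming the gpu.types.Buffer constructor signature and item assignment from the same Python GPU API (neither is defined in this file):

import gpu
from gpu.types import Buffer, GPUUniformBuf

# Four floats, e.g. an RGBA color laid out per std140 rules.
color = Buffer('FLOAT', 4, [1.0, 0.5, 0.25, 1.0])
ubo = GPUUniformBuf(color)

# Mutate the CPU-side buffer, then re-upload it to the GPU.
color[0] = 0.0
ubo.update(color)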

View File

@@ -0,0 +1,33 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/** \file
* \ingroup bpygpu
*/
#pragma once
#include "BLI_compiler_attrs.h"
extern PyTypeObject BPyGPUUniformBuf_Type;
#define BPyGPUUniformBuf_Check(v) (Py_TYPE(v) == &BPyGPUUniformBuf_Type)
typedef struct BPyGPUUniformBuf {
PyObject_HEAD struct GPUUniformBuf *ubo;
} BPyGPUUniformBuf;
PyObject *BPyGPUUniformBuf_CreatePyObject(struct GPUUniformBuf *ubo) ATTR_NONNULL(1);

View File

@@ -2026,7 +2026,7 @@ uint *WM_window_pixels_read(wmWindowManager *wm, wmWindow *win, int r_size[2])
const uint rect_len = r_size[0] * r_size[1];
uint *rect = MEM_mallocN(sizeof(*rect) * rect_len, __func__);
GPU_frontbuffer_read_pixels(0, 0, r_size[0], r_size[1], 4, GPU_DATA_UNSIGNED_BYTE, rect);
GPU_frontbuffer_read_pixels(0, 0, r_size[0], r_size[1], 4, GPU_DATA_UBYTE, rect);
if (setup_context) {
if (wm->windrawable) {