Another attempt for T40981: clipping border does not work with GLSL on ATI cards.

This is essentially a test to see whether this can be enabled on ATI cards.
According to various sources, newer ATI cards supporting OpenGL 3.0
(GLSL 1.30) support gl_ClipDistance in shaders, which is the forward
compatible way to do custom clipping.
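
For reference, the per-plane test behind gl_ClipDistance is just a dot
product between the eye-space vertex position and the plane equation; the
vertex is kept where the interpolated distance is >= 0. A minimal CPU
sketch of the same math (illustrative values, not Blender code):

#include <stdio.h>

/* same math as gl_ClipDistance[i] = dot(co, plane); kept when >= 0 */
static double clip_distance(const double co[4], const double plane[4])
{
	return co[0] * plane[0] + co[1] * plane[1] +
	       co[2] * plane[2] + co[3] * plane[3];
}

int main(void)
{
	const double plane[4] = {0.0, 0.0, -1.0, 10.0};  /* keeps z <= 10 */
	const double inside[4] = {0.0, 0.0, -5.0, 1.0};
	const double outside[4] = {0.0, 0.0, 20.0, 1.0};

	printf("%g %g\n", clip_distance(inside, plane),  /*  15 -> kept */
	       clip_distance(outside, plane));           /* -10 -> clipped */
	return 0;
}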

This fix binds 6 additional varying variables on ATI cards, which may
cause some shaders to fail to compile after running out of varyings, or
may degrade performance. Also, I do not have an ATI card handy to test.

With those caveats in mind, this commit may well be reverted later.

There are usually 4 clipping planes (6 are used for cube clipping), but
making shaders depend on viewport state is really bad and would force
recompilation, so I took the worst case of 6 here to avoid that.
Hopefully the driver does some optimization there.
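
For orientation, the fixed-function planes that gl_ClipPlane[] reads from
are set up on the host side roughly as below; GL guarantees at least 6
clip planes (the minimum for GL_MAX_CLIP_PLANES), which is where the worst
case comes from. An illustrative sketch with a made-up helper name, not
code from this patch:

#include <GL/glew.h>

/* hypothetical helper: set 'used' planes, disable the rest up to 6 */
static void setup_clip_planes(const GLdouble planes[6][4], int used)
{
	int i;

	for (i = 0; i < used; i++) {
		/* equations are transformed by the inverse modelview at call time */
		glClipPlane(GL_CLIP_PLANE0 + i, planes[i]);
		glEnable(GL_CLIP_PLANE0 + i);
	}

	/* disabled planes do not clip, but the shader's worst-case loop still
	 * computes a distance for them, hence the hoped-for driver optimization */
	for (; i < 6; i++)
		glDisable(GL_CLIP_PLANE0 + i);
}
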
Antonis Ryakiotakis 2014-07-11 19:17:29 +03:00
parent 1aabbf8476
commit 4097f9c3c4
3 changed files with 60 additions and 21 deletions

@@ -58,6 +58,8 @@
 # include "BLI_winstuff.h"
 #endif
 
+#define MAX_DEFINE_LENGTH 72
+
 /* Extensions support */
 
 /* extensions used:
@@ -1187,29 +1189,44 @@ static void shader_print_errors(const char *task, char *log, const char *code)
 	fprintf(stderr, "%s\n", log);
 }
 
-static const char *gpu_shader_standard_extensions(void)
+static const char *gpu_shader_version(void)
 {
-	/* need this extensions for high quality bump mapping */
-	if (GPU_bicubic_bump_support()) {
-		return "#version 130\n"
-		       "#extension GL_ARB_texture_query_lod: enable\n"
-		       "#define BUMP_BICUBIC\n";
+	/* turn on glsl 1.30 for bicubic bump mapping and ATI clipping support */
+	if (GLEW_VERSION_3_0 &&
+	    (GPU_bicubic_bump_support() || GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY)))
+	{
+		return "#version 130\n";
 	}
 
 	return "";
 }
 
-static const char *gpu_shader_standard_defines(void)
+static const char *gpu_shader_standard_extensions(void)
+{
+	/* need this extension for high quality bump mapping */
+	if (GPU_bicubic_bump_support())
+		return "#extension GL_ARB_texture_query_lod: enable\n";
+
+	return "";
+}
+
+static void gpu_shader_standard_defines(char defines[MAX_DEFINE_LENGTH])
 {
 	/* some useful defines to detect GPU type */
-	if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY))
-		return "#define GPU_ATI\n";
+	if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY)) {
+		strcat(defines, "#define GPU_ATI\n");
+		if (GLEW_VERSION_3_0)
+			strcat(defines, "#define CLIP_WORKAROUND\n");
+	}
 	else if (GPU_type_matches(GPU_DEVICE_NVIDIA, GPU_OS_ANY, GPU_DRIVER_ANY))
-		return "#define GPU_NVIDIA\n";
+		strcat(defines, "#define GPU_NVIDIA\n");
 	else if (GPU_type_matches(GPU_DEVICE_INTEL, GPU_OS_ANY, GPU_DRIVER_ANY))
-		return "#define GPU_INTEL\n";
-	return "";
+		strcat(defines, "#define GPU_INTEL\n");
+
+	if (GPU_bicubic_bump_support())
+		strcat(defines, "#define BUMP_BICUBIC\n");
+
+	return;
 }
 
 GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const char *libcode, const char *defines)
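
As a sanity check on MAX_DEFINE_LENGTH from the first hunk (my arithmetic,
not part of the patch), the longest string the defines function can now
build is:

/* worst case written by gpu_shader_standard_defines(), with the
 * trailing \n on CLIP_WORKAROUND:
 *   "#define GPU_ATI\n"          16 bytes
 *   "#define CLIP_WORKAROUND\n"  24 bytes
 *   "#define BUMP_BICUBIC\n"     21 bytes
 *   terminating NUL               1 byte
 *   total                        62 bytes, within MAX_DEFINE_LENGTH (72)
 */
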
@@ -1218,6 +1235,7 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const char *libcode, const char *defines)
 	GLcharARB log[5000];
 	GLsizei length = 0;
 	GPUShader *shader;
+	char standard_defines[MAX_DEFINE_LENGTH] = "";
 
 	if (!GLEW_ARB_vertex_shader || !GLEW_ARB_fragment_shader)
 		return NULL;
@@ -1239,12 +1257,16 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const char *libcode, const char *defines)
 		return NULL;
 	}
 
+	gpu_shader_standard_defines(standard_defines);
+
 	if (vertexcode) {
-		const char *source[4];
+		const char *source[5];
+		/* custom limit, may be too small, beware */
 		int num_source = 0;
 
+		source[num_source++] = gpu_shader_version();
 		source[num_source++] = gpu_shader_standard_extensions();
-		source[num_source++] = gpu_shader_standard_defines();
+		source[num_source++] = standard_defines;
 
 		if (defines) source[num_source++] = defines;
 		if (vertexcode) source[num_source++] = vertexcode;
@@ -1265,11 +1287,12 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const char *libcode, const char *defines)
 	}
 
 	if (fragcode) {
-		const char *source[5];
+		const char *source[6];
 		int num_source = 0;
 
+		source[num_source++] = gpu_shader_version();
 		source[num_source++] = gpu_shader_standard_extensions();
-		source[num_source++] = gpu_shader_standard_defines();
+		source[num_source++] = standard_defines;
 
 		if (defines) source[num_source++] = defines;
 		if (libcode) source[num_source++] = libcode;
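
The hunks above only fill the source[] array; gpu_shader_version() is kept
as a separate first element because GLSL requires #version to precede
everything else in the concatenated source. For context, the strings are
ultimately handed to GL in one call, roughly as sketched here (the actual
call sits outside the lines shown, and the shader->vertex field name is an
assumption):

/* what an ATI + OpenGL 3.0 + bicubic-bump vertex shader source starts
 * with once glShaderSourceARB concatenates the array in order:
 *
 *   #version 130
 *   #extension GL_ARB_texture_query_lod: enable
 *   #define GPU_ATI
 *   #define CLIP_WORKAROUND
 *   #define BUMP_BICUBIC
 *   ...caller defines, then the shader body...
 */
glShaderSourceARB(shader->vertex, num_source, source, NULL);
glCompileShaderARB(shader->vertex);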


@@ -15,6 +15,10 @@ varying vec4 varying_vertex_color;
 varying vec2 varying_texture_coord;
 #endif
 
+#ifdef CLIP_WORKAROUND
+varying float gl_ClipDistance[6];
+#endif
+
 void main()
 {
 	vec4 co = gl_ModelViewMatrix * gl_Vertex;
@@ -29,10 +33,14 @@ void main()
 	gl_Position = gl_ProjectionMatrix * co;
 
-#ifndef GPU_ATI
+#ifdef CLIP_WORKAROUND
+	int i;
+	for (i = 0; i < 6; i++)
+		gl_ClipDistance[i] = dot(co, gl_ClipPlane[i]);
+#else
 	// Setting gl_ClipVertex is necessary to get glClipPlane working on NVIDIA
 	// graphic cards, while on ATI it can cause a software fallback.
-	gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;
+	gl_ClipVertex = co;
 #endif
 
 #ifdef USE_COLOR


@@ -2,6 +2,10 @@
 varying vec3 varposition;
 varying vec3 varnormal;
 
+#ifdef CLIP_WORKAROUND
+varying float gl_ClipDistance[6];
+#endif
+
 void main()
 {
 	vec4 co = gl_ModelViewMatrix * gl_Vertex;
@@ -10,9 +14,13 @@ void main()
 	varnormal = normalize(gl_NormalMatrix * gl_Normal);
 
 	gl_Position = gl_ProjectionMatrix * co;
 
-#ifndef GPU_ATI
+#ifdef CLIP_WORKAROUND
+	int i;
+	for (i = 0; i < 6; i++)
+		gl_ClipDistance[i] = dot(co, gl_ClipPlane[i]);
+#else
 	// Setting gl_ClipVertex is necessary to get glClipPlane working on NVIDIA
 	// graphic cards, while on ATI it can cause a software fallback.
-	gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;
+	gl_ClipVertex = co;
 #endif