I am trying to implement deferred rendering, but I am stuck on calculating the minimum/maximum depth for each tile. For this I use a compute shader. This is the full code:
layout (location = 0, rgba32f) uniform image2D u_texture;
layout (location = 1, rgba32f) uniform image2D depthtex;

layout (local_size_x = TILE_SIZE, local_size_y = TILE_SIZE) in;

shared uint min_depth;
shared uint max_depth;

void main(void) {
    ivec2 pos  = ivec2(gl_GlobalInvocationID.xy); // pixel this invocation reads
    ivec2 grid = ivec2(gl_WorkGroupID.xy);        // tile this work group writes

    // the first invocation of the work group initializes the shared min/max
    if (gl_LocalInvocationIndex == 0) {
        min_depth = 0xFFFFFFFFu;
        max_depth = 0u;
    }
    barrier(); // make the initialization visible before the atomics run

    // quantize the [0,1] depth to the uint range so atomicMin/atomicMax can be used
    float d = imageLoad(depthtex, pos).x;
    uint depth = uint(d * 0xFFFFFFFFu);
    atomicMin(min_depth, depth);
    atomicMax(max_depth, depth);
    barrier(); // wait until every invocation has contributed its depth

    // write the tile's minimum depth, remapped back to [0,1], to the output image
    vec4 color = vec4(float(min_depth) / float(0xFFFFFFFFu));
    imageStore(u_texture, grid, color);
}
When I try to run the program, shader compilation gets stuck in what looks like an infinite loop. Any idea what causes this and how to fix it?
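For context, the host side is the usual compile/link/dispatch path, roughly like this (simplified sketch; the texture handles, source string, and sizes are placeholders and error checking is omitted):

GLuint shader = glCreateShader(GL_COMPUTE_SHADER);
const GLchar* src = computeSource.c_str(); // the GLSL source shown above
glShaderSource(shader, 1, &src, NULL);
glCompileShader(shader);

_object = glCreateProgram();
glAttachShader(_object, shader);
glLinkProgram(_object); // the link-status check from the EDIT below runs right after this

// bind the output and depth textures to image units 0 and 1
glBindImageTexture(0, outputTex, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
glBindImageTexture(1, depthTex, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RGBA32F);

glUseProgram(_object);
glUniform1i(0, 0); // image uniform at location 0 (u_texture) -> image unit 0
glUniform1i(1, 1); // image uniform at location 1 (depthtex)  -> image unit 1

// one work group per tile
glDispatchCompute(width / TILE_SIZE, height / TILE_SIZE, 1);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);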
EDIT: I found out that the problem is caused by the glGetProgramiv() call when I check the link status of the compute program with this code:
GLint status;
glGetProgramiv(_object, GL_LINK_STATUS, &status);
if (status == GL_FALSE) {
    std::string msg("Program linking failure: ");

    GLint infoLogLength;
    glGetProgramiv(_object, GL_INFO_LOG_LENGTH, &infoLogLength);
    char* strInfoLog = new char[infoLogLength + 1];
    glGetProgramInfoLog(_object, infoLogLength, NULL, strInfoLog);
    msg += strInfoLog;
    delete[] strInfoLog;

    glDeleteProgram(_object); _object = 0;
    throw std::runtime_error(msg);
}
However, if I remove atomicMin() from my shader, everything works fine.
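In case someone wants to test this in isolation, a stripped-down compute shader that exercises only the shared-memory atomicMin would look roughly like this (just a sketch with TILE_SIZE replaced by a fixed 16; I have not verified this reduced version against my driver):

#version 430
layout (local_size_x = 16, local_size_y = 16) in;

shared uint min_depth;

void main(void) {
    // same pattern as above: one invocation initializes, then every invocation does the atomic
    if (gl_LocalInvocationIndex == 0u) {
        min_depth = 0xFFFFFFFFu;
    }
    barrier();
    atomicMin(min_depth, gl_LocalInvocationIndex);
}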