Hello world and thanks for taking the time to read this!
I am writing a program in GTK2 / 3 + OpenGL, I have two versions of the running program:
- (a) GTK + 2 + GtkGlext Extention → works great!
- (b) GTK + 3 + LibX11 → works great!
Everything looks great, except that rendering in (a) is much faster than rendering in (b) ... and I didn't understand why. Here are some examples of pieces of code used to create an OpenGL context:
(a)
/* GTK2 + GtkGLExt setup: request an RGBA, depth-buffered, double-buffered visual. */
GdkGLConfig * glconfig = gdk_gl_config_new_by_mode (GDK_GL_MODE_RGBA | GDK_GL_MODE_DEPTH | GDK_GL_MODE_DOUBLE);
GtkWidget * drawing_area = gtk_drawing_area_new ();
/* Attach the GL capability to the drawing area; GtkGLExt manages the context/drawable. */
gtk_widget_set_gl_capability (drawing_area, glconfig, NULL, TRUE, GDK_GL_RGBA_TYPE);
/* GTK2 uses "expose-event" (GTK3 replaced it with "draw"). */
g_signal_connect (G_OBJECT (drawing_area), "expose-event", G_CALLBACK (on_expose), data);
/*
 * GTK2 expose handler: make the widget's GL context current, render,
 * swap buffers, and release the context.
 *
 * Returns TRUE to stop further handling of the event.
 *
 * Fix: the original fetched local `glcontext`/`gldrawable` but then called
 * swap_buffers/gl_end through an undeclared `view -> gldrawable`; use the
 * locals consistently.
 */
gboolean on_expose (GtkWidget * widg, GdkEvent * event, gpointer data)
{
GdkGLContext * glcontext = gtk_widget_get_gl_context (widg);
GdkGLDrawable * gldrawable = gtk_widget_get_gl_drawable (widg);
/* gl_begin makes the context current; only render if that succeeded. */
if (gdk_gl_drawable_gl_begin (gldrawable, glcontext))
{
/* ... rendering commands go here ... */
gdk_gl_drawable_swap_buffers (gldrawable);
gdk_gl_drawable_gl_end (gldrawable);
}
return TRUE;
}
(b)
/* GTK3 + raw GLX setup. */
GtkWidget * drawing_area = gtk_drawing_area_new ();
/* Disable GTK's own double buffering: GLX swaps the buffers itself.
   NOTE(review): deprecated in GTK 3.14+; kept as the author wrote it. */
gtk_widget_set_double_buffered (drawing_area, FALSE);
/* Context creation must wait until the widget has an X window ("realize"). */
g_signal_connect (G_OBJECT (drawing_area), "realize", G_CALLBACK(on_realize), data);
/* GTK3 uses the "draw" signal (cairo_t *) instead of GTK2's "expose-event". */
g_signal_connect (G_OBJECT (drawing_area), "draw", G_CALLBACK(on_expose), data);
/* Global GLX context shared between realize and draw handlers. */
GLXContext glcontext;
/*
 * "realize" handler: choose a GLX visual matching the GTK2 config
 * (double-buffered RGBA with a depth buffer) and create the GL context.
 *
 * Fixes: `xfree` is not an Xlib symbol (the function is `XFree`; C is
 * case-sensitive, so the original would fail to link); also guard against
 * glXChooseVisual returning NULL before dereferencing it.
 */
G_MODULE_EXPORT void on_realize (GtkWidget * widg, gpointer data)
{
GdkWindow * xwin = gtk_widget_get_window (widg);
GLint attr_list[] = {GLX_DOUBLEBUFFER,
GLX_RGBA,
GLX_DEPTH_SIZE, 16,
GLX_RED_SIZE, 8,
GLX_GREEN_SIZE, 8,
GLX_BLUE_SIZE, 8,
None};
XVisualInfo * visualinfo = glXChooseVisual (GDK_WINDOW_XDISPLAY (xwin), gdk_screen_get_number (gdk_window_get_screen (xwin)), attr_list);
if (visualinfo == NULL)
{
g_warning ("on_realize: no GLX visual matches the requested attributes");
return;
}
/* TRUE requests a direct (hardware) rendering context. */
glcontext = glXCreateContext (GDK_WINDOW_XDISPLAY (xwin), visualinfo, NULL, TRUE);
if (glcontext == NULL)
{
g_warning ("on_realize: glXCreateContext failed");
}
XFree (visualinfo);
}
/*
 * GTK3 "draw" handler: bind the GLX context to the widget's X window,
 * render, and swap buffers.
 *
 * Returns TRUE to stop further handling of the signal.
 *
 * Fix: the original referenced `xwin` inside the body while the local is
 * named `win` — that does not compile; use `win` consistently.
 *
 * NOTE(review): glXMakeCurrent on every frame is a heavyweight call on some
 * drivers; if the context/drawable never change, binding once (or only when
 * they change) may close the performance gap with the GTK2 version.
 */
G_MODULE_EXPORT gboolean on_expose (GtkWidget * widg, cairo_t * cr, gpointer data)
{
GdkWindow * win = gtk_widget_get_window (widg);
if (glXMakeCurrent (GDK_WINDOW_XDISPLAY (win), GDK_WINDOW_XID (win), glcontext))
{
/* ... rendering commands go here ... */
glXSwapBuffers (GDK_WINDOW_XDISPLAY (win), GDK_WINDOW_XID (win));
}
return TRUE;
}
Trying to understand why (a) was faster than (b) I downloaded the sources of the GtkGLext library, read them and found out that the commands were exactly the same as when calling X11. Now my thoughts are either the next line in (b)
gtk_widget_set_double_buffered (drawing_area, FALSE);
is responsible for the slowdown, or that something differs in how the OpenGL context itself is configured — but I could not pin it down. For reference, here is the OpenGL information reported on my system:
OpenGL Version : 3.0 Mesa 12.0.3
OpenGL Vendor : nouveau
OpenGL Renderer : Gallium 0.4 on NVCF
OpenGL Shading Version : 1.30
Color Bits (R,G,B,A) : 8, 8, 8, 0
Depth Bits : 24
Stencil Bits : 0
Max. Lights Allowed : 8
Max. Texture Size : 16384
Max. Clipping Planes : 8
Max. Modelview Matrix Stacks : 32
Max. Projection Matrix Stacks : 32
Max. Attribute Stacks : 16
Max. Texture Stacks : 10
Total number of OpenGL Extensions : 227
Extensions list:
N°1 : GL_AMD_conservative_depth
N°2 : GL_AMD_draw_buffers_blend
...
...
At this point I am out of ideas, so my question is:
why is the OpenGL rendering slower in the GTK3 version (b)?
Any ideas or suggestions would be welcome — thanks in advance!
S.
PS: I am aware of GtkGLArea in GTK3, but it does not fit my needs here, so I am not using it.
[EDIT] Here is the OpenGL rendering code:
/*
 * Fixed-pipeline rendering snippet: set up the camera, clear the buffers,
 * then draw `nbds` bonds as GL_LINES from vertex arrays.
 *
 * Performance fix (relevant to the GTK3 slowdown question): the loop body
 * issued glPushMatrix/glPopMatrix with no matrix operations in between
 * (a no-op pair) and re-issued the loop-invariant glLineWidth and
 * glEnableClientState on every iteration. Hoisting this state out of the
 * loop removes per-bond driver overhead without changing the output.
 *
 * NOTE(review): the glPushMatrix after glLoadIdentity has no matching
 * glPopMatrix in this snippet — presumably it is popped later in code not
 * shown; confirm, or the matrix stack will overflow after 32 frames.
 */
glLoadIdentity ();
glPushMatrix ();
glTranslated (0.0, 0.0, -d);
rotate_camera ();
glClearColor (r,g,b,a);
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDisable (GL_LIGHTING);
int i;
GLfloat * lineVertices;
/* Loop-invariant state: set once, not once per bond. */
glLineWidth (1.0);
glEnableClientState (GL_VERTEX_ARRAY);
for (i=0; i<nbds;i++)
{
lineVertices = get_bonds(i);
/* Each bond is one segment: 2 vertices of 3 floats. */
glVertexPointer (3, GL_FLOAT, 0, lineVertices);
glDrawArrays (GL_LINES, 0, 2);
}
glDisableClientState (GL_VERTEX_ARRAY);
glEnable (GL_LIGHTING);
[/EDIT]