I am trying to use SDL2 to load textures for rendering OpenGL Wavefront objects (I am currently testing with the fixed-function pipeline, but plan to ultimately switch to shaders). The problem is that the loaded texture, applied to a quad (and to the model, which uses a small region in the lower right corner of the texture), looks like this:

(screenshot of the garbled texture; original image hosted on image-upload.de)
This is the texture I used:
The image loads fine and looks completely normal when displayed with SDL's own functions, so the conversion to an OpenGL texture is probably where things go wrong. Note that I have alpha blending enabled and the texture is still fully opaque, so the values are not entirely random and probably not uninitialized memory. This is my surface conversion code (pieced together from various tutorials and questions here on this site):
GLuint glMakeTexture(bool mipmap = false, int request_size = 0) { // Only works on 32-bit surfaces
    GLuint texture = 0;
    if ((bool)_surface) {
        int w, h;
        if (request_size) {
            // NPOT and rectangular textures have been widely supported for at least a decade now; you should never need this...
            w = h = request_size;
            if (w < _surface->w || h < _surface->h) return 0; // No can do.
        } else {
            w = _surface->w;
            h = _surface->h;
        }
        SDL_LockSurface(&*_surface);
        std::cout << "Bits: " << (int)_surface->format->BytesPerPixel << std::endl;
        Uint8 *temp = (Uint8*)malloc(w * h * sizeof(Uint32)); // Yes, I know it's 4...
        if (!temp) return 0;
        // Optimized code
        /*for (int y = 0; y < h; y++) { // Pitch is given in bytes, so we need to cast to 8 bit here!
            memcpy(temp + y*w*sizeof(Uint32), (Uint8*)_surface->pixels + y*_surface->pitch, _surface->w*sizeof(Uint32));
            if (w > _surface->w)
                memset(temp + y*w*sizeof(Uint32) + _surface->w, 0, (w - _surface->w)*sizeof(Uint32));
        }
        for (int y = _surface->h; y < h; y++)
            memset(temp + y*w*sizeof(Uint32), 0, w*sizeof(Uint32));
        GLenum format = (_surface->format->Rmask == 0xFF) ? GL_RGBA : GL_BGRA;*/
        // Naive code for testing
        for (int y = 0; y < _surface->h; y++)
            for (int x = 0; x < _surface->w; x++) {
                int mempos = (x + y*w) * 4;
                SDL_Color pcol = get_pixel(x, y);
                temp[mempos]     = pcol.r;
                temp[mempos + 1] = pcol.g;
                temp[mempos + 2] = pcol.b;
                temp[mempos + 3] = pcol.a;
            }
        GLenum format = GL_RGBA;
        SDL_UnlockSurface(&*_surface);
        glGenTextures(1, &texture);
        glBindTexture(GL_TEXTURE_2D, texture);
        if (mipmap) glTexParameteri(texture, GL_GENERATE_MIPMAP, GL_TRUE);
        glTexImage2D(GL_TEXTURE_2D, 0, format, w, h, 0, format, GL_UNSIGNED_BYTE, temp);
        if (mipmap) glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        else        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        free(temp); // Always clean up...
    }
    return texture;
}
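get_pixel is a small helper of mine that reads one pixel through the surface's format masks; its exact implementation isn't important, but roughly it does something like this (sketch for 32-bit surfaces, shown only so the snippet above is self-contained):

// Rough sketch of the get_pixel helper used above (32-bit surfaces only,
// surface assumed to be locked). Pitch is in bytes, hence the Uint8* math.
SDL_Color get_pixel(int x, int y) {
    const Uint8 *row = (const Uint8*)_surface->pixels + y * _surface->pitch;
    Uint32 pixel = *(const Uint32*)(row + x * _surface->format->BytesPerPixel);
    SDL_Color c;
    SDL_GetRGBA(pixel, _surface->format, &c.r, &c.g, &c.b, &c.a);
    return c;
}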
UPDATE: _surface is actually a std::shared_ptr<SDL_Surface>, hence the &* when (un)locking it.
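For reference, the surface is created more or less like this (the loader call and filename are just placeholders):

#include <memory>
#include <SDL.h>

// SDL_FreeSurface as the custom deleter, so the surface is released
// automatically; this is also why locking needs SDL_LockSurface(&*_surface)
// to get at the raw pointer.
std::shared_ptr<SDL_Surface> _surface(
    SDL_LoadBMP("example.bmp"), // placeholder; any function returning SDL_Surface* works
    SDL_FreeSurface);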
By the way, SDL reports the surface as 32-bit RGBA on my machine; I already checked that.
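If someone wants to rule the pixel format out entirely, converting the surface to a fixed layout up front should work too (untested in my setup; SDL_PIXELFORMAT_RGBA32 requires SDL >= 2.0.5):

// Force a known byte order before uploading, instead of inspecting Rmask.
// SDL_PIXELFORMAT_RGBA32 means "R,G,B,A in memory order" on any endianness,
// so the result can always be uploaded as GL_RGBA / GL_UNSIGNED_BYTE.
SDL_Surface *raw = SDL_LoadBMP("example.bmp"); // placeholder loader
SDL_Surface *converted = SDL_ConvertSurfaceFormat(raw, SDL_PIXELFORMAT_RGBA32, 0);
SDL_FreeSurface(raw);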
Here is the code that binds the texture and draws the quad:
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glBindTexture(GL_TEXTURE_2D, _texture[MAP_KD]);

static bool once = true;
if (once) {
    int tex;
    glGetIntegerv(GL_TEXTURE_BINDING_2D, &tex);
    bool valid = glIsTexture(tex);
    std::cout << tex << " " << valid << std::endl;
    once = false;
}

glBegin(GL_TRIANGLE_STRIP);
The vertices are pulled from an index list later on; that code is too long to post here (and besides, it works fine for everything except the texture).
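To give an idea of what ends up between glBegin and glEnd, here is a simplified stand-in for the real index-driven loop (a full-texture quad as a triangle strip; the coordinates are made up):

// Simplified stand-in for the index-driven loop: one quad covering the
// whole [0,1]x[0,1] texture range, drawn as a GL_TRIANGLE_STRIP.
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f,  1.0f, 0.0f);
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f,  1.0f, 0.0f);
glEnd();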
I also tried the naive approach found in many tutorials of passing _surface->pixels to glTexImage2D() directly, but that doesn't help either (and I hear it's wrong anyway, because pitch != width * BytesPerPixel in general). The "optimized" code produces exactly the same result, by the way (as expected); I wrote the naive version below it for easier testing. Setting all pixels to a fixed color or making the texture partially transparent works as expected, so I assume OpenGL loads the values in temp correctly. It's probably my understanding of the memory layout of SDL2 surfaces that has gone wrong.
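For clarity, the pitch problem boils down to this: each surface row may be padded at the end, so a tight copy has to step by pitch bytes on the source side, not by width * BytesPerPixel. A minimal row-by-row copy (essentially what the "optimized" path above does, assuming a 32-bit surface) looks like:

#include <cstring>
#include <vector>

// Source rows advance by pitch (bytes, possibly including padding);
// destination rows advance by the tightly packed w * 4 bytes.
std::vector<Uint8> packed(_surface->w * _surface->h * 4);
for (int y = 0; y < _surface->h; ++y)
    std::memcpy(&packed[y * _surface->w * 4],
                (const Uint8*)_surface->pixels + y * _surface->pitch,
                _surface->w * 4);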
FINAL EDIT: The solution (thanks to Peter Clark's answer; GL_UNPACK_ROW_LENGTH is the key):
GLuint glTexture(bool mipmap = false) {
    GLuint texture = 0;
    if ((bool)_surface) {
        GLenum texture_format, internal_format, tex_type;
        if (_surface->format->BytesPerPixel == 4) {
            if (_surface->format->Rmask == 0x000000ff) {
                texture_format = GL_RGBA;
                tex_type = GL_UNSIGNED_INT_8_8_8_8_REV;
            } else {
                texture_format = GL_BGRA;
                tex_type = GL_UNSIGNED_INT_8_8_8_8;
            }
            internal_format = GL_RGBA8;
        } else {
            if (_surface->format->Rmask == 0x000000ff) {
                texture_format = GL_RGB;
                tex_type = GL_UNSIGNED_BYTE;
            } else {
                texture_format = GL_BGR;
                tex_type = GL_UNSIGNED_BYTE;
            }
            internal_format = GL_RGB8;
        }

        int alignment = 8;
        while (_surface->pitch % alignment) alignment >>= 1; // x%1==0 for any x
        glPixelStorei(GL_UNPACK_ALIGNMENT, alignment);

        int expected_pitch = (_surface->w * _surface->format->BytesPerPixel + alignment - 1) / alignment * alignment;
        if (_surface->pitch - expected_pitch >= alignment) // Alignment alone won't solve it now
            glPixelStorei(GL_UNPACK_ROW_LENGTH, _surface->pitch / _surface->format->BytesPerPixel);
        else
            glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);

        glGenTextures(1, &texture);
        glBindTexture(GL_TEXTURE_2D, texture);
        glTexImage2D(GL_TEXTURE_2D, 0, internal_format, _surface->w, _surface->h, 0,
                     texture_format, tex_type, _surface->pixels);

        if (mipmap) {
            glGenerateMipmap(GL_TEXTURE_2D);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
        } else {
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        }
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

        // Restore default unpack state
        glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
        glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    }
    return texture;
}
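For completeness, a typical call site might look like this (IMG_Load from SDL2_image is just one way to obtain the surface; the filename is made up):

#include <SDL_image.h>
#include <memory>

// Hypothetical usage: load an image, wrap it, build a mipmapped GL texture.
std::shared_ptr<SDL_Surface> _surface(IMG_Load("diffuse.png"), SDL_FreeSurface);
GLuint tex = glTexture(true); // the member function from above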