Shader design for multiple vertex types

I'm writing an OpenGL library and have run into a design problem involving multiple vertex types and vertex shaders. Do I need to write a new vertex/fragment shader pair for each vertex type, handling exactly its attributes? Or should I write a single vertex/fragment shader pair that handles all the possible attributes?

These are some basic struct "patterns" that I use for the vertex types.

struct simple_vertex
{
    glm::vec3 position;

    simple_vertex(glm::vec3 pos) {
        position = pos;
    }

    simple_vertex() {
        position = glm::vec3(0, 0, 0);
    }


    static void enable_attributes() {
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(simple_vertex),
                              (const GLvoid *) offsetof(simple_vertex, position));
        glEnableVertexAttribArray(0);
    }


    glm::vec3 get_position() const {
        return position;
    }
};

struct colored_vertex { // vertex that holds position and color data

    glm::vec3 position;
    glm::vec3 color;

    colored_vertex(glm::vec3 pos, glm::vec3 c) {
        color = c;
        position = pos;
    }

    colored_vertex() {
        color = glm::vec3(0, 0, 0);
        position = glm::vec3(0, 0, 0);
    }

    static void enable_attributes() {
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(colored_vertex),
                              (const GLvoid *) offsetof(colored_vertex, position));
        glEnableVertexAttribArray(0);
        glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(colored_vertex),
                              (const GLvoid *) offsetof(colored_vertex, color));
        glEnableVertexAttribArray(1);

    }

    glm::vec3 get_position() const {
        return position;
    }


};


struct textured_vertex { // vertex that holds position and texture coordinates

    glm::vec3 position;
    glm::vec2 texture_coords;

    textured_vertex(glm::vec3 pos, glm::vec2 text_coords) {
        texture_coords = text_coords;
        position = pos;
    }

    textured_vertex() {
        texture_coords = glm::vec2(0, 0);
        position = glm::vec3(0, 0, 0);
    }

    static void enable_attributes() {
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(textured_vertex),
                              (const GLvoid *) offsetof(textured_vertex, position));
        glEnableVertexAttribArray(0);

        glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(textured_vertex),
                              (const GLvoid *) offsetof(textured_vertex, texture_coords));
        glEnableVertexAttribArray(1);

    }

    glm::vec3 get_position() const {
        return position;
    }


};


struct normal_textured_vertex { // vertex that holds position, normal, and texture coordinates

    glm::vec3 position;
    glm::vec2 texture_coords;
    glm::vec3 normal;

    normal_textured_vertex(glm::vec3 pos, glm::vec2 text_coords, glm::vec3 n) {
        texture_coords = text_coords;
        position = pos;
        normal = n;
    }

    normal_textured_vertex() {
        texture_coords = glm::vec2(0, 0);
        position = glm::vec3(0, 0, 0);
        normal = glm::vec3(0, 0, 0);

    }

    static void enable_attributes() {
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(normal_textured_vertex),
                              (const GLvoid *) offsetof(normal_textured_vertex, position));
        glEnableVertexAttribArray(0);

        glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(normal_textured_vertex),
                              (const GLvoid *) offsetof(normal_textured_vertex, texture_coords));
        glEnableVertexAttribArray(1);

        glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(normal_textured_vertex),
                              (const GLvoid *) offsetof(normal_textured_vertex, normal));
        glEnableVertexAttribArray(2);

    }

    glm::vec3 get_position() const {
        return position;
    }


};
asked by BufferSpoofer


1 Answer

The correct solution is a third option you didn't list: don't have lots of meshes with wildly different vertex formats. By "vertex format", I mean the set of attributes (including how they're encoded in buffers) that a mesh provides.

In general, you should settle on a fairly limited set of vertex formats and adjust meshes (offline) to fit within those formats. You might have a format for non-skinned meshes, a format for skinned meshes, a format for GUI objects, maybe a format for particles, and perhaps one or two others.
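As an illustration, a library might standardize on a single format for static (non-skinned) meshes and make every imported mesh conform to it. Here is a minimal sketch in the same style as the structs in the question; the name static_mesh_vertex and the exact attribute layout are illustrative choices, not something the answer prescribes:

// Hypothetical unified format for static (non-skinned) meshes.
// Assumes the same glm and OpenGL headers as the question's code.
struct static_mesh_vertex {
    glm::vec3 position;
    glm::vec3 normal;
    glm::vec2 texture_coords;
    glm::vec4 color;

    static void enable_attributes() {
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(static_mesh_vertex),
                              (const GLvoid *) offsetof(static_mesh_vertex, position));
        glEnableVertexAttribArray(0);

        glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(static_mesh_vertex),
                              (const GLvoid *) offsetof(static_mesh_vertex, normal));
        glEnableVertexAttribArray(1);

        glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(static_mesh_vertex),
                              (const GLvoid *) offsetof(static_mesh_vertex, texture_coords));
        glEnableVertexAttribArray(2);

        glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, sizeof(static_mesh_vertex),
                              (const GLvoid *) offsetof(static_mesh_vertex, color));
        glEnableVertexAttribArray(3);
    }
};

One vertex/fragment shader pair written against that layout then covers every mesh that fits it.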

If you are writing an application that has no control over the form of the data it is given and has to work with whatever arrives, even then I would suggest creating innocuous data for attributes the mesh doesn't provide. For example, if someone gives you a mesh with positions and UVs but no colors, create color data that is just repeated values of (1.0, 1.0, 1.0, 1.0). Your lighting equation should handle that color just fine. If someone gives you a mesh with positions and colors but no texture coordinates, create UV values that are all 0s (and give the mesh a small, white texture to sample from). And so on.
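A sketch of what that padding could look like at import time, assuming the static_mesh_vertex format from above; import_mesh and make_white_texture are hypothetical helper names, and the (0, 0, 1) default normal is a placeholder choice of my own:

// Hypothetical import helper: attributes the source mesh doesn't provide are
// filled with innocuous defaults so every mesh fits the same vertex format.
// Assumes <vector>, glm, and an OpenGL loader are already included.
std::vector<static_mesh_vertex> import_mesh(const std::vector<glm::vec3> &positions,
                                            const std::vector<glm::vec2> *uvs,      // may be null
                                            const std::vector<glm::vec4> *colors) { // may be null
    std::vector<static_mesh_vertex> vertices(positions.size());
    for (std::size_t i = 0; i < positions.size(); ++i) {
        vertices[i].position       = positions[i];
        vertices[i].normal         = glm::vec3(0, 0, 1);                   // placeholder normal
        vertices[i].texture_coords = uvs    ? (*uvs)[i]    : glm::vec2(0, 0);
        vertices[i].color          = colors ? (*colors)[i] : glm::vec4(1, 1, 1, 1);
    }
    return vertices;
}

// A 1x1 white texture to bind for meshes that have no real texture, so a
// fragment shader computing texture(sampler, uv) * color just sees the color.
GLuint make_white_texture() {
    const unsigned char white[4] = {255, 255, 255, 255};
    GLuint tex;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, white);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    return tex;
}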

Don't adjust your code to your data; adjust your data to your code.

answered by Nicol Bolas