/* * Cogl * * An object oriented GL/GLES Abstraction/Utility Layer * * Copyright (C) 2007,2008,2009 Intel Corporation. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see . * * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "cogl.h" #include "cogl-debug.h" #include "cogl-internal.h" #include "cogl-context.h" #include "cogl-journal-private.h" #include "cogl-texture-private.h" #include "cogl-pipeline-private.h" #include "cogl-pipeline-opengl-private.h" #include "cogl-vertex-buffer-private.h" #include "cogl-framebuffer-private.h" #include "cogl-profile.h" #include "cogl-vertex-attribute-private.h" #include #include #include /* XXX NB: * The data logged in logged_vertices is formatted as follows: * * Per entry: * 4 RGBA GLubytes for the color * 2 floats for the top left position * 2 * n_layers floats for the top left texture coordinates * 2 floats for the bottom right position * 2 * n_layers floats for the bottom right texture coordinates */ #define GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS(N_LAYERS) \ (N_LAYERS * 2 + 2) /* XXX NB: * Once in the vertex array, the journal's vertex data is arranged as follows: * 4 vertices per quad: * 2 or 3 GLfloats per position (3 when doing software transforms) * 4 RGBA GLubytes, * 2 GLfloats per tex coord * n_layers * * Where n_layers corresponds to the number of pipeline layers enabled * * To avoid frequent changes in the stride of our vertex data we always pad * 
n_layers to be >= 2
 *
 * There will be four vertices per quad in the vertex array
 *
 * When we are transforming quads in software we need to also track the z
 * coordinate of transformed vertices.
 *
 * So for a given number of layers this gets the stride in 32bit words:
 */
#define SW_TRANSFORM (!(cogl_debug_flags & \
                        COGL_DEBUG_DISABLE_SOFTWARE_TRANSFORM))
#define POS_STRIDE (SW_TRANSFORM ? 3 : 2) /* number of 32bit words */
#define N_POS_COMPONENTS POS_STRIDE
#define COLOR_STRIDE 1 /* number of 32bit words */
#define TEX_STRIDE 2 /* number of 32bit words */
#define MIN_LAYER_PADING 2
#define GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS(N_LAYERS) \
  (POS_STRIDE + COLOR_STRIDE + \
   TEX_STRIDE * (N_LAYERS < MIN_LAYER_PADING ? MIN_LAYER_PADING : N_LAYERS))

/* If a batch is longer than this threshold then we'll assume it's not
   worth doing software clipping and it's cheaper to program the GPU
   to do the clip */
#define COGL_JOURNAL_HARDWARE_CLIP_THRESHOLD 8

/* Transient state threaded through the chain of journal flushing
 * callbacks below via their "void *data" argument. */
typedef struct _CoglJournalFlushState
{
  CoglVertexArray *vertex_array;       /* uploaded vertices for this flush */
  GArray *attributes;                  /* CoglVertexAttribute* per attribute */
  int current_attribute;

  gsize stride;                        /* current vertex stride in bytes */
  size_t array_offset;                 /* byte offset into vertex_array */
  GLuint current_vertex;               /* first vertex of the next batch */
#ifndef HAVE_COGL_GL
  CoglIndices *indices;                /* rectangle indices for GLES draws */
  gsize indices_type_size;
#endif

  CoglMatrixStack *modelview_stack;
  CoglMatrixStack *projection_stack;

  CoglPipeline *source;                /* pipeline of the current batch */
} CoglJournalFlushState;

typedef void (*CoglJournalBatchCallback) (CoglJournalEntry *start,
                                          int n_entries,
                                          void *data);
typedef gboolean (*CoglJournalBatchTest) (CoglJournalEntry *entry0,
                                          CoglJournalEntry *entry1);

/* Debug helper: prints one logged quad (color, two corner positions and
 * their texture coordinates) from the logged_vertices format described
 * by GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS. */
static void
_cogl_journal_dump_logged_quad (guint8 *data, int n_layers)
{
  gsize stride = GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS (n_layers);
  int i;

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  g_print ("n_layers = %d; rgba=0x%02X%02X%02X%02X\n",
           n_layers, data[0], data[1], data[2], data[3]);

  /* skip the 4 RGBA color bytes so data points at the float data */
  data += 4;
  for (i = 0; i < 2; i++)
    {
      float *v = (float *)data + (i * stride);
      int j;

      g_print ("v%d: x = %f, y = %f", i, v[0], v[1]);

      for (j = 0; j < n_layers; j++)
        {
          float *t = v + 2 + TEX_STRIDE * j;
          g_print (", tx%d = %f, ty%d = %f", j, t[0], j, t[1]);
        }
      g_print ("\n");
    }
}

/* Debug helper: prints the four expanded vertices of one quad as laid
 * out in the vertex array (see GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS). */
static void
_cogl_journal_dump_quad_vertices (guint8 *data, int n_layers)
{
  gsize stride = GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS (n_layers);
  int i;

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  g_print ("n_layers = %d; stride = %d; pos stride = %d; color stride = %d; "
           "tex stride = %d; stride in bytes = %d\n",
           n_layers, (int)stride, POS_STRIDE, COLOR_STRIDE,
           TEX_STRIDE, (int)stride * 4);

  for (i = 0; i < 4; i++)
    {
      float *v = (float *)data + (i * stride);
      /* the color bytes follow the position floats within each vertex */
      guint8 *c = data + (POS_STRIDE * 4) + (i * stride * 4);
      int j;

      if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_DISABLE_SOFTWARE_TRANSFORM))
        g_print ("v%d: x = %f, y = %f, rgba=0x%02X%02X%02X%02X",
                 i, v[0], v[1], c[0], c[1], c[2], c[3]);
      else
        g_print ("v%d: x = %f, y = %f, z = %f, rgba=0x%02X%02X%02X%02X",
                 i, v[0], v[1], v[2], c[0], c[1], c[2], c[3]);
      for (j = 0; j < n_layers; j++)
        {
          float *t = v + POS_STRIDE + COLOR_STRIDE + TEX_STRIDE * j;
          g_print (", tx%d = %f, ty%d = %f", j, t[0], j, t[1]);
        }
      g_print ("\n");
    }
}

/* Debug helper: dumps every quad of a batch via
 * _cogl_journal_dump_quad_vertices. */
static void
_cogl_journal_dump_quad_batch (guint8 *data, int n_layers, int n_quads)
{
  gsize byte_stride = GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS (n_layers) * 4;
  int i;

  g_print ("_cogl_journal_dump_quad_batch: n_layers = %d, n_quads = %d\n",
           n_layers, n_quads);
  for (i = 0; i < n_quads; i++)
    _cogl_journal_dump_quad_vertices (data + byte_stride * 2 * i, n_layers);
}

/* Walks a run of journal entries, grouping consecutive entries for
 * which can_batch_callback returns TRUE, and invokes batch_callback
 * once per group. */
static void
batch_and_call (CoglJournalEntry *entries,
                int n_entries,
                CoglJournalBatchTest can_batch_callback,
                CoglJournalBatchCallback batch_callback,
                void *data)
{
  int i;
  int batch_len = 1;
  CoglJournalEntry *batch_start = entries;

  if (n_entries < 1)
    return;

  for (i = 1; i < n_entries; i++)
    {
      CoglJournalEntry *entry0 = &entries[i - 1];
      CoglJournalEntry *entry1 = entry0 + 1;

      if (can_batch_callback (entry0, entry1))
        {
          batch_len++;
          continue;
        }

      batch_callback (batch_start, batch_len, data);

      batch_start = entry1;
      batch_len = 1;
    }

  /* The last batch...
*/
  batch_callback (batch_start, batch_len, data);
}

/* Innermost flush stage: by this point the batch shares clip state,
 * stride, layer count, pipeline and (when not using software
 * transforms) modelview matrix, so it can be drawn with one call. */
static void
_cogl_journal_flush_modelview_and_entries (CoglJournalEntry *batch_start,
                                           int batch_len,
                                           void *data)
{
  CoglJournalFlushState *state = data;
  CoglVertexAttribute **attributes;
  COGL_STATIC_TIMER (time_flush_modelview_and_entries,
                     "flush: pipeline+entries", /* parent */
                     "flush: modelview+entries",
                     "The time spent flushing modelview + entries",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  COGL_TIMER_START (_cogl_uprof_context, time_flush_modelview_and_entries);

  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_BATCHING))
    g_print ("BATCHING: modelview batch len = %d\n", batch_len);

  /* When software transforms are disabled the entries were logged
   * untransformed, so the entry's modelview must be loaded here. */
  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_DISABLE_SOFTWARE_TRANSFORM))
    {
      _cogl_matrix_stack_set (state->modelview_stack,
                              &batch_start->model_view);
      _cogl_matrix_stack_flush_to_gl (state->modelview_stack,
                                      COGL_MATRIX_MODELVIEW);
    }

  attributes = (CoglVertexAttribute **)state->attributes->data;

  cogl_push_source (state->source);

#ifdef HAVE_COGL_GL

  /* XXX: it's rather evil that we sneak in the GL_QUADS enum here... */
  _cogl_draw_vertex_attributes_array (GL_QUADS,
                                      state->current_vertex,
                                      batch_len * 4,
                                      attributes);

#else /* HAVE_COGL_GL */

  /* GLES has no GL_QUADS: draw indexed triangles (6 indices per quad),
   * or a triangle fan for a single quad. */
  if (batch_len > 1)
    {
      _cogl_draw_indexed_vertex_attributes_array (COGL_VERTICES_MODE_TRIANGLES,
                                                  state->current_vertex * 6 / 4,
                                                  batch_len * 6,
                                                  state->indices,
                                                  attributes);
    }
  else
    {
      _cogl_draw_vertex_attributes_array (COGL_VERTICES_MODE_TRIANGLE_FAN,
                                          state->current_vertex, 4,
                                          attributes);
    }

#endif

  /* DEBUGGING CODE XXX: This path will cause all rectangles to be
   * drawn with a coloured outline. Each batch will be rendered with
   * the same color. This may e.g. help with debugging texture slicing
   * issues, visually seeing what is batched and debugging blending
   * issues, plus it looks quite cool.
   */
  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_RECTANGLES))
    {
      static CoglPipeline *outline = NULL;
      guint8 color_intensity;
      int i;
      CoglVertexAttribute *loop_attributes[2];

      _COGL_GET_CONTEXT (ctxt, NO_RETVAL);

      if (outline == NULL)
        outline = cogl_pipeline_new ();

      /* The least significant three bits represent the three
         components so that the order of colours goes red, green,
         yellow, blue, magenta, cyan. Black and white are skipped. The
         next two bits give four scales of intensity for those colours
         in the order 0xff, 0xcc, 0x99, and 0x66. This gives a total
         of 24 colours. If there are more than 24 batches on the stage
         then it will wrap around */
      color_intensity = 0xff - 0x33 * (ctxt->journal_rectangles_color >> 3);
      cogl_pipeline_set_color4ub (outline,
                                  (ctxt->journal_rectangles_color & 1) ?
                                  color_intensity : 0,
                                  (ctxt->journal_rectangles_color & 2) ?
                                  color_intensity : 0,
                                  (ctxt->journal_rectangles_color & 4) ?
                                  color_intensity : 0,
                                  0xff);
      cogl_set_source (outline);

      loop_attributes[0] = attributes[0]; /* we just want the position */
      loop_attributes[1] = NULL;
      for (i = 0; i < batch_len; i++)
        _cogl_draw_vertex_attributes_array (COGL_VERTICES_MODE_LINE_LOOP,
                                            4 * i + state->current_vertex, 4,
                                            loop_attributes);

      /* Go to the next color */
      do
        ctxt->journal_rectangles_color =
          ((ctxt->journal_rectangles_color + 1) & ((1 << 5) - 1));
      /* We don't want to use black or white */
      while ((ctxt->journal_rectangles_color & 0x07) == 0 ||
             (ctxt->journal_rectangles_color & 0x07) == 0x07);
    }

  /* advance past the 4 vertices of each quad just drawn */
  state->current_vertex += (4 * batch_len);

  cogl_pop_source ();

  COGL_TIMER_STOP (_cogl_uprof_context, time_flush_modelview_and_entries);
}

/* Batch-test: TRUE when two entries share the same modelview matrix. */
static gboolean
compare_entry_modelviews (CoglJournalEntry *entry0,
                          CoglJournalEntry *entry1)
{
  /* Batch together quads with the same model view matrix */

  /* FIXME: this is nasty, there are much nicer ways to track this
   * (at the add_quad_vertices level) without resorting to a memcmp!
   *
   * E.g.
If the cogl-current-matrix code maintained an "age" for
   * the modelview matrix we could simply check in add_quad_vertices
   * if the age has increased, and if so record the change as a
   * boolean in the journal.
   */

  if (memcmp (&entry0->model_view, &entry1->model_view,
              sizeof (GLfloat) * 16) == 0)
    return TRUE;
  else
    return FALSE;
}

/* At this point we have a run of quads that we know have compatible
 * pipelines, but they may not all have the same modelview matrix */
static void
_cogl_journal_flush_pipeline_and_entries (CoglJournalEntry *batch_start,
                                          int batch_len,
                                          void *data)
{
  CoglJournalFlushState *state = data;
  COGL_STATIC_TIMER (time_flush_pipeline_entries,
                     "flush: texcoords+pipeline+entries", /* parent */
                     "flush: pipeline+entries",
                     "The time spent flushing pipeline + entries",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  COGL_TIMER_START (_cogl_uprof_context, time_flush_pipeline_entries);

  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_BATCHING))
    g_print ("BATCHING: pipeline batch len = %d\n", batch_len);

  state->source = batch_start->pipeline;

  /* If we haven't transformed the quads in software then we need to also break
   * up batches according to changes in the modelview matrix... */
  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_DISABLE_SOFTWARE_TRANSFORM))
    {
      batch_and_call (batch_start,
                      batch_len,
                      compare_entry_modelviews,
                      _cogl_journal_flush_modelview_and_entries,
                      data);
    }
  else
    _cogl_journal_flush_modelview_and_entries (batch_start, batch_len, data);

  COGL_TIMER_STOP (_cogl_uprof_context, time_flush_pipeline_entries);
}

/* Batch-test: TRUE when two entries' pipelines are similar enough to
 * be drawn with the same GL pipeline state. */
static gboolean
compare_entry_pipelines (CoglJournalEntry *entry0, CoglJournalEntry *entry1)
{
  /* batch rectangles using compatible pipelines */

  /* XXX: _cogl_pipeline_equal may give false negatives since it avoids
   * deep comparisons as an optimization. It aims to compare enough so
   * that we are able to batch the 90% common cases, but may not
   * look at less common differences.
   */
  if (_cogl_pipeline_equal (entry0->pipeline, entry1->pipeline, TRUE))
    return TRUE;
  else
    return FALSE;
}

/* Since the stride may not reflect the number of texture layers in use
 * (due to padding) we deal with texture coordinate offsets separately
 * from vertex and color offsets... */
static void
_cogl_journal_flush_texcoord_vbo_offsets_and_entries (
                                          CoglJournalEntry *batch_start,
                                          int batch_len,
                                          void *data)
{
  CoglJournalFlushState *state = data;
  int i;
  COGL_STATIC_TIMER (time_flush_texcoord_pipeline_entries,
                     "flush: vbo+texcoords+pipeline+entries", /* parent */
                     "flush: texcoords+pipeline+entries",
                     "The time spent flushing texcoord offsets + pipeline "
                     "+ entries",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  COGL_TIMER_START (_cogl_uprof_context,
                    time_flush_texcoord_pipeline_entries);

  /* NB: attributes 0 and 1 are position and color */
  for (i = 2; i < state->attributes->len; i++)
    cogl_object_unref (g_array_index (state->attributes,
                                      CoglVertexAttribute *, i));

  g_array_set_size (state->attributes, batch_start->n_layers + 2);

  for (i = 0; i < batch_start->n_layers; i++)
    {
      CoglVertexAttribute **attribute_entry =
        &g_array_index (state->attributes, CoglVertexAttribute *, i + 2);
      const char *names[] = {
          "cogl_tex_coord0_in",
          "cogl_tex_coord1_in",
          "cogl_tex_coord2_in",
          "cogl_tex_coord3_in",
          "cogl_tex_coord4_in",
          "cogl_tex_coord5_in",
          "cogl_tex_coord6_in",
          "cogl_tex_coord7_in"
      };
      char *name;

      /* XXX NB:
       * Our journal's vertex data is arranged as follows:
       * 4 vertices per quad:
       *    2 or 3 floats per position (3 when doing software transforms)
       *    4 RGBA bytes,
       *    2 floats per tex coord * n_layers
       * (though n_layers may be padded; see definition of
       *  GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS for details)
       */
      /* the first 8 names come from the static table; beyond that the
         name has to be allocated (and freed below) */
      name = i < 8 ? (char *)names[i] :
        g_strdup_printf ("cogl_tex_coord%d_in", i);

      /* XXX: it may be worth having some form of static initializer for
       * attributes...
       */
      *attribute_entry =
        cogl_vertex_attribute_new (state->vertex_array,
                                   name,
                                   state->stride,
                                   state->array_offset +
                                   (POS_STRIDE + COLOR_STRIDE) * 4 +
                                   TEX_STRIDE * 4 * i,
                                   2,
                                   COGL_VERTEX_ATTRIBUTE_TYPE_FLOAT);

      if (i >= 8)
        g_free (name);
    }

  batch_and_call (batch_start,
                  batch_len,
                  compare_entry_pipelines,
                  _cogl_journal_flush_pipeline_and_entries,
                  data);
  COGL_TIMER_STOP (_cogl_uprof_context,
                   time_flush_texcoord_pipeline_entries);
}

/* Batch-test: TRUE when two entries use the same number of layers. */
static gboolean
compare_entry_n_layers (CoglJournalEntry *entry0, CoglJournalEntry *entry1)
{
  if (entry0->n_layers == entry1->n_layers)
    return TRUE;
  else
    return FALSE;
}

/* At this point we know the stride has changed from the previous batch
 * of journal entries */
static void
_cogl_journal_flush_vbo_offsets_and_entries (CoglJournalEntry *batch_start,
                                             int batch_len,
                                             void *data)
{
  CoglJournalFlushState *state = data;
  gsize stride;
  int i;
  CoglVertexAttribute **attribute_entry;
  COGL_STATIC_TIMER (time_flush_vbo_texcoord_pipeline_entries,
                     "flush: clip+vbo+texcoords+pipeline+entries", /* parent */
                     "flush: vbo+texcoords+pipeline+entries",
                     "The time spent flushing vbo + texcoord offsets + "
                     "pipeline + entries",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  COGL_TIMER_START (_cogl_uprof_context,
                    time_flush_vbo_texcoord_pipeline_entries);

  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_BATCHING))
    g_print ("BATCHING: vbo offset batch len = %d\n", batch_len);

  /* XXX NB:
   * Our journal's vertex data is arranged as follows:
   * 4 vertices per quad:
   *    2 or 3 GLfloats per position (3 when doing software transforms)
   *    4 RGBA GLubytes,
   *    2 GLfloats per tex coord * n_layers
   * (though n_layers may be padded; see definition of
   *  GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS for details)
   */
  stride = GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS (batch_start->n_layers);
  stride *= sizeof (float); /* convert 32bit words to bytes */
  state->stride = stride;

  for (i = 0; i < state->attributes->len; i++)
    cogl_object_unref (g_array_index (state->attributes,
                                      CoglVertexAttribute *, i));

  g_array_set_size
(state->attributes, 2);

  /* attribute 0: position */
  attribute_entry =
    &g_array_index (state->attributes, CoglVertexAttribute *, 0);
  *attribute_entry =
    cogl_vertex_attribute_new (state->vertex_array,
                               "cogl_position_in",
                               stride,
                               state->array_offset,
                               N_POS_COMPONENTS,
                               COGL_VERTEX_ATTRIBUTE_TYPE_FLOAT);

  /* attribute 1: color (4 unsigned bytes following the position) */
  attribute_entry =
    &g_array_index (state->attributes, CoglVertexAttribute *, 1);
  *attribute_entry =
    cogl_vertex_attribute_new (state->vertex_array,
                               "cogl_color_in",
                               stride,
                               state->array_offset + (POS_STRIDE * 4),
                               4,
                               COGL_VERTEX_ATTRIBUTE_TYPE_UNSIGNED_BYTE);

#ifndef HAVE_COGL_GL
  state->indices = cogl_get_rectangle_indices (batch_len);
#endif

  /* We only create new VertexAttributes when the stride within the
   * VertexArray changes. (due to a change in the number of pipeline layers)
   * While the stride remains constant we walk forward through the above
   * VertexArray using a vertex offset passed to cogl_draw_vertex_attributes
   */
  state->current_vertex = 0;

  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_JOURNAL))
    {
      guint8 *verts;

      /* Mapping a buffer for read is probably a really bad thing to
         do but this will only happen during debugging so it probably
         doesn't matter */
      verts = (cogl_buffer_map (COGL_BUFFER (state->vertex_array),
                                COGL_BUFFER_ACCESS_READ, 0) +
               state->array_offset);

      _cogl_journal_dump_quad_batch (verts,
                                     batch_start->n_layers,
                                     batch_len);

      cogl_buffer_unmap (COGL_BUFFER (state->vertex_array));
    }

  batch_and_call (batch_start,
                  batch_len,
                  compare_entry_n_layers,
                  _cogl_journal_flush_texcoord_vbo_offsets_and_entries,
                  data);

  /* progress forward through the VBO containing all our vertices */
  state->array_offset += (stride * 4 * batch_len);
  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_JOURNAL))
    g_print ("new vbo offset = %lu\n", (unsigned long)state->array_offset);

  COGL_TIMER_STOP (_cogl_uprof_context,
                   time_flush_vbo_texcoord_pipeline_entries);
}

/* Batch-test: TRUE when two entries share the same vertex stride
 * (i.e. the same, possibly padded, layer count). */
static gboolean
compare_entry_strides (CoglJournalEntry *entry0, CoglJournalEntry *entry1)
{
  /* Currently the only thing that affects the stride for our vertex arrays
   * is the number of pipeline layers. We need to update our VBO offsets
   * whenever the stride changes. */
  /* TODO: We should be padding the n_layers == 1 case as if it were
   * n_layers == 2 so we can reduce the need to split batches. */
  if (entry0->n_layers == entry1->n_layers ||
      (entry0->n_layers <= MIN_LAYER_PADING &&
       entry1->n_layers <= MIN_LAYER_PADING))
    return TRUE;
  else
    return FALSE;
}

/* At this point we know the batch has a unique clip stack */
static void
_cogl_journal_flush_clip_stacks_and_entries (CoglJournalEntry *batch_start,
                                             int batch_len,
                                             void *data)
{
  CoglJournalFlushState *state = data;
  COGL_STATIC_TIMER (time_flush_clip_stack_pipeline_entries,
                     "Journal Flush", /* parent */
                     "flush: clip+vbo+texcoords+pipeline+entries",
                     "The time spent flushing clip + vbo + texcoord offsets + "
                     "pipeline + entries",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  COGL_TIMER_START (_cogl_uprof_context,
                    time_flush_clip_stack_pipeline_entries);

  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_BATCHING))
    g_print ("BATCHING: clip stack batch len = %d\n", batch_len);

  _cogl_clip_stack_flush (batch_start->clip_stack);

  _cogl_matrix_stack_push (state->modelview_stack);

  /* If we have transformed all our quads at log time then we ensure
   * no further model transform is applied by loading the identity
   * matrix here. We need to do this after flushing the clip stack
   * because the clip stack flushing code can modify the matrix */
  if (G_LIKELY (!(cogl_debug_flags & COGL_DEBUG_DISABLE_SOFTWARE_TRANSFORM)))
    {
      _cogl_matrix_stack_load_identity (state->modelview_stack);
      _cogl_matrix_stack_flush_to_gl (state->modelview_stack,
                                      COGL_MATRIX_MODELVIEW);
    }

  /* Setting up the clip state can sometimes also flush the projection
     matrix so we should flush it again. This will be a no-op if the
     clip code didn't modify the projection */
  _cogl_matrix_stack_flush_to_gl (state->projection_stack,
                                  COGL_MATRIX_PROJECTION);

  batch_and_call (batch_start,
                  batch_len,
                  compare_entry_strides,
                  _cogl_journal_flush_vbo_offsets_and_entries, /* callback */
                  data);

  _cogl_matrix_stack_pop (state->modelview_stack);

  COGL_TIMER_STOP (_cogl_uprof_context,
                   time_flush_clip_stack_pipeline_entries);
}

/* Works out whether matrix b is matrix a multiplied by a pure 2D
 * translation, i.e. b = a . translate(tx, ty). On success returns TRUE
 * and writes the translation to *tx_p/*ty_p; otherwise returns FALSE. */
static gboolean
calculate_translation (const CoglMatrix *a,
                       const CoglMatrix *b,
                       float *tx_p,
                       float *ty_p)
{
  float tx, ty;
  int x, y;

  /* Assuming we had the original matrix in this form:
   *
   *      [ a₁₁, a₁₂, a₁₃, a₁₄ ]
   *      [ a₂₁, a₂₂, a₂₃, a₂₄ ]
   *  a = [ a₃₁, a₃₂, a₃₃, a₃₄ ]
   *      [ a₄₁, a₄₂, a₄₃, a₄₄ ]
   *
   * then a translation of that matrix would be a multiplication by a
   * matrix of this form:
   *
   *      [ 1, 0, 0, x ]
   *      [ 0, 1, 0, y ]
   *  t = [ 0, 0, 1, 0 ]
   *      [ 0, 0, 0, 1 ]
   *
   * That would give us a matrix of this form.
   *
   *              [ a₁₁, a₁₂, a₁₃, a₁₁ x + a₁₂ y + a₁₄ ]
   *              [ a₂₁, a₂₂, a₂₃, a₂₁ x + a₂₂ y + a₂₄ ]
   *  b = a ⋅ t = [ a₃₁, a₃₂, a₃₃, a₃₁ x + a₃₂ y + a₃₄ ]
   *              [ a₄₁, a₄₂, a₄₃, a₄₁ x + a₄₂ y + a₄₄ ]
   *
   * We can use the two equations from the top left of the matrix to
   * work out the x and y translation given the two matrices:
   *
   *  b₁₄ = a₁₁x + a₁₂y + a₁₄
   *  b₂₄ = a₂₁x + a₂₂y + a₂₄
   *
   * Rearranging gives us:
   *
   *      a₁₂ b₂₄ - a₂₄ a₁₂
   *      ----------------- + a₁₄ - b₁₄
   *            a₂₂
   *  x = -----------------------------
   *             a₁₂ a₂₁
   *             ------- - a₁₁
   *               a₂₂
   *
   *      b₂₄ - a₂₁x - a₂₄
   *  y = ----------------
   *            a₂₂
   *
   * Once we've worked out what x and y would be if this was a valid
   * translation then we can simply verify that the rest of the matrix
   * matches up.
*/

  /* The leftmost 3x4 part of the matrix shouldn't change by a
     translation so we can just compare it directly */
  for (y = 0; y < 4; y++)
    for (x = 0; x < 3; x++)
      if ((&a->xx)[x * 4 + y] != (&b->xx)[x * 4 + y])
        return FALSE;

  tx = (((a->xy * b->yw - a->yw * a->xy) / a->yy + a->xw - b->xw) /
        ((a->xy * a->yx) / a->yy - a->xx));
  ty = (b->yw - a->yx * tx - a->yw) / a->yy;

#define APPROX_EQUAL(a, b) (fabsf ((a) - (b)) < 1e-6f)

  /* Check whether the 4th column of the matrices match up to the
     calculation */
  if (!APPROX_EQUAL (b->xw, a->xx * tx + a->xy * ty + a->xw) ||
      !APPROX_EQUAL (b->yw, a->yx * tx + a->yy * ty + a->yw) ||
      !APPROX_EQUAL (b->zw, a->zx * tx + a->zy * ty + a->zw) ||
      !APPROX_EQUAL (b->ww, a->wx * tx + a->wy * ty + a->ww))
    return FALSE;

#undef APPROX_EQUAL

  *tx_p = tx;
  *ty_p = ty;

  return TRUE;
}

/* Intersection of all rectangle clips for one journal entry, expressed
 * in that entry's modelview space. */
typedef struct
{
  float x_1, y_1;
  float x_2, y_2;
} ClipBounds;

/* Tries to drop the clip stack from a short batch by clamping the quad
 * geometry (and proportionally its texture coordinates) in software,
 * so that adjacent batches with different clips can merge. Bails out
 * (leaving entries untouched) as soon as anything is unsafe to clip. */
static void
check_software_clip_for_batch (CoglJournalEntry *batch_start,
                               int batch_len,
                               CoglJournalFlushState *state)
{
  CoglClipStack *clip_stack, *clip_entry;
  int entry_num;

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  /* This tries to find cases where the entry is logged with a clip
     but it would be faster to modify the vertex and texture
     coordinates rather than flush the clip so that it can batch
     better */

  /* If the batch is reasonably long then it's worthwhile programming
     the GPU to do the clip */
  if (batch_len >= COGL_JOURNAL_HARDWARE_CLIP_THRESHOLD)
    return;

  clip_stack = batch_start->clip_stack;

  if (clip_stack == NULL)
    return;

  /* Verify that all of the clip stack entries are a simple rectangle
     clip */
  for (clip_entry = clip_stack; clip_entry; clip_entry = clip_entry->parent)
    if (clip_entry->type != COGL_CLIP_STACK_RECT)
      return;

  /* This scratch buffer is used to store the translation for each
     entry in the journal. We store it in a separate buffer because
     it's expensive to calculate but at this point we still don't know
     whether we can clip all of the entries so we don't want to do the
     rest of the dependent calculations until we're sure we can. */
  if (ctx->journal_clip_bounds == NULL)
    ctx->journal_clip_bounds = g_array_new (FALSE, FALSE, sizeof (ClipBounds));
  g_array_set_size (ctx->journal_clip_bounds, batch_len);

  /* First pass: validate every entry and compute its clip bounds
     without modifying anything. */
  for (entry_num = 0; entry_num < batch_len; entry_num++)
    {
      CoglJournalEntry *journal_entry = batch_start + entry_num;
      CoglPipeline *pipeline = journal_entry->pipeline;
      ClipBounds *clip_bounds = &g_array_index (ctx->journal_clip_bounds,
                                                ClipBounds, entry_num);
      int layer_num;

      clip_bounds->x_1 = -G_MAXFLOAT;
      clip_bounds->y_1 = -G_MAXFLOAT;
      clip_bounds->x_2 = G_MAXFLOAT;
      clip_bounds->y_2 = G_MAXFLOAT;

      /* Check the pipeline is usable. We can short-cut here for
         entries using the same pipeline as the previous entry */
      if (entry_num == 0 || pipeline != batch_start[entry_num - 1].pipeline)
        {
          /* If the pipeline has a user program then we can't reliably
             modify the texture coordinates */
          if (cogl_pipeline_get_user_program (pipeline))
            return;

          /* If any of the pipeline layers have a texture matrix then
             we can't reliably modify the texture coordinates */
          for (layer_num = cogl_pipeline_get_n_layers (pipeline) - 1;
               layer_num >= 0;
               layer_num--)
            if (_cogl_pipeline_layer_has_user_matrix (pipeline, layer_num))
              return;
        }

      /* Now we need to verify that each clip entry's matrix is just a
         translation of the journal entry's modelview matrix. We can
         also work out the bounds of the clip in modelview space using
         this translation */
      for (clip_entry = clip_stack;
           clip_entry;
           clip_entry = clip_entry->parent)
        {
          float rect_x1, rect_y1, rect_x2, rect_y2;
          CoglClipStackRect *clip_rect;
          float tx, ty;

          clip_rect = (CoglClipStackRect *) clip_entry;

          if (!calculate_translation (&clip_rect->matrix,
                                      &journal_entry->model_view,
                                      &tx, &ty))
            return;

          /* normalize the rect so x0/y0 is the min corner */
          if (clip_rect->x0 < clip_rect->x1)
            {
              rect_x1 = clip_rect->x0;
              rect_x2 = clip_rect->x1;
            }
          else
            {
              rect_x1 = clip_rect->x1;
              rect_x2 = clip_rect->x0;
            }
          if (clip_rect->y0 < clip_rect->y1)
            {
              rect_y1 = clip_rect->y0;
              rect_y2 = clip_rect->y1;
            }
          else
            {
              rect_y1 = clip_rect->y1;
              rect_y2 = clip_rect->y0;
            }

          clip_bounds->x_1 = MAX (clip_bounds->x_1, rect_x1 - tx);
          clip_bounds->y_1 = MAX (clip_bounds->y_1, rect_y1 - ty);
          clip_bounds->x_2 = MIN (clip_bounds->x_2, rect_x2 - tx);
          clip_bounds->y_2 = MIN (clip_bounds->y_2, rect_y2 - ty);
        }
    }

  /* If we make it here then we know we can software clip the entire
     batch */

  for (entry_num = 0; entry_num < batch_len; entry_num++)
    {
      CoglJournalEntry *journal_entry = batch_start + entry_num;
      /* +1 skips the float-sized slot holding the 4 RGBA color bytes,
         so verts points at the first position float */
      float *verts = &g_array_index (ctx->logged_vertices, float,
                                     journal_entry->array_offset + 1);
      ClipBounds *clip_bounds = &g_array_index (ctx->journal_clip_bounds,
                                                ClipBounds, entry_num);
      size_t stride =
        GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS (journal_entry->n_layers);
      float rx1, ry1, rx2, ry2;
      float vx1, vy1, vx2, vy2;
      int layer_num;

      /* Remove the clip on the entry */
      _cogl_clip_stack_unref (journal_entry->clip_stack);
      journal_entry->clip_stack = NULL;

      vx1 = verts[0];
      vy1 = verts[1];
      vx2 = verts[stride];
      vy2 = verts[stride + 1];

      if (vx1 < vx2)
        {
          rx1 = vx1;
          rx2 = vx2;
        }
      else
        {
          rx1 = vx2;
          rx2 = vx1;
        }
      if (vy1 < vy2)
        {
          ry1 = vy1;
          ry2 = vy2;
        }
      else
        {
          ry1 = vy2;
          ry2 = vy1;
        }

      rx1 = CLAMP (rx1, clip_bounds->x_1, clip_bounds->x_2);
      ry1 = CLAMP (ry1, clip_bounds->y_1, clip_bounds->y_2);
      rx2 = CLAMP (rx2, clip_bounds->x_1, clip_bounds->x_2);
      ry2 = CLAMP (ry2, clip_bounds->y_1, clip_bounds->y_2);

      /* Check if the rectangle intersects the clip at all */
      if (rx1 == rx2 || ry1 == ry2)
        /* Will set all of the vertex data to 0 in the hope that this
           will create a degenerate rectangle and the GL driver will
           be able to clip it quickly */
        memset (verts, 0, sizeof (float) * stride * 2);
      else
        {
          /* restore the original corner ordering if it was flipped */
          if (vx1 > vx2)
            {
              float t = rx1;
              rx1 = rx2;
              rx2 = t;
            }
          if (vy1 > vy2)
            {
              float t = ry1;
              ry1 = ry2;
              ry2 = t;
            }

          verts[0] = rx1;
          verts[1] = ry1;
          verts[stride] = rx2;
          verts[stride + 1] = ry2;

          /* Convert the rectangle coordinates to a fraction of the
             original rectangle */
          rx1 = (rx1 - vx1) / (vx2 - vx1);
          ry1 = (ry1 - vy1) / (vy2 - vy1);
          rx2 = (rx2 - vx1) / (vx2 - vx1);
          ry2 = (ry2 - vy1) / (vy2 - vy1);

          /* interpolate the texture coordinates to match the clipped
             portion of the quad */
          for (layer_num = 0; layer_num < journal_entry->n_layers; layer_num++)
            {
              float *t = verts + 2 + 2 * layer_num;
              float tx1 = t[0], ty1 = t[1];
              float tx2 = t[stride], ty2 = t[stride + 1];
              t[0] = rx1 * (tx2 - tx1) + tx1;
              t[1] = ry1 * (ty2 - ty1) + ty1;
              t[stride] = rx2 * (tx2 - tx1) + tx1;
              t[stride + 1] = ry2 * (ty2 - ty1) + ty1;
            }
        }
    }

  return;
}

/* batch_and_call() callback: wraps check_software_clip_for_batch in a
 * profiling timer. */
static void
_cogl_journal_check_software_clip (CoglJournalEntry *batch_start,
                                   int batch_len,
                                   void *data)
{
  CoglJournalFlushState *state = data;
  COGL_STATIC_TIMER (time_check_software_clip,
                     "Journal Flush", /* parent */
                     "flush: check software clip",
                     "Time spent checking for software clip",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  COGL_TIMER_START (_cogl_uprof_context,
                    time_check_software_clip);

  check_software_clip_for_batch (batch_start, batch_len, state);

  COGL_TIMER_STOP (_cogl_uprof_context,
                   time_check_software_clip);
}

/* Batch-test: TRUE when two entries share the same clip stack. */
static gboolean
compare_entry_clip_stacks (CoglJournalEntry *entry0, CoglJournalEntry *entry1)
{
  return entry0->clip_stack == entry1->clip_stack;
}

/* Expands the journal's two-vertices-per-quad log into a four-vertices-
 * per-quad CoglVertexArray, transforming positions in software unless
 * COGL_DEBUG_DISABLE_SOFTWARE_TRANSFORM is set. Caller owns the
 * returned array. */
static CoglVertexArray *
upload_vertices (const CoglJournalEntry *entries,
                 int n_entries,
                 size_t needed_vbo_len,
                 GArray *vertices)
{
  CoglVertexArray *array;
  CoglBuffer *buffer;
  const float *vin;
  float *vout;
  int entry_num;
  int i;

  g_assert
(needed_vbo_len);

  array = cogl_vertex_array_new (needed_vbo_len * 4, NULL);
  buffer = COGL_BUFFER (array);
  cogl_buffer_set_update_hint (buffer, COGL_BUFFER_UPDATE_HINT_STATIC);

  vout = cogl_buffer_map (buffer,
                          COGL_BUFFER_ACCESS_WRITE,
                          COGL_BUFFER_MAP_HINT_DISCARD);

  vin = &g_array_index (vertices, float, 0);

  /* Expand the number of vertices from 2 to 4 while uploading */
  for (entry_num = 0; entry_num < n_entries; entry_num++)
    {
      const CoglJournalEntry *entry = entries + entry_num;
      size_t vb_stride = GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS (entry->n_layers);
      size_t array_stride =
        GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS (entry->n_layers);

      /* Copy the color to all four of the vertices (the 4 RGBA bytes
         are packed into one float-sized slot at the head of the entry) */
      for (i = 0; i < 4; i++)
        memcpy (vout + vb_stride * i + POS_STRIDE, vin, 4);
      vin++;

      if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_DISABLE_SOFTWARE_TRANSFORM))
        {
          /* no software transform: write the four corners directly
             from the two logged (top-left, bottom-right) corners */
          vout[vb_stride * 0] = vin[0];
          vout[vb_stride * 0 + 1] = vin[1];
          vout[vb_stride * 1] = vin[0];
          vout[vb_stride * 1 + 1] = vin[array_stride + 1];
          vout[vb_stride * 2] = vin[array_stride];
          vout[vb_stride * 2 + 1] = vin[array_stride + 1];
          vout[vb_stride * 3] = vin[array_stride];
          vout[vb_stride * 3 + 1] = vin[1];
        }
      else
        {
          float v[8];

          /* expand to the four corners, then transform by the entry's
             modelview straight into the mapped buffer */
          v[0] = vin[0];
          v[1] = vin[1];
          v[2] = vin[0];
          v[3] = vin[array_stride + 1];
          v[4] = vin[array_stride];
          v[5] = vin[array_stride + 1];
          v[6] = vin[array_stride];
          v[7] = vin[1];

          cogl_matrix_transform_points (&entry->model_view,
                                        2, /* n_components */
                                        sizeof (float) * 2, /* stride_in */
                                        v, /* points_in */
                                        /* strideout */
                                        vb_stride * sizeof (float),
                                        vout, /* points_out */
                                        4 /* n_points */);
        }

      /* expand the two logged tex coords per layer to all four corners */
      for (i = 0; i < entry->n_layers; i++)
        {
          const float *tin = vin + 2;
          float *tout = vout + POS_STRIDE + COLOR_STRIDE;

          tout[vb_stride * 0 + i * 2] = tin[i * 2];
          tout[vb_stride * 0 + 1 + i * 2] = tin[i * 2 + 1];
          tout[vb_stride * 1 + i * 2] = tin[i * 2];
          tout[vb_stride * 1 + 1 + i * 2] = tin[array_stride + i * 2 + 1];
          tout[vb_stride * 2 + i * 2] = tin[array_stride + i * 2];
          tout[vb_stride * 2 + 1 + i * 2] = tin[array_stride + i * 2 + 1];
          tout[vb_stride * 3 + i * 2] = tin[array_stride + i * 2];
          tout[vb_stride * 3 + 1 + i * 2] = tin[i * 2 + 1];
        }

      vin += array_stride * 2;
      vout += vb_stride * 4;
    }

  cogl_buffer_unmap (buffer);

  return array;
}

/* XXX NB: When _cogl_journal_flush() returns all state relating
 * to pipelines, all glEnable flags and current matrix state
 * is undefined.
 */
void
_cogl_journal_flush (void)
{
  CoglJournalFlushState state;
  int i;
  CoglFramebuffer *framebuffer;
  CoglMatrixStack *modelview_stack;
  COGL_STATIC_TIMER (flush_timer,
                     "Mainloop", /* parent */
                     "Journal Flush",
                     "The time spent flushing the Cogl journal",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  if (ctx->journal->len == 0)
    return;

  COGL_TIMER_START (_cogl_uprof_context, flush_timer);

  if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_BATCHING))
    g_print ("BATCHING: journal len = %d\n", ctx->journal->len);

  state.attributes = ctx->journal_flush_attributes_array;

  framebuffer = _cogl_get_framebuffer ();
  modelview_stack = _cogl_framebuffer_get_modelview_stack (framebuffer);
  state.modelview_stack = modelview_stack;
  state.projection_stack =
    _cogl_framebuffer_get_projection_stack (framebuffer);

  if (G_UNLIKELY ((cogl_debug_flags & COGL_DEBUG_DISABLE_SOFTWARE_CLIP) == 0))
    {
      /* We do an initial walk of the journal to analyse the clip stack
         batches to see if we can do software clipping. We do this as a
         separate walk of the journal because we can modify entries and
         this may end up joining together clip stack batches in the next
         iteration. */
      batch_and_call ((CoglJournalEntry *)ctx->journal->data, /* first entry */
                      ctx->journal->len, /* max number of entries to consider */
                      compare_entry_clip_stacks,
                      _cogl_journal_check_software_clip, /* callback */
                      &state); /* data */
    }

  /* We upload the vertices after the clip stack pass in case it
     modifies the entries */
  state.vertex_array =
    upload_vertices (&g_array_index (ctx->journal, CoglJournalEntry, 0),
                     ctx->journal->len,
                     ctx->journal_needed_vbo_len,
                     ctx->logged_vertices);
  state.array_offset = 0;

  /* batch_and_call() batches a list of journal entries according to some
   * given criteria and calls a callback once for each determined batch.
   *
   * The process of flushing the journal is staggered to reduce the amount
   * of driver/GPU state changes necessary:
   * 1) We split the entries according to the clip state.
   * 2) We split the entries according to the stride of the vertices:
   *      Each time the stride of our vertex data changes we need to call
   *      gl{Vertex,Color}Pointer to inform GL of new VBO offsets.
   *      Currently the only thing that affects the stride of our vertex data
   *      is the number of pipeline layers.
   * 3) We split the entries explicitly by the number of pipeline layers:
   *      We pad our vertex data when the number of layers is < 2 so that we
   *      can minimize changes in stride. Each time the number of layers
   *      changes we need to call glTexCoordPointer to inform GL of new VBO
   *      offsets.
   * 4) We then split according to compatible Cogl pipelines:
   *      This is where we flush pipeline state
   * 5) Finally we split according to modelview matrix changes:
   *      This is when we finally tell GL to draw something.
   *      Note: Splitting by modelview changes is skipped when we are doing
   *      the vertex transformation in software at log time.
   */
  batch_and_call ((CoglJournalEntry *)ctx->journal->data, /* first entry */
                  ctx->journal->len, /* max number of entries to consider */
                  compare_entry_clip_stacks,
                  _cogl_journal_flush_clip_stacks_and_entries, /* callback */
                  &state); /* data */

  for (i = 0; i < state.attributes->len; i++)
    cogl_object_unref (g_array_index (state.attributes,
                                      CoglVertexAttribute *, i));
  g_array_set_size (state.attributes, 0);

  cogl_object_unref (state.vertex_array);

  /* drop the journal's references on the logged pipelines and clips */
  for (i = 0; i < ctx->journal->len; i++)
    {
      CoglJournalEntry *entry =
        &g_array_index (ctx->journal, CoglJournalEntry, i);
      _cogl_pipeline_journal_unref (entry->pipeline);
      _cogl_clip_stack_unref (entry->clip_stack);
    }

  g_array_set_size (ctx->journal, 0);
  g_array_set_size (ctx->logged_vertices, 0);

  COGL_TIMER_STOP (_cogl_uprof_context, flush_timer);
}

/* Called lazily from _cogl_journal_log_quad when the journal is empty:
 * flushes framebuffer state that must then stay constant until the
 * journal is next flushed, and resets the needed-VBO-size accumulator. */
static void
_cogl_journal_init (void)
{
  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  /* Here we flush anything that we know must remain constant until
   * the next time the journal is flushed. Note: This lets us flush
   * things that themselves depend on the journal, such as clip
   * state. */

  /* NB: the journal deals with flushing the modelview stack and clip
     state manually */
  _cogl_framebuffer_flush_state (_cogl_get_framebuffer (),
                                 COGL_FRAMEBUFFER_FLUSH_SKIP_MODELVIEW |
                                 COGL_FRAMEBUFFER_FLUSH_SKIP_CLIP_STATE);

  ctx->journal_needed_vbo_len = 0;
}

/* Appends one textured quad to the journal without touching GL:
 * position gives the quad's corners, tex_coords holds tex_coords_len
 * floats of per-layer texture coordinates, and layer0_override_texture
 * optionally replaces layer 0's texture. The actual drawing happens
 * later in _cogl_journal_flush(). */
void
_cogl_journal_log_quad (const float *position,
                        CoglPipeline *pipeline,
                        int n_layers,
                        CoglHandle layer0_override_texture,
                        const float *tex_coords,
                        unsigned int tex_coords_len)
{
  gsize stride;
  int next_vert;
  GLfloat *v;
  int i;
  int next_entry;
  guint32 disable_layers;
  CoglJournalEntry *entry;
  CoglPipeline *source;
  CoglPipelineFlushOptions flush_options;
  COGL_STATIC_TIMER (log_timer,
                     "Mainloop", /* parent */
                     "Journal Log",
                     "The time spent logging in the Cogl journal",
                     0 /* no application private data */);

  _COGL_GET_CONTEXT (ctx, NO_RETVAL);

  COGL_TIMER_START (_cogl_uprof_context, log_timer);

  if (ctx->logged_vertices->len == 0)
    _cogl_journal_init ();

  /* The vertex data is logged into a separate array. The data needs
     to be copied into a vertex array before it's given to GL so we
     only store two vertices per quad and expand it to four while
     uploading. */

  /* XXX: See definition of GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS for details
   * about how we pack our vertex data */
  stride = GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS (n_layers);

  next_vert = ctx->logged_vertices->len;
  g_array_set_size (ctx->logged_vertices, next_vert + 2 * stride + 1);
  v = &g_array_index (ctx->logged_vertices, GLfloat, next_vert);

  /* We calculate the needed size of the vbo as we go because it
     depends on the number of layers in each entry and it's not easy
     calculate based on the length of the logged vertices array */
  ctx->journal_needed_vbo_len +=
    GET_JOURNAL_VB_STRIDE_FOR_N_LAYERS (n_layers) * 4;

  /* XXX: All the jumping around to fill in this strided buffer doesn't
   * seem ideal.
*/ /* FIXME: This is a hacky optimization, since it will break if we * change the definition of CoglColor: */ _cogl_pipeline_get_colorubv (pipeline, (guint8 *) v); v++; memcpy (v, position, sizeof (float) * 2); memcpy (v + stride, position + 2, sizeof (float) * 2); for (i = 0; i < n_layers; i++) { /* XXX: See definition of GET_JOURNAL_ARRAY_STRIDE_FOR_N_LAYERS * for details about how we pack our vertex data */ GLfloat *t = v + 2 + i * 2; memcpy (t, tex_coords + i * 4, sizeof (float) * 2); memcpy (t + stride, tex_coords + i * 4 + 2, sizeof (float) * 2); } if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_JOURNAL)) { g_print ("Logged new quad:\n"); v = &g_array_index (ctx->logged_vertices, GLfloat, next_vert); _cogl_journal_dump_logged_quad ((guint8 *)v, n_layers); } next_entry = ctx->journal->len; g_array_set_size (ctx->journal, next_entry + 1); entry = &g_array_index (ctx->journal, CoglJournalEntry, next_entry); entry->n_layers = n_layers; entry->array_offset = next_vert; source = pipeline; if (G_UNLIKELY (ctx->legacy_state_set)) { source = cogl_pipeline_copy (pipeline); _cogl_pipeline_apply_legacy_state (source); } flush_options.flags = 0; if (G_UNLIKELY (cogl_pipeline_get_n_layers (pipeline) != n_layers)) { disable_layers = (1 << n_layers) - 1; disable_layers = ~disable_layers; flush_options.disable_layers = disable_layers; flush_options.flags |= COGL_PIPELINE_FLUSH_DISABLE_MASK; } if (G_UNLIKELY (layer0_override_texture)) { flush_options.flags |= COGL_PIPELINE_FLUSH_LAYER0_OVERRIDE; flush_options.layer0_override_texture = layer0_override_texture; } if (G_UNLIKELY (flush_options.flags)) { /* If we haven't already created a derived pipeline... 
*/ if (source == pipeline) source = cogl_pipeline_copy (pipeline); _cogl_pipeline_apply_overrides (source, &flush_options); } entry->pipeline = _cogl_pipeline_journal_ref (source); entry->clip_stack = _cogl_clip_stack_ref (_cogl_get_clip_stack ()); if (G_UNLIKELY (source != pipeline)) cogl_handle_unref (source); cogl_get_modelview_matrix (&entry->model_view); if (G_UNLIKELY (cogl_debug_flags & COGL_DEBUG_DISABLE_BATCHING)) _cogl_journal_flush (); COGL_TIMER_STOP (_cogl_uprof_context, log_timer); }