/* mutter/cogl/tests/conform/test-conform-main.c */

#include "cogl-config.h"
#include <cogl/cogl.h>
#include <glib.h>
#include <locale.h>
#include <stdlib.h>
#include <string.h>
#include "test-declarations.h"
#include "test-utils.h"
/* A bit of sugar for adding new conformance tests */
/* ADD_TEST(FUNC, REQUIREMENTS, KNOWN_FAIL_REQUIREMENTS):
 * If argv[1] names FUNC, initialize the test utilities with the given
 * requirement flags and run FUNC, then exit with status 0 on success.
 * If initialization reports the requirements are unmet (and the
 * COGL_TEST_TRY_EVERYTHING override is not set in the environment),
 * exit with status 1 instead.  Control never returns to main() once a
 * name matches. */
#define ADD_TEST(FUNC, REQUIREMENTS, KNOWN_FAIL_REQUIREMENTS) \
G_STMT_START { \
if (strcmp (#FUNC, argv[1]) == 0) \
{ \
if (test_utils_init (REQUIREMENTS, KNOWN_FAIL_REQUIREMENTS) \
|| g_getenv ("COGL_TEST_TRY_EVERYTHING") != NULL) \
{ \
FUNC (); \
test_utils_fini (); \
exit (0); \
} \
else \
{ \
exit (1); \
} \
} \
} G_STMT_END
/* UNPORTED_TEST(FUNC): expands to nothing; keeps a record of tests
 * that have not yet been ported to this harness. */
#define UNPORTED_TEST(FUNC)
int
main (int argc, char **argv)
{
int i;
if (argc != 2)
{
g_printerr ("usage %s UNIT_TEST\n", argv[0]);
exit (1);
}
/* Just for convenience in case people try passing the wrapper
* filenames for the UNIT_TEST argument we normalize '-' characters
* to '_' characters... */
for (i = 0; argv[1][i]; i++)
{
if (argv[1][i] == '-')
argv[1][i] = '_';
}
/* This file is run through a sed script during the make step so the
* lines containing the tests need to be formatted on a single line
* each.
*/
UNPORTED_TEST (test_object);
UNPORTED_TEST (test_fixed);
UNPORTED_TEST (test_materials);
UNPORTED_TEST (test_readpixels);
ADD_TEST (test_layer_remove, 0, 0);
ADD_TEST (test_sparse_pipeline, 0, 0);
ADD_TEST (test_npot_texture, 0, 0);
UNPORTED_TEST (test_multitexture);
UNPORTED_TEST (test_texture_mipmaps);
UNPORTED_TEST (test_texture_rectangle);
UNPORTED_TEST (test_texture_pixmap_x11);
ADD_TEST (test_texture_get_set_data, 0, 0);
ADD_TEST (test_read_texture_formats, 0, TEST_KNOWN_FAILURE);
ADD_TEST (test_write_texture_formats, 0, 0);
ADD_TEST (test_alpha_textures, 0, 0);
UNPORTED_TEST (test_vertex_buffer_contiguous);
UNPORTED_TEST (test_vertex_buffer_interleved);
UNPORTED_TEST (test_vertex_buffer_mutability);
ADD_TEST (test_primitive, 0, 0);
ADD_TEST (test_custom_attributes, 0, 0);
ADD_TEST (test_offscreen, 0, 0);
cogl/journal: Don't sometimes hold a ref on the framebuffer d42f1873fcd0876244eb8468d72ce35459ba94ca introduced a semi circular reference between the CoglFramebuffer, and CoglJournal, where CoglJournal would keep a reference on the CoglFramebuffer when there were any entries in the journal log. To avoid risking leaking these objects indefinitely, when freeing objects without doing anything that triggered a flush, CoglFramebuffer had a "filter" on cogl_object_unref() calls, which knew about under what conditions CoglJournal had a reference to it. When it could detect that there were only the journal itself holding such a reference, it'd flush the journal, effectively releasing the reference the journal held, thus freeing itself, as well as the journal. When CoglFramebuffer was ported to be implemented using GObject instead of CoglObject, this "filter" was missed, causing not only awkward but infrequent leaks, but also situations where we'd flush journals when only the journal itself held the last reference to the framebuffer, meaning the journal would free the framebuffer, thus itself, in the middle of flushing, causing memory corruption and crashes. A way to detect this, by asserting on CoglObject reference count during flush, is by adding the `g_assert()` as described below, which will assert instead cause memory corruption. void _cogl_journal_flush (CoglJournal *journal { ... _cogl_journal_discard (journal); + g_assert (journal->_parent.ref_count > 0); ... } Fix this by making CoglFramebuffer the owner of the journal, which it already was, and remove any circle referencing that was there before, as it is not needed given that the CoglFramebuffer pointer is guaranteed to be valid for the lifetime of CoglJournal as the framebuffer is the owner of the journal. However, to not miss flushing before tearing down, which is important as this flushes painting calls to the driver that is important for e.g. 
using the result of those journal entries, flush the journal the first time cogl_framebuffer_dispose() is called, before doing anything else. This also adds a test case. Without having broken the circular reference, the test would fail on g_assert_null (offscreen), as it would have been "leaked" at this point, but the actual memory corruption would be a result of the `cogl_texture_get_data()` call, which flushes the framebuffer, and causes the 'mid-flush' destruction of the journal described above. Note that the texture keeps track of dependent framebuffers, but it does not hold any references to them. Closes: https://gitlab.gnome.org/GNOME/mutter/-/issues/1474 Part-of: <https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/1735>
2021-02-18 09:46:28 -05:00
ADD_TEST (test_journal_unref_flush, 0, 0);
ADD_TEST (test_framebuffer_get_bits,
TEST_REQUIREMENT_GL,
0);
ADD_TEST (test_point_size, 0, 0);
Add support for per-vertex point sizes This adds a new function to enable per-vertex point size on a pipeline. This can be set with cogl_pipeline_set_per_vertex_point_size(). Once enabled the point size can be set either by drawing with an attribute named 'cogl_point_size_in' or by writing to the 'cogl_point_size_out' builtin from a snippet. There is a feature flag which must be checked for before using per-vertex point sizes. This will only be set on GL >= 2.0 or on GLES 2.0. GL will only let you set a per-vertex point size from GLSL by writing to gl_PointSize. This is only available in GL2 and not in the older GLSL extensions. The per-vertex point size has its own pipeline state flag so that it can be part of the state that affects vertex shader generation. Having to enable the per vertex point size with a separate function is a bit awkward. Ideally it would work like the color attribute where you can just set it for every vertex in your primitive with cogl_pipeline_set_color or set it per-vertex by just using the attribute. This is harder to get working with the point size because we need to generate a different vertex shader depending on what attributes are bound. I think if we wanted to make this work transparently we would still want to internally have a pipeline property describing whether the shader was generated with per-vertex support so that it would work with the shader cache correctly. Potentially we could make the per-vertex property internal and automatically make a weak pipeline whenever the attribute is bound. However we would then also need to automatically detect when an application is writing to cogl_point_size_out from a snippet. Reviewed-by: Robert Bragg <robert@linux.intel.com> (cherry picked from commit 8495d9c1c15ce389885a9356d965eabd97758115) Conflicts: cogl/cogl-context.c cogl/cogl-pipeline-private.h cogl/cogl-pipeline.c cogl/cogl-private.h cogl/driver/gl/cogl-pipeline-progend-fixed.c cogl/driver/gl/gl/cogl-pipeline-progend-fixed-arbfp.c
2012-11-08 11:56:02 -05:00
ADD_TEST (test_point_size_attribute,
0, 0);
Add support for per-vertex point sizes This adds a new function to enable per-vertex point size on a pipeline. This can be set with cogl_pipeline_set_per_vertex_point_size(). Once enabled the point size can be set either by drawing with an attribute named 'cogl_point_size_in' or by writing to the 'cogl_point_size_out' builtin from a snippet. There is a feature flag which must be checked for before using per-vertex point sizes. This will only be set on GL >= 2.0 or on GLES 2.0. GL will only let you set a per-vertex point size from GLSL by writing to gl_PointSize. This is only available in GL2 and not in the older GLSL extensions. The per-vertex point size has its own pipeline state flag so that it can be part of the state that affects vertex shader generation. Having to enable the per vertex point size with a separate function is a bit awkward. Ideally it would work like the color attribute where you can just set it for every vertex in your primitive with cogl_pipeline_set_color or set it per-vertex by just using the attribute. This is harder to get working with the point size because we need to generate a different vertex shader depending on what attributes are bound. I think if we wanted to make this work transparently we would still want to internally have a pipeline property describing whether the shader was generated with per-vertex support so that it would work with the shader cache correctly. Potentially we could make the per-vertex property internal and automatically make a weak pipeline whenever the attribute is bound. However we would then also need to automatically detect when an application is writing to cogl_point_size_out from a snippet. Reviewed-by: Robert Bragg <robert@linux.intel.com> (cherry picked from commit 8495d9c1c15ce389885a9356d965eabd97758115) Conflicts: cogl/cogl-context.c cogl/cogl-pipeline-private.h cogl/cogl-pipeline.c cogl/cogl-private.h cogl/driver/gl/cogl-pipeline-progend-fixed.c cogl/driver/gl/gl/cogl-pipeline-progend-fixed-arbfp.c
2012-11-08 11:56:02 -05:00
ADD_TEST (test_point_size_attribute_snippet,
0, 0);
ADD_TEST (test_point_sprite,
0, 0);
ADD_TEST (test_point_sprite_orientation,
0, TEST_KNOWN_FAILURE);
ADD_TEST (test_point_sprite_glsl,
0, 0);
ADD_TEST (test_alpha_test, 0, 0);
ADD_TEST (test_map_buffer_range, TEST_REQUIREMENT_MAP_WRITE, 0);
ADD_TEST (test_primitive_and_journal, 0, 0);
ADD_TEST (test_copy_replace_texture, 0, 0);
ADD_TEST (test_pipeline_cache_unrefs_texture, 0, 0);
ADD_TEST (test_pipeline_shader_state, 0, 0);
UNPORTED_TEST (test_viewport);
ADD_TEST (test_fence, TEST_REQUIREMENT_FENCE, 0);
ADD_TEST (test_texture_no_allocate, 0, 0);
ADD_TEST (test_texture_rg, TEST_REQUIREMENT_TEXTURE_RG, 0);
g_printerr ("Unknown test name \"%s\"\n", argv[1]);
return 1;
}