cogl/gl: Use CPU packing/unpacking for opaque fp16 formats
There is no internal fp16 format without alpha, which means a direct framebuffer read would return garbage in the alpha channel. We have to go through CPU packing/unpacking so that the alpha always comes back as 1.

Part-of: <https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/3441>
commit 64f4415f28
parent 9c219db9fe
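Below is a minimal sketch of the CPU fix-up the commit message refers to, not Cogl's actual conversion code: after a direct readback of an opaque ("X") fp16 format, each pixel still occupies four half-floats, but the fourth component contains garbage and has to be overwritten with 1.0. The helper name is hypothetical; the only fixed fact is that 1.0 encodes as 0x3C00 in IEEE 754 binary16.

/* Sketch only, assuming a tightly packed buffer of 4 half-floats per
 * pixel; fix_opaque_fp16_alpha is a hypothetical helper, not a Cogl
 * function. */
#include <stddef.h>
#include <stdint.h>

#define FP16_ONE 0x3C00 /* 1.0 as an IEEE 754 half float */

static void
fix_opaque_fp16_alpha (uint16_t *pixels,
                       size_t    n_pixels)
{
  for (size_t i = 0; i < n_pixels; i++)
    pixels[i * 4 + 3] = FP16_ONE; /* force alpha to 1.0 */
}

A readback path would run this over the downloaded buffer whenever the caller asked for one of the X (ignore-alpha) fp16 formats.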
@@ -280,10 +280,8 @@ _cogl_driver_pixel_format_to_gl (CoglContext *context,
       glformat = GL_BGRA;
       gltype = GL_HALF_FLOAT;
       break;
-    case COGL_PIXEL_FORMAT_XRGB_FP_16161616:
     case COGL_PIXEL_FORMAT_ARGB_FP_16161616:
     case COGL_PIXEL_FORMAT_ARGB_FP_16161616_PRE:
-    case COGL_PIXEL_FORMAT_XBGR_FP_16161616:
     case COGL_PIXEL_FORMAT_ABGR_FP_16161616:
     case COGL_PIXEL_FORMAT_ABGR_FP_16161616_PRE:
       required_format =
@@ -294,6 +292,15 @@ _cogl_driver_pixel_format_to_gl (CoglContext *context,
                                          &glformat,
                                          &gltype);
       break;
+    case COGL_PIXEL_FORMAT_XRGB_FP_16161616:
+    case COGL_PIXEL_FORMAT_XBGR_FP_16161616:
+      required_format =
+        _cogl_driver_pixel_format_to_gl (context,
+                                         COGL_PIXEL_FORMAT_RGBX_FP_16161616,
+                                         &glintformat,
+                                         &glformat,
+                                         &gltype);
+      break;
 
     case COGL_PIXEL_FORMAT_DEPTH_16:
       glintformat = GL_DEPTH_COMPONENT16;
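Design note on the hunk above: COGL_PIXEL_FORMAT_XRGB_FP_16161616 and COGL_PIXEL_FORMAT_XBGR_FP_16161616 no longer resolve through the alpha-carrying ARGB/ABGR group; they delegate to COGL_PIXEL_FORMAT_RGBX_FP_16161616 instead. The effect, per the commit message, is that these opaque formats stop being read straight out of the framebuffer (where the X channel is garbage) and instead take Cogl's CPU packing/unpacking path, which fills the alpha with 1; presumably the required_format returned here, differing from the format the caller asked for, is what triggers that conversion.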