#version 450

#extension GL_EXT_scalar_block_layout : require
#extension GL_EXT_shader_realtime_clock : require

layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;

const int MaxLayers = 4;

layout(constant_id = 0) const int  c_layerCount = 1;
layout(constant_id = 1) const bool c_swapChannels = false;
layout(constant_id = 2) const uint c_ycbcrMask = 0;
layout(constant_id = 3) const bool c_compositing_debug = false;

layout(binding = 0, rgba8) writeonly uniform image2D dst;

layout(std430, push_constant) uniform layers_t {
    vec2  u_scale[MaxLayers];
    vec2  u_offset[MaxLayers];
    float u_opacity[MaxLayers];
    float u_borderAlpha[MaxLayers];
    uint  u_frameId;
};

layout(binding = 1) uniform sampler2D s_samplers[MaxLayers];
layout(binding = 2) uniform sampler2D s_ycbcr_samplers[MaxLayers];

vec3 srgbToLinear(vec3 color) {
    bvec3 isLo = lessThanEqual(color, vec3(0.04045f));

    vec3 loPart = color / 12.92f;
    vec3 hiPart = pow((color + 0.055f) / 1.055f, vec3(12.0f / 5.0f));
    return mix(hiPart, loPart, isLo);
}

vec4 srgbToLinear(vec4 color) {
    return vec4(srgbToLinear(color.rgb), color.a);
}

vec3 linearToSrgb(vec3 color) {
    bvec3 isLo = lessThanEqual(color, vec3(0.0031308f));

    vec3 loPart = color * 12.92f;
    vec3 hiPart = pow(color, vec3(5.0f / 12.0f)) * 1.055f - 0.055f;
    return mix(hiPart, loPart, isLo);
}

vec4 linearToSrgb(vec4 color) {
    return vec4(linearToSrgb(color.rgb), color.a);
}

void compositing_debug(uvec2 size, uvec2 coord) {
    uvec2 pos = coord;
    pos.x -= (u_frameId & 2) != 0 ? /* size.x - 160 */ 128 : 0;
    pos.y -= (u_frameId & 1) != 0 ? /* size.y - 160 */ 128 : 0;

    if (pos.x >= 40 && pos.x < 120 && pos.y >= 40 && pos.y < 120) {
        vec4 value = vec4(1.0f, 1.0f, 1.0f, 1.0f);
        if (pos.x >= 48 && pos.x < 112 && pos.y >= 48 && pos.y < 112) {
            vec4 time = round(unpackUnorm4x8(clockRealtime2x32EXT().x * 1664525u + 1013904223u)).xyzw;
            if (time.x + time.y + time.z + time.w < 2.0f)
                value = vec4(0.0f, 0.0f, 0.0f, 1.0f);
        }
        imageStore(dst, ivec2(coord), value);
    }
}

vec4 sampleLayer(sampler2D layerSampler, uint layerIdx, vec2 uv) {
    vec2 coord = (uv + u_offset[layerIdx]) * u_scale[layerIdx];
    vec2 texSize = textureSize(layerSampler, 0);

    if (coord.x < 0.0f || coord.y < 0.0f || coord.x >= texSize.x || coord.y >= texSize.y)
        return vec4(0.0f, 0.0f, 0.0f, u_borderAlpha[layerIdx]);

    return textureLod(layerSampler, coord, 0.0f);
}

vec4 sampleLayer(uint layerIdx, vec2 uv) {
    if ((c_ycbcrMask & (1 << layerIdx)) != 0)
        return srgbToLinear(sampleLayer(s_ycbcr_samplers[layerIdx], layerIdx, uv));
    return sampleLayer(s_samplers[layerIdx], layerIdx, uv);
}

void main() {
    uvec2 coord = uvec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);
    uvec2 outSize = imageSize(dst);

    if (coord.x >= outSize.x || coord.y >= outSize.y)
        return;

    vec2 uv = vec2(coord);

    vec4 outputValue = vec4(0.0f);

    if (c_layerCount > 0)
        outputValue = sampleLayer(0, uv) * u_opacity[0];

    for (int i = 1; i < c_layerCount; i++) {
        vec4 layerColor = sampleLayer(i, uv);
        // wl_surfaces come with premultiplied alpha, so they are already
        // multiplied by layerColor.a.
        // We then multiply that by the layer's opacity to get to our final
        // premultiplied state.
        // For the other side of things, we need to multiply by
        // (1.0f - (layerColor.a * opacity)).
        float opacity = u_opacity[i];
        float layerAlpha = opacity * layerColor.a;
        outputValue = layerColor * opacity + outputValue * (1.0f - layerAlpha);
    }

    if (c_swapChannels)
        outputValue = outputValue.bgra;

    imageStore(dst, ivec2(coord), linearToSrgb(outputValue));

    // Indicator to quickly tell if we're in the compositing path or not.
    if (c_compositing_debug)
        compositing_debug(outSize, coord);
}