Diffstat (limited to 'src/qcam')
23 files changed, 1049 insertions, 237 deletions
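Two recurring themes in the diff below are worth calling out up front: the per-format GLSL files are replaced by a few parameterised shaders selected through #define macros prepended at compile time, and main_window.cpp switches to recycling libcamera Request objects instead of recreating them for every frame. As a rough sketch of the first mechanism (assuming Qt 5's QFile/QOpenGLShader API and a current OpenGL context; loadFragmentShader() is a hypothetical helper name, the real code lives in ViewFinderGL::createFragmentShader() further down):

/*
 * Hedged sketch, not qcam code: build a fragment shader by prepending
 * format-specific #define macros to a GLSL source file, the mechanism
 * that ViewFinderGL::createFragmentShader() implements in the diff below.
 * Requires a current OpenGL context when the shader is created.
 */
#include <memory>

#include <QByteArray>
#include <QFile>
#include <QOpenGLShader>
#include <QString>
#include <QStringList>

std::unique_ptr<QOpenGLShader> loadFragmentShader(const QString &path,
						  const QStringList &defines)
{
	QFile file(path);
	if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
		return nullptr;

	/* Prepend the #define block so the shader selects its code path. */
	QByteArray src = file.readAll();
	src.prepend((defines.join('\n') + "\n").toUtf8());

	auto shader = std::make_unique<QOpenGLShader>(QOpenGLShader::Fragment);
	if (!shader->compileSourceCode(src))
		return nullptr;

	return shader;
}

Matched against the selectFormat() table in the diff, a call such as loadFragmentShader(":YUV_2_planes.frag", { "#define YUV_PATTERN_UV" }) corresponds to the NV12, NV16 and NV24 cases.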
diff --git a/src/qcam/assets/feathericons/feathericons.qrc b/src/qcam/assets/feathericons/feathericons.qrc index 656f2b46..c5302040 100644 --- a/src/qcam/assets/feathericons/feathericons.qrc +++ b/src/qcam/assets/feathericons/feathericons.qrc @@ -1,11 +1,11 @@ <!-- SPDX-License-Identifier: GPL-2.0-or-later --> <!DOCTYPE RCC><RCC version="1.0"> <qresource> -<file>./aperture.svg</file> -<file>./camera-off.svg</file> -<file>./play-circle.svg</file> -<file>./save.svg</file> -<file>./stop-circle.svg</file> -<file>./x-circle.svg</file> + <file>aperture.svg</file> + <file>camera-off.svg</file> + <file>play-circle.svg</file> + <file>save.svg</file> + <file>stop-circle.svg</file> + <file>x-circle.svg</file> </qresource> </RCC> diff --git a/src/qcam/assets/shader/NV_2_planes_VU_f.glsl b/src/qcam/assets/shader/NV_2_planes_VU_f.glsl deleted file mode 100644 index 086c5b6d..00000000 --- a/src/qcam/assets/shader/NV_2_planes_VU_f.glsl +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2020, Linaro - * - * NV_2_planes_VU_f.glsl - Fragment shader code for NV21, NV61 and NV42 formats - */ - -#ifdef GL_ES -precision mediump float; -#endif - -varying vec2 textureOut; -uniform sampler2D tex_y; -uniform sampler2D tex_u; - -void main(void) -{ - vec3 yuv; - vec3 rgb; - mat3 yuv2rgb_bt601_mat = mat3( - vec3(1.164, 1.164, 1.164), - vec3(0.000, -0.392, 2.017), - vec3(1.596, -0.813, 0.000) - ); - - yuv.x = texture2D(tex_y, textureOut).r - 0.063; - yuv.y = texture2D(tex_u, textureOut).g - 0.500; - yuv.z = texture2D(tex_u, textureOut).r - 0.500; - - rgb = yuv2rgb_bt601_mat * yuv; - gl_FragColor = vec4(rgb, 1.0); -} diff --git a/src/qcam/assets/shader/RGB.frag b/src/qcam/assets/shader/RGB.frag new file mode 100644 index 00000000..4c374ac9 --- /dev/null +++ b/src/qcam/assets/shader/RGB.frag @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Laurent Pinchart + * + * RGB.frag - Fragment shader code for RGB formats + */ + +#ifdef GL_ES +precision mediump float; +#endif + +varying vec2 textureOut; +uniform sampler2D tex_y; + +void main(void) +{ + vec3 rgb; + + rgb = texture2D(tex_y, textureOut).RGB_PATTERN; + + gl_FragColor = vec4(rgb, 1.0); +} diff --git a/src/qcam/assets/shader/NV_2_planes_UV_f.glsl b/src/qcam/assets/shader/YUV_2_planes.frag index 67633a11..125f1c85 100644 --- a/src/qcam/assets/shader/NV_2_planes_UV_f.glsl +++ b/src/qcam/assets/shader/YUV_2_planes.frag @@ -2,7 +2,7 @@ /* * Copyright (C) 2020, Linaro * - * NV_2_planes_UV_f.glsl - Fragment shader code for NV12, NV16 and NV24 formats + * YUV_2_planes.frag - Fragment shader code for NV12, NV16 and NV24 formats */ #ifdef GL_ES @@ -24,8 +24,15 @@ void main(void) ); yuv.x = texture2D(tex_y, textureOut).r - 0.063; +#if defined(YUV_PATTERN_UV) yuv.y = texture2D(tex_u, textureOut).r - 0.500; yuv.z = texture2D(tex_u, textureOut).g - 0.500; +#elif defined(YUV_PATTERN_VU) + yuv.y = texture2D(tex_u, textureOut).g - 0.500; + yuv.z = texture2D(tex_u, textureOut).r - 0.500; +#else +#error Invalid pattern +#endif rgb = yuv2rgb_bt601_mat * yuv; gl_FragColor = vec4(rgb, 1.0); diff --git a/src/qcam/assets/shader/NV_3_planes_f.glsl b/src/qcam/assets/shader/YUV_3_planes.frag index 4bc94184..2be74b5d 100644 --- a/src/qcam/assets/shader/NV_3_planes_f.glsl +++ b/src/qcam/assets/shader/YUV_3_planes.frag @@ -2,7 +2,7 @@ /* * Copyright (C) 2020, Linaro * - * NV_3_planes_UV_f.glsl - Fragment shader code for YUV420 format + * YUV_3_planes_UV.frag - Fragment shader code for YUV420 format */ 
#ifdef GL_ES diff --git a/src/qcam/assets/shader/YUV_packed.frag b/src/qcam/assets/shader/YUV_packed.frag new file mode 100644 index 00000000..d6efd4ce --- /dev/null +++ b/src/qcam/assets/shader/YUV_packed.frag @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * YUV_packed.frag - Fragment shader code for YUYV packed formats + */ + +#ifdef GL_ES +precision mediump float; +#endif + +varying vec2 textureOut; + +uniform sampler2D tex_y; +uniform vec2 tex_step; + +void main(void) +{ + mat3 yuv2rgb_bt601_mat = mat3( + vec3(1.164, 1.164, 1.164), + vec3(0.000, -0.392, 2.017), + vec3(1.596, -0.813, 0.000) + ); + vec3 yuv2rgb_bt601_offset = vec3(0.063, 0.500, 0.500); + + /* + * The sampler won't interpolate the texture correctly along the X axis, + * as each RGBA pixel effectively stores two pixels. We thus need to + * interpolate manually. + * + * In integer texture coordinates, the Y values are layed out in the + * texture memory as follows: + * + * ...| Y U Y V | Y U Y V | Y U Y V |... + * ...| R G B A | R G B A | R G B A |... + * ^ ^ ^ ^ ^ ^ + * | | | | | | + * n-1 n-0.5 n n+0.5 n+1 n+1.5 + * + * For a texture location x in the interval [n, n+1[, sample the left + * and right pixels at n and n+1, and interpolate them with + * + * left.r * (1 - a) + left.b * a if fract(x) < 0.5 + * left.b * (1 - a) + right.r * a if fract(x) >= 0.5 + * + * with a = fract(x * 2) which can also be written + * + * a = fract(x) * 2 if fract(x) < 0.5 + * a = fract(x) * 2 - 1 if fract(x) >= 0.5 + */ + vec2 pos = textureOut; + float f_x = fract(pos.x / tex_step.x); + + vec4 left = texture2D(tex_y, vec2(pos.x - f_x * tex_step.x, pos.y)); + vec4 right = texture2D(tex_y, vec2(pos.x + (1.0 - f_x) * tex_step.x , pos.y)); + +#if defined(YUV_PATTERN_UYVY) + float y_left = mix(left.g, left.a, f_x * 2.0); + float y_right = mix(left.a, right.g, f_x * 2.0 - 1.0); + vec2 uv = mix(left.rb, right.rb, f_x); +#elif defined(YUV_PATTERN_VYUY) + float y_left = mix(left.g, left.a, f_x * 2.0); + float y_right = mix(left.a, right.g, f_x * 2.0 - 1.0); + vec2 uv = mix(left.br, right.br, f_x); +#elif defined(YUV_PATTERN_YUYV) + float y_left = mix(left.r, left.b, f_x * 2.0); + float y_right = mix(left.b, right.r, f_x * 2.0 - 1.0); + vec2 uv = mix(left.ga, right.ga, f_x); +#elif defined(YUV_PATTERN_YVYU) + float y_left = mix(left.r, left.b, f_x * 2.0); + float y_right = mix(left.b, right.r, f_x * 2.0 - 1.0); + vec2 uv = mix(left.ag, right.ag, f_x); +#else +#error Invalid pattern +#endif + + float y = mix(y_left, y_right, step(0.5, f_x)); + + vec3 rgb = yuv2rgb_bt601_mat * (vec3(y, uv) - yuv2rgb_bt601_offset); + + gl_FragColor = vec4(rgb, 1.0); +} diff --git a/src/qcam/assets/shader/bayer_1x_packed.frag b/src/qcam/assets/shader/bayer_1x_packed.frag new file mode 100644 index 00000000..f53f5575 --- /dev/null +++ b/src/qcam/assets/shader/bayer_1x_packed.frag @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* + * Based on the code from http://jgt.akpeters.com/papers/McGuire08/ + * + * Efficient, High-Quality Bayer Demosaic Filtering on GPUs + * + * Morgan McGuire + * + * This paper appears in issue Volume 13, Number 4. + * --------------------------------------------------------- + * Copyright (c) 2008, Morgan McGuire. All rights reserved. + * + * + * Modified by Linaro Ltd for 10/12-bit packed vs 8-bit raw Bayer format, + * and for simpler demosaic algorithm. 
+ * Copyright (C) 2020, Linaro + * + * bayer_1x_packed.frag - Fragment shader code for raw Bayer 10-bit and 12-bit + * packed formats + */ + +#ifdef GL_ES +precision mediump float; +#endif + +/* + * These constants are used to select the bytes containing the HS part of + * the pixel value: + * BPP - bytes per pixel, + * THRESHOLD_L = fract(BPP) * 0.5 + 0.02 + * THRESHOLD_H = 1.0 - fract(BPP) * 1.5 + 0.02 + * Let X is the x coordinate in the texture measured in bytes (so that the + * range is from 0 to (stride_-1)) aligned on the nearest pixel. + * E.g. for RAW10P: + * -------------+-------------------+-------------------+-- + * pixel No | 0 1 2 3 | 4 5 6 7 | ... + * -------------+-------------------+-------------------+-- + * byte offset | 0 1 2 3 4 | 5 6 7 8 9 | ... + * -------------+-------------------+-------------------+-- + * X | 0.0 1.25 2.5 3.75 | 5.0 6.25 7.5 8.75 | ... + * -------------+-------------------+-------------------+-- + * If fract(X) < THRESHOLD_L then the previous byte contains the LS + * bits of the pixel values and needs to be skipped. + * If fract(X) > THRESHOLD_H then the next byte contains the LS bits + * of the pixel values and needs to be skipped. + */ +#if defined(RAW10P) +#define BPP 1.25 +#define THRESHOLD_L 0.14 +#define THRESHOLD_H 0.64 +#elif defined(RAW12P) +#define BPP 1.5 +#define THRESHOLD_L 0.27 +#define THRESHOLD_H 0.27 +#else +#error Invalid raw format +#endif + + +varying vec2 textureOut; + +/* the texture size in pixels */ +uniform vec2 tex_size; +uniform vec2 tex_step; +uniform vec2 tex_bayer_first_red; + +uniform sampler2D tex_y; + +void main(void) +{ + vec3 rgb; + + /* + * center_bytes holds the coordinates of the MS byte of the pixel + * being sampled on the [0, stride-1/height-1] range. + * center_pixel holds the coordinates of the pixel being sampled + * on the [0, width/height-1] range. + */ + vec2 center_bytes; + vec2 center_pixel; + + /* + * x- and y-positions of the adjacent pixels on the [0, 1] range. + */ + vec2 xcoords; + vec2 ycoords; + + /* + * The coordinates passed to the shader in textureOut may point + * to a place in between the pixels if the texture format doesn't + * match the image format. In particular, MIPI packed raw Bayer + * formats don't have a matching texture format. + * In this case align the coordinates to the left nearest pixel + * by hand. + */ + center_pixel = floor(textureOut * tex_size); + center_bytes.y = center_pixel.y; + + /* + * Add a small number (a few mantissa's LSBs) to avoid float + * representation issues. Maybe paranoic. + */ + center_bytes.x = BPP * center_pixel.x + 0.02; + + float fract_x = fract(center_bytes.x); + + /* + * The below floor() call ensures that center_bytes.x points + * at one of the bytes representing the 8 higher bits of + * the pixel value, not at the byte containing the LS bits + * of the group of the pixels. + */ + center_bytes.x = floor(center_bytes.x); + center_bytes *= tex_step; + + xcoords = center_bytes.x + vec2(-tex_step.x, tex_step.x); + ycoords = center_bytes.y + vec2(-tex_step.y, tex_step.y); + + /* + * If xcoords[0] points at the byte containing the LS bits + * of the previous group of the pixels, move xcoords[0] one + * byte back. + */ + xcoords[0] += (fract_x < THRESHOLD_L) ? -tex_step.x : 0.0; + + /* + * If xcoords[1] points at the byte containing the LS bits + * of the current group of the pixels, move xcoords[1] one + * byte forward. + */ + xcoords[1] += (fract_x > THRESHOLD_H) ? 
tex_step.x : 0.0; + + vec2 alternate = mod(center_pixel.xy + tex_bayer_first_red, 2.0); + bool even_col = alternate.x < 1.0; + bool even_row = alternate.y < 1.0; + + /* + * We need to sample the central pixel and the ones with offset + * of -1 to +1 pixel in both X and Y directions. Let's name these + * pixels as below, where C is the central pixel: + * + * +----+----+----+----+ + * | \ x| | | | + * |y \ | -1 | 0 | +1 | + * +----+----+----+----+ + * | +1 | D2 | A1 | D3 | + * +----+----+----+----+ + * | 0 | B0 | C | B1 | + * +----+----+----+----+ + * | -1 | D0 | A0 | D1 | + * +----+----+----+----+ + * + * In the below equations (0,-1).r means "r component of the texel + * shifted by -tex_step.y from the center_bytes one" etc. + * + * In the even row / even column (EE) case the colour values are: + * R = C = (0,0).r, + * G = (A0 + A1 + B0 + B1) / 4.0 = + * ( (0,-1).r + (0,1).r + (-1,0).r + (1,0).r ) / 4.0, + * B = (D0 + D1 + D2 + D3) / 4.0 = + * ( (-1,-1).r + (1,-1).r + (-1,1).r + (1,1).r ) / 4.0 + * + * For even row / odd column (EO): + * R = (B0 + B1) / 2.0 = ( (-1,0).r + (1,0).r ) / 2.0, + * G = C = (0,0).r, + * B = (A0 + A1) / 2.0 = ( (0,-1).r + (0,1).r ) / 2.0 + * + * For odd row / even column (OE): + * R = (A0 + A1) / 2.0 = ( (0,-1).r + (0,1).r ) / 2.0, + * G = C = (0,0).r, + * B = (B0 + B1) / 2.0 = ( (-1,0).r + (1,0).r ) / 2.0 + * + * For odd row / odd column (OO): + * R = (D0 + D1 + D2 + D3) / 4.0 = + * ( (-1,-1).r + (1,-1).r + (-1,1).r + (1,1).r ) / 4.0, + * G = (A0 + A1 + B0 + B1) / 4.0 = + * ( (0,-1).r + (0,1).r + (-1,0).r + (1,0).r ) / 4.0, + * B = C = (0,0).r + */ + + /* + * Fetch the values and precalculate the terms: + * patterns.x = (A0 + A1) / 2.0 + * patterns.y = (B0 + B1) / 2.0 + * patterns.z = (A0 + A1 + B0 + B1) / 4.0 + * patterns.w = (D0 + D1 + D2 + D3) / 4.0 + */ + #define fetch(x, y) texture2D(tex_y, vec2(x, y)).r + + float C = texture2D(tex_y, center_bytes).r; + vec4 patterns = vec4( + fetch(center_bytes.x, ycoords[0]), /* A0: (0,-1) */ + fetch(xcoords[0], center_bytes.y), /* B0: (-1,0) */ + fetch(xcoords[0], ycoords[0]), /* D0: (-1,-1) */ + fetch(xcoords[1], ycoords[0])); /* D1: (1,-1) */ + vec4 temp = vec4( + fetch(center_bytes.x, ycoords[1]), /* A1: (0,1) */ + fetch(xcoords[1], center_bytes.y), /* B1: (1,0) */ + fetch(xcoords[1], ycoords[1]), /* D3: (1,1) */ + fetch(xcoords[0], ycoords[1])); /* D2: (-1,1) */ + patterns = (patterns + temp) * 0.5; + /* .x = (A0 + A1) / 2.0, .y = (B0 + B1) / 2.0 */ + /* .z = (D0 + D3) / 2.0, .w = (D1 + D2) / 2.0 */ + patterns.w = (patterns.z + patterns.w) * 0.5; + patterns.z = (patterns.x + patterns.y) * 0.5; + + rgb = even_col ? + (even_row ? + vec3(C, patterns.zw) : + vec3(patterns.x, C, patterns.y)) : + (even_row ? + vec3(patterns.y, C, patterns.x) : + vec3(patterns.wz, C)); + + gl_FragColor = vec4(rgb, 1.0); +} diff --git a/src/qcam/assets/shader/bayer_8.frag b/src/qcam/assets/shader/bayer_8.frag new file mode 100644 index 00000000..4ece44ab --- /dev/null +++ b/src/qcam/assets/shader/bayer_8.frag @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* +From http://jgt.akpeters.com/papers/McGuire08/ + +Efficient, High-Quality Bayer Demosaic Filtering on GPUs + +Morgan McGuire + +This paper appears in issue Volume 13, Number 4. +--------------------------------------------------------- +Copyright (c) 2008, Morgan McGuire. All rights reserved. + +Modified by Linaro Ltd to integrate it into libcamera. 
+Copyright (C) 2021, Linaro +*/ + +//Pixel Shader + +/** Monochrome RGBA or GL_LUMINANCE Bayer encoded texture.*/ +uniform sampler2D tex_y; +varying vec4 center; +varying vec4 yCoord; +varying vec4 xCoord; + +void main(void) { + #define fetch(x, y) texture2D(tex_y, vec2(x, y)).r + + float C = texture2D(tex_y, center.xy).r; // ( 0, 0) + const vec4 kC = vec4( 4.0, 6.0, 5.0, 5.0) / 8.0; + + // Determine which of four types of pixels we are on. + vec2 alternate = mod(floor(center.zw), 2.0); + + vec4 Dvec = vec4( + fetch(xCoord[1], yCoord[1]), // (-1,-1) + fetch(xCoord[1], yCoord[2]), // (-1, 1) + fetch(xCoord[2], yCoord[1]), // ( 1,-1) + fetch(xCoord[2], yCoord[2])); // ( 1, 1) + + vec4 PATTERN = (kC.xyz * C).xyzz; + + // Can also be a dot product with (1,1,1,1) on hardware where that is + // specially optimized. + // Equivalent to: D = Dvec[0] + Dvec[1] + Dvec[2] + Dvec[3]; + Dvec.xy += Dvec.zw; + Dvec.x += Dvec.y; + + vec4 value = vec4( + fetch(center.x, yCoord[0]), // ( 0,-2) + fetch(center.x, yCoord[1]), // ( 0,-1) + fetch(xCoord[0], center.y), // (-2, 0) + fetch(xCoord[1], center.y)); // (-1, 0) + + vec4 temp = vec4( + fetch(center.x, yCoord[3]), // ( 0, 2) + fetch(center.x, yCoord[2]), // ( 0, 1) + fetch(xCoord[3], center.y), // ( 2, 0) + fetch(xCoord[2], center.y)); // ( 1, 0) + + // Even the simplest compilers should be able to constant-fold these to + // avoid the division. + // Note that on scalar processors these constants force computation of some + // identical products twice. + const vec4 kA = vec4(-1.0, -1.5, 0.5, -1.0) / 8.0; + const vec4 kB = vec4( 2.0, 0.0, 0.0, 4.0) / 8.0; + const vec4 kD = vec4( 0.0, 2.0, -1.0, -1.0) / 8.0; + + // Conserve constant registers and take advantage of free swizzle on load + #define kE (kA.xywz) + #define kF (kB.xywz) + + value += temp; + + // There are five filter patterns (identity, cross, checker, + // theta, phi). Precompute the terms from all of them and then + // use swizzles to assign to color channels. + // + // Channel Matches + // x cross (e.g., EE G) + // y checker (e.g., EE B) + // z theta (e.g., EO R) + // w phi (e.g., EO R) + #define A (value[0]) + #define B (value[1]) + #define D (Dvec.x) + #define E (value[2]) + #define F (value[3]) + + // Avoid zero elements. On a scalar processor this saves two MADDs + // and it has no effect on a vector processor. + PATTERN.yzw += (kD.yz * D).xyy; + + PATTERN += (kA.xyz * A).xyzx + (kE.xyw * E).xyxz; + PATTERN.xw += kB.xw * B; + PATTERN.xz += kF.xz * F; + + gl_FragColor.rgb = (alternate.y == 0.0) ? + ((alternate.x == 0.0) ? + vec3(C, PATTERN.xy) : + vec3(PATTERN.z, C, PATTERN.w)) : + ((alternate.x == 0.0) ? + vec3(PATTERN.w, C, PATTERN.z) : + vec3(PATTERN.yx, C)); +} diff --git a/src/qcam/assets/shader/bayer_8.vert b/src/qcam/assets/shader/bayer_8.vert new file mode 100644 index 00000000..3695a5e9 --- /dev/null +++ b/src/qcam/assets/shader/bayer_8.vert @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ +/* +From http://jgt.akpeters.com/papers/McGuire08/ + +Efficient, High-Quality Bayer Demosaic Filtering on GPUs + +Morgan McGuire + +This paper appears in issue Volume 13, Number 4. +--------------------------------------------------------- +Copyright (c) 2008, Morgan McGuire. All rights reserved. + +Modified by Linaro Ltd to integrate it into libcamera. 
+Copyright (C) 2021, Linaro +*/ + +//Vertex Shader + +attribute vec4 vertexIn; +attribute vec2 textureIn; + +uniform vec2 tex_size; /* The texture size in pixels */ +uniform vec2 tex_step; + +/** Pixel position of the first red pixel in the */ +/** Bayer pattern. [{0,1}, {0, 1}]*/ +uniform vec2 tex_bayer_first_red; + +/** .xy = Pixel being sampled in the fragment shader on the range [0, 1] + .zw = ...on the range [0, sourceSize], offset by firstRed */ +varying vec4 center; + +/** center.x + (-2/w, -1/w, 1/w, 2/w); These are the x-positions */ +/** of the adjacent pixels.*/ +varying vec4 xCoord; + +/** center.y + (-2/h, -1/h, 1/h, 2/h); These are the y-positions */ +/** of the adjacent pixels.*/ +varying vec4 yCoord; + +void main(void) { + center.xy = textureIn; + center.zw = textureIn * tex_size + tex_bayer_first_red; + + xCoord = center.x + vec4(-2.0 * tex_step.x, + -tex_step.x, tex_step.x, 2.0 * tex_step.x); + yCoord = center.y + vec4(-2.0 * tex_step.y, + -tex_step.y, tex_step.y, 2.0 * tex_step.y); + + gl_Position = vertexIn; +} diff --git a/src/qcam/assets/shader/NV_vertex_shader.glsl b/src/qcam/assets/shader/identity.vert index 12e791e3..6d6f7551 100644 --- a/src/qcam/assets/shader/NV_vertex_shader.glsl +++ b/src/qcam/assets/shader/identity.vert @@ -2,7 +2,7 @@ /* * Copyright (C) 2020, Linaro * - * NV_vertex_shader.glsl - Vertex shader code for NV family + * identity.vert - Identity vertex shader for pixel format conversion */ attribute vec4 vertexIn; diff --git a/src/qcam/assets/shader/shaders.qrc b/src/qcam/assets/shader/shaders.qrc index 33eab278..96c709f9 100644 --- a/src/qcam/assets/shader/shaders.qrc +++ b/src/qcam/assets/shader/shaders.qrc @@ -1,9 +1,13 @@ <!-- SPDX-License-Identifier: LGPL-2.1-or-later --> <!DOCTYPE RCC><RCC version="1.0"> <qresource> -<file>./NV_vertex_shader.glsl</file> -<file>./NV_2_planes_UV_f.glsl</file> -<file>./NV_2_planes_VU_f.glsl</file> -<file>./NV_3_planes_f.glsl</file> + <file>RGB.frag</file> + <file>YUV_2_planes.frag</file> + <file>YUV_3_planes.frag</file> + <file>YUV_packed.frag</file> + <file>bayer_1x_packed.frag</file> + <file>bayer_8.frag</file> + <file>bayer_8.vert</file> + <file>identity.vert</file> </qresource> </RCC> diff --git a/src/qcam/dng_writer.cpp b/src/qcam/dng_writer.cpp index 030d1387..34c8df5a 100644 --- a/src/qcam/dng_writer.cpp +++ b/src/qcam/dng_writer.cpp @@ -15,6 +15,7 @@ #include <libcamera/control_ids.h> #include <libcamera/formats.h> +#include <libcamera/property_ids.h> using namespace libcamera; @@ -353,6 +354,8 @@ int DNGWriter::write(const char *filename, const Camera *camera, [[maybe_unused]] const FrameBuffer *buffer, const void *data) { + const ControlList &cameraProperties = camera->properties(); + const auto it = formatInfo.find(config.pixelFormat); if (it == formatInfo.cend()) { std::cerr << "Unsupported pixel format" << std::endl; @@ -387,9 +390,13 @@ int DNGWriter::write(const char *filename, const Camera *camera, TIFFSetField(tif, TIFFTAG_DNGBACKWARDVERSION, version); TIFFSetField(tif, TIFFTAG_FILLORDER, FILLORDER_MSB2LSB); TIFFSetField(tif, TIFFTAG_MAKE, "libcamera"); - /* \todo Report a real model string instead of id. */ - TIFFSetField(tif, TIFFTAG_MODEL, camera->id().c_str()); - TIFFSetField(tif, TIFFTAG_UNIQUECAMERAMODEL, camera->id().c_str()); + + if (cameraProperties.contains(properties::Model)) { + std::string model = cameraProperties.get(properties::Model); + TIFFSetField(tif, TIFFTAG_MODEL, model.c_str()); + /* \todo set TIFFTAG_UNIQUECAMERAMODEL. 
*/ + } + TIFFSetField(tif, TIFFTAG_SOFTWARE, "qcam"); TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT); diff --git a/src/qcam/dng_writer.h b/src/qcam/dng_writer.h index bf44c879..20905f37 100644 --- a/src/qcam/dng_writer.h +++ b/src/qcam/dng_writer.h @@ -10,9 +10,9 @@ #ifdef HAVE_TIFF #define HAVE_DNG -#include <libcamera/buffer.h> #include <libcamera/camera.h> #include <libcamera/controls.h> +#include <libcamera/framebuffer.h> #include <libcamera/stream.h> using namespace libcamera; diff --git a/src/qcam/main.cpp b/src/qcam/main.cpp index f60d3cef..5eff90a3 100644 --- a/src/qcam/main.cpp +++ b/src/qcam/main.cpp @@ -2,7 +2,7 @@ /* * Copyright (C) 2019, Google Inc. * - * main.cpp - cam - The libcamera swiss army knife + * main.cpp - qcam - The libcamera GUI test application */ #include <signal.h> @@ -16,6 +16,7 @@ #include "../cam/options.h" #include "../cam/stream_options.h" #include "main_window.h" +#include "message_handler.h" void signalHandler([[maybe_unused]] int signal) { @@ -38,6 +39,8 @@ OptionsParser::Options parseOptions(int argc, char *argv[]) "renderer", ArgumentRequired, "renderer"); parser.addOption(OptStream, &streamKeyValue, "Set configuration of a camera stream", "stream", true); + parser.addOption(OptVerbose, OptionNone, + "Print verbose log messages", "verbose"); OptionsParser::Options options = parser.parse(argc, argv); if (options.isSet(OptHelp)) @@ -57,6 +60,8 @@ int main(int argc, char **argv) if (options.isSet(OptHelp)) return 0; + MessageHandler msgHandler(options.isSet(OptVerbose)); + struct sigaction sa = {}; sa.sa_handler = &signalHandler; sigaction(SIGINT, &sa, nullptr); diff --git a/src/qcam/main_window.cpp b/src/qcam/main_window.cpp index ecb9dd66..39d034de 100644 --- a/src/qcam/main_window.cpp +++ b/src/qcam/main_window.cpp @@ -367,7 +367,6 @@ void MainWindow::toggleCapture(bool start) int MainWindow::startCapture() { StreamRoles roles = StreamKeyValueParser::roles(options_[OptStream]); - std::vector<Request *> requests; int ret; /* Verify roles are supported. */ @@ -486,7 +485,7 @@ int MainWindow::startCapture() while (!freeBuffers_[vfStream_].isEmpty()) { FrameBuffer *buffer = freeBuffers_[vfStream_].dequeue(); - Request *request = camera_->createRequest(); + std::unique_ptr<Request> request = camera_->createRequest(); if (!request) { qWarning() << "Can't create request"; ret = -ENOMEM; @@ -499,7 +498,7 @@ int MainWindow::startCapture() goto error; } - requests.push_back(request); + requests_.push_back(std::move(request)); } /* Start the title timer and the camera. */ @@ -518,8 +517,8 @@ int MainWindow::startCapture() camera_->requestCompleted.connect(this, &MainWindow::requestComplete); /* Queue all requests. 
*/ - for (Request *request : requests) { - ret = camera_->queueRequest(request); + for (std::unique_ptr<Request> &request : requests_) { + ret = camera_->queueRequest(request.get()); if (ret < 0) { qWarning() << "Can't queue request"; goto error_disconnect; @@ -535,8 +534,7 @@ error_disconnect: camera_->stop(); error: - for (Request *request : requests) - delete request; + requests_.clear(); for (auto &iter : mappedBuffers_) { const MappedBuffer &buffer = iter.second; @@ -580,6 +578,9 @@ void MainWindow::stopCapture() } mappedBuffers_.clear(); + requests_.clear(); + freeQueue_.clear(); + delete allocator_; isCapturing_ = false; @@ -701,7 +702,7 @@ void MainWindow::requestComplete(Request *request) */ { QMutexLocker locker(&mutex_); - doneQueue_.enqueue({ request->buffers(), request->metadata() }); + doneQueue_.enqueue(request); } QCoreApplication::postEvent(this, new CaptureEvent); @@ -714,8 +715,7 @@ void MainWindow::processCapture() * if stopCapture() has been called while a CaptureEvent was posted but * not processed yet. Return immediately in that case. */ - CaptureRequest request; - + Request *request; { QMutexLocker locker(&mutex_); if (doneQueue_.isEmpty()) @@ -725,11 +725,15 @@ void MainWindow::processCapture() } /* Process buffers. */ - if (request.buffers_.count(vfStream_)) - processViewfinder(request.buffers_[vfStream_]); + if (request->buffers().count(vfStream_)) + processViewfinder(request->buffers().at(vfStream_)); - if (request.buffers_.count(rawStream_)) - processRaw(request.buffers_[rawStream_], request.metadata_); + if (request->buffers().count(rawStream_)) + processRaw(request->buffers().at(rawStream_), request->metadata()); + + request->reuse(); + QMutexLocker locker(&mutex_); + freeQueue_.enqueue(request); } void MainWindow::processViewfinder(FrameBuffer *buffer) @@ -742,7 +746,7 @@ void MainWindow::processViewfinder(FrameBuffer *buffer) fps = lastBufferTime_ && fps ? 
1000000000.0 / fps : 0.0; lastBufferTime_ = metadata.timestamp; - qInfo().noquote() + qDebug().noquote() << QString("seq: %1").arg(metadata.sequence, 6, 10, QLatin1Char('0')) << "bytesused:" << metadata.planes[0].bytesused << "timestamp:" << metadata.timestamp @@ -754,25 +758,28 @@ void MainWindow::processViewfinder(FrameBuffer *buffer) void MainWindow::queueRequest(FrameBuffer *buffer) { - Request *request = camera_->createRequest(); - if (!request) { - qWarning() << "Can't create request"; - return; + Request *request; + { + QMutexLocker locker(&mutex_); + if (freeQueue_.isEmpty()) + return; + + request = freeQueue_.dequeue(); } request->addBuffer(vfStream_, buffer); if (captureRaw_) { - FrameBuffer *buffer = nullptr; + FrameBuffer *rawBuffer = nullptr; { QMutexLocker locker(&mutex_); if (!freeBuffers_[rawStream_].isEmpty()) - buffer = freeBuffers_[rawStream_].dequeue(); + rawBuffer = freeBuffers_[rawStream_].dequeue(); } - if (buffer) { - request->addBuffer(rawStream_, buffer); + if (rawBuffer) { + request->addBuffer(rawStream_, rawBuffer); captureRaw_ = false; } else { qWarning() << "No free buffer available for RAW capture"; diff --git a/src/qcam/main_window.h b/src/qcam/main_window.h index 5c61a4df..85d56ce4 100644 --- a/src/qcam/main_window.h +++ b/src/qcam/main_window.h @@ -8,6 +8,7 @@ #define __QCAM_MAIN_WINDOW_H__ #include <memory> +#include <vector> #include <QElapsedTimer> #include <QIcon> @@ -17,11 +18,12 @@ #include <QQueue> #include <QTimer> -#include <libcamera/buffer.h> #include <libcamera/camera.h> #include <libcamera/camera_manager.h> #include <libcamera/controls.h> +#include <libcamera/framebuffer.h> #include <libcamera/framebuffer_allocator.h> +#include <libcamera/request.h> #include <libcamera/stream.h> #include "../cam/stream_options.h" @@ -39,23 +41,7 @@ enum { OptHelp = 'h', OptRenderer = 'r', OptStream = 's', -}; - -class CaptureRequest -{ -public: - CaptureRequest() - { - } - - CaptureRequest(const Request::BufferMap &buffers, - const ControlList &metadata) - : buffers_(buffers), metadata_(metadata) - { - } - - Request::BufferMap buffers_; - ControlList metadata_; + OptVerbose = 'v', }; class MainWindow : public QMainWindow @@ -128,13 +114,16 @@ private: Stream *vfStream_; Stream *rawStream_; std::map<const Stream *, QQueue<FrameBuffer *>> freeBuffers_; - QQueue<CaptureRequest> doneQueue_; - QMutex mutex_; /* Protects freeBuffers_ and doneQueue_ */ + QQueue<Request *> doneQueue_; + QQueue<Request *> freeQueue_; + QMutex mutex_; /* Protects freeBuffers_, doneQueue_, and freeQueue_ */ uint64_t lastBufferTime_; QElapsedTimer frameRateInterval_; uint32_t previousFrames_; uint32_t framesCaptured_; + + std::vector<std::unique_ptr<Request>> requests_; }; #endif /* __QCAM_MAIN_WINDOW__ */ diff --git a/src/qcam/meson.build b/src/qcam/meson.build index 9bb48c0d..7d3621c9 100644 --- a/src/qcam/meson.build +++ b/src/qcam/meson.build @@ -1,11 +1,26 @@ # SPDX-License-Identifier: CC0-1.0 +qt5 = import('qt5') +qt5_dep = dependency('qt5', + method : 'pkg-config', + modules : ['Core', 'Gui', 'Widgets'], + required : get_option('qcam'), + version : '>=5.4') + +if not qt5_dep.found() + qcam_enabled = false + subdir_done() +endif + +qcam_enabled = true + qcam_sources = files([ '../cam/options.cpp', '../cam/stream_options.cpp', 'format_converter.cpp', 'main.cpp', 'main_window.cpp', + 'message_handler.cpp', 'viewfinder_qt.cpp', ]) @@ -18,58 +33,50 @@ qcam_resources = files([ 'assets/feathericons/feathericons.qrc', ]) -qt5 = import('qt5') -qt5_dep = dependency('qt5', - method : 
'pkg-config', - modules : ['Core', 'Gui', 'Widgets'], - required : get_option('qcam'), - version : '>=5.4') +qcam_deps = [ + libatomic, + libcamera_public, + qt5_dep, +] -if qt5_dep.found() - qcam_deps = [ - libcamera_dep, - qt5_dep, - ] +qt5_cpp_args = ['-DQT_NO_KEYWORDS'] - qt5_cpp_args = [ '-DQT_NO_KEYWORDS' ] - - tiff_dep = dependency('libtiff-4', required : false) - if tiff_dep.found() - qt5_cpp_args += [ '-DHAVE_TIFF' ] - qcam_deps += [ tiff_dep ] - qcam_sources += files([ - 'dng_writer.cpp', - ]) - endif +tiff_dep = dependency('libtiff-4', required : false) +if tiff_dep.found() + qt5_cpp_args += ['-DHAVE_TIFF'] + qcam_deps += [tiff_dep] + qcam_sources += files([ + 'dng_writer.cpp', + ]) +endif - if cxx.has_header_symbol('QOpenGLWidget', 'QOpenGLWidget', - dependencies : qt5_dep, args : '-fPIC') - qcam_sources += files([ - 'viewfinder_gl.cpp', - ]) - qcam_moc_headers += files([ - 'viewfinder_gl.h', - ]) - qcam_resources += files([ - 'assets/shader/shaders.qrc' - ]) - endif +if cxx.has_header_symbol('QOpenGLWidget', 'QOpenGLWidget', + dependencies : qt5_dep, args : '-fPIC') + qcam_sources += files([ + 'viewfinder_gl.cpp', + ]) + qcam_moc_headers += files([ + 'viewfinder_gl.h', + ]) + qcam_resources += files([ + 'assets/shader/shaders.qrc' + ]) +endif - # gcc 9 introduced a deprecated-copy warning that is triggered by Qt until - # Qt 5.13. clang 10 introduced the same warning, but detects more issues - # that are not fixed in Qt yet. Disable the warning manually in both cases. - if ((cc.get_id() == 'gcc' and cc.version().version_compare('>=9.0') and - qt5_dep.version().version_compare('<5.13')) or - (cc.get_id() == 'clang' and cc.version().version_compare('>=10.0'))) - qt5_cpp_args += [ '-Wno-deprecated-copy' ] - endif +# gcc 9 introduced a deprecated-copy warning that is triggered by Qt until +# Qt 5.13. clang 10 introduced the same warning, but detects more issues +# that are not fixed in Qt yet. Disable the warning manually in both cases. 
+if ((cc.get_id() == 'gcc' and cc.version().version_compare('>=9.0') and + qt5_dep.version().version_compare('<5.13')) or + (cc.get_id() == 'clang' and cc.version().version_compare('>=10.0'))) + qt5_cpp_args += ['-Wno-deprecated-copy'] +endif - resources = qt5.preprocess(moc_headers: qcam_moc_headers, - qresources : qcam_resources, - dependencies: qt5_dep) +resources = qt5.preprocess(moc_headers: qcam_moc_headers, + qresources : qcam_resources, + dependencies: qt5_dep) - qcam = executable('qcam', qcam_sources, resources, - install : true, - dependencies : qcam_deps, - cpp_args : qt5_cpp_args) -endif +qcam = executable('qcam', qcam_sources, resources, + install : true, + dependencies : qcam_deps, + cpp_args : qt5_cpp_args) diff --git a/src/qcam/message_handler.cpp b/src/qcam/message_handler.cpp new file mode 100644 index 00000000..261623e1 --- /dev/null +++ b/src/qcam/message_handler.cpp @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * message_handler.cpp - qcam - Log message handling + */ + +#include "message_handler.h" + +QtMessageHandler MessageHandler::handler_ = nullptr; +bool MessageHandler::verbose_ = false; + +MessageHandler::MessageHandler(bool verbose) +{ + verbose_ = verbose; + handler_ = qInstallMessageHandler(&MessageHandler::handleMessage); +} + +void MessageHandler::handleMessage(QtMsgType type, + const QMessageLogContext &context, + const QString &msg) +{ + if (type == QtDebugMsg && !verbose_) + return; + + handler_(type, context, msg); +} diff --git a/src/qcam/message_handler.h b/src/qcam/message_handler.h new file mode 100644 index 00000000..4534db9d --- /dev/null +++ b/src/qcam/message_handler.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * message_handler.cpp - qcam - Log message handling + */ +#ifndef __QCAM_MESSAGE_HANDLER_H__ +#define __QCAM_MESSAGE_HANDLER_H__ + +#include <QtGlobal> + +class MessageHandler +{ +public: + MessageHandler(bool verbose); + +private: + static void handleMessage(QtMsgType type, + const QMessageLogContext &context, + const QString &msg); + + static QtMessageHandler handler_; + static bool verbose_; +}; + +#endif /* __QCAM_MESSAGE_HANDLER_H__ */ diff --git a/src/qcam/viewfinder.h b/src/qcam/viewfinder.h index 67da1df2..46747c22 100644 --- a/src/qcam/viewfinder.h +++ b/src/qcam/viewfinder.h @@ -11,8 +11,8 @@ #include <QList> #include <QSize> -#include <libcamera/buffer.h> #include <libcamera/formats.h> +#include <libcamera/framebuffer.h> struct MappedBuffer { void *memory; @@ -22,7 +22,7 @@ struct MappedBuffer { class ViewFinder { public: - virtual ~ViewFinder() {} + virtual ~ViewFinder() = default; virtual const QList<libcamera::PixelFormat> &nativeFormats() const = 0; diff --git a/src/qcam/viewfinder_gl.cpp b/src/qcam/viewfinder_gl.cpp index fbe21dcf..e7c8620c 100644 --- a/src/qcam/viewfinder_gl.cpp +++ b/src/qcam/viewfinder_gl.cpp @@ -7,28 +7,55 @@ #include "viewfinder_gl.h" +#include <QByteArray> +#include <QFile> #include <QImage> #include <libcamera/formats.h> static const QList<libcamera::PixelFormat> supportedFormats{ + /* YUV - packed (single plane) */ + libcamera::formats::UYVY, + libcamera::formats::VYUY, + libcamera::formats::YUYV, + libcamera::formats::YVYU, + /* YUV - semi planar (two planes) */ libcamera::formats::NV12, libcamera::formats::NV21, libcamera::formats::NV16, libcamera::formats::NV61, 
libcamera::formats::NV24, libcamera::formats::NV42, + /* YUV - fully planar (three planes) */ libcamera::formats::YUV420, libcamera::formats::YVU420, + /* RGB */ + libcamera::formats::ABGR8888, + libcamera::formats::ARGB8888, + libcamera::formats::BGRA8888, + libcamera::formats::RGBA8888, + libcamera::formats::BGR888, + libcamera::formats::RGB888, + /* Raw Bayer 8-bit */ + libcamera::formats::SBGGR8, + libcamera::formats::SGBRG8, + libcamera::formats::SGRBG8, + libcamera::formats::SRGGB8, + /* Raw Bayer 10-bit packed */ + libcamera::formats::SBGGR10_CSI2P, + libcamera::formats::SGBRG10_CSI2P, + libcamera::formats::SGRBG10_CSI2P, + libcamera::formats::SRGGB10_CSI2P, + /* Raw Bayer 12-bit packed */ + libcamera::formats::SBGGR12_CSI2P, + libcamera::formats::SGBRG12_CSI2P, + libcamera::formats::SGRBG12_CSI2P, + libcamera::formats::SRGGB12_CSI2P, }; ViewFinderGL::ViewFinderGL(QWidget *parent) - : QOpenGLWidget(parent), buffer_(nullptr), yuvData_(nullptr), - fragmentShader_(nullptr), vertexShader_(nullptr), - vertexBuffer_(QOpenGLBuffer::VertexBuffer), - textureU_(QOpenGLTexture::Target2D), - textureV_(QOpenGLTexture::Target2D), - textureY_(QOpenGLTexture::Target2D) + : QOpenGLWidget(parent), buffer_(nullptr), data_(nullptr), + vertexBuffer_(QOpenGLBuffer::VertexBuffer) { } @@ -45,19 +72,23 @@ const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &size) { - /* If the fragment is created remove it and create a new one. */ - if (fragmentShader_) { + if (format != format_) { + /* + * If the fragment already exists, remove it and create a new + * one for the new format. + */ if (shaderProgram_.isLinked()) { shaderProgram_.release(); - shaderProgram_.removeShader(fragmentShader_); - delete fragmentShader_; + shaderProgram_.removeShader(fragmentShader_.get()); + fragmentShader_.reset(); } - } - if (!selectFormat(format)) - return -1; + if (!selectFormat(format)) + return -1; + + format_ = format; + } - format_ = format; size_ = size; updateGeometry(); @@ -89,7 +120,11 @@ void ViewFinderGL::render(libcamera::FrameBuffer *buffer, MappedBuffer *map) if (buffer_) renderComplete(buffer_); - yuvData_ = static_cast<unsigned char *>(map->memory); + data_ = static_cast<unsigned char *>(map->memory); + /* + * \todo Get the stride from the buffer instead of computing it naively + */ + stride_ = buffer->metadata().planes[0].bytesused / size_.height(); update(); buffer_ = buffer; } @@ -97,54 +132,185 @@ void ViewFinderGL::render(libcamera::FrameBuffer *buffer, MappedBuffer *map) bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format) { bool ret = true; + + /* Set min/mag filters to GL_LINEAR by default. */ + textureMinMagFilters_ = GL_LINEAR; + + /* Use identity.vert as the default vertex shader. 
*/ + vertexShaderFile_ = ":identity.vert"; + + fragmentShaderDefines_.clear(); + switch (format) { case libcamera::formats::NV12: horzSubSample_ = 2; vertSubSample_ = 2; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_2_planes_UV_f.glsl"; + fragmentShaderDefines_.append("#define YUV_PATTERN_UV"); + fragmentShaderFile_ = ":YUV_2_planes.frag"; break; case libcamera::formats::NV21: horzSubSample_ = 2; vertSubSample_ = 2; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_2_planes_VU_f.glsl"; + fragmentShaderDefines_.append("#define YUV_PATTERN_VU"); + fragmentShaderFile_ = ":YUV_2_planes.frag"; break; case libcamera::formats::NV16: horzSubSample_ = 2; vertSubSample_ = 1; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_2_planes_UV_f.glsl"; + fragmentShaderDefines_.append("#define YUV_PATTERN_UV"); + fragmentShaderFile_ = ":YUV_2_planes.frag"; break; case libcamera::formats::NV61: horzSubSample_ = 2; vertSubSample_ = 1; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_2_planes_VU_f.glsl"; + fragmentShaderDefines_.append("#define YUV_PATTERN_VU"); + fragmentShaderFile_ = ":YUV_2_planes.frag"; break; case libcamera::formats::NV24: horzSubSample_ = 1; vertSubSample_ = 1; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_2_planes_UV_f.glsl"; + fragmentShaderDefines_.append("#define YUV_PATTERN_UV"); + fragmentShaderFile_ = ":YUV_2_planes.frag"; break; case libcamera::formats::NV42: horzSubSample_ = 1; vertSubSample_ = 1; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_2_planes_VU_f.glsl"; + fragmentShaderDefines_.append("#define YUV_PATTERN_VU"); + fragmentShaderFile_ = ":YUV_2_planes.frag"; break; case libcamera::formats::YUV420: horzSubSample_ = 2; vertSubSample_ = 2; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_3_planes_f.glsl"; + fragmentShaderFile_ = ":YUV_3_planes.frag"; break; case libcamera::formats::YVU420: horzSubSample_ = 2; vertSubSample_ = 2; - vertexShaderSrc_ = ":NV_vertex_shader.glsl"; - fragmentShaderSrc_ = ":NV_3_planes_f.glsl"; + fragmentShaderFile_ = ":YUV_3_planes.frag"; + break; + case libcamera::formats::UYVY: + fragmentShaderDefines_.append("#define YUV_PATTERN_UYVY"); + fragmentShaderFile_ = ":YUV_packed.frag"; + break; + case libcamera::formats::VYUY: + fragmentShaderDefines_.append("#define YUV_PATTERN_VYUY"); + fragmentShaderFile_ = ":YUV_packed.frag"; + break; + case libcamera::formats::YUYV: + fragmentShaderDefines_.append("#define YUV_PATTERN_YUYV"); + fragmentShaderFile_ = ":YUV_packed.frag"; + break; + case libcamera::formats::YVYU: + fragmentShaderDefines_.append("#define YUV_PATTERN_YVYU"); + fragmentShaderFile_ = ":YUV_packed.frag"; + break; + case libcamera::formats::ABGR8888: + fragmentShaderDefines_.append("#define RGB_PATTERN rgb"); + fragmentShaderFile_ = ":RGB.frag"; + break; + case libcamera::formats::ARGB8888: + fragmentShaderDefines_.append("#define RGB_PATTERN bgr"); + fragmentShaderFile_ = ":RGB.frag"; + break; + case libcamera::formats::BGRA8888: + fragmentShaderDefines_.append("#define RGB_PATTERN gba"); + fragmentShaderFile_ = ":RGB.frag"; + break; + case libcamera::formats::RGBA8888: + fragmentShaderDefines_.append("#define RGB_PATTERN abg"); + fragmentShaderFile_ = ":RGB.frag"; + break; + case libcamera::formats::BGR888: + fragmentShaderDefines_.append("#define RGB_PATTERN rgb"); + fragmentShaderFile_ = ":RGB.frag"; + break; + case libcamera::formats::RGB888: + 
fragmentShaderDefines_.append("#define RGB_PATTERN bgr"); + fragmentShaderFile_ = ":RGB.frag"; + break; + case libcamera::formats::SBGGR8: + firstRed_.setX(1.0); + firstRed_.setY(1.0); + vertexShaderFile_ = ":bayer_8.vert"; + fragmentShaderFile_ = ":bayer_8.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SGBRG8: + firstRed_.setX(0.0); + firstRed_.setY(1.0); + vertexShaderFile_ = ":bayer_8.vert"; + fragmentShaderFile_ = ":bayer_8.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SGRBG8: + firstRed_.setX(1.0); + firstRed_.setY(0.0); + vertexShaderFile_ = ":bayer_8.vert"; + fragmentShaderFile_ = ":bayer_8.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SRGGB8: + firstRed_.setX(0.0); + firstRed_.setY(0.0); + vertexShaderFile_ = ":bayer_8.vert"; + fragmentShaderFile_ = ":bayer_8.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SBGGR10_CSI2P: + firstRed_.setX(1.0); + firstRed_.setY(1.0); + fragmentShaderDefines_.append("#define RAW10P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SGBRG10_CSI2P: + firstRed_.setX(0.0); + firstRed_.setY(1.0); + fragmentShaderDefines_.append("#define RAW10P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SGRBG10_CSI2P: + firstRed_.setX(1.0); + firstRed_.setY(0.0); + fragmentShaderDefines_.append("#define RAW10P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SRGGB10_CSI2P: + firstRed_.setX(0.0); + firstRed_.setY(0.0); + fragmentShaderDefines_.append("#define RAW10P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SBGGR12_CSI2P: + firstRed_.setX(1.0); + firstRed_.setY(1.0); + fragmentShaderDefines_.append("#define RAW12P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SGBRG12_CSI2P: + firstRed_.setX(0.0); + firstRed_.setY(1.0); + fragmentShaderDefines_.append("#define RAW12P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SGRBG12_CSI2P: + firstRed_.setX(1.0); + firstRed_.setY(0.0); + fragmentShaderDefines_.append("#define RAW12P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; + break; + case libcamera::formats::SRGGB12_CSI2P: + firstRed_.setX(0.0); + firstRed_.setY(0.0); + fragmentShaderDefines_.append("#define RAW12P"); + fragmentShaderFile_ = ":bayer_1x_packed.frag"; + textureMinMagFilters_ = GL_NEAREST; break; default: ret = false; @@ -159,15 +325,15 @@ bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format) bool ViewFinderGL::createVertexShader() { /* Create Vertex Shader */ - vertexShader_ = new QOpenGLShader(QOpenGLShader::Vertex, this); + vertexShader_ = std::make_unique<QOpenGLShader>(QOpenGLShader::Vertex, this); /* Compile the vertex shader */ - if (!vertexShader_->compileSourceFile(vertexShaderSrc_)) { + if (!vertexShader_->compileSourceFile(vertexShaderFile_)) { qWarning() << "[ViewFinderGL]:" << vertexShader_->log(); return false; } - shaderProgram_.addShader(vertexShader_); + shaderProgram_.addShader(vertexShader_.get()); return true; } @@ -176,16 +342,29 @@ bool ViewFinderGL::createFragmentShader() int 
attributeVertex; int attributeTexture; - /* Create Fragment Shader */ - fragmentShader_ = new QOpenGLShader(QOpenGLShader::Fragment, this); + /* + * Create the fragment shader, compile it, and add it to the shader + * program. The #define macros stored in fragmentShaderDefines_, if + * any, are prepended to the source code. + */ + fragmentShader_ = std::make_unique<QOpenGLShader>(QOpenGLShader::Fragment, this); - /* Compile the fragment shader */ - if (!fragmentShader_->compileSourceFile(fragmentShaderSrc_)) { + QFile file(fragmentShaderFile_); + if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) { + qWarning() << "Shader" << fragmentShaderFile_ << "not found"; + return false; + } + + QString defines = fragmentShaderDefines_.join('\n') + "\n"; + QByteArray src = file.readAll(); + src.prepend(defines.toUtf8()); + + if (!fragmentShader_->compileSourceCode(src)) { qWarning() << "[ViewFinderGL]:" << fragmentShader_->log(); return false; } - shaderProgram_.addShader(fragmentShader_); + shaderProgram_.addShader(fragmentShader_.get()); /* Link shader pipeline */ if (!shaderProgram_.link()) { @@ -219,27 +398,29 @@ bool ViewFinderGL::createFragmentShader() textureUniformY_ = shaderProgram_.uniformLocation("tex_y"); textureUniformU_ = shaderProgram_.uniformLocation("tex_u"); textureUniformV_ = shaderProgram_.uniformLocation("tex_v"); + textureUniformStep_ = shaderProgram_.uniformLocation("tex_step"); + textureUniformSize_ = shaderProgram_.uniformLocation("tex_size"); + textureUniformBayerFirstRed_ = shaderProgram_.uniformLocation("tex_bayer_first_red"); - if (!textureY_.isCreated()) - textureY_.create(); + /* Create the textures. */ + for (std::unique_ptr<QOpenGLTexture> &texture : textures_) { + if (texture) + continue; - if (!textureU_.isCreated()) - textureU_.create(); - - if (!textureV_.isCreated()) - textureV_.create(); + texture = std::make_unique<QOpenGLTexture>(QOpenGLTexture::Target2D); + texture->create(); + } - id_y_ = textureY_.textureId(); - id_u_ = textureU_.textureId(); - id_v_ = textureV_.textureId(); return true; } -void ViewFinderGL::configureTexture(unsigned int id) +void ViewFinderGL::configureTexture(QOpenGLTexture &texture) { - glBindTexture(GL_TEXTURE_2D, id); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glBindTexture(GL_TEXTURE_2D, texture.textureId()); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, + textureMinMagFilters_); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, + textureMinMagFilters_); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); } @@ -250,12 +431,6 @@ void ViewFinderGL::removeShader() shaderProgram_.release(); shaderProgram_.removeAllShaders(); } - - if (fragmentShader_) - delete fragmentShader_; - - if (vertexShader_) - delete vertexShader_; } void ViewFinderGL::initializeGL() @@ -303,7 +478,7 @@ void ViewFinderGL::doRender() case libcamera::formats::NV42: /* Activate texture Y */ glActiveTexture(GL_TEXTURE0); - configureTexture(id_y_); + configureTexture(*textures_[0]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, @@ -312,12 +487,12 @@ void ViewFinderGL::doRender() 0, GL_RED, GL_UNSIGNED_BYTE, - yuvData_); + data_); shaderProgram_.setUniformValue(textureUniformY_, 0); /* Activate texture UV/VU */ glActiveTexture(GL_TEXTURE1); - configureTexture(id_u_); + configureTexture(*textures_[1]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RG, @@ -326,14 +501,14 @@ void 
ViewFinderGL::doRender() 0, GL_RG, GL_UNSIGNED_BYTE, - (char *)yuvData_ + size_.width() * size_.height()); + data_ + size_.width() * size_.height()); shaderProgram_.setUniformValue(textureUniformU_, 1); break; case libcamera::formats::YUV420: /* Activate texture Y */ glActiveTexture(GL_TEXTURE0); - configureTexture(id_y_); + configureTexture(*textures_[0]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, @@ -342,12 +517,12 @@ void ViewFinderGL::doRender() 0, GL_RED, GL_UNSIGNED_BYTE, - yuvData_); + data_); shaderProgram_.setUniformValue(textureUniformY_, 0); /* Activate texture U */ glActiveTexture(GL_TEXTURE1); - configureTexture(id_u_); + configureTexture(*textures_[1]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, @@ -356,12 +531,12 @@ void ViewFinderGL::doRender() 0, GL_RED, GL_UNSIGNED_BYTE, - (char *)yuvData_ + size_.width() * size_.height()); + data_ + size_.width() * size_.height()); shaderProgram_.setUniformValue(textureUniformU_, 1); /* Activate texture V */ glActiveTexture(GL_TEXTURE2); - configureTexture(id_v_); + configureTexture(*textures_[2]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, @@ -370,14 +545,14 @@ void ViewFinderGL::doRender() 0, GL_RED, GL_UNSIGNED_BYTE, - (char *)yuvData_ + size_.width() * size_.height() * 5 / 4); + data_ + size_.width() * size_.height() * 5 / 4); shaderProgram_.setUniformValue(textureUniformV_, 2); break; case libcamera::formats::YVU420: /* Activate texture Y */ glActiveTexture(GL_TEXTURE0); - configureTexture(id_y_); + configureTexture(*textures_[0]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, @@ -386,12 +561,12 @@ void ViewFinderGL::doRender() 0, GL_RED, GL_UNSIGNED_BYTE, - yuvData_); + data_); shaderProgram_.setUniformValue(textureUniformY_, 0); /* Activate texture V */ glActiveTexture(GL_TEXTURE2); - configureTexture(id_v_); + configureTexture(*textures_[2]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, @@ -400,12 +575,12 @@ void ViewFinderGL::doRender() 0, GL_RED, GL_UNSIGNED_BYTE, - (char *)yuvData_ + size_.width() * size_.height()); + data_ + size_.width() * size_.height()); shaderProgram_.setUniformValue(textureUniformV_, 2); /* Activate texture U */ glActiveTexture(GL_TEXTURE1); - configureTexture(id_u_); + configureTexture(*textures_[1]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, @@ -414,10 +589,116 @@ void ViewFinderGL::doRender() 0, GL_RED, GL_UNSIGNED_BYTE, - (char *)yuvData_ + size_.width() * size_.height() * 5 / 4); + data_ + size_.width() * size_.height() * 5 / 4); shaderProgram_.setUniformValue(textureUniformU_, 1); break; + case libcamera::formats::UYVY: + case libcamera::formats::VYUY: + case libcamera::formats::YUYV: + case libcamera::formats::YVYU: + /* + * Packed YUV formats are stored in a RGBA texture to match the + * OpenGL texel size with the 4 bytes repeating pattern in YUV. + * The texture width is thus half of the image with. + */ + glActiveTexture(GL_TEXTURE0); + configureTexture(*textures_[0]); + glTexImage2D(GL_TEXTURE_2D, + 0, + GL_RGBA, + size_.width() / 2, + size_.height(), + 0, + GL_RGBA, + GL_UNSIGNED_BYTE, + data_); + shaderProgram_.setUniformValue(textureUniformY_, 0); + + /* + * The shader needs the step between two texture pixels in the + * horizontal direction, expressed in texture coordinate units + * ([0, 1]). There are exactly width - 1 steps between the + * leftmost and rightmost texels. 
+ */ + shaderProgram_.setUniformValue(textureUniformStep_, + 1.0f / (size_.width() / 2 - 1), + 1.0f /* not used */); + break; + + case libcamera::formats::ABGR8888: + case libcamera::formats::ARGB8888: + case libcamera::formats::BGRA8888: + case libcamera::formats::RGBA8888: + glActiveTexture(GL_TEXTURE0); + configureTexture(*textures_[0]); + glTexImage2D(GL_TEXTURE_2D, + 0, + GL_RGBA, + size_.width(), + size_.height(), + 0, + GL_RGBA, + GL_UNSIGNED_BYTE, + data_); + shaderProgram_.setUniformValue(textureUniformY_, 0); + break; + + case libcamera::formats::BGR888: + case libcamera::formats::RGB888: + glActiveTexture(GL_TEXTURE0); + configureTexture(*textures_[0]); + glTexImage2D(GL_TEXTURE_2D, + 0, + GL_RGB, + size_.width(), + size_.height(), + 0, + GL_RGB, + GL_UNSIGNED_BYTE, + data_); + shaderProgram_.setUniformValue(textureUniformY_, 0); + break; + + case libcamera::formats::SBGGR8: + case libcamera::formats::SGBRG8: + case libcamera::formats::SGRBG8: + case libcamera::formats::SRGGB8: + case libcamera::formats::SBGGR10_CSI2P: + case libcamera::formats::SGBRG10_CSI2P: + case libcamera::formats::SGRBG10_CSI2P: + case libcamera::formats::SRGGB10_CSI2P: + case libcamera::formats::SBGGR12_CSI2P: + case libcamera::formats::SGBRG12_CSI2P: + case libcamera::formats::SGRBG12_CSI2P: + case libcamera::formats::SRGGB12_CSI2P: + /* + * Raw Bayer 8-bit, and packed raw Bayer 10-bit/12-bit formats + * are stored in GL_RED texture. + * The texture width is equal to the stride. + */ + glActiveTexture(GL_TEXTURE0); + configureTexture(*textures_[0]); + glTexImage2D(GL_TEXTURE_2D, + 0, + GL_RED, + stride_, + size_.height(), + 0, + GL_RED, + GL_UNSIGNED_BYTE, + data_); + shaderProgram_.setUniformValue(textureUniformY_, 0); + shaderProgram_.setUniformValue(textureUniformBayerFirstRed_, + firstRed_); + shaderProgram_.setUniformValue(textureUniformSize_, + size_.width(), /* in pixels */ + size_.height()); + shaderProgram_.setUniformValue(textureUniformStep_, + 1.0f / (stride_ - 1), + 1.0f / (size_.height() - 1)); + break; + default: break; }; @@ -431,7 +712,7 @@ void ViewFinderGL::paintGL() << "create fragment shader failed."; } - if (yuvData_) { + if (data_) { glClearColor(0.0, 0.0, 0.0, 1.0); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); diff --git a/src/qcam/viewfinder_gl.h b/src/qcam/viewfinder_gl.h index 69502b7a..4a0f8ca5 100644 --- a/src/qcam/viewfinder_gl.h +++ b/src/qcam/viewfinder_gl.h @@ -8,6 +8,9 @@ #ifndef __VIEWFINDER_GL_H__ #define __VIEWFINDER_GL_H__ +#include <array> +#include <memory> + #include <QImage> #include <QMutex> #include <QOpenGLBuffer> @@ -18,8 +21,8 @@ #include <QOpenGLWidget> #include <QSize> -#include <libcamera/buffer.h> #include <libcamera/formats.h> +#include <libcamera/framebuffer.h> #include "viewfinder.h" @@ -53,7 +56,7 @@ protected: private: bool selectFormat(const libcamera::PixelFormat &format); - void configureTexture(unsigned int id); + void configureTexture(QOpenGLTexture &texture); bool createFragmentShader(); bool createVertexShader(); void removeShader(); @@ -63,33 +66,39 @@ private: libcamera::FrameBuffer *buffer_; libcamera::PixelFormat format_; QSize size_; - unsigned char *yuvData_; + unsigned int stride_; + unsigned char *data_; - /* OpenGL components for rendering */ - QOpenGLShader *fragmentShader_; - QOpenGLShader *vertexShader_; + /* Shaders */ QOpenGLShaderProgram shaderProgram_; + std::unique_ptr<QOpenGLShader> vertexShader_; + std::unique_ptr<QOpenGLShader> fragmentShader_; + QString vertexShaderFile_; + QString fragmentShaderFile_; + QStringList 
fragmentShaderDefines_; /* Vertex buffer */ QOpenGLBuffer vertexBuffer_; - /* Fragment and Vertex shader file name */ - QString fragmentShaderSrc_; - QString vertexShaderSrc_; + /* Textures */ + std::array<std::unique_ptr<QOpenGLTexture>, 3> textures_; + + /* Common texture parameters */ + GLuint textureMinMagFilters_; - /* YUV texture planars and parameters */ - GLuint id_u_; - GLuint id_v_; - GLuint id_y_; + /* YUV texture parameters */ GLuint textureUniformU_; GLuint textureUniformV_; GLuint textureUniformY_; - QOpenGLTexture textureU_; - QOpenGLTexture textureV_; - QOpenGLTexture textureY_; + GLuint textureUniformStep_; unsigned int horzSubSample_; unsigned int vertSubSample_; + /* Raw Bayer texture parameters */ + GLuint textureUniformSize_; + GLuint textureUniformBayerFirstRed_; + QPointF firstRed_; + QMutex mutex_; /* Prevent concurrent access to image_ */ }; diff --git a/src/qcam/viewfinder_qt.h b/src/qcam/viewfinder_qt.h index d7554288..501c72a7 100644 --- a/src/qcam/viewfinder_qt.h +++ b/src/qcam/viewfinder_qt.h @@ -14,8 +14,8 @@ #include <QSize> #include <QWidget> -#include <libcamera/buffer.h> #include <libcamera/formats.h> +#include <libcamera/framebuffer.h> #include <libcamera/pixel_format.h> #include "format_converter.h" |
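The byte addressing in bayer_1x_packed.frag is the least obvious part of the new shaders. The following standalone sketch (plain C++, not part of qcam; it only replays the BPP/THRESHOLD arithmetic documented in the shader comment for RAW10P) prints, for each pixel in the first two groups, which bytes the shader ends up sampling:

/*
 * Worked example of the byte addressing used by bayer_1x_packed.frag for
 * RAW10P (BPP = 1.25): every 4 pixels occupy 5 bytes, the 5th byte holding
 * the low 2 bits of the preceding 4 pixels. Illustrative helper only.
 */
#include <cmath>
#include <cstdio>

int main()
{
	const float bpp = 1.25f;		/* bytes per pixel for RAW10P */
	const float threshold_l = 0.14f;	/* fract(BPP) * 0.5 + 0.02 */
	const float threshold_h = 0.64f;	/* 1.0 - fract(BPP) * 1.5 + 0.02 */

	for (int pixel = 0; pixel < 8; pixel++) {
		/* Same small bias as the shader, to dodge float rounding. */
		float x = bpp * pixel + 0.02f;
		float fract_x = x - std::floor(x);

		/* Byte holding the 8 high bits of this pixel. */
		int high_byte = static_cast<int>(std::floor(x));

		/*
		 * Neighbour bytes, skipping the low-bits bytes exactly as the
		 * shader adjusts xcoords[0] and xcoords[1]. Pixel 0's left
		 * neighbour falls before the image; the GL sampler clamps it.
		 */
		int left = high_byte - 1 - (fract_x < threshold_l ? 1 : 0);
		int right = high_byte + 1 + (fract_x > threshold_h ? 1 : 0);

		std::printf("pixel %d: high byte %d, left %d, right %d\n",
			    pixel, high_byte, left, right);
	}

	return 0;
}

Running it shows that the low-bits bytes at offsets 4 and 9 are never sampled, matching the table in the shader comment.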
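On the application side, the main_window.cpp changes replace the per-frame createRequest()/delete cycle with a pool of reusable requests. A minimal sketch of that lifecycle, assuming the libcamera Camera, Request and Stream API as used in the diff (the free/done queues, locking and Qt event plumbing of qcam are omitted, and startCapture()/requestComplete() here are illustrative stand-ins, not qcam's methods):

/*
 * Hedged sketch of the request lifecycle qcam adopts above: requests are
 * created once with Camera::createRequest(), owned as unique_ptr, and
 * recycled with Request::reuse() on completion instead of being deleted
 * and recreated.
 */
#include <cerrno>
#include <memory>
#include <vector>

#include <libcamera/camera.h>
#include <libcamera/framebuffer.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>

using namespace libcamera;

static std::vector<std::unique_ptr<Request>> requests;

int startCapture(Camera *camera, Stream *stream,
		 const std::vector<FrameBuffer *> &buffers)
{
	for (FrameBuffer *buffer : buffers) {
		std::unique_ptr<Request> request = camera->createRequest();
		if (!request || request->addBuffer(stream, buffer) < 0)
			return -ENOMEM;

		requests.push_back(std::move(request));
	}

	int ret = camera->start();
	if (ret < 0)
		return ret;

	for (std::unique_ptr<Request> &request : requests)
		camera->queueRequest(request.get());

	return 0;
}

void requestComplete(Camera *camera, Stream *stream, Request *request)
{
	/* Consume the completed frame before recycling the request. */
	FrameBuffer *buffer = request->buffers().at(stream);
	/* ... render buffer, inspect request->metadata() ... */

	request->reuse();
	request->addBuffer(stream, buffer);
	camera->queueRequest(request);
}

qcam itself defers the requeue to the GUI thread through a Qt event and its freeQueue_, rather than requeuing straight from the completion handler as this sketch does.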