Diffstat (limited to 'src')
-rw-r--r--  src/apps/cam/main.cpp                            |   4
-rw-r--r--  src/apps/common/dng_writer.cpp                   | 212
-rw-r--r--  src/apps/qcam/main.cpp                           |   4
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.cpp          |  42
-rw-r--r--  src/ipa/libipa/camera_sensor_helper.h            |   3
-rw-r--r--  src/ipa/libipa/exposure_mode_helper.cpp          |   9
-rw-r--r--  src/ipa/libipa/matrix.cpp                        |   4
-rw-r--r--  src/ipa/rkisp1/algorithms/blc.cpp                |  85
-rw-r--r--  src/ipa/rkisp1/algorithms/blc.h                  |   5
-rw-r--r--  src/ipa/rkisp1/data/imx219.yaml                  |   4
-rw-r--r--  src/ipa/rkisp1/data/imx258.yaml                  |   1
-rw-r--r--  src/ipa/rkisp1/data/ov4689.yaml                  |   4
-rw-r--r--  src/ipa/rkisp1/data/ov5640.yaml                  |   4
-rw-r--r--  src/ipa/rkisp1/data/uncalibrated.yaml            |   1
-rw-r--r--  src/ipa/rkisp1/ipa_context.h                     |   6
-rw-r--r--  src/ipa/rkisp1/rkisp1.cpp                        |  26
-rw-r--r--  src/ipa/rpi/controller/rpi/awb.cpp               |   2
-rw-r--r--  src/ipa/rpi/controller/rpi/ccm.cpp               |   6
-rw-r--r--  src/ipa/rpi/controller/rpi/contrast.cpp          |   4
-rw-r--r--  src/libcamera/converter.cpp                      |   6
-rw-r--r--  src/libcamera/converter/converter_v4l2_m2m.cpp   |  79
-rw-r--r--  src/libcamera/device_enumerator_sysfs.cpp        |   2
-rw-r--r--  src/libcamera/pipeline/rpi/vc4/vc4.cpp           |   2
-rw-r--r--  src/libcamera/pipeline/simple/simple.cpp         |  15
-rw-r--r--  src/libcamera/software_isp/software_isp.cpp      |  26
-rw-r--r--  src/py/libcamera/py_color_space.cpp              |   2
-rw-r--r--  src/py/libcamera/py_controls_generated.cpp.in    |   2
-rw-r--r--  src/py/libcamera/py_enums.cpp                    |   2
-rw-r--r--  src/py/libcamera/py_formats_generated.cpp.in     |   2
-rw-r--r--  src/py/libcamera/py_geometry.cpp                 |   2
-rw-r--r--  src/py/libcamera/py_main.cpp                     |   8
-rw-r--r--  src/py/libcamera/py_main.h                       |  10
-rw-r--r--  src/py/libcamera/py_properties_generated.cpp.in  |   2
-rw-r--r--  src/py/libcamera/py_transform.cpp                |   2
-rw-r--r--  src/v4l2/v4l2_compat.cpp                         |  73
35 files changed, 488 insertions(+), 173 deletions(-)
diff --git a/src/apps/cam/main.cpp b/src/apps/cam/main.cpp
index 4f87f200..460dbc81 100644
--- a/src/apps/cam/main.cpp
+++ b/src/apps/cam/main.cpp
@@ -344,12 +344,16 @@ std::string CamApp::cameraName(const Camera *camera)
return name;
}
+namespace {
+
void signalHandler([[maybe_unused]] int signal)
{
std::cout << "Exiting" << std::endl;
CamApp::instance()->quit();
}
+} /* namespace */
+
int main(int argc, char **argv)
{
CamApp app;
diff --git a/src/apps/common/dng_writer.cpp b/src/apps/common/dng_writer.cpp
index 59f1fa23..355433b0 100644
--- a/src/apps/common/dng_writer.cpp
+++ b/src/apps/common/dng_writer.cpp
@@ -8,6 +8,7 @@
#include "dng_writer.h"
#include <algorithm>
+#include <endian.h>
#include <iostream>
#include <map>
@@ -126,7 +127,9 @@ struct Matrix3d {
float m[9];
};
-void packScanlineSBGGR8(void *output, const void *input, unsigned int width)
+namespace {
+
+void packScanlineRaw8(void *output, const void *input, unsigned int width)
{
const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
@@ -134,7 +137,67 @@ void packScanlineSBGGR8(void *output, const void *input, unsigned int width)
std::copy(in, in + width, out);
}
-void packScanlineSBGGR10P(void *output, const void *input, unsigned int width)
+void packScanlineRaw10(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ for (unsigned int i = 0; i < width; i += 4) {
+ *out++ = in[1] << 6 | in[0] >> 2;
+ *out++ = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4;
+ *out++ = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6;
+ *out++ = in[4] << 2 | (in[7] & 0x03) << 0;
+ *out++ = in[6];
+ in += 8;
+ }
+}
+
+void packScanlineRaw12(void *output, const void *input, unsigned int width)
+{
+ const uint8_t *in = static_cast<const uint8_t *>(input);
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ for (unsigned int i = 0; i < width; i += 2) {
+ *out++ = in[1] << 4 | in[0] >> 4;
+ *out++ = in[0] << 4 | (in[3] & 0x0f);
+ *out++ = in[2];
+ in += 4;
+ }
+}
+
+void packScanlineRaw16(void *output, const void *input, unsigned int width)
+{
+ const uint16_t *in = static_cast<const uint16_t *>(input);
+ uint16_t *out = static_cast<uint16_t *>(output);
+
+ std::copy(in, in + width, out);
+}
+
+/* Thumbnail function for raw data with each pixel aligned to 16bit. */
+void thumbScanlineRaw(const FormatInfo &info, void *output, const void *input,
+ unsigned int width, unsigned int stride)
+{
+ const uint16_t *in = static_cast<const uint16_t *>(input);
+ const uint16_t *in2 = static_cast<const uint16_t *>(input) + stride / 2;
+ uint8_t *out = static_cast<uint8_t *>(output);
+
+ /* Shift down to 8. */
+ unsigned int shift = info.bitsPerSample - 8;
+
+ /* Simple averaging that produces greyscale RGB values. */
+ for (unsigned int x = 0; x < width; x++) {
+ uint16_t value = (le16toh(in[0]) + le16toh(in[1]) +
+ le16toh(in2[0]) + le16toh(in2[1])) >> 2;
+ value = value >> shift;
+ *out++ = value;
+ *out++ = value;
+ *out++ = value;
+ in += 16;
+ in2 += 16;
+ }
+}
+
+void packScanlineRaw10_CSI2P(void *output, const void *input, unsigned int width)
{
const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
@@ -150,7 +213,7 @@ void packScanlineSBGGR10P(void *output, const void *input, unsigned int width)
}
}
-void packScanlineSBGGR12P(void *output, const void *input, unsigned int width)
+void packScanlineRaw12_CSI2P(void *output, const void *input, unsigned int width)
{
const uint8_t *in = static_cast<const uint8_t *>(input);
uint8_t *out = static_cast<uint8_t *>(output);
@@ -164,7 +227,7 @@ void packScanlineSBGGR12P(void *output, const void *input, unsigned int width)
}
}
-void thumbScanlineSBGGRxxP(const FormatInfo &info, void *output,
+void thumbScanlineRaw_CSI2P(const FormatInfo &info, void *output,
const void *input, unsigned int width,
unsigned int stride)
{
@@ -282,78 +345,150 @@ void thumbScanlineIPU3([[maybe_unused]] const FormatInfo &info, void *output,
}
}
-static const std::map<PixelFormat, FormatInfo> formatInfo = {
+const std::map<PixelFormat, FormatInfo> formatInfo = {
{ formats::SBGGR8, {
.bitsPerSample = 8,
.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGBRG8, {
.bitsPerSample = 8,
.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGRBG8, {
.bitsPerSample = 8,
.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SRGGB8, {
.bitsPerSample = 8,
.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
- .packScanline = packScanlineSBGGR8,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw8,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
+ } },
+ { formats::SBGGR10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB10, {
+ .bitsPerSample = 10,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw10,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SBGGR12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB12, {
+ .bitsPerSample = 12,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw12,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SBGGR16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGBRG16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SGRBG16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
+ } },
+ { formats::SRGGB16, {
+ .bitsPerSample = 16,
+ .pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
+ .packScanline = packScanlineRaw16,
+ .thumbScanline = thumbScanlineRaw,
} },
{ formats::SBGGR10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGBRG10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGRBG10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SRGGB10_CSI2P, {
.bitsPerSample = 10,
.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
- .packScanline = packScanlineSBGGR10P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw10_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SBGGR12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternBlue, CFAPatternGreen, CFAPatternGreen, CFAPatternRed },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGBRG12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternGreen, CFAPatternBlue, CFAPatternRed, CFAPatternGreen },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SGRBG12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternGreen, CFAPatternRed, CFAPatternBlue, CFAPatternGreen },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SRGGB12_CSI2P, {
.bitsPerSample = 12,
.pattern = { CFAPatternRed, CFAPatternGreen, CFAPatternGreen, CFAPatternBlue },
- .packScanline = packScanlineSBGGR12P,
- .thumbScanline = thumbScanlineSBGGRxxP,
+ .packScanline = packScanlineRaw12_CSI2P,
+ .thumbScanline = thumbScanlineRaw_CSI2P,
} },
{ formats::SBGGR10_IPU3, {
.bitsPerSample = 16,
@@ -381,6 +516,8 @@ static const std::map<PixelFormat, FormatInfo> formatInfo = {
} },
};
+} /* namespace */
+
int DNGWriter::write(const char *filename, const Camera *camera,
const StreamConfiguration &config,
const ControlList &metadata,
@@ -522,6 +659,23 @@ int DNGWriter::write(const char *filename, const Camera *camera,
TIFFWriteDirectory(tif);
+ /*
+ * Workaround for a bug introduced in libtiff version 4.5.1, for which no
+ * fixed release is available. In the affected versions the CFA* tags are
+ * missing from the field info.
+ * Introduced by: https://gitlab.com/libtiff/libtiff/-/commit/738e04099b13192bb1f654e74e9b5829313f3161
+ * Fixed by: https://gitlab.com/libtiff/libtiff/-/commit/49856998c3d82e65444b47bb4fb11b7830a0c2be
+ */
+ if (!TIFFFindField(tif, TIFFTAG_CFAREPEATPATTERNDIM, TIFF_ANY)) {
+ static const TIFFFieldInfo infos[] = {
+ { TIFFTAG_CFAREPEATPATTERNDIM, 2, 2, TIFF_SHORT, FIELD_CUSTOM,
+ 1, 0, const_cast<char *>("CFARepeatPatternDim") },
+ { TIFFTAG_CFAPATTERN, -1, -1, TIFF_BYTE, FIELD_CUSTOM,
+ 1, 1, const_cast<char *>("CFAPattern") },
+ };
+ TIFFMergeFieldInfo(tif, infos, 2);
+ }
+
/* Create a new IFD for the RAW image. */
const uint16_t cfaRepeatPatternDim[] = { 2, 2 };
const uint8_t cfaPlaneColor[] = {
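A note on the packScanlineRaw10() and packScanlineRaw12() helpers added above: the unpacked raw samples arrive as little-endian 16-bit values and are repacked most-significant-bit first, 4 pixels into 5 bytes in the 10-bit case. The following standalone sketch (not part of the patch; packRaw10() and the sample values are purely illustrative) reproduces the bit manipulation for one group of four pixels and shows the resulting bytes.

/* Standalone illustration, assuming little-endian 16-bit input samples. */
#include <cstdint>
#include <cstdio>

static void packRaw10(uint8_t *out, const uint8_t *in)
{
	/* Same bit manipulation as packScanlineRaw10(), for 4 pixels. */
	out[0] = in[1] << 6 | in[0] >> 2;
	out[1] = in[0] << 6 | (in[3] & 0x03) << 4 | in[2] >> 4;
	out[2] = in[2] << 4 | (in[5] & 0x03) << 2 | in[4] >> 6;
	out[3] = in[4] << 2 | (in[7] & 0x03) << 0;
	out[4] = in[6];
}

int main()
{
	/* Pixels 0x3ff, 0x000, 0x155 and 0x2aa as little-endian uint16_t. */
	const uint8_t in[8] = { 0xff, 0x03, 0x00, 0x00, 0x55, 0x01, 0xaa, 0x02 };
	uint8_t out[5];

	packRaw10(out, in);

	/* Prints "ff c0 05 56 aa": pixel 0 occupies the top 10 bits, and so on. */
	for (uint8_t b : out)
		printf("%02x ", b);
	printf("\n");

	return 0;
}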
diff --git a/src/apps/qcam/main.cpp b/src/apps/qcam/main.cpp
index 9846fba5..d0bde141 100644
--- a/src/apps/qcam/main.cpp
+++ b/src/apps/qcam/main.cpp
@@ -21,6 +21,8 @@
using namespace libcamera;
+namespace {
+
void signalHandler([[maybe_unused]] int signal)
{
qInfo() << "Exiting";
@@ -52,6 +54,8 @@ OptionsParser::Options parseOptions(int argc, char *argv[])
return options;
}
+} /* namespace */
+
int main(int argc, char **argv)
{
QApplication app(argc, argv);
diff --git a/src/ipa/libipa/camera_sensor_helper.cpp b/src/ipa/libipa/camera_sensor_helper.cpp
index 782ff990..a1339c83 100644
--- a/src/ipa/libipa/camera_sensor_helper.cpp
+++ b/src/ipa/libipa/camera_sensor_helper.cpp
@@ -48,6 +48,33 @@ namespace ipa {
*/
/**
+ * \fn CameraSensorHelper::blackLevel()
+ * \brief Fetch the black level of the sensor
+ *
+ * This function returns the black level of the sensor scaled to a 16bit pixel
+ * width. If it is unknown an empty optional is returned.
+ *
+ * \todo Fill the blanks and add pedestal values for all supported sensors. Once
+ * done, drop the std::optional<>.
+ *
+ * Black levels are typically the result of the following phenomena:
+ * - A pedestal added by the sensor to pixel values. It is typically fixed,
+ * sometimes programmable, and should be reported in datasheets (though
+ * documentation is not always available).
+ * - Dark currents and other physical effects that add charge to pixels in the
+ * absence of light. Those can depend on the integration time and the sensor
+ * die temperature, and their contribution to pixel values depends on the
+ * sensor gains.
+ *
+ * The pedestal is usually the largest contributor to the overall black
+ * level. In most cases it is known in advance; in rare cases it can be
+ * queried from the sensor (though no driver in the Linux kernel currently
+ * exposes such a control). This function provides that fixed, known value.
+ *
+ * \return The black level of the sensor, or std::nullopt if not known
+ */
+
+/**
* \brief Compute gain code from the analogue gain absolute value
* \param[in] gain The real gain to pass
*
@@ -205,6 +232,11 @@ double CameraSensorHelper::gain(uint32_t gainCode) const
*/
/**
+ * \var CameraSensorHelper::blackLevel_
+ * \brief The black level of the sensor
+ */
+
+/**
* \var CameraSensorHelper::gainType_
* \brief The analogue gain model type
*/
@@ -396,6 +428,8 @@ class CameraSensorHelperImx219 : public CameraSensorHelper
public:
CameraSensorHelperImx219()
{
+ /* From datasheet: 64 at 10bits. */
+ blackLevel_ = 4096;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 0, 256, -1, 256 };
}
@@ -407,6 +441,8 @@ class CameraSensorHelperImx258 : public CameraSensorHelper
public:
CameraSensorHelperImx258()
{
+ /* From datasheet: 0x40 at 10bits. */
+ blackLevel_ = 4096;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 0, 512, -1, 512 };
}
@@ -456,6 +492,8 @@ class CameraSensorHelperImx335 : public CameraSensorHelper
public:
CameraSensorHelperImx335()
{
+ /* From datasheet: 0x32 at 10bits. */
+ blackLevel_ = 3200;
gainType_ = AnalogueGainExponential;
gainConstants_.exp = { 1.0, expGainDb(0.3) };
}
@@ -515,6 +553,8 @@ class CameraSensorHelperOv4689 : public CameraSensorHelper
public:
CameraSensorHelperOv4689()
{
+ /* From datasheet: 0x40 at 12bits. */
+ blackLevel_ = 1024;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 1, 0, 0, 128 };
}
@@ -526,6 +566,8 @@ class CameraSensorHelperOv5640 : public CameraSensorHelper
public:
CameraSensorHelperOv5640()
{
+ /* From datasheet: 0x10 at 10bits. */
+ blackLevel_ = 1024;
gainType_ = AnalogueGainLinear;
gainConstants_.linear = { 1, 0, 0, 16 };
}
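A note on the values used in the helpers above: blackLevel_ holds the pedestal scaled to 16-bit precision, so a datasheet figure given at the sensor's native bit depth is shifted left by (16 - bit depth). A minimal sketch of that arithmetic (scaleBlackLevel() is an illustrative name, not part of libipa):

#include <cstdint>

/* Scale a datasheet pedestal from the native bit depth to 16 bits. */
static constexpr int16_t scaleBlackLevel(int16_t datasheetValue,
					 unsigned int bitDepth)
{
	return datasheetValue << (16 - bitDepth);
}

static_assert(scaleBlackLevel(64, 10) == 4096, "imx219/imx258: 64 at 10 bits");
static_assert(scaleBlackLevel(0x32, 10) == 3200, "imx335: 0x32 at 10 bits");
static_assert(scaleBlackLevel(0x40, 12) == 1024, "ov4689: 0x40 at 12 bits");
static_assert(scaleBlackLevel(0x10, 10) == 1024, "ov5640: 0x10 at 10 bits");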
diff --git a/src/ipa/libipa/camera_sensor_helper.h b/src/ipa/libipa/camera_sensor_helper.h
index 0d99073b..ac276e27 100644
--- a/src/ipa/libipa/camera_sensor_helper.h
+++ b/src/ipa/libipa/camera_sensor_helper.h
@@ -10,6 +10,7 @@
#include <stdint.h>
#include <memory>
+#include <optional>
#include <string>
#include <vector>
@@ -25,6 +26,7 @@ public:
CameraSensorHelper() = default;
virtual ~CameraSensorHelper() = default;
+ std::optional<int16_t> blackLevel() const { return blackLevel_; }
virtual uint32_t gainCode(double gain) const;
virtual double gain(uint32_t gainCode) const;
@@ -51,6 +53,7 @@ protected:
AnalogueGainExpConstants exp;
};
+ std::optional<int16_t> blackLevel_;
AnalogueGainType gainType_;
AnalogueGainConstants gainConstants_;
diff --git a/src/ipa/libipa/exposure_mode_helper.cpp b/src/ipa/libipa/exposure_mode_helper.cpp
index 683a564a..7703becc 100644
--- a/src/ipa/libipa/exposure_mode_helper.cpp
+++ b/src/ipa/libipa/exposure_mode_helper.cpp
@@ -166,7 +166,7 @@ ExposureModeHelper::splitExposure(utils::Duration exposure) const
return { minShutter_, minGain_, exposure / (minShutter_ * minGain_) };
utils::Duration shutter;
- double stageGain;
+ double stageGain = 1.0;
double gain;
for (unsigned int stage = 0; stage < gains_.size(); stage++) {
@@ -201,12 +201,9 @@ ExposureModeHelper::splitExposure(utils::Duration exposure) const
* From here on all we can do is max out the shutter time, followed by
* the analogue gain. If we still haven't achieved the target we send
* the rest of the exposure time to digital gain. If we were given no
- * stages to use then set stageGain to 1.0 so that shutter time is maxed
- * before gain touched at all.
+ * stages to use then the default stageGain of 1.0 is used so that
+ * shutter time is maxed before gain is touched at all.
*/
- if (gains_.empty())
- stageGain = 1.0;
-
shutter = clampShutter(exposure / clampGain(stageGain));
gain = clampGain(exposure / shutter);
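The comment above describes the fallback behaviour of splitExposure(): max out the shutter time first, then the analogue gain, and hand whatever remains to digital gain. A purely numeric sketch of that split, with illustrative values and names that are not part of the helper's API:

#include <algorithm>
#include <cstdio>

int main()
{
	const double exposure = 100.0;	/* Requested exposure, in ms. */
	const double maxShutter = 33.0;	/* ms */
	const double maxAnalogueGain = 2.0;

	/* Max out the shutter time, then analogue gain, then digital gain. */
	double shutter = std::min(exposure, maxShutter);
	double analogueGain = std::min(exposure / shutter, maxAnalogueGain);
	double digitalGain = exposure / (shutter * analogueGain);

	/* Prints: shutter 33.0 ms, again 2.00, dgain 1.52 */
	printf("shutter %.1f ms, again %.2f, dgain %.2f\n",
	       shutter, analogueGain, digitalGain);

	return 0;
}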
diff --git a/src/ipa/libipa/matrix.cpp b/src/ipa/libipa/matrix.cpp
index 7f000382..8346f0d3 100644
--- a/src/ipa/libipa/matrix.cpp
+++ b/src/ipa/libipa/matrix.cpp
@@ -122,8 +122,6 @@ namespace ipa {
* \return Matrix sum of matrices \a m1 and \a m2
*/
-} /* namespace ipa */
-
#ifndef __DOXYGEN__
/*
* The YAML data shall be a list of numerical values. Its size shall be equal
@@ -146,4 +144,6 @@ bool matrixValidateYaml(const YamlObject &obj, unsigned int size)
}
#endif /* __DOXYGEN__ */
+} /* namespace ipa */
+
} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/blc.cpp b/src/ipa/rkisp1/algorithms/blc.cpp
index d2e74354..871dd204 100644
--- a/src/ipa/rkisp1/algorithms/blc.cpp
+++ b/src/ipa/rkisp1/algorithms/blc.cpp
@@ -9,6 +9,8 @@
#include <libcamera/base/log.h>
+#include <libcamera/control_ids.h>
+
#include "libcamera/internal/yaml_parser.h"
/**
@@ -38,18 +40,61 @@ LOG_DEFINE_CATEGORY(RkISP1Blc)
BlackLevelCorrection::BlackLevelCorrection()
: tuningParameters_(false)
{
+ /*
+ * This is a bit of a hack. In raw mode no black level correction
+ * happens. This flag is used to ensure the metadata gets populated with
+ * the black level which is needed to capture proper raw images for
+ * tuning.
+ */
+ supportsRaw_ = true;
}
/**
* \copydoc libcamera::ipa::Algorithm::init
*/
-int BlackLevelCorrection::init([[maybe_unused]] IPAContext &context,
- const YamlObject &tuningData)
+int BlackLevelCorrection::init(IPAContext &context, const YamlObject &tuningData)
{
- blackLevelRed_ = tuningData["R"].get<int16_t>(256);
- blackLevelGreenR_ = tuningData["Gr"].get<int16_t>(256);
- blackLevelGreenB_ = tuningData["Gb"].get<int16_t>(256);
- blackLevelBlue_ = tuningData["B"].get<int16_t>(256);
+ std::optional<int16_t> levelRed = tuningData["R"].get<int16_t>();
+ std::optional<int16_t> levelGreenR = tuningData["Gr"].get<int16_t>();
+ std::optional<int16_t> levelGreenB = tuningData["Gb"].get<int16_t>();
+ std::optional<int16_t> levelBlue = tuningData["B"].get<int16_t>();
+ bool tuningHasLevels = levelRed && levelGreenR && levelGreenB && levelBlue;
+
+ auto blackLevel = context.camHelper->blackLevel();
+ if (!blackLevel) {
+ /*
+ * Not all camera sensor helpers have been updated with black
+ * levels. Print a warning and fall back to the levels from the
+ * tuning data to preserve backward compatibility. This should
+ * be removed once all helpers provide the data.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "No black levels provided by camera sensor helper"
+ << ", please fix";
+
+ blackLevelRed_ = levelRed.value_or(4096);
+ blackLevelGreenR_ = levelGreenR.value_or(4096);
+ blackLevelGreenB_ = levelGreenB.value_or(4096);
+ blackLevelBlue_ = levelBlue.value_or(4096);
+ } else if (tuningHasLevels) {
+ /*
+ * If black levels are provided in the tuning file, use them to
+ * avoid breaking existing camera tuning. This is deprecated and
+ * will be removed.
+ */
+ LOG(RkISP1Blc, Warning)
+ << "Deprecated: black levels overwritten by tuning file";
+
+ blackLevelRed_ = *levelRed;
+ blackLevelGreenR_ = *levelGreenR;
+ blackLevelGreenB_ = *levelGreenB;
+ blackLevelBlue_ = *levelBlue;
+ } else {
+ blackLevelRed_ = *blackLevel;
+ blackLevelGreenR_ = *blackLevel;
+ blackLevelGreenB_ = *blackLevel;
+ blackLevelBlue_ = *blackLevel;
+ }
tuningParameters_ = true;
@@ -70,6 +115,9 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
[[maybe_unused]] IPAFrameContext &frameContext,
rkisp1_params_cfg *params)
{
+ if (context.configuration.raw)
+ return;
+
if (frame > 0)
return;
@@ -77,16 +125,33 @@ void BlackLevelCorrection::prepare([[maybe_unused]] IPAContext &context,
return;
params->others.bls_config.enable_auto = 0;
- params->others.bls_config.fixed_val.r = blackLevelRed_;
- params->others.bls_config.fixed_val.gr = blackLevelGreenR_;
- params->others.bls_config.fixed_val.gb = blackLevelGreenB_;
- params->others.bls_config.fixed_val.b = blackLevelBlue_;
+ /* The rkisp1 uses 12bit based black levels. Scale down accordingly. */
+ params->others.bls_config.fixed_val.r = blackLevelRed_ >> 4;
+ params->others.bls_config.fixed_val.gr = blackLevelGreenR_ >> 4;
+ params->others.bls_config.fixed_val.gb = blackLevelGreenB_ >> 4;
+ params->others.bls_config.fixed_val.b = blackLevelBlue_ >> 4;
params->module_en_update |= RKISP1_CIF_ISP_MODULE_BLS;
params->module_ens |= RKISP1_CIF_ISP_MODULE_BLS;
params->module_cfg_update |= RKISP1_CIF_ISP_MODULE_BLS;
}
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void BlackLevelCorrection::process([[maybe_unused]] IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ [[maybe_unused]] IPAFrameContext &frameContext,
+ [[maybe_unused]] const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ metadata.set(controls::SensorBlackLevels,
+ { static_cast<int32_t>(blackLevelRed_),
+ static_cast<int32_t>(blackLevelGreenR_),
+ static_cast<int32_t>(blackLevelGreenB_),
+ static_cast<int32_t>(blackLevelBlue_) });
+}
+
REGISTER_IPA_ALGORITHM(BlackLevelCorrection, "BlackLevelCorrection")
} /* namespace ipa::rkisp1::algorithms */
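For context on the ">> 4" in prepare() above: the IPA now stores black levels at the 16-bit precision returned by CameraSensorHelper::blackLevel() and shifts them down to the 12-bit range expected by the rkisp1 BLS block. For the IMX219, for example, the helper value of 4096 becomes 256, which matches the value previously hard-coded in imx219.yaml. A small sketch of that conversion (to12Bit() is an illustrative name, not part of the patch):

#include <cstdint>

/* Convert a 16-bit black level to the rkisp1's 12-bit BLS range. */
static constexpr uint16_t to12Bit(uint16_t blackLevel16)
{
	return blackLevel16 >> 4;
}

static_assert(to12Bit(4096) == 256, "imx219: matches the old tuning value");
static_assert(to12Bit(1024) == 64, "ov5640: 0x10 at 10 bits, 64 at 12 bits");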
diff --git a/src/ipa/rkisp1/algorithms/blc.h b/src/ipa/rkisp1/algorithms/blc.h
index 460ebcc1..4ecac233 100644
--- a/src/ipa/rkisp1/algorithms/blc.h
+++ b/src/ipa/rkisp1/algorithms/blc.h
@@ -23,7 +23,10 @@ public:
void prepare(IPAContext &context, const uint32_t frame,
IPAFrameContext &frameContext,
rkisp1_params_cfg *params) override;
-
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
private:
bool tuningParameters_;
int16_t blackLevelRed_;
diff --git a/src/ipa/rkisp1/data/imx219.yaml b/src/ipa/rkisp1/data/imx219.yaml
index cbcc43b8..0d99cb52 100644
--- a/src/ipa/rkisp1/data/imx219.yaml
+++ b/src/ipa/rkisp1/data/imx219.yaml
@@ -6,10 +6,6 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 256
- Gr: 256
- Gb: 256
- B: 256
- LensShadingCorrection:
x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
diff --git a/src/ipa/rkisp1/data/imx258.yaml b/src/ipa/rkisp1/data/imx258.yaml
index 43dddf20..202af36a 100644
--- a/src/ipa/rkisp1/data/imx258.yaml
+++ b/src/ipa/rkisp1/data/imx258.yaml
@@ -5,6 +5,7 @@ version: 1
algorithms:
- Agc:
- Awb:
+ - BlackLevelCorrection:
- LensShadingCorrection:
x-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
y-size: [ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625 ]
diff --git a/src/ipa/rkisp1/data/ov4689.yaml b/src/ipa/rkisp1/data/ov4689.yaml
index 2068684c..60901296 100644
--- a/src/ipa/rkisp1/data/ov4689.yaml
+++ b/src/ipa/rkisp1/data/ov4689.yaml
@@ -6,8 +6,4 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 66
- Gr: 66
- Gb: 66
- B: 66
...
diff --git a/src/ipa/rkisp1/data/ov5640.yaml b/src/ipa/rkisp1/data/ov5640.yaml
index 897b83cb..4b21d412 100644
--- a/src/ipa/rkisp1/data/ov5640.yaml
+++ b/src/ipa/rkisp1/data/ov5640.yaml
@@ -6,10 +6,6 @@ algorithms:
- Agc:
- Awb:
- BlackLevelCorrection:
- R: 256
- Gr: 256
- Gb: 256
- B: 256
- ColorProcessing:
- GammaSensorLinearization:
x-intervals: [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ]
diff --git a/src/ipa/rkisp1/data/uncalibrated.yaml b/src/ipa/rkisp1/data/uncalibrated.yaml
index a7bbd8d8..60901296 100644
--- a/src/ipa/rkisp1/data/uncalibrated.yaml
+++ b/src/ipa/rkisp1/data/uncalibrated.yaml
@@ -5,4 +5,5 @@ version: 1
algorithms:
- Agc:
- Awb:
+ - BlackLevelCorrection:
...
diff --git a/src/ipa/rkisp1/ipa_context.h b/src/ipa/rkisp1/ipa_context.h
index 8602b408..1d0e9030 100644
--- a/src/ipa/rkisp1/ipa_context.h
+++ b/src/ipa/rkisp1/ipa_context.h
@@ -8,6 +8,8 @@
#pragma once
+#include <memory>
+
#include <linux/rkisp1-config.h>
#include <libcamera/base/utils.h>
@@ -16,6 +18,7 @@
#include <libcamera/controls.h>
#include <libcamera/geometry.h>
+#include <libipa/camera_sensor_helper.h>
#include <libipa/fc_queue.h>
#include <libipa/matrix.h>
@@ -178,6 +181,9 @@ struct IPAContext {
FCQueue<IPAFrameContext> frameContexts;
ControlInfoMap::Map ctrlMap;
+
+ /* Interface to the Camera Helper */
+ std::unique_ptr<CameraSensorHelper> camHelper;
};
} /* namespace ipa::rkisp1 */
diff --git a/src/ipa/rkisp1/rkisp1.cpp b/src/ipa/rkisp1/rkisp1.cpp
index d31cdbab..23e0826c 100644
--- a/src/ipa/rkisp1/rkisp1.cpp
+++ b/src/ipa/rkisp1/rkisp1.cpp
@@ -29,7 +29,6 @@
#include "libcamera/internal/yaml_parser.h"
#include "algorithms/algorithm.h"
-#include "libipa/camera_sensor_helper.h"
#include "ipa_context.h"
@@ -81,9 +80,6 @@ private:
ControlInfoMap sensorControls_;
- /* Interface to the Camera Helper */
- std::unique_ptr<CameraSensorHelper> camHelper_;
-
/* Local parameter storage */
struct IPAContext context_;
};
@@ -115,7 +111,7 @@ const ControlInfoMap::Map rkisp1Controls{
} /* namespace */
IPARkISP1::IPARkISP1()
- : context_({ {}, {}, {}, { kMaxFrameContexts }, {} })
+ : context_({ {}, {}, {}, { kMaxFrameContexts }, {}, {} })
{
}
@@ -147,8 +143,8 @@ int IPARkISP1::init(const IPASettings &settings, unsigned int hwRevision,
LOG(IPARkISP1, Debug) << "Hardware revision is " << hwRevision;
- camHelper_ = CameraSensorHelperFactoryBase::create(settings.sensorModel);
- if (!camHelper_) {
+ context_.camHelper = CameraSensorHelperFactoryBase::create(settings.sensorModel);
+ if (!context_.camHelper) {
LOG(IPARkISP1, Error)
<< "Failed to create camera sensor helper for "
<< settings.sensorModel;
@@ -250,8 +246,10 @@ int IPARkISP1::configure(const IPAConfigInfo &ipaConfig,
minExposure * context_.configuration.sensor.lineDuration;
context_.configuration.sensor.maxShutterSpeed =
maxExposure * context_.configuration.sensor.lineDuration;
- context_.configuration.sensor.minAnalogueGain = camHelper_->gain(minGain);
- context_.configuration.sensor.maxAnalogueGain = camHelper_->gain(maxGain);
+ context_.configuration.sensor.minAnalogueGain =
+ context_.camHelper->gain(minGain);
+ context_.configuration.sensor.maxAnalogueGain =
+ context_.camHelper->gain(maxGain);
context_.configuration.raw = std::any_of(streamConfig.begin(), streamConfig.end(),
[](auto &cfg) -> bool {
@@ -352,7 +350,7 @@ void IPARkISP1::processStatsBuffer(const uint32_t frame, const uint32_t bufferId
frameContext.sensor.exposure =
sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
frameContext.sensor.gain =
- camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+ context_.camHelper->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
ControlList metadata(controls::controls);
@@ -389,9 +387,9 @@ void IPARkISP1::updateControls(const IPACameraSensorInfo &sensorInfo,
/* Compute the analogue gain limits. */
const ControlInfo &v4l2Gain = sensorControls.find(V4L2_CID_ANALOGUE_GAIN)->second;
- float minGain = camHelper_->gain(v4l2Gain.min().get<int32_t>());
- float maxGain = camHelper_->gain(v4l2Gain.max().get<int32_t>());
- float defGain = camHelper_->gain(v4l2Gain.def().get<int32_t>());
+ float minGain = context_.camHelper->gain(v4l2Gain.min().get<int32_t>());
+ float maxGain = context_.camHelper->gain(v4l2Gain.max().get<int32_t>());
+ float defGain = context_.camHelper->gain(v4l2Gain.def().get<int32_t>());
ctrlMap.emplace(std::piecewise_construct,
std::forward_as_tuple(&controls::AnalogueGain),
std::forward_as_tuple(minGain, maxGain, defGain));
@@ -436,7 +434,7 @@ void IPARkISP1::setControls(unsigned int frame)
IPAFrameContext &frameContext = context_.frameContexts.get(frame);
uint32_t exposure = frameContext.agc.exposure;
- uint32_t gain = camHelper_->gainCode(frameContext.agc.gain);
+ uint32_t gain = context_.camHelper->gainCode(frameContext.agc.gain);
ControlList ctrls(sensorControls_);
ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure));
diff --git a/src/ipa/rpi/controller/rpi/awb.cpp b/src/ipa/rpi/controller/rpi/awb.cpp
index 003c8fa1..f45525bc 100644
--- a/src/ipa/rpi/controller/rpi/awb.cpp
+++ b/src/ipa/rpi/controller/rpi/awb.cpp
@@ -122,7 +122,7 @@ int AwbConfig::read(const libcamera::YamlObject &params)
}
if (priors.empty()) {
LOG(RPiAwb, Error) << "AwbConfig: no AWB priors configured";
- return ret;
+ return -EINVAL;
}
}
if (params.contains("modes")) {
diff --git a/src/ipa/rpi/controller/rpi/ccm.cpp b/src/ipa/rpi/controller/rpi/ccm.cpp
index e673964c..aefa580c 100644
--- a/src/ipa/rpi/controller/rpi/ccm.cpp
+++ b/src/ipa/rpi/controller/rpi/ccm.cpp
@@ -113,8 +113,10 @@ void Ccm::initialise()
{
}
+namespace {
+
template<typename T>
-static bool getLocked(Metadata *metadata, std::string const &tag, T &value)
+bool getLocked(Metadata *metadata, std::string const &tag, T &value)
{
T *ptr = metadata->getLocked<T>(tag);
if (ptr == nullptr)
@@ -149,6 +151,8 @@ Matrix applySaturation(Matrix const &ccm, double saturation)
return Y2RGB * S * RGB2Y * ccm;
}
+} /* namespace */
+
void Ccm::prepare(Metadata *imageMetadata)
{
bool awbOk = false, luxOk = false;
diff --git a/src/ipa/rpi/controller/rpi/contrast.cpp b/src/ipa/rpi/controller/rpi/contrast.cpp
index 9b37943a..fe866a54 100644
--- a/src/ipa/rpi/controller/rpi/contrast.cpp
+++ b/src/ipa/rpi/controller/rpi/contrast.cpp
@@ -94,6 +94,8 @@ void Contrast::prepare(Metadata *imageMetadata)
imageMetadata->set("contrast.status", status_);
}
+namespace {
+
ipa::Pwl computeStretchCurve(Histogram const &histogram,
ContrastConfig const &config)
{
@@ -153,6 +155,8 @@ ipa::Pwl applyManualContrast(ipa::Pwl const &gammaCurve, double brightness,
return newGammaCurve;
}
+} /* namespace */
+
void Contrast::process(StatisticsPtr &stats,
[[maybe_unused]] Metadata *imageMetadata)
{
diff --git a/src/libcamera/converter.cpp b/src/libcamera/converter.cpp
index d3d38c1b..2ab46133 100644
--- a/src/libcamera/converter.cpp
+++ b/src/libcamera/converter.cpp
@@ -111,12 +111,12 @@ Converter::~Converter()
/**
* \fn Converter::exportBuffers()
* \brief Export buffers from the converter device
- * \param[in] output Output stream index exporting the buffers
+ * \param[in] stream Output stream pointer exporting the buffers
* \param[in] count Number of buffers to allocate
* \param[out] buffers Vector to store allocated buffers
*
* This function operates similarly to V4L2VideoDevice::exportBuffers() on the
- * output stream indicated by the \a output index.
+ * output stream indicated by the \a stream.
*
* \return The number of allocated buffers on success or a negative error code
* otherwise
@@ -137,7 +137,7 @@ Converter::~Converter()
* \fn Converter::queueBuffers()
* \brief Queue buffers to converter device
* \param[in] input The frame buffer to apply the conversion
- * \param[out] outputs The container holding the output stream indexes and
+ * \param[out] outputs The container holding the output stream pointers and
* their respective frame buffer outputs.
*
* This function queues the \a input frame buffer on the output streams of the
diff --git a/src/libcamera/converter/converter_v4l2_m2m.cpp b/src/libcamera/converter/converter_v4l2_m2m.cpp
index d8929fc5..2e77872e 100644
--- a/src/libcamera/converter/converter_v4l2_m2m.cpp
+++ b/src/libcamera/converter/converter_v4l2_m2m.cpp
@@ -32,24 +32,24 @@ namespace libcamera {
LOG_DECLARE_CATEGORY(Converter)
/* -----------------------------------------------------------------------------
- * V4L2M2MConverter::Stream
+ * V4L2M2MConverter::V4L2M2MStream
*/
-V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index)
- : converter_(converter), index_(index)
+V4L2M2MConverter::V4L2M2MStream::V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream)
+ : converter_(converter), stream_(stream)
{
m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
- m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
- m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
+ m2m_->output()->bufferReady.connect(this, &V4L2M2MStream::outputBufferReady);
+ m2m_->capture()->bufferReady.connect(this, &V4L2M2MStream::captureBufferReady);
int ret = m2m_->open();
if (ret < 0)
m2m_.reset();
}
-int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
- const StreamConfiguration &outputCfg)
+int V4L2M2MConverter::V4L2M2MStream::configure(const StreamConfiguration &inputCfg,
+ const StreamConfiguration &outputCfg)
{
V4L2PixelFormat videoFormat =
m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
@@ -101,13 +101,13 @@ int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
return 0;
}
-int V4L2M2MConverter::Stream::exportBuffers(unsigned int count,
- std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+int V4L2M2MConverter::V4L2M2MStream::exportBuffers(unsigned int count,
+ std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return m2m_->capture()->exportBuffers(count, buffers);
}
-int V4L2M2MConverter::Stream::start()
+int V4L2M2MConverter::V4L2M2MStream::start()
{
int ret = m2m_->output()->importBuffers(inputBufferCount_);
if (ret < 0)
@@ -134,7 +134,7 @@ int V4L2M2MConverter::Stream::start()
return 0;
}
-void V4L2M2MConverter::Stream::stop()
+void V4L2M2MConverter::V4L2M2MStream::stop()
{
m2m_->capture()->streamOff();
m2m_->output()->streamOff();
@@ -142,7 +142,7 @@ void V4L2M2MConverter::Stream::stop()
m2m_->output()->releaseBuffers();
}
-int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
+int V4L2M2MConverter::V4L2M2MStream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
{
int ret = m2m_->output()->queueBuffer(input);
if (ret < 0)
@@ -155,12 +155,12 @@ int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *outp
return 0;
}
-std::string V4L2M2MConverter::Stream::logPrefix() const
+std::string V4L2M2MConverter::V4L2M2MStream::logPrefix() const
{
- return "stream" + std::to_string(index_);
+ return stream_->configuration().toString();
}
-void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::V4L2M2MStream::outputBufferReady(FrameBuffer *buffer)
{
auto it = converter_->queue_.find(buffer);
if (it == converter_->queue_.end())
@@ -172,7 +172,7 @@ void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
}
}
-void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer)
+void V4L2M2MConverter::V4L2M2MStream::captureBufferReady(FrameBuffer *buffer)
{
converter_->outputBufferReady.emit(buffer);
}
@@ -333,21 +333,24 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
int ret = 0;
streams_.clear();
- streams_.reserve(outputCfgs.size());
for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
- Stream &stream = streams_.emplace_back(this, i);
+ const StreamConfiguration &cfg = outputCfgs[i];
+ std::unique_ptr<V4L2M2MStream> stream =
+ std::make_unique<V4L2M2MStream>(this, cfg.stream());
- if (!stream.isValid()) {
+ if (!stream->isValid()) {
LOG(Converter, Error)
<< "Failed to create stream " << i;
ret = -EINVAL;
break;
}
- ret = stream.configure(inputCfg, outputCfgs[i]);
+ ret = stream->configure(inputCfg, cfg);
if (ret < 0)
break;
+
+ streams_.emplace(cfg.stream(), std::move(stream));
}
if (ret < 0) {
@@ -361,13 +364,14 @@ int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
/**
* \copydoc libcamera::Converter::exportBuffers
*/
-int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count,
+int V4L2M2MConverter::exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
- if (output >= streams_.size())
+ auto iter = streams_.find(stream);
+ if (iter == streams_.end())
return -EINVAL;
- return streams_[output].exportBuffers(count, buffers);
+ return iter->second->exportBuffers(count, buffers);
}
/**
@@ -377,8 +381,8 @@ int V4L2M2MConverter::start()
{
int ret;
- for (Stream &stream : streams_) {
- ret = stream.start();
+ for (auto &iter : streams_) {
+ ret = iter.second->start();
if (ret < 0) {
stop();
return ret;
@@ -393,41 +397,40 @@ int V4L2M2MConverter::start()
*/
void V4L2M2MConverter::stop()
{
- for (Stream &stream : utils::reverse(streams_))
- stream.stop();
+ for (auto &iter : streams_)
+ iter.second->stop();
}
/**
* \copydoc libcamera::Converter::queueBuffers
*/
int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
+ const std::map<const Stream *, FrameBuffer *> &outputs)
{
- unsigned int mask = 0;
+ std::set<FrameBuffer *> outputBufs;
int ret;
/*
* Validate the outputs as a sanity check: at least one output is
* required, all outputs must reference a valid stream and no two
- * outputs can reference the same stream.
+ * streams can reference the same output framebuffer.
*/
if (outputs.empty())
return -EINVAL;
- for (auto [index, buffer] : outputs) {
+ for (auto [stream, buffer] : outputs) {
if (!buffer)
return -EINVAL;
- if (index >= streams_.size())
- return -EINVAL;
- if (mask & (1 << index))
- return -EINVAL;
- mask |= 1 << index;
+ outputBufs.insert(buffer);
}
+ if (outputBufs.size() != streams_.size())
+ return -EINVAL;
+
/* Queue the input and output buffers to all the streams. */
- for (auto [index, buffer] : outputs) {
- ret = streams_[index].queueBuffers(input, buffer);
+ for (auto [stream, buffer] : outputs) {
+ ret = streams_.at(stream)->queueBuffers(input, buffer);
if (ret < 0)
return ret;
}
diff --git a/src/libcamera/device_enumerator_sysfs.cpp b/src/libcamera/device_enumerator_sysfs.cpp
index fc33ba52..7866885c 100644
--- a/src/libcamera/device_enumerator_sysfs.cpp
+++ b/src/libcamera/device_enumerator_sysfs.cpp
@@ -33,7 +33,7 @@ int DeviceEnumeratorSysfs::init()
int DeviceEnumeratorSysfs::enumerate()
{
struct dirent *ent;
- DIR *dir;
+ DIR *dir = nullptr;
static const char * const sysfs_dirs[] = {
"/sys/subsystem/media/devices",
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
index 4a89e35f..e5b6ef2b 100644
--- a/src/libcamera/pipeline/rpi/vc4/vc4.cpp
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -802,7 +802,7 @@ void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
{
RPi::Stream *stream = nullptr;
- unsigned int index;
+ unsigned int index = 0;
if (!isRunning())
return;
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index eb36578e..60aafc4e 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -198,6 +198,7 @@ namespace {
static const SimplePipelineInfo supportedDevices[] = {
{ "dcmipp", {}, false },
{ "imx7-csi", { { "pxp", 1 } }, false },
+ { "intel-ipu6", {}, true },
{ "j721e-csi2rx", {}, false },
{ "mtk-seninf", { { "mtk-mdp", 3 } }, false },
{ "mxc-isi", {}, false },
@@ -277,7 +278,7 @@ public:
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
- std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_;
+ std::queue<std::map<const Stream *, FrameBuffer *>> conversionQueue_;
bool useConversion_;
std::unique_ptr<Converter> converter_;
@@ -836,7 +837,7 @@ void SimpleCameraData::bufferReady(FrameBuffer *buffer)
Request *request = buffer->request();
if (useConversion_ && !conversionQueue_.empty()) {
- const std::map<unsigned int, FrameBuffer *> &outputs =
+ const std::map<const Stream *, FrameBuffer *> &outputs =
conversionQueue_.front();
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
@@ -1303,10 +1304,8 @@ int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
*/
if (data->useConversion_)
return data->converter_
- ? data->converter_->exportBuffers(data->streamIndex(stream),
- count, buffers)
- : data->swIsp_->exportBuffers(data->streamIndex(stream),
- count, buffers);
+ ? data->converter_->exportBuffers(stream, count, buffers)
+ : data->swIsp_->exportBuffers(stream, count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
@@ -1398,7 +1397,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
SimpleCameraData *data = cameraData(camera);
int ret;
- std::map<unsigned int, FrameBuffer *> buffers;
+ std::map<const Stream *, FrameBuffer *> buffers;
for (auto &[stream, buffer] : request->buffers()) {
/*
@@ -1407,7 +1406,7 @@ int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
* completion handler.
*/
if (data->useConversion_) {
- buffers.emplace(data->streamIndex(stream), buffer);
+ buffers.emplace(stream, buffer);
} else {
ret = data->video_->queueBuffer(buffer);
if (ret < 0)
diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp
index 20fb6f48..c8748d88 100644
--- a/src/libcamera/software_isp/software_isp.cpp
+++ b/src/libcamera/software_isp/software_isp.cpp
@@ -241,19 +241,19 @@ int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
/**
* \brief Export the buffers from the Software ISP
- * \param[in] output Output stream index exporting the buffers
+ * \param[in] stream Output stream exporting the buffers
* \param[in] count Number of buffers to allocate
* \param[out] buffers Vector to store the allocated buffers
* \return The number of allocated buffers on success or a negative error code
* otherwise
*/
-int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
+int SoftwareIsp::exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
ASSERT(debayer_ != nullptr);
/* single output for now */
- if (output >= 1)
+ if (stream == nullptr)
return -EINVAL;
for (unsigned int i = 0; i < count; i++) {
@@ -280,35 +280,29 @@ int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
/**
* \brief Queue buffers to Software ISP
* \param[in] input The input framebuffer
- * \param[in] outputs The container holding the output stream indexes and
+ * \param[in] outputs The container holding the output stream pointers and
* their respective frame buffer outputs
* \return 0 on success, a negative errno on failure
*/
int SoftwareIsp::queueBuffers(FrameBuffer *input,
- const std::map<unsigned int, FrameBuffer *> &outputs)
+ const std::map<const Stream *, FrameBuffer *> &outputs)
{
- unsigned int mask = 0;
-
/*
* Validate the outputs as a sanity check: at least one output is
- * required, all outputs must reference a valid stream and no two
- * outputs can reference the same stream.
+ * required, all outputs must reference a valid stream.
*/
if (outputs.empty())
return -EINVAL;
- for (auto [index, buffer] : outputs) {
+ for (auto [stream, buffer] : outputs) {
if (!buffer)
return -EINVAL;
- if (index >= 1) /* only single stream atm */
- return -EINVAL;
- if (mask & (1 << index))
+ if (outputs.size() != 1) /* only single stream atm */
return -EINVAL;
-
- mask |= 1 << index;
}
- process(input, outputs.at(0));
+ for (auto iter = outputs.begin(); iter != outputs.end(); iter++)
+ process(input, iter->second);
return 0;
}
diff --git a/src/py/libcamera/py_color_space.cpp b/src/py/libcamera/py_color_space.cpp
index 5201121a..fd5a5dab 100644
--- a/src/py/libcamera/py_color_space.cpp
+++ b/src/py/libcamera/py_color_space.cpp
@@ -12,6 +12,8 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
diff --git a/src/py/libcamera/py_controls_generated.cpp.in b/src/py/libcamera/py_controls_generated.cpp.in
index 8d282ce5..26d5a104 100644
--- a/src/py/libcamera/py_controls_generated.cpp.in
+++ b/src/py/libcamera/py_controls_generated.cpp.in
@@ -11,6 +11,8 @@
#include <pybind11/pybind11.h>
+#include "py_main.h"
+
namespace py = pybind11;
class PyControls
diff --git a/src/py/libcamera/py_enums.cpp b/src/py/libcamera/py_enums.cpp
index e25689c6..ca6aeb86 100644
--- a/src/py/libcamera/py_enums.cpp
+++ b/src/py/libcamera/py_enums.cpp
@@ -9,6 +9,8 @@
#include <pybind11/pybind11.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
diff --git a/src/py/libcamera/py_formats_generated.cpp.in b/src/py/libcamera/py_formats_generated.cpp.in
index a3f7f94d..c5fb9063 100644
--- a/src/py/libcamera/py_formats_generated.cpp.in
+++ b/src/py/libcamera/py_formats_generated.cpp.in
@@ -11,6 +11,8 @@
#include <pybind11/pybind11.h>
+#include "py_main.h"
+
namespace py = pybind11;
class PyFormats
diff --git a/src/py/libcamera/py_geometry.cpp b/src/py/libcamera/py_geometry.cpp
index 5c2aeac4..c7e30360 100644
--- a/src/py/libcamera/py_geometry.cpp
+++ b/src/py/libcamera/py_geometry.cpp
@@ -14,6 +14,8 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
diff --git a/src/py/libcamera/py_main.cpp b/src/py/libcamera/py_main.cpp
index bce08218..ab33f38a 100644
--- a/src/py/libcamera/py_main.cpp
+++ b/src/py/libcamera/py_main.cpp
@@ -85,14 +85,6 @@ PYBIND11_DECLARE_HOLDER_TYPE(T, PyCameraSmartPtr<T>)
*/
static std::weak_ptr<PyCameraManager> gCameraManager;
-void init_py_color_space(py::module &m);
-void init_py_controls_generated(py::module &m);
-void init_py_enums(py::module &m);
-void init_py_formats_generated(py::module &m);
-void init_py_geometry(py::module &m);
-void init_py_properties_generated(py::module &m);
-void init_py_transform(py::module &m);
-
PYBIND11_MODULE(_libcamera, m)
{
init_py_enums(m);
diff --git a/src/py/libcamera/py_main.h b/src/py/libcamera/py_main.h
index 5bb5f2d1..4d594326 100644
--- a/src/py/libcamera/py_main.h
+++ b/src/py/libcamera/py_main.h
@@ -7,8 +7,18 @@
#include <libcamera/base/log.h>
+#include <pybind11/pybind11.h>
+
namespace libcamera {
LOG_DECLARE_CATEGORY(Python)
}
+
+void init_py_color_space(pybind11::module &m);
+void init_py_controls_generated(pybind11::module &m);
+void init_py_enums(pybind11::module &m);
+void init_py_formats_generated(pybind11::module &m);
+void init_py_geometry(pybind11::module &m);
+void init_py_properties_generated(pybind11::module &m);
+void init_py_transform(pybind11::module &m);
diff --git a/src/py/libcamera/py_properties_generated.cpp.in b/src/py/libcamera/py_properties_generated.cpp.in
index e3802b81..d28f1ab8 100644
--- a/src/py/libcamera/py_properties_generated.cpp.in
+++ b/src/py/libcamera/py_properties_generated.cpp.in
@@ -11,6 +11,8 @@
#include <pybind11/pybind11.h>
+#include "py_main.h"
+
namespace py = pybind11;
class PyProperties
diff --git a/src/py/libcamera/py_transform.cpp b/src/py/libcamera/py_transform.cpp
index f3a0bfaf..768260ff 100644
--- a/src/py/libcamera/py_transform.cpp
+++ b/src/py/libcamera/py_transform.cpp
@@ -12,6 +12,8 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
+#include "py_main.h"
+
namespace py = pybind11;
using namespace libcamera;
diff --git a/src/v4l2/v4l2_compat.cpp b/src/v4l2/v4l2_compat.cpp
index 8e2b7e92..66468bf3 100644
--- a/src/v4l2/v4l2_compat.cpp
+++ b/src/v4l2/v4l2_compat.cpp
@@ -7,12 +7,15 @@
#include "v4l2_compat_manager.h"
+#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <unistd.h>
#include <libcamera/base/utils.h>
@@ -28,71 +31,97 @@ using namespace libcamera;
va_end(ap); \
}
+namespace {
+
+/*
+ * Determine if the flags require a further mode argument that needs to be
+ * parsed from va_args.
+ */
+bool needs_mode(int flags)
+{
+ return (flags & O_CREAT) || ((flags & O_TMPFILE) == O_TMPFILE);
+}
+
+} /* namespace */
+
extern "C" {
LIBCAMERA_PUBLIC int open(const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
oflag, mode);
}
-/* _FORTIFY_SOURCE redirects open to __open_2 */
-LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
-{
- return open(path, oflag);
-}
-
#ifndef open64
LIBCAMERA_PUBLIC int open64(const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(AT_FDCWD, path,
oflag | O_LARGEFILE, mode);
}
-
-LIBCAMERA_PUBLIC int __open64_2(const char *path, int oflag)
-{
- return open(path, oflag);
-}
#endif
LIBCAMERA_PUBLIC int openat(int dirfd, const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(dirfd, path, oflag, mode);
}
-LIBCAMERA_PUBLIC int __openat_2(int dirfd, const char *path, int oflag)
-{
- return openat(dirfd, path, oflag);
-}
-
#ifndef openat64
LIBCAMERA_PUBLIC int openat64(int dirfd, const char *path, int oflag, ...)
{
mode_t mode = 0;
- if (oflag & O_CREAT || oflag & O_TMPFILE)
+ if (needs_mode(oflag))
extract_va_arg(mode_t, mode, oflag);
return V4L2CompatManager::instance()->openat(dirfd, path,
oflag | O_LARGEFILE, mode);
}
+#endif
-LIBCAMERA_PUBLIC int __openat64_2(int dirfd, const char *path, int oflag)
+/*
+ * _FORTIFY_SOURCE redirects open* to __open*_2. Disable the
+ * -Wmissing-declarations warnings, as the functions won't be declared if
+ * _FORTIFY_SOURCE is not in use.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-declarations"
+
+LIBCAMERA_PUBLIC int __open_2(const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
+ return open(path, oflag);
+}
+
+LIBCAMERA_PUBLIC int __open64_2(const char *path, int oflag)
{
+ assert(!needs_mode(oflag));
+ return open64(path, oflag);
+}
+
+LIBCAMERA_PUBLIC int __openat_2(int dirfd, const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
return openat(dirfd, path, oflag);
}
-#endif
+
+LIBCAMERA_PUBLIC int __openat64_2(int dirfd, const char *path, int oflag)
+{
+ assert(!needs_mode(oflag));
+ return openat64(dirfd, path, oflag);
+}
+
+#pragma GCC diagnostic pop
LIBCAMERA_PUBLIC int dup(int oldfd)
{