Diffstat (limited to 'src')
-rw-r--r--  src/gstreamer/gstlibcameraallocator.cpp | 14
-rw-r--r--  src/ipa/rkisp1/algorithms/lux.cpp | 80
-rw-r--r--  src/ipa/rkisp1/algorithms/lux.h | 36
-rw-r--r--  src/ipa/rkisp1/algorithms/meson.build | 1
-rw-r--r--  src/ipa/rkisp1/ipa_context.h | 4
-rw-r--r--  src/ipa/rpi/cam_helper/cam_helper.cpp | 5
-rw-r--r--  src/ipa/rpi/cam_helper/cam_helper.h | 2
-rw-r--r--  src/ipa/rpi/cam_helper/cam_helper_imx415.cpp | 64
-rw-r--r--  src/ipa/rpi/cam_helper/meson.build | 1
-rw-r--r--  src/ipa/rpi/common/ipa_base.cpp | 1
-rw-r--r--  src/ipa/rpi/controller/controller.cpp | 2
-rw-r--r--  src/ipa/rpi/controller/controller.h | 1
-rw-r--r--  src/ipa/rpi/controller/metadata.h | 23
-rwxr-xr-x  src/ipa/rpi/vc4/data/imx415.json | 413
-rw-r--r--  src/ipa/rpi/vc4/data/meson.build | 1
-rw-r--r--  src/libcamera/base/thread.cpp | 4
-rw-r--r--  src/libcamera/dma_buf_allocator.cpp | 12
-rw-r--r--  src/libcamera/pipeline/virtual/image_frame_generator.cpp | 2
-rw-r--r--  src/libcamera/pipeline/virtual/test_pattern_generator.cpp | 59
-rw-r--r--  src/libcamera/pipeline/virtual/test_pattern_generator.h | 4
-rw-r--r--  src/libcamera/pipeline/virtual/virtual.cpp | 5
-rw-r--r--  src/libcamera/sensor/camera_sensor.cpp | 67
-rw-r--r--  src/libcamera/sensor/camera_sensor_properties.cpp | 7
-rw-r--r--  src/libcamera/sensor/camera_sensor_raw.cpp | 1157
-rw-r--r--  src/libcamera/sensor/meson.build | 1
-rw-r--r--  src/libcamera/v4l2_pixelformat.cpp | 34
-rw-r--r--  src/libcamera/v4l2_subdevice.cpp | 63
-rw-r--r--  src/libcamera/v4l2_videodevice.cpp | 51
-rw-r--r--  src/v4l2/meson.build | 7
29 files changed, 2046 insertions, 75 deletions
diff --git a/src/gstreamer/gstlibcameraallocator.cpp b/src/gstreamer/gstlibcameraallocator.cpp
index 7e4c904d..d4492d99 100644
--- a/src/gstreamer/gstlibcameraallocator.cpp
+++ b/src/gstreamer/gstlibcameraallocator.cpp
@@ -8,6 +8,8 @@
#include "gstlibcameraallocator.h"
+#include <utility>
+
#include <libcamera/camera.h>
#include <libcamera/framebuffer_allocator.h>
#include <libcamera/stream.h>
@@ -199,22 +201,20 @@ GstLibcameraAllocator *
gst_libcamera_allocator_new(std::shared_ptr<Camera> camera,
CameraConfiguration *config_)
{
- auto *self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR,
- nullptr));
+ g_autoptr(GstLibcameraAllocator) self = GST_LIBCAMERA_ALLOCATOR(g_object_new(GST_TYPE_LIBCAMERA_ALLOCATOR,
+ nullptr));
gint ret;
self->cm_ptr = new std::shared_ptr<CameraManager>(gst_libcamera_get_camera_manager(ret));
- if (ret) {
- g_object_unref(self);
+ if (ret)
return nullptr;
- }
self->fb_allocator = new FrameBufferAllocator(camera);
for (StreamConfiguration &streamCfg : *config_) {
Stream *stream = streamCfg.stream();
ret = self->fb_allocator->allocate(stream);
- if (ret == 0)
+ if (ret <= 0)
return nullptr;
GQueue *pool = g_queue_new();
@@ -228,7 +228,7 @@ gst_libcamera_allocator_new(std::shared_ptr<Camera> camera,
g_hash_table_insert(self->pools, stream, pool);
}
- return self;
+ return std::exchange(self, nullptr);
}
bool
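Note: the reworked error paths above rely on a common GLib/C++ ownership idiom: keep the object in a g_autoptr so that every early return drops the reference automatically, and release ownership to the caller with std::exchange only on the success path. A minimal sketch of the pattern (foo_init() is an illustrative helper, not part of this change):

	/* Requires <utility> for std::exchange and <glib-object.h>. */
	static GObject *foo_new(void)
	{
		g_autoptr(GObject) self = G_OBJECT(g_object_new(G_TYPE_OBJECT, nullptr));

		if (!foo_init(self))
			return nullptr;	/* the g_autoptr unrefs self on this path */

		/* Success: detach from the autoptr and transfer the reference. */
		return std::exchange(self, nullptr);
	}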
diff --git a/src/ipa/rkisp1/algorithms/lux.cpp b/src/ipa/rkisp1/algorithms/lux.cpp
new file mode 100644
index 00000000..b0f74963
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lux.cpp
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * lux.cpp - RkISP1 Lux control
+ */
+
+#include "lux.h"
+
+#include <libcamera/base/log.h>
+
+#include <libcamera/control_ids.h>
+
+#include "libipa/histogram.h"
+#include "libipa/lux.h"
+
+/**
+ * \file lux.h
+ */
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+/**
+ * \class Lux
+ * \brief RkISP1 Lux control
+ *
+ * The Lux algorithm is responsible for estimating the lux level of the image.
+ * It doesn't take or generate any controls, but it provides a lux level for
+ * other algorithms (such as AGC) to use.
+ */
+
+/**
+ * \brief Construct an rkisp1 Lux algo module
+ *
+ * The Lux helper is initialized to 65535 as that is the max bin count on the
+ * rkisp1.
+ */
+Lux::Lux()
+ : lux_(65535)
+{
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::init
+ */
+int Lux::init([[maybe_unused]] IPAContext &context, const YamlObject &tuningData)
+{
+ return lux_.parseTuningData(tuningData);
+}
+
+/**
+ * \copydoc libcamera::ipa::Algorithm::process
+ */
+void Lux::process(IPAContext &context,
+ [[maybe_unused]] const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata)
+{
+ utils::Duration exposureTime = context.configuration.sensor.lineDuration
+ * frameContext.sensor.exposure;
+ double gain = frameContext.sensor.gain;
+
+ /* \todo Deduplicate the histogram calculation from AGC */
+ const rkisp1_cif_isp_stat *params = &stats->params;
+ Histogram yHist({ params->hist.hist_bins, context.hw->numHistogramBins },
+ [](uint32_t x) { return x >> 4; });
+
+ double lux = lux_.estimateLux(exposureTime, gain, 1.0, yHist);
+ frameContext.lux.lux = lux;
+ metadata.set(controls::Lux, lux);
+}
+
+REGISTER_IPA_ALGORITHM(Lux, "Lux")
+
+} /* namespace ipa::rkisp1::algorithms */
+
+} /* namespace libcamera */
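Note: with the estimate stored in both the frame context and the metadata, other algorithms can consume it without recomputing the histogram, provided the Lux algorithm runs earlier in the algorithm list. A rough sketch of a hypothetical consumer's process() hook (ExampleAlgo, its strength_ member and the 1000 lux threshold are illustrative only):

	void ExampleAlgo::process([[maybe_unused]] IPAContext &context,
				  [[maybe_unused]] const uint32_t frame,
				  IPAFrameContext &frameContext,
				  [[maybe_unused]] const rkisp1_stat_buffer *stats,
				  [[maybe_unused]] ControlList &metadata)
	{
		/* Estimate written by Lux::process() for this frame. */
		const double lux = frameContext.lux.lux;

		/* Illustrative use: relax a tuning parameter in bright scenes. */
		strength_ = lux > 1000.0 ? 0.5 : 1.0;
	}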
diff --git a/src/ipa/rkisp1/algorithms/lux.h b/src/ipa/rkisp1/algorithms/lux.h
new file mode 100644
index 00000000..8a90de55
--- /dev/null
+++ b/src/ipa/rkisp1/algorithms/lux.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas On Board
+ *
+ * lux.h - RkISP1 Lux control
+ */
+
+#pragma once
+
+#include <sys/types.h>
+
+#include "libipa/lux.h"
+
+#include "algorithm.h"
+
+namespace libcamera {
+
+namespace ipa::rkisp1::algorithms {
+
+class Lux : public Algorithm
+{
+public:
+ Lux();
+
+ int init(IPAContext &context, const YamlObject &tuningData) override;
+ void process(IPAContext &context, const uint32_t frame,
+ IPAFrameContext &frameContext,
+ const rkisp1_stat_buffer *stats,
+ ControlList &metadata) override;
+
+private:
+ ipa::Lux lux_;
+};
+
+} /* namespace ipa::rkisp1::algorithms */
+} /* namespace libcamera */
diff --git a/src/ipa/rkisp1/algorithms/meson.build b/src/ipa/rkisp1/algorithms/meson.build
index 1734a667..c66b0b70 100644
--- a/src/ipa/rkisp1/algorithms/meson.build
+++ b/src/ipa/rkisp1/algorithms/meson.build
@@ -12,4 +12,5 @@ rkisp1_ipa_algorithms = files([
'goc.cpp',
'gsl.cpp',
'lsc.cpp',
+ 'lux.cpp',
])
diff --git a/src/ipa/rkisp1/ipa_context.h b/src/ipa/rkisp1/ipa_context.h
index 4b50015b..b83c1822 100644
--- a/src/ipa/rkisp1/ipa_context.h
+++ b/src/ipa/rkisp1/ipa_context.h
@@ -169,6 +169,10 @@ struct IPAFrameContext : public FrameContext {
struct {
Matrix<float, 3, 3> ccm;
} ccm;
+
+ struct {
+ double lux;
+ } lux;
};
struct IPAContext {
diff --git a/src/ipa/rpi/cam_helper/cam_helper.cpp b/src/ipa/rpi/cam_helper/cam_helper.cpp
index 8c720652..a78db9c1 100644
--- a/src/ipa/rpi/cam_helper/cam_helper.cpp
+++ b/src/ipa/rpi/cam_helper/cam_helper.cpp
@@ -156,6 +156,11 @@ void CamHelper::setCameraMode(const CameraMode &mode)
}
}
+void CamHelper::setHwConfig(const Controller::HardwareConfig &hwConfig)
+{
+ hwConfig_ = hwConfig;
+}
+
bool CamHelper::sensorEmbeddedDataPresent() const
{
return false;
diff --git a/src/ipa/rpi/cam_helper/cam_helper.h b/src/ipa/rpi/cam_helper/cam_helper.h
index 29371bdb..4a826690 100644
--- a/src/ipa/rpi/cam_helper/cam_helper.h
+++ b/src/ipa/rpi/cam_helper/cam_helper.h
@@ -71,6 +71,7 @@ public:
CamHelper(std::unique_ptr<MdParser> parser, unsigned int frameIntegrationDiff);
virtual ~CamHelper();
void setCameraMode(const CameraMode &mode);
+ void setHwConfig(const Controller::HardwareConfig &hwConfig);
virtual void prepare(libcamera::Span<const uint8_t> buffer,
Metadata &metadata);
virtual void process(StatisticsPtr &stats, Metadata &metadata);
@@ -101,6 +102,7 @@ protected:
std::unique_ptr<MdParser> parser_;
CameraMode mode_;
+ Controller::HardwareConfig hwConfig_;
private:
/*
diff --git a/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp
new file mode 100644
index 00000000..c0a09eee
--- /dev/null
+++ b/src/ipa/rpi/cam_helper/cam_helper_imx415.cpp
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (C) 2025, Raspberry Pi Ltd
+ *
+ * camera helper for imx415 sensor
+ */
+
+#include <cmath>
+
+#include "cam_helper.h"
+
+using namespace RPiController;
+
+class CamHelperImx415 : public CamHelper
+{
+public:
+ CamHelperImx415();
+ uint32_t gainCode(double gain) const override;
+ double gain(uint32_t gainCode) const override;
+ unsigned int hideFramesStartup() const override;
+ unsigned int hideFramesModeSwitch() const override;
+
+private:
+ /*
+ * Smallest difference between the frame length and integration time,
+ * in units of lines.
+ */
+ static constexpr int frameIntegrationDiff = 8;
+};
+
+CamHelperImx415::CamHelperImx415()
+ : CamHelper({}, frameIntegrationDiff)
+{
+}
+
+uint32_t CamHelperImx415::gainCode(double gain) const
+{
+ int code = 66.6667 * std::log10(gain);
+ return std::max(0, std::min(code, 0xf0));
+}
+
+double CamHelperImx415::gain(uint32_t gainCode) const
+{
+ return std::pow(10, 0.015 * gainCode);
+}
+
+unsigned int CamHelperImx415::hideFramesStartup() const
+{
+ /* On startup, we seem to get 1 bad frame. */
+ return 1;
+}
+
+unsigned int CamHelperImx415::hideFramesModeSwitch() const
+{
+ /* After a mode switch, we seem to get 1 bad frame. */
+ return 1;
+}
+
+static CamHelper *create()
+{
+ return new CamHelperImx415();
+}
+
+static RegisterCamHelper reg("imx415", &create);
diff --git a/src/ipa/rpi/cam_helper/meson.build b/src/ipa/rpi/cam_helper/meson.build
index 03e88fe0..abf02147 100644
--- a/src/ipa/rpi/cam_helper/meson.build
+++ b/src/ipa/rpi/cam_helper/meson.build
@@ -7,6 +7,7 @@ rpi_ipa_cam_helper_sources = files([
'cam_helper_imx283.cpp',
'cam_helper_imx290.cpp',
'cam_helper_imx296.cpp',
+ 'cam_helper_imx415.cpp',
'cam_helper_imx477.cpp',
'cam_helper_imx519.cpp',
'cam_helper_imx708.cpp',
diff --git a/src/ipa/rpi/common/ipa_base.cpp b/src/ipa/rpi/common/ipa_base.cpp
index 0c8aee69..6ff1e22b 100644
--- a/src/ipa/rpi/common/ipa_base.cpp
+++ b/src/ipa/rpi/common/ipa_base.cpp
@@ -151,6 +151,7 @@ int32_t IpaBase::init(const IPASettings &settings, const InitParams &params, Ini
lensPresent_ = params.lensPresent;
controller_.initialise();
+ helper_->setHwConfig(controller_.getHardwareConfig());
/* Return the controls handled by the IPA */
ControlInfoMap::Map ctrlMap = ipaControls;
diff --git a/src/ipa/rpi/controller/controller.cpp b/src/ipa/rpi/controller/controller.cpp
index e0131018..651fff63 100644
--- a/src/ipa/rpi/controller/controller.cpp
+++ b/src/ipa/rpi/controller/controller.cpp
@@ -39,6 +39,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap
.pipelineWidth = 13,
.statsInline = false,
.minPixelProcessingTime = 0s,
+ .dataBufferStrided = true,
}
},
{
@@ -71,6 +72,7 @@ static const std::map<std::string, Controller::HardwareConfig> HardwareConfigMap
* frames wider than ~16,000 pixels.
*/
.minPixelProcessingTime = 1.0us / 380,
+ .dataBufferStrided = false,
}
},
};
diff --git a/src/ipa/rpi/controller/controller.h b/src/ipa/rpi/controller/controller.h
index eff520bd..fdb46557 100644
--- a/src/ipa/rpi/controller/controller.h
+++ b/src/ipa/rpi/controller/controller.h
@@ -49,6 +49,7 @@ public:
unsigned int pipelineWidth;
bool statsInline;
libcamera::utils::Duration minPixelProcessingTime;
+ bool dataBufferStrided;
};
Controller();
diff --git a/src/ipa/rpi/controller/metadata.h b/src/ipa/rpi/controller/metadata.h
index b4650d25..77d3b074 100644
--- a/src/ipa/rpi/controller/metadata.h
+++ b/src/ipa/rpi/controller/metadata.h
@@ -12,6 +12,7 @@
#include <map>
#include <mutex>
#include <string>
+#include <utility>
#include <libcamera/base/thread_annotations.h>
@@ -36,10 +37,10 @@ public:
}
template<typename T>
- void set(std::string const &tag, T const &value)
+ void set(std::string const &tag, T &&value)
{
std::scoped_lock lock(mutex_);
- data_[tag] = value;
+ data_[tag] = std::forward<T>(value);
}
template<typename T>
@@ -90,6 +91,12 @@ public:
data_.insert(other.data_.begin(), other.data_.end());
}
+ void erase(std::string const &tag)
+ {
+ std::scoped_lock lock(mutex_);
+ eraseLocked(tag);
+ }
+
template<typename T>
T *getLocked(std::string const &tag)
{
@@ -104,10 +111,18 @@ public:
}
template<typename T>
- void setLocked(std::string const &tag, T const &value)
+ void setLocked(std::string const &tag, T &&value)
{
/* Use this only if you're holding the lock yourself. */
- data_[tag] = value;
+ data_[tag] = std::forward<T>(value);
+ }
+
+ void eraseLocked(std::string const &tag)
+ {
+ auto it = data_.find(tag);
+ if (it == data_.end())
+ return;
+ data_.erase(it);
}
/*
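Note: taking the value by forwarding reference means callers can now move large objects into the metadata store instead of copying them, while lvalue arguments behave exactly as before. A brief usage sketch (the tag names and table contents are illustrative):

	RPiController::Metadata metadata;
	std::vector<double> table(1024, 1.0);

	metadata.set("example.copied", table);            /* copies, as before */
	metadata.set("example.moved", std::move(table));  /* moves the vector's storage */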
diff --git a/src/ipa/rpi/vc4/data/imx415.json b/src/ipa/rpi/vc4/data/imx415.json
new file mode 100755
index 00000000..6ed16b17
--- /dev/null
+++ b/src/ipa/rpi/vc4/data/imx415.json
@@ -0,0 +1,413 @@
+{
+ "version": 2.0,
+ "target": "bcm2835",
+ "algorithms": [
+ {
+ "rpi.black_level":
+ {
+ "black_level": 3840
+ }
+ },
+ {
+ "rpi.dpc": { }
+ },
+ {
+ "rpi.lux":
+ {
+ "reference_shutter_speed": 19230,
+ "reference_gain": 1.0,
+ "reference_aperture": 1.0,
+ "reference_lux": 1198,
+ "reference_Y": 14876
+ }
+ },
+ {
+ "rpi.noise":
+ {
+ "reference_constant": 17,
+ "reference_slope": 3.439
+ }
+ },
+ {
+ "rpi.geq":
+ {
+ "offset": 193,
+ "slope": 0.00902
+ }
+ },
+ {
+ "rpi.sdn": { }
+ },
+ {
+ "rpi.awb":
+ {
+ "priors": [
+ {
+ "lux": 0,
+ "prior":
+ [
+ 2000, 1.0,
+ 3000, 0.0,
+ 13000, 0.0
+ ]
+ },
+ {
+ "lux": 800,
+ "prior":
+ [
+ 2000, 0.0,
+ 6000, 2.0,
+ 13000, 2.0
+ ]
+ },
+ {
+ "lux": 1500,
+ "prior":
+ [
+ 2000, 0.0,
+ 4000, 1.0,
+ 6000, 6.0,
+ 6500, 7.0,
+ 7000, 1.0,
+ 13000, 1.0
+ ]
+ }
+ ],
+ "modes":
+ {
+ "auto":
+ {
+ "lo": 2500,
+ "hi": 8000
+ },
+ "incandescent":
+ {
+ "lo": 2500,
+ "hi": 3000
+ },
+ "tungsten":
+ {
+ "lo": 3000,
+ "hi": 3500
+ },
+ "fluorescent":
+ {
+ "lo": 4000,
+ "hi": 4700
+ },
+ "indoor":
+ {
+ "lo": 3000,
+ "hi": 5000
+ },
+ "daylight":
+ {
+ "lo": 5500,
+ "hi": 6500
+ },
+ "cloudy":
+ {
+ "lo": 7000,
+ "hi": 8600
+ }
+ },
+ "bayes": 1,
+ "ct_curve":
+ [
+ 2698.0, 0.7681, 0.2026,
+ 2930.0, 0.7515, 0.2116,
+ 3643.0, 0.6355, 0.2858,
+ 4605.0, 0.4992, 0.4041,
+ 5658.0, 0.4498, 0.4574
+ ],
+ "sensitivity_r": 1.0,
+ "sensitivity_b": 1.0,
+ "transverse_pos": 0.0112,
+ "transverse_neg": 0.01424
+ }
+ },
+ {
+ "rpi.agc":
+ {
+ "metering_modes":
+ {
+ "centre-weighted":
+ {
+ "weights":
+ [
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0
+ ]
+ },
+ "spot":
+ {
+ "weights":
+ [
+ 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ]
+ },
+ "matrix":
+ {
+ "weights":
+ [
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ ]
+ }
+ },
+ "exposure_modes":
+ {
+ "normal":
+ {
+ "shutter": [ 100, 10000, 30000, 60000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ },
+ "short":
+ {
+ "shutter": [ 100, 5000, 10000, 20000, 120000 ],
+ "gain": [ 1.0, 2.0, 4.0, 6.0, 6.0 ]
+ }
+ },
+ "constraint_modes":
+ {
+ "normal": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ }
+ ],
+ "highlight": [
+ {
+ "bound": "LOWER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.5,
+ 1000, 0.5
+ ]
+ },
+ {
+ "bound": "UPPER",
+ "q_lo": 0.98,
+ "q_hi": 1.0,
+ "y_target":
+ [
+ 0, 0.8,
+ 1000, 0.8
+ ]
+ }
+ ]
+ },
+ "y_target":
+ [
+ 0, 0.16,
+ 1000, 0.165,
+ 10000, 0.17
+ ]
+ }
+ },
+ {
+ "rpi.alsc":
+ {
+ "omega": 1.3,
+ "n_iter": 100,
+ "luminance_strength": 0.8,
+ "calibrations_Cr": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.025, 1.016, 1.013, 1.011, 1.008, 1.005, 1.003, 1.001, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.027, 1.035,
+ 1.025, 1.017, 1.013, 1.011, 1.008, 1.005, 1.003, 1.003, 1.004, 1.005, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035,
+ 1.022, 1.017, 1.013, 1.009, 1.007, 1.005, 1.003, 1.003, 1.004, 1.006, 1.009, 1.012, 1.017, 1.023, 1.029, 1.035,
+ 1.019, 1.015, 1.011, 1.007, 1.005, 1.003, 1.001, 1.001, 1.003, 1.004, 1.007, 1.009, 1.015, 1.022, 1.028, 1.035,
+ 1.018, 1.014, 1.009, 1.006, 1.004, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.021, 1.028, 1.035,
+ 1.018, 1.013, 1.011, 1.006, 1.003, 1.002, 1.001, 1.001, 1.001, 1.003, 1.006, 1.009, 1.015, 1.022, 1.028, 1.036,
+ 1.018, 1.014, 1.011, 1.007, 1.004, 1.002, 1.001, 1.001, 1.001, 1.004, 1.007, 1.009, 1.015, 1.023, 1.029, 1.036,
+ 1.019, 1.014, 1.012, 1.008, 1.005, 1.003, 1.002, 1.001, 1.003, 1.005, 1.008, 1.012, 1.016, 1.024, 1.031, 1.037,
+ 1.021, 1.016, 1.013, 1.009, 1.008, 1.005, 1.003, 1.003, 1.005, 1.008, 1.011, 1.014, 1.019, 1.026, 1.033, 1.039,
+ 1.025, 1.021, 1.016, 1.013, 1.009, 1.008, 1.006, 1.006, 1.008, 1.011, 1.014, 1.019, 1.024, 1.031, 1.038, 1.046,
+ 1.029, 1.025, 1.021, 1.018, 1.014, 1.013, 1.011, 1.011, 1.012, 1.015, 1.019, 1.023, 1.028, 1.035, 1.046, 1.051,
+ 1.032, 1.029, 1.023, 1.021, 1.018, 1.015, 1.014, 1.014, 1.015, 1.018, 1.022, 1.027, 1.033, 1.041, 1.051, 1.054
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.025, 1.011, 1.009, 1.005, 1.004, 1.003, 1.001, 1.001, 1.002, 1.006, 1.009, 1.012, 1.016, 1.021, 1.031, 1.041,
+ 1.025, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.004, 1.007, 1.009, 1.013, 1.021, 1.028, 1.037, 1.041,
+ 1.023, 1.014, 1.009, 1.007, 1.005, 1.004, 1.003, 1.003, 1.005, 1.007, 1.011, 1.014, 1.021, 1.028, 1.037, 1.048,
+ 1.022, 1.012, 1.007, 1.005, 1.002, 1.001, 1.001, 1.001, 1.003, 1.005, 1.009, 1.014, 1.019, 1.028, 1.039, 1.048,
+ 1.022, 1.011, 1.006, 1.003, 1.001, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.014, 1.021, 1.029, 1.039, 1.051,
+ 1.022, 1.012, 1.007, 1.003, 1.002, 1.001, 1.001, 1.001, 1.002, 1.005, 1.009, 1.015, 1.021, 1.031, 1.041, 1.053,
+ 1.023, 1.013, 1.009, 1.005, 1.003, 1.003, 1.001, 1.002, 1.004, 1.006, 1.011, 1.015, 1.022, 1.031, 1.042, 1.056,
+ 1.024, 1.015, 1.012, 1.008, 1.005, 1.004, 1.004, 1.004, 1.006, 1.009, 1.013, 1.018, 1.024, 1.034, 1.045, 1.057,
+ 1.027, 1.017, 1.015, 1.012, 1.009, 1.007, 1.007, 1.008, 1.009, 1.013, 1.018, 1.023, 1.029, 1.038, 1.051, 1.061,
+ 1.029, 1.023, 1.017, 1.015, 1.014, 1.012, 1.011, 1.011, 1.014, 1.018, 1.024, 1.029, 1.036, 1.044, 1.056, 1.066,
+ 1.034, 1.028, 1.023, 1.022, 1.019, 1.019, 1.018, 1.018, 1.021, 1.025, 1.031, 1.035, 1.042, 1.053, 1.066, 1.074,
+ 1.041, 1.034, 1.027, 1.025, 1.025, 1.023, 1.023, 1.023, 1.025, 1.031, 1.035, 1.041, 1.049, 1.059, 1.074, 1.079
+ ]
+ }
+ ],
+ "calibrations_Cb": [
+ {
+ "ct": 3000,
+ "table":
+ [
+ 1.001, 1.001, 1.007, 1.015, 1.027, 1.034, 1.038, 1.041, 1.042, 1.043, 1.043, 1.043, 1.041, 1.039, 1.049, 1.054,
+ 1.011, 1.011, 1.013, 1.023, 1.032, 1.039, 1.044, 1.047, 1.052, 1.056, 1.059, 1.059, 1.055, 1.051, 1.054, 1.056,
+ 1.015, 1.015, 1.019, 1.032, 1.039, 1.044, 1.047, 1.052, 1.055, 1.059, 1.061, 1.066, 1.063, 1.058, 1.061, 1.064,
+ 1.016, 1.017, 1.023, 1.032, 1.041, 1.045, 1.048, 1.053, 1.056, 1.061, 1.066, 1.069, 1.067, 1.064, 1.065, 1.068,
+ 1.018, 1.019, 1.025, 1.033, 1.042, 1.045, 1.049, 1.054, 1.058, 1.063, 1.071, 1.072, 1.071, 1.068, 1.069, 1.071,
+ 1.023, 1.024, 1.029, 1.035, 1.043, 1.048, 1.052, 1.057, 1.061, 1.065, 1.074, 1.075, 1.075, 1.072, 1.072, 1.075,
+ 1.027, 1.028, 1.031, 1.038, 1.045, 1.051, 1.054, 1.059, 1.064, 1.068, 1.075, 1.079, 1.078, 1.075, 1.076, 1.081,
+ 1.029, 1.031, 1.033, 1.044, 1.048, 1.054, 1.059, 1.064, 1.067, 1.073, 1.079, 1.082, 1.082, 1.079, 1.081, 1.085,
+ 1.033, 1.033, 1.035, 1.047, 1.053, 1.058, 1.064, 1.067, 1.073, 1.079, 1.084, 1.086, 1.086, 1.084, 1.089, 1.091,
+ 1.037, 1.037, 1.038, 1.049, 1.057, 1.062, 1.068, 1.073, 1.079, 1.084, 1.089, 1.092, 1.092, 1.092, 1.096, 1.104,
+ 1.041, 1.041, 1.043, 1.051, 1.061, 1.068, 1.073, 1.079, 1.083, 1.089, 1.092, 1.094, 1.097, 1.099, 1.105, 1.115,
+ 1.048, 1.044, 1.044, 1.051, 1.063, 1.071, 1.076, 1.082, 1.088, 1.091, 1.094, 1.097, 1.099, 1.104, 1.115, 1.126
+ ]
+ },
+ {
+ "ct": 5000,
+ "table":
+ [
+ 1.001, 1.001, 1.005, 1.011, 1.014, 1.018, 1.019, 1.019, 1.019, 1.021, 1.021, 1.021, 1.019, 1.017, 1.014, 1.014,
+ 1.009, 1.009, 1.011, 1.014, 1.019, 1.024, 1.026, 1.029, 1.031, 1.032, 1.032, 1.031, 1.027, 1.023, 1.022, 1.022,
+ 1.011, 1.012, 1.015, 1.018, 1.024, 1.026, 1.029, 1.032, 1.035, 1.036, 1.036, 1.034, 1.031, 1.027, 1.025, 1.025,
+ 1.012, 1.013, 1.015, 1.019, 1.025, 1.029, 1.032, 1.035, 1.036, 1.038, 1.038, 1.036, 1.034, 1.029, 1.026, 1.026,
+ 1.013, 1.014, 1.016, 1.019, 1.027, 1.031, 1.034, 1.037, 1.039, 1.039, 1.041, 1.039, 1.036, 1.031, 1.028, 1.027,
+ 1.014, 1.014, 1.017, 1.021, 1.027, 1.033, 1.037, 1.039, 1.041, 1.041, 1.042, 1.042, 1.039, 1.033, 1.029, 1.028,
+ 1.015, 1.015, 1.018, 1.021, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.042, 1.042, 1.039, 1.034, 1.029, 1.029,
+ 1.015, 1.016, 1.018, 1.022, 1.027, 1.033, 1.037, 1.041, 1.041, 1.042, 1.043, 1.043, 1.041, 1.035, 1.031, 1.031,
+ 1.015, 1.016, 1.018, 1.022, 1.027, 1.032, 1.037, 1.041, 1.042, 1.042, 1.044, 1.043, 1.041, 1.036, 1.034, 1.033,
+ 1.016, 1.017, 1.017, 1.022, 1.027, 1.032, 1.036, 1.039, 1.042, 1.042, 1.043, 1.043, 1.041, 1.039, 1.036, 1.034,
+ 1.017, 1.017, 1.018, 1.022, 1.027, 1.031, 1.035, 1.039, 1.041, 1.042, 1.042, 1.042, 1.042, 1.039, 1.039, 1.039,
+ 1.018, 1.017, 1.017, 1.021, 1.027, 1.031, 1.033, 1.038, 1.041, 1.041, 1.042, 1.042, 1.041, 1.041, 1.041, 1.041
+ ]
+ }
+ ],
+ "luminance_lut":
+ [
+ 2.102, 1.903, 1.658, 1.483, 1.358, 1.267, 1.202, 1.202, 1.202, 1.242, 1.323, 1.431, 1.585, 1.797, 2.096, 2.351,
+ 1.996, 1.776, 1.549, 1.385, 1.273, 1.204, 1.138, 1.133, 1.133, 1.185, 1.252, 1.343, 1.484, 1.679, 1.954, 2.228,
+ 1.923, 1.689, 1.474, 1.318, 1.204, 1.138, 1.079, 1.071, 1.071, 1.133, 1.185, 1.284, 1.415, 1.597, 1.854, 2.146,
+ 1.881, 1.631, 1.423, 1.272, 1.159, 1.079, 1.051, 1.026, 1.046, 1.071, 1.144, 1.245, 1.369, 1.543, 1.801, 2.095,
+ 1.867, 1.595, 1.391, 1.242, 1.131, 1.051, 1.013, 1.002, 1.013, 1.046, 1.121, 1.219, 1.343, 1.511, 1.752, 2.079,
+ 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.001, 1.001, 1.003, 1.045, 1.118, 1.217, 1.342, 1.511, 1.746, 2.079,
+ 1.867, 1.589, 1.385, 1.236, 1.125, 1.048, 1.011, 1.003, 1.011, 1.046, 1.118, 1.217, 1.343, 1.511, 1.746, 2.079,
+ 1.884, 1.621, 1.411, 1.261, 1.149, 1.071, 1.048, 1.024, 1.046, 1.069, 1.141, 1.239, 1.369, 1.541, 1.781, 2.093,
+ 1.913, 1.675, 1.459, 1.304, 1.191, 1.125, 1.071, 1.065, 1.069, 1.124, 1.181, 1.278, 1.413, 1.592, 1.842, 2.133,
+ 1.981, 1.755, 1.529, 1.368, 1.251, 1.191, 1.125, 1.124, 1.124, 1.181, 1.242, 1.337, 1.479, 1.669, 1.935, 2.207,
+ 2.078, 1.867, 1.625, 1.453, 1.344, 1.251, 1.202, 1.201, 1.201, 1.242, 1.333, 1.418, 1.571, 1.776, 2.063, 2.321,
+ 2.217, 2.011, 1.747, 1.562, 1.431, 1.331, 1.278, 1.278, 1.278, 1.313, 1.407, 1.523, 1.686, 1.911, 2.226, 2.484
+ ],
+ "sigma": 0.00135,
+ "sigma_Cb": 0.00279
+ }
+ },
+ {
+ "rpi.contrast":
+ {
+ "ce_enable": 1,
+ "gamma_curve":
+ [
+ 0, 0,
+ 1024, 5040,
+ 2048, 9338,
+ 3072, 12356,
+ 4096, 15312,
+ 5120, 18051,
+ 6144, 20790,
+ 7168, 23193,
+ 8192, 25744,
+ 9216, 27942,
+ 10240, 30035,
+ 11264, 32005,
+ 12288, 33975,
+ 13312, 35815,
+ 14336, 37600,
+ 15360, 39168,
+ 16384, 40642,
+ 18432, 43379,
+ 20480, 45749,
+ 22528, 47753,
+ 24576, 49621,
+ 26624, 51253,
+ 28672, 52698,
+ 30720, 53796,
+ 32768, 54876,
+ 36864, 57012,
+ 40960, 58656,
+ 45056, 59954,
+ 49152, 61183,
+ 53248, 62355,
+ 57344, 63419,
+ 61440, 64476,
+ 65535, 65535
+ ]
+ }
+ },
+ {
+ "rpi.ccm":
+ {
+ "ccms": [
+ {
+ "ct": 2698,
+ "ccm":
+ [
+ 1.57227, -0.32596, -0.24631,
+ -0.61264, 1.70791, -0.09526,
+ -0.43254, 0.48489, 0.94765
+ ]
+ },
+ {
+ "ct": 2930,
+ "ccm":
+ [
+ 1.69455, -0.52724, -0.16731,
+ -0.67131, 1.78468, -0.11338,
+ -0.41609, 0.54693, 0.86916
+ ]
+ },
+ {
+ "ct": 3643,
+ "ccm":
+ [
+ 1.74041, -0.77553, 0.03512,
+ -0.44073, 1.34131, 0.09943,
+ -0.11035, -0.93919, 2.04954
+ ]
+ },
+ {
+ "ct": 4605,
+ "ccm":
+ [
+ 1.49865, -0.41638, -0.08227,
+ -0.39445, 1.70114, -0.30669,
+ 0.01319, -0.88009, 1.86689
+ ]
+ },
+ {
+ "ct": 5658,
+ "ccm":
+ [
+ 1.38601, -0.23128, -0.15472,
+ -0.37641, 1.70444, -0.32803,
+ -0.01575, -0.71466, 1.73041
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "rpi.sharpen": { }
+ }
+ ]
+}
diff --git a/src/ipa/rpi/vc4/data/meson.build b/src/ipa/rpi/vc4/data/meson.build
index 94c0ee6e..7a8001ee 100644
--- a/src/ipa/rpi/vc4/data/meson.build
+++ b/src/ipa/rpi/vc4/data/meson.build
@@ -9,6 +9,7 @@ conf_files = files([
'imx296_mono.json',
'imx327.json',
'imx378.json',
+ 'imx415.json',
'imx462.json',
'imx477.json',
'imx477_noir.json',
diff --git a/src/libcamera/base/thread.cpp b/src/libcamera/base/thread.cpp
index f6322fe3..319bfda9 100644
--- a/src/libcamera/base/thread.cpp
+++ b/src/libcamera/base/thread.cpp
@@ -257,6 +257,8 @@ void Thread::start()
data_->exit_.store(false, std::memory_order_relaxed);
thread_ = std::thread(&Thread::startThread, this);
+
+ setThreadAffinityInternal();
}
void Thread::startThread()
@@ -284,8 +286,6 @@ void Thread::startThread()
data_->tid_ = syscall(SYS_gettid);
currentThreadData = data_;
- setThreadAffinityInternal();
-
run();
}
diff --git a/src/libcamera/dma_buf_allocator.cpp b/src/libcamera/dma_buf_allocator.cpp
index 3cc52f96..a014c3b4 100644
--- a/src/libcamera/dma_buf_allocator.cpp
+++ b/src/libcamera/dma_buf_allocator.cpp
@@ -311,6 +311,18 @@ DmaSyncer::DmaSyncer(SharedFD fd, SyncType type)
sync(DMA_BUF_SYNC_START);
}
+/**
+ * \fn DmaSyncer::DmaSyncer(DmaSyncer &&other);
+ * \param[in] other The other instance
+ * \brief Enable move on class DmaSyncer
+ */
+
+/**
+ * \fn DmaSyncer::operator=(DmaSyncer &&other);
+ * \param[in] other The other instance
+ * \brief Enable move on class DmaSyncer
+ */
+
DmaSyncer::~DmaSyncer()
{
sync(DMA_BUF_SYNC_END);
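Note: the move operations documented above matter because DmaSyncer is a scope guard: the constructor issues DMA_BUF_SYNC_START and the destructor DMA_BUF_SYNC_END, so a move must transfer that responsibility without the moved-from object ending the sync early. A small sketch, assuming fd is a SharedFD wrapping a dma-buf and that a SyncType::ReadWrite enumerator is the desired access mode:

	{
		DmaSyncer sync(fd, DmaSyncer::SyncType::ReadWrite);

		/* CPU access to the mapped buffer happens here. */

		DmaSyncer owner = std::move(sync);
		/* Only 'owner' emits DMA_BUF_SYNC_END when it goes out of scope. */
	}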
diff --git a/src/libcamera/pipeline/virtual/image_frame_generator.cpp b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
index 277efbb0..d1545b5d 100644
--- a/src/libcamera/pipeline/virtual/image_frame_generator.cpp
+++ b/src/libcamera/pipeline/virtual/image_frame_generator.cpp
@@ -129,7 +129,7 @@ int ImageFrameGenerator::generateFrame(const Size &size, const FrameBuffer *buff
MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write);
- auto planes = mappedFrameBuffer.planes();
+ const auto &planes = mappedFrameBuffer.planes();
/* Loop only around the number of images available */
frameIndex_ %= imageFrameDatas_.size();
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.cpp b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
index 7bc2b338..745be83b 100644
--- a/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.cpp
@@ -7,12 +7,34 @@
#include "test_pattern_generator.h"
+#include <string.h>
+
#include <libcamera/base/log.h>
#include "libcamera/internal/mapped_framebuffer.h"
#include <libyuv/convert_from_argb.h>
+namespace {
+
+template<size_t SampleSize>
+void rotateLeft1Column(const libcamera::Size &size, uint8_t *image)
+{
+ if (size.width < 2)
+ return;
+
+ const size_t stride = size.width * SampleSize;
+ uint8_t first[SampleSize];
+
+ for (size_t i = 0; i < size.height; i++, image += stride) {
+ memcpy(first, &image[0], SampleSize);
+ memmove(&image[0], &image[SampleSize], stride - SampleSize);
+ memcpy(&image[stride - SampleSize], first, SampleSize);
+ }
+}
+
+} /* namespace */
+
namespace libcamera {
LOG_DECLARE_CATEGORY(Virtual)
@@ -25,9 +47,9 @@ int TestPatternGenerator::generateFrame(const Size &size,
MappedFrameBuffer mappedFrameBuffer(buffer,
MappedFrameBuffer::MapFlag::Write);
- auto planes = mappedFrameBuffer.planes();
+ const auto &planes = mappedFrameBuffer.planes();
- shiftLeft(size);
+ rotateLeft1Column<kARGBSize>(size, template_.get());
/* Convert the template_ to the frame buffer */
int ret = libyuv::ARGBToNV12(template_.get(), size.width * kARGBSize,
@@ -40,39 +62,6 @@ int TestPatternGenerator::generateFrame(const Size &size,
return ret;
}
-void TestPatternGenerator::shiftLeft(const Size &size)
-{
- /* Store the first column temporarily */
- auto firstColumn = std::make_unique<uint8_t[]>(size.height * kARGBSize);
- for (size_t h = 0; h < size.height; h++) {
- unsigned int index = h * size.width * kARGBSize;
- unsigned int index1 = h * kARGBSize;
- firstColumn[index1] = template_[index];
- firstColumn[index1 + 1] = template_[index + 1];
- firstColumn[index1 + 2] = template_[index + 2];
- firstColumn[index1 + 3] = 0x00;
- }
-
- /* Overwrite template_ */
- uint8_t *buf = template_.get();
- for (size_t h = 0; h < size.height; h++) {
- for (size_t w = 0; w < size.width - 1; w++) {
- /* Overwrite with the pixel on the right */
- unsigned int index = (h * size.width + w + 1) * kARGBSize;
- *buf++ = template_[index]; /* B */
- *buf++ = template_[index + 1]; /* G */
- *buf++ = template_[index + 2]; /* R */
- *buf++ = 0x00; /* A */
- }
- /* Overwrite the new last column with the original first column */
- unsigned int index1 = h * kARGBSize;
- *buf++ = firstColumn[index1]; /* B */
- *buf++ = firstColumn[index1 + 1]; /* G */
- *buf++ = firstColumn[index1 + 2]; /* R */
- *buf++ = 0x00; /* A */
- }
-}
-
void ColorBarsGenerator::configure(const Size &size)
{
constexpr uint8_t kColorBar[8][3] = {
diff --git a/src/libcamera/pipeline/virtual/test_pattern_generator.h b/src/libcamera/pipeline/virtual/test_pattern_generator.h
index 05f4ab7a..2a51bd31 100644
--- a/src/libcamera/pipeline/virtual/test_pattern_generator.h
+++ b/src/libcamera/pipeline/virtual/test_pattern_generator.h
@@ -29,10 +29,6 @@ public:
protected:
/* Buffer of test pattern template */
std::unique_ptr<uint8_t[]> template_;
-
-private:
- /* Shift the buffer by 1 pixel left each frame */
- void shiftLeft(const Size &size);
};
class ColorBarsGenerator : public TestPatternGenerator
diff --git a/src/libcamera/pipeline/virtual/virtual.cpp b/src/libcamera/pipeline/virtual/virtual.cpp
index cec8a85b..e692a543 100644
--- a/src/libcamera/pipeline/virtual/virtual.cpp
+++ b/src/libcamera/pipeline/virtual/virtual.cpp
@@ -275,11 +275,10 @@ int PipelineHandlerVirtual::exportFrameBuffers([[maybe_unused]] Camera *camera,
return -ENOBUFS;
const StreamConfiguration &config = stream->configuration();
-
- auto info = PixelFormatInfo::info(config.pixelFormat);
+ const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
std::vector<unsigned int> planeSizes;
- for (size_t i = 0; i < info.planes.size(); ++i)
+ for (size_t i = 0; i < info.numPlanes(); ++i)
planeSizes.push_back(info.planeSize(config.size, i));
return dmaBufAllocator_.exportBuffers(config.bufferCount, planeSizes, buffers);
diff --git a/src/libcamera/sensor/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp
index a131ac22..d19b5e2e 100644
--- a/src/libcamera/sensor/camera_sensor.cpp
+++ b/src/libcamera/sensor/camera_sensor.cpp
@@ -200,6 +200,73 @@ CameraSensor::~CameraSensor() = default;
*/
/**
+ * \brief Retrieve the image source stream
+ *
+ * Sensors that produce multiple streams do not guarantee that the image stream
+ * is always assigned number 0. This function allows callers to retrieve the
+ * image stream on the sensor's source pad, in order to configure the receiving
+ * side accordingly.
+ *
+ * \return The image source stream
+ */
+V4L2Subdevice::Stream CameraSensor::imageStream() const
+{
+ return { 0, 0 };
+}
+
+/**
+ * \brief Retrieve the embedded data source stream
+ *
+ * Some sensors produce embedded data in a stream separate from the image
+ * stream. This function indicates if the sensor supports this feature by
+ * returning the embedded data stream on the sensor's source pad if available,
+ * or an std::optional<> without a value otheriwse.
+ *
+ * \return The embedded data source stream
+ */
+std::optional<V4L2Subdevice::Stream> CameraSensor::embeddedDataStream() const
+{
+ return {};
+}
+
+/**
+ * \brief Retrieve the format on the embedded data stream
+ *
+ * When an embedded data stream is available, this function returns the
+ * corresponding format on the sensor's source pad. The format may vary with
+ * the image stream format, and should therefore be retrieved after configuring
+ * the image stream.
+ *
+ * If the sensor doesn't support embedded data, this function returns a
+ * default-constructed format.
+ *
+ * \return The format on the embedded data stream
+ */
+V4L2SubdeviceFormat CameraSensor::embeddedDataFormat() const
+{
+ return {};
+}
+
+/**
+ * \brief Enable or disable the embedded data stream
+ * \param[in] enable True to enable the embedded data stream, false to disable it
+ *
+ * For sensors that support embedded data, this function enables or disables
+ * generation of embedded data. Some of such sensors always produce embedded
+ * data, in which case this function return -EISCONN if the caller attempts to
+ * disable embedded data.
+ *
+ * If the sensor doesn't support embedded data, this function returns 0 when \a
+ * enable is false, and -ENOSTR otherwise.
+ *
+ * \return 0 on success, or a negative error code otherwise
+ */
+int CameraSensor::setEmbeddedDataEnabled(bool enable)
+{
+ return enable ? -ENOSTR : 0;
+}
+
+/**
* \fn CameraSensor::properties()
* \brief Retrieve the camera sensor properties
* \return The list of camera sensor properties
diff --git a/src/libcamera/sensor/camera_sensor_properties.cpp b/src/libcamera/sensor/camera_sensor_properties.cpp
index bd1fc869..e2f518f9 100644
--- a/src/libcamera/sensor/camera_sensor_properties.cpp
+++ b/src/libcamera/sensor/camera_sensor_properties.cpp
@@ -232,7 +232,12 @@ const CameraSensorProperties *CameraSensorProperties::get(const std::string &sen
{ "imx415", {
.unitCellSize = { 1450, 1450 },
.testPatternModes = {},
- .sensorDelays = { },
+ .sensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 2,
+ .vblankDelay = 2,
+ .hblankDelay = 2
+ },
} },
{ "imx462", {
.unitCellSize = { 2900, 2900 },
diff --git a/src/libcamera/sensor/camera_sensor_raw.cpp b/src/libcamera/sensor/camera_sensor_raw.cpp
new file mode 100644
index 00000000..ab75b1f8
--- /dev/null
+++ b/src/libcamera/sensor/camera_sensor_raw.cpp
@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Ideas on Board Oy.
+ *
+ * camera_sensor_raw.cpp - A raw camera sensor using the V4L2 streams API
+ */
+
+#include <algorithm>
+#include <cmath>
+#include <float.h>
+#include <iomanip>
+#include <limits.h>
+#include <map>
+#include <memory>
+#include <optional>
+#include <string.h>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/class.h>
+#include <libcamera/base/log.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/camera.h>
+#include <libcamera/control_ids.h>
+#include <libcamera/controls.h>
+#include <libcamera/geometry.h>
+#include <libcamera/orientation.h>
+#include <libcamera/property_ids.h>
+#include <libcamera/transform.h>
+
+#include <libcamera/ipa/core_ipa_interface.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/camera_sensor_properties.h"
+#include "libcamera/internal/formats.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/sysfs.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+namespace libcamera {
+
+class BayerFormat;
+class CameraLens;
+class MediaEntity;
+class SensorConfiguration;
+
+struct CameraSensorProperties;
+
+enum class Orientation;
+
+LOG_DECLARE_CATEGORY(CameraSensor)
+
+class CameraSensorRaw : public CameraSensor, protected Loggable
+{
+public:
+ CameraSensorRaw(const MediaEntity *entity);
+ ~CameraSensorRaw();
+
+ static std::variant<std::unique_ptr<CameraSensor>, int>
+ match(MediaEntity *entity);
+
+ const std::string &model() const override { return model_; }
+ const std::string &id() const override { return id_; }
+
+ const MediaEntity *entity() const override { return entity_; }
+ V4L2Subdevice *device() override { return subdev_.get(); }
+
+ CameraLens *focusLens() override { return focusLens_.get(); }
+
+ const std::vector<unsigned int> &mbusCodes() const override { return mbusCodes_; }
+ std::vector<Size> sizes(unsigned int mbusCode) const override;
+ Size resolution() const override;
+
+ V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size,
+ const Size maxSize) const override;
+ int setFormat(V4L2SubdeviceFormat *format,
+ Transform transform = Transform::Identity) override;
+ int tryFormat(V4L2SubdeviceFormat *format) const override;
+
+ int applyConfiguration(const SensorConfiguration &config,
+ Transform transform = Transform::Identity,
+ V4L2SubdeviceFormat *sensorFormat = nullptr) override;
+
+ V4L2Subdevice::Stream imageStream() const override;
+ std::optional<V4L2Subdevice::Stream> embeddedDataStream() const override;
+ V4L2SubdeviceFormat embeddedDataFormat() const override;
+ int setEmbeddedDataEnabled(bool enable) override;
+
+ const ControlList &properties() const override { return properties_; }
+ int sensorInfo(IPACameraSensorInfo *info) const override;
+ Transform computeTransform(Orientation *orientation) const override;
+ BayerFormat::Order bayerOrder(Transform t) const override;
+
+ const ControlInfoMap &controls() const override;
+ ControlList getControls(const std::vector<uint32_t> &ids) override;
+ int setControls(ControlList *ctrls) override;
+
+ const std::vector<controls::draft::TestPatternModeEnum> &
+ testPatternModes() const override { return testPatternModes_; }
+ int setTestPatternMode(controls::draft::TestPatternModeEnum mode) override;
+ const CameraSensorProperties::SensorDelays &sensorDelays() override;
+
+protected:
+ std::string logPrefix() const override;
+
+private:
+ LIBCAMERA_DISABLE_COPY(CameraSensorRaw)
+
+ std::optional<int> init();
+ int initProperties();
+ void initStaticProperties();
+ void initTestPatternModes();
+ int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
+
+ const MediaEntity *entity_;
+ std::unique_ptr<V4L2Subdevice> subdev_;
+
+ struct Streams {
+ V4L2Subdevice::Stream sink;
+ V4L2Subdevice::Stream source;
+ };
+
+ struct {
+ Streams image;
+ std::optional<Streams> edata;
+ } streams_;
+
+ const CameraSensorProperties *staticProps_;
+
+ std::string model_;
+ std::string id_;
+
+ V4L2Subdevice::Formats formats_;
+ std::vector<unsigned int> mbusCodes_;
+ std::vector<Size> sizes_;
+ std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
+ controls::draft::TestPatternModeEnum testPatternMode_;
+
+ Size pixelArraySize_;
+ Rectangle activeArea_;
+ BayerFormat::Order cfaPattern_;
+ bool supportFlips_;
+ bool flipsAlterBayerOrder_;
+ Orientation mountingOrientation_;
+
+ ControlList properties_;
+
+ std::unique_ptr<CameraLens> focusLens_;
+};
+
+/**
+ * \class CameraSensorRaw
+ * \brief A camera sensor based on V4L2 subdevices
+ *
+ * This class supports single-subdev sensors with a single source pad and one
+ * or two internal sink pads (for the image and embedded data streams).
+ */
+
+CameraSensorRaw::CameraSensorRaw(const MediaEntity *entity)
+ : entity_(entity), staticProps_(nullptr), supportFlips_(false),
+ flipsAlterBayerOrder_(false), properties_(properties::properties)
+{
+}
+
+CameraSensorRaw::~CameraSensorRaw() = default;
+
+std::variant<std::unique_ptr<CameraSensor>, int>
+CameraSensorRaw::match(MediaEntity *entity)
+{
+ /* Check the entity type. */
+ if (entity->type() != MediaEntity::Type::V4L2Subdevice ||
+ entity->function() != MEDIA_ENT_F_CAM_SENSOR) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported entity type ("
+ << utils::to_underlying(entity->type())
+ << ") or function (" << utils::hex(entity->function()) << ")";
+ return { 0 };
+ }
+
+ /* Count and check the number of pads. */
+ static constexpr uint32_t kPadFlagsMask = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_INTERNAL;
+ unsigned int numSinks = 0;
+ unsigned int numSources = 0;
+
+ for (const MediaPad *pad : entity->pads()) {
+ switch (pad->flags() & kPadFlagsMask) {
+ case MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_INTERNAL:
+ numSinks++;
+ break;
+
+ case MEDIA_PAD_FL_SOURCE:
+ numSources++;
+ break;
+
+ default:
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported pad " << pad->index()
+ << " type " << utils::hex(pad->flags());
+ return { 0 };
+ }
+ }
+
+ if (numSinks < 1 || numSinks > 2 || numSources != 1) {
+ libcamera::LOG(CameraSensor, Debug)
+ << entity->name() << ": unsupported number of sinks ("
+ << numSinks << ") or sources (" << numSources << ")";
+ return { 0 };
+ }
+
+ /*
+ * The entity matches. Create the camera sensor and initialize it. The
+ * init() function will perform further match checks.
+ */
+ std::unique_ptr<CameraSensorRaw> sensor =
+ std::make_unique<CameraSensorRaw>(entity);
+
+ std::optional<int> err = sensor->init();
+ if (err)
+ return { *err };
+
+ return { std::move(sensor) };
+}
+
+std::optional<int> CameraSensorRaw::init()
+{
+ /* Create and open the subdev. */
+ subdev_ = std::make_unique<V4L2Subdevice>(entity_);
+ int ret = subdev_->open();
+ if (ret)
+ return { ret };
+
+ /*
+ * 1. Identify the pads.
+ */
+
+ /*
+ * First locate the source pad. The match() function guarantees there
+ * is one and only one source pad.
+ */
+ unsigned int sourcePad = UINT_MAX;
+
+ for (const MediaPad *pad : entity_->pads()) {
+ if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+ sourcePad = pad->index();
+ break;
+ }
+ }
+
+ /*
+ * Iterate over the routes to identify the streams on the source pad,
+ * and the internal sink pads.
+ */
+ V4L2Subdevice::Routing routing = {};
+ ret = subdev_->getRouting(&routing, V4L2Subdevice::TryFormat);
+ if (ret)
+ return { ret };
+
+ bool imageStreamFound = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source.pad != sourcePad) {
+ LOG(CameraSensor, Error) << "Invalid route " << route;
+ return { -EINVAL };
+ }
+
+ /* Identify the stream type based on the supported formats. */
+ V4L2Subdevice::Formats formats = subdev_->formats(route.source);
+
+ std::optional<MediaBusFormatInfo::Type> type;
+
+ for (const auto &[code, sizes] : formats) {
+ const MediaBusFormatInfo &info =
+ MediaBusFormatInfo::info(code);
+ if (info.isValid()) {
+ type = info.type;
+ break;
+ }
+ }
+
+ if (!type) {
+ LOG(CameraSensor, Warning)
+ << "No known format on pad " << route.source;
+ continue;
+ }
+
+ switch (*type) {
+ case MediaBusFormatInfo::Type::Image:
+ if (imageStreamFound) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal image streams ("
+ << streams_.image.sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ imageStreamFound = true;
+ streams_.image.sink = route.sink;
+ streams_.image.source = route.source;
+ break;
+
+ case MediaBusFormatInfo::Type::Metadata:
+ /*
+ * Skip metadata streams that are not sensor embedded
+ * data. The source stream reports a generic metadata
+ * format, check the sink stream for the exact format.
+ */
+ formats = subdev_->formats(route.sink);
+ if (formats.size() != 1)
+ continue;
+
+ if (MediaBusFormatInfo::info(formats.cbegin()->first).type !=
+ MediaBusFormatInfo::Type::EmbeddedData)
+ continue;
+
+ if (streams_.edata) {
+ LOG(CameraSensor, Error)
+ << "Multiple internal embedded data streams ("
+ << streams_.edata->sink << " and "
+ << route.sink << ")";
+ return { -EINVAL };
+ }
+
+ streams_.edata = { route.sink, route.source };
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!imageStreamFound) {
+ LOG(CameraSensor, Error) << "No image stream found";
+ return { -EINVAL };
+ }
+
+ LOG(CameraSensor, Debug)
+ << "Found image stream " << streams_.image.sink
+ << " -> " << streams_.image.source;
+
+ if (streams_.edata)
+ LOG(CameraSensor, Debug)
+ << "Found embedded data stream " << streams_.edata->sink
+ << " -> " << streams_.edata->source;
+
+ /*
+ * 2. Enumerate and cache the media bus codes, sizes and colour filter
+ * array order for the image stream.
+ */
+
+ /*
+ * Get the native sensor CFA pattern. It is simpler to retrieve it from
+ * the internal image sink pad as it is guaranteed to expose a single
+ * format, and is not affected by flips.
+ */
+ V4L2Subdevice::Formats formats = subdev_->formats(streams_.image.sink);
+ if (formats.size() != 1) {
+ LOG(CameraSensor, Error)
+ << "Image pad has " << formats.size()
+ << " formats, expected 1";
+ return { -EINVAL };
+ }
+
+ uint32_t nativeFormat = formats.cbegin()->first;
+ const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(nativeFormat);
+ if (!bayerFormat.isValid()) {
+ LOG(CameraSensor, Error)
+ << "Invalid native format " << nativeFormat;
+ return { 0 };
+ }
+
+ cfaPattern_ = bayerFormat.order;
+
+ /*
+ * Retrieve and cache the media bus codes and sizes on the source image
+ * stream.
+ */
+ formats_ = subdev_->formats(streams_.image.source);
+ if (formats_.empty()) {
+ LOG(CameraSensor, Error) << "No image format found";
+ return { -EINVAL };
+ }
+
+ /* Populate and sort the media bus codes and the sizes. */
+ for (const auto &[code, ranges] : formats_) {
+ /* Drop non-raw formats (in case we have a hybrid sensor). */
+ const MediaBusFormatInfo &info = MediaBusFormatInfo::info(code);
+ if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW)
+ continue;
+
+ mbusCodes_.push_back(code);
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
+ [](const SizeRange &range) { return range.max; });
+ }
+
+ if (mbusCodes_.empty()) {
+ LOG(CameraSensor, Debug) << "No raw image formats found";
+ return { 0 };
+ }
+
+ std::sort(mbusCodes_.begin(), mbusCodes_.end());
+ std::sort(sizes_.begin(), sizes_.end());
+
+ /*
+ * Remove duplicate sizes. There are no duplicate media bus codes as
+ * they are the keys in the formats map.
+ */
+ auto last = std::unique(sizes_.begin(), sizes_.end());
+ sizes_.erase(last, sizes_.end());
+
+ /*
+ * 3. Query selection rectangles. Retrieve properties, and verify that
+ * all the expected selection rectangles are supported.
+ */
+
+ Rectangle rect;
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_BOUNDS,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop bounds";
+ return { ret };
+ }
+
+ pixelArraySize_ = rect.size();
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP_DEFAULT,
+ &activeArea_);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop default";
+ return { ret };
+ }
+
+ ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &rect);
+ if (ret) {
+ LOG(CameraSensor, Error) << "No pixel array crop rectangle";
+ return { ret };
+ }
+
+ /*
+ * 4. Verify that all required controls are present.
+ */
+
+ const ControlIdMap &controls = subdev_->controls().idmap();
+
+ static constexpr uint32_t mandatoryControls[] = {
+ V4L2_CID_ANALOGUE_GAIN,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CID_EXPOSURE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_VBLANK,
+ };
+
+ ret = 0;
+
+ for (uint32_t ctrl : mandatoryControls) {
+ if (!controls.count(ctrl)) {
+ LOG(CameraSensor, Error)
+ << "Mandatory V4L2 control " << utils::hex(ctrl)
+ << " not available";
+ ret = -EINVAL;
+ }
+ }
+
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "The sensor kernel driver needs to be fixed";
+ LOG(CameraSensor, Error)
+ << "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
+ return { ret };
+ }
+
+ /*
+ * Verify if sensor supports horizontal/vertical flips
+ *
+ * \todo Handle horizontal and vertical flips independently.
+ */
+ const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
+ const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
+ if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
+ vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ supportFlips_ = true;
+
+ if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
+ vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
+ flipsAlterBayerOrder_ = true;
+ }
+
+ if (!supportFlips_)
+ LOG(CameraSensor, Debug)
+ << "Camera sensor does not support horizontal/vertical flip";
+
+ /*
+ * 5. Discover ancillary devices.
+ *
+ * \todo This code may be shared by different V4L2 sensor classes.
+ */
+ for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
+ switch (ancillary->function()) {
+ case MEDIA_ENT_F_LENS:
+ focusLens_ = std::make_unique<CameraLens>(ancillary);
+ ret = focusLens_->init();
+ if (ret) {
+ LOG(CameraSensor, Error)
+ << "Lens initialisation failed, lens disabled";
+ focusLens_.reset();
+ }
+ break;
+
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported ancillary entity function "
+ << ancillary->function();
+ break;
+ }
+ }
+
+ /*
+ * 6. Initialize properties.
+ */
+
+ ret = initProperties();
+ if (ret)
+ return { ret };
+
+ /*
+ * 7. Initialize controls.
+ */
+
+ /*
+ * Set HBLANK to the minimum to start with a well-defined line length,
+ * allowing IPA modules that do not modify HBLANK to use the sensor
+ * minimum line length in their calculations.
+ */
+ const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
+ if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ControlList ctrl(subdev_->controls());
+
+ ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
+ ret = subdev_->setControls(&ctrl);
+ if (ret)
+ return ret;
+ }
+
+ ret = applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
+ if (ret)
+ return { ret };
+
+ return {};
+}
+
+int CameraSensorRaw::initProperties()
+{
+ model_ = subdev_->model();
+ properties_.set(properties::Model, utils::toAscii(model_));
+
+ /* Generate a unique ID for the sensor. */
+ id_ = sysfs::firmwareNodePath(subdev_->devicePath());
+ if (id_.empty()) {
+ LOG(CameraSensor, Error) << "Can't generate sensor ID";
+ return -EINVAL;
+ }
+
+ /* Initialize the static properties from the sensor database. */
+ initStaticProperties();
+
+ /* Retrieve and register properties from the kernel interface. */
+ const ControlInfoMap &controls = subdev_->controls();
+
+ const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
+ if (orientation != controls.end()) {
+ int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
+ int32_t propertyValue;
+
+ switch (v4l2Orientation) {
+ default:
+ LOG(CameraSensor, Warning)
+ << "Unsupported camera location "
+ << v4l2Orientation << ", setting to External";
+ [[fallthrough]];
+ case V4L2_CAMERA_ORIENTATION_EXTERNAL:
+ propertyValue = properties::CameraLocationExternal;
+ break;
+ case V4L2_CAMERA_ORIENTATION_FRONT:
+ propertyValue = properties::CameraLocationFront;
+ break;
+ case V4L2_CAMERA_ORIENTATION_BACK:
+ propertyValue = properties::CameraLocationBack;
+ break;
+ }
+ properties_.set(properties::Location, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
+ }
+
+ const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
+ if (rotationControl != controls.end()) {
+ int32_t propertyValue = rotationControl->second.def().get<int32_t>();
+
+ /*
+ * Cache the Transform associated with the camera mounting
+ * rotation for later use in computeTransform().
+ */
+ bool success;
+ mountingOrientation_ = orientationFromRotation(propertyValue, &success);
+ if (!success) {
+ LOG(CameraSensor, Warning)
+ << "Invalid rotation of " << propertyValue
+ << " degrees - ignoring";
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::Rotation, propertyValue);
+ } else {
+ LOG(CameraSensor, Warning)
+ << "Rotation control not available, default to 0 degrees";
+ properties_.set(properties::Rotation, 0);
+ mountingOrientation_ = Orientation::Rotate0;
+ }
+
+ properties_.set(properties::PixelArraySize, pixelArraySize_);
+ properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
+
+ /* Color filter array pattern. */
+ uint32_t cfa;
+
+ switch (cfaPattern_) {
+ case BayerFormat::BGGR:
+ cfa = properties::draft::BGGR;
+ break;
+ case BayerFormat::GBRG:
+ cfa = properties::draft::GBRG;
+ break;
+ case BayerFormat::GRBG:
+ cfa = properties::draft::GRBG;
+ break;
+ case BayerFormat::RGGB:
+ cfa = properties::draft::RGGB;
+ break;
+ case BayerFormat::MONO:
+ default:
+ cfa = properties::draft::MONO;
+ break;
+ }
+
+ properties_.set(properties::draft::ColorFilterArrangement, cfa);
+
+ return 0;
+}
+
+void CameraSensorRaw::initStaticProperties()
+{
+ staticProps_ = CameraSensorProperties::get(model_);
+ if (!staticProps_)
+ return;
+
+ /* Register the properties retrieved from the sensor database. */
+ properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
+
+ initTestPatternModes();
+}
+
+const CameraSensorProperties::SensorDelays &CameraSensorRaw::sensorDelays()
+{
+ static constexpr CameraSensorProperties::SensorDelays defaultSensorDelays = {
+ .exposureDelay = 2,
+ .gainDelay = 1,
+ .vblankDelay = 2,
+ .hblankDelay = 2,
+ };
+
+ if (!staticProps_ ||
+ (!staticProps_->sensorDelays.exposureDelay &&
+ !staticProps_->sensorDelays.gainDelay &&
+ !staticProps_->sensorDelays.vblankDelay &&
+ !staticProps_->sensorDelays.hblankDelay)) {
+ LOG(CameraSensor, Warning)
+ << "No sensor delays found in static properties. "
+ "Assuming unverified defaults.";
+
+ return defaultSensorDelays;
+ }
+
+ return staticProps_->sensorDelays;
+}
+
+void CameraSensorRaw::initTestPatternModes()
+{
+ const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
+ if (v4l2TestPattern == controls().end()) {
+ LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
+ return;
+ }
+
+ const auto &testPatternModes = staticProps_->testPatternModes;
+ if (testPatternModes.empty()) {
+ /*
+ * The camera sensor supports test patterns but we don't know
+ * how to map them so this should be fixed.
+ */
+ LOG(CameraSensor, Debug) << "No static test pattern map for \'"
+ << model() << "\'";
+ return;
+ }
+
+ /*
+ * Create a map that associates the V4L2 control index to the test
+ * pattern mode by reversing the testPatternModes map provided by the
+ * camera sensor properties. This makes it easier to verify if the
+ * control index is supported in the below for loop that creates the
+ * list of supported test patterns.
+ */
+ std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
+ for (const auto &it : testPatternModes)
+ indexToTestPatternMode[it.second] = it.first;
+
+ for (const ControlValue &value : v4l2TestPattern->second.values()) {
+ const int32_t index = value.get<int32_t>();
+
+ const auto it = indexToTestPatternMode.find(index);
+ if (it == indexToTestPatternMode.end()) {
+ LOG(CameraSensor, Debug)
+ << "Test pattern mode " << index << " ignored";
+ continue;
+ }
+
+ testPatternModes_.push_back(it->second);
+ }
+}
+
+std::vector<Size> CameraSensorRaw::sizes(unsigned int mbusCode) const
+{
+ std::vector<Size> sizes;
+
+ const auto &format = formats_.find(mbusCode);
+ if (format == formats_.end())
+ return sizes;
+
+ const std::vector<SizeRange> &ranges = format->second;
+ std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
+ [](const SizeRange &range) { return range.max; });
+
+ std::sort(sizes.begin(), sizes.end());
+
+ return sizes;
+}
+
+Size CameraSensorRaw::resolution() const
+{
+ return std::min(sizes_.back(), activeArea_.size());
+}
+
+V4L2SubdeviceFormat
+CameraSensorRaw::getFormat(const std::vector<unsigned int> &mbusCodes,
+ const Size &size, Size maxSize) const
+{
+ unsigned int desiredArea = size.width * size.height;
+ unsigned int bestArea = UINT_MAX;
+ float desiredRatio = static_cast<float>(size.width) / size.height;
+ float bestRatio = FLT_MAX;
+ const Size *bestSize = nullptr;
+ uint32_t bestCode = 0;
+
+ for (unsigned int code : mbusCodes) {
+ const auto formats = formats_.find(code);
+ if (formats == formats_.end())
+ continue;
+
+ for (const SizeRange &range : formats->second) {
+ const Size &sz = range.max;
+
+ if (!maxSize.isNull() &&
+ (sz.width > maxSize.width || sz.height > maxSize.height))
+ continue;
+
+ if (sz.width < size.width || sz.height < size.height)
+ continue;
+
+ float ratio = static_cast<float>(sz.width) / sz.height;
+ float ratioDiff = std::abs(ratio - desiredRatio);
+ unsigned int area = sz.width * sz.height;
+ unsigned int areaDiff = area - desiredArea;
+
+ if (ratioDiff > bestRatio)
+ continue;
+
+ if (ratioDiff < bestRatio || areaDiff < bestArea) {
+ bestRatio = ratioDiff;
+ bestArea = areaDiff;
+ bestSize = &sz;
+ bestCode = code;
+ }
+ }
+ }
+
+ if (!bestSize) {
+ LOG(CameraSensor, Debug) << "No supported format or size found";
+ return {};
+ }
+
+ V4L2SubdeviceFormat format{
+ .code = bestCode,
+ .size = *bestSize,
+ .colorSpace = ColorSpace::Raw,
+ };
+
+ return format;
+}
+
+int CameraSensorRaw::setFormat(V4L2SubdeviceFormat *format, Transform transform)
+{
+ /* Configure flips if the sensor supports that. */
+ if (supportFlips_) {
+ ControlList flipCtrls(subdev_->controls());
+
+ flipCtrls.set(V4L2_CID_HFLIP,
+ static_cast<int32_t>(!!(transform & Transform::HFlip)));
+ flipCtrls.set(V4L2_CID_VFLIP,
+ static_cast<int32_t>(!!(transform & Transform::VFlip)));
+
+ int ret = subdev_->setControls(&flipCtrls);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply format on the subdev. */
+ int ret = subdev_->setFormat(streams_.image.source, format);
+ if (ret)
+ return ret;
+
+ subdev_->updateControlInfo();
+ return 0;
+}
+
+int CameraSensorRaw::tryFormat(V4L2SubdeviceFormat *format) const
+{
+ return subdev_->setFormat(streams_.image.source, format,
+ V4L2Subdevice::Whence::TryFormat);
+}
+
+int CameraSensorRaw::applyConfiguration(const SensorConfiguration &config,
+ Transform transform,
+ V4L2SubdeviceFormat *sensorFormat)
+{
+ if (!config.isValid()) {
+ LOG(CameraSensor, Error) << "Invalid sensor configuration";
+ return -EINVAL;
+ }
+
+ std::vector<unsigned int> filteredCodes;
+ std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
+ std::back_inserter(filteredCodes),
+ [&config](unsigned int mbusCode) {
+ BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
+ return bayer.bitDepth == config.bitDepth;
+ });
+ if (filteredCodes.empty()) {
+ LOG(CameraSensor, Error)
+ << "Cannot find any format with bit depth "
+ << config.bitDepth;
+ return -EINVAL;
+ }
+
+ /*
+ * Compute the sensor's data frame size by applying the cropping
+ * rectangle, subsampling and output crop to the sensor's pixel array
+ * size.
+ *
+ * \todo The actual size computation is for now ignored and only the
+ * output size is considered. This implies that resolutions obtained
+ * with two different cropping/subsampling settings will look identical
+ * and only the first one found will be considered.
+ */
+ V4L2SubdeviceFormat subdevFormat = {};
+ for (unsigned int code : filteredCodes) {
+ for (const Size &size : sizes(code)) {
+ if (size.width != config.outputSize.width ||
+ size.height != config.outputSize.height)
+ continue;
+
+ subdevFormat.code = code;
+ subdevFormat.size = size;
+ break;
+ }
+ }
+ if (!subdevFormat.code) {
+ LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
+ return -EINVAL;
+ }
+
+ int ret = setFormat(&subdevFormat, transform);
+ if (ret)
+ return ret;
+
+ /*
+ * Return to the caller the format actually applied to the sensor.
+ * This is relevant if transform has changed the bayer pattern order.
+ */
+ if (sensorFormat)
+ *sensorFormat = subdevFormat;
+
+ /* \todo Handle AnalogCrop. Most sensors do not support set_selection */
+ /* \todo Handle scaling in the digital domain. */
+
+ return 0;
+}
+
+V4L2Subdevice::Stream CameraSensorRaw::imageStream() const
+{
+ return streams_.image.source;
+}
+
+std::optional<V4L2Subdevice::Stream> CameraSensorRaw::embeddedDataStream() const
+{
+ if (!streams_.edata)
+ return {};
+
+ return { streams_.edata->source };
+}
+
+V4L2SubdeviceFormat CameraSensorRaw::embeddedDataFormat() const
+{
+ if (!streams_.edata)
+ return {};
+
+ V4L2SubdeviceFormat format;
+ int ret = subdev_->getFormat(streams_.edata->source, &format);
+ if (ret)
+ return {};
+
+ return format;
+}
+
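+/*
+ * Enable or disable the embedded data stream by programming the subdevice
+ * routing table: the image route is always active, while the embedded data
+ * route is activated only when requested. The applied routing is then
+ * inspected to confirm the kernel accepted the change.
+ *
+ * A minimal usage sketch (assuming a configured 'sensor' instance):
+ *
+ *   if (sensor->embeddedDataStream())
+ *       sensor->setEmbeddedDataEnabled(true);
+ */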
+int CameraSensorRaw::setEmbeddedDataEnabled(bool enable)
+{
+ if (!streams_.edata)
+ return enable ? -ENOSTR : 0;
+
+ V4L2Subdevice::Routing routing{ 2 };
+
+ routing[0].sink = streams_.image.sink;
+ routing[0].source = streams_.image.source;
+ routing[0].flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+
+ routing[1].sink = streams_.edata->sink;
+ routing[1].source = streams_.edata->source;
+ routing[1].flags = enable ? V4L2_SUBDEV_ROUTE_FL_ACTIVE : 0;
+
+ int ret = subdev_->setRouting(&routing);
+ if (ret)
+ return ret;
+
+ /*
+ * Check if the embedded data stream has been enabled or disabled
+ * correctly. Assume at least one route will match the embedded data
+ * source stream, as there would be something seriously wrong
+ * otherwise.
+ */
+ bool enabled = false;
+
+ for (const V4L2Subdevice::Route &route : routing) {
+ if (route.source != streams_.edata->source)
+ continue;
+
+ enabled = route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE;
+ break;
+ }
+
+ if (enabled != enable)
+ return enabled ? -EISCONN : -ENOSTR;
+
+ return 0;
+}
+
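+/*
+ * Assemble the sensor parameters needed by the IPA: active area size,
+ * analogue crop rectangle, output size and bit depth, CFA pattern, pixel
+ * rate and the line/frame length limits derived from the blanking controls.
+ */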
+int CameraSensorRaw::sensorInfo(IPACameraSensorInfo *info) const
+{
+ info->model = model();
+
+ /*
+ * The active area size is a static property, while the crop
+ * rectangle needs to be re-read as it depends on the sensor
+ * configuration.
+ */
+ info->activeAreaSize = { activeArea_.width, activeArea_.height };
+
+ int ret = subdev_->getSelection(streams_.image.sink, V4L2_SEL_TGT_CROP,
+ &info->analogCrop);
+ if (ret)
+ return ret;
+
+ /*
+ * IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
+ * are defined relative to the active pixel area, while V4L2's
+ * TGT_CROP target is defined with respect to the full pixel array.
+ *
+ * Compensate for this by subtracting the active area offset.
+ */
+ info->analogCrop.x -= activeArea_.x;
+ info->analogCrop.y -= activeArea_.y;
+
+ /* The bit depth and image size depend on the currently applied format. */
+ V4L2SubdeviceFormat format{};
+ ret = subdev_->getFormat(streams_.image.source, &format);
+ if (ret)
+ return ret;
+ info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
+ info->outputSize = format.size;
+
+ std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
+ info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
+
+ /*
+ * Retrieve the pixel rate, line length and minimum/maximum frame
+ * duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
+ * V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
+ */
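+ /*
+ * Note (illustrative): IPA modules typically derive the achievable
+ * frame durations from these limits as lineLength * frameLength /
+ * pixelRate.
+ */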
+ ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
+ V4L2_CID_HBLANK,
+ V4L2_CID_VBLANK });
+ if (ctrls.empty()) {
+ LOG(CameraSensor, Error)
+ << "Failed to retrieve camera info controls";
+ return -EINVAL;
+ }
+
+ info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
+
+ const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
+ info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
+ info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
+
+ const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
+ info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
+ info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
+
+ return 0;
+}
+
+Transform CameraSensorRaw::computeTransform(Orientation *orientation) const
+{
+ /*
+ * If the sensor cannot do any flips, we cannot change the native camera
+ * mounting orientation.
+ */
+ if (!supportFlips_) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ /*
+ * Now compute the required transform to obtain 'orientation' starting
+ * from the mounting rotation.
+ *
+ * As a note:
+ * orientation / mountingOrientation_ = transform
+ * mountingOrientation_ * transform = orientation
+ */
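+ /*
+ * For example (illustrative), a sensor mounted upside-down
+ * (Orientation::Rotate180) that is asked to produce an upright
+ * (Orientation::Rotate0) image requires Transform::Rotate180, i.e.
+ * both HFlip and VFlip.
+ */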
+ Transform transform = *orientation / mountingOrientation_;
+
+ /*
+ * If the transform contains a Transpose, the sensor cannot perform it,
+ * so adjust 'orientation' to report the image's native orientation and
+ * return Identity.
+ */
+ if (!!(transform & Transform::Transpose)) {
+ *orientation = mountingOrientation_;
+ return Transform::Identity;
+ }
+
+ return transform;
+}
+
+BayerFormat::Order CameraSensorRaw::bayerOrder(Transform t) const
+{
+ if (!flipsAlterBayerOrder_)
+ return cfaPattern_;
+
+ /*
+ * Apply the transform to the native (i.e. untransformed) Bayer order.
+ * The 8-bit depth and packing are placeholders, as only the resulting
+ * order matters here.
+ */
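+ /*
+ * For instance (illustrative), a native RGGB order combined with
+ * Transform::HFlip yields a GRBG order.
+ */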
+ BayerFormat format{ cfaPattern_, 8, BayerFormat::Packing::None };
+ return format.transform(t).order;
+}
+
+const ControlInfoMap &CameraSensorRaw::controls() const
+{
+ return subdev_->controls();
+}
+
+ControlList CameraSensorRaw::getControls(const std::vector<uint32_t> &ids)
+{
+ return subdev_->getControls(ids);
+}
+
+int CameraSensorRaw::setControls(ControlList *ctrls)
+{
+ return subdev_->setControls(ctrls);
+}
+
+int CameraSensorRaw::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternMode_ == mode)
+ return 0;
+
+ if (testPatternModes_.empty()) {
+ LOG(CameraSensor, Error)
+ << "Camera sensor does not support test pattern modes.";
+ return -EINVAL;
+ }
+
+ return applyTestPatternMode(mode);
+}
+
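+/*
+ * Translate the requested test pattern mode to the sensor-specific V4L2
+ * control index through the static properties table and program it with
+ * V4L2_CID_TEST_PATTERN.
+ */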
+int CameraSensorRaw::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
+{
+ if (testPatternModes_.empty())
+ return 0;
+
+ auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
+ mode);
+ if (it == testPatternModes_.end()) {
+ LOG(CameraSensor, Error) << "Unsupported test pattern mode "
+ << mode;
+ return -EINVAL;
+ }
+
+ LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
+
+ int32_t index = staticProps_->testPatternModes.at(mode);
+ ControlList ctrls{ controls() };
+ ctrls.set(V4L2_CID_TEST_PATTERN, index);
+
+ int ret = setControls(&ctrls);
+ if (ret)
+ return ret;
+
+ testPatternMode_ = mode;
+
+ return 0;
+}
+
+std::string CameraSensorRaw::logPrefix() const
+{
+ return "'" + entity_->name() + "'";
+}
+
+REGISTER_CAMERA_SENSOR(CameraSensorRaw, 0)
+
+} /* namespace libcamera */
diff --git a/src/libcamera/sensor/meson.build b/src/libcamera/sensor/meson.build
index f0d58897..dce74ed6 100644
--- a/src/libcamera/sensor/meson.build
+++ b/src/libcamera/sensor/meson.build
@@ -4,4 +4,5 @@ libcamera_internal_sources += files([
'camera_sensor.cpp',
'camera_sensor_legacy.cpp',
'camera_sensor_properties.cpp',
+ 'camera_sensor_raw.cpp',
])
diff --git a/src/libcamera/v4l2_pixelformat.cpp b/src/libcamera/v4l2_pixelformat.cpp
index eb9ac222..e8b3eb9c 100644
--- a/src/libcamera/v4l2_pixelformat.cpp
+++ b/src/libcamera/v4l2_pixelformat.cpp
@@ -373,6 +373,40 @@ V4L2PixelFormat::fromPixelFormat(const PixelFormat &pixelFormat)
}
/**
+ * \brief Test if a V4L2PixelFormat is one of the generic line-based metadata
+ * formats
+ *
+ * A limited number of metadata formats, the ones that represent generic
+ * line-based metadata buffers, need to have their width, height and
+ * bytesperline set by userspace.
+ *
+ * This function tests if this V4L2PixelFormat is one of those formats.
+ *
+ * Note: It would have been nicer to store this information in a
+ * V4L2PixelFormat::Info instance, but as metadata formats are not exposed to
+ * applications, there are no PixelFormat or DRM fourcc codes associated with
+ * them.
+ *
+ * \return True if the V4L2PixelFormat is a generic line-based metadata format,
+ * false otherwise
+ */
+bool V4L2PixelFormat::isGenericLineBasedMetadata() const
+{
+ switch (fourcc_) {
+ case V4L2_META_FMT_GENERIC_8:
+ case V4L2_META_FMT_GENERIC_CSI2_10:
+ case V4L2_META_FMT_GENERIC_CSI2_12:
+ case V4L2_META_FMT_GENERIC_CSI2_14:
+ case V4L2_META_FMT_GENERIC_CSI2_16:
+ case V4L2_META_FMT_GENERIC_CSI2_20:
+ case V4L2_META_FMT_GENERIC_CSI2_24:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* \brief Insert a text representation of a V4L2PixelFormat into an output
* stream
* \param[in] out The output stream
diff --git a/src/libcamera/v4l2_subdevice.cpp b/src/libcamera/v4l2_subdevice.cpp
index 3a0d075f..7a064d87 100644
--- a/src/libcamera/v4l2_subdevice.cpp
+++ b/src/libcamera/v4l2_subdevice.cpp
@@ -742,6 +742,69 @@ const std::map<uint32_t, MediaBusFormatInfo> mediaBusFormatInfo{
.bitsPerPixel = 0,
.colourEncoding = PixelFormatInfo::ColourEncodingRAW,
} },
+ { MEDIA_BUS_FMT_META_8, {
+ .name = "META_8",
+ .code = MEDIA_BUS_FMT_META_8,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 8,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_10, {
+ .name = "META_10",
+ .code = MEDIA_BUS_FMT_META_10,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 10,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_12, {
+ .name = "META_12",
+ .code = MEDIA_BUS_FMT_META_12,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 12,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_14, {
+ .name = "META_14",
+ .code = MEDIA_BUS_FMT_META_14,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 14,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_16, {
+ .name = "META_16",
+ .code = MEDIA_BUS_FMT_META_16,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 16,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_20, {
+ .name = "META_20",
+ .code = MEDIA_BUS_FMT_META_20,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 20,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_META_24, {
+ .name = "META_24",
+ .code = MEDIA_BUS_FMT_META_24,
+ .type = MediaBusFormatInfo::Type::Metadata,
+ .bitsPerPixel = 24,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_CCS_EMBEDDED, {
+ .name = "CCS_EMBEDDED",
+ .code = MEDIA_BUS_FMT_CCS_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
+ { MEDIA_BUS_FMT_OV2740_EMBEDDED, {
+ .name = "OV2740_EMBEDDED",
+ .code = MEDIA_BUS_FMT_OV2740_EMBEDDED,
+ .type = MediaBusFormatInfo::Type::EmbeddedData,
+ .bitsPerPixel = 0,
+ .colourEncoding = PixelFormatInfo::ColourEncodingRAW,
+ } },
};
} /* namespace */
diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp
index a5cf6784..e241eb47 100644
--- a/src/libcamera/v4l2_videodevice.cpp
+++ b/src/libcamera/v4l2_videodevice.cpp
@@ -888,7 +888,7 @@ int V4L2VideoDevice::setFormat(V4L2DeviceFormat *format)
int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
{
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
@@ -898,25 +898,42 @@ int V4L2VideoDevice::getFormatMeta(V4L2DeviceFormat *format)
return ret;
}
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
+ format->planes[0].size = meta->buffersize;
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
+
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
{
+ bool genericLineBased = caps_.isMetaCapture() &&
+ format->fourcc.isGenericLineBasedMetadata();
struct v4l2_format v4l2Format = {};
- struct v4l2_meta_format *pix = &v4l2Format.fmt.meta;
+ struct v4l2_meta_format *meta = &v4l2Format.fmt.meta;
int ret;
v4l2Format.type = bufferType_;
- pix->dataformat = format->fourcc;
- pix->buffersize = format->planes[0].size;
+ meta->dataformat = format->fourcc;
+ meta->buffersize = format->planes[0].size;
+ if (genericLineBased) {
+ meta->width = format->size.width;
+ meta->height = format->size.height;
+ meta->bytesperline = format->planes[0].bpl;
+ }
ret = ioctl(set ? VIDIOC_S_FMT : VIDIOC_TRY_FMT, &v4l2Format);
if (ret) {
LOG(V4L2, Error)
@@ -929,12 +946,18 @@ int V4L2VideoDevice::trySetFormatMeta(V4L2DeviceFormat *format, bool set)
* Return to caller the format actually applied on the video device,
* which might differ from the requested one.
*/
- format->size.width = 0;
- format->size.height = 0;
- format->fourcc = V4L2PixelFormat(pix->dataformat);
+ format->fourcc = V4L2PixelFormat(meta->dataformat);
format->planesCount = 1;
- format->planes[0].bpl = pix->buffersize;
- format->planes[0].size = pix->buffersize;
+ format->planes[0].size = meta->buffersize;
+ if (genericLineBased) {
+ format->size.width = meta->width;
+ format->size.height = meta->height;
+ format->planes[0].bpl = meta->bytesperline;
+ } else {
+ format->size.width = 0;
+ format->size.height = 0;
+ format->planes[0].bpl = meta->buffersize;
+ }
return 0;
}
diff --git a/src/v4l2/meson.build b/src/v4l2/meson.build
index 58f53bf3..2c040414 100644
--- a/src/v4l2/meson.build
+++ b/src/v4l2/meson.build
@@ -1,12 +1,11 @@
# SPDX-License-Identifier: CC0-1.0
-if not get_option('v4l2')
- v4l2_enabled = false
+v4l2_enabled = get_option('v4l2').allowed()
+
+if not v4l2_enabled
subdir_done()
endif
-v4l2_enabled = true
-
v4l2_compat_sources = files([
'v4l2_camera.cpp',
'v4l2_camera_file.cpp',